forked from luck/tmp_suning_uos_patched
sched: fix load scaling in group balancing
Doing the load balance will change cfs_rq->load.weight (that's the whole point), but since that weight is part of the scale factor, we would scale back by a different amount than we scaled forward. The weight getting smaller would result in an inflated moved_load, which causes the balancer to stop too soon. Fix this by snapshotting load.weight and h_load into locals before moving tasks, so the same values are used for both the forward and the reverse scaling. Signed-off-by: Peter Zijlstra <peterz@infradead.org> Cc: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com> Cc: Mike Galbraith <efault@gmx.de> Signed-off-by: Ingo Molnar <mingo@elte.hu>
This commit is contained in:
parent
408ed066b1
commit
42a3ac7d5c
@ -1444,6 +1444,8 @@ load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
|
|||||||
|
|
||||||
list_for_each_entry(tg, &task_groups, list) {
|
list_for_each_entry(tg, &task_groups, list) {
|
||||||
struct cfs_rq *busiest_cfs_rq = tg->cfs_rq[busiest_cpu];
|
struct cfs_rq *busiest_cfs_rq = tg->cfs_rq[busiest_cpu];
|
||||||
|
unsigned long busiest_h_load = busiest_cfs_rq->h_load;
|
||||||
|
unsigned long busiest_weight = busiest_cfs_rq->load.weight;
|
||||||
long rem_load, moved_load;
|
long rem_load, moved_load;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
@ -1452,8 +1454,8 @@ load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
|
|||||||
if (!busiest_cfs_rq->task_weight)
|
if (!busiest_cfs_rq->task_weight)
|
||||||
continue;
|
continue;
|
||||||
|
|
||||||
rem_load = rem_load_move * busiest_cfs_rq->load.weight;
|
rem_load = rem_load_move * busiest_weight;
|
||||||
rem_load /= busiest_cfs_rq->h_load + 1;
|
rem_load /= busiest_h_load + 1;
|
||||||
|
|
||||||
moved_load = __load_balance_fair(this_rq, this_cpu, busiest,
|
moved_load = __load_balance_fair(this_rq, this_cpu, busiest,
|
||||||
rem_load, sd, idle, all_pinned, this_best_prio,
|
rem_load, sd, idle, all_pinned, this_best_prio,
|
||||||
@ -1462,8 +1464,8 @@ load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
|
|||||||
if (!moved_load)
|
if (!moved_load)
|
||||||
continue;
|
continue;
|
||||||
|
|
||||||
moved_load *= busiest_cfs_rq->h_load;
|
moved_load *= busiest_h_load;
|
||||||
moved_load /= busiest_cfs_rq->load.weight + 1;
|
moved_load /= busiest_weight + 1;
|
||||||
|
|
||||||
rem_load_move -= moved_load;
|
rem_load_move -= moved_load;
|
||||||
if (rem_load_move < 0)
|
if (rem_load_move < 0)
|
||||||
|
Loading…
Reference in New Issue
Block a user