forked from luck/tmp_suning_uos_patched
sched: introduce avg_wakeup
Introduce a new avg_wakeup statistic. avg_wakeup is a measure of how frequently a task wakes up other tasks; it represents the average time between wakeups, limited to avg_runtime when the task doesn't wake up anybody. Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl> Signed-off-by: Mike Galbraith <efault@gmx.de> Signed-off-by: Ingo Molnar <mingo@elte.hu>
This commit is contained in:
parent
a6525042bf
commit
831451ac4e
@@ -1046,6 +1046,9 @@ struct sched_entity {
|
||||
u64 exec_max;
|
||||
u64 slice_max;
|
||||
|
||||
u64 start_runtime;
|
||||
u64 avg_wakeup;
|
||||
|
||||
u64 nr_migrations;
|
||||
u64 nr_migrations_cold;
|
||||
u64 nr_failed_migrations_affine;
|
||||
|
@@ -1705,6 +1705,9 @@ static void update_avg(u64 *avg, u64 sample)
|
||||
|
||||
static void enqueue_task(struct rq *rq, struct task_struct *p, int wakeup)
|
||||
{
|
||||
if (wakeup)
|
||||
p->se.start_runtime = p->se.sum_exec_runtime;
|
||||
|
||||
sched_info_queued(p);
|
||||
p->sched_class->enqueue_task(rq, p, wakeup);
|
||||
p->se.on_rq = 1;
|
||||
@@ -1712,10 +1715,15 @@ static void enqueue_task(struct rq *rq, struct task_struct *p, int wakeup)
|
||||
|
||||
static void dequeue_task(struct rq *rq, struct task_struct *p, int sleep)
|
||||
{
|
||||
if (sleep && p->se.last_wakeup) {
|
||||
update_avg(&p->se.avg_overlap,
|
||||
p->se.sum_exec_runtime - p->se.last_wakeup);
|
||||
p->se.last_wakeup = 0;
|
||||
if (sleep) {
|
||||
if (p->se.last_wakeup) {
|
||||
update_avg(&p->se.avg_overlap,
|
||||
p->se.sum_exec_runtime - p->se.last_wakeup);
|
||||
p->se.last_wakeup = 0;
|
||||
} else {
|
||||
update_avg(&p->se.avg_wakeup,
|
||||
sysctl_sched_wakeup_granularity);
|
||||
}
|
||||
}
|
||||
|
||||
sched_info_dequeued(p);
|
||||
@@ -2345,6 +2353,22 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
|
||||
activate_task(rq, p, 1);
|
||||
success = 1;
|
||||
|
||||
/*
|
||||
* Only attribute actual wakeups done by this task.
|
||||
*/
|
||||
if (!in_interrupt()) {
|
||||
struct sched_entity *se = ¤t->se;
|
||||
u64 sample = se->sum_exec_runtime;
|
||||
|
||||
if (se->last_wakeup)
|
||||
sample -= se->last_wakeup;
|
||||
else
|
||||
sample -= se->start_runtime;
|
||||
update_avg(&se->avg_wakeup, sample);
|
||||
|
||||
se->last_wakeup = se->sum_exec_runtime;
|
||||
}
|
||||
|
||||
out_running:
|
||||
trace_sched_wakeup(rq, p, success);
|
||||
check_preempt_curr(rq, p, sync);
|
||||
@@ -2355,8 +2379,6 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
|
||||
p->sched_class->task_wake_up(rq, p);
|
||||
#endif
|
||||
out:
|
||||
current->se.last_wakeup = current->se.sum_exec_runtime;
|
||||
|
||||
task_rq_unlock(rq, &flags);
|
||||
|
||||
return success;
|
||||
@@ -2386,6 +2408,8 @@ static void __sched_fork(struct task_struct *p)
|
||||
p->se.prev_sum_exec_runtime = 0;
|
||||
p->se.last_wakeup = 0;
|
||||
p->se.avg_overlap = 0;
|
||||
p->se.start_runtime = 0;
|
||||
p->se.avg_wakeup = sysctl_sched_wakeup_granularity;
|
||||
|
||||
#ifdef CONFIG_SCHEDSTATS
|
||||
p->se.wait_start = 0;
|
||||
|
@@ -397,6 +397,7 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
|
||||
PN(se.vruntime);
|
||||
PN(se.sum_exec_runtime);
|
||||
PN(se.avg_overlap);
|
||||
PN(se.avg_wakeup);
|
||||
|
||||
nr_switches = p->nvcsw + p->nivcsw;
|
||||
|
||||
|
Loading…
Reference in New Issue
Block a user