From fa717060f1ab7eb6570f2fb49136f838fc9195a9 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Fri, 25 Jan 2008 21:08:27 +0100
Subject: [PATCH] sched: sched_rt_entity

Move the task_struct members specific to rt scheduling together.
A future optimization could be to put sched_entity and sched_rt_entity
into a union.

Signed-off-by: Peter Zijlstra
CC: Srivatsa Vaddagiri
Signed-off-by: Ingo Molnar
---
 include/linux/init_task.h |  5 +++--
 include/linux/sched.h     |  8 ++++++--
 kernel/sched.c            |  2 +-
 kernel/sched_rt.c         | 20 ++++++++++----------
 mm/oom_kill.c             |  2 +-
 5 files changed, 21 insertions(+), 16 deletions(-)

diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 572c65bcc80f..ee65d87bedb7 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -133,9 +133,10 @@ extern struct group_info init_groups;
 	.nr_cpus_allowed = NR_CPUS,					\
 	.mm		= NULL,						\
 	.active_mm	= &init_mm,					\
-	.run_list	= LIST_HEAD_INIT(tsk.run_list),			\
+	.rt		= {						\
+		.run_list	= LIST_HEAD_INIT(tsk.rt.run_list),	\
+		.time_slice	= HZ, },				\
 	.ioprio		= 0,						\
-	.time_slice	= HZ,						\
 	.tasks		= LIST_HEAD_INIT(tsk.tasks),			\
 	.ptrace_children= LIST_HEAD_INIT(tsk.ptrace_children),		\
 	.ptrace_list	= LIST_HEAD_INIT(tsk.ptrace_list),		\
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 72e1b8ecfbe1..a06d09ebd5c6 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -929,6 +929,11 @@ struct sched_entity {
 #endif
 };
 
+struct sched_rt_entity {
+	struct list_head run_list;
+	unsigned int time_slice;
+};
+
 struct task_struct {
 	volatile long state;	/* -1 unrunnable, 0 runnable, >0 stopped */
 	void *stack;
@@ -945,9 +950,9 @@ struct task_struct {
 #endif
 
 	int prio, static_prio, normal_prio;
-	struct list_head run_list;
 	const struct sched_class *sched_class;
 	struct sched_entity se;
+	struct sched_rt_entity rt;
 
 #ifdef CONFIG_PREEMPT_NOTIFIERS
 	/* list of struct preempt_notifier: */
@@ -972,7 +977,6 @@ struct task_struct {
 	unsigned int policy;
 	cpumask_t cpus_allowed;
 	int nr_cpus_allowed;
-	unsigned int time_slice;
 
 #ifdef CONFIG_PREEMPT_RCU
 	int rcu_read_lock_nesting;
diff --git a/kernel/sched.c b/kernel/sched.c
index 02d468844a91..c2cedd09d895 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1685,7 +1685,7 @@ static void __sched_fork(struct task_struct *p)
 	p->se.wait_max			= 0;
 #endif
 
-	INIT_LIST_HEAD(&p->run_list);
+	INIT_LIST_HEAD(&p->rt.run_list);
 	p->se.on_rq = 0;
 
 #ifdef CONFIG_PREEMPT_NOTIFIERS
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 9affb3c9d3db..29963af782ae 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -111,7 +111,7 @@ static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup)
 {
 	struct rt_prio_array *array = &rq->rt.active;
 
-	list_add_tail(&p->run_list, array->queue + p->prio);
+	list_add_tail(&p->rt.run_list, array->queue + p->prio);
 	__set_bit(p->prio, array->bitmap);
 	inc_cpu_load(rq, p->se.load.weight);
 
@@ -127,7 +127,7 @@ static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
 
 	update_curr_rt(rq);
 
-	list_del(&p->run_list);
+	list_del(&p->rt.run_list);
 	if (list_empty(array->queue + p->prio))
 		__clear_bit(p->prio, array->bitmap);
 	dec_cpu_load(rq, p->se.load.weight);
@@ -143,7 +143,7 @@ static void requeue_task_rt(struct rq *rq, struct task_struct *p)
 {
 	struct rt_prio_array *array = &rq->rt.active;
 
-	list_move_tail(&p->run_list, array->queue + p->prio);
+	list_move_tail(&p->rt.run_list, array->queue + p->prio);
 }
 
 static void
@@ -212,7 +212,7 @@ static struct task_struct *pick_next_task_rt(struct rq *rq)
 		return NULL;
 
 	queue = array->queue + idx;
-	next = list_entry(queue->next, struct task_struct, run_list);
+	next = list_entry(queue->next, struct task_struct, rt.run_list);
 
 	next->se.exec_start = rq->clock;
 
@@ -261,14 +261,14 @@ static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu)
 	queue = array->queue + idx;
 	BUG_ON(list_empty(queue));
 
-	next = list_entry(queue->next, struct task_struct, run_list);
+	next = list_entry(queue->next, struct task_struct, rt.run_list);
 	if (unlikely(pick_rt_task(rq, next, cpu)))
 		goto out;
 
 	if (queue->next->next != queue) {
 		/* same prio task */
 		next = list_entry(queue->next->next, struct task_struct,
-				run_list);
+				rt.run_list);
 		if (pick_rt_task(rq, next, cpu))
 			goto out;
 	}
@@ -282,7 +282,7 @@ static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu)
 		queue = array->queue + idx;
 		BUG_ON(list_empty(queue));
 
-		list_for_each_entry(next, queue, run_list) {
+		list_for_each_entry(next, queue, rt.run_list) {
 			if (pick_rt_task(rq, next, cpu))
 				goto out;
 		}
@@ -846,16 +846,16 @@ static void task_tick_rt(struct rq *rq, struct task_struct *p)
 	if (p->policy != SCHED_RR)
 		return;
 
-	if (--p->time_slice)
+	if (--p->rt.time_slice)
 		return;
 
-	p->time_slice = DEF_TIMESLICE;
+	p->rt.time_slice = DEF_TIMESLICE;
 
 	/*
	 * Requeue to the end of queue if we are not the only element
	 * on the queue:
	 */
-	if (p->run_list.prev != p->run_list.next) {
+	if (p->rt.run_list.prev != p->rt.run_list.next) {
 		requeue_task_rt(rq, p);
 		set_tsk_need_resched(p);
 	}
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 91a081a82f55..96473b482099 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -286,7 +286,7 @@ static void __oom_kill_task(struct task_struct *p, int verbose)
	 * all the memory it needs. That way it should be able to
	 * exit() and clear out its resources quickly...
	 */
-	p->time_slice = HZ;
+	p->rt.time_slice = HZ;
 	set_tsk_thread_flag(p, TIF_MEMDIE);
 
 	force_sig(SIGKILL, p);
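
Not part of the patch: a standalone sketch of the "future optimization"
the changelog mentions. A task is governed by one scheduling class at a
time, so sched_entity and sched_rt_entity could in principle overlay
each other in a union rather than sit side by side as this patch leaves
them. The entity bodies below are trimmed stand-ins (only the
sched_rt_entity fields match the patch), and task_fragment is a
hypothetical excerpt, not the real task_struct; whether the two can
truly share storage depends on a task carrying no live state in the
inactive entity when it changes class.

/*
 * union_sketch.c - illustrative only; build with: cc -std=c99 union_sketch.c
 */
#include <stdio.h>

struct list_head {
	struct list_head *next, *prev;
};

/* Trimmed stand-in for the much larger CFS entity in <linux/sched.h>. */
struct sched_entity {
	struct list_head group_node;
	unsigned long long exec_start;
	unsigned long long sum_exec_runtime;
};

/* The struct introduced by this patch. */
struct sched_rt_entity {
	struct list_head run_list;
	unsigned int time_slice;
};

/*
 * Only one of the two entities is active for a given task, so their
 * storage could be shared; the union occupies the larger of the two.
 */
union sched_entities {
	struct sched_entity se;
	struct sched_rt_entity rt;
};

struct task_fragment {		/* hypothetical task_struct excerpt */
	int prio, static_prio, normal_prio;
	union sched_entities u;	/* storage shared by se and rt */
};

int main(void)
{
	printf("se: %zu bytes, rt: %zu bytes, side by side: %zu bytes\n",
	       sizeof(struct sched_entity), sizeof(struct sched_rt_entity),
	       sizeof(struct sched_entity) + sizeof(struct sched_rt_entity));
	printf("union: %zu bytes (the larger of the two)\n",
	       sizeof(union sched_entities));
	return 0;
}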