forked from luck/tmp_suning_uos_patched
sched: eliminate __rq_clock() use
eliminate __rq_clock() use by changing it to: `__update_rq_clock(rq); now = rq->clock;` — an identity transformation, no change in behavior. Signed-off-by: Ingo Molnar <mingo@elte.hu>
This commit is contained in:
parent
2ab81159fa
commit
c1b3da3ecd
|
@ -1967,9 +1967,12 @@ static void update_cpu_load(struct rq *this_rq)
|
|||
unsigned long total_load = this_rq->ls.load.weight;
|
||||
unsigned long this_load = total_load;
|
||||
struct load_stat *ls = &this_rq->ls;
|
||||
u64 now = __rq_clock(this_rq);
|
||||
u64 now;
|
||||
int i, scale;
|
||||
|
||||
__update_rq_clock(this_rq);
|
||||
now = this_rq->clock;
|
||||
|
||||
this_rq->nr_load_updates++;
|
||||
if (unlikely(!(sysctl_sched_features & SCHED_FEAT_PRECISE_CPU_LOAD)))
|
||||
goto do_avg;
|
||||
|
@ -3458,7 +3461,8 @@ asmlinkage void __sched schedule(void)
|
|||
|
||||
spin_lock_irq(&rq->lock);
|
||||
clear_tsk_need_resched(prev);
|
||||
now = __rq_clock(rq);
|
||||
__update_rq_clock(rq);
|
||||
now = rq->clock;
|
||||
|
||||
if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
|
||||
if (unlikely((prev->state & TASK_INTERRUPTIBLE) &&
|
||||
|
|
|
@ -672,7 +672,10 @@ static void entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
|
|||
{
|
||||
struct rq *rq = rq_of(cfs_rq);
|
||||
struct sched_entity *next;
|
||||
u64 now = __rq_clock(rq);
|
||||
u64 now;
|
||||
|
||||
__update_rq_clock(rq);
|
||||
now = rq->clock;
|
||||
|
||||
/*
|
||||
* Dequeue and enqueue the task to update its
|
||||
|
@ -824,8 +827,10 @@ dequeue_task_fair(struct rq *rq, struct task_struct *p, int sleep, u64 now)
|
|||
static void yield_task_fair(struct rq *rq, struct task_struct *p)
|
||||
{
|
||||
struct cfs_rq *cfs_rq = task_cfs_rq(p);
|
||||
u64 now = __rq_clock(rq);
|
||||
u64 now;
|
||||
|
||||
__update_rq_clock(rq);
|
||||
now = rq->clock;
|
||||
/*
|
||||
* Dequeue and enqueue the task to update its
|
||||
* position within the tree:
|
||||
|
|
Loading…
Reference in New Issue
Block a user