forked from luck/tmp_suning_uos_patched
timers: split process wide cpu clocks/timers, fix
To decrease the chance of a missed enable, always enable the timer when we sample it; we'll always disable it when we find that there are no active timers in the jiffy tick. This fixes a flood of warnings reported by Mike Galbraith. Reported-by: Mike Galbraith <efault@gmx.de> Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl> Signed-off-by: Ingo Molnar <mingo@elte.hu>
This commit is contained in:
parent
ff08f76d73
commit
3fccfd67df
|
@ -2209,6 +2209,7 @@ void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times)
|
|||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&cputimer->lock, flags);
|
||||
cputimer->running = 1;
|
||||
*times = cputimer->cputime;
|
||||
spin_unlock_irqrestore(&cputimer->lock, flags);
|
||||
}
|
||||
|
|
|
@ -488,7 +488,7 @@ void posix_cpu_timers_exit_group(struct task_struct *tsk)
|
|||
{
|
||||
struct task_cputime cputime;
|
||||
|
||||
thread_group_cputime(tsk, &cputime);
|
||||
thread_group_cputimer(tsk, &cputime);
|
||||
cleanup_timers(tsk->signal->cpu_timers,
|
||||
cputime.utime, cputime.stime, cputime.sum_exec_runtime);
|
||||
}
|
||||
|
@ -506,29 +506,6 @@ static void clear_dead_task(struct k_itimer *timer, union cpu_time_count now)
|
|||
now);
|
||||
}
|
||||
|
||||
/*
|
||||
* Enable the process wide cpu timer accounting.
|
||||
*
|
||||
* serialized using ->sighand->siglock
|
||||
*/
|
||||
static void start_process_timers(struct task_struct *tsk)
|
||||
{
|
||||
tsk->signal->cputimer.running = 1;
|
||||
barrier();
|
||||
}
|
||||
|
||||
/*
|
||||
* Release the process wide timer accounting -- timer stops ticking when
|
||||
* nobody cares about it.
|
||||
*
|
||||
* serialized using ->sighand->siglock
|
||||
*/
|
||||
static void stop_process_timers(struct task_struct *tsk)
|
||||
{
|
||||
tsk->signal->cputimer.running = 0;
|
||||
barrier();
|
||||
}
|
||||
|
||||
/*
|
||||
* Insert the timer on the appropriate list before any timers that
|
||||
* expire later. This must be called with the tasklist_lock held
|
||||
|
@ -549,9 +526,6 @@ static void arm_timer(struct k_itimer *timer, union cpu_time_count now)
|
|||
BUG_ON(!irqs_disabled());
|
||||
spin_lock(&p->sighand->siglock);
|
||||
|
||||
if (!CPUCLOCK_PERTHREAD(timer->it_clock))
|
||||
start_process_timers(p);
|
||||
|
||||
listpos = head;
|
||||
if (CPUCLOCK_WHICH(timer->it_clock) == CPUCLOCK_SCHED) {
|
||||
list_for_each_entry(next, head, entry) {
|
||||
|
@ -1021,6 +995,19 @@ static void check_thread_timers(struct task_struct *tsk,
|
|||
}
|
||||
}
|
||||
|
||||
static void stop_process_timers(struct task_struct *tsk)
|
||||
{
|
||||
struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
|
||||
unsigned long flags;
|
||||
|
||||
if (!cputimer->running)
|
||||
return;
|
||||
|
||||
spin_lock_irqsave(&cputimer->lock, flags);
|
||||
cputimer->running = 0;
|
||||
spin_unlock_irqrestore(&cputimer->lock, flags);
|
||||
}
|
||||
|
||||
/*
|
||||
* Check for any per-thread CPU timers that have fired and move them
|
||||
* off the tsk->*_timers list onto the firing list. Per-thread timers
|
||||
|
@ -1427,7 +1414,6 @@ void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
|
|||
struct list_head *head;
|
||||
|
||||
BUG_ON(clock_idx == CPUCLOCK_SCHED);
|
||||
start_process_timers(tsk);
|
||||
cpu_timer_sample_group(clock_idx, tsk, &now);
|
||||
|
||||
if (oldval) {
|
||||
|
|
Loading…
Reference in New Issue
Block a user