Merge branch 'timers/nohz-posix-timers-v2' of git://git.kernel.org/pub/scm/linux/kernel/git/frederic/linux-dynticks into timers/nohz

Pull posix cpu timers handling on full dynticks from Frederic Weisbecker.

Signed-off-by: Ingo Molnar <mingo@kernel.org>

commit a166fcf04d
include/linux/posix-timers.h:

@@ -122,6 +122,8 @@ void run_posix_cpu_timers(struct task_struct *task);
 void posix_cpu_timers_exit(struct task_struct *task);
 void posix_cpu_timers_exit_group(struct task_struct *task);
 
+bool posix_cpu_timers_can_stop_tick(struct task_struct *tsk);
+
 void set_process_cpu_timer(struct task_struct *task, unsigned int clock_idx,
 			   cputime_t *newval, cputime_t *oldval);
 
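The new posix_cpu_timers_can_stop_tick() declaration is the interesting part of this hunk: it lets the tick layer ask whether a task still depends on the periodic tick. A minimal sketch of a hypothetical caller (can_stop_full_tick() and its other checks are not part of this commit; they belong to the later stop/restart infrastructure):

#include <linux/posix-timers.h>
#include <linux/sched.h>

/* Hypothetical caller, for illustration only. */
static bool can_stop_full_tick(void)
{
	/*
	 * An armed posix cpu timer must keep the tick alive so it
	 * gets sampled and expired on this CPU.
	 */
	if (!posix_cpu_timers_can_stop_tick(current))
		return false;

	/* ... other tick dependencies would be checked here ... */
	return true;
}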
include/linux/tick.h:

@@ -160,9 +160,13 @@ static inline u64 get_cpu_iowait_time_us(int cpu, u64 *unused) { return -1; }
 #ifdef CONFIG_NO_HZ_FULL
 extern void tick_nohz_init(void);
 extern int tick_nohz_full_cpu(int cpu);
+extern void tick_nohz_full_kick(void);
+extern void tick_nohz_full_kick_all(void);
 #else
 static inline void tick_nohz_init(void) { }
 static inline int tick_nohz_full_cpu(int cpu) { return 0; }
+static inline void tick_nohz_full_kick(void) { }
+static inline void tick_nohz_full_kick_all(void) { }
 #endif
 
kernel/posix-cpu-timers.c:

@@ -10,6 +10,8 @@
 #include <linux/kernel_stat.h>
 #include <trace/events/timer.h>
 #include <linux/random.h>
+#include <linux/tick.h>
+#include <linux/workqueue.h>
 
 /*
  * Called after updating RLIMIT_CPU to run cpu timer and update
@@ -153,6 +155,21 @@ static void bump_cpu_timer(struct k_itimer *timer,
 	}
 }
 
+/**
+ * task_cputime_zero - Check a task_cputime struct for all zero fields.
+ *
+ * @cputime:	The struct to compare.
+ *
+ * Checks @cputime to see if all fields are zero.  Returns true if all fields
+ * are zero, false if any field is nonzero.
+ */
+static inline int task_cputime_zero(const struct task_cputime *cputime)
+{
+	if (!cputime->utime && !cputime->stime && !cputime->sum_exec_runtime)
+		return 1;
+	return 0;
+}
+
 static inline cputime_t prof_ticks(struct task_struct *p)
 {
 	cputime_t utime, stime;
@@ -636,6 +653,37 @@ static int cpu_timer_sample_group(const clockid_t which_clock,
 	return 0;
 }
 
+#ifdef CONFIG_NO_HZ_FULL
+static void nohz_kick_work_fn(struct work_struct *work)
+{
+	tick_nohz_full_kick_all();
+}
+
+static DECLARE_WORK(nohz_kick_work, nohz_kick_work_fn);
+
+/*
+ * We need the IPIs to be sent from sane process context.
+ * The posix cpu timers are always set with irqs disabled.
+ */
+static void posix_cpu_timer_kick_nohz(void)
+{
+	schedule_work(&nohz_kick_work);
+}
+
+bool posix_cpu_timers_can_stop_tick(struct task_struct *tsk)
+{
+	if (!task_cputime_zero(&tsk->cputime_expires))
+		return false;
+
+	if (tsk->signal->cputimer.running)
+		return false;
+
+	return true;
+}
+#else
+static inline void posix_cpu_timer_kick_nohz(void) { }
+#endif
+
 /*
  * Guts of sys_timer_settime for CPU timers.
  * This is called with the timer locked and interrupts disabled.
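The workqueue bounce above exists because schedule_work() merely queues the item and is safe with irqs disabled, while the tick_nohz_full_kick_all() IPI broadcast then runs later from the worker in process context. The same pattern in isolation, with hypothetical names (arm_timer_example() is not kernel code):

#include <linux/workqueue.h>
#include <linux/irqflags.h>
#include <linux/tick.h>

static void kick_fn(struct work_struct *work)
{
	tick_nohz_full_kick_all();	/* IPIs sent from process context */
}
static DECLARE_WORK(kick_work, kick_fn);

static void arm_timer_example(void)	/* hypothetical */
{
	unsigned long flags;

	local_irq_save(flags);
	/* ... arm a cpu timer; sending IPIs here would be unsafe ... */
	schedule_work(&kick_work);	/* only queues the work, no IPI yet */
	local_irq_restore(flags);
}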
@@ -794,6 +842,8 @@ static int posix_cpu_timer_set(struct k_itimer *timer, int flags,
 		sample_to_timespec(timer->it_clock,
 				   old_incr, &old->it_interval);
 	}
+	if (!ret)
+		posix_cpu_timer_kick_nohz();
 	return ret;
 }
 
@@ -1008,21 +1058,6 @@ static void check_cpu_itimer(struct task_struct *tsk, struct cpu_itimer *it,
 	}
 }
 
-/**
- * task_cputime_zero - Check a task_cputime struct for all zero fields.
- *
- * @cputime:	The struct to compare.
- *
- * Checks @cputime to see if all fields are zero.  Returns true if all fields
- * are zero, false if any field is nonzero.
- */
-static inline int task_cputime_zero(const struct task_cputime *cputime)
-{
-	if (!cputime->utime && !cputime->stime && !cputime->sum_exec_runtime)
-		return 1;
-	return 0;
-}
-
 /*
  * Check for any per-thread CPU timers that have fired and move them
  * off the tsk->*_timers list onto the firing list.  Per-thread timers
@@ -1336,6 +1371,13 @@ void run_posix_cpu_timers(struct task_struct *tsk)
 		cpu_timer_fire(timer);
 		spin_unlock(&timer->it_lock);
 	}
+
+	/*
+	 * In case some timers were rescheduled after the queue got emptied,
+	 * wake up full dynticks CPUs.
+	 */
+	if (tsk->signal->cputimer.running)
+		posix_cpu_timer_kick_nohz();
 }
 
 /*
@@ -1366,7 +1408,7 @@ void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
 	}
 
 	if (!*newval)
-		return;
+		goto out;
 	*newval += now.cpu;
 }
 
@@ -1384,6 +1426,8 @@ void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
 		tsk->signal->cputime_expires.virt_exp = *newval;
 		break;
 	}
+out:
+	posix_cpu_timer_kick_nohz();
 }
 
 static int do_cpu_nanosleep(const clockid_t which_clock, int flags,
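The return-to-goto conversion two hunks up and the out: label here combine so that posix_cpu_timer_kick_nohz() runs on every exit path of set_process_cpu_timer(). The shape of that pattern, sketched with a hypothetical function assumed to sit in the same file:

/* Illustrative only; assumes access to the static posix_cpu_timer_kick_nohz(). */
static void settime_example(cputime_t *newval)
{
	if (!*newval)
		goto out;	/* was: return; */

	/* ... arm the timer and update the expiry caches ... */
out:
	posix_cpu_timer_kick_nohz();	/* reached on every path */
}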
kernel/time/Kconfig:

@@ -111,6 +111,7 @@ config NO_HZ_FULL
 	select RCU_USER_QS
 	select RCU_NOCB_CPU
 	select CONTEXT_TRACKING_FORCE
+	select IRQ_WORK
 	help
 	 Adaptively try to shutdown the tick whenever possible, even when
 	 the CPU is running tasks. Typically this requires running a single
kernel/time/tick-sched.c:

@@ -147,6 +147,57 @@ static void tick_sched_handle(struct tick_sched *ts, struct pt_regs *regs)
 static cpumask_var_t nohz_full_mask;
 bool have_nohz_full_mask;
 
+/*
+ * Re-evaluate the need for the tick on the current CPU
+ * and restart it if necessary.
+ */
+static void tick_nohz_full_check(void)
+{
+	/*
+	 * STUB for now, will be filled with the full tick stop/restart
+	 * infrastructure patches
+	 */
+}
+
+static void nohz_full_kick_work_func(struct irq_work *work)
+{
+	tick_nohz_full_check();
+}
+
+static DEFINE_PER_CPU(struct irq_work, nohz_full_kick_work) = {
+	.func = nohz_full_kick_work_func,
+};
+
+/*
+ * Kick the current CPU if it's full dynticks in order to force it to
+ * re-evaluate its dependency on the tick and restart it if necessary.
+ */
+void tick_nohz_full_kick(void)
+{
+	if (tick_nohz_full_cpu(smp_processor_id()))
+		irq_work_queue(&__get_cpu_var(nohz_full_kick_work));
+}
+
+static void nohz_full_kick_ipi(void *info)
+{
+	tick_nohz_full_check();
+}
+
+/*
+ * Kick all full dynticks CPUs in order to force these to re-evaluate
+ * their dependency on the tick and restart it if necessary.
+ */
+void tick_nohz_full_kick_all(void)
+{
+	if (!have_nohz_full_mask)
+		return;
+
+	preempt_disable();
+	smp_call_function_many(nohz_full_mask,
+			       nohz_full_kick_ipi, NULL, false);
+	preempt_enable();
+}
+
 int tick_nohz_full_cpu(int cpu)
 {
 	if (!have_nohz_full_mask)
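tick_nohz_full_check() is deliberately a stub in this merge. A hedged sketch of what the per-CPU re-evaluation presumably grows into once the stop/restart infrastructure lands (tick_nohz_restart_tick() is a placeholder name, not this commit's code):

static void tick_nohz_full_check_sketch(void)
{
	/* Only full dynticks CPUs ever stop their tick. */
	if (!tick_nohz_full_cpu(smp_processor_id()))
		return;

	/*
	 * If some subsystem, e.g. posix cpu timers, now needs the
	 * tick again, restart it on this CPU.
	 */
	if (!posix_cpu_timers_can_stop_tick(current))
		tick_nohz_restart_tick();	/* placeholder */
}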