Merge branch 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scheduler fixes from Ingo Molnar:
 "Misc fixes: cputime fixes, two deadline scheduler fixes and a cgroups
  scheduling fix"

* 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  sched/cputime: Fix omitted ticks passed in parameter
  sched/cputime: Fix steal time accounting
  sched/deadline: Fix lock pinning warning during CPU hotplug
  sched/cputime: Mitigate performance regression in times()/clock_gettime()
  sched/fair: Fix typo in sync_throttle()
  sched/deadline: Fix wrap-around in DL heap
commit e6e7214fbb
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -74,6 +74,7 @@
 #include <linux/context_tracking.h>
 #include <linux/compiler.h>
 #include <linux/frame.h>
+#include <linux/prefetch.h>
 
 #include <asm/switch_to.h>
 #include <asm/tlb.h>
@@ -2971,6 +2972,23 @@ DEFINE_PER_CPU(struct kernel_cpustat, kernel_cpustat);
 EXPORT_PER_CPU_SYMBOL(kstat);
 EXPORT_PER_CPU_SYMBOL(kernel_cpustat);
 
+/*
+ * The function fair_sched_class.update_curr accesses the struct curr
+ * and its field curr->exec_start; when called from task_sched_runtime(),
+ * we observe a high rate of cache misses in practice.
+ * Prefetching this data results in improved performance.
+ */
+static inline void prefetch_curr_exec_start(struct task_struct *p)
+{
+#ifdef CONFIG_FAIR_GROUP_SCHED
+	struct sched_entity *curr = (&p->se)->cfs_rq->curr;
+#else
+	struct sched_entity *curr = (&task_rq(p)->cfs)->curr;
+#endif
+	prefetch(curr);
+	prefetch(&curr->exec_start);
+}
+
 /*
  * Return accounted runtime for the task.
  * In case the task is currently running, return the runtime plus current's
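A note on prefetch(): it is the kernel's cache-prefetch hint and compiles down to the architecture's prefetch instruction. As a rough userspace analogue of the pattern above (a sketch only; sched_entity_lite is a hypothetical stand-in, and __builtin_prefetch is the GCC/Clang builtin):

	struct sched_entity_lite {			/* hypothetical stand-in */
		unsigned long long exec_start;
		/* ... other hot fields ... */
	};

	static inline void prefetch_exec_start(struct sched_entity_lite *curr)
	{
		/* Prefetch is only a hint and never faults on typical targets,
		 * so no NULL check is needed: pull the object and its
		 * exec_start field toward the cache before the caller
		 * dereferences them. */
		__builtin_prefetch(curr, 0 /* read */, 3 /* high locality */);
		__builtin_prefetch(&curr->exec_start, 0, 3);
	}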
@@ -3005,6 +3023,7 @@ unsigned long long task_sched_runtime(struct task_struct *p)
 	 * thread, breaking clock_gettime().
 	 */
 	if (task_current(rq, p) && task_on_rq_queued(p)) {
+		prefetch_curr_exec_start(p);
 		update_rq_clock(rq);
 		p->sched_class->update_curr(rq);
 	}
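The regression these two hunks mitigate shows up when many threads of one process read the process-wide CPU clock concurrently, since each reader ends up in task_sched_runtime() for every sibling thread. A minimal reproducer sketch under that assumption (thread and iteration counts are arbitrary, not taken from the commit):

	/* Build: cc -O2 -pthread repro.c -o repro */
	#include <pthread.h>
	#include <time.h>

	#define NTHREADS 16
	#define NCALLS   1000000L

	static void *worker(void *arg)
	{
		struct timespec ts;

		(void)arg;
		/* Each call sums runtime over all threads of the process,
		 * which is where the cache misses were observed. */
		for (long i = 0; i < NCALLS; i++)
			clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &ts);
		return NULL;
	}

	int main(void)
	{
		pthread_t tid[NTHREADS];

		for (int i = 0; i < NTHREADS; i++)
			pthread_create(&tid[i], NULL, worker, NULL);
		for (int i = 0; i < NTHREADS; i++)
			pthread_join(tid[i], NULL);
		return 0;
	}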
diff --git a/kernel/sched/cpudeadline.c b/kernel/sched/cpudeadline.c
--- a/kernel/sched/cpudeadline.c
+++ b/kernel/sched/cpudeadline.c
@@ -168,7 +168,7 @@ void cpudl_set(struct cpudl *cp, int cpu, u64 dl, int is_valid)
 
 	if (old_idx == IDX_INVALID) {
 		cp->size++;
-		cp->elements[cp->size - 1].dl = 0;
+		cp->elements[cp->size - 1].dl = dl;
 		cp->elements[cp->size - 1].cpu = cpu;
 		cp->elements[cpu].idx = cp->size - 1;
 		cpudl_change_key(cp, cp->size - 1, dl);
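Context for the "wrap-around" in the subject line: deadlines are u64 clock values compared with a wrap-safe ordering, so 0 is not a guaranteed minimum key, and seeding a new heap element with .dl = 0 could leave the max-heap mis-ordered once the clock wraps. The comparison in question has this shape (a sketch mirroring the kernel's dl_time_before()):

	#include <stdint.h>
	#include <stdbool.h>

	/* Wrap-safe "a is earlier than b" for u64 clock values: the signed
	 * difference stays meaningful across a counter wrap, provided the
	 * two timestamps are less than 2^63 apart. */
	static inline bool dl_time_before(uint64_t a, uint64_t b)
	{
		return (int64_t)(a - b) < 0;
	}

Inserting the element with its real deadline keeps cpudl_change_key()'s sift direction consistent with this ordering.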
diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
--- a/kernel/sched/cputime.c
+++ b/kernel/sched/cputime.c
@@ -508,13 +508,21 @@ void account_process_tick(struct task_struct *p, int user_tick)
  */
 void account_idle_ticks(unsigned long ticks)
 {
+	cputime_t cputime, steal;
 
 	if (sched_clock_irqtime) {
 		irqtime_account_idle_ticks(ticks);
 		return;
 	}
 
-	account_idle_time(jiffies_to_cputime(ticks));
+	cputime = jiffies_to_cputime(ticks);
+	steal = steal_account_process_time(cputime);
+
+	if (steal >= cputime)
+		return;
+
+	cputime -= steal;
+	account_idle_time(cputime);
 }
 
 /*
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -658,8 +658,11 @@ static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
 	 *
 	 * XXX figure out if select_task_rq_dl() deals with offline cpus.
 	 */
-	if (unlikely(!rq->online))
+	if (unlikely(!rq->online)) {
+		lockdep_unpin_lock(&rq->lock, rf.cookie);
 		rq = dl_task_offline_migration(rq, p);
+		rf.cookie = lockdep_pin_lock(&rq->lock);
+	}
 
 	/*
 	 * Queueing this task back might have overloaded rq, check if we need
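The discipline behind this fix: lockdep "pinning" asserts that a lock is not dropped while a pin cookie is outstanding, and dl_task_offline_migration() legitimately drops rq->lock (and may return a different rq). So the caller unpins before the call and re-pins whichever lock it actually holds afterwards. A toy userspace model of the invariant (an illustration only, not the kernel's lockdep API):

	#include <assert.h>
	#include <pthread.h>

	/* Toy model: unlocking while a pin is outstanding trips the assert,
	 * which is the class of warning the hunk above avoids. */
	struct pinned_mutex {
		pthread_mutex_t	lock;
		int		pin_count;
	};

	struct pin_cookie { int token; };

	static struct pin_cookie pin_lock(struct pinned_mutex *m)
	{
		return (struct pin_cookie){ .token = ++m->pin_count };
	}

	static void unpin_lock(struct pinned_mutex *m, struct pin_cookie c)
	{
		(void)c;
		m->pin_count--;
	}

	static void pinned_unlock(struct pinned_mutex *m)
	{
		assert(m->pin_count == 0);	/* dropping a pinned lock is a bug */
		pthread_mutex_unlock(&m->lock);
	}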
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4269,7 +4269,7 @@ static void sync_throttle(struct task_group *tg, int cpu)
 	pcfs_rq = tg->parent->cfs_rq[cpu];
 
 	cfs_rq->throttle_count = pcfs_rq->throttle_count;
-	pcfs_rq->throttled_clock_task = rq_clock_task(cpu_rq(cpu));
+	cfs_rq->throttled_clock_task = rq_clock_task(cpu_rq(cpu));
 }
 
 /* conditionally throttle active cfs_rq's from put_prev_entity() */
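The typo in question: when a child cfs_rq is brought in sync with a throttled parent, it is the child's throttled_clock_task that should be stamped with the current clock; the buggy line clobbered the parent's timestamp instead. A simplified illustration of the intended copy (hypothetical lite types, not kernel code):

	#include <stdint.h>

	struct cfs_rq_lite {			/* hypothetical stand-in */
		int	 throttle_count;	/* throttles active above us */
		uint64_t throttled_clock_task;	/* when our task clock froze */
	};

	static void sync_throttle_lite(struct cfs_rq_lite *child,
				       const struct cfs_rq_lite *parent,
				       uint64_t now)
	{
		child->throttle_count = parent->throttle_count;
		child->throttled_clock_task = now; /* stamp the child, not the parent */
	}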