commit ec846ecd63
Merge branch 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull scheduler fixes from Ingo Molnar:
 "Three CPU hotplug related fixes and a debugging improvement"

* 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  sched/debug: Add debugfs knob for "sched_debug"
  sched/core: WARN() when migrating to an offline CPU
  sched/fair: Plug hole between hotplug and active_load_balance()
  sched/fair: Avoid newidle balance for !active CPUs
kernel/sched/core.c:

@@ -1173,6 +1173,10 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
 	WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) ||
 				      lockdep_is_held(&task_rq(p)->lock)));
 #endif
+	/*
+	 * Clearly, migrating tasks to offline CPUs is a fairly daft thing.
+	 */
+	WARN_ON_ONCE(!cpu_online(new_cpu));
 #endif
 
 	trace_sched_migrate_task(p, new_cpu);
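The new check uses WARN_ON_ONCE() rather than WARN_ON(), so a recurring offline-migration bug produces a single splat instead of flooding the log on every call to set_task_cpu(). A minimal userspace sketch of that once-per-call-site behaviour (a hypothetical demo macro, not the kernel's implementation, which additionally taints the kernel and dumps a backtrace):

	#include <stdbool.h>
	#include <stdio.h>

	/* Hypothetical stand-in for WARN_ON_ONCE(): each use site gets its
	 * own static flag (a GCC statement expression, as used throughout
	 * the kernel), so it warns at most once per site, however often
	 * the condition trips. */
	#define warn_on_once_demo(cond) ({				\
		static bool warned;					\
		bool c = (cond);					\
		if (c && !warned) {					\
			warned = true;					\
			fprintf(stderr, "WARNING at %s:%d: %s\n",	\
				__FILE__, __LINE__, #cond);		\
		}							\
		c;							\
	})

	int main(void)
	{
		for (int i = 0; i < 3; i++)
			warn_on_once_demo(i >= 0);	/* warns only for i == 0 */
		return 0;
	}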
kernel/sched/debug.c:

@@ -181,11 +181,16 @@ static const struct file_operations sched_feat_fops = {
 	.release	= single_release,
 };
 
+__read_mostly bool sched_debug_enabled;
+
 static __init int sched_init_debug(void)
 {
 	debugfs_create_file("sched_features", 0644, NULL, NULL,
 			&sched_feat_fops);
 
+	debugfs_create_bool("sched_debug", 0644, NULL,
+			&sched_debug_enabled);
+
 	return 0;
 }
 late_initcall(sched_init_debug);
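Together with the topology.c hunk further down, this makes the flag writable at run time: previously it could only be set at boot via the sched_debug command-line option. With a NULL parent, debugfs_create_bool() places the file at the debugfs root, normally /sys/kernel/debug/sched_debug; reads report Y or N, and writes of Y/N/1/0 update the bool directly. A minimal sketch of the same debugfs pattern as a standalone module (hypothetical module and file names, not part of this commit):

	#include <linux/module.h>
	#include <linux/debugfs.h>

	static bool demo_enabled;	/* the knob toggled from userspace */
	static struct dentry *demo_dir;	/* our directory under debugfs root */

	static int __init demo_init(void)
	{
		demo_dir = debugfs_create_dir("bool_knob_demo", NULL);
		/* Creates /sys/kernel/debug/bool_knob_demo/enabled; reads
		 * show Y or N, writes of Y/N/1/0 flip demo_enabled. */
		debugfs_create_bool("enabled", 0644, demo_dir, &demo_enabled);
		return 0;
	}

	static void __exit demo_exit(void)
	{
		debugfs_remove_recursive(demo_dir);
	}

	module_init(demo_init);
	module_exit(demo_exit);
	MODULE_LICENSE("GPL");

The sched.h and topology.c hunks below complete the plumbing: the variable stops being a file-local int in topology.c and becomes this global bool, declared in sched.h, so the boot parameter and the debugfs file drive the same flag.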
kernel/sched/fair.c:

@@ -8436,6 +8436,12 @@ static int idle_balance(struct rq *this_rq, struct rq_flags *rf)
 	 */
 	this_rq->idle_stamp = rq_clock(this_rq);
 
+	/*
+	 * Do not pull tasks towards !active CPUs...
+	 */
+	if (!cpu_active(this_cpu))
+		return 0;
+
 	/*
 	 * This is OK, because current is on_cpu, which avoids it being picked
 	 * for load-balance and preemption/IRQs are still disabled avoiding
@@ -8543,6 +8549,13 @@ static int active_load_balance_cpu_stop(void *data)
 	struct rq_flags rf;
 
 	rq_lock_irq(busiest_rq, &rf);
+	/*
+	 * Between queueing the stop-work and running it is a hole in which
+	 * CPUs can become inactive. We should not move tasks from or to
+	 * inactive CPUs.
+	 */
+	if (!cpu_active(busiest_cpu) || !cpu_active(target_cpu))
+		goto out_unlock;
+
 	/* make sure the requested cpu hasn't gone down in the meantime */
 	if (unlikely(busiest_cpu != smp_processor_id() ||
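Both fair.c hunks close the same hotplug window: a CPU can look active when balancing is decided or the stop-work is queued, yet be inactive by the time the work runs, so the patches re-validate cpu_active() at the point of use — before newidle balancing, and under the busiest runqueue's lock in the stop-work. A userspace analogy of that "queue work, then re-check the invariant under the lock" pattern (a hypothetical pthread demo, not kernel code):

	#include <pthread.h>
	#include <stdbool.h>
	#include <stdio.h>

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static bool target_active = true;	/* stands in for cpu_active() */

	/* The "stop-work": queued while the target looked active, but it
	 * must not trust that snapshot -- it re-checks under the lock,
	 * just as active_load_balance_cpu_stop() re-checks cpu_active(). */
	static void *deferred_work(void *arg)
	{
		pthread_mutex_lock(&lock);
		if (!target_active) {		/* like the goto out_unlock path */
			pthread_mutex_unlock(&lock);
			return NULL;
		}
		printf("target still active: doing the work\n");
		pthread_mutex_unlock(&lock);
		return NULL;
	}

	int main(void)
	{
		pthread_t worker;

		/* "Queue" the work while target_active is still true... */
		pthread_create(&worker, NULL, deferred_work, NULL);

		/* ...while, concurrently, the target goes inactive (hotplug).
		 * Depending on timing the worker acts or bails; either way
		 * it decides on current state, not the stale snapshot. */
		pthread_mutex_lock(&lock);
		target_active = false;
		pthread_mutex_unlock(&lock);

		pthread_join(worker, NULL);
		return 0;
	}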
kernel/sched/sched.h:

@@ -1951,6 +1951,8 @@ extern struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq);
 extern struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq);
 
 #ifdef CONFIG_SCHED_DEBUG
+extern bool sched_debug_enabled;
+
 extern void print_cfs_stats(struct seq_file *m, int cpu);
 extern void print_rt_stats(struct seq_file *m, int cpu);
 extern void print_dl_stats(struct seq_file *m, int cpu);
kernel/sched/topology.c:

@@ -14,11 +14,9 @@ cpumask_var_t sched_domains_tmpmask2;
 
 #ifdef CONFIG_SCHED_DEBUG
 
-static __read_mostly int sched_debug_enabled;
-
 static int __init sched_debug_setup(char *str)
 {
-	sched_debug_enabled = 1;
+	sched_debug_enabled = true;
 
 	return 0;
 }