sched: convert nohz_cpu_mask to cpumask_var_t.
Impact: (future) size reduction for large NR_CPUS.

Dynamically allocating cpumasks (when CONFIG_CPUMASK_OFFSTACK) saves space
for small nr_cpu_ids but big CONFIG_NR_CPUS. cpumask_var_t is just a
struct cpumask for !CONFIG_CPUMASK_OFFSTACK.

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 6c99e9ad47
commit 6a7b3dc344
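For context, a minimal sketch (not part of this patch; the function name is illustrative only, kernel context assumed) of the cpumask_var_t pattern the message describes: with CONFIG_CPUMASK_OFFSTACK the mask is a pointer that must be allocated and freed, while without it the same calls operate on an ordinary struct cpumask and the allocation/free calls become no-ops, so small-NR_CPUS builds pay nothing.

/* Illustrative sketch only -- not part of this patch. */
#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/gfp.h>

static int example_mark_cpu(int cpu)
{
	cpumask_var_t mask;	/* a pointer with CONFIG_CPUMASK_OFFSTACK, a struct cpumask otherwise */

	/* kmallocs nr_cpu_ids bits when CONFIG_CPUMASK_OFFSTACK, otherwise a no-op that succeeds */
	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	cpumask_clear(mask);			/* alloc_cpumask_var() does not zero the bits */
	cpumask_set_cpu(cpu, mask);		/* new-style counterpart of cpu_set() */
	if (cpumask_test_cpu(cpu, mask))
		cpumask_clear_cpu(cpu, mask);	/* new-style counterpart of cpu_clear() */

	free_cpumask_var(mask);			/* frees the kmalloc'd bits, or does nothing */
	return 0;
}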
@@ -249,7 +249,7 @@ extern void init_idle_bootup_task(struct task_struct *idle);
 extern int runqueue_is_locked(void);
 extern void task_rq_unlock_wait(struct task_struct *p);
 
-extern cpumask_t nohz_cpu_mask;
+extern cpumask_var_t nohz_cpu_mask;
 #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ)
 extern int select_nohz_load_balancer(int cpu);
 #else
@@ -393,7 +393,7 @@ static void rcu_start_batch(struct rcu_ctrlblk *rcp)
 		 * unnecessarily.
 		 */
 		smp_mb();
-		cpus_andnot(rcp->cpumask, cpu_online_map, nohz_cpu_mask);
+		cpumask_andnot(&rcp->cpumask, cpu_online_mask, nohz_cpu_mask);
 
 		rcp->signaled = 0;
 	}
@@ -5870,9 +5870,9 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
  * indicates which cpus entered this state. This is used
  * in the rcu update to wait only for active cpus. For system
  * which do not switch off the HZ timer nohz_cpu_mask should
- * always be CPU_MASK_NONE.
+ * always be CPU_BITS_NONE.
  */
-cpumask_t nohz_cpu_mask = CPU_MASK_NONE;
+cpumask_var_t nohz_cpu_mask;
 
 /*
  * Increase the granularity value when there are more CPUs,
@@ -8274,6 +8274,9 @@ void __init sched_init(void)
 	 */
 	current->sched_class = &fair_sched_class;
 
+	/* Allocate the nohz_cpu_mask if CONFIG_CPUMASK_OFFSTACK */
+	alloc_bootmem_cpumask_var(&nohz_cpu_mask);
+
 	scheduler_running = 1;
 }
 
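The alloc_bootmem_cpumask_var() call is used here (rather than alloc_cpumask_var()) because sched_init() runs before the slab allocator is available. Roughly paraphrased, and offered as a sketch rather than the exact kernel source, the two configurations behave like this:

/* Paraphrased sketch of the cpumask_var_t machinery -- see include/linux/cpumask.h
 * and lib/cpumask.c for the real definitions. */
#ifdef CONFIG_CPUMASK_OFFSTACK
typedef struct cpumask *cpumask_var_t;		/* storage allocated separately */

void __init alloc_bootmem_cpumask_var(cpumask_var_t *mask)
{
	/* only nr_cpu_ids worth of bits, taken from bootmem */
	*mask = alloc_bootmem(cpumask_size());
}
#else
typedef struct cpumask cpumask_var_t[1];	/* full NR_CPUS bitmap, embedded */

static inline void alloc_bootmem_cpumask_var(cpumask_var_t *mask)
{
	/* nothing to allocate: the storage already exists */
}
#endif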
@@ -144,7 +144,7 @@ void tick_nohz_update_jiffies(void)
 	if (!ts->tick_stopped)
 		return;
 
-	cpu_clear(cpu, nohz_cpu_mask);
+	cpumask_clear_cpu(cpu, nohz_cpu_mask);
 	now = ktime_get();
 	ts->idle_waketime = now;
 
@@ -283,7 +283,7 @@ void tick_nohz_stop_sched_tick(int inidle)
 	if ((long)delta_jiffies >= 1) {
 
 		if (delta_jiffies > 1)
-			cpu_set(cpu, nohz_cpu_mask);
+			cpumask_set_cpu(cpu, nohz_cpu_mask);
 		/*
 		 * nohz_stop_sched_tick can be called several times before
 		 * the nohz_restart_sched_tick is called. This happens when
@@ -296,7 +296,7 @@ void tick_nohz_stop_sched_tick(int inidle)
 			/*
 			 * sched tick not stopped!
 			 */
-			cpu_clear(cpu, nohz_cpu_mask);
+			cpumask_clear_cpu(cpu, nohz_cpu_mask);
 			goto out;
 		}
 
@@ -354,7 +354,7 @@ void tick_nohz_stop_sched_tick(int inidle)
 			 * softirq.
 			 */
 			tick_do_update_jiffies64(ktime_get());
-			cpu_clear(cpu, nohz_cpu_mask);
+			cpumask_clear_cpu(cpu, nohz_cpu_mask);
 		}
 		raise_softirq_irqoff(TIMER_SOFTIRQ);
 out:
@@ -432,7 +432,7 @@ void tick_nohz_restart_sched_tick(void)
 	select_nohz_load_balancer(0);
 	now = ktime_get();
 	tick_do_update_jiffies64(now);
-	cpu_clear(cpu, nohz_cpu_mask);
+	cpumask_clear_cpu(cpu, nohz_cpu_mask);
 
 	/*
 	 * We stopped the tick in idle. Update process times would miss the
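Taken together, the rcuclassic.c and tick-sched.c hunks apply one mechanical substitution: cpu_set(cpu, nohz_cpu_mask) becomes cpumask_set_cpu(cpu, nohz_cpu_mask), cpu_clear(cpu, nohz_cpu_mask) becomes cpumask_clear_cpu(cpu, nohz_cpu_mask), and cpus_andnot(rcp->cpumask, cpu_online_map, nohz_cpu_mask) becomes cpumask_andnot(&rcp->cpumask, cpu_online_mask, nohz_cpu_mask), since the new operators take struct cpumask pointers and nohz_cpu_mask is now a cpumask_var_t.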