x86: remove some alloc_bootmem_cpumask_var calling
Now that we set up the slab allocator earlier, we can get rid of some
alloc_bootmem_cpumask_var() calls in boot code.

Cc: Ingo Molnar <mingo@elte.hu>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Yinghai Lu <yinghai@kernel.org>
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
commit 38c7fed2f5
parent a5f4f52e82
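For context, a minimal sketch (not part of the patch; example_early_init and example_mask are made-up names) of the pattern this change relies on: because the slab allocator is now initialized before these __init calls run, an early-boot caller can use alloc_cpumask_var() with GFP_NOWAIT instead of the bootmem variant, GFP_NOWAIT being appropriate since sleeping is not allowed this early. This is exactly the substitution made in the hunks below.

    #include <linux/cpumask.h>
    #include <linux/errno.h>
    #include <linux/gfp.h>
    #include <linux/init.h>

    static cpumask_var_t example_mask;          /* hypothetical variable */

    static int __init example_early_init(void)
    {
            /* was: alloc_bootmem_cpumask_var(&example_mask); */
            if (!alloc_cpumask_var(&example_mask, GFP_NOWAIT))
                    return -ENOMEM;

            cpumask_copy(example_mask, cpu_possible_mask);
            return 0;
    }

Note that with CONFIG_CPUMASK_OFFSTACK=n, alloc_cpumask_var() always succeeds without allocating anything, which is why some of the converted call sites below ignore the return value.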
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -185,8 +185,8 @@ int __init arch_early_irq_init(void)
 	for (i = 0; i < count; i++) {
 		desc = irq_to_desc(i);
 		desc->chip_data = &cfg[i];
-		alloc_bootmem_cpumask_var(&cfg[i].domain);
-		alloc_bootmem_cpumask_var(&cfg[i].old_domain);
+		alloc_cpumask_var(&cfg[i].domain, GFP_NOWAIT);
+		alloc_cpumask_var(&cfg[i].old_domain, GFP_NOWAIT);
 		if (i < NR_IRQS_LEGACY)
 			cpumask_setall(cfg[i].domain);
 	}
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -430,23 +430,19 @@ extern int set_irq_msi(unsigned int irq, struct msi_desc *entry);
  * Returns true if successful (or not required).
  */
 static inline bool alloc_desc_masks(struct irq_desc *desc, int node,
-								bool boot)
+							bool boot)
 {
+	gfp_t gfp = GFP_ATOMIC;
+
+	if (boot)
+		gfp = GFP_NOWAIT;
+
 #ifdef CONFIG_CPUMASK_OFFSTACK
-	if (boot) {
-		alloc_bootmem_cpumask_var(&desc->affinity);
-
-#ifdef CONFIG_GENERIC_PENDING_IRQ
-		alloc_bootmem_cpumask_var(&desc->pending_mask);
-#endif
-		return true;
-	}
-
-	if (!alloc_cpumask_var_node(&desc->affinity, GFP_ATOMIC, node))
+	if (!alloc_cpumask_var_node(&desc->affinity, gfp, node))
 		return false;
 
 #ifdef CONFIG_GENERIC_PENDING_IRQ
-	if (!alloc_cpumask_var_node(&desc->pending_mask, GFP_ATOMIC, node)) {
+	if (!alloc_cpumask_var_node(&desc->pending_mask, gfp, node)) {
 		free_cpumask_var(desc->affinity);
 		return false;
 	}
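The irq.h hunk above replaces the bootmem special case with a gfp flag chosen per call: boot-time callers cannot sleep, so they pass GFP_NOWAIT, while the non-boot path keeps GFP_ATOMIC as before. A standalone sketch of that selection (example_alloc_mask is an illustrative name, not a kernel function):

    #include <linux/cpumask.h>
    #include <linux/gfp.h>

    /* Illustrative only: allocate a cpumask with a gfp flag that is safe
     * for the current phase.  Early boot must not sleep or do I/O, hence
     * GFP_NOWAIT; later callers may run under locks, hence GFP_ATOMIC. */
    static bool example_alloc_mask(cpumask_var_t *mask, int node, bool boot)
    {
            gfp_t gfp = boot ? GFP_NOWAIT : GFP_ATOMIC;

            return alloc_cpumask_var_node(mask, gfp, node);
    }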
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -1857,7 +1857,7 @@ struct cgroup_subsys cpuset_subsys = {
 
 int __init cpuset_init_early(void)
 {
-	alloc_bootmem_cpumask_var(&top_cpuset.cpus_allowed);
+	alloc_cpumask_var(&top_cpuset.cpus_allowed, GFP_NOWAIT);
 
 	top_cpuset.mems_generation = cpuset_mems_generation++;
 	return 0;
--- a/kernel/profile.c
+++ b/kernel/profile.c
@@ -111,12 +111,6 @@ int __ref profile_init(void)
 	/* only text is profiled */
 	prof_len = (_etext - _stext) >> prof_shift;
 	buffer_bytes = prof_len*sizeof(atomic_t);
-	if (!slab_is_available()) {
-		prof_buffer = alloc_bootmem(buffer_bytes);
-		alloc_bootmem_cpumask_var(&prof_cpu_mask);
-		cpumask_copy(prof_cpu_mask, cpu_possible_mask);
-		return 0;
-	}
 
 	if (!alloc_cpumask_var(&prof_cpu_mask, GFP_KERNEL))
 		return -ENOMEM;
--- a/lib/cpumask.c
+++ b/lib/cpumask.c
@@ -92,15 +92,8 @@ int cpumask_any_but(const struct cpumask *mask, unsigned int cpu)
  */
 bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node)
 {
-	if (likely(slab_is_available()))
-		*mask = kmalloc_node(cpumask_size(), flags, node);
-	else {
-#ifdef CONFIG_DEBUG_PER_CPU_MAPS
-		printk(KERN_ERR
-			"=> alloc_cpumask_var: kmalloc not available!\n");
-#endif
-		*mask = NULL;
-	}
+	*mask = kmalloc_node(cpumask_size(), flags, node);
+
 #ifdef CONFIG_DEBUG_PER_CPU_MAPS
 	if (!*mask) {
 		printk(KERN_ERR "=> alloc_cpumask_var: failed!\n");
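With the slab_is_available() fallback gone, the allocation in lib/cpumask.c is unconditional. A simplified sketch of the resulting behaviour (the real function additionally keeps the CONFIG_DEBUG_PER_CPU_MAPS failure report shown in the hunk above):

    /* Simplified: kmalloc is assumed usable because the slab allocator is
     * now brought up before any alloc_cpumask_var_node() caller runs. */
    bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node)
    {
            *mask = kmalloc_node(cpumask_size(), flags, node);
            return *mask != NULL;
    }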