powerpc: query dynamic DEBUG_PAGEALLOC setting
We can disable debug_pagealloc processing even if the code is compiled
with CONFIG_DEBUG_PAGEALLOC. This patch changes the code to query
whether it is enabled or not at runtime.

Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Acked-by: David Rientjes <rientjes@google.com>
Cc: Christian Borntraeger <borntraeger@de.ibm.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Christoph Lameter <cl@linux.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 505f6d22db
commit e7df0d88c4
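For context, the patch relies on the debug_pagealloc_enabled() helper from the generic debug-pagealloc support. A minimal sketch of its shape in include/linux/mm.h around this kernel version (simplified here, not a verbatim copy):

	/* Simplified sketch of the helper in include/linux/mm.h */
	#ifdef CONFIG_DEBUG_PAGEALLOC
	extern bool _debug_pagealloc_enabled;	/* set from the boot command line */

	static inline bool debug_pagealloc_enabled(void)
	{
		return _debug_pagealloc_enabled;
	}
	#else
	static inline bool debug_pagealloc_enabled(void)
	{
		return false;	/* constant, so guarded code compiles away */
	}
	#endif

Because the helper folds to a constant false when CONFIG_DEBUG_PAGEALLOC is off, converting #ifdef blocks to runtime checks keeps the disabled case free of overhead. The #ifdef only has to stay where the guarded code references symbols (linear_map_hash_count, linear_map_hash_slots) that exist solely under the config option, as in the hash_utils_64.c hunks below.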
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -203,9 +203,8 @@ static int __kprobes __die(const char *str, struct pt_regs *regs, long err)
 #ifdef CONFIG_SMP
 	printk("SMP NR_CPUS=%d ", NR_CPUS);
 #endif
-#ifdef CONFIG_DEBUG_PAGEALLOC
-	printk("DEBUG_PAGEALLOC ");
-#endif
+	if (debug_pagealloc_enabled())
+		printk("DEBUG_PAGEALLOC ");
 #ifdef CONFIG_NUMA
 	printk("NUMA ");
 #endif
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -255,8 +255,10 @@ int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
 		if (ret < 0)
 			break;
+
 #ifdef CONFIG_DEBUG_PAGEALLOC
-		if ((paddr >> PAGE_SHIFT) < linear_map_hash_count)
+		if (debug_pagealloc_enabled() &&
+			(paddr >> PAGE_SHIFT) < linear_map_hash_count)
 			linear_map_hash_slots[paddr >> PAGE_SHIFT] = ret | 0x80;
 #endif /* CONFIG_DEBUG_PAGEALLOC */
 	}
 
@@ -512,17 +514,17 @@ static void __init htab_init_page_sizes(void)
 	if (mmu_has_feature(MMU_FTR_16M_PAGE))
 		memcpy(mmu_psize_defs, mmu_psize_defaults_gp,
 		       sizeof(mmu_psize_defaults_gp));
 found:
-#ifndef CONFIG_DEBUG_PAGEALLOC
-	/*
-	 * Pick a size for the linear mapping. Currently, we only support
-	 * 16M, 1M and 4K which is the default
-	 */
-	if (mmu_psize_defs[MMU_PAGE_16M].shift)
-		mmu_linear_psize = MMU_PAGE_16M;
-	else if (mmu_psize_defs[MMU_PAGE_1M].shift)
-		mmu_linear_psize = MMU_PAGE_1M;
-#endif /* CONFIG_DEBUG_PAGEALLOC */
+	if (!debug_pagealloc_enabled()) {
+		/*
+		 * Pick a size for the linear mapping. Currently, we only
+		 * support 16M, 1M and 4K which is the default
+		 */
+		if (mmu_psize_defs[MMU_PAGE_16M].shift)
+			mmu_linear_psize = MMU_PAGE_16M;
+		else if (mmu_psize_defs[MMU_PAGE_1M].shift)
+			mmu_linear_psize = MMU_PAGE_1M;
+	}
 
 #ifdef CONFIG_PPC_64K_PAGES
 	/*
@@ -721,10 +723,12 @@ static void __init htab_initialize(void)
 	prot = pgprot_val(PAGE_KERNEL);
 
 #ifdef CONFIG_DEBUG_PAGEALLOC
-	linear_map_hash_count = memblock_end_of_DRAM() >> PAGE_SHIFT;
-	linear_map_hash_slots = __va(memblock_alloc_base(linear_map_hash_count,
-						    1, ppc64_rma_size));
-	memset(linear_map_hash_slots, 0, linear_map_hash_count);
+	if (debug_pagealloc_enabled()) {
+		linear_map_hash_count = memblock_end_of_DRAM() >> PAGE_SHIFT;
+		linear_map_hash_slots = __va(memblock_alloc_base(
+				linear_map_hash_count, 1, ppc64_rma_size));
+		memset(linear_map_hash_slots, 0, linear_map_hash_count);
+	}
 #endif /* CONFIG_DEBUG_PAGEALLOC */
 
 	/* On U3 based machines, we need to reserve the DART area and
--- a/arch/powerpc/mm/init_32.c
+++ b/arch/powerpc/mm/init_32.c
@@ -112,10 +112,10 @@ void __init MMU_setup(void)
 	if (strstr(boot_command_line, "noltlbs")) {
 		__map_without_ltlbs = 1;
 	}
-#ifdef CONFIG_DEBUG_PAGEALLOC
-	__map_without_bats = 1;
-	__map_without_ltlbs = 1;
-#endif
+	if (debug_pagealloc_enabled()) {
+		__map_without_bats = 1;
+		__map_without_ltlbs = 1;
+	}
 }
 
 /*
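The runtime flag itself is driven by the debug_pagealloc=on boot parameter. A simplified sketch of the early_param handler in mm/page_alloc.c that sets it (details may differ from the upstream version):

	/* Simplified sketch of the boot-parameter handler in mm/page_alloc.c */
	bool _debug_pagealloc_enabled __read_mostly;

	static int __init early_debug_pagealloc(char *buf)
	{
		if (buf && strcmp(buf, "on") == 0)
			_debug_pagealloc_enabled = true;
		return 0;
	}
	early_param("debug_pagealloc", early_debug_pagealloc);

With CONFIG_DEBUG_PAGEALLOC built in but debug_pagealloc=on absent from the command line, the powerpc paths above now keep their fast, undebugged behavior.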