Revert "x86/mm: Fix the size calculation of mapping tables"
This reverts commit 722bc6b167:

    x86/mm: Fix the size calculation of mapping tables

which tried to address the issue that the first 2/4M should use 4k pages
if PSE is enabled, but the extra counts should only be valid for x86_32.

That commit caused a kdump regression: the kdump kernel hangs.

Work is in progress to fundamentally fix the various page table
initialization issues that we have, via the design suggested by
H. Peter Anvin, but it is not ready yet to be merged.

So, to get a working kdump, revert to the last known working version,
which is the revert of this commit and of a followup fix (which was
incomplete):

    bd2753b2dd x86/mm: Only add extra pages count for the first memory
               range during pre-allocation

Tested kdump on physical and virtual machines.

Signed-off-by: Dave Young <dyoung@redhat.com>
Acked-by: Yinghai Lu <yinghai@kernel.org>
Acked-by: Cong Wang <xiyou.wangcong@gmail.com>
Acked-by: Flavio Leitner <fbl@redhat.com>
Tested-by: Flavio Leitner <fbl@redhat.com>
Cc: Dan Carpenter <dan.carpenter@oracle.com>
Cc: Cong Wang <xiyou.wangcong@gmail.com>
Cc: Flavio Leitner <fbl@redhat.com>
Cc: Tejun Heo <tj@kernel.org>
Cc: ianfang.cn@gmail.com
Cc: Vivek Goyal <vgoyal@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: <stable@kernel.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 0e9e3e306c
commit 7b16bbf973
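For context, the pre-allocation logic being restored below estimates how many 4k PTE entries must be reserved before the direct mapping exists: with PSE large pages enabled, only the first 2/4M of the range is still mapped with 4k pages, and the byte span is rounded up to whole pages with the (extra + PAGE_SIZE - 1) >> PAGE_SHIFT idiom visible in the hunks. Here is a minimal standalone sketch of that arithmetic; the estimate_ptes() helper and the fixed 2 MiB PMD_SIZE are illustrative assumptions, not kernel API:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)  /* 4 KiB */
#define PMD_SIZE   (1UL << 21)          /* assume 2 MiB; 4 MiB on non-PAE x86_32 */

/*
 * Illustrative helper (not kernel code): count how many 4k PTE entries
 * are needed to map [0, end) when PSE large pages cover everything
 * except the first PMD_SIZE bytes, which stay 4k-mapped.
 */
static unsigned long estimate_ptes(unsigned long end, int use_pse)
{
    unsigned long extra;

    if (use_pse)
        /* Only the first 2/4M doesn't use large pages. */
        extra = end < PMD_SIZE ? end : PMD_SIZE;
    else
        extra = end;  /* everything is mapped with 4k pages */

    /* Round the byte span up to whole 4k pages: one PTE per page. */
    return (extra + PAGE_SIZE - 1) >> PAGE_SHIFT;
}

int main(void)
{
    /* Mapping 16 MiB with PSE needs 4k PTEs only for the low 2 MiB,
     * i.e. 512 entries instead of 4096. */
    printf("with PSE:    %lu PTEs\n", estimate_ptes(16UL << 20, 1));
    printf("without PSE: %lu PTEs\n", estimate_ptes(16UL << 20, 0));
    return 0;
}

The reverted patch extended this accounting with a per-map_range check (mr->start < PMD_SIZE) so the extra count applied only to the first memory range; those are exactly the lines the hunks below take back out.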
arch/x86/mm/init.c

@@ -29,14 +29,8 @@ int direct_gbpages
 #endif
 ;
 
-struct map_range {
-	unsigned long start;
-	unsigned long end;
-	unsigned page_size_mask;
-};
-
-static void __init find_early_table_space(struct map_range *mr, unsigned long end,
-					  int use_pse, int use_gbpages)
+static void __init find_early_table_space(unsigned long end, int use_pse,
+					  int use_gbpages)
 {
 	unsigned long puds, pmds, ptes, tables, start = 0, good_end = end;
 	phys_addr_t base;
@@ -61,10 +55,6 @@ static void __init find_early_table_space(struct map_range *mr, unsigned long end,
 #ifdef CONFIG_X86_32
 		extra += PMD_SIZE;
 #endif
-		/* The first 2/4M doesn't use large pages. */
-		if (mr->start < PMD_SIZE)
-			extra += mr->end - mr->start;
-
 		ptes = (extra + PAGE_SIZE - 1) >> PAGE_SHIFT;
 	} else
 		ptes = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;
@@ -95,6 +85,12 @@ void __init native_pagetable_reserve(u64 start, u64 end)
 	memblock_reserve(start, end - start);
 }
 
+struct map_range {
+	unsigned long start;
+	unsigned long end;
+	unsigned page_size_mask;
+};
+
 #ifdef CONFIG_X86_32
 #define NR_RANGE_MR 3
 #else /* CONFIG_X86_64 */
@@ -267,7 +263,7 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
 	 * nodes are discovered.
 	 */
 	if (!after_bootmem)
-		find_early_table_space(&mr[0], end, use_pse, use_gbpages);
+		find_early_table_space(end, use_pse, use_gbpages);
 
 	for (i = 0; i < nr_range; i++)
 		ret = kernel_physical_mapping_init(mr[i].start, mr[i].end,