forked from luck/tmp_suning_uos_patched
mm/sparse: only sub-section aligned range would be populated
There are two code paths which invoke __populate_section_memmap(): * sparse_init_nid() * sparse_add_section() In both cases, we are sure the memory range is sub-section aligned: * we pass PAGES_PER_SECTION to sparse_init_nid() * we check the range with check_pfn_span() before calling sparse_add_section() Also, in the counterpart of __populate_section_memmap(), we don't do such a calculation and check, since the range is already checked by check_pfn_span() in __remove_pages(). Remove the calculation and check to keep it simple and consistent with its counterpart. Signed-off-by: Wei Yang <richard.weiyang@linux.alibaba.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Acked-by: David Hildenbrand <david@redhat.com> Link: http://lkml.kernel.org/r/20200703031828.14645-1-richard.weiyang@linux.alibaba.com Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
parent
ef69bc9f68
commit
6cda72047e
|
@ -251,20 +251,12 @@ int __meminit vmemmap_populate_basepages(unsigned long start, unsigned long end,
|
||||||
struct page * __meminit __populate_section_memmap(unsigned long pfn,
|
struct page * __meminit __populate_section_memmap(unsigned long pfn,
|
||||||
unsigned long nr_pages, int nid, struct vmem_altmap *altmap)
|
unsigned long nr_pages, int nid, struct vmem_altmap *altmap)
|
||||||
{
|
{
|
||||||
unsigned long start;
|
unsigned long start = (unsigned long) pfn_to_page(pfn);
|
||||||
unsigned long end;
|
unsigned long end = start + nr_pages * sizeof(struct page);
|
||||||
|
|
||||||
/*
|
if (WARN_ON_ONCE(!IS_ALIGNED(pfn, PAGES_PER_SUBSECTION) ||
|
||||||
* The minimum granularity of memmap extensions is
|
!IS_ALIGNED(nr_pages, PAGES_PER_SUBSECTION)))
|
||||||
* PAGES_PER_SUBSECTION as allocations are tracked in the
|
return NULL;
|
||||||
* 'subsection_map' bitmap of the section.
|
|
||||||
*/
|
|
||||||
end = ALIGN(pfn + nr_pages, PAGES_PER_SUBSECTION);
|
|
||||||
pfn &= PAGE_SUBSECTION_MASK;
|
|
||||||
nr_pages = end - pfn;
|
|
||||||
|
|
||||||
start = (unsigned long) pfn_to_page(pfn);
|
|
||||||
end = start + nr_pages * sizeof(struct page);
|
|
||||||
|
|
||||||
if (vmemmap_populate(start, end, nid, altmap))
|
if (vmemmap_populate(start, end, nid, altmap))
|
||||||
return NULL;
|
return NULL;
|
||||||
|
|
Loading…
Reference in New Issue
Block a user