mm/memory_hotplug: we always have a zone in find_(smallest|biggest)_section_pfn
With shrink_pgdat_span() out of the way, we now always have a valid zone.

Link: http://lkml.kernel.org/r/20191006085646.5768-8-david@redhat.com
Signed-off-by: David Hildenbrand <david@redhat.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: "Matthew Wilcox (Oracle)" <willy@infradead.org>
Cc: "Aneesh Kumar K.V" <aneesh.kumar@linux.ibm.com>
Cc: Pavel Tatashin <pasha.tatashin@soleen.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Logan Gunthorpe <logang@deltatee.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Pankaj Gupta <pagupta@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 9b05158f5d
parent d33695b16a
mm/memory_hotplug.c
@@ -355,7 +355,7 @@ static unsigned long find_smallest_section_pfn(int nid, struct zone *zone,
 		if (unlikely(pfn_to_nid(start_pfn) != nid))
 			continue;
 
-		if (zone && zone != page_zone(pfn_to_page(start_pfn)))
+		if (zone != page_zone(pfn_to_page(start_pfn)))
 			continue;
 
 		return start_pfn;
@@ -380,7 +380,7 @@ static unsigned long find_biggest_section_pfn(int nid, struct zone *zone,
 		if (unlikely(pfn_to_nid(pfn) != nid))
 			continue;
 
-		if (zone && zone != page_zone(pfn_to_page(pfn)))
+		if (zone != page_zone(pfn_to_page(pfn)))
 			continue;
 
 		return pfn;
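For context, a minimal sketch of find_smallest_section_pfn() as it reads after this patch, reconstructed from the hunk above. The loop structure and the pfn_to_online_page() check are not part of the diff and are assumptions based on mainline mm/memory_hotplug.c of this era; find_biggest_section_pfn() is symmetric, walking downward from end_pfn.

/*
 * Sketch of the post-patch helper (loop bounds and the
 * pfn_to_online_page() check are assumed context, not shown in the
 * diff above). Returns the first pfn in [start_pfn, end_pfn) that is
 * online, belongs to node nid, and lies in the given zone; 0 if none.
 */
static unsigned long find_smallest_section_pfn(int nid, struct zone *zone,
					       unsigned long start_pfn,
					       unsigned long end_pfn)
{
	for (; start_pfn < end_pfn; start_pfn += PAGES_PER_SUBSECTION) {
		if (unlikely(!pfn_to_online_page(start_pfn)))
			continue;

		if (unlikely(pfn_to_nid(start_pfn) != nid))
			continue;

		/* zone is always valid here now; the NULL check is gone. */
		if (zone != page_zone(pfn_to_page(start_pfn)))
			continue;

		return start_pfn;
	}

	return 0;
}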