mm/page_alloc: add a missing mm_page_alloc_zone_locked() tracepoint

commit ce8f86ee94fabcc98537ddccd7e82cfd360a4dc5 upstream.

The tracepoint trace_mm_page_alloc_zone_locked() in __rmqueue() does not
currently cover all branches. Add the missing tracepoint and check the
page before doing so.

[akpm@linux-foundation.org: use IS_ENABLED() to suppress warning]
Link: https://lkml.kernel.org/r/20201228132901.41523-1-carver4lio@163.com
Signed-off-by: Hailong liu <liu.hailong6@zte.com.cn>
Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Ivan Babrou <ivan@cloudflare.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
commit c11f7749f1
parent c6fd968f58
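A note on the bracketed akpm fixup above: IS_ENABLED() is the helper from <linux/kconfig.h> that evaluates to 1 when a config option is built in ('y') or modular ('m') and to 0 otherwise. A block guarded by "if (IS_ENABLED(CONFIG_CMA))" is therefore always parsed and compiled, and is simply discarded as dead code when CMA is off; unlike the old #ifdef CONFIG_CMA guard, the compiler still sees the new "goto out", which is presumably what keeps the added out: label from being flagged as unused in !CONFIG_CMA builds. A standalone sketch of the idiom (HAVE_FAST_PATH and fast_path_alloc() are made-up placeholders, not kernel symbols):

/*
 * Illustration only: HAVE_FAST_PATH stands in for IS_ENABLED(CONFIG_CMA)
 * and fast_path_alloc() for the CMA fallback.  Compiles cleanly with
 * "gcc -Wall -c sketch.c" whether the option is 0 or 1.
 */
#define HAVE_FAST_PATH 0                /* flip to 1 to "enable" the option */

static int fast_path_alloc(void)
{
        return 1;
}

int alloc_sketch(void)
{
        int page = 0;

        /*
         * Had this block been wrapped in a preprocessor guard instead, it
         * would vanish before compilation when the option is off, taking the
         * goto with it and leaving the "out" label defined but unused.  With
         * a constant guard the block is always compiled, then dropped as
         * dead code by the optimizer.
         */
        if (HAVE_FAST_PATH) {
                page = fast_path_alloc();
                if (page)
                        goto out;
        }

        page = 2;                       /* slow path */
out:
        return page;
}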
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2846,20 +2846,20 @@ __rmqueue(struct zone *zone, unsigned int order, int migratetype,
 {
         struct page *page;
 
-#ifdef CONFIG_CMA
-        /*
-         * Balance movable allocations between regular and CMA areas by
-         * allocating from CMA when over half of the zone's free memory
-         * is in the CMA area.
-         */
-        if (alloc_flags & ALLOC_CMA &&
-            zone_page_state(zone, NR_FREE_CMA_PAGES) >
-            zone_page_state(zone, NR_FREE_PAGES) / 2) {
-                page = __rmqueue_cma_fallback(zone, order);
-                if (page)
-                        return page;
+        if (IS_ENABLED(CONFIG_CMA)) {
+                /*
+                 * Balance movable allocations between regular and CMA areas by
+                 * allocating from CMA when over half of the zone's free memory
+                 * is in the CMA area.
+                 */
+                if (alloc_flags & ALLOC_CMA &&
+                    zone_page_state(zone, NR_FREE_CMA_PAGES) >
+                    zone_page_state(zone, NR_FREE_PAGES) / 2) {
+                        page = __rmqueue_cma_fallback(zone, order);
+                        if (page)
+                                goto out;
+                }
         }
-#endif
 retry:
         page = __rmqueue_smallest(zone, order, migratetype);
         if (unlikely(!page)) {
@@ -2870,8 +2870,9 @@ __rmqueue(struct zone *zone, unsigned int order, int migratetype,
                                  alloc_flags))
                         goto retry;
         }
-
-        trace_mm_page_alloc_zone_locked(page, order, migratetype);
+out:
+        if (page)
+                trace_mm_page_alloc_zone_locked(page, order, migratetype);
         return page;
 }
 
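For readers piecing the hunks together, here is a condensed sketch of __rmqueue() as it looks with the patch applied (simplified, not the verbatim kernel source: the fallback handling that sits between the two hunks is only summarized). Every successful allocation path, including the CMA-preference path that used to return early, now funnels through the single out: label, and the page is checked for NULL before the tracepoint fires:

static struct page *__rmqueue(struct zone *zone, unsigned int order,
                              int migratetype, unsigned int alloc_flags)
{
        struct page *page;

        if (IS_ENABLED(CONFIG_CMA)) {
                /* Prefer CMA while it holds over half of the zone's free memory. */
                if (alloc_flags & ALLOC_CMA &&
                    zone_page_state(zone, NR_FREE_CMA_PAGES) >
                    zone_page_state(zone, NR_FREE_PAGES) / 2) {
                        page = __rmqueue_cma_fallback(zone, order);
                        if (page)
                                goto out;       /* was "return page;", which skipped the trace */
                }
        }
retry:
        page = __rmqueue_smallest(zone, order, migratetype);
        if (unlikely(!page)) {
                /*
                 * Fallback handling summarized here (the context between the
                 * two hunks): if stealing pages from another migratetype
                 * succeeds, retry the allocation; otherwise page stays NULL.
                 */
                if (__rmqueue_fallback(zone, order, migratetype, alloc_flags))
                        goto retry;
        }
out:
        if (page)       /* page may be NULL here, so check before tracing */
                trace_mm_page_alloc_zone_locked(page, order, migratetype);
        return page;
}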