revert "Revert "mm: remove __GFP_NO_KSWAPD""
It appears that this patch was innocent, and we hope that "mm: avoid waking kswapd for THP allocations when compaction is deferred or contended" will fix the final kswapd-spinning cause.

Cc: Zdenek Kabelac <zkabelac@redhat.com>
Cc: Seth Jennings <sjenning@linux.vnet.ibm.com>
Cc: Valdis Kletnieks <Valdis.Kletnieks@vt.edu>
Cc: Jiri Slaby <jirislaby@gmail.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Robert Jennings <rcj@linux.vnet.ibm.com>
Cc: Mel Gorman <mgorman@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 60cefed485
commit a50915394f
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -1077,8 +1077,7 @@ EXPORT_SYMBOL_GPL(mtd_writev);
  * until the request succeeds or until the allocation size falls below
  * the system page size. This attempts to make sure it does not adversely
  * impact system performance, so when allocating more than one page, we
- * ask the memory allocator to avoid re-trying, swapping, writing back
- * or performing I/O.
+ * ask the memory allocator to avoid re-trying.
  *
  * Note, this function also makes sure that the allocated buffer is aligned to
  * the MTD device's min. I/O unit, i.e. the "mtd->writesize" value.
@@ -1092,8 +1091,7 @@ EXPORT_SYMBOL_GPL(mtd_writev);
  */
 void *mtd_kmalloc_up_to(const struct mtd_info *mtd, size_t *size)
 {
-	gfp_t flags = __GFP_NOWARN | __GFP_WAIT |
-		       __GFP_NORETRY | __GFP_NO_KSWAPD;
+	gfp_t flags = __GFP_NOWARN | __GFP_WAIT | __GFP_NORETRY;
 	size_t min_alloc = max_t(size_t, mtd->writesize, PAGE_SIZE);
 	void *kbuf;
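The mtdcore.c change only trims the flag set; the fallback strategy of mtd_kmalloc_up_to() is unchanged: try the requested size with best-effort flags, halve it on failure, and only block for the final minimum-sized attempt. A minimal user-space sketch of that loop, where malloc() stands in for kmalloc(*size, flags), min_alloc plays the role of max(mtd->writesize, PAGE_SIZE), and the writesize-alignment step is omitted:

#include <stdlib.h>

/*
 * Illustrative mimic of the mtd_kmalloc_up_to() strategy: try the
 * requested size first and halve it on failure, so a large request
 * degrades gracefully instead of pressuring the allocator; report
 * the size actually obtained through *size.
 */
static void *kmalloc_up_to_mimic(size_t min_alloc, size_t *size)
{
	while (*size > min_alloc) {
		void *buf = malloc(*size);	/* best-effort attempt */
		if (buf)
			return buf;
		*size >>= 1;			/* fall back to a smaller buffer */
	}
	*size = min_alloc;
	return malloc(min_alloc);		/* last resort: minimum size, may block */
}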
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -30,10 +30,9 @@ struct vm_area_struct;
 #define ___GFP_HARDWALL		0x20000u
 #define ___GFP_THISNODE		0x40000u
 #define ___GFP_RECLAIMABLE	0x80000u
-#define ___GFP_NOTRACK		0x200000u
-#define ___GFP_NO_KSWAPD	0x400000u
-#define ___GFP_OTHER_NODE	0x800000u
-#define ___GFP_WRITE		0x1000000u
+#define ___GFP_NOTRACK		0x100000u
+#define ___GFP_OTHER_NODE	0x200000u
+#define ___GFP_WRITE		0x400000u
 
 /*
  * GFP bitmasks..
@@ -86,7 +85,6 @@ struct vm_area_struct;
 #define __GFP_RECLAIMABLE ((__force gfp_t)___GFP_RECLAIMABLE) /* Page is reclaimable */
 #define __GFP_NOTRACK	((__force gfp_t)___GFP_NOTRACK)  /* Don't track with kmemcheck */
 
-#define __GFP_NO_KSWAPD	((__force gfp_t)___GFP_NO_KSWAPD)
 #define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE) /* On behalf of other node */
 #define __GFP_WRITE	((__force gfp_t)___GFP_WRITE)	/* Allocator intends to dirty page */
 
@@ -96,7 +94,7 @@ struct vm_area_struct;
  */
 #define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK)
 
-#define __GFP_BITS_SHIFT 25	/* Room for N __GFP_FOO bits */
+#define __GFP_BITS_SHIFT 23	/* Room for N __GFP_FOO bits */
 #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
 
 /* This equals 0, but use constants in case they ever change */
@@ -116,8 +114,7 @@ struct vm_area_struct;
 			 __GFP_MOVABLE)
 #define GFP_IOFS	(__GFP_IO | __GFP_FS)
 #define GFP_TRANSHUGE	(GFP_HIGHUSER_MOVABLE | __GFP_COMP | \
-			 __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN | \
-			 __GFP_NO_KSWAPD)
+			 __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN)
 
 #ifdef CONFIG_NUMA
 #define GFP_THISNODE	(__GFP_THISNODE | __GFP_NOWARN | __GFP_NORETRY)
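The renumbering is mechanical: before the patch the highest flag was ___GFP_WRITE = 0x1000000u (bit 24), so 25 bits were needed. Dropping ___GFP_NO_KSWAPD and closing the previously unused 0x100000u slot repacks the top three flags two bits lower, leaving ___GFP_WRITE at 0x400000u (bit 22) and __GFP_BITS_SHIFT at 23. A small self-contained check of that arithmetic, using local stand-in macros rather than the kernel headers:

#include <assert.h>
#include <stdio.h>

/* Post-patch values from the hunk above (local stand-ins). */
#define GFP_NOTRACK_NEW		0x100000u	/* 1 << 20 */
#define GFP_OTHER_NODE_NEW	0x200000u	/* 1 << 21 */
#define GFP_WRITE_NEW		0x400000u	/* 1 << 22 */
#define GFP_BITS_SHIFT_NEW	23

int main(void)
{
	/* The highest flag bit is now 22, so 23 bits cover every flag. */
	assert(GFP_WRITE_NEW == 1u << (GFP_BITS_SHIFT_NEW - 1));

	/* The mask derived from the shift covers all repacked flags. */
	unsigned mask = (1u << GFP_BITS_SHIFT_NEW) - 1;
	assert((GFP_NOTRACK_NEW | GFP_OTHER_NODE_NEW | GFP_WRITE_NEW) <= mask);

	printf("__GFP_BITS_MASK = 0x%x\n", mask);	/* prints 0x7fffff */
	return 0;
}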
--- a/include/trace/events/gfpflags.h
+++ b/include/trace/events/gfpflags.h
@@ -36,7 +36,6 @@
 	{(unsigned long)__GFP_RECLAIMABLE,	"GFP_RECLAIMABLE"},	\
 	{(unsigned long)__GFP_MOVABLE,		"GFP_MOVABLE"},		\
 	{(unsigned long)__GFP_NOTRACK,		"GFP_NOTRACK"},		\
-	{(unsigned long)__GFP_NO_KSWAPD,	"GFP_NO_KSWAPD"},	\
 	{(unsigned long)__GFP_OTHER_NODE,	"GFP_OTHER_NODE"}	\
 	) : "GFP_NOWAIT"
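This table exists only to turn mask bits into readable names in tracepoint output, so the dead flag's entry is dropped along with the flag. A user-space sketch of the same table-driven decoding, with an abbreviated table using the post-patch values from gfp.h above:

#include <stdio.h>

/* Illustrative flag/name table in the spirit of gfpflags.h. */
static const struct { unsigned long mask; const char *name; } gfp_names[] = {
	{ 0x80000u,  "GFP_RECLAIMABLE" },
	{ 0x100000u, "GFP_NOTRACK" },
	{ 0x200000u, "GFP_OTHER_NODE" },
};

/* Print every table entry whose bit is set, '|'-separated. */
static void print_gfp_flags(unsigned long flags)
{
	const char *sep = "";
	for (size_t i = 0; i < sizeof(gfp_names) / sizeof(gfp_names[0]); i++)
		if (flags & gfp_names[i].mask) {
			printf("%s%s", sep, gfp_names[i].name);
			sep = "|";
		}
	printf("\n");
}

int main(void)
{
	print_gfp_flags(0x80000u | 0x200000u);	/* GFP_RECLAIMABLE|GFP_OTHER_NODE */
	return 0;
}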
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2416,9 +2416,8 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 		goto nopage;
 
 restart:
-	if (!(gfp_mask & __GFP_NO_KSWAPD))
-		wake_all_kswapd(order, zonelist, high_zoneidx,
-						zone_idx(preferred_zone));
+	wake_all_kswapd(order, zonelist, high_zoneidx,
+					zone_idx(preferred_zone));
 
 	/*
 	 * OK, we're below the kswapd watermark and have kicked background
@@ -2495,7 +2494,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 	 * system then fail the allocation instead of entering direct reclaim.
 	 */
 	if ((deferred_compaction || contended_compaction) &&
-						(gfp_mask & __GFP_NO_KSWAPD))
+		(gfp_mask & (__GFP_MOVABLE|__GFP_REPEAT)) == __GFP_MOVABLE)
 		goto nopage;
 
 	/* Try direct reclaim and then allocating */
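With the flag gone, the slowpath needs another way to spot THP-style opportunistic allocations when compaction has been deferred or is contended. The replacement predicate keys off the flag combination GFP_TRANSHUGE actually carries: it is movable (via GFP_HIGHUSER_MOVABLE) and does not set __GFP_REPEAT, so (gfp_mask & (__GFP_MOVABLE|__GFP_REPEAT)) == __GFP_MOVABLE matches it while leaving callers that insist on retrying in the reclaim path. A small check of that logic using stand-in flag values; the GFP_TRANSHUGE composition below is deliberately simplified:

#include <assert.h>
#include <stdbool.h>

/* Stand-ins for the kernel's flag values circa v3.7 (see gfp.h). */
#define F_WAIT		0x10u	/* __GFP_WAIT */
#define F_IO		0x40u	/* __GFP_IO */
#define F_FS		0x80u	/* __GFP_FS */
#define F_MOVABLE	0x08u	/* __GFP_MOVABLE */
#define F_REPEAT	0x400u	/* __GFP_REPEAT */

/* The replacement predicate: a movable allocation that did not ask
 * the allocator to keep retrying is treated as a THP-style request
 * and is allowed to fail early instead of entering direct reclaim. */
static bool thp_style(unsigned gfp_mask)
{
	return (gfp_mask & (F_MOVABLE | F_REPEAT)) == F_MOVABLE;
}

int main(void)
{
	unsigned gfp_kernel	= F_WAIT | F_IO | F_FS;		/* GFP_KERNEL */
	unsigned gfp_transhuge	= gfp_kernel | F_MOVABLE;	/* simplified */

	assert(thp_style(gfp_transhuge));		/* bails out early */
	assert(!thp_style(gfp_kernel));			/* keeps reclaiming */
	assert(!thp_style(F_MOVABLE | F_REPEAT));	/* caller insisted on retry */
	return 0;
}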