mm, page_alloc: fix dirtyable highmem calculation
When I tested vmscale in mmtest on 32bit, I found the benchmark slowed down by 0.5 times:

              base        node
                 1    global-1
User         12.98       16.04
System      147.61      166.42
Elapsed      26.48       38.08

With vmstat, I found the average IO wait was much higher than in the base. The reason was that highmem_dirtyable_memory() accumulated its running totals of free pages and highmem_file_pages across the zones from HIGHMEM to MOVABLE, which was wrong: the running total is re-added on every zone iteration, so earlier zones and the file pages get counted multiple times. With that, dirty_thresh in throttle_vm_writeout() is always 0, so it calls congestion_wait() frequently once writeback starts (see the sketch after the diff below).

With this patch, it is mostly recovered:

              base        node          fi
                 1    global-1         fix
User         12.98       16.04       13.78
System      147.61      166.42      143.92
Elapsed      26.48       38.08       29.64

Link: http://lkml.kernel.org/r/1468404004-5085-4-git-send-email-mgorman@techsingularity.net
Signed-off-by: Minchan Kim <minchan@kernel.org>
Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
parent bca6759258
commit 9cb937e219
@@ -307,27 +307,31 @@ static unsigned long highmem_dirtyable_memory(unsigned long total)
 {
 #ifdef CONFIG_HIGHMEM
 	int node;
-	unsigned long x = 0;
+	unsigned long x;
 	int i;
-	unsigned long dirtyable = atomic_read(&highmem_file_pages);
+	unsigned long dirtyable = 0;
 
 	for_each_node_state(node, N_HIGH_MEMORY) {
 		for (i = ZONE_NORMAL + 1; i < MAX_NR_ZONES; i++) {
 			struct zone *z;
+			unsigned long nr_pages;
 
 			if (!is_highmem_idx(i))
 				continue;
 
 			z = &NODE_DATA(node)->node_zones[i];
-			dirtyable += zone_page_state(z, NR_FREE_PAGES);
+			if (!populated_zone(z))
+				continue;
 
+			nr_pages = zone_page_state(z, NR_FREE_PAGES);
 			/* watch for underflows */
-			dirtyable -= min(dirtyable, high_wmark_pages(z));
-
-			x += dirtyable;
+			nr_pages -= min(nr_pages, high_wmark_pages(z));
+			dirtyable += nr_pages;
 		}
 	}
 
+	x = dirtyable + atomic_read(&highmem_file_pages);
+
 	/*
 	 * Unreclaimable memory (kernel memory or anonymous memory
 	 * without swap) can bring down the dirtyable pages below
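For reference, the effect of the old accumulation versus the new per-zone clamping can be reproduced outside the kernel. Below is a minimal userspace C sketch, not kernel code; the per-zone free-page counts, watermarks, and highmem_file_pages value are hypothetical numbers chosen only for illustration:

/*
 * Minimal userspace sketch (NOT kernel code) contrasting the old and
 * new accumulation schemes in highmem_dirtyable_memory(). All numbers
 * below are hypothetical, chosen only to illustrate the overcount.
 */
#include <stdio.h>

#define NR_HIGHMEM_ZONES 3

static unsigned long min_ul(unsigned long a, unsigned long b)
{
	return a < b ? a : b;
}

int main(void)
{
	/* hypothetical per-zone free pages and high watermarks */
	unsigned long free_pages[NR_HIGHMEM_ZONES] = { 1000, 2000, 3000 };
	unsigned long high_wmark[NR_HIGHMEM_ZONES] = { 100, 200, 300 };
	unsigned long highmem_file_pages = 500;
	unsigned long x_old = 0, x_new = 0, dirtyable, nr_pages;
	int i;

	/*
	 * Old scheme: a single running total, seeded with the file
	 * pages, is clamped against each zone's watermark and added
	 * to x on EVERY iteration, so earlier zones (and the file
	 * pages) are counted multiple times.
	 */
	dirtyable = highmem_file_pages;
	for (i = 0; i < NR_HIGHMEM_ZONES; i++) {
		dirtyable += free_pages[i];
		dirtyable -= min_ul(dirtyable, high_wmark[i]);
		x_old += dirtyable;
	}

	/*
	 * New scheme: each zone's contribution is clamped and summed
	 * independently; the file pages are added exactly once.
	 */
	for (i = 0; i < NR_HIGHMEM_ZONES; i++) {
		nr_pages = free_pages[i];
		nr_pages -= min_ul(nr_pages, high_wmark[i]);
		x_new += nr_pages;
	}
	x_new += highmem_file_pages;

	printf("old scheme: %lu, new scheme: %lu\n", x_old, x_new);
	return 0;
}

With these made-up inputs the old scheme reports 10500 dirtyable pages against 5900 for the new one. Because the caller subtracts the highmem estimate from total dirtyable memory when highmem pages are not considered dirtyable, an overestimate of this size is what drives dirty_thresh toward 0, as the commit message describes.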