mm: numa: Scan pages with elevated page_mapcount
Currently automatic NUMA balancing is unable to distinguish between falsely
shared and genuinely private pages except by ignoring pages with an elevated
page_mapcount entirely. This avoids shared pages bouncing between the nodes
whose tasks are using them, but it also ignores quite a lot of data.

This patch kicks away the training wheels in preparation for adding support
for identifying shared/private pages. The ordering is so that the impact of
the shared/private detection can be easily measured. Note that the patch does
not migrate shared, file-backed pages within VMAs marked VM_EXEC, as these are
generally shared library pages. Migrating such pages is not beneficial: there
is an expectation that they are read-shared between caches, and iTLB and
iCache pressure is generally low.

Signed-off-by: Mel Gorman <mgorman@suse.de>
Reviewed-by: Rik van Riel <riel@redhat.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/1381141781-10992-28-git-send-email-mgorman@suse.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 9ff1d9ff3c
commit 1bc115d87d
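The heart of the change is the new skip-migration heuristic described above:
a page is left where it is only when it is mapped by multiple processes,
backed by the page cache, and mapped through an executable VMA (in practice,
shared library text). The standalone C sketch below models that predicate
outside the kernel; the page_model/vma_model structs and skip_numa_migration()
helper are illustrative stand-ins rather than kernel API, with only the
VM_EXEC flag value taken from the kernel headers.

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for the relevant bits of kernel state (not the real structs). */
#define VM_EXEC 0x4UL   /* same bit value as the kernel's VM_EXEC */

struct page_model {
	int mapcount;        /* number of processes mapping the page */
	bool is_file_cache;  /* file-backed (page cache) vs anonymous */
};

struct vma_model {
	unsigned long vm_flags;
};

/*
 * Mirrors the check added to migrate_misplaced_page(): refuse to migrate
 * only when the page is mapped by multiple processes, is file backed and
 * sits in an executable VMA, i.e. it is probably shared library text.
 */
static bool skip_numa_migration(const struct page_model *page,
				const struct vma_model *vma)
{
	return page->mapcount != 1 &&
	       page->is_file_cache &&
	       (vma->vm_flags & VM_EXEC);
}

int main(void)
{
	struct page_model libc_text   = { .mapcount = 12, .is_file_cache = true };
	struct page_model shared_anon = { .mapcount = 3,  .is_file_cache = false };
	struct vma_model exec_vma     = { .vm_flags = VM_EXEC };

	printf("shared library text: skip=%d\n",
	       skip_numa_migration(&libc_text, &exec_vma));   /* skip=1 */
	printf("shared anonymous:    skip=%d\n",
	       skip_numa_migration(&shared_anon, &exec_vma)); /* skip=0 */
	return 0;
}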
include/linux/migrate.h
@@ -90,11 +90,12 @@ static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
 #endif /* CONFIG_MIGRATION */
 
 #ifdef CONFIG_NUMA_BALANCING
-extern int migrate_misplaced_page(struct page *page, int node);
-extern int migrate_misplaced_page(struct page *page, int node);
+extern int migrate_misplaced_page(struct page *page,
+				  struct vm_area_struct *vma, int node);
 extern bool migrate_ratelimited(int node);
 #else
-static inline int migrate_misplaced_page(struct page *page, int node)
+static inline int migrate_misplaced_page(struct page *page,
+					 struct vm_area_struct *vma, int node)
 {
 	return -EAGAIN; /* can't migrate now */
 }
mm/huge_memory.c
@@ -1484,14 +1484,12 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
 			struct page *page = pmd_page(*pmd);
 
 			/*
-			 * Only check non-shared pages. Do not trap faults
-			 * against the zero page. The read-only data is likely
-			 * to be read-cached on the local CPU cache and it is
-			 * less useful to know about local vs remote hits on
-			 * the zero page.
+			 * Do not trap faults against the zero page. The
+			 * read-only data is likely to be read-cached on the
+			 * local CPU cache and it is less useful to know about
+			 * local vs remote hits on the zero page.
 			 */
-			if (page_mapcount(page) == 1 &&
-			    !is_huge_zero_page(page) &&
+			if (!is_huge_zero_page(page) &&
 			    !pmd_numa(*pmd)) {
 				entry = pmdp_get_and_clear(mm, addr, pmd);
 				entry = pmd_mknuma(entry);
mm/memory.c
@@ -3577,7 +3577,7 @@ int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	}
 
 	/* Migrate to the requested node */
-	migrated = migrate_misplaced_page(page, target_nid);
+	migrated = migrate_misplaced_page(page, vma, target_nid);
 	if (migrated)
 		page_nid = target_nid;
 
@@ -3642,16 +3642,13 @@ static int do_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		page = vm_normal_page(vma, addr, pteval);
 		if (unlikely(!page))
 			continue;
-		/* only check non-shared pages */
-		if (unlikely(page_mapcount(page) != 1))
-			continue;
 
 		last_nid = page_nid_last(page);
 		page_nid = page_to_nid(page);
 		target_nid = numa_migrate_prep(page, vma, addr, page_nid);
 		pte_unmap_unlock(pte, ptl);
 		if (target_nid != -1) {
-			migrated = migrate_misplaced_page(page, target_nid);
+			migrated = migrate_misplaced_page(page, vma, target_nid);
 			if (migrated)
 				page_nid = target_nid;
 		} else {
mm/migrate.c
@@ -1599,7 +1599,8 @@ int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
  * node. Caller is expected to have an elevated reference count on
  * the page that will be dropped by this function before returning.
  */
-int migrate_misplaced_page(struct page *page, int node)
+int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
+			   int node)
 {
 	pg_data_t *pgdat = NODE_DATA(node);
 	int isolated;
@@ -1607,10 +1608,11 @@ int migrate_misplaced_page(struct page *page, int node)
 	LIST_HEAD(migratepages);
 
 	/*
-	 * Don't migrate pages that are mapped in multiple processes.
-	 * TODO: Handle false sharing detection instead of this hammer
+	 * Don't migrate file pages that are mapped in multiple processes
+	 * with execute permissions as they are probably shared libraries.
 	 */
-	if (page_mapcount(page) != 1)
+	if (page_mapcount(page) != 1 && page_is_file_cache(page) &&
+	    (vma->vm_flags & VM_EXEC))
 		goto out;
 
 	/*
@@ -1660,13 +1662,6 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
 	struct mem_cgroup *memcg = NULL;
 	int page_lru = page_is_file_cache(page);
 
-	/*
-	 * Don't migrate pages that are mapped in multiple processes.
-	 * TODO: Handle false sharing detection instead of this hammer
-	 */
-	if (page_mapcount(page) != 1)
-		goto out_dropref;
-
 	/*
 	 * Rate-limit the amount of data that is being migrated to a node.
 	 * Optimal placement is no good if the memory bus is saturated and
mm/mprotect.c
@@ -69,9 +69,7 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
 					if (last_nid != this_nid)
 						all_same_node = false;
 
-					/* only check non-shared pages */
-					if (!pte_numa(oldpte) &&
-					    page_mapcount(page) == 1) {
+					if (!pte_numa(oldpte)) {
 						ptent = pte_mknuma(ptent);
 						updated = true;
 					}