mm: cleanup: remove #ifdef CONFIG_MIGRATION
#ifdef in *.c files decreases source readability a bit; removing it is better.

This patch doesn't have any functional change.

Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Christoph Lameter <cl@linux-foundation.org>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: Lee Schermerhorn <Lee.Schermerhorn@hp.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 1b0bd11886
commit 64cdd548ff
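The pattern applied here is to confine the #ifdef to the header: the header defines PAGE_MIGRATION as 1 or 0 depending on CONFIG_MIGRATION, and the .c files test it with an ordinary C condition. Both branches stay visible to the compiler and are type-checked, while constant folding drops the dead one, so there is no runtime cost. A minimal sketch of the idea outside the kernel tree; CONFIG_FEATURE_X, FEATURE_X, and the functions below are invented names used only for illustration:

/*
 * feature.h -- keep the #ifdef here and export a compile-time constant.
 * (Sketch only, not kernel code.)
 */
#ifdef CONFIG_FEATURE_X
#define FEATURE_X 1
extern int do_feature_work(int arg);	/* real implementation elsewhere */
#else
#define FEATURE_X 0
static inline int do_feature_work(int arg) { return 0; }	/* stub */
#endif

/*
 * caller.c -- no #ifdef needed; both branches are parsed and type-checked,
 * and the compiler eliminates the dead one because FEATURE_X is a
 * compile-time constant.
 */
int handle(int arg)
{
	if (FEATURE_X && arg > 0)
		return do_feature_work(arg);

	return -1;
}

Note that the header still has to provide stubs (or no-op definitions) for anything referenced in the now-unconditional branch, which is why the #else side of migrate.h keeps its static inline versions.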
include/linux/migrate.h
@@ -7,6 +7,8 @@
 typedef struct page *new_page_t(struct page *, unsigned long private, int **);
 
 #ifdef CONFIG_MIGRATION
+#define PAGE_MIGRATION 1
+
 extern int putback_lru_pages(struct list_head *l);
 extern int migrate_page(struct address_space *,
 			struct page *, struct page *);
@@ -20,6 +22,8 @@ extern int migrate_vmas(struct mm_struct *mm,
 			const nodemask_t *from, const nodemask_t *to,
 			unsigned long flags);
 #else
+#define PAGE_MIGRATION 0
+
 static inline int putback_lru_pages(struct list_head *l) { return 0; }
 static inline int migrate_pages(struct list_head *l, new_page_t x,
 		unsigned long private) { return -ENOSYS; }
mm/mprotect.c
@@ -22,6 +22,7 @@
 #include <linux/swap.h>
 #include <linux/swapops.h>
 #include <linux/mmu_notifier.h>
+#include <linux/migrate.h>
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
 #include <asm/cacheflush.h>
@@ -59,8 +60,7 @@ static void change_pte_range(struct mm_struct *mm, pmd_t *pmd,
 				ptent = pte_mkwrite(ptent);
 
 			ptep_modify_prot_commit(mm, addr, pte, ptent);
-#ifdef CONFIG_MIGRATION
-		} else if (!pte_file(oldpte)) {
+		} else if (PAGE_MIGRATION && !pte_file(oldpte)) {
 			swp_entry_t entry = pte_to_swp_entry(oldpte);
 
 			if (is_write_migration_entry(entry)) {
@@ -72,9 +72,7 @@ static void change_pte_range(struct mm_struct *mm, pmd_t *pmd,
 				set_pte_at(mm, addr, pte,
 					swp_entry_to_pte(entry));
 			}
-#endif
 		}
-
 	} while (pte++, addr += PAGE_SIZE, addr != end);
 	arch_leave_lazy_mmu_mode();
 	pte_unmap_unlock(pte - 1, ptl);
mm/rmap.c
@@ -50,6 +50,7 @@
 #include <linux/kallsyms.h>
 #include <linux/memcontrol.h>
 #include <linux/mmu_notifier.h>
+#include <linux/migrate.h>
 
 #include <asm/tlbflush.h>
 
@@ -818,8 +819,7 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 				spin_unlock(&mmlist_lock);
 			}
 			dec_mm_counter(mm, anon_rss);
-#ifdef CONFIG_MIGRATION
-		} else {
+		} else if (PAGE_MIGRATION) {
 			/*
 			 * Store the pfn of the page in a special migration
 			 * pte. do_swap_page() will wait until the migration
@@ -827,19 +827,15 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 			 */
 			BUG_ON(!migration);
 			entry = make_migration_entry(page, pte_write(pteval));
-#endif
 		}
 		set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
 		BUG_ON(pte_file(*pte));
-	} else
-#ifdef CONFIG_MIGRATION
-	if (migration) {
+	} else if (PAGE_MIGRATION && migration) {
 		/* Establish migration entry for a file page */
 		swp_entry_t entry;
 		entry = make_migration_entry(page, pte_write(pteval));
 		set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
 	} else
-#endif
 		dec_mm_counter(mm, file_rss);