mm: remove __hugepage_set_anon_rmap()
This function has been identical to __page_set_anon_rmap() since the time it was introduced (8 years ago). The patch removes the function and makes its users call __page_set_anon_rmap() instead.

Link: http://lkml.kernel.org/r/154504875359.30235.6237926369392564851.stgit@localhost.localdomain
Signed-off-by: Kirill Tkhai <ktkhai@virtuozzo.com>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Jerome Glisse <jglisse@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
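For context, here is a sketch of the helper that remains after this patch; its body is inferred from the removed __hugepage_set_anon_rmap() shown in the diff below, which the changelog says is identical, so treat it as illustrative rather than verbatim kernel source:

/*
 * Sketch of __page_set_anon_rmap(), reconstructed from the removed
 * __hugepage_set_anon_rmap() body in the diff below (the changelog
 * states the two were identical). Not copied verbatim from mm/rmap.c.
 */
static void __page_set_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address, int exclusive)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	BUG_ON(!anon_vma);

	/* Already an anon page: mapping is set up, nothing to do. */
	if (PageAnon(page))
		return;

	/* Non-exclusive mappings must point at the root anon_vma. */
	if (!exclusive)
		anon_vma = anon_vma->root;

	/* Encode the anon_vma pointer in page->mapping and set the index. */
	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
	page->mapping = (struct address_space *) anon_vma;
	page->index = linear_page_index(vma, address);
}

Both hugetlb call sites below simply switch to this helper, passing exclusive = 0 from hugepage_add_anon_rmap() and exclusive = 1 from hugepage_add_new_anon_rmap().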
parent 4918e7625f
commit 451b9514a5

 mm/rmap.c | 25 ++++---------------------
 1 changed file with 4 additions and 21 deletions
diff --git a/mm/rmap.c b/mm/rmap.c
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1019,7 +1019,7 @@ void page_move_anon_rmap(struct page *page, struct vm_area_struct *vma)
 
 /**
  * __page_set_anon_rmap - set up new anonymous rmap
- * @page:	Page to add to rmap
+ * @page:	Page or Hugepage to add to rmap
  * @vma:	VM area to add page to.
  * @address:	User virtual address of the mapping
  * @exclusive:	the page is exclusively owned by the current process
@@ -1916,27 +1916,10 @@ void rmap_walk_locked(struct page *page, struct rmap_walk_control *rwc)
 
 #ifdef CONFIG_HUGETLB_PAGE
 /*
- * The following three functions are for anonymous (private mapped) hugepages.
+ * The following two functions are for anonymous (private mapped) hugepages.
  * Unlike common anonymous pages, anonymous hugepages have no accounting code
  * and no lru code, because we handle hugepages differently from common pages.
  */
-static void __hugepage_set_anon_rmap(struct page *page,
-	struct vm_area_struct *vma, unsigned long address, int exclusive)
-{
-	struct anon_vma *anon_vma = vma->anon_vma;
-
-	BUG_ON(!anon_vma);
-
-	if (PageAnon(page))
-		return;
-	if (!exclusive)
-		anon_vma = anon_vma->root;
-
-	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
-	page->mapping = (struct address_space *) anon_vma;
-	page->index = linear_page_index(vma, address);
-}
-
 void hugepage_add_anon_rmap(struct page *page,
 			struct vm_area_struct *vma, unsigned long address)
 {
@@ -1948,7 +1931,7 @@ void hugepage_add_anon_rmap(struct page *page,
 	/* address might be in next vma when migration races vma_adjust */
 	first = atomic_inc_and_test(compound_mapcount_ptr(page));
 	if (first)
-		__hugepage_set_anon_rmap(page, vma, address, 0);
+		__page_set_anon_rmap(page, vma, address, 0);
 }
 
 void hugepage_add_new_anon_rmap(struct page *page,
@@ -1956,6 +1939,6 @@ void hugepage_add_new_anon_rmap(struct page *page,
 {
 	BUG_ON(address < vma->vm_start || address >= vma->vm_end);
 	atomic_set(compound_mapcount_ptr(page), 0);
-	__hugepage_set_anon_rmap(page, vma, address, 1);
+	__page_set_anon_rmap(page, vma, address, 1);
 }
 #endif /* CONFIG_HUGETLB_PAGE */