mm: clean up page_remove_rmap()
page_remove_rmap() has multiple PageAnon() tests and it has deep nesting. Clean this up.

Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Acked-by: Mel Gorman <mel@csn.ul.ie>
Reviewed-by: Wu Fengguang <fengguang.wu@intel.com>
Cc: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
  parent 57dd28fb05
  commit b904dcfed6

 mm/rmap.c | 57
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -739,34 +739,37 @@ void page_dup_rmap(struct page *page, struct vm_area_struct *vma, unsigned long
  */
 void page_remove_rmap(struct page *page)
 {
-        if (atomic_add_negative(-1, &page->_mapcount)) {
-                /*
-                 * Now that the last pte has gone, s390 must transfer dirty
-                 * flag from storage key to struct page. We can usually skip
-                 * this if the page is anon, so about to be freed; but perhaps
-                 * not if it's in swapcache - there might be another pte slot
-                 * containing the swap entry, but page not yet written to swap.
-                 */
-                if ((!PageAnon(page) || PageSwapCache(page)) &&
-                    page_test_dirty(page)) {
-                        page_clear_dirty(page);
-                        set_page_dirty(page);
-                }
-                if (PageAnon(page))
-                        mem_cgroup_uncharge_page(page);
-                __dec_zone_page_state(page,
-                        PageAnon(page) ? NR_ANON_PAGES : NR_FILE_MAPPED);
-                mem_cgroup_update_mapped_file_stat(page, -1);
-                /*
-                 * It would be tidy to reset the PageAnon mapping here,
-                 * but that might overwrite a racing page_add_anon_rmap
-                 * which increments mapcount after us but sets mapping
-                 * before us: so leave the reset to free_hot_cold_page,
-                 * and remember that it's only reliable while mapped.
-                 * Leaving it set also helps swapoff to reinstate ptes
-                 * faster for those pages still in swapcache.
-                 */
-        }
+        /* page still mapped by someone else? */
+        if (!atomic_add_negative(-1, &page->_mapcount))
+                return;
+
+        /*
+         * Now that the last pte has gone, s390 must transfer dirty
+         * flag from storage key to struct page. We can usually skip
+         * this if the page is anon, so about to be freed; but perhaps
+         * not if it's in swapcache - there might be another pte slot
+         * containing the swap entry, but page not yet written to swap.
+         */
+        if ((!PageAnon(page) || PageSwapCache(page)) && page_test_dirty(page)) {
+                page_clear_dirty(page);
+                set_page_dirty(page);
+        }
+        if (PageAnon(page)) {
+                mem_cgroup_uncharge_page(page);
+                __dec_zone_page_state(page, NR_ANON_PAGES);
+        } else {
+                __dec_zone_page_state(page, NR_FILE_MAPPED);
+        }
+        mem_cgroup_update_mapped_file_stat(page, -1);
+        /*
+         * It would be tidy to reset the PageAnon mapping here,
+         * but that might overwrite a racing page_add_anon_rmap
+         * which increments mapcount after us but sets mapping
+         * before us: so leave the reset to free_hot_cold_page,
+         * and remember that it's only reliable while mapped.
+         * Leaving it set also helps swapoff to reinstate ptes
+         * faster for those pages still in swapcache.
+         */
 }
 
 /*
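As the commit message says, the change is purely structural: the single atomic_add_negative() test becomes an early return, and the repeated PageAnon() checks are folded into one if/else so each accounting path appears only once. Below is a minimal stand-alone C sketch of the same reshaping; every name in it (struct obj, release_nested, release_flat, account_anon, account_file) is hypothetical and only mirrors the shape of page_remove_rmap(), not its behaviour.

/*
 * Illustrative sketch only (not kernel code): the guard-clause refactoring
 * the patch applies, shown on a toy reference-counted object.
 */
#include <stdbool.h>
#include <stdio.h>

struct obj {
        int mapcount;   /* last unmapper takes it to -1, like page->_mapcount */
        bool anon;      /* stands in for PageAnon() */
};

static void account_anon(void) { puts("dec anon counter"); }
static void account_file(void) { puts("dec file counter"); }

/* Before: everything nests under one test and "anon" is checked twice. */
static void release_nested(struct obj *o)
{
        if (--o->mapcount < 0) {
                if (o->anon)
                        puts("uncharge anon object");
                if (o->anon)
                        account_anon();
                else
                        account_file();
        }
}

/* After: bail out early, then branch on "anon" exactly once. */
static void release_flat(struct obj *o)
{
        /* object still used by someone else? */
        if (--o->mapcount >= 0)
                return;

        if (o->anon) {
                puts("uncharge anon object");
                account_anon();
        } else {
                account_file();
        }
}

int main(void)
{
        struct obj a = { .mapcount = 0, .anon = true };
        struct obj b = { .mapcount = 0, .anon = false };

        release_nested(&a);     /* last release of an "anon" object */
        release_flat(&b);       /* last release of a "file" object */
        return 0;
}

Both shapes are equivalent; the flat version just keeps the common exit obvious and evaluates the anon/file distinction exactly once, which is the whole point of the patch.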