[PATCH] mm: simplify vmscan vs release refcounting

The VM has an interesting race where a page refcount can drop to zero, but it
is still on the LRU lists for a short time.  This was solved by testing a 0->1
refcount transition when picking up pages from the LRU, and dropping the
refcount in that case.
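
To make the old scheme concrete, here is a minimal userspace sketch of that
0->1 test (a model only: C11 stdatomic stands in for the kernel's atomic_t,
and struct page is reduced to its refcount).  Note that _count is biased by
-1, so a logical refcount of zero is stored as -1:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct page { atomic_int _count; };  /* logical refcount N stored as N - 1 */

    static void set_page_count(struct page *p, int v)
    {
        atomic_store(&p->_count, v - 1);
    }

    static void __put_page(struct page *p)
    {
        atomic_fetch_sub(&p->_count, 1);
    }

    /* Old helper: take a ref and return true if the logical refcount was
     * already zero (a 0->1 transition) -- the caller must then back off. */
    static bool get_page_testone(struct page *p)
    {
        return atomic_fetch_add(&p->_count, 1) + 1 == 0;
    }

    int main(void)
    {
        struct page pg;
        set_page_count(&pg, 0);          /* freed page, still on the LRU */
        if (get_page_testone(&pg)) {     /* we elevated a zero refcount... */
            __put_page(&pg);             /* ...so undo it and skip the page */
            puts("raced with free: backed off");
        }
        return 0;
    }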

Instead, use atomic_add_unless() to ensure we never pick up a 0 refcount page
from the LRU; thus a 0 refcount page will never have its refcount elevated
until it is allocated again.
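
A sketch of the replacement under the same assumptions; atomic_add_unless()
is modeled here with a compare-exchange loop, and the ref is simply never
taken when the stored count is -1 (logical zero):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct page { atomic_int _count; };  /* logical refcount N stored as N - 1 */

    static void set_page_count(struct page *p, int v)
    {
        atomic_store(&p->_count, v - 1);
    }

    /* Model of atomic_add_unless(v, a, u): add a to *v unless *v == u,
     * returning true if the add was performed. */
    static bool atomic_add_unless(atomic_int *v, int a, int u)
    {
        int c = atomic_load(v);
        while (c != u) {
            if (atomic_compare_exchange_weak(v, &c, c + a))
                return true;             /* c was reloaded on failure */
        }
        return false;
    }

    /* New helper: the ref is taken only if the logical refcount is not
     * already zero, so a free page can never be revived from the LRU. */
    static bool get_page_unless_zero(struct page *p)
    {
        return atomic_add_unless(&p->_count, 1, -1);
    }

    int main(void)
    {
        struct page freed, live;
        set_page_count(&freed, 0);                   /* stored -1 */
        set_page_count(&live, 2);                    /* stored  1 */
        printf("freed: %d live: %d\n",
               get_page_unless_zero(&freed),         /* 0: left alone */
               get_page_unless_zero(&live));         /* 1: ref now 3  */
        return 0;
    }

The failure case needs no undo or requeue: the page is simply left where it
is for the release code to finish with, which is what the vmscan hunk below
exploits.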

Signed-off-by: Nick Piggin <npiggin@suse.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
commit 7c8ee9a863
parent f205b2fe62
Author: Nick Piggin <npiggin@suse.de>
Date:   2006-03-22 00:08:03 -08:00
Committed by: Linus Torvalds

2 files changed, 22 insertions(+), 22 deletions(-)

diff --git a/include/linux/mm.h b/include/linux/mm.h
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -301,17 +301,20 @@ struct page {
  * Drop a ref, return true if the logical refcount fell to zero (the page has
  * no users)
  */
-#define put_page_testzero(p)				\
-	({						\
-		BUG_ON(atomic_read(&(p)->_count) == -1);\
-		atomic_add_negative(-1, &(p)->_count);	\
-	})
+static inline int put_page_testzero(struct page *page)
+{
+	BUG_ON(atomic_read(&page->_count) == -1);
+	return atomic_add_negative(-1, &page->_count);
+}
 
 /*
- * Grab a ref, return true if the page previously had a logical refcount of
- * zero.  ie: returns true if we just grabbed an already-deemed-to-be-free page
+ * Try to grab a ref unless the page has a refcount of zero, return false if
+ * that is the case.
  */
-#define get_page_testone(p)	atomic_inc_and_test(&(p)->_count)
+static inline int get_page_unless_zero(struct page *page)
+{
+	return atomic_add_unless(&page->_count, 1, -1);
+}
 
 #define set_page_count(p,v)	atomic_set(&(p)->_count, (v) - 1)
 #define __put_page(p)		atomic_dec(&(p)->_count)

diff --git a/mm/vmscan.c b/mm/vmscan.c
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1083,29 +1083,26 @@ static int isolate_lru_pages(int nr_to_scan, struct list_head *src,
 	int scan = 0;
 
 	while (scan++ < nr_to_scan && !list_empty(src)) {
+		struct list_head *target;
 		page = lru_to_page(src);
 		prefetchw_prev_lru_page(page, src, flags);
 
 		BUG_ON(!PageLRU(page));
 
 		list_del(&page->lru);
-		if (unlikely(get_page_testone(page))) {
+		target = src;
+		if (likely(get_page_unless_zero(page))) {
 			/*
-			 * It is being freed elsewhere
+			 * Be careful not to clear PageLRU until after we're
+			 * sure the page is not being freed elsewhere -- the
+			 * page release code relies on it.
 			 */
-			__put_page(page);
-			list_add(&page->lru, src);
-			continue;
-		}
+			ClearPageLRU(page);
+			target = dst;
+			nr_taken++;
+		} /* else it is being freed elsewhere */
 
-		/*
-		 * Be careful not to clear PageLRU until after we're sure
-		 * the page is not being freed elsewhere -- the page release
-		 * code relies on it.
-		 */
-		ClearPageLRU(page);
-		list_add(&page->lru, dst);
-		nr_taken++;
+		list_add(&page->lru, target);
 	}
 	*scanned = scan;
 	return nr_taken;