forked from luck/tmp_suning_uos_patched
[PATCH] splice: fix page LRU accounting
Currently we rely on the PIPE_BUF_FLAG_LRU flag being set correctly to know whether we need to fiddle with the page's LRU state after stealing it; however, for some origins we simply don't know whether the page is on the LRU list or not. So remove PIPE_BUF_FLAG_LRU and do the check/add manually in pipe_to_file() instead.

Signed-off-by: Jens Axboe <axboe@suse.de>
commit a893b99be7
parent 7591489a8f
fs/splice.c | 31
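For context on the path this patch touches, here is a minimal userspace sketch (not part of the commit) that drives pipe_to_file() by splicing a page of pipe data into a regular file with SPLICE_F_MOVE, the flag that asks the kernel to try to steal pipe pages rather than copy them; the output path and buffer size are arbitrary examples.

/*
 * Illustrative only: fill a pipe with one page of data, then splice it
 * into a file with SPLICE_F_MOVE so the kernel may attempt page stealing.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	int pipefd[2];
	char buf[4096];
	int out = open("/tmp/splice-out", O_WRONLY | O_CREAT | O_TRUNC, 0644);

	if (out < 0 || pipe(pipefd) < 0) {
		perror("setup");
		return 1;
	}

	memset(buf, 'x', sizeof(buf));
	/* Put one page worth of data into the pipe ... */
	if (write(pipefd[1], buf, sizeof(buf)) != (ssize_t) sizeof(buf)) {
		perror("write");
		return 1;
	}

	/* ... then ask the kernel to move those pipe pages into the file. */
	ssize_t n = splice(pipefd[0], NULL, out, NULL, sizeof(buf),
			   SPLICE_F_MOVE);
	if (n < 0) {
		perror("splice");
		return 1;
	}
	printf("spliced %zd bytes\n", n);
	return 0;
}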
fs/splice.c:

@@ -78,7 +78,6 @@ static int page_cache_pipe_buf_steal(struct pipe_inode_info *info,
 		return 1;
 	}
 
-	buf->flags |= PIPE_BUF_FLAG_LRU;
 	return 0;
 }
 
@@ -86,8 +85,6 @@ static void page_cache_pipe_buf_release(struct pipe_inode_info *info,
 				      struct pipe_buffer *buf)
 {
 	page_cache_release(buf->page);
-	buf->page = NULL;
-	buf->flags &= ~PIPE_BUF_FLAG_LRU;
 }
 
 static int page_cache_pipe_buf_pin(struct pipe_inode_info *info,
@@ -570,22 +567,36 @@ static int pipe_to_file(struct pipe_inode_info *info, struct pipe_buffer *buf,
 	if ((sd->flags & SPLICE_F_MOVE) && this_len == PAGE_CACHE_SIZE) {
 		/*
 		 * If steal succeeds, buf->page is now pruned from the vm
-		 * side (LRU and page cache) and we can reuse it. The page
-		 * will also be looked on successful return.
+		 * side (page cache) and we can reuse it. The page will also
+		 * be locked on successful return.
 		 */
 		if (buf->ops->steal(info, buf))
 			goto find_page;
 
 		page = buf->page;
+		page_cache_get(page);
+
+		/*
+		 * page must be on the LRU for adding to the pagecache.
+		 * Check this without grabbing the zone lock, if it isn't
+		 * the do grab the zone lock, recheck, and add if necessary.
+		 */
+		if (!PageLRU(page)) {
+			struct zone *zone = page_zone(page);
+
+			spin_lock_irq(&zone->lru_lock);
+			if (!PageLRU(page)) {
+				SetPageLRU(page);
+				add_page_to_inactive_list(zone, page);
+			}
+			spin_unlock_irq(&zone->lru_lock);
+		}
+
 		if (add_to_page_cache(page, mapping, index, gfp_mask)) {
+			page_cache_release(page);
 			unlock_page(page);
 			goto find_page;
 		}
-
-		page_cache_get(page);
-
-		if (!(buf->flags & PIPE_BUF_FLAG_LRU))
-			lru_cache_add(page);
 	} else {
 find_page:
 		page = find_lock_page(mapping, index);
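The PageLRU handling added above follows a check-then-recheck-under-the-lock pattern: the unlocked PageLRU() test avoids taking the zone lock in the common case, and the second test under zone->lru_lock guards against the page having been put on the LRU between the two checks. The sketch below is an illustrative userspace analogue only, with a made-up struct and a pthread mutex standing in for the page flag and the zone lock; it is not kernel code.

/*
 * Illustrative analogue of the unlocked-check / locked-recheck pattern.
 * "item", "on_list" and "list_lock" are invented for this sketch.
 */
#include <pthread.h>
#include <stdbool.h>

struct item {
	bool on_list;			/* plays the role of PageLRU(page) */
	struct item *next;
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;	/* plays the role of zone->lru_lock */
static struct item *list_head;

static void add_if_missing(struct item *it)
{
	/* Cheap unlocked check first: most items are already on the list. */
	if (!it->on_list) {
		pthread_mutex_lock(&list_lock);
		/*
		 * Recheck under the lock: another thread may have added the
		 * item between the unlocked check and acquiring the lock.
		 */
		if (!it->on_list) {
			it->on_list = true;
			it->next = list_head;
			list_head = it;
		}
		pthread_mutex_unlock(&list_lock);
	}
}

int main(void)
{
	static struct item a;

	add_if_missing(&a);
	add_if_missing(&a);	/* second call sees on_list set and does nothing */
	return 0;
}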
include/linux/pipe_fs_i.h:

@@ -5,9 +5,8 @@
 
 #define PIPE_BUFFERS (16)
 
-#define PIPE_BUF_FLAG_LRU	0x01	/* page is on the LRU */
-#define PIPE_BUF_FLAG_ATOMIC	0x02	/* was atomically mapped */
-#define PIPE_BUF_FLAG_GIFT	0x04	/* page is a gift */
+#define PIPE_BUF_FLAG_ATOMIC	0x01	/* was atomically mapped */
+#define PIPE_BUF_FLAG_GIFT	0x02	/* page is a gift */
 
 struct pipe_buffer {
 	struct page *page;
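With PIPE_BUF_FLAG_LRU gone, the remaining flags are renumbered so each still occupies its own bit. As a throwaway illustration (the two defines are copied from the new header; everything else here is made up), single-bit flags like these are set, tested and cleared as follows:

#include <stdio.h>

#define PIPE_BUF_FLAG_ATOMIC	0x01	/* was atomically mapped */
#define PIPE_BUF_FLAG_GIFT	0x02	/* page is a gift */

int main(void)
{
	unsigned int flags = 0;

	flags |= PIPE_BUF_FLAG_ATOMIC | PIPE_BUF_FLAG_GIFT;	/* set both bits */
	if (flags & PIPE_BUF_FLAG_GIFT)				/* test one bit */
		printf("page was gifted\n");
	flags &= ~PIPE_BUF_FLAG_GIFT;				/* clear it again */
	printf("flags now %#x\n", flags);			/* prints 0x1 */
	return 0;
}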