f2fs: introduce f2fs_read_single_page() for cleanup
This patch introduces f2fs_read_single_page() to wrap the core operations of reading one page in f2fs_mpage_readpages(). In addition, if f2fs_mpage_readpages() fails, propagate the error number to f2fs_read_data_page(); for the f2fs_read_data_pages() path, always return success.

Signed-off-by: Chao Yu <yuchao0@huawei.com>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
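For context, the last hunk of the diff below changes f2fs_mpage_readpages() to return "pages ? 0 : ret", so the single-page (->readpage) path now sees the error while the readahead (->readpages) path keeps returning success. The sketch below illustrates how that return value reaches the two callers in fs/f2fs/data.c; it is a simplification for illustration only and is not part of this patch (the real functions also handle inline data and tracepoints).

/*
 * Simplified sketch of the two callers (not part of this patch;
 * inline-data handling and tracepoints are omitted).
 */
static int f2fs_read_data_page(struct file *file, struct page *page)
{
	/* Single-page path: pages == NULL, so with this patch the
	 * error number from f2fs_mpage_readpages() is propagated. */
	return f2fs_mpage_readpages(page->mapping, NULL, page, 1, false);
}

static int f2fs_read_data_pages(struct file *file,
			struct address_space *mapping,
			struct list_head *pages, unsigned nr_pages)
{
	/* Readahead path: pages != NULL, so 0 is still returned
	 * regardless of per-page errors, as before. */
	return f2fs_mpage_readpages(mapping, pages, NULL, nr_pages, true);
}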
parent: 5c533b19ae
commit: 2df0ab0457
changed file: fs/f2fs/data.c (214 lines changed)
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -1508,6 +1508,118 @@ int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
 	return ret;
 }
 
+static int f2fs_read_single_page(struct inode *inode, struct page *page,
+					unsigned nr_pages,
+					struct f2fs_map_blocks *map,
+					struct bio **bio_ret,
+					sector_t *last_block_in_bio,
+					bool is_readahead)
+{
+	struct bio *bio = *bio_ret;
+	const unsigned blkbits = inode->i_blkbits;
+	const unsigned blocksize = 1 << blkbits;
+	sector_t block_in_file;
+	sector_t last_block;
+	sector_t last_block_in_file;
+	sector_t block_nr;
+	int ret = 0;
+
+	block_in_file = (sector_t)page->index;
+	last_block = block_in_file + nr_pages;
+	last_block_in_file = (i_size_read(inode) + blocksize - 1) >>
+							blkbits;
+	if (last_block > last_block_in_file)
+		last_block = last_block_in_file;
+
+	/* just zeroing out page which is beyond EOF */
+	if (block_in_file >= last_block)
+		goto zero_out;
+	/*
+	 * Map blocks using the previous result first.
+	 */
+	if ((map->m_flags & F2FS_MAP_MAPPED) &&
+			block_in_file > map->m_lblk &&
+			block_in_file < (map->m_lblk + map->m_len))
+		goto got_it;
+
+	/*
+	 * Then do more f2fs_map_blocks() calls until we are
+	 * done with this page.
+	 */
+	map->m_lblk = block_in_file;
+	map->m_len = last_block - block_in_file;
+
+	ret = f2fs_map_blocks(inode, map, 0, F2FS_GET_BLOCK_DEFAULT);
+	if (ret)
+		goto out;
+got_it:
+	if ((map->m_flags & F2FS_MAP_MAPPED)) {
+		block_nr = map->m_pblk + block_in_file - map->m_lblk;
+		SetPageMappedToDisk(page);
+
+		if (!PageUptodate(page) && !cleancache_get_page(page)) {
+			SetPageUptodate(page);
+			goto confused;
+		}
+
+		if (!f2fs_is_valid_blkaddr(F2FS_I_SB(inode), block_nr,
+						DATA_GENERIC)) {
+			ret = -EFAULT;
+			goto out;
+		}
+	} else {
+zero_out:
+		zero_user_segment(page, 0, PAGE_SIZE);
+		if (!PageUptodate(page))
+			SetPageUptodate(page);
+		unlock_page(page);
+		goto out;
+	}
+
+	/*
+	 * This page will go to BIO. Do we need to send this
+	 * BIO off first?
+	 */
+	if (bio && (*last_block_in_bio != block_nr - 1 ||
+		!__same_bdev(F2FS_I_SB(inode), block_nr, bio))) {
+submit_and_realloc:
+		__submit_bio(F2FS_I_SB(inode), bio, DATA);
+		bio = NULL;
+	}
+	if (bio == NULL) {
+		bio = f2fs_grab_read_bio(inode, block_nr, nr_pages,
+				is_readahead ? REQ_RAHEAD : 0);
+		if (IS_ERR(bio)) {
+			ret = PTR_ERR(bio);
+			bio = NULL;
+			goto out;
+		}
+	}
+
+	/*
+	 * If the page is under writeback, we need to wait for
+	 * its completion to see the correct decrypted data.
+	 */
+	f2fs_wait_on_block_writeback(inode, block_nr);
+
+	if (bio_add_page(bio, page, blocksize, 0) < blocksize)
+		goto submit_and_realloc;
+
+	inc_page_count(F2FS_I_SB(inode), F2FS_RD_DATA);
+	ClearPageError(page);
+	*last_block_in_bio = block_nr;
+	goto out;
+confused:
+	if (bio) {
+		__submit_bio(F2FS_I_SB(inode), bio, DATA);
+		bio = NULL;
+	}
+	unlock_page(page);
+out:
+	*bio_ret = bio;
+	return ret;
+}
+
 /*
  * This function was originally taken from fs/mpage.c, and customized for f2fs.
  * Major change was from block_size == page_size in f2fs by default.
@@ -1524,13 +1636,8 @@ static int f2fs_mpage_readpages(struct address_space *mapping,
 	struct bio *bio = NULL;
 	sector_t last_block_in_bio = 0;
 	struct inode *inode = mapping->host;
-	const unsigned blkbits = inode->i_blkbits;
-	const unsigned blocksize = 1 << blkbits;
-	sector_t block_in_file;
-	sector_t last_block;
-	sector_t last_block_in_file;
-	sector_t block_nr;
 	struct f2fs_map_blocks map;
+	int ret = 0;
 
 	map.m_pblk = 0;
 	map.m_lblk = 0;
@@ -1553,98 +1660,13 @@ static int f2fs_mpage_readpages(struct address_space *mapping,
 			goto next_page;
 		}
 
-		block_in_file = (sector_t)page->index;
-		last_block = block_in_file + nr_pages;
-		last_block_in_file = (i_size_read(inode) + blocksize - 1) >>
-								blkbits;
-		if (last_block > last_block_in_file)
-			last_block = last_block_in_file;
-
-		/* just zeroing out page which is beyond EOF */
-		if (block_in_file >= last_block)
-			goto zero_out;
-		/*
-		 * Map blocks using the previous result first.
-		 */
-		if ((map.m_flags & F2FS_MAP_MAPPED) &&
-				block_in_file > map.m_lblk &&
-				block_in_file < (map.m_lblk + map.m_len))
-			goto got_it;
-
-		/*
-		 * Then do more f2fs_map_blocks() calls until we are
-		 * done with this page.
-		 */
-		map.m_lblk = block_in_file;
-		map.m_len = last_block - block_in_file;
-
-		if (f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT))
-			goto set_error_page;
-got_it:
-		if ((map.m_flags & F2FS_MAP_MAPPED)) {
-			block_nr = map.m_pblk + block_in_file - map.m_lblk;
-			SetPageMappedToDisk(page);
-
-			if (!PageUptodate(page) && !cleancache_get_page(page)) {
-				SetPageUptodate(page);
-				goto confused;
-			}
-
-			if (!f2fs_is_valid_blkaddr(F2FS_I_SB(inode), block_nr,
-								DATA_GENERIC))
-				goto set_error_page;
-		} else {
-zero_out:
-			zero_user_segment(page, 0, PAGE_SIZE);
-			if (!PageUptodate(page))
-				SetPageUptodate(page);
-			unlock_page(page);
-			goto next_page;
-		}
-
-		/*
-		 * This page will go to BIO. Do we need to send this
-		 * BIO off first?
-		 */
-		if (bio && (last_block_in_bio != block_nr - 1 ||
-			!__same_bdev(F2FS_I_SB(inode), block_nr, bio))) {
-submit_and_realloc:
-			__submit_bio(F2FS_I_SB(inode), bio, DATA);
-			bio = NULL;
-		}
-		if (bio == NULL) {
-			bio = f2fs_grab_read_bio(inode, block_nr, nr_pages,
-					is_readahead ? REQ_RAHEAD : 0);
-			if (IS_ERR(bio)) {
-				bio = NULL;
-				goto set_error_page;
-			}
-		}
-
-		/*
-		 * If the page is under writeback, we need to wait for
-		 * its completion to see the correct decrypted data.
-		 */
-		f2fs_wait_on_block_writeback(inode, block_nr);
-
-		if (bio_add_page(bio, page, blocksize, 0) < blocksize)
-			goto submit_and_realloc;
-
-		inc_page_count(F2FS_I_SB(inode), F2FS_RD_DATA);
-		ClearPageError(page);
-		last_block_in_bio = block_nr;
-		goto next_page;
-set_error_page:
-		SetPageError(page);
-		zero_user_segment(page, 0, PAGE_SIZE);
-		unlock_page(page);
-		goto next_page;
-confused:
-		if (bio) {
-			__submit_bio(F2FS_I_SB(inode), bio, DATA);
-			bio = NULL;
-		}
-		unlock_page(page);
+		ret = f2fs_read_single_page(inode, page, nr_pages, &map, &bio,
+					&last_block_in_bio, is_readahead);
+		if (ret) {
+			SetPageError(page);
+			zero_user_segment(page, 0, PAGE_SIZE);
+			unlock_page(page);
+		}
 next_page:
 		if (pages)
 			put_page(page);
@@ -1652,7 +1674,7 @@ static int f2fs_mpage_readpages(struct address_space *mapping,
 	BUG_ON(pages && !list_empty(pages));
 	if (bio)
 		__submit_bio(F2FS_I_SB(inode), bio, DATA);
-	return 0;
+	return pages ? 0 : ret;
 }
 
 static int f2fs_read_data_page(struct file *file, struct page *page)