staging: erofs: move stagingpage operations to compress.h
Staging pages behave as bounce pages for temporary use. Move the staging page operations to compress.h, since the upcoming decompressor will allocate staging pages as well.

Reviewed-by: Chao Yu <yuchao0@huawei.com>
Signed-off-by: Gao Xiang <gaoxiang25@huawei.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent fa61a33f53
commit 274812334b
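Background sketch (not part of the patch): the commit message above describes staging pages as bounce pages for temporary use. Below is a minimal illustration of that life cycle; the wrapper name is hypothetical, and the two-argument erofs_allocpage(pool, gfp) helper declared in the driver's internal.h is an assumption based on this era of the staging tree. The release side is exactly the z_erofs_put_stagingpage() helper introduced by this patch.

/* Illustrative only -- not part of this commit. */
#include "internal.h"	/* erofs_allocpage() and core erofs definitions (assumed) */
#include "compress.h"	/* Z_EROFS_MAPPING_STAGING and the staging page helpers */

static struct page *z_erofs_grab_stagingpage(struct list_head *pagepool)
{
	/* prefer a page already parked in @pagepool, else allocate a fresh one */
	struct page *page = erofs_allocpage(pagepool, GFP_NOFS);

	if (page) {
		/*
		 * Tag the page so it can later be told apart from real
		 * file/cached pages and from NULL-mapping (e.g. truncated)
		 * pages; z_erofs_page_is_staging() checks this marker.
		 */
		page->mapping = Z_EROFS_MAPPING_STAGING;
	}
	return page;
}

When decompression is done with such a page, the caller passes it to z_erofs_put_stagingpage(), which either parks it in the pagepool for reuse or drops it if another reference is still held.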
drivers/staging/erofs/compress.h | 40 (new file)

--- /dev/null
+++ b/drivers/staging/erofs/compress.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * linux/drivers/staging/erofs/compress.h
+ *
+ * Copyright (C) 2019 HUAWEI, Inc.
+ *             http://www.huawei.com/
+ * Created by Gao Xiang <gaoxiang25@huawei.com>
+ */
+#ifndef __EROFS_FS_COMPRESS_H
+#define __EROFS_FS_COMPRESS_H
+
+/*
+ * - 0x5A110C8D ('sallocated', Z_EROFS_MAPPING_STAGING) -
+ * used to mark temporary allocated pages from other
+ * file/cached pages and NULL mapping pages.
+ */
+#define Z_EROFS_MAPPING_STAGING	((void *)0x5A110C8D)
+
+/* check if a page is marked as staging */
+static inline bool z_erofs_page_is_staging(struct page *page)
+{
+	return page->mapping == Z_EROFS_MAPPING_STAGING;
+}
+
+static inline bool z_erofs_put_stagingpage(struct list_head *pagepool,
+					   struct page *page)
+{
+	if (!z_erofs_page_is_staging(page))
+		return false;
+
+	/* staging pages should not be used by others at the same time */
+	if (page_ref_count(page) > 1)
+		put_page(page);
+	else
+		list_add(&page->lru, pagepool);
+	return true;
+}
+
+#endif
+
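A side note on the pagepool that z_erofs_put_stagingpage() fills above: it is a plain list threaded through page->lru, so draining it back to the page allocator is straightforward. The sketch below is illustrative only and the helper name is made up; the driver's own call sites drain their pools with the same list/put_page() pattern (typically via the kernel's put_pages_list()).

#include <linux/list.h>
#include <linux/mm.h>	/* struct page, put_page() */

/* Illustrative only: return every page parked in @pagepool to the allocator. */
static void z_erofs_drain_pagepool(struct list_head *pagepool)
{
	while (!list_empty(pagepool)) {
		struct page *page =
			list_first_entry(pagepool, struct page, lru);

		/* unhook from the pool, then drop the reference it held */
		list_del(&page->lru);
		put_page(page);
	}
}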

--- a/drivers/staging/erofs/unzip_vle.c
+++ b/drivers/staging/erofs/unzip_vle.c
@@ -11,6 +11,7 @@
  * distribution for more details.
  */
 #include "unzip_vle.h"
+#include "compress.h"
 #include <linux/prefetch.h>
 
 #include <trace/events/erofs.h>
@@ -855,7 +856,7 @@ static inline void z_erofs_vle_read_endio(struct bio *bio)
 		DBG_BUGON(PageUptodate(page));
 		DBG_BUGON(!page->mapping);
 
-		if (unlikely(!sbi && !z_erofs_is_stagingpage(page))) {
+		if (unlikely(!sbi && !z_erofs_page_is_staging(page))) {
 			sbi = EROFS_SB(page->mapping->host->i_sb);
 
 			if (time_to_inject(sbi, FAULT_READ_IO)) {
@@ -947,7 +948,7 @@ static int z_erofs_vle_unzip(struct super_block *sb,
 		DBG_BUGON(!page);
 		DBG_BUGON(!page->mapping);
 
-		if (z_erofs_gather_if_stagingpage(page_pool, page))
+		if (z_erofs_put_stagingpage(page_pool, page))
 			continue;
 
 		if (page_type == Z_EROFS_VLE_PAGE_TYPE_HEAD)
@@ -977,7 +978,7 @@ static int z_erofs_vle_unzip(struct super_block *sb,
 		DBG_BUGON(!page);
 		DBG_BUGON(!page->mapping);
 
-		if (!z_erofs_is_stagingpage(page)) {
+		if (!z_erofs_page_is_staging(page)) {
 			if (erofs_page_is_managed(sbi, page)) {
 				if (unlikely(!PageUptodate(page)))
 					err = -EIO;
@@ -1055,7 +1056,7 @@ static int z_erofs_vle_unzip(struct super_block *sb,
 			continue;
 
 		/* recycle all individual staging pages */
-		(void)z_erofs_gather_if_stagingpage(page_pool, page);
+		(void)z_erofs_put_stagingpage(page_pool, page);
 
 		WRITE_ONCE(compressed_pages[i], NULL);
 	}
@@ -1068,7 +1069,7 @@ static int z_erofs_vle_unzip(struct super_block *sb,
 		DBG_BUGON(!page->mapping);
 
 		/* recycle all individual staging pages */
-		if (z_erofs_gather_if_stagingpage(page_pool, page))
+		if (z_erofs_put_stagingpage(page_pool, page))
 			continue;
 
 		if (unlikely(err < 0))

--- a/drivers/staging/erofs/unzip_vle.h
+++ b/drivers/staging/erofs/unzip_vle.h
@@ -16,26 +16,6 @@
 #include "internal.h"
 #include "unzip_pagevec.h"
 
-/*
- * - 0x5A110C8D ('sallocated', Z_EROFS_MAPPING_STAGING) -
- * used for temporary allocated pages (via erofs_allocpage),
- * in order to seperate those from NULL mapping (eg. truncated pages)
- */
-#define Z_EROFS_MAPPING_STAGING	((void *)0x5A110C8D)
-
-#define z_erofs_is_stagingpage(page)	\
-	((page)->mapping == Z_EROFS_MAPPING_STAGING)
-
-static inline bool z_erofs_gather_if_stagingpage(struct list_head *page_pool,
-						 struct page *page)
-{
-	if (z_erofs_is_stagingpage(page)) {
-		list_add(&page->lru, page_pool);
-		return true;
-	}
-	return false;
-}
-
 /*
  * Structure fields follow one of the following exclusion rules.
  *