staging: erofs: clean up internal.h
Tidy up the relative order of variables / declarations in internal.h, move some local static functions out into other files, and add comment tags to #endif lines whose conditional blocks span several lines. No logic changes.

Reviewed-by: Chao Yu <yuchao0@huawei.com>
Signed-off-by: Gao Xiang <gaoxiang25@huawei.com>
Link: https://lore.kernel.org/r/20190731155752.210602-7-gaoxiang25@huawei.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
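The "#endif tags" mentioned above are trailing comments that name the condition a multi-line #ifdef/#else block is closing, so a reader at the #endif does not have to scroll back to find the matching #ifdef. A minimal sketch of the convention (CONFIG_EXAMPLE_FEATURE and example_enabled() are hypothetical names used only for illustration, not part of this patch):

/* tag the closing #endif with the condition it terminates */
#ifdef CONFIG_EXAMPLE_FEATURE
static inline int example_enabled(void)
{
	return 1;
}
#else
static inline int example_enabled(void)
{
	return 0;
}
#endif	/* CONFIG_EXAMPLE_FEATURE */

The diff below applies the same style to the CONFIG_EROFS_FS_DEBUG, CONFIG_EROFS_FAULT_INJECTION, CONFIG_SMP and CONFIG_EROFS_FS_ZIP guards in internal.h.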
This commit is contained in:
parent b1897c60b1
commit 14f362b4f4
@@ -223,6 +223,33 @@ static void copy_from_pcpubuf(struct page **out, const char *dst,
 	}
 }
 
+static void *erofs_vmap(struct page **pages, unsigned int count)
+{
+#ifdef CONFIG_EROFS_FS_USE_VM_MAP_RAM
+	int i = 0;
+
+	while (1) {
+		void *addr = vm_map_ram(pages, count, -1, PAGE_KERNEL);
+		/* retry two more times (totally 3 times) */
+		if (addr || ++i >= 3)
+			return addr;
+		vm_unmap_aliases();
+	}
+	return NULL;
+#else
+	return vmap(pages, count, VM_MAP, PAGE_KERNEL);
+#endif
+}
+
+static void erofs_vunmap(const void *mem, unsigned int count)
+{
+#ifdef CONFIG_EROFS_FS_USE_VM_MAP_RAM
+	vm_unmap_ram(mem, count);
+#else
+	vunmap(mem);
+#endif
+}
+
 static int decompress_generic(struct z_erofs_decompress_req *rq,
 			      struct list_head *pagepool)
 {
@@ -6,8 +6,8 @@
  * http://www.huawei.com/
  * Created by Gao Xiang <gaoxiang25@huawei.com>
  */
-#ifndef __INTERNAL_H
-#define __INTERNAL_H
+#ifndef __EROFS_INTERNAL_H
+#define __EROFS_INTERNAL_H
 
 #include <linux/fs.h>
 #include <linux/dcache.h>
@@ -28,15 +28,11 @@
 #define infoln(x, ...) pr_info(x "\n", ##__VA_ARGS__)
 #ifdef CONFIG_EROFS_FS_DEBUG
 #define debugln(x, ...) pr_debug(x "\n", ##__VA_ARGS__)
-
-#define dbg_might_sleep might_sleep
 #define DBG_BUGON BUG_ON
 #else
 #define debugln(x, ...) ((void)0)
-
-#define dbg_might_sleep() ((void)0)
 #define DBG_BUGON(x) ((void)(x))
-#endif
+#endif /* !CONFIG_EROFS_FS_DEBUG */
 
 enum {
 	FAULT_KMALLOC,
@@ -53,7 +49,7 @@ struct erofs_fault_info {
 	unsigned int inject_rate;
 	unsigned int inject_type;
 };
-#endif
+#endif /* CONFIG_EROFS_FAULT_INJECTION */
 
 #ifdef CONFIG_EROFS_FS_ZIP_CACHE_BIPOLAR
 #define EROFS_FS_ZIP_CACHE_LVL (2)
@@ -71,6 +67,9 @@ struct erofs_fault_info {
 #define EROFS_SUPER_MAGIC EROFS_SUPER_MAGIC_V1
 
 typedef u64 erofs_nid_t;
+typedef u64 erofs_off_t;
+/* data type for filesystem-wide blocks number */
+typedef u32 erofs_blk_t;
 
 struct erofs_sb_info {
 	/* list for all registered superblocks, mainly for shrinker */
@@ -154,7 +153,7 @@ static inline bool time_to_inject(struct erofs_sb_info *sbi, int type)
 static inline void erofs_show_injection_info(int type)
 {
 }
-#endif
+#endif /* !CONFIG_EROFS_FAULT_INJECTION */
 
 static inline void *erofs_kmalloc(struct erofs_sb_info *sbi,
 				  size_t size, gfp_t flags)
@@ -179,6 +178,8 @@ static inline void *erofs_kmalloc(struct erofs_sb_info *sbi,
 #define test_opt(sbi, option) ((sbi)->mount_opt & EROFS_MOUNT_##option)
 
 #ifdef CONFIG_EROFS_FS_ZIP
+#define EROFS_LOCKED_MAGIC (INT_MIN | 0xE0F510CCL)
+
 /* basic unit of the workstation of a super_block */
 struct erofs_workgroup {
 	/* the workgroup index in the workstation */
@@ -188,8 +189,6 @@ struct erofs_workgroup {
 	atomic_t refcount;
 };
 
-#define EROFS_LOCKED_MAGIC (INT_MIN | 0xE0F510CCL)
-
 #if defined(CONFIG_SMP)
 static inline bool erofs_workgroup_try_to_freeze(struct erofs_workgroup *grp,
 						 int val)
@@ -246,50 +245,24 @@ static inline int erofs_wait_on_workgroup_freezed(struct erofs_workgroup *grp)
 	DBG_BUGON(v == EROFS_LOCKED_MAGIC);
 	return v;
 }
-#endif
+#endif /* !CONFIG_SMP */
 
-int erofs_workgroup_put(struct erofs_workgroup *grp);
-struct erofs_workgroup *erofs_find_workgroup(struct super_block *sb,
-					     pgoff_t index, bool *tag);
-int erofs_register_workgroup(struct super_block *sb,
-			     struct erofs_workgroup *grp, bool tag);
-unsigned long erofs_shrink_workstation(struct erofs_sb_info *sbi,
-				       unsigned long nr_shrink, bool cleanup);
-void erofs_workgroup_free_rcu(struct erofs_workgroup *grp);
-
-#ifdef EROFS_FS_HAS_MANAGED_CACHE
-int erofs_try_to_free_all_cached_pages(struct erofs_sb_info *sbi,
-					struct erofs_workgroup *egrp);
-int erofs_try_to_free_cached_page(struct address_space *mapping,
-				  struct page *page);
-
-#define MNGD_MAPPING(sbi) ((sbi)->managed_cache->i_mapping)
-static inline bool erofs_page_is_managed(const struct erofs_sb_info *sbi,
-					 struct page *page)
-{
-	return page->mapping == MNGD_MAPPING(sbi);
-}
-#else
-#define MNGD_MAPPING(sbi) (NULL)
-static inline bool erofs_page_is_managed(const struct erofs_sb_info *sbi,
-					 struct page *page) { return false; }
-#endif
-
-#define DEFAULT_MAX_SYNC_DECOMPRESS_PAGES 3
-
-static inline bool __should_decompress_synchronously(struct erofs_sb_info *sbi,
-						      unsigned int nr)
-{
-	return nr <= sbi->max_sync_decompress_pages;
-}
+/* hard limit of pages per compressed cluster */
+#define Z_EROFS_CLUSTER_MAX_PAGES (CONFIG_EROFS_FS_CLUSTER_PAGE_LIMIT)
+#define EROFS_PCPUBUF_NR_PAGES Z_EROFS_CLUSTER_MAX_PAGES
 
+/* page count of a compressed cluster */
+#define erofs_clusterpages(sbi) ((1 << (sbi)->clusterbits) / PAGE_SIZE)
 
 int __init z_erofs_init_zip_subsystem(void);
 void z_erofs_exit_zip_subsystem(void);
 #else
+#define EROFS_PCPUBUF_NR_PAGES 0
+
 /* dummy initializer/finalizer for the decompression subsystem */
 static inline int z_erofs_init_zip_subsystem(void) { return 0; }
 static inline void z_erofs_exit_zip_subsystem(void) {}
-#endif
+#endif /* !CONFIG_EROFS_FS_ZIP */
 
 /* we strictly follow PAGE_SIZE and no buffer head yet */
 #define LOG_BLOCK_SIZE PAGE_SHIFT
@@ -308,23 +281,6 @@ static inline void z_erofs_exit_zip_subsystem(void) {}
 
 #define ROOT_NID(sb) ((sb)->root_nid)
 
-#ifdef CONFIG_EROFS_FS_ZIP
-/* hard limit of pages per compressed cluster */
-#define Z_EROFS_CLUSTER_MAX_PAGES (CONFIG_EROFS_FS_CLUSTER_PAGE_LIMIT)
-
-/* page count of a compressed cluster */
-#define erofs_clusterpages(sbi) ((1 << (sbi)->clusterbits) / PAGE_SIZE)
-
-#define EROFS_PCPUBUF_NR_PAGES Z_EROFS_CLUSTER_MAX_PAGES
-#else
-#define EROFS_PCPUBUF_NR_PAGES 0
-#endif
-
-typedef u64 erofs_off_t;
-
-/* data type for filesystem-wide blocks number */
-typedef u32 erofs_blk_t;
-
 #define erofs_blknr(addr) ((addr) / EROFS_BLKSIZ)
 #define erofs_blkoff(addr) ((addr) % EROFS_BLKSIZ)
 #define blknr_to_addr(nr) ((erofs_off_t)(nr) * EROFS_BLKSIZ)
@@ -364,7 +320,7 @@ struct erofs_vnode {
 		unsigned char z_logical_clusterbits;
 		unsigned char z_physical_clusterbits[2];
 	};
-#endif
+#endif /* CONFIG_EROFS_FS_ZIP */
 	};
 	/* the corresponding vfs inode */
 	struct inode vfs_inode;
@@ -472,13 +428,14 @@ static inline int z_erofs_map_blocks_iter(struct inode *inode,
 {
 	return -ENOTSUPP;
 }
-#endif
+#endif /* !CONFIG_EROFS_FS_ZIP */
 
 /* data.c */
-static inline struct bio *
-erofs_grab_bio(struct super_block *sb,
-	       erofs_blk_t blkaddr, unsigned int nr_pages, void *bi_private,
-	       bio_end_io_t endio, bool nofail)
+static inline struct bio *erofs_grab_bio(struct super_block *sb,
+					 erofs_blk_t blkaddr,
+					 unsigned int nr_pages,
+					 void *bi_private, bio_end_io_t endio,
+					 bool nofail)
 {
 	const gfp_t gfp = GFP_NOIO;
 	struct bio *bio;
@@ -525,20 +482,13 @@ static inline struct page *erofs_get_meta_page(struct super_block *sb,
 	return __erofs_get_meta_page(sb, blkaddr, prio, false);
 }
 
-static inline struct page *erofs_get_meta_page_nofail(struct super_block *sb,
-						       erofs_blk_t blkaddr, bool prio)
-{
-	return __erofs_get_meta_page(sb, blkaddr, prio, true);
-}
-
 int erofs_map_blocks(struct inode *, struct erofs_map_blocks *, int);
 
-static inline struct page *
-erofs_get_inline_page(struct inode *inode,
-		      erofs_blk_t blkaddr)
+static inline struct page *erofs_get_inline_page(struct inode *inode,
+						 erofs_blk_t blkaddr)
 {
-	return erofs_get_meta_page(inode->i_sb,
-				   blkaddr, S_ISDIR(inode->i_mode));
+	return erofs_get_meta_page(inode->i_sb, blkaddr,
+				   S_ISDIR(inode->i_mode));
 }
 
 /* inode.c */
@@ -578,34 +528,7 @@ int erofs_namei(struct inode *dir, struct qstr *name,
 /* dir.c */
 extern const struct file_operations erofs_dir_fops;
 
-static inline void *erofs_vmap(struct page **pages, unsigned int count)
-{
-#ifdef CONFIG_EROFS_FS_USE_VM_MAP_RAM
-	int i = 0;
-
-	while (1) {
-		void *addr = vm_map_ram(pages, count, -1, PAGE_KERNEL);
-		/* retry two more times (totally 3 times) */
-		if (addr || ++i >= 3)
-			return addr;
-		vm_unmap_aliases();
-	}
-	return NULL;
-#else
-	return vmap(pages, count, VM_MAP, PAGE_KERNEL);
-#endif
-}
-
-static inline void erofs_vunmap(const void *mem, unsigned int count)
-{
-#ifdef CONFIG_EROFS_FS_USE_VM_MAP_RAM
-	vm_unmap_ram(mem, count);
-#else
-	vunmap(mem);
-#endif
-}
-
-/* utils.c */
+/* utils.c / zdata.c */
 extern struct shrinker erofs_shrinker_info;
 
 struct page *erofs_allocpage(struct list_head *pool, gfp_t gfp);
@@ -625,12 +548,20 @@ static inline void *erofs_get_pcpubuf(unsigned int pagenr)
 #define erofs_put_pcpubuf(buf) do {} while (0)
 #endif
 
+int erofs_workgroup_put(struct erofs_workgroup *grp);
+struct erofs_workgroup *erofs_find_workgroup(struct super_block *sb,
+					     pgoff_t index, bool *tag);
+int erofs_register_workgroup(struct super_block *sb,
+			     struct erofs_workgroup *grp, bool tag);
+unsigned long erofs_shrink_workstation(struct erofs_sb_info *sbi,
+				       unsigned long nr_shrink, bool cleanup);
+void erofs_workgroup_free_rcu(struct erofs_workgroup *grp);
+int erofs_try_to_free_all_cached_pages(struct erofs_sb_info *sbi,
+					struct erofs_workgroup *egrp);
+int erofs_try_to_free_cached_page(struct address_space *mapping,
+				  struct page *page);
 void erofs_register_super(struct super_block *sb);
 void erofs_unregister_super(struct super_block *sb);
 
-#ifndef lru_to_page
-#define lru_to_page(head) (list_entry((head)->prev, struct page, lru))
-#endif
-
-#endif
+#endif /* __EROFS_INTERNAL_H */
@@ -211,7 +211,7 @@ static void default_options(struct erofs_sb_info *sbi)
 {
 	/* set up some FS parameters */
 #ifdef CONFIG_EROFS_FS_ZIP
-	sbi->max_sync_decompress_pages = DEFAULT_MAX_SYNC_DECOMPRESS_PAGES;
+	sbi->max_sync_decompress_pages = 3;
 #endif
 
 #ifdef CONFIG_EROFS_FS_XATTR
@@ -1509,6 +1509,12 @@ static int z_erofs_vle_normalaccess_readpage(struct file *file,
 	return 0;
 }
 
+static bool should_decompress_synchronously(struct erofs_sb_info *sbi,
+					    unsigned int nr)
+{
+	return nr <= sbi->max_sync_decompress_pages;
+}
+
 static int z_erofs_vle_normalaccess_readpages(struct file *filp,
 					      struct address_space *mapping,
 					      struct list_head *pages,
@@ -1517,7 +1523,7 @@ static int z_erofs_vle_normalaccess_readpages(struct file *filp,
 	struct inode *const inode = mapping->host;
 	struct erofs_sb_info *const sbi = EROFS_I_SB(inode);
 
-	bool sync = __should_decompress_synchronously(sbi, nr_pages);
+	bool sync = should_decompress_synchronously(sbi, nr_pages);
 	struct z_erofs_vle_frontend f = VLE_FRONTEND_INIT(inode);
 	gfp_t gfp = mapping_gfp_constraint(mapping, GFP_KERNEL);
 	struct page *head = NULL;
@@ -104,6 +104,19 @@ struct z_erofs_vle_unzip_io_sb {
 	struct super_block *sb;
 };
 
+#ifdef EROFS_FS_HAS_MANAGED_CACHE
+#define MNGD_MAPPING(sbi) ((sbi)->managed_cache->i_mapping)
+static inline bool erofs_page_is_managed(const struct erofs_sb_info *sbi,
+					 struct page *page)
+{
+	return page->mapping == MNGD_MAPPING(sbi);
+}
+#else
+#define MNGD_MAPPING(sbi) (NULL)
+static inline bool erofs_page_is_managed(const struct erofs_sb_info *sbi,
+					 struct page *page) { return false; }
+#endif /* !EROFS_FS_HAS_MANAGED_CACHE */
+
 #define Z_EROFS_ONLINEPAGE_COUNT_BITS 2
 #define Z_EROFS_ONLINEPAGE_COUNT_MASK ((1 << Z_EROFS_ONLINEPAGE_COUNT_BITS) - 1)
 #define Z_EROFS_ONLINEPAGE_INDEX_SHIFT (Z_EROFS_ONLINEPAGE_COUNT_BITS)