Btrfs: add skeleton code for compression heuristic

Add skeleton code for compression heuristics. For now it iterates over
all the pages, but in the end always says "yes, compress please", i.e. it
does not change the current behaviour.

In the future we're going to add various heuristics to analyze the data.
This patch can be used as a baseline for measuring their effectiveness
and performance.

Signed-off-by: Timofey Titovets <nefelim4ag@gmail.com>
Reviewed-by: David Sterba <dsterba@suse.com>
[ enhanced changelog, modified comments ]
Signed-off-by: David Sterba <dsterba@suse.com>
Timofey Titovets 2017-07-17 16:52:58 +03:00 committed by David Sterba
parent 131ce4367a
commit c2fcdcdf36
3 changed files with 39 additions and 4 deletions

fs/btrfs/compression.c

@@ -1047,3 +1047,36 @@ int btrfs_decompress_buf2page(const char *buf, unsigned long buf_start,
	return 1;
}

/*
 * Compression heuristic.
 *
 * For now it's a naive and optimistic 'return true'; we'll extend the logic to
 * quickly (compared to direct compression) detect data characteristics
 * (compressible/uncompressible) to avoid wasting CPU time on uncompressible
 * data.
 *
 * The following types of analysis can be performed:
 * - detect mostly zero data
 * - detect data with low "byte set" size (text, etc)
 * - detect data with low/high "core byte" set
 *
 * Return non-zero if the compression should be done, 0 otherwise.
 */
int btrfs_compress_heuristic(struct inode *inode, u64 start, u64 end)
{
	u64 index = start >> PAGE_SHIFT;
	u64 end_index = end >> PAGE_SHIFT;
	struct page *page;
	int ret = 1;

	while (index <= end_index) {
		page = find_get_page(inode->i_mapping, index);
		kmap(page);
		kunmap(page);
		put_page(page);
		index++;
	}

	return ret;
}
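
As a rough illustration of the "byte set" analysis listed in the comment
above (not part of this patch), a future check could count how many
distinct byte values a sampled buffer contains and treat a small set as a
sign of compressible, text-like data. The helper name, sample handling and
threshold below are hypothetical, and it is written as plain userspace C
for clarity:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/*
 * Hypothetical sketch of a "byte set" heuristic: data that uses only a
 * small subset of the 256 possible byte values (text, logs, zeroed
 * regions) usually compresses well, while random-looking data tends to
 * use most of them.
 */
bool sample_has_small_byte_set(const uint8_t *sample, size_t len)
{
	bool seen[256] = { false };
	size_t distinct = 0;
	size_t i;

	for (i = 0; i < len; i++) {
		if (!seen[sample[i]]) {
			seen[sample[i]] = true;
			distinct++;
		}
	}

	/* Illustrative threshold: few distinct bytes suggests compressible data. */
	return distinct < 64;
}

A later version of btrfs_compress_heuristic() could run a check like this
on data read from the pages it already walks, instead of unconditionally
returning 1.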

fs/btrfs/compression.h

@@ -129,4 +129,6 @@ struct btrfs_compress_op {
extern const struct btrfs_compress_op btrfs_zlib_compress;
extern const struct btrfs_compress_op btrfs_lzo_compress;

int btrfs_compress_heuristic(struct inode *inode, u64 start, u64 end);

#endif

fs/btrfs/inode.c

@@ -392,7 +392,7 @@ static noinline int add_async_extent(struct async_cow *cow,
	return 0;
}

static inline int inode_need_compress(struct inode *inode)
static inline int inode_need_compress(struct inode *inode, u64 start, u64 end)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
@@ -405,7 +405,7 @@ static inline int inode_need_compress(struct inode *inode)
	if (btrfs_test_opt(fs_info, COMPRESS) ||
	    BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS ||
	    BTRFS_I(inode)->force_compress)
		return 1;
		return btrfs_compress_heuristic(inode, start, end);
	return 0;
}
@@ -503,7 +503,7 @@ static noinline void compress_file_range(struct inode *inode,
	 * inode has not been flagged as nocompress. This flag can
	 * change at any time if we discover bad compression ratios.
	 */
	if (inode_need_compress(inode)) {
	if (inode_need_compress(inode, start, end)) {
		WARN_ON(pages);
		pages = kcalloc(nr_pages, sizeof(struct page *), GFP_NOFS);
		if (!pages) {
@@ -1576,7 +1576,7 @@ static int run_delalloc_range(void *private_data, struct page *locked_page,
	} else if (BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC && !force_cow) {
		ret = run_delalloc_nocow(inode, locked_page, start, end,
					 page_started, 0, nr_written);
	} else if (!inode_need_compress(inode)) {
	} else if (!inode_need_compress(inode, start, end)) {
		ret = cow_file_range(inode, locked_page, start, end, end,
				     page_started, nr_written, 1, NULL);
	} else {