forked from luck/tmp_suning_uos_patched
093749e296
There are several issues in the current background GC algorithm: - valid blocks is one of the key factors during cost overhead calculation, so if a segment has fewer valid blocks, but its age is young or it is located in a hot segment, the CB algorithm will still choose the segment as victim, which is not appropriate. - GCed data/node blocks will go to existing logs, no matter whether the update frequency of the data there is the same or not, so it may mix hot and cold data again. - The GC allocator mainly uses LFS type segments, which will consume free segments more quickly. This patch introduces a new algorithm named age threshold based garbage collection to solve the above issues; there are three main steps: 1. select a source victim: - set an age threshold, and select candidates based on the threshold: e.g. 0 means youngest, 100 means oldest; if we set the age threshold to 80 then select dirty segments which have an age in the range of [80, 100] as candidates; - set a candidate_ratio threshold, and select candidates based on the ratio, so that we can shrink candidates to those oldest segments; - select the target segment with the fewest valid blocks in order to migrate blocks with minimum cost; 2. select a target victim: - select candidates based on the age threshold; - set a candidate_radius threshold, and search candidates whose age is around the source victim's; the searching radius should be less than the radius threshold. - select the target segment with the most valid blocks in order to avoid migrating the current target segment. 3. merge valid blocks from the source victim into the target victim with the SSR allocator. 
Test steps: - create 160 dirty segments: * half of them have 128 valid blocks per segment * the other half of them have 384 valid blocks per segment - run background GC Benefit: GC count and block movement count both decrease obviously: - Before: - Valid: 86 - Dirty: 1 - Prefree: 11 - Free: 6001 (6001) GC calls: 162 (BG: 220) - data segments : 160 (160) - node segments : 2 (2) Try to move 41454 blocks (BG: 41454) - data blocks : 40960 (40960) - node blocks : 494 (494) IPU: 0 blocks SSR: 0 blocks in 0 segments LFS: 41364 blocks in 81 segments - After: - Valid: 87 - Dirty: 0 - Prefree: 4 - Free: 6008 (6008) GC calls: 75 (BG: 76) - data segments : 74 (74) - node segments : 1 (1) Try to move 12813 blocks (BG: 12813) - data blocks : 12544 (12544) - node blocks : 269 (269) IPU: 0 blocks SSR: 12032 blocks in 77 segments LFS: 855 blocks in 2 segments Signed-off-by: Chao Yu <yuchao0@huawei.com> [Jaegeuk Kim: fix a bug along with pinfile in-mem segment & clean up] Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
172 lines
4.8 KiB
C
172 lines
4.8 KiB
C
/* SPDX-License-Identifier: GPL-2.0 */
|
|
/*
|
|
* fs/f2fs/gc.h
|
|
*
|
|
* Copyright (c) 2012 Samsung Electronics Co., Ltd.
|
|
* http://www.samsung.com/
|
|
*/
|
|
#define GC_THREAD_MIN_WB_PAGES 1 /*
|
|
* a threshold to determine
|
|
* whether IO subsystem is idle
|
|
* or not
|
|
*/
|
|
#define DEF_GC_THREAD_URGENT_SLEEP_TIME 500 /* 500 ms */
|
|
#define DEF_GC_THREAD_MIN_SLEEP_TIME 30000 /* milliseconds */
|
|
#define DEF_GC_THREAD_MAX_SLEEP_TIME 60000
|
|
#define DEF_GC_THREAD_NOGC_SLEEP_TIME 300000 /* wait 5 min */
|
|
|
|
/* choose candidates from sections which has age of more than 7 days */
|
|
#define DEF_GC_THREAD_AGE_THRESHOLD (60 * 60 * 24 * 7)
|
|
#define DEF_GC_THREAD_CANDIDATE_RATIO 20 /* select 20% oldest sections as candidates */
|
|
#define DEF_GC_THREAD_MAX_CANDIDATE_COUNT 10 /* select at most 10 sections as candidates */
|
|
#define DEF_GC_THREAD_AGE_WEIGHT 60 /* age weight */
|
|
#define DEFAULT_ACCURACY_CLASS 10000 /* accuracy class */
|
|
|
|
#define LIMIT_INVALID_BLOCK 40 /* percentage over total user space */
|
|
#define LIMIT_FREE_BLOCK 40 /* percentage over invalid + free space */
|
|
|
|
#define DEF_GC_FAILED_PINNED_FILES 2048
|
|
|
|
/* Search max. number of dirty segments to select a victim segment */
|
|
#define DEF_MAX_VICTIM_SEARCH 4096 /* covers 8GB */
|
|
|
|
struct f2fs_gc_kthread {
|
|
struct task_struct *f2fs_gc_task;
|
|
wait_queue_head_t gc_wait_queue_head;
|
|
|
|
/* for gc sleep time */
|
|
unsigned int urgent_sleep_time;
|
|
unsigned int min_sleep_time;
|
|
unsigned int max_sleep_time;
|
|
unsigned int no_gc_sleep_time;
|
|
|
|
/* for changing gc mode */
|
|
unsigned int gc_wake;
|
|
};
|
|
|
|
struct gc_inode_list {
|
|
struct list_head ilist;
|
|
struct radix_tree_root iroot;
|
|
};
|
|
|
|
struct victim_info {
|
|
unsigned long long mtime; /* mtime of section */
|
|
unsigned int segno; /* section No. */
|
|
};
|
|
|
|
struct victim_entry {
|
|
struct rb_node rb_node; /* rb node located in rb-tree */
|
|
union {
|
|
struct {
|
|
unsigned long long mtime; /* mtime of section */
|
|
unsigned int segno; /* segment No. */
|
|
};
|
|
struct victim_info vi; /* victim info */
|
|
};
|
|
struct list_head list;
|
|
};
|
|
|
|
/*
|
|
* inline functions
|
|
*/
|
|
|
|
/*
|
|
* On a Zoned device zone-capacity can be less than zone-size and if
|
|
* zone-capacity is not aligned to f2fs segment size(2MB), then the segment
|
|
* starting just before zone-capacity has some blocks spanning across the
|
|
* zone-capacity, these blocks are not usable.
|
|
* Such spanning segments can be in free list so calculate the sum of usable
|
|
* blocks in currently free segments including normal and spanning segments.
|
|
*/
|
|
static inline block_t free_segs_blk_count_zoned(struct f2fs_sb_info *sbi)
|
|
{
|
|
block_t free_seg_blks = 0;
|
|
struct free_segmap_info *free_i = FREE_I(sbi);
|
|
int j;
|
|
|
|
spin_lock(&free_i->segmap_lock);
|
|
for (j = 0; j < MAIN_SEGS(sbi); j++)
|
|
if (!test_bit(j, free_i->free_segmap))
|
|
free_seg_blks += f2fs_usable_blks_in_seg(sbi, j);
|
|
spin_unlock(&free_i->segmap_lock);
|
|
|
|
return free_seg_blks;
|
|
}
|
|
|
|
static inline block_t free_segs_blk_count(struct f2fs_sb_info *sbi)
|
|
{
|
|
if (f2fs_sb_has_blkzoned(sbi))
|
|
return free_segs_blk_count_zoned(sbi);
|
|
|
|
return free_segments(sbi) << sbi->log_blocks_per_seg;
|
|
}
|
|
|
|
static inline block_t free_user_blocks(struct f2fs_sb_info *sbi)
|
|
{
|
|
block_t free_blks, ovp_blks;
|
|
|
|
free_blks = free_segs_blk_count(sbi);
|
|
ovp_blks = overprovision_segments(sbi) << sbi->log_blocks_per_seg;
|
|
|
|
if (free_blks < ovp_blks)
|
|
return 0;
|
|
|
|
return free_blks - ovp_blks;
|
|
}
|
|
|
|
static inline block_t limit_invalid_user_blocks(struct f2fs_sb_info *sbi)
|
|
{
|
|
return (long)(sbi->user_block_count * LIMIT_INVALID_BLOCK) / 100;
|
|
}
|
|
|
|
static inline block_t limit_free_user_blocks(struct f2fs_sb_info *sbi)
|
|
{
|
|
block_t reclaimable_user_blocks = sbi->user_block_count -
|
|
written_block_count(sbi);
|
|
return (long)(reclaimable_user_blocks * LIMIT_FREE_BLOCK) / 100;
|
|
}
|
|
|
|
static inline void increase_sleep_time(struct f2fs_gc_kthread *gc_th,
|
|
unsigned int *wait)
|
|
{
|
|
unsigned int min_time = gc_th->min_sleep_time;
|
|
unsigned int max_time = gc_th->max_sleep_time;
|
|
|
|
if (*wait == gc_th->no_gc_sleep_time)
|
|
return;
|
|
|
|
if ((long long)*wait + (long long)min_time > (long long)max_time)
|
|
*wait = max_time;
|
|
else
|
|
*wait += min_time;
|
|
}
|
|
|
|
static inline void decrease_sleep_time(struct f2fs_gc_kthread *gc_th,
|
|
unsigned int *wait)
|
|
{
|
|
unsigned int min_time = gc_th->min_sleep_time;
|
|
|
|
if (*wait == gc_th->no_gc_sleep_time)
|
|
*wait = gc_th->max_sleep_time;
|
|
|
|
if ((long long)*wait - (long long)min_time < (long long)min_time)
|
|
*wait = min_time;
|
|
else
|
|
*wait -= min_time;
|
|
}
|
|
|
|
static inline bool has_enough_invalid_blocks(struct f2fs_sb_info *sbi)
|
|
{
|
|
block_t invalid_user_blocks = sbi->user_block_count -
|
|
written_block_count(sbi);
|
|
/*
|
|
* Background GC is triggered with the following conditions.
|
|
* 1. There are a number of invalid blocks.
|
|
* 2. There is not enough free space.
|
|
*/
|
|
if (invalid_user_blocks > limit_invalid_user_blocks(sbi) &&
|
|
free_user_blocks(sbi) < limit_free_user_blocks(sbi))
|
|
return true;
|
|
return false;
|
|
}
|