bitmap: genericize percpu bitmap region iterators

Bitmaps are fairly popular for their space efficiency, but we don't have
generic iterators available. Make percpu's bitmap region iterators
available to everyone.

Reviewed-by: Josef Bacik <josef@toxicpanda.com>
Signed-off-by: Dennis Zhou <dennis@kernel.org>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
This commit is contained in:
Dennis Zhou 2019-12-13 16:22:10 -08:00 committed by David Sterba
parent 147a097cf0
commit e837dfde15
2 changed files with 51 additions and 45 deletions

View File

@@ -456,6 +456,41 @@ static inline int bitmap_parse(const char *buf, unsigned int buflen,
return __bitmap_parse(buf, buflen, 0, maskp, nmaskbits); return __bitmap_parse(buf, buflen, 0, maskp, nmaskbits);
} }
/*
 * bitmap_next_clear_region - locate the next run of clear bits
 * @bitmap: bitmap to scan
 * @rs: in: minimum start index to consider; out: start of the clear region
 * @re: out: end (exclusive) of the clear region
 * @end: upper bound (exclusive) of the scan
 *
 * On return [*rs, *re) delimits the first run of zero bits at or after the
 * incoming *rs.  If no zero bit exists below @end, *rs == @end and the
 * region is empty (*rs >= *re).
 */
static inline void bitmap_next_clear_region(unsigned long *bitmap,
					    unsigned int *rs, unsigned int *re,
					    unsigned int end)
{
	unsigned int region_start;

	/* first clear bit, then the set bit that terminates the run */
	region_start = find_next_zero_bit(bitmap, end, *rs);
	*rs = region_start;
	*re = find_next_bit(bitmap, end, region_start + 1);
}
/*
 * bitmap_next_set_region - locate the next run of set bits
 * @bitmap: bitmap to scan
 * @rs: in: minimum start index to consider; out: start of the set region
 * @re: out: end (exclusive) of the set region
 * @end: upper bound (exclusive) of the scan
 *
 * On return [*rs, *re) delimits the first run of one bits at or after the
 * incoming *rs.  If no set bit exists below @end, *rs == @end and the
 * region is empty (*rs >= *re).
 */
static inline void bitmap_next_set_region(unsigned long *bitmap,
					  unsigned int *rs, unsigned int *re,
					  unsigned int end)
{
	unsigned int region_start;

	/* first set bit, then the clear bit that terminates the run */
	region_start = find_next_bit(bitmap, end, *rs);
	*rs = region_start;
	*re = find_next_zero_bit(bitmap, end, region_start + 1);
}
/*
 * Bitmap region iterators.  Iterates over the bitmap between [@start, @end).
 * @rs and @re should be integer variables and will be set to start and end
 * index of the current clear or set region.  The loop terminates when the
 * helper reports an empty region ((rs) >= (re)), i.e. the bitmap is
 * exhausted.
 *
 * NOTE(review): as function-like macros these re-evaluate @bitmap and @end
 * on every iteration — pass side-effect-free expressions.
 */
#define bitmap_for_each_clear_region(bitmap, rs, re, start, end)	     \
	for ((rs) = (start),						     \
	     bitmap_next_clear_region((bitmap), &(rs), &(re), (end));	     \
	     (rs) < (re);						     \
	     (rs) = (re) + 1,						     \
	     bitmap_next_clear_region((bitmap), &(rs), &(re), (end)))

#define bitmap_for_each_set_region(bitmap, rs, re, start, end)		     \
	for ((rs) = (start),						     \
	     bitmap_next_set_region((bitmap), &(rs), &(re), (end));	     \
	     (rs) < (re);						     \
	     (rs) = (re) + 1,						     \
	     bitmap_next_set_region((bitmap), &(rs), &(re), (end)))
/** /**
* BITMAP_FROM_U64() - Represent u64 value in the format suitable for bitmap. * BITMAP_FROM_U64() - Represent u64 value in the format suitable for bitmap.
* @n: u64 value * @n: u64 value

View File

@@ -270,33 +270,6 @@ static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk,
pcpu_unit_page_offset(cpu, page_idx); pcpu_unit_page_offset(cpu, page_idx);
} }
/*
 * pcpu_next_unpop - find the next unpopulated (clear) region in @bitmap.
 * Sets [*rs, *re) to the first run of zero bits at or after the incoming
 * *rs, bounded by @end; *rs == @end means no such region exists.
 */
static void pcpu_next_unpop(unsigned long *bitmap, int *rs, int *re, int end)
{
	*rs = find_next_zero_bit(bitmap, end, *rs);
	*re = find_next_bit(bitmap, end, *rs + 1);
}
/*
 * pcpu_next_pop - find the next populated (set) region in @bitmap.
 * Sets [*rs, *re) to the first run of one bits at or after the incoming
 * *rs, bounded by @end; *rs == @end means no such region exists.
 */
static void pcpu_next_pop(unsigned long *bitmap, int *rs, int *re, int end)
{
	*rs = find_next_bit(bitmap, end, *rs);
	*re = find_next_zero_bit(bitmap, end, *rs + 1);
}
/*
 * Bitmap region iterators.  Iterates over the bitmap between
 * [@start, @end) in @chunk.  @rs and @re should be integer variables
 * and will be set to start and end index of the current free region.
 * Iteration stops when the helper reports an empty region
 * ((rs) >= (re)), i.e. the range is exhausted.
 *
 * NOTE(review): function-like macros — @bitmap and @end are re-evaluated
 * each iteration; pass side-effect-free expressions.
 */
#define pcpu_for_each_unpop_region(bitmap, rs, re, start, end)		     \
	for ((rs) = (start), pcpu_next_unpop((bitmap), &(rs), &(re), (end)); \
	     (rs) < (re);						     \
	     (rs) = (re) + 1, pcpu_next_unpop((bitmap), &(rs), &(re), (end)))

#define pcpu_for_each_pop_region(bitmap, rs, re, start, end)		     \
	for ((rs) = (start), pcpu_next_pop((bitmap), &(rs), &(re), (end));   \
	     (rs) < (re);						     \
	     (rs) = (re) + 1, pcpu_next_pop((bitmap), &(rs), &(re), (end)))
/* /*
* The following are helper functions to help access bitmaps and convert * The following are helper functions to help access bitmaps and convert
* between bitmap offsets to address offsets. * between bitmap offsets to address offsets.
@@ -732,9 +705,8 @@ static void pcpu_chunk_refresh_hint(struct pcpu_chunk *chunk, bool full_scan)
} }
bits = 0; bits = 0;
pcpu_for_each_md_free_region(chunk, bit_off, bits) { pcpu_for_each_md_free_region(chunk, bit_off, bits)
pcpu_block_update(chunk_md, bit_off, bit_off + bits); pcpu_block_update(chunk_md, bit_off, bit_off + bits);
}
} }
/** /**
@@ -749,7 +721,7 @@ static void pcpu_block_refresh_hint(struct pcpu_chunk *chunk, int index)
{ {
struct pcpu_block_md *block = chunk->md_blocks + index; struct pcpu_block_md *block = chunk->md_blocks + index;
unsigned long *alloc_map = pcpu_index_alloc_map(chunk, index); unsigned long *alloc_map = pcpu_index_alloc_map(chunk, index);
int rs, re, start; /* region start, region end */ unsigned int rs, re, start; /* region start, region end */
/* promote scan_hint to contig_hint */ /* promote scan_hint to contig_hint */
if (block->scan_hint) { if (block->scan_hint) {
@@ -765,10 +737,9 @@ static void pcpu_block_refresh_hint(struct pcpu_chunk *chunk, int index)
block->right_free = 0; block->right_free = 0;
/* iterate over free areas and update the contig hints */ /* iterate over free areas and update the contig hints */
pcpu_for_each_unpop_region(alloc_map, rs, re, start, bitmap_for_each_clear_region(alloc_map, rs, re, start,
PCPU_BITMAP_BLOCK_BITS) { PCPU_BITMAP_BLOCK_BITS)
pcpu_block_update(block, rs, re); pcpu_block_update(block, rs, re);
}
} }
/** /**
@@ -1041,13 +1012,13 @@ static void pcpu_block_update_hint_free(struct pcpu_chunk *chunk, int bit_off,
static bool pcpu_is_populated(struct pcpu_chunk *chunk, int bit_off, int bits, static bool pcpu_is_populated(struct pcpu_chunk *chunk, int bit_off, int bits,
int *next_off) int *next_off)
{ {
int page_start, page_end, rs, re; unsigned int page_start, page_end, rs, re;
page_start = PFN_DOWN(bit_off * PCPU_MIN_ALLOC_SIZE); page_start = PFN_DOWN(bit_off * PCPU_MIN_ALLOC_SIZE);
page_end = PFN_UP((bit_off + bits) * PCPU_MIN_ALLOC_SIZE); page_end = PFN_UP((bit_off + bits) * PCPU_MIN_ALLOC_SIZE);
rs = page_start; rs = page_start;
pcpu_next_unpop(chunk->populated, &rs, &re, page_end); bitmap_next_clear_region(chunk->populated, &rs, &re, page_end);
if (rs >= page_end) if (rs >= page_end)
return true; return true;
@@ -1702,13 +1673,13 @@ static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,
/* populate if not all pages are already there */ /* populate if not all pages are already there */
if (!is_atomic) { if (!is_atomic) {
int page_start, page_end, rs, re; unsigned int page_start, page_end, rs, re;
page_start = PFN_DOWN(off); page_start = PFN_DOWN(off);
page_end = PFN_UP(off + size); page_end = PFN_UP(off + size);
pcpu_for_each_unpop_region(chunk->populated, rs, re, bitmap_for_each_clear_region(chunk->populated, rs, re,
page_start, page_end) { page_start, page_end) {
WARN_ON(chunk->immutable); WARN_ON(chunk->immutable);
ret = pcpu_populate_chunk(chunk, rs, re, pcpu_gfp); ret = pcpu_populate_chunk(chunk, rs, re, pcpu_gfp);
@@ -1858,10 +1829,10 @@ static void pcpu_balance_workfn(struct work_struct *work)
spin_unlock_irq(&pcpu_lock); spin_unlock_irq(&pcpu_lock);
list_for_each_entry_safe(chunk, next, &to_free, list) { list_for_each_entry_safe(chunk, next, &to_free, list) {
int rs, re; unsigned int rs, re;
pcpu_for_each_pop_region(chunk->populated, rs, re, 0, bitmap_for_each_set_region(chunk->populated, rs, re, 0,
chunk->nr_pages) { chunk->nr_pages) {
pcpu_depopulate_chunk(chunk, rs, re); pcpu_depopulate_chunk(chunk, rs, re);
spin_lock_irq(&pcpu_lock); spin_lock_irq(&pcpu_lock);
pcpu_chunk_depopulated(chunk, rs, re); pcpu_chunk_depopulated(chunk, rs, re);
@@ -1893,7 +1864,7 @@ static void pcpu_balance_workfn(struct work_struct *work)
} }
for (slot = pcpu_size_to_slot(PAGE_SIZE); slot < pcpu_nr_slots; slot++) { for (slot = pcpu_size_to_slot(PAGE_SIZE); slot < pcpu_nr_slots; slot++) {
int nr_unpop = 0, rs, re; unsigned int nr_unpop = 0, rs, re;
if (!nr_to_pop) if (!nr_to_pop)
break; break;
@@ -1910,9 +1881,9 @@ static void pcpu_balance_workfn(struct work_struct *work)
continue; continue;
/* @chunk can't go away while pcpu_alloc_mutex is held */ /* @chunk can't go away while pcpu_alloc_mutex is held */
pcpu_for_each_unpop_region(chunk->populated, rs, re, 0, bitmap_for_each_clear_region(chunk->populated, rs, re, 0,
chunk->nr_pages) { chunk->nr_pages) {
int nr = min(re - rs, nr_to_pop); int nr = min_t(int, re - rs, nr_to_pop);
ret = pcpu_populate_chunk(chunk, rs, rs + nr, gfp); ret = pcpu_populate_chunk(chunk, rs, rs + nr, gfp);
if (!ret) { if (!ret) {