lightnvm: pblk: move ring buffer alloc/free rb init

pblk's read/write buffer currently takes a pre-allocated buffer and its size
and builds the ring buffer metadata around it. This puts the responsibility
for allocating and freeing the ring buffer memory on the ring buffer user.
Instead, move the allocation and free into the ring buffer helpers
(pblk-rb.c). This simplifies the creation and destruction routines.

Signed-off-by: Javier González <javier@cnexlabs.com>
Signed-off-by: Matias Bjørling <mb@lightnvm.io>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Authored by Javier González on 2018-10-09 13:12:10 +02:00; committed by Jens Axboe
parent 40b8657dcc
commit 9bd1f875c0
3 changed files with 38 additions and 40 deletions
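For orientation, here is a minimal user-space sketch of the ownership pattern this patch adopts: the ring buffer helper allocates and frees its own entry array, so the caller only passes the requested size. The rb_demo_* names are hypothetical stand-ins, not pblk symbols; the rounding only mirrors the pblk_rb_calculate_size() helper shown in the diff below.

/* Sketch: the init/free helpers own the entry array, the caller does not. */
#include <stdio.h>
#include <stdlib.h>

struct rb_demo_entry { void *data; };

struct rb_demo {
        struct rb_demo_entry *entries;
        unsigned int nr_entries;
};

/* Round up to a power of two with at least 128 entries, mirroring
 * pblk_rb_calculate_size(). */
static unsigned int rb_demo_calculate_size(unsigned int nr_entries)
{
        unsigned int order = 7;        /* 1 << 7 == 128 */

        while ((1u << order) < nr_entries)
                order++;
        return 1u << order;
}

static int rb_demo_init(struct rb_demo *rb, unsigned int size)
{
        rb->nr_entries = rb_demo_calculate_size(size);
        rb->entries = calloc(rb->nr_entries, sizeof(*rb->entries));
        return rb->entries ? 0 : -1;   /* allocation now owned by the helper */
}

static void rb_demo_free(struct rb_demo *rb)
{
        free(rb->entries);             /* ...and so is the free */
}

int main(void)
{
        struct rb_demo rb;

        if (rb_demo_init(&rb, 200))    /* caller no longer allocates entries */
                return 1;
        printf("requested 200 entries, got %u\n", rb.nr_entries); /* 256 */
        rb_demo_free(&rb);
        return 0;
}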

drivers/lightnvm/pblk-init.c

@@ -185,17 +185,14 @@ static void pblk_rwb_free(struct pblk *pblk)
 	if (pblk_rb_tear_down_check(&pblk->rwb))
 		pblk_err(pblk, "write buffer error on tear down\n");
 
-	pblk_rb_data_free(&pblk->rwb);
-	vfree(pblk_rb_entries_ref(&pblk->rwb));
+	pblk_rb_free(&pblk->rwb);
 }
 
 static int pblk_rwb_init(struct pblk *pblk)
 {
 	struct nvm_tgt_dev *dev = pblk->dev;
 	struct nvm_geo *geo = &dev->geo;
-	struct pblk_rb_entry *entries;
-	unsigned long nr_entries, buffer_size;
-	unsigned int power_size, power_seg_sz;
+	unsigned long buffer_size;
 	int pgs_in_buffer;
 
 	pgs_in_buffer = max(geo->mw_cunits, geo->ws_opt) * geo->all_luns;
@@ -205,16 +202,7 @@ static int pblk_rwb_init(struct pblk *pblk)
 	else
 		buffer_size = pgs_in_buffer;
 
-	nr_entries = pblk_rb_calculate_size(buffer_size);
-
-	entries = vzalloc(array_size(nr_entries, sizeof(struct pblk_rb_entry)));
-	if (!entries)
-		return -ENOMEM;
-
-	power_size = get_count_order(nr_entries);
-	power_seg_sz = get_count_order(geo->csecs);
-
-	return pblk_rb_init(&pblk->rwb, entries, power_size, power_seg_sz);
+	return pblk_rb_init(&pblk->rwb, buffer_size, geo->csecs);
 }
 
 /* Minimum pages needed within a lun */

drivers/lightnvm/pblk-rb.c

@@ -23,7 +23,7 @@
 
 static DECLARE_RWSEM(pblk_rb_lock);
 
-void pblk_rb_data_free(struct pblk_rb *rb)
+static void pblk_rb_data_free(struct pblk_rb *rb)
 {
 	struct pblk_rb_pages *p, *t;
 
@@ -36,22 +36,46 @@ void pblk_rb_data_free(struct pblk_rb *rb)
 	up_write(&pblk_rb_lock);
 }
 
+void pblk_rb_free(struct pblk_rb *rb)
+{
+	pblk_rb_data_free(rb);
+	vfree(rb->entries);
+}
+
+/*
+ * pblk_rb_calculate_size -- calculate the size of the write buffer
+ */
+static unsigned int pblk_rb_calculate_size(unsigned int nr_entries)
+{
+	/* Alloc a write buffer that can at least fit 128 entries */
+	return (1 << max(get_count_order(nr_entries), 7));
+}
+
 /*
  * Initialize ring buffer. The data and metadata buffers must be previously
  * allocated and their size must be a power of two
  * (Documentation/core-api/circular-buffers.rst)
  */
-int pblk_rb_init(struct pblk_rb *rb, struct pblk_rb_entry *rb_entry_base,
-		 unsigned int power_size, unsigned int power_seg_sz)
+int pblk_rb_init(struct pblk_rb *rb, unsigned int size, unsigned int seg_size)
 {
 	struct pblk *pblk = container_of(rb, struct pblk, rwb);
+	struct pblk_rb_entry *entries;
 	unsigned int init_entry = 0;
-	unsigned int alloc_order = power_size;
 	unsigned int max_order = MAX_ORDER - 1;
-	unsigned int order, iter;
+	unsigned int power_size, power_seg_sz;
+	unsigned int alloc_order, order, iter;
+	unsigned int nr_entries;
+
+	nr_entries = pblk_rb_calculate_size(size);
+	entries = vzalloc(array_size(nr_entries, sizeof(struct pblk_rb_entry)));
+	if (!entries)
+		return -ENOMEM;
+
+	power_size = get_count_order(size);
+	power_seg_sz = get_count_order(seg_size);
 
 	down_write(&pblk_rb_lock);
-	rb->entries = rb_entry_base;
+	rb->entries = entries;
 	rb->seg_size = (1 << power_seg_sz);
 	rb->nr_entries = (1 << power_size);
 	rb->mem = rb->subm = rb->sync = rb->l2p_update = 0;
@@ -62,6 +86,7 @@ int pblk_rb_init(struct pblk_rb *rb, struct pblk_rb_entry *rb_entry_base,
 
 	INIT_LIST_HEAD(&rb->pages);
 
+	alloc_order = power_size;
 	if (alloc_order >= max_order) {
 		order = max_order;
 		iter = (1 << (alloc_order - max_order));
@@ -80,6 +105,7 @@ int pblk_rb_init(struct pblk_rb *rb, struct pblk_rb_entry *rb_entry_base,
 		page_set = kmalloc(sizeof(struct pblk_rb_pages), GFP_KERNEL);
 		if (!page_set) {
 			up_write(&pblk_rb_lock);
+			vfree(entries);
 			return -ENOMEM;
 		}
 
@@ -89,6 +115,7 @@ int pblk_rb_init(struct pblk_rb *rb, struct pblk_rb_entry *rb_entry_base,
 			kfree(page_set);
 			pblk_rb_data_free(rb);
 			up_write(&pblk_rb_lock);
+			vfree(entries);
 			return -ENOMEM;
 		}
 		kaddr = page_address(page_set->pages);
@@ -125,20 +152,6 @@ int pblk_rb_init(struct pblk_rb *rb, struct pblk_rb_entry *rb_entry_base,
 	return 0;
 }
 
-/*
- * pblk_rb_calculate_size -- calculate the size of the write buffer
- */
-unsigned int pblk_rb_calculate_size(unsigned int nr_entries)
-{
-	/* Alloc a write buffer that can at least fit 128 entries */
-	return (1 << max(get_count_order(nr_entries), 7));
-}
-
-void *pblk_rb_entries_ref(struct pblk_rb *rb)
-{
-	return rb->entries;
-}
-
 static void clean_wctx(struct pblk_w_ctx *w_ctx)
 {
 	int flags;
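A side note on the untouched allocation loop visible in the hunks above: when the requested allocation order exceeds MAX_ORDER - 1, pblk_rb_init() builds the data buffer out of several maximum-order page allocations instead of one large one. A minimal sketch of that arithmetic, assuming MAX_ORDER is 11 (the actual value is configuration-dependent):

#include <stdio.h>

#define DEMO_MAX_ORDER 11       /* stand-in for the kernel's MAX_ORDER */

int main(void)
{
        unsigned int alloc_order = 13;  /* buffer of 2^13 entries (example) */
        unsigned int max_order = DEMO_MAX_ORDER - 1;
        unsigned int order, iter;

        if (alloc_order >= max_order) {
                order = max_order;
                /* split into 2^(13-10) = 8 allocations of order 10 */
                iter = 1 << (alloc_order - max_order);
        } else {
                order = alloc_order;
                iter = 1;       /* one allocation covers the whole buffer */
        }
        printf("order=%u, iter=%u\n", order, iter);     /* order=10, iter=8 */
        return 0;
}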

drivers/lightnvm/pblk.h

@@ -734,10 +734,7 @@ struct pblk_line_ws {
 /*
  * pblk ring buffer operations
  */
-int pblk_rb_init(struct pblk_rb *rb, struct pblk_rb_entry *rb_entry_base,
-		 unsigned int power_size, unsigned int power_seg_sz);
-unsigned int pblk_rb_calculate_size(unsigned int nr_entries);
-void *pblk_rb_entries_ref(struct pblk_rb *rb);
+int pblk_rb_init(struct pblk_rb *rb, unsigned int size, unsigned int seg_sz);
 int pblk_rb_may_write_user(struct pblk_rb *rb, struct bio *bio,
 			   unsigned int nr_entries, unsigned int *pos);
 int pblk_rb_may_write_gc(struct pblk_rb *rb, unsigned int nr_entries,
@@ -771,7 +768,7 @@ unsigned int pblk_rb_wrap_pos(struct pblk_rb *rb, unsigned int pos);
 
 int pblk_rb_tear_down_check(struct pblk_rb *rb);
 int pblk_rb_pos_oob(struct pblk_rb *rb, u64 pos);
-void pblk_rb_data_free(struct pblk_rb *rb);
+void pblk_rb_free(struct pblk_rb *rb);
 ssize_t pblk_rb_sysfs(struct pblk_rb *rb, char *buf);
 
 /*