UBIFS: add bulk-read facility
Some flash media are capable of reading sequentially at faster rates. The UBIFS bulk-read facility is designed to take advantage of that by reading, in one go, consecutive data nodes that are also located consecutively in the same LEB. On an ARM platform with OneNAND, read speed goes from 17 MiB/s to 19 MiB/s.

Signed-off-by: Adrian Hunter <ext-adrian.hunter@nokia.com>
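[Reviewer note] The point of the facility in miniature: many short media reads become one long sequential read plus cheap in-memory splitting. A minimal userspace sketch of that trade-off, not part of the commit; read_media() and the sizes are hypothetical stand-ins for ubi_read() and UBIFS data nodes:

#include <stdio.h>
#include <string.h>

#define NODE_SZ   4096
#define NODE_CNT  8

static char leb[NODE_CNT * NODE_SZ];    /* stand-in for one LEB */

/* stand-in for ubi_read(): one call per media transaction */
static void read_media(void *buf, int offs, int len)
{
        memcpy(buf, leb + offs, len);
}

int main(void)
{
        char buf[NODE_CNT * NODE_SZ];
        int i;

        /* without bulk-read: NODE_CNT separate media reads */
        for (i = 0; i < NODE_CNT; i++)
                read_media(buf + i * NODE_SZ, i * NODE_SZ, NODE_SZ);

        /*
         * with bulk-read: one sequential media read; the nodes are then
         * split up in memory, which is much cheaper than the extra I/O
         */
        read_media(buf, 0, NODE_CNT * NODE_SZ);
        printf("read %d nodes in one go\n", NODE_CNT);
        return 0;
}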
Documentation/filesystems/ubifs.txt

@@ -86,6 +86,9 @@ norm_unmount (*) commit on unmount; the journal is committed
fast_unmount     do not commit on unmount; this option makes
                 unmount faster, but the next mount slower
                 because of the need to replay the journal.
bulk_read        read more in one go to take advantage of flash
                 media that read faster sequentially
no_bulk_read (*) do not bulk-read

Quick usage instructions
fs/ubifs/file.c
@@ -577,8 +577,256 @@ static int ubifs_write_end(struct file *file, struct address_space *mapping,
        return copied;
}

/**
 * populate_page - copy data nodes into a page for bulk-read.
 * @c: UBIFS file-system description object
 * @page: page
 * @bu: bulk-read information
 * @n: next zbranch slot
 *
 * This function returns %0 on success and a negative error code on failure.
 */
static int populate_page(struct ubifs_info *c, struct page *page,
                         struct bu_info *bu, int *n)
{
        int i = 0, nn = *n, offs = bu->zbranch[0].offs, hole = 1, read = 0;
        struct inode *inode = page->mapping->host;
        loff_t i_size = i_size_read(inode);
        unsigned int page_block;
        void *addr, *zaddr;
        pgoff_t end_index;

        dbg_gen("ino %lu, pg %lu, i_size %lld, flags %#lx",
                inode->i_ino, page->index, i_size, page->flags);

        addr = zaddr = kmap(page);

        end_index = (i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
        if (!i_size || page->index > end_index) {
                memset(addr, 0, PAGE_CACHE_SIZE);
                goto out_hole;
        }

        page_block = page->index << UBIFS_BLOCKS_PER_PAGE_SHIFT;
        while (1) {
                int err, len, out_len, dlen;

                if (nn >= bu->cnt ||
                    key_block(c, &bu->zbranch[nn].key) != page_block)
                        memset(addr, 0, UBIFS_BLOCK_SIZE);
                else {
                        struct ubifs_data_node *dn;

                        dn = bu->buf + (bu->zbranch[nn].offs - offs);

                        ubifs_assert(dn->ch.sqnum >
                                     ubifs_inode(inode)->creat_sqnum);

                        len = le32_to_cpu(dn->size);
                        if (len <= 0 || len > UBIFS_BLOCK_SIZE)
                                goto out_err;

                        dlen = le32_to_cpu(dn->ch.len) - UBIFS_DATA_NODE_SZ;
                        out_len = UBIFS_BLOCK_SIZE;
                        err = ubifs_decompress(&dn->data, dlen, addr, &out_len,
                                               le16_to_cpu(dn->compr_type));
                        if (err || len != out_len)
                                goto out_err;

                        if (len < UBIFS_BLOCK_SIZE)
                                memset(addr + len, 0, UBIFS_BLOCK_SIZE - len);

                        nn += 1;
                        hole = 0;
                        read = (i << UBIFS_BLOCK_SHIFT) + len;
                }
                if (++i >= UBIFS_BLOCKS_PER_PAGE)
                        break;
                addr += UBIFS_BLOCK_SIZE;
                page_block += 1;
        }

        if (end_index == page->index) {
                int len = i_size & (PAGE_CACHE_SIZE - 1);

                if (len < read)
                        memset(zaddr + len, 0, read - len);
        }

out_hole:
        if (hole) {
                SetPageChecked(page);
                dbg_gen("hole");
        }

        SetPageUptodate(page);
        ClearPageError(page);
        flush_dcache_page(page);
        kunmap(page);
        *n = nn;
        return 0;

out_err:
        ClearPageUptodate(page);
        SetPageError(page);
        flush_dcache_page(page);
        kunmap(page);
        ubifs_err("bad data node (block %u, inode %lu)",
                  page_block, inode->i_ino);
        return -EINVAL;
}
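[Reviewer note] populate_page() assembles UBIFS_BLOCKS_PER_PAGE blocks into one page, zero-filling every block that has no data node and tracking whether the whole page turned out to be a hole. A minimal userspace sketch of that assembly loop, not part of the commit; BLOCK_SIZE, BLOCKS_PER_PAGE and lookup_node() are hypothetical stand-ins for the UBIFS constants and the TNC lookup:

#include <stdio.h>
#include <string.h>

#define BLOCK_SIZE      512     /* hypothetical; UBIFS uses 4096 */
#define BLOCKS_PER_PAGE 8       /* hypothetical 4 KiB page */

/* fills blk[] and returns non-zero if a data node exists for this block */
static int lookup_node(int block, char *blk)
{
        if (block % 2 == 0) {           /* pretend odd blocks are holes */
                memset(blk, 'D', BLOCK_SIZE);
                return 1;
        }
        return 0;
}

int main(void)
{
        char page[BLOCKS_PER_PAGE * BLOCK_SIZE];
        int i, hole = 1;

        for (i = 0; i < BLOCKS_PER_PAGE; i++) {
                char *addr = page + i * BLOCK_SIZE;

                if (lookup_node(i, addr))
                        hole = 0;                       /* real data seen */
                else
                        memset(addr, 0, BLOCK_SIZE);    /* zero-fill hole */
        }
        printf("page %s a hole\n", hole ? "is" : "is not");
        return 0;
}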

/**
 * ubifs_do_bulk_read - do bulk-read.
 * @c: UBIFS file-system description object
 * @page1: first page
 *
 * This function returns %1 if the bulk-read is done, otherwise %0 is returned.
 */
static int ubifs_do_bulk_read(struct ubifs_info *c, struct page *page1)
{
        pgoff_t offset = page1->index, end_index;
        struct address_space *mapping = page1->mapping;
        struct inode *inode = mapping->host;
        struct ubifs_inode *ui = ubifs_inode(inode);
        struct bu_info *bu;
        int err, page_idx, page_cnt, ret = 0, n = 0;
        loff_t isize;

        bu = kmalloc(sizeof(struct bu_info), GFP_NOFS);
        if (!bu)
                return 0;

        bu->buf_len = c->bulk_read_buf_size;
        bu->buf = kmalloc(bu->buf_len, GFP_NOFS);
        if (!bu->buf)
                goto out_free;

        data_key_init(c, &bu->key, inode->i_ino,
                      offset << UBIFS_BLOCKS_PER_PAGE_SHIFT);

        err = ubifs_tnc_get_bu_keys(c, bu);
        if (err)
                goto out_warn;

        if (bu->eof) {
                /* Turn off bulk-read at the end of the file */
                ui->read_in_a_row = 1;
                ui->bulk_read = 0;
        }

        page_cnt = bu->blk_cnt >> UBIFS_BLOCKS_PER_PAGE_SHIFT;
        if (!page_cnt) {
                /*
                 * This happens when there are multiple blocks per page and the
                 * blocks for the first page we are looking for are not
                 * together. If all the pages were like this, bulk-read would
                 * reduce performance, so we turn it off for a while.
                 */
                ui->read_in_a_row = 0;
                ui->bulk_read = 0;
                goto out_free;
        }

        if (bu->cnt) {
                err = ubifs_tnc_bulk_read(c, bu);
                if (err)
                        goto out_warn;
        }

        err = populate_page(c, page1, bu, &n);
        if (err)
                goto out_warn;

        unlock_page(page1);
        ret = 1;

        isize = i_size_read(inode);
        if (isize == 0)
                goto out_free;
        end_index = ((isize - 1) >> PAGE_CACHE_SHIFT);

        for (page_idx = 1; page_idx < page_cnt; page_idx++) {
                pgoff_t page_offset = offset + page_idx;
                struct page *page;

                if (page_offset > end_index)
                        break;
                page = find_or_create_page(mapping, page_offset,
                                           GFP_NOFS | __GFP_COLD);
                if (!page)
                        break;
                if (!PageUptodate(page))
                        err = populate_page(c, page, bu, &n);
                unlock_page(page);
                page_cache_release(page);
                if (err)
                        break;
        }

        ui->last_page_read = offset + page_idx - 1;

out_free:
        kfree(bu->buf);
        kfree(bu);
        return ret;

out_warn:
        ubifs_warn("ignoring error %d and skipping bulk-read", err);
        goto out_free;
}

/**
 * ubifs_bulk_read - determine whether to bulk-read and, if so, do it.
 * @page: page from which to start bulk-read.
 *
 * Some flash media are capable of reading sequentially at faster rates. UBIFS
 * bulk-read facility is designed to take advantage of that, by reading in one
 * go consecutive data nodes that are also located consecutively in the same
 * LEB. This function returns %1 if a bulk-read is done and %0 otherwise.
 */
static int ubifs_bulk_read(struct page *page)
{
        struct inode *inode = page->mapping->host;
        struct ubifs_info *c = inode->i_sb->s_fs_info;
        struct ubifs_inode *ui = ubifs_inode(inode);
        pgoff_t index = page->index, last_page_read = ui->last_page_read;
        int ret = 0;

        ui->last_page_read = index;

        if (!c->bulk_read)
                return 0;
        /*
         * Bulk-read is protected by ui_mutex, but it is an optimization, so
         * don't bother if we cannot lock the mutex.
         */
        if (!mutex_trylock(&ui->ui_mutex))
                return 0;
        if (index != last_page_read + 1) {
                /* Turn off bulk-read if we stop reading sequentially */
                ui->read_in_a_row = 1;
                if (ui->bulk_read)
                        ui->bulk_read = 0;
                goto out_unlock;
        }
        if (!ui->bulk_read) {
                ui->read_in_a_row += 1;
                if (ui->read_in_a_row < 3)
                        goto out_unlock;
                /* Three reads in a row, so switch on bulk-read */
                ui->bulk_read = 1;
        }
        ret = ubifs_do_bulk_read(c, page);
out_unlock:
        mutex_unlock(&ui->ui_mutex);
        return ret;
}
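[Reviewer note] The heuristic above switches bulk-read on after three sequential page reads and off as soon as the access pattern stops being sequential. A self-contained sketch of the same state machine, not part of the commit; struct state and want_bulk_read() are hypothetical analogues of struct ubifs_inode and ubifs_bulk_read():

#include <stdio.h>

/* per-inode state, mirroring ui->last_page_read and friends */
struct state {
        long last_page_read;
        long read_in_a_row;
        int bulk_read;
};

/* returns non-zero when a bulk-read should be attempted */
static int want_bulk_read(struct state *s, long index)
{
        int sequential = (index == s->last_page_read + 1);

        s->last_page_read = index;
        if (!sequential) {
                s->read_in_a_row = 1;   /* streak broken: start over */
                s->bulk_read = 0;
                return 0;
        }
        if (!s->bulk_read && ++s->read_in_a_row >= 3)
                s->bulk_read = 1;       /* three in a row: switch on */
        return s->bulk_read;
}

int main(void)
{
        struct state s = { -1, 0, 0 };
        long pages[] = { 0, 1, 2, 3, 9, 10 };
        int i;

        for (i = 0; i < 6; i++)
                printf("page %ld -> bulk=%d\n", pages[i],
                       want_bulk_read(&s, pages[i]));
        return 0;
}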

static int ubifs_readpage(struct file *file, struct page *page)
{
        if (ubifs_bulk_read(page))
                return 0;
        do_readpage(page);
        unlock_page(page);
        return 0;
}
fs/ubifs/key.h

@@ -484,7 +484,7 @@ static inline void key_copy(const struct ubifs_info *c,
  * @key2: the second key to compare
  *
  * This function compares 2 keys and returns %-1 if @key1 is less than
- * @key2, 0 if the keys are equivalent and %1 if @key1 is greater than @key2.
+ * @key2, %0 if the keys are equivalent and %1 if @key1 is greater than @key2.
  */
 static inline int keys_cmp(const struct ubifs_info *c,
                            const union ubifs_key *key1,
@@ -502,6 +502,26 @@ static inline int keys_cmp(const struct ubifs_info *c,
        return 0;
}

/**
 * keys_eq - determine if keys are equivalent.
 * @c: UBIFS file-system description object
 * @key1: the first key to compare
 * @key2: the second key to compare
 *
 * This function compares 2 keys and returns %1 if @key1 is equal to @key2 and
 * %0 if not.
 */
static inline int keys_eq(const struct ubifs_info *c,
                          const union ubifs_key *key1,
                          const union ubifs_key *key2)
{
        if (key1->u32[0] != key2->u32[0])
                return 0;
        if (key1->u32[1] != key2->u32[1])
                return 0;
        return 1;
}
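[Reviewer note] keys_eq() only answers equal/not-equal, so it can compare the two 32-bit words directly without the ordering logic of keys_cmp(). A hypothetical standalone analogue, not part of the commit:

#include <stdio.h>
#include <stdint.h>

union key {
        uint32_t u32[2];
};

/* equality only: no need to decide which key is "greater" */
static int keys_eq(const union key *k1, const union key *k2)
{
        return k1->u32[0] == k2->u32[0] && k1->u32[1] == k2->u32[1];
}

int main(void)
{
        union key a = { { 7, 42 } }, b = { { 7, 42 } }, c = { { 7, 43 } };

        printf("%d %d\n", keys_eq(&a, &b), keys_eq(&a, &c));   /* 1 0 */
        return 0;
}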

/**
 * is_hash_key - is a key vulnerable to hash collisions.
 * @c: UBIFS file-system description object
fs/ubifs/super.c

@@ -401,6 +401,11 @@ static int ubifs_show_options(struct seq_file *s, struct vfsmount *mnt)
        else if (c->mount_opts.unmount_mode == 1)
                seq_printf(s, ",norm_unmount");

        if (c->mount_opts.bulk_read == 2)
                seq_printf(s, ",bulk_read");
        else if (c->mount_opts.bulk_read == 1)
                seq_printf(s, ",no_bulk_read");

        return 0;
}
@@ -538,6 +543,18 @@ static int init_constants_early(struct ubifs_info *c)
         * calculations when reporting free space.
         */
        c->leb_overhead = c->leb_size % UBIFS_MAX_DATA_NODE_SZ;
        /* Buffer size for bulk-reads */
        c->bulk_read_buf_size = UBIFS_MAX_BULK_READ * UBIFS_MAX_DATA_NODE_SZ;
        if (c->bulk_read_buf_size > c->leb_size)
                c->bulk_read_buf_size = c->leb_size;
        if (c->bulk_read_buf_size > 128 * 1024) {
                /* Check if we can kmalloc more than 128KiB */
                void *try = kmalloc(c->bulk_read_buf_size, GFP_KERNEL);

                kfree(try);
                if (!try)
                        c->bulk_read_buf_size = 128 * 1024;
        }
        return 0;
}
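[Reviewer note] The buffer size works out to min(UBIFS_MAX_BULK_READ * UBIFS_MAX_DATA_NODE_SZ, LEB size), with a 128 KiB fall-back when the larger kmalloc() fails. A sketch of the arithmetic, not part of the commit; the constants below are hypothetical placeholders for the real UBIFS values:

#include <stdio.h>

#define MAX_BULK_READ           32
#define MAX_DATA_NODE_SZ        4200    /* hypothetical: header + one 4096-byte block */

int main(void)
{
        int leb_size = 126976;          /* hypothetical 124 KiB LEB */
        int buf = MAX_BULK_READ * MAX_DATA_NODE_SZ;

        if (buf > leb_size)
                buf = leb_size;         /* never read past one LEB */
        if (buf > 128 * 1024)
                buf = 128 * 1024;       /* stand-in for the kmalloc probe */
        printf("bulk-read buffer: %d bytes\n", buf);
        return 0;
}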
@@ -840,17 +857,23 @@ static int check_volume_empty(struct ubifs_info *c)
 *
 * Opt_fast_unmount: do not run a journal commit before un-mounting
 * Opt_norm_unmount: run a journal commit before un-mounting
 * Opt_bulk_read: enable bulk-reads
 * Opt_no_bulk_read: disable bulk-reads
 * Opt_err: just end of array marker
 */
enum {
        Opt_fast_unmount,
        Opt_norm_unmount,
        Opt_bulk_read,
        Opt_no_bulk_read,
        Opt_err,
};

static match_table_t tokens = {
        {Opt_fast_unmount, "fast_unmount"},
        {Opt_norm_unmount, "norm_unmount"},
        {Opt_bulk_read, "bulk_read"},
        {Opt_no_bulk_read, "no_bulk_read"},
        {Opt_err, NULL},
};
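[Reviewer note] The tokens table feeds the kernel's match_token() helper in ubifs_parse_options() (next hunk): each comma-separated option maps to one Opt_* value. A simplified userspace analogue of that dispatch, not part of the commit; lookup() is a hypothetical stand-in for match_token():

#include <stdio.h>
#include <string.h>

enum { Opt_fast_unmount, Opt_norm_unmount, Opt_bulk_read,
       Opt_no_bulk_read, Opt_err };

static const char *tokens[] = {
        "fast_unmount", "norm_unmount", "bulk_read", "no_bulk_read",
};

static int lookup(const char *p)
{
        int i;

        for (i = 0; i < 4; i++)
                if (!strcmp(p, tokens[i]))
                        return i;
        return Opt_err;
}

int main(void)
{
        char opts[] = "bulk_read,fast_unmount", *p, *save;
        int bulk_read = 0;

        for (p = strtok_r(opts, ",", &save); p; p = strtok_r(NULL, ",", &save))
                switch (lookup(p)) {
                case Opt_bulk_read:
                        bulk_read = 1;
                        break;
                case Opt_no_bulk_read:
                        bulk_read = 0;
                        break;
                }
        printf("bulk_read=%d\n", bulk_read);
        return 0;
}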
@@ -888,6 +911,14 @@ static int ubifs_parse_options(struct ubifs_info *c, char *options,
                        c->mount_opts.unmount_mode = 1;
                        c->fast_unmount = 0;
                        break;
                case Opt_bulk_read:
                        c->mount_opts.bulk_read = 2;
                        c->bulk_read = 1;
                        break;
                case Opt_no_bulk_read:
                        c->mount_opts.bulk_read = 1;
                        c->bulk_read = 0;
                        break;
                default:
                        ubifs_err("unrecognized mount option \"%s\" "
                                  "or missing value", p);
fs/ubifs/tnc.c
@@ -1491,6 +1491,289 @@ int ubifs_tnc_locate(struct ubifs_info *c, const union ubifs_key *key,
        return err;
}

/**
 * ubifs_tnc_get_bu_keys - lookup keys for bulk-read.
 * @c: UBIFS file-system description object
 * @bu: bulk-read parameters and results
 *
 * Lookup consecutive data node keys for the same inode that reside
 * consecutively in the same LEB.
 */
int ubifs_tnc_get_bu_keys(struct ubifs_info *c, struct bu_info *bu)
{
        int n, err = 0, lnum = -1, uninitialized_var(offs);
        int uninitialized_var(len);
        unsigned int block = key_block(c, &bu->key);
        struct ubifs_znode *znode;

        bu->cnt = 0;
        bu->blk_cnt = 0;
        bu->eof = 0;

        mutex_lock(&c->tnc_mutex);
        /* Find first key */
        err = ubifs_lookup_level0(c, &bu->key, &znode, &n);
        if (err < 0)
                goto out;
        if (err) {
                /* Key found */
                len = znode->zbranch[n].len;
                /* The buffer must be big enough for at least 1 node */
                if (len > bu->buf_len) {
                        err = -EINVAL;
                        goto out;
                }
                /* Add this key */
                bu->zbranch[bu->cnt++] = znode->zbranch[n];
                bu->blk_cnt += 1;
                lnum = znode->zbranch[n].lnum;
                offs = ALIGN(znode->zbranch[n].offs + len, 8);
        }
        while (1) {
                struct ubifs_zbranch *zbr;
                union ubifs_key *key;
                unsigned int next_block;

                /* Find next key */
                err = tnc_next(c, &znode, &n);
                if (err)
                        goto out;
                zbr = &znode->zbranch[n];
                key = &zbr->key;
                /* See if there is another data key for this file */
                if (key_inum(c, key) != key_inum(c, &bu->key) ||
                    key_type(c, key) != UBIFS_DATA_KEY) {
                        err = -ENOENT;
                        goto out;
                }
                if (lnum < 0) {
                        /* First key found */
                        lnum = zbr->lnum;
                        offs = ALIGN(zbr->offs + zbr->len, 8);
                        len = zbr->len;
                        if (len > bu->buf_len) {
                                err = -EINVAL;
                                goto out;
                        }
                } else {
                        /*
                         * The data nodes must be in consecutive positions in
                         * the same LEB.
                         */
                        if (zbr->lnum != lnum || zbr->offs != offs)
                                goto out;
                        offs += ALIGN(zbr->len, 8);
                        len = ALIGN(len, 8) + zbr->len;
                        /* Must not exceed buffer length */
                        if (len > bu->buf_len)
                                goto out;
                }
                /* Allow for holes */
                next_block = key_block(c, key);
                bu->blk_cnt += (next_block - block - 1);
                if (bu->blk_cnt >= UBIFS_MAX_BULK_READ)
                        goto out;
                block = next_block;
                /* Add this key */
                bu->zbranch[bu->cnt++] = *zbr;
                bu->blk_cnt += 1;
                /* See if we have room for more */
                if (bu->cnt >= UBIFS_MAX_BULK_READ)
                        goto out;
                if (bu->blk_cnt >= UBIFS_MAX_BULK_READ)
                        goto out;
        }
out:
        if (err == -ENOENT) {
                bu->eof = 1;
                err = 0;
        }
        bu->gc_seq = c->gc_seq;
        mutex_unlock(&c->tnc_mutex);
        if (err)
                return err;
        /*
         * An enormous hole could cause bulk-read to encompass too many
         * page cache pages, so limit the number here.
         */
        if (bu->blk_cnt >= UBIFS_MAX_BULK_READ)
                bu->blk_cnt = UBIFS_MAX_BULK_READ;
        /*
         * Ensure that bulk-read covers a whole number of page cache
         * pages.
         */
        if (UBIFS_BLOCKS_PER_PAGE == 1 ||
            !(bu->blk_cnt & (UBIFS_BLOCKS_PER_PAGE - 1)))
                return 0;
        if (bu->eof) {
                /* At the end of file we can round up */
                bu->blk_cnt += UBIFS_BLOCKS_PER_PAGE - 1;
                return 0;
        }
        /* Exclude data nodes that do not make up a whole page cache page */
        block = key_block(c, &bu->key) + bu->blk_cnt;
        block &= ~(UBIFS_BLOCKS_PER_PAGE - 1);
        while (bu->cnt) {
                if (key_block(c, &bu->zbranch[bu->cnt - 1].key) < block)
                        break;
                bu->cnt -= 1;
        }
        return 0;
}
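[Reviewer note] The key collection stops as soon as a node is not physically adjacent: the next node must sit in the same LEB at the 8-byte-aligned offset right after the previous one. A small sketch of that check, not part of the commit; struct node and the sample layout are hypothetical:

#include <stdio.h>

#define ALIGN8(x)       (((x) + 7) & ~7)

struct node { int lnum, offs, len; };

/* nodes are bulk-readable only while each starts where the last ended */
static int count_consecutive(const struct node *n, int cnt)
{
        int i, offs = ALIGN8(n[0].offs + n[0].len);

        for (i = 1; i < cnt; i++) {
                if (n[i].lnum != n[0].lnum || n[i].offs != offs)
                        break;
                offs = ALIGN8(n[i].offs + n[i].len);
        }
        return i;
}

int main(void)
{
        struct node n[] = {
                { 3, 0, 160 }, { 3, 160, 160 }, { 3, 1024, 160 },
        };

        /* the third node is not adjacent, so only 2 qualify */
        printf("%d nodes bulk-readable\n", count_consecutive(n, 3));
        return 0;
}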

/**
 * read_wbuf - bulk-read from a LEB with a wbuf.
 * @wbuf: wbuf that may overlap the read
 * @buf: buffer into which to read
 * @len: read length
 * @lnum: LEB number from which to read
 * @offs: offset from which to read
 *
 * This function returns %0 on success or a negative error code on failure.
 */
static int read_wbuf(struct ubifs_wbuf *wbuf, void *buf, int len, int lnum,
                     int offs)
{
        const struct ubifs_info *c = wbuf->c;
        int rlen, overlap;

        dbg_io("LEB %d:%d, length %d", lnum, offs, len);
        ubifs_assert(wbuf && lnum >= 0 && lnum < c->leb_cnt && offs >= 0);
        ubifs_assert(!(offs & 7) && offs < c->leb_size);
        ubifs_assert(offs + len <= c->leb_size);

        spin_lock(&wbuf->lock);
        overlap = (lnum == wbuf->lnum && offs + len > wbuf->offs);
        if (!overlap) {
                /* We may safely unlock the write-buffer and read the data */
                spin_unlock(&wbuf->lock);
                return ubi_read(c->ubi, lnum, buf, offs, len);
        }

        /* Don't read under wbuf */
        rlen = wbuf->offs - offs;
        if (rlen < 0)
                rlen = 0;

        /* Copy the rest from the write-buffer */
        memcpy(buf + rlen, wbuf->buf + offs + rlen - wbuf->offs, len - rlen);
        spin_unlock(&wbuf->lock);

        if (rlen > 0)
                /* Read everything that goes before write-buffer */
                return ubi_read(c->ubi, lnum, buf, offs, rlen);

        return 0;
}
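[Reviewer note] read_wbuf() splits the request at wbuf->offs: bytes below it come from the media, bytes at or above it are still sitting in the write-buffer. A self-contained sketch of that split, not part of the commit, using arrays in place of the LEB and the wbuf:

#include <stdio.h>
#include <string.h>

int main(void)
{
        char media[] = "ON-FLASH-DATA...";       /* hypothetical LEB bytes */
        char wbuf[]  = "BUFFERED-DATA..";        /* hypothetical wbuf content */
        int wbuf_offs = 16;                      /* wbuf caches LEB bytes 16.. */
        int offs = 8, len = 16;                  /* request overlaps the wbuf */
        char out[32];

        int rlen = wbuf_offs - offs;             /* part that must hit the media */
        if (rlen < 0)
                rlen = 0;

        /* copy the tail from the write-buffer first (it is in RAM) */
        memcpy(out + rlen, wbuf + offs + rlen - wbuf_offs, len - rlen);
        /* then read the leading part from the media */
        if (rlen > 0)
                memcpy(out, media + offs, rlen);

        printf("%.*s\n", len, out);
        return 0;
}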

/**
 * validate_data_node - validate data nodes for bulk-read.
 * @c: UBIFS file-system description object
 * @buf: buffer containing data node to validate
 * @zbr: zbranch of data node to validate
 *
 * This function returns %0 on success or a negative error code on failure.
 */
static int validate_data_node(struct ubifs_info *c, void *buf,
                              struct ubifs_zbranch *zbr)
{
        union ubifs_key key1;
        struct ubifs_ch *ch = buf;
        int err, len;

        if (ch->node_type != UBIFS_DATA_NODE) {
                ubifs_err("bad node type (%d but expected %d)",
                          ch->node_type, UBIFS_DATA_NODE);
                goto out_err;
        }

        err = ubifs_check_node(c, buf, zbr->lnum, zbr->offs, 0);
        if (err) {
                ubifs_err("expected node type %d", UBIFS_DATA_NODE);
                goto out;
        }

        len = le32_to_cpu(ch->len);
        if (len != zbr->len) {
                ubifs_err("bad node length %d, expected %d", len, zbr->len);
                goto out_err;
        }

        /* Make sure the key of the read node is correct */
        key_read(c, buf + UBIFS_KEY_OFFSET, &key1);
        if (!keys_eq(c, &zbr->key, &key1)) {
                ubifs_err("bad key in node at LEB %d:%d",
                          zbr->lnum, zbr->offs);
                dbg_tnc("looked for key %s found node's key %s",
                        DBGKEY(&zbr->key), DBGKEY1(&key1));
                goto out_err;
        }

        return 0;

out_err:
        err = -EINVAL;
out:
        ubifs_err("bad node at LEB %d:%d", zbr->lnum, zbr->offs);
        dbg_dump_node(c, buf);
        dbg_dump_stack();
        return err;
}

/**
 * ubifs_tnc_bulk_read - read a number of data nodes in one go.
 * @c: UBIFS file-system description object
 * @bu: bulk-read parameters and results
 *
 * This function reads and validates the data nodes that were identified by the
 * 'ubifs_tnc_get_bu_keys()' function. This function returns %0 on success,
 * -EAGAIN to indicate a race with GC, or another negative error code on
 * failure.
 */
int ubifs_tnc_bulk_read(struct ubifs_info *c, struct bu_info *bu)
{
        int lnum = bu->zbranch[0].lnum, offs = bu->zbranch[0].offs, len, err, i;
        struct ubifs_wbuf *wbuf;
        void *buf;

        len = bu->zbranch[bu->cnt - 1].offs;
        len += bu->zbranch[bu->cnt - 1].len - offs;
        if (len > bu->buf_len) {
                ubifs_err("buffer too small %d vs %d", bu->buf_len, len);
                return -EINVAL;
        }

        /* Do the read */
        wbuf = ubifs_get_wbuf(c, lnum);
        if (wbuf)
                err = read_wbuf(wbuf, bu->buf, len, lnum, offs);
        else
                err = ubi_read(c->ubi, lnum, bu->buf, offs, len);

        /* Check for a race with GC */
        if (maybe_leb_gced(c, lnum, bu->gc_seq))
                return -EAGAIN;

        if (err && err != -EBADMSG) {
                ubifs_err("failed to read from LEB %d:%d, error %d",
                          lnum, offs, err);
                dbg_dump_stack();
                dbg_tnc("key %s", DBGKEY(&bu->key));
                return err;
        }

        /* Validate the nodes read */
        buf = bu->buf;
        for (i = 0; i < bu->cnt; i++) {
                err = validate_data_node(c, buf, &bu->zbranch[i]);
                if (err)
                        return err;
                buf = buf + ALIGN(bu->zbranch[i].len, 8);
        }

        return 0;
}
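[Reviewer note] After the single media read, the nodes are validated in place by stepping through bu->buf in 8-byte-aligned strides. A sketch of that walk, not part of the commit; the node lengths below are hypothetical:

#include <stdio.h>

#define ALIGN8(x)       (((x) + 7) & ~7)

int main(void)
{
        /* hypothetical node lengths as recorded in the zbranches */
        int lens[] = { 161, 97, 160 };
        int i, offs = 0;

        for (i = 0; i < 3; i++) {
                printf("node %d at buffer offset %d, length %d\n",
                       i, offs, lens[i]);
                offs += ALIGN8(lens[i]);        /* nodes start 8-byte aligned */
        }
        return 0;
}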

/**
 * do_lookup_nm - look up a "hashed" node.
 * @c: UBIFS file-system description object
fs/ubifs/ubifs.h

@@ -142,6 +142,9 @@
/* Maximum expected tree height for use by bottom_up_buf */
#define BOTTOM_UP_HEIGHT 64

/* Maximum number of data nodes to bulk-read */
#define UBIFS_MAX_BULK_READ 32

/*
 * Lockdep classes for UBIFS inode @ui_mutex.
 */
@@ -329,8 +332,8 @@ struct ubifs_gced_idx_leb {
 * @dirty: non-zero if the inode is dirty
 * @xattr: non-zero if this is an extended attribute inode
 * @ui_mutex: serializes inode write-back with the rest of VFS operations,
- *            serializes "clean <-> dirty" state changes, protects @dirty,
- *            @ui_size, and @xattr_size
+ *            serializes "clean <-> dirty" state changes, serializes bulk-read,
+ *            protects @dirty, @ui_size, and @xattr_size
 * @ui_lock: protects @synced_i_size
 * @synced_i_size: synchronized size of inode, i.e. the value of inode size
 *                 currently stored on the flash; used only for regular file
@@ -338,6 +341,9 @@ struct ubifs_gced_idx_leb {
 * @ui_size: inode size used by UBIFS when writing to flash
 * @flags: inode flags (@UBIFS_COMPR_FL, etc)
 * @compr_type: default compression type used for this inode
 * @last_page_read: page number of last page read (for bulk read)
 * @read_in_a_row: number of consecutive pages read in a row (for bulk read)
 * @bulk_read: indicates whether bulk-read should be used
 * @data_len: length of the data attached to the inode
 * @data: inode's data
 *
@@ -385,6 +391,9 @@ struct ubifs_inode {
        loff_t ui_size;
        int flags;
        int compr_type;
        pgoff_t last_page_read;
        pgoff_t read_in_a_row;
        int bulk_read;
        int data_len;
        void *data;
};
@@ -743,6 +752,28 @@ struct ubifs_znode {
        struct ubifs_zbranch zbranch[];
};

/**
 * struct bu_info - bulk-read information
 * @key: first data node key
 * @zbranch: zbranches of data nodes to bulk read
 * @buf: buffer to read into
 * @buf_len: buffer length
 * @gc_seq: GC sequence number to detect races with GC
 * @cnt: number of data nodes for bulk read
 * @blk_cnt: number of data blocks including holes
 * @eof: end of file reached
 */
struct bu_info {
        union ubifs_key key;
        struct ubifs_zbranch zbranch[UBIFS_MAX_BULK_READ];
        void *buf;
        int buf_len;
        int gc_seq;
        int cnt;
        int blk_cnt;
        int eof;
};
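[Reviewer note] @blk_cnt can exceed @cnt because holes occupy blocks without having data nodes. A sketch of that bookkeeping, not part of the commit; the block layout below is hypothetical:

#include <stdio.h>

int main(void)
{
        /* blocks covered by one bulk-read; -1 marks a hole (no data node) */
        int blocks[] = { 0, 1, -1, -1, 4, 5 };
        int i, cnt = 0, blk_cnt = 0;

        for (i = 0; i < 6; i++) {
                blk_cnt++;                      /* every block counts */
                if (blocks[i] >= 0)
                        cnt++;                  /* only real nodes counted */
        }
        printf("cnt=%d blk_cnt=%d\n", cnt, blk_cnt);    /* cnt=4 blk_cnt=6 */
        return 0;
}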

/**
 * struct ubifs_node_range - node length range description data structure.
 * @len: fixed node length
@@ -862,9 +893,11 @@ struct ubifs_orphan {
/**
 * struct ubifs_mount_opts - UBIFS-specific mount options information.
 * @unmount_mode: selected unmount mode (%0 default, %1 normal, %2 fast)
 * @bulk_read: enable bulk-reads
 */
struct ubifs_mount_opts {
        unsigned int unmount_mode:2;
        unsigned int bulk_read:2;
};

/**
@@ -965,6 +998,9 @@ struct ubifs_mount_opts {
 * @old_leb_cnt: count of logical eraseblocks before re-size
 * @ro_media: the underlying UBI volume is read-only
 *
 * @bulk_read: enable bulk-reads
 * @bulk_read_buf_size: buffer size for bulk-reads
 *
 * @dirty_pg_cnt: number of dirty pages (not used)
 * @dirty_zn_cnt: number of dirty znodes
 * @clean_zn_cnt: number of clean znodes
@@ -1205,6 +1241,9 @@ struct ubifs_info {
        int old_leb_cnt;
        int ro_media;

        int bulk_read;
        int bulk_read_buf_size;

        atomic_long_t dirty_pg_cnt;
        atomic_long_t dirty_zn_cnt;
        atomic_long_t clean_zn_cnt;
@@ -1490,6 +1529,8 @@ void destroy_old_idx(struct ubifs_info *c);
int is_idx_node_in_tnc(struct ubifs_info *c, union ubifs_key *key, int level,
                       int lnum, int offs);
int insert_old_idx_znode(struct ubifs_info *c, struct ubifs_znode *znode);
int ubifs_tnc_get_bu_keys(struct ubifs_info *c, struct bu_info *bu);
int ubifs_tnc_bulk_read(struct ubifs_info *c, struct bu_info *bu);

/* tnc_misc.c */
struct ubifs_znode *ubifs_tnc_levelorder_next(struct ubifs_znode *zr,