ext4: convert to mbcache2
The conversion is generally straightforward. The only tricky part is that the xattr block corresponding to a found mbcache entry can get freed before we take the buffer lock for that block, so we have to check whether the entry is still valid after getting the buffer lock.

Signed-off-by: Jan Kara <jack@suse.cz>
Signed-off-by: Theodore Ts'o <tytso@mit.edu>
commit 82939d7999
parent f9a61eb4e2
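To make the tricky part above concrete, here is a condensed sketch (not the literal patch) of the revalidation pattern the new ext4_xattr_block_set() hunk uses when reusing a cached xattr block. The helper name is hypothetical, and journalling, quota and error handling are omitted; it assumes the ext4 and mbcache2 definitions introduced by this series.

/*
 * Condensed sketch of the revalidation pattern, assuming the ext4 and
 * mbcache2 definitions from this patch.  The helper name is hypothetical.
 */
static struct buffer_head *
xattr_block_reuse_sketch(struct inode *inode, struct mb2_cache_entry *ce)
{
        struct buffer_head *bh = sb_bread(inode->i_sb, ce->e_block);

        if (!bh)
                return NULL;
        lock_buffer(bh);
        /*
         * The block may have been freed (and its cache entry unhashed)
         * between the cache lookup and this point.  Entries are only
         * unhashed under the buffer lock, so this check is reliable.
         */
        if (hlist_bl_unhashed(&ce->e_hash_list)) {
                unlock_buffer(bh);
                brelse(bh);
                return NULL;    /* caller retries the cache lookup */
        }
        le32_add_cpu(&BHDR(bh)->h_refcount, 1);    /* safe to reuse the block */
        unlock_buffer(bh);
        return bh;
}

When the entry turns out to be unhashed, the real code undoes its bookkeeping and retries the cache lookup, as the new hunk in fs/ext4/xattr.c below shows.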
fs/ext4/ext4.h
@@ -1468,7 +1468,7 @@ struct ext4_sb_info {
 	struct list_head s_es_list;	/* List of inodes with reclaimable extents */
 	long s_es_nr_inode;
 	struct ext4_es_stats s_es_stats;
-	struct mb_cache *s_mb_cache;
+	struct mb2_cache *s_mb_cache;
 	spinlock_t s_es_lock ____cacheline_aligned_in_smp;
 
 	/* Ratelimit ext4 messages. */
fs/ext4/super.c
@@ -844,7 +844,6 @@ static void ext4_put_super(struct super_block *sb)
 	ext4_release_system_zone(sb);
 	ext4_mb_release(sb);
 	ext4_ext_release(sb);
-	ext4_xattr_put_super(sb);
 
 	if (!(sb->s_flags & MS_RDONLY)) {
 		ext4_clear_feature_journal_needs_recovery(sb);
@@ -3797,7 +3796,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
 
 no_journal:
 	if (ext4_mballoc_ready) {
-		sbi->s_mb_cache = ext4_xattr_create_cache(sb->s_id);
+		sbi->s_mb_cache = ext4_xattr_create_cache();
 		if (!sbi->s_mb_cache) {
 			ext4_msg(sb, KERN_ERR, "Failed to create an mb_cache");
 			goto failed_mount_wq;
@@ -4027,6 +4026,10 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
 	if (EXT4_SB(sb)->rsv_conversion_wq)
 		destroy_workqueue(EXT4_SB(sb)->rsv_conversion_wq);
 failed_mount_wq:
+	if (sbi->s_mb_cache) {
+		ext4_xattr_destroy_cache(sbi->s_mb_cache);
+		sbi->s_mb_cache = NULL;
+	}
 	if (sbi->s_journal) {
 		jbd2_journal_destroy(sbi->s_journal);
 		sbi->s_journal = NULL;
fs/ext4/xattr.c
@@ -53,7 +53,7 @@
 #include <linux/init.h>
 #include <linux/fs.h>
 #include <linux/slab.h>
-#include <linux/mbcache.h>
+#include <linux/mbcache2.h>
 #include <linux/quotaops.h>
 #include "ext4_jbd2.h"
 #include "ext4.h"
@@ -78,10 +78,10 @@
 # define ea_bdebug(bh, fmt, ...)	no_printk(fmt, ##__VA_ARGS__)
 #endif
 
-static void ext4_xattr_cache_insert(struct mb_cache *, struct buffer_head *);
+static void ext4_xattr_cache_insert(struct mb2_cache *, struct buffer_head *);
 static struct buffer_head *ext4_xattr_cache_find(struct inode *,
						 struct ext4_xattr_header *,
-						 struct mb_cache_entry **);
+						 struct mb2_cache_entry **);
 static void ext4_xattr_rehash(struct ext4_xattr_header *,
			      struct ext4_xattr_entry *);
 static int ext4_xattr_list(struct dentry *dentry, char *buffer,
@@ -276,7 +276,7 @@ ext4_xattr_block_get(struct inode *inode, int name_index, const char *name,
 	struct ext4_xattr_entry *entry;
 	size_t size;
 	int error;
-	struct mb_cache *ext4_mb_cache = EXT4_GET_MB_CACHE(inode);
+	struct mb2_cache *ext4_mb_cache = EXT4_GET_MB_CACHE(inode);
 
 	ea_idebug(inode, "name=%d.%s, buffer=%p, buffer_size=%ld",
		  name_index, name, buffer, (long)buffer_size);
@@ -428,7 +428,7 @@ ext4_xattr_block_list(struct dentry *dentry, char *buffer, size_t buffer_size)
 	struct inode *inode = d_inode(dentry);
 	struct buffer_head *bh = NULL;
 	int error;
-	struct mb_cache *ext4_mb_cache = EXT4_GET_MB_CACHE(inode);
+	struct mb2_cache *ext4_mb_cache = EXT4_GET_MB_CACHE(inode);
 
 	ea_idebug(inode, "buffer=%p, buffer_size=%ld",
		  buffer, (long)buffer_size);
@@ -545,11 +545,8 @@ static void
 ext4_xattr_release_block(handle_t *handle, struct inode *inode,
			 struct buffer_head *bh)
 {
-	struct mb_cache_entry *ce = NULL;
 	int error = 0;
-	struct mb_cache *ext4_mb_cache = EXT4_GET_MB_CACHE(inode);
 
-	ce = mb_cache_entry_get(ext4_mb_cache, bh->b_bdev, bh->b_blocknr);
 	BUFFER_TRACE(bh, "get_write_access");
 	error = ext4_journal_get_write_access(handle, bh);
 	if (error)
@@ -557,9 +554,15 @@ ext4_xattr_release_block(handle_t *handle, struct inode *inode,
 
 	lock_buffer(bh);
 	if (BHDR(bh)->h_refcount == cpu_to_le32(1)) {
+		__u32 hash = le32_to_cpu(BHDR(bh)->h_hash);
+
 		ea_bdebug(bh, "refcount now=0; freeing");
-		if (ce)
-			mb_cache_entry_free(ce);
+		/*
+		 * This must happen under buffer lock for
+		 * ext4_xattr_block_set() to reliably detect freed block
+		 */
+		mb2_cache_entry_delete_block(EXT4_GET_MB_CACHE(inode), hash,
+					     bh->b_blocknr);
 		get_bh(bh);
 		unlock_buffer(bh);
 		ext4_free_blocks(handle, inode, bh, 0, 1,
@@ -567,8 +570,6 @@ ext4_xattr_release_block(handle_t *handle, struct inode *inode,
				 EXT4_FREE_BLOCKS_FORGET);
 	} else {
 		le32_add_cpu(&BHDR(bh)->h_refcount, -1);
-		if (ce)
-			mb_cache_entry_release(ce);
 		/*
		 * Beware of this ugliness: Releasing of xattr block references
		 * from different inodes can race and so we have to protect
@@ -781,17 +782,15 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
 	struct super_block *sb = inode->i_sb;
 	struct buffer_head *new_bh = NULL;
 	struct ext4_xattr_search *s = &bs->s;
-	struct mb_cache_entry *ce = NULL;
+	struct mb2_cache_entry *ce = NULL;
 	int error = 0;
-	struct mb_cache *ext4_mb_cache = EXT4_GET_MB_CACHE(inode);
+	struct mb2_cache *ext4_mb_cache = EXT4_GET_MB_CACHE(inode);
 
 #define header(x) ((struct ext4_xattr_header *)(x))
 
 	if (i->value && i->value_len > sb->s_blocksize)
		return -ENOSPC;
 	if (s->base) {
-		ce = mb_cache_entry_get(ext4_mb_cache, bs->bh->b_bdev,
-					bs->bh->b_blocknr);
 		BUFFER_TRACE(bs->bh, "get_write_access");
 		error = ext4_journal_get_write_access(handle, bs->bh);
 		if (error)
@@ -799,10 +798,15 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
 		lock_buffer(bs->bh);
 
 		if (header(s->base)->h_refcount == cpu_to_le32(1)) {
-			if (ce) {
-				mb_cache_entry_free(ce);
-				ce = NULL;
-			}
+			__u32 hash = le32_to_cpu(BHDR(bs->bh)->h_hash);
+
+			/*
+			 * This must happen under buffer lock for
+			 * ext4_xattr_block_set() to reliably detect modified
+			 * block
+			 */
+			mb2_cache_entry_delete_block(ext4_mb_cache, hash,
+						     bs->bh->b_blocknr);
 			ea_bdebug(bs->bh, "modifying in-place");
 			error = ext4_xattr_set_entry(i, s);
 			if (!error) {
@@ -826,10 +830,6 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
 			int offset = (char *)s->here - bs->bh->b_data;
 
 			unlock_buffer(bs->bh);
-			if (ce) {
-				mb_cache_entry_release(ce);
-				ce = NULL;
-			}
 			ea_bdebug(bs->bh, "cloning");
 			s->base = kmalloc(bs->bh->b_size, GFP_NOFS);
 			error = -ENOMEM;
@@ -884,6 +884,31 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
			if (error)
				goto cleanup_dquot;
			lock_buffer(new_bh);
+			/*
+			 * We have to be careful about races with
+			 * freeing or rehashing of xattr block. Once we
+			 * hold buffer lock xattr block's state is
+			 * stable so we can check whether the block got
+			 * freed / rehashed or not. Since we unhash
+			 * mbcache entry under buffer lock when freeing
+			 * / rehashing xattr block, checking whether
+			 * entry is still hashed is reliable.
+			 */
+			if (hlist_bl_unhashed(&ce->e_hash_list)) {
+				/*
+				 * Undo everything and check mbcache
+				 * again.
+				 */
+				unlock_buffer(new_bh);
+				dquot_free_block(inode,
+						 EXT4_C2B(EXT4_SB(sb),
+							  1));
+				brelse(new_bh);
+				mb2_cache_entry_put(ext4_mb_cache, ce);
+				ce = NULL;
+				new_bh = NULL;
+				goto inserted;
+			}
			le32_add_cpu(&BHDR(new_bh)->h_refcount, 1);
			ea_bdebug(new_bh, "reusing; refcount now=%d",
				  le32_to_cpu(BHDR(new_bh)->h_refcount));
@@ -894,7 +919,8 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
				if (error)
					goto cleanup_dquot;
			}
-			mb_cache_entry_release(ce);
+			mb2_cache_entry_touch(ext4_mb_cache, ce);
+			mb2_cache_entry_put(ext4_mb_cache, ce);
			ce = NULL;
		} else if (bs->bh && s->base == bs->bh->b_data) {
			/* We were modifying this block in-place. */
@@ -959,7 +985,7 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
 
 cleanup:
	if (ce)
-		mb_cache_entry_release(ce);
+		mb2_cache_entry_put(ext4_mb_cache, ce);
	brelse(new_bh);
	if (!(bs->bh && s->base == bs->bh->b_data))
		kfree(s->base);
@@ -1511,17 +1537,6 @@ ext4_xattr_delete_inode(handle_t *handle, struct inode *inode)
	brelse(bh);
 }
 
-/*
- * ext4_xattr_put_super()
- *
- * This is called when a file system is unmounted.
- */
-void
-ext4_xattr_put_super(struct super_block *sb)
-{
-	mb_cache_shrink(sb->s_bdev);
-}
-
 /*
  * ext4_xattr_cache_insert()
  *
@@ -1531,28 +1546,18 @@ ext4_xattr_put_super(struct super_block *sb)
  * Returns 0, or a negative error number on failure.
  */
 static void
-ext4_xattr_cache_insert(struct mb_cache *ext4_mb_cache, struct buffer_head *bh)
+ext4_xattr_cache_insert(struct mb2_cache *ext4_mb_cache, struct buffer_head *bh)
 {
	__u32 hash = le32_to_cpu(BHDR(bh)->h_hash);
-	struct mb_cache_entry *ce;
	int error;
 
-	ce = mb_cache_entry_alloc(ext4_mb_cache, GFP_NOFS);
-	if (!ce) {
-		ea_bdebug(bh, "out of memory");
-		return;
-	}
-	error = mb_cache_entry_insert(ce, bh->b_bdev, bh->b_blocknr, hash);
+	error = mb2_cache_entry_create(ext4_mb_cache, GFP_NOFS, hash,
+				       bh->b_blocknr);
	if (error) {
-		mb_cache_entry_free(ce);
-		if (error == -EBUSY) {
+		if (error == -EBUSY)
			ea_bdebug(bh, "already in cache");
-			error = 0;
-		}
-	} else {
+	} else
		ea_bdebug(bh, "inserting [%x]", (int)hash);
-		mb_cache_entry_release(ce);
-	}
 }
 
 /*
@@ -1605,26 +1610,19 @@ ext4_xattr_cmp(struct ext4_xattr_header *header1,
  */
 static struct buffer_head *
 ext4_xattr_cache_find(struct inode *inode, struct ext4_xattr_header *header,
-		      struct mb_cache_entry **pce)
+		      struct mb2_cache_entry **pce)
 {
	__u32 hash = le32_to_cpu(header->h_hash);
-	struct mb_cache_entry *ce;
-	struct mb_cache *ext4_mb_cache = EXT4_GET_MB_CACHE(inode);
+	struct mb2_cache_entry *ce;
+	struct mb2_cache *ext4_mb_cache = EXT4_GET_MB_CACHE(inode);
 
	if (!header->h_hash)
		return NULL;  /* never share */
	ea_idebug(inode, "looking for cached blocks [%x]", (int)hash);
-again:
-	ce = mb_cache_entry_find_first(ext4_mb_cache, inode->i_sb->s_bdev,
-				       hash);
+	ce = mb2_cache_entry_find_first(ext4_mb_cache, hash);
	while (ce) {
		struct buffer_head *bh;
 
-		if (IS_ERR(ce)) {
-			if (PTR_ERR(ce) == -EAGAIN)
-				goto again;
-			break;
-		}
		bh = sb_bread(inode->i_sb, ce->e_block);
		if (!bh) {
			EXT4_ERROR_INODE(inode, "block %lu read error",
@@ -1640,7 +1638,7 @@ ext4_xattr_cache_find(struct inode *inode, struct ext4_xattr_header *header,
			return bh;
		}
		brelse(bh);
-		ce = mb_cache_entry_find_next(ce, inode->i_sb->s_bdev, hash);
+		ce = mb2_cache_entry_find_next(ext4_mb_cache, ce);
	}
	return NULL;
 }
@@ -1715,15 +1713,15 @@ static void ext4_xattr_rehash(struct ext4_xattr_header *header,
 
 #define HASH_BUCKET_BITS 10
 
-struct mb_cache *
-ext4_xattr_create_cache(char *name)
+struct mb2_cache *
+ext4_xattr_create_cache(void)
 {
-	return mb_cache_create(name, HASH_BUCKET_BITS);
+	return mb2_cache_create(HASH_BUCKET_BITS);
 }
 
-void ext4_xattr_destroy_cache(struct mb_cache *cache)
+void ext4_xattr_destroy_cache(struct mb2_cache *cache)
 {
	if (cache)
-		mb_cache_destroy(cache);
+		mb2_cache_destroy(cache);
 }
 
fs/ext4/xattr.h
@@ -108,7 +108,6 @@ extern int ext4_xattr_set(struct inode *, int, const char *, const void *, size_
 extern int ext4_xattr_set_handle(handle_t *, struct inode *, int, const char *, const void *, size_t, int);
 
 extern void ext4_xattr_delete_inode(handle_t *, struct inode *);
-extern void ext4_xattr_put_super(struct super_block *);
 
 extern int ext4_expand_extra_isize_ea(struct inode *inode, int new_extra_isize,
			    struct ext4_inode *raw_inode, handle_t *handle);
@@ -124,8 +123,8 @@ extern int ext4_xattr_ibody_inline_set(handle_t *handle, struct inode *inode,
				       struct ext4_xattr_info *i,
				       struct ext4_xattr_ibody_find *is);
 
-extern struct mb_cache *ext4_xattr_create_cache(char *name);
-extern void ext4_xattr_destroy_cache(struct mb_cache *);
+extern struct mb2_cache *ext4_xattr_create_cache(void);
+extern void ext4_xattr_destroy_cache(struct mb2_cache *);
 
 #ifdef CONFIG_EXT4_FS_SECURITY
 extern int ext4_init_security(handle_t *handle, struct inode *inode,