Btrfs: use slabs for delayed reference allocation
The delayed reference allocation is in the fast path of the IO, so use slabs to improve the speed of the allocation. Besides that, we can check for leaked objects when the module is removed.

Signed-off-by: Miao Xie <miaox@cn.fujitsu.com>
parent 6f60cbd3ae
commit 78a6184a3f
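The slab pattern applied here is the standard kmem_cache life cycle: create a dedicated cache for each fixed-size, frequently allocated object at module init, allocate and free those objects from the cache on the hot path, and destroy the cache at module exit, where a non-empty cache flags leaked objects under slab debugging. The sketch below is illustration only, not code from this commit; the demo_* names are made up.

/*
 * Illustration only, not part of the diff below: a minimal module showing
 * the kmem_cache life cycle the patch adopts for delayed refs.
 * The demo_* names are hypothetical.
 */
#include <linux/module.h>
#include <linux/slab.h>

/* stand-in for a hot-path object such as a delayed ref */
struct demo_ref {
        u64 bytenr;
        int ref_mod;
};

static struct kmem_cache *demo_ref_cachep;

static int __init demo_init(void)
{
        struct demo_ref *ref;

        /* one dedicated cache per object type, created at module load */
        demo_ref_cachep = kmem_cache_create("demo_ref",
                        sizeof(struct demo_ref), 0,
                        SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
        if (!demo_ref_cachep)
                return -ENOMEM;

        /* hot path: allocate and free from the cache, GFP_NOFS as in the patch */
        ref = kmem_cache_alloc(demo_ref_cachep, GFP_NOFS);
        if (ref) {
                ref->bytenr = 0;
                ref->ref_mod = 1;
                kmem_cache_free(demo_ref_cachep, ref);
        }
        return 0;
}

static void __exit demo_exit(void)
{
        /* with slab debugging, destroying a non-empty cache reports the leak */
        kmem_cache_destroy(demo_ref_cachep);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

Dedicated caches avoid the size rounding of the generic kmalloc buckets and make leak checking possible per object type, which is what the commit message refers to.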
fs/btrfs/delayed-ref.c
@@ -23,6 +23,10 @@
 #include "delayed-ref.h"
 #include "transaction.h"
 
+struct kmem_cache *btrfs_delayed_ref_head_cachep;
+struct kmem_cache *btrfs_delayed_tree_ref_cachep;
+struct kmem_cache *btrfs_delayed_data_ref_cachep;
+struct kmem_cache *btrfs_delayed_extent_op_cachep;
 /*
  * delayed back reference update tracking. For subvolume trees
  * we queue up extent allocations and backref maintenance for
@@ -511,7 +515,7 @@ update_existing_head_ref(struct btrfs_delayed_ref_node *existing,
                                         ref->extent_op->flags_to_set;
                                 existing_ref->extent_op->update_flags = 1;
                         }
-                        kfree(ref->extent_op);
+                        btrfs_free_delayed_extent_op(ref->extent_op);
                 }
         }
         /*
@@ -592,7 +596,7 @@ static noinline void add_delayed_ref_head(struct btrfs_fs_info *fs_info,
                  * we've updated the existing ref, free the newly
                  * allocated ref
                  */
-                kfree(head_ref);
+                kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
         } else {
                 delayed_refs->num_heads++;
                 delayed_refs->num_heads_ready++;
@@ -653,7 +657,7 @@ static noinline void add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
                  * we've updated the existing ref, free the newly
                  * allocated ref
                  */
-                kfree(full_ref);
+                kmem_cache_free(btrfs_delayed_tree_ref_cachep, full_ref);
         } else {
                 delayed_refs->num_entries++;
                 trans->delayed_ref_updates++;
@@ -714,7 +718,7 @@ static noinline void add_delayed_data_ref(struct btrfs_fs_info *fs_info,
                  * we've updated the existing ref, free the newly
                  * allocated ref
                  */
-                kfree(full_ref);
+                kmem_cache_free(btrfs_delayed_data_ref_cachep, full_ref);
         } else {
                 delayed_refs->num_entries++;
                 trans->delayed_ref_updates++;
@@ -738,13 +742,13 @@ int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
         struct btrfs_delayed_ref_root *delayed_refs;
 
         BUG_ON(extent_op && extent_op->is_data);
-        ref = kmalloc(sizeof(*ref), GFP_NOFS);
+        ref = kmem_cache_alloc(btrfs_delayed_tree_ref_cachep, GFP_NOFS);
         if (!ref)
                 return -ENOMEM;
 
-        head_ref = kmalloc(sizeof(*head_ref), GFP_NOFS);
+        head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
         if (!head_ref) {
-                kfree(ref);
+                kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
                 return -ENOMEM;
         }
 
@@ -786,13 +790,13 @@ int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
         struct btrfs_delayed_ref_root *delayed_refs;
 
         BUG_ON(extent_op && !extent_op->is_data);
-        ref = kmalloc(sizeof(*ref), GFP_NOFS);
+        ref = kmem_cache_alloc(btrfs_delayed_data_ref_cachep, GFP_NOFS);
         if (!ref)
                 return -ENOMEM;
 
-        head_ref = kmalloc(sizeof(*head_ref), GFP_NOFS);
+        head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
         if (!head_ref) {
-                kfree(ref);
+                kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
                 return -ENOMEM;
         }
 
@@ -826,7 +830,7 @@ int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
         struct btrfs_delayed_ref_head *head_ref;
         struct btrfs_delayed_ref_root *delayed_refs;
 
-        head_ref = kmalloc(sizeof(*head_ref), GFP_NOFS);
+        head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
         if (!head_ref)
                 return -ENOMEM;
 
@@ -860,3 +864,51 @@ btrfs_find_delayed_ref_head(struct btrfs_trans_handle *trans, u64 bytenr)
                 return btrfs_delayed_node_to_head(ref);
         return NULL;
 }
+
+void btrfs_delayed_ref_exit(void)
+{
+        if (btrfs_delayed_ref_head_cachep)
+                kmem_cache_destroy(btrfs_delayed_ref_head_cachep);
+        if (btrfs_delayed_tree_ref_cachep)
+                kmem_cache_destroy(btrfs_delayed_tree_ref_cachep);
+        if (btrfs_delayed_data_ref_cachep)
+                kmem_cache_destroy(btrfs_delayed_data_ref_cachep);
+        if (btrfs_delayed_extent_op_cachep)
+                kmem_cache_destroy(btrfs_delayed_extent_op_cachep);
+}
+
+int btrfs_delayed_ref_init(void)
+{
+        btrfs_delayed_ref_head_cachep = kmem_cache_create(
+                        "btrfs_delayed_ref_head",
+                        sizeof(struct btrfs_delayed_ref_head), 0,
+                        SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
+        if (!btrfs_delayed_ref_head_cachep)
+                goto fail;
+
+        btrfs_delayed_tree_ref_cachep = kmem_cache_create(
+                        "btrfs_delayed_tree_ref",
+                        sizeof(struct btrfs_delayed_tree_ref), 0,
+                        SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
+        if (!btrfs_delayed_tree_ref_cachep)
+                goto fail;
+
+        btrfs_delayed_data_ref_cachep = kmem_cache_create(
+                        "btrfs_delayed_data_ref",
+                        sizeof(struct btrfs_delayed_data_ref), 0,
+                        SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
+        if (!btrfs_delayed_data_ref_cachep)
+                goto fail;
+
+        btrfs_delayed_extent_op_cachep = kmem_cache_create(
+                        "btrfs_delayed_extent_op",
+                        sizeof(struct btrfs_delayed_extent_op), 0,
+                        SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
+        if (!btrfs_delayed_extent_op_cachep)
+                goto fail;
+
+        return 0;
+fail:
+        btrfs_delayed_ref_exit();
+        return -ENOMEM;
+}
fs/btrfs/delayed-ref.h
@@ -141,12 +141,47 @@ struct btrfs_delayed_ref_root {
         u64 run_delayed_start;
 };
 
+extern struct kmem_cache *btrfs_delayed_ref_head_cachep;
+extern struct kmem_cache *btrfs_delayed_tree_ref_cachep;
+extern struct kmem_cache *btrfs_delayed_data_ref_cachep;
+extern struct kmem_cache *btrfs_delayed_extent_op_cachep;
+
+int btrfs_delayed_ref_init(void);
+void btrfs_delayed_ref_exit(void);
+
+static inline struct btrfs_delayed_extent_op *
+btrfs_alloc_delayed_extent_op(void)
+{
+        return kmem_cache_alloc(btrfs_delayed_extent_op_cachep, GFP_NOFS);
+}
+
+static inline void
+btrfs_free_delayed_extent_op(struct btrfs_delayed_extent_op *op)
+{
+        if (op)
+                kmem_cache_free(btrfs_delayed_extent_op_cachep, op);
+}
+
 static inline void btrfs_put_delayed_ref(struct btrfs_delayed_ref_node *ref)
 {
         WARN_ON(atomic_read(&ref->refs) == 0);
         if (atomic_dec_and_test(&ref->refs)) {
                 WARN_ON(ref->in_tree);
-                kfree(ref);
+                switch (ref->type) {
+                case BTRFS_TREE_BLOCK_REF_KEY:
+                case BTRFS_SHARED_BLOCK_REF_KEY:
+                        kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
+                        break;
+                case BTRFS_EXTENT_DATA_REF_KEY:
+                case BTRFS_SHARED_DATA_REF_KEY:
+                        kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
+                        break;
+                case 0:
+                        kmem_cache_free(btrfs_delayed_ref_head_cachep, ref);
+                        break;
+                default:
+                        BUG();
+                }
         }
 }
 
fs/btrfs/disk-io.c
@@ -3614,7 +3614,7 @@ int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
                                 continue;
                         }
 
-                        kfree(head->extent_op);
+                        btrfs_free_delayed_extent_op(head->extent_op);
                         delayed_refs->num_heads--;
                         if (list_empty(&head->cluster))
                                 delayed_refs->num_heads_ready--;
fs/btrfs/extent-tree.c
@@ -2285,7 +2285,7 @@ static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
                         ref = &locked_ref->node;
 
                         if (extent_op && must_insert_reserved) {
-                                kfree(extent_op);
+                                btrfs_free_delayed_extent_op(extent_op);
                                 extent_op = NULL;
                         }
 
@@ -2294,7 +2294,7 @@ static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
 
                                 ret = run_delayed_extent_op(trans, root,
                                                             ref, extent_op);
-                                kfree(extent_op);
+                                btrfs_free_delayed_extent_op(extent_op);
 
                                 if (ret) {
                                         list_del_init(&locked_ref->cluster);
@@ -2338,7 +2338,7 @@ static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
                                           must_insert_reserved);
 
                 btrfs_put_delayed_ref(ref);
-                kfree(extent_op);
+                btrfs_free_delayed_extent_op(extent_op);
                 count++;
 
                 if (ret) {
@@ -2586,7 +2586,7 @@ int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
         struct btrfs_delayed_extent_op *extent_op;
         int ret;
 
-        extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS);
+        extent_op = btrfs_alloc_delayed_extent_op();
         if (!extent_op)
                 return -ENOMEM;
 
@@ -2598,7 +2598,7 @@ int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
         ret = btrfs_add_delayed_extent_op(root->fs_info, trans, bytenr,
                                           num_bytes, extent_op);
         if (ret)
-                kfree(extent_op);
+                btrfs_free_delayed_extent_op(extent_op);
         return ret;
 }
 
@@ -5330,7 +5330,7 @@ static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
         if (head->extent_op) {
                 if (!head->must_insert_reserved)
                         goto out;
-                kfree(head->extent_op);
+                btrfs_free_delayed_extent_op(head->extent_op);
                 head->extent_op = NULL;
         }
 
@@ -6400,7 +6400,7 @@ struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
 
         if (root_objectid != BTRFS_TREE_LOG_OBJECTID) {
                 struct btrfs_delayed_extent_op *extent_op;
-                extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS);
+                extent_op = btrfs_alloc_delayed_extent_op();
                 BUG_ON(!extent_op); /* -ENOMEM */
                 if (key)
                         memcpy(&extent_op->key, key, sizeof(extent_op->key));
fs/btrfs/super.c
@@ -1684,10 +1684,14 @@ static int __init init_btrfs_fs(void)
         if (err)
                 goto free_delayed_inode;
 
-        err = btrfs_interface_init();
+        err = btrfs_delayed_ref_init();
         if (err)
                 goto free_auto_defrag;
 
+        err = btrfs_interface_init();
+        if (err)
+                goto free_delayed_ref;
+
         err = register_filesystem(&btrfs_fs_type);
         if (err)
                 goto unregister_ioctl;
@@ -1699,6 +1703,8 @@ static int __init init_btrfs_fs(void)
 
 unregister_ioctl:
         btrfs_interface_exit();
+free_delayed_ref:
+        btrfs_delayed_ref_exit();
 free_auto_defrag:
         btrfs_auto_defrag_exit();
 free_delayed_inode:
@@ -1720,6 +1726,7 @@ static int __init init_btrfs_fs(void)
 static void __exit exit_btrfs_fs(void)
 {
         btrfs_destroy_cachep();
+        btrfs_delayed_ref_exit();
         btrfs_auto_defrag_exit();
         btrfs_delayed_inode_exit();
         ordered_data_exit();