Merge branch 'dev/gfp-flags' into for-chris-4.6
commit e22b3d1fbe

@@ -5361,7 +5361,7 @@ int btrfs_compare_trees(struct btrfs_root *left_root,
 goto out;
 }
 
-tmp_buf = kmalloc(left_root->nodesize, GFP_NOFS);
+tmp_buf = kmalloc(left_root->nodesize, GFP_KERNEL);
 if (!tmp_buf) {
 ret = -ENOMEM;
 goto out;

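Note: the change repeated throughout this merge is a relaxation of GFP_NOFS to GFP_KERNEL on call paths that cannot deadlock against the filesystem. GFP_NOFS only exists to keep direct reclaim from re-entering filesystem code (for example while a transaction or other fs lock is held); where no such context applies, GFP_KERNEL gives the allocator more freedom. A minimal sketch of the rule of thumb, using a hypothetical helper that is not part of the patch:

#include <linux/gfp.h>
#include <linux/slab.h>

/*
 * Hypothetical helper, for illustration only. GFP_NOFS is GFP_KERNEL
 * with __GFP_FS cleared: reclaim may still run, but it will not call
 * back into filesystem code. Use it only where that re-entry could
 * deadlock (inside a transaction, under fs locks); otherwise prefer
 * GFP_KERNEL.
 */
static void *example_alloc(size_t size, bool inside_transaction)
{
        gfp_t flags = inside_transaction ? GFP_NOFS : GFP_KERNEL;

        return kzalloc(size, flags);
}
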
@@ -802,7 +802,7 @@ static int btrfs_dev_replace_kthread(void *data)
 struct btrfs_ioctl_dev_replace_args *status_args;
 u64 progress;
 
-status_args = kzalloc(sizeof(*status_args), GFP_NOFS);
+status_args = kzalloc(sizeof(*status_args), GFP_KERNEL);
 if (status_args) {
 btrfs_dev_replace_status(fs_info, status_args);
 progress = status_args->status.progress_1000;

@@ -1296,9 +1296,10 @@ static void __setup_root(u32 nodesize, u32 sectorsize, u32 stripesize,
 spin_lock_init(&root->root_item_lock);
 }
 
-static struct btrfs_root *btrfs_alloc_root(struct btrfs_fs_info *fs_info)
+static struct btrfs_root *btrfs_alloc_root(struct btrfs_fs_info *fs_info,
+gfp_t flags)
 {
-struct btrfs_root *root = kzalloc(sizeof(*root), GFP_NOFS);
+struct btrfs_root *root = kzalloc(sizeof(*root), flags);
 if (root)
 root->fs_info = fs_info;
 return root;
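btrfs_alloc_root() now takes the gfp mask from its caller instead of hard-coding GFP_NOFS, so each call site below states its own context: GFP_KERNEL on mount and self-test paths, GFP_NOFS where the root may be allocated under a transaction. A small sketch of how a caller might wrap that choice (illustrative only; the helper and the under_transaction flag are not in the patch):

static struct btrfs_root *alloc_root_for_context(struct btrfs_fs_info *fs_info,
                                                 bool under_transaction)
{
        /* pick the mask per call site, as the hunks below do explicitly */
        gfp_t flags = under_transaction ? GFP_NOFS : GFP_KERNEL;

        return btrfs_alloc_root(fs_info, flags);
}
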
@@ -1310,7 +1311,7 @@ struct btrfs_root *btrfs_alloc_dummy_root(void)
 {
 struct btrfs_root *root;
 
-root = btrfs_alloc_root(NULL);
+root = btrfs_alloc_root(NULL, GFP_KERNEL);
 if (!root)
 return ERR_PTR(-ENOMEM);
 __setup_root(4096, 4096, 4096, root, NULL, 1);
@@ -1332,7 +1333,7 @@ struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans,
 int ret = 0;
 uuid_le uuid;
 
-root = btrfs_alloc_root(fs_info);
+root = btrfs_alloc_root(fs_info, GFP_KERNEL);
 if (!root)
 return ERR_PTR(-ENOMEM);
 
@@ -1408,7 +1409,7 @@ static struct btrfs_root *alloc_log_tree(struct btrfs_trans_handle *trans,
 struct btrfs_root *tree_root = fs_info->tree_root;
 struct extent_buffer *leaf;
 
-root = btrfs_alloc_root(fs_info);
+root = btrfs_alloc_root(fs_info, GFP_NOFS);
 if (!root)
 return ERR_PTR(-ENOMEM);
 
@@ -1506,7 +1507,7 @@ static struct btrfs_root *btrfs_read_tree_root(struct btrfs_root *tree_root,
 if (!path)
 return ERR_PTR(-ENOMEM);
 
-root = btrfs_alloc_root(fs_info);
+root = btrfs_alloc_root(fs_info, GFP_NOFS);
 if (!root) {
 ret = -ENOMEM;
 goto alloc_fail;
@@ -2385,7 +2386,7 @@ static int btrfs_replay_log(struct btrfs_fs_info *fs_info,
 return -EIO;
 }
 
-log_tree_root = btrfs_alloc_root(fs_info);
+log_tree_root = btrfs_alloc_root(fs_info, GFP_KERNEL);
 if (!log_tree_root)
 return -ENOMEM;
 
@@ -2510,8 +2511,8 @@ int open_ctree(struct super_block *sb,
 int backup_index = 0;
 int max_active;
 
-tree_root = fs_info->tree_root = btrfs_alloc_root(fs_info);
-chunk_root = fs_info->chunk_root = btrfs_alloc_root(fs_info);
+tree_root = fs_info->tree_root = btrfs_alloc_root(fs_info, GFP_KERNEL);
+chunk_root = fs_info->chunk_root = btrfs_alloc_root(fs_info, GFP_KERNEL);
 if (!tree_root || !chunk_root) {
 err = -ENOMEM;
 goto fail;
@@ -2622,7 +2623,7 @@ int open_ctree(struct super_block *sb,
 INIT_LIST_HEAD(&fs_info->ordered_roots);
 spin_lock_init(&fs_info->ordered_root_lock);
 fs_info->delayed_root = kmalloc(sizeof(struct btrfs_delayed_root),
-GFP_NOFS);
+GFP_KERNEL);
 if (!fs_info->delayed_root) {
 err = -ENOMEM;
 goto fail_iput;

@@ -2636,7 +2636,7 @@ static int add_falloc_range(struct list_head *head, u64 start, u64 len)
 return 0;
 }
 insert:
-range = kmalloc(sizeof(*range), GFP_NOFS);
+range = kmalloc(sizeof(*range), GFP_KERNEL);
 if (!range)
 return -ENOMEM;
 range->start = start;
@@ -2737,7 +2737,7 @@ static long btrfs_fallocate(struct file *file, int mode,
 btrfs_put_ordered_extent(ordered);
 unlock_extent_cached(&BTRFS_I(inode)->io_tree,
 alloc_start, locked_end,
-&cached_state, GFP_NOFS);
+&cached_state, GFP_KERNEL);
 /*
 * we can't wait on the range with the transaction
 * running or with the extent lock held
@@ -2831,7 +2831,7 @@ static long btrfs_fallocate(struct file *file, int mode,
 }
 out_unlock:
 unlock_extent_cached(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
-&cached_state, GFP_NOFS);
+&cached_state, GFP_KERNEL);
 out:
 /*
 * As we waited the extent range, the data_rsv_map must be empty

@@ -5793,7 +5793,7 @@ static int btrfs_real_readdir(struct file *file, struct dir_context *ctx)
 if (name_len <= sizeof(tmp_name)) {
 name_ptr = tmp_name;
 } else {
-name_ptr = kmalloc(name_len, GFP_NOFS);
+name_ptr = kmalloc(name_len, GFP_KERNEL);
 if (!name_ptr) {
 ret = -ENOMEM;
 goto err;

@@ -2925,8 +2925,8 @@ static int btrfs_cmp_data_prepare(struct inode *src, u64 loff,
 * of the array is bounded by len, which is in turn bounded by
 * BTRFS_MAX_DEDUPE_LEN.
 */
-src_pgarr = kzalloc(num_pages * sizeof(struct page *), GFP_NOFS);
-dst_pgarr = kzalloc(num_pages * sizeof(struct page *), GFP_NOFS);
+src_pgarr = kcalloc(num_pages, sizeof(struct page *), GFP_KERNEL);
+dst_pgarr = kcalloc(num_pages, sizeof(struct page *), GFP_KERNEL);
 if (!src_pgarr || !dst_pgarr) {
 kfree(src_pgarr);
 kfree(dst_pgarr);

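Besides the gfp change, this hunk switches kzalloc(num_pages * sizeof(...)) to kcalloc(num_pages, sizeof(...), ...): kcalloc performs the same zeroed allocation but returns NULL if the multiplication would overflow, instead of silently allocating a short buffer. A standalone illustration (hypothetical helper, not from the patch):

#include <linux/slab.h>

/*
 * Overflow-safe zeroed array allocation: kcalloc(n, size, gfp) checks
 * n * size against overflow and fails cleanly with NULL.
 */
static struct page **alloc_page_array(unsigned long num_pages)
{
        return kcalloc(num_pages, sizeof(struct page *), GFP_KERNEL);
}
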
@@ -280,7 +280,7 @@ static struct reada_zone *reada_find_zone(struct btrfs_fs_info *fs_info,
 end = start + cache->key.offset - 1;
 btrfs_put_block_group(cache);
 
-zone = kzalloc(sizeof(*zone), GFP_NOFS);
+zone = kzalloc(sizeof(*zone), GFP_KERNEL);
 if (!zone)
 return NULL;
 
@@ -343,7 +343,7 @@ static struct reada_extent *reada_find_extent(struct btrfs_root *root,
 if (re)
 return re;
 
-re = kzalloc(sizeof(*re), GFP_NOFS);
+re = kzalloc(sizeof(*re), GFP_KERNEL);
 if (!re)
 return NULL;
 
@@ -566,7 +566,7 @@ static int reada_add_block(struct reada_control *rc, u64 logical,
 if (!re)
 return -1;
 
-rec = kzalloc(sizeof(*rec), GFP_NOFS);
+rec = kzalloc(sizeof(*rec), GFP_KERNEL);
 if (!rec) {
 reada_extent_put(root->fs_info, re);
 return -ENOMEM;
@@ -791,7 +791,7 @@ static void reada_start_machine(struct btrfs_fs_info *fs_info)
 {
 struct reada_machine_work *rmw;
 
-rmw = kzalloc(sizeof(*rmw), GFP_NOFS);
+rmw = kzalloc(sizeof(*rmw), GFP_KERNEL);
 if (!rmw) {
 /* FIXME we cannot handle this properly right now */
 BUG();
@@ -926,7 +926,7 @@ struct reada_control *btrfs_reada_add(struct btrfs_root *root,
 .offset = (u64)-1
 };
 
-rc = kzalloc(sizeof(*rc), GFP_NOFS);
+rc = kzalloc(sizeof(*rc), GFP_KERNEL);
 if (!rc)
 return ERR_PTR(-ENOMEM);
 

@@ -461,7 +461,7 @@ struct scrub_ctx *scrub_setup_ctx(struct btrfs_device *dev, int is_dev_replace)
 struct btrfs_fs_info *fs_info = dev->dev_root->fs_info;
 int ret;
 
-sctx = kzalloc(sizeof(*sctx), GFP_NOFS);
+sctx = kzalloc(sizeof(*sctx), GFP_KERNEL);
 if (!sctx)
 goto nomem;
 atomic_set(&sctx->refs, 1);
@@ -472,7 +472,7 @@ struct scrub_ctx *scrub_setup_ctx(struct btrfs_device *dev, int is_dev_replace)
 for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) {
 struct scrub_bio *sbio;
 
-sbio = kzalloc(sizeof(*sbio), GFP_NOFS);
+sbio = kzalloc(sizeof(*sbio), GFP_KERNEL);
 if (!sbio)
 goto nomem;
 sctx->bios[i] = sbio;
@@ -1654,7 +1654,7 @@ static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
 again:
 if (!wr_ctx->wr_curr_bio) {
 wr_ctx->wr_curr_bio = kzalloc(sizeof(*wr_ctx->wr_curr_bio),
-GFP_NOFS);
+GFP_KERNEL);
 if (!wr_ctx->wr_curr_bio) {
 mutex_unlock(&wr_ctx->wr_lock);
 return -ENOMEM;
@@ -1671,7 +1671,8 @@ static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
 sbio->dev = wr_ctx->tgtdev;
 bio = sbio->bio;
 if (!bio) {
-bio = btrfs_io_bio_alloc(GFP_NOFS, wr_ctx->pages_per_wr_bio);
+bio = btrfs_io_bio_alloc(GFP_KERNEL,
+wr_ctx->pages_per_wr_bio);
 if (!bio) {
 mutex_unlock(&wr_ctx->wr_lock);
 return -ENOMEM;
@@ -2076,7 +2077,8 @@ static int scrub_add_page_to_rd_bio(struct scrub_ctx *sctx,
 sbio->dev = spage->dev;
 bio = sbio->bio;
 if (!bio) {
-bio = btrfs_io_bio_alloc(GFP_NOFS, sctx->pages_per_rd_bio);
+bio = btrfs_io_bio_alloc(GFP_KERNEL,
+sctx->pages_per_rd_bio);
 if (!bio)
 return -ENOMEM;
 sbio->bio = bio;
@@ -2241,7 +2243,7 @@ static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
 struct scrub_block *sblock;
 int index;
 
-sblock = kzalloc(sizeof(*sblock), GFP_NOFS);
+sblock = kzalloc(sizeof(*sblock), GFP_KERNEL);
 if (!sblock) {
 spin_lock(&sctx->stat_lock);
 sctx->stat.malloc_errors++;
@@ -2259,7 +2261,7 @@ static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
 struct scrub_page *spage;
 u64 l = min_t(u64, len, PAGE_SIZE);
 
-spage = kzalloc(sizeof(*spage), GFP_NOFS);
+spage = kzalloc(sizeof(*spage), GFP_KERNEL);
 if (!spage) {
 leave_nomem:
 spin_lock(&sctx->stat_lock);
@@ -2286,7 +2288,7 @@ static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
 spage->have_csum = 0;
 }
 sblock->page_count++;
-spage->page = alloc_page(GFP_NOFS);
+spage->page = alloc_page(GFP_KERNEL);
 if (!spage->page)
 goto leave_nomem;
 len -= l;
@@ -2541,7 +2543,7 @@ static int scrub_pages_for_parity(struct scrub_parity *sparity,
 struct scrub_block *sblock;
 int index;
 
-sblock = kzalloc(sizeof(*sblock), GFP_NOFS);
+sblock = kzalloc(sizeof(*sblock), GFP_KERNEL);
 if (!sblock) {
 spin_lock(&sctx->stat_lock);
 sctx->stat.malloc_errors++;
@@ -2561,7 +2563,7 @@ static int scrub_pages_for_parity(struct scrub_parity *sparity,
 struct scrub_page *spage;
 u64 l = min_t(u64, len, PAGE_SIZE);
 
-spage = kzalloc(sizeof(*spage), GFP_NOFS);
+spage = kzalloc(sizeof(*spage), GFP_KERNEL);
 if (!spage) {
 leave_nomem:
 spin_lock(&sctx->stat_lock);
@@ -2591,7 +2593,7 @@ static int scrub_pages_for_parity(struct scrub_parity *sparity,
 spage->have_csum = 0;
 }
 sblock->page_count++;
-spage->page = alloc_page(GFP_NOFS);
+spage->page = alloc_page(GFP_KERNEL);
 if (!spage->page)
 goto leave_nomem;
 len -= l;

@@ -304,7 +304,7 @@ static struct fs_path *fs_path_alloc(void)
 {
 struct fs_path *p;
 
-p = kmalloc(sizeof(*p), GFP_NOFS);
+p = kmalloc(sizeof(*p), GFP_KERNEL);
 if (!p)
 return NULL;
 p->reversed = 0;
@@ -363,11 +363,11 @@ static int fs_path_ensure_buf(struct fs_path *p, int len)
 * First time the inline_buf does not suffice
 */
 if (p->buf == p->inline_buf) {
-tmp_buf = kmalloc(len, GFP_NOFS);
+tmp_buf = kmalloc(len, GFP_KERNEL);
 if (tmp_buf)
 memcpy(tmp_buf, p->buf, old_buf_len);
 } else {
-tmp_buf = krealloc(p->buf, len, GFP_NOFS);
+tmp_buf = krealloc(p->buf, len, GFP_KERNEL);
 }
 if (!tmp_buf)
 return -ENOMEM;
@@ -995,7 +995,7 @@ static int iterate_dir_item(struct btrfs_root *root, struct btrfs_path *path,
 * values are small.
 */
 buf_len = PATH_MAX;
-buf = kmalloc(buf_len, GFP_NOFS);
+buf = kmalloc(buf_len, GFP_KERNEL);
 if (!buf) {
 ret = -ENOMEM;
 goto out;
@@ -1042,7 +1042,7 @@ static int iterate_dir_item(struct btrfs_root *root, struct btrfs_path *path,
 buf = NULL;
 } else {
 char *tmp = krealloc(buf, buf_len,
-GFP_NOFS | __GFP_NOWARN);
+GFP_KERNEL | __GFP_NOWARN);
 
 if (!tmp)
 kfree(buf);
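The krealloc() in iterate_dir_item() keeps __GFP_NOWARN next to the new GFP_KERNEL: the failure is handled right below (the old buffer is freed and the error propagated), so the allocator's failure warning would only add noise. A minimal sketch of that pattern with a hypothetical helper:

#include <linux/slab.h>

/*
 * Hypothetical buffer-growth helper. krealloc() returns NULL on failure
 * and leaves the old buffer intact, so the caller must still free it,
 * exactly as the hunk above does with kfree(buf).
 */
static char *grow_buf_quietly(char *buf, size_t new_len)
{
        return krealloc(buf, new_len, GFP_KERNEL | __GFP_NOWARN);
}
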
@@ -1303,7 +1303,7 @@ static int find_extent_clone(struct send_ctx *sctx,
 /* We only use this path under the commit sem */
 tmp_path->need_commit_sem = 0;
 
-backref_ctx = kmalloc(sizeof(*backref_ctx), GFP_NOFS);
+backref_ctx = kmalloc(sizeof(*backref_ctx), GFP_KERNEL);
 if (!backref_ctx) {
 ret = -ENOMEM;
 goto out;
@@ -1984,7 +1984,7 @@ static int name_cache_insert(struct send_ctx *sctx,
 nce_head = radix_tree_lookup(&sctx->name_cache,
 (unsigned long)nce->ino);
 if (!nce_head) {
-nce_head = kmalloc(sizeof(*nce_head), GFP_NOFS);
+nce_head = kmalloc(sizeof(*nce_head), GFP_KERNEL);
 if (!nce_head) {
 kfree(nce);
 return -ENOMEM;
@@ -2179,7 +2179,7 @@ static int __get_cur_name_and_parent(struct send_ctx *sctx,
 /*
 * Store the result of the lookup in the name cache.
 */
-nce = kmalloc(sizeof(*nce) + fs_path_len(dest) + 1, GFP_NOFS);
+nce = kmalloc(sizeof(*nce) + fs_path_len(dest) + 1, GFP_KERNEL);
 if (!nce) {
 ret = -ENOMEM;
 goto out;
@@ -2315,7 +2315,7 @@ static int send_subvol_begin(struct send_ctx *sctx)
 if (!path)
 return -ENOMEM;
 
-name = kmalloc(BTRFS_PATH_NAME_MAX, GFP_NOFS);
+name = kmalloc(BTRFS_PATH_NAME_MAX, GFP_KERNEL);
 if (!name) {
 btrfs_free_path(path);
 return -ENOMEM;
@@ -2730,7 +2730,7 @@ static int __record_ref(struct list_head *head, u64 dir,
 {
 struct recorded_ref *ref;
 
-ref = kmalloc(sizeof(*ref), GFP_NOFS);
+ref = kmalloc(sizeof(*ref), GFP_KERNEL);
 if (!ref)
 return -ENOMEM;
 
@@ -2755,7 +2755,7 @@ static int dup_ref(struct recorded_ref *ref, struct list_head *list)
 {
 struct recorded_ref *new;
 
-new = kmalloc(sizeof(*ref), GFP_NOFS);
+new = kmalloc(sizeof(*ref), GFP_KERNEL);
 if (!new)
 return -ENOMEM;
 
@@ -2818,7 +2818,7 @@ add_orphan_dir_info(struct send_ctx *sctx, u64 dir_ino)
 struct rb_node *parent = NULL;
 struct orphan_dir_info *entry, *odi;
 
-odi = kmalloc(sizeof(*odi), GFP_NOFS);
+odi = kmalloc(sizeof(*odi), GFP_KERNEL);
 if (!odi)
 return ERR_PTR(-ENOMEM);
 odi->ino = dir_ino;
@@ -2973,7 +2973,7 @@ static int add_waiting_dir_move(struct send_ctx *sctx, u64 ino, bool orphanized)
 struct rb_node *parent = NULL;
 struct waiting_dir_move *entry, *dm;
 
-dm = kmalloc(sizeof(*dm), GFP_NOFS);
+dm = kmalloc(sizeof(*dm), GFP_KERNEL);
 if (!dm)
 return -ENOMEM;
 dm->ino = ino;
@@ -3040,7 +3040,7 @@ static int add_pending_dir_move(struct send_ctx *sctx,
 int exists = 0;
 int ret;
 
-pm = kmalloc(sizeof(*pm), GFP_NOFS);
+pm = kmalloc(sizeof(*pm), GFP_KERNEL);
 if (!pm)
 return -ENOMEM;
 pm->parent_ino = parent_ino;
@@ -4280,7 +4280,7 @@ static int __find_xattr(int num, struct btrfs_key *di_key,
 strncmp(name, ctx->name, name_len) == 0) {
 ctx->found_idx = num;
 ctx->found_data_len = data_len;
-ctx->found_data = kmemdup(data, data_len, GFP_NOFS);
+ctx->found_data = kmemdup(data, data_len, GFP_KERNEL);
 if (!ctx->found_data)
 return -ENOMEM;
 return 1;
@@ -4481,7 +4481,7 @@ static ssize_t fill_read_buf(struct send_ctx *sctx, u64 offset, u32 len)
 while (index <= last_index) {
 unsigned cur_len = min_t(unsigned, len,
 PAGE_CACHE_SIZE - pg_offset);
-page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
+page = find_or_create_page(inode->i_mapping, index, GFP_KERNEL);
 if (!page) {
 ret = -ENOMEM;
 break;
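fill_read_buf() reads the source file through the page cache; the gfp mask passed to find_or_create_page() is only used when the page has to be allocated on a cache miss. A minimal, hypothetical sketch of that lookup-or-allocate pattern (helper name and error handling are illustrative, not from the patch):

#include <linux/pagemap.h>

/*
 * find_or_create_page() returns the page locked and referenced,
 * allocating it with the given gfp mask only if it is not already in
 * the page cache. The caller must unlock_page() and later put_page().
 */
static struct page *get_cached_page(struct address_space *mapping,
                                    pgoff_t index)
{
        struct page *page;

        page = find_or_create_page(mapping, index, GFP_KERNEL);
        if (!page)
                return NULL;

        unlock_page(page);      /* keep the reference, drop the lock */
        return page;
}
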
@@ -5989,7 +5989,7 @@ long btrfs_ioctl_send(struct file *mnt_file, void __user *arg_)
 goto out;
 }
 
-sctx = kzalloc(sizeof(struct send_ctx), GFP_NOFS);
+sctx = kzalloc(sizeof(struct send_ctx), GFP_KERNEL);
 if (!sctx) {
 ret = -ENOMEM;
 goto out;
@@ -5997,7 +5997,7 @@ long btrfs_ioctl_send(struct file *mnt_file, void __user *arg_)
 
 INIT_LIST_HEAD(&sctx->new_refs);
 INIT_LIST_HEAD(&sctx->deleted_refs);
-INIT_RADIX_TREE(&sctx->name_cache, GFP_NOFS);
+INIT_RADIX_TREE(&sctx->name_cache, GFP_KERNEL);
 INIT_LIST_HEAD(&sctx->name_cache_list);
 
 sctx->flags = arg->flags;
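INIT_RADIX_TREE() stores the gfp mask in the tree itself, and the radix tree uses it whenever a later radix_tree_insert() has to allocate internal nodes, so this one-line change covers every insertion into the send name cache. A self-contained sketch (hypothetical tree and key, not from the patch):

#include <linux/radix-tree.h>

static struct radix_tree_root example_cache;

/*
 * The mask given at init time is what radix_tree_insert() will use for
 * its node allocations.
 */
static int example_cache_init_and_insert(unsigned long ino, void *item)
{
        INIT_RADIX_TREE(&example_cache, GFP_KERNEL);
        return radix_tree_insert(&example_cache, ino, item);
}
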
@@ -138,7 +138,7 @@ static struct btrfs_fs_devices *__alloc_fs_devices(void)
 {
 struct btrfs_fs_devices *fs_devs;
 
-fs_devs = kzalloc(sizeof(*fs_devs), GFP_NOFS);
+fs_devs = kzalloc(sizeof(*fs_devs), GFP_KERNEL);
 if (!fs_devs)
 return ERR_PTR(-ENOMEM);
 
@@ -220,7 +220,7 @@ static struct btrfs_device *__alloc_device(void)
 {
 struct btrfs_device *dev;
 
-dev = kzalloc(sizeof(*dev), GFP_NOFS);
+dev = kzalloc(sizeof(*dev), GFP_KERNEL);
 if (!dev)
 return ERR_PTR(-ENOMEM);
 
@@ -733,7 +733,8 @@ static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
 * uuid mutex so nothing we touch in here is going to disappear.
 */
 if (orig_dev->name) {
-name = rcu_string_strdup(orig_dev->name->str, GFP_NOFS);
+name = rcu_string_strdup(orig_dev->name->str,
+GFP_KERNEL);
 if (!name) {
 kfree(device);
 goto error;
@@ -2287,7 +2288,7 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
 goto error;
 }
 
-name = rcu_string_strdup(device_path, GFP_NOFS);
+name = rcu_string_strdup(device_path, GFP_KERNEL);
 if (!name) {
 kfree(device);
 ret = -ENOMEM;