Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux-btrfs

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux-btrfs:
  Btrfs: fix reservations in btrfs_page_mkwrite
  Btrfs: advance window_start if we're using a bitmap
  btrfs: mask out gfp flags in releasepage
  Btrfs: fix enospc error caused by wrong checks of the chunk
  Btrfs: do not defrag a file partially
  Btrfs: fix warning for 32-bit build of fs/btrfs/check-integrity.c
  Btrfs: use cluster->window_start when allocating from a cluster bitmap
  Btrfs: Check for NULL page in extent_range_uptodate
  btrfs: Fix busyloops in transaction waiting code
  Btrfs: make sure a bitmap has enough bytes
  Btrfs: fix uninit warning in backref.c
Author: Linus Torvalds
Date: 2012-01-28 17:00:19 -08:00
commit 67d2433ee7
9 changed files with 59 additions and 35 deletions

@@ -297,7 +297,7 @@ static int __add_delayed_refs(struct btrfs_delayed_ref_head *head, u64 seq,
 struct btrfs_delayed_extent_op *extent_op = head->extent_op;
 struct rb_node *n = &head->node.rb_node;
 int sgn;
-int ret;
+int ret = 0;
 if (extent_op && extent_op->update_key)
 btrfs_disk_key_to_cpu(info_key, &extent_op->key);
@@ -392,7 +392,7 @@ static int __add_inline_refs(struct btrfs_fs_info *fs_info,
 struct btrfs_key *info_key, int *info_level,
 struct list_head *prefs)
 {
-int ret;
+int ret = 0;
 int slot;
 struct extent_buffer *leaf;
 struct btrfs_key key;

@@ -1662,7 +1662,7 @@ static void btrfsic_process_written_block(struct btrfsic_dev_state *dev_state,
 block = btrfsic_block_hashtable_lookup(bdev, dev_bytenr,
 &state->block_hashtable);
 if (NULL != block) {
-u64 bytenr;
+u64 bytenr = 0;
 struct list_head *elem_ref_to;
 struct list_head *tmp_ref_to;
@@ -2777,9 +2777,10 @@ int btrfsic_submit_bh(int rw, struct buffer_head *bh)
 printk(KERN_INFO
 "submit_bh(rw=0x%x, blocknr=%lu (bytenr %llu),"
 " size=%lu, data=%p, bdev=%p)\n",
-rw, bh->b_blocknr,
-(unsigned long long)dev_bytenr, bh->b_size,
-bh->b_data, bh->b_bdev);
+rw, (unsigned long)bh->b_blocknr,
+(unsigned long long)dev_bytenr,
+(unsigned long)bh->b_size, bh->b_data,
+bh->b_bdev);
 btrfsic_process_written_block(dev_state, dev_bytenr,
 bh->b_data, bh->b_size, NULL,
 NULL, bh, rw);
@@ -2844,7 +2845,7 @@ void btrfsic_submit_bio(int rw, struct bio *bio)
 printk(KERN_INFO
 "submit_bio(rw=0x%x, bi_vcnt=%u,"
 " bi_sector=%lu (bytenr %llu), bi_bdev=%p)\n",
-rw, bio->bi_vcnt, bio->bi_sector,
+rw, bio->bi_vcnt, (unsigned long)bio->bi_sector,
 (unsigned long long)dev_bytenr,
 bio->bi_bdev);

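The two printk hunks above address 32-bit build warnings: bh->b_blocknr (a sector_t), bh->b_size and bio->bi_sector do not necessarily match the width that %lu expects on every architecture, so the arguments are cast to unsigned long (and dev_bytenr to unsigned long long) to match the format specifiers. A standalone sketch of the same cast-to-match-the-format idea, using plain printf and made-up values rather than the kernel's printk:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

int main(void)
{
    uint64_t blocknr = 123456;   /* stands in for bh->b_blocknr (sector_t) */
    size_t size = 4096;          /* stands in for bh->b_size */

    /* Without casts, %lu may not match the argument width on 32-bit targets.
     * Casting to the type the specifier expects keeps the printout portable. */
    printf("blocknr=%llu size=%lu\n",
           (unsigned long long)blocknr, (unsigned long)size);
    return 0;
}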

@@ -962,6 +962,13 @@ static int btree_releasepage(struct page *page, gfp_t gfp_flags)
 tree = &BTRFS_I(page->mapping->host)->io_tree;
 map = &BTRFS_I(page->mapping->host)->extent_tree;
+/*
+* We need to mask out eg. __GFP_HIGHMEM and __GFP_DMA32 as we're doing
+* slab allocation from alloc_extent_state down the callchain where
+* it'd hit a BUG_ON as those flags are not allowed.
+*/
+gfp_flags &= ~GFP_SLAB_BUG_MASK;
 ret = try_release_extent_state(map, tree, page, gfp_flags);
 if (!ret)
 return 0;

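The added comment explains the releasepage fix: callers can pass zone modifiers such as __GFP_HIGHMEM in gfp_flags, but alloc_extent_state() further down the call chain hands those flags to the slab allocator, which would hit a BUG_ON, so the disallowed bits are masked off first. A userspace sketch of that masking pattern; the flag names and values below are illustrative stand-ins, not the kernel's GFP definitions:

#include <stdio.h>

/* illustrative stand-ins for gfp zone modifiers and the disallowed mask */
#define FAKE_GFP_HIGHMEM   0x02u
#define FAKE_GFP_DMA32     0x04u
#define FAKE_GFP_NOFS      0x10u
#define FAKE_SLAB_BUG_MASK (FAKE_GFP_HIGHMEM | FAKE_GFP_DMA32)

int main(void)
{
    unsigned int gfp_flags = FAKE_GFP_NOFS | FAKE_GFP_HIGHMEM;

    /* strip the modifiers the lower layer would reject */
    gfp_flags &= ~FAKE_SLAB_BUG_MASK;

    printf("flags after masking: 0x%x\n", gfp_flags); /* prints 0x10 */
    return 0;
}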

@@ -34,23 +34,24 @@
 #include "locking.h"
 #include "free-space-cache.h"
-/* control flags for do_chunk_alloc's force field
+/*
+* control flags for do_chunk_alloc's force field
 * CHUNK_ALLOC_NO_FORCE means to only allocate a chunk
 * if we really need one.
 *
-* CHUNK_ALLOC_FORCE means it must try to allocate one
-*
 * CHUNK_ALLOC_LIMITED means to only try and allocate one
 * if we have very few chunks already allocated. This is
 * used as part of the clustering code to help make sure
 * we have a good pool of storage to cluster in, without
 * filling the FS with empty chunks
 *
+* CHUNK_ALLOC_FORCE means it must try to allocate one
+*
 */
 enum {
 CHUNK_ALLOC_NO_FORCE = 0,
-CHUNK_ALLOC_FORCE = 1,
-CHUNK_ALLOC_LIMITED = 2,
+CHUNK_ALLOC_LIMITED = 1,
+CHUNK_ALLOC_FORCE = 2,
 };
 /*
@@ -3414,7 +3415,7 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans,
 again:
 spin_lock(&space_info->lock);
-if (space_info->force_alloc)
+if (force < space_info->force_alloc)
 force = space_info->force_alloc;
 if (space_info->full) {
 spin_unlock(&space_info->lock);
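
The enum reordering above is what makes the new comparison work: with CHUNK_ALLOC_NO_FORCE (0) < CHUNK_ALLOC_LIMITED (1) < CHUNK_ALLOC_FORCE (2), do_chunk_alloc can combine the caller's force level with the one recorded in space_info->force_alloc by keeping whichever is larger. A minimal sketch of that comparison, using a local copy of the enum:

#include <stdio.h>

enum {
    CHUNK_ALLOC_NO_FORCE = 0,
    CHUNK_ALLOC_LIMITED = 1,
    CHUNK_ALLOC_FORCE = 2,
};

/* combine the caller's force level with a previously recorded one:
 * the numerically larger value is the stronger request */
static int combine_force(int force, int recorded)
{
    if (force < recorded)
        force = recorded;
    return force;
}

int main(void)
{
    printf("%d\n", combine_force(CHUNK_ALLOC_NO_FORCE, CHUNK_ALLOC_LIMITED)); /* 1 */
    printf("%d\n", combine_force(CHUNK_ALLOC_FORCE, CHUNK_ALLOC_LIMITED));    /* 2 */
    return 0;
}
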
@@ -5794,6 +5795,7 @@ int btrfs_reserve_extent(struct btrfs_trans_handle *trans,
 u64 search_end, struct btrfs_key *ins,
 u64 data)
 {
+bool final_tried = false;
 int ret;
 u64 search_start = 0;
@@ -5813,15 +5815,17 @@ int btrfs_reserve_extent(struct btrfs_trans_handle *trans,
 search_start, search_end, hint_byte,
 ins, data);
-if (ret == -ENOSPC && num_bytes > min_alloc_size) {
+if (ret == -ENOSPC) {
+if (!final_tried) {
 num_bytes = num_bytes >> 1;
 num_bytes = num_bytes & ~(root->sectorsize - 1);
 num_bytes = max(num_bytes, min_alloc_size);
 do_chunk_alloc(trans, root->fs_info->extent_root,
 num_bytes, data, CHUNK_ALLOC_FORCE);
+if (num_bytes == min_alloc_size)
+final_tried = true;
 goto again;
-}
-if (ret == -ENOSPC && btrfs_test_opt(root, ENOSPC_DEBUG)) {
+} else if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
 struct btrfs_space_info *sinfo;

@@ -5830,6 +5834,7 @@ int btrfs_reserve_extent(struct btrfs_trans_handle *trans,
 (unsigned long long)num_bytes);
 dump_space_info(sinfo, num_bytes, 1);
 }
+}
 trace_btrfs_reserved_extent_alloc(root, ins->objectid, ins->offset);

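The reworked btrfs_reserve_extent error path above shrinks a failed request instead of giving up: on -ENOSPC the size is halved, rounded down to a sector multiple and clamped to min_alloc_size, and final_tried ensures exactly one last attempt at the minimum size. A userspace sketch of that shrink-and-retry loop; try_alloc() here is a stand-in for the real allocator:

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

#define SECTORSIZE 4096ULL

/* stand-in allocator: pretend anything above 64K fails with -ENOSPC (-28) */
static int try_alloc(uint64_t num_bytes)
{
    return num_bytes > 65536 ? -28 : 0;
}

static int reserve(uint64_t num_bytes, uint64_t min_alloc_size)
{
    bool final_tried = false;
    int ret;

again:
    ret = try_alloc(num_bytes);
    if (ret == -28 && !final_tried) {
        num_bytes >>= 1;                    /* halve the request */
        num_bytes &= ~(SECTORSIZE - 1);     /* round down to a sector multiple */
        if (num_bytes < min_alloc_size)
            num_bytes = min_alloc_size;     /* never go below the floor */
        if (num_bytes == min_alloc_size)
            final_tried = true;             /* one last attempt before giving up */
        goto again;
    }
    printf("ret=%d, size=%llu\n", ret, (unsigned long long)num_bytes);
    return ret;
}

int main(void)
{
    return reserve(1 << 20, SECTORSIZE) ? 1 : 0;
}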

@@ -3909,6 +3909,8 @@ int extent_range_uptodate(struct extent_io_tree *tree,
 while (start <= end) {
 index = start >> PAGE_CACHE_SHIFT;
 page = find_get_page(tree->mapping, index);
+if (!page)
+return 1;
 uptodate = PageUptodate(page);
 page_cache_release(page);
 if (!uptodate) {

@@ -2242,7 +2242,7 @@ u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
 if (entry->bitmap) {
 ret = btrfs_alloc_from_bitmap(block_group,
 cluster, entry, bytes,
-min_start);
+cluster->window_start);
 if (ret == 0) {
 node = rb_next(&entry->offset_index);
 if (!node)
@@ -2251,6 +2251,7 @@ u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
 offset_index);
 continue;
 }
+cluster->window_start += bytes;
 } else {
 ret = entry->offset;
@@ -2475,7 +2476,7 @@ setup_cluster_bitmap(struct btrfs_block_group_cache *block_group,
 }
 list_for_each_entry(entry, bitmaps, list) {
-if (entry->bytes < min_bytes)
+if (entry->bytes < bytes)
 continue;
 ret = btrfs_bitmap_cluster(block_group, entry, cluster, offset,
 bytes, cont1_bytes, min_bytes);

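The cluster hunks above change two things: allocation from a bitmap-backed cluster now starts at cluster->window_start rather than the caller's min_start, and window_start is advanced by the allocated size so the next allocation begins past the space just handed out. A toy sketch of that window bookkeeping, not the kernel's free-space code:

#include <stdio.h>
#include <stdint.h>

struct cluster {
    uint64_t window_start;  /* next offset to hand out */
    uint64_t window_end;    /* exclusive end of the clustered range */
};

/* hand out 'bytes' from the front of the window, or 0 on failure (toy rule) */
static uint64_t alloc_from_cluster(struct cluster *c, uint64_t bytes)
{
    uint64_t ret;

    if (c->window_start + bytes > c->window_end)
        return 0;
    ret = c->window_start;
    c->window_start += bytes;  /* advance so the next caller starts past us */
    return ret;
}

int main(void)
{
    struct cluster c = { .window_start = 1 << 20, .window_end = 2 << 20 };

    printf("first:  %llu\n", (unsigned long long)alloc_from_cluster(&c, 4096));
    printf("second: %llu\n", (unsigned long long)alloc_from_cluster(&c, 4096));
    return 0;
}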

@@ -6401,18 +6401,23 @@ int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
 unsigned long zero_start;
 loff_t size;
 int ret;
+int reserved = 0;
 u64 page_start;
 u64 page_end;
 ret = btrfs_delalloc_reserve_space(inode, PAGE_CACHE_SIZE);
-if (!ret)
+if (!ret) {
 ret = btrfs_update_time(vma->vm_file);
+reserved = 1;
+}
 if (ret) {
 if (ret == -ENOMEM)
 ret = VM_FAULT_OOM;
 else /* -ENOSPC, -EIO, etc */
 ret = VM_FAULT_SIGBUS;
+if (reserved)
 goto out;
+goto out_noreserve;
 }
 ret = VM_FAULT_NOPAGE; /* make the VM retry the fault */
@@ -6495,6 +6500,7 @@ int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
 unlock_page(page);
 out:
 btrfs_delalloc_release_space(inode, PAGE_CACHE_SIZE);
+out_noreserve:
 return ret;
 }

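The page_mkwrite change records whether the delalloc reservation actually succeeded, so the error path releases the reservation (via out) only when one was taken and otherwise skips it through the new out_noreserve label. A standalone sketch of that release-only-what-you-reserved pattern; reserve_space(), update_time() and release_space() are placeholders:

#include <stdio.h>

static int reserve_space(void)  { return 0; }    /* pretend the reservation succeeds */
static int update_time(void)    { return -1; }   /* pretend the follow-up step fails */
static void release_space(void) { puts("released"); }

static int page_mkwrite_like(void)
{
    int reserved = 0;
    int ret;

    ret = reserve_space();
    if (!ret) {
        ret = update_time();       /* may fail even though space is reserved */
        reserved = 1;
    }
    if (ret) {
        if (reserved)
            goto out;              /* reservation taken: release it below */
        goto out_noreserve;        /* nothing reserved: skip the release */
    }

    /* ... the actual fault handling would go here ... */
out:
    release_space();               /* balances the successful reservation */
out_noreserve:
    return ret;
}

int main(void)
{
    page_mkwrite_like();
    return 0;
}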

@@ -1065,7 +1065,7 @@ int btrfs_defrag_file(struct inode *inode, struct file *file,
 i = range->start >> PAGE_CACHE_SHIFT;
 }
 if (!max_to_defrag)
-max_to_defrag = last_index;
+max_to_defrag = last_index + 1;
 /*
 * make writeback starts from i, so the defrag range can be

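The defrag fix is an off-by-one: page indexes 0 through last_index cover last_index + 1 pages, so defaulting max_to_defrag to last_index stopped one page short of the end of the file. A two-line illustration of the count, assuming 4K pages for the example:

#include <stdio.h>

#define EXAMPLE_PAGE_SHIFT 12  /* 4K pages assumed for the example */

int main(void)
{
    unsigned long isize = 5 * 4096 + 1;                           /* 5 full pages plus 1 byte */
    unsigned long last_index = (isize - 1) >> EXAMPLE_PAGE_SHIFT; /* == 5 */

    /* pages 0..last_index inclusive -> last_index + 1 of them */
    printf("last_index=%lu, pages to defrag=%lu\n", last_index, last_index + 1);
    return 0;
}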

@@ -1957,7 +1957,8 @@ static int wait_log_commit(struct btrfs_trans_handle *trans,
 finish_wait(&root->log_commit_wait[index], &wait);
 mutex_lock(&root->log_mutex);
-} while (root->log_transid < transid + 2 &&
+} while (root->fs_info->last_trans_log_full_commit !=
+trans->transid && root->log_transid < transid + 2 &&
 atomic_read(&root->log_commit[index]));
 return 0;
 }
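
The wait loops above gain an extra exit condition so the waiters stop once last_trans_log_full_commit is set to the current transaction, rather than looping for a state change that may never come. A generic sketch of a wait loop with such a give-up-early flag, using pthreads instead of the kernel's wait queues:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
static int  writers = 1;            /* stands in for root->log_writers */
static bool full_commit = false;    /* stands in for "full commit requested" */

/* wait until all writers are done OR a full commit was requested */
static void wait_for_writers(void)
{
    pthread_mutex_lock(&lock);
    while (!full_commit && writers > 0)
        pthread_cond_wait(&cond, &lock);   /* sleeps; no busy loop */
    pthread_mutex_unlock(&lock);
}

static void *flag_full_commit(void *arg)
{
    (void)arg;
    pthread_mutex_lock(&lock);
    full_commit = true;                    /* bail out even though writers > 0 */
    pthread_cond_broadcast(&cond);
    pthread_mutex_unlock(&lock);
    return NULL;
}

int main(void)
{
    pthread_t t;

    pthread_create(&t, NULL, flag_full_commit, NULL);
    wait_for_writers();
    pthread_join(t, NULL);
    puts("stopped waiting");
    return 0;
}
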
@@ -1966,7 +1967,8 @@ static int wait_for_writer(struct btrfs_trans_handle *trans,
 struct btrfs_root *root)
 {
 DEFINE_WAIT(wait);
-while (atomic_read(&root->log_writers)) {
+while (root->fs_info->last_trans_log_full_commit !=
+trans->transid && atomic_read(&root->log_writers)) {
 prepare_to_wait(&root->log_writer_wait,
 &wait, TASK_UNINTERRUPTIBLE);
 mutex_unlock(&root->log_mutex);