Merge branch 'bcache-for-3.14' of git://evilpiepirate.org/~kent/linux-bcache into for-linus

Author: Jens Axboe
Date:   2014-01-30 12:57:55 -07:00
Commit: 96d2e8b5e2

6 changed files with 15 additions and 10 deletions


@@ -210,7 +210,9 @@ BITMASK(GC_MARK, struct bucket, gc_mark, 0, 2);
 #define GC_MARK_RECLAIMABLE	0
 #define GC_MARK_DIRTY		1
 #define GC_MARK_METADATA	2
-BITMASK(GC_SECTORS_USED, struct bucket, gc_mark, 2, 13);
+#define GC_SECTORS_USED_SIZE	13
+#define MAX_GC_SECTORS_USED	(~(~0ULL << GC_SECTORS_USED_SIZE))
+BITMASK(GC_SECTORS_USED, struct bucket, gc_mark, 2, GC_SECTORS_USED_SIZE);
 BITMASK(GC_MOVE, struct bucket, gc_mark, 15, 1);
 
 #include "journal.h"
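
Worth unpacking the new macro: shifting ~0ULL left by the field width clears exactly the low GC_SECTORS_USED_SIZE bits, and inverting keeps only those, so MAX_GC_SECTORS_USED is the largest value the 13-bit field can hold. A standalone sketch of the arithmetic (plain userspace C, not kernel code):

#include <stdio.h>

#define GC_SECTORS_USED_SIZE 13
#define MAX_GC_SECTORS_USED  (~(~0ULL << GC_SECTORS_USED_SIZE))

int main(void)
{
	/* ~0ULL << 13 zeroes the low 13 bits; ~ keeps only those bits. */
	printf("MAX_GC_SECTORS_USED = %llu\n", MAX_GC_SECTORS_USED);	/* 8191 */

	/* The old clamp bound needed 14 bits, one more than the field has: */
	printf("(1 << 14) - 1 = %d\n", (1 << 14) - 1);			/* 16383 */
	return 0;
}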


@@ -23,7 +23,7 @@ void bch_dump_bset(struct btree_keys *b, struct bset *i, unsigned set)
 	for (k = i->start; k < bset_bkey_last(i); k = next) {
 		next = bkey_next(k);
 
-		printk(KERN_ERR "block %u key %zi/%u: ", set,
+		printk(KERN_ERR "block %u key %li/%u: ", set,
 		       (uint64_t *) k - i->d, i->keys);
 
 		if (b->ops->key_dump)
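
The format fix is about argument types: subtracting two uint64_t pointers yields a ptrdiff_t, not a size_t, so %zi can draw a format warning where the underlying types differ. A userspace sketch of the mismatch; C99's %t length modifier is the strictly portable spelling, and %li presumably matches on the configurations the build robot flagged:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t d[8];
	uint64_t *k = &d[5];

	/* k - d is a ptrdiff_t; %ti is its dedicated C99 specifier. */
	printf("key %ti/%u\n", k - d, 8u);

	/* The patch uses %li; the cast here keeps the sketch warning-free
	 * even where long and ptrdiff_t are distinct types. */
	printf("key %li/%u\n", (long)(k - d), 8u);
	return 0;
}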
@@ -1185,9 +1185,12 @@ static void __btree_sort(struct btree_keys *b, struct btree_iter *iter,
 	struct bset *out = (void *) __get_free_pages(__GFP_NOWARN|GFP_NOIO,
 						     order);
 	if (!out) {
+		struct page *outp;
+
 		BUG_ON(order > state->page_order);
 
-		out = page_address(mempool_alloc(state->pool, GFP_NOIO));
+		outp = mempool_alloc(state->pool, GFP_NOIO);
+		out = page_address(outp);
 		used_mempool = true;
 		order = state->page_order;
 	}
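
Splitting the nested call into a typed temporary makes the types explicit: mempool_alloc() returns an element of the pool (a page here, per the page_order check), and page_address() converts that page to a virtual address. A rough userspace analogue of the fallback pattern; toy_mempool_alloc() and toy_page_address() are stand-ins, since the mempool API is kernel-only:

#include <stdio.h>
#include <stdlib.h>

struct page { char data[4096]; };

/* Stand-ins for mempool_alloc()/page_address(); not the real kernel API. */
static struct page *toy_mempool_alloc(void) { return malloc(sizeof(struct page)); }
static void *toy_page_address(struct page *p) { return (void *) p; }

int main(void)
{
	void *out = NULL;		/* pretend the fast-path allocation failed */
	int used_mempool = 0;

	if (!out) {
		struct page *outp;	/* typed temporary, as in the patch */

		outp = toy_mempool_alloc();
		if (!outp)
			return 1;
		out = toy_page_address(outp);
		used_mempool = 1;
	}

	printf("used_mempool = %d\n", used_mempool);
	free(out);			/* same address as outp in this toy */
	return 0;
}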


@@ -1167,7 +1167,7 @@ uint8_t __bch_btree_mark_key(struct cache_set *c, int level, struct bkey *k)
 		/* guard against overflow */
 		SET_GC_SECTORS_USED(g, min_t(unsigned,
 					     GC_SECTORS_USED(g) + KEY_SIZE(k),
-					     (1 << 14) - 1));
+					     MAX_GC_SECTORS_USED));
 
 		BUG_ON(!GC_SECTORS_USED(g));
 	}
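
This is the overflow the BUG_ON was catching: with the old (1 << 14) - 1 bound, a sum of exactly 8192 passes the clamp but reads back as zero from the 13-bit field. A quick userspace demonstration of the wraparound:

#include <stdio.h>

#define FIELD_MASK ((1u << 13) - 1)	/* 13-bit GC_SECTORS_USED field */

int main(void)
{
	unsigned old_clamp = (1 << 14) - 1;	/* 16383: too permissive */
	unsigned new_clamp = FIELD_MASK;	/* 8191: exact field maximum */

	/* 8192 sneaks past the old clamp, then loses its only set bit: */
	unsigned v = 8192 < old_clamp ? 8192 : old_clamp;
	printf("stored = %u\n", v & FIELD_MASK);	/* 0 -> BUG_ON fires */

	v = 8192 < new_clamp ? 8192 : new_clamp;
	printf("stored = %u\n", v & FIELD_MASK);	/* 8191: saturates safely */
	return 0;
}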
@@ -1805,7 +1805,7 @@ static bool btree_insert_key(struct btree *b, struct bkey *k,
 static size_t insert_u64s_remaining(struct btree *b)
 {
-	ssize_t ret = bch_btree_keys_u64s_remaining(&b->keys);
+	long ret = bch_btree_keys_u64s_remaining(&b->keys);
 
 	/*
 	 * Might land in the middle of an existing extent and have to split it
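
The type change looks cosmetic, but if the function returns max(ret, 0L) as in mainline, the kernel's max() refuses mixed types at compile time, and ssize_t is not long on every architecture. A minimal userspace version of that type check:

#include <stdio.h>

/* Simplified copy of the kernel's type-checking max() macro. */
#define max(x, y) ({				\
	typeof(x) _max1 = (x);			\
	typeof(y) _max2 = (y);			\
	(void) (&_max1 == &_max2);		\
	_max1 > _max2 ? _max1 : _max2; })

int main(void)
{
	long ret = -3;	/* declare as int instead and the pointer
			 * comparison above warns about distinct types */
	printf("%ld\n", max(ret, 0L));	/* prints 0 */
	return 0;
}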


@@ -194,7 +194,7 @@ static bool btree_ptr_bad_expensive(struct btree *b, const struct bkey *k)
 	mutex_unlock(&b->c->bucket_lock);
 	bch_extent_to_text(buf, sizeof(buf), k);
 	btree_bug(b,
-"inconsistent btree pointer %s: bucket %li pin %i prio %i gen %i last_gc %i mark %llu gc_gen %i",
+"inconsistent btree pointer %s: bucket %zi pin %i prio %i gen %i last_gc %i mark %llu gc_gen %i",
 		  buf, PTR_BUCKET_NR(b->c, k, i), atomic_read(&g->pin),
 		  g->prio, g->gen, g->last_gc, GC_MARK(g), g->gc_gen);
 	return true;
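
Same class of fix as the bset.c hunk, in the opposite direction: PTR_BUCKET_NR() evaluates to a size_t here (assuming the 3.14 sector_to_bucket() definition), so %zi is the matching specifier and %li was the mismatch. For reference, a tiny sketch:

#include <stdio.h>

int main(void)
{
	size_t bucket = 42;	/* stand-in for PTR_BUCKET_NR() */

	printf("bucket %zi\n", bucket);		/* %z: size_t width */
	printf("bucket %li\n", (long) bucket);	/* needs a cast to be safe */
	return 0;
}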


@@ -353,14 +353,14 @@ static void bch_data_insert_start(struct closure *cl)
 	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
 	struct bio *bio = op->bio, *n;
 
-	if (op->bypass)
-		return bch_data_invalidate(cl);
-
 	if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0) {
 		set_gc_sectors(op->c);
 		wake_up_gc(op->c);
 	}
 
+	if (op->bypass)
+		return bch_data_invalidate(cl);
+
 	/*
 	 * Journal writes are marked REQ_FLUSH; if the original write was a
 	 * flush, it'll wait on the journal write.
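
The reorder matters because bypassed writes return early through bch_data_invalidate(): with the old order they never hit the sectors_to_gc accounting, so bypass-heavy traffic could fill the cache without ever waking the gc thread. A userspace sketch of the corrected ordering, with stand-in names and a toy threshold:

#include <stdio.h>
#include <stdatomic.h>
#include <stdbool.h>

static atomic_int sectors_to_gc = 16;	/* stand-in gc budget */

static void data_insert_start(int sectors, bool bypass)
{
	/* Account the I/O toward gc first (the fix moves this up)... */
	if (atomic_fetch_sub(&sectors_to_gc, sectors) - sectors < 0) {
		printf("wake_up_gc()\n");
		atomic_store(&sectors_to_gc, 16);	/* set_gc_sectors() */
	}

	/* ...then take the bypass early-return. */
	if (bypass)
		return;		/* bch_data_invalidate() in the real code */

	/* normal cache-insert path would continue here */
}

int main(void)
{
	/* Even all-bypass traffic now wakes gc once the budget runs out. */
	for (int i = 0; i < 5; i++)
		data_insert_start(8, true);
	return 0;
}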


@@ -416,7 +416,7 @@ static int btree_bset_stats(struct btree_op *b_op, struct btree *b)
 	return MAP_CONTINUE;
 }
 
-int bch_bset_print_stats(struct cache_set *c, char *buf)
+static int bch_bset_print_stats(struct cache_set *c, char *buf)
 {
 	struct bset_stats_op op;
 	int ret;