kernel_optimize_test/drivers/md/persistent-data/dm-transaction-manager.c
Joe Thornber a9d45396f5 dm transaction manager: fix corruption due to non-atomic transaction commit
The persistent-data library used by dm-thin, dm-cache, etc is
transactional.  If anything goes wrong, such as an io error when writing
new metadata or a power failure, then we roll back to the last
transaction.

Atomicity when committing a transaction is achieved by:

a) Never overwriting data from the previous transaction.
b) Writing the superblock last, after all other metadata has hit the
   disk.

This commit and the following commit ("dm: take care to copy the space
map roots before locking the superblock") fix a bug associated with (b).
When committing it was possible for the superblock to still be written
in spite of an io error occurring during the preceding metadata flush.
With these commits we're careful not to take out the write lock on the
superblock until after the metadata flush has completed.

Change the transaction manager's semantics for dm_tm_commit() to assume
all data has been flushed _before_ the single superblock that is passed
in.

As a prerequisite, split the block manager's block unlocking and
flushing by simplifying dm_bm_flush_and_unlock() to dm_bm_flush().  Now
the unlocking must be done separately.
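
For illustration only (this sketch is not part of the patch, and
SUPERBLOCK_LOCATION and sb_validator are placeholder names), the
sequence a caller follows to commit is now roughly:

	struct dm_block *sblock;
	int r;

	/* 1) flush all metadata changed in this transaction */
	r = dm_tm_pre_commit(tm);
	if (r < 0)
		return r;

	/* 2) only now take the write lock on the superblock */
	r = dm_bm_write_lock(dm_tm_get_bm(tm), SUPERBLOCK_LOCATION,
			     sb_validator, &sblock);
	if (r < 0)
		return r;

	/* ... update the superblock, e.g. copy in the space map roots ... */

	/* 3) unlock and flush the superblock, completing the transaction */
	return dm_tm_commit(tm, sblock);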

This issue was discovered by forcing io errors at the crucial time
using dm-flakey.

Signed-off-by: Joe Thornber <ejt@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Cc: stable@vger.kernel.org
2014-03-27 16:56:23 -04:00


/*
 * Copyright (C) 2011 Red Hat, Inc.
 *
 * This file is released under the GPL.
 */

#include "dm-transaction-manager.h"
#include "dm-space-map.h"
#include "dm-space-map-disk.h"
#include "dm-space-map-metadata.h"
#include "dm-persistent-data-internal.h"
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/device-mapper.h>
#define DM_MSG_PREFIX "transaction manager"
/*----------------------------------------------------------------*/
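
/*
 * Each entry records a block that has already been shadowed (copied)
 * during the current transaction, so it can be written in place rather
 * than shadowed a second time.
 */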
struct shadow_info {
	struct hlist_node hlist;
	dm_block_t where;
};

/*
 * It would be nice if we scaled with the size of transaction.
 */
#define DM_HASH_SIZE 256
#define DM_HASH_MASK (DM_HASH_SIZE - 1)

struct dm_transaction_manager {
	int is_clone;
	struct dm_transaction_manager *real;

	struct dm_block_manager *bm;
	struct dm_space_map *sm;

	spinlock_t lock;
	struct hlist_head buckets[DM_HASH_SIZE];
};

/*----------------------------------------------------------------*/

static int is_shadow(struct dm_transaction_manager *tm, dm_block_t b)
{
	int r = 0;
	unsigned bucket = dm_hash_block(b, DM_HASH_MASK);
	struct shadow_info *si;

	spin_lock(&tm->lock);
	hlist_for_each_entry(si, tm->buckets + bucket, hlist)
		if (si->where == b) {
			r = 1;
			break;
		}
	spin_unlock(&tm->lock);

	return r;
}

/*
 * This can silently fail if there's no memory.  We're ok with this since
 * creating redundant shadows causes no harm.
 */
static void insert_shadow(struct dm_transaction_manager *tm, dm_block_t b)
{
	unsigned bucket;
	struct shadow_info *si;

	si = kmalloc(sizeof(*si), GFP_NOIO);
	if (si) {
		si->where = b;
		bucket = dm_hash_block(b, DM_HASH_MASK);
		spin_lock(&tm->lock);
		hlist_add_head(&si->hlist, tm->buckets + bucket);
		spin_unlock(&tm->lock);
	}
}

static void wipe_shadow_table(struct dm_transaction_manager *tm)
{
	struct shadow_info *si;
	struct hlist_node *tmp;
	struct hlist_head *bucket;
	int i;

	spin_lock(&tm->lock);
	for (i = 0; i < DM_HASH_SIZE; i++) {
		bucket = tm->buckets + i;
		hlist_for_each_entry_safe(si, tmp, bucket, hlist)
			kfree(si);

		INIT_HLIST_HEAD(bucket);
	}

	spin_unlock(&tm->lock);
}

/*----------------------------------------------------------------*/

static struct dm_transaction_manager *dm_tm_create(struct dm_block_manager *bm,
						   struct dm_space_map *sm)
{
	int i;
	struct dm_transaction_manager *tm;

	tm = kmalloc(sizeof(*tm), GFP_KERNEL);
	if (!tm)
		return ERR_PTR(-ENOMEM);

	tm->is_clone = 0;
	tm->real = NULL;
	tm->bm = bm;
	tm->sm = sm;

	spin_lock_init(&tm->lock);
	for (i = 0; i < DM_HASH_SIZE; i++)
		INIT_HLIST_HEAD(tm->buckets + i);

	return tm;
}

struct dm_transaction_manager *dm_tm_create_non_blocking_clone(struct dm_transaction_manager *real)
{
	struct dm_transaction_manager *tm;

	tm = kmalloc(sizeof(*tm), GFP_KERNEL);
	if (tm) {
		tm->is_clone = 1;
		tm->real = real;
	}

	return tm;
}
EXPORT_SYMBOL_GPL(dm_tm_create_non_blocking_clone);

void dm_tm_destroy(struct dm_transaction_manager *tm)
{
	if (!tm->is_clone)
		wipe_shadow_table(tm);

	kfree(tm);
}
EXPORT_SYMBOL_GPL(dm_tm_destroy);

int dm_tm_pre_commit(struct dm_transaction_manager *tm)
{
	int r;

	if (tm->is_clone)
		return -EWOULDBLOCK;

	r = dm_sm_commit(tm->sm);
	if (r < 0)
		return r;

	return dm_bm_flush(tm->bm);
}
EXPORT_SYMBOL_GPL(dm_tm_pre_commit);

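/*
 * The caller must have called dm_tm_pre_commit() first, so everything
 * except @root has already hit the disk, and must hold @root as the
 * only remaining write lock.  Unlocking @root lets it be written back,
 * and the final flush forces it out, completing the transaction.
 */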
int dm_tm_commit(struct dm_transaction_manager *tm, struct dm_block *root)
{
	if (tm->is_clone)
		return -EWOULDBLOCK;

	wipe_shadow_table(tm);
	dm_bm_unlock(root);

	return dm_bm_flush(tm->bm);
}
EXPORT_SYMBOL_GPL(dm_tm_commit);

int dm_tm_new_block(struct dm_transaction_manager *tm,
		    struct dm_block_validator *v,
		    struct dm_block **result)
{
	int r;
	dm_block_t new_block;

	if (tm->is_clone)
		return -EWOULDBLOCK;

	r = dm_sm_new_block(tm->sm, &new_block);
	if (r < 0)
		return r;

	r = dm_bm_write_lock_zero(tm->bm, new_block, v, result);
	if (r < 0) {
		dm_sm_dec_block(tm->sm, new_block);
		return r;
	}

	/*
	 * New blocks count as shadows in that they don't need to be
	 * shadowed again.
	 */
	insert_shadow(tm, new_block);

	return 0;
}

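/*
 * Copy @orig into a freshly allocated block.  The copy becomes the
 * writable version of the block for this transaction; the reference
 * held on @orig is dropped.
 */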
static int __shadow_block(struct dm_transaction_manager *tm, dm_block_t orig,
			  struct dm_block_validator *v,
			  struct dm_block **result)
{
	int r;
	dm_block_t new;
	struct dm_block *orig_block;

	r = dm_sm_new_block(tm->sm, &new);
	if (r < 0)
		return r;

	r = dm_sm_dec_block(tm->sm, orig);
	if (r < 0)
		return r;

	r = dm_bm_read_lock(tm->bm, orig, v, &orig_block);
	if (r < 0)
		return r;

	/*
	 * It would be tempting to use dm_bm_unlock_move here, but some
	 * code, such as the space maps, keeps using the old data structures
	 * secure in the knowledge they won't be changed until the next
	 * transaction.  Using unlock_move would force a synchronous read
	 * since the old block would no longer be in the cache.
	 */
	r = dm_bm_write_lock_zero(tm->bm, new, v, result);
	if (r) {
		dm_bm_unlock(orig_block);
		return r;
	}

	memcpy(dm_block_data(*result), dm_block_data(orig_block),
	       dm_bm_block_size(tm->bm));

	dm_bm_unlock(orig_block);
	return r;
}

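/*
 * A block that has already been shadowed this transaction can simply be
 * write locked again.  *inc_children is set when @orig is shared (its
 * reference count is above one), telling the caller to increment the
 * counts of any blocks the shadow references.
 */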
int dm_tm_shadow_block(struct dm_transaction_manager *tm, dm_block_t orig,
		       struct dm_block_validator *v, struct dm_block **result,
		       int *inc_children)
{
	int r;

	if (tm->is_clone)
		return -EWOULDBLOCK;

	r = dm_sm_count_is_more_than_one(tm->sm, orig, inc_children);
	if (r < 0)
		return r;

	if (is_shadow(tm, orig) && !*inc_children)
		return dm_bm_write_lock(tm->bm, orig, v, result);

	r = __shadow_block(tm, orig, v, result);
	if (r < 0)
		return r;
	insert_shadow(tm, dm_block_location(*result));

	return r;
}
EXPORT_SYMBOL_GPL(dm_tm_shadow_block);

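/*
 * The non-blocking clone must not sleep, so it can only use try locks,
 * taken against the real transaction manager's block manager.
 */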
int dm_tm_read_lock(struct dm_transaction_manager *tm, dm_block_t b,
		    struct dm_block_validator *v,
		    struct dm_block **blk)
{
	if (tm->is_clone)
		return dm_bm_read_try_lock(tm->real->bm, b, v, blk);

	return dm_bm_read_lock(tm->bm, b, v, blk);
}
EXPORT_SYMBOL_GPL(dm_tm_read_lock);

int dm_tm_unlock(struct dm_transaction_manager *tm, struct dm_block *b)
{
	return dm_bm_unlock(b);
}
EXPORT_SYMBOL_GPL(dm_tm_unlock);

void dm_tm_inc(struct dm_transaction_manager *tm, dm_block_t b)
{
	/*
	 * The non-blocking clone doesn't support this.
	 */
	BUG_ON(tm->is_clone);

	dm_sm_inc_block(tm->sm, b);
}
EXPORT_SYMBOL_GPL(dm_tm_inc);

void dm_tm_dec(struct dm_transaction_manager *tm, dm_block_t b)
{
	/*
	 * The non-blocking clone doesn't support this.
	 */
	BUG_ON(tm->is_clone);

	dm_sm_dec_block(tm->sm, b);
}
EXPORT_SYMBOL_GPL(dm_tm_dec);

int dm_tm_ref(struct dm_transaction_manager *tm, dm_block_t b,
	      uint32_t *result)
{
	if (tm->is_clone)
		return -EWOULDBLOCK;

	return dm_sm_get_count(tm->sm, b, result);
}

struct dm_block_manager *dm_tm_get_bm(struct dm_transaction_manager *tm)
{
	return tm->bm;
}

/*----------------------------------------------------------------*/
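
/*
 * Builds a transaction manager together with the metadata space map it
 * manages, either formatting a fresh space map (create) or reading an
 * existing one back from the root passed in @sm_root.
 */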
static int dm_tm_create_internal(struct dm_block_manager *bm,
				 dm_block_t sb_location,
				 struct dm_transaction_manager **tm,
				 struct dm_space_map **sm,
				 int create,
				 void *sm_root, size_t sm_len)
{
	int r;

	*sm = dm_sm_metadata_init();
	if (IS_ERR(*sm))
		return PTR_ERR(*sm);

	*tm = dm_tm_create(bm, *sm);
	if (IS_ERR(*tm)) {
		dm_sm_destroy(*sm);
		return PTR_ERR(*tm);
	}

	if (create) {
		r = dm_sm_metadata_create(*sm, *tm, dm_bm_nr_blocks(bm),
					  sb_location);
		if (r) {
			DMERR("couldn't create metadata space map");
			goto bad;
		}

	} else {
		r = dm_sm_metadata_open(*sm, *tm, sm_root, sm_len);
		if (r) {
			DMERR("couldn't open metadata space map");
			goto bad;
		}
	}

	return 0;

bad:
	dm_tm_destroy(*tm);
	dm_sm_destroy(*sm);
	return r;
}

int dm_tm_create_with_sm(struct dm_block_manager *bm, dm_block_t sb_location,
			 struct dm_transaction_manager **tm,
			 struct dm_space_map **sm)
{
	return dm_tm_create_internal(bm, sb_location, tm, sm, 1, NULL, 0);
}
EXPORT_SYMBOL_GPL(dm_tm_create_with_sm);

int dm_tm_open_with_sm(struct dm_block_manager *bm, dm_block_t sb_location,
		       void *sm_root, size_t root_len,
		       struct dm_transaction_manager **tm,
		       struct dm_space_map **sm)
{
	return dm_tm_create_internal(bm, sb_location, tm, sm, 0, sm_root, root_len);
}
EXPORT_SYMBOL_GPL(dm_tm_open_with_sm);

/*----------------------------------------------------------------*/