dm snapshot: split out exception store implementations
Move the existing snapshot exception store implementations out into
separate files. Later patches will place these behind a new interface
in preparation for alternative implementations.

Signed-off-by: Alasdair G Kergon <agk@redhat.com>
parent 1ae25f9c93
commit 4db6bfe02b
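Both dm_create_persistent() and dm_create_transient() fill in the same set of callbacks on struct dm_exception_store, and that shared callback object is what the later interface work will build on. As a rough user-space sketch of that callback pattern (illustrative only; simplified stand-ins for the kernel's struct dm_exception_store and dm_create_transient(), not the code in this commit):

#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-in for the kernel's struct dm_exception_store; the
 * real one (drivers/md/dm-exception-store.h) carries more callbacks
 * such as prepare_exception, commit_exception and fraction_full. */
struct dm_exception_store {
	int  (*read_metadata)(struct dm_exception_store *store);
	void (*destroy)(struct dm_exception_store *store);
	void *context;
};

/* Transient stores keep no on-disk metadata, so reading it is a no-op;
 * this mirrors transient_read_metadata() in the new dm-snap-transient.c. */
static int transient_read_metadata(struct dm_exception_store *store)
{
	(void)store;
	return 0;
}

static void transient_destroy(struct dm_exception_store *store)
{
	free(store->context);
}

/* Analogue of dm_create_transient(): the implementation wires its
 * callbacks and private state into the caller-owned store object. */
static int dm_create_transient(struct dm_exception_store *store)
{
	store->read_metadata = transient_read_metadata;
	store->destroy = transient_destroy;
	store->context = calloc(1, sizeof(long));	/* stands in for next_free */
	return store->context ? 0 : -1;
}

int main(void)
{
	struct dm_exception_store store;

	if (dm_create_transient(&store) != 0)
		return 1;

	if (store.read_metadata(&store) == 0)
		printf("transient exception store initialised\n");

	store.destroy(&store);
	return 0;
}

The kernel versions hang the per-store state off store->context in the same way, as the new files below show.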
drivers/md/Makefile
@@ -5,7 +5,8 @@
 dm-mod-objs	:= dm.o dm-table.o dm-target.o dm-linear.o dm-stripe.o \
		   dm-ioctl.o dm-io.o dm-kcopyd.o dm-sysfs.o
 dm-multipath-objs := dm-path-selector.o dm-mpath.o
-dm-snapshot-objs := dm-snap.o dm-exception-store.o
+dm-snapshot-objs := dm-snap.o dm-exception-store.o dm-snap-transient.o \
+		    dm-snap-persistent.o
 dm-mirror-objs	:= dm-raid1.o
 md-mod-objs	:= md.o bitmap.o
 raid456-objs	:= raid5.o raid6algos.o raid6recov.o raid6tables.o \
drivers/md/dm-exception-store.c
@@ -1,757 +1,46 @@
/*
 * dm-exception-store.c
 *
 * Copyright (C) 2001-2002 Sistina Software (UK) Limited.
 * Copyright (C) 2006 Red Hat GmbH
 * Copyright (C) 2006-2008 Red Hat GmbH
 *
 * This file is released under the GPL.
 */

#include "dm-exception-store.h"
#include "dm-snap.h"

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/dm-io.h>
#include <linux/dm-kcopyd.h>
#include <linux/device-mapper.h>

#define DM_MSG_PREFIX "snapshots"
#define DM_CHUNK_SIZE_DEFAULT_SECTORS 32	/* 16KB */
#define DM_MSG_PREFIX "snapshot exception stores"

int dm_exception_store_init(void)
{
	int r;

	r = dm_transient_snapshot_init();
	if (r) {
		DMERR("Unable to register transient exception store type.");
		goto transient_fail;
	}

	r = dm_persistent_snapshot_init();
	if (r) {
		DMERR("Unable to register persistent exception store type");
		goto persistent_fail;
	}

	return 0;

persistent_fail:
	dm_persistent_snapshot_exit();
transient_fail:
	return r;
}

void dm_exception_store_exit(void)
{
	dm_persistent_snapshot_exit();
	dm_transient_snapshot_exit();
}
drivers/md/dm-exception-store.h
@@ -122,9 +122,18 @@ static inline void dm_consecutive_chunk_count_inc(struct dm_snap_exception *e)
 #  endif
 
+int dm_exception_store_init(void);
+void dm_exception_store_exit(void);
+
 /*
  * Two exception store implementations.
  */
+int dm_persistent_snapshot_init(void);
+void dm_persistent_snapshot_exit(void);
+
+int dm_transient_snapshot_init(void);
+void dm_transient_snapshot_exit(void);
+
 int dm_create_persistent(struct dm_exception_store *store);
 
 int dm_create_transient(struct dm_exception_store *store);
drivers/md/dm-snap-persistent.c (new file, 694 lines)
@@ -0,0 +1,694 @@
/*
 * Copyright (C) 2001-2002 Sistina Software (UK) Limited.
 * Copyright (C) 2006-2008 Red Hat GmbH
 *
 * This file is released under the GPL.
 */

#include "dm-exception-store.h"
#include "dm-snap.h"

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/dm-io.h>

#define DM_MSG_PREFIX "persistent snapshot"
#define DM_CHUNK_SIZE_DEFAULT_SECTORS 32	/* 16KB */

/*-----------------------------------------------------------------
 * Persistent snapshots, by persistent we mean that the snapshot
 * will survive a reboot.
 *---------------------------------------------------------------*/

/*
 * We need to store a record of which parts of the origin have
 * been copied to the snapshot device.  The snapshot code
 * requires that we copy exception chunks to chunk aligned areas
 * of the COW store.  It makes sense therefore, to store the
 * metadata in chunk size blocks.
 *
 * There is no backward or forward compatibility implemented,
 * snapshots with different disk versions than the kernel will
 * not be usable.  It is expected that "lvcreate" will blank out
 * the start of a fresh COW device before calling the snapshot
 * constructor.
 *
 * The first chunk of the COW device just contains the header.
 * After this there is a chunk filled with exception metadata,
 * followed by as many exception chunks as can fit in the
 * metadata areas.
 *
 * All on disk structures are in little-endian format.  The end
 * of the exceptions info is indicated by an exception with a
 * new_chunk of 0, which is invalid since it would point to the
 * header chunk.
 */

/*
 * Magic for persistent snapshots: "SnAp" - Feeble isn't it.
 */
#define SNAP_MAGIC 0x70416e53

/*
 * The on-disk version of the metadata.
 */
#define SNAPSHOT_DISK_VERSION 1

struct disk_header {
	uint32_t magic;

	/*
	 * Is this snapshot valid.  There is no way of recovering
	 * an invalid snapshot.
	 */
	uint32_t valid;

	/*
	 * Simple, incrementing version. no backward
	 * compatibility.
	 */
	uint32_t version;

	/* In sectors */
	uint32_t chunk_size;
};

struct disk_exception {
	uint64_t old_chunk;
	uint64_t new_chunk;
};

struct commit_callback {
	void (*callback)(void *, int success);
	void *context;
};

/*
 * The top level structure for a persistent exception store.
 */
struct pstore {
	struct dm_snapshot *snap;	/* up pointer to my snapshot */
	int version;
	int valid;
	uint32_t exceptions_per_area;

	/*
	 * Now that we have an asynchronous kcopyd there is no
	 * need for large chunk sizes, so it wont hurt to have a
	 * whole chunks worth of metadata in memory at once.
	 */
	void *area;

	/*
	 * An area of zeros used to clear the next area.
	 */
	void *zero_area;

	/*
	 * Used to keep track of which metadata area the data in
	 * 'chunk' refers to.
	 */
	chunk_t current_area;

	/*
	 * The next free chunk for an exception.
	 */
	chunk_t next_free;

	/*
	 * The index of next free exception in the current
	 * metadata area.
	 */
	uint32_t current_committed;

	atomic_t pending_count;
	uint32_t callback_count;
	struct commit_callback *callbacks;
	struct dm_io_client *io_client;

	struct workqueue_struct *metadata_wq;
};

static unsigned sectors_to_pages(unsigned sectors)
{
	return DIV_ROUND_UP(sectors, PAGE_SIZE >> 9);
}

static int alloc_area(struct pstore *ps)
{
	int r = -ENOMEM;
	size_t len;

	len = ps->snap->chunk_size << SECTOR_SHIFT;

	/*
	 * Allocate the chunk_size block of memory that will hold
	 * a single metadata area.
	 */
	ps->area = vmalloc(len);
	if (!ps->area)
		return r;

	ps->zero_area = vmalloc(len);
	if (!ps->zero_area) {
		vfree(ps->area);
		return r;
	}
	memset(ps->zero_area, 0, len);

	return 0;
}

static void free_area(struct pstore *ps)
{
	vfree(ps->area);
	ps->area = NULL;
	vfree(ps->zero_area);
	ps->zero_area = NULL;
}

struct mdata_req {
	struct dm_io_region *where;
	struct dm_io_request *io_req;
	struct work_struct work;
	int result;
};

static void do_metadata(struct work_struct *work)
{
	struct mdata_req *req = container_of(work, struct mdata_req, work);

	req->result = dm_io(req->io_req, 1, req->where, NULL);
}

/*
 * Read or write a chunk aligned and sized block of data from a device.
 */
static int chunk_io(struct pstore *ps, chunk_t chunk, int rw, int metadata)
{
	struct dm_io_region where = {
		.bdev = ps->snap->cow->bdev,
		.sector = ps->snap->chunk_size * chunk,
		.count = ps->snap->chunk_size,
	};
	struct dm_io_request io_req = {
		.bi_rw = rw,
		.mem.type = DM_IO_VMA,
		.mem.ptr.vma = ps->area,
		.client = ps->io_client,
		.notify.fn = NULL,
	};
	struct mdata_req req;

	if (!metadata)
		return dm_io(&io_req, 1, &where, NULL);

	req.where = &where;
	req.io_req = &io_req;

	/*
	 * Issue the synchronous I/O from a different thread
	 * to avoid generic_make_request recursion.
	 */
	INIT_WORK(&req.work, do_metadata);
	queue_work(ps->metadata_wq, &req.work);
	flush_workqueue(ps->metadata_wq);

	return req.result;
}

/*
 * Convert a metadata area index to a chunk index.
 */
static chunk_t area_location(struct pstore *ps, chunk_t area)
{
	return 1 + ((ps->exceptions_per_area + 1) * area);
}

/*
 * Read or write a metadata area.  Remembering to skip the first
 * chunk which holds the header.
 */
static int area_io(struct pstore *ps, int rw)
{
	int r;
	chunk_t chunk;

	chunk = area_location(ps, ps->current_area);

	r = chunk_io(ps, chunk, rw, 0);
	if (r)
		return r;

	return 0;
}

static void zero_memory_area(struct pstore *ps)
{
	memset(ps->area, 0, ps->snap->chunk_size << SECTOR_SHIFT);
}

static int zero_disk_area(struct pstore *ps, chunk_t area)
{
	struct dm_io_region where = {
		.bdev = ps->snap->cow->bdev,
		.sector = ps->snap->chunk_size * area_location(ps, area),
		.count = ps->snap->chunk_size,
	};
	struct dm_io_request io_req = {
		.bi_rw = WRITE,
		.mem.type = DM_IO_VMA,
		.mem.ptr.vma = ps->zero_area,
		.client = ps->io_client,
		.notify.fn = NULL,
	};

	return dm_io(&io_req, 1, &where, NULL);
}

static int read_header(struct pstore *ps, int *new_snapshot)
{
	int r;
	struct disk_header *dh;
	chunk_t chunk_size;
	int chunk_size_supplied = 1;

	/*
	 * Use default chunk size (or hardsect_size, if larger) if none supplied
	 */
	if (!ps->snap->chunk_size) {
		ps->snap->chunk_size = max(DM_CHUNK_SIZE_DEFAULT_SECTORS,
		    bdev_hardsect_size(ps->snap->cow->bdev) >> 9);
		ps->snap->chunk_mask = ps->snap->chunk_size - 1;
		ps->snap->chunk_shift = ffs(ps->snap->chunk_size) - 1;
		chunk_size_supplied = 0;
	}

	ps->io_client = dm_io_client_create(sectors_to_pages(ps->snap->
							     chunk_size));
	if (IS_ERR(ps->io_client))
		return PTR_ERR(ps->io_client);

	r = alloc_area(ps);
	if (r)
		return r;

	r = chunk_io(ps, 0, READ, 1);
	if (r)
		goto bad;

	dh = (struct disk_header *) ps->area;

	if (le32_to_cpu(dh->magic) == 0) {
		*new_snapshot = 1;
		return 0;
	}

	if (le32_to_cpu(dh->magic) != SNAP_MAGIC) {
		DMWARN("Invalid or corrupt snapshot");
		r = -ENXIO;
		goto bad;
	}

	*new_snapshot = 0;
	ps->valid = le32_to_cpu(dh->valid);
	ps->version = le32_to_cpu(dh->version);
	chunk_size = le32_to_cpu(dh->chunk_size);

	if (!chunk_size_supplied || ps->snap->chunk_size == chunk_size)
		return 0;

	DMWARN("chunk size %llu in device metadata overrides "
	       "table chunk size of %llu.",
	       (unsigned long long)chunk_size,
	       (unsigned long long)ps->snap->chunk_size);

	/* We had a bogus chunk_size. Fix stuff up. */
	free_area(ps);

	ps->snap->chunk_size = chunk_size;
	ps->snap->chunk_mask = chunk_size - 1;
	ps->snap->chunk_shift = ffs(chunk_size) - 1;

	r = dm_io_client_resize(sectors_to_pages(ps->snap->chunk_size),
				ps->io_client);
	if (r)
		return r;

	r = alloc_area(ps);
	return r;

bad:
	free_area(ps);
	return r;
}

static int write_header(struct pstore *ps)
{
	struct disk_header *dh;

	memset(ps->area, 0, ps->snap->chunk_size << SECTOR_SHIFT);

	dh = (struct disk_header *) ps->area;
	dh->magic = cpu_to_le32(SNAP_MAGIC);
	dh->valid = cpu_to_le32(ps->valid);
	dh->version = cpu_to_le32(ps->version);
	dh->chunk_size = cpu_to_le32(ps->snap->chunk_size);

	return chunk_io(ps, 0, WRITE, 1);
}

/*
 * Access functions for the disk exceptions, these do the endian conversions.
 */
static struct disk_exception *get_exception(struct pstore *ps, uint32_t index)
{
	BUG_ON(index >= ps->exceptions_per_area);

	return ((struct disk_exception *) ps->area) + index;
}

static void read_exception(struct pstore *ps,
			   uint32_t index, struct disk_exception *result)
{
	struct disk_exception *e = get_exception(ps, index);

	/* copy it */
	result->old_chunk = le64_to_cpu(e->old_chunk);
	result->new_chunk = le64_to_cpu(e->new_chunk);
}

static void write_exception(struct pstore *ps,
			    uint32_t index, struct disk_exception *de)
{
	struct disk_exception *e = get_exception(ps, index);

	/* copy it */
	e->old_chunk = cpu_to_le64(de->old_chunk);
	e->new_chunk = cpu_to_le64(de->new_chunk);
}

/*
 * Registers the exceptions that are present in the current area.
 * 'full' is filled in to indicate if the area has been
 * filled.
 */
static int insert_exceptions(struct pstore *ps, int *full)
{
	int r;
	unsigned int i;
	struct disk_exception de;

	/* presume the area is full */
	*full = 1;

	for (i = 0; i < ps->exceptions_per_area; i++) {
		read_exception(ps, i, &de);

		/*
		 * If the new_chunk is pointing at the start of
		 * the COW device, where the first metadata area
		 * is we know that we've hit the end of the
		 * exceptions.  Therefore the area is not full.
		 */
		if (de.new_chunk == 0LL) {
			ps->current_committed = i;
			*full = 0;
			break;
		}

		/*
		 * Keep track of the start of the free chunks.
		 */
		if (ps->next_free <= de.new_chunk)
			ps->next_free = de.new_chunk + 1;

		/*
		 * Otherwise we add the exception to the snapshot.
		 */
		r = dm_add_exception(ps->snap, de.old_chunk, de.new_chunk);
		if (r)
			return r;
	}

	return 0;
}

static int read_exceptions(struct pstore *ps)
{
	int r, full = 1;

	/*
	 * Keeping reading chunks and inserting exceptions until
	 * we find a partially full area.
	 */
	for (ps->current_area = 0; full; ps->current_area++) {
		r = area_io(ps, READ);
		if (r)
			return r;

		r = insert_exceptions(ps, &full);
		if (r)
			return r;
	}

	ps->current_area--;

	return 0;
}

static struct pstore *get_info(struct dm_exception_store *store)
{
	return (struct pstore *) store->context;
}

static void persistent_fraction_full(struct dm_exception_store *store,
				     sector_t *numerator, sector_t *denominator)
{
	*numerator = get_info(store)->next_free * store->snap->chunk_size;
	*denominator = get_dev_size(store->snap->cow->bdev);
}

static void persistent_destroy(struct dm_exception_store *store)
{
	struct pstore *ps = get_info(store);

	destroy_workqueue(ps->metadata_wq);
	dm_io_client_destroy(ps->io_client);
	vfree(ps->callbacks);
	free_area(ps);
	kfree(ps);
}

static int persistent_read_metadata(struct dm_exception_store *store)
{
	int r, uninitialized_var(new_snapshot);
	struct pstore *ps = get_info(store);

	/*
	 * Read the snapshot header.
	 */
	r = read_header(ps, &new_snapshot);
	if (r)
		return r;

	/*
	 * Now we know correct chunk_size, complete the initialisation.
	 */
	ps->exceptions_per_area = (ps->snap->chunk_size << SECTOR_SHIFT) /
				  sizeof(struct disk_exception);
	ps->callbacks = dm_vcalloc(ps->exceptions_per_area,
				   sizeof(*ps->callbacks));
	if (!ps->callbacks)
		return -ENOMEM;

	/*
	 * Do we need to setup a new snapshot ?
	 */
	if (new_snapshot) {
		r = write_header(ps);
		if (r) {
			DMWARN("write_header failed");
			return r;
		}

		ps->current_area = 0;
		zero_memory_area(ps);
		r = zero_disk_area(ps, 0);
		if (r) {
			DMWARN("zero_disk_area(0) failed");
			return r;
		}
	} else {
		/*
		 * Sanity checks.
		 */
		if (ps->version != SNAPSHOT_DISK_VERSION) {
			DMWARN("unable to handle snapshot disk version %d",
			       ps->version);
			return -EINVAL;
		}

		/*
		 * Metadata are valid, but snapshot is invalidated
		 */
		if (!ps->valid)
			return 1;

		/*
		 * Read the metadata.
		 */
		r = read_exceptions(ps);
		if (r)
			return r;
	}

	return 0;
}

static int persistent_prepare(struct dm_exception_store *store,
			      struct dm_snap_exception *e)
{
	struct pstore *ps = get_info(store);
	uint32_t stride;
	chunk_t next_free;
	sector_t size = get_dev_size(store->snap->cow->bdev);

	/* Is there enough room ? */
	if (size < ((ps->next_free + 1) * store->snap->chunk_size))
		return -ENOSPC;

	e->new_chunk = ps->next_free;

	/*
	 * Move onto the next free pending, making sure to take
	 * into account the location of the metadata chunks.
	 */
	stride = (ps->exceptions_per_area + 1);
	next_free = ++ps->next_free;
	if (sector_div(next_free, stride) == 1)
		ps->next_free++;

	atomic_inc(&ps->pending_count);
	return 0;
}

static void persistent_commit(struct dm_exception_store *store,
			      struct dm_snap_exception *e,
			      void (*callback) (void *, int success),
			      void *callback_context)
{
	unsigned int i;
	struct pstore *ps = get_info(store);
	struct disk_exception de;
	struct commit_callback *cb;

	de.old_chunk = e->old_chunk;
	de.new_chunk = e->new_chunk;
	write_exception(ps, ps->current_committed++, &de);

	/*
	 * Add the callback to the back of the array.  This code
	 * is the only place where the callback array is
	 * manipulated, and we know that it will never be called
	 * multiple times concurrently.
	 */
	cb = ps->callbacks + ps->callback_count++;
	cb->callback = callback;
	cb->context = callback_context;

	/*
	 * If there are exceptions in flight and we have not yet
	 * filled this metadata area there's nothing more to do.
	 */
	if (!atomic_dec_and_test(&ps->pending_count) &&
	    (ps->current_committed != ps->exceptions_per_area))
		return;

	/*
	 * If we completely filled the current area, then wipe the next one.
	 */
	if ((ps->current_committed == ps->exceptions_per_area) &&
	    zero_disk_area(ps, ps->current_area + 1))
		ps->valid = 0;

	/*
	 * Commit exceptions to disk.
	 */
	if (ps->valid && area_io(ps, WRITE))
		ps->valid = 0;

	/*
	 * Advance to the next area if this one is full.
	 */
	if (ps->current_committed == ps->exceptions_per_area) {
		ps->current_committed = 0;
		ps->current_area++;
		zero_memory_area(ps);
	}

	for (i = 0; i < ps->callback_count; i++) {
		cb = ps->callbacks + i;
		cb->callback(cb->context, ps->valid);
	}

	ps->callback_count = 0;
}

static void persistent_drop(struct dm_exception_store *store)
{
	struct pstore *ps = get_info(store);

	ps->valid = 0;
	if (write_header(ps))
		DMWARN("write header failed");
}

int dm_create_persistent(struct dm_exception_store *store)
{
	struct pstore *ps;

	/* allocate the pstore */
	ps = kmalloc(sizeof(*ps), GFP_KERNEL);
	if (!ps)
		return -ENOMEM;

	ps->snap = store->snap;
	ps->valid = 1;
	ps->version = SNAPSHOT_DISK_VERSION;
	ps->area = NULL;
	ps->next_free = 2;	/* skipping the header and first area */
	ps->current_committed = 0;

	ps->callback_count = 0;
	atomic_set(&ps->pending_count, 0);
	ps->callbacks = NULL;

	ps->metadata_wq = create_singlethread_workqueue("ksnaphd");
	if (!ps->metadata_wq) {
		kfree(ps);
		DMERR("couldn't start header metadata update thread");
		return -ENOMEM;
	}

	store->destroy = persistent_destroy;
	store->read_metadata = persistent_read_metadata;
	store->prepare_exception = persistent_prepare;
	store->commit_exception = persistent_commit;
	store->drop_snapshot = persistent_drop;
	store->fraction_full = persistent_fraction_full;
	store->context = ps;

	return 0;
}

int dm_persistent_snapshot_init(void)
{
	return 0;
}

void dm_persistent_snapshot_exit(void)
{
}
drivers/md/dm-snap-transient.c (new file, 95 lines)
@@ -0,0 +1,95 @@
/*
 * Copyright (C) 2001-2002 Sistina Software (UK) Limited.
 * Copyright (C) 2006-2008 Red Hat GmbH
 *
 * This file is released under the GPL.
 */

#include "dm-exception-store.h"
#include "dm-snap.h"

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/dm-io.h>

#define DM_MSG_PREFIX "transient snapshot"

/*-----------------------------------------------------------------
 * Implementation of the store for non-persistent snapshots.
 *---------------------------------------------------------------*/
struct transient_c {
	sector_t next_free;
};

static void transient_destroy(struct dm_exception_store *store)
{
	kfree(store->context);
}

static int transient_read_metadata(struct dm_exception_store *store)
{
	return 0;
}

static int transient_prepare(struct dm_exception_store *store,
			     struct dm_snap_exception *e)
{
	struct transient_c *tc = (struct transient_c *) store->context;
	sector_t size = get_dev_size(store->snap->cow->bdev);

	if (size < (tc->next_free + store->snap->chunk_size))
		return -1;

	e->new_chunk = sector_to_chunk(store->snap, tc->next_free);
	tc->next_free += store->snap->chunk_size;

	return 0;
}

static void transient_commit(struct dm_exception_store *store,
			     struct dm_snap_exception *e,
			     void (*callback) (void *, int success),
			     void *callback_context)
{
	/* Just succeed */
	callback(callback_context, 1);
}

static void transient_fraction_full(struct dm_exception_store *store,
				    sector_t *numerator, sector_t *denominator)
{
	*numerator = ((struct transient_c *) store->context)->next_free;
	*denominator = get_dev_size(store->snap->cow->bdev);
}

int dm_create_transient(struct dm_exception_store *store)
{
	struct transient_c *tc;

	store->destroy = transient_destroy;
	store->read_metadata = transient_read_metadata;
	store->prepare_exception = transient_prepare;
	store->commit_exception = transient_commit;
	store->drop_snapshot = NULL;
	store->fraction_full = transient_fraction_full;

	tc = kmalloc(sizeof(struct transient_c), GFP_KERNEL);
	if (!tc)
		return -ENOMEM;

	tc->next_free = 0;
	store->context = tc;

	return 0;
}

int dm_transient_snapshot_init(void)
{
	return 0;
}

void dm_transient_snapshot_exit(void)
{
}
drivers/md/dm-snap.c
@@ -1406,6 +1406,12 @@ static int __init dm_snapshot_init(void)
 {
 	int r;
 
+	r = dm_exception_store_init();
+	if (r) {
+		DMERR("Failed to initialize exception stores");
+		return r;
+	}
+
 	r = dm_register_target(&snapshot_target);
 	if (r) {
 		DMERR("snapshot target register failed %d", r);
@@ -1454,17 +1460,17 @@ static int __init dm_snapshot_init(void)
 	return 0;
 
 bad_pending_pool:
 	kmem_cache_destroy(tracked_chunk_cache);
 bad5:
 	kmem_cache_destroy(pending_cache);
 bad4:
 	kmem_cache_destroy(exception_cache);
 bad3:
 	exit_origin_hash();
 bad2:
 	dm_unregister_target(&origin_target);
 bad1:
 	dm_unregister_target(&snapshot_target);
 	return r;
 }
@@ -1480,6 +1486,8 @@ static void __exit dm_snapshot_exit(void)
 	kmem_cache_destroy(pending_cache);
 	kmem_cache_destroy(exception_cache);
 	kmem_cache_destroy(tracked_chunk_cache);
+
+	dm_exception_store_exit();
 }
 
 /* Module hooks */