blob: 1799205cd945745e548106479ddd3d32ad1b7853 [file] [log] [blame]
/*
 * Copyright (C) 2001-2002 Sistina Software (UK) Limited.
 * Copyright (C) 2006-2008 Red Hat GmbH
 *
 * This file is released under the GPL.
 */
7
#include "dm-exception-store.h"

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/dm-io.h>

#define DM_MSG_PREFIX "persistent snapshot"
#define DM_CHUNK_SIZE_DEFAULT_SECTORS 32	/* 16KB */
18
/*-----------------------------------------------------------------
 * Persistent snapshots, by persistent we mean that the snapshot
 * will survive a reboot.
 *---------------------------------------------------------------*/

/*
 * We need to store a record of which parts of the origin have
 * been copied to the snapshot device.  The snapshot code
 * requires that we copy exception chunks to chunk aligned areas
 * of the COW store.  It makes sense therefore, to store the
 * metadata in chunk size blocks.
 *
 * There is no backward or forward compatibility implemented,
 * snapshots with different disk versions than the kernel will
 * not be usable.  It is expected that "lvcreate" will blank out
 * the start of a fresh COW device before calling the snapshot
 * constructor.
 *
 * The first chunk of the COW device just contains the header.
 * After this there is a chunk filled with exception metadata,
 * followed by as many exception chunks as can fit in the
 * metadata areas.
 *
 * All on disk structures are in little-endian format.  The end
 * of the exceptions info is indicated by an exception with a
 * new_chunk of 0, which is invalid since it would point to the
 * header chunk.
 */
47
/*
 * Magic for persistent snapshots: "SnAp" - Feeble isn't it.
 */
#define SNAP_MAGIC 0x70416e53

/*
 * The on-disk version of the metadata.
 */
#define SNAPSHOT_DISK_VERSION 1
57
/*
 * On-disk layout of the COW header chunk.  Every field is stored
 * little-endian; read_header()/write_header() do the conversions.
 * The layout must never change without bumping SNAPSHOT_DISK_VERSION.
 */
struct disk_header {
	uint32_t magic;

	/*
	 * Non-zero while the snapshot is usable.  Once cleared there
	 * is no way of recovering the snapshot.
	 */
	uint32_t valid;

	/*
	 * Simple, incrementing version.  No backward compatibility.
	 */
	uint32_t version;

	/* Chunk size in 512-byte sectors. */
	uint32_t chunk_size;
};
76
/*
 * One on-disk exception record: maps an origin chunk (old_chunk) to
 * the COW chunk (new_chunk) that holds its preserved data.  Stored
 * little-endian; new_chunk == 0 terminates the list of an area.
 */
struct disk_exception {
	uint64_t old_chunk;
	uint64_t new_chunk;
};
81
/*
 * A completion callback queued by persistent_commit_exception() and
 * invoked once the metadata area containing the exception has been
 * written out (or the store has been invalidated).
 */
struct commit_callback {
	void (*callback)(void *, int success);
	void *context;
};
86
87/*
88 * The top level structure for a persistent exception store.
89 */
90struct pstore {
Jonathan Brassow71fab002009-04-02 19:55:33 +010091 struct dm_exception_store *store;
Alasdair G Kergon4db6bfe2009-01-06 03:05:17 +000092 int version;
93 int valid;
94 uint32_t exceptions_per_area;
95
96 /*
97 * Now that we have an asynchronous kcopyd there is no
98 * need for large chunk sizes, so it wont hurt to have a
99 * whole chunks worth of metadata in memory at once.
100 */
101 void *area;
102
103 /*
104 * An area of zeros used to clear the next area.
105 */
106 void *zero_area;
107
108 /*
109 * Used to keep track of which metadata area the data in
110 * 'chunk' refers to.
111 */
112 chunk_t current_area;
113
114 /*
115 * The next free chunk for an exception.
116 */
117 chunk_t next_free;
118
119 /*
120 * The index of next free exception in the current
121 * metadata area.
122 */
123 uint32_t current_committed;
124
125 atomic_t pending_count;
126 uint32_t callback_count;
127 struct commit_callback *callbacks;
128 struct dm_io_client *io_client;
129
130 struct workqueue_struct *metadata_wq;
131};
132
133static unsigned sectors_to_pages(unsigned sectors)
134{
135 return DIV_ROUND_UP(sectors, PAGE_SIZE >> 9);
136}
137
138static int alloc_area(struct pstore *ps)
139{
140 int r = -ENOMEM;
141 size_t len;
142
Jonathan Brassow71fab002009-04-02 19:55:33 +0100143 len = ps->store->chunk_size << SECTOR_SHIFT;
Alasdair G Kergon4db6bfe2009-01-06 03:05:17 +0000144
145 /*
146 * Allocate the chunk_size block of memory that will hold
147 * a single metadata area.
148 */
149 ps->area = vmalloc(len);
150 if (!ps->area)
151 return r;
152
153 ps->zero_area = vmalloc(len);
154 if (!ps->zero_area) {
155 vfree(ps->area);
156 return r;
157 }
158 memset(ps->zero_area, 0, len);
159
160 return 0;
161}
162
163static void free_area(struct pstore *ps)
164{
165 vfree(ps->area);
166 ps->area = NULL;
167 vfree(ps->zero_area);
168 ps->zero_area = NULL;
169}
170
171struct mdata_req {
172 struct dm_io_region *where;
173 struct dm_io_request *io_req;
174 struct work_struct work;
175 int result;
176};
177
178static void do_metadata(struct work_struct *work)
179{
180 struct mdata_req *req = container_of(work, struct mdata_req, work);
181
182 req->result = dm_io(req->io_req, 1, req->where, NULL);
183}
184
185/*
186 * Read or write a chunk aligned and sized block of data from a device.
187 */
188static int chunk_io(struct pstore *ps, chunk_t chunk, int rw, int metadata)
189{
190 struct dm_io_region where = {
Jonathan Brassow71fab002009-04-02 19:55:33 +0100191 .bdev = ps->store->cow->bdev,
192 .sector = ps->store->chunk_size * chunk,
193 .count = ps->store->chunk_size,
Alasdair G Kergon4db6bfe2009-01-06 03:05:17 +0000194 };
195 struct dm_io_request io_req = {
196 .bi_rw = rw,
197 .mem.type = DM_IO_VMA,
198 .mem.ptr.vma = ps->area,
199 .client = ps->io_client,
200 .notify.fn = NULL,
201 };
202 struct mdata_req req;
203
204 if (!metadata)
205 return dm_io(&io_req, 1, &where, NULL);
206
207 req.where = &where;
208 req.io_req = &io_req;
209
210 /*
211 * Issue the synchronous I/O from a different thread
212 * to avoid generic_make_request recursion.
213 */
214 INIT_WORK(&req.work, do_metadata);
215 queue_work(ps->metadata_wq, &req.work);
216 flush_workqueue(ps->metadata_wq);
217
218 return req.result;
219}
220
221/*
222 * Convert a metadata area index to a chunk index.
223 */
224static chunk_t area_location(struct pstore *ps, chunk_t area)
225{
226 return 1 + ((ps->exceptions_per_area + 1) * area);
227}
228
229/*
230 * Read or write a metadata area. Remembering to skip the first
231 * chunk which holds the header.
232 */
233static int area_io(struct pstore *ps, int rw)
234{
235 int r;
236 chunk_t chunk;
237
238 chunk = area_location(ps, ps->current_area);
239
240 r = chunk_io(ps, chunk, rw, 0);
241 if (r)
242 return r;
243
244 return 0;
245}
246
247static void zero_memory_area(struct pstore *ps)
248{
Jonathan Brassow71fab002009-04-02 19:55:33 +0100249 memset(ps->area, 0, ps->store->chunk_size << SECTOR_SHIFT);
Alasdair G Kergon4db6bfe2009-01-06 03:05:17 +0000250}
251
252static int zero_disk_area(struct pstore *ps, chunk_t area)
253{
254 struct dm_io_region where = {
Jonathan Brassow71fab002009-04-02 19:55:33 +0100255 .bdev = ps->store->cow->bdev,
256 .sector = ps->store->chunk_size * area_location(ps, area),
257 .count = ps->store->chunk_size,
Alasdair G Kergon4db6bfe2009-01-06 03:05:17 +0000258 };
259 struct dm_io_request io_req = {
260 .bi_rw = WRITE,
261 .mem.type = DM_IO_VMA,
262 .mem.ptr.vma = ps->zero_area,
263 .client = ps->io_client,
264 .notify.fn = NULL,
265 };
266
267 return dm_io(&io_req, 1, &where, NULL);
268}
269
270static int read_header(struct pstore *ps, int *new_snapshot)
271{
272 int r;
273 struct disk_header *dh;
274 chunk_t chunk_size;
275 int chunk_size_supplied = 1;
276
277 /*
278 * Use default chunk size (or hardsect_size, if larger) if none supplied
279 */
Jonathan Brassow71fab002009-04-02 19:55:33 +0100280 if (!ps->store->chunk_size) {
281 ps->store->chunk_size = max(DM_CHUNK_SIZE_DEFAULT_SECTORS,
282 bdev_hardsect_size(ps->store->cow->bdev) >> 9);
283 ps->store->chunk_mask = ps->store->chunk_size - 1;
284 ps->store->chunk_shift = ffs(ps->store->chunk_size) - 1;
Alasdair G Kergon4db6bfe2009-01-06 03:05:17 +0000285 chunk_size_supplied = 0;
286 }
287
Jonathan Brassow71fab002009-04-02 19:55:33 +0100288 ps->io_client = dm_io_client_create(sectors_to_pages(ps->store->
289 chunk_size));
Alasdair G Kergon4db6bfe2009-01-06 03:05:17 +0000290 if (IS_ERR(ps->io_client))
291 return PTR_ERR(ps->io_client);
292
293 r = alloc_area(ps);
294 if (r)
295 return r;
296
297 r = chunk_io(ps, 0, READ, 1);
298 if (r)
299 goto bad;
300
301 dh = (struct disk_header *) ps->area;
302
303 if (le32_to_cpu(dh->magic) == 0) {
304 *new_snapshot = 1;
305 return 0;
306 }
307
308 if (le32_to_cpu(dh->magic) != SNAP_MAGIC) {
309 DMWARN("Invalid or corrupt snapshot");
310 r = -ENXIO;
311 goto bad;
312 }
313
314 *new_snapshot = 0;
315 ps->valid = le32_to_cpu(dh->valid);
316 ps->version = le32_to_cpu(dh->version);
317 chunk_size = le32_to_cpu(dh->chunk_size);
318
Jonathan Brassow71fab002009-04-02 19:55:33 +0100319 if (!chunk_size_supplied || ps->store->chunk_size == chunk_size)
Alasdair G Kergon4db6bfe2009-01-06 03:05:17 +0000320 return 0;
321
322 DMWARN("chunk size %llu in device metadata overrides "
323 "table chunk size of %llu.",
324 (unsigned long long)chunk_size,
Jonathan Brassow71fab002009-04-02 19:55:33 +0100325 (unsigned long long)ps->store->chunk_size);
Alasdair G Kergon4db6bfe2009-01-06 03:05:17 +0000326
327 /* We had a bogus chunk_size. Fix stuff up. */
328 free_area(ps);
329
Jonathan Brassow71fab002009-04-02 19:55:33 +0100330 ps->store->chunk_size = chunk_size;
331 ps->store->chunk_mask = chunk_size - 1;
332 ps->store->chunk_shift = ffs(chunk_size) - 1;
Alasdair G Kergon4db6bfe2009-01-06 03:05:17 +0000333
Jonathan Brassow71fab002009-04-02 19:55:33 +0100334 r = dm_io_client_resize(sectors_to_pages(ps->store->chunk_size),
Alasdair G Kergon4db6bfe2009-01-06 03:05:17 +0000335 ps->io_client);
336 if (r)
337 return r;
338
339 r = alloc_area(ps);
340 return r;
341
342bad:
343 free_area(ps);
344 return r;
345}
346
347static int write_header(struct pstore *ps)
348{
349 struct disk_header *dh;
350
Jonathan Brassow71fab002009-04-02 19:55:33 +0100351 memset(ps->area, 0, ps->store->chunk_size << SECTOR_SHIFT);
Alasdair G Kergon4db6bfe2009-01-06 03:05:17 +0000352
353 dh = (struct disk_header *) ps->area;
354 dh->magic = cpu_to_le32(SNAP_MAGIC);
355 dh->valid = cpu_to_le32(ps->valid);
356 dh->version = cpu_to_le32(ps->version);
Jonathan Brassow71fab002009-04-02 19:55:33 +0100357 dh->chunk_size = cpu_to_le32(ps->store->chunk_size);
Alasdair G Kergon4db6bfe2009-01-06 03:05:17 +0000358
359 return chunk_io(ps, 0, WRITE, 1);
360}
361
362/*
363 * Access functions for the disk exceptions, these do the endian conversions.
364 */
365static struct disk_exception *get_exception(struct pstore *ps, uint32_t index)
366{
367 BUG_ON(index >= ps->exceptions_per_area);
368
369 return ((struct disk_exception *) ps->area) + index;
370}
371
372static void read_exception(struct pstore *ps,
373 uint32_t index, struct disk_exception *result)
374{
375 struct disk_exception *e = get_exception(ps, index);
376
377 /* copy it */
378 result->old_chunk = le64_to_cpu(e->old_chunk);
379 result->new_chunk = le64_to_cpu(e->new_chunk);
380}
381
382static void write_exception(struct pstore *ps,
383 uint32_t index, struct disk_exception *de)
384{
385 struct disk_exception *e = get_exception(ps, index);
386
387 /* copy it */
388 e->old_chunk = cpu_to_le64(de->old_chunk);
389 e->new_chunk = cpu_to_le64(de->new_chunk);
390}
391
392/*
393 * Registers the exceptions that are present in the current area.
394 * 'full' is filled in to indicate if the area has been
395 * filled.
396 */
Jonathan Brassowa159c1a2009-01-06 03:05:19 +0000397static int insert_exceptions(struct pstore *ps,
398 int (*callback)(void *callback_context,
399 chunk_t old, chunk_t new),
400 void *callback_context,
401 int *full)
Alasdair G Kergon4db6bfe2009-01-06 03:05:17 +0000402{
403 int r;
404 unsigned int i;
405 struct disk_exception de;
406
407 /* presume the area is full */
408 *full = 1;
409
410 for (i = 0; i < ps->exceptions_per_area; i++) {
411 read_exception(ps, i, &de);
412
413 /*
414 * If the new_chunk is pointing at the start of
415 * the COW device, where the first metadata area
416 * is we know that we've hit the end of the
417 * exceptions. Therefore the area is not full.
418 */
419 if (de.new_chunk == 0LL) {
420 ps->current_committed = i;
421 *full = 0;
422 break;
423 }
424
425 /*
426 * Keep track of the start of the free chunks.
427 */
428 if (ps->next_free <= de.new_chunk)
429 ps->next_free = de.new_chunk + 1;
430
431 /*
432 * Otherwise we add the exception to the snapshot.
433 */
Jonathan Brassowa159c1a2009-01-06 03:05:19 +0000434 r = callback(callback_context, de.old_chunk, de.new_chunk);
Alasdair G Kergon4db6bfe2009-01-06 03:05:17 +0000435 if (r)
436 return r;
437 }
438
439 return 0;
440}
441
Jonathan Brassowa159c1a2009-01-06 03:05:19 +0000442static int read_exceptions(struct pstore *ps,
443 int (*callback)(void *callback_context, chunk_t old,
444 chunk_t new),
445 void *callback_context)
Alasdair G Kergon4db6bfe2009-01-06 03:05:17 +0000446{
447 int r, full = 1;
448
449 /*
450 * Keeping reading chunks and inserting exceptions until
451 * we find a partially full area.
452 */
453 for (ps->current_area = 0; full; ps->current_area++) {
454 r = area_io(ps, READ);
455 if (r)
456 return r;
457
Jonathan Brassowa159c1a2009-01-06 03:05:19 +0000458 r = insert_exceptions(ps, callback, callback_context, &full);
Alasdair G Kergon4db6bfe2009-01-06 03:05:17 +0000459 if (r)
460 return r;
461 }
462
463 ps->current_area--;
464
465 return 0;
466}
467
468static struct pstore *get_info(struct dm_exception_store *store)
469{
470 return (struct pstore *) store->context;
471}
472
473static void persistent_fraction_full(struct dm_exception_store *store,
474 sector_t *numerator, sector_t *denominator)
475{
Jonathan Brassowd0216842009-04-02 19:55:32 +0100476 *numerator = get_info(store)->next_free * store->chunk_size;
Jonathan Brassow49beb2b2009-04-02 19:55:33 +0100477 *denominator = get_dev_size(store->cow->bdev);
Alasdair G Kergon4db6bfe2009-01-06 03:05:17 +0000478}
479
Jonathan Brassow493df712009-04-02 19:55:31 +0100480static void persistent_dtr(struct dm_exception_store *store)
Alasdair G Kergon4db6bfe2009-01-06 03:05:17 +0000481{
482 struct pstore *ps = get_info(store);
483
484 destroy_workqueue(ps->metadata_wq);
485 dm_io_client_destroy(ps->io_client);
486 vfree(ps->callbacks);
487 free_area(ps);
488 kfree(ps);
489}
490
Jonathan Brassowa159c1a2009-01-06 03:05:19 +0000491static int persistent_read_metadata(struct dm_exception_store *store,
492 int (*callback)(void *callback_context,
493 chunk_t old, chunk_t new),
494 void *callback_context)
Alasdair G Kergon4db6bfe2009-01-06 03:05:17 +0000495{
496 int r, uninitialized_var(new_snapshot);
497 struct pstore *ps = get_info(store);
498
499 /*
500 * Read the snapshot header.
501 */
502 r = read_header(ps, &new_snapshot);
503 if (r)
504 return r;
505
506 /*
507 * Now we know correct chunk_size, complete the initialisation.
508 */
Jonathan Brassow71fab002009-04-02 19:55:33 +0100509 ps->exceptions_per_area = (ps->store->chunk_size << SECTOR_SHIFT) /
510 sizeof(struct disk_exception);
Alasdair G Kergon4db6bfe2009-01-06 03:05:17 +0000511 ps->callbacks = dm_vcalloc(ps->exceptions_per_area,
512 sizeof(*ps->callbacks));
513 if (!ps->callbacks)
514 return -ENOMEM;
515
516 /*
517 * Do we need to setup a new snapshot ?
518 */
519 if (new_snapshot) {
520 r = write_header(ps);
521 if (r) {
522 DMWARN("write_header failed");
523 return r;
524 }
525
526 ps->current_area = 0;
527 zero_memory_area(ps);
528 r = zero_disk_area(ps, 0);
529 if (r) {
530 DMWARN("zero_disk_area(0) failed");
531 return r;
532 }
533 } else {
534 /*
535 * Sanity checks.
536 */
537 if (ps->version != SNAPSHOT_DISK_VERSION) {
538 DMWARN("unable to handle snapshot disk version %d",
539 ps->version);
540 return -EINVAL;
541 }
542
543 /*
544 * Metadata are valid, but snapshot is invalidated
545 */
546 if (!ps->valid)
547 return 1;
548
549 /*
550 * Read the metadata.
551 */
Jonathan Brassowa159c1a2009-01-06 03:05:19 +0000552 r = read_exceptions(ps, callback, callback_context);
Alasdair G Kergon4db6bfe2009-01-06 03:05:17 +0000553 if (r)
554 return r;
555 }
556
557 return 0;
558}
559
Jonathan Brassowa159c1a2009-01-06 03:05:19 +0000560static int persistent_prepare_exception(struct dm_exception_store *store,
561 struct dm_snap_exception *e)
Alasdair G Kergon4db6bfe2009-01-06 03:05:17 +0000562{
563 struct pstore *ps = get_info(store);
564 uint32_t stride;
565 chunk_t next_free;
Jonathan Brassow49beb2b2009-04-02 19:55:33 +0100566 sector_t size = get_dev_size(store->cow->bdev);
Alasdair G Kergon4db6bfe2009-01-06 03:05:17 +0000567
568 /* Is there enough room ? */
Jonathan Brassowd0216842009-04-02 19:55:32 +0100569 if (size < ((ps->next_free + 1) * store->chunk_size))
Alasdair G Kergon4db6bfe2009-01-06 03:05:17 +0000570 return -ENOSPC;
571
572 e->new_chunk = ps->next_free;
573
574 /*
575 * Move onto the next free pending, making sure to take
576 * into account the location of the metadata chunks.
577 */
578 stride = (ps->exceptions_per_area + 1);
579 next_free = ++ps->next_free;
580 if (sector_div(next_free, stride) == 1)
581 ps->next_free++;
582
583 atomic_inc(&ps->pending_count);
584 return 0;
585}
586
Jonathan Brassowa159c1a2009-01-06 03:05:19 +0000587static void persistent_commit_exception(struct dm_exception_store *store,
588 struct dm_snap_exception *e,
589 void (*callback) (void *, int success),
590 void *callback_context)
Alasdair G Kergon4db6bfe2009-01-06 03:05:17 +0000591{
592 unsigned int i;
593 struct pstore *ps = get_info(store);
594 struct disk_exception de;
595 struct commit_callback *cb;
596
597 de.old_chunk = e->old_chunk;
598 de.new_chunk = e->new_chunk;
599 write_exception(ps, ps->current_committed++, &de);
600
601 /*
602 * Add the callback to the back of the array. This code
603 * is the only place where the callback array is
604 * manipulated, and we know that it will never be called
605 * multiple times concurrently.
606 */
607 cb = ps->callbacks + ps->callback_count++;
608 cb->callback = callback;
609 cb->context = callback_context;
610
611 /*
612 * If there are exceptions in flight and we have not yet
613 * filled this metadata area there's nothing more to do.
614 */
615 if (!atomic_dec_and_test(&ps->pending_count) &&
616 (ps->current_committed != ps->exceptions_per_area))
617 return;
618
619 /*
620 * If we completely filled the current area, then wipe the next one.
621 */
622 if ((ps->current_committed == ps->exceptions_per_area) &&
623 zero_disk_area(ps, ps->current_area + 1))
624 ps->valid = 0;
625
626 /*
627 * Commit exceptions to disk.
628 */
629 if (ps->valid && area_io(ps, WRITE))
630 ps->valid = 0;
631
632 /*
633 * Advance to the next area if this one is full.
634 */
635 if (ps->current_committed == ps->exceptions_per_area) {
636 ps->current_committed = 0;
637 ps->current_area++;
638 zero_memory_area(ps);
639 }
640
641 for (i = 0; i < ps->callback_count; i++) {
642 cb = ps->callbacks + i;
643 cb->callback(cb->context, ps->valid);
644 }
645
646 ps->callback_count = 0;
647}
648
Jonathan Brassowa159c1a2009-01-06 03:05:19 +0000649static void persistent_drop_snapshot(struct dm_exception_store *store)
Alasdair G Kergon4db6bfe2009-01-06 03:05:17 +0000650{
651 struct pstore *ps = get_info(store);
652
653 ps->valid = 0;
654 if (write_header(ps))
655 DMWARN("write header failed");
656}
657
Jonathan Brassow493df712009-04-02 19:55:31 +0100658static int persistent_ctr(struct dm_exception_store *store,
659 unsigned argc, char **argv)
Alasdair G Kergon4db6bfe2009-01-06 03:05:17 +0000660{
661 struct pstore *ps;
662
663 /* allocate the pstore */
664 ps = kmalloc(sizeof(*ps), GFP_KERNEL);
665 if (!ps)
666 return -ENOMEM;
667
Jonathan Brassow71fab002009-04-02 19:55:33 +0100668 ps->store = store;
Alasdair G Kergon4db6bfe2009-01-06 03:05:17 +0000669 ps->valid = 1;
670 ps->version = SNAPSHOT_DISK_VERSION;
671 ps->area = NULL;
672 ps->next_free = 2; /* skipping the header and first area */
673 ps->current_committed = 0;
674
675 ps->callback_count = 0;
676 atomic_set(&ps->pending_count, 0);
677 ps->callbacks = NULL;
678
679 ps->metadata_wq = create_singlethread_workqueue("ksnaphd");
680 if (!ps->metadata_wq) {
681 kfree(ps);
682 DMERR("couldn't start header metadata update thread");
683 return -ENOMEM;
684 }
685
Alasdair G Kergon4db6bfe2009-01-06 03:05:17 +0000686 store->context = ps;
687
688 return 0;
689}
690
Jonathan Brassow1e302a92009-04-02 19:55:35 +0100691static unsigned persistent_status(struct dm_exception_store *store,
692 status_type_t status, char *result,
693 unsigned maxlen)
Jonathan Brassow493df712009-04-02 19:55:31 +0100694{
Jonathan Brassow1e302a92009-04-02 19:55:35 +0100695 unsigned sz = 0;
696
697 switch (status) {
698 case STATUSTYPE_INFO:
699 break;
700 case STATUSTYPE_TABLE:
701 DMEMIT(" %s P %llu", store->cow->name,
702 (unsigned long long)store->chunk_size);
703 }
Jonathan Brassow493df712009-04-02 19:55:31 +0100704
705 return sz;
706}
707
708static struct dm_exception_store_type _persistent_type = {
709 .name = "persistent",
710 .module = THIS_MODULE,
711 .ctr = persistent_ctr,
712 .dtr = persistent_dtr,
713 .read_metadata = persistent_read_metadata,
714 .prepare_exception = persistent_prepare_exception,
715 .commit_exception = persistent_commit_exception,
716 .drop_snapshot = persistent_drop_snapshot,
717 .fraction_full = persistent_fraction_full,
718 .status = persistent_status,
719};
720
721static struct dm_exception_store_type _persistent_compat_type = {
722 .name = "P",
723 .module = THIS_MODULE,
724 .ctr = persistent_ctr,
725 .dtr = persistent_dtr,
726 .read_metadata = persistent_read_metadata,
727 .prepare_exception = persistent_prepare_exception,
728 .commit_exception = persistent_commit_exception,
729 .drop_snapshot = persistent_drop_snapshot,
730 .fraction_full = persistent_fraction_full,
731 .status = persistent_status,
732};
733
Alasdair G Kergon4db6bfe2009-01-06 03:05:17 +0000734int dm_persistent_snapshot_init(void)
735{
Jonathan Brassow493df712009-04-02 19:55:31 +0100736 int r;
737
738 r = dm_exception_store_type_register(&_persistent_type);
739 if (r) {
740 DMERR("Unable to register persistent exception store type");
741 return r;
742 }
743
744 r = dm_exception_store_type_register(&_persistent_compat_type);
745 if (r) {
746 DMERR("Unable to register old-style persistent exception "
747 "store type");
748 dm_exception_store_type_unregister(&_persistent_type);
749 return r;
750 }
751
752 return r;
Alasdair G Kergon4db6bfe2009-01-06 03:05:17 +0000753}
754
755void dm_persistent_snapshot_exit(void)
756{
Jonathan Brassow493df712009-04-02 19:55:31 +0100757 dm_exception_store_type_unregister(&_persistent_type);
758 dm_exception_store_type_unregister(&_persistent_compat_type);
Alasdair G Kergon4db6bfe2009-01-06 03:05:17 +0000759}