/*
 * Copyright (C) 2001-2002 Sistina Software (UK) Limited.
 * Copyright (C) 2006-2008 Red Hat GmbH
 *
 * This file is released under the GPL.
 */

#include "dm-exception-store.h"

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/dm-io.h>

#define DM_MSG_PREFIX "persistent snapshot"
#define DM_CHUNK_SIZE_DEFAULT_SECTORS 32	/* 16KB */

/*-----------------------------------------------------------------
 * Persistent snapshots; by persistent we mean that the snapshot
 * will survive a reboot.
 *---------------------------------------------------------------*/

/*
 * We need to store a record of which parts of the origin have
 * been copied to the snapshot device. The snapshot code
 * requires that we copy exception chunks to chunk aligned areas
 * of the COW store. It makes sense, therefore, to store the
 * metadata in chunk size blocks.
 *
 * There is no backward or forward compatibility implemented:
 * snapshots with disk versions different from the kernel's will
 * not be usable. It is expected that "lvcreate" will blank out
 * the start of a fresh COW device before calling the snapshot
 * constructor.
 *
 * The first chunk of the COW device just contains the header.
 * After this there is a chunk filled with exception metadata,
 * followed by as many exception chunks as can fit in the
 * metadata areas.
 *
 * All on disk structures are in little-endian format. The end
 * of the exceptions info is indicated by an exception with a
 * new_chunk of 0, which is invalid since it would point to the
 * header chunk.
 */
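/*
 * A sketch of the on-disk layout implied by the description above
 * and by area_location() below, writing N for exceptions_per_area:
 *
 *	chunk 0			header
 *	chunk 1			metadata for area 0
 *	chunks 2 .. N+1		exception data for area 0
 *	chunk N+2		metadata for area 1
 *	chunks N+3 .. 2N+2	exception data for area 1
 *	...
 *
 * i.e. the metadata chunk for area 'a' lives at 1 + (N + 1) * a.
 */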

/*
 * Magic for persistent snapshots: "SnAp" - Feeble, isn't it?
 */
#define SNAP_MAGIC 0x70416e53
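/*
 * Stored little-endian, the bytes of 0x70416e53 on disk are
 * 0x53 0x6e 0x41 0x70, i.e. the ASCII string "SnAp".
 */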

/*
 * The on-disk version of the metadata.
 */
#define SNAPSHOT_DISK_VERSION 1

struct disk_header {
	uint32_t magic;

	/*
	 * Is this snapshot valid?  There is no way of recovering
	 * an invalid snapshot.
	 */
	uint32_t valid;

	/*
	 * Simple, incrementing version.  No backward
	 * compatibility.
	 */
	uint32_t version;

	/* In sectors */
	uint32_t chunk_size;
};

struct disk_exception {
	uint64_t old_chunk;
	uint64_t new_chunk;
};

struct commit_callback {
	void (*callback)(void *, int success);
	void *context;
};

/*
 * The top level structure for a persistent exception store.
 */
struct pstore {
	struct dm_exception_store *store;
	int version;
	int valid;
	uint32_t exceptions_per_area;

	/*
	 * Now that we have an asynchronous kcopyd there is no
	 * need for large chunk sizes, so it won't hurt to have a
	 * whole chunk's worth of metadata in memory at once.
	 */
	void *area;

	/*
	 * An area of zeros used to clear the next area.
	 */
	void *zero_area;

	/*
	 * Used to keep track of which metadata area the data in
	 * 'area' refers to.
	 */
	chunk_t current_area;

	/*
	 * The next free chunk for an exception.
	 */
	chunk_t next_free;

	/*
	 * The index of the next free exception in the current
	 * metadata area.
	 */
	uint32_t current_committed;

	atomic_t pending_count;
	uint32_t callback_count;
	struct commit_callback *callbacks;
	struct dm_io_client *io_client;

	struct workqueue_struct *metadata_wq;
};

static unsigned sectors_to_pages(unsigned sectors)
{
	return DIV_ROUND_UP(sectors, PAGE_SIZE >> 9);
}
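/*
 * PAGE_SIZE >> 9 is the number of 512-byte sectors per page, so
 * with 4KB pages sectors_to_pages(32) = DIV_ROUND_UP(32, 8) = 4.
 */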

static int alloc_area(struct pstore *ps)
{
	int r = -ENOMEM;
	size_t len;

	len = ps->store->chunk_size << SECTOR_SHIFT;

	/*
	 * Allocate the chunk_size block of memory that will hold
	 * a single metadata area.
	 */
	ps->area = vmalloc(len);
	if (!ps->area)
		return r;

	ps->zero_area = vmalloc(len);
	if (!ps->zero_area) {
		/* Reset ps->area so free_area() cannot vfree it twice */
		vfree(ps->area);
		ps->area = NULL;
		return r;
	}
	memset(ps->zero_area, 0, len);

	return 0;
}

static void free_area(struct pstore *ps)
{
	if (ps->area)
		vfree(ps->area);
	ps->area = NULL;

	if (ps->zero_area)
		vfree(ps->zero_area);
	ps->zero_area = NULL;
}

struct mdata_req {
	struct dm_io_region *where;
	struct dm_io_request *io_req;
	struct work_struct work;
	int result;
};

static void do_metadata(struct work_struct *work)
{
	struct mdata_req *req = container_of(work, struct mdata_req, work);

	req->result = dm_io(req->io_req, 1, req->where, NULL);
}

/*
 * Read or write a chunk aligned and sized block of data from a device.
 */
static int chunk_io(struct pstore *ps, void *area, chunk_t chunk, int rw,
		    int metadata)
{
	struct dm_io_region where = {
		.bdev = ps->store->cow->bdev,
		.sector = ps->store->chunk_size * chunk,
		.count = ps->store->chunk_size,
	};
	struct dm_io_request io_req = {
		.bi_rw = rw,
		.mem.type = DM_IO_VMA,
		.mem.ptr.vma = area,
		.client = ps->io_client,
		.notify.fn = NULL,
	};
	struct mdata_req req;

	if (!metadata)
		return dm_io(&io_req, 1, &where, NULL);

	req.where = &where;
	req.io_req = &io_req;

	/*
	 * Issue the synchronous I/O from a different thread
	 * to avoid generic_make_request recursion.
	 */
	INIT_WORK(&req.work, do_metadata);
	queue_work(ps->metadata_wq, &req.work);
	flush_workqueue(ps->metadata_wq);

	return req.result;
}

/*
 * Convert a metadata area index to a chunk index.
 */
static chunk_t area_location(struct pstore *ps, chunk_t area)
{
	return 1 + ((ps->exceptions_per_area + 1) * area);
}
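/*
 * For example, with the 16KB default chunk and 16-byte
 * disk_exceptions, exceptions_per_area is 1024, so area 0's
 * metadata lives in chunk 1 and area 1's in chunk 1026.
 */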

/*
 * Read or write a metadata area, remembering to skip the first
 * chunk which holds the header.
 */
static int area_io(struct pstore *ps, int rw)
{
	int r;
	chunk_t chunk;

	chunk = area_location(ps, ps->current_area);

	r = chunk_io(ps, ps->area, chunk, rw, 0);
	if (r)
		return r;

	return 0;
}

static void zero_memory_area(struct pstore *ps)
{
	memset(ps->area, 0, ps->store->chunk_size << SECTOR_SHIFT);
}

static int zero_disk_area(struct pstore *ps, chunk_t area)
{
	return chunk_io(ps, ps->zero_area, area_location(ps, area), WRITE, 0);
}

static int read_header(struct pstore *ps, int *new_snapshot)
{
	int r;
	struct disk_header *dh;
	chunk_t chunk_size;
	int chunk_size_supplied = 1;

	/*
	 * Use the default chunk size (or the logical block size,
	 * if larger) if none was supplied.
	 */
	if (!ps->store->chunk_size) {
		ps->store->chunk_size = max(DM_CHUNK_SIZE_DEFAULT_SECTORS,
		    bdev_logical_block_size(ps->store->cow->bdev) >> 9);
		ps->store->chunk_mask = ps->store->chunk_size - 1;
		ps->store->chunk_shift = ffs(ps->store->chunk_size) - 1;
		chunk_size_supplied = 0;
	}
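	/*
	 * A worked example, assuming the 32-sector default and a
	 * 512-byte logical block size: chunk_size = 32, so
	 * chunk_mask = 31 and chunk_shift = ffs(32) - 1 = 5.
	 * (chunk_size - 1 only works as a mask for power-of-two
	 * chunk sizes, which is what snapshots use.)
	 */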

	ps->io_client = dm_io_client_create(sectors_to_pages(ps->store->
							     chunk_size));
	if (IS_ERR(ps->io_client))
		return PTR_ERR(ps->io_client);

	r = alloc_area(ps);
	if (r)
		return r;

	r = chunk_io(ps, ps->area, 0, READ, 1);
	if (r)
		goto bad;

	dh = (struct disk_header *) ps->area;

	if (le32_to_cpu(dh->magic) == 0) {
		*new_snapshot = 1;
		return 0;
	}

	if (le32_to_cpu(dh->magic) != SNAP_MAGIC) {
		DMWARN("Invalid or corrupt snapshot");
		r = -ENXIO;
		goto bad;
	}

	*new_snapshot = 0;
	ps->valid = le32_to_cpu(dh->valid);
	ps->version = le32_to_cpu(dh->version);
	chunk_size = le32_to_cpu(dh->chunk_size);

	if (!chunk_size_supplied || ps->store->chunk_size == chunk_size)
		return 0;

	DMWARN("chunk size %llu in device metadata overrides "
	       "table chunk size of %llu.",
	       (unsigned long long)chunk_size,
	       (unsigned long long)ps->store->chunk_size);

	/* We had a bogus chunk_size. Fix stuff up. */
	free_area(ps);

	ps->store->chunk_size = chunk_size;
	ps->store->chunk_mask = chunk_size - 1;
	ps->store->chunk_shift = ffs(chunk_size) - 1;

	r = dm_io_client_resize(sectors_to_pages(ps->store->chunk_size),
				ps->io_client);
	if (r)
		return r;

	r = alloc_area(ps);
	return r;

bad:
	free_area(ps);
	return r;
}

static int write_header(struct pstore *ps)
{
	struct disk_header *dh;

	memset(ps->area, 0, ps->store->chunk_size << SECTOR_SHIFT);

	dh = (struct disk_header *) ps->area;
	dh->magic = cpu_to_le32(SNAP_MAGIC);
	dh->valid = cpu_to_le32(ps->valid);
	dh->version = cpu_to_le32(ps->version);
	dh->chunk_size = cpu_to_le32(ps->store->chunk_size);

	return chunk_io(ps, ps->area, 0, WRITE, 1);
}

/*
 * Access functions for the disk exceptions; these do the endian conversions.
 */
static struct disk_exception *get_exception(struct pstore *ps, uint32_t index)
{
	BUG_ON(index >= ps->exceptions_per_area);

	return ((struct disk_exception *) ps->area) + index;
}

static void read_exception(struct pstore *ps,
			   uint32_t index, struct disk_exception *result)
{
	struct disk_exception *e = get_exception(ps, index);

	/* copy it */
	result->old_chunk = le64_to_cpu(e->old_chunk);
	result->new_chunk = le64_to_cpu(e->new_chunk);
}

static void write_exception(struct pstore *ps,
			    uint32_t index, struct disk_exception *de)
{
	struct disk_exception *e = get_exception(ps, index);

	/* copy it */
	e->old_chunk = cpu_to_le64(de->old_chunk);
	e->new_chunk = cpu_to_le64(de->new_chunk);
}

/*
 * Registers the exceptions that are present in the current area.
 * 'full' is filled in to indicate if the area has been
 * filled.
 */
static int insert_exceptions(struct pstore *ps,
			     int (*callback)(void *callback_context,
					     chunk_t old, chunk_t new),
			     void *callback_context,
			     int *full)
{
	int r;
	unsigned int i;
	struct disk_exception de;

	/* presume the area is full */
	*full = 1;

	for (i = 0; i < ps->exceptions_per_area; i++) {
		read_exception(ps, i, &de);

		/*
		 * If the new_chunk is pointing at the start of
		 * the COW device, where the first metadata area
		 * is, we know that we've hit the end of the
		 * exceptions.  Therefore the area is not full.
		 */
		if (de.new_chunk == 0LL) {
			ps->current_committed = i;
			*full = 0;
			break;
		}

		/*
		 * Keep track of the start of the free chunks.
		 */
		if (ps->next_free <= de.new_chunk)
			ps->next_free = de.new_chunk + 1;

		/*
		 * Otherwise we add the exception to the snapshot.
		 */
		r = callback(callback_context, de.old_chunk, de.new_chunk);
		if (r)
			return r;
	}

	return 0;
}

static int read_exceptions(struct pstore *ps,
			   int (*callback)(void *callback_context, chunk_t old,
					   chunk_t new),
			   void *callback_context)
{
	int r, full = 1;

	/*
	 * Keep reading chunks and inserting exceptions until
	 * we find a partially full area.
	 */
	for (ps->current_area = 0; full; ps->current_area++) {
		r = area_io(ps, READ);
		if (r)
			return r;

		r = insert_exceptions(ps, callback, callback_context, &full);
		if (r)
			return r;
	}

	ps->current_area--;

	return 0;
}

static struct pstore *get_info(struct dm_exception_store *store)
{
	return (struct pstore *) store->context;
}

static void persistent_fraction_full(struct dm_exception_store *store,
				     sector_t *numerator, sector_t *denominator)
{
	*numerator = get_info(store)->next_free * store->chunk_size;
	*denominator = get_dev_size(store->cow->bdev);
}

static void persistent_dtr(struct dm_exception_store *store)
{
	struct pstore *ps = get_info(store);

	destroy_workqueue(ps->metadata_wq);

	/* Created in read_header */
	if (ps->io_client)
		dm_io_client_destroy(ps->io_client);
	free_area(ps);

	/* Allocated in persistent_read_metadata */
	if (ps->callbacks)
		vfree(ps->callbacks);

	kfree(ps);
}

static int persistent_read_metadata(struct dm_exception_store *store,
				    int (*callback)(void *callback_context,
						    chunk_t old, chunk_t new),
				    void *callback_context)
{
	int r, uninitialized_var(new_snapshot);
	struct pstore *ps = get_info(store);

	/*
	 * Read the snapshot header.
	 */
	r = read_header(ps, &new_snapshot);
	if (r)
		return r;

	/*
	 * Now that we know the correct chunk_size, complete the
	 * initialisation.
	 */
	ps->exceptions_per_area = (ps->store->chunk_size << SECTOR_SHIFT) /
				  sizeof(struct disk_exception);
	ps->callbacks = dm_vcalloc(ps->exceptions_per_area,
				   sizeof(*ps->callbacks));
	if (!ps->callbacks)
		return -ENOMEM;

	/*
	 * Do we need to set up a new snapshot?
	 */
	if (new_snapshot) {
		r = write_header(ps);
		if (r) {
			DMWARN("write_header failed");
			return r;
		}

		ps->current_area = 0;
		zero_memory_area(ps);
		r = zero_disk_area(ps, 0);
		if (r) {
			DMWARN("zero_disk_area(0) failed");
			return r;
		}
	} else {
		/*
		 * Sanity checks.
		 */
		if (ps->version != SNAPSHOT_DISK_VERSION) {
			DMWARN("unable to handle snapshot disk version %d",
			       ps->version);
			return -EINVAL;
		}

		/*
		 * Metadata are valid, but the snapshot has been invalidated.
		 */
		if (!ps->valid)
			return 1;

		/*
		 * Read the metadata.
		 */
		r = read_exceptions(ps, callback, callback_context);
		if (r)
			return r;
	}

	return 0;
}

static int persistent_prepare_exception(struct dm_exception_store *store,
					struct dm_snap_exception *e)
{
	struct pstore *ps = get_info(store);
	uint32_t stride;
	chunk_t next_free;
	sector_t size = get_dev_size(store->cow->bdev);

	/* Is there enough room? */
	if (size < ((ps->next_free + 1) * store->chunk_size))
		return -ENOSPC;

	e->new_chunk = ps->next_free;

	/*
	 * Move on to the next free chunk, making sure to take
	 * into account the location of the metadata chunks.
	 */
	stride = (ps->exceptions_per_area + 1);
	next_free = ++ps->next_free;
	if (sector_div(next_free, stride) == 1)
		ps->next_free++;

	atomic_inc(&ps->pending_count);
	return 0;
}
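/*
 * Why the remainder test above skips metadata chunks: per
 * area_location(), every chunk whose index is congruent to 1
 * modulo (exceptions_per_area + 1) holds metadata, and
 * sector_div() returns exactly that remainder.  E.g. with
 * exceptions_per_area = 1024, allocation runs through chunks
 * 2..1025, skips metadata chunk 1026, then continues at 1027.
 */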

static void persistent_commit_exception(struct dm_exception_store *store,
					struct dm_snap_exception *e,
					void (*callback) (void *, int success),
					void *callback_context)
{
	unsigned int i;
	struct pstore *ps = get_info(store);
	struct disk_exception de;
	struct commit_callback *cb;

	de.old_chunk = e->old_chunk;
	de.new_chunk = e->new_chunk;
	write_exception(ps, ps->current_committed++, &de);

	/*
	 * Add the callback to the back of the array.  This code
	 * is the only place where the callback array is
	 * manipulated, and we know that it will never be called
	 * multiple times concurrently.
	 */
	cb = ps->callbacks + ps->callback_count++;
	cb->callback = callback;
	cb->context = callback_context;

	/*
	 * If there are exceptions in flight and we have not yet
	 * filled this metadata area there's nothing more to do.
	 */
	if (!atomic_dec_and_test(&ps->pending_count) &&
	    (ps->current_committed != ps->exceptions_per_area))
		return;

	/*
	 * If we completely filled the current area, then wipe the next one.
	 */
	if ((ps->current_committed == ps->exceptions_per_area) &&
	    zero_disk_area(ps, ps->current_area + 1))
		ps->valid = 0;

	/*
	 * Commit exceptions to disk.
	 */
	if (ps->valid && area_io(ps, WRITE_BARRIER))
		ps->valid = 0;

	/*
	 * Advance to the next area if this one is full.
	 */
	if (ps->current_committed == ps->exceptions_per_area) {
		ps->current_committed = 0;
		ps->current_area++;
		zero_memory_area(ps);
	}

	for (i = 0; i < ps->callback_count; i++) {
		cb = ps->callbacks + i;
		cb->callback(cb->context, ps->valid);
	}

	ps->callback_count = 0;
}

static void persistent_drop_snapshot(struct dm_exception_store *store)
{
	struct pstore *ps = get_info(store);

	ps->valid = 0;
	if (write_header(ps))
		DMWARN("write header failed");
}

static int persistent_ctr(struct dm_exception_store *store,
			  unsigned argc, char **argv)
{
	struct pstore *ps;

	/* allocate the pstore */
	ps = kzalloc(sizeof(*ps), GFP_KERNEL);
	if (!ps)
		return -ENOMEM;

	ps->store = store;
	ps->valid = 1;
	ps->version = SNAPSHOT_DISK_VERSION;
	ps->area = NULL;
	ps->next_free = 2;	/* skipping the header and first area */
	ps->current_committed = 0;

	ps->callback_count = 0;
	atomic_set(&ps->pending_count, 0);
	ps->callbacks = NULL;

	ps->metadata_wq = create_singlethread_workqueue("ksnaphd");
	if (!ps->metadata_wq) {
		kfree(ps);
		DMERR("couldn't start header metadata update thread");
		return -ENOMEM;
	}

	store->context = ps;

	return 0;
}

static unsigned persistent_status(struct dm_exception_store *store,
				  status_type_t status, char *result,
				  unsigned maxlen)
{
	unsigned sz = 0;

	switch (status) {
	case STATUSTYPE_INFO:
		break;
	case STATUSTYPE_TABLE:
		DMEMIT(" %s P %llu", store->cow->name,
		       (unsigned long long)store->chunk_size);
	}

	return sz;
}

static struct dm_exception_store_type _persistent_type = {
	.name = "persistent",
	.module = THIS_MODULE,
	.ctr = persistent_ctr,
	.dtr = persistent_dtr,
	.read_metadata = persistent_read_metadata,
	.prepare_exception = persistent_prepare_exception,
	.commit_exception = persistent_commit_exception,
	.drop_snapshot = persistent_drop_snapshot,
	.fraction_full = persistent_fraction_full,
	.status = persistent_status,
};

static struct dm_exception_store_type _persistent_compat_type = {
	.name = "P",
	.module = THIS_MODULE,
	.ctr = persistent_ctr,
	.dtr = persistent_dtr,
	.read_metadata = persistent_read_metadata,
	.prepare_exception = persistent_prepare_exception,
	.commit_exception = persistent_commit_exception,
	.drop_snapshot = persistent_drop_snapshot,
	.fraction_full = persistent_fraction_full,
	.status = persistent_status,
};

int dm_persistent_snapshot_init(void)
{
	int r;

	r = dm_exception_store_type_register(&_persistent_type);
	if (r) {
		DMERR("Unable to register persistent exception store type");
		return r;
	}

	r = dm_exception_store_type_register(&_persistent_compat_type);
	if (r) {
		DMERR("Unable to register old-style persistent exception "
		      "store type");
		dm_exception_store_type_unregister(&_persistent_type);
		return r;
	}

	return r;
}

void dm_persistent_snapshot_exit(void)
{
	dm_exception_store_type_unregister(&_persistent_type);
	dm_exception_store_type_unregister(&_persistent_compat_type);
}