// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2001-2002 Sistina Software (UK) Limited.
 * Copyright (C) 2006-2008 Red Hat GmbH
 *
 * This file is released under the GPL.
 */
#include "dm-exception-store.h"

#include <linux/ctype.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/dm-io.h>
#include <linux/dm-bufio.h>
#define DM_MSG_PREFIX "persistent snapshot"
#define DM_CHUNK_SIZE_DEFAULT_SECTORS 32U	/* 16KB */

#define DM_PREFETCH_CHUNKS		12
/*
 *---------------------------------------------------------------
 * Persistent snapshots, by persistent we mean that the snapshot
 * will survive a reboot.
 *---------------------------------------------------------------
 */
/*
 * We need to store a record of which parts of the origin have
 * been copied to the snapshot device.  The snapshot code
 * requires that we copy exception chunks to chunk aligned areas
 * of the COW store.  It makes sense therefore, to store the
 * metadata in chunk size blocks.
 *
 * There is no backward or forward compatibility implemented,
 * snapshots with different disk versions than the kernel will
 * not be usable.  It is expected that "lvcreate" will blank out
 * the start of a fresh COW device before calling the snapshot
 * constructor.
 *
 * The first chunk of the COW device just contains the header.
 * After this there is a chunk filled with exception metadata,
 * followed by as many exception chunks as can fit in the
 * metadata areas.
 *
 * All on disk structures are in little-endian format.  The end
 * of the exceptions info is indicated by an exception with a
 * new_chunk of 0, which is invalid since it would point to the
 * header chunk.
 */
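/*
 * For example, assuming the default 16KB chunk size: each struct
 * disk_exception below is 16 bytes, so a single metadata area holds
 * 16384 / 16 = 1024 exceptions, and the COW device is laid out as
 *
 *	chunk 0			header
 *	chunk 1			metadata area 0
 *	chunks 2-1025		exception data for area 0
 *	chunk 1026		metadata area 1
 *	...and so on.
 */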
/*
 * Magic for persistent snapshots: "SnAp" - feeble, isn't it?
 */
#define SNAP_MAGIC 0x70416e53
/*
 * The on-disk version of the metadata.
 */
#define SNAPSHOT_DISK_VERSION 1

#define NUM_SNAPSHOT_HDR_CHUNKS 1
struct disk_header {
	__le32 magic;

	/*
	 * Is this snapshot valid.  There is no way of recovering
	 * an invalid snapshot.
	 */
	__le32 valid;

	/*
	 * Simple, incrementing version.  No backward
	 * compatibility.
	 */
	__le32 version;

	/* In sectors */
	__le32 chunk_size;
} __packed;

struct disk_exception {
	__le64 old_chunk;
	__le64 new_chunk;
} __packed;

struct core_exception {
	uint64_t old_chunk;
	uint64_t new_chunk;
};

struct commit_callback {
	void (*callback)(void *ref, int success);
	void *context;
};
/*
 * The top level structure for a persistent exception store.
 */
struct pstore {
	struct dm_exception_store *store;
	int version;
	int valid;
	uint32_t exceptions_per_area;

	/*
	 * Now that we have an asynchronous kcopyd there is no
	 * need for large chunk sizes, so it won't hurt to have a
	 * whole chunk's worth of metadata in memory at once.
	 */
	void *area;

	/*
	 * An area of zeros used to clear the next area.
	 */
	void *zero_area;

	/*
	 * An area used for the header.  The header can be written
	 * concurrently with metadata (when invalidating the snapshot),
	 * so it needs a separate buffer.
	 */
	void *header_area;

	/*
	 * Used to keep track of which metadata area the data in
	 * 'area' refers to.
	 */
	chunk_t current_area;

	/*
	 * The next free chunk for an exception.
	 *
	 * When creating exceptions, all the chunks here and above are
	 * free.  It holds the next chunk to be allocated.  On rare
	 * occasions (e.g. after a system crash) holes can be left in
	 * the exception store because chunks can be committed out of
	 * order.
	 *
	 * When merging exceptions, it does not necessarily mean all the
	 * chunks here and above are free.  It holds the value it would
	 * have held if all chunks had been committed in order of
	 * allocation.  Consequently the value may occasionally be
	 * slightly too low, but since it's only used for 'status' and
	 * it can never reach its minimum value too early this doesn't
	 * matter.
	 */
	chunk_t next_free;

	/*
	 * The index of the next free exception in the current
	 * metadata area.
	 */
	uint32_t current_committed;

	atomic_t pending_count;
	uint32_t callback_count;
	struct commit_callback *callbacks;
	struct dm_io_client *io_client;

	struct workqueue_struct *metadata_wq;
};
static int alloc_area(struct pstore *ps)
{
	int r = -ENOMEM;
	size_t len;

	len = ps->store->chunk_size << SECTOR_SHIFT;

	/*
	 * Allocate the chunk_size block of memory that will hold
	 * a single metadata area.
	 */
	ps->area = vmalloc(len);
	if (!ps->area)
		goto err_area;

	ps->zero_area = vzalloc(len);
	if (!ps->zero_area)
		goto err_zero_area;

	ps->header_area = vmalloc(len);
	if (!ps->header_area)
		goto err_header_area;

	return 0;

err_header_area:
	vfree(ps->zero_area);

err_zero_area:
	vfree(ps->area);

err_area:
	return r;
}
static void free_area(struct pstore *ps)
{
	vfree(ps->area);
	ps->area = NULL;
	vfree(ps->zero_area);
	ps->zero_area = NULL;
	vfree(ps->header_area);
	ps->header_area = NULL;
}
struct mdata_req {
	struct dm_io_region *where;
	struct dm_io_request *io_req;
	struct work_struct work;
	int result;
};
static void do_metadata(struct work_struct *work)
{
	struct mdata_req *req = container_of(work, struct mdata_req, work);

	req->result = dm_io(req->io_req, 1, req->where, NULL);
}
/*
 * Read or write a chunk aligned and sized block of data from a device.
 */
static int chunk_io(struct pstore *ps, void *area, chunk_t chunk, blk_opf_t opf,
		    int metadata)
{
	struct dm_io_region where = {
		.bdev = dm_snap_cow(ps->store->snap)->bdev,
		.sector = ps->store->chunk_size * chunk,
		.count = ps->store->chunk_size,
	};
	struct dm_io_request io_req = {
		.bi_opf = opf,
		.mem.type = DM_IO_VMA,
		.mem.ptr.vma = area,
		.client = ps->io_client,
		.notify.fn = NULL,
	};
	struct mdata_req req;

	if (!metadata)
		return dm_io(&io_req, 1, &where, NULL);

	req.where = &where;
	req.io_req = &io_req;

	/*
	 * Issue the synchronous I/O from a different thread
	 * to avoid submit_bio_noacct recursion.
	 */
	INIT_WORK_ONSTACK(&req.work, do_metadata);
	queue_work(ps->metadata_wq, &req.work);
	flush_workqueue(ps->metadata_wq);
	destroy_work_on_stack(&req.work);

	return req.result;
}
/*
 * Convert a metadata area index to a chunk index.
 */
static chunk_t area_location(struct pstore *ps, chunk_t area)
{
	return NUM_SNAPSHOT_HDR_CHUNKS + ((ps->exceptions_per_area + 1) * area);
}
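/*
 * For example, with 1024 exceptions per area (the default 16KB chunk
 * size), area 0 maps to chunk 1, area 1 to chunk 1026 and area 2 to
 * chunk 2051: each stride of exceptions_per_area + 1 chunks holds one
 * metadata chunk followed by the data chunks it describes.
 */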
static void skip_metadata(struct pstore *ps)
{
	uint32_t stride = ps->exceptions_per_area + 1;
	chunk_t next_free = ps->next_free;

	if (sector_div(next_free, stride) == NUM_SNAPSHOT_HDR_CHUNKS)
		ps->next_free++;
}
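/*
 * This works because the metadata chunks are exactly the chunks whose
 * index is congruent to NUM_SNAPSHOT_HDR_CHUNKS modulo the stride; when
 * next_free lands on one, it is bumped past it so exception data is
 * never allocated over a metadata chunk.
 */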
/*
 * Read or write a metadata area, remembering to skip the first
 * chunk, which holds the header.
 */
static int area_io(struct pstore *ps, blk_opf_t opf)
{
	chunk_t chunk = area_location(ps, ps->current_area);

	return chunk_io(ps, ps->area, chunk, opf, 0);
}
static void zero_memory_area(struct pstore *ps)
{
	memset(ps->area, 0, ps->store->chunk_size << SECTOR_SHIFT);
}
static int zero_disk_area(struct pstore *ps, chunk_t area)
{
	return chunk_io(ps, ps->zero_area, area_location(ps, area),
			REQ_OP_WRITE, 0);
}
static int read_header(struct pstore *ps, int *new_snapshot)
{
	int r;
	struct disk_header *dh;
	unsigned int chunk_size;
	int chunk_size_supplied = 1;
	char *chunk_err;

	/*
	 * Use default chunk size (or logical_block_size, if larger)
	 * if none supplied.
	 */
	if (!ps->store->chunk_size) {
		ps->store->chunk_size = max(DM_CHUNK_SIZE_DEFAULT_SECTORS,
		    bdev_logical_block_size(dm_snap_cow(ps->store->snap)->
					    bdev) >> 9);
		ps->store->chunk_mask = ps->store->chunk_size - 1;
		ps->store->chunk_shift = __ffs(ps->store->chunk_size);
		chunk_size_supplied = 0;
	}

	ps->io_client = dm_io_client_create();
	if (IS_ERR(ps->io_client))
		return PTR_ERR(ps->io_client);

	r = alloc_area(ps);
	if (r)
		return r;

	r = chunk_io(ps, ps->header_area, 0, REQ_OP_READ, 1);
	if (r)
		goto bad;

	dh = ps->header_area;

	if (le32_to_cpu(dh->magic) == 0) {
		*new_snapshot = 1;
		return 0;
	}

	if (le32_to_cpu(dh->magic) != SNAP_MAGIC) {
		DMWARN("Invalid or corrupt snapshot");
		r = -ENXIO;
		goto bad;
	}

	*new_snapshot = 0;
	ps->valid = le32_to_cpu(dh->valid);
	ps->version = le32_to_cpu(dh->version);
	chunk_size = le32_to_cpu(dh->chunk_size);

	if (ps->store->chunk_size == chunk_size)
		return 0;

	if (chunk_size_supplied)
		DMWARN("chunk size %u in device metadata overrides table chunk size of %u.",
		       chunk_size, ps->store->chunk_size);

	/* We had a bogus chunk_size. Fix stuff up. */
	free_area(ps);

	r = dm_exception_store_set_chunk_size(ps->store, chunk_size,
					      &chunk_err);
	if (r) {
		DMERR("invalid on-disk chunk size %u: %s.",
		      chunk_size, chunk_err);
		return r;
	}

	r = alloc_area(ps);
	return r;

bad:
	free_area(ps);
	return r;
}
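/*
 * Note the precedence above: a chunk size recorded in valid on-disk
 * metadata always wins.  If, say, the table supplied 64 sectors but the
 * header records 32, the store is re-configured for 32 sectors and a
 * warning is logged; only a brand new snapshot uses the supplied value.
 */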
static int write_header(struct pstore *ps)
{
	struct disk_header *dh;

	memset(ps->header_area, 0, ps->store->chunk_size << SECTOR_SHIFT);

	dh = ps->header_area;
	dh->magic = cpu_to_le32(SNAP_MAGIC);
	dh->valid = cpu_to_le32(ps->valid);
	dh->version = cpu_to_le32(ps->version);
	dh->chunk_size = cpu_to_le32(ps->store->chunk_size);

	return chunk_io(ps, ps->header_area, 0, REQ_OP_WRITE, 1);
}
/*
 * Access functions for the disk exceptions, these do the endian conversions.
 */
static struct disk_exception *get_exception(struct pstore *ps, void *ps_area,
					    uint32_t index)
{
	BUG_ON(index >= ps->exceptions_per_area);

	return ((struct disk_exception *) ps_area) + index;
}
static void read_exception(struct pstore *ps, void *ps_area,
			   uint32_t index, struct core_exception *result)
{
	struct disk_exception *de = get_exception(ps, ps_area, index);

	/* copy it */
	result->old_chunk = le64_to_cpu(de->old_chunk);
	result->new_chunk = le64_to_cpu(de->new_chunk);
}
static void write_exception(struct pstore *ps,
			    uint32_t index, struct core_exception *e)
{
	struct disk_exception *de = get_exception(ps, ps->area, index);

	/* copy it */
	de->old_chunk = cpu_to_le64(e->old_chunk);
	de->new_chunk = cpu_to_le64(e->new_chunk);
}
static void clear_exception(struct pstore *ps, uint32_t index)
{
	struct disk_exception *de = get_exception(ps, ps->area, index);

	/* clear it */
	de->old_chunk = 0;
	de->new_chunk = 0;
}
/*
 * Registers the exceptions that are present in the current area.
 * 'full' is filled in to indicate if the area has been
 * filled.
 */
static int insert_exceptions(struct pstore *ps, void *ps_area,
			     int (*callback)(void *callback_context,
					     chunk_t old, chunk_t new),
			     void *callback_context,
			     int *full)
{
	int r;
	unsigned int i;
	struct core_exception e;

	/* presume the area is full */
	*full = 1;

	for (i = 0; i < ps->exceptions_per_area; i++) {
		read_exception(ps, ps_area, i, &e);

		/*
		 * If the new_chunk is pointing at the start of
		 * the COW device, where the first metadata area
		 * is, we know that we've hit the end of the
		 * exceptions.  Therefore the area is not full.
		 */
		if (e.new_chunk == 0LL) {
			ps->current_committed = i;
			*full = 0;
			break;
		}

		/*
		 * Keep track of the start of the free chunks.
		 */
		if (ps->next_free <= e.new_chunk)
			ps->next_free = e.new_chunk + 1;

		/*
		 * Otherwise we add the exception to the snapshot.
		 */
		r = callback(callback_context, e.old_chunk, e.new_chunk);
		if (r)
			return r;
	}

	return 0;
}
static int read_exceptions(struct pstore *ps,
			   int (*callback)(void *callback_context, chunk_t old,
					   chunk_t new),
			   void *callback_context)
{
	int r, full = 1;
	struct dm_bufio_client *client;
	chunk_t prefetch_area = 0;

	client = dm_bufio_client_create(dm_snap_cow(ps->store->snap)->bdev,
					ps->store->chunk_size << SECTOR_SHIFT,
					1, 0, NULL, NULL, 0);
	if (IS_ERR(client))
		return PTR_ERR(client);

	/*
	 * Setup for one current buffer + desired readahead buffers.
	 */
	dm_bufio_set_minimum_buffers(client, 1 + DM_PREFETCH_CHUNKS);

	/*
	 * Keep reading chunks and inserting exceptions until
	 * we find a partially full area.
	 */
	for (ps->current_area = 0; full; ps->current_area++) {
		struct dm_buffer *bp;
		void *area;
		chunk_t chunk;

		if (unlikely(prefetch_area < ps->current_area))
			prefetch_area = ps->current_area;

		if (DM_PREFETCH_CHUNKS) {
			do {
				chunk_t pf_chunk = area_location(ps, prefetch_area);

				if (unlikely(pf_chunk >= dm_bufio_get_device_size(client)))
					break;
				dm_bufio_prefetch(client, pf_chunk, 1);
				prefetch_area++;
				if (unlikely(!prefetch_area))
					break;
			} while (prefetch_area <= ps->current_area + DM_PREFETCH_CHUNKS);
		}

		chunk = area_location(ps, ps->current_area);

		area = dm_bufio_read(client, chunk, &bp);
		if (IS_ERR(area)) {
			r = PTR_ERR(area);
			goto ret_destroy_bufio;
		}

		r = insert_exceptions(ps, area, callback, callback_context,
				      &full);

		if (!full)
			memcpy(ps->area, area, ps->store->chunk_size << SECTOR_SHIFT);

		dm_bufio_release(bp);

		dm_bufio_forget(client, chunk);

		if (unlikely(r))
			goto ret_destroy_bufio;
	}

	ps->current_area--;

	skip_metadata(ps);

	r = 0;

ret_destroy_bufio:
	dm_bufio_client_destroy(client);

	return r;
}
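/*
 * The loop above pipelines its I/O: up to DM_PREFETCH_CHUNKS metadata
 * areas are prefetched through dm-bufio ahead of the area currently
 * being parsed, and each buffer is forgotten once consumed, since this
 * strictly sequential scan gains nothing from caching old areas.
 */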
static struct pstore *get_info(struct dm_exception_store *store)
{
	return (struct pstore *) store->context;
}
static void persistent_usage(struct dm_exception_store *store,
			     sector_t *total_sectors,
			     sector_t *sectors_allocated,
			     sector_t *metadata_sectors)
{
	struct pstore *ps = get_info(store);

	*sectors_allocated = ps->next_free * store->chunk_size;
	*total_sectors = get_dev_size(dm_snap_cow(store->snap)->bdev);

	/*
	 * First chunk is the fixed header.
	 * Then there are (ps->current_area + 1) metadata chunks, each one
	 * separated from the next by ps->exceptions_per_area data chunks.
	 */
	*metadata_sectors = (ps->current_area + 1 + NUM_SNAPSHOT_HDR_CHUNKS) *
			    store->chunk_size;
}
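/*
 * For example, a store with three metadata areas in use
 * (ps->current_area == 2) and 32-sector chunks reports
 * (2 + 1 + 1) * 32 = 128 metadata sectors.
 */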
static void persistent_dtr(struct dm_exception_store *store)
{
	struct pstore *ps = get_info(store);

	destroy_workqueue(ps->metadata_wq);

	/* Created in read_header */
	if (ps->io_client)
		dm_io_client_destroy(ps->io_client);
	free_area(ps);

	/* Allocated in persistent_read_metadata */
	kvfree(ps->callbacks);

	kfree(ps);
}
static int persistent_read_metadata(struct dm_exception_store *store,
				    int (*callback)(void *callback_context,
						    chunk_t old, chunk_t new),
				    void *callback_context)
{
	int r, new_snapshot;
	struct pstore *ps = get_info(store);

	/*
	 * Read the snapshot header.
	 */
	r = read_header(ps, &new_snapshot);
	if (r)
		return r;

	/*
	 * Now we know the correct chunk_size, complete the initialisation.
	 */
	ps->exceptions_per_area = (ps->store->chunk_size << SECTOR_SHIFT) /
				  sizeof(struct disk_exception);
	ps->callbacks = kvcalloc(ps->exceptions_per_area,
				 sizeof(*ps->callbacks), GFP_KERNEL);
	if (!ps->callbacks)
		return -ENOMEM;

	/*
	 * Do we need to setup a new snapshot?
	 */
	if (new_snapshot) {
		r = write_header(ps);
		if (r) {
			DMWARN("write_header failed");
			return r;
		}

		ps->current_area = 0;
		zero_memory_area(ps);
		r = zero_disk_area(ps, 0);
		if (r)
			DMWARN("zero_disk_area(0) failed");
		return r;
	}

	/*
	 * Sanity checks.
	 */
	if (ps->version != SNAPSHOT_DISK_VERSION) {
		DMWARN("unable to handle snapshot disk version %d",
		       ps->version);
		return -EINVAL;
	}

	/*
	 * Metadata are valid, but the snapshot is invalidated.
	 */
	if (!ps->valid)
		return 1;

	/*
	 * Read the metadata.
	 */
	r = read_exceptions(ps, callback, callback_context);

	return r;
}
static int persistent_prepare_exception(struct dm_exception_store *store,
					struct dm_exception *e)
{
	struct pstore *ps = get_info(store);
	sector_t size = get_dev_size(dm_snap_cow(store->snap)->bdev);

	/* Is there enough room? */
	if (size < ((ps->next_free + 1) * store->chunk_size))
		return -ENOSPC;

	e->new_chunk = ps->next_free;

	/*
	 * Move onto the next free pending, making sure to take
	 * into account the location of the metadata chunks.
	 */
	ps->next_free++;
	skip_metadata(ps);

	atomic_inc(&ps->pending_count);
	return 0;
}
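/*
 * For example, with 1024 exceptions per area, allocating the chunk just
 * before a metadata boundary moves next_free from 1025 to 1026, and
 * skip_metadata() then bumps it to 1027 so the metadata chunk at 1026
 * is never handed out as exception data.
 */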
static void persistent_commit_exception(struct dm_exception_store *store,
					struct dm_exception *e, int valid,
					void (*callback)(void *, int success),
					void *callback_context)
{
	unsigned int i;
	struct pstore *ps = get_info(store);
	struct core_exception ce;
	struct commit_callback *cb;

	if (!valid)
		ps->valid = 0;

	ce.old_chunk = e->old_chunk;
	ce.new_chunk = e->new_chunk;
	write_exception(ps, ps->current_committed++, &ce);

	/*
	 * Add the callback to the back of the array.  This code
	 * is the only place where the callback array is
	 * manipulated, and we know that it will never be called
	 * multiple times concurrently.
	 */
	cb = ps->callbacks + ps->callback_count++;
	cb->callback = callback;
	cb->context = callback_context;

	/*
	 * If there are exceptions in flight and we have not yet
	 * filled this metadata area there's nothing more to do.
	 */
	if (!atomic_dec_and_test(&ps->pending_count) &&
	    (ps->current_committed != ps->exceptions_per_area))
		return;

	/*
	 * If we completely filled the current area, then wipe the next one.
	 */
	if ((ps->current_committed == ps->exceptions_per_area) &&
	    zero_disk_area(ps, ps->current_area + 1))
		ps->valid = 0;

	/*
	 * Commit exceptions to disk.
	 */
	if (ps->valid && area_io(ps, REQ_OP_WRITE | REQ_PREFLUSH | REQ_FUA |
				 REQ_SYNC))
		ps->valid = 0;

	/*
	 * Advance to the next area if this one is full.
	 */
	if (ps->current_committed == ps->exceptions_per_area) {
		ps->current_committed = 0;
		ps->current_area++;
		zero_memory_area(ps);
	}

	for (i = 0; i < ps->callback_count; i++) {
		cb = ps->callbacks + i;
		cb->callback(cb->context, ps->valid);
	}

	ps->callback_count = 0;
}
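/*
 * Commits are thus batched: completion callbacks queue up in
 * ps->callbacks and only run once the area has been written out with
 * REQ_PREFLUSH | REQ_FUA, so a callback reporting success implies the
 * corresponding exception metadata is durable on the COW device.
 */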
static int persistent_prepare_merge(struct dm_exception_store *store,
				    chunk_t *last_old_chunk,
				    chunk_t *last_new_chunk)
{
	struct pstore *ps = get_info(store);
	struct core_exception ce;
	int nr_consecutive;
	int r;

	/*
	 * When the current area is empty, move back to the preceding area.
	 */
	if (!ps->current_committed) {
		/*
		 * Have we finished?
		 */
		if (!ps->current_area)
			return 0;

		ps->current_area--;
		r = area_io(ps, REQ_OP_READ);
		if (r < 0)
			return r;
		ps->current_committed = ps->exceptions_per_area;
	}

	read_exception(ps, ps->area, ps->current_committed - 1, &ce);
	*last_old_chunk = ce.old_chunk;
	*last_new_chunk = ce.new_chunk;

	/*
	 * Find number of consecutive chunks within the current area,
	 * working backwards.
	 */
	for (nr_consecutive = 1; nr_consecutive < ps->current_committed;
	     nr_consecutive++) {
		read_exception(ps, ps->area,
			       ps->current_committed - 1 - nr_consecutive,
			       &ce);
		if (ce.old_chunk != *last_old_chunk - nr_consecutive ||
		    ce.new_chunk != *last_new_chunk - nr_consecutive)
			break;
	}

	return nr_consecutive;
}
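/*
 * For example, if the three most recently committed exceptions map old
 * chunks 7, 8, 9 to new chunks 40, 41, 42, the backwards scan above
 * returns 3 and the caller may merge them as one run ending at (9, 42).
 */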
static int persistent_commit_merge(struct dm_exception_store *store,
				   int nr_merged)
{
	int r, i;
	struct pstore *ps = get_info(store);

	BUG_ON(nr_merged > ps->current_committed);

	for (i = 0; i < nr_merged; i++)
		clear_exception(ps, ps->current_committed - 1 - i);

	r = area_io(ps, REQ_OP_WRITE | REQ_PREFLUSH | REQ_FUA);
	if (r < 0)
		return r;

	ps->current_committed -= nr_merged;

	/*
	 * At this stage, only persistent_usage() uses ps->next_free, so
	 * we make no attempt to keep ps->next_free strictly accurate
	 * as exceptions may have been committed out-of-order originally.
	 * Once a snapshot has become merging, we set it to the value it
	 * would have held had all the exceptions been committed in order.
	 *
	 * ps->current_area does not get reduced by prepare_merge() until
	 * after commit_merge() has removed the nr_merged previous exceptions.
	 */
	ps->next_free = area_location(ps, ps->current_area) +
			ps->current_committed + 1;

	return 0;
}
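/*
 * For example, merging back to 10 committed exceptions in area 0
 * (chunk 1) gives next_free = 1 + 10 + 1 = 12: the chunk just past the
 * metadata chunk and the 10 remaining in-order data chunks.
 */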
static void persistent_drop_snapshot(struct dm_exception_store *store)
{
	struct pstore *ps = get_info(store);

	ps->valid = 0;
	if (write_header(ps))
		DMWARN("write header failed");
}
static int persistent_ctr(struct dm_exception_store *store, char *options)
{
	struct pstore *ps;
	int r;

	/* allocate the pstore */
	ps = kzalloc(sizeof(*ps), GFP_KERNEL);
	if (!ps)
		return -ENOMEM;

	ps->store = store;
	ps->valid = 1;
	ps->version = SNAPSHOT_DISK_VERSION;
	ps->area = NULL;
	ps->zero_area = NULL;
	ps->header_area = NULL;
	ps->next_free = NUM_SNAPSHOT_HDR_CHUNKS + 1; /* header and 1st area */
	ps->current_committed = 0;

	ps->callback_count = 0;
	atomic_set(&ps->pending_count, 0);
	ps->callbacks = NULL;

	ps->metadata_wq = alloc_workqueue("ksnaphd", WQ_MEM_RECLAIM, 0);
	if (!ps->metadata_wq) {
		DMERR("couldn't start header metadata update thread");
		r = -ENOMEM;
		goto err_workqueue;
	}

	if (options) {
		char overflow = toupper(options[0]);

		if (overflow == 'O')
			store->userspace_supports_overflow = true;
		else {
			DMERR("Unsupported persistent store option: %s", options);
			r = -EINVAL;
			goto err_options;
		}
	}

	store->context = ps;

	return 0;

err_options:
	destroy_workqueue(ps->metadata_wq);
err_workqueue:
	kfree(ps);

	return r;
}
static unsigned int persistent_status(struct dm_exception_store *store,
				      status_type_t status, char *result,
				      unsigned int maxlen)
{
	unsigned int sz = 0;

	switch (status) {
	case STATUSTYPE_INFO:
		break;
	case STATUSTYPE_TABLE:
		DMEMIT(" %s %llu", store->userspace_supports_overflow ? "PO" : "P",
		       (unsigned long long)store->chunk_size);
		break;
	case STATUSTYPE_IMA:
		*result = '\0';
		break;
	}

	return sz;
}
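/*
 * For example, the table line for a store with 32-sector chunks emits
 * " P 32", or " PO 32" once userspace has declared (via the 'O' option
 * in persistent_ctr) that it can handle the overflow state.
 */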
static struct dm_exception_store_type _persistent_type = {
	.name = "persistent",
	.module = THIS_MODULE,
	.ctr = persistent_ctr,
	.dtr = persistent_dtr,
	.read_metadata = persistent_read_metadata,
	.prepare_exception = persistent_prepare_exception,
	.commit_exception = persistent_commit_exception,
	.prepare_merge = persistent_prepare_merge,
	.commit_merge = persistent_commit_merge,
	.drop_snapshot = persistent_drop_snapshot,
	.usage = persistent_usage,
	.status = persistent_status,
};
static struct dm_exception_store_type _persistent_compat_type = {
	.name = "P",
	.module = THIS_MODULE,
	.ctr = persistent_ctr,
	.dtr = persistent_dtr,
	.read_metadata = persistent_read_metadata,
	.prepare_exception = persistent_prepare_exception,
	.commit_exception = persistent_commit_exception,
	.prepare_merge = persistent_prepare_merge,
	.commit_merge = persistent_commit_merge,
	.drop_snapshot = persistent_drop_snapshot,
	.usage = persistent_usage,
	.status = persistent_status,
};
int dm_persistent_snapshot_init(void)
{
	int r;

	r = dm_exception_store_type_register(&_persistent_type);
	if (r) {
		DMERR("Unable to register persistent exception store type");
		return r;
	}

	r = dm_exception_store_type_register(&_persistent_compat_type);
	if (r) {
		DMERR("Unable to register old-style persistent exception store type");
		dm_exception_store_type_unregister(&_persistent_type);
		return r;
	}

	return r;
}
void dm_persistent_snapshot_exit(void)
{
	dm_exception_store_type_unregister(&_persistent_type);
	dm_exception_store_type_unregister(&_persistent_compat_type);
}