// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2001-2002 Sistina Software (UK) Limited.
 * Copyright (C) 2006-2008 Red Hat GmbH
 *
 * This file is released under the GPL.
 */

#include "dm-exception-store.h"

#include <linux/ctype.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/dm-io.h>
#include <linux/dm-bufio.h>

#define DM_MSG_PREFIX "persistent snapshot"
#define DM_CHUNK_SIZE_DEFAULT_SECTORS 32U	/* 16KB */

#define DM_PREFETCH_CHUNKS		12

/*-----------------------------------------------------------------
 * Persistent snapshots: by persistent we mean that the snapshot
 * will survive a reboot.
 *---------------------------------------------------------------*/

/*
 * We need to store a record of which parts of the origin have
 * been copied to the snapshot device.  The snapshot code
 * requires that we copy exception chunks to chunk-aligned areas
 * of the COW store.  It therefore makes sense to store the
 * metadata in chunk-size blocks.
 *
 * There is no backward or forward compatibility implemented;
 * snapshots with a different disk version than the kernel will
 * not be usable.  It is expected that "lvcreate" will blank out
 * the start of a fresh COW device before calling the snapshot
 * constructor.
 *
 * The first chunk of the COW device just contains the header.
 * After this there is a chunk filled with exception metadata,
 * followed by as many exception chunks as can fit in the
 * metadata areas.
 *
 * All on-disk structures are in little-endian format.  The end
 * of the exceptions info is indicated by an exception with a
 * new_chunk of 0, which is invalid since it would point to the
 * header chunk.
 */
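
/*
 * For example: with the default 32-sector (16KB) chunk size, the
 * 16-byte struct disk_exception gives 1024 exceptions per metadata
 * area, so the COW device lays out as chunk 0 = header, chunk 1 =
 * metadata area 0, chunks 2-1025 = exception data, chunk 1026 =
 * metadata area 1, and so on.
 */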

/*
 * Magic for persistent snapshots: "SnAp" - Feeble, isn't it?
 */
#define SNAP_MAGIC 0x70416e53

/*
 * The on-disk version of the metadata.
 */
#define SNAPSHOT_DISK_VERSION 1

#define NUM_SNAPSHOT_HDR_CHUNKS 1

struct disk_header {
	__le32 magic;

	/*
	 * Is this snapshot valid?  There is no way of recovering
	 * an invalid snapshot.
	 */
	__le32 valid;

	/*
	 * Simple, incrementing version.  No backward
	 * compatibility.
	 */
	__le32 version;

	/* In sectors */
	__le32 chunk_size;
} __packed;

struct disk_exception {
	__le64 old_chunk;
	__le64 new_chunk;
} __packed;

struct core_exception {
	uint64_t old_chunk;
	uint64_t new_chunk;
};

struct commit_callback {
	void (*callback)(void *, int success);
	void *context;
};

/*
 * The top level structure for a persistent exception store.
 */
struct pstore {
	struct dm_exception_store *store;
	int version;
	int valid;
	uint32_t exceptions_per_area;

	/*
	 * Now that we have an asynchronous kcopyd there is no
	 * need for large chunk sizes, so it won't hurt to have a
	 * whole chunk's worth of metadata in memory at once.
	 */
	void *area;

	/*
	 * An area of zeros used to clear the next area.
	 */
	void *zero_area;

	/*
	 * An area used for the header.  The header can be written
	 * concurrently with metadata (when invalidating the snapshot),
	 * so it needs a separate buffer.
	 */
	void *header_area;

	/*
	 * Used to keep track of which metadata area the data in
	 * 'chunk' refers to.
	 */
	chunk_t current_area;

	/*
	 * The next free chunk for an exception.
	 *
	 * When creating exceptions, all the chunks here and above are
	 * free.  It holds the next chunk to be allocated.  On rare
	 * occasions (e.g. after a system crash) holes can be left in
	 * the exception store because chunks can be committed out of
	 * order.
	 *
	 * When merging exceptions, it does not necessarily mean all the
	 * chunks here and above are free.  It holds the value it would
	 * have held if all chunks had been committed in order of
	 * allocation.  Consequently the value may occasionally be
	 * slightly too low, but since it's only used for 'status' and
	 * it can never reach its minimum value too early this doesn't
	 * matter.
	 */
	chunk_t next_free;

	/*
	 * The index of the next free exception in the current
	 * metadata area.
	 */
	uint32_t current_committed;

	atomic_t pending_count;
	uint32_t callback_count;
	struct commit_callback *callbacks;
	struct dm_io_client *io_client;

	struct workqueue_struct *metadata_wq;
};

static int alloc_area(struct pstore *ps)
{
	int r = -ENOMEM;
	size_t len;

	len = ps->store->chunk_size << SECTOR_SHIFT;

	/*
	 * Allocate the chunk_size block of memory that will hold
	 * a single metadata area.
	 */
	ps->area = vmalloc(len);
	if (!ps->area)
		goto err_area;

	ps->zero_area = vzalloc(len);
	if (!ps->zero_area)
		goto err_zero_area;

	ps->header_area = vmalloc(len);
	if (!ps->header_area)
		goto err_header_area;

	return 0;

err_header_area:
	vfree(ps->zero_area);

err_zero_area:
	vfree(ps->area);

err_area:
	return r;
}

static void free_area(struct pstore *ps)
{
	vfree(ps->area);
	ps->area = NULL;
	vfree(ps->zero_area);
	ps->zero_area = NULL;
	vfree(ps->header_area);
	ps->header_area = NULL;
}

struct mdata_req {
	struct dm_io_region *where;
	struct dm_io_request *io_req;
	struct work_struct work;
	int result;
};

static void do_metadata(struct work_struct *work)
{
	struct mdata_req *req = container_of(work, struct mdata_req, work);

	req->result = dm_io(req->io_req, 1, req->where, NULL);
}

/*
 * Read or write a chunk-aligned and chunk-sized block of data from a device.
 */
static int chunk_io(struct pstore *ps, void *area, chunk_t chunk, blk_opf_t opf,
		    int metadata)
{
	struct dm_io_region where = {
		.bdev = dm_snap_cow(ps->store->snap)->bdev,
		.sector = ps->store->chunk_size * chunk,
		.count = ps->store->chunk_size,
	};
	struct dm_io_request io_req = {
		.bi_opf = opf,
		.mem.type = DM_IO_VMA,
		.mem.ptr.vma = area,
		.client = ps->io_client,
		.notify.fn = NULL,
	};
	struct mdata_req req;

	if (!metadata)
		return dm_io(&io_req, 1, &where, NULL);

	req.where = &where;
	req.io_req = &io_req;

	/*
	 * Issue the synchronous I/O from a different thread
	 * to avoid submit_bio_noacct recursion.
	 */
	INIT_WORK_ONSTACK(&req.work, do_metadata);
	queue_work(ps->metadata_wq, &req.work);
	flush_workqueue(ps->metadata_wq);
	destroy_work_on_stack(&req.work);

	return req.result;
}

/*
 * Convert a metadata area index to a chunk index.
 */
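/*
 * E.g. with 1024 exceptions per area, metadata areas live at chunks
 * 1, 1026, 2051, ...; area_location(ps, 2) == 1 + (1024 + 1) * 2 == 2051.
 */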
static chunk_t area_location(struct pstore *ps, chunk_t area)
{
	return NUM_SNAPSHOT_HDR_CHUNKS + ((ps->exceptions_per_area + 1) * area);
}

/*
 * If ps->next_free has landed on a chunk that holds exception
 * metadata rather than data, step over it.
 */
static void skip_metadata(struct pstore *ps)
{
	uint32_t stride = ps->exceptions_per_area + 1;
	chunk_t next_free = ps->next_free;

	if (sector_div(next_free, stride) == NUM_SNAPSHOT_HDR_CHUNKS)
		ps->next_free++;
}

/*
 * Read or write a metadata area, remembering to skip the first
 * chunk, which holds the header.
 */
static int area_io(struct pstore *ps, blk_opf_t opf)
{
	chunk_t chunk = area_location(ps, ps->current_area);

	return chunk_io(ps, ps->area, chunk, opf, 0);
}

static void zero_memory_area(struct pstore *ps)
{
	memset(ps->area, 0, ps->store->chunk_size << SECTOR_SHIFT);
}

static int zero_disk_area(struct pstore *ps, chunk_t area)
{
	return chunk_io(ps, ps->zero_area, area_location(ps, area),
			REQ_OP_WRITE, 0);
}

static int read_header(struct pstore *ps, int *new_snapshot)
{
	int r;
	struct disk_header *dh;
	unsigned chunk_size;
	int chunk_size_supplied = 1;
	char *chunk_err;

	/*
	 * Use default chunk size (or logical_block_size, if larger)
	 * if none supplied.
	 */
	if (!ps->store->chunk_size) {
		ps->store->chunk_size = max(DM_CHUNK_SIZE_DEFAULT_SECTORS,
		    bdev_logical_block_size(dm_snap_cow(ps->store->snap)->
					    bdev) >> 9);
		ps->store->chunk_mask = ps->store->chunk_size - 1;
		ps->store->chunk_shift = __ffs(ps->store->chunk_size);
		chunk_size_supplied = 0;
	}

	ps->io_client = dm_io_client_create();
	if (IS_ERR(ps->io_client))
		return PTR_ERR(ps->io_client);

	r = alloc_area(ps);
	if (r)
		return r;

	r = chunk_io(ps, ps->header_area, 0, REQ_OP_READ, 1);
	if (r)
		goto bad;

	dh = ps->header_area;

	if (le32_to_cpu(dh->magic) == 0) {
		*new_snapshot = 1;
		return 0;
	}

	if (le32_to_cpu(dh->magic) != SNAP_MAGIC) {
		DMWARN("Invalid or corrupt snapshot");
		r = -ENXIO;
		goto bad;
	}

	*new_snapshot = 0;
	ps->valid = le32_to_cpu(dh->valid);
	ps->version = le32_to_cpu(dh->version);
	chunk_size = le32_to_cpu(dh->chunk_size);

	if (ps->store->chunk_size == chunk_size)
		return 0;

	if (chunk_size_supplied)
		DMWARN("chunk size %u in device metadata overrides "
		       "table chunk size of %u.",
		       chunk_size, ps->store->chunk_size);

	/* We had a bogus chunk_size. Fix stuff up. */
	free_area(ps);

	r = dm_exception_store_set_chunk_size(ps->store, chunk_size,
					      &chunk_err);
	if (r) {
		DMERR("invalid on-disk chunk size %u: %s.",
		      chunk_size, chunk_err);
		return r;
	}

	r = alloc_area(ps);
	return r;

bad:
	free_area(ps);
	return r;
}

static int write_header(struct pstore *ps)
{
	struct disk_header *dh;

	memset(ps->header_area, 0, ps->store->chunk_size << SECTOR_SHIFT);

	dh = ps->header_area;
	dh->magic = cpu_to_le32(SNAP_MAGIC);
	dh->valid = cpu_to_le32(ps->valid);
	dh->version = cpu_to_le32(ps->version);
	dh->chunk_size = cpu_to_le32(ps->store->chunk_size);

	return chunk_io(ps, ps->header_area, 0, REQ_OP_WRITE, 1);
}

/*
 * Access functions for the disk exceptions; these do the endian conversions.
 */
static struct disk_exception *get_exception(struct pstore *ps, void *ps_area,
					    uint32_t index)
{
	BUG_ON(index >= ps->exceptions_per_area);

	return ((struct disk_exception *) ps_area) + index;
}

static void read_exception(struct pstore *ps, void *ps_area,
			   uint32_t index, struct core_exception *result)
{
	struct disk_exception *de = get_exception(ps, ps_area, index);

	/* copy it */
	result->old_chunk = le64_to_cpu(de->old_chunk);
	result->new_chunk = le64_to_cpu(de->new_chunk);
}

static void write_exception(struct pstore *ps,
			    uint32_t index, struct core_exception *e)
{
	struct disk_exception *de = get_exception(ps, ps->area, index);

	/* copy it */
	de->old_chunk = cpu_to_le64(e->old_chunk);
	de->new_chunk = cpu_to_le64(e->new_chunk);
}

static void clear_exception(struct pstore *ps, uint32_t index)
{
	struct disk_exception *de = get_exception(ps, ps->area, index);

	/* clear it */
	de->old_chunk = 0;
	de->new_chunk = 0;
}

/*
 * Registers the exceptions that are present in the current area.
 * 'full' is filled in to indicate whether the area has been
 * filled.
 */
static int insert_exceptions(struct pstore *ps, void *ps_area,
			     int (*callback)(void *callback_context,
					     chunk_t old, chunk_t new),
			     void *callback_context,
			     int *full)
{
	int r;
	unsigned int i;
	struct core_exception e;

	/* presume the area is full */
	*full = 1;

	for (i = 0; i < ps->exceptions_per_area; i++) {
		read_exception(ps, ps_area, i, &e);

		/*
		 * If the new_chunk is pointing at the start of
		 * the COW device, where the first metadata area
		 * is, we know that we've hit the end of the
		 * exceptions.  Therefore the area is not full.
		 */
		if (e.new_chunk == 0LL) {
			ps->current_committed = i;
			*full = 0;
			break;
		}

		/*
		 * Keep track of the start of the free chunks.
		 */
		if (ps->next_free <= e.new_chunk)
			ps->next_free = e.new_chunk + 1;

		/*
		 * Otherwise we add the exception to the snapshot.
		 */
		r = callback(callback_context, e.old_chunk, e.new_chunk);
		if (r)
			return r;
	}

	return 0;
}

static int read_exceptions(struct pstore *ps,
			   int (*callback)(void *callback_context, chunk_t old,
					   chunk_t new),
			   void *callback_context)
{
	int r, full = 1;
	struct dm_bufio_client *client;
	chunk_t prefetch_area = 0;

	client = dm_bufio_client_create(dm_snap_cow(ps->store->snap)->bdev,
					ps->store->chunk_size << SECTOR_SHIFT,
					1, 0, NULL, NULL, 0);

	if (IS_ERR(client))
		return PTR_ERR(client);

	/*
	 * Setup for one current buffer + desired readahead buffers.
	 */
	dm_bufio_set_minimum_buffers(client, 1 + DM_PREFETCH_CHUNKS);

	/*
	 * Keep reading chunks and inserting exceptions until
	 * we find a partially full area.
	 */
	for (ps->current_area = 0; full; ps->current_area++) {
		struct dm_buffer *bp;
		void *area;
		chunk_t chunk;

		if (unlikely(prefetch_area < ps->current_area))
			prefetch_area = ps->current_area;

		/*
		 * Prefetch upcoming metadata areas, stopping at the end
		 * of the device or on chunk_t wraparound.
		 */
		if (DM_PREFETCH_CHUNKS) do {
			chunk_t pf_chunk = area_location(ps, prefetch_area);
			if (unlikely(pf_chunk >= dm_bufio_get_device_size(client)))
				break;
			dm_bufio_prefetch(client, pf_chunk, 1);
			prefetch_area++;
			if (unlikely(!prefetch_area))
				break;
		} while (prefetch_area <= ps->current_area + DM_PREFETCH_CHUNKS);

		chunk = area_location(ps, ps->current_area);

		area = dm_bufio_read(client, chunk, &bp);
		if (IS_ERR(area)) {
			r = PTR_ERR(area);
			goto ret_destroy_bufio;
		}

		r = insert_exceptions(ps, area, callback, callback_context,
				      &full);

		if (!full)
			memcpy(ps->area, area, ps->store->chunk_size << SECTOR_SHIFT);

		dm_bufio_release(bp);

		dm_bufio_forget(client, chunk);

		if (unlikely(r))
			goto ret_destroy_bufio;
	}

	ps->current_area--;

	skip_metadata(ps);

	r = 0;

ret_destroy_bufio:
	dm_bufio_client_destroy(client);

	return r;
}

static struct pstore *get_info(struct dm_exception_store *store)
{
	return (struct pstore *) store->context;
}

static void persistent_usage(struct dm_exception_store *store,
			     sector_t *total_sectors,
			     sector_t *sectors_allocated,
			     sector_t *metadata_sectors)
{
	struct pstore *ps = get_info(store);

	*sectors_allocated = ps->next_free * store->chunk_size;
	*total_sectors = get_dev_size(dm_snap_cow(store->snap)->bdev);

	/*
	 * First chunk is the fixed header.
	 * Then there are (ps->current_area + 1) metadata chunks, each one
	 * separated from the next by ps->exceptions_per_area data chunks.
	 */
	*metadata_sectors = (ps->current_area + 1 + NUM_SNAPSHOT_HDR_CHUNKS) *
			    store->chunk_size;
}
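
/*
 * E.g. with 32-sector chunks and two metadata areas in use
 * (ps->current_area == 1), *metadata_sectors comes to
 * (1 + 1 + 1) * 32 == 96 sectors.
 */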

static void persistent_dtr(struct dm_exception_store *store)
{
	struct pstore *ps = get_info(store);

	destroy_workqueue(ps->metadata_wq);

	/* Created in read_header */
	if (ps->io_client)
		dm_io_client_destroy(ps->io_client);
	free_area(ps);

	/* Allocated in persistent_read_metadata */
	kvfree(ps->callbacks);

	kfree(ps);
}

static int persistent_read_metadata(struct dm_exception_store *store,
				    int (*callback)(void *callback_context,
						    chunk_t old, chunk_t new),
				    void *callback_context)
{
	int r, new_snapshot;
	struct pstore *ps = get_info(store);

	/*
	 * Read the snapshot header.
	 */
	r = read_header(ps, &new_snapshot);
	if (r)
		return r;

	/*
	 * Now we know the correct chunk_size, complete the initialisation.
	 */
	ps->exceptions_per_area = (ps->store->chunk_size << SECTOR_SHIFT) /
				  sizeof(struct disk_exception);
	ps->callbacks = kvcalloc(ps->exceptions_per_area,
				 sizeof(*ps->callbacks), GFP_KERNEL);
	if (!ps->callbacks)
		return -ENOMEM;

	/*
	 * Do we need to set up a new snapshot?
	 */
	if (new_snapshot) {
		r = write_header(ps);
		if (r) {
			DMWARN("write_header failed");
			return r;
		}

		ps->current_area = 0;
		zero_memory_area(ps);
		r = zero_disk_area(ps, 0);
		if (r)
			DMWARN("zero_disk_area(0) failed");
		return r;
	}
	/*
	 * Sanity checks.
	 */
	if (ps->version != SNAPSHOT_DISK_VERSION) {
		DMWARN("unable to handle snapshot disk version %d",
		       ps->version);
		return -EINVAL;
	}

	/*
	 * Metadata are valid, but the snapshot is invalidated.
	 */
	if (!ps->valid)
		return 1;

	/*
	 * Read the metadata.
	 */
	r = read_exceptions(ps, callback, callback_context);

	return r;
}

static int persistent_prepare_exception(struct dm_exception_store *store,
					struct dm_exception *e)
{
	struct pstore *ps = get_info(store);
	sector_t size = get_dev_size(dm_snap_cow(store->snap)->bdev);

	/* Is there enough room? */
	if (size < ((ps->next_free + 1) * store->chunk_size))
		return -ENOSPC;

	e->new_chunk = ps->next_free;

	/*
	 * Move onto the next free pending, making sure to take
	 * into account the location of the metadata chunks.
	 */
	ps->next_free++;
	skip_metadata(ps);

	atomic_inc(&ps->pending_count);
	return 0;
}

static void persistent_commit_exception(struct dm_exception_store *store,
					struct dm_exception *e, int valid,
					void (*callback) (void *, int success),
					void *callback_context)
{
	unsigned int i;
	struct pstore *ps = get_info(store);
	struct core_exception ce;
	struct commit_callback *cb;

	if (!valid)
		ps->valid = 0;

	ce.old_chunk = e->old_chunk;
	ce.new_chunk = e->new_chunk;
	write_exception(ps, ps->current_committed++, &ce);

	/*
	 * Add the callback to the back of the array.  This code
	 * is the only place where the callback array is
	 * manipulated, and we know that it will never be called
	 * multiple times concurrently.
	 */
	cb = ps->callbacks + ps->callback_count++;
	cb->callback = callback;
	cb->context = callback_context;

	/*
	 * If there are exceptions in flight and we have not yet
	 * filled this metadata area there's nothing more to do.
	 */
	if (!atomic_dec_and_test(&ps->pending_count) &&
	    (ps->current_committed != ps->exceptions_per_area))
		return;

	/*
	 * If we completely filled the current area, then wipe the next one.
	 */
	if ((ps->current_committed == ps->exceptions_per_area) &&
	    zero_disk_area(ps, ps->current_area + 1))
		ps->valid = 0;

	/*
	 * Commit exceptions to disk.
	 */
	if (ps->valid && area_io(ps, REQ_OP_WRITE | REQ_PREFLUSH | REQ_FUA |
				 REQ_SYNC))
		ps->valid = 0;

	/*
	 * Advance to the next area if this one is full.
	 */
	if (ps->current_committed == ps->exceptions_per_area) {
		ps->current_committed = 0;
		ps->current_area++;
		zero_memory_area(ps);
	}

	for (i = 0; i < ps->callback_count; i++) {
		cb = ps->callbacks + i;
		cb->callback(cb->context, ps->valid);
	}

	ps->callback_count = 0;
}
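
/*
 * Note the batching the above gives us: with 1024 exceptions per area,
 * filling an area costs one pre-zeroing write of the next area plus a
 * single REQ_PREFLUSH | REQ_FUA write of the whole area, after which
 * all the queued callbacks are run with the store's validity as the
 * result.
 */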

static int persistent_prepare_merge(struct dm_exception_store *store,
				    chunk_t *last_old_chunk,
				    chunk_t *last_new_chunk)
{
	struct pstore *ps = get_info(store);
	struct core_exception ce;
	int nr_consecutive;
	int r;

	/*
	 * When the current area is empty, move back to the preceding area.
	 */
	if (!ps->current_committed) {
		/*
		 * Have we finished?
		 */
		if (!ps->current_area)
			return 0;

		ps->current_area--;
		r = area_io(ps, REQ_OP_READ);
		if (r < 0)
			return r;
		ps->current_committed = ps->exceptions_per_area;
	}

	read_exception(ps, ps->area, ps->current_committed - 1, &ce);
	*last_old_chunk = ce.old_chunk;
	*last_new_chunk = ce.new_chunk;

	/*
	 * Find the number of consecutive chunks within the current area,
	 * working backwards.
	 */
	for (nr_consecutive = 1; nr_consecutive < ps->current_committed;
	     nr_consecutive++) {
		read_exception(ps, ps->area,
			       ps->current_committed - 1 - nr_consecutive,
			       &ce);
		if (ce.old_chunk != *last_old_chunk - nr_consecutive ||
		    ce.new_chunk != *last_new_chunk - nr_consecutive)
			break;
	}

	return nr_consecutive;
}

static int persistent_commit_merge(struct dm_exception_store *store,
				   int nr_merged)
{
	int r, i;
	struct pstore *ps = get_info(store);

	BUG_ON(nr_merged > ps->current_committed);

	for (i = 0; i < nr_merged; i++)
		clear_exception(ps, ps->current_committed - 1 - i);

	r = area_io(ps, REQ_OP_WRITE | REQ_PREFLUSH | REQ_FUA);
	if (r < 0)
		return r;

	ps->current_committed -= nr_merged;

	/*
	 * At this stage, only persistent_usage() uses ps->next_free, so
	 * we make no attempt to keep ps->next_free strictly accurate
	 * as exceptions may have been committed out-of-order originally.
	 * Once a snapshot has become merging, we set it to the value it
	 * would have held had all the exceptions been committed in order.
	 *
	 * ps->current_area does not get reduced by prepare_merge() until
	 * after commit_merge() has removed the nr_merged previous exceptions.
	 */
	ps->next_free = area_location(ps, ps->current_area) +
			ps->current_committed + 1;

	return 0;
}
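
/*
 * E.g. with 1024 exceptions per area, ps->current_area == 2 and
 * ps->current_committed == 10 after a merge, ps->next_free becomes
 * area_location(ps, 2) + 10 + 1 == 2051 + 11 == 2062.
 */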

static void persistent_drop_snapshot(struct dm_exception_store *store)
{
	struct pstore *ps = get_info(store);

	ps->valid = 0;
	if (write_header(ps))
		DMWARN("write header failed");
}

static int persistent_ctr(struct dm_exception_store *store, char *options)
{
	struct pstore *ps;
	int r;

	/* allocate the pstore */
	ps = kzalloc(sizeof(*ps), GFP_KERNEL);
	if (!ps)
		return -ENOMEM;

	ps->store = store;
	ps->valid = 1;
	ps->version = SNAPSHOT_DISK_VERSION;
	ps->area = NULL;
	ps->zero_area = NULL;
	ps->header_area = NULL;
	ps->next_free = NUM_SNAPSHOT_HDR_CHUNKS + 1; /* header and 1st area */
	ps->current_committed = 0;

	ps->callback_count = 0;
	atomic_set(&ps->pending_count, 0);
	ps->callbacks = NULL;

	ps->metadata_wq = alloc_workqueue("ksnaphd", WQ_MEM_RECLAIM, 0);
	if (!ps->metadata_wq) {
		DMERR("couldn't start header metadata update thread");
		r = -ENOMEM;
		goto err_workqueue;
	}

	if (options) {
		char overflow = toupper(options[0]);

		if (overflow == 'O')
			store->userspace_supports_overflow = true;
		else {
			DMERR("Unsupported persistent store option: %s", options);
			r = -EINVAL;
			goto err_options;
		}
	}

	store->context = ps;

	return 0;

err_options:
	destroy_workqueue(ps->metadata_wq);
err_workqueue:
	kfree(ps);

	return r;
}
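
/*
 * For illustration (hypothetical table line): the snapshot target
 * selects this store via its <persistent?> argument, e.g.
 * "snapshot <origin> <cow-dev> PO 32" requests a persistent store
 * with overflow support; the characters after the leading 'P'
 * arrive here as 'options'.
 */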

static unsigned persistent_status(struct dm_exception_store *store,
				  status_type_t status, char *result,
				  unsigned maxlen)
{
	unsigned sz = 0;

	switch (status) {
	case STATUSTYPE_INFO:
		break;
	case STATUSTYPE_TABLE:
		DMEMIT(" %s %llu", store->userspace_supports_overflow ? "PO" : "P",
		       (unsigned long long)store->chunk_size);
		break;
	case STATUSTYPE_IMA:
		*result = '\0';
		break;
	}

	return sz;
}

static struct dm_exception_store_type _persistent_type = {
	.name = "persistent",
	.module = THIS_MODULE,
	.ctr = persistent_ctr,
	.dtr = persistent_dtr,
	.read_metadata = persistent_read_metadata,
	.prepare_exception = persistent_prepare_exception,
	.commit_exception = persistent_commit_exception,
	.prepare_merge = persistent_prepare_merge,
	.commit_merge = persistent_commit_merge,
	.drop_snapshot = persistent_drop_snapshot,
	.usage = persistent_usage,
	.status = persistent_status,
};

static struct dm_exception_store_type _persistent_compat_type = {
	.name = "P",
	.module = THIS_MODULE,
	.ctr = persistent_ctr,
	.dtr = persistent_dtr,
	.read_metadata = persistent_read_metadata,
	.prepare_exception = persistent_prepare_exception,
	.commit_exception = persistent_commit_exception,
	.prepare_merge = persistent_prepare_merge,
	.commit_merge = persistent_commit_merge,
	.drop_snapshot = persistent_drop_snapshot,
	.usage = persistent_usage,
	.status = persistent_status,
};

int dm_persistent_snapshot_init(void)
{
	int r;

	r = dm_exception_store_type_register(&_persistent_type);
	if (r) {
		DMERR("Unable to register persistent exception store type");
		return r;
	}

	r = dm_exception_store_type_register(&_persistent_compat_type);
	if (r) {
		DMERR("Unable to register old-style persistent exception "
		      "store type");
		dm_exception_store_type_unregister(&_persistent_type);
		return r;
	}

	return r;
}

void dm_persistent_snapshot_exit(void)
{
	dm_exception_store_type_unregister(&_persistent_type);
	dm_exception_store_type_unregister(&_persistent_compat_type);
}