/*
 * Copyright (C) 2001-2002 Sistina Software (UK) Limited.
 * Copyright (C) 2006-2008 Red Hat GmbH
 *
 * This file is released under the GPL.
 */

#include "dm-exception-store.h"

#include <linux/ctype.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/dm-io.h>
#include <linux/dm-bufio.h>

#define DM_MSG_PREFIX "persistent snapshot"
#define DM_CHUNK_SIZE_DEFAULT_SECTORS 32        /* 16KB */

#define DM_PREFETCH_CHUNKS              12

/*-----------------------------------------------------------------
 * Persistent snapshots, by persistent we mean that the snapshot
 * will survive a reboot.
 *---------------------------------------------------------------*/

/*
 * We need to store a record of which parts of the origin have
 * been copied to the snapshot device. The snapshot code
 * requires that we copy exception chunks to chunk aligned areas
 * of the COW store. It makes sense therefore, to store the
 * metadata in chunk size blocks.
 *
 * There is no backward or forward compatibility implemented,
 * snapshots with different disk versions than the kernel will
 * not be usable. It is expected that "lvcreate" will blank out
 * the start of a fresh COW device before calling the snapshot
 * constructor.
 *
 * The first chunk of the COW device just contains the header.
 * After this there is a chunk filled with exception metadata,
 * followed by as many exception chunks as can fit in the
 * metadata areas.
 *
 * All on disk structures are in little-endian format. The end
 * of the exceptions info is indicated by an exception with a
 * new_chunk of 0, which is invalid since it would point to the
 * header chunk.
 */
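
/*
 * For example, with the default 32-sector (16KB) chunk size and the
 * 16-byte on-disk exception record defined below, each metadata area
 * indexes 16384 / 16 = 1024 exceptions, so the layout repeats every
 * 1025 chunks: chunk 0 holds the header, chunk 1 the metadata for
 * area 0, chunks 2-1025 the data copied for area 0, chunk 1026 the
 * metadata for area 1, and so on (see area_location() below).
 */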

/*
 * Magic for persistent snapshots: "SnAp" - Feeble isn't it.
 */
#define SNAP_MAGIC 0x70416e53

/*
 * The on-disk version of the metadata.
 */
#define SNAPSHOT_DISK_VERSION 1

#define NUM_SNAPSHOT_HDR_CHUNKS 1

struct disk_header {
        __le32 magic;

        /*
         * Is this snapshot valid?  There is no way of recovering
         * an invalid snapshot.
         */
        __le32 valid;

        /*
         * Simple, incrementing version.  No backward
         * compatibility.
         */
        __le32 version;

        /* In sectors */
        __le32 chunk_size;
} __packed;

struct disk_exception {
        __le64 old_chunk;
        __le64 new_chunk;
} __packed;

struct core_exception {
        uint64_t old_chunk;
        uint64_t new_chunk;
};

struct commit_callback {
        void (*callback)(void *, int success);
        void *context;
};

/*
 * The top level structure for a persistent exception store.
 */
struct pstore {
        struct dm_exception_store *store;
        int version;
        int valid;
        uint32_t exceptions_per_area;

        /*
         * Now that we have an asynchronous kcopyd there is no
         * need for large chunk sizes, so it won't hurt to have a
         * whole chunk's worth of metadata in memory at once.
         */
        void *area;

        /*
         * An area of zeros used to clear the next area.
         */
        void *zero_area;

        /*
         * An area used for the header. The header can be written
         * concurrently with metadata (when invalidating the snapshot),
         * so it needs a separate buffer.
         */
        void *header_area;

        /*
         * Used to keep track of which metadata area the data in
         * 'chunk' refers to.
         */
        chunk_t current_area;

        /*
         * The next free chunk for an exception.
         *
         * When creating exceptions, all the chunks here and above are
         * free. It holds the next chunk to be allocated. On rare
         * occasions (e.g. after a system crash) holes can be left in
         * the exception store because chunks can be committed out of
         * order.
         *
         * When merging exceptions, it does not necessarily mean all the
         * chunks here and above are free. It holds the value it would
         * have held if all chunks had been committed in order of
         * allocation. Consequently the value may occasionally be
         * slightly too low, but since it's only used for 'status' and
         * it can never reach its minimum value too early this doesn't
         * matter.
         */

        chunk_t next_free;

        /*
         * The index of next free exception in the current
         * metadata area.
         */
        uint32_t current_committed;

        atomic_t pending_count;
        uint32_t callback_count;
        struct commit_callback *callbacks;
        struct dm_io_client *io_client;

        struct workqueue_struct *metadata_wq;
};

static int alloc_area(struct pstore *ps)
{
        int r = -ENOMEM;
        size_t len;

        len = ps->store->chunk_size << SECTOR_SHIFT;

        /*
         * Allocate the chunk_size block of memory that will hold
         * a single metadata area.
         */
        ps->area = vmalloc(len);
        if (!ps->area)
                goto err_area;

        ps->zero_area = vzalloc(len);
        if (!ps->zero_area)
                goto err_zero_area;

        ps->header_area = vmalloc(len);
        if (!ps->header_area)
                goto err_header_area;

        return 0;

err_header_area:
        vfree(ps->zero_area);

err_zero_area:
        vfree(ps->area);

err_area:
        return r;
}

static void free_area(struct pstore *ps)
{
        vfree(ps->area);
        ps->area = NULL;
        vfree(ps->zero_area);
        ps->zero_area = NULL;
        vfree(ps->header_area);
        ps->header_area = NULL;
}

struct mdata_req {
        struct dm_io_region *where;
        struct dm_io_request *io_req;
        struct work_struct work;
        int result;
};

static void do_metadata(struct work_struct *work)
{
        struct mdata_req *req = container_of(work, struct mdata_req, work);

        req->result = dm_io(req->io_req, 1, req->where, NULL);
}

/*
 * Read or write a chunk aligned and sized block of data from a device.
 */
static int chunk_io(struct pstore *ps, void *area, chunk_t chunk, int op,
                    int op_flags, int metadata)
{
        struct dm_io_region where = {
                .bdev = dm_snap_cow(ps->store->snap)->bdev,
                .sector = ps->store->chunk_size * chunk,
                .count = ps->store->chunk_size,
        };
        struct dm_io_request io_req = {
                .bi_op = op,
                .bi_op_flags = op_flags,
                .mem.type = DM_IO_VMA,
                .mem.ptr.vma = area,
                .client = ps->io_client,
                .notify.fn = NULL,
        };
        struct mdata_req req;

        if (!metadata)
                return dm_io(&io_req, 1, &where, NULL);

        req.where = &where;
        req.io_req = &io_req;

        /*
         * Issue the synchronous I/O from a different thread
         * to avoid generic_make_request recursion.
         */
        INIT_WORK_ONSTACK(&req.work, do_metadata);
        queue_work(ps->metadata_wq, &req.work);
        flush_workqueue(ps->metadata_wq);
        destroy_work_on_stack(&req.work);

        return req.result;
}

/*
 * Convert a metadata area index to a chunk index.
 */
static chunk_t area_location(struct pstore *ps, chunk_t area)
{
        return NUM_SNAPSHOT_HDR_CHUNKS + ((ps->exceptions_per_area + 1) * area);
}

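/*
 * Metadata chunks are exactly those whose chunk number is congruent to
 * NUM_SNAPSHOT_HDR_CHUNKS modulo (exceptions_per_area + 1); skip_metadata()
 * below nudges next_free past them.  For example, with 1024 exceptions per
 * area the stride is 1025, so a next_free of 1026 (area 1's metadata chunk)
 * is bumped to 1027.
 */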
static void skip_metadata(struct pstore *ps)
{
        uint32_t stride = ps->exceptions_per_area + 1;
        chunk_t next_free = ps->next_free;
        if (sector_div(next_free, stride) == NUM_SNAPSHOT_HDR_CHUNKS)
                ps->next_free++;
}

/*
 * Read or write a metadata area, remembering to skip the first
 * chunk which holds the header.
 */
static int area_io(struct pstore *ps, int op, int op_flags)
{
        int r;
        chunk_t chunk;

        chunk = area_location(ps, ps->current_area);

        r = chunk_io(ps, ps->area, chunk, op, op_flags, 0);
        if (r)
                return r;

        return 0;
}

static void zero_memory_area(struct pstore *ps)
{
        memset(ps->area, 0, ps->store->chunk_size << SECTOR_SHIFT);
}

static int zero_disk_area(struct pstore *ps, chunk_t area)
{
        return chunk_io(ps, ps->zero_area, area_location(ps, area),
                        REQ_OP_WRITE, 0, 0);
}

static int read_header(struct pstore *ps, int *new_snapshot)
{
        int r;
        struct disk_header *dh;
        unsigned chunk_size;
        int chunk_size_supplied = 1;
        char *chunk_err;

        /*
         * Use default chunk size (or logical_block_size, if larger)
         * if none supplied
         */
        if (!ps->store->chunk_size) {
                ps->store->chunk_size = max(DM_CHUNK_SIZE_DEFAULT_SECTORS,
                    bdev_logical_block_size(dm_snap_cow(ps->store->snap)->
                                            bdev) >> 9);
                ps->store->chunk_mask = ps->store->chunk_size - 1;
                ps->store->chunk_shift = __ffs(ps->store->chunk_size);
                chunk_size_supplied = 0;
        }

        ps->io_client = dm_io_client_create();
        if (IS_ERR(ps->io_client))
                return PTR_ERR(ps->io_client);

        r = alloc_area(ps);
        if (r)
                return r;

        r = chunk_io(ps, ps->header_area, 0, REQ_OP_READ, 0, 1);
        if (r)
                goto bad;

        dh = ps->header_area;

        if (le32_to_cpu(dh->magic) == 0) {
                *new_snapshot = 1;
                return 0;
        }

        if (le32_to_cpu(dh->magic) != SNAP_MAGIC) {
                DMWARN("Invalid or corrupt snapshot");
                r = -ENXIO;
                goto bad;
        }

        *new_snapshot = 0;
        ps->valid = le32_to_cpu(dh->valid);
        ps->version = le32_to_cpu(dh->version);
        chunk_size = le32_to_cpu(dh->chunk_size);

        if (ps->store->chunk_size == chunk_size)
                return 0;

        if (chunk_size_supplied)
                DMWARN("chunk size %u in device metadata overrides "
                       "table chunk size of %u.",
                       chunk_size, ps->store->chunk_size);

        /* We had a bogus chunk_size. Fix stuff up. */
        free_area(ps);

        r = dm_exception_store_set_chunk_size(ps->store, chunk_size,
                                              &chunk_err);
        if (r) {
                DMERR("invalid on-disk chunk size %u: %s.",
                      chunk_size, chunk_err);
                return r;
        }

        r = alloc_area(ps);
        return r;

bad:
        free_area(ps);
        return r;
}

static int write_header(struct pstore *ps)
{
        struct disk_header *dh;

        memset(ps->header_area, 0, ps->store->chunk_size << SECTOR_SHIFT);

        dh = ps->header_area;
        dh->magic = cpu_to_le32(SNAP_MAGIC);
        dh->valid = cpu_to_le32(ps->valid);
        dh->version = cpu_to_le32(ps->version);
        dh->chunk_size = cpu_to_le32(ps->store->chunk_size);

        return chunk_io(ps, ps->header_area, 0, REQ_OP_WRITE, 0, 1);
}

/*
 * Access functions for the disk exceptions, these do the endian conversions.
 */
static struct disk_exception *get_exception(struct pstore *ps, void *ps_area,
                                            uint32_t index)
{
        BUG_ON(index >= ps->exceptions_per_area);

        return ((struct disk_exception *) ps_area) + index;
}

static void read_exception(struct pstore *ps, void *ps_area,
                           uint32_t index, struct core_exception *result)
{
        struct disk_exception *de = get_exception(ps, ps_area, index);

        /* copy it */
        result->old_chunk = le64_to_cpu(de->old_chunk);
        result->new_chunk = le64_to_cpu(de->new_chunk);
}

static void write_exception(struct pstore *ps,
                            uint32_t index, struct core_exception *e)
{
        struct disk_exception *de = get_exception(ps, ps->area, index);

        /* copy it */
        de->old_chunk = cpu_to_le64(e->old_chunk);
        de->new_chunk = cpu_to_le64(e->new_chunk);
}

static void clear_exception(struct pstore *ps, uint32_t index)
{
        struct disk_exception *de = get_exception(ps, ps->area, index);

        /* clear it */
        de->old_chunk = 0;
        de->new_chunk = 0;
}

/*
 * Registers the exceptions that are present in the current area.
 * 'full' is filled in to indicate if the area has been
 * filled.
 */
static int insert_exceptions(struct pstore *ps, void *ps_area,
                             int (*callback)(void *callback_context,
                                             chunk_t old, chunk_t new),
                             void *callback_context,
                             int *full)
{
        int r;
        unsigned int i;
        struct core_exception e;

        /* presume the area is full */
        *full = 1;

        for (i = 0; i < ps->exceptions_per_area; i++) {
                read_exception(ps, ps_area, i, &e);

                /*
                 * If the new_chunk is pointing at the start of
                 * the COW device, where the first metadata area
                 * is, we know that we've hit the end of the
                 * exceptions. Therefore the area is not full.
                 */
                if (e.new_chunk == 0LL) {
                        ps->current_committed = i;
                        *full = 0;
                        break;
                }

                /*
                 * Keep track of the start of the free chunks.
                 */
                if (ps->next_free <= e.new_chunk)
                        ps->next_free = e.new_chunk + 1;

                /*
                 * Otherwise we add the exception to the snapshot.
                 */
                r = callback(callback_context, e.old_chunk, e.new_chunk);
                if (r)
                        return r;
        }

        return 0;
}

static int read_exceptions(struct pstore *ps,
                           int (*callback)(void *callback_context, chunk_t old,
                                           chunk_t new),
                           void *callback_context)
{
        int r, full = 1;
        struct dm_bufio_client *client;
        chunk_t prefetch_area = 0;

        client = dm_bufio_client_create(dm_snap_cow(ps->store->snap)->bdev,
                                        ps->store->chunk_size << SECTOR_SHIFT,
                                        1, 0, NULL, NULL);

        if (IS_ERR(client))
                return PTR_ERR(client);

        /*
         * Setup for one current buffer + desired readahead buffers.
         */
        dm_bufio_set_minimum_buffers(client, 1 + DM_PREFETCH_CHUNKS);

        /*
         * Keep reading chunks and inserting exceptions until
         * we find a partially full area.
         */
        for (ps->current_area = 0; full; ps->current_area++) {
                struct dm_buffer *bp;
                void *area;
                chunk_t chunk;

                if (unlikely(prefetch_area < ps->current_area))
                        prefetch_area = ps->current_area;

                if (DM_PREFETCH_CHUNKS) do {
                        chunk_t pf_chunk = area_location(ps, prefetch_area);
                        if (unlikely(pf_chunk >= dm_bufio_get_device_size(client)))
                                break;
                        dm_bufio_prefetch(client, pf_chunk, 1);
                        prefetch_area++;
                        if (unlikely(!prefetch_area))
                                break;
                } while (prefetch_area <= ps->current_area + DM_PREFETCH_CHUNKS);

                chunk = area_location(ps, ps->current_area);

                area = dm_bufio_read(client, chunk, &bp);
                if (IS_ERR(area)) {
                        r = PTR_ERR(area);
                        goto ret_destroy_bufio;
                }

                r = insert_exceptions(ps, area, callback, callback_context,
                                      &full);

                if (!full)
                        memcpy(ps->area, area, ps->store->chunk_size << SECTOR_SHIFT);

                dm_bufio_release(bp);

                dm_bufio_forget(client, chunk);

                if (unlikely(r))
                        goto ret_destroy_bufio;
        }

        ps->current_area--;

        skip_metadata(ps);

        r = 0;

ret_destroy_bufio:
        dm_bufio_client_destroy(client);

        return r;
}

static struct pstore *get_info(struct dm_exception_store *store)
{
        return (struct pstore *) store->context;
}

static void persistent_usage(struct dm_exception_store *store,
                             sector_t *total_sectors,
                             sector_t *sectors_allocated,
                             sector_t *metadata_sectors)
{
        struct pstore *ps = get_info(store);

        *sectors_allocated = ps->next_free * store->chunk_size;
        *total_sectors = get_dev_size(dm_snap_cow(store->snap)->bdev);

        /*
         * First chunk is the fixed header.
         * Then there are (ps->current_area + 1) metadata chunks, each one
         * separated from the next by ps->exceptions_per_area data chunks.
         */
        *metadata_sectors = (ps->current_area + 1 + NUM_SNAPSHOT_HDR_CHUNKS) *
                            store->chunk_size;
}

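/*
 * For example, a snapshot still filling its first metadata area
 * (ps->current_area == 0) reports (0 + 1 + 1) * chunk_size metadata
 * sectors: the header chunk plus one metadata chunk, i.e. 64 sectors
 * with the default 32-sector chunk size.
 */
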
static void persistent_dtr(struct dm_exception_store *store)
{
        struct pstore *ps = get_info(store);

        destroy_workqueue(ps->metadata_wq);

        /* Created in read_header */
        if (ps->io_client)
                dm_io_client_destroy(ps->io_client);
        free_area(ps);

        /* Allocated in persistent_read_metadata */
        vfree(ps->callbacks);

        kfree(ps);
}

static int persistent_read_metadata(struct dm_exception_store *store,
                                    int (*callback)(void *callback_context,
                                                    chunk_t old, chunk_t new),
                                    void *callback_context)
{
        int r, uninitialized_var(new_snapshot);
        struct pstore *ps = get_info(store);

        /*
         * Read the snapshot header.
         */
        r = read_header(ps, &new_snapshot);
        if (r)
                return r;

        /*
         * Now we know correct chunk_size, complete the initialisation.
         */
        ps->exceptions_per_area = (ps->store->chunk_size << SECTOR_SHIFT) /
                                  sizeof(struct disk_exception);
        ps->callbacks = dm_vcalloc(ps->exceptions_per_area,
                                   sizeof(*ps->callbacks));
        if (!ps->callbacks)
                return -ENOMEM;

        /*
         * Do we need to set up a new snapshot?
         */
        if (new_snapshot) {
                r = write_header(ps);
                if (r) {
                        DMWARN("write_header failed");
                        return r;
                }

                ps->current_area = 0;
                zero_memory_area(ps);
                r = zero_disk_area(ps, 0);
                if (r)
                        DMWARN("zero_disk_area(0) failed");
                return r;
        }
        /*
         * Sanity checks.
         */
        if (ps->version != SNAPSHOT_DISK_VERSION) {
                DMWARN("unable to handle snapshot disk version %d",
                       ps->version);
                return -EINVAL;
        }

        /*
         * Metadata are valid, but snapshot is invalidated
         */
        if (!ps->valid)
                return 1;

        /*
         * Read the metadata.
         */
        r = read_exceptions(ps, callback, callback_context);

        return r;
}

static int persistent_prepare_exception(struct dm_exception_store *store,
                                        struct dm_exception *e)
{
        struct pstore *ps = get_info(store);
        sector_t size = get_dev_size(dm_snap_cow(store->snap)->bdev);

        /* Is there enough room ? */
        if (size < ((ps->next_free + 1) * store->chunk_size))
                return -ENOSPC;

        e->new_chunk = ps->next_free;

        /*
         * Move onto the next free pending, making sure to take
         * into account the location of the metadata chunks.
         */
        ps->next_free++;
        skip_metadata(ps);

        atomic_inc(&ps->pending_count);
        return 0;
}

static void persistent_commit_exception(struct dm_exception_store *store,
                                        struct dm_exception *e, int valid,
                                        void (*callback) (void *, int success),
                                        void *callback_context)
{
        unsigned int i;
        struct pstore *ps = get_info(store);
        struct core_exception ce;
        struct commit_callback *cb;

        if (!valid)
                ps->valid = 0;

        ce.old_chunk = e->old_chunk;
        ce.new_chunk = e->new_chunk;
        write_exception(ps, ps->current_committed++, &ce);

        /*
         * Add the callback to the back of the array. This code
         * is the only place where the callback array is
         * manipulated, and we know that it will never be called
         * multiple times concurrently.
         */
        cb = ps->callbacks + ps->callback_count++;
        cb->callback = callback;
        cb->context = callback_context;

        /*
         * If there are exceptions in flight and we have not yet
         * filled this metadata area there's nothing more to do.
         */
        if (!atomic_dec_and_test(&ps->pending_count) &&
            (ps->current_committed != ps->exceptions_per_area))
                return;

        /*
         * If we completely filled the current area, then wipe the next one.
         */
        if ((ps->current_committed == ps->exceptions_per_area) &&
            zero_disk_area(ps, ps->current_area + 1))
                ps->valid = 0;

        /*
         * Commit exceptions to disk.
         */
        if (ps->valid && area_io(ps, REQ_OP_WRITE,
                                 REQ_PREFLUSH | REQ_FUA | REQ_SYNC))
                ps->valid = 0;

        /*
         * Advance to the next area if this one is full.
         */
        if (ps->current_committed == ps->exceptions_per_area) {
                ps->current_committed = 0;
                ps->current_area++;
                zero_memory_area(ps);
        }

        for (i = 0; i < ps->callback_count; i++) {
                cb = ps->callbacks + i;
                cb->callback(cb->context, ps->valid);
        }

        ps->callback_count = 0;
}

static int persistent_prepare_merge(struct dm_exception_store *store,
                                    chunk_t *last_old_chunk,
                                    chunk_t *last_new_chunk)
{
        struct pstore *ps = get_info(store);
        struct core_exception ce;
        int nr_consecutive;
        int r;

        /*
         * When current area is empty, move back to preceding area.
         */
        if (!ps->current_committed) {
                /*
                 * Have we finished?
                 */
                if (!ps->current_area)
                        return 0;

                ps->current_area--;
                r = area_io(ps, REQ_OP_READ, 0);
                if (r < 0)
                        return r;
                ps->current_committed = ps->exceptions_per_area;
        }

        read_exception(ps, ps->area, ps->current_committed - 1, &ce);
        *last_old_chunk = ce.old_chunk;
        *last_new_chunk = ce.new_chunk;

        /*
         * Find number of consecutive chunks within the current area,
         * working backwards.
         */
        for (nr_consecutive = 1; nr_consecutive < ps->current_committed;
             nr_consecutive++) {
                read_exception(ps, ps->area,
                               ps->current_committed - 1 - nr_consecutive, &ce);
                if (ce.old_chunk != *last_old_chunk - nr_consecutive ||
                    ce.new_chunk != *last_new_chunk - nr_consecutive)
                        break;
        }

        return nr_consecutive;
}

static int persistent_commit_merge(struct dm_exception_store *store,
                                   int nr_merged)
{
        int r, i;
        struct pstore *ps = get_info(store);

        BUG_ON(nr_merged > ps->current_committed);

        for (i = 0; i < nr_merged; i++)
                clear_exception(ps, ps->current_committed - 1 - i);

        r = area_io(ps, REQ_OP_WRITE, REQ_PREFLUSH | REQ_FUA);
        if (r < 0)
                return r;

        ps->current_committed -= nr_merged;

        /*
         * At this stage, only persistent_usage() uses ps->next_free, so
         * we make no attempt to keep ps->next_free strictly accurate
         * as exceptions may have been committed out-of-order originally.
         * Once a snapshot has become merging, we set it to the value it
         * would have held had all the exceptions been committed in order.
         *
         * ps->current_area does not get reduced by prepare_merge() until
         * after commit_merge() has removed the nr_merged previous exceptions.
         */
        ps->next_free = area_location(ps, ps->current_area) +
                        ps->current_committed + 1;

        return 0;
}

static void persistent_drop_snapshot(struct dm_exception_store *store)
{
        struct pstore *ps = get_info(store);

        ps->valid = 0;
        if (write_header(ps))
                DMWARN("write header failed");
}

static int persistent_ctr(struct dm_exception_store *store, char *options)
{
        struct pstore *ps;
        int r;

        /* allocate the pstore */
        ps = kzalloc(sizeof(*ps), GFP_KERNEL);
        if (!ps)
                return -ENOMEM;

        ps->store = store;
        ps->valid = 1;
        ps->version = SNAPSHOT_DISK_VERSION;
        ps->area = NULL;
        ps->zero_area = NULL;
        ps->header_area = NULL;
        ps->next_free = NUM_SNAPSHOT_HDR_CHUNKS + 1; /* header and 1st area */
        ps->current_committed = 0;

        ps->callback_count = 0;
        atomic_set(&ps->pending_count, 0);
        ps->callbacks = NULL;

        ps->metadata_wq = alloc_workqueue("ksnaphd", WQ_MEM_RECLAIM, 0);
        if (!ps->metadata_wq) {
                DMERR("couldn't start header metadata update thread");
                r = -ENOMEM;
                goto err_workqueue;
        }

        if (options) {
                char overflow = toupper(options[0]);
                if (overflow == 'O')
                        store->userspace_supports_overflow = true;
                else {
                        DMERR("Unsupported persistent store option: %s", options);
                        r = -EINVAL;
                        goto err_options;
                }
        }

        store->context = ps;

        return 0;

err_options:
        destroy_workqueue(ps->metadata_wq);
err_workqueue:
        kfree(ps);

        return r;
}

static unsigned persistent_status(struct dm_exception_store *store,
                                  status_type_t status, char *result,
                                  unsigned maxlen)
{
        unsigned sz = 0;

        switch (status) {
        case STATUSTYPE_INFO:
                break;
        case STATUSTYPE_TABLE:
                DMEMIT(" %s %llu", store->userspace_supports_overflow ? "PO" : "P",
                       (unsigned long long)store->chunk_size);
        }

        return sz;
}

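/*
 * For a STATUSTYPE_TABLE query this appends, e.g., " PO 32" for a store
 * created with the overflow option and a 32-sector chunk size, or " P 32"
 * without it; STATUSTYPE_INFO adds nothing.
 */
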
static struct dm_exception_store_type _persistent_type = {
        .name = "persistent",
        .module = THIS_MODULE,
        .ctr = persistent_ctr,
        .dtr = persistent_dtr,
        .read_metadata = persistent_read_metadata,
        .prepare_exception = persistent_prepare_exception,
        .commit_exception = persistent_commit_exception,
        .prepare_merge = persistent_prepare_merge,
        .commit_merge = persistent_commit_merge,
        .drop_snapshot = persistent_drop_snapshot,
        .usage = persistent_usage,
        .status = persistent_status,
};

static struct dm_exception_store_type _persistent_compat_type = {
        .name = "P",
        .module = THIS_MODULE,
        .ctr = persistent_ctr,
        .dtr = persistent_dtr,
        .read_metadata = persistent_read_metadata,
        .prepare_exception = persistent_prepare_exception,
        .commit_exception = persistent_commit_exception,
        .prepare_merge = persistent_prepare_merge,
        .commit_merge = persistent_commit_merge,
        .drop_snapshot = persistent_drop_snapshot,
        .usage = persistent_usage,
        .status = persistent_status,
};

int dm_persistent_snapshot_init(void)
{
        int r;

        r = dm_exception_store_type_register(&_persistent_type);
        if (r) {
                DMERR("Unable to register persistent exception store type");
                return r;
        }

        r = dm_exception_store_type_register(&_persistent_compat_type);
        if (r) {
                DMERR("Unable to register old-style persistent exception "
                      "store type");
                dm_exception_store_type_unregister(&_persistent_type);
                return r;
        }

        return r;
}

void dm_persistent_snapshot_exit(void)
{
        dm_exception_store_type_unregister(&_persistent_type);
        dm_exception_store_type_unregister(&_persistent_compat_type);
}