drivers/md/dm-cache-metadata.c
1 /*
2  * Copyright (C) 2012 Red Hat, Inc.
3  *
4  * This file is released under the GPL.
5  */
6
7 #include "dm-cache-metadata.h"
8
9 #include "persistent-data/dm-array.h"
10 #include "persistent-data/dm-bitset.h"
11 #include "persistent-data/dm-space-map.h"
12 #include "persistent-data/dm-space-map-disk.h"
13 #include "persistent-data/dm-transaction-manager.h"
14
15 #include <linux/device-mapper.h>
16
17 /*----------------------------------------------------------------*/
18
19 #define DM_MSG_PREFIX   "cache metadata"
20
21 #define CACHE_SUPERBLOCK_MAGIC 06142003
22 #define CACHE_SUPERBLOCK_LOCATION 0
23
24 /*
25  * Defines the range of metadata versions that this module can handle.
26  */
27 #define MIN_CACHE_VERSION 1
28 #define MAX_CACHE_VERSION 2
29
30 /*
31  *  3 for btree insert +
32  *  2 for btree lookup used within space map
33  */
34 #define CACHE_MAX_CONCURRENT_LOCKS 5
35 #define SPACE_MAP_ROOT_SIZE 128
36
37 enum superblock_flag_bits {
38         /* for spotting crashes that would invalidate the dirty bitset */
39         CLEAN_SHUTDOWN,
40         /* metadata must be checked using the tools */
41         NEEDS_CHECK,
42 };
43
44 /*
45  * Each mapping from cache block -> origin block carries a set of flags.
46  */
47 enum mapping_bits {
48         /*
49          * A valid mapping.  Because we're using an array, we clear this
50          * flag for a non-existent mapping.
51          */
52         M_VALID = 1,
53
54         /*
55          * The data on the cache is different from that on the origin.
56          * This flag is only used by metadata format 1.
57          */
58         M_DIRTY = 2
59 };
60
61 struct cache_disk_superblock {
62         __le32 csum;
63         __le32 flags;
64         __le64 blocknr;
65
66         __u8 uuid[16];
67         __le64 magic;
68         __le32 version;
69
70         __u8 policy_name[CACHE_POLICY_NAME_SIZE];
71         __le32 policy_hint_size;
72
73         __u8 metadata_space_map_root[SPACE_MAP_ROOT_SIZE];
74         __le64 mapping_root;
75         __le64 hint_root;
76
77         __le64 discard_root;
78         __le64 discard_block_size;
79         __le64 discard_nr_blocks;
80
81         __le32 data_block_size;
82         __le32 metadata_block_size;
83         __le32 cache_blocks;
84
85         __le32 compat_flags;
86         __le32 compat_ro_flags;
87         __le32 incompat_flags;
88
89         __le32 read_hits;
90         __le32 read_misses;
91         __le32 write_hits;
92         __le32 write_misses;
93
94         __le32 policy_version[CACHE_POLICY_VERSION_SIZE];
95
96         /*
97          * Metadata format 2 fields.
98          */
99         __le64 dirty_root;
100 } __packed;
101
102 struct dm_cache_metadata {
103         atomic_t ref_count;
104         struct list_head list;
105
106         unsigned version;
107         struct block_device *bdev;
108         struct dm_block_manager *bm;
109         struct dm_space_map *metadata_sm;
110         struct dm_transaction_manager *tm;
111
112         struct dm_array_info info;
113         struct dm_array_info hint_info;
114         struct dm_disk_bitset discard_info;
115
116         struct rw_semaphore root_lock;
117         unsigned long flags;
118         dm_block_t root;
119         dm_block_t hint_root;
120         dm_block_t discard_root;
121
122         sector_t discard_block_size;
123         dm_dblock_t discard_nr_blocks;
124
125         sector_t data_block_size;
126         dm_cblock_t cache_blocks;
127         bool changed:1;
128         bool clean_when_opened:1;
129
130         char policy_name[CACHE_POLICY_NAME_SIZE];
131         unsigned policy_version[CACHE_POLICY_VERSION_SIZE];
132         size_t policy_hint_size;
133         struct dm_cache_statistics stats;
134
135         /*
136          * Reading the space map root can fail, so we read it into this
137          * buffer before the superblock is locked and updated.
138          */
139         __u8 metadata_space_map_root[SPACE_MAP_ROOT_SIZE];
140
141         /*
142          * Set if a transaction has to be aborted but the attempt to roll
143          * back to the previous (good) transaction failed.  The only
144          * metadata operation permissible in this state is the closing of
145          * the device.
146          */
147         bool fail_io:1;
148
149         /*
150          * Metadata format 2 fields.
151          */
152         dm_block_t dirty_root;
153         struct dm_disk_bitset dirty_info;
154
155         /*
156          * These structures are used when loading metadata.  They're too
157          * big to put on the stack.
158          */
159         struct dm_array_cursor mapping_cursor;
160         struct dm_array_cursor hint_cursor;
161         struct dm_bitset_cursor dirty_cursor;
162 };
163
164 /*-------------------------------------------------------------------
165  * superblock validator
166  *-----------------------------------------------------------------*/
167
168 #define SUPERBLOCK_CSUM_XOR 9031977
169
170 static void sb_prepare_for_write(struct dm_block_validator *v,
171                                  struct dm_block *b,
172                                  size_t sb_block_size)
173 {
174         struct cache_disk_superblock *disk_super = dm_block_data(b);
175
176         disk_super->blocknr = cpu_to_le64(dm_block_location(b));
177         disk_super->csum = cpu_to_le32(dm_bm_checksum(&disk_super->flags,
178                                                       sb_block_size - sizeof(__le32),
179                                                       SUPERBLOCK_CSUM_XOR));
180 }
181
182 static int check_metadata_version(struct cache_disk_superblock *disk_super)
183 {
184         uint32_t metadata_version = le32_to_cpu(disk_super->version);
185
186         if (metadata_version < MIN_CACHE_VERSION || metadata_version > MAX_CACHE_VERSION) {
187                 DMERR("Cache metadata version %u found, but only versions between %u and %u supported.",
188                       metadata_version, MIN_CACHE_VERSION, MAX_CACHE_VERSION);
189                 return -EINVAL;
190         }
191
192         return 0;
193 }
194
195 static int sb_check(struct dm_block_validator *v,
196                     struct dm_block *b,
197                     size_t sb_block_size)
198 {
199         struct cache_disk_superblock *disk_super = dm_block_data(b);
200         __le32 csum_le;
201
202         if (dm_block_location(b) != le64_to_cpu(disk_super->blocknr)) {
203                 DMERR("sb_check failed: blocknr %llu: wanted %llu",
204                       le64_to_cpu(disk_super->blocknr),
205                       (unsigned long long)dm_block_location(b));
206                 return -ENOTBLK;
207         }
208
209         if (le64_to_cpu(disk_super->magic) != CACHE_SUPERBLOCK_MAGIC) {
210                 DMERR("sb_check failed: magic %llu: wanted %llu",
211                       le64_to_cpu(disk_super->magic),
212                       (unsigned long long)CACHE_SUPERBLOCK_MAGIC);
213                 return -EILSEQ;
214         }
215
216         csum_le = cpu_to_le32(dm_bm_checksum(&disk_super->flags,
217                                              sb_block_size - sizeof(__le32),
218                                              SUPERBLOCK_CSUM_XOR));
219         if (csum_le != disk_super->csum) {
220                 DMERR("sb_check failed: csum %u: wanted %u",
221                       le32_to_cpu(csum_le), le32_to_cpu(disk_super->csum));
222                 return -EILSEQ;
223         }
224
225         return check_metadata_version(disk_super);
226 }
227
228 static struct dm_block_validator sb_validator = {
229         .name = "superblock",
230         .prepare_for_write = sb_prepare_for_write,
231         .check = sb_check
232 };
233
234 /*----------------------------------------------------------------*/
235
236 static int superblock_read_lock(struct dm_cache_metadata *cmd,
237                                 struct dm_block **sblock)
238 {
239         return dm_bm_read_lock(cmd->bm, CACHE_SUPERBLOCK_LOCATION,
240                                &sb_validator, sblock);
241 }
242
243 static int superblock_lock_zero(struct dm_cache_metadata *cmd,
244                                 struct dm_block **sblock)
245 {
246         return dm_bm_write_lock_zero(cmd->bm, CACHE_SUPERBLOCK_LOCATION,
247                                      &sb_validator, sblock);
248 }
249
250 static int superblock_lock(struct dm_cache_metadata *cmd,
251                            struct dm_block **sblock)
252 {
253         return dm_bm_write_lock(cmd->bm, CACHE_SUPERBLOCK_LOCATION,
254                                 &sb_validator, sblock);
255 }
256
257 /*----------------------------------------------------------------*/
258
259 static int __superblock_all_zeroes(struct dm_block_manager *bm, bool *result)
260 {
261         int r;
262         unsigned i;
263         struct dm_block *b;
264         __le64 *data_le, zero = cpu_to_le64(0);
265         unsigned sb_block_size = dm_bm_block_size(bm) / sizeof(__le64);
266
267         /*
268          * We can't use a validator here - it may be all zeroes.
269          */
270         r = dm_bm_read_lock(bm, CACHE_SUPERBLOCK_LOCATION, NULL, &b);
271         if (r)
272                 return r;
273
274         data_le = dm_block_data(b);
275         *result = true;
276         for (i = 0; i < sb_block_size; i++) {
277                 if (data_le[i] != zero) {
278                         *result = false;
279                         break;
280                 }
281         }
282
283         dm_bm_unlock(b);
284
285         return 0;
286 }
287
288 static void __setup_mapping_info(struct dm_cache_metadata *cmd)
289 {
290         struct dm_btree_value_type vt;
291
292         vt.context = NULL;
293         vt.size = sizeof(__le64);
294         vt.inc = NULL;
295         vt.dec = NULL;
296         vt.equal = NULL;
297         dm_array_info_init(&cmd->info, cmd->tm, &vt);
298
299         if (cmd->policy_hint_size) {
300                 vt.size = sizeof(__le32);
301                 dm_array_info_init(&cmd->hint_info, cmd->tm, &vt);
302         }
303 }
304
305 static int __save_sm_root(struct dm_cache_metadata *cmd)
306 {
307         int r;
308         size_t metadata_len;
309
310         r = dm_sm_root_size(cmd->metadata_sm, &metadata_len);
311         if (r < 0)
312                 return r;
313
314         return dm_sm_copy_root(cmd->metadata_sm, &cmd->metadata_space_map_root,
315                                metadata_len);
316 }
317
318 static void __copy_sm_root(struct dm_cache_metadata *cmd,
319                            struct cache_disk_superblock *disk_super)
320 {
321         memcpy(&disk_super->metadata_space_map_root,
322                &cmd->metadata_space_map_root,
323                sizeof(cmd->metadata_space_map_root));
324 }
325
326 static bool separate_dirty_bits(struct dm_cache_metadata *cmd)
327 {
328         return cmd->version >= 2;
329 }
330
331 static int __write_initial_superblock(struct dm_cache_metadata *cmd)
332 {
333         int r;
334         struct dm_block *sblock;
335         struct cache_disk_superblock *disk_super;
336         sector_t bdev_size = i_size_read(cmd->bdev->bd_inode) >> SECTOR_SHIFT;
337
338         /* FIXME: see if we can lose the max sectors limit */
339         if (bdev_size > DM_CACHE_METADATA_MAX_SECTORS)
340                 bdev_size = DM_CACHE_METADATA_MAX_SECTORS;
341
342         r = dm_tm_pre_commit(cmd->tm);
343         if (r < 0)
344                 return r;
345
346         /*
347          * dm_sm_copy_root() can fail.  So we need to do it before we start
348          * updating the superblock.
349          */
350         r = __save_sm_root(cmd);
351         if (r)
352                 return r;
353
354         r = superblock_lock_zero(cmd, &sblock);
355         if (r)
356                 return r;
357
358         disk_super = dm_block_data(sblock);
359         disk_super->flags = 0;
360         memset(disk_super->uuid, 0, sizeof(disk_super->uuid));
361         disk_super->magic = cpu_to_le64(CACHE_SUPERBLOCK_MAGIC);
362         disk_super->version = cpu_to_le32(cmd->version);
363         memset(disk_super->policy_name, 0, sizeof(disk_super->policy_name));
364         memset(disk_super->policy_version, 0, sizeof(disk_super->policy_version));
365         disk_super->policy_hint_size = 0;
366
367         __copy_sm_root(cmd, disk_super);
368
369         disk_super->mapping_root = cpu_to_le64(cmd->root);
370         disk_super->hint_root = cpu_to_le64(cmd->hint_root);
371         disk_super->discard_root = cpu_to_le64(cmd->discard_root);
372         disk_super->discard_block_size = cpu_to_le64(cmd->discard_block_size);
373         disk_super->discard_nr_blocks = cpu_to_le64(from_dblock(cmd->discard_nr_blocks));
374         disk_super->metadata_block_size = cpu_to_le32(DM_CACHE_METADATA_BLOCK_SIZE);
375         disk_super->data_block_size = cpu_to_le32(cmd->data_block_size);
376         disk_super->cache_blocks = cpu_to_le32(0);
377
378         disk_super->read_hits = cpu_to_le32(0);
379         disk_super->read_misses = cpu_to_le32(0);
380         disk_super->write_hits = cpu_to_le32(0);
381         disk_super->write_misses = cpu_to_le32(0);
382
383         if (separate_dirty_bits(cmd))
384                 disk_super->dirty_root = cpu_to_le64(cmd->dirty_root);
385
386         return dm_tm_commit(cmd->tm, sblock);
387 }
388
389 static int __format_metadata(struct dm_cache_metadata *cmd)
390 {
391         int r;
392
393         r = dm_tm_create_with_sm(cmd->bm, CACHE_SUPERBLOCK_LOCATION,
394                                  &cmd->tm, &cmd->metadata_sm);
395         if (r < 0) {
396                 DMERR("tm_create_with_sm failed");
397                 return r;
398         }
399
400         __setup_mapping_info(cmd);
401
402         r = dm_array_empty(&cmd->info, &cmd->root);
403         if (r < 0)
404                 goto bad;
405
406         if (separate_dirty_bits(cmd)) {
407                 dm_disk_bitset_init(cmd->tm, &cmd->dirty_info);
408                 r = dm_bitset_empty(&cmd->dirty_info, &cmd->dirty_root);
409                 if (r < 0)
410                         goto bad;
411         }
412
413         dm_disk_bitset_init(cmd->tm, &cmd->discard_info);
414         r = dm_bitset_empty(&cmd->discard_info, &cmd->discard_root);
415         if (r < 0)
416                 goto bad;
417
418         cmd->discard_block_size = 0;
419         cmd->discard_nr_blocks = 0;
420
421         r = __write_initial_superblock(cmd);
422         if (r)
423                 goto bad;
424
425         cmd->clean_when_opened = true;
426         return 0;
427
428 bad:
429         dm_tm_destroy(cmd->tm);
430         dm_sm_destroy(cmd->metadata_sm);
431
432         return r;
433 }
434
435 static int __check_incompat_features(struct cache_disk_superblock *disk_super,
436                                      struct dm_cache_metadata *cmd)
437 {
438         uint32_t incompat_flags, features;
439
440         incompat_flags = le32_to_cpu(disk_super->incompat_flags);
441         features = incompat_flags & ~DM_CACHE_FEATURE_INCOMPAT_SUPP;
442         if (features) {
443                 DMERR("could not access metadata due to unsupported optional features (%lx).",
444                       (unsigned long)features);
445                 return -EINVAL;
446         }
447
448         /*
449          * Check for read-only metadata to skip the following RDWR checks.
450          */
451         if (get_disk_ro(cmd->bdev->bd_disk))
452                 return 0;
453
454         features = le32_to_cpu(disk_super->compat_ro_flags) & ~DM_CACHE_FEATURE_COMPAT_RO_SUPP;
455         if (features) {
456                 DMERR("could not access metadata RDWR due to unsupported optional features (%lx).",
457                       (unsigned long)features);
458                 return -EINVAL;
459         }
460
461         return 0;
462 }
463
464 static int __open_metadata(struct dm_cache_metadata *cmd)
465 {
466         int r;
467         struct dm_block *sblock;
468         struct cache_disk_superblock *disk_super;
469         unsigned long sb_flags;
470
471         r = superblock_read_lock(cmd, &sblock);
472         if (r < 0) {
473                 DMERR("couldn't read lock superblock");
474                 return r;
475         }
476
477         disk_super = dm_block_data(sblock);
478
479         /* Verify the data block size hasn't changed */
480         if (le32_to_cpu(disk_super->data_block_size) != cmd->data_block_size) {
481                 DMERR("changing the data block size (from %u to %llu) is not supported",
482                       le32_to_cpu(disk_super->data_block_size),
483                       (unsigned long long)cmd->data_block_size);
484                 r = -EINVAL;
485                 goto bad;
486         }
487
488         r = __check_incompat_features(disk_super, cmd);
489         if (r < 0)
490                 goto bad;
491
492         r = dm_tm_open_with_sm(cmd->bm, CACHE_SUPERBLOCK_LOCATION,
493                                disk_super->metadata_space_map_root,
494                                sizeof(disk_super->metadata_space_map_root),
495                                &cmd->tm, &cmd->metadata_sm);
496         if (r < 0) {
497                 DMERR("tm_open_with_sm failed");
498                 goto bad;
499         }
500
501         __setup_mapping_info(cmd);
502         dm_disk_bitset_init(cmd->tm, &cmd->dirty_info);
503         dm_disk_bitset_init(cmd->tm, &cmd->discard_info);
504         sb_flags = le32_to_cpu(disk_super->flags);
505         cmd->clean_when_opened = test_bit(CLEAN_SHUTDOWN, &sb_flags);
506         dm_bm_unlock(sblock);
507
508         return 0;
509
510 bad:
511         dm_bm_unlock(sblock);
512         return r;
513 }
514
515 static int __open_or_format_metadata(struct dm_cache_metadata *cmd,
516                                      bool format_device)
517 {
518         int r;
519         bool unformatted = false;
520
521         r = __superblock_all_zeroes(cmd->bm, &unformatted);
522         if (r)
523                 return r;
524
525         if (unformatted)
526                 return format_device ? __format_metadata(cmd) : -EPERM;
527
528         return __open_metadata(cmd);
529 }
530
531 static int __create_persistent_data_objects(struct dm_cache_metadata *cmd,
532                                             bool may_format_device)
533 {
534         int r;
535         cmd->bm = dm_block_manager_create(cmd->bdev, DM_CACHE_METADATA_BLOCK_SIZE << SECTOR_SHIFT,
536                                           CACHE_MAX_CONCURRENT_LOCKS);
537         if (IS_ERR(cmd->bm)) {
538                 DMERR("could not create block manager");
539                 return PTR_ERR(cmd->bm);
540         }
541
542         r = __open_or_format_metadata(cmd, may_format_device);
543         if (r)
544                 dm_block_manager_destroy(cmd->bm);
545
546         return r;
547 }
548
549 static void __destroy_persistent_data_objects(struct dm_cache_metadata *cmd)
550 {
551         dm_sm_destroy(cmd->metadata_sm);
552         dm_tm_destroy(cmd->tm);
553         dm_block_manager_destroy(cmd->bm);
554 }
555
556 typedef unsigned long (*flags_mutator)(unsigned long);
557
558 static void update_flags(struct cache_disk_superblock *disk_super,
559                          flags_mutator mutator)
560 {
561         uint32_t sb_flags = mutator(le32_to_cpu(disk_super->flags));
562         disk_super->flags = cpu_to_le32(sb_flags);
563 }
564
565 static unsigned long set_clean_shutdown(unsigned long flags)
566 {
567         set_bit(CLEAN_SHUTDOWN, &flags);
568         return flags;
569 }
570
571 static unsigned long clear_clean_shutdown(unsigned long flags)
572 {
573         clear_bit(CLEAN_SHUTDOWN, &flags);
574         return flags;
575 }
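
/*
 * Illustrative sketch (hypothetical helper, not used by the driver): a
 * flags_mutator is applied to the on-disk flags word via update_flags().
 * __begin_transaction_flags() and __commit_transaction() below do exactly
 * this when recording whether the last shutdown was clean.
 */
static void __maybe_unused example_apply_mutator(struct cache_disk_superblock *disk_super)
{
        update_flags(disk_super, set_clean_shutdown);
}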
576
577 static void read_superblock_fields(struct dm_cache_metadata *cmd,
578                                    struct cache_disk_superblock *disk_super)
579 {
580         cmd->version = le32_to_cpu(disk_super->version);
581         cmd->flags = le32_to_cpu(disk_super->flags);
582         cmd->root = le64_to_cpu(disk_super->mapping_root);
583         cmd->hint_root = le64_to_cpu(disk_super->hint_root);
584         cmd->discard_root = le64_to_cpu(disk_super->discard_root);
585         cmd->discard_block_size = le64_to_cpu(disk_super->discard_block_size);
586         cmd->discard_nr_blocks = to_dblock(le64_to_cpu(disk_super->discard_nr_blocks));
587         cmd->data_block_size = le32_to_cpu(disk_super->data_block_size);
588         cmd->cache_blocks = to_cblock(le32_to_cpu(disk_super->cache_blocks));
589         strncpy(cmd->policy_name, disk_super->policy_name, sizeof(cmd->policy_name));
590         cmd->policy_version[0] = le32_to_cpu(disk_super->policy_version[0]);
591         cmd->policy_version[1] = le32_to_cpu(disk_super->policy_version[1]);
592         cmd->policy_version[2] = le32_to_cpu(disk_super->policy_version[2]);
593         cmd->policy_hint_size = le32_to_cpu(disk_super->policy_hint_size);
594
595         cmd->stats.read_hits = le32_to_cpu(disk_super->read_hits);
596         cmd->stats.read_misses = le32_to_cpu(disk_super->read_misses);
597         cmd->stats.write_hits = le32_to_cpu(disk_super->write_hits);
598         cmd->stats.write_misses = le32_to_cpu(disk_super->write_misses);
599
600         if (separate_dirty_bits(cmd))
601                 cmd->dirty_root = le64_to_cpu(disk_super->dirty_root);
602
603         cmd->changed = false;
604 }
605
606 /*
607  * The mutator updates the superblock flags.
608  */
609 static int __begin_transaction_flags(struct dm_cache_metadata *cmd,
610                                      flags_mutator mutator)
611 {
612         int r;
613         struct cache_disk_superblock *disk_super;
614         struct dm_block *sblock;
615
616         r = superblock_lock(cmd, &sblock);
617         if (r)
618                 return r;
619
620         disk_super = dm_block_data(sblock);
621         update_flags(disk_super, mutator);
622         read_superblock_fields(cmd, disk_super);
623         dm_bm_unlock(sblock);
624
625         return dm_bm_flush(cmd->bm);
626 }
627
628 static int __begin_transaction(struct dm_cache_metadata *cmd)
629 {
630         int r;
631         struct cache_disk_superblock *disk_super;
632         struct dm_block *sblock;
633
634         /*
635          * We re-read the superblock every time.  Shouldn't need to do this
636          * really.
637          */
638         r = superblock_read_lock(cmd, &sblock);
639         if (r)
640                 return r;
641
642         disk_super = dm_block_data(sblock);
643         read_superblock_fields(cmd, disk_super);
644         dm_bm_unlock(sblock);
645
646         return 0;
647 }
648
649 static int __commit_transaction(struct dm_cache_metadata *cmd,
650                                 flags_mutator mutator)
651 {
652         int r;
653         struct cache_disk_superblock *disk_super;
654         struct dm_block *sblock;
655
656         /*
657          * The cache_disk_superblock must not exceed a 512-byte sector.
658          */
659         BUILD_BUG_ON(sizeof(struct cache_disk_superblock) > 512);
660
661         if (separate_dirty_bits(cmd)) {
662                 r = dm_bitset_flush(&cmd->dirty_info, cmd->dirty_root,
663                                     &cmd->dirty_root);
664                 if (r)
665                         return r;
666         }
667
668         r = dm_bitset_flush(&cmd->discard_info, cmd->discard_root,
669                             &cmd->discard_root);
670         if (r)
671                 return r;
672
673         r = dm_tm_pre_commit(cmd->tm);
674         if (r < 0)
675                 return r;
676
677         r = __save_sm_root(cmd);
678         if (r)
679                 return r;
680
681         r = superblock_lock(cmd, &sblock);
682         if (r)
683                 return r;
684
685         disk_super = dm_block_data(sblock);
686
687         disk_super->flags = cpu_to_le32(cmd->flags);
688         if (mutator)
689                 update_flags(disk_super, mutator);
690
691         disk_super->mapping_root = cpu_to_le64(cmd->root);
692         if (separate_dirty_bits(cmd))
693                 disk_super->dirty_root = cpu_to_le64(cmd->dirty_root);
694         disk_super->hint_root = cpu_to_le64(cmd->hint_root);
695         disk_super->discard_root = cpu_to_le64(cmd->discard_root);
696         disk_super->discard_block_size = cpu_to_le64(cmd->discard_block_size);
697         disk_super->discard_nr_blocks = cpu_to_le64(from_dblock(cmd->discard_nr_blocks));
698         disk_super->cache_blocks = cpu_to_le32(from_cblock(cmd->cache_blocks));
699         strncpy(disk_super->policy_name, cmd->policy_name, sizeof(disk_super->policy_name));
700         disk_super->policy_version[0] = cpu_to_le32(cmd->policy_version[0]);
701         disk_super->policy_version[1] = cpu_to_le32(cmd->policy_version[1]);
702         disk_super->policy_version[2] = cpu_to_le32(cmd->policy_version[2]);
703
704         disk_super->read_hits = cpu_to_le32(cmd->stats.read_hits);
705         disk_super->read_misses = cpu_to_le32(cmd->stats.read_misses);
706         disk_super->write_hits = cpu_to_le32(cmd->stats.write_hits);
707         disk_super->write_misses = cpu_to_le32(cmd->stats.write_misses);
708         __copy_sm_root(cmd, disk_super);
709
710         return dm_tm_commit(cmd->tm, sblock);
711 }
712
713 /*----------------------------------------------------------------*/
714
715 /*
716  * The mappings are held in a dm-array that has 64-bit values stored in
717  * little-endian format.  The index is the cblock, the high 48 bits of the
718  * value are the oblock and the low 16 bits are the flags.
719  */
720 #define FLAGS_MASK ((1 << 16) - 1)
721
722 static __le64 pack_value(dm_oblock_t block, unsigned flags)
723 {
724         uint64_t value = from_oblock(block);
725         value <<= 16;
726         value = value | (flags & FLAGS_MASK);
727         return cpu_to_le64(value);
728 }
729
730 static void unpack_value(__le64 value_le, dm_oblock_t *block, unsigned *flags)
731 {
732         uint64_t value = le64_to_cpu(value_le);
733         uint64_t b = value >> 16;
734         *block = to_oblock(b);
735         *flags = value & FLAGS_MASK;
736 }
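
/*
 * Illustrative sketch of the packing scheme described above (hypothetical
 * helper, not called by the driver; the oblock 1234 and the flag
 * combination are arbitrary).  A mapping survives a pack_value() /
 * unpack_value() round trip provided the oblock fits in 48 bits.
 */
static void __maybe_unused example_pack_unpack(void)
{
        dm_oblock_t ob;
        unsigned flags;
        __le64 value = pack_value(to_oblock(1234), M_VALID | M_DIRTY);

        unpack_value(value, &ob, &flags);
        /* Here from_oblock(ob) == 1234 and flags == (M_VALID | M_DIRTY). */
}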
737
738 /*----------------------------------------------------------------*/
739
740 static struct dm_cache_metadata *metadata_open(struct block_device *bdev,
741                                                sector_t data_block_size,
742                                                bool may_format_device,
743                                                size_t policy_hint_size,
744                                                unsigned metadata_version)
745 {
746         int r;
747         struct dm_cache_metadata *cmd;
748
749         cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
750         if (!cmd) {
751                 DMERR("could not allocate metadata struct");
752                 return ERR_PTR(-ENOMEM);
753         }
754
755         cmd->version = metadata_version;
756         atomic_set(&cmd->ref_count, 1);
757         init_rwsem(&cmd->root_lock);
758         cmd->bdev = bdev;
759         cmd->data_block_size = data_block_size;
760         cmd->cache_blocks = 0;
761         cmd->policy_hint_size = policy_hint_size;
762         cmd->changed = true;
763         cmd->fail_io = false;
764
765         r = __create_persistent_data_objects(cmd, may_format_device);
766         if (r) {
767                 kfree(cmd);
768                 return ERR_PTR(r);
769         }
770
771         r = __begin_transaction_flags(cmd, clear_clean_shutdown);
772         if (r < 0) {
773                 dm_cache_metadata_close(cmd);
774                 return ERR_PTR(r);
775         }
776
777         return cmd;
778 }
779
780 /*
781  * We keep a little list of ref-counted metadata objects to prevent two
782  * different target instances from creating separate bufio instances.  This
783  * is an issue if a table is reloaded before the suspend.
784  */
785 static DEFINE_MUTEX(table_lock);
786 static LIST_HEAD(table);
787
788 static struct dm_cache_metadata *lookup(struct block_device *bdev)
789 {
790         struct dm_cache_metadata *cmd;
791
792         list_for_each_entry(cmd, &table, list)
793                 if (cmd->bdev == bdev) {
794                         atomic_inc(&cmd->ref_count);
795                         return cmd;
796                 }
797
798         return NULL;
799 }
800
801 static struct dm_cache_metadata *lookup_or_open(struct block_device *bdev,
802                                                 sector_t data_block_size,
803                                                 bool may_format_device,
804                                                 size_t policy_hint_size,
805                                                 unsigned metadata_version)
806 {
807         struct dm_cache_metadata *cmd, *cmd2;
808
809         mutex_lock(&table_lock);
810         cmd = lookup(bdev);
811         mutex_unlock(&table_lock);
812
813         if (cmd)
814                 return cmd;
815
816         cmd = metadata_open(bdev, data_block_size, may_format_device,
817                             policy_hint_size, metadata_version);
818         if (!IS_ERR(cmd)) {
819                 mutex_lock(&table_lock);
820                 cmd2 = lookup(bdev);
821                 if (cmd2) {
822                         mutex_unlock(&table_lock);
823                         __destroy_persistent_data_objects(cmd);
824                         kfree(cmd);
825                         return cmd2;
826                 }
827                 list_add(&cmd->list, &table);
828                 mutex_unlock(&table_lock);
829         }
830
831         return cmd;
832 }
833
834 static bool same_params(struct dm_cache_metadata *cmd, sector_t data_block_size)
835 {
836         if (cmd->data_block_size != data_block_size) {
837                 DMERR("data_block_size (%llu) different from that in metadata (%llu)",
838                       (unsigned long long) data_block_size,
839                       (unsigned long long) cmd->data_block_size);
840                 return false;
841         }
842
843         return true;
844 }
845
846 struct dm_cache_metadata *dm_cache_metadata_open(struct block_device *bdev,
847                                                  sector_t data_block_size,
848                                                  bool may_format_device,
849                                                  size_t policy_hint_size,
850                                                  unsigned metadata_version)
851 {
852         struct dm_cache_metadata *cmd = lookup_or_open(bdev, data_block_size, may_format_device,
853                                                        policy_hint_size, metadata_version);
854
855         if (!IS_ERR(cmd) && !same_params(cmd, data_block_size)) {
856                 dm_cache_metadata_close(cmd);
857                 return ERR_PTR(-EINVAL);
858         }
859
860         return cmd;
861 }
862
863 void dm_cache_metadata_close(struct dm_cache_metadata *cmd)
864 {
865         if (atomic_dec_and_test(&cmd->ref_count)) {
866                 mutex_lock(&table_lock);
867                 list_del(&cmd->list);
868                 mutex_unlock(&table_lock);
869
870                 if (!cmd->fail_io)
871                         __destroy_persistent_data_objects(cmd);
872                 kfree(cmd);
873         }
874 }
875
876 /*
877  * Checks that the given cache block is either unmapped or clean.
878  */
879 static int block_clean_combined_dirty(struct dm_cache_metadata *cmd, dm_cblock_t b,
880                                       bool *result)
881 {
882         int r;
883         __le64 value;
884         dm_oblock_t ob;
885         unsigned flags;
886
887         r = dm_array_get_value(&cmd->info, cmd->root, from_cblock(b), &value);
888         if (r)
889                 return r;
890
891         unpack_value(value, &ob, &flags);
892         *result = !((flags & M_VALID) && (flags & M_DIRTY));
893
894         return 0;
895 }
896
897 static int blocks_are_clean_combined_dirty(struct dm_cache_metadata *cmd,
898                                            dm_cblock_t begin, dm_cblock_t end,
899                                            bool *result)
900 {
901         int r;
902         *result = true;
903
904         while (begin != end) {
905                 r = block_clean_combined_dirty(cmd, begin, result);
906                 if (r) {
907                         DMERR("block_clean_combined_dirty failed");
908                         return r;
909                 }
910
911                 if (!*result) {
912                         DMERR("cache block %llu is dirty",
913                               (unsigned long long) from_cblock(begin));
914                         return 0;
915                 }
916
917                 begin = to_cblock(from_cblock(begin) + 1);
918         }
919
920         return 0;
921 }
922
923 static int blocks_are_clean_separate_dirty(struct dm_cache_metadata *cmd,
924                                            dm_cblock_t begin, dm_cblock_t end,
925                                            bool *result)
926 {
927         int r;
928         bool dirty_flag;
929         *result = true;
930
931         r = dm_bitset_cursor_begin(&cmd->dirty_info, cmd->dirty_root,
932                                    from_cblock(cmd->cache_blocks), &cmd->dirty_cursor);
933         if (r) {
934                 DMERR("%s: dm_bitset_cursor_begin for dirty failed", __func__);
935                 return r;
936         }
937
938         r = dm_bitset_cursor_skip(&cmd->dirty_cursor, from_cblock(begin));
939         if (r) {
940                 DMERR("%s: dm_bitset_cursor_skip for dirty failed", __func__);
941                 dm_bitset_cursor_end(&cmd->dirty_cursor);
942                 return r;
943         }
944
945         while (begin != end) {
946                 /*
947                  * We assume that unmapped blocks have their dirty bit
948                  * cleared.
949                  */
950                 dirty_flag = dm_bitset_cursor_get_value(&cmd->dirty_cursor);
951                 if (dirty_flag) {
952                         DMERR("%s: cache block %llu is dirty", __func__,
953                               (unsigned long long) from_cblock(begin));
954                         dm_bitset_cursor_end(&cmd->dirty_cursor);
955                         *result = false;
956                         return 0;
957                 }
958
959                 begin = to_cblock(from_cblock(begin) + 1);
960                 if (begin == end)
961                         break;
962
963                 r = dm_bitset_cursor_next(&cmd->dirty_cursor);
964                 if (r) {
965                         DMERR("%s: dm_bitset_cursor_next for dirty failed", __func__);
966                         dm_bitset_cursor_end(&cmd->dirty_cursor);
967                         return r;
968                 }
969         }
970
971         dm_bitset_cursor_end(&cmd->dirty_cursor);
972
973         return 0;
974 }
975
976 static int blocks_are_unmapped_or_clean(struct dm_cache_metadata *cmd,
977                                         dm_cblock_t begin, dm_cblock_t end,
978                                         bool *result)
979 {
980         if (separate_dirty_bits(cmd))
981                 return blocks_are_clean_separate_dirty(cmd, begin, end, result);
982         else
983                 return blocks_are_clean_combined_dirty(cmd, begin, end, result);
984 }
985
986 static bool cmd_write_lock(struct dm_cache_metadata *cmd)
987 {
988         down_write(&cmd->root_lock);
989         if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) {
990                 up_write(&cmd->root_lock);
991                 return false;
992         }
993         return true;
994 }
995
996 #define WRITE_LOCK(cmd)                         \
997         do {                                    \
998                 if (!cmd_write_lock((cmd)))     \
999                         return -EINVAL;         \
1000         } while(0)
1001
1002 #define WRITE_LOCK_VOID(cmd)                    \
1003         do {                                    \
1004                 if (!cmd_write_lock((cmd)))     \
1005                         return;                 \
1006         } while(0)
1007
1008 #define WRITE_UNLOCK(cmd) \
1009         up_write(&(cmd)->root_lock)
1010
1011 static bool cmd_read_lock(struct dm_cache_metadata *cmd)
1012 {
1013         down_read(&cmd->root_lock);
1014         if (cmd->fail_io) {
1015                 up_read(&cmd->root_lock);
1016                 return false;
1017         }
1018         return true;
1019 }
1020
1021 #define READ_LOCK(cmd)                          \
1022         do {                                    \
1023                 if (!cmd_read_lock((cmd)))      \
1024                         return -EINVAL;         \
1025         } while(0)
1026
1027 #define READ_LOCK_VOID(cmd)                     \
1028         do {                                    \
1029                 if (!cmd_read_lock((cmd)))      \
1030                         return;                 \
1031         } while(0)
1032
1033 #define READ_UNLOCK(cmd) \
1034         up_read(&(cmd)->root_lock)
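
/*
 * Usage sketch for the locking macros above.  This is illustrative only:
 * example_locked_op() is a hypothetical name and is not called by the
 * driver.  Note that WRITE_LOCK()/READ_LOCK() return -EINVAL from the
 * *calling* function when the device is in fail_io mode (or, for writes,
 * when the block manager has been switched to read-only).
 * dm_cache_resize() below follows the same pattern.
 */
static int __maybe_unused example_locked_op(struct dm_cache_metadata *cmd)
{
        int r;

        WRITE_LOCK(cmd);        /* may return -EINVAL on behalf of this function */
        r = 0;                  /* a real operation would update the metadata here */
        WRITE_UNLOCK(cmd);

        return r;
}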
1035
1036 int dm_cache_resize(struct dm_cache_metadata *cmd, dm_cblock_t new_cache_size)
1037 {
1038         int r;
1039         bool clean;
1040         __le64 null_mapping = pack_value(0, 0);
1041
1042         WRITE_LOCK(cmd);
1043         __dm_bless_for_disk(&null_mapping);
1044
1045         if (from_cblock(new_cache_size) < from_cblock(cmd->cache_blocks)) {
1046                 r = blocks_are_unmapped_or_clean(cmd, new_cache_size, cmd->cache_blocks, &clean);
1047                 if (r) {
1048                         __dm_unbless_for_disk(&null_mapping);
1049                         goto out;
1050                 }
1051
1052                 if (!clean) {
1053                         DMERR("unable to shrink cache due to dirty blocks");
1054                         r = -EINVAL;
1055                         __dm_unbless_for_disk(&null_mapping);
1056                         goto out;
1057                 }
1058         }
1059
1060         r = dm_array_resize(&cmd->info, cmd->root, from_cblock(cmd->cache_blocks),
1061                             from_cblock(new_cache_size),
1062                             &null_mapping, &cmd->root);
1063         if (r)
1064                 goto out;
1065
1066         if (separate_dirty_bits(cmd)) {
1067                 r = dm_bitset_resize(&cmd->dirty_info, cmd->dirty_root,
1068                                      from_cblock(cmd->cache_blocks), from_cblock(new_cache_size),
1069                                      false, &cmd->dirty_root);
1070                 if (r)
1071                         goto out;
1072         }
1073
1074         cmd->cache_blocks = new_cache_size;
1075         cmd->changed = true;
1076
1077 out:
1078         WRITE_UNLOCK(cmd);
1079
1080         return r;
1081 }
1082
1083 int dm_cache_discard_bitset_resize(struct dm_cache_metadata *cmd,
1084                                    sector_t discard_block_size,
1085                                    dm_dblock_t new_nr_entries)
1086 {
1087         int r;
1088
1089         WRITE_LOCK(cmd);
1090         r = dm_bitset_resize(&cmd->discard_info,
1091                              cmd->discard_root,
1092                              from_dblock(cmd->discard_nr_blocks),
1093                              from_dblock(new_nr_entries),
1094                              false, &cmd->discard_root);
1095         if (!r) {
1096                 cmd->discard_block_size = discard_block_size;
1097                 cmd->discard_nr_blocks = new_nr_entries;
1098         }
1099
1100         cmd->changed = true;
1101         WRITE_UNLOCK(cmd);
1102
1103         return r;
1104 }
1105
1106 static int __set_discard(struct dm_cache_metadata *cmd, dm_dblock_t b)
1107 {
1108         return dm_bitset_set_bit(&cmd->discard_info, cmd->discard_root,
1109                                  from_dblock(b), &cmd->discard_root);
1110 }
1111
1112 static int __clear_discard(struct dm_cache_metadata *cmd, dm_dblock_t b)
1113 {
1114         return dm_bitset_clear_bit(&cmd->discard_info, cmd->discard_root,
1115                                    from_dblock(b), &cmd->discard_root);
1116 }
1117
1118 static int __discard(struct dm_cache_metadata *cmd,
1119                      dm_dblock_t dblock, bool discard)
1120 {
1121         int r;
1122
1123         r = (discard ? __set_discard : __clear_discard)(cmd, dblock);
1124         if (r)
1125                 return r;
1126
1127         cmd->changed = true;
1128         return 0;
1129 }
1130
1131 int dm_cache_set_discard(struct dm_cache_metadata *cmd,
1132                          dm_dblock_t dblock, bool discard)
1133 {
1134         int r;
1135
1136         WRITE_LOCK(cmd);
1137         r = __discard(cmd, dblock, discard);
1138         WRITE_UNLOCK(cmd);
1139
1140         return r;
1141 }
1142
1143 static int __load_discards(struct dm_cache_metadata *cmd,
1144                            load_discard_fn fn, void *context)
1145 {
1146         int r = 0;
1147         uint32_t b;
1148         struct dm_bitset_cursor c;
1149
1150         if (from_dblock(cmd->discard_nr_blocks) == 0)
1151                 /* nothing to do */
1152                 return 0;
1153
1154         if (cmd->clean_when_opened) {
1155                 r = dm_bitset_flush(&cmd->discard_info, cmd->discard_root, &cmd->discard_root);
1156                 if (r)
1157                         return r;
1158
1159                 r = dm_bitset_cursor_begin(&cmd->discard_info, cmd->discard_root,
1160                                            from_dblock(cmd->discard_nr_blocks), &c);
1161                 if (r)
1162                         return r;
1163
1164                 for (b = 0; b < from_dblock(cmd->discard_nr_blocks); b++) {
1165                         r = fn(context, cmd->discard_block_size, to_dblock(b),
1166                                dm_bitset_cursor_get_value(&c));
1167                         if (r)
1168                                 break;
1169                 }
1170
1171                 dm_bitset_cursor_end(&c);
1172
1173         } else {
1174                 for (b = 0; b < from_dblock(cmd->discard_nr_blocks); b++) {
1175                         r = fn(context, cmd->discard_block_size, to_dblock(b), false);
1176                         if (r)
1177                                 return r;
1178                 }
1179         }
1180
1181         return r;
1182 }
1183
1184 int dm_cache_load_discards(struct dm_cache_metadata *cmd,
1185                            load_discard_fn fn, void *context)
1186 {
1187         int r;
1188
1189         READ_LOCK(cmd);
1190         r = __load_discards(cmd, fn, context);
1191         READ_UNLOCK(cmd);
1192
1193         return r;
1194 }
1195
1196 int dm_cache_size(struct dm_cache_metadata *cmd, dm_cblock_t *result)
1197 {
1198         READ_LOCK(cmd);
1199         *result = cmd->cache_blocks;
1200         READ_UNLOCK(cmd);
1201
1202         return 0;
1203 }
1204
1205 static int __remove(struct dm_cache_metadata *cmd, dm_cblock_t cblock)
1206 {
1207         int r;
1208         __le64 value = pack_value(0, 0);
1209
1210         __dm_bless_for_disk(&value);
1211         r = dm_array_set_value(&cmd->info, cmd->root, from_cblock(cblock),
1212                                &value, &cmd->root);
1213         if (r)
1214                 return r;
1215
1216         cmd->changed = true;
1217         return 0;
1218 }
1219
1220 int dm_cache_remove_mapping(struct dm_cache_metadata *cmd, dm_cblock_t cblock)
1221 {
1222         int r;
1223
1224         WRITE_LOCK(cmd);
1225         r = __remove(cmd, cblock);
1226         WRITE_UNLOCK(cmd);
1227
1228         return r;
1229 }
1230
1231 static int __insert(struct dm_cache_metadata *cmd,
1232                     dm_cblock_t cblock, dm_oblock_t oblock)
1233 {
1234         int r;
1235         __le64 value = pack_value(oblock, M_VALID);
1236         __dm_bless_for_disk(&value);
1237
1238         r = dm_array_set_value(&cmd->info, cmd->root, from_cblock(cblock),
1239                                &value, &cmd->root);
1240         if (r)
1241                 return r;
1242
1243         cmd->changed = true;
1244         return 0;
1245 }
1246
1247 int dm_cache_insert_mapping(struct dm_cache_metadata *cmd,
1248                             dm_cblock_t cblock, dm_oblock_t oblock)
1249 {
1250         int r;
1251
1252         WRITE_LOCK(cmd);
1253         r = __insert(cmd, cblock, oblock);
1254         WRITE_UNLOCK(cmd);
1255
1256         return r;
1257 }
1258
1259 struct thunk {
1260         load_mapping_fn fn;
1261         void *context;
1262
1263         struct dm_cache_metadata *cmd;
1264         bool respect_dirty_flags;
1265         bool hints_valid;
1266 };
1267
1268 static bool policy_unchanged(struct dm_cache_metadata *cmd,
1269                              struct dm_cache_policy *policy)
1270 {
1271         const char *policy_name = dm_cache_policy_get_name(policy);
1272         const unsigned *policy_version = dm_cache_policy_get_version(policy);
1273         size_t policy_hint_size = dm_cache_policy_get_hint_size(policy);
1274
1275         /*
1276          * Ensure policy names match.
1277          */
1278         if (strncmp(cmd->policy_name, policy_name, sizeof(cmd->policy_name)))
1279                 return false;
1280
1281         /*
1282          * Ensure policy major versions match.
1283          */
1284         if (cmd->policy_version[0] != policy_version[0])
1285                 return false;
1286
1287         /*
1288          * Ensure policy hint sizes match.
1289          */
1290         if (cmd->policy_hint_size != policy_hint_size)
1291                 return false;
1292
1293         return true;
1294 }
1295
1296 static bool hints_array_initialized(struct dm_cache_metadata *cmd)
1297 {
1298         return cmd->hint_root && cmd->policy_hint_size;
1299 }
1300
1301 static bool hints_array_available(struct dm_cache_metadata *cmd,
1302                                   struct dm_cache_policy *policy)
1303 {
1304         return cmd->clean_when_opened && policy_unchanged(cmd, policy) &&
1305                 hints_array_initialized(cmd);
1306 }
1307
1308 static int __load_mapping_v1(struct dm_cache_metadata *cmd,
1309                              uint64_t cb, bool hints_valid,
1310                              struct dm_array_cursor *mapping_cursor,
1311                              struct dm_array_cursor *hint_cursor,
1312                              load_mapping_fn fn, void *context)
1313 {
1314         int r = 0;
1315
1316         __le64 mapping;
1317         __le32 hint = 0;
1318
1319         __le64 *mapping_value_le;
1320         __le32 *hint_value_le;
1321
1322         dm_oblock_t oblock;
1323         unsigned flags;
1324
1325         dm_array_cursor_get_value(mapping_cursor, (void **) &mapping_value_le);
1326         memcpy(&mapping, mapping_value_le, sizeof(mapping));
1327         unpack_value(mapping, &oblock, &flags);
1328
1329         if (flags & M_VALID) {
1330                 if (hints_valid) {
1331                         dm_array_cursor_get_value(hint_cursor, (void **) &hint_value_le);
1332                         memcpy(&hint, hint_value_le, sizeof(hint));
1333                 }
1334
1335                 r = fn(context, oblock, to_cblock(cb), flags & M_DIRTY,
1336                        le32_to_cpu(hint), hints_valid);
1337                 if (r) {
1338                         DMERR("policy couldn't load cache block %llu",
1339                               (unsigned long long) from_cblock(to_cblock(cb)));
1340                 }
1341         }
1342
1343         return r;
1344 }
1345
1346 static int __load_mapping_v2(struct dm_cache_metadata *cmd,
1347                              uint64_t cb, bool hints_valid,
1348                              struct dm_array_cursor *mapping_cursor,
1349                              struct dm_array_cursor *hint_cursor,
1350                              struct dm_bitset_cursor *dirty_cursor,
1351                              load_mapping_fn fn, void *context)
1352 {
1353         int r = 0;
1354
1355         __le64 mapping;
1356         __le32 hint = 0;
1357
1358         __le64 *mapping_value_le;
1359         __le32 *hint_value_le;
1360
1361         dm_oblock_t oblock;
1362         unsigned flags;
1363         bool dirty;
1364
1365         dm_array_cursor_get_value(mapping_cursor, (void **) &mapping_value_le);
1366         memcpy(&mapping, mapping_value_le, sizeof(mapping));
1367         unpack_value(mapping, &oblock, &flags);
1368
1369         if (flags & M_VALID) {
1370                 if (hints_valid) {
1371                         dm_array_cursor_get_value(hint_cursor, (void **) &hint_value_le);
1372                         memcpy(&hint, hint_value_le, sizeof(hint));
1373                 }
1374
1375                 dirty = dm_bitset_cursor_get_value(dirty_cursor);
1376                 r = fn(context, oblock, to_cblock(cb), dirty,
1377                        le32_to_cpu(hint), hints_valid);
1378                 if (r) {
1379                         DMERR("policy couldn't load cache block %llu",
1380                               (unsigned long long) from_cblock(to_cblock(cb)));
1381                 }
1382         }
1383
1384         return r;
1385 }
1386
1387 static int __load_mappings(struct dm_cache_metadata *cmd,
1388                            struct dm_cache_policy *policy,
1389                            load_mapping_fn fn, void *context)
1390 {
1391         int r;
1392         uint64_t cb;
1393
1394         bool hints_valid = hints_array_available(cmd, policy);
1395
1396         if (from_cblock(cmd->cache_blocks) == 0)
1397                 /* Nothing to do */
1398                 return 0;
1399
1400         r = dm_array_cursor_begin(&cmd->info, cmd->root, &cmd->mapping_cursor);
1401         if (r)
1402                 return r;
1403
1404         if (hints_valid) {
1405                 r = dm_array_cursor_begin(&cmd->hint_info, cmd->hint_root, &cmd->hint_cursor);
1406                 if (r) {
1407                         dm_array_cursor_end(&cmd->mapping_cursor);
1408                         return r;
1409                 }
1410         }
1411
1412         if (separate_dirty_bits(cmd)) {
1413                 r = dm_bitset_cursor_begin(&cmd->dirty_info, cmd->dirty_root,
1414                                            from_cblock(cmd->cache_blocks),
1415                                            &cmd->dirty_cursor);
1416                 if (r) {
1417                         dm_array_cursor_end(&cmd->hint_cursor);
1418                         dm_array_cursor_end(&cmd->mapping_cursor);
1419                         return r;
1420                 }
1421         }
1422
1423         for (cb = 0; ; cb++) {
1424                 if (separate_dirty_bits(cmd))
1425                         r = __load_mapping_v2(cmd, cb, hints_valid,
1426                                               &cmd->mapping_cursor,
1427                                               &cmd->hint_cursor,
1428                                               &cmd->dirty_cursor,
1429                                               fn, context);
1430                 else
1431                         r = __load_mapping_v1(cmd, cb, hints_valid,
1432                                               &cmd->mapping_cursor, &cmd->hint_cursor,
1433                                               fn, context);
1434                 if (r)
1435                         goto out;
1436
1437                 /*
1438                  * We need to break out before we move the cursors.
1439                  */
1440                 if (cb >= (from_cblock(cmd->cache_blocks) - 1))
1441                         break;
1442
1443                 r = dm_array_cursor_next(&cmd->mapping_cursor);
1444                 if (r) {
1445                         DMERR("dm_array_cursor_next for mapping failed");
1446                         goto out;
1447                 }
1448
1449                 if (hints_valid) {
1450                         r = dm_array_cursor_next(&cmd->hint_cursor);
1451                         if (r) {
1452                                 DMERR("dm_array_cursor_next for hint failed");
1453                                 goto out;
1454                         }
1455                 }
1456
1457                 if (separate_dirty_bits(cmd)) {
1458                         r = dm_bitset_cursor_next(&cmd->dirty_cursor);
1459                         if (r) {
1460                                 DMERR("dm_bitset_cursor_next for dirty failed");
1461                                 goto out;
1462                         }
1463                 }
1464         }
1465 out:
1466         dm_array_cursor_end(&cmd->mapping_cursor);
1467         if (hints_valid)
1468                 dm_array_cursor_end(&cmd->hint_cursor);
1469
1470         if (separate_dirty_bits(cmd))
1471                 dm_bitset_cursor_end(&cmd->dirty_cursor);
1472
1473         return r;
1474 }
1475
1476 int dm_cache_load_mappings(struct dm_cache_metadata *cmd,
1477                            struct dm_cache_policy *policy,
1478                            load_mapping_fn fn, void *context)
1479 {
1480         int r;
1481
1482         READ_LOCK(cmd);
1483         r = __load_mappings(cmd, policy, fn, context);
1484         READ_UNLOCK(cmd);
1485
1486         return r;
1487 }
1488
1489 static int __dump_mapping(void *context, uint64_t cblock, void *leaf)
1490 {
1491         int r = 0;
1492         __le64 value;
1493         dm_oblock_t oblock;
1494         unsigned flags;
1495
1496         memcpy(&value, leaf, sizeof(value));
1497         unpack_value(value, &oblock, &flags);
1498
1499         return r;
1500 }
1501
1502 static int __dump_mappings(struct dm_cache_metadata *cmd)
1503 {
1504         return dm_array_walk(&cmd->info, cmd->root, __dump_mapping, NULL);
1505 }
1506
1507 void dm_cache_dump(struct dm_cache_metadata *cmd)
1508 {
1509         READ_LOCK_VOID(cmd);
1510         __dump_mappings(cmd);
1511         READ_UNLOCK(cmd);
1512 }
1513
1514 int dm_cache_changed_this_transaction(struct dm_cache_metadata *cmd)
1515 {
1516         int r;
1517
1518         READ_LOCK(cmd);
1519         r = cmd->changed;
1520         READ_UNLOCK(cmd);
1521
1522         return r;
1523 }
1524
1525 static int __dirty(struct dm_cache_metadata *cmd, dm_cblock_t cblock, bool dirty)
1526 {
1527         int r;
1528         unsigned flags;
1529         dm_oblock_t oblock;
1530         __le64 value;
1531
1532         r = dm_array_get_value(&cmd->info, cmd->root, from_cblock(cblock), &value);
1533         if (r)
1534                 return r;
1535
1536         unpack_value(value, &oblock, &flags);
1537
1538         if (((flags & M_DIRTY) && dirty) || (!(flags & M_DIRTY) && !dirty))
1539                 /* nothing to be done */
1540                 return 0;
1541
1542         value = pack_value(oblock, (flags & ~M_DIRTY) | (dirty ? M_DIRTY : 0));
1543         __dm_bless_for_disk(&value);
1544
1545         r = dm_array_set_value(&cmd->info, cmd->root, from_cblock(cblock),
1546                                &value, &cmd->root);
1547         if (r)
1548                 return r;
1549
1550         cmd->changed = true;
1551         return 0;
1552
1553 }
1554
1555 static int __set_dirty_bits_v1(struct dm_cache_metadata *cmd, unsigned nr_bits, unsigned long *bits)
1556 {
1557         int r;
1558         unsigned i;
1559         for (i = 0; i < nr_bits; i++) {
1560                 r = __dirty(cmd, to_cblock(i), test_bit(i, bits));
1561                 if (r)
1562                         return r;
1563         }
1564
1565         return 0;
1566 }
1567
1568 static int is_dirty_callback(uint32_t index, bool *value, void *context)
1569 {
1570         unsigned long *bits = context;
1571         *value = test_bit(index, bits);
1572         return 0;
1573 }
1574
1575 static int __set_dirty_bits_v2(struct dm_cache_metadata *cmd, unsigned nr_bits, unsigned long *bits)
1576 {
1577         int r = 0;
1578
1579         /* nr_bits is really just a sanity check */
1580         if (nr_bits != from_cblock(cmd->cache_blocks)) {
1581                 DMERR("dirty bitset is wrong size");
1582                 return -EINVAL;
1583         }
1584
1585         r = dm_bitset_del(&cmd->dirty_info, cmd->dirty_root);
1586         if (r)
1587                 return r;
1588
1589         cmd->changed = true;
1590         return dm_bitset_new(&cmd->dirty_info, &cmd->dirty_root, nr_bits, is_dirty_callback, bits);
1591 }
1592
1593 int dm_cache_set_dirty_bits(struct dm_cache_metadata *cmd,
1594                             unsigned nr_bits,
1595                             unsigned long *bits)
1596 {
1597         int r;
1598
1599         WRITE_LOCK(cmd);
1600         if (separate_dirty_bits(cmd))
1601                 r = __set_dirty_bits_v2(cmd, nr_bits, bits);
1602         else
1603                 r = __set_dirty_bits_v1(cmd, nr_bits, bits);
1604         WRITE_UNLOCK(cmd);
1605
1606         return r;
1607 }
1608
1609 void dm_cache_metadata_get_stats(struct dm_cache_metadata *cmd,
1610                                  struct dm_cache_statistics *stats)
1611 {
1612         READ_LOCK_VOID(cmd);
1613         *stats = cmd->stats;
1614         READ_UNLOCK(cmd);
1615 }
1616
1617 void dm_cache_metadata_set_stats(struct dm_cache_metadata *cmd,
1618                                  struct dm_cache_statistics *stats)
1619 {
1620         WRITE_LOCK_VOID(cmd);
1621         cmd->stats = *stats;
1622         WRITE_UNLOCK(cmd);
1623 }
1624
1625 int dm_cache_commit(struct dm_cache_metadata *cmd, bool clean_shutdown)
1626 {
1627         int r = -EINVAL;
1628         flags_mutator mutator = (clean_shutdown ? set_clean_shutdown :
1629                                  clear_clean_shutdown);
1630
1631         WRITE_LOCK(cmd);
1632         if (cmd->fail_io)
1633                 goto out;
1634
1635         r = __commit_transaction(cmd, mutator);
1636         if (r)
1637                 goto out;
1638
1639         r = __begin_transaction(cmd);
1640 out:
1641         WRITE_UNLOCK(cmd);
1642         return r;
1643 }
1644
1645 int dm_cache_get_free_metadata_block_count(struct dm_cache_metadata *cmd,
1646                                            dm_block_t *result)
1647 {
1648         int r = -EINVAL;
1649
1650         READ_LOCK(cmd);
1651         if (!cmd->fail_io)
1652                 r = dm_sm_get_nr_free(cmd->metadata_sm, result);
1653         READ_UNLOCK(cmd);
1654
1655         return r;
1656 }
1657
1658 int dm_cache_get_metadata_dev_size(struct dm_cache_metadata *cmd,
1659                                    dm_block_t *result)
1660 {
1661         int r = -EINVAL;
1662
1663         READ_LOCK(cmd);
1664         if (!cmd->fail_io)
1665                 r = dm_sm_get_nr_blocks(cmd->metadata_sm, result);
1666         READ_UNLOCK(cmd);
1667
1668         return r;
1669 }
1670
1671 /*----------------------------------------------------------------*/
1672
1673 static int get_hint(uint32_t index, void *value_le, void *context)
1674 {
1675         uint32_t value;
1676         struct dm_cache_policy *policy = context;
1677
1678         value = policy_get_hint(policy, to_cblock(index));
1679         *((__le32 *) value_le) = cpu_to_le32(value);
1680
1681         return 0;
1682 }
1683
1684 /*
1685  * It's quicker to always delete the hint array and recreate it with
1686  * dm_array_new().
1687  */
1688 static int write_hints(struct dm_cache_metadata *cmd, struct dm_cache_policy *policy)
1689 {
1690         int r;
1691         size_t hint_size;
1692         const char *policy_name = dm_cache_policy_get_name(policy);
1693         const unsigned *policy_version = dm_cache_policy_get_version(policy);
1694
1695         if (!policy_name[0] ||
1696             (strlen(policy_name) > sizeof(cmd->policy_name) - 1))
1697                 return -EINVAL;
1698
1699         strncpy(cmd->policy_name, policy_name, sizeof(cmd->policy_name));
1700         memcpy(cmd->policy_version, policy_version, sizeof(cmd->policy_version));
1701
1702         hint_size = dm_cache_policy_get_hint_size(policy);
1703         if (!hint_size)
1704                 return 0; /* short-circuit hints initialization */
1705         cmd->policy_hint_size = hint_size;
1706
1707         if (cmd->hint_root) {
1708                 r = dm_array_del(&cmd->hint_info, cmd->hint_root);
1709                 if (r)
1710                         return r;
1711         }
1712
1713         return dm_array_new(&cmd->hint_info, &cmd->hint_root,
1714                             from_cblock(cmd->cache_blocks),
1715                             get_hint, policy);
1716 }
1717
1718 int dm_cache_write_hints(struct dm_cache_metadata *cmd, struct dm_cache_policy *policy)
1719 {
1720         int r;
1721
1722         WRITE_LOCK(cmd);
1723         r = write_hints(cmd, policy);
1724         WRITE_UNLOCK(cmd);
1725
1726         return r;
1727 }
1728
1729 int dm_cache_metadata_all_clean(struct dm_cache_metadata *cmd, bool *result)
1730 {
1731         int r;
1732
1733         READ_LOCK(cmd);
1734         r = blocks_are_unmapped_or_clean(cmd, 0, cmd->cache_blocks, result);
1735         READ_UNLOCK(cmd);
1736
1737         return r;
1738 }
1739
1740 void dm_cache_metadata_set_read_only(struct dm_cache_metadata *cmd)
1741 {
1742         WRITE_LOCK_VOID(cmd);
1743         dm_bm_set_read_only(cmd->bm);
1744         WRITE_UNLOCK(cmd);
1745 }
1746
1747 void dm_cache_metadata_set_read_write(struct dm_cache_metadata *cmd)
1748 {
1749         WRITE_LOCK_VOID(cmd);
1750         dm_bm_set_read_write(cmd->bm);
1751         WRITE_UNLOCK(cmd);
1752 }
1753
1754 int dm_cache_metadata_set_needs_check(struct dm_cache_metadata *cmd)
1755 {
1756         int r;
1757         struct dm_block *sblock;
1758         struct cache_disk_superblock *disk_super;
1759
1760         WRITE_LOCK(cmd);
1761         set_bit(NEEDS_CHECK, &cmd->flags);
1762
1763         r = superblock_lock(cmd, &sblock);
1764         if (r) {
1765                 DMERR("couldn't read superblock");
1766                 goto out;
1767         }
1768
1769         disk_super = dm_block_data(sblock);
1770         disk_super->flags = cpu_to_le32(cmd->flags);
1771
1772         dm_bm_unlock(sblock);
1773
1774 out:
1775         WRITE_UNLOCK(cmd);
1776         return r;
1777 }
1778
1779 int dm_cache_metadata_needs_check(struct dm_cache_metadata *cmd, bool *result)
1780 {
1781         READ_LOCK(cmd);
1782         *result = !!test_bit(NEEDS_CHECK, &cmd->flags);
1783         READ_UNLOCK(cmd);
1784
1785         return 0;
1786 }
1787
1788 int dm_cache_metadata_abort(struct dm_cache_metadata *cmd)
1789 {
1790         int r;
1791
1792         WRITE_LOCK(cmd);
1793         __destroy_persistent_data_objects(cmd);
1794         r = __create_persistent_data_objects(cmd, false);
1795         if (r)
1796                 cmd->fail_io = true;
1797         WRITE_UNLOCK(cmd);
1798
1799         return r;
1800 }