dm zoned: select reclaim zone based on device index
[linux-block.git] drivers/md/dm-zoned-metadata.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017 Western Digital Corporation or its affiliates.
 *
 * This file is released under the GPL.
 */

#include "dm-zoned.h"

#include <linux/module.h>
#include <linux/crc32.h>
#include <linux/sched/mm.h>

#define DM_MSG_PREFIX		"zoned metadata"

/*
 * Metadata version.
 */
#define DMZ_META_VER	2

/*
 * On-disk super block magic.
 */
#define DMZ_MAGIC	((((unsigned int)('D')) << 24) | \
			 (((unsigned int)('Z')) << 16) | \
			 (((unsigned int)('B')) <<  8) | \
			 ((unsigned int)('D')))

/*
 * On disk super block.
 * The super block itself uses only 512 B but is stored using a full 4KB
 * block on disk. This block is followed on disk by the mapping table of
 * chunks to zones and the bitmap blocks indicating zone block validity.
 * The overall resulting metadata format is:
 *    (1) Super block (1 block)
 *    (2) Chunk mapping table (nr_map_blocks)
 *    (3) Bitmap blocks (nr_bitmap_blocks)
 * All metadata blocks are stored in conventional zones, starting from
 * the first conventional zone found on disk.
 */
struct dmz_super {
	/* Magic number */
	__le32		magic;			/*   4 */

	/* Metadata version number */
	__le32		version;		/*   8 */

	/* Generation number */
	__le64		gen;			/*  16 */

	/* This block number */
	__le64		sb_block;		/*  24 */

	/* The number of metadata blocks, including this super block */
	__le32		nr_meta_blocks;		/*  28 */

	/* The number of sequential zones reserved for reclaim */
	__le32		nr_reserved_seq;	/*  32 */

	/* The number of entries in the mapping table */
	__le32		nr_chunks;		/*  36 */

	/* The number of blocks used for the chunk mapping table */
	__le32		nr_map_blocks;		/*  40 */

	/* The number of blocks used for the block bitmaps */
	__le32		nr_bitmap_blocks;	/*  44 */

	/* Checksum */
	__le32		crc;			/*  48 */

	/* DM-Zoned label */
	u8		dmz_label[32];		/*  80 */

	/* DM-Zoned UUID */
	u8		dmz_uuid[16];		/*  96 */

	/* Device UUID */
	u8		dev_uuid[16];		/* 112 */

	/* Padding to full 512B sector */
	u8		reserved[400];		/* 512 */
};

/*
 * Chunk mapping entry: entries are indexed by chunk number
 * and give the zone ID (dzone_id) mapping the chunk on disk.
 * This zone may be sequential or random. If it is a sequential
 * zone, a second zone (bzone_id) used as a write buffer may
 * also be specified. This second zone will always be a randomly
 * writable zone.
 */
struct dmz_map {
	__le32			dzone_id;
	__le32			bzone_id;
};

/*
 * Chunk mapping table metadata: 512 8-byte entries per 4KB block.
 */
#define DMZ_MAP_ENTRIES		(DMZ_BLOCK_SIZE / sizeof(struct dmz_map))
#define DMZ_MAP_ENTRIES_SHIFT	(ilog2(DMZ_MAP_ENTRIES))
#define DMZ_MAP_ENTRIES_MASK	(DMZ_MAP_ENTRIES - 1)
#define DMZ_MAP_UNMAPPED	UINT_MAX
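
/*
 * Worked example: with DMZ_BLOCK_SIZE = 4096 and 8 B per struct dmz_map,
 * DMZ_MAP_ENTRIES is 512, DMZ_MAP_ENTRIES_SHIFT is 9 and
 * DMZ_MAP_ENTRIES_MASK is 0x1ff. The mapping entry for chunk c thus
 * lives in mapping table block (c >> 9), at index (c & 0x1ff), as used
 * by dmz_set_chunk_mapping() and dmz_get_chunk_mapping() below.
 */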

/*
 * Metadata block descriptor (for cached metadata blocks).
 */
struct dmz_mblock {
	struct rb_node		node;
	struct list_head	link;
	sector_t		no;
	unsigned int		ref;
	unsigned long		state;
	struct page		*page;
	void			*data;
};

/*
 * Metadata block state flags.
 */
enum {
	DMZ_META_DIRTY,
	DMZ_META_READING,
	DMZ_META_WRITING,
	DMZ_META_ERROR,
};
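
/*
 * A block's DMZ_META_DIRTY bit is set by dmz_dirty_mblock() and cleared
 * by dmz_flush_metadata() once the block has been written to both
 * metadata sets. DMZ_META_READING and DMZ_META_WRITING bracket in-flight
 * BIOs and are cleared by dmz_mblock_bio_end_io(), which also sets
 * DMZ_META_ERROR if the BIO failed.
 */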

/*
 * Super block information (one per metadata set).
 */
struct dmz_sb {
	sector_t		block;
	struct dmz_dev		*dev;
	struct dmz_mblock	*mblk;
	struct dmz_super	*sb;
	struct dm_zone		*zone;
};

/*
 * In-memory metadata.
 */
struct dmz_metadata {
	struct dmz_dev		*dev;
	unsigned int		nr_devs;

	char			devname[BDEVNAME_SIZE];
	char			label[BDEVNAME_SIZE];
	uuid_t			uuid;

	sector_t		zone_bitmap_size;
	unsigned int		zone_nr_bitmap_blocks;
	unsigned int		zone_bits_per_mblk;

	sector_t		zone_nr_blocks;
	sector_t		zone_nr_blocks_shift;

	sector_t		zone_nr_sectors;
	sector_t		zone_nr_sectors_shift;

	unsigned int		nr_bitmap_blocks;
	unsigned int		nr_map_blocks;

	unsigned int		nr_zones;
	unsigned int		nr_useable_zones;
	unsigned int		nr_meta_blocks;
	unsigned int		nr_meta_zones;
	unsigned int		nr_data_zones;
	unsigned int		nr_cache_zones;
	unsigned int		nr_rnd_zones;
	unsigned int		nr_reserved_seq;
	unsigned int		nr_chunks;

	/* Zone information array */
	struct xarray		zones;

	struct dmz_sb		sb[2];
	unsigned int		mblk_primary;
	unsigned int		sb_version;
	u64			sb_gen;
	unsigned int		min_nr_mblks;
	unsigned int		max_nr_mblks;
	atomic_t		nr_mblks;
	struct rw_semaphore	mblk_sem;
	struct mutex		mblk_flush_lock;
	spinlock_t		mblk_lock;
	struct rb_root		mblk_rbtree;
	struct list_head	mblk_lru_list;
	struct list_head	mblk_dirty_list;
	struct shrinker		mblk_shrinker;

	/* Zone allocation management */
	struct mutex		map_lock;
	struct dmz_mblock	**map_mblk;

	unsigned int		nr_cache;
	atomic_t		unmap_nr_cache;
	struct list_head	unmap_cache_list;
	struct list_head	map_cache_list;

	atomic_t		nr_reserved_seq_zones;
	struct list_head	reserved_seq_zones_list;

	wait_queue_head_t	free_wq;
};
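
/*
 * Note that the zones xarray is indexed by global zone ID, that is, the
 * per-device zone number plus the owning device's zone_offset (see
 * dmz_init_zone() and dmz_dev_zone_id() below).
 */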

#define dmz_zmd_info(zmd, format, args...)	\
	DMINFO("(%s): " format, (zmd)->label, ## args)

#define dmz_zmd_err(zmd, format, args...)	\
	DMERR("(%s): " format, (zmd)->label, ## args)

#define dmz_zmd_warn(zmd, format, args...)	\
	DMWARN("(%s): " format, (zmd)->label, ## args)

#define dmz_zmd_debug(zmd, format, args...)	\
	DMDEBUG("(%s): " format, (zmd)->label, ## args)
/*
 * Various accessors
 */
static unsigned int dmz_dev_zone_id(struct dmz_metadata *zmd, struct dm_zone *zone)
{
	if (WARN_ON(!zone))
		return 0;

	return zone->id - zone->dev->zone_offset;
}

sector_t dmz_start_sect(struct dmz_metadata *zmd, struct dm_zone *zone)
{
	unsigned int zone_id = dmz_dev_zone_id(zmd, zone);

	return (sector_t)zone_id << zmd->zone_nr_sectors_shift;
}

sector_t dmz_start_block(struct dmz_metadata *zmd, struct dm_zone *zone)
{
	unsigned int zone_id = dmz_dev_zone_id(zmd, zone);

	return (sector_t)zone_id << zmd->zone_nr_blocks_shift;
}

unsigned int dmz_zone_nr_blocks(struct dmz_metadata *zmd)
{
	return zmd->zone_nr_blocks;
}

unsigned int dmz_zone_nr_blocks_shift(struct dmz_metadata *zmd)
{
	return zmd->zone_nr_blocks_shift;
}

unsigned int dmz_zone_nr_sectors(struct dmz_metadata *zmd)
{
	return zmd->zone_nr_sectors;
}

unsigned int dmz_zone_nr_sectors_shift(struct dmz_metadata *zmd)
{
	return zmd->zone_nr_sectors_shift;
}

unsigned int dmz_nr_zones(struct dmz_metadata *zmd)
{
	return zmd->nr_zones;
}

unsigned int dmz_nr_chunks(struct dmz_metadata *zmd)
{
	return zmd->nr_chunks;
}

unsigned int dmz_nr_rnd_zones(struct dmz_metadata *zmd, int idx)
{
	return zmd->dev[idx].nr_rnd;
}

unsigned int dmz_nr_unmap_rnd_zones(struct dmz_metadata *zmd, int idx)
{
	return atomic_read(&zmd->dev[idx].unmap_nr_rnd);
}

unsigned int dmz_nr_cache_zones(struct dmz_metadata *zmd)
{
	return zmd->nr_cache;
}

unsigned int dmz_nr_unmap_cache_zones(struct dmz_metadata *zmd)
{
	return atomic_read(&zmd->unmap_nr_cache);
}

unsigned int dmz_nr_seq_zones(struct dmz_metadata *zmd, int idx)
{
	return zmd->dev[idx].nr_seq;
}

unsigned int dmz_nr_unmap_seq_zones(struct dmz_metadata *zmd, int idx)
{
	return atomic_read(&zmd->dev[idx].unmap_nr_seq);
}

static struct dm_zone *dmz_get(struct dmz_metadata *zmd, unsigned int zone_id)
{
	return xa_load(&zmd->zones, zone_id);
}

static struct dm_zone *dmz_insert(struct dmz_metadata *zmd,
				  unsigned int zone_id, struct dmz_dev *dev)
{
	struct dm_zone *zone = kzalloc(sizeof(struct dm_zone), GFP_KERNEL);

	if (!zone)
		return ERR_PTR(-ENOMEM);

	if (xa_insert(&zmd->zones, zone_id, zone, GFP_KERNEL)) {
		kfree(zone);
		return ERR_PTR(-EBUSY);
	}

	INIT_LIST_HEAD(&zone->link);
	atomic_set(&zone->refcount, 0);
	zone->id = zone_id;
	zone->chunk = DMZ_MAP_UNMAPPED;
	zone->dev = dev;

	return zone;
}

const char *dmz_metadata_label(struct dmz_metadata *zmd)
{
	return (const char *)zmd->label;
}

bool dmz_check_dev(struct dmz_metadata *zmd)
{
	unsigned int i;

	for (i = 0; i < zmd->nr_devs; i++) {
		if (!dmz_check_bdev(&zmd->dev[i]))
			return false;
	}
	return true;
}

bool dmz_dev_is_dying(struct dmz_metadata *zmd)
{
	unsigned int i;

	for (i = 0; i < zmd->nr_devs; i++) {
		if (dmz_bdev_is_dying(&zmd->dev[i]))
			return true;
	}
	return false;
}

/*
 * Lock/unlock mapping table.
 * The map lock also protects all the zone lists.
 */
void dmz_lock_map(struct dmz_metadata *zmd)
{
	mutex_lock(&zmd->map_lock);
}

void dmz_unlock_map(struct dmz_metadata *zmd)
{
	mutex_unlock(&zmd->map_lock);
}

/*
 * Lock/unlock metadata access. This is a "read" lock on a semaphore
 * that prevents metadata flush from running while metadata are being
 * modified. The actual metadata write mutual exclusion is achieved with
 * the map lock and zone state management (active and reclaim state are
 * mutually exclusive).
 */
void dmz_lock_metadata(struct dmz_metadata *zmd)
{
	down_read(&zmd->mblk_sem);
}

void dmz_unlock_metadata(struct dmz_metadata *zmd)
{
	up_read(&zmd->mblk_sem);
}

/*
 * Lock/unlock flush: prevent concurrent executions
 * of dmz_flush_metadata as well as metadata modification in reclaim
 * while flush is being executed.
 */
void dmz_lock_flush(struct dmz_metadata *zmd)
{
	mutex_lock(&zmd->mblk_flush_lock);
}

void dmz_unlock_flush(struct dmz_metadata *zmd)
{
	mutex_unlock(&zmd->mblk_flush_lock);
}
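
/*
 * Taken together, the locks above nest as follows (outermost first):
 * mblk_sem is taken for reading by I/O paths and for writing by
 * dmz_flush_metadata(), mblk_flush_lock serializes flush against
 * metadata updates done by reclaim, map_lock covers the chunk mapping
 * table and all zone lists, and the mblk_lock spinlock protects the
 * metadata block rbtree and the LRU and dirty lists.
 */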

/*
 * Allocate a metadata block.
 */
static struct dmz_mblock *dmz_alloc_mblock(struct dmz_metadata *zmd,
					   sector_t mblk_no)
{
	struct dmz_mblock *mblk = NULL;

	/* See if we can reuse cached blocks */
	if (zmd->max_nr_mblks && atomic_read(&zmd->nr_mblks) > zmd->max_nr_mblks) {
		spin_lock(&zmd->mblk_lock);
		mblk = list_first_entry_or_null(&zmd->mblk_lru_list,
						struct dmz_mblock, link);
		if (mblk) {
			list_del_init(&mblk->link);
			rb_erase(&mblk->node, &zmd->mblk_rbtree);
			mblk->no = mblk_no;
		}
		spin_unlock(&zmd->mblk_lock);
		if (mblk)
			return mblk;
	}

	/* Allocate a new block */
	mblk = kmalloc(sizeof(struct dmz_mblock), GFP_NOIO);
	if (!mblk)
		return NULL;

	mblk->page = alloc_page(GFP_NOIO);
	if (!mblk->page) {
		kfree(mblk);
		return NULL;
	}

	RB_CLEAR_NODE(&mblk->node);
	INIT_LIST_HEAD(&mblk->link);
	mblk->ref = 0;
	mblk->state = 0;
	mblk->no = mblk_no;
	mblk->data = page_address(mblk->page);

	atomic_inc(&zmd->nr_mblks);

	return mblk;
}

/*
 * Free a metadata block.
 */
static void dmz_free_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk)
{
	__free_pages(mblk->page, 0);
	kfree(mblk);

	atomic_dec(&zmd->nr_mblks);
}

/*
 * Insert a metadata block in the rbtree.
 */
static void dmz_insert_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk)
{
	struct rb_root *root = &zmd->mblk_rbtree;
	struct rb_node **new = &(root->rb_node), *parent = NULL;
	struct dmz_mblock *b;

	/* Figure out where to put the new node */
	while (*new) {
		b = container_of(*new, struct dmz_mblock, node);
		parent = *new;
		new = (b->no < mblk->no) ? &((*new)->rb_left) : &((*new)->rb_right);
	}

	/* Add new node and rebalance tree */
	rb_link_node(&mblk->node, parent, new);
	rb_insert_color(&mblk->node, root);
}

/*
 * Lookup a metadata block in the rbtree. If the block is found, increment
 * its reference count.
 */
static struct dmz_mblock *dmz_get_mblock_fast(struct dmz_metadata *zmd,
					      sector_t mblk_no)
{
	struct rb_root *root = &zmd->mblk_rbtree;
	struct rb_node *node = root->rb_node;
	struct dmz_mblock *mblk;

	while (node) {
		mblk = container_of(node, struct dmz_mblock, node);
		if (mblk->no == mblk_no) {
			/*
			 * If this is the first reference to the block,
			 * remove it from the LRU list.
			 */
			mblk->ref++;
			if (mblk->ref == 1 &&
			    !test_bit(DMZ_META_DIRTY, &mblk->state))
				list_del_init(&mblk->link);
			return mblk;
		}
		node = (mblk->no < mblk_no) ? node->rb_left : node->rb_right;
	}

	return NULL;
}

/*
 * Metadata block BIO end callback.
 */
static void dmz_mblock_bio_end_io(struct bio *bio)
{
	struct dmz_mblock *mblk = bio->bi_private;
	int flag;

	if (bio->bi_status)
		set_bit(DMZ_META_ERROR, &mblk->state);

	if (bio_op(bio) == REQ_OP_WRITE)
		flag = DMZ_META_WRITING;
	else
		flag = DMZ_META_READING;

	clear_bit_unlock(flag, &mblk->state);
	smp_mb__after_atomic();
	wake_up_bit(&mblk->state, flag);

	bio_put(bio);
}
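
/*
 * clear_bit_unlock() has release semantics and the
 * smp_mb__after_atomic() barrier orders clearing the state bit against
 * the waitqueue check performed by wake_up_bit(). This pairs with the
 * wait_on_bit_io() calls in dmz_get_mblock() and
 * dmz_write_dirty_mblocks().
 */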

/*
 * Read an uncached metadata block from disk and add it to the cache.
 */
static struct dmz_mblock *dmz_get_mblock_slow(struct dmz_metadata *zmd,
					      sector_t mblk_no)
{
	struct dmz_mblock *mblk, *m;
	sector_t block = zmd->sb[zmd->mblk_primary].block + mblk_no;
	struct dmz_dev *dev = zmd->sb[zmd->mblk_primary].dev;
	struct bio *bio;

	if (dmz_bdev_is_dying(dev))
		return ERR_PTR(-EIO);

	/* Get a new block and a BIO to read it */
	mblk = dmz_alloc_mblock(zmd, mblk_no);
	if (!mblk)
		return ERR_PTR(-ENOMEM);

	bio = bio_alloc(GFP_NOIO, 1);
	if (!bio) {
		dmz_free_mblock(zmd, mblk);
		return ERR_PTR(-ENOMEM);
	}

	spin_lock(&zmd->mblk_lock);

	/*
	 * Make sure that another context did not start reading
	 * the block already.
	 */
	m = dmz_get_mblock_fast(zmd, mblk_no);
	if (m) {
		spin_unlock(&zmd->mblk_lock);
		dmz_free_mblock(zmd, mblk);
		bio_put(bio);
		return m;
	}

	mblk->ref++;
	set_bit(DMZ_META_READING, &mblk->state);
	dmz_insert_mblock(zmd, mblk);

	spin_unlock(&zmd->mblk_lock);

	/* Submit read BIO */
	bio->bi_iter.bi_sector = dmz_blk2sect(block);
	bio_set_dev(bio, dev->bdev);
	bio->bi_private = mblk;
	bio->bi_end_io = dmz_mblock_bio_end_io;
	bio_set_op_attrs(bio, REQ_OP_READ, REQ_META | REQ_PRIO);
	bio_add_page(bio, mblk->page, DMZ_BLOCK_SIZE, 0);
	submit_bio(bio);

	return mblk;
}

/*
 * Free metadata blocks.
 */
static unsigned long dmz_shrink_mblock_cache(struct dmz_metadata *zmd,
					     unsigned long limit)
{
	struct dmz_mblock *mblk;
	unsigned long count = 0;

	if (!zmd->max_nr_mblks)
		return 0;

	while (!list_empty(&zmd->mblk_lru_list) &&
	       atomic_read(&zmd->nr_mblks) > zmd->min_nr_mblks &&
	       count < limit) {
		mblk = list_first_entry(&zmd->mblk_lru_list,
					struct dmz_mblock, link);
		list_del_init(&mblk->link);
		rb_erase(&mblk->node, &zmd->mblk_rbtree);
		dmz_free_mblock(zmd, mblk);
		count++;
	}

	return count;
}

/*
 * For mblock shrinker: get the number of unused metadata blocks in the cache.
 */
static unsigned long dmz_mblock_shrinker_count(struct shrinker *shrink,
					       struct shrink_control *sc)
{
	struct dmz_metadata *zmd = container_of(shrink, struct dmz_metadata, mblk_shrinker);

	return atomic_read(&zmd->nr_mblks);
}

/*
 * For mblock shrinker: scan unused metadata blocks and shrink the cache.
 */
static unsigned long dmz_mblock_shrinker_scan(struct shrinker *shrink,
					      struct shrink_control *sc)
{
	struct dmz_metadata *zmd = container_of(shrink, struct dmz_metadata, mblk_shrinker);
	unsigned long count;

	spin_lock(&zmd->mblk_lock);
	count = dmz_shrink_mblock_cache(zmd, sc->nr_to_scan);
	spin_unlock(&zmd->mblk_lock);

	return count ? count : SHRINK_STOP;
}

/*
 * Release a metadata block.
 */
static void dmz_release_mblock(struct dmz_metadata *zmd,
			       struct dmz_mblock *mblk)
{

	if (!mblk)
		return;

	spin_lock(&zmd->mblk_lock);

	mblk->ref--;
	if (mblk->ref == 0) {
		if (test_bit(DMZ_META_ERROR, &mblk->state)) {
			rb_erase(&mblk->node, &zmd->mblk_rbtree);
			dmz_free_mblock(zmd, mblk);
		} else if (!test_bit(DMZ_META_DIRTY, &mblk->state)) {
			list_add_tail(&mblk->link, &zmd->mblk_lru_list);
			dmz_shrink_mblock_cache(zmd, 1);
		}
	}

	spin_unlock(&zmd->mblk_lock);
}

/*
 * Get a metadata block from the rbtree. If the block
 * is not present, read it from disk.
 */
static struct dmz_mblock *dmz_get_mblock(struct dmz_metadata *zmd,
					 sector_t mblk_no)
{
	struct dmz_mblock *mblk;
	struct dmz_dev *dev = zmd->sb[zmd->mblk_primary].dev;

	/* Check rbtree */
	spin_lock(&zmd->mblk_lock);
	mblk = dmz_get_mblock_fast(zmd, mblk_no);
	spin_unlock(&zmd->mblk_lock);

	if (!mblk) {
		/* Cache miss: read the block from disk */
		mblk = dmz_get_mblock_slow(zmd, mblk_no);
		if (IS_ERR(mblk))
			return mblk;
	}

	/* Wait for on-going read I/O and check for error */
	wait_on_bit_io(&mblk->state, DMZ_META_READING,
		       TASK_UNINTERRUPTIBLE);
	if (test_bit(DMZ_META_ERROR, &mblk->state)) {
		dmz_release_mblock(zmd, mblk);
		dmz_check_bdev(dev);
		return ERR_PTR(-EIO);
	}

	return mblk;
}

/*
 * Mark a metadata block dirty.
 */
static void dmz_dirty_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk)
{
	spin_lock(&zmd->mblk_lock);
	if (!test_and_set_bit(DMZ_META_DIRTY, &mblk->state))
		list_add_tail(&mblk->link, &zmd->mblk_dirty_list);
	spin_unlock(&zmd->mblk_lock);
}

/*
 * Issue a metadata block write BIO.
 */
static int dmz_write_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk,
			    unsigned int set)
{
	struct dmz_dev *dev = zmd->sb[set].dev;
	sector_t block = zmd->sb[set].block + mblk->no;
	struct bio *bio;

	if (dmz_bdev_is_dying(dev))
		return -EIO;

	bio = bio_alloc(GFP_NOIO, 1);
	if (!bio) {
		set_bit(DMZ_META_ERROR, &mblk->state);
		return -ENOMEM;
	}

	set_bit(DMZ_META_WRITING, &mblk->state);

	bio->bi_iter.bi_sector = dmz_blk2sect(block);
	bio_set_dev(bio, dev->bdev);
	bio->bi_private = mblk;
	bio->bi_end_io = dmz_mblock_bio_end_io;
	bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_META | REQ_PRIO);
	bio_add_page(bio, mblk->page, DMZ_BLOCK_SIZE, 0);
	submit_bio(bio);

	return 0;
}

/*
 * Read/write a metadata block.
 */
static int dmz_rdwr_block(struct dmz_dev *dev, int op,
			  sector_t block, struct page *page)
{
	struct bio *bio;
	int ret;

	if (WARN_ON(!dev))
		return -EIO;

	if (dmz_bdev_is_dying(dev))
		return -EIO;

	bio = bio_alloc(GFP_NOIO, 1);
	if (!bio)
		return -ENOMEM;

	bio->bi_iter.bi_sector = dmz_blk2sect(block);
	bio_set_dev(bio, dev->bdev);
	bio_set_op_attrs(bio, op, REQ_SYNC | REQ_META | REQ_PRIO);
	bio_add_page(bio, page, DMZ_BLOCK_SIZE, 0);
	ret = submit_bio_wait(bio);
	bio_put(bio);

	if (ret)
		dmz_check_bdev(dev);
	return ret;
}

/*
 * Write super block of the specified metadata set.
 */
static int dmz_write_sb(struct dmz_metadata *zmd, unsigned int set)
{
	struct dmz_mblock *mblk = zmd->sb[set].mblk;
	struct dmz_super *sb = zmd->sb[set].sb;
	struct dmz_dev *dev = zmd->sb[set].dev;
	sector_t sb_block;
	u64 sb_gen = zmd->sb_gen + 1;
	int ret;

	sb->magic = cpu_to_le32(DMZ_MAGIC);

	sb->version = cpu_to_le32(zmd->sb_version);
	if (zmd->sb_version > 1) {
		BUILD_BUG_ON(UUID_SIZE != 16);
		export_uuid(sb->dmz_uuid, &zmd->uuid);
		memcpy(sb->dmz_label, zmd->label, BDEVNAME_SIZE);
		export_uuid(sb->dev_uuid, &dev->uuid);
	}

	sb->gen = cpu_to_le64(sb_gen);

	/*
	 * The metadata always references the absolute block address,
	 * i.e. the address relative to the entire block range, not the
	 * per-device block address.
	 */
	sb_block = zmd->sb[set].zone->id << zmd->zone_nr_blocks_shift;
	sb->sb_block = cpu_to_le64(sb_block);
	sb->nr_meta_blocks = cpu_to_le32(zmd->nr_meta_blocks);
	sb->nr_reserved_seq = cpu_to_le32(zmd->nr_reserved_seq);
	sb->nr_chunks = cpu_to_le32(zmd->nr_chunks);

	sb->nr_map_blocks = cpu_to_le32(zmd->nr_map_blocks);
	sb->nr_bitmap_blocks = cpu_to_le32(zmd->nr_bitmap_blocks);

	sb->crc = 0;
	sb->crc = cpu_to_le32(crc32_le(sb_gen, (unsigned char *)sb, DMZ_BLOCK_SIZE));

	ret = dmz_rdwr_block(dev, REQ_OP_WRITE, zmd->sb[set].block,
			     mblk->page);
	if (ret == 0)
		ret = blkdev_issue_flush(dev->bdev, GFP_NOIO, NULL);

	return ret;
}
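
/*
 * Note that the CRC computed above is seeded with the new super block
 * generation, and dmz_check_sb() recomputes it with the generation read
 * from disk, so a super block cannot validate against the wrong
 * generation by accident.
 */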

/*
 * Write dirty metadata blocks to the specified set.
 */
static int dmz_write_dirty_mblocks(struct dmz_metadata *zmd,
				   struct list_head *write_list,
				   unsigned int set)
{
	struct dmz_mblock *mblk;
	struct dmz_dev *dev = zmd->sb[set].dev;
	struct blk_plug plug;
	int ret = 0, nr_mblks_submitted = 0;

	/* Issue writes */
	blk_start_plug(&plug);
	list_for_each_entry(mblk, write_list, link) {
		ret = dmz_write_mblock(zmd, mblk, set);
		if (ret)
			break;
		nr_mblks_submitted++;
	}
	blk_finish_plug(&plug);

	/* Wait for completion */
	list_for_each_entry(mblk, write_list, link) {
		if (!nr_mblks_submitted)
			break;
		wait_on_bit_io(&mblk->state, DMZ_META_WRITING,
			       TASK_UNINTERRUPTIBLE);
		if (test_bit(DMZ_META_ERROR, &mblk->state)) {
			clear_bit(DMZ_META_ERROR, &mblk->state);
			dmz_check_bdev(dev);
			ret = -EIO;
		}
		nr_mblks_submitted--;
	}

	/* Flush drive cache (this will also sync data) */
	if (ret == 0)
		ret = blkdev_issue_flush(dev->bdev, GFP_NOIO, NULL);

	return ret;
}

/*
 * Log dirty metadata blocks.
 */
static int dmz_log_dirty_mblocks(struct dmz_metadata *zmd,
				 struct list_head *write_list)
{
	unsigned int log_set = zmd->mblk_primary ^ 0x1;
	int ret;

	/* Write dirty blocks to the log */
	ret = dmz_write_dirty_mblocks(zmd, write_list, log_set);
	if (ret)
		return ret;

	/*
	 * No error so far: now validate the log by updating the
	 * log index super block generation.
	 */
	ret = dmz_write_sb(zmd, log_set);
	if (ret)
		return ret;

	return 0;
}
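
/*
 * Writing the log set super block with the incremented generation is
 * what commits the log: if the system crashes before that write
 * completes, the primary set still holds the highest valid generation
 * and is used unchanged on the next load (see dmz_load_sb()).
 */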

/*
 * Flush dirty metadata blocks.
 */
int dmz_flush_metadata(struct dmz_metadata *zmd)
{
	struct dmz_mblock *mblk;
	struct list_head write_list;
	struct dmz_dev *dev;
	int ret;

	if (WARN_ON(!zmd))
		return 0;

	INIT_LIST_HEAD(&write_list);

	/*
	 * Make sure that metadata blocks are stable before logging: take
	 * the write lock on the metadata semaphore to prevent target BIOs
	 * from modifying metadata.
	 */
	down_write(&zmd->mblk_sem);
	dev = zmd->sb[zmd->mblk_primary].dev;

	/*
	 * This is called from the target flush work and reclaim work.
	 * Concurrent execution is not allowed.
	 */
	dmz_lock_flush(zmd);

	if (dmz_bdev_is_dying(dev)) {
		ret = -EIO;
		goto out;
	}

	/* Get dirty blocks */
	spin_lock(&zmd->mblk_lock);
	list_splice_init(&zmd->mblk_dirty_list, &write_list);
	spin_unlock(&zmd->mblk_lock);

	/* If there are no dirty metadata blocks, just flush the device cache */
	if (list_empty(&write_list)) {
		ret = blkdev_issue_flush(dev->bdev, GFP_NOIO, NULL);
		goto err;
	}

	/*
	 * The primary metadata set is still clean. Keep it this way until
	 * all updates are successful in the secondary set. That is, use
	 * the secondary set as a log.
	 */
	ret = dmz_log_dirty_mblocks(zmd, &write_list);
	if (ret)
		goto err;

	/*
	 * The log is on disk. It is now safe to update in place
	 * in the primary metadata set.
	 */
	ret = dmz_write_dirty_mblocks(zmd, &write_list, zmd->mblk_primary);
	if (ret)
		goto err;

	ret = dmz_write_sb(zmd, zmd->mblk_primary);
	if (ret)
		goto err;

	while (!list_empty(&write_list)) {
		mblk = list_first_entry(&write_list, struct dmz_mblock, link);
		list_del_init(&mblk->link);

		spin_lock(&zmd->mblk_lock);
		clear_bit(DMZ_META_DIRTY, &mblk->state);
		if (mblk->ref == 0)
			list_add_tail(&mblk->link, &zmd->mblk_lru_list);
		spin_unlock(&zmd->mblk_lock);
	}

	zmd->sb_gen++;
out:
	dmz_unlock_flush(zmd);
	up_write(&zmd->mblk_sem);

	return ret;

err:
	if (!list_empty(&write_list)) {
		spin_lock(&zmd->mblk_lock);
		list_splice(&write_list, &zmd->mblk_dirty_list);
		spin_unlock(&zmd->mblk_lock);
	}
	if (!dmz_check_bdev(dev))
		ret = -EIO;
	goto out;
}

/*
 * Check super block.
 */
static int dmz_check_sb(struct dmz_metadata *zmd, struct dmz_sb *dsb,
			bool tertiary)
{
	struct dmz_super *sb = dsb->sb;
	struct dmz_dev *dev = dsb->dev;
	unsigned int nr_meta_zones, nr_data_zones;
	u32 crc, stored_crc;
	u64 gen;

	if (le32_to_cpu(sb->magic) != DMZ_MAGIC) {
		dmz_dev_err(dev, "Invalid meta magic (needed 0x%08x, got 0x%08x)",
			    DMZ_MAGIC, le32_to_cpu(sb->magic));
		return -ENXIO;
	}

	zmd->sb_version = le32_to_cpu(sb->version);
	if (zmd->sb_version > DMZ_META_VER) {
		dmz_dev_err(dev, "Invalid meta version (needed %d, got %d)",
			    DMZ_META_VER, zmd->sb_version);
		return -EINVAL;
	}
	if (zmd->sb_version < 2 && tertiary) {
		dmz_dev_err(dev, "Tertiary superblocks are not supported");
		return -EINVAL;
	}

	gen = le64_to_cpu(sb->gen);
	stored_crc = le32_to_cpu(sb->crc);
	sb->crc = 0;
	crc = crc32_le(gen, (unsigned char *)sb, DMZ_BLOCK_SIZE);
	if (crc != stored_crc) {
		dmz_dev_err(dev, "Invalid checksum (needed 0x%08x, got 0x%08x)",
			    crc, stored_crc);
		return -ENXIO;
	}

	if (zmd->sb_version > 1) {
		uuid_t sb_uuid;

		import_uuid(&sb_uuid, sb->dmz_uuid);
		if (uuid_is_null(&sb_uuid)) {
			dmz_dev_err(dev, "NULL DM-Zoned uuid");
			return -ENXIO;
		} else if (uuid_is_null(&zmd->uuid)) {
			uuid_copy(&zmd->uuid, &sb_uuid);
		} else if (!uuid_equal(&zmd->uuid, &sb_uuid)) {
			dmz_dev_err(dev, "mismatching DM-Zoned uuid, "
				    "is %pUl expected %pUl",
				    &sb_uuid, &zmd->uuid);
			return -ENXIO;
		}
		if (!strlen(zmd->label))
			memcpy(zmd->label, sb->dmz_label, BDEVNAME_SIZE);
		else if (memcmp(zmd->label, sb->dmz_label, BDEVNAME_SIZE)) {
			dmz_dev_err(dev, "mismatching DM-Zoned label, "
				    "is %s expected %s",
				    sb->dmz_label, zmd->label);
			return -ENXIO;
		}
		import_uuid(&dev->uuid, sb->dev_uuid);
		if (uuid_is_null(&dev->uuid)) {
			dmz_dev_err(dev, "NULL device uuid");
			return -ENXIO;
		}

		if (tertiary) {
			/*
			 * Generation number should be 0, but it doesn't
			 * really matter if it isn't.
			 */
			if (gen != 0)
				dmz_dev_warn(dev, "Invalid generation %llu",
					     gen);
			return 0;
		}
	}

	nr_meta_zones = (le32_to_cpu(sb->nr_meta_blocks) + zmd->zone_nr_blocks - 1)
		>> zmd->zone_nr_blocks_shift;
	if (!nr_meta_zones ||
	    nr_meta_zones >= zmd->nr_rnd_zones) {
		dmz_dev_err(dev, "Invalid number of metadata blocks");
		return -ENXIO;
	}

	if (!le32_to_cpu(sb->nr_reserved_seq) ||
	    le32_to_cpu(sb->nr_reserved_seq) >= (zmd->nr_useable_zones - nr_meta_zones)) {
		dmz_dev_err(dev, "Invalid number of reserved sequential zones");
		return -ENXIO;
	}

	nr_data_zones = zmd->nr_useable_zones -
		(nr_meta_zones * 2 + le32_to_cpu(sb->nr_reserved_seq));
	if (le32_to_cpu(sb->nr_chunks) > nr_data_zones) {
		dmz_dev_err(dev, "Invalid number of chunks %u / %u",
			    le32_to_cpu(sb->nr_chunks), nr_data_zones);
		return -ENXIO;
	}

	/* OK */
	zmd->nr_meta_blocks = le32_to_cpu(sb->nr_meta_blocks);
	zmd->nr_reserved_seq = le32_to_cpu(sb->nr_reserved_seq);
	zmd->nr_chunks = le32_to_cpu(sb->nr_chunks);
	zmd->nr_map_blocks = le32_to_cpu(sb->nr_map_blocks);
	zmd->nr_bitmap_blocks = le32_to_cpu(sb->nr_bitmap_blocks);
	zmd->nr_meta_zones = nr_meta_zones;
	zmd->nr_data_zones = nr_data_zones;

	return 0;
}

/*
 * Read the first or second super block from disk.
 */
static int dmz_read_sb(struct dmz_metadata *zmd, struct dmz_sb *sb, int set)
{
	dmz_zmd_debug(zmd, "read superblock set %d dev %s block %llu",
		      set, sb->dev->name, sb->block);

	return dmz_rdwr_block(sb->dev, REQ_OP_READ,
			      sb->block, sb->mblk->page);
}

/*
 * Determine the position of the secondary super blocks on disk.
 * This is used only if a corruption of the primary super block
 * is detected.
 */
static int dmz_lookup_secondary_sb(struct dmz_metadata *zmd)
{
	unsigned int zone_nr_blocks = zmd->zone_nr_blocks;
	struct dmz_mblock *mblk;
	unsigned int zone_id = zmd->sb[0].zone->id;
	int i;

	/* Allocate a block */
	mblk = dmz_alloc_mblock(zmd, 0);
	if (!mblk)
		return -ENOMEM;

	zmd->sb[1].mblk = mblk;
	zmd->sb[1].sb = mblk->data;

	/* Bad first super block: search for the second one */
	zmd->sb[1].block = zmd->sb[0].block + zone_nr_blocks;
	zmd->sb[1].zone = dmz_get(zmd, zone_id + 1);
	zmd->sb[1].dev = zmd->sb[0].dev;
	for (i = 1; i < zmd->nr_rnd_zones; i++) {
		if (dmz_read_sb(zmd, &zmd->sb[1], 1) != 0)
			break;
		if (le32_to_cpu(zmd->sb[1].sb->magic) == DMZ_MAGIC)
			return 0;
		zmd->sb[1].block += zone_nr_blocks;
		zmd->sb[1].zone = dmz_get(zmd, zone_id + i);
	}

	dmz_free_mblock(zmd, mblk);
	zmd->sb[1].mblk = NULL;
	zmd->sb[1].zone = NULL;
	zmd->sb[1].dev = NULL;

	return -EIO;
}

/*
 * Read a super block from disk.
 */
static int dmz_get_sb(struct dmz_metadata *zmd, struct dmz_sb *sb, int set)
{
	struct dmz_mblock *mblk;
	int ret;

	/* Allocate a block */
	mblk = dmz_alloc_mblock(zmd, 0);
	if (!mblk)
		return -ENOMEM;

	sb->mblk = mblk;
	sb->sb = mblk->data;

	/* Read super block */
	ret = dmz_read_sb(zmd, sb, set);
	if (ret) {
		dmz_free_mblock(zmd, mblk);
		sb->mblk = NULL;
		return ret;
	}

	return 0;
}

/*
 * Recover a metadata set.
 */
static int dmz_recover_mblocks(struct dmz_metadata *zmd, unsigned int dst_set)
{
	unsigned int src_set = dst_set ^ 0x1;
	struct page *page;
	int i, ret;

	dmz_dev_warn(zmd->sb[dst_set].dev,
		     "Metadata set %u invalid: recovering", dst_set);

	if (dst_set == 0)
		zmd->sb[0].block = dmz_start_block(zmd, zmd->sb[0].zone);
	else
		zmd->sb[1].block = dmz_start_block(zmd, zmd->sb[1].zone);

	page = alloc_page(GFP_NOIO);
	if (!page)
		return -ENOMEM;

	/* Copy metadata blocks */
	for (i = 1; i < zmd->nr_meta_blocks; i++) {
		ret = dmz_rdwr_block(zmd->sb[src_set].dev, REQ_OP_READ,
				     zmd->sb[src_set].block + i, page);
		if (ret)
			goto out;
		ret = dmz_rdwr_block(zmd->sb[dst_set].dev, REQ_OP_WRITE,
				     zmd->sb[dst_set].block + i, page);
		if (ret)
			goto out;
	}

	/* Finalize with the super block */
	if (!zmd->sb[dst_set].mblk) {
		zmd->sb[dst_set].mblk = dmz_alloc_mblock(zmd, 0);
		if (!zmd->sb[dst_set].mblk) {
			ret = -ENOMEM;
			goto out;
		}
		zmd->sb[dst_set].sb = zmd->sb[dst_set].mblk->data;
	}

	ret = dmz_write_sb(zmd, dst_set);
out:
	__free_pages(page, 0);

	return ret;
}
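
/*
 * The copy loop above starts at block 1 because block 0 of a set is its
 * super block: it is rewritten last by dmz_write_sb(), so the recovered
 * set only becomes valid once all of its metadata blocks are in place.
 */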

/*
 * Get super block from disk.
 */
static int dmz_load_sb(struct dmz_metadata *zmd)
{
	bool sb_good[2] = {false, false};
	u64 sb_gen[2] = {0, 0};
	int ret;

	if (!zmd->sb[0].zone) {
		dmz_zmd_err(zmd, "Primary super block zone not set");
		return -ENXIO;
	}

	/* Read and check the primary super block */
	zmd->sb[0].block = dmz_start_block(zmd, zmd->sb[0].zone);
	zmd->sb[0].dev = zmd->sb[0].zone->dev;
	ret = dmz_get_sb(zmd, &zmd->sb[0], 0);
	if (ret) {
		dmz_dev_err(zmd->sb[0].dev, "Read primary super block failed");
		return ret;
	}

	ret = dmz_check_sb(zmd, &zmd->sb[0], false);

	/* Read and check secondary super block */
	if (ret == 0) {
		sb_good[0] = true;
		if (!zmd->sb[1].zone) {
			unsigned int zone_id =
				zmd->sb[0].zone->id + zmd->nr_meta_zones;

			zmd->sb[1].zone = dmz_get(zmd, zone_id);
		}
		zmd->sb[1].block = dmz_start_block(zmd, zmd->sb[1].zone);
		zmd->sb[1].dev = zmd->sb[0].dev;
		ret = dmz_get_sb(zmd, &zmd->sb[1], 1);
	} else
		ret = dmz_lookup_secondary_sb(zmd);

	if (ret) {
		dmz_dev_err(zmd->sb[1].dev, "Read secondary super block failed");
		return ret;
	}

	ret = dmz_check_sb(zmd, &zmd->sb[1], false);
	if (ret == 0)
		sb_good[1] = true;

	/* Use highest generation sb first */
	if (!sb_good[0] && !sb_good[1]) {
		dmz_zmd_err(zmd, "No valid super block found");
		return -EIO;
	}

	if (sb_good[0])
		sb_gen[0] = le64_to_cpu(zmd->sb[0].sb->gen);
	else {
		ret = dmz_recover_mblocks(zmd, 0);
		if (ret) {
			dmz_dev_err(zmd->sb[0].dev,
				    "Recovery of superblock 0 failed");
			return -EIO;
		}
	}

	if (sb_good[1])
		sb_gen[1] = le64_to_cpu(zmd->sb[1].sb->gen);
	else {
		ret = dmz_recover_mblocks(zmd, 1);

		if (ret) {
			dmz_dev_err(zmd->sb[1].dev,
				    "Recovery of superblock 1 failed");
			return -EIO;
		}
	}

	if (sb_gen[0] >= sb_gen[1]) {
		zmd->sb_gen = sb_gen[0];
		zmd->mblk_primary = 0;
	} else {
		zmd->sb_gen = sb_gen[1];
		zmd->mblk_primary = 1;
	}

	dmz_dev_debug(zmd->sb[zmd->mblk_primary].dev,
		      "Using super block %u (gen %llu)",
		      zmd->mblk_primary, zmd->sb_gen);

	if (zmd->sb_version > 1) {
		int i;
		struct dmz_sb *sb;

		sb = kzalloc(sizeof(struct dmz_sb), GFP_KERNEL);
		if (!sb)
			return -ENOMEM;
		for (i = 1; i < zmd->nr_devs; i++) {
			sb->block = 0;
			sb->zone = dmz_get(zmd, zmd->dev[i].zone_offset);
			sb->dev = &zmd->dev[i];
			if (!dmz_is_meta(sb->zone)) {
				dmz_dev_err(sb->dev,
					    "Tertiary super block zone %u not marked as metadata zone",
					    sb->zone->id);
				ret = -EINVAL;
				goto out_kfree;
			}
			ret = dmz_get_sb(zmd, sb, i + 1);
			if (ret) {
				dmz_dev_err(sb->dev,
					    "Read tertiary super block failed");
				dmz_free_mblock(zmd, sb->mblk);
				goto out_kfree;
			}
			ret = dmz_check_sb(zmd, sb, true);
			dmz_free_mblock(zmd, sb->mblk);
			if (ret == -EINVAL)
				goto out_kfree;
		}
out_kfree:
		kfree(sb);
	}
	return ret;
}

/*
 * Initialize a zone descriptor.
 */
static int dmz_init_zone(struct blk_zone *blkz, unsigned int num, void *data)
{
	struct dmz_dev *dev = data;
	struct dmz_metadata *zmd = dev->metadata;
	int idx = num + dev->zone_offset;
	struct dm_zone *zone;

	zone = dmz_insert(zmd, idx, dev);
	if (IS_ERR(zone))
		return PTR_ERR(zone);

	if (blkz->len != zmd->zone_nr_sectors) {
		if (zmd->sb_version > 1) {
			/* Ignore a possible runt (smaller) zone */
			set_bit(DMZ_OFFLINE, &zone->flags);
			return 0;
		} else if (blkz->start + blkz->len == dev->capacity)
			return 0;
		return -ENXIO;
	}

	switch (blkz->type) {
	case BLK_ZONE_TYPE_CONVENTIONAL:
		set_bit(DMZ_RND, &zone->flags);
		break;
	case BLK_ZONE_TYPE_SEQWRITE_REQ:
	case BLK_ZONE_TYPE_SEQWRITE_PREF:
		set_bit(DMZ_SEQ, &zone->flags);
		break;
	default:
		return -ENXIO;
	}

	if (dmz_is_rnd(zone))
		zone->wp_block = 0;
	else
		zone->wp_block = dmz_sect2blk(blkz->wp - blkz->start);

	if (blkz->cond == BLK_ZONE_COND_OFFLINE)
		set_bit(DMZ_OFFLINE, &zone->flags);
	else if (blkz->cond == BLK_ZONE_COND_READONLY)
		set_bit(DMZ_READ_ONLY, &zone->flags);
	else {
		zmd->nr_useable_zones++;
		if (dmz_is_rnd(zone)) {
			zmd->nr_rnd_zones++;
			if (zmd->nr_devs == 1 && !zmd->sb[0].zone) {
				/* Primary super block zone */
				zmd->sb[0].zone = zone;
			}
		}
		if (zmd->nr_devs > 1 && num == 0) {
			/*
			 * Tertiary superblock zones are always at the
			 * start of the zoned devices, so mark them
			 * as metadata zones.
			 */
			set_bit(DMZ_META, &zone->flags);
		}
	}
	return 0;
}

static int dmz_emulate_zones(struct dmz_metadata *zmd, struct dmz_dev *dev)
{
	int idx;
	sector_t zone_offset = 0;

	for (idx = 0; idx < dev->nr_zones; idx++) {
		struct dm_zone *zone;

		zone = dmz_insert(zmd, idx, dev);
		if (IS_ERR(zone))
			return PTR_ERR(zone);
		set_bit(DMZ_CACHE, &zone->flags);
		zone->wp_block = 0;
		zmd->nr_cache_zones++;
		zmd->nr_useable_zones++;
		if (dev->capacity - zone_offset < zmd->zone_nr_sectors) {
			/* Disable runt zone */
			set_bit(DMZ_OFFLINE, &zone->flags);
			break;
		}
		zone_offset += zmd->zone_nr_sectors;
	}
	return 0;
}

/*
 * Free zone descriptors.
 */
static void dmz_drop_zones(struct dmz_metadata *zmd)
{
	int idx;

	for (idx = 0; idx < zmd->nr_zones; idx++) {
		struct dm_zone *zone = xa_load(&zmd->zones, idx);

		kfree(zone);
		xa_erase(&zmd->zones, idx);
	}
	xa_destroy(&zmd->zones);
}

/*
 * Allocate and initialize zone descriptors using the zone
 * information from disk.
 */
static int dmz_init_zones(struct dmz_metadata *zmd)
{
	int i, ret;
	struct dmz_dev *zoned_dev = &zmd->dev[0];

	/* Init */
	zmd->zone_nr_sectors = zmd->dev[0].zone_nr_sectors;
	zmd->zone_nr_sectors_shift = ilog2(zmd->zone_nr_sectors);
	zmd->zone_nr_blocks = dmz_sect2blk(zmd->zone_nr_sectors);
	zmd->zone_nr_blocks_shift = ilog2(zmd->zone_nr_blocks);
	zmd->zone_bitmap_size = zmd->zone_nr_blocks >> 3;
	zmd->zone_nr_bitmap_blocks =
		max_t(sector_t, 1, zmd->zone_bitmap_size >> DMZ_BLOCK_SHIFT);
	zmd->zone_bits_per_mblk = min_t(sector_t, zmd->zone_nr_blocks,
					DMZ_BLOCK_SIZE_BITS);

	/* Allocate zone array */
	zmd->nr_zones = 0;
	for (i = 0; i < zmd->nr_devs; i++) {
		struct dmz_dev *dev = &zmd->dev[i];

		dev->metadata = zmd;
		zmd->nr_zones += dev->nr_zones;

		atomic_set(&dev->unmap_nr_rnd, 0);
		INIT_LIST_HEAD(&dev->unmap_rnd_list);
		INIT_LIST_HEAD(&dev->map_rnd_list);

		atomic_set(&dev->unmap_nr_seq, 0);
		INIT_LIST_HEAD(&dev->unmap_seq_list);
		INIT_LIST_HEAD(&dev->map_seq_list);
	}

	if (!zmd->nr_zones) {
		DMERR("(%s): No zones found", zmd->devname);
		return -ENXIO;
	}
	xa_init(&zmd->zones);

	DMDEBUG("(%s): Using %zu B for zone information",
		zmd->devname, sizeof(struct dm_zone) * zmd->nr_zones);

	if (zmd->nr_devs > 1) {
		ret = dmz_emulate_zones(zmd, &zmd->dev[0]);
		if (ret < 0) {
			DMDEBUG("(%s): Failed to emulate zones, error %d",
				zmd->devname, ret);
			dmz_drop_zones(zmd);
			return ret;
		}

		/*
		 * Primary superblock zone is always at zone 0 when multiple
		 * drives are present.
		 */
		zmd->sb[0].zone = dmz_get(zmd, 0);

		for (i = 1; i < zmd->nr_devs; i++) {
			zoned_dev = &zmd->dev[i];

			ret = blkdev_report_zones(zoned_dev->bdev, 0,
						  BLK_ALL_ZONES,
						  dmz_init_zone, zoned_dev);
			if (ret < 0) {
				DMDEBUG("(%s): Failed to report zones, error %d",
					zmd->devname, ret);
				dmz_drop_zones(zmd);
				return ret;
			}
		}
		return 0;
	}

	/*
	 * Get zone information and initialize zone descriptors. At the same
	 * time, determine where the super block should be: first block of the
	 * first randomly writable zone.
	 */
	ret = blkdev_report_zones(zoned_dev->bdev, 0, BLK_ALL_ZONES,
				  dmz_init_zone, zoned_dev);
	if (ret < 0) {
		DMDEBUG("(%s): Failed to report zones, error %d",
			zmd->devname, ret);
		dmz_drop_zones(zmd);
		return ret;
	}

	return 0;
}

static int dmz_update_zone_cb(struct blk_zone *blkz, unsigned int idx,
			      void *data)
{
	struct dm_zone *zone = data;

	clear_bit(DMZ_OFFLINE, &zone->flags);
	clear_bit(DMZ_READ_ONLY, &zone->flags);
	if (blkz->cond == BLK_ZONE_COND_OFFLINE)
		set_bit(DMZ_OFFLINE, &zone->flags);
	else if (blkz->cond == BLK_ZONE_COND_READONLY)
		set_bit(DMZ_READ_ONLY, &zone->flags);

	if (dmz_is_seq(zone))
		zone->wp_block = dmz_sect2blk(blkz->wp - blkz->start);
	else
		zone->wp_block = 0;
	return 0;
}

/*
 * Update a zone's information.
 */
static int dmz_update_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
{
	struct dmz_dev *dev = zone->dev;
	unsigned int noio_flag;
	int ret;

	if (dev->flags & DMZ_BDEV_REGULAR)
		return 0;

	/*
	 * Get zone information from disk. Since blkdev_report_zones() uses
	 * GFP_KERNEL by default for memory allocations, set the per-task
	 * PF_MEMALLOC_NOIO flag so that all allocations are done as if
	 * GFP_NOIO was specified.
	 */
	noio_flag = memalloc_noio_save();
	ret = blkdev_report_zones(dev->bdev, dmz_start_sect(zmd, zone), 1,
				  dmz_update_zone_cb, zone);
	memalloc_noio_restore(noio_flag);

	if (ret == 0)
		ret = -EIO;
	if (ret < 0) {
		dmz_dev_err(dev, "Get zone %u report failed",
			    zone->id);
		dmz_check_bdev(dev);
		return ret;
	}

	return 0;
}

/*
 * Check a zone write pointer position when the zone is marked
 * with the sequential write error flag.
 */
static int dmz_handle_seq_write_err(struct dmz_metadata *zmd,
				    struct dm_zone *zone)
{
	struct dmz_dev *dev = zone->dev;
	unsigned int wp = 0;
	int ret;

	wp = zone->wp_block;
	ret = dmz_update_zone(zmd, zone);
	if (ret)
		return ret;

	dmz_dev_warn(dev, "Processing zone %u write error (zone wp %u/%u)",
		     zone->id, zone->wp_block, wp);

	if (zone->wp_block < wp) {
		dmz_invalidate_blocks(zmd, zone, zone->wp_block,
				      wp - zone->wp_block);
	}

	return 0;
}

/*
 * Reset a zone write pointer.
 */
static int dmz_reset_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
{
	int ret;

	/*
	 * Ignore offline zones, read only zones,
	 * and conventional zones.
	 */
	if (dmz_is_offline(zone) ||
	    dmz_is_readonly(zone) ||
	    dmz_is_rnd(zone))
		return 0;

	if (!dmz_is_empty(zone) || dmz_seq_write_err(zone)) {
		struct dmz_dev *dev = zone->dev;

		ret = blkdev_zone_mgmt(dev->bdev, REQ_OP_ZONE_RESET,
				       dmz_start_sect(zmd, zone),
				       zmd->zone_nr_sectors, GFP_NOIO);
		if (ret) {
			dmz_dev_err(dev, "Reset zone %u failed %d",
				    zone->id, ret);
			return ret;
		}
	}

	/* Clear write error bit and rewind write pointer position */
	clear_bit(DMZ_SEQ_WRITE_ERR, &zone->flags);
	zone->wp_block = 0;

	return 0;
}

static void dmz_get_zone_weight(struct dmz_metadata *zmd, struct dm_zone *zone);

/*
 * Initialize chunk mapping.
 */
static int dmz_load_mapping(struct dmz_metadata *zmd)
{
	struct dm_zone *dzone, *bzone;
	struct dmz_mblock *dmap_mblk = NULL;
	struct dmz_map *dmap;
	unsigned int i = 0, e = 0, chunk = 0;
	unsigned int dzone_id;
	unsigned int bzone_id;

	/* Metadata block array for the chunk mapping table */
	zmd->map_mblk = kcalloc(zmd->nr_map_blocks,
				sizeof(struct dmz_mblock *), GFP_KERNEL);
	if (!zmd->map_mblk)
		return -ENOMEM;

	/* Get chunk mapping table blocks and initialize zone mapping */
	while (chunk < zmd->nr_chunks) {
		if (!dmap_mblk) {
			/* Get mapping block */
			dmap_mblk = dmz_get_mblock(zmd, i + 1);
			if (IS_ERR(dmap_mblk))
				return PTR_ERR(dmap_mblk);
			zmd->map_mblk[i] = dmap_mblk;
			dmap = (struct dmz_map *) dmap_mblk->data;
			i++;
			e = 0;
		}

		/* Check data zone */
		dzone_id = le32_to_cpu(dmap[e].dzone_id);
		if (dzone_id == DMZ_MAP_UNMAPPED)
			goto next;

		if (dzone_id >= zmd->nr_zones) {
			dmz_zmd_err(zmd, "Chunk %u mapping: invalid data zone ID %u",
				    chunk, dzone_id);
			return -EIO;
		}

		dzone = dmz_get(zmd, dzone_id);
		if (!dzone) {
			dmz_zmd_err(zmd, "Chunk %u mapping: data zone %u not present",
				    chunk, dzone_id);
			return -EIO;
		}
		set_bit(DMZ_DATA, &dzone->flags);
		dzone->chunk = chunk;
		dmz_get_zone_weight(zmd, dzone);

		if (dmz_is_cache(dzone))
			list_add_tail(&dzone->link, &zmd->map_cache_list);
		else if (dmz_is_rnd(dzone))
			list_add_tail(&dzone->link, &dzone->dev->map_rnd_list);
		else
			list_add_tail(&dzone->link, &dzone->dev->map_seq_list);

		/* Check buffer zone */
		bzone_id = le32_to_cpu(dmap[e].bzone_id);
		if (bzone_id == DMZ_MAP_UNMAPPED)
			goto next;

		if (bzone_id >= zmd->nr_zones) {
			dmz_zmd_err(zmd, "Chunk %u mapping: invalid buffer zone ID %u",
				    chunk, bzone_id);
			return -EIO;
		}

		bzone = dmz_get(zmd, bzone_id);
		if (!bzone) {
			dmz_zmd_err(zmd, "Chunk %u mapping: buffer zone %u not present",
				    chunk, bzone_id);
			return -EIO;
		}
		if (!dmz_is_rnd(bzone) && !dmz_is_cache(bzone)) {
			dmz_zmd_err(zmd, "Chunk %u mapping: invalid buffer zone %u",
				    chunk, bzone_id);
			return -EIO;
		}

		set_bit(DMZ_DATA, &bzone->flags);
		set_bit(DMZ_BUF, &bzone->flags);
		bzone->chunk = chunk;
		bzone->bzone = dzone;
		dzone->bzone = bzone;
		dmz_get_zone_weight(zmd, bzone);
		if (dmz_is_cache(bzone))
			list_add_tail(&bzone->link, &zmd->map_cache_list);
		else
			list_add_tail(&bzone->link, &bzone->dev->map_rnd_list);
next:
		chunk++;
		e++;
		if (e >= DMZ_MAP_ENTRIES)
			dmap_mblk = NULL;
	}

	/*
	 * At this point, only meta zones and mapped data zones were
	 * fully initialized. All remaining zones are unmapped data
	 * zones. Finish initializing those here.
	 */
	for (i = 0; i < zmd->nr_zones; i++) {
		dzone = dmz_get(zmd, i);
		if (!dzone)
			continue;
		if (dmz_is_meta(dzone))
			continue;
		if (dmz_is_offline(dzone))
			continue;

		if (dmz_is_cache(dzone))
			zmd->nr_cache++;
		else if (dmz_is_rnd(dzone))
			dzone->dev->nr_rnd++;
		else
			dzone->dev->nr_seq++;

		if (dmz_is_data(dzone)) {
			/* Already initialized */
			continue;
		}

		/* Unmapped data zone */
		set_bit(DMZ_DATA, &dzone->flags);
		dzone->chunk = DMZ_MAP_UNMAPPED;
		if (dmz_is_cache(dzone)) {
			list_add_tail(&dzone->link, &zmd->unmap_cache_list);
			atomic_inc(&zmd->unmap_nr_cache);
		} else if (dmz_is_rnd(dzone)) {
			list_add_tail(&dzone->link,
				      &dzone->dev->unmap_rnd_list);
			atomic_inc(&dzone->dev->unmap_nr_rnd);
		} else if (atomic_read(&zmd->nr_reserved_seq_zones) < zmd->nr_reserved_seq) {
			list_add_tail(&dzone->link, &zmd->reserved_seq_zones_list);
			set_bit(DMZ_RESERVED, &dzone->flags);
			atomic_inc(&zmd->nr_reserved_seq_zones);
			dzone->dev->nr_seq--;
		} else {
			list_add_tail(&dzone->link,
				      &dzone->dev->unmap_seq_list);
			atomic_inc(&dzone->dev->unmap_nr_seq);
		}
	}

	return 0;
}

/*
 * Set a data chunk mapping.
 */
static void dmz_set_chunk_mapping(struct dmz_metadata *zmd, unsigned int chunk,
				  unsigned int dzone_id, unsigned int bzone_id)
{
	struct dmz_mblock *dmap_mblk = zmd->map_mblk[chunk >> DMZ_MAP_ENTRIES_SHIFT];
	struct dmz_map *dmap = (struct dmz_map *) dmap_mblk->data;
	int map_idx = chunk & DMZ_MAP_ENTRIES_MASK;

	dmap[map_idx].dzone_id = cpu_to_le32(dzone_id);
	dmap[map_idx].bzone_id = cpu_to_le32(bzone_id);
	dmz_dirty_mblock(zmd, dmap_mblk);
}
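
/*
 * This only updates the in-memory mapping block and marks it dirty;
 * the change reaches the disk at the next dmz_flush_metadata() call.
 * Callers are expected to hold the map lock (see dmz_lock_map()).
 */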

/*
 * The list of mapped zones is maintained in LRU order.
 * This rotates a zone at the end of its map list.
 */
static void __dmz_lru_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
{
	if (list_empty(&zone->link))
		return;

	list_del_init(&zone->link);
	if (dmz_is_seq(zone)) {
		/* LRU rotate sequential zone */
		list_add_tail(&zone->link, &zone->dev->map_seq_list);
	} else if (dmz_is_cache(zone)) {
		/* LRU rotate cache zone */
		list_add_tail(&zone->link, &zmd->map_cache_list);
	} else {
		/* LRU rotate random zone */
		list_add_tail(&zone->link, &zone->dev->map_rnd_list);
	}
}

/*
 * The list of mapped random zones is maintained
 * in LRU order. This rotates a zone at the end of the list.
 */
static void dmz_lru_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
{
	__dmz_lru_zone(zmd, zone);
	if (zone->bzone)
		__dmz_lru_zone(zmd, zone->bzone);
}

/*
 * Wait for any zone to be freed.
 */
static void dmz_wait_for_free_zones(struct dmz_metadata *zmd)
{
	DEFINE_WAIT(wait);

	prepare_to_wait(&zmd->free_wq, &wait, TASK_UNINTERRUPTIBLE);
	dmz_unlock_map(zmd);
	dmz_unlock_metadata(zmd);

	io_schedule_timeout(HZ);

	dmz_lock_metadata(zmd);
	dmz_lock_map(zmd);
	finish_wait(&zmd->free_wq, &wait);
}

/*
 * Lock a zone for reclaim (set the zone RECLAIM bit).
 * Returns 0 if the zone cannot be locked or if it is already locked,
 * and 1 otherwise.
 */
int dmz_lock_zone_reclaim(struct dm_zone *zone)
{
	/* Active zones cannot be reclaimed */
	if (dmz_is_active(zone))
		return 0;

	return !test_and_set_bit(DMZ_RECLAIM, &zone->flags);
}

/*
 * Clear a zone reclaim flag.
 */
void dmz_unlock_zone_reclaim(struct dm_zone *zone)
{
	WARN_ON(dmz_is_active(zone));
	WARN_ON(!dmz_in_reclaim(zone));

	clear_bit_unlock(DMZ_RECLAIM, &zone->flags);
	smp_mb__after_atomic();
	wake_up_bit(&zone->flags, DMZ_RECLAIM);
}
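
/*
 * As with the metadata block state bits, the clear/barrier/wake_up_bit()
 * sequence above pairs with the wait_on_bit_timeout() call in
 * dmz_wait_for_reclaim() below.
 */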

/*
 * Wait for a zone reclaim to complete.
 */
static void dmz_wait_for_reclaim(struct dmz_metadata *zmd, struct dm_zone *zone)
{
	dmz_unlock_map(zmd);
	dmz_unlock_metadata(zmd);
	set_bit(DMZ_RECLAIM_TERMINATE, &zone->flags);
	wait_on_bit_timeout(&zone->flags, DMZ_RECLAIM, TASK_UNINTERRUPTIBLE, HZ);
	clear_bit(DMZ_RECLAIM_TERMINATE, &zone->flags);
	dmz_lock_metadata(zmd);
	dmz_lock_map(zmd);
}

/*
 * Select a cache or random write zone for reclaim.
 */
static struct dm_zone *dmz_get_rnd_zone_for_reclaim(struct dmz_metadata *zmd,
						    unsigned int idx, bool idle)
{
	struct dm_zone *dzone = NULL;
	struct dm_zone *zone;
	struct list_head *zone_list;

	/* If we have cache zones select from the cache zone list */
	if (zmd->nr_cache) {
		zone_list = &zmd->map_cache_list;
		/* Try to reclaim random zones, too, when idle */
		if (idle && list_empty(zone_list))
			zone_list = &zmd->dev[idx].map_rnd_list;
	} else
		zone_list = &zmd->dev[idx].map_rnd_list;

	list_for_each_entry(zone, zone_list, link) {
		if (dmz_is_buf(zone)) {
			dzone = zone->bzone;
			if (dzone->dev->dev_idx != idx)
				continue;
		} else
			dzone = zone;
		if (dmz_lock_zone_reclaim(dzone))
			return dzone;
	}

	return NULL;
}
1969
1970/*
1971 * Select a buffered sequential zone for reclaim.
1972 */
 1973static struct dm_zone *dmz_get_seq_zone_for_reclaim(struct dmz_metadata *zmd,
 1974 unsigned int idx)
 1975{
 1976 struct dm_zone *zone;
 1977
 1978 list_for_each_entry(zone, &zmd->dev[idx].map_seq_list, link) {
 1979 if (!zone->bzone)
 1980 continue;
 1981 if (dmz_lock_zone_reclaim(zone))
 1982 return zone;
 1983 }
 1984
 1985 return NULL;
1986}
1987
1988/*
1989 * Select a zone for reclaim.
1990 */
1991struct dm_zone *dmz_get_zone_for_reclaim(struct dmz_metadata *zmd,
1992 unsigned int dev_idx, bool idle)
1993{
1994 struct dm_zone *zone;
1995
1996 /*
 1997 * Search for a zone candidate to reclaim: 2 cases are possible.
 1998 * (1) There are no free sequential zones. Then a random data zone
 1999 * cannot be reclaimed. So choose a sequential zone to reclaim so
 2000 * that afterward a random zone can be reclaimed.
 2001 * (2) At least one free sequential zone is available. Then choose
 2002 * the oldest cache or random zone (data or buffer) that can be locked.
2003 */
2004 dmz_lock_map(zmd);
2005 if (list_empty(&zmd->reserved_seq_zones_list))
 2006 zone = dmz_get_seq_zone_for_reclaim(zmd, dev_idx);
 2007 else
 2008 zone = dmz_get_rnd_zone_for_reclaim(zmd, dev_idx, idle);
2009 dmz_unlock_map(zmd);
2010
2011 return zone;
2012}
2013
2014/*
2015 * Get the zone mapping a chunk, if the chunk is mapped already.
 2016 * If no mapping exists and the operation is WRITE, a zone is
2017 * allocated and used to map the chunk.
2018 * The zone returned will be set to the active state.
2019 */
2020struct dm_zone *dmz_get_chunk_mapping(struct dmz_metadata *zmd, unsigned int chunk, int op)
2021{
2022 struct dmz_mblock *dmap_mblk = zmd->map_mblk[chunk >> DMZ_MAP_ENTRIES_SHIFT];
2023 struct dmz_map *dmap = (struct dmz_map *) dmap_mblk->data;
2024 int dmap_idx = chunk & DMZ_MAP_ENTRIES_MASK;
2025 unsigned int dzone_id;
2026 struct dm_zone *dzone = NULL;
2027 int ret = 0;
 2028 int alloc_flags = zmd->nr_cache ? DMZ_ALLOC_CACHE : DMZ_ALLOC_RND;
2029
2030 dmz_lock_map(zmd);
2031again:
2032 /* Get the chunk mapping */
2033 dzone_id = le32_to_cpu(dmap[dmap_idx].dzone_id);
2034 if (dzone_id == DMZ_MAP_UNMAPPED) {
2035 /*
 2036 * Reads or discards in unmapped chunks are fine. But for
2037 * writes, we need a mapping, so get one.
2038 */
2039 if (op != REQ_OP_WRITE)
2040 goto out;
2041
 2042 /* Allocate a cache or random zone */
 2043 dzone = dmz_alloc_zone(zmd, 0, alloc_flags);
 2044 if (!dzone) {
 2045 if (dmz_dev_is_dying(zmd)) {
 2046 dzone = ERR_PTR(-EIO);
 2047 goto out;
 2048 }
2049 dmz_wait_for_free_zones(zmd);
2050 goto again;
2051 }
2052
2053 dmz_map_zone(zmd, dzone, chunk);
2054
2055 } else {
2056 /* The chunk is already mapped: get the mapping zone */
2057 dzone = dmz_get(zmd, dzone_id);
2058 if (!dzone) {
2059 dzone = ERR_PTR(-EIO);
2060 goto out;
2061 }
2062 if (dzone->chunk != chunk) {
2063 dzone = ERR_PTR(-EIO);
2064 goto out;
2065 }
2066
2067 /* Repair write pointer if the sequential dzone has error */
2068 if (dmz_seq_write_err(dzone)) {
2069 ret = dmz_handle_seq_write_err(zmd, dzone);
2070 if (ret) {
2071 dzone = ERR_PTR(-EIO);
2072 goto out;
2073 }
2074 clear_bit(DMZ_SEQ_WRITE_ERR, &dzone->flags);
2075 }
2076 }
2077
2078 /*
2079 * If the zone is being reclaimed, the chunk mapping may change
2080 * to a different zone. So wait for reclaim and retry. Otherwise,
2081 * activate the zone (this will prevent reclaim from touching it).
2082 */
2083 if (dmz_in_reclaim(dzone)) {
2084 dmz_wait_for_reclaim(zmd, dzone);
2085 goto again;
2086 }
2087 dmz_activate_zone(dzone);
2088 dmz_lru_zone(zmd, dzone);
2089out:
2090 dmz_unlock_map(zmd);
2091
2092 return dzone;
2093}
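/*
 * Illustrative caller sketch (an assumption about typical use, not code
 * taken from the target driver): the returned zone is activated, so
 * every successful lookup must be paired with dmz_put_chunk_mapping()
 * once the chunk I/O has completed:
 *
 *	zone = dmz_get_chunk_mapping(zmd, chunk, REQ_OP_WRITE);
 *	if (IS_ERR(zone))
 *		return PTR_ERR(zone);
 *	if (zone) {
 *		... submit I/O to the zone ...
 *		dmz_put_chunk_mapping(zmd, zone);
 *	}
 */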
2094
2095/*
2096 * Write and discard change the block validity of data zones and their buffer
2097 * zones. Check here that valid blocks are still present. If all blocks are
2098 * invalid, the zones can be unmapped on the fly without waiting for reclaim
2099 * to do it.
2100 */
2101void dmz_put_chunk_mapping(struct dmz_metadata *zmd, struct dm_zone *dzone)
2102{
2103 struct dm_zone *bzone;
2104
2105 dmz_lock_map(zmd);
2106
2107 bzone = dzone->bzone;
2108 if (bzone) {
2109 if (dmz_weight(bzone))
2110 dmz_lru_zone(zmd, bzone);
2111 else {
2112 /* Empty buffer zone: reclaim it */
2113 dmz_unmap_zone(zmd, bzone);
2114 dmz_free_zone(zmd, bzone);
2115 bzone = NULL;
2116 }
2117 }
2118
2119 /* Deactivate the data zone */
2120 dmz_deactivate_zone(dzone);
2121 if (dmz_is_active(dzone) || bzone || dmz_weight(dzone))
2122 dmz_lru_zone(zmd, dzone);
2123 else {
2124 /* Unbuffered inactive empty data zone: reclaim it */
2125 dmz_unmap_zone(zmd, dzone);
2126 dmz_free_zone(zmd, dzone);
2127 }
2128
2129 dmz_unlock_map(zmd);
2130}
2131
2132/*
 2133 * Allocate and map a cache or random zone to buffer a chunk
2134 * already mapped to a sequential zone.
2135 */
2136struct dm_zone *dmz_get_chunk_buffer(struct dmz_metadata *zmd,
2137 struct dm_zone *dzone)
2138{
2139 struct dm_zone *bzone;
 2140 int alloc_flags = zmd->nr_cache ? DMZ_ALLOC_CACHE : DMZ_ALLOC_RND;
2141
2142 dmz_lock_map(zmd);
2143again:
2144 bzone = dzone->bzone;
2145 if (bzone)
2146 goto out;
2147
 2148 /* Allocate a cache or random zone */
 2149 bzone = dmz_alloc_zone(zmd, 0, alloc_flags);
 2150 if (!bzone) {
 2151 if (dmz_dev_is_dying(zmd)) {
 2152 bzone = ERR_PTR(-EIO);
 2153 goto out;
 2154 }
2155 dmz_wait_for_free_zones(zmd);
2156 goto again;
2157 }
2158
2159 /* Update the chunk mapping */
 2160 dmz_set_chunk_mapping(zmd, dzone->chunk, dzone->id, bzone->id);
2161
2162 set_bit(DMZ_BUF, &bzone->flags);
2163 bzone->chunk = dzone->chunk;
2164 bzone->bzone = dzone;
2165 dzone->bzone = bzone;
 2166 if (dmz_is_cache(bzone))
 2167 list_add_tail(&bzone->link, &zmd->map_cache_list);
 2168 else
 2169 list_add_tail(&bzone->link, &bzone->dev->map_rnd_list);
2170out:
2171 dmz_unlock_map(zmd);
2172
2173 return bzone;
2174}
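/*
 * Note: a data zone and its buffer zone reference each other through
 * the same bzone field (dzone->bzone is the buffer zone and
 * bzone->bzone points back at the data zone), which is what
 * dmz_unmap_zone() below relies on when tearing the pair down.
 */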
2175
2176/*
2177 * Get an unmapped (free) zone.
2178 * This must be called with the mapping lock held.
2179 */
2180struct dm_zone *dmz_alloc_zone(struct dmz_metadata *zmd, unsigned int dev_idx,
2181 unsigned long flags)
2182{
2183 struct list_head *list;
2184 struct dm_zone *zone;
 2185 int i = 0;
 2186
 2187again:
 2188 if (flags & DMZ_ALLOC_CACHE)
 2189 list = &zmd->unmap_cache_list;
 2190 else if (flags & DMZ_ALLOC_RND)
 2191 list = &zmd->dev[dev_idx].unmap_rnd_list;
 2192 else
 2193 list = &zmd->dev[dev_idx].unmap_seq_list;
 2194
 2195 if (list_empty(list)) {
 2196 /*
 2197 * No free zone: return NULL if this is not for reclaim.
 2198 */
 2199 if (!(flags & DMZ_ALLOC_RECLAIM))
 2200 return NULL;
 2201 /*
 2202 * Try to allocate from other devices
 2203 */
 2204 if (i < zmd->nr_devs) {
 2205 dev_idx = (dev_idx + 1) % zmd->nr_devs;
 2206 i++;
 2207 goto again;
 2208 }
 2209
2210 /*
2211 * Fallback to the reserved sequential zones
2212 */
2213 zone = list_first_entry_or_null(&zmd->reserved_seq_zones_list,
2214 struct dm_zone, link);
2215 if (zone) {
2216 list_del_init(&zone->link);
2217 atomic_dec(&zmd->nr_reserved_seq_zones);
2218 }
2219 return zone;
2220 }
2221
2222 zone = list_first_entry(list, struct dm_zone, link);
2223 list_del_init(&zone->link);
2224
 2225 if (dmz_is_cache(zone))
 2226 atomic_dec(&zmd->unmap_nr_cache);
 2227 else if (dmz_is_rnd(zone))
 2228 atomic_dec(&zone->dev->unmap_nr_rnd);
 2229 else
 2230 atomic_dec(&zone->dev->unmap_nr_seq);
2231
2232 if (dmz_is_offline(zone)) {
 2233 dmz_zmd_warn(zmd, "Zone %u is offline", zone->id);
 2234 zone = NULL;
 2235 goto again;
 2236 }
 2237 if (dmz_is_meta(zone)) {
 2238 dmz_zmd_warn(zmd, "Zone %u has metadata", zone->id);
 2239 zone = NULL;
 2240 goto again;
 2241 }
2242 return zone;
2243}
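/*
 * Illustrative calls (a sketch using only the flags tested above): a
 * regular allocation asks for a cache or random zone on device 0, while
 * reclaim may additionally fall back to other devices and then to the
 * reserved sequential zones:
 *
 *	zone = dmz_alloc_zone(zmd, 0, DMZ_ALLOC_CACHE);
 *	zone = dmz_alloc_zone(zmd, dev_idx, DMZ_ALLOC_RND | DMZ_ALLOC_RECLAIM);
 */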
2244
2245/*
2246 * Free a zone.
2247 * This must be called with the mapping lock held.
2248 */
2249void dmz_free_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
2250{
2251 /* If this is a sequential zone, reset it */
2252 if (dmz_is_seq(zone))
2253 dmz_reset_zone(zmd, zone);
2254
2255 /* Return the zone to its type unmap list */
 2256 if (dmz_is_cache(zone)) {
 2257 list_add_tail(&zone->link, &zmd->unmap_cache_list);
 2258 atomic_inc(&zmd->unmap_nr_cache);
 2259 } else if (dmz_is_rnd(zone)) {
 2260 list_add_tail(&zone->link, &zone->dev->unmap_rnd_list);
 2261 atomic_inc(&zone->dev->unmap_nr_rnd);
 2262 } else if (dmz_is_reserved(zone)) {
 2263 list_add_tail(&zone->link, &zmd->reserved_seq_zones_list);
 2264 atomic_inc(&zmd->nr_reserved_seq_zones);
 2265 } else {
 2266 list_add_tail(&zone->link, &zone->dev->unmap_seq_list);
 2267 atomic_inc(&zone->dev->unmap_nr_seq);
2268 }
2269
2270 wake_up_all(&zmd->free_wq);
2271}
2272
2273/*
2274 * Map a chunk to a zone.
2275 * This must be called with the mapping lock held.
2276 */
2277void dmz_map_zone(struct dmz_metadata *zmd, struct dm_zone *dzone,
2278 unsigned int chunk)
2279{
2280 /* Set the chunk mapping */
 2281 dmz_set_chunk_mapping(zmd, chunk, dzone->id,
 2282 DMZ_MAP_UNMAPPED);
 2283 dzone->chunk = chunk;
 2284 if (dmz_is_cache(dzone))
 2285 list_add_tail(&dzone->link, &zmd->map_cache_list);
 2286 else if (dmz_is_rnd(dzone))
 2287 list_add_tail(&dzone->link, &dzone->dev->map_rnd_list);
 2288 else
 2289 list_add_tail(&dzone->link, &dzone->dev->map_seq_list);
2290}
2291
2292/*
2293 * Unmap a zone.
2294 * This must be called with the mapping lock held.
2295 */
2296void dmz_unmap_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
2297{
2298 unsigned int chunk = zone->chunk;
2299 unsigned int dzone_id;
2300
2301 if (chunk == DMZ_MAP_UNMAPPED) {
2302 /* Already unmapped */
2303 return;
2304 }
2305
2306 if (test_and_clear_bit(DMZ_BUF, &zone->flags)) {
2307 /*
2308 * Unmapping the chunk buffer zone: clear only
2309 * the chunk buffer mapping
2310 */
 2311 dzone_id = zone->bzone->id;
2312 zone->bzone->bzone = NULL;
2313 zone->bzone = NULL;
2314
2315 } else {
2316 /*
2317 * Unmapping the chunk data zone: the zone must
2318 * not be buffered.
2319 */
2320 if (WARN_ON(zone->bzone)) {
2321 zone->bzone->bzone = NULL;
2322 zone->bzone = NULL;
2323 }
2324 dzone_id = DMZ_MAP_UNMAPPED;
2325 }
2326
2327 dmz_set_chunk_mapping(zmd, chunk, dzone_id, DMZ_MAP_UNMAPPED);
2328
2329 zone->chunk = DMZ_MAP_UNMAPPED;
2330 list_del_init(&zone->link);
2331}
2332
2333/*
2334 * Set @nr_bits bits in @bitmap starting from @bit.
2335 * Return the number of bits changed from 0 to 1.
2336 */
2337static unsigned int dmz_set_bits(unsigned long *bitmap,
2338 unsigned int bit, unsigned int nr_bits)
2339{
2340 unsigned long *addr;
2341 unsigned int end = bit + nr_bits;
2342 unsigned int n = 0;
2343
2344 while (bit < end) {
2345 if (((bit & (BITS_PER_LONG - 1)) == 0) &&
2346 ((end - bit) >= BITS_PER_LONG)) {
2347 /* Try to set the whole word at once */
2348 addr = bitmap + BIT_WORD(bit);
2349 if (*addr == 0) {
2350 *addr = ULONG_MAX;
2351 n += BITS_PER_LONG;
2352 bit += BITS_PER_LONG;
2353 continue;
2354 }
2355 }
2356
2357 if (!test_and_set_bit(bit, bitmap))
2358 n++;
2359 bit++;
2360 }
2361
2362 return n;
2363}
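/*
 * Worked example (illustrative): on a 64-bit machine, setting 192 bits
 * starting at bit 0 of an all-zero bitmap takes the whole-word fast
 * path three times (3 * 64 bits) instead of 192 test_and_set_bit()
 * calls. An unaligned start, say bit 5, is handled bit by bit until the
 * next word boundary.
 */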
2364
2365/*
2366 * Get the bitmap block storing the bit for chunk_block in zone.
2367 */
2368static struct dmz_mblock *dmz_get_bitmap(struct dmz_metadata *zmd,
2369 struct dm_zone *zone,
2370 sector_t chunk_block)
2371{
2372 sector_t bitmap_block = 1 + zmd->nr_map_blocks +
 2373 (sector_t)(zone->id * zmd->zone_nr_bitmap_blocks) +
2374 (chunk_block >> DMZ_BLOCK_SHIFT_BITS);
2375
2376 return dmz_get_mblock(zmd, bitmap_block);
2377}
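/*
 * Worked example (illustrative, assuming 4 KB metadata blocks, i.e.
 * 32768 bits per bitmap block): with zone_nr_bitmap_blocks == 8, the
 * bit for chunk_block 40000 of zone 3 lives in metadata block
 * 1 + nr_map_blocks + 3 * 8 + (40000 >> 15), that is, in the second
 * bitmap block of that zone.
 */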
2378
2379/*
2380 * Copy the valid blocks bitmap of from_zone to the bitmap of to_zone.
2381 */
2382int dmz_copy_valid_blocks(struct dmz_metadata *zmd, struct dm_zone *from_zone,
2383 struct dm_zone *to_zone)
2384{
2385 struct dmz_mblock *from_mblk, *to_mblk;
2386 sector_t chunk_block = 0;
2387
2388 /* Get the zones bitmap blocks */
 2389 while (chunk_block < zmd->zone_nr_blocks) {
2390 from_mblk = dmz_get_bitmap(zmd, from_zone, chunk_block);
2391 if (IS_ERR(from_mblk))
2392 return PTR_ERR(from_mblk);
2393 to_mblk = dmz_get_bitmap(zmd, to_zone, chunk_block);
2394 if (IS_ERR(to_mblk)) {
2395 dmz_release_mblock(zmd, from_mblk);
2396 return PTR_ERR(to_mblk);
2397 }
2398
2399 memcpy(to_mblk->data, from_mblk->data, DMZ_BLOCK_SIZE);
2400 dmz_dirty_mblock(zmd, to_mblk);
2401
2402 dmz_release_mblock(zmd, to_mblk);
2403 dmz_release_mblock(zmd, from_mblk);
2404
 2405 chunk_block += zmd->zone_bits_per_mblk;
2406 }
2407
2408 to_zone->weight = from_zone->weight;
2409
2410 return 0;
2411}
2412
2413/*
2414 * Merge the valid blocks bitmap of from_zone into the bitmap of to_zone,
2415 * starting from chunk_block.
2416 */
2417int dmz_merge_valid_blocks(struct dmz_metadata *zmd, struct dm_zone *from_zone,
2418 struct dm_zone *to_zone, sector_t chunk_block)
2419{
2420 unsigned int nr_blocks;
2421 int ret;
2422
2423 /* Get the zones bitmap blocks */
 2424 while (chunk_block < zmd->zone_nr_blocks) {
2425 /* Get a valid region from the source zone */
2426 ret = dmz_first_valid_block(zmd, from_zone, &chunk_block);
2427 if (ret <= 0)
2428 return ret;
2429
2430 nr_blocks = ret;
2431 ret = dmz_validate_blocks(zmd, to_zone, chunk_block, nr_blocks);
2432 if (ret)
2433 return ret;
2434
2435 chunk_block += nr_blocks;
2436 }
2437
2438 return 0;
2439}
2440
2441/*
2442 * Validate all the blocks in the range [block..block+nr_blocks-1].
2443 */
2444int dmz_validate_blocks(struct dmz_metadata *zmd, struct dm_zone *zone,
2445 sector_t chunk_block, unsigned int nr_blocks)
2446{
2447 unsigned int count, bit, nr_bits;
 2448 unsigned int zone_nr_blocks = zmd->zone_nr_blocks;
2449 struct dmz_mblock *mblk;
2450 unsigned int n = 0;
2451
 2452 dmz_zmd_debug(zmd, "=> VALIDATE zone %u, block %llu, %u blocks",
 2453 zone->id, (unsigned long long)chunk_block,
2454 nr_blocks);
2455
2456 WARN_ON(chunk_block + nr_blocks > zone_nr_blocks);
2457
2458 while (nr_blocks) {
2459 /* Get bitmap block */
2460 mblk = dmz_get_bitmap(zmd, zone, chunk_block);
2461 if (IS_ERR(mblk))
2462 return PTR_ERR(mblk);
2463
2464 /* Set bits */
2465 bit = chunk_block & DMZ_BLOCK_MASK_BITS;
 2466 nr_bits = min(nr_blocks, zmd->zone_bits_per_mblk - bit);
2467
2468 count = dmz_set_bits((unsigned long *)mblk->data, bit, nr_bits);
2469 if (count) {
2470 dmz_dirty_mblock(zmd, mblk);
2471 n += count;
2472 }
2473 dmz_release_mblock(zmd, mblk);
2474
2475 nr_blocks -= nr_bits;
2476 chunk_block += nr_bits;
2477 }
2478
2479 if (likely(zone->weight + n <= zone_nr_blocks))
2480 zone->weight += n;
2481 else {
 2482 dmz_zmd_warn(zmd, "Zone %u: weight %u should be <= %u",
 2483 zone->id, zone->weight,
2484 zone_nr_blocks - n);
2485 zone->weight = zone_nr_blocks;
2486 }
2487
2488 return 0;
2489}
2490
2491/*
2492 * Clear nr_bits bits in bitmap starting from bit.
2493 * Return the number of bits cleared.
2494 */
2495static int dmz_clear_bits(unsigned long *bitmap, int bit, int nr_bits)
2496{
2497 unsigned long *addr;
2498 int end = bit + nr_bits;
2499 int n = 0;
2500
2501 while (bit < end) {
2502 if (((bit & (BITS_PER_LONG - 1)) == 0) &&
2503 ((end - bit) >= BITS_PER_LONG)) {
2504 /* Try to clear whole word at once */
2505 addr = bitmap + BIT_WORD(bit);
2506 if (*addr == ULONG_MAX) {
2507 *addr = 0;
2508 n += BITS_PER_LONG;
2509 bit += BITS_PER_LONG;
2510 continue;
2511 }
2512 }
2513
2514 if (test_and_clear_bit(bit, bitmap))
2515 n++;
2516 bit++;
2517 }
2518
2519 return n;
2520}
2521
2522/*
2523 * Invalidate all the blocks in the range [block..block+nr_blocks-1].
2524 */
2525int dmz_invalidate_blocks(struct dmz_metadata *zmd, struct dm_zone *zone,
2526 sector_t chunk_block, unsigned int nr_blocks)
2527{
2528 unsigned int count, bit, nr_bits;
2529 struct dmz_mblock *mblk;
2530 unsigned int n = 0;
2531
 2532 dmz_zmd_debug(zmd, "=> INVALIDATE zone %u, block %llu, %u blocks",
 2533 zone->id, (u64)chunk_block, nr_blocks);
 2534
 2535 WARN_ON(chunk_block + nr_blocks > zmd->zone_nr_blocks);
2536
2537 while (nr_blocks) {
2538 /* Get bitmap block */
2539 mblk = dmz_get_bitmap(zmd, zone, chunk_block);
2540 if (IS_ERR(mblk))
2541 return PTR_ERR(mblk);
2542
2543 /* Clear bits */
2544 bit = chunk_block & DMZ_BLOCK_MASK_BITS;
 2545 nr_bits = min(nr_blocks, zmd->zone_bits_per_mblk - bit);
2546
2547 count = dmz_clear_bits((unsigned long *)mblk->data,
2548 bit, nr_bits);
2549 if (count) {
2550 dmz_dirty_mblock(zmd, mblk);
2551 n += count;
2552 }
2553 dmz_release_mblock(zmd, mblk);
2554
2555 nr_blocks -= nr_bits;
2556 chunk_block += nr_bits;
2557 }
2558
2559 if (zone->weight >= n)
2560 zone->weight -= n;
2561 else {
 2562 dmz_zmd_warn(zmd, "Zone %u: weight %u should be >= %u",
 2563 zone->id, zone->weight, n);
2564 zone->weight = 0;
2565 }
2566
2567 return 0;
2568}
2569
2570/*
2571 * Get a block bit value.
2572 */
2573static int dmz_test_block(struct dmz_metadata *zmd, struct dm_zone *zone,
2574 sector_t chunk_block)
2575{
2576 struct dmz_mblock *mblk;
2577 int ret;
2578
 2579 WARN_ON(chunk_block >= zmd->zone_nr_blocks);
2580
2581 /* Get bitmap block */
2582 mblk = dmz_get_bitmap(zmd, zone, chunk_block);
2583 if (IS_ERR(mblk))
2584 return PTR_ERR(mblk);
2585
2586 /* Get offset */
2587 ret = test_bit(chunk_block & DMZ_BLOCK_MASK_BITS,
2588 (unsigned long *) mblk->data) != 0;
2589
2590 dmz_release_mblock(zmd, mblk);
2591
2592 return ret;
2593}
2594
2595/*
2596 * Return the number of blocks from chunk_block to the first block with a bit
2597 * value specified by set. Search at most nr_blocks blocks from chunk_block.
2598 */
2599static int dmz_to_next_set_block(struct dmz_metadata *zmd, struct dm_zone *zone,
2600 sector_t chunk_block, unsigned int nr_blocks,
2601 int set)
2602{
2603 struct dmz_mblock *mblk;
2604 unsigned int bit, set_bit, nr_bits;
 2605 unsigned int zone_bits = zmd->zone_bits_per_mblk;
2606 unsigned long *bitmap;
2607 int n = 0;
2608
 2609 WARN_ON(chunk_block + nr_blocks > zmd->zone_nr_blocks);
2610
2611 while (nr_blocks) {
2612 /* Get bitmap block */
2613 mblk = dmz_get_bitmap(zmd, zone, chunk_block);
2614 if (IS_ERR(mblk))
2615 return PTR_ERR(mblk);
2616
2617 /* Get offset */
2618 bitmap = (unsigned long *) mblk->data;
2619 bit = chunk_block & DMZ_BLOCK_MASK_BITS;
 2620 nr_bits = min(nr_blocks, zone_bits - bit);
 2621 if (set)
 2622 set_bit = find_next_bit(bitmap, zone_bits, bit);
 2623 else
 2624 set_bit = find_next_zero_bit(bitmap, zone_bits, bit);
2625 dmz_release_mblock(zmd, mblk);
2626
2627 n += set_bit - bit;
 2628 if (set_bit < zone_bits)
2629 break;
2630
2631 nr_blocks -= nr_bits;
2632 chunk_block += nr_bits;
2633 }
2634
2635 return n;
2636}
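/*
 * Usage note: a set bit marks a valid block, so callers pass set == 1
 * to skip over invalid blocks (find the next valid one) and set == 0 to
 * measure the length of a run of valid blocks, as dmz_block_valid() and
 * dmz_first_valid_block() below do.
 */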
2637
2638/*
2639 * Test if chunk_block is valid. If it is, the number of consecutive
2640 * valid blocks from chunk_block will be returned.
2641 */
2642int dmz_block_valid(struct dmz_metadata *zmd, struct dm_zone *zone,
2643 sector_t chunk_block)
2644{
2645 int valid;
2646
2647 valid = dmz_test_block(zmd, zone, chunk_block);
2648 if (valid <= 0)
2649 return valid;
2650
2651 /* The block is valid: get the number of valid blocks from block */
2652 return dmz_to_next_set_block(zmd, zone, chunk_block,
 2653 zmd->zone_nr_blocks - chunk_block, 0);
2654}
2655
2656/*
2657 * Find the first valid block from @chunk_block in @zone.
2658 * If such a block is found, its number is returned using
2659 * @chunk_block and the total number of valid blocks from @chunk_block
2660 * is returned.
2661 */
2662int dmz_first_valid_block(struct dmz_metadata *zmd, struct dm_zone *zone,
2663 sector_t *chunk_block)
2664{
2665 sector_t start_block = *chunk_block;
2666 int ret;
2667
2668 ret = dmz_to_next_set_block(zmd, zone, start_block,
 2669 zmd->zone_nr_blocks - start_block, 1);
2670 if (ret < 0)
2671 return ret;
2672
2673 start_block += ret;
2674 *chunk_block = start_block;
2675
2676 return dmz_to_next_set_block(zmd, zone, start_block,
 2677 zmd->zone_nr_blocks - start_block, 0);
2678}
2679
2680/*
2681 * Count the number of bits set starting from bit up to bit + nr_bits - 1.
2682 */
2683static int dmz_count_bits(void *bitmap, int bit, int nr_bits)
2684{
2685 unsigned long *addr;
2686 int end = bit + nr_bits;
2687 int n = 0;
2688
2689 while (bit < end) {
2690 if (((bit & (BITS_PER_LONG - 1)) == 0) &&
2691 ((end - bit) >= BITS_PER_LONG)) {
2692 addr = (unsigned long *)bitmap + BIT_WORD(bit);
2693 if (*addr == ULONG_MAX) {
2694 n += BITS_PER_LONG;
2695 bit += BITS_PER_LONG;
2696 continue;
2697 }
2698 }
2699
2700 if (test_bit(bit, bitmap))
2701 n++;
2702 bit++;
2703 }
2704
2705 return n;
2706}
2707
2708/*
2709 * Get a zone weight.
2710 */
2711static void dmz_get_zone_weight(struct dmz_metadata *zmd, struct dm_zone *zone)
2712{
2713 struct dmz_mblock *mblk;
2714 sector_t chunk_block = 0;
2715 unsigned int bit, nr_bits;
 2716 unsigned int nr_blocks = zmd->zone_nr_blocks;
2717 void *bitmap;
2718 int n = 0;
2719
2720 while (nr_blocks) {
2721 /* Get bitmap block */
2722 mblk = dmz_get_bitmap(zmd, zone, chunk_block);
2723 if (IS_ERR(mblk)) {
2724 n = 0;
2725 break;
2726 }
2727
2728 /* Count bits in this block */
2729 bitmap = mblk->data;
2730 bit = chunk_block & DMZ_BLOCK_MASK_BITS;
 2731 nr_bits = min(nr_blocks, zmd->zone_bits_per_mblk - bit);
2732 n += dmz_count_bits(bitmap, bit, nr_bits);
2733
2734 dmz_release_mblock(zmd, mblk);
2735
2736 nr_blocks -= nr_bits;
2737 chunk_block += nr_bits;
2738 }
2739
2740 zone->weight = n;
2741}
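/*
 * Note: a zone's weight is its number of valid blocks. It is normally
 * maintained incrementally by dmz_validate_blocks() and
 * dmz_invalidate_blocks(); this full recount is only needed when zones
 * are first brought in from disk (e.g. while loading the mapping table).
 */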
2742
2743/*
2744 * Cleanup the zoned metadata resources.
2745 */
2746static void dmz_cleanup_metadata(struct dmz_metadata *zmd)
2747{
2748 struct rb_root *root;
2749 struct dmz_mblock *mblk, *next;
2750 int i;
2751
2752 /* Release zone mapping resources */
2753 if (zmd->map_mblk) {
2754 for (i = 0; i < zmd->nr_map_blocks; i++)
2755 dmz_release_mblock(zmd, zmd->map_mblk[i]);
2756 kfree(zmd->map_mblk);
2757 zmd->map_mblk = NULL;
2758 }
2759
2760 /* Release super blocks */
2761 for (i = 0; i < 2; i++) {
2762 if (zmd->sb[i].mblk) {
2763 dmz_free_mblock(zmd, zmd->sb[i].mblk);
2764 zmd->sb[i].mblk = NULL;
2765 }
2766 }
2767
2768 /* Free cached blocks */
2769 while (!list_empty(&zmd->mblk_dirty_list)) {
2770 mblk = list_first_entry(&zmd->mblk_dirty_list,
2771 struct dmz_mblock, link);
 2772 dmz_zmd_warn(zmd, "mblock %llu still in dirty list (ref %u)",
 2773 (u64)mblk->no, mblk->ref);
2774 list_del_init(&mblk->link);
2775 rb_erase(&mblk->node, &zmd->mblk_rbtree);
2776 dmz_free_mblock(zmd, mblk);
2777 }
2778
2779 while (!list_empty(&zmd->mblk_lru_list)) {
2780 mblk = list_first_entry(&zmd->mblk_lru_list,
2781 struct dmz_mblock, link);
2782 list_del_init(&mblk->link);
2783 rb_erase(&mblk->node, &zmd->mblk_rbtree);
2784 dmz_free_mblock(zmd, mblk);
2785 }
2786
2787 /* Sanity checks: the mblock rbtree should now be empty */
2788 root = &zmd->mblk_rbtree;
2789 rbtree_postorder_for_each_entry_safe(mblk, next, root, node) {
 2790 dmz_zmd_warn(zmd, "mblock %llu ref %u still in rbtree",
 2791 (u64)mblk->no, mblk->ref);
 2792 mblk->ref = 0;
2793 dmz_free_mblock(zmd, mblk);
2794 }
2795
2796 /* Free the zone descriptors */
2797 dmz_drop_zones(zmd);
2798
2799 mutex_destroy(&zmd->mblk_flush_lock);
2800 mutex_destroy(&zmd->map_lock);
2801}
2802
2803static void dmz_print_dev(struct dmz_metadata *zmd, int num)
2804{
2805 struct dmz_dev *dev = &zmd->dev[num];
2806
2807 if (bdev_zoned_model(dev->bdev) == BLK_ZONED_NONE)
2808 dmz_dev_info(dev, "Regular block device");
2809 else
2810 dmz_dev_info(dev, "Host-%s zoned block device",
2811 bdev_zoned_model(dev->bdev) == BLK_ZONED_HA ?
2812 "aware" : "managed");
2813 if (zmd->sb_version > 1) {
2814 sector_t sector_offset =
2815 dev->zone_offset << zmd->zone_nr_sectors_shift;
2816
2817 dmz_dev_info(dev, " %llu 512-byte logical sectors (offset %llu)",
2818 (u64)dev->capacity, (u64)sector_offset);
2819 dmz_dev_info(dev, " %u zones of %llu 512-byte logical sectors (offset %llu)",
2820 dev->nr_zones, (u64)zmd->zone_nr_sectors,
2821 (u64)dev->zone_offset);
2822 } else {
2823 dmz_dev_info(dev, " %llu 512-byte logical sectors",
2824 (u64)dev->capacity);
2825 dmz_dev_info(dev, " %u zones of %llu 512-byte logical sectors",
2826 dev->nr_zones, (u64)zmd->zone_nr_sectors);
2827 }
2828}
2829
2830/*
2831 * Initialize the zoned metadata.
2832 */
 2833int dmz_ctr_metadata(struct dmz_dev *dev, int num_dev,
 2834 struct dmz_metadata **metadata,
 2835 const char *devname)
2836{
2837 struct dmz_metadata *zmd;
 2838 unsigned int i;
2839 struct dm_zone *zone;
2840 int ret;
2841
2842 zmd = kzalloc(sizeof(struct dmz_metadata), GFP_KERNEL);
2843 if (!zmd)
2844 return -ENOMEM;
2845
 2846 strcpy(zmd->devname, devname);
 2847 zmd->dev = dev;
 2848 zmd->nr_devs = num_dev;
2849 zmd->mblk_rbtree = RB_ROOT;
2850 init_rwsem(&zmd->mblk_sem);
2851 mutex_init(&zmd->mblk_flush_lock);
2852 spin_lock_init(&zmd->mblk_lock);
2853 INIT_LIST_HEAD(&zmd->mblk_lru_list);
2854 INIT_LIST_HEAD(&zmd->mblk_dirty_list);
2855
2856 mutex_init(&zmd->map_lock);
 2857
2858 atomic_set(&zmd->unmap_nr_cache, 0);
2859 INIT_LIST_HEAD(&zmd->unmap_cache_list);
2860 INIT_LIST_HEAD(&zmd->map_cache_list);
2861
2862 atomic_set(&zmd->nr_reserved_seq_zones, 0);
2863 INIT_LIST_HEAD(&zmd->reserved_seq_zones_list);
2864
2865 init_waitqueue_head(&zmd->free_wq);
2866
2867 /* Initialize zone descriptors */
2868 ret = dmz_init_zones(zmd);
2869 if (ret)
2870 goto err;
2871
2872 /* Get super block */
2873 ret = dmz_load_sb(zmd);
2874 if (ret)
2875 goto err;
2876
2877 /* Set metadata zones starting from sb_zone */
 2878 for (i = 0; i < zmd->nr_meta_zones << 1; i++) {
 2879 zone = dmz_get(zmd, zmd->sb[0].zone->id + i);
 2880 if (!zone) {
 2881 dmz_zmd_err(zmd,
 2882 "metadata zone %u not present", i);
 2883 ret = -ENXIO;
 2884 goto err;
 2885 }
 2886 if (!dmz_is_rnd(zone) && !dmz_is_cache(zone)) {
 2887 dmz_zmd_err(zmd,
 2888 "metadata zone %d is not random or cache", i);
 2889 ret = -ENXIO;
 2890 goto err;
 2891 }
 2892 set_bit(DMZ_META, &zone->flags);
 2893 }
2894 /* Load mapping table */
2895 ret = dmz_load_mapping(zmd);
2896 if (ret)
2897 goto err;
2898
2899 /*
2900 * Cache size boundaries: allow at least 2 super blocks, the chunk map
2901 * blocks and enough blocks to be able to cache the bitmap blocks of
2902 * up to 16 zones when idle (min_nr_mblks). Otherwise, if busy, allow
2903 * the cache to add 512 more metadata blocks.
2904 */
2905 zmd->min_nr_mblks = 2 + zmd->nr_map_blocks + zmd->zone_nr_bitmap_blocks * 16;
2906 zmd->max_nr_mblks = zmd->min_nr_mblks + 512;
2907 zmd->mblk_shrinker.count_objects = dmz_mblock_shrinker_count;
2908 zmd->mblk_shrinker.scan_objects = dmz_mblock_shrinker_scan;
2909 zmd->mblk_shrinker.seeks = DEFAULT_SEEKS;
2910
2911 /* Metadata cache shrinker */
2912 ret = register_shrinker(&zmd->mblk_shrinker);
2913 if (ret) {
 2914 dmz_zmd_err(zmd, "Register metadata cache shrinker failed");
2915 goto err;
2916 }
2917
2918 dmz_zmd_info(zmd, "DM-Zoned metadata version %d", zmd->sb_version);
2919 for (i = 0; i < zmd->nr_devs; i++)
2920 dmz_print_dev(zmd, i);
2921
2922 dmz_zmd_info(zmd, " %u zones of %llu 512-byte logical sectors",
 2923 zmd->nr_zones, (u64)zmd->zone_nr_sectors);
2924 dmz_zmd_debug(zmd, " %u metadata zones",
2925 zmd->nr_meta_zones * 2);
2926 dmz_zmd_debug(zmd, " %u data zones for %u chunks",
2927 zmd->nr_data_zones, zmd->nr_chunks);
2928 dmz_zmd_debug(zmd, " %u cache zones (%u unmapped)",
2929 zmd->nr_cache, atomic_read(&zmd->unmap_nr_cache));
2930 for (i = 0; i < zmd->nr_devs; i++) {
2931 dmz_zmd_debug(zmd, " %u random zones (%u unmapped)",
2932 dmz_nr_rnd_zones(zmd, i),
2933 dmz_nr_unmap_rnd_zones(zmd, i));
2934 dmz_zmd_debug(zmd, " %u sequential zones (%u unmapped)",
2935 dmz_nr_seq_zones(zmd, i),
2936 dmz_nr_unmap_seq_zones(zmd, i));
2937 }
2938 dmz_zmd_debug(zmd, " %u reserved sequential data zones",
2939 zmd->nr_reserved_seq);
2940 dmz_zmd_debug(zmd, "Format:");
2941 dmz_zmd_debug(zmd, "%u metadata blocks per set (%u max cache)",
 2942 zmd->nr_meta_blocks, zmd->max_nr_mblks);
 2943 dmz_zmd_debug(zmd, " %u data zone mapping blocks",
 2944 zmd->nr_map_blocks);
 2945 dmz_zmd_debug(zmd, " %u bitmap blocks",
2946 zmd->nr_bitmap_blocks);
2947
2948 *metadata = zmd;
2949
2950 return 0;
2951err:
2952 dmz_cleanup_metadata(zmd);
2953 kfree(zmd);
2954 *metadata = NULL;
2955
2956 return ret;
2957}
2958
2959/*
2960 * Cleanup the zoned metadata resources.
2961 */
2962void dmz_dtr_metadata(struct dmz_metadata *zmd)
2963{
2964 unregister_shrinker(&zmd->mblk_shrinker);
2965 dmz_cleanup_metadata(zmd);
2966 kfree(zmd);
2967}
2968
2969/*
2970 * Check zone information on resume.
2971 */
2972int dmz_resume_metadata(struct dmz_metadata *zmd)
2973{
2974 struct dm_zone *zone;
2975 sector_t wp_block;
2976 unsigned int i;
2977 int ret;
2978
2979 /* Check zones */
 2980 for (i = 0; i < zmd->nr_zones; i++) {
2981 zone = dmz_get(zmd, i);
2982 if (!zone) {
 2983 dmz_zmd_err(zmd, "Unable to get zone %u", i);
 2984 return -EIO;
 2985 }
2986 wp_block = zone->wp_block;
2987
2988 ret = dmz_update_zone(zmd, zone);
2989 if (ret) {
 2990 dmz_zmd_err(zmd, "Broken zone %u", i);
2991 return ret;
2992 }
2993
2994 if (dmz_is_offline(zone)) {
 2995 dmz_zmd_warn(zmd, "Zone %u is offline", i);
2996 continue;
2997 }
2998
2999 /* Check write pointer */
3000 if (!dmz_is_seq(zone))
3001 zone->wp_block = 0;
3002 else if (zone->wp_block != wp_block) {
 3003 dmz_zmd_err(zmd, "Zone %u: Invalid wp (%llu / %llu)",
3004 i, (u64)zone->wp_block, (u64)wp_block);
3005 zone->wp_block = wp_block;
3006 dmz_invalidate_blocks(zmd, zone, zone->wp_block,
 3007 zmd->zone_nr_blocks - zone->wp_block);
3008 }
3009 }
3010
3011 return 0;
3012}