dm zoned: use dmz_zone_to_dev() when handling metadata I/O
[linux-block.git] drivers/md/dm-zoned-metadata.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017 Western Digital Corporation or its affiliates.
 *
 * This file is released under the GPL.
 */

#include "dm-zoned.h"

#include <linux/module.h>
#include <linux/crc32.h>
#include <linux/sched/mm.h>

#define DM_MSG_PREFIX		"zoned metadata"

/*
 * Metadata version.
 */
#define DMZ_META_VER	1

/*
 * On-disk super block magic.
 */
#define DMZ_MAGIC	((((unsigned int)('D')) << 24) | \
			 (((unsigned int)('Z')) << 16) | \
			 (((unsigned int)('B')) << 8) | \
			 ((unsigned int)('D')))

/*
 * On-disk super block.
 * Only 512 B of this structure are used, but it occupies a full 4KB
 * block on disk. This block is followed on disk by the mapping table
 * of chunks to zones and the bitmap blocks indicating zone block
 * validity.
 * The overall resulting metadata format is:
 *    (1) Super block (1 block)
 *    (2) Chunk mapping table (nr_map_blocks)
 *    (3) Bitmap blocks (nr_bitmap_blocks)
 * All metadata blocks are stored in conventional zones, starting from
 * the first conventional zone found on disk.
 */
struct dmz_super {
	/* Magic number */
	__le32		magic;			/*   4 */

	/* Metadata version number */
	__le32		version;		/*   8 */

	/* Generation number */
	__le64		gen;			/*  16 */

	/* This block number */
	__le64		sb_block;		/*  24 */

	/* The number of metadata blocks, including this super block */
	__le32		nr_meta_blocks;		/*  28 */

	/* The number of sequential zones reserved for reclaim */
	__le32		nr_reserved_seq;	/*  32 */

	/* The number of entries in the mapping table */
	__le32		nr_chunks;		/*  36 */

	/* The number of blocks used for the chunk mapping table */
	__le32		nr_map_blocks;		/*  40 */

	/* The number of blocks used for the block bitmaps */
	__le32		nr_bitmap_blocks;	/*  44 */

	/* Checksum */
	__le32		crc;			/*  48 */

	/* Padding to full 512B sector */
	u8		reserved[464];		/* 512 */
};

/*
 * Chunk mapping entry: entries are indexed by chunk number
 * and give the zone ID (dzone_id) mapping the chunk on disk.
 * This zone may be sequential or random. If it is a sequential
 * zone, a second zone (bzone_id) used as a write buffer may
 * also be specified. This second zone will always be a randomly
 * writeable zone.
 */
struct dmz_map {
	__le32			dzone_id;
	__le32			bzone_id;
};

/*
 * Chunk mapping table metadata: 512 8-byte entries per 4KB block.
 */
#define DMZ_MAP_ENTRIES		(DMZ_BLOCK_SIZE / sizeof(struct dmz_map))
#define DMZ_MAP_ENTRIES_SHIFT	(ilog2(DMZ_MAP_ENTRIES))
#define DMZ_MAP_ENTRIES_MASK	(DMZ_MAP_ENTRIES - 1)
#define DMZ_MAP_UNMAPPED	UINT_MAX
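
/*
 * With 4KB metadata blocks and 8-byte struct dmz_map entries,
 * DMZ_MAP_ENTRIES is 512: chunk c is described by mapping block
 * c >> DMZ_MAP_ENTRIES_SHIFT at entry c & DMZ_MAP_ENTRIES_MASK.
 * For example, chunk 1000 lives in the second mapping block
 * (1000 >> 9 = 1) at entry 488 (1000 & 511), as implemented in
 * dmz_set_chunk_mapping().
 */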

/*
 * Metadata block descriptor (for cached metadata blocks).
 */
struct dmz_mblock {
	struct rb_node		node;
	struct list_head	link;
	sector_t		no;
	unsigned int		ref;
	unsigned long		state;
	struct page		*page;
	void			*data;
};

/*
 * Metadata block state flags.
 */
enum {
	DMZ_META_DIRTY,
	DMZ_META_READING,
	DMZ_META_WRITING,
	DMZ_META_ERROR,
};
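
/*
 * DMZ_META_READING and DMZ_META_WRITING are set before a metadata block
 * BIO is submitted and cleared by dmz_mblock_bio_end_io(); readers and
 * the flush path wait on these bits with wait_on_bit_io(). DMZ_META_ERROR
 * records a failed BIO, and DMZ_META_DIRTY marks blocks that must be
 * written out by the next metadata flush.
 */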

/*
 * Super block information (one per metadata set).
 */
struct dmz_sb {
	sector_t		block;
	struct dmz_dev		*dev;
	struct dmz_mblock	*mblk;
	struct dmz_super	*sb;
	struct dm_zone		*zone;
};

/*
 * In-memory metadata.
 */
struct dmz_metadata {
	struct dmz_dev		*dev;

	char			devname[BDEVNAME_SIZE];

	sector_t		zone_bitmap_size;
	unsigned int		zone_nr_bitmap_blocks;
	unsigned int		zone_bits_per_mblk;

	sector_t		zone_nr_blocks;
	sector_t		zone_nr_blocks_shift;

	sector_t		zone_nr_sectors;
	sector_t		zone_nr_sectors_shift;

	unsigned int		nr_bitmap_blocks;
	unsigned int		nr_map_blocks;

	unsigned int		nr_zones;
	unsigned int		nr_useable_zones;
	unsigned int		nr_meta_blocks;
	unsigned int		nr_meta_zones;
	unsigned int		nr_data_zones;
	unsigned int		nr_rnd_zones;
	unsigned int		nr_reserved_seq;
	unsigned int		nr_chunks;

	/* Zone information array */
	struct dm_zone		*zones;

	struct dmz_sb		sb[2];
	unsigned int		mblk_primary;
	u64			sb_gen;
	unsigned int		min_nr_mblks;
	unsigned int		max_nr_mblks;
	atomic_t		nr_mblks;
	struct rw_semaphore	mblk_sem;
	struct mutex		mblk_flush_lock;
	spinlock_t		mblk_lock;
	struct rb_root		mblk_rbtree;
	struct list_head	mblk_lru_list;
	struct list_head	mblk_dirty_list;
	struct shrinker		mblk_shrinker;

	/* Zone allocation management */
	struct mutex		map_lock;
	struct dmz_mblock	**map_mblk;
	unsigned int		nr_rnd;
	atomic_t		unmap_nr_rnd;
	struct list_head	unmap_rnd_list;
	struct list_head	map_rnd_list;

	unsigned int		nr_seq;
	atomic_t		unmap_nr_seq;
	struct list_head	unmap_seq_list;
	struct list_head	map_seq_list;

	atomic_t		nr_reserved_seq_zones;
	struct list_head	reserved_seq_zones_list;

	wait_queue_head_t	free_wq;
};

/*
 * Various accessors
 */
sector_t dmz_start_sect(struct dmz_metadata *zmd, struct dm_zone *zone)
{
	return (sector_t)zone->id << zmd->zone_nr_sectors_shift;
}

sector_t dmz_start_block(struct dmz_metadata *zmd, struct dm_zone *zone)
{
	return (sector_t)zone->id << zmd->zone_nr_blocks_shift;
}

struct dmz_dev *dmz_zone_to_dev(struct dmz_metadata *zmd, struct dm_zone *zone)
{
	return &zmd->dev[0];
}
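
/*
 * Note: with a single zoned device all zones map to dev[0]; routing
 * metadata I/O through this accessor keeps the zone-to-device lookup
 * in one place should the mapping ever become non-trivial.
 */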

unsigned int dmz_zone_nr_blocks(struct dmz_metadata *zmd)
{
	return zmd->zone_nr_blocks;
}

unsigned int dmz_zone_nr_blocks_shift(struct dmz_metadata *zmd)
{
	return zmd->zone_nr_blocks_shift;
}

unsigned int dmz_zone_nr_sectors(struct dmz_metadata *zmd)
{
	return zmd->zone_nr_sectors;
}

unsigned int dmz_zone_nr_sectors_shift(struct dmz_metadata *zmd)
{
	return zmd->zone_nr_sectors_shift;
}

unsigned int dmz_nr_zones(struct dmz_metadata *zmd)
{
	return zmd->nr_zones;
}

unsigned int dmz_nr_chunks(struct dmz_metadata *zmd)
{
	return zmd->nr_chunks;
}

unsigned int dmz_nr_rnd_zones(struct dmz_metadata *zmd)
{
	return zmd->nr_rnd;
}

unsigned int dmz_nr_unmap_rnd_zones(struct dmz_metadata *zmd)
{
	return atomic_read(&zmd->unmap_nr_rnd);
}

unsigned int dmz_nr_seq_zones(struct dmz_metadata *zmd)
{
	return zmd->nr_seq;
}

unsigned int dmz_nr_unmap_seq_zones(struct dmz_metadata *zmd)
{
	return atomic_read(&zmd->unmap_nr_seq);
}

const char *dmz_metadata_label(struct dmz_metadata *zmd)
{
	return (const char *)zmd->devname;
}

bool dmz_check_dev(struct dmz_metadata *zmd)
{
	return dmz_check_bdev(&zmd->dev[0]);
}

bool dmz_dev_is_dying(struct dmz_metadata *zmd)
{
	return dmz_bdev_is_dying(&zmd->dev[0]);
}

/*
 * Lock/unlock mapping table.
 * The map lock also protects all the zone lists.
 */
void dmz_lock_map(struct dmz_metadata *zmd)
{
	mutex_lock(&zmd->map_lock);
}

void dmz_unlock_map(struct dmz_metadata *zmd)
{
	mutex_unlock(&zmd->map_lock);
}

/*
 * Lock/unlock metadata access. This is a "read" lock on a semaphore
 * that prevents metadata flush from running while metadata are being
 * modified. The actual metadata write mutual exclusion is achieved with
 * the map lock and zone state management (active and reclaim state are
 * mutually exclusive).
 */
void dmz_lock_metadata(struct dmz_metadata *zmd)
{
	down_read(&zmd->mblk_sem);
}

void dmz_unlock_metadata(struct dmz_metadata *zmd)
{
	up_read(&zmd->mblk_sem);
}

/*
 * Lock/unlock flush: prevent concurrent executions
 * of dmz_flush_metadata as well as metadata modification in reclaim
 * while flush is being executed.
 */
void dmz_lock_flush(struct dmz_metadata *zmd)
{
	mutex_lock(&zmd->mblk_flush_lock);
}

void dmz_unlock_flush(struct dmz_metadata *zmd)
{
	mutex_unlock(&zmd->mblk_flush_lock);
}

/*
 * Allocate a metadata block.
 */
static struct dmz_mblock *dmz_alloc_mblock(struct dmz_metadata *zmd,
					   sector_t mblk_no)
{
	struct dmz_mblock *mblk = NULL;

	/* See if we can reuse cached blocks */
	if (zmd->max_nr_mblks && atomic_read(&zmd->nr_mblks) > zmd->max_nr_mblks) {
		spin_lock(&zmd->mblk_lock);
		mblk = list_first_entry_or_null(&zmd->mblk_lru_list,
						struct dmz_mblock, link);
		if (mblk) {
			list_del_init(&mblk->link);
			rb_erase(&mblk->node, &zmd->mblk_rbtree);
			mblk->no = mblk_no;
		}
		spin_unlock(&zmd->mblk_lock);
		if (mblk)
			return mblk;
	}

	/* Allocate a new block */
	mblk = kmalloc(sizeof(struct dmz_mblock), GFP_NOIO);
	if (!mblk)
		return NULL;

	mblk->page = alloc_page(GFP_NOIO);
	if (!mblk->page) {
		kfree(mblk);
		return NULL;
	}

	RB_CLEAR_NODE(&mblk->node);
	INIT_LIST_HEAD(&mblk->link);
	mblk->ref = 0;
	mblk->state = 0;
	mblk->no = mblk_no;
	mblk->data = page_address(mblk->page);

	atomic_inc(&zmd->nr_mblks);

	return mblk;
}
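
/*
 * GFP_NOIO is used for both allocations above: metadata blocks are
 * allocated while processing target BIOs, so recursing into the block
 * layer through memory reclaim must be avoided here.
 */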

/*
 * Free a metadata block.
 */
static void dmz_free_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk)
{
	__free_pages(mblk->page, 0);
	kfree(mblk);

	atomic_dec(&zmd->nr_mblks);
}

/*
 * Insert a metadata block in the rbtree.
 */
static void dmz_insert_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk)
{
	struct rb_root *root = &zmd->mblk_rbtree;
	struct rb_node **new = &(root->rb_node), *parent = NULL;
	struct dmz_mblock *b;

	/* Figure out where to put the new node */
	while (*new) {
		b = container_of(*new, struct dmz_mblock, node);
		parent = *new;
		new = (b->no < mblk->no) ? &((*new)->rb_left) : &((*new)->rb_right);
	}

	/* Add new node and rebalance tree */
	rb_link_node(&mblk->node, parent, new);
	rb_insert_color(&mblk->node, root);
}

/*
 * Lookup a metadata block in the rbtree. If the block is found, increment
 * its reference count.
 */
static struct dmz_mblock *dmz_get_mblock_fast(struct dmz_metadata *zmd,
					      sector_t mblk_no)
{
	struct rb_root *root = &zmd->mblk_rbtree;
	struct rb_node *node = root->rb_node;
	struct dmz_mblock *mblk;

	while (node) {
		mblk = container_of(node, struct dmz_mblock, node);
		if (mblk->no == mblk_no) {
			/*
			 * If this is the first reference to the block,
			 * remove it from the LRU list.
			 */
			mblk->ref++;
			if (mblk->ref == 1 &&
			    !test_bit(DMZ_META_DIRTY, &mblk->state))
				list_del_init(&mblk->link);
			return mblk;
		}
		node = (mblk->no < mblk_no) ? node->rb_left : node->rb_right;
	}

	return NULL;
}

/*
 * Metadata block BIO end callback.
 */
static void dmz_mblock_bio_end_io(struct bio *bio)
{
	struct dmz_mblock *mblk = bio->bi_private;
	int flag;

	if (bio->bi_status)
		set_bit(DMZ_META_ERROR, &mblk->state);

	if (bio_op(bio) == REQ_OP_WRITE)
		flag = DMZ_META_WRITING;
	else
		flag = DMZ_META_READING;

	clear_bit_unlock(flag, &mblk->state);
	smp_mb__after_atomic();
	wake_up_bit(&mblk->state, flag);

	bio_put(bio);
}

/*
 * Read an uncached metadata block from disk and add it to the cache.
 */
static struct dmz_mblock *dmz_get_mblock_slow(struct dmz_metadata *zmd,
					      sector_t mblk_no)
{
	struct dmz_mblock *mblk, *m;
	sector_t block = zmd->sb[zmd->mblk_primary].block + mblk_no;
	struct dmz_dev *dev = zmd->sb[zmd->mblk_primary].dev;
	struct bio *bio;

	if (dmz_bdev_is_dying(dev))
		return ERR_PTR(-EIO);

	/* Get a new block and a BIO to read it */
	mblk = dmz_alloc_mblock(zmd, mblk_no);
	if (!mblk)
		return ERR_PTR(-ENOMEM);

	bio = bio_alloc(GFP_NOIO, 1);
	if (!bio) {
		dmz_free_mblock(zmd, mblk);
		return ERR_PTR(-ENOMEM);
	}

	spin_lock(&zmd->mblk_lock);

	/*
	 * Make sure that another context did not start reading
	 * the block already.
	 */
	m = dmz_get_mblock_fast(zmd, mblk_no);
	if (m) {
		spin_unlock(&zmd->mblk_lock);
		dmz_free_mblock(zmd, mblk);
		bio_put(bio);
		return m;
	}

	mblk->ref++;
	set_bit(DMZ_META_READING, &mblk->state);
	dmz_insert_mblock(zmd, mblk);

	spin_unlock(&zmd->mblk_lock);

	/* Submit read BIO */
	bio->bi_iter.bi_sector = dmz_blk2sect(block);
	bio_set_dev(bio, dev->bdev);
	bio->bi_private = mblk;
	bio->bi_end_io = dmz_mblock_bio_end_io;
	bio_set_op_attrs(bio, REQ_OP_READ, REQ_META | REQ_PRIO);
	bio_add_page(bio, mblk->page, DMZ_BLOCK_SIZE, 0);
	submit_bio(bio);

	return mblk;
}

/*
 * Free metadata blocks.
 */
static unsigned long dmz_shrink_mblock_cache(struct dmz_metadata *zmd,
					     unsigned long limit)
{
	struct dmz_mblock *mblk;
	unsigned long count = 0;

	if (!zmd->max_nr_mblks)
		return 0;

	while (!list_empty(&zmd->mblk_lru_list) &&
	       atomic_read(&zmd->nr_mblks) > zmd->min_nr_mblks &&
	       count < limit) {
		mblk = list_first_entry(&zmd->mblk_lru_list,
					struct dmz_mblock, link);
		list_del_init(&mblk->link);
		rb_erase(&mblk->node, &zmd->mblk_rbtree);
		dmz_free_mblock(zmd, mblk);
		count++;
	}

	return count;
}

/*
 * For mblock shrinker: get the number of unused metadata blocks in the cache.
 */
static unsigned long dmz_mblock_shrinker_count(struct shrinker *shrink,
					       struct shrink_control *sc)
{
	struct dmz_metadata *zmd = container_of(shrink, struct dmz_metadata, mblk_shrinker);

	return atomic_read(&zmd->nr_mblks);
}

/*
 * For mblock shrinker: scan unused metadata blocks and shrink the cache.
 */
static unsigned long dmz_mblock_shrinker_scan(struct shrinker *shrink,
					      struct shrink_control *sc)
{
	struct dmz_metadata *zmd = container_of(shrink, struct dmz_metadata, mblk_shrinker);
	unsigned long count;

	spin_lock(&zmd->mblk_lock);
	count = dmz_shrink_mblock_cache(zmd, sc->nr_to_scan);
	spin_unlock(&zmd->mblk_lock);

	return count ? count : SHRINK_STOP;
}
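
/*
 * Only clean blocks sitting on the LRU list (i.e. cached blocks with no
 * active reference) can be freed by the shrinker, and never below the
 * min_nr_mblks floor. Returning SHRINK_STOP tells the shrinker core to
 * stop scanning this cache when nothing could be reclaimed.
 */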

/*
 * Release a metadata block.
 */
static void dmz_release_mblock(struct dmz_metadata *zmd,
			       struct dmz_mblock *mblk)
{
	if (!mblk)
		return;

	spin_lock(&zmd->mblk_lock);

	mblk->ref--;
	if (mblk->ref == 0) {
		if (test_bit(DMZ_META_ERROR, &mblk->state)) {
			rb_erase(&mblk->node, &zmd->mblk_rbtree);
			dmz_free_mblock(zmd, mblk);
		} else if (!test_bit(DMZ_META_DIRTY, &mblk->state)) {
			list_add_tail(&mblk->link, &zmd->mblk_lru_list);
			dmz_shrink_mblock_cache(zmd, 1);
		}
	}

	spin_unlock(&zmd->mblk_lock);
}

/*
 * Get a metadata block from the rbtree. If the block
 * is not present, read it from disk.
 */
static struct dmz_mblock *dmz_get_mblock(struct dmz_metadata *zmd,
					 sector_t mblk_no)
{
	struct dmz_mblock *mblk;
	struct dmz_dev *dev = zmd->sb[zmd->mblk_primary].dev;

	/* Check rbtree */
	spin_lock(&zmd->mblk_lock);
	mblk = dmz_get_mblock_fast(zmd, mblk_no);
	spin_unlock(&zmd->mblk_lock);

	if (!mblk) {
		/* Cache miss: read the block from disk */
		mblk = dmz_get_mblock_slow(zmd, mblk_no);
		if (IS_ERR(mblk))
			return mblk;
	}

	/* Wait for on-going read I/O and check for error */
	wait_on_bit_io(&mblk->state, DMZ_META_READING,
		       TASK_UNINTERRUPTIBLE);
	if (test_bit(DMZ_META_ERROR, &mblk->state)) {
		dmz_release_mblock(zmd, mblk);
		dmz_check_bdev(dev);
		return ERR_PTR(-EIO);
	}

	return mblk;
}

/*
 * Mark a metadata block dirty.
 */
static void dmz_dirty_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk)
{
	spin_lock(&zmd->mblk_lock);
	if (!test_and_set_bit(DMZ_META_DIRTY, &mblk->state))
		list_add_tail(&mblk->link, &zmd->mblk_dirty_list);
	spin_unlock(&zmd->mblk_lock);
}

/*
 * Issue a metadata block write BIO.
 */
static int dmz_write_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk,
			    unsigned int set)
{
	struct dmz_dev *dev = zmd->sb[set].dev;
	sector_t block = zmd->sb[set].block + mblk->no;
	struct bio *bio;

	if (dmz_bdev_is_dying(dev))
		return -EIO;

	bio = bio_alloc(GFP_NOIO, 1);
	if (!bio) {
		set_bit(DMZ_META_ERROR, &mblk->state);
		return -ENOMEM;
	}

	set_bit(DMZ_META_WRITING, &mblk->state);

	bio->bi_iter.bi_sector = dmz_blk2sect(block);
	bio_set_dev(bio, dev->bdev);
	bio->bi_private = mblk;
	bio->bi_end_io = dmz_mblock_bio_end_io;
	bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_META | REQ_PRIO);
	bio_add_page(bio, mblk->page, DMZ_BLOCK_SIZE, 0);
	submit_bio(bio);

	return 0;
}

/*
 * Read/write a metadata block.
 */
static int dmz_rdwr_block(struct dmz_dev *dev, int op,
			  sector_t block, struct page *page)
{
	struct bio *bio;
	int ret;

	if (dmz_bdev_is_dying(dev))
		return -EIO;

	bio = bio_alloc(GFP_NOIO, 1);
	if (!bio)
		return -ENOMEM;

	bio->bi_iter.bi_sector = dmz_blk2sect(block);
	bio_set_dev(bio, dev->bdev);
	bio_set_op_attrs(bio, op, REQ_SYNC | REQ_META | REQ_PRIO);
	bio_add_page(bio, page, DMZ_BLOCK_SIZE, 0);
	ret = submit_bio_wait(bio);
	bio_put(bio);

	if (ret)
		dmz_check_bdev(dev);
	return ret;
}

/*
 * Write super block of the specified metadata set.
 */
static int dmz_write_sb(struct dmz_metadata *zmd, unsigned int set)
{
	sector_t block = zmd->sb[set].block;
	struct dmz_mblock *mblk = zmd->sb[set].mblk;
	struct dmz_super *sb = zmd->sb[set].sb;
	struct dmz_dev *dev = zmd->sb[set].dev;
	u64 sb_gen = zmd->sb_gen + 1;
	int ret;

	sb->magic = cpu_to_le32(DMZ_MAGIC);
	sb->version = cpu_to_le32(DMZ_META_VER);

	sb->gen = cpu_to_le64(sb_gen);

	sb->sb_block = cpu_to_le64(block);
	sb->nr_meta_blocks = cpu_to_le32(zmd->nr_meta_blocks);
	sb->nr_reserved_seq = cpu_to_le32(zmd->nr_reserved_seq);
	sb->nr_chunks = cpu_to_le32(zmd->nr_chunks);

	sb->nr_map_blocks = cpu_to_le32(zmd->nr_map_blocks);
	sb->nr_bitmap_blocks = cpu_to_le32(zmd->nr_bitmap_blocks);

	sb->crc = 0;
	sb->crc = cpu_to_le32(crc32_le(sb_gen, (unsigned char *)sb, DMZ_BLOCK_SIZE));

	ret = dmz_rdwr_block(dev, REQ_OP_WRITE, block, mblk->page);
	if (ret == 0)
		ret = blkdev_issue_flush(dev->bdev, GFP_NOIO, NULL);

	return ret;
}
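
/*
 * The CRC is computed over the whole 4KB block with the crc field zeroed
 * and the new generation number as seed, so a super block is only valid
 * for the generation it advertises; dmz_check_sb() mirrors this exactly.
 */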

/*
 * Write dirty metadata blocks to the specified set.
 */
static int dmz_write_dirty_mblocks(struct dmz_metadata *zmd,
				   struct list_head *write_list,
				   unsigned int set)
{
	struct dmz_mblock *mblk;
	struct dmz_dev *dev = zmd->sb[set].dev;
	struct blk_plug plug;
	int ret = 0, nr_mblks_submitted = 0;

	/* Issue writes */
	blk_start_plug(&plug);
	list_for_each_entry(mblk, write_list, link) {
		ret = dmz_write_mblock(zmd, mblk, set);
		if (ret)
			break;
		nr_mblks_submitted++;
	}
	blk_finish_plug(&plug);

	/* Wait for completion */
	list_for_each_entry(mblk, write_list, link) {
		if (!nr_mblks_submitted)
			break;
		wait_on_bit_io(&mblk->state, DMZ_META_WRITING,
			       TASK_UNINTERRUPTIBLE);
		if (test_bit(DMZ_META_ERROR, &mblk->state)) {
			clear_bit(DMZ_META_ERROR, &mblk->state);
			dmz_check_bdev(dev);
			ret = -EIO;
		}
		nr_mblks_submitted--;
	}

	/* Flush drive cache (this will also sync data) */
	if (ret == 0)
		ret = blkdev_issue_flush(dev->bdev, GFP_NOIO, NULL);

	return ret;
}

/*
 * Log dirty metadata blocks.
 */
static int dmz_log_dirty_mblocks(struct dmz_metadata *zmd,
				 struct list_head *write_list)
{
	unsigned int log_set = zmd->mblk_primary ^ 0x1;
	int ret;

	/* Write dirty blocks to the log */
	ret = dmz_write_dirty_mblocks(zmd, write_list, log_set);
	if (ret)
		return ret;

	/*
	 * No error so far: now validate the log by updating the
	 * log index super block generation.
	 */
	ret = dmz_write_sb(zmd, log_set);
	if (ret)
		return ret;

	return 0;
}
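
/*
 * Together with dmz_flush_metadata() this implements a two-phase commit:
 * dirty blocks are first written and flushed to the secondary set (the
 * log), which is then validated by writing its super block with the new
 * generation. Only then is the primary set updated in place, so a crash
 * at any point leaves at least one complete, consistent metadata set for
 * dmz_load_sb() to pick up.
 */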

/*
 * Flush dirty metadata blocks.
 */
int dmz_flush_metadata(struct dmz_metadata *zmd)
{
	struct dmz_mblock *mblk;
	struct list_head write_list;
	struct dmz_dev *dev;
	int ret;

	if (WARN_ON(!zmd))
		return 0;

	INIT_LIST_HEAD(&write_list);

	/*
	 * Make sure that metadata blocks are stable before logging: take
	 * the write lock on the metadata semaphore to prevent target BIOs
	 * from modifying metadata.
	 */
	down_write(&zmd->mblk_sem);
	dev = zmd->sb[zmd->mblk_primary].dev;

	/*
	 * This is called from the target flush work and reclaim work.
	 * Concurrent execution is not allowed.
	 */
	dmz_lock_flush(zmd);

	if (dmz_bdev_is_dying(dev)) {
		ret = -EIO;
		goto out;
	}

	/* Get dirty blocks */
	spin_lock(&zmd->mblk_lock);
	list_splice_init(&zmd->mblk_dirty_list, &write_list);
	spin_unlock(&zmd->mblk_lock);

	/* If there are no dirty metadata blocks, just flush the device cache */
	if (list_empty(&write_list)) {
		ret = blkdev_issue_flush(dev->bdev, GFP_NOIO, NULL);
		goto err;
	}

	/*
	 * The primary metadata set is still clean. Keep it this way until
	 * all updates are successful in the secondary set. That is, use
	 * the secondary set as a log.
	 */
	ret = dmz_log_dirty_mblocks(zmd, &write_list);
	if (ret)
		goto err;

	/*
	 * The log is on disk. It is now safe to update in place
	 * in the primary metadata set.
	 */
	ret = dmz_write_dirty_mblocks(zmd, &write_list, zmd->mblk_primary);
	if (ret)
		goto err;

	ret = dmz_write_sb(zmd, zmd->mblk_primary);
	if (ret)
		goto err;

	while (!list_empty(&write_list)) {
		mblk = list_first_entry(&write_list, struct dmz_mblock, link);
		list_del_init(&mblk->link);

		spin_lock(&zmd->mblk_lock);
		clear_bit(DMZ_META_DIRTY, &mblk->state);
		if (mblk->ref == 0)
			list_add_tail(&mblk->link, &zmd->mblk_lru_list);
		spin_unlock(&zmd->mblk_lock);
	}

	zmd->sb_gen++;
out:
	dmz_unlock_flush(zmd);
	up_write(&zmd->mblk_sem);

	return ret;

err:
	if (!list_empty(&write_list)) {
		spin_lock(&zmd->mblk_lock);
		list_splice(&write_list, &zmd->mblk_dirty_list);
		spin_unlock(&zmd->mblk_lock);
	}
	if (!dmz_check_bdev(dev))
		ret = -EIO;
	goto out;
}

/*
 * Check super block.
 */
static int dmz_check_sb(struct dmz_metadata *zmd, unsigned int set)
{
	struct dmz_super *sb = zmd->sb[set].sb;
	struct dmz_dev *dev = zmd->sb[set].dev;
	unsigned int nr_meta_zones, nr_data_zones;
	u32 crc, stored_crc;
	u64 gen;

	gen = le64_to_cpu(sb->gen);
	stored_crc = le32_to_cpu(sb->crc);
	sb->crc = 0;
	crc = crc32_le(gen, (unsigned char *)sb, DMZ_BLOCK_SIZE);
	if (crc != stored_crc) {
		dmz_dev_err(dev, "Invalid checksum (needed 0x%08x, got 0x%08x)",
			    crc, stored_crc);
		return -ENXIO;
	}

	if (le32_to_cpu(sb->magic) != DMZ_MAGIC) {
		dmz_dev_err(dev, "Invalid meta magic (needed 0x%08x, got 0x%08x)",
			    DMZ_MAGIC, le32_to_cpu(sb->magic));
		return -ENXIO;
	}

	if (le32_to_cpu(sb->version) != DMZ_META_VER) {
		dmz_dev_err(dev, "Invalid meta version (needed %d, got %d)",
			    DMZ_META_VER, le32_to_cpu(sb->version));
		return -ENXIO;
	}

	nr_meta_zones = (le32_to_cpu(sb->nr_meta_blocks) + zmd->zone_nr_blocks - 1)
		>> zmd->zone_nr_blocks_shift;
	if (!nr_meta_zones ||
	    nr_meta_zones >= zmd->nr_rnd_zones) {
		dmz_dev_err(dev, "Invalid number of metadata blocks");
		return -ENXIO;
	}

	if (!le32_to_cpu(sb->nr_reserved_seq) ||
	    le32_to_cpu(sb->nr_reserved_seq) >= (zmd->nr_useable_zones - nr_meta_zones)) {
		dmz_dev_err(dev, "Invalid number of reserved sequential zones");
		return -ENXIO;
	}

	nr_data_zones = zmd->nr_useable_zones -
		(nr_meta_zones * 2 + le32_to_cpu(sb->nr_reserved_seq));
	if (le32_to_cpu(sb->nr_chunks) > nr_data_zones) {
		dmz_dev_err(dev, "Invalid number of chunks %u / %u",
			    le32_to_cpu(sb->nr_chunks), nr_data_zones);
		return -ENXIO;
	}

	/* OK */
	zmd->nr_meta_blocks = le32_to_cpu(sb->nr_meta_blocks);
	zmd->nr_reserved_seq = le32_to_cpu(sb->nr_reserved_seq);
	zmd->nr_chunks = le32_to_cpu(sb->nr_chunks);
	zmd->nr_map_blocks = le32_to_cpu(sb->nr_map_blocks);
	zmd->nr_bitmap_blocks = le32_to_cpu(sb->nr_bitmap_blocks);
	zmd->nr_meta_zones = nr_meta_zones;
	zmd->nr_data_zones = nr_data_zones;

	return 0;
}

/*
 * Read the first or second super block from disk.
 */
static int dmz_read_sb(struct dmz_metadata *zmd, unsigned int set)
{
	return dmz_rdwr_block(zmd->sb[set].dev, REQ_OP_READ,
			      zmd->sb[set].block, zmd->sb[set].mblk->page);
}

/*
 * Determine the position of the secondary super blocks on disk.
 * This is used only if a corruption of the primary super block
 * is detected.
 */
static int dmz_lookup_secondary_sb(struct dmz_metadata *zmd)
{
	unsigned int zone_nr_blocks = zmd->zone_nr_blocks;
	struct dmz_mblock *mblk;
	int i;

	/* Allocate a block */
	mblk = dmz_alloc_mblock(zmd, 0);
	if (!mblk)
		return -ENOMEM;

	zmd->sb[1].mblk = mblk;
	zmd->sb[1].sb = mblk->data;

	/* Bad first super block: search for the second one */
	zmd->sb[1].block = zmd->sb[0].block + zone_nr_blocks;
	zmd->sb[1].zone = zmd->sb[0].zone + 1;
	zmd->sb[1].dev = dmz_zone_to_dev(zmd, zmd->sb[1].zone);
	for (i = 0; i < zmd->nr_rnd_zones - 1; i++) {
		if (dmz_read_sb(zmd, 1) != 0)
			break;
		if (le32_to_cpu(zmd->sb[1].sb->magic) == DMZ_MAGIC) {
			zmd->sb[1].zone += i;
			return 0;
		}
		zmd->sb[1].block += zone_nr_blocks;
		zmd->sb[1].dev = dmz_zone_to_dev(zmd, zmd->sb[1].zone + i);
	}

	dmz_free_mblock(zmd, mblk);
	zmd->sb[1].mblk = NULL;
	zmd->sb[1].zone = NULL;
	zmd->sb[1].dev = NULL;

	return -EIO;
}

/*
 * Read the first or second super block from disk.
 */
static int dmz_get_sb(struct dmz_metadata *zmd, unsigned int set)
{
	struct dmz_mblock *mblk;
	int ret;

	/* Allocate a block */
	mblk = dmz_alloc_mblock(zmd, 0);
	if (!mblk)
		return -ENOMEM;

	zmd->sb[set].mblk = mblk;
	zmd->sb[set].sb = mblk->data;

	/* Read super block */
	ret = dmz_read_sb(zmd, set);
	if (ret) {
		dmz_free_mblock(zmd, mblk);
		zmd->sb[set].mblk = NULL;
		return ret;
	}

	return 0;
}

/*
 * Recover a metadata set.
 */
static int dmz_recover_mblocks(struct dmz_metadata *zmd, unsigned int dst_set)
{
	unsigned int src_set = dst_set ^ 0x1;
	struct page *page;
	int i, ret;

	dmz_dev_warn(zmd->sb[dst_set].dev,
		     "Metadata set %u invalid: recovering", dst_set);

	if (dst_set == 0)
		zmd->sb[0].block = dmz_start_block(zmd, zmd->sb[0].zone);
	else
		zmd->sb[1].block = dmz_start_block(zmd, zmd->sb[1].zone);

	page = alloc_page(GFP_NOIO);
	if (!page)
		return -ENOMEM;

	/* Copy metadata blocks */
	for (i = 1; i < zmd->nr_meta_blocks; i++) {
		ret = dmz_rdwr_block(zmd->sb[src_set].dev, REQ_OP_READ,
				     zmd->sb[src_set].block + i, page);
		if (ret)
			goto out;
		ret = dmz_rdwr_block(zmd->sb[dst_set].dev, REQ_OP_WRITE,
				     zmd->sb[dst_set].block + i, page);
		if (ret)
			goto out;
	}

	/* Finalize with the super block */
	if (!zmd->sb[dst_set].mblk) {
		zmd->sb[dst_set].mblk = dmz_alloc_mblock(zmd, 0);
		if (!zmd->sb[dst_set].mblk) {
			ret = -ENOMEM;
			goto out;
		}
		zmd->sb[dst_set].sb = zmd->sb[dst_set].mblk->data;
	}

	ret = dmz_write_sb(zmd, dst_set);
out:
	__free_pages(page, 0);

	return ret;
}

/*
 * Get super block from disk.
 */
static int dmz_load_sb(struct dmz_metadata *zmd)
{
	bool sb_good[2] = {false, false};
	u64 sb_gen[2] = {0, 0};
	int ret;

	if (!zmd->sb[0].zone) {
		dmz_dev_err(zmd->dev, "Primary super block zone not set");
		return -ENXIO;
	}

	/* Read and check the primary super block */
	zmd->sb[0].block = dmz_start_block(zmd, zmd->sb[0].zone);
	zmd->sb[0].dev = dmz_zone_to_dev(zmd, zmd->sb[0].zone);
	ret = dmz_get_sb(zmd, 0);
	if (ret) {
		dmz_dev_err(zmd->sb[0].dev, "Read primary super block failed");
		return ret;
	}

	ret = dmz_check_sb(zmd, 0);

	/* Read and check secondary super block */
	if (ret == 0) {
		sb_good[0] = true;
		if (!zmd->sb[1].zone)
			zmd->sb[1].zone = zmd->sb[0].zone + zmd->nr_meta_zones;
		zmd->sb[1].block = dmz_start_block(zmd, zmd->sb[1].zone);
		zmd->sb[1].dev = dmz_zone_to_dev(zmd, zmd->sb[1].zone);
		ret = dmz_get_sb(zmd, 1);
	} else
		ret = dmz_lookup_secondary_sb(zmd);

	if (ret) {
		dmz_dev_err(zmd->sb[1].dev, "Read secondary super block failed");
		return ret;
	}

	ret = dmz_check_sb(zmd, 1);
	if (ret == 0)
		sb_good[1] = true;

	/* Use highest generation sb first */
	if (!sb_good[0] && !sb_good[1]) {
		dmz_dev_err(zmd->dev, "No valid super block found");
		return -EIO;
	}

	if (sb_good[0])
		sb_gen[0] = le64_to_cpu(zmd->sb[0].sb->gen);
	else {
		ret = dmz_recover_mblocks(zmd, 0);
		if (ret) {
			dmz_dev_err(zmd->sb[0].dev,
				    "Recovery of superblock 0 failed");
			return -EIO;
		}
	}

	if (sb_good[1])
		sb_gen[1] = le64_to_cpu(zmd->sb[1].sb->gen);
	else {
		ret = dmz_recover_mblocks(zmd, 1);

		if (ret) {
			dmz_dev_err(zmd->sb[1].dev,
				    "Recovery of superblock 1 failed");
			return -EIO;
		}
	}

	if (sb_gen[0] >= sb_gen[1]) {
		zmd->sb_gen = sb_gen[0];
		zmd->mblk_primary = 0;
	} else {
		zmd->sb_gen = sb_gen[1];
		zmd->mblk_primary = 1;
	}

	dmz_dev_debug(zmd->sb[zmd->mblk_primary].dev,
		      "Using super block %u (gen %llu)",
		      zmd->mblk_primary, zmd->sb_gen);

	return 0;
}
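
/*
 * The set with the highest valid generation becomes the primary
 * (mblk_primary); a stale or corrupt set is first rebuilt from the valid
 * one by dmz_recover_mblocks(), so both sets are complete before the
 * target starts handling I/O.
 */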

/*
 * Initialize a zone descriptor.
 */
static int dmz_init_zone(struct blk_zone *blkz, unsigned int idx, void *data)
{
	struct dmz_metadata *zmd = data;
	struct dm_zone *zone = &zmd->zones[idx];
	struct dmz_dev *dev = zmd->dev;

	/* Ignore the last runt (smaller) zone, if any */
	if (blkz->len != zmd->zone_nr_sectors) {
		if (blkz->start + blkz->len == dev->capacity)
			return 0;
		return -ENXIO;
	}

	INIT_LIST_HEAD(&zone->link);
	atomic_set(&zone->refcount, 0);
	zone->id = idx;
	zone->chunk = DMZ_MAP_UNMAPPED;

	switch (blkz->type) {
	case BLK_ZONE_TYPE_CONVENTIONAL:
		set_bit(DMZ_RND, &zone->flags);
		break;
	case BLK_ZONE_TYPE_SEQWRITE_REQ:
	case BLK_ZONE_TYPE_SEQWRITE_PREF:
		set_bit(DMZ_SEQ, &zone->flags);
		break;
	default:
		return -ENXIO;
	}

	if (dmz_is_rnd(zone))
		zone->wp_block = 0;
	else
		zone->wp_block = dmz_sect2blk(blkz->wp - blkz->start);

	if (blkz->cond == BLK_ZONE_COND_OFFLINE)
		set_bit(DMZ_OFFLINE, &zone->flags);
	else if (blkz->cond == BLK_ZONE_COND_READONLY)
		set_bit(DMZ_READ_ONLY, &zone->flags);
	else {
		zmd->nr_useable_zones++;
		if (dmz_is_rnd(zone)) {
			zmd->nr_rnd_zones++;
			if (!zmd->sb[0].zone) {
				/* Super block zone */
				zmd->sb[0].zone = zone;
			}
		}
	}

	return 0;
}

/*
 * Free zone descriptors.
 */
static void dmz_drop_zones(struct dmz_metadata *zmd)
{
	kfree(zmd->zones);
	zmd->zones = NULL;
}

/*
 * Allocate and initialize zone descriptors using the zone
 * information from disk.
 */
static int dmz_init_zones(struct dmz_metadata *zmd)
{
	struct dmz_dev *dev = zmd->dev;
	int ret;

	/* Init */
	zmd->zone_nr_sectors = dev->zone_nr_sectors;
	zmd->zone_nr_sectors_shift = ilog2(zmd->zone_nr_sectors);
	zmd->zone_nr_blocks = dmz_sect2blk(zmd->zone_nr_sectors);
	zmd->zone_nr_blocks_shift = ilog2(zmd->zone_nr_blocks);
	zmd->zone_bitmap_size = zmd->zone_nr_blocks >> 3;
	zmd->zone_nr_bitmap_blocks =
		max_t(sector_t, 1, zmd->zone_bitmap_size >> DMZ_BLOCK_SHIFT);
	zmd->zone_bits_per_mblk = min_t(sector_t, zmd->zone_nr_blocks,
					DMZ_BLOCK_SIZE_BITS);

	/* Allocate zone array */
	zmd->nr_zones = dev->nr_zones;
	zmd->zones = kcalloc(zmd->nr_zones, sizeof(struct dm_zone), GFP_KERNEL);
	if (!zmd->zones)
		return -ENOMEM;

	dmz_dev_info(dev, "Using %zu B for zone information",
		     sizeof(struct dm_zone) * zmd->nr_zones);

	/*
	 * Get zone information and initialize zone descriptors. At the same
	 * time, determine where the super block should be: first block of the
	 * first randomly writable zone.
	 */
	ret = blkdev_report_zones(dev->bdev, 0, BLK_ALL_ZONES, dmz_init_zone,
				  zmd);
	if (ret < 0) {
		dmz_drop_zones(zmd);
		return ret;
	}

	return 0;
}

static int dmz_update_zone_cb(struct blk_zone *blkz, unsigned int idx,
			      void *data)
{
	struct dm_zone *zone = data;

	clear_bit(DMZ_OFFLINE, &zone->flags);
	clear_bit(DMZ_READ_ONLY, &zone->flags);
	if (blkz->cond == BLK_ZONE_COND_OFFLINE)
		set_bit(DMZ_OFFLINE, &zone->flags);
	else if (blkz->cond == BLK_ZONE_COND_READONLY)
		set_bit(DMZ_READ_ONLY, &zone->flags);

	if (dmz_is_seq(zone))
		zone->wp_block = dmz_sect2blk(blkz->wp - blkz->start);
	else
		zone->wp_block = 0;
	return 0;
}

/*
 * Update a zone's information.
 */
static int dmz_update_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
{
	struct dmz_dev *dev = dmz_zone_to_dev(zmd, zone);
	unsigned int noio_flag;
	int ret;

	/*
	 * Get zone information from disk. Since blkdev_report_zones() uses
	 * GFP_KERNEL by default for memory allocations, set the per-task
	 * PF_MEMALLOC_NOIO flag so that all allocations are done as if
	 * GFP_NOIO was specified.
	 */
	noio_flag = memalloc_noio_save();
	ret = blkdev_report_zones(dev->bdev, dmz_start_sect(zmd, zone), 1,
				  dmz_update_zone_cb, zone);
	memalloc_noio_restore(noio_flag);

	if (ret == 0)
		ret = -EIO;
	if (ret < 0) {
		dmz_dev_err(dev, "Get zone %u report failed",
			    zone->id);
		dmz_check_bdev(dev);
		return ret;
	}

	return 0;
}

/*
 * Check a zone write pointer position when the zone is marked
 * with the sequential write error flag.
 */
static int dmz_handle_seq_write_err(struct dmz_metadata *zmd,
				    struct dm_zone *zone)
{
	struct dmz_dev *dev = dmz_zone_to_dev(zmd, zone);
	unsigned int wp = 0;
	int ret;

	wp = zone->wp_block;
	ret = dmz_update_zone(zmd, zone);
	if (ret)
		return ret;

	dmz_dev_warn(dev, "Processing zone %u write error (zone wp %u/%u)",
		     zone->id, zone->wp_block, wp);

	if (zone->wp_block < wp) {
		dmz_invalidate_blocks(zmd, zone, zone->wp_block,
				      wp - zone->wp_block);
	}

	return 0;
}

static struct dm_zone *dmz_get(struct dmz_metadata *zmd, unsigned int zone_id)
{
	return &zmd->zones[zone_id];
}

/*
 * Reset a zone write pointer.
 */
static int dmz_reset_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
{
	int ret;

	/*
	 * Ignore offline zones, read only zones,
	 * and conventional zones.
	 */
	if (dmz_is_offline(zone) ||
	    dmz_is_readonly(zone) ||
	    dmz_is_rnd(zone))
		return 0;

	if (!dmz_is_empty(zone) || dmz_seq_write_err(zone)) {
		struct dmz_dev *dev = dmz_zone_to_dev(zmd, zone);

		ret = blkdev_zone_mgmt(dev->bdev, REQ_OP_ZONE_RESET,
				       dmz_start_sect(zmd, zone),
				       zmd->zone_nr_sectors, GFP_NOIO);
		if (ret) {
			dmz_dev_err(dev, "Reset zone %u failed %d",
				    zone->id, ret);
			return ret;
		}
	}

	/* Clear write error bit and rewind write pointer position */
	clear_bit(DMZ_SEQ_WRITE_ERR, &zone->flags);
	zone->wp_block = 0;

	return 0;
}

static void dmz_get_zone_weight(struct dmz_metadata *zmd, struct dm_zone *zone);

/*
 * Initialize chunk mapping.
 */
static int dmz_load_mapping(struct dmz_metadata *zmd)
{
	struct dmz_dev *dev = zmd->dev;
	struct dm_zone *dzone, *bzone;
	struct dmz_mblock *dmap_mblk = NULL;
	struct dmz_map *dmap;
	unsigned int i = 0, e = 0, chunk = 0;
	unsigned int dzone_id;
	unsigned int bzone_id;

	/* Metadata block array for the chunk mapping table */
	zmd->map_mblk = kcalloc(zmd->nr_map_blocks,
				sizeof(struct dmz_mblock *), GFP_KERNEL);
	if (!zmd->map_mblk)
		return -ENOMEM;

	/* Get chunk mapping table blocks and initialize zone mapping */
	while (chunk < zmd->nr_chunks) {
		if (!dmap_mblk) {
			/* Get mapping block */
			dmap_mblk = dmz_get_mblock(zmd, i + 1);
			if (IS_ERR(dmap_mblk))
				return PTR_ERR(dmap_mblk);
			zmd->map_mblk[i] = dmap_mblk;
			dmap = (struct dmz_map *) dmap_mblk->data;
			i++;
			e = 0;
		}

		/* Check data zone */
		dzone_id = le32_to_cpu(dmap[e].dzone_id);
		if (dzone_id == DMZ_MAP_UNMAPPED)
			goto next;

		if (dzone_id >= zmd->nr_zones) {
			dmz_dev_err(dev, "Chunk %u mapping: invalid data zone ID %u",
				    chunk, dzone_id);
			return -EIO;
		}

		dzone = dmz_get(zmd, dzone_id);
		set_bit(DMZ_DATA, &dzone->flags);
		dzone->chunk = chunk;
		dmz_get_zone_weight(zmd, dzone);

		if (dmz_is_rnd(dzone))
			list_add_tail(&dzone->link, &zmd->map_rnd_list);
		else
			list_add_tail(&dzone->link, &zmd->map_seq_list);

		/* Check buffer zone */
		bzone_id = le32_to_cpu(dmap[e].bzone_id);
		if (bzone_id == DMZ_MAP_UNMAPPED)
			goto next;

		if (bzone_id >= zmd->nr_zones) {
			dmz_dev_err(dev, "Chunk %u mapping: invalid buffer zone ID %u",
				    chunk, bzone_id);
			return -EIO;
		}

		bzone = dmz_get(zmd, bzone_id);
		if (!dmz_is_rnd(bzone)) {
			dmz_dev_err(dev, "Chunk %u mapping: invalid buffer zone %u",
				    chunk, bzone_id);
			return -EIO;
		}

		set_bit(DMZ_DATA, &bzone->flags);
		set_bit(DMZ_BUF, &bzone->flags);
		bzone->chunk = chunk;
		bzone->bzone = dzone;
		dzone->bzone = bzone;
		dmz_get_zone_weight(zmd, bzone);
		list_add_tail(&bzone->link, &zmd->map_rnd_list);
next:
		chunk++;
		e++;
		if (e >= DMZ_MAP_ENTRIES)
			dmap_mblk = NULL;
	}

	/*
	 * At this point, only meta zones and mapped data zones were
	 * fully initialized. All remaining zones are unmapped data
	 * zones. Finish initializing those here.
	 */
	for (i = 0; i < zmd->nr_zones; i++) {
		dzone = dmz_get(zmd, i);
		if (dmz_is_meta(dzone))
			continue;

		if (dmz_is_rnd(dzone))
			zmd->nr_rnd++;
		else
			zmd->nr_seq++;

		if (dmz_is_data(dzone)) {
			/* Already initialized */
			continue;
		}

		/* Unmapped data zone */
		set_bit(DMZ_DATA, &dzone->flags);
		dzone->chunk = DMZ_MAP_UNMAPPED;
		if (dmz_is_rnd(dzone)) {
			list_add_tail(&dzone->link, &zmd->unmap_rnd_list);
			atomic_inc(&zmd->unmap_nr_rnd);
		} else if (atomic_read(&zmd->nr_reserved_seq_zones) < zmd->nr_reserved_seq) {
			list_add_tail(&dzone->link, &zmd->reserved_seq_zones_list);
			atomic_inc(&zmd->nr_reserved_seq_zones);
			zmd->nr_seq--;
		} else {
			list_add_tail(&dzone->link, &zmd->unmap_seq_list);
			atomic_inc(&zmd->unmap_nr_seq);
		}
	}

	return 0;
}

/*
 * Set a data chunk mapping.
 */
static void dmz_set_chunk_mapping(struct dmz_metadata *zmd, unsigned int chunk,
				  unsigned int dzone_id, unsigned int bzone_id)
{
	struct dmz_mblock *dmap_mblk = zmd->map_mblk[chunk >> DMZ_MAP_ENTRIES_SHIFT];
	struct dmz_map *dmap = (struct dmz_map *) dmap_mblk->data;
	int map_idx = chunk & DMZ_MAP_ENTRIES_MASK;

	dmap[map_idx].dzone_id = cpu_to_le32(dzone_id);
	dmap[map_idx].bzone_id = cpu_to_le32(bzone_id);
	dmz_dirty_mblock(zmd, dmap_mblk);
}

/*
 * The list of mapped zones is maintained in LRU order.
 * This rotates a zone to the end of its map list.
 */
static void __dmz_lru_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
{
	if (list_empty(&zone->link))
		return;

	list_del_init(&zone->link);
	if (dmz_is_seq(zone)) {
		/* LRU rotate sequential zone */
		list_add_tail(&zone->link, &zmd->map_seq_list);
	} else {
		/* LRU rotate random zone */
		list_add_tail(&zone->link, &zmd->map_rnd_list);
	}
}

/*
 * The list of mapped random zones is maintained
 * in LRU order. This rotates a zone to the end of the list.
 */
static void dmz_lru_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
{
	__dmz_lru_zone(zmd, zone);
	if (zone->bzone)
		__dmz_lru_zone(zmd, zone->bzone);
}
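
/*
 * Because zones are rotated to the tail on each access, the head of
 * map_rnd_list and map_seq_list is always the least recently used zone,
 * which is what the reclaim selection helpers below scan for first.
 */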

/*
 * Wait for any zone to be freed.
 */
static void dmz_wait_for_free_zones(struct dmz_metadata *zmd)
{
	DEFINE_WAIT(wait);

	prepare_to_wait(&zmd->free_wq, &wait, TASK_UNINTERRUPTIBLE);
	dmz_unlock_map(zmd);
	dmz_unlock_metadata(zmd);

	io_schedule_timeout(HZ);

	dmz_lock_metadata(zmd);
	dmz_lock_map(zmd);
	finish_wait(&zmd->free_wq, &wait);
}

/*
 * Lock a zone for reclaim (set the zone RECLAIM bit).
 * Returns 0 if the zone cannot be locked or if it is already locked,
 * and 1 otherwise.
 */
int dmz_lock_zone_reclaim(struct dm_zone *zone)
{
	/* Active zones cannot be reclaimed */
	if (dmz_is_active(zone))
		return 0;

	return !test_and_set_bit(DMZ_RECLAIM, &zone->flags);
}

/*
 * Clear a zone reclaim flag.
 */
void dmz_unlock_zone_reclaim(struct dm_zone *zone)
{
	WARN_ON(dmz_is_active(zone));
	WARN_ON(!dmz_in_reclaim(zone));

	clear_bit_unlock(DMZ_RECLAIM, &zone->flags);
	smp_mb__after_atomic();
	wake_up_bit(&zone->flags, DMZ_RECLAIM);
}

/*
 * Wait for a zone reclaim to complete.
 */
static void dmz_wait_for_reclaim(struct dmz_metadata *zmd, struct dm_zone *zone)
{
	dmz_unlock_map(zmd);
	dmz_unlock_metadata(zmd);
	wait_on_bit_timeout(&zone->flags, DMZ_RECLAIM, TASK_UNINTERRUPTIBLE, HZ);
	dmz_lock_metadata(zmd);
	dmz_lock_map(zmd);
}

/*
 * Select a random write zone for reclaim.
 */
static struct dm_zone *dmz_get_rnd_zone_for_reclaim(struct dmz_metadata *zmd)
{
	struct dm_zone *dzone = NULL;
	struct dm_zone *zone;

	if (list_empty(&zmd->map_rnd_list))
		return ERR_PTR(-EBUSY);

	list_for_each_entry(zone, &zmd->map_rnd_list, link) {
		if (dmz_is_buf(zone))
			dzone = zone->bzone;
		else
			dzone = zone;
		if (dmz_lock_zone_reclaim(dzone))
			return dzone;
	}

	return ERR_PTR(-EBUSY);
}

/*
 * Select a buffered sequential zone for reclaim.
 */
static struct dm_zone *dmz_get_seq_zone_for_reclaim(struct dmz_metadata *zmd)
{
	struct dm_zone *zone;

	if (list_empty(&zmd->map_seq_list))
		return ERR_PTR(-EBUSY);

	list_for_each_entry(zone, &zmd->map_seq_list, link) {
		if (!zone->bzone)
			continue;
		if (dmz_lock_zone_reclaim(zone))
			return zone;
	}

	return ERR_PTR(-EBUSY);
}

/*
 * Select a zone for reclaim.
 */
struct dm_zone *dmz_get_zone_for_reclaim(struct dmz_metadata *zmd)
{
	struct dm_zone *zone;

	/*
	 * Search for a zone candidate to reclaim: 2 cases are possible.
	 * (1) There are no free sequential zones. Then a random data zone
	 *     cannot be reclaimed. So choose a sequential zone to reclaim so
	 *     that afterward a random zone can be reclaimed.
	 * (2) At least one free sequential zone is available, then choose
	 *     the oldest random zone (data or buffer) that can be locked.
	 */
	dmz_lock_map(zmd);
	if (list_empty(&zmd->reserved_seq_zones_list))
		zone = dmz_get_seq_zone_for_reclaim(zmd);
	else
		zone = dmz_get_rnd_zone_for_reclaim(zmd);
	dmz_unlock_map(zmd);

	return zone;
}

/*
 * Get the zone mapping a chunk, if the chunk is mapped already.
 * If no mapping exists and the operation is WRITE, a zone is
 * allocated and used to map the chunk.
 * The zone returned will be set to the active state.
 */
struct dm_zone *dmz_get_chunk_mapping(struct dmz_metadata *zmd, unsigned int chunk, int op)
{
	struct dmz_mblock *dmap_mblk = zmd->map_mblk[chunk >> DMZ_MAP_ENTRIES_SHIFT];
	struct dmz_map *dmap = (struct dmz_map *) dmap_mblk->data;
	int dmap_idx = chunk & DMZ_MAP_ENTRIES_MASK;
	unsigned int dzone_id;
	struct dm_zone *dzone = NULL;
	int ret = 0;

	dmz_lock_map(zmd);
again:
	/* Get the chunk mapping */
	dzone_id = le32_to_cpu(dmap[dmap_idx].dzone_id);
	if (dzone_id == DMZ_MAP_UNMAPPED) {
		/*
		 * Reads and discards in unmapped chunks are fine. But for
		 * writes, we need a mapping, so get one.
		 */
		if (op != REQ_OP_WRITE)
			goto out;

		/* Allocate a random zone */
		dzone = dmz_alloc_zone(zmd, DMZ_ALLOC_RND);
		if (!dzone) {
			if (dmz_dev_is_dying(zmd)) {
				dzone = ERR_PTR(-EIO);
				goto out;
			}
			dmz_wait_for_free_zones(zmd);
			goto again;
		}

		dmz_map_zone(zmd, dzone, chunk);

	} else {
		/* The chunk is already mapped: get the mapping zone */
		dzone = dmz_get(zmd, dzone_id);
		if (dzone->chunk != chunk) {
			dzone = ERR_PTR(-EIO);
			goto out;
		}

		/* Repair write pointer if the sequential dzone has error */
		if (dmz_seq_write_err(dzone)) {
			ret = dmz_handle_seq_write_err(zmd, dzone);
			if (ret) {
				dzone = ERR_PTR(-EIO);
				goto out;
			}
			clear_bit(DMZ_SEQ_WRITE_ERR, &dzone->flags);
		}
	}

	/*
	 * If the zone is being reclaimed, the chunk mapping may change
	 * to a different zone. So wait for reclaim and retry. Otherwise,
	 * activate the zone (this will prevent reclaim from touching it).
	 */
	if (dmz_in_reclaim(dzone)) {
		dmz_wait_for_reclaim(zmd, dzone);
		goto again;
	}
	dmz_activate_zone(dzone);
	dmz_lru_zone(zmd, dzone);
out:
	dmz_unlock_map(zmd);

	return dzone;
}

/*
 * Write and discard change the block validity of data zones and their buffer
 * zones. Check here that valid blocks are still present. If all blocks are
 * invalid, the zones can be unmapped on the fly without waiting for reclaim
 * to do it.
 */
void dmz_put_chunk_mapping(struct dmz_metadata *zmd, struct dm_zone *dzone)
{
	struct dm_zone *bzone;

	dmz_lock_map(zmd);

	bzone = dzone->bzone;
	if (bzone) {
		if (dmz_weight(bzone))
			dmz_lru_zone(zmd, bzone);
		else {
			/* Empty buffer zone: reclaim it */
			dmz_unmap_zone(zmd, bzone);
			dmz_free_zone(zmd, bzone);
			bzone = NULL;
		}
	}

	/* Deactivate the data zone */
	dmz_deactivate_zone(dzone);
	if (dmz_is_active(dzone) || bzone || dmz_weight(dzone))
		dmz_lru_zone(zmd, dzone);
	else {
		/* Unbuffered inactive empty data zone: reclaim it */
		dmz_unmap_zone(zmd, dzone);
		dmz_free_zone(zmd, dzone);
	}

	dmz_unlock_map(zmd);
}

/*
 * Allocate and map a random zone to buffer a chunk
 * already mapped to a sequential zone.
 */
struct dm_zone *dmz_get_chunk_buffer(struct dmz_metadata *zmd,
				     struct dm_zone *dzone)
{
	struct dm_zone *bzone;

	dmz_lock_map(zmd);
again:
	bzone = dzone->bzone;
	if (bzone)
		goto out;

	/* Allocate a random zone */
	bzone = dmz_alloc_zone(zmd, DMZ_ALLOC_RND);
	if (!bzone) {
		if (dmz_dev_is_dying(zmd)) {
			bzone = ERR_PTR(-EIO);
			goto out;
		}
		dmz_wait_for_free_zones(zmd);
		goto again;
	}

	/* Update the chunk mapping */
	dmz_set_chunk_mapping(zmd, dzone->chunk, dzone->id, bzone->id);

	set_bit(DMZ_BUF, &bzone->flags);
	bzone->chunk = dzone->chunk;
	bzone->bzone = dzone;
	dzone->bzone = bzone;
	list_add_tail(&bzone->link, &zmd->map_rnd_list);
out:
	dmz_unlock_map(zmd);

	return bzone;
}

/*
 * Get an unmapped (free) zone.
 * This must be called with the mapping lock held.
 */
struct dm_zone *dmz_alloc_zone(struct dmz_metadata *zmd, unsigned long flags)
{
	struct list_head *list;
	struct dm_zone *zone;

	if (flags & DMZ_ALLOC_RND)
		list = &zmd->unmap_rnd_list;
	else
		list = &zmd->unmap_seq_list;
again:
	if (list_empty(list)) {
		/*
		 * No free zone: if this is for reclaim, allow using the
		 * reserved sequential zones.
		 */
		if (!(flags & DMZ_ALLOC_RECLAIM) ||
		    list_empty(&zmd->reserved_seq_zones_list))
			return NULL;

		zone = list_first_entry(&zmd->reserved_seq_zones_list,
					struct dm_zone, link);
		list_del_init(&zone->link);
		atomic_dec(&zmd->nr_reserved_seq_zones);
		return zone;
	}

	zone = list_first_entry(list, struct dm_zone, link);
	list_del_init(&zone->link);

	if (dmz_is_rnd(zone))
		atomic_dec(&zmd->unmap_nr_rnd);
	else
		atomic_dec(&zmd->unmap_nr_seq);

	if (dmz_is_offline(zone)) {
		dmz_dev_warn(zmd->dev, "Zone %u is offline", zone->id);
		zone = NULL;
		goto again;
	}

	return zone;
}

/*
 * Free a zone.
 * This must be called with the mapping lock held.
 */
void dmz_free_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
{
	/* If this is a sequential zone, reset it */
	if (dmz_is_seq(zone))
		dmz_reset_zone(zmd, zone);

	/* Return the zone to its type unmap list */
	if (dmz_is_rnd(zone)) {
		list_add_tail(&zone->link, &zmd->unmap_rnd_list);
		atomic_inc(&zmd->unmap_nr_rnd);
	} else if (atomic_read(&zmd->nr_reserved_seq_zones) <
		   zmd->nr_reserved_seq) {
		list_add_tail(&zone->link, &zmd->reserved_seq_zones_list);
		atomic_inc(&zmd->nr_reserved_seq_zones);
	} else {
		list_add_tail(&zone->link, &zmd->unmap_seq_list);
		atomic_inc(&zmd->unmap_nr_seq);
	}

	wake_up_all(&zmd->free_wq);
}

/*
 * Map a chunk to a zone.
 * This must be called with the mapping lock held.
 */
void dmz_map_zone(struct dmz_metadata *zmd, struct dm_zone *dzone,
		  unsigned int chunk)
{
	/* Set the chunk mapping */
	dmz_set_chunk_mapping(zmd, chunk, dzone->id,
			      DMZ_MAP_UNMAPPED);
	dzone->chunk = chunk;
	if (dmz_is_rnd(dzone))
		list_add_tail(&dzone->link, &zmd->map_rnd_list);
	else
		list_add_tail(&dzone->link, &zmd->map_seq_list);
}

/*
 * Unmap a zone.
 * This must be called with the mapping lock held.
 */
void dmz_unmap_zone(struct dmz_metadata *zmd, struct dm_zone *zone)
{
	unsigned int chunk = zone->chunk;
	unsigned int dzone_id;

	if (chunk == DMZ_MAP_UNMAPPED) {
		/* Already unmapped */
		return;
	}

	if (test_and_clear_bit(DMZ_BUF, &zone->flags)) {
		/*
		 * Unmapping the chunk buffer zone: clear only
		 * the chunk buffer mapping
		 */
		dzone_id = zone->bzone->id;
		zone->bzone->bzone = NULL;
		zone->bzone = NULL;

	} else {
		/*
		 * Unmapping the chunk data zone: the zone must
		 * not be buffered.
		 */
		if (WARN_ON(zone->bzone)) {
			zone->bzone->bzone = NULL;
			zone->bzone = NULL;
		}
		dzone_id = DMZ_MAP_UNMAPPED;
	}

	dmz_set_chunk_mapping(zmd, chunk, dzone_id, DMZ_MAP_UNMAPPED);

	zone->chunk = DMZ_MAP_UNMAPPED;
	list_del_init(&zone->link);
}

/*
 * Set @nr_bits bits in @bitmap starting from @bit.
 * Return the number of bits changed from 0 to 1.
 */
static unsigned int dmz_set_bits(unsigned long *bitmap,
				 unsigned int bit, unsigned int nr_bits)
{
	unsigned long *addr;
	unsigned int end = bit + nr_bits;
	unsigned int n = 0;

	while (bit < end) {
		if (((bit & (BITS_PER_LONG - 1)) == 0) &&
		    ((end - bit) >= BITS_PER_LONG)) {
			/* Try to set the whole word at once */
			addr = bitmap + BIT_WORD(bit);
			if (*addr == 0) {
				*addr = ULONG_MAX;
				n += BITS_PER_LONG;
				bit += BITS_PER_LONG;
				continue;
			}
		}

		if (!test_and_set_bit(bit, bitmap))
			n++;
		bit++;
	}

	return n;
}
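
/*
 * The word-at-once fast path only triggers for an aligned, fully clear
 * word; anything else falls back to test_and_set_bit() so that the
 * returned count of newly set bits stays exact. Callers rely on that
 * count to keep zone->weight (the number of valid blocks) accurate.
 */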
2019
2020/*
2021 * Get the bitmap block storing the bit for chunk_block in zone.
2022 */
2023static struct dmz_mblock *dmz_get_bitmap(struct dmz_metadata *zmd,
2024 struct dm_zone *zone,
2025 sector_t chunk_block)
2026{
	sector_t bitmap_block = 1 + zmd->nr_map_blocks +
		(sector_t)(zone->id * zmd->zone_nr_bitmap_blocks) +
		(chunk_block >> DMZ_BLOCK_SHIFT_BITS);

	return dmz_get_mblock(zmd, bitmap_block);
}

/*
 * Copy the valid blocks bitmap of from_zone to the bitmap of to_zone.
 */
int dmz_copy_valid_blocks(struct dmz_metadata *zmd, struct dm_zone *from_zone,
			  struct dm_zone *to_zone)
{
	struct dmz_mblock *from_mblk, *to_mblk;
	sector_t chunk_block = 0;

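	/*
	 * Copy one 4KB bitmap block per iteration; each block covers
	 * zone_bits_per_mblk chunk blocks.
	 */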
	/* Get both zones' bitmap blocks */
	while (chunk_block < zmd->zone_nr_blocks) {
		from_mblk = dmz_get_bitmap(zmd, from_zone, chunk_block);
		if (IS_ERR(from_mblk))
			return PTR_ERR(from_mblk);
		to_mblk = dmz_get_bitmap(zmd, to_zone, chunk_block);
		if (IS_ERR(to_mblk)) {
			dmz_release_mblock(zmd, from_mblk);
			return PTR_ERR(to_mblk);
		}

		memcpy(to_mblk->data, from_mblk->data, DMZ_BLOCK_SIZE);
		dmz_dirty_mblock(zmd, to_mblk);

		dmz_release_mblock(zmd, to_mblk);
		dmz_release_mblock(zmd, from_mblk);

		chunk_block += zmd->zone_bits_per_mblk;
	}

	to_zone->weight = from_zone->weight;

	return 0;
}

/*
 * Merge the valid blocks bitmap of from_zone into the bitmap of to_zone,
 * starting from chunk_block.
 */
int dmz_merge_valid_blocks(struct dmz_metadata *zmd, struct dm_zone *from_zone,
			   struct dm_zone *to_zone, sector_t chunk_block)
{
	unsigned int nr_blocks;
	int ret;

	/* Walk the valid regions of the source zone */
	while (chunk_block < zmd->zone_nr_blocks) {
		/* Get a valid region from the source zone */
		ret = dmz_first_valid_block(zmd, from_zone, &chunk_block);
		if (ret <= 0)
			return ret;

		nr_blocks = ret;
		ret = dmz_validate_blocks(zmd, to_zone, chunk_block, nr_blocks);
		if (ret)
			return ret;

		chunk_block += nr_blocks;
	}

	return 0;
}

/*
 * Validate all the blocks in the range [block..block+nr_blocks-1].
 */
int dmz_validate_blocks(struct dmz_metadata *zmd, struct dm_zone *zone,
			sector_t chunk_block, unsigned int nr_blocks)
{
	unsigned int count, bit, nr_bits;
	unsigned int zone_nr_blocks = zmd->zone_nr_blocks;
	struct dmz_mblock *mblk;
	unsigned int n = 0;

	dmz_dev_debug(zmd->dev, "=> VALIDATE zone %u, block %llu, %u blocks",
		      zone->id, (unsigned long long)chunk_block,
		      nr_blocks);

	WARN_ON(chunk_block + nr_blocks > zone_nr_blocks);

	while (nr_blocks) {
		/* Get bitmap block */
		mblk = dmz_get_bitmap(zmd, zone, chunk_block);
		if (IS_ERR(mblk))
			return PTR_ERR(mblk);

		/* Set bits */
		bit = chunk_block & DMZ_BLOCK_MASK_BITS;
		nr_bits = min(nr_blocks, zmd->zone_bits_per_mblk - bit);

		count = dmz_set_bits((unsigned long *)mblk->data, bit, nr_bits);
		if (count) {
			dmz_dirty_mblock(zmd, mblk);
			n += count;
		}
		dmz_release_mblock(zmd, mblk);

		nr_blocks -= nr_bits;
		chunk_block += nr_bits;
	}

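	/*
	 * The zone weight caches the number of valid blocks and can never
	 * exceed the zone size: clamp it and warn if the bitmap update and
	 * the cached weight disagree.
	 */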
	if (likely(zone->weight + n <= zone_nr_blocks))
		zone->weight += n;
	else {
		dmz_dev_warn(zmd->dev, "Zone %u: weight %u should be <= %u",
			     zone->id, zone->weight,
			     zone_nr_blocks - n);
		zone->weight = zone_nr_blocks;
	}

	return 0;
}

/*
 * Clear nr_bits bits in bitmap starting from bit.
 * Return the number of bits cleared.
 */
static int dmz_clear_bits(unsigned long *bitmap, int bit, int nr_bits)
{
	unsigned long *addr;
	int end = bit + nr_bits;
	int n = 0;

	while (bit < end) {
		if (((bit & (BITS_PER_LONG - 1)) == 0) &&
		    ((end - bit) >= BITS_PER_LONG)) {
			/* Try to clear whole word at once */
			addr = bitmap + BIT_WORD(bit);
			if (*addr == ULONG_MAX) {
				*addr = 0;
				n += BITS_PER_LONG;
				bit += BITS_PER_LONG;
				continue;
			}
		}

		if (test_and_clear_bit(bit, bitmap))
			n++;
		bit++;
	}

	return n;
}

/*
 * Invalidate all the blocks in the range [block..block+nr_blocks-1].
 */
int dmz_invalidate_blocks(struct dmz_metadata *zmd, struct dm_zone *zone,
			  sector_t chunk_block, unsigned int nr_blocks)
{
	unsigned int count, bit, nr_bits;
	struct dmz_mblock *mblk;
	unsigned int n = 0;

	dmz_dev_debug(zmd->dev, "=> INVALIDATE zone %u, block %llu, %u blocks",
		      zone->id, (u64)chunk_block, nr_blocks);

	WARN_ON(chunk_block + nr_blocks > zmd->zone_nr_blocks);

	while (nr_blocks) {
		/* Get bitmap block */
		mblk = dmz_get_bitmap(zmd, zone, chunk_block);
		if (IS_ERR(mblk))
			return PTR_ERR(mblk);

		/* Clear bits */
		bit = chunk_block & DMZ_BLOCK_MASK_BITS;
		nr_bits = min(nr_blocks, zmd->zone_bits_per_mblk - bit);

		count = dmz_clear_bits((unsigned long *)mblk->data,
				       bit, nr_bits);
		if (count) {
			dmz_dirty_mblock(zmd, mblk);
			n += count;
		}
		dmz_release_mblock(zmd, mblk);

		nr_blocks -= nr_bits;
		chunk_block += nr_bits;
	}

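	/* The weight cannot go negative: clamp at zero and warn on mismatch. */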
	if (zone->weight >= n)
		zone->weight -= n;
	else {
		dmz_dev_warn(zmd->dev, "Zone %u: weight %u should be >= %u",
			     zone->id, zone->weight, n);
		zone->weight = 0;
	}

	return 0;
}

/*
 * Get a block bit value.
 */
static int dmz_test_block(struct dmz_metadata *zmd, struct dm_zone *zone,
			  sector_t chunk_block)
{
	struct dmz_mblock *mblk;
	int ret;

	WARN_ON(chunk_block >= zmd->zone_nr_blocks);

	/* Get bitmap block */
	mblk = dmz_get_bitmap(zmd, zone, chunk_block);
	if (IS_ERR(mblk))
		return PTR_ERR(mblk);

	/* Test the bit for the block */
	ret = test_bit(chunk_block & DMZ_BLOCK_MASK_BITS,
		       (unsigned long *) mblk->data) != 0;

	dmz_release_mblock(zmd, mblk);

	return ret;
}

/*
 * Return the number of blocks from chunk_block to the first block with a bit
 * value specified by set. Search at most nr_blocks blocks from chunk_block.
 */
static int dmz_to_next_set_block(struct dmz_metadata *zmd, struct dm_zone *zone,
				 sector_t chunk_block, unsigned int nr_blocks,
				 int set)
{
	struct dmz_mblock *mblk;
	unsigned int bit, set_bit, nr_bits;
	unsigned int zone_bits = zmd->zone_bits_per_mblk;
	unsigned long *bitmap;
	int n = 0;

	WARN_ON(chunk_block + nr_blocks > zmd->zone_nr_blocks);

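	/*
	 * Scan one bitmap block at a time with find_next_bit() or
	 * find_next_zero_bit(), stopping at the first match or at the end
	 * of the search range.
	 */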
	while (nr_blocks) {
		/* Get bitmap block */
		mblk = dmz_get_bitmap(zmd, zone, chunk_block);
		if (IS_ERR(mblk))
			return PTR_ERR(mblk);

		/* Get offset */
		bitmap = (unsigned long *) mblk->data;
		bit = chunk_block & DMZ_BLOCK_MASK_BITS;
		nr_bits = min(nr_blocks, zone_bits - bit);
		if (set)
			set_bit = find_next_bit(bitmap, zone_bits, bit);
		else
			set_bit = find_next_zero_bit(bitmap, zone_bits, bit);
		dmz_release_mblock(zmd, mblk);

		n += set_bit - bit;
		if (set_bit < zone_bits)
			break;

		nr_blocks -= nr_bits;
		chunk_block += nr_bits;
	}

	return n;
}

/*
 * Test if chunk_block is valid. If it is, the number of consecutive
 * valid blocks from chunk_block will be returned.
 */
int dmz_block_valid(struct dmz_metadata *zmd, struct dm_zone *zone,
		    sector_t chunk_block)
{
	int valid;

	valid = dmz_test_block(zmd, zone, chunk_block);
	if (valid <= 0)
		return valid;

	/* The block is valid: get the number of valid blocks from block */
	return dmz_to_next_set_block(zmd, zone, chunk_block,
				     zmd->zone_nr_blocks - chunk_block, 0);
}

/*
 * Find the first valid block from @chunk_block in @zone.
 * If such a block is found, its number is returned using
 * @chunk_block and the total number of valid blocks from @chunk_block
 * is returned.
 */
int dmz_first_valid_block(struct dmz_metadata *zmd, struct dm_zone *zone,
			  sector_t *chunk_block)
{
	sector_t start_block = *chunk_block;
	int ret;

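	/*
	 * First skip ahead to the next valid (set) bit, then measure the
	 * length of the run of valid blocks starting there.
	 */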
	ret = dmz_to_next_set_block(zmd, zone, start_block,
				    zmd->zone_nr_blocks - start_block, 1);
	if (ret < 0)
		return ret;

	start_block += ret;
	*chunk_block = start_block;

	return dmz_to_next_set_block(zmd, zone, start_block,
				     zmd->zone_nr_blocks - start_block, 0);
}

/*
 * Count the number of bits set starting from bit up to bit + nr_bits - 1.
 */
static int dmz_count_bits(void *bitmap, int bit, int nr_bits)
{
	unsigned long *addr;
	int end = bit + nr_bits;
	int n = 0;

	while (bit < end) {
		if (((bit & (BITS_PER_LONG - 1)) == 0) &&
		    ((end - bit) >= BITS_PER_LONG)) {
			addr = (unsigned long *)bitmap + BIT_WORD(bit);
			if (*addr == ULONG_MAX) {
				n += BITS_PER_LONG;
				bit += BITS_PER_LONG;
				continue;
			}
		}

		if (test_bit(bit, bitmap))
			n++;
		bit++;
	}

	return n;
}

/*
 * Get a zone weight, that is, the number of valid blocks in the zone.
 */
static void dmz_get_zone_weight(struct dmz_metadata *zmd, struct dm_zone *zone)
{
	struct dmz_mblock *mblk;
	sector_t chunk_block = 0;
	unsigned int bit, nr_bits;
	unsigned int nr_blocks = zmd->zone_nr_blocks;
	void *bitmap;
	int n = 0;

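	/*
	 * Count the set (valid) bits one bitmap block at a time. If a
	 * bitmap block cannot be read, fall back to a weight of zero.
	 */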
	while (nr_blocks) {
		/* Get bitmap block */
		mblk = dmz_get_bitmap(zmd, zone, chunk_block);
		if (IS_ERR(mblk)) {
			n = 0;
			break;
		}

		/* Count bits in this block */
		bitmap = mblk->data;
		bit = chunk_block & DMZ_BLOCK_MASK_BITS;
		nr_bits = min(nr_blocks, zmd->zone_bits_per_mblk - bit);
		n += dmz_count_bits(bitmap, bit, nr_bits);

		dmz_release_mblock(zmd, mblk);

		nr_blocks -= nr_bits;
		chunk_block += nr_bits;
	}

	zone->weight = n;
}

/*
 * Cleanup the zoned metadata resources.
 */
static void dmz_cleanup_metadata(struct dmz_metadata *zmd)
{
	struct rb_root *root;
	struct dmz_mblock *mblk, *next;
	int i;

	/* Release zone mapping resources */
	if (zmd->map_mblk) {
		for (i = 0; i < zmd->nr_map_blocks; i++)
			dmz_release_mblock(zmd, zmd->map_mblk[i]);
		kfree(zmd->map_mblk);
		zmd->map_mblk = NULL;
	}

	/* Release super blocks */
	for (i = 0; i < 2; i++) {
		if (zmd->sb[i].mblk) {
			dmz_free_mblock(zmd, zmd->sb[i].mblk);
			zmd->sb[i].mblk = NULL;
		}
	}

	/* Free cached blocks */
	while (!list_empty(&zmd->mblk_dirty_list)) {
		mblk = list_first_entry(&zmd->mblk_dirty_list,
					struct dmz_mblock, link);
		dmz_dev_warn(zmd->dev, "mblock %llu still in dirty list (ref %u)",
			     (u64)mblk->no, mblk->ref);
		list_del_init(&mblk->link);
		rb_erase(&mblk->node, &zmd->mblk_rbtree);
		dmz_free_mblock(zmd, mblk);
	}

	while (!list_empty(&zmd->mblk_lru_list)) {
		mblk = list_first_entry(&zmd->mblk_lru_list,
					struct dmz_mblock, link);
		list_del_init(&mblk->link);
		rb_erase(&mblk->node, &zmd->mblk_rbtree);
		dmz_free_mblock(zmd, mblk);
	}

	/* Sanity checks: the mblock rbtree should now be empty */
	root = &zmd->mblk_rbtree;
	rbtree_postorder_for_each_entry_safe(mblk, next, root, node) {
		dmz_dev_warn(zmd->dev, "mblock %llu ref %u still in rbtree",
			     (u64)mblk->no, mblk->ref);
		mblk->ref = 0;
		dmz_free_mblock(zmd, mblk);
	}

	/* Free the zone descriptors */
	dmz_drop_zones(zmd);

	mutex_destroy(&zmd->mblk_flush_lock);
	mutex_destroy(&zmd->map_lock);
}

/*
 * Initialize the zoned metadata.
 */
int dmz_ctr_metadata(struct dmz_dev *dev, struct dmz_metadata **metadata,
		     const char *devname)
{
	struct dmz_metadata *zmd;
	unsigned int i;
	struct dm_zone *zone;
	int ret;

	zmd = kzalloc(sizeof(struct dmz_metadata), GFP_KERNEL);
	if (!zmd)
		return -ENOMEM;

	strcpy(zmd->devname, devname);
	zmd->dev = dev;
	zmd->mblk_rbtree = RB_ROOT;
	init_rwsem(&zmd->mblk_sem);
	mutex_init(&zmd->mblk_flush_lock);
	spin_lock_init(&zmd->mblk_lock);
	INIT_LIST_HEAD(&zmd->mblk_lru_list);
	INIT_LIST_HEAD(&zmd->mblk_dirty_list);

	mutex_init(&zmd->map_lock);
	atomic_set(&zmd->unmap_nr_rnd, 0);
	INIT_LIST_HEAD(&zmd->unmap_rnd_list);
	INIT_LIST_HEAD(&zmd->map_rnd_list);

	atomic_set(&zmd->unmap_nr_seq, 0);
	INIT_LIST_HEAD(&zmd->unmap_seq_list);
	INIT_LIST_HEAD(&zmd->map_seq_list);

	atomic_set(&zmd->nr_reserved_seq_zones, 0);
	INIT_LIST_HEAD(&zmd->reserved_seq_zones_list);

	init_waitqueue_head(&zmd->free_wq);

	/* Initialize zone descriptors */
	ret = dmz_init_zones(zmd);
	if (ret)
		goto err;

	/* Get super block */
	ret = dmz_load_sb(zmd);
	if (ret)
		goto err;

	/* Set metadata zones starting from sb_zone */
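	/* Two metadata sets are stored on disk, hence twice nr_meta_zones. */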
	for (i = 0; i < zmd->nr_meta_zones << 1; i++) {
		zone = dmz_get(zmd, zmd->sb[0].zone->id + i);
		if (!dmz_is_rnd(zone)) {
			/* Do not return 0 (success) on this error path */
			ret = -ENXIO;
			goto err;
		}
		set_bit(DMZ_META, &zone->flags);
	}

	/* Load mapping table */
	ret = dmz_load_mapping(zmd);
	if (ret)
		goto err;

	/*
	 * Cache size boundaries: allow at least 2 super blocks, the chunk map
	 * blocks and enough blocks to be able to cache the bitmap blocks of
	 * up to 16 zones when idle (min_nr_mblks). Otherwise, if busy, allow
	 * the cache to add 512 more metadata blocks.
	 */
	zmd->min_nr_mblks = 2 + zmd->nr_map_blocks + zmd->zone_nr_bitmap_blocks * 16;
	zmd->max_nr_mblks = zmd->min_nr_mblks + 512;
	zmd->mblk_shrinker.count_objects = dmz_mblock_shrinker_count;
	zmd->mblk_shrinker.scan_objects = dmz_mblock_shrinker_scan;
	zmd->mblk_shrinker.seeks = DEFAULT_SEEKS;

	/* Metadata cache shrinker */
	ret = register_shrinker(&zmd->mblk_shrinker);
	if (ret) {
		dmz_dev_err(dev, "Register metadata cache shrinker failed");
		goto err;
	}

	dmz_dev_info(dev, "Host-%s zoned block device",
		     bdev_zoned_model(dev->bdev) == BLK_ZONED_HA ?
		     "aware" : "managed");
	dmz_dev_info(dev, "  %llu 512-byte logical sectors",
		     (u64)dev->capacity);
	dmz_dev_info(dev, "  %u zones of %llu 512-byte logical sectors",
		     zmd->nr_zones, (u64)zmd->zone_nr_sectors);
	dmz_dev_info(dev, "  %u metadata zones",
		     zmd->nr_meta_zones * 2);
	dmz_dev_info(dev, "  %u data zones for %u chunks",
		     zmd->nr_data_zones, zmd->nr_chunks);
	dmz_dev_info(dev, "    %u random zones (%u unmapped)",
		     zmd->nr_rnd, atomic_read(&zmd->unmap_nr_rnd));
	dmz_dev_info(dev, "    %u sequential zones (%u unmapped)",
		     zmd->nr_seq, atomic_read(&zmd->unmap_nr_seq));
	dmz_dev_info(dev, "  %u reserved sequential data zones",
		     zmd->nr_reserved_seq);

	dmz_dev_debug(dev, "Format:");
	dmz_dev_debug(dev, "%u metadata blocks per set (%u max cache)",
		      zmd->nr_meta_blocks, zmd->max_nr_mblks);
	dmz_dev_debug(dev, "  %u data zone mapping blocks",
		      zmd->nr_map_blocks);
	dmz_dev_debug(dev, "  %u bitmap blocks",
		      zmd->nr_bitmap_blocks);

	*metadata = zmd;

	return 0;
err:
	dmz_cleanup_metadata(zmd);
	kfree(zmd);
	*metadata = NULL;

	return ret;
}

/*
 * Cleanup the zoned metadata resources and free the metadata.
 */
void dmz_dtr_metadata(struct dmz_metadata *zmd)
{
	unregister_shrinker(&zmd->mblk_shrinker);
	dmz_cleanup_metadata(zmd);
	kfree(zmd);
}

/*
 * Check zone information on resume.
 */
int dmz_resume_metadata(struct dmz_metadata *zmd)
{
	struct dmz_dev *dev = zmd->dev;
	struct dm_zone *zone;
	sector_t wp_block;
	unsigned int i;
	int ret;

	/* Check zones */
	for (i = 0; i < zmd->nr_zones; i++) {
		zone = dmz_get(zmd, i);
		if (!zone) {
			dmz_dev_err(dev, "Unable to get zone %u", i);
			return -EIO;
		}

		wp_block = zone->wp_block;

		ret = dmz_update_zone(zmd, zone);
		if (ret) {
			dmz_dev_err(dev, "Broken zone %u", i);
			return ret;
		}

		if (dmz_is_offline(zone)) {
			dmz_dev_warn(dev, "Zone %u is offline", i);
			continue;
		}

		/* Check write pointer */
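		/*
		 * For conventional (non-sequential) zones the write pointer
		 * is meaningless and reset to 0. For sequential zones, if
		 * the write pointer reported by the device differs from the
		 * one cached before suspend, restore the cached value and
		 * invalidate all blocks from it to the end of the zone.
		 */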
		if (!dmz_is_seq(zone))
			zone->wp_block = 0;
		else if (zone->wp_block != wp_block) {
			dmz_dev_err(dev, "Zone %u: Invalid wp (%llu / %llu)",
				    i, (u64)zone->wp_block, (u64)wp_block);
			zone->wp_block = wp_block;
			dmz_invalidate_blocks(zmd, zone, zone->wp_block,
					      zmd->zone_nr_blocks - zone->wp_block);
		}
	}

	return 0;
}