block: pass a gendisk to blk_queue_clear_zone_settings
[linux-block.git] / block / blk-zoned.c
CommitLineData
3dcf60bc 1// SPDX-License-Identifier: GPL-2.0
6a0cb1bc
HR
2/*
3 * Zoned block device handling
4 *
5 * Copyright (c) 2015, Hannes Reinecke
6 * Copyright (c) 2015, SUSE Linux GmbH
7 *
8 * Copyright (c) 2016, Damien Le Moal
9 * Copyright (c) 2016, Western Digital
10 */
11
12#include <linux/kernel.h>
13#include <linux/module.h>
14#include <linux/rbtree.h>
15#include <linux/blkdev.h>
bf505456 16#include <linux/blk-mq.h>
26202928
DLM
17#include <linux/mm.h>
18#include <linux/vmalloc.h>
bd976e52 19#include <linux/sched/mm.h>
6a0cb1bc 20
a2d6b3a2
DLM
21#include "blk.h"
22
02694e86
CK
/*
 * Map each BLK_ZONE_COND_XXX zone condition value to its name string,
 * e.g. zone_cond_name[BLK_ZONE_COND_EMPTY] == "EMPTY". Entries not
 * listed here are NULL, which blk_zone_cond_str() treats as unknown.
 */
#define ZONE_COND_NAME(name) [BLK_ZONE_COND_##name] = #name
static const char *const zone_cond_name[] = {
	ZONE_COND_NAME(NOT_WP),
	ZONE_COND_NAME(EMPTY),
	ZONE_COND_NAME(IMP_OPEN),
	ZONE_COND_NAME(EXP_OPEN),
	ZONE_COND_NAME(CLOSED),
	ZONE_COND_NAME(READONLY),
	ZONE_COND_NAME(FULL),
	ZONE_COND_NAME(OFFLINE),
};
#undef ZONE_COND_NAME
35
36/**
37 * blk_zone_cond_str - Return string XXX in BLK_ZONE_COND_XXX.
38 * @zone_cond: BLK_ZONE_COND_XXX.
39 *
40 * Description: Centralize block layer function to convert BLK_ZONE_COND_XXX
41 * into string format. Useful in the debugging and tracing zone conditions. For
42 * invalid BLK_ZONE_COND_XXX it returns string "UNKNOWN".
43 */
44const char *blk_zone_cond_str(enum blk_zone_cond zone_cond)
45{
46 static const char *zone_cond_str = "UNKNOWN";
47
48 if (zone_cond < ARRAY_SIZE(zone_cond_name) && zone_cond_name[zone_cond])
49 zone_cond_str = zone_cond_name[zone_cond];
50
51 return zone_cond_str;
52}
53EXPORT_SYMBOL_GPL(blk_zone_cond_str);
54
6cc77e9c
CH
55/*
56 * Return true if a request is a write requests that needs zone write locking.
57 */
58bool blk_req_needs_zone_write_lock(struct request *rq)
59{
60 if (!rq->q->seq_zones_wlock)
61 return false;
62
63 if (blk_rq_is_passthrough(rq))
64 return false;
65
66 switch (req_op(rq)) {
67 case REQ_OP_WRITE_ZEROES:
6cc77e9c
CH
68 case REQ_OP_WRITE:
69 return blk_rq_zone_is_seq(rq);
70 default:
71 return false;
72 }
73}
74EXPORT_SYMBOL_GPL(blk_req_needs_zone_write_lock);
75
1392d370
JT
76bool blk_req_zone_write_trylock(struct request *rq)
77{
78 unsigned int zno = blk_rq_zone_no(rq);
79
80 if (test_and_set_bit(zno, rq->q->seq_zones_wlock))
81 return false;
82
83 WARN_ON_ONCE(rq->rq_flags & RQF_ZONE_WRITE_LOCKED);
84 rq->rq_flags |= RQF_ZONE_WRITE_LOCKED;
85
86 return true;
87}
88EXPORT_SYMBOL_GPL(blk_req_zone_write_trylock);
89
6cc77e9c
CH
/*
 * Write lock the target zone of @rq. The zone must not already be locked:
 * a set bit here indicates a dispatch bug, so warn once and return without
 * marking the request as holding the lock.
 */
void __blk_req_zone_write_lock(struct request *rq)
{
	if (WARN_ON_ONCE(test_and_set_bit(blk_rq_zone_no(rq),
					  rq->q->seq_zones_wlock)))
		return;

	WARN_ON_ONCE(rq->rq_flags & RQF_ZONE_WRITE_LOCKED);
	rq->rq_flags |= RQF_ZONE_WRITE_LOCKED;
}
EXPORT_SYMBOL_GPL(__blk_req_zone_write_lock);
100
/*
 * Release the zone write lock held by @rq and clear the request flag.
 * The bit must have been set; warn once if it was not.
 */
void __blk_req_zone_write_unlock(struct request *rq)
{
	rq->rq_flags &= ~RQF_ZONE_WRITE_LOCKED;
	/* The bitmap may have been freed by a concurrent revalidation. */
	if (rq->q->seq_zones_wlock)
		WARN_ON_ONCE(!test_and_clear_bit(blk_rq_zone_no(rq),
						 rq->q->seq_zones_wlock));
}
EXPORT_SYMBOL_GPL(__blk_req_zone_write_unlock);
109
a91e1380
DLM
110/**
111 * blkdev_nr_zones - Get number of zones
9b38bb4b 112 * @disk: Target gendisk
a91e1380 113 *
9b38bb4b
CH
114 * Return the total number of zones of a zoned block device. For a block
115 * device without zone capabilities, the number of zones is always 0.
a91e1380 116 */
9b38bb4b 117unsigned int blkdev_nr_zones(struct gendisk *disk)
a91e1380 118{
9b38bb4b 119 sector_t zone_sectors = blk_queue_zone_sectors(disk->queue);
a91e1380 120
9b38bb4b 121 if (!blk_queue_is_zoned(disk->queue))
a91e1380 122 return 0;
9b38bb4b 123 return (get_capacity(disk) + zone_sectors - 1) >> ilog2(zone_sectors);
a91e1380
DLM
124}
125EXPORT_SYMBOL_GPL(blkdev_nr_zones);
126
6a0cb1bc
HR
/**
 * blkdev_report_zones - Get zones information
 * @bdev: Target block device
 * @sector: Sector from which to report zones
 * @nr_zones: Maximum number of zones to report
 * @cb: Callback function called for each reported zone
 * @data: Private data for the callback
 *
 * Description:
 *    Get zone information starting from the zone containing @sector for at most
 *    @nr_zones, and call @cb for each zone reported by the device.
 *    To report all zones in a device starting from @sector, the BLK_ALL_ZONES
 *    constant can be passed to @nr_zones.
 *    Returns the number of zones reported by the device, or a negative errno
 *    value in case of failure.
 *
 *    Note: The caller must use memalloc_noXX_save/restore() calls to control
 *    memory allocations done within this function.
 */
int blkdev_report_zones(struct block_device *bdev, sector_t sector,
			unsigned int nr_zones, report_zones_cb cb, void *data)
{
	struct gendisk *disk = bdev->bd_disk;
	sector_t capacity = get_capacity(disk);

	/* Only zoned devices providing a report method can be reported on. */
	if (!bdev_is_zoned(bdev) || WARN_ON_ONCE(!disk->fops->report_zones))
		return -EOPNOTSUPP;

	/* Nothing to report for an empty request or past the capacity. */
	if (!nr_zones || sector >= capacity)
		return 0;

	return disk->fops->report_zones(disk, sector, nr_zones, cb, data);
}
EXPORT_SYMBOL_GPL(blkdev_report_zones);
161
1ee533ec
DLM
/*
 * Allocate a zero-filled bitmap with one bit per zone on @node. GFP_NOIO
 * is used so the allocation cannot recurse back into the block layer.
 */
static inline unsigned long *blk_alloc_zone_bitmap(int node,
						   unsigned int nr_zones)
{
	return kcalloc_node(BITS_TO_LONGS(nr_zones), sizeof(unsigned long),
			    GFP_NOIO, node);
}
6e33dbf2 168
1ee533ec
DLM
169static int blk_zone_need_reset_cb(struct blk_zone *zone, unsigned int idx,
170 void *data)
171{
6e33dbf2 172 /*
1ee533ec
DLM
173 * For an all-zones reset, ignore conventional, empty, read-only
174 * and offline zones.
6e33dbf2 175 */
1ee533ec
DLM
176 switch (zone->cond) {
177 case BLK_ZONE_COND_NOT_WP:
178 case BLK_ZONE_COND_EMPTY:
179 case BLK_ZONE_COND_READONLY:
180 case BLK_ZONE_COND_OFFLINE:
181 return 0;
182 default:
183 set_bit(idx, (unsigned long *)data);
184 return 0;
185 }
186}
187
/*
 * Emulate REQ_OP_ZONE_RESET_ALL for devices that do not support the
 * command natively: report all zones once to learn which ones need a
 * reset, then submit a chain of per-zone REQ_OP_ZONE_RESET BIOs.
 */
static int blkdev_zone_reset_all_emulated(struct block_device *bdev,
					  gfp_t gfp_mask)
{
	struct request_queue *q = bdev_get_queue(bdev);
	sector_t capacity = get_capacity(bdev->bd_disk);
	sector_t zone_sectors = blk_queue_zone_sectors(q);
	unsigned long *need_reset;
	struct bio *bio = NULL;
	sector_t sector = 0;
	int ret;

	need_reset = blk_alloc_zone_bitmap(q->node, q->nr_zones);
	if (!need_reset)
		return -ENOMEM;

	/* Set a bit in need_reset for every zone that is not already clean. */
	ret = bdev->bd_disk->fops->report_zones(bdev->bd_disk, 0,
				q->nr_zones, blk_zone_need_reset_cb,
				need_reset);
	if (ret < 0)
		goto out_free_need_reset;

	ret = 0;
	while (sector < capacity) {
		if (!test_bit(blk_queue_zone_no(q, sector), need_reset)) {
			sector += zone_sectors;
			continue;
		}

		/* Chain a reset BIO for this zone onto the previous ones. */
		bio = blk_next_bio(bio, bdev, 0, REQ_OP_ZONE_RESET | REQ_SYNC,
				   gfp_mask);
		bio->bi_iter.bi_sector = sector;
		sector += zone_sectors;

		/* This may take a while, so be nice to others */
		cond_resched();
	}

	/* bio is NULL when no zone needed a reset; ret stays 0 then. */
	if (bio) {
		ret = submit_bio_wait(bio);
		bio_put(bio);
	}

out_free_need_reset:
	kfree(need_reset);
	return ret;
}
234
/*
 * Reset all zones with a single native REQ_OP_ZONE_RESET_ALL BIO. The BIO
 * carries no data, so it can live on the stack. @gfp_mask is unused here;
 * it is kept for symmetry with the emulated variant.
 */
static int blkdev_zone_reset_all(struct block_device *bdev, gfp_t gfp_mask)
{
	struct bio bio;

	bio_init(&bio, bdev, NULL, 0, REQ_OP_ZONE_RESET_ALL | REQ_SYNC);
	return submit_bio_wait(&bio);
}
242
/**
 * blkdev_zone_mgmt - Execute a zone management operation on a range of zones
 * @bdev: Target block device
 * @op: Operation to be performed on the zones
 * @sector: Start sector of the first zone to operate on
 * @nr_sectors: Number of sectors, should be at least the length of one zone and
 *		must be zone size aligned.
 * @gfp_mask: Memory allocation flags (for bio_alloc)
 *
 * Description:
 *    Perform the specified operation on the range of zones specified by
 *    @sector..@sector+@nr_sectors. Specifying the entire disk sector range
 *    is valid, but the specified range should not contain conventional zones.
 *    The operation to execute on each zone can be a zone reset, open, close
 *    or finish request.
 */
int blkdev_zone_mgmt(struct block_device *bdev, enum req_opf op,
		     sector_t sector, sector_t nr_sectors,
		     gfp_t gfp_mask)
{
	struct request_queue *q = bdev_get_queue(bdev);
	sector_t zone_sectors = blk_queue_zone_sectors(q);
	sector_t capacity = get_capacity(bdev->bd_disk);
	sector_t end_sector = sector + nr_sectors;
	struct bio *bio = NULL;
	int ret = 0;

	if (!bdev_is_zoned(bdev))
		return -EOPNOTSUPP;

	if (bdev_read_only(bdev))
		return -EPERM;

	if (!op_is_zone_mgmt(op))
		return -EOPNOTSUPP;

	if (end_sector <= sector || end_sector > capacity)
		/* Out of range */
		return -EINVAL;

	/* Check alignment (handle eventual smaller last zone) */
	if (sector & (zone_sectors - 1))
		return -EINVAL;

	if ((nr_sectors & (zone_sectors - 1)) && end_sector != capacity)
		return -EINVAL;

	/*
	 * In the case of a zone reset operation over all zones,
	 * REQ_OP_ZONE_RESET_ALL can be used with devices supporting this
	 * command. For other devices, we emulate this command behavior by
	 * identifying the zones needing a reset.
	 */
	if (op == REQ_OP_ZONE_RESET && sector == 0 && nr_sectors == capacity) {
		if (!blk_queue_zone_resetall(q))
			return blkdev_zone_reset_all_emulated(bdev, gfp_mask);
		return blkdev_zone_reset_all(bdev, gfp_mask);
	}

	/* Chain one zone management BIO per zone in the range. */
	while (sector < end_sector) {
		bio = blk_next_bio(bio, bdev, 0, op | REQ_SYNC, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		sector += zone_sectors;

		/* This may take a while, so be nice to others */
		cond_resched();
	}

	/* The range checks above guarantee at least one chained BIO here. */
	ret = submit_bio_wait(bio);
	bio_put(bio);

	return ret;
}
EXPORT_SYMBOL_GPL(blkdev_zone_mgmt);
3ed05a98 317
d4100351
CH
318struct zone_report_args {
319 struct blk_zone __user *zones;
320};
321
322static int blkdev_copy_zone_to_user(struct blk_zone *zone, unsigned int idx,
323 void *data)
324{
325 struct zone_report_args *args = data;
326
327 if (copy_to_user(&args->zones[idx], zone, sizeof(struct blk_zone)))
328 return -EFAULT;
329 return 0;
330}
331
/*
 * BLKREPORTZONE ioctl processing.
 * Called from blkdev_ioctl.
 */
int blkdev_report_zones_ioctl(struct block_device *bdev, fmode_t mode,
			      unsigned int cmd, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct zone_report_args args;
	struct request_queue *q;
	struct blk_zone_report rep;
	int ret;

	if (!argp)
		return -EINVAL;

	q = bdev_get_queue(bdev);
	if (!q)
		return -ENXIO;

	if (!bdev_is_zoned(bdev))
		return -ENOTTY;

	if (copy_from_user(&rep, argp, sizeof(struct blk_zone_report)))
		return -EFAULT;

	if (!rep.nr_zones)
		return -EINVAL;

	/* The zone array immediately follows the report header in user memory. */
	args.zones = argp + sizeof(struct blk_zone_report);
	ret = blkdev_report_zones(bdev, rep.sector, rep.nr_zones,
				  blkdev_copy_zone_to_user, &args);
	if (ret < 0)
		return ret;

	/* Tell user space how many zones were actually reported. */
	rep.nr_zones = ret;
	rep.flags = BLK_ZONE_REP_CAPACITY;
	if (copy_to_user(argp, &rep, sizeof(struct blk_zone_report)))
		return -EFAULT;
	return 0;
}
373
e5113505
SK
374static int blkdev_truncate_zone_range(struct block_device *bdev, fmode_t mode,
375 const struct blk_zone_range *zrange)
376{
377 loff_t start, end;
378
379 if (zrange->sector + zrange->nr_sectors <= zrange->sector ||
380 zrange->sector + zrange->nr_sectors > get_capacity(bdev->bd_disk))
381 /* Out of range */
382 return -EINVAL;
383
384 start = zrange->sector << SECTOR_SHIFT;
385 end = ((zrange->sector + zrange->nr_sectors) << SECTOR_SHIFT) - 1;
386
387 return truncate_bdev_range(bdev, mode, start, end);
388}
389
/*
 * BLKRESETZONE, BLKOPENZONE, BLKCLOSEZONE and BLKFINISHZONE ioctl processing.
 * Called from blkdev_ioctl.
 */
int blkdev_zone_mgmt_ioctl(struct block_device *bdev, fmode_t mode,
			   unsigned int cmd, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct request_queue *q;
	struct blk_zone_range zrange;
	enum req_opf op;
	int ret;

	if (!argp)
		return -EINVAL;

	q = bdev_get_queue(bdev);
	if (!q)
		return -ENXIO;

	if (!bdev_is_zoned(bdev))
		return -ENOTTY;

	if (!(mode & FMODE_WRITE))
		return -EBADF;

	if (copy_from_user(&zrange, argp, sizeof(struct blk_zone_range)))
		return -EFAULT;

	switch (cmd) {
	case BLKRESETZONE:
		op = REQ_OP_ZONE_RESET;

		/* Invalidate the page cache, including dirty pages. */
		filemap_invalidate_lock(bdev->bd_inode->i_mapping);
		ret = blkdev_truncate_zone_range(bdev, mode, &zrange);
		if (ret)
			goto fail;
		break;
	case BLKOPENZONE:
		op = REQ_OP_ZONE_OPEN;
		break;
	case BLKCLOSEZONE:
		op = REQ_OP_ZONE_CLOSE;
		break;
	case BLKFINISHZONE:
		op = REQ_OP_ZONE_FINISH;
		break;
	default:
		return -ENOTTY;
	}

	ret = blkdev_zone_mgmt(bdev, op, zrange.sector, zrange.nr_sectors,
			       GFP_KERNEL);

	/*
	 * The invalidate lock is taken only for BLKRESETZONE, so it must be
	 * released only in that case, whether we got here via the goto or
	 * by falling through after blkdev_zone_mgmt().
	 */
fail:
	if (cmd == BLKRESETZONE)
		filemap_invalidate_unlock(bdev->bd_inode->i_mapping);

	return ret;
}
bf505456 451
bf505456
DLM
/*
 * Free the conventional zone bitmap and the sequential zone write lock
 * bitmap of @q, clearing the pointers so they can be reallocated by a
 * later zone revalidation.
 */
void blk_queue_free_zone_bitmaps(struct request_queue *q)
{
	kfree(q->conv_zones_bitmap);
	q->conv_zones_bitmap = NULL;
	kfree(q->seq_zones_wlock);
	q->seq_zones_wlock = NULL;
}
459
d4100351
CH
/*
 * Working state for blk_revalidate_disk_zones(): the new bitmaps and zone
 * geometry are accumulated here and only installed on the queue once the
 * whole zone report has been validated.
 */
struct blk_revalidate_zone_args {
	struct gendisk *disk;			/* disk being revalidated */
	unsigned long *conv_zones_bitmap;	/* bit set per conventional zone */
	unsigned long *seq_zones_wlock;		/* write lock bitmap, seq zones */
	unsigned int nr_zones;			/* total number of zones */
	sector_t zone_sectors;			/* zone size, in 512B sectors */
	sector_t sector;			/* expected start of next zone */
};
468
d9dd7308
DLM
/*
 * Helper function to check the validity of zones of a zoned block device.
 */
static int blk_revalidate_zone_cb(struct blk_zone *zone, unsigned int idx,
				  void *data)
{
	struct blk_revalidate_zone_args *args = data;
	struct gendisk *disk = args->disk;
	struct request_queue *q = disk->queue;
	sector_t capacity = get_capacity(disk);

	/*
	 * All zones must have the same size, with the exception on an eventual
	 * smaller last zone.
	 */
	if (zone->start == 0) {
		/* The first zone fixes the zone size for the whole device. */
		if (zone->len == 0 || !is_power_of_2(zone->len)) {
			pr_warn("%s: Invalid zoned device with non power of two zone size (%llu)\n",
				disk->disk_name, zone->len);
			return -ENODEV;
		}

		args->zone_sectors = zone->len;
		args->nr_zones = (capacity + zone->len - 1) >> ilog2(zone->len);
	} else if (zone->start + args->zone_sectors < capacity) {
		/* Any zone but the last must match the first zone's size. */
		if (zone->len != args->zone_sectors) {
			pr_warn("%s: Invalid zoned device with non constant zone size\n",
				disk->disk_name);
			return -ENODEV;
		}
	} else {
		/* The last zone may be smaller, but never larger. */
		if (zone->len > args->zone_sectors) {
			pr_warn("%s: Invalid zoned device with larger last zone size\n",
				disk->disk_name);
			return -ENODEV;
		}
	}

	/* Check for holes in the zone report */
	if (zone->start != args->sector) {
		pr_warn("%s: Zone gap at sectors %llu..%llu\n",
			disk->disk_name, args->sector, zone->start);
		return -ENODEV;
	}

	/* Check zone type (bitmaps are allocated lazily on first use). */
	switch (zone->type) {
	case BLK_ZONE_TYPE_CONVENTIONAL:
		if (!args->conv_zones_bitmap) {
			args->conv_zones_bitmap =
				blk_alloc_zone_bitmap(q->node, args->nr_zones);
			if (!args->conv_zones_bitmap)
				return -ENOMEM;
		}
		set_bit(idx, args->conv_zones_bitmap);
		break;
	case BLK_ZONE_TYPE_SEQWRITE_REQ:
	case BLK_ZONE_TYPE_SEQWRITE_PREF:
		if (!args->seq_zones_wlock) {
			args->seq_zones_wlock =
				blk_alloc_zone_bitmap(q->node, args->nr_zones);
			if (!args->seq_zones_wlock)
				return -ENOMEM;
		}
		break;
	default:
		pr_warn("%s: Invalid zone type 0x%x at sectors %llu\n",
			disk->disk_name, (int)zone->type, zone->start);
		return -ENODEV;
	}

	/* Remember where the next zone must start to detect gaps. */
	args->sector += zone->len;
	return 0;
}
543
bf505456
DLM
/**
 * blk_revalidate_disk_zones - (re)allocate and initialize zone bitmaps
 * @disk:	Target disk
 * @update_driver_data:	Callback to update driver data on the frozen disk
 *
 * Helper function for low-level device drivers to (re) allocate and initialize
 * a disk request queue zone bitmaps. This functions should normally be called
 * within the disk ->revalidate method for blk-mq based drivers. For BIO based
 * drivers only q->nr_zones needs to be updated so that the sysfs exposed value
 * is correct.
 * If the @update_driver_data callback function is not NULL, the callback is
 * executed with the device request queue frozen after all zones have been
 * checked.
 */
int blk_revalidate_disk_zones(struct gendisk *disk,
			      void (*update_driver_data)(struct gendisk *disk))
{
	struct request_queue *q = disk->queue;
	struct blk_revalidate_zone_args args = {
		.disk		= disk,
	};
	unsigned int noio_flag;
	int ret;

	if (WARN_ON_ONCE(!blk_queue_is_zoned(q)))
		return -EIO;
	if (WARN_ON_ONCE(!queue_is_mq(q)))
		return -EIO;

	if (!get_capacity(disk))
		return -EIO;

	/*
	 * Ensure that all memory allocations in this context are done as if
	 * GFP_NOIO was specified.
	 */
	noio_flag = memalloc_noio_save();
	ret = disk->fops->report_zones(disk, 0, UINT_MAX,
				       blk_revalidate_zone_cb, &args);
	if (!ret) {
		pr_warn("%s: No zones reported\n", disk->disk_name);
		ret = -ENODEV;
	}
	memalloc_noio_restore(noio_flag);

	/*
	 * If zones where reported, make sure that the entire disk capacity
	 * has been checked.
	 */
	if (ret > 0 && args.sector != get_capacity(disk)) {
		pr_warn("%s: Missing zones from sector %llu\n",
			disk->disk_name, args.sector);
		ret = -ENODEV;
	}

	/*
	 * Install the new bitmaps and update nr_zones only once the queue is
	 * stopped and all I/Os are completed (i.e. a scheduler is not
	 * referencing the bitmaps).
	 */
	blk_mq_freeze_queue(q);
	if (ret > 0) {
		blk_queue_chunk_sectors(q, args.zone_sectors);
		q->nr_zones = args.nr_zones;
		/* swap() leaves the old bitmaps in args for freeing below. */
		swap(q->seq_zones_wlock, args.seq_zones_wlock);
		swap(q->conv_zones_bitmap, args.conv_zones_bitmap);
		if (update_driver_data)
			update_driver_data(disk);
		ret = 0;
	} else {
		pr_warn("%s: failed to revalidate zones\n", disk->disk_name);
		blk_queue_free_zone_bitmaps(q);
	}
	blk_mq_unfreeze_queue(q);

	/* Free the old bitmaps on success, or the unused new ones on failure. */
	kfree(args.seq_zones_wlock);
	kfree(args.conv_zones_bitmap);
	return ret;
}
EXPORT_SYMBOL_GPL(blk_revalidate_disk_zones);
508aebb8 624
/*
 * Clear all zone related settings of @disk's request queue: free the zone
 * bitmaps and reset every zone limit and feature flag to its non-zoned
 * default. The queue is frozen so that no in-flight I/O can observe a
 * half-cleared configuration. NOTE(review): caller context is not visible
 * here — presumably invoked when a device stops reporting itself as zoned;
 * confirm against the callers.
 */
void disk_clear_zone_settings(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;

	blk_mq_freeze_queue(q);

	blk_queue_free_zone_bitmaps(q);
	blk_queue_flag_clear(QUEUE_FLAG_ZONE_RESETALL, q);
	q->required_elevator_features &= ~ELEVATOR_F_ZBD_SEQ_WRITE;
	q->nr_zones = 0;
	q->max_open_zones = 0;
	q->max_active_zones = 0;
	q->limits.chunk_sectors = 0;
	q->limits.zone_write_granularity = 0;
	q->limits.max_zone_append_sectors = 0;

	blk_mq_unfreeze_queue(q);
}