// SPDX-License-Identifier: GPL-2.0
/*
 * Zoned block device handling
 *
 * Copyright (c) 2015, Hannes Reinecke
 * Copyright (c) 2015, SUSE Linux GmbH
 *
 * Copyright (c) 2016, Damien Le Moal
 * Copyright (c) 2016, Western Digital
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/rbtree.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/sched/mm.h>

#include "blk.h"

static inline sector_t blk_zone_start(struct request_queue *q,
				      sector_t sector)
{
	sector_t zone_mask = blk_queue_zone_sectors(q) - 1;

	return sector & ~zone_mask;
}
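
/*
 * Worked example for illustration (not part of the original file): zone
 * sizes are powers of two, so with 256 MiB zones (zone_sectors = 524288
 * in 512 B sectors) blk_zone_start() just masks off the in-zone offset,
 * e.g. 1000000 & ~(524288 - 1) = 524288, the start of the second zone.
 */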

/*
 * Return true if a request is a write request that needs zone write locking.
 */
bool blk_req_needs_zone_write_lock(struct request *rq)
{
	if (!rq->q->seq_zones_wlock)
		return false;

	if (blk_rq_is_passthrough(rq))
		return false;

	switch (req_op(rq)) {
	case REQ_OP_WRITE_ZEROES:
	case REQ_OP_WRITE_SAME:
	case REQ_OP_WRITE:
		return blk_rq_zone_is_seq(rq);
	default:
		return false;
	}
}
EXPORT_SYMBOL_GPL(blk_req_needs_zone_write_lock);

void __blk_req_zone_write_lock(struct request *rq)
{
	if (WARN_ON_ONCE(test_and_set_bit(blk_rq_zone_no(rq),
					  rq->q->seq_zones_wlock)))
		return;

	WARN_ON_ONCE(rq->rq_flags & RQF_ZONE_WRITE_LOCKED);
	rq->rq_flags |= RQF_ZONE_WRITE_LOCKED;
}
EXPORT_SYMBOL_GPL(__blk_req_zone_write_lock);

void __blk_req_zone_write_unlock(struct request *rq)
{
	rq->rq_flags &= ~RQF_ZONE_WRITE_LOCKED;
	if (rq->q->seq_zones_wlock)
		WARN_ON_ONCE(!test_and_clear_bit(blk_rq_zone_no(rq),
						 rq->q->seq_zones_wlock));
}
EXPORT_SYMBOL_GPL(__blk_req_zone_write_unlock);
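
/*
 * Illustrative sketch (an assumption, not from this file) of how a dispatch
 * path pairs these helpers through the blk_req_zone_write_lock() /
 * blk_req_zone_write_unlock() wrappers declared in <linux/blkdev.h>:
 *
 *	if (blk_req_needs_zone_write_lock(rq))
 *		blk_req_zone_write_lock(rq);	// sets RQF_ZONE_WRITE_LOCKED
 *	// ...dispatch rq to the device...
 *	blk_req_zone_write_unlock(rq);		// on completion or requeue
 *
 * At most one write per sequential zone is then in flight at any time,
 * preserving the write ordering those zones require.
 */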

static inline unsigned int __blkdev_nr_zones(struct request_queue *q,
					     sector_t nr_sectors)
{
	sector_t zone_sectors = blk_queue_zone_sectors(q);

	return (nr_sectors + zone_sectors - 1) >> ilog2(zone_sectors);
}

/**
 * blkdev_nr_zones - Get number of zones
 * @bdev: Target block device
 *
 * Description:
 *    Return the total number of zones of a zoned block device.
 *    For a regular block device, the number of zones is always 0.
 */
unsigned int blkdev_nr_zones(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (!blk_queue_is_zoned(q))
		return 0;

	return __blkdev_nr_zones(q, get_capacity(bdev->bd_disk));
}
EXPORT_SYMBOL_GPL(blkdev_nr_zones);
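
/*
 * Minimal usage sketch (assumed caller context, bdev obtained elsewhere):
 * sizing a zone array before asking for a zone report.
 *
 *	unsigned int nr_zones = blkdev_nr_zones(bdev);
 *
 *	if (!nr_zones)
 *		return -ENOTTY;	// not a zoned device
 */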

/**
 * blkdev_report_zones - Get zones information
 * @bdev: Target block device
 * @sector: Sector from which to report zones
 * @zones: Array of zone structures where to return the zones information
 * @nr_zones: Number of zone structures in the zone array
 *
 * Description:
 *    Get zone information starting from the zone containing @sector.
 *    The number of zone information reported may be less than the number
 *    requested by @nr_zones. The number of zones actually reported is
 *    returned in @nr_zones.
 *    The caller must use memalloc_noXX_save/restore() calls to control
 *    memory allocations done within this function (zone array and command
 *    buffer allocation by the device driver).
 */
int blkdev_report_zones(struct block_device *bdev, sector_t sector,
			struct blk_zone *zones, unsigned int *nr_zones)
{
	struct request_queue *q = bdev_get_queue(bdev);
	struct gendisk *disk = bdev->bd_disk;
	sector_t capacity = get_capacity(disk);

	if (!blk_queue_is_zoned(q))
		return -EOPNOTSUPP;

	/*
	 * A block device that advertised itself as zoned must have a
	 * report_zones method. If it does not have one defined, the device
	 * driver has a bug. So warn about that.
	 */
	if (WARN_ON_ONCE(!disk->fops->report_zones))
		return -EOPNOTSUPP;

	if (!*nr_zones || sector >= capacity) {
		*nr_zones = 0;
		return 0;
	}

	*nr_zones = min(*nr_zones, __blkdev_nr_zones(q, capacity - sector));

	return disk->fops->report_zones(disk, sector, zones, nr_zones);
}
EXPORT_SYMBOL_GPL(blkdev_report_zones);
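
/*
 * Hedged usage sketch (not from this file): an in-kernel caller reporting
 * the first few zones of a device while honoring the memalloc_noXX
 * requirement stated above. Error handling is elided.
 *
 *	struct blk_zone zones[8];
 *	unsigned int nr_zones = 8;
 *	unsigned int noio_flag = memalloc_noio_save();
 *	int ret = blkdev_report_zones(bdev, 0, zones, &nr_zones);
 *
 *	memalloc_noio_restore(noio_flag);
 *	// on success, zones[0..nr_zones - 1] are valid
 */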

static inline bool blkdev_allow_reset_all_zones(struct block_device *bdev,
						sector_t sector,
						sector_t nr_sectors)
{
	if (!blk_queue_zone_resetall(bdev_get_queue(bdev)))
		return false;

	/*
	 * REQ_OP_ZONE_RESET_ALL can be executed only if the zone range to
	 * reset covers the entire disk.
	 */
	return !sector && nr_sectors == get_capacity(bdev->bd_disk);
}

/**
 * blkdev_zone_mgmt - Execute a zone management operation on a range of zones
 * @bdev: Target block device
 * @op: Operation to be performed on the zones
 * @sector: Start sector of the first zone to operate on
 * @nr_sectors: Number of sectors, should be at least the length of one zone and
 *		must be zone size aligned.
 * @gfp_mask: Memory allocation flags (for bio_alloc)
 *
 * Description:
 *    Perform the specified operation on the range of zones specified by
 *    @sector..@sector+@nr_sectors. Specifying the entire disk sector range
 *    is valid, but the specified range should not contain conventional zones.
 *    The operation to execute on each zone can be a zone reset, open, close
 *    or finish request.
 */
int blkdev_zone_mgmt(struct block_device *bdev, enum req_opf op,
		     sector_t sector, sector_t nr_sectors,
		     gfp_t gfp_mask)
{
	struct request_queue *q = bdev_get_queue(bdev);
	sector_t zone_sectors = blk_queue_zone_sectors(q);
	sector_t capacity = get_capacity(bdev->bd_disk);
	sector_t end_sector = sector + nr_sectors;
	struct bio *bio = NULL;
	int ret;

	if (!blk_queue_is_zoned(q))
		return -EOPNOTSUPP;

	if (bdev_read_only(bdev))
		return -EPERM;

	if (!op_is_zone_mgmt(op))
		return -EOPNOTSUPP;

	if (!nr_sectors || end_sector > capacity)
		/* Out of range */
		return -EINVAL;

	/* Check alignment (handle a possibly smaller last zone) */
	if (sector & (zone_sectors - 1))
		return -EINVAL;

	if ((nr_sectors & (zone_sectors - 1)) && end_sector != capacity)
		return -EINVAL;

	while (sector < end_sector) {
		bio = blk_next_bio(bio, 0, gfp_mask);
		bio_set_dev(bio, bdev);

		/*
		 * Special case for the zone reset operation that resets all
		 * zones. This is useful for applications like mkfs.
		 */
		if (op == REQ_OP_ZONE_RESET &&
		    blkdev_allow_reset_all_zones(bdev, sector, nr_sectors)) {
			bio->bi_opf = REQ_OP_ZONE_RESET_ALL;
			break;
		}

		bio->bi_opf = op;
		bio->bi_iter.bi_sector = sector;
		sector += zone_sectors;

		/* This may take a while, so be nice to others */
		cond_resched();
	}

	ret = submit_bio_wait(bio);
	bio_put(bio);

	return ret;
}
EXPORT_SYMBOL_GPL(blkdev_zone_mgmt);
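
/*
 * Illustrative sketch (assumed caller context): resetting the single zone
 * that contains a given sector. blk_queue_zone_sectors() gives the zone
 * length, and the masking mirrors blk_zone_start() above.
 *
 *	sector_t zone_sectors = blk_queue_zone_sectors(bdev_get_queue(bdev));
 *	sector_t zstart = sector & ~(zone_sectors - 1);
 *	int ret = blkdev_zone_mgmt(bdev, REQ_OP_ZONE_RESET, zstart,
 *				   zone_sectors, GFP_KERNEL);
 */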

/*
 * BLKREPORTZONE ioctl processing.
 * Called from blkdev_ioctl.
 */
int blkdev_report_zones_ioctl(struct block_device *bdev, fmode_t mode,
			      unsigned int cmd, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct request_queue *q;
	struct blk_zone_report rep;
	struct blk_zone *zones;
	int ret;

	if (!argp)
		return -EINVAL;

	q = bdev_get_queue(bdev);
	if (!q)
		return -ENXIO;

	if (!blk_queue_is_zoned(q))
		return -ENOTTY;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	if (copy_from_user(&rep, argp, sizeof(struct blk_zone_report)))
		return -EFAULT;

	if (!rep.nr_zones)
		return -EINVAL;

	rep.nr_zones = min(blkdev_nr_zones(bdev), rep.nr_zones);

	zones = kvmalloc_array(rep.nr_zones, sizeof(struct blk_zone),
			       GFP_KERNEL | __GFP_ZERO);
	if (!zones)
		return -ENOMEM;

	ret = blkdev_report_zones(bdev, rep.sector, zones, &rep.nr_zones);
	if (ret)
		goto out;

	if (copy_to_user(argp, &rep, sizeof(struct blk_zone_report))) {
		ret = -EFAULT;
		goto out;
	}

	if (rep.nr_zones) {
		if (copy_to_user(argp + sizeof(struct blk_zone_report), zones,
				 sizeof(struct blk_zone) * rep.nr_zones))
			ret = -EFAULT;
	}

out:
	kvfree(zones);

	return ret;
}
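
/*
 * Userspace counterpart, sketched under assumptions (device path and zone
 * count are examples only): the ioctl expects a struct blk_zone_report
 * immediately followed by the zone array, matching the two copy_to_user()
 * calls above.
 *
 *	#include <linux/blkzoned.h>
 *	#include <sys/ioctl.h>
 *	#include <fcntl.h>
 *	#include <stdlib.h>
 *
 *	size_t bufsz = sizeof(struct blk_zone_report) +
 *		       16 * sizeof(struct blk_zone);
 *	struct blk_zone_report *rep = calloc(1, bufsz);
 *	int fd = open("/dev/nullb0", O_RDONLY);	// hypothetical device
 *
 *	rep->sector = 0;
 *	rep->nr_zones = 16;
 *	if (ioctl(fd, BLKREPORTZONE, rep) == 0) {
 *		struct blk_zone *zones = (struct blk_zone *)(rep + 1);
 *		// rep->nr_zones entries of zones[] were filled in
 *	}
 */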

/*
 * BLKRESETZONE, BLKOPENZONE, BLKCLOSEZONE and BLKFINISHZONE ioctl processing.
 * Called from blkdev_ioctl.
 */
int blkdev_zone_mgmt_ioctl(struct block_device *bdev, fmode_t mode,
			   unsigned int cmd, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct request_queue *q;
	struct blk_zone_range zrange;
	enum req_opf op;

	if (!argp)
		return -EINVAL;

	q = bdev_get_queue(bdev);
	if (!q)
		return -ENXIO;

	if (!blk_queue_is_zoned(q))
		return -ENOTTY;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	if (!(mode & FMODE_WRITE))
		return -EBADF;

	if (copy_from_user(&zrange, argp, sizeof(struct blk_zone_range)))
		return -EFAULT;

	switch (cmd) {
	case BLKRESETZONE:
		op = REQ_OP_ZONE_RESET;
		break;
	case BLKOPENZONE:
		op = REQ_OP_ZONE_OPEN;
		break;
	case BLKCLOSEZONE:
		op = REQ_OP_ZONE_CLOSE;
		break;
	case BLKFINISHZONE:
		op = REQ_OP_ZONE_FINISH;
		break;
	default:
		return -ENOTTY;
	}

	return blkdev_zone_mgmt(bdev, op, zrange.sector, zrange.nr_sectors,
				GFP_KERNEL);
}
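
/*
 * Userspace counterpart, sketched under assumptions (device path and range
 * are examples): resetting the first zone of a device with 256 MiB zones.
 * BLKOPENZONE, BLKCLOSEZONE and BLKFINISHZONE take the same argument. Note
 * the device must be opened for writing to pass the FMODE_WRITE check above.
 *
 *	#include <linux/blkzoned.h>
 *	#include <sys/ioctl.h>
 *	#include <fcntl.h>
 *
 *	struct blk_zone_range zrange = {
 *		.sector = 0,
 *		.nr_sectors = 524288,	// one zone, in 512 B sectors
 *	};
 *	int fd = open("/dev/nullb0", O_RDWR);	// hypothetical device
 *
 *	ioctl(fd, BLKRESETZONE, &zrange);
 */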

static inline unsigned long *blk_alloc_zone_bitmap(int node,
						   unsigned int nr_zones)
{
	return kcalloc_node(BITS_TO_LONGS(nr_zones), sizeof(unsigned long),
			    GFP_NOIO, node);
}

/*
 * Allocate an array of struct blk_zone to get nr_zones zone information.
 * The allocated array may be smaller than nr_zones.
 */
static struct blk_zone *blk_alloc_zones(unsigned int *nr_zones)
{
	struct blk_zone *zones;
	size_t nrz = min(*nr_zones, BLK_ZONED_REPORT_MAX_ZONES);

	/*
	 * GFP_KERNEL here is meaningless as the caller task context has
	 * the PF_MEMALLOC_NOIO flag set in blk_revalidate_disk_zones()
	 * with memalloc_noio_save().
	 */
	zones = kvcalloc(nrz, sizeof(struct blk_zone), GFP_KERNEL);
	if (!zones) {
		*nr_zones = 0;
		return NULL;
	}

	*nr_zones = nrz;

	return zones;
}

void blk_queue_free_zone_bitmaps(struct request_queue *q)
{
	kfree(q->seq_zones_bitmap);
	q->seq_zones_bitmap = NULL;
	kfree(q->seq_zones_wlock);
	q->seq_zones_wlock = NULL;
}

/*
 * Helper function to check the validity of zones of a zoned block device.
 */
static bool blk_zone_valid(struct gendisk *disk, struct blk_zone *zone,
			   sector_t *sector)
{
	struct request_queue *q = disk->queue;
	sector_t zone_sectors = blk_queue_zone_sectors(q);
	sector_t capacity = get_capacity(disk);

	/*
	 * All zones must have the same size, with the possible exception of
	 * a smaller last zone.
	 */
	if (zone->start + zone_sectors < capacity &&
	    zone->len != zone_sectors) {
		pr_warn("%s: Invalid zoned device with non constant zone size\n",
			disk->disk_name);
		return false;
	}

	if (zone->start + zone->len >= capacity &&
	    zone->len > zone_sectors) {
		pr_warn("%s: Invalid zoned device with larger last zone size\n",
			disk->disk_name);
		return false;
	}

	/* Check for holes in the zone report */
	if (zone->start != *sector) {
		pr_warn("%s: Zone gap at sectors %llu..%llu\n",
			disk->disk_name, *sector, zone->start);
		return false;
	}

	/* Check zone type */
	switch (zone->type) {
	case BLK_ZONE_TYPE_CONVENTIONAL:
	case BLK_ZONE_TYPE_SEQWRITE_REQ:
	case BLK_ZONE_TYPE_SEQWRITE_PREF:
		break;
	default:
		pr_warn("%s: Invalid zone type 0x%x at sectors %llu\n",
			disk->disk_name, (int)zone->type, zone->start);
		return false;
	}

	*sector += zone->len;

	return true;
}

/**
 * blk_revalidate_disk_zones - (re)allocate and initialize zone bitmaps
 * @disk: Target disk
 *
 * Helper function for low-level device drivers to (re)allocate and initialize
 * a disk request queue's zone bitmaps. This function should normally be called
 * within the disk ->revalidate method. For BIO based queues, no zone bitmap
 * is allocated.
 */
int blk_revalidate_disk_zones(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;
	unsigned int nr_zones = __blkdev_nr_zones(q, get_capacity(disk));
	unsigned long *seq_zones_wlock = NULL, *seq_zones_bitmap = NULL;
	unsigned int i, rep_nr_zones = 0, z = 0, nrz;
	struct blk_zone *zones = NULL;
	unsigned int noio_flag;
	sector_t sector = 0;
	int ret = 0;

	if (WARN_ON_ONCE(!blk_queue_is_zoned(q)))
		return -EIO;

	/*
	 * BIO based queues do not use a scheduler so only q->nr_zones
	 * needs to be updated so that the sysfs exposed value is correct.
	 */
	if (!queue_is_mq(q)) {
		q->nr_zones = nr_zones;
		return 0;
	}

	/*
	 * Ensure that all memory allocations in this context are done as
	 * if GFP_NOIO was specified.
	 */
	noio_flag = memalloc_noio_save();

	if (!nr_zones)
		goto update;

	/* Allocate bitmaps */
	ret = -ENOMEM;
	seq_zones_wlock = blk_alloc_zone_bitmap(q->node, nr_zones);
	if (!seq_zones_wlock)
		goto out;
	seq_zones_bitmap = blk_alloc_zone_bitmap(q->node, nr_zones);
	if (!seq_zones_bitmap)
		goto out;

	/*
	 * Get zone information to check the zones and initialize
	 * seq_zones_bitmap.
	 */
	rep_nr_zones = nr_zones;
	zones = blk_alloc_zones(&rep_nr_zones);
	if (!zones)
		goto out;

	while (z < nr_zones) {
		nrz = min(nr_zones - z, rep_nr_zones);
		ret = disk->fops->report_zones(disk, sector, zones, &nrz);
		if (ret)
			goto out;
		if (!nrz)
			break;
		for (i = 0; i < nrz; i++) {
			if (!blk_zone_valid(disk, &zones[i], &sector)) {
				ret = -ENODEV;
				goto out;
			}
			if (zones[i].type != BLK_ZONE_TYPE_CONVENTIONAL)
				set_bit(z, seq_zones_bitmap);
			z++;
		}
	}

	if (WARN_ON(z != nr_zones)) {
		ret = -EIO;
		goto out;
	}

update:
	/*
	 * Install the new bitmaps, making sure the queue is stopped and
	 * all I/Os are completed (i.e. a scheduler is not referencing the
	 * bitmaps).
	 */
	blk_mq_freeze_queue(q);
	q->nr_zones = nr_zones;
	swap(q->seq_zones_wlock, seq_zones_wlock);
	swap(q->seq_zones_bitmap, seq_zones_bitmap);
	blk_mq_unfreeze_queue(q);

out:
	memalloc_noio_restore(noio_flag);

	kvfree(zones);
	kfree(seq_zones_wlock);
	kfree(seq_zones_bitmap);

	if (ret) {
		pr_warn("%s: failed to revalidate zones\n", disk->disk_name);
		blk_mq_freeze_queue(q);
		blk_queue_free_zone_bitmaps(q);
		blk_mq_unfreeze_queue(q);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(blk_revalidate_disk_zones);
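
/*
 * Driver-side usage sketch (assumed context, e.g. a revalidate path in a
 * zoned block driver): after the disk capacity or zone layout may have
 * changed, rebuild the zone bitmaps before accepting new I/O.
 *
 *	if (blk_queue_is_zoned(disk->queue)) {
 *		int ret = blk_revalidate_disk_zones(disk);
 *
 *		if (ret)
 *			return ret;	// invalid zone report or -ENOMEM
 *	}
 */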