// SPDX-License-Identifier: GPL-2.0
/*
 * Zoned block device handling
 *
 * Copyright (c) 2015, Hannes Reinecke
 * Copyright (c) 2015, SUSE Linux GmbH
 *
 * Copyright (c) 2016, Damien Le Moal
 * Copyright (c) 2016, Western Digital
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/rbtree.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/sched/mm.h>

#include "blk.h"

static inline sector_t blk_zone_start(struct request_queue *q,
				      sector_t sector)
{
	sector_t zone_mask = blk_queue_zone_sectors(q) - 1;

	return sector & ~zone_mask;
}

/*
 * Return true if a request is a write request that needs zone write locking.
 */
bool blk_req_needs_zone_write_lock(struct request *rq)
{
	if (!rq->q->seq_zones_wlock)
		return false;

	if (blk_rq_is_passthrough(rq))
		return false;

	switch (req_op(rq)) {
	case REQ_OP_WRITE_ZEROES:
	case REQ_OP_WRITE_SAME:
	case REQ_OP_WRITE:
		return blk_rq_zone_is_seq(rq);
	default:
		return false;
	}
}
EXPORT_SYMBOL_GPL(blk_req_needs_zone_write_lock);

void __blk_req_zone_write_lock(struct request *rq)
{
	if (WARN_ON_ONCE(test_and_set_bit(blk_rq_zone_no(rq),
					  rq->q->seq_zones_wlock)))
		return;

	WARN_ON_ONCE(rq->rq_flags & RQF_ZONE_WRITE_LOCKED);
	rq->rq_flags |= RQF_ZONE_WRITE_LOCKED;
}
EXPORT_SYMBOL_GPL(__blk_req_zone_write_lock);

void __blk_req_zone_write_unlock(struct request *rq)
{
	rq->rq_flags &= ~RQF_ZONE_WRITE_LOCKED;
	if (rq->q->seq_zones_wlock)
		WARN_ON_ONCE(!test_and_clear_bit(blk_rq_zone_no(rq),
						 rq->q->seq_zones_wlock));
}
EXPORT_SYMBOL_GPL(__blk_req_zone_write_unlock);

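/*
 * Usage sketch (illustrative, not part of the original file): schedulers
 * and drivers that serialize writes to sequential zones pair these helpers
 * around request dispatch and completion; the mq-deadline I/O scheduler is
 * the typical in-tree user. The surrounding dispatch logic is elided here.
 *
 *	// when a write request is sent to the device
 *	if (blk_req_needs_zone_write_lock(rq))
 *		__blk_req_zone_write_lock(rq);
 *
 *	// when that request completes or is requeued
 *	if (rq->rq_flags & RQF_ZONE_WRITE_LOCKED)
 *		__blk_req_zone_write_unlock(rq);
 */
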
static inline unsigned int __blkdev_nr_zones(struct request_queue *q,
					     sector_t nr_sectors)
{
	sector_t zone_sectors = blk_queue_zone_sectors(q);

	return (nr_sectors + zone_sectors - 1) >> ilog2(zone_sectors);
}

/**
 * blkdev_nr_zones - Get number of zones
 * @bdev:	Target block device
 *
 * Description:
 *    Return the total number of zones of a zoned block device.
 *    For a regular block device, the number of zones is always 0.
 */
unsigned int blkdev_nr_zones(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (!blk_queue_is_zoned(q))
		return 0;

	return __blkdev_nr_zones(q, bdev->bd_part->nr_sects);
}
EXPORT_SYMBOL_GPL(blkdev_nr_zones);

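/*
 * Worked example (illustrative, not part of the original file): with
 * 256 MiB zones and 512-byte sectors, blk_queue_zone_sectors(q) is 524288
 * and ilog2(zone_sectors) is 19. A 1 TiB device spans 2147483648 sectors,
 * so __blkdev_nr_zones() returns 2147483648 >> 19 == 4096 zones. The
 * "+ zone_sectors - 1" rounding means a smaller last zone still counts
 * as one full zone.
 */
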
/*
 * Check that a zone report belongs to this partition, and if so, fix its
 * start sector and write pointer and return true. Return false otherwise.
 */
static bool blkdev_report_zone(struct block_device *bdev, struct blk_zone *rep)
{
	sector_t offset = get_start_sect(bdev);

	if (rep->start < offset)
		return false;

	rep->start -= offset;
	if (rep->start + rep->len > bdev->bd_part->nr_sects)
		return false;

	if (rep->type == BLK_ZONE_TYPE_CONVENTIONAL)
		rep->wp = rep->start + rep->len;
	else
		rep->wp -= offset;
	return true;
}

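/*
 * Worked example (illustrative, not part of the original file): for a
 * partition starting at sector 1048576 of the device, a sequential zone
 * reported at start 1572864 with write pointer 1573000 is remapped to
 * start 524288 and write pointer 524424, both now relative to the
 * partition. A zone starting before the partition, or extending past its
 * end, makes blkdev_report_zone() return false and is not reported to
 * the caller.
 */
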
static int blk_report_zones(struct gendisk *disk, sector_t sector,
			    struct blk_zone *zones, unsigned int *nr_zones)
{
	struct request_queue *q = disk->queue;
	unsigned int z = 0, n, nrz = *nr_zones;
	sector_t capacity = get_capacity(disk);
	int ret;

	while (z < nrz && sector < capacity) {
		n = nrz - z;
		ret = disk->fops->report_zones(disk, sector, &zones[z], &n);
		if (ret)
			return ret;
		if (!n)
			break;
		sector += blk_queue_zone_sectors(q) * n;
		z += n;
	}

	WARN_ON(z > *nr_zones);
	*nr_zones = z;

	return 0;
}

/**
 * blkdev_report_zones - Get zone information
 * @bdev:	Target block device
 * @sector:	Sector from which to report zones
 * @zones:	Array of zone structures where to return the zone information
 * @nr_zones:	Number of zone structures in the zone array
 *
 * Description:
 *    Get zone information starting from the zone containing @sector.
 *    The number of zones reported may be less than the number requested
 *    by @nr_zones. The number of zones actually reported is returned
 *    in @nr_zones.
 *    The caller must use memalloc_noXX_save/restore() calls to control
 *    memory allocations done within this function (zone array and command
 *    buffer allocation by the device driver).
 */
int blkdev_report_zones(struct block_device *bdev, sector_t sector,
			struct blk_zone *zones, unsigned int *nr_zones)
{
	struct request_queue *q = bdev_get_queue(bdev);
	unsigned int i, nrz;
	int ret;

	if (!blk_queue_is_zoned(q))
		return -EOPNOTSUPP;

	/*
	 * A block device that advertised itself as zoned must have a
	 * report_zones method. If it does not have one defined, the device
	 * driver has a bug. So warn about that.
	 */
	if (WARN_ON_ONCE(!bdev->bd_disk->fops->report_zones))
		return -EOPNOTSUPP;

	if (!*nr_zones || sector >= bdev->bd_part->nr_sects) {
		*nr_zones = 0;
		return 0;
	}

	nrz = min(*nr_zones,
		  __blkdev_nr_zones(q, bdev->bd_part->nr_sects - sector));
	ret = blk_report_zones(bdev->bd_disk, get_start_sect(bdev) + sector,
			       zones, &nrz);
	if (ret)
		return ret;

	for (i = 0; i < nrz; i++) {
		if (!blkdev_report_zone(bdev, zones))
			break;
		zones++;
	}

	*nr_zones = i;

	return 0;
}
EXPORT_SYMBOL_GPL(blkdev_report_zones);

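/*
 * Minimal usage sketch (illustrative, not part of the original file): an
 * in-kernel caller reporting up to 16 zones starting at a given sector.
 * The helper name and the fixed on-stack array are hypothetical; as noted
 * in the description above, driver-side allocations done under
 * ->report_zones are constrained with memalloc_noio_save()/restore().
 *
 *	static int example_dump_zones(struct block_device *bdev, sector_t sector)
 *	{
 *		struct blk_zone zones[16];
 *		unsigned int i, nr_zones = ARRAY_SIZE(zones);
 *		unsigned int noio_flag;
 *		int ret;
 *
 *		noio_flag = memalloc_noio_save();
 *		ret = blkdev_report_zones(bdev, sector, zones, &nr_zones);
 *		memalloc_noio_restore(noio_flag);
 *		if (ret)
 *			return ret;
 *
 *		for (i = 0; i < nr_zones; i++)
 *			pr_info("zone %u: start %llu len %llu wp %llu\n", i,
 *				(unsigned long long)zones[i].start,
 *				(unsigned long long)zones[i].len,
 *				(unsigned long long)zones[i].wp);
 *		return 0;
 *	}
 */
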
/**
 * blkdev_reset_zones - Reset zones write pointer
 * @bdev:	Target block device
 * @sector:	Start sector of the first zone to reset
 * @nr_sectors:	Number of sectors, at least the length of one zone
 * @gfp_mask:	Memory allocation flags (for bio_alloc)
 *
 * Description:
 *    Reset the write pointer of the zones contained in the range
 *    @sector..@sector+@nr_sectors. Specifying the entire disk sector range
 *    is valid, but the specified range should not contain conventional zones.
 */
int blkdev_reset_zones(struct block_device *bdev,
		       sector_t sector, sector_t nr_sectors,
		       gfp_t gfp_mask)
{
	struct request_queue *q = bdev_get_queue(bdev);
	sector_t zone_sectors;
	sector_t end_sector = sector + nr_sectors;
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret;

	if (!blk_queue_is_zoned(q))
		return -EOPNOTSUPP;

	if (bdev_read_only(bdev))
		return -EPERM;

	if (!nr_sectors || end_sector > bdev->bd_part->nr_sects)
		/* Out of range */
		return -EINVAL;

	/* Check alignment (handle a possibly smaller last zone) */
	zone_sectors = blk_queue_zone_sectors(q);
	if (sector & (zone_sectors - 1))
		return -EINVAL;

	if ((nr_sectors & (zone_sectors - 1)) &&
	    end_sector != bdev->bd_part->nr_sects)
		return -EINVAL;

	blk_start_plug(&plug);
	while (sector < end_sector) {

		bio = blk_next_bio(bio, 0, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio_set_dev(bio, bdev);
		bio_set_op_attrs(bio, REQ_OP_ZONE_RESET, 0);

		sector += zone_sectors;

		/* This may take a while, so be nice to others */
		cond_resched();

	}

	ret = submit_bio_wait(bio);
	bio_put(bio);

	blk_finish_plug(&plug);

	return ret;
}
EXPORT_SYMBOL_GPL(blkdev_reset_zones);

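/*
 * Minimal usage sketch (illustrative, not part of the original file): an
 * in-kernel user such as a filesystem resetting every zone of the device
 * before re-initializing it. The whole-device range is only valid here if
 * the device exposes no conventional zones.
 *
 *	ret = blkdev_reset_zones(bdev, 0, bdev->bd_part->nr_sects,
 *				 GFP_KERNEL);
 *	if (ret)
 *		pr_err("%s: zone reset failed %d\n", __func__, ret);
 */
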
/*
 * BLKREPORTZONE ioctl processing.
 * Called from blkdev_ioctl.
 */
int blkdev_report_zones_ioctl(struct block_device *bdev, fmode_t mode,
			      unsigned int cmd, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct request_queue *q;
	struct blk_zone_report rep;
	struct blk_zone *zones;
	int ret;

	if (!argp)
		return -EINVAL;

	q = bdev_get_queue(bdev);
	if (!q)
		return -ENXIO;

	if (!blk_queue_is_zoned(q))
		return -ENOTTY;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	if (copy_from_user(&rep, argp, sizeof(struct blk_zone_report)))
		return -EFAULT;

	if (!rep.nr_zones)
		return -EINVAL;

	rep.nr_zones = min(blkdev_nr_zones(bdev), rep.nr_zones);

	zones = kvmalloc_array(rep.nr_zones, sizeof(struct blk_zone),
			       GFP_KERNEL | __GFP_ZERO);
	if (!zones)
		return -ENOMEM;

	ret = blkdev_report_zones(bdev, rep.sector, zones, &rep.nr_zones);
	if (ret)
		goto out;

	if (copy_to_user(argp, &rep, sizeof(struct blk_zone_report))) {
		ret = -EFAULT;
		goto out;
	}

	if (rep.nr_zones) {
		if (copy_to_user(argp + sizeof(struct blk_zone_report), zones,
				 sizeof(struct blk_zone) * rep.nr_zones))
			ret = -EFAULT;
	}

out:
	kvfree(zones);

	return ret;
}
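/*
 * Userspace counterpart (illustrative sketch, not part of the original
 * file): BLKREPORTZONE takes a struct blk_zone_report header immediately
 * followed by the zone array, both defined in <linux/blkzoned.h>. Error
 * handling is abbreviated.
 *
 *	unsigned int nr = 128;
 *	struct blk_zone_report *rep;
 *
 *	rep = calloc(1, sizeof(*rep) + nr * sizeof(struct blk_zone));
 *	rep->sector = 0;
 *	rep->nr_zones = nr;
 *	if (ioctl(fd, BLKREPORTZONE, rep) == 0)
 *		printf("%u zones reported\n", rep->nr_zones);
 *	free(rep);
 */
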
/*
 * BLKRESETZONE ioctl processing.
 * Called from blkdev_ioctl.
 */
int blkdev_reset_zones_ioctl(struct block_device *bdev, fmode_t mode,
			     unsigned int cmd, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	struct request_queue *q;
	struct blk_zone_range zrange;

	if (!argp)
		return -EINVAL;

	q = bdev_get_queue(bdev);
	if (!q)
		return -ENXIO;

	if (!blk_queue_is_zoned(q))
		return -ENOTTY;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	if (!(mode & FMODE_WRITE))
		return -EBADF;

	if (copy_from_user(&zrange, argp, sizeof(struct blk_zone_range)))
		return -EFAULT;

	return blkdev_reset_zones(bdev, zrange.sector, zrange.nr_sectors,
				  GFP_KERNEL);
}
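/*
 * Userspace counterpart (illustrative sketch, not part of the original
 * file): BLKRESETZONE takes a struct blk_zone_range from
 * <linux/blkzoned.h> and requires a file descriptor opened for writing,
 * matching the FMODE_WRITE check above. Here a single zone at sector 0 is
 * reset; zone_sectors would typically come from a prior zone report or
 * from the queue/chunk_sectors sysfs attribute.
 *
 *	struct blk_zone_range range = {
 *		.sector		= 0,
 *		.nr_sectors	= zone_sectors,
 *	};
 *
 *	if (ioctl(fd, BLKRESETZONE, &range) < 0)
 *		perror("BLKRESETZONE");
 */
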
static inline unsigned long *blk_alloc_zone_bitmap(int node,
						   unsigned int nr_zones)
{
	return kcalloc_node(BITS_TO_LONGS(nr_zones), sizeof(unsigned long),
			    GFP_NOIO, node);
}

/*
 * Allocate an array of struct blk_zone to get nr_zones zone information.
 * The allocated array may be smaller than nr_zones.
 */
static struct blk_zone *blk_alloc_zones(unsigned int *nr_zones)
{
	struct blk_zone *zones;
	size_t nrz = min(*nr_zones, BLK_ZONED_REPORT_MAX_ZONES);

	/*
	 * GFP_KERNEL here is meaningless as the caller task context has
	 * the PF_MEMALLOC_NOIO flag set in blk_revalidate_disk_zones()
	 * with memalloc_noio_save().
	 */
	zones = kvcalloc(nrz, sizeof(struct blk_zone), GFP_KERNEL);
	if (!zones) {
		*nr_zones = 0;
		return NULL;
	}

	*nr_zones = nrz;

	return zones;
}

void blk_queue_free_zone_bitmaps(struct request_queue *q)
{
	kfree(q->seq_zones_bitmap);
	q->seq_zones_bitmap = NULL;
	kfree(q->seq_zones_wlock);
	q->seq_zones_wlock = NULL;
}

/**
 * blk_revalidate_disk_zones - (re)allocate and initialize zone bitmaps
 * @disk:	Target disk
 *
 * Helper function for low-level device drivers to (re)allocate and initialize
 * a disk request queue's zone bitmaps. This function should normally be called
 * within the disk ->revalidate method. For BIO based queues, no zone bitmap
 * is allocated.
 */
int blk_revalidate_disk_zones(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;
	unsigned int nr_zones = __blkdev_nr_zones(q, get_capacity(disk));
	unsigned long *seq_zones_wlock = NULL, *seq_zones_bitmap = NULL;
	unsigned int i, rep_nr_zones = 0, z = 0, nrz;
	struct blk_zone *zones = NULL;
	unsigned int noio_flag;
	sector_t sector = 0;
	int ret = 0;

	/*
	 * BIO based queues do not use a scheduler so only q->nr_zones
	 * needs to be updated so that the sysfs exposed value is correct.
	 */
	if (!queue_is_mq(q)) {
		q->nr_zones = nr_zones;
		return 0;
	}

	/*
	 * Ensure that all memory allocations in this context are done as
	 * if GFP_NOIO was specified.
	 */
	noio_flag = memalloc_noio_save();

	if (!blk_queue_is_zoned(q) || !nr_zones) {
		nr_zones = 0;
		goto update;
	}

	/* Allocate bitmaps */
	ret = -ENOMEM;
	seq_zones_wlock = blk_alloc_zone_bitmap(q->node, nr_zones);
	if (!seq_zones_wlock)
		goto out;
	seq_zones_bitmap = blk_alloc_zone_bitmap(q->node, nr_zones);
	if (!seq_zones_bitmap)
		goto out;

	/* Get zone information and initialize seq_zones_bitmap */
	rep_nr_zones = nr_zones;
	zones = blk_alloc_zones(&rep_nr_zones);
	if (!zones)
		goto out;

	while (z < nr_zones) {
		nrz = min(nr_zones - z, rep_nr_zones);
		ret = blk_report_zones(disk, sector, zones, &nrz);
		if (ret)
			goto out;
		if (!nrz)
			break;
		for (i = 0; i < nrz; i++) {
			if (zones[i].type != BLK_ZONE_TYPE_CONVENTIONAL)
				set_bit(z, seq_zones_bitmap);
			z++;
		}
		sector += nrz * blk_queue_zone_sectors(q);
	}

	if (WARN_ON(z != nr_zones)) {
		ret = -EIO;
		goto out;
	}

update:
	/*
	 * Install the new bitmaps, making sure the queue is stopped and
	 * all I/Os are completed (i.e. a scheduler is not referencing the
	 * bitmaps).
	 */
	blk_mq_freeze_queue(q);
	q->nr_zones = nr_zones;
	swap(q->seq_zones_wlock, seq_zones_wlock);
	swap(q->seq_zones_bitmap, seq_zones_bitmap);
	blk_mq_unfreeze_queue(q);

out:
	memalloc_noio_restore(noio_flag);

	kvfree(zones);
	kfree(seq_zones_wlock);
	kfree(seq_zones_bitmap);

	if (ret) {
		pr_warn("%s: failed to revalidate zones\n", disk->disk_name);
		blk_mq_freeze_queue(q);
		blk_queue_free_zone_bitmaps(q);
		blk_mq_unfreeze_queue(q);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(blk_revalidate_disk_zones);
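/*
 * Usage sketch (illustrative, not part of the original file): a blk-mq
 * based driver for a zoned device would typically call this helper from
 * its disk revalidation path, after the capacity and zone model have been
 * set up. The function and helper names below are hypothetical.
 *
 *	static int example_revalidate_disk(struct gendisk *disk)
 *	{
 *		sector_t capacity = example_read_capacity(disk);
 *
 *		set_capacity(disk, capacity);
 *		return blk_revalidate_disk_zones(disk);
 *	}
 */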