// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to generic helper functions
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"

int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, int flags,
		struct bio **biop)
{
	struct request_queue *q = bdev_get_queue(bdev);
	struct bio *bio = *biop;
	unsigned int op;
	sector_t bs_mask, part_offset = 0;

	if (bdev_read_only(bdev))
		return -EPERM;

	if (flags & BLKDEV_DISCARD_SECURE) {
		if (!blk_queue_secure_erase(q))
			return -EOPNOTSUPP;
		op = REQ_OP_SECURE_ERASE;
	} else {
		if (!blk_queue_discard(q))
			return -EOPNOTSUPP;
		op = REQ_OP_DISCARD;
	}

	/* In case the discard granularity isn't set by buggy device driver */
	if (WARN_ON_ONCE(!q->limits.discard_granularity)) {
		char dev_name[BDEVNAME_SIZE];

		bdevname(bdev, dev_name);
		pr_err_ratelimited("%s: Error: discard_granularity is 0.\n", dev_name);
		return -EOPNOTSUPP;
	}

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

	if (!nr_sects)
		return -EINVAL;

	/* In case the discard request is in a partition */
	if (bdev_is_partition(bdev))
		part_offset = bdev->bd_start_sect;

	while (nr_sects) {
		sector_t granularity_aligned_lba, req_sects;
		sector_t sector_mapped = sector + part_offset;

		granularity_aligned_lba = round_up(sector_mapped,
				q->limits.discard_granularity >> SECTOR_SHIFT);

		/*
		 * Check whether the discard bio starts at a discard_granularity
		 * aligned LBA:
		 * - If not, set (granularity_aligned_lba - sector_mapped) as the
		 *   bi_size of the first split bio, so that the second bio will
		 *   start at a discard_granularity aligned LBA on the device.
		 * - If it does, use bio_aligned_discard_max_sectors() as the max
		 *   possible bi_size of the first split bio. Then, when this bio
		 *   is split by the device driver, the resulting bios are very
		 *   likely to be aligned to discard_granularity of the device's
		 *   queue.
		 */
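		/*
		 * Worked example (illustrative numbers only, not from the
		 * original source): with a 1 MiB discard_granularity (2048
		 * sectors) and sector_mapped == 3000, the round_up() above
		 * yields granularity_aligned_lba == 4096, so the first bio
		 * covers sectors 3000..4095 and the next iteration starts on
		 * a granularity-aligned LBA.
		 */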
		if (granularity_aligned_lba == sector_mapped)
			req_sects = min_t(sector_t, nr_sects,
					  bio_aligned_discard_max_sectors(q));
		else
			req_sects = min_t(sector_t, nr_sects,
					  granularity_aligned_lba - sector_mapped);

		WARN_ON_ONCE((req_sects << 9) > UINT_MAX);

		bio = blk_next_bio(bio, bdev, 0, op, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio->bi_iter.bi_size = req_sects << 9;
		sector += req_sects;
		nr_sects -= req_sects;

		/*
		 * We can loop for a long time in here, if someone does
		 * full device discards (like mkfs). Be nice and allow
		 * us to schedule out to avoid softlocking if preempt
		 * is disabled.
		 */
		cond_resched();
	}

	*biop = bio;
	return 0;
}
EXPORT_SYMBOL(__blkdev_issue_discard);

/**
 * blkdev_issue_discard - queue a discard
 * @bdev: blockdev to issue discard for
 * @sector: start sector
 * @nr_sects: number of sectors to discard
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @flags: BLKDEV_DISCARD_* flags to control behaviour
 *
 * Description:
 *    Issue a discard request for the sectors in question.
 */
int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
{
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret;

	blk_start_plug(&plug);
	ret = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, flags,
			&bio);
	if (!ret && bio) {
		ret = submit_bio_wait(bio);
		if (ret == -EOPNOTSUPP)
			ret = 0;
		bio_put(bio);
	}
	blk_finish_plug(&plug);

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_discard);
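
/*
 * Usage sketch (illustrative only, not part of the original file): a
 * filesystem or ioctl path would typically call the synchronous wrapper
 * above like this, e.g. to discard 1 MiB (2048 sectors of 512 bytes)
 * starting at sector 2048; the values are hypothetical:
 *
 *	int err = blkdev_issue_discard(bdev, 2048, 2048, GFP_KERNEL, 0);
 *	if (err)
 *		pr_warn("discard failed: %d\n", err);
 *
 * Passing BLKDEV_DISCARD_SECURE in @flags requests a secure erase instead,
 * which fails with -EOPNOTSUPP if the queue does not support it.
 */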

static int __blkdev_issue_write_zeroes(struct block_device *bdev,
		sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
		struct bio **biop, unsigned flags)
{
	struct bio *bio = *biop;
	unsigned int max_write_zeroes_sectors;

	if (bdev_read_only(bdev))
		return -EPERM;

	/* Ensure that max_write_zeroes_sectors doesn't overflow bi_size */
	max_write_zeroes_sectors = bdev_write_zeroes_sectors(bdev);

	if (max_write_zeroes_sectors == 0)
		return -EOPNOTSUPP;

	while (nr_sects) {
		bio = blk_next_bio(bio, bdev, 0, REQ_OP_WRITE_ZEROES, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		if (flags & BLKDEV_ZERO_NOUNMAP)
			bio->bi_opf |= REQ_NOUNMAP;

		if (nr_sects > max_write_zeroes_sectors) {
			bio->bi_iter.bi_size = max_write_zeroes_sectors << 9;
			nr_sects -= max_write_zeroes_sectors;
			sector += max_write_zeroes_sectors;
		} else {
			bio->bi_iter.bi_size = nr_sects << 9;
			nr_sects = 0;
		}
		cond_resched();
	}

	*biop = bio;
	return 0;
}

/*
 * Convert a number of 512B sectors to a number of pages.
 * The result is limited to a number of pages that can fit into a BIO.
 * Also make sure that the result is always at least 1 (page) for the cases
 * where nr_sects is lower than the number of sectors in a page.
 */
static unsigned int __blkdev_sectors_to_bio_pages(sector_t nr_sects)
{
	sector_t pages = DIV_ROUND_UP_SECTOR_T(nr_sects, PAGE_SIZE / 512);

	return min(pages, (sector_t)BIO_MAX_VECS);
}
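
/*
 * Worked example (illustrative, assuming 4 KiB pages so PAGE_SIZE / 512 == 8
 * and BIO_MAX_VECS == 256): nr_sects == 7 rounds up to 1 page, nr_sects == 16
 * maps to 2 pages, and anything above 2048 sectors (1 MiB) is clamped to the
 * 256-page BIO limit.
 */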

static int __blkdev_issue_zero_pages(struct block_device *bdev,
		sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
		struct bio **biop)
{
	struct bio *bio = *biop;
	int bi_size = 0;
	unsigned int sz;

	if (bdev_read_only(bdev))
		return -EPERM;

	while (nr_sects != 0) {
		bio = blk_next_bio(bio, bdev, __blkdev_sectors_to_bio_pages(nr_sects),
				   REQ_OP_WRITE, gfp_mask);
		bio->bi_iter.bi_sector = sector;

		while (nr_sects != 0) {
			sz = min((sector_t) PAGE_SIZE, nr_sects << 9);
			bi_size = bio_add_page(bio, ZERO_PAGE(0), sz, 0);
			nr_sects -= bi_size >> 9;
			sector += bi_size >> 9;
			if (bi_size < sz)
				break;
		}
		cond_resched();
	}

	*biop = bio;
	return 0;
}

/**
 * __blkdev_issue_zeroout - generate a number of zero-filled write bios
 * @bdev: blockdev to issue zeroout for
 * @sector: start sector
 * @nr_sects: number of sectors to write
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @biop: pointer to anchor bio
 * @flags: controls detailed behavior
 *
 * Description:
 *  Zero-fill a block range, either using hardware offload or by explicitly
 *  writing zeroes to the device.
 *
 *  If a device is using logical block provisioning, the underlying space will
 *  not be released if %flags contains BLKDEV_ZERO_NOUNMAP.
 *
 *  If %flags contains BLKDEV_ZERO_NOFALLBACK, the function will return
 *  -EOPNOTSUPP if no explicit hardware offload for zeroing is provided.
 */
int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct bio **biop,
		unsigned flags)
{
	int ret;
	sector_t bs_mask;

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

	ret = __blkdev_issue_write_zeroes(bdev, sector, nr_sects, gfp_mask,
			biop, flags);
	if (ret != -EOPNOTSUPP || (flags & BLKDEV_ZERO_NOFALLBACK))
		return ret;

	return __blkdev_issue_zero_pages(bdev, sector, nr_sects, gfp_mask,
			biop);
}
EXPORT_SYMBOL(__blkdev_issue_zeroout);
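
/*
 * Usage sketch (illustrative only, not part of the original file): the
 * __blkdev_issue_zeroout() variant lets a caller chain several ranges onto
 * one anchor bio and submit them together, roughly as shown below. The
 * ranges and GFP flags here are hypothetical:
 *
 *	struct bio *bio = NULL;
 *	struct blk_plug plug;
 *	int err;
 *
 *	blk_start_plug(&plug);
 *	err = __blkdev_issue_zeroout(bdev, 0, 2048, GFP_KERNEL, &bio, 0);
 *	if (!err)
 *		err = __blkdev_issue_zeroout(bdev, 8192, 2048, GFP_KERNEL,
 *					     &bio, 0);
 *	if (!err && bio) {
 *		err = submit_bio_wait(bio);
 *		bio_put(bio);
 *	}
 *	blk_finish_plug(&plug);
 */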

/**
 * blkdev_issue_zeroout - zero-fill a block range
 * @bdev: blockdev to write
 * @sector: start sector
 * @nr_sects: number of sectors to write
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @flags: controls detailed behavior
 *
 * Description:
 *  Zero-fill a block range, either using hardware offload or by explicitly
 *  writing zeroes to the device.  See __blkdev_issue_zeroout() for the
 *  valid values for %flags.
 */
int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned flags)
{
	int ret = 0;
	sector_t bs_mask;
	struct bio *bio;
	struct blk_plug plug;
	bool try_write_zeroes = !!bdev_write_zeroes_sectors(bdev);

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

retry:
	bio = NULL;
	blk_start_plug(&plug);
	if (try_write_zeroes) {
		ret = __blkdev_issue_write_zeroes(bdev, sector, nr_sects,
						  gfp_mask, &bio, flags);
	} else if (!(flags & BLKDEV_ZERO_NOFALLBACK)) {
		ret = __blkdev_issue_zero_pages(bdev, sector, nr_sects,
						gfp_mask, &bio);
	} else {
		/* No zeroing offload support */
		ret = -EOPNOTSUPP;
	}
	if (ret == 0 && bio) {
		ret = submit_bio_wait(bio);
		bio_put(bio);
	}
	blk_finish_plug(&plug);
	if (ret && try_write_zeroes) {
		if (!(flags & BLKDEV_ZERO_NOFALLBACK)) {
			try_write_zeroes = false;
			goto retry;
		}
		if (!bdev_write_zeroes_sectors(bdev)) {
			/*
			 * Zeroing offload support was indicated, but the
			 * device reported ILLEGAL REQUEST (for some devices
			 * there is no non-destructive way to verify whether
			 * WRITE ZEROES is actually supported).
			 */
			ret = -EOPNOTSUPP;
		}
	}

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_zeroout);
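
/*
 * Usage sketch (illustrative only, not part of the original file): zeroing
 * 4 MiB starting at sector 0 while keeping the blocks allocated, with the
 * values below chosen purely for demonstration:
 *
 *	int err = blkdev_issue_zeroout(bdev, 0, 8192, GFP_KERNEL,
 *				       BLKDEV_ZERO_NOUNMAP);
 *
 * With BLKDEV_ZERO_NOFALLBACK instead, the call returns -EOPNOTSUPP rather
 * than falling back to writing ZERO_PAGE-backed bios when the device offers
 * no WRITE ZEROES offload.
 */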