// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to generic block layer helpers
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"

/*
 * Allocate a new bio with room for @nr_pages bio_vecs and, if @bio is
 * non-NULL, chain @bio behind the new one and submit it. This lets the
 * callers below build an arbitrarily long chain of bios while only ever
 * holding a reference to the most recent one.
 */
struct bio *blk_next_bio(struct bio *bio, unsigned int nr_pages, gfp_t gfp)
{
	struct bio *new = bio_alloc(gfp, nr_pages);

	if (bio) {
		bio_chain(bio, new);
		submit_bio(bio);
	}

	return new;
}

int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, int flags,
		struct bio **biop)
{
	struct request_queue *q = bdev_get_queue(bdev);
	struct bio *bio = *biop;
	unsigned int op;
	sector_t bs_mask;

	if (!q)
		return -ENXIO;

	if (bdev_read_only(bdev))
		return -EPERM;

	if (flags & BLKDEV_DISCARD_SECURE) {
		if (!blk_queue_secure_erase(q))
			return -EOPNOTSUPP;
		op = REQ_OP_SECURE_ERASE;
	} else {
		if (!blk_queue_discard(q))
			return -EOPNOTSUPP;
		op = REQ_OP_DISCARD;
	}

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

	if (!nr_sects)
		return -EINVAL;

	while (nr_sects) {
		unsigned int req_sects = min_t(unsigned int, nr_sects,
				bio_allowed_max_sectors(q));

		bio = blk_next_bio(bio, 0, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio_set_dev(bio, bdev);
		bio_set_op_attrs(bio, op, 0);

		bio->bi_iter.bi_size = req_sects << 9;
		sector += req_sects;
		nr_sects -= req_sects;

		/*
		 * We can loop for a long time in here, if someone does
		 * full device discards (like mkfs). Be nice and allow
		 * us to schedule out to avoid softlocking if preempt
		 * is disabled.
		 */
		cond_resched();
	}

	*biop = bio;
	return 0;
}
EXPORT_SYMBOL(__blkdev_issue_discard);

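/*
 * Illustrative sketch, not part of the original file: a caller can batch
 * several discontiguous ranges into one chained submission by reusing the
 * *biop anchor across calls, then waiting once on the final bio. The two
 * ranges below are hypothetical example values.
 */
static int example_discard_two_ranges(struct block_device *bdev)
{
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret;

	blk_start_plug(&plug);
	ret = __blkdev_issue_discard(bdev, 0, 2048, GFP_KERNEL, 0, &bio);
	if (!ret)
		ret = __blkdev_issue_discard(bdev, 8192, 2048, GFP_KERNEL, 0,
					     &bio);
	if (!ret && bio) {
		ret = submit_bio_wait(bio);	/* waits for the whole chain */
		bio_put(bio);
	}
	blk_finish_plug(&plug);
	return ret;
}
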
/**
 * blkdev_issue_discard - queue a discard
 * @bdev: blockdev to issue discard for
 * @sector: start sector
 * @nr_sects: number of sectors to discard
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @flags: BLKDEV_DISCARD_* flags to control behaviour
 *
 * Description:
 *    Issue a discard request for the sectors in question.
 */
int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
{
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret;

	blk_start_plug(&plug);
	ret = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, flags,
			&bio);
	if (!ret && bio) {
		ret = submit_bio_wait(bio);
		if (ret == -EOPNOTSUPP)
			ret = 0;
		bio_put(bio);
	}
	blk_finish_plug(&plug);

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_discard);
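
/*
 * Illustrative sketch, not part of the original file: convert a byte range
 * to 512B sectors before calling blkdev_issue_discard(). The range must
 * stay aligned to the device's logical block size or the helper returns
 * -EINVAL.
 */
static int example_discard_byte_range(struct block_device *bdev, u64 start,
				      u64 len)
{
	return blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_KERNEL, 0);
}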

/**
 * __blkdev_issue_write_same - generate a number of bios with the same page
 * @bdev: target blockdev
 * @sector: start sector
 * @nr_sects: number of sectors to write
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @page: page containing data to write
 * @biop: pointer to anchor bio
 *
 * Description:
 *    Generate and issue a number of REQ_OP_WRITE_SAME bios that all point
 *    at the same page.
 */
static int __blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct page *page,
		struct bio **biop)
{
	struct request_queue *q = bdev_get_queue(bdev);
	unsigned int max_write_same_sectors;
	struct bio *bio = *biop;
	sector_t bs_mask;

	if (!q)
		return -ENXIO;

	if (bdev_read_only(bdev))
		return -EPERM;

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

	if (!bdev_write_same(bdev))
		return -EOPNOTSUPP;

	/* Ensure that max_write_same_sectors doesn't overflow bi_size */
	max_write_same_sectors = bio_allowed_max_sectors(q);

	while (nr_sects) {
		bio = blk_next_bio(bio, 1, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio_set_dev(bio, bdev);
		bio->bi_vcnt = 1;
		bio->bi_io_vec->bv_page = page;
		bio->bi_io_vec->bv_offset = 0;
		bio->bi_io_vec->bv_len = bdev_logical_block_size(bdev);
		bio_set_op_attrs(bio, REQ_OP_WRITE_SAME, 0);

		if (nr_sects > max_write_same_sectors) {
			bio->bi_iter.bi_size = max_write_same_sectors << 9;
			nr_sects -= max_write_same_sectors;
			sector += max_write_same_sectors;
		} else {
			bio->bi_iter.bi_size = nr_sects << 9;
			nr_sects = 0;
		}
		cond_resched();
	}

	*biop = bio;
	return 0;
}

/**
 * blkdev_issue_write_same - queue a write same operation
 * @bdev: target blockdev
 * @sector: start sector
 * @nr_sects: number of sectors to write
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @page: page containing data
 *
 * Description:
 *    Issue a write same request for the sectors in question.
 */
int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
				sector_t nr_sects, gfp_t gfp_mask,
				struct page *page)
{
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret;

	blk_start_plug(&plug);
	ret = __blkdev_issue_write_same(bdev, sector, nr_sects, gfp_mask, page,
			&bio);
	if (ret == 0 && bio) {
		ret = submit_bio_wait(bio);
		bio_put(bio);
	}
	blk_finish_plug(&plug);
	return ret;
}
EXPORT_SYMBOL(blkdev_issue_write_same);

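/*
 * Illustrative sketch, not part of the original file: replicate one
 * logical block of pattern data across a range using WRITE SAME. The
 * pattern page must stay allocated until submit_bio_wait() returns inside
 * blkdev_issue_write_same(); the 0xA5 fill byte is an arbitrary example.
 */
static int example_write_same_pattern(struct block_device *bdev,
				      sector_t sector, sector_t nr_sects)
{
	struct page *page;
	int ret;

	page = alloc_page(GFP_KERNEL);
	if (!page)
		return -ENOMEM;
	memset(page_address(page), 0xA5, bdev_logical_block_size(bdev));

	ret = blkdev_issue_write_same(bdev, sector, nr_sects, GFP_KERNEL,
				      page);
	__free_page(page);
	return ret;
}
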
/*
 * Zero a range using the device's WRITE ZEROES command, chaining as many
 * bios as needed onto the anchor at @biop. BLKDEV_ZERO_NOUNMAP asks the
 * device to keep the blocks provisioned.
 */
static int __blkdev_issue_write_zeroes(struct block_device *bdev,
		sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
		struct bio **biop, unsigned flags)
{
	struct bio *bio = *biop;
	unsigned int max_write_zeroes_sectors;
	struct request_queue *q = bdev_get_queue(bdev);

	if (!q)
		return -ENXIO;

	if (bdev_read_only(bdev))
		return -EPERM;

	/* Ensure that max_write_zeroes_sectors doesn't overflow bi_size */
	max_write_zeroes_sectors = bdev_write_zeroes_sectors(bdev);

	if (max_write_zeroes_sectors == 0)
		return -EOPNOTSUPP;

	while (nr_sects) {
		bio = blk_next_bio(bio, 0, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio_set_dev(bio, bdev);
		bio->bi_opf = REQ_OP_WRITE_ZEROES;
		if (flags & BLKDEV_ZERO_NOUNMAP)
			bio->bi_opf |= REQ_NOUNMAP;

		if (nr_sects > max_write_zeroes_sectors) {
			bio->bi_iter.bi_size = max_write_zeroes_sectors << 9;
			nr_sects -= max_write_zeroes_sectors;
			sector += max_write_zeroes_sectors;
		} else {
			bio->bi_iter.bi_size = nr_sects << 9;
			nr_sects = 0;
		}
		cond_resched();
	}

	*biop = bio;
	return 0;
}

/*
 * Convert a number of 512B sectors to a number of pages.
 * The result is limited to a number of pages that can fit into a BIO.
 * Also make sure that the result is always at least 1 (page) for the cases
 * where nr_sects is lower than the number of sectors in a page.
 */
static unsigned int __blkdev_sectors_to_bio_pages(sector_t nr_sects)
{
	sector_t pages = DIV_ROUND_UP_SECTOR_T(nr_sects, PAGE_SIZE / 512);

	return min(pages, (sector_t)BIO_MAX_PAGES);
}

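/*
 * Worked example (assuming 4K pages, i.e. 8 sectors per page): 7 sectors
 * round up to 1 page, 9 sectors to 2 pages, and anything at or beyond
 * BIO_MAX_PAGES * 8 sectors is clamped to BIO_MAX_PAGES.
 */
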
/*
 * Fallback path: zero the range with regular writes of the shared zero
 * page, packing as many pages as possible into each bio.
 */
static int __blkdev_issue_zero_pages(struct block_device *bdev,
		sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
		struct bio **biop)
{
	struct request_queue *q = bdev_get_queue(bdev);
	struct bio *bio = *biop;
	int bi_size = 0;
	unsigned int sz;

	if (!q)
		return -ENXIO;

	if (bdev_read_only(bdev))
		return -EPERM;

	while (nr_sects != 0) {
		bio = blk_next_bio(bio, __blkdev_sectors_to_bio_pages(nr_sects),
				   gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio_set_dev(bio, bdev);
		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

		while (nr_sects != 0) {
			sz = min((sector_t) PAGE_SIZE, nr_sects << 9);
			bi_size = bio_add_page(bio, ZERO_PAGE(0), sz, 0);
			nr_sects -= bi_size >> 9;
			sector += bi_size >> 9;
			if (bi_size < sz)
				break;
		}
		cond_resched();
	}

	*biop = bio;
	return 0;
}

/**
 * __blkdev_issue_zeroout - generate a number of zero-filled write bios
 * @bdev: blockdev to issue against
 * @sector: start sector
 * @nr_sects: number of sectors to write
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @biop: pointer to anchor bio
 * @flags: controls detailed behavior
 *
 * Description:
 *  Zero-fill a block range, either using hardware offload or by explicitly
 *  writing zeroes to the device.
 *
 *  If a device is using logical block provisioning, the underlying space will
 *  not be released if %flags contains BLKDEV_ZERO_NOUNMAP.
 *
 *  If %flags contains BLKDEV_ZERO_NOFALLBACK, the function will return
 *  -EOPNOTSUPP if no explicit hardware offload for zeroing is provided.
 */
int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct bio **biop,
		unsigned flags)
{
	int ret;
	sector_t bs_mask;

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

	ret = __blkdev_issue_write_zeroes(bdev, sector, nr_sects, gfp_mask,
			biop, flags);
	if (ret != -EOPNOTSUPP || (flags & BLKDEV_ZERO_NOFALLBACK))
		return ret;

	return __blkdev_issue_zero_pages(bdev, sector, nr_sects, gfp_mask,
					 biop);
}
EXPORT_SYMBOL(__blkdev_issue_zeroout);
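
/*
 * Illustrative sketch, not part of the original file: require the offload
 * path by passing BLKDEV_ZERO_NOFALLBACK, so a device without WRITE ZEROES
 * support makes the call fail with -EOPNOTSUPP instead of falling back to
 * writing zero pages.
 */
static int example_zeroout_offload_only(struct block_device *bdev,
					sector_t sector, sector_t nr_sects)
{
	struct bio *bio = NULL;
	int ret;

	ret = __blkdev_issue_zeroout(bdev, sector, nr_sects, GFP_KERNEL,
				     &bio, BLKDEV_ZERO_NOFALLBACK);
	if (!ret && bio) {
		ret = submit_bio_wait(bio);
		bio_put(bio);
	}
	return ret;
}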

/**
 * blkdev_issue_zeroout - zero-fill a block range
 * @bdev: blockdev to write
 * @sector: start sector
 * @nr_sects: number of sectors to write
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @flags: controls detailed behavior
 *
 * Description:
 *  Zero-fill a block range, either using hardware offload or by explicitly
 *  writing zeroes to the device.  See __blkdev_issue_zeroout() for the
 *  valid values for %flags.
 */
int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned flags)
{
	int ret = 0;
	sector_t bs_mask;
	struct bio *bio;
	struct blk_plug plug;
	bool try_write_zeroes = !!bdev_write_zeroes_sectors(bdev);

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

retry:
	bio = NULL;
	blk_start_plug(&plug);
	if (try_write_zeroes) {
		ret = __blkdev_issue_write_zeroes(bdev, sector, nr_sects,
						  gfp_mask, &bio, flags);
	} else if (!(flags & BLKDEV_ZERO_NOFALLBACK)) {
		ret = __blkdev_issue_zero_pages(bdev, sector, nr_sects,
						gfp_mask, &bio);
	} else {
		/* No zeroing offload support */
		ret = -EOPNOTSUPP;
	}
	if (ret == 0 && bio) {
		ret = submit_bio_wait(bio);
		bio_put(bio);
	}
	blk_finish_plug(&plug);
	if (ret && try_write_zeroes) {
		if (!(flags & BLKDEV_ZERO_NOFALLBACK)) {
			try_write_zeroes = false;
			goto retry;
		}
		if (!bdev_write_zeroes_sectors(bdev)) {
			/*
			 * Zeroing offload support was indicated, but the
			 * device reported ILLEGAL REQUEST (for some devices
			 * there is no non-destructive way to verify whether
			 * WRITE ZEROES is actually supported).
			 */
			ret = -EOPNOTSUPP;
		}
	}

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_zeroout);
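
/*
 * Illustrative sketch, not part of the original file: zero a range while
 * keeping it provisioned on thinly provisioned devices. If WRITE ZEROES is
 * unsupported or fails, blkdev_issue_zeroout() transparently retries with
 * the zero-page fallback above.
 */
static int example_zero_keep_provisioned(struct block_device *bdev,
					 sector_t sector, sector_t nr_sects)
{
	return blkdev_issue_zeroout(bdev, sector, nr_sects, GFP_KERNEL,
				    BLKDEV_ZERO_NOUNMAP);
}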