// SPDX-License-Identifier: GPL-2.0
/*
 * Generic block layer helper functions.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"
struct bio *blk_next_bio(struct bio *bio, unsigned int nr_pages, gfp_t gfp)
{
	struct bio *new = bio_alloc(gfp, nr_pages);

	if (bio) {
		bio_chain(bio, new);
		submit_bio(bio);
	}

	return new;
}
EXPORT_SYMBOL_GPL(blk_next_bio);
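/*
 * Illustrative sketch, not part of the original file: because
 * blk_next_bio() chains the old bio to the new one and submits it, a
 * caller can build an arbitrarily long chain while holding only the
 * tail bio; waiting on the tail waits for the entire chain.  The helper
 * name is hypothetical and assumes the device supports
 * REQ_OP_WRITE_ZEROES.
 */
static int __maybe_unused example_chain_write_zeroes(struct block_device *bdev,
						     sector_t sector,
						     unsigned int nr_bios)
{
	struct bio *bio = NULL;
	unsigned int i;
	int ret;

	if (!nr_bios)
		return 0;

	for (i = 0; i < nr_bios; i++) {
		/* Submits the previous bio (if any) and returns a new one. */
		bio = blk_next_bio(bio, 0, GFP_KERNEL);
		bio->bi_iter.bi_sector = sector;
		bio_set_dev(bio, bdev);
		bio->bi_opf = REQ_OP_WRITE_ZEROES;
		bio->bi_iter.bi_size = 1 << 9;	/* one 512B sector each */
		sector++;
	}

	/* Waiting on the tail bio waits for every chained predecessor. */
	ret = submit_bio_wait(bio);
	bio_put(bio);
	return ret;
}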
int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, int flags,
		struct bio **biop)
{
	struct request_queue *q = bdev_get_queue(bdev);
	struct bio *bio = *biop;
	unsigned int op;
	sector_t bs_mask, part_offset = 0;

	if (!q)
		return -ENXIO;

	if (bdev_read_only(bdev))
		return -EPERM;

	if (flags & BLKDEV_DISCARD_SECURE) {
		if (!blk_queue_secure_erase(q))
			return -EOPNOTSUPP;
		op = REQ_OP_SECURE_ERASE;
	} else {
		if (!blk_queue_discard(q))
			return -EOPNOTSUPP;
		op = REQ_OP_DISCARD;
	}

	/* In case the discard granularity isn't set by a buggy device driver */
	if (WARN_ON_ONCE(!q->limits.discard_granularity)) {
		char dev_name[BDEVNAME_SIZE];

		bdevname(bdev, dev_name);
		pr_err_ratelimited("%s: Error: discard_granularity is 0.\n", dev_name);
		return -EOPNOTSUPP;
	}

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

	if (!nr_sects)
		return -EINVAL;

	/* In case the discard request is in a partition */
	if (bdev_is_partition(bdev))
		part_offset = bdev->bd_start_sect;

	while (nr_sects) {
		sector_t granularity_aligned_lba, req_sects;
		sector_t sector_mapped = sector + part_offset;

		granularity_aligned_lba = round_up(sector_mapped,
				q->limits.discard_granularity >> SECTOR_SHIFT);

		/*
		 * Check whether the discard bio starts at a discard_granularity
		 * aligned LBA:
		 * - If no: set (granularity_aligned_lba - sector_mapped) as
		 *   bi_size of the first split bio, so that the second bio
		 *   starts at a discard_granularity aligned LBA on the device.
		 * - If yes: use bio_aligned_discard_max_sectors() as the max
		 *   possible bi_size of the first split bio.  Then when this
		 *   bio is split in the device driver, the resulting bios are
		 *   very likely to be aligned to the discard_granularity of
		 *   the device's queue.
		 */
		if (granularity_aligned_lba == sector_mapped)
			req_sects = min_t(sector_t, nr_sects,
					  bio_aligned_discard_max_sectors(q));
		else
			req_sects = min_t(sector_t, nr_sects,
					  granularity_aligned_lba - sector_mapped);
		WARN_ON_ONCE((req_sects << 9) > UINT_MAX);

		bio = blk_next_bio(bio, 0, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio_set_dev(bio, bdev);
		bio_set_op_attrs(bio, op, 0);

		bio->bi_iter.bi_size = req_sects << 9;
		sector += req_sects;
		nr_sects -= req_sects;

		/*
		 * We can loop for a long time in here, if someone does
		 * full device discards (like mkfs). Be nice and allow
		 * us to schedule out to avoid softlocking if preempt
		 * is disabled.
		 */
		cond_resched();
	}

	*biop = bio;
	return 0;
}
EXPORT_SYMBOL(__blkdev_issue_discard);
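/*
 * Worked example, not part of the original file: assume a partition that
 * starts at device LBA 34 and a queue discard_granularity of 1 MiB
 * (2048 sectors), with a discard of 8192 sectors starting at
 * partition-relative sector 0.  On the first iteration sector_mapped is
 * 34, which rounds up to granularity_aligned_lba = 2048, so the first
 * bio covers 2048 - 34 = 2014 sectors.  The next iteration starts at
 * sector_mapped = 2048, which is aligned, so the remaining bios are
 * sized by bio_aligned_discard_max_sectors() and stay
 * granularity-aligned on the device.
 */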
/**
 * blkdev_issue_discard - queue a discard
 * @bdev: blockdev to issue discard for
 * @sector: start sector
 * @nr_sects: number of sectors to discard
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @flags: BLKDEV_DISCARD_* flags to control behaviour
 *
 * Description:
 *    Issue a discard request for the sectors in question.
 */
int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
{
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret;

	blk_start_plug(&plug);
	ret = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, flags,
			&bio);
	if (!ret && bio) {
		ret = submit_bio_wait(bio);
		if (ret == -EOPNOTSUPP)
			ret = 0;
		bio_put(bio);
	}
	blk_finish_plug(&plug);

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_discard);
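/*
 * Usage sketch, not part of the original file: discard a byte range,
 * roughly the way the BLKDISCARD ioctl path drives this API.  The helper
 * name is hypothetical; start and len must be aligned to the logical
 * block size or the call returns -EINVAL.
 */
static int __maybe_unused example_discard_byte_range(struct block_device *bdev,
						     u64 start, u64 len)
{
	return blkdev_issue_discard(bdev, start >> SECTOR_SHIFT,
				    len >> SECTOR_SHIFT, GFP_KERNEL, 0);
}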
static int __blkdev_issue_write_zeroes(struct block_device *bdev,
		sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
		struct bio **biop, unsigned flags)
{
	struct bio *bio = *biop;
	unsigned int max_write_zeroes_sectors;
	struct request_queue *q = bdev_get_queue(bdev);

	if (!q)
		return -ENXIO;

	if (bdev_read_only(bdev))
		return -EPERM;

	/* Ensure that max_write_zeroes_sectors doesn't overflow bi_size */
	max_write_zeroes_sectors = bdev_write_zeroes_sectors(bdev);

	if (max_write_zeroes_sectors == 0)
		return -EOPNOTSUPP;

	while (nr_sects) {
		bio = blk_next_bio(bio, 0, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio_set_dev(bio, bdev);
		bio->bi_opf = REQ_OP_WRITE_ZEROES;
		if (flags & BLKDEV_ZERO_NOUNMAP)
			bio->bi_opf |= REQ_NOUNMAP;

		if (nr_sects > max_write_zeroes_sectors) {
			bio->bi_iter.bi_size = max_write_zeroes_sectors << 9;
			nr_sects -= max_write_zeroes_sectors;
			sector += max_write_zeroes_sectors;
		} else {
			bio->bi_iter.bi_size = nr_sects << 9;
			nr_sects = 0;
		}
		cond_resched();
	}

	*biop = bio;
	return 0;
}
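/*
 * Worked example, not part of the original file: with a (hypothetical)
 * max_write_zeroes_sectors of 65535, zeroing 2097152 sectors (1 GiB)
 * yields 33 chained bios: 32 of 65535 sectors each (2097120 sectors
 * total) and a final bio of the remaining 32 sectors.
 */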
/*
 * Convert a number of 512B sectors to a number of pages.
 * The result is limited to a number of pages that can fit into a BIO.
 * Also make sure that the result is always at least 1 (page) for the cases
 * where nr_sects is lower than the number of sectors in a page.
 */
static unsigned int __blkdev_sectors_to_bio_pages(sector_t nr_sects)
{
	sector_t pages = DIV_ROUND_UP_SECTOR_T(nr_sects, PAGE_SIZE / 512);

	return min(pages, (sector_t)BIO_MAX_VECS);
}
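/*
 * Worked example, not part of the original file: with 4 KiB pages there
 * are PAGE_SIZE / 512 = 8 sectors per page, so nr_sects = 7 rounds up to
 * 1 page and nr_sects = 17 rounds up to 3 pages.  Anything above
 * BIO_MAX_VECS * 8 = 2048 sectors is capped at BIO_MAX_VECS (256) pages,
 * i.e. 1 MiB per bio.
 */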
static int __blkdev_issue_zero_pages(struct block_device *bdev,
		sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
		struct bio **biop)
{
	struct request_queue *q = bdev_get_queue(bdev);
	struct bio *bio = *biop;
	int bi_size = 0;
	unsigned int sz;

	if (!q)
		return -ENXIO;

	if (bdev_read_only(bdev))
		return -EPERM;

	while (nr_sects != 0) {
		bio = blk_next_bio(bio, __blkdev_sectors_to_bio_pages(nr_sects),
				   gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio_set_dev(bio, bdev);
		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

		while (nr_sects != 0) {
			sz = min((sector_t) PAGE_SIZE, nr_sects << 9);
			/* Every segment maps the same shared zero page. */
			bi_size = bio_add_page(bio, ZERO_PAGE(0), sz, 0);
			nr_sects -= bi_size >> 9;
			sector += bi_size >> 9;
			/* A short add means the bio is full; chain a new one. */
			if (bi_size < sz)
				break;
		}
		cond_resched();
	}

	*biop = bio;
	return 0;
}

/**
 * __blkdev_issue_zeroout - generate a number of zero-filled write bios
 * @bdev: blockdev to write
 * @sector: start sector
 * @nr_sects: number of sectors to write
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @biop: pointer to anchor bio
 * @flags: controls detailed behavior
 *
 * Description:
 *  Zero-fill a block range, either using hardware offload or by explicitly
 *  writing zeroes to the device.
 *
 *  If a device is using logical block provisioning, the underlying space will
 *  not be released if %flags contains BLKDEV_ZERO_NOUNMAP.
 *
 *  If %flags contains BLKDEV_ZERO_NOFALLBACK, the function will return
 *  -EOPNOTSUPP if no explicit hardware offload for zeroing is provided.
 */
int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct bio **biop,
		unsigned flags)
{
	int ret;
	sector_t bs_mask;

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

	ret = __blkdev_issue_write_zeroes(bdev, sector, nr_sects, gfp_mask,
			biop, flags);
	if (ret != -EOPNOTSUPP || (flags & BLKDEV_ZERO_NOFALLBACK))
		return ret;

	return __blkdev_issue_zero_pages(bdev, sector, nr_sects, gfp_mask,
					 biop);
}
EXPORT_SYMBOL(__blkdev_issue_zeroout);
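/*
 * Usage sketch, not part of the original file: the __-prefixed variant
 * lets a caller batch several ranges into one bio chain and wait once.
 * The helper name is hypothetical; both ranges are assumed to be
 * logical-block aligned.
 */
static int __maybe_unused example_zero_two_ranges(struct block_device *bdev,
						  sector_t a, sector_t b,
						  sector_t len)
{
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret;

	blk_start_plug(&plug);
	ret = __blkdev_issue_zeroout(bdev, a, len, GFP_KERNEL, &bio, 0);
	if (!ret)
		ret = __blkdev_issue_zeroout(bdev, b, len, GFP_KERNEL, &bio, 0);
	if (!ret && bio) {
		ret = submit_bio_wait(bio);
		bio_put(bio);
	}
	blk_finish_plug(&plug);
	return ret;
}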
/**
 * blkdev_issue_zeroout - zero-fill a block range
 * @bdev: blockdev to write
 * @sector: start sector
 * @nr_sects: number of sectors to write
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @flags: controls detailed behavior
 *
 * Description:
 *  Zero-fill a block range, either using hardware offload or by explicitly
 *  writing zeroes to the device.  See __blkdev_issue_zeroout() for the
 *  valid values for %flags.
 */
int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned flags)
{
	int ret = 0;
	sector_t bs_mask;
	struct bio *bio;
	struct blk_plug plug;
	bool try_write_zeroes = !!bdev_write_zeroes_sectors(bdev);

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

retry:
	bio = NULL;
	blk_start_plug(&plug);
	if (try_write_zeroes) {
		ret = __blkdev_issue_write_zeroes(bdev, sector, nr_sects,
						  gfp_mask, &bio, flags);
	} else if (!(flags & BLKDEV_ZERO_NOFALLBACK)) {
		ret = __blkdev_issue_zero_pages(bdev, sector, nr_sects,
						gfp_mask, &bio);
	} else {
		/* No zeroing offload support */
		ret = -EOPNOTSUPP;
	}
	if (ret == 0 && bio) {
		ret = submit_bio_wait(bio);
		bio_put(bio);
	}
	blk_finish_plug(&plug);
	if (ret && try_write_zeroes) {
		if (!(flags & BLKDEV_ZERO_NOFALLBACK)) {
			try_write_zeroes = false;
			goto retry;
		}
		if (!bdev_write_zeroes_sectors(bdev)) {
			/*
			 * Zeroing offload support was indicated, but the
			 * device reported ILLEGAL REQUEST (for some devices
			 * there is no non-destructive way to verify whether
			 * WRITE ZEROES is actually supported).
			 */
			ret = -EOPNOTSUPP;
		}
	}

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_zeroout);
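/*
 * Usage sketch, not part of the original file: zero a byte range,
 * loosely modeled on the BLKZEROOUT ioctl path, here passing
 * BLKDEV_ZERO_NOFALLBACK so the call fails fast with -EOPNOTSUPP on
 * devices without a zeroing offload instead of writing zero pages.
 * The helper name is hypothetical; start and len must be aligned to the
 * logical block size or the call returns -EINVAL.
 */
static int __maybe_unused example_zeroout_byte_range(struct block_device *bdev,
						     u64 start, u64 len)
{
	return blkdev_issue_zeroout(bdev, start >> SECTOR_SHIFT,
				    len >> SECTOR_SHIFT, GFP_KERNEL,
				    BLKDEV_ZERO_NOFALLBACK);
}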