[linux-2.6-block.git] / block / blk-lib.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to generic helpers
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"
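
/*
 * Allocate the next bio in a chain. If a previous bio is passed in, chain
 * it to the new one (so its completion propagates to the newest bio) and
 * submit it; a caller that waits on the final returned bio therefore waits
 * for every bio in the chain.
 */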
static struct bio *next_bio(struct bio *bio, unsigned int nr_pages,
		gfp_t gfp)
{
	struct bio *new = bio_alloc(gfp, nr_pages);

	if (bio) {
		bio_chain(bio, new);
		submit_bio(bio);
	}

	return new;
}

int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, int flags,
		struct bio **biop)
{
	struct request_queue *q = bdev_get_queue(bdev);
	struct bio *bio = *biop;
	unsigned int granularity;
	unsigned int op;
	int alignment;
	sector_t bs_mask;

	if (!q)
		return -ENXIO;

	if (bdev_read_only(bdev))
		return -EPERM;

	if (flags & BLKDEV_DISCARD_SECURE) {
		if (!blk_queue_secure_erase(q))
			return -EOPNOTSUPP;
		op = REQ_OP_SECURE_ERASE;
	} else {
		if (!blk_queue_discard(q))
			return -EOPNOTSUPP;
		op = REQ_OP_DISCARD;
	}

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

	/* Zero-sector (unknown) and one-sector granularities are the same. */
	granularity = max(q->limits.discard_granularity >> 9, 1U);
	alignment = (bdev_discard_alignment(bdev) >> 9) % granularity;

	while (nr_sects) {
		unsigned int req_sects;
		sector_t end_sect, tmp;

		/*
		 * Issue in chunks of the user defined max discard setting,
		 * ensuring that bi_size doesn't overflow
		 */
		req_sects = min_t(sector_t, nr_sects,
				q->limits.max_discard_sectors);
		if (req_sects > UINT_MAX >> 9)
			req_sects = UINT_MAX >> 9;

		/*
		 * If splitting a request, and the next starting sector would be
		 * misaligned, stop the discard at the previous aligned sector.
		 */
		end_sect = sector + req_sects;
		tmp = end_sect;
		if (req_sects < nr_sects &&
		    sector_div(tmp, granularity) != alignment) {
			end_sect = end_sect - alignment;
			sector_div(end_sect, granularity);
			end_sect = end_sect * granularity + alignment;
			req_sects = end_sect - sector;
		}

		bio = next_bio(bio, 0, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio_set_dev(bio, bdev);
		bio_set_op_attrs(bio, op, 0);

		bio->bi_iter.bi_size = req_sects << 9;
		nr_sects -= req_sects;
		sector = end_sect;

		/*
		 * We can loop for a long time in here, if someone does
		 * full device discards (like mkfs). Be nice and allow
		 * us to schedule out to avoid softlocking if preempt
		 * is disabled.
		 */
		cond_resched();
	}

	*biop = bio;
	return 0;
}
EXPORT_SYMBOL(__blkdev_issue_discard);

/**
 * blkdev_issue_discard - queue a discard
 * @bdev:	blockdev to issue discard for
 * @sector:	start sector
 * @nr_sects:	number of sectors to discard
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @flags:	BLKDEV_DISCARD_* flags to control behaviour
 *
 * Description:
 *    Issue a discard request for the sectors in question.
 */
int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
{
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret;

	blk_start_plug(&plug);
	ret = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, flags,
			&bio);
	if (!ret && bio) {
		ret = submit_bio_wait(bio);
		if (ret == -EOPNOTSUPP)
			ret = 0;
		bio_put(bio);
	}
	blk_finish_plug(&plug);

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_discard);
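
/*
 * Usage sketch (illustrative only; "bdev" is assumed to be an already-opened
 * struct block_device, e.g. from a filesystem's mount path):
 *
 *	sector_t nr_sects = i_size_read(bdev->bd_inode) >> 9;
 *	int err;
 *
 *	err = blkdev_issue_discard(bdev, 0, nr_sects, GFP_KERNEL, 0);
 *	if (err)
 *		pr_warn("discard failed: %d\n", err);
 *
 * Passing BLKDEV_DISCARD_SECURE in @flags requests a secure erase instead;
 * the call then fails with -EOPNOTSUPP if the queue lacks secure-erase
 * support.
 */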

/**
 * __blkdev_issue_write_same - generate a number of bios with the same page
 * @bdev:	target blockdev
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @page:	page containing data to write
 * @biop:	pointer to anchor bio
 *
 * Description:
 *    Generate and issue a number of REQ_OP_WRITE_SAME bios that all write
 *    the same page.
 */
static int __blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct page *page,
		struct bio **biop)
{
	struct request_queue *q = bdev_get_queue(bdev);
	unsigned int max_write_same_sectors;
	struct bio *bio = *biop;
	sector_t bs_mask;

	if (!q)
		return -ENXIO;

	if (bdev_read_only(bdev))
		return -EPERM;

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

	if (!bdev_write_same(bdev))
		return -EOPNOTSUPP;

	/* Ensure that max_write_same_sectors doesn't overflow bi_size */
	max_write_same_sectors = UINT_MAX >> 9;

	while (nr_sects) {
		bio = next_bio(bio, 1, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio_set_dev(bio, bdev);
		bio->bi_vcnt = 1;
		bio->bi_io_vec->bv_page = page;
		bio->bi_io_vec->bv_offset = 0;
		bio->bi_io_vec->bv_len = bdev_logical_block_size(bdev);
		bio_set_op_attrs(bio, REQ_OP_WRITE_SAME, 0);

		if (nr_sects > max_write_same_sectors) {
			bio->bi_iter.bi_size = max_write_same_sectors << 9;
			nr_sects -= max_write_same_sectors;
			sector += max_write_same_sectors;
		} else {
			bio->bi_iter.bi_size = nr_sects << 9;
			nr_sects = 0;
		}
		cond_resched();
	}

	*biop = bio;
	return 0;
}

/**
 * blkdev_issue_write_same - queue a write same operation
 * @bdev:	target blockdev
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @page:	page containing data
 *
 * Description:
 *    Issue a write same request for the sectors in question.
 */
int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
				sector_t nr_sects, gfp_t gfp_mask,
				struct page *page)
{
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret;

	blk_start_plug(&plug);
	ret = __blkdev_issue_write_same(bdev, sector, nr_sects, gfp_mask, page,
			&bio);
	if (ret == 0 && bio) {
		ret = submit_bio_wait(bio);
		bio_put(bio);
	}
	blk_finish_plug(&plug);
	return ret;
}
EXPORT_SYMBOL(blkdev_issue_write_same);
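
/*
 * Usage sketch (illustrative only; "bdev", "page", "sector" and "nr_sects"
 * are assumptions for the example). The page must hold at least one logical
 * block of pattern data, since each bio vector is sized to
 * bdev_logical_block_size():
 *
 *	int err = blkdev_issue_write_same(bdev, sector, nr_sects,
 *					  GFP_KERNEL, page);
 *	if (err == -EOPNOTSUPP)
 *		// device lacks WRITE SAME; caller must write the pattern itself
 */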

static int __blkdev_issue_write_zeroes(struct block_device *bdev,
		sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
		struct bio **biop, unsigned flags)
{
	struct bio *bio = *biop;
	unsigned int max_write_zeroes_sectors;
	struct request_queue *q = bdev_get_queue(bdev);

	if (!q)
		return -ENXIO;

	if (bdev_read_only(bdev))
		return -EPERM;

	/* Ensure that max_write_zeroes_sectors doesn't overflow bi_size */
	max_write_zeroes_sectors = bdev_write_zeroes_sectors(bdev);

	if (max_write_zeroes_sectors == 0)
		return -EOPNOTSUPP;

	while (nr_sects) {
		bio = next_bio(bio, 0, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio_set_dev(bio, bdev);
		bio->bi_opf = REQ_OP_WRITE_ZEROES;
		if (flags & BLKDEV_ZERO_NOUNMAP)
			bio->bi_opf |= REQ_NOUNMAP;

		if (nr_sects > max_write_zeroes_sectors) {
			bio->bi_iter.bi_size = max_write_zeroes_sectors << 9;
			nr_sects -= max_write_zeroes_sectors;
			sector += max_write_zeroes_sectors;
		} else {
			bio->bi_iter.bi_size = nr_sects << 9;
			nr_sects = 0;
		}
		cond_resched();
	}

	*biop = bio;
	return 0;
}

/*
 * Convert a number of 512B sectors to a number of pages.
 * The result is limited to a number of pages that can fit into a BIO.
 * Also make sure that the result is always at least 1 (page) for the cases
 * where nr_sects is lower than the number of sectors in a page.
 */
static unsigned int __blkdev_sectors_to_bio_pages(sector_t nr_sects)
{
	sector_t pages = DIV_ROUND_UP_SECTOR_T(nr_sects, PAGE_SIZE / 512);

	return min(pages, (sector_t)BIO_MAX_PAGES);
}
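
/*
 * Worked example for __blkdev_sectors_to_bio_pages(), assuming 4 KiB pages
 * (eight 512B sectors per page):
 *
 *	nr_sects = 1      ->  DIV_ROUND_UP(1, 8)  = 1 page
 *	nr_sects = 24     ->  DIV_ROUND_UP(24, 8) = 3 pages
 *	nr_sects = 1<<20  ->  131072 pages, clamped to BIO_MAX_PAGES
 */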

static int __blkdev_issue_zero_pages(struct block_device *bdev,
		sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
		struct bio **biop)
{
	struct request_queue *q = bdev_get_queue(bdev);
	struct bio *bio = *biop;
	int bi_size = 0;
	unsigned int sz;

	if (!q)
		return -ENXIO;

	if (bdev_read_only(bdev))
		return -EPERM;

	while (nr_sects != 0) {
		bio = next_bio(bio, __blkdev_sectors_to_bio_pages(nr_sects),
			       gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio_set_dev(bio, bdev);
		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

		while (nr_sects != 0) {
			sz = min((sector_t) PAGE_SIZE, nr_sects << 9);
			bi_size = bio_add_page(bio, ZERO_PAGE(0), sz, 0);
			nr_sects -= bi_size >> 9;
			sector += bi_size >> 9;
			if (bi_size < sz)
				break;
		}
		cond_resched();
	}

	*biop = bio;
	return 0;
}

/**
 * __blkdev_issue_zeroout - generate a number of zero-filled write bios
 * @bdev:	blockdev to issue
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @biop:	pointer to anchor bio
 * @flags:	controls detailed behavior
 *
 * Description:
 *  Zero-fill a block range, either using hardware offload or by explicitly
 *  writing zeroes to the device.
 *
 *  If a device is using logical block provisioning, the underlying space will
 *  not be released if %flags contains BLKDEV_ZERO_NOUNMAP.
 *
 *  If %flags contains BLKDEV_ZERO_NOFALLBACK, the function will return
 *  -EOPNOTSUPP if no explicit hardware offload for zeroing is provided.
 */
int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct bio **biop,
		unsigned flags)
{
	int ret;
	sector_t bs_mask;

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

	ret = __blkdev_issue_write_zeroes(bdev, sector, nr_sects, gfp_mask,
			biop, flags);
	if (ret != -EOPNOTSUPP || (flags & BLKDEV_ZERO_NOFALLBACK))
		return ret;

	return __blkdev_issue_zero_pages(bdev, sector, nr_sects, gfp_mask,
					 biop);
}
EXPORT_SYMBOL(__blkdev_issue_zeroout);

/**
 * blkdev_issue_zeroout - zero-fill a block range
 * @bdev:	blockdev to write
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @flags:	controls detailed behavior
 *
 * Description:
 *  Zero-fill a block range, either using hardware offload or by explicitly
 *  writing zeroes to the device.  See __blkdev_issue_zeroout() for the
 *  valid values for %flags.
 */
int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned flags)
{
	int ret = 0;
	sector_t bs_mask;
	struct bio *bio;
	struct blk_plug plug;
	bool try_write_zeroes = !!bdev_write_zeroes_sectors(bdev);

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

retry:
	bio = NULL;
	blk_start_plug(&plug);
	if (try_write_zeroes) {
		ret = __blkdev_issue_write_zeroes(bdev, sector, nr_sects,
						  gfp_mask, &bio, flags);
	} else if (!(flags & BLKDEV_ZERO_NOFALLBACK)) {
		ret = __blkdev_issue_zero_pages(bdev, sector, nr_sects,
						gfp_mask, &bio);
	} else {
		/* No zeroing offload support */
		ret = -EOPNOTSUPP;
	}
	if (ret == 0 && bio) {
		ret = submit_bio_wait(bio);
		bio_put(bio);
	}
	blk_finish_plug(&plug);
	if (ret && try_write_zeroes) {
		if (!(flags & BLKDEV_ZERO_NOFALLBACK)) {
			try_write_zeroes = false;
			goto retry;
		}
		if (!bdev_write_zeroes_sectors(bdev)) {
			/*
			 * Zeroing offload support was indicated, but the
			 * device reported ILLEGAL REQUEST (for some devices
			 * there is no non-destructive way to verify whether
			 * WRITE ZEROES is actually supported).
			 */
			ret = -EOPNOTSUPP;
		}
	}

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_zeroout);