// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2003 Sistina Software
 * Copyright (C) 2006 Red Hat GmbH
 *
 * This file is released under the GPL.
 */

#include "dm-core.h"

#include <linux/device-mapper.h>

#include <linux/bio.h>
#include <linux/completion.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dm-io.h>

#define DM_MSG_PREFIX "io"

#define DM_IO_MAX_REGIONS	BITS_PER_LONG

struct dm_io_client {
	mempool_t pool;
	struct bio_set bios;
};

/*
 * Aligning 'struct io' reduces the number of bits required to store
 * its address. Refer to store_io_and_region_in_bio() below.
 */
struct io {
	unsigned long error_bits;
	atomic_t count;
	struct dm_io_client *client;
	io_notify_fn callback;
	void *context;
	void *vma_invalidate_address;
	unsigned long vma_invalidate_size;
} __aligned(DM_IO_MAX_REGIONS);

static struct kmem_cache *_dm_io_cache;

/*
 * Create a client with mempool and bioset.
 */
struct dm_io_client *dm_io_client_create(void)
{
	struct dm_io_client *client;
	unsigned int min_ios = dm_get_reserved_bio_based_ios();
	int ret;

	client = kzalloc(sizeof(*client), GFP_KERNEL);
	if (!client)
		return ERR_PTR(-ENOMEM);

	ret = mempool_init_slab_pool(&client->pool, min_ios, _dm_io_cache);
	if (ret)
		goto bad;

	ret = bioset_init(&client->bios, min_ios, 0, BIOSET_NEED_BVECS);
	if (ret)
		goto bad;

	return client;

bad:
	mempool_exit(&client->pool);
	kfree(client);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL(dm_io_client_create);

void dm_io_client_destroy(struct dm_io_client *client)
{
	mempool_exit(&client->pool);
	bioset_exit(&client->bios);
	kfree(client);
}
EXPORT_SYMBOL(dm_io_client_destroy);
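
/*
 * Example usage (an illustrative sketch, not part of the upstream file):
 * a target typically creates one client when it is constructed and
 * destroys it when it is torn down:
 *
 *	struct dm_io_client *client = dm_io_client_create();
 *
 *	if (IS_ERR(client))
 *		return PTR_ERR(client);
 *	...
 *	dm_io_client_destroy(client);
 */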

/*
 *-------------------------------------------------------------------
 * We need to keep track of which region a bio is doing io for.
 * To avoid a memory allocation to store just 5 or 6 bits, we
 * ensure the 'struct io' pointer is aligned so enough low bits are
 * always zero and then combine it with the region number directly in
 * bi_private.
 *-------------------------------------------------------------------
 */
static void store_io_and_region_in_bio(struct bio *bio, struct io *io,
				       unsigned int region)
{
	if (unlikely(!IS_ALIGNED((unsigned long)io, DM_IO_MAX_REGIONS))) {
		DMCRIT("Unaligned struct io pointer %p", io);
		BUG();
	}

	bio->bi_private = (void *)((unsigned long)io | region);
}

static void retrieve_io_and_region_from_bio(struct bio *bio, struct io **io,
					    unsigned int *region)
{
	unsigned long val = (unsigned long)bio->bi_private;

	*io = (void *)(val & -(unsigned long)DM_IO_MAX_REGIONS);
	*region = val & (DM_IO_MAX_REGIONS - 1);
}
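
/*
 * Worked example of the encoding above (assuming a 64-bit kernel, so
 * DM_IO_MAX_REGIONS == BITS_PER_LONG == 64): a 64-byte-aligned io at
 * address 0x...40 combined with region 5 stores 0x...45 in bi_private;
 * masking with -64UL recovers the pointer and masking with 63 recovers
 * the region number.
 */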

/*
 *--------------------------------------------------------------
 * We need an io object to keep track of the number of bios that
 * have been dispatched for a particular io.
 *--------------------------------------------------------------
 */
static void complete_io(struct io *io)
{
	unsigned long error_bits = io->error_bits;
	io_notify_fn fn = io->callback;
	void *context = io->context;

	if (io->vma_invalidate_size)
		invalidate_kernel_vmap_range(io->vma_invalidate_address,
					     io->vma_invalidate_size);

	mempool_free(io, &io->client->pool);
	fn(error_bits, context);
}

static void dec_count(struct io *io, unsigned int region, blk_status_t error)
{
	if (error)
		set_bit(region, &io->error_bits);

	if (atomic_dec_and_test(&io->count))
		complete_io(io);
}

static void endio(struct bio *bio)
{
	struct io *io;
	unsigned int region;
	blk_status_t error;

	if (bio->bi_status && bio_data_dir(bio) == READ)
		zero_fill_bio(bio);

	/*
	 * The bio destructor in bio_put() may use the io object.
	 */
	retrieve_io_and_region_from_bio(bio, &io, &region);

	error = bio->bi_status;
	bio_put(bio);

	dec_count(io, region, error);
}

/*
 *--------------------------------------------------------------
 * These little objects provide an abstraction for getting a new
 * destination page for io.
 *--------------------------------------------------------------
 */
struct dpages {
	void (*get_page)(struct dpages *dp,
			 struct page **p, unsigned long *len, unsigned int *offset);
	void (*next_page)(struct dpages *dp);

	union {
		unsigned int context_u;
		struct bvec_iter context_bi;
	};
	void *context_ptr;

	void *vma_invalidate_address;
	unsigned long vma_invalidate_size;
};
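
/*
 * Note: get_page() reports the current page, the number of bytes
 * available in it and the starting offset without advancing the
 * iterator; next_page() moves to the following page.  do_region()
 * below loops over get_page()/bio_add_page()/next_page() until the
 * bio is full.
 */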

/*
 * Functions for getting the pages from a list.
 */
static void list_get_page(struct dpages *dp,
			  struct page **p, unsigned long *len, unsigned int *offset)
{
	unsigned int o = dp->context_u;
	struct page_list *pl = dp->context_ptr;

	*p = pl->page;
	*len = PAGE_SIZE - o;
	*offset = o;
}

static void list_next_page(struct dpages *dp)
{
	struct page_list *pl = dp->context_ptr;

	dp->context_ptr = pl->next;
	dp->context_u = 0;
}

static void list_dp_init(struct dpages *dp, struct page_list *pl, unsigned int offset)
{
	dp->get_page = list_get_page;
	dp->next_page = list_next_page;
	dp->context_u = offset;
	dp->context_ptr = pl;
}

/*
 * Functions for getting the pages from a bvec.
 */
static void bio_get_page(struct dpages *dp, struct page **p,
			 unsigned long *len, unsigned int *offset)
{
	struct bio_vec bvec = bvec_iter_bvec((struct bio_vec *)dp->context_ptr,
					     dp->context_bi);

	*p = bvec.bv_page;
	*len = bvec.bv_len;
	*offset = bvec.bv_offset;

	/* avoid figuring it out again in bio_next_page() */
	dp->context_bi.bi_sector = (sector_t)bvec.bv_len;
}

static void bio_next_page(struct dpages *dp)
{
	unsigned int len = (unsigned int)dp->context_bi.bi_sector;

	bvec_iter_advance((struct bio_vec *)dp->context_ptr,
			  &dp->context_bi, len);
}

static void bio_dp_init(struct dpages *dp, struct bio *bio)
{
	dp->get_page = bio_get_page;
	dp->next_page = bio_next_page;

	/*
	 * We only use the bvec iterator to retrieve pages, so it is ok to
	 * access the bvec table directly here.
	 */
	dp->context_ptr = bio->bi_io_vec;
	dp->context_bi = bio->bi_iter;
}
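
/*
 * Note: bio_get_page() stashes the length of the current bvec in
 * context_bi.bi_sector, which is otherwise unused here, so that
 * bio_next_page() knows how far to advance the iterator without
 * recomputing the bvec.
 */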

/*
 * Functions for getting the pages from a VMA.
 */
static void vm_get_page(struct dpages *dp,
			struct page **p, unsigned long *len, unsigned int *offset)
{
	*p = vmalloc_to_page(dp->context_ptr);
	*offset = dp->context_u;
	*len = PAGE_SIZE - dp->context_u;
}

static void vm_next_page(struct dpages *dp)
{
	dp->context_ptr += PAGE_SIZE - dp->context_u;
	dp->context_u = 0;
}

static void vm_dp_init(struct dpages *dp, void *data)
{
	dp->get_page = vm_get_page;
	dp->next_page = vm_next_page;
	dp->context_u = offset_in_page(data);
	dp->context_ptr = data;
}

/*
 * Functions for getting the pages from kernel memory.
 */
static void km_get_page(struct dpages *dp, struct page **p, unsigned long *len,
			unsigned int *offset)
{
	*p = virt_to_page(dp->context_ptr);
	*offset = dp->context_u;
	*len = PAGE_SIZE - dp->context_u;
}

static void km_next_page(struct dpages *dp)
{
	dp->context_ptr += PAGE_SIZE - dp->context_u;
	dp->context_u = 0;
}

static void km_dp_init(struct dpages *dp, void *data)
{
	dp->get_page = km_get_page;
	dp->next_page = km_next_page;
	dp->context_u = offset_in_page(data);
	dp->context_ptr = data;
}

/*
 *---------------------------------------------------------------
 * IO routines that accept a list of pages.
 *---------------------------------------------------------------
 */
static void do_region(const blk_opf_t opf, unsigned int region,
		      struct dm_io_region *where, struct dpages *dp,
		      struct io *io, unsigned short ioprio)
{
	struct bio *bio;
	struct page *page;
	unsigned long len;
	unsigned int offset;
	unsigned int num_bvecs;
	sector_t remaining = where->count;
	struct request_queue *q = bdev_get_queue(where->bdev);
	sector_t num_sectors;
	unsigned int special_cmd_max_sectors;
	const enum req_op op = opf & REQ_OP_MASK;

	/*
	 * Reject unsupported discard and write zeroes requests.
	 */
	if (op == REQ_OP_DISCARD)
		special_cmd_max_sectors = bdev_max_discard_sectors(where->bdev);
	else if (op == REQ_OP_WRITE_ZEROES)
		special_cmd_max_sectors = q->limits.max_write_zeroes_sectors;
	if ((op == REQ_OP_DISCARD || op == REQ_OP_WRITE_ZEROES) &&
	    special_cmd_max_sectors == 0) {
		atomic_inc(&io->count);
		dec_count(io, region, BLK_STS_NOTSUPP);
		return;
	}

	/*
	 * where->count may be zero if opf holds a flush and we need to
	 * send a zero-sized flush.
	 */
	do {
		/*
		 * Allocate a suitably sized bio.
		 */
		switch (op) {
		case REQ_OP_DISCARD:
		case REQ_OP_WRITE_ZEROES:
			num_bvecs = 0;
			break;
		default:
			num_bvecs = bio_max_segs(dm_sector_div_up(remaining,
						(PAGE_SIZE >> SECTOR_SHIFT)));
		}

		bio = bio_alloc_bioset(where->bdev, num_bvecs, opf, GFP_NOIO,
				       &io->client->bios);
		bio->bi_iter.bi_sector = where->sector + (where->count - remaining);
		bio->bi_end_io = endio;
		bio->bi_ioprio = ioprio;
		store_io_and_region_in_bio(bio, io, region);

		if (op == REQ_OP_DISCARD || op == REQ_OP_WRITE_ZEROES) {
			num_sectors = min_t(sector_t, special_cmd_max_sectors, remaining);
			bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT;
			remaining -= num_sectors;
		} else {
			while (remaining) {
				/*
				 * Try and add as many pages as possible.
				 */
				dp->get_page(dp, &page, &len, &offset);
				len = min(len, to_bytes(remaining));
				if (!bio_add_page(bio, page, len, offset))
					break;

				offset = 0;
				remaining -= to_sector(len);
				dp->next_page(dp);
			}
		}

		atomic_inc(&io->count);
		submit_bio(bio);
	} while (remaining);
}
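
/*
 * Note on the loop above: REQ_OP_DISCARD and REQ_OP_WRITE_ZEROES bios
 * carry no data pages, so bi_size is set directly and the region is
 * split into bios of at most special_cmd_max_sectors each; all other
 * ops are filled page by page from the dpages iterator, starting a new
 * bio whenever bio_add_page() reports the current one is full.
 */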

static void dispatch_io(blk_opf_t opf, unsigned int num_regions,
			struct dm_io_region *where, struct dpages *dp,
			struct io *io, int sync, unsigned short ioprio)
{
	int i;
	struct dpages old_pages = *dp;

	BUG_ON(num_regions > DM_IO_MAX_REGIONS);

	if (sync)
		opf |= REQ_SYNC;

	/*
	 * For multiple regions we need to be careful to rewind
	 * the dp object for each call to do_region.
	 */
	for (i = 0; i < num_regions; i++) {
		*dp = old_pages;
		if (where[i].count || (opf & REQ_PREFLUSH))
			do_region(opf, i, where + i, dp, io, ioprio);
	}

	/*
	 * Drop the extra reference that we were holding to avoid
	 * the io being completed too early.
	 */
	dec_count(io, 0, 0);
}
413
97e7cdf1
JT
414struct sync_io {
415 unsigned long error_bits;
416 struct completion wait;
417};
418
419static void sync_io_complete(unsigned long error, void *context)
420{
421 struct sync_io *sio = context;
422
423 sio->error_bits = error;
424 complete(&sio->wait);
425}
426
891ce207 427static int sync_io(struct dm_io_client *client, unsigned int num_regions,
a3282b43 428 struct dm_io_region *where, blk_opf_t opf, struct dpages *dp,
6e5f0f63 429 unsigned long *error_bits, unsigned short ioprio)
1da177e4 430{
97e7cdf1
JT
431 struct io *io;
432 struct sync_io sio;
1da177e4 433
a3282b43 434 if (num_regions > 1 && !op_is_write(opf)) {
1da177e4
LT
435 WARN_ON(1);
436 return -EIO;
437 }
438
97e7cdf1
JT
439 init_completion(&sio.wait);
440
6f1c819c 441 io = mempool_alloc(&client->pool, GFP_NOIO);
f1e53987 442 io->error_bits = 0;
f1e53987 443 atomic_set(&io->count, 1); /* see dispatch_io() */
f1e53987 444 io->client = client;
97e7cdf1
JT
445 io->callback = sync_io_complete;
446 io->context = &sio;
1da177e4 447
bb91bc7b
MP
448 io->vma_invalidate_address = dp->vma_invalidate_address;
449 io->vma_invalidate_size = dp->vma_invalidate_size;
450
6e5f0f63 451 dispatch_io(opf, num_regions, where, dp, io, 1, ioprio);
1da177e4 452
97e7cdf1 453 wait_for_completion_io(&sio.wait);
1da177e4 454
891ce207 455 if (error_bits)
97e7cdf1 456 *error_bits = sio.error_bits;
891ce207 457
97e7cdf1 458 return sio.error_bits ? -EIO : 0;
1da177e4
LT
459}
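
/*
 * Note: sync_io() can sleep, both in mempool_alloc(GFP_NOIO) and in
 * wait_for_completion_io(), so it must only be used from process
 * context.
 */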

static int async_io(struct dm_io_client *client, unsigned int num_regions,
		    struct dm_io_region *where, blk_opf_t opf,
		    struct dpages *dp, io_notify_fn fn, void *context,
		    unsigned short ioprio)
{
	struct io *io;

	if (num_regions > 1 && !op_is_write(opf)) {
		WARN_ON(1);
		fn(1, context);
		return -EIO;
	}

	io = mempool_alloc(&client->pool, GFP_NOIO);
	io->error_bits = 0;
	atomic_set(&io->count, 1); /* see dispatch_io() */
	io->client = client;
	io->callback = fn;
	io->context = context;

	io->vma_invalidate_address = dp->vma_invalidate_address;
	io->vma_invalidate_size = dp->vma_invalidate_size;

	dispatch_io(opf, num_regions, where, dp, io, 0, ioprio);
	return 0;
}
487
bb91bc7b
MP
488static int dp_init(struct dm_io_request *io_req, struct dpages *dp,
489 unsigned long size)
c8b03afe
HM
490{
491 /* Set up dpages based on memory type */
bb91bc7b
MP
492
493 dp->vma_invalidate_address = NULL;
494 dp->vma_invalidate_size = 0;
495
c8b03afe
HM
496 switch (io_req->mem.type) {
497 case DM_IO_PAGE_LIST:
498 list_dp_init(dp, io_req->mem.ptr.pl, io_req->mem.offset);
499 break;
500
003b5c57
KO
501 case DM_IO_BIO:
502 bio_dp_init(dp, io_req->mem.ptr.bio);
c8b03afe
HM
503 break;
504
505 case DM_IO_VMA:
bb91bc7b 506 flush_kernel_vmap_range(io_req->mem.ptr.vma, size);
581075e4 507 if ((io_req->bi_opf & REQ_OP_MASK) == REQ_OP_READ) {
bb91bc7b
MP
508 dp->vma_invalidate_address = io_req->mem.ptr.vma;
509 dp->vma_invalidate_size = size;
510 }
c8b03afe
HM
511 vm_dp_init(dp, io_req->mem.ptr.vma);
512 break;
513
514 case DM_IO_KMEM:
515 km_dp_init(dp, io_req->mem.ptr.addr);
516 break;
517
518 default:
519 return -EINVAL;
520 }
521
522 return 0;
523}

int dm_io(struct dm_io_request *io_req, unsigned int num_regions,
	  struct dm_io_region *where, unsigned long *sync_error_bits,
	  unsigned short ioprio)
{
	int r;
	struct dpages dp;

	r = dp_init(io_req, &dp, (unsigned long)where->count << SECTOR_SHIFT);
	if (r)
		return r;

	if (!io_req->notify.fn)
		return sync_io(io_req->client, num_regions, where,
			       io_req->bi_opf, &dp, sync_error_bits, ioprio);

	return async_io(io_req->client, num_regions, where,
			io_req->bi_opf, &dp, io_req->notify.fn,
			io_req->notify.context, ioprio);
}
EXPORT_SYMBOL(dm_io);
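
/*
 * Example usage (an illustrative sketch, not part of the upstream file;
 * 'client', 'bdev' and 'buffer' are the caller's own, hypothetical
 * variables): synchronously reading the first 8 sectors of a device
 * into a kernel buffer might look like this:
 *
 *	struct dm_io_request io_req = {
 *		.bi_opf = REQ_OP_READ,
 *		.mem.type = DM_IO_KMEM,
 *		.mem.ptr.addr = buffer,
 *		.notify.fn = NULL,	// NULL selects the synchronous path
 *		.client = client,
 *	};
 *	struct dm_io_region region = {
 *		.bdev = bdev,
 *		.sector = 0,
 *		.count = 8,
 *	};
 *	unsigned long error_bits;
 *	int r = dm_io(&io_req, 1, &region, &error_bits, IOPRIO_DEFAULT);
 */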

int __init dm_io_init(void)
{
	_dm_io_cache = KMEM_CACHE(io, 0);
	if (!_dm_io_cache)
		return -ENOMEM;

	return 0;
}

void dm_io_exit(void)
{
	kmem_cache_destroy(_dm_io_cache);
	_dm_io_cache = NULL;
}