// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2003 Sistina Software
 * Copyright (C) 2006 Red Hat GmbH
 *
 * This file is released under the GPL.
 */

#include "dm-core.h"

#include <linux/device-mapper.h>

#include <linux/bio.h>
#include <linux/completion.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dm-io.h>

#define DM_MSG_PREFIX "io"

#define DM_IO_MAX_REGIONS BITS_PER_LONG

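/*
 * Each client carries a slab-backed mempool of 'struct io' and its own
 * bio_set, so allocations for in-flight io can keep making forward
 * progress under memory pressure.
 */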
struct dm_io_client {
	mempool_t pool;
	struct bio_set bios;
};

/*
 * Aligning 'struct io' reduces the number of bits required to store
 * its address. Refer to store_io_and_region_in_bio() below.
 */
struct io {
	unsigned long error_bits;
	atomic_t count;
	struct dm_io_client *client;
	io_notify_fn callback;
	void *context;
	void *vma_invalidate_address;
	unsigned long vma_invalidate_size;
} __attribute__((aligned(DM_IO_MAX_REGIONS)));

static struct kmem_cache *_dm_io_cache;

/*
 * Create a client with mempool and bioset.
 */
struct dm_io_client *dm_io_client_create(void)
{
	struct dm_io_client *client;
	unsigned min_ios = dm_get_reserved_bio_based_ios();
	int ret;

	client = kzalloc(sizeof(*client), GFP_KERNEL);
	if (!client)
		return ERR_PTR(-ENOMEM);

	ret = mempool_init_slab_pool(&client->pool, min_ios, _dm_io_cache);
	if (ret)
		goto bad;

	ret = bioset_init(&client->bios, min_ios, 0, BIOSET_NEED_BVECS);
	if (ret)
		goto bad;

	return client;

   bad:
	mempool_exit(&client->pool);
	kfree(client);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL(dm_io_client_create);
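
/*
 * A minimal usage sketch (the surrounding target code is hypothetical):
 * a target typically creates one client up front and reuses it for
 * every dm_io() call until it is torn down:
 *
 *	struct dm_io_client *ioc = dm_io_client_create();
 *
 *	if (IS_ERR(ioc))
 *		return PTR_ERR(ioc);
 *	...
 *	dm_io_client_destroy(ioc);
 */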

void dm_io_client_destroy(struct dm_io_client *client)
{
	mempool_exit(&client->pool);
	bioset_exit(&client->bios);
	kfree(client);
}
EXPORT_SYMBOL(dm_io_client_destroy);

/*-----------------------------------------------------------------
 * We need to keep track of which region a bio is doing io for.
 * To avoid a memory allocation to store just 5 or 6 bits, we
 * ensure the 'struct io' pointer is aligned so enough low bits are
 * always zero and then combine it with the region number directly in
 * bi_private.
 *---------------------------------------------------------------*/
static void store_io_and_region_in_bio(struct bio *bio, struct io *io,
				       unsigned region)
{
	if (unlikely(!IS_ALIGNED((unsigned long)io, DM_IO_MAX_REGIONS))) {
		DMCRIT("Unaligned struct io pointer %p", io);
		BUG();
	}

	bio->bi_private = (void *)((unsigned long)io | region);
}

static void retrieve_io_and_region_from_bio(struct bio *bio, struct io **io,
					    unsigned *region)
{
	unsigned long val = (unsigned long)bio->bi_private;

	*io = (void *)(val & -(unsigned long)DM_IO_MAX_REGIONS);
	*region = val & (DM_IO_MAX_REGIONS - 1);
}
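
/*
 * A worked example of the packing above, assuming a 64-bit build where
 * DM_IO_MAX_REGIONS is 64: an aligned io pointer has its low six bits
 * clear, so io = 0xffff888000001040 (hypothetical) and region = 5 are
 * stored as bi_private = 0xffff888000001045, and the two masks above
 * recover the original pointer and region number.
 */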

/*-----------------------------------------------------------------
 * We need an io object to keep track of the number of bios that
 * have been dispatched for a particular io.
 *---------------------------------------------------------------*/
static void complete_io(struct io *io)
{
	unsigned long error_bits = io->error_bits;
	io_notify_fn fn = io->callback;
	void *context = io->context;

	if (io->vma_invalidate_size)
		invalidate_kernel_vmap_range(io->vma_invalidate_address,
					     io->vma_invalidate_size);

	mempool_free(io, &io->client->pool);
	fn(error_bits, context);
}

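/*
 * Drop one reference on @io, recording any error against @region; the
 * final put completes the whole request via complete_io().
 */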
static void dec_count(struct io *io, unsigned int region, blk_status_t error)
{
	if (error)
		set_bit(region, &io->error_bits);

	if (atomic_dec_and_test(&io->count))
		complete_io(io);
}

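/*
 * Per-bio completion: a failed read is zero-filled so stale data is
 * never exposed to the caller, then the bio's reference on the parent
 * io is dropped.
 */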
static void endio(struct bio *bio)
{
	struct io *io;
	unsigned region;
	blk_status_t error;

	if (bio->bi_status && bio_data_dir(bio) == READ)
		zero_fill_bio(bio);

	/*
	 * The bio destructor in bio_put() may use the io object.
	 */
	retrieve_io_and_region_from_bio(bio, &io, &region);

	error = bio->bi_status;
	bio_put(bio);

	dec_count(io, region, error);
}

/*-----------------------------------------------------------------
 * These little objects provide an abstraction for getting a new
 * destination page for io.
 *---------------------------------------------------------------*/
struct dpages {
	void (*get_page)(struct dpages *dp,
			 struct page **p, unsigned long *len, unsigned *offset);
	void (*next_page)(struct dpages *dp);

	union {
		unsigned context_u;
		struct bvec_iter context_bi;
	};
	void *context_ptr;

	void *vma_invalidate_address;
	unsigned long vma_invalidate_size;
};
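
/*
 * The iteration contract: get_page() reports the current page, the
 * usable length within it and the starting offset; next_page()
 * advances to the following page.  Each memory type below supplies
 * its own pair of callbacks.
 */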

/*
 * Functions for getting the pages from a list.
 */
static void list_get_page(struct dpages *dp,
		  struct page **p, unsigned long *len, unsigned *offset)
{
	unsigned o = dp->context_u;
	struct page_list *pl = (struct page_list *) dp->context_ptr;

	*p = pl->page;
	*len = PAGE_SIZE - o;
	*offset = o;
}

static void list_next_page(struct dpages *dp)
{
	struct page_list *pl = (struct page_list *) dp->context_ptr;
	dp->context_ptr = pl->next;
	dp->context_u = 0;
}

static void list_dp_init(struct dpages *dp, struct page_list *pl, unsigned offset)
{
	dp->get_page = list_get_page;
	dp->next_page = list_next_page;
	dp->context_u = offset;
	dp->context_ptr = pl;
}

/*
 * Functions for getting the pages from a bvec.
 */
static void bio_get_page(struct dpages *dp, struct page **p,
			 unsigned long *len, unsigned *offset)
{
	struct bio_vec bvec = bvec_iter_bvec((struct bio_vec *)dp->context_ptr,
					     dp->context_bi);

	*p = bvec.bv_page;
	*len = bvec.bv_len;
	*offset = bvec.bv_offset;

	/* avoid figuring it out again in bio_next_page() */
	dp->context_bi.bi_sector = (sector_t)bvec.bv_len;
}

static void bio_next_page(struct dpages *dp)
{
	unsigned int len = (unsigned int)dp->context_bi.bi_sector;

	bvec_iter_advance((struct bio_vec *)dp->context_ptr,
			  &dp->context_bi, len);
}

static void bio_dp_init(struct dpages *dp, struct bio *bio)
{
	dp->get_page = bio_get_page;
	dp->next_page = bio_next_page;

	/*
	 * We just use the bvec iterator to retrieve pages, so it is ok
	 * to access the bvec table directly here.
	 */
	dp->context_ptr = bio->bi_io_vec;
	dp->context_bi = bio->bi_iter;
}

/*
 * Functions for getting the pages from a VMA.
 */
static void vm_get_page(struct dpages *dp,
		 struct page **p, unsigned long *len, unsigned *offset)
{
	*p = vmalloc_to_page(dp->context_ptr);
	*offset = dp->context_u;
	*len = PAGE_SIZE - dp->context_u;
}

static void vm_next_page(struct dpages *dp)
{
	dp->context_ptr += PAGE_SIZE - dp->context_u;
	dp->context_u = 0;
}

static void vm_dp_init(struct dpages *dp, void *data)
{
	dp->get_page = vm_get_page;
	dp->next_page = vm_next_page;
	dp->context_u = offset_in_page(data);
	dp->context_ptr = data;
}

/*
 * Functions for getting the pages from kernel memory.
 */
static void km_get_page(struct dpages *dp, struct page **p, unsigned long *len,
			unsigned *offset)
{
	*p = virt_to_page(dp->context_ptr);
	*offset = dp->context_u;
	*len = PAGE_SIZE - dp->context_u;
}

static void km_next_page(struct dpages *dp)
{
	dp->context_ptr += PAGE_SIZE - dp->context_u;
	dp->context_u = 0;
}

static void km_dp_init(struct dpages *dp, void *data)
{
	dp->get_page = km_get_page;
	dp->next_page = km_next_page;
	dp->context_u = offset_in_page(data);
	dp->context_ptr = data;
}

/*-----------------------------------------------------------------
 * IO routines that accept a list of pages.
 *---------------------------------------------------------------*/
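/*
 * Issue all bios for one region: discard/write-zeroes requests are
 * split according to the device's limits, data ops pack as many pages
 * as possible into each bio, and every submitted bio takes its own
 * reference on @io.
 */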
static void do_region(const blk_opf_t opf, unsigned region,
		      struct dm_io_region *where, struct dpages *dp,
		      struct io *io)
{
	struct bio *bio;
	struct page *page;
	unsigned long len;
	unsigned offset;
	unsigned num_bvecs;
	sector_t remaining = where->count;
	struct request_queue *q = bdev_get_queue(where->bdev);
	sector_t num_sectors;
	unsigned int special_cmd_max_sectors;
	const enum req_op op = opf & REQ_OP_MASK;

	/*
	 * Reject unsupported discard and write zeroes requests.
	 */
e6047149 315 if (op == REQ_OP_DISCARD)
cf0fbf89 316 special_cmd_max_sectors = bdev_max_discard_sectors(where->bdev);
ac62d620
CH
317 else if (op == REQ_OP_WRITE_ZEROES)
318 special_cmd_max_sectors = q->limits.max_write_zeroes_sectors;
a773187e
CH
319 if ((op == REQ_OP_DISCARD || op == REQ_OP_WRITE_ZEROES) &&
320 special_cmd_max_sectors == 0) {
feb7695f 321 atomic_inc(&io->count);
4e4cbee9 322 dec_count(io, region, BLK_STS_NOTSUPP);
37527b86
DW
323 return;
324 }
325
12fc0f49 326 /*
e6047149 327 * where->count may be zero if op holds a flush and we need to
d87f4c14 328 * send a zero-sized flush.
12fc0f49
MP
329 */
330 do {
		/*
		 * Allocate a suitably sized bio.
		 */
		switch (op) {
		case REQ_OP_DISCARD:
		case REQ_OP_WRITE_ZEROES:
			num_bvecs = 0;
			break;
		default:
			num_bvecs = bio_max_segs(dm_sector_div_up(remaining,
						(PAGE_SIZE >> SECTOR_SHIFT)));
		}

		bio = bio_alloc_bioset(where->bdev, num_bvecs, opf, GFP_NOIO,
				       &io->client->bios);
		bio->bi_iter.bi_sector = where->sector + (where->count - remaining);
		bio->bi_end_io = endio;
		store_io_and_region_in_bio(bio, io, region);

		if (op == REQ_OP_DISCARD || op == REQ_OP_WRITE_ZEROES) {
			num_sectors = min_t(sector_t, special_cmd_max_sectors, remaining);
			bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT;
			remaining -= num_sectors;
		} else while (remaining) {
			/*
			 * Try and add as many pages as possible.
			 */
			dp->get_page(dp, &page, &len, &offset);
			len = min(len, to_bytes(remaining));
			if (!bio_add_page(bio, page, len, offset))
				break;

			offset = 0;
			remaining -= to_sector(len);
			dp->next_page(dp);
		}

		atomic_inc(&io->count);
		submit_bio(bio);
	} while (remaining);
}

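/*
 * Fan one request out over up to DM_IO_MAX_REGIONS regions.  The
 * caller's extra reference on @io keeps it from completing while bios
 * are still being issued; it is dropped once every region has been
 * dispatched.
 */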
static void dispatch_io(blk_opf_t opf, unsigned int num_regions,
			struct dm_io_region *where, struct dpages *dp,
			struct io *io, int sync)
{
	int i;
	struct dpages old_pages = *dp;

	BUG_ON(num_regions > DM_IO_MAX_REGIONS);

	if (sync)
		opf |= REQ_SYNC;

	/*
	 * For multiple regions we need to be careful to rewind
	 * the dp object for each call to do_region.
	 */
	for (i = 0; i < num_regions; i++) {
		*dp = old_pages;
		if (where[i].count || (opf & REQ_PREFLUSH))
			do_region(opf, i, where + i, dp, io);
	}

	/*
	 * Drop the extra reference that we were holding to avoid
	 * the io being completed too early.
	 */
	dec_count(io, 0, 0);
}

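/*
 * Synchronous requests reuse the async machinery: the callback just
 * records the error bits and fires a completion that sync_io() waits
 * on.
 */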
struct sync_io {
	unsigned long error_bits;
	struct completion wait;
};

static void sync_io_complete(unsigned long error, void *context)
{
	struct sync_io *sio = context;

	sio->error_bits = error;
	complete(&sio->wait);
}

static int sync_io(struct dm_io_client *client, unsigned int num_regions,
		   struct dm_io_region *where, blk_opf_t opf, struct dpages *dp,
		   unsigned long *error_bits)
{
	struct io *io;
	struct sync_io sio;

	if (num_regions > 1 && !op_is_write(opf)) {
		WARN_ON(1);
		return -EIO;
	}

	init_completion(&sio.wait);

	io = mempool_alloc(&client->pool, GFP_NOIO);
	io->error_bits = 0;
	atomic_set(&io->count, 1); /* see dispatch_io() */
	io->client = client;
	io->callback = sync_io_complete;
	io->context = &sio;

	io->vma_invalidate_address = dp->vma_invalidate_address;
	io->vma_invalidate_size = dp->vma_invalidate_size;

	dispatch_io(opf, num_regions, where, dp, io, 1);

	wait_for_completion_io(&sio.wait);

	if (error_bits)
		*error_bits = sio.error_bits;

	return sio.error_bits ? -EIO : 0;
}

static int async_io(struct dm_io_client *client, unsigned int num_regions,
		    struct dm_io_region *where, blk_opf_t opf,
		    struct dpages *dp, io_notify_fn fn, void *context)
{
	struct io *io;

	if (num_regions > 1 && !op_is_write(opf)) {
		WARN_ON(1);
		fn(1, context);
		return -EIO;
	}

	io = mempool_alloc(&client->pool, GFP_NOIO);
	io->error_bits = 0;
	atomic_set(&io->count, 1); /* see dispatch_io() */
	io->client = client;
	io->callback = fn;
	io->context = context;

	io->vma_invalidate_address = dp->vma_invalidate_address;
	io->vma_invalidate_size = dp->vma_invalidate_size;

	dispatch_io(opf, num_regions, where, dp, io, 0);
	return 0;
}

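/*
 * For DM_IO_VMA the vmalloc range is flushed before the io is issued;
 * for reads the range is also recorded so complete_io() can invalidate
 * it once the data has landed.
 */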
static int dp_init(struct dm_io_request *io_req, struct dpages *dp,
		   unsigned long size)
{
	/* Set up dpages based on memory type */

	dp->vma_invalidate_address = NULL;
	dp->vma_invalidate_size = 0;

	switch (io_req->mem.type) {
	case DM_IO_PAGE_LIST:
		list_dp_init(dp, io_req->mem.ptr.pl, io_req->mem.offset);
		break;

	case DM_IO_BIO:
		bio_dp_init(dp, io_req->mem.ptr.bio);
		break;

	case DM_IO_VMA:
		flush_kernel_vmap_range(io_req->mem.ptr.vma, size);
		if ((io_req->bi_opf & REQ_OP_MASK) == REQ_OP_READ) {
			dp->vma_invalidate_address = io_req->mem.ptr.vma;
			dp->vma_invalidate_size = size;
		}
		vm_dp_init(dp, io_req->mem.ptr.vma);
		break;

	case DM_IO_KMEM:
		km_dp_init(dp, io_req->mem.ptr.addr);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

int dm_io(struct dm_io_request *io_req, unsigned num_regions,
	  struct dm_io_region *where, unsigned long *sync_error_bits)
{
	int r;
	struct dpages dp;

	r = dp_init(io_req, &dp, (unsigned long)where->count << SECTOR_SHIFT);
	if (r)
		return r;

	if (!io_req->notify.fn)
		return sync_io(io_req->client, num_regions, where,
			       io_req->bi_opf, &dp, sync_error_bits);

	return async_io(io_req->client, num_regions, where,
			io_req->bi_opf, &dp, io_req->notify.fn,
			io_req->notify.context);
}
EXPORT_SYMBOL(dm_io);
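
/*
 * A minimal sketch of a synchronous read into kernel memory; 'ioc',
 * 'bdev' and 'buf' are hypothetical, and a real caller must keep 'buf'
 * valid for the duration of the call:
 *
 *	struct dm_io_request req = {
 *		.bi_opf = REQ_OP_READ,
 *		.mem.type = DM_IO_KMEM,
 *		.mem.ptr.addr = buf,
 *		.notify.fn = NULL,	// NULL selects the sync path
 *		.client = ioc,
 *	};
 *	struct dm_io_region region = {
 *		.bdev = bdev,
 *		.sector = 0,
 *		.count = 8,
 *	};
 *	unsigned long error_bits;
 *	int r = dm_io(&req, 1, &region, &error_bits);
 */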

int __init dm_io_init(void)
{
	_dm_io_cache = KMEM_CACHE(io, 0);
	if (!_dm_io_cache)
		return -ENOMEM;

	return 0;
}

void dm_io_exit(void)
{
	kmem_cache_destroy(_dm_io_cache);
	_dm_io_cache = NULL;
}