dm: backfill abnormal IO support to non-splitting IO submission
[linux-block.git] drivers/md/dm-linear.c
/*
 * Copyright (C) 2001-2003 Sistina Software (UK) Limited.
 *
 * This file is released under the GPL.
 */

#include "dm.h"
#include <linux/module.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/dax.h>
#include <linux/slab.h>
#include <linux/device-mapper.h>

#define DM_MSG_PREFIX "linear"

/*
 * Linear: maps a linear range of a device.
 */
struct linear_c {
	struct dm_dev *dev;
	sector_t start;
};

/*
 * Construct a linear mapping: <dev_path> <offset>
 */
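/*
 * A hypothetical example table line:
 *
 *   0 409600 linear /dev/sdb 1024
 *
 * maps sectors 0..409599 of the mapped device onto /dev/sdb, starting
 * at sector 1024 of that device.
 */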
static int linear_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct linear_c *lc;
	unsigned long long tmp;
	char dummy;
	int ret;

	if (argc != 2) {
		ti->error = "Invalid argument count";
		return -EINVAL;
	}

	lc = kmalloc(sizeof(*lc), GFP_KERNEL);
	if (lc == NULL) {
		ti->error = "Cannot allocate linear context";
		return -ENOMEM;
	}

	ret = -EINVAL;
	if (sscanf(argv[1], "%llu%c", &tmp, &dummy) != 1) {
		ti->error = "Invalid device sector";
		goto bad;
	}
	lc->start = tmp;

	ret = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &lc->dev);
	if (ret) {
		ti->error = "Device lookup failed";
		goto bad;
	}

	ti->num_flush_bios = 1;
	ti->num_discard_bios = 1;
	ti->num_write_same_bios = 1;
	ti->num_write_zeroes_bios = 1;
	ti->private = lc;
	return 0;

      bad:
	kfree(lc);
	return ret;
}

static void linear_dtr(struct dm_target *ti)
{
	struct linear_c *lc = (struct linear_c *) ti->private;

	dm_put_device(ti, lc->dev);
	kfree(lc);
}

static sector_t linear_map_sector(struct dm_target *ti, sector_t bi_sector)
{
	struct linear_c *lc = ti->private;

	return lc->start + dm_target_offset(ti, bi_sector);
}

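/*
 * Redirect a bio to the underlying device and shift its sector by the
 * target offset.  Bios carrying no data sectors (e.g. empty flushes)
 * are left unremapped, except zone resets, which always target a sector.
 */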
static void linear_map_bio(struct dm_target *ti, struct bio *bio)
{
	struct linear_c *lc = ti->private;

	bio_set_dev(bio, lc->dev->bdev);
	if (bio_sectors(bio) || bio_op(bio) == REQ_OP_ZONE_RESET)
		bio->bi_iter.bi_sector =
			linear_map_sector(ti, bio->bi_iter.bi_sector);
}

static int linear_map(struct dm_target *ti, struct bio *bio)
{
	linear_map_bio(ti, bio);

	return DM_MAPIO_REMAPPED;
}

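/*
 * On successful completion of a zone report, translate the reported
 * zone sectors from the underlying device back into the target's
 * sector space.
 */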
static int linear_end_io(struct dm_target *ti, struct bio *bio,
			 blk_status_t *error)
{
	struct linear_c *lc = ti->private;

	if (!*error && bio_op(bio) == REQ_OP_ZONE_REPORT)
		dm_remap_zone_report(ti, bio, lc->start);

	return DM_ENDIO_DONE;
}

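/*
 * Status reporting: the table line is "<dev_name> <start>"; there is
 * no additional runtime info to report.
 */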
static void linear_status(struct dm_target *ti, status_type_t type,
			  unsigned status_flags, char *result, unsigned maxlen)
{
	struct linear_c *lc = (struct linear_c *) ti->private;

	switch (type) {
	case STATUSTYPE_INFO:
		result[0] = '\0';
		break;

	case STATUSTYPE_TABLE:
		snprintf(result, maxlen, "%s %llu", lc->dev->name,
			 (unsigned long long)lc->start);
		break;
	}
}

static int linear_prepare_ioctl(struct dm_target *ti,
				struct block_device **bdev, fmode_t *mode)
{
	struct linear_c *lc = (struct linear_c *) ti->private;
	struct dm_dev *dev = lc->dev;

	*bdev = dev->bdev;

	/*
	 * Only pass ioctls through if the device sizes match exactly.
	 */
	if (lc->start ||
	    ti->len != i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT)
		return 1;
	return 0;
}

static int linear_iterate_devices(struct dm_target *ti,
				  iterate_devices_callout_fn fn, void *data)
{
	struct linear_c *lc = ti->private;

	return fn(ti, lc->dev, lc->start, ti->len, data);
}

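/*
 * DAX support: translate the target-relative page offset into a page
 * offset on the underlying device, then forward the direct-access
 * request to that device's dax_device.
 */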
static long linear_dax_direct_access(struct dm_target *ti, pgoff_t pgoff,
				     long nr_pages, void **kaddr, pfn_t *pfn)
{
	long ret;
	struct linear_c *lc = ti->private;
	struct block_device *bdev = lc->dev->bdev;
	struct dax_device *dax_dev = lc->dev->dax_dev;
	sector_t dev_sector, sector = pgoff * PAGE_SECTORS;

	dev_sector = linear_map_sector(ti, sector);
	ret = bdev_dax_pgoff(bdev, dev_sector, nr_pages * PAGE_SIZE, &pgoff);
	if (ret)
		return ret;
	return dax_direct_access(dax_dev, pgoff, nr_pages, kaddr, pfn);
}

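/*
 * As above, but for copying data from an iov_iter into the remapped
 * persistent-memory range.
 */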
static size_t linear_dax_copy_from_iter(struct dm_target *ti, pgoff_t pgoff,
					void *addr, size_t bytes, struct iov_iter *i)
{
	struct linear_c *lc = ti->private;
	struct block_device *bdev = lc->dev->bdev;
	struct dax_device *dax_dev = lc->dev->dax_dev;
	sector_t dev_sector, sector = pgoff * PAGE_SECTORS;

	dev_sector = linear_map_sector(ti, sector);
	if (bdev_dax_pgoff(bdev, dev_sector, ALIGN(bytes, PAGE_SIZE), &pgoff))
		return 0;
	return dax_copy_from_iter(dax_dev, pgoff, addr, bytes, i);
}

static struct target_type linear_target = {
	.name = "linear",
	.version = {1, 4, 0},
	.features = DM_TARGET_PASSES_INTEGRITY | DM_TARGET_ZONED_HM,
	.module = THIS_MODULE,
	.ctr = linear_ctr,
	.dtr = linear_dtr,
	.map = linear_map,
	.end_io = linear_end_io,
	.status = linear_status,
	.prepare_ioctl = linear_prepare_ioctl,
	.iterate_devices = linear_iterate_devices,
	.direct_access = linear_dax_direct_access,
	.dax_copy_from_iter = linear_dax_copy_from_iter,
};

int __init dm_linear_init(void)
{
	int r = dm_register_target(&linear_target);

	if (r < 0)
		DMERR("register failed %d", r);

	return r;
}

void dm_linear_exit(void)
{
	dm_unregister_target(&linear_target);
}