/*
 * Copyright (C) 2001 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm-core.h"

#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/blkdev.h>
#include <linux/namei.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include <linux/blk-mq.h>
#include <linux/mount.h>
#include <linux/dax.h>

#define DM_MSG_PREFIX "table"

#define MAX_DEPTH 16
#define NODE_SIZE L1_CACHE_BYTES
#define KEYS_PER_NODE (NODE_SIZE / sizeof(sector_t))
#define CHILDREN_PER_NODE (KEYS_PER_NODE + 1)

struct dm_table {
        struct mapped_device *md;
        enum dm_queue_mode type;

        /* btree table */
        unsigned int depth;
        unsigned int counts[MAX_DEPTH]; /* in nodes */
        sector_t *index[MAX_DEPTH];

        unsigned int num_targets;
        unsigned int num_allocated;
        sector_t *highs;
        struct dm_target *targets;

        struct target_type *immutable_target_type;

        bool integrity_supported:1;
        bool singleton:1;
        unsigned integrity_added:1;

        /*
         * Indicates the rw permissions for the new logical
         * device. This should be a combination of FMODE_READ
         * and FMODE_WRITE.
         */
        fmode_t mode;

        /* a list of devices used by this table */
        struct list_head devices;

        /* events get handed up using this callback */
        void (*event_fn)(void *);
        void *event_context;

        struct dm_md_mempools *mempools;

        struct list_head target_callbacks;
};

/*
 * Similar to ceiling(log_base(n))
 */
static unsigned int int_log(unsigned int n, unsigned int base)
{
        int result = 0;

        while (n > 1) {
                n = dm_div_up(n, base);
                result++;
        }

        return result;
}
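
/*
 * Worked example (editor's note, not part of the original file):
 * int_log() computes ceiling(log_base(n)) by repeated rounded-up
 * division.  With n = 125 and base = 9:
 *
 *   125 -> dm_div_up(125, 9) = 14   (result = 1)
 *    14 -> dm_div_up(14, 9)  = 2    (result = 2)
 *     2 -> dm_div_up(2, 9)   = 1    (result = 3)
 *
 * so int_log(125, 9) == 3: a 9-way tree needs three internal levels to
 * cover 125 leaves.
 */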

/*
 * Calculate the index of the child node for the k'th key of the n'th node.
 */
static inline unsigned int get_child(unsigned int n, unsigned int k)
{
        return (n * CHILDREN_PER_NODE) + k;
}

/*
 * Return the n'th node of level l from table t.
 */
static inline sector_t *get_node(struct dm_table *t,
                                 unsigned int l, unsigned int n)
{
        return t->index[l] + (n * KEYS_PER_NODE);
}

/*
 * Return the highest key that you could lookup from the n'th
 * node on level l of the btree.
 */
static sector_t high(struct dm_table *t, unsigned int l, unsigned int n)
{
        for (; l < t->depth - 1; l++)
                n = get_child(n, CHILDREN_PER_NODE - 1);

        if (n >= t->counts[l])
                return (sector_t) -1;

        return get_node(t, l, n)[KEYS_PER_NODE - 1];
}

/*
 * Fills in a level of the btree based on the highs of the level
 * below it.
 */
static int setup_btree_index(unsigned int l, struct dm_table *t)
{
        unsigned int n, k;
        sector_t *node;

        for (n = 0U; n < t->counts[l]; n++) {
                node = get_node(t, l, n);

                for (k = 0U; k < KEYS_PER_NODE; k++)
                        node[k] = high(t, l + 1, get_child(n, k));
        }

        return 0;
}

void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size)
{
        unsigned long size;
        void *addr;

        /*
         * Check that we're not going to overflow.
         */
        if (nmemb > (ULONG_MAX / elem_size))
                return NULL;

        size = nmemb * elem_size;
        addr = vzalloc(size);

        return addr;
}
EXPORT_SYMBOL(dm_vcalloc);
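
/*
 * Example (editor's sketch): the overflow guard above rejects requests
 * whose byte count would wrap an unsigned long.  On a 64-bit host:
 *
 *   dm_vcalloc(16, 64);                   // 1 KiB, zeroed -> non-NULL
 *   dm_vcalloc(ULONG_MAX / 64 + 1, 64);   // would overflow -> NULL
 *
 * The guard divides instead of multiplying, so the check itself cannot
 * overflow; callers are expected to pass a non-zero elem_size.
 */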

/*
 * highs and targets are managed as dynamic arrays during a
 * table load.
 */
static int alloc_targets(struct dm_table *t, unsigned int num)
{
        sector_t *n_highs;
        struct dm_target *n_targets;

        /*
         * Allocate both the target array and offset array at once.
         * Append an empty entry to catch sectors beyond the end of
         * the device.
         */
        n_highs = (sector_t *) dm_vcalloc(num + 1, sizeof(struct dm_target) +
                                          sizeof(sector_t));
        if (!n_highs)
                return -ENOMEM;

        n_targets = (struct dm_target *) (n_highs + num);

        memset(n_highs, -1, sizeof(*n_highs) * num);
        vfree(t->highs);

        t->num_allocated = num;
        t->highs = n_highs;
        t->targets = n_targets;

        return 0;
}
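
/*
 * Layout sketch (editor's note): one vzalloc'd block carries both
 * arrays:
 *
 *   n_highs[0 .. num-1]    highest sector owned by each target
 *   n_targets[0 .. num]    the targets themselves, starting at
 *                          n_highs + num; the extra zeroed slot is the
 *                          "empty entry" that dm_table_find_target()
 *                          returns for sectors beyond the last target.
 *
 * The memset() stamps (sector_t)-1 into every high slot, matching the
 * sentinel that high() returns for nonexistent nodes.
 */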

int dm_table_create(struct dm_table **result, fmode_t mode,
                    unsigned num_targets, struct mapped_device *md)
{
        struct dm_table *t = kzalloc(sizeof(*t), GFP_KERNEL);

        if (!t)
                return -ENOMEM;

        INIT_LIST_HEAD(&t->devices);
        INIT_LIST_HEAD(&t->target_callbacks);

        if (!num_targets)
                num_targets = KEYS_PER_NODE;

        num_targets = dm_round_up(num_targets, KEYS_PER_NODE);

        if (!num_targets) {
                kfree(t);
                return -ENOMEM;
        }

        if (alloc_targets(t, num_targets)) {
                kfree(t);
                return -ENOMEM;
        }

        t->type = DM_TYPE_NONE;
        t->mode = mode;
        t->md = md;
        *result = t;
        return 0;
}

static void free_devices(struct list_head *devices, struct mapped_device *md)
{
        struct list_head *tmp, *next;

        list_for_each_safe(tmp, next, devices) {
                struct dm_dev_internal *dd =
                    list_entry(tmp, struct dm_dev_internal, list);
                DMWARN("%s: dm_table_destroy: dm_put_device call missing for %s",
                       dm_device_name(md), dd->dm_dev->name);
                dm_put_table_device(md, dd->dm_dev);
                kfree(dd);
        }
}

void dm_table_destroy(struct dm_table *t)
{
        unsigned int i;

        if (!t)
                return;

        /* free the indexes */
        if (t->depth >= 2)
                vfree(t->index[t->depth - 2]);

        /* free the targets */
        for (i = 0; i < t->num_targets; i++) {
                struct dm_target *tgt = t->targets + i;

                if (tgt->type->dtr)
                        tgt->type->dtr(tgt);

                dm_put_target_type(tgt->type);
        }

        vfree(t->highs);

        /* free the device list */
        free_devices(&t->devices, t->md);

        dm_free_md_mempools(t->mempools);

        kfree(t);
}

/*
 * See if we've already got a device in the list.
 */
static struct dm_dev_internal *find_device(struct list_head *l, dev_t dev)
{
        struct dm_dev_internal *dd;

        list_for_each_entry (dd, l, list)
                if (dd->dm_dev->bdev->bd_dev == dev)
                        return dd;

        return NULL;
}

/*
 * If possible, this checks whether an area of a destination device is invalid.
 */
static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
                                  sector_t start, sector_t len, void *data)
{
        struct request_queue *q;
        struct queue_limits *limits = data;
        struct block_device *bdev = dev->bdev;
        sector_t dev_size =
                i_size_read(bdev->bd_inode) >> SECTOR_SHIFT;
        unsigned short logical_block_size_sectors =
                limits->logical_block_size >> SECTOR_SHIFT;
        char b[BDEVNAME_SIZE];

        /*
         * Some devices exist without request functions,
         * such as loop devices not yet bound to backing files.
         * Forbid the use of such devices.
         */
        q = bdev_get_queue(bdev);
        if (!q || !q->make_request_fn) {
                DMWARN("%s: %s is not yet initialised: "
                       "start=%llu, len=%llu, dev_size=%llu",
                       dm_device_name(ti->table->md), bdevname(bdev, b),
                       (unsigned long long)start,
                       (unsigned long long)len,
                       (unsigned long long)dev_size);
                return 1;
        }

        if (!dev_size)
                return 0;

        if ((start >= dev_size) || (start + len > dev_size)) {
                DMWARN("%s: %s too small for target: "
                       "start=%llu, len=%llu, dev_size=%llu",
                       dm_device_name(ti->table->md), bdevname(bdev, b),
                       (unsigned long long)start,
                       (unsigned long long)len,
                       (unsigned long long)dev_size);
                return 1;
        }

        /*
         * If the target is mapped to zoned block device(s), check
         * that the zones are not partially mapped.
         */
        if (bdev_zoned_model(bdev) != BLK_ZONED_NONE) {
                unsigned int zone_sectors = bdev_zone_sectors(bdev);

                if (start & (zone_sectors - 1)) {
                        DMWARN("%s: start=%llu not aligned to h/w zone size %u of %s",
                               dm_device_name(ti->table->md),
                               (unsigned long long)start,
                               zone_sectors, bdevname(bdev, b));
                        return 1;
                }

                /*
                 * Note: The last zone of a zoned block device may be smaller
                 * than other zones. So for a target mapping the end of a
                 * zoned block device with such a zone, len would not be zone
                 * aligned. We do not allow such last smaller zone to be part
                 * of the mapping here to ensure that mappings with multiple
                 * devices do not end up with a smaller zone in the middle of
                 * the sector range.
                 */
                if (len & (zone_sectors - 1)) {
                        DMWARN("%s: len=%llu not aligned to h/w zone size %u of %s",
                               dm_device_name(ti->table->md),
                               (unsigned long long)len,
                               zone_sectors, bdevname(bdev, b));
                        return 1;
                }
        }

        if (logical_block_size_sectors <= 1)
                return 0;

        if (start & (logical_block_size_sectors - 1)) {
                DMWARN("%s: start=%llu not aligned to h/w "
                       "logical block size %u of %s",
                       dm_device_name(ti->table->md),
                       (unsigned long long)start,
                       limits->logical_block_size, bdevname(bdev, b));
                return 1;
        }

        if (len & (logical_block_size_sectors - 1)) {
                DMWARN("%s: len=%llu not aligned to h/w "
                       "logical block size %u of %s",
                       dm_device_name(ti->table->md),
                       (unsigned long long)len,
                       limits->logical_block_size, bdevname(bdev, b));
                return 1;
        }

        return 0;
}

/*
 * This upgrades the mode on an already open dm_dev, being
 * careful to leave things as they were if we fail to reopen the
 * device and not to touch the existing bdev field in case
 * it is accessed concurrently inside dm_table_any_congested().
 */
static int upgrade_mode(struct dm_dev_internal *dd, fmode_t new_mode,
                        struct mapped_device *md)
{
        int r;
        struct dm_dev *old_dev, *new_dev;

        old_dev = dd->dm_dev;

        r = dm_get_table_device(md, dd->dm_dev->bdev->bd_dev,
                                dd->dm_dev->mode | new_mode, &new_dev);
        if (r)
                return r;

        dd->dm_dev = new_dev;
        dm_put_table_device(md, old_dev);

        return 0;
}

/*
 * Convert the path to a device
 */
dev_t dm_get_dev_t(const char *path)
{
        dev_t dev;
        struct block_device *bdev;

        bdev = lookup_bdev(path);
        if (IS_ERR(bdev))
                dev = name_to_dev_t(path);
        else {
                dev = bdev->bd_dev;
                bdput(bdev);
        }

        return dev;
}
EXPORT_SYMBOL_GPL(dm_get_dev_t);
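
/*
 * Usage sketch (editor's note): a table line may name its device either
 * way, and both of these resolve to the same dev_t when /dev/sda1 is
 * major 8, minor 1:
 *
 *   dev_t a = dm_get_dev_t("/dev/sda1");   // via lookup_bdev()
 *   dev_t b = dm_get_dev_t("8:1");         // via name_to_dev_t()
 *
 * A return value of 0 means the name could not be resolved.
 */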

/*
 * Add a device to the list, or just increment the usage count if
 * it's already present.
 */
int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode,
                  struct dm_dev **result)
{
        int r;
        dev_t dev;
        struct dm_dev_internal *dd;
        struct dm_table *t = ti->table;

        BUG_ON(!t);

        dev = dm_get_dev_t(path);
        if (!dev)
                return -ENODEV;

        dd = find_device(&t->devices, dev);
        if (!dd) {
                dd = kmalloc(sizeof(*dd), GFP_KERNEL);
                if (!dd)
                        return -ENOMEM;

                if ((r = dm_get_table_device(t->md, dev, mode, &dd->dm_dev))) {
                        kfree(dd);
                        return r;
                }

                refcount_set(&dd->count, 1);
                list_add(&dd->list, &t->devices);
                goto out;

        } else if (dd->dm_dev->mode != (mode | dd->dm_dev->mode)) {
                r = upgrade_mode(dd, mode, t->md);
                if (r)
                        return r;
        }
        refcount_inc(&dd->count);
out:
        *result = dd->dm_dev;
        return 0;
}
EXPORT_SYMBOL(dm_get_device);
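
/*
 * Usage sketch (editor's note): a target's ctr typically grabs its
 * backing device as below (names are illustrative, not from this file):
 *
 *   struct dm_dev *dev;
 *
 *   if (dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &dev)) {
 *           ti->error = "Device lookup failed";
 *           return -EINVAL;
 *   }
 *   ...
 *   dm_put_device(ti, dev);   // in the dtr, or on a later ctr error
 */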

static int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
                                sector_t start, sector_t len, void *data)
{
        struct queue_limits *limits = data;
        struct block_device *bdev = dev->bdev;
        struct request_queue *q = bdev_get_queue(bdev);
        char b[BDEVNAME_SIZE];

        if (unlikely(!q)) {
                DMWARN("%s: Cannot set limits for nonexistent device %s",
                       dm_device_name(ti->table->md), bdevname(bdev, b));
                return 0;
        }

        if (bdev_stack_limits(limits, bdev, start) < 0)
                DMWARN("%s: adding target device %s caused an alignment inconsistency: "
                       "physical_block_size=%u, logical_block_size=%u, "
                       "alignment_offset=%u, start=%llu",
                       dm_device_name(ti->table->md), bdevname(bdev, b),
                       q->limits.physical_block_size,
                       q->limits.logical_block_size,
                       q->limits.alignment_offset,
                       (unsigned long long) start << SECTOR_SHIFT);

        limits->zoned = blk_queue_zoned_model(q);

        return 0;
}

/*
 * Decrement a device's use count and remove it if necessary.
 */
void dm_put_device(struct dm_target *ti, struct dm_dev *d)
{
        int found = 0;
        struct list_head *devices = &ti->table->devices;
        struct dm_dev_internal *dd;

        list_for_each_entry(dd, devices, list) {
                if (dd->dm_dev == d) {
                        found = 1;
                        break;
                }
        }
        if (!found) {
                DMWARN("%s: device %s not in table devices list",
                       dm_device_name(ti->table->md), d->name);
                return;
        }
        if (refcount_dec_and_test(&dd->count)) {
                dm_put_table_device(ti->table->md, d);
                list_del(&dd->list);
                kfree(dd);
        }
}
EXPORT_SYMBOL(dm_put_device);

/*
 * Checks to see if the target joins onto the end of the table.
 */
static int adjoin(struct dm_table *table, struct dm_target *ti)
{
        struct dm_target *prev;

        if (!table->num_targets)
                return !ti->begin;

        prev = &table->targets[table->num_targets - 1];
        return (ti->begin == (prev->begin + prev->len));
}
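
/*
 * Example (editor's note): a table must tile the logical device with no
 * gaps or overlaps, starting at sector 0:
 *
 *   target A: begin = 0,   len = 100  -> adjoin() OK (begin == 0)
 *   target B: begin = 100, len = 50   -> adjoin() OK (100 == 0 + 100)
 *   target C: begin = 200, ...        -> rejected: "Gap in table",
 *                                        since 200 != 100 + 50
 */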

/*
 * Used to dynamically allocate the arg array.
 *
 * We do first allocation with GFP_NOIO because dm-mpath and dm-thin must
 * process messages even if some device is suspended. These messages have a
 * small fixed number of arguments.
 *
 * On the other hand, dm-switch needs to process bulk data using messages and
 * excessive use of GFP_NOIO could cause trouble.
 */
static char **realloc_argv(unsigned *size, char **old_argv)
{
        char **argv;
        unsigned new_size;
        gfp_t gfp;

        if (*size) {
                new_size = *size * 2;
                gfp = GFP_KERNEL;
        } else {
                new_size = 8;
                gfp = GFP_NOIO;
        }
        argv = kmalloc_array(new_size, sizeof(*argv), gfp);
        if (argv && old_argv) {
                memcpy(argv, old_argv, *size * sizeof(*argv));
                *size = new_size;
        }

        kfree(old_argv);
        return argv;
}

/*
 * Destructively splits up the argument list to pass to ctr.
 */
int dm_split_args(int *argc, char ***argvp, char *input)
{
        char *start, *end = input, *out, **argv = NULL;
        unsigned array_size = 0;

        *argc = 0;

        if (!input) {
                *argvp = NULL;
                return 0;
        }

        argv = realloc_argv(&array_size, argv);
        if (!argv)
                return -ENOMEM;

        while (1) {
                /* Skip whitespace */
                start = skip_spaces(end);

                if (!*start)
                        break;  /* success, we hit the end */

                /* 'out' is used to remove any back-quotes */
                end = out = start;
                while (*end) {
                        /* Everything apart from '\0' can be quoted */
                        if (*end == '\\' && *(end + 1)) {
                                *out++ = *(end + 1);
                                end += 2;
                                continue;
                        }

                        if (isspace(*end))
                                break;  /* end of token */

                        *out++ = *end++;
                }

                /* have we already filled the array ? */
                if ((*argc + 1) > array_size) {
                        argv = realloc_argv(&array_size, argv);
                        if (!argv)
                                return -ENOMEM;
                }

                /* we know this is whitespace */
                if (*end)
                        end++;

                /* terminate the string and put it in the array */
                *out = '\0';
                argv[*argc] = start;
                (*argc)++;
        }

        *argvp = argv;
        return 0;
}
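
/*
 * Example (editor's note): splitting is destructive and honours
 * backslash quoting.  Given a writable buffer containing the table line
 *
 *   0 409600 linear /dev/name\ with\ space 0
 *
 * dm_split_args() yields argc == 5, with argv[3] being the single token
 * "/dev/name with space".  The '\0' terminators are written in place,
 * so only the argv array itself needs kfree()ing afterwards.
 */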

/*
 * Impose necessary and sufficient conditions on a device's table such
 * that any incoming bio which respects its logical_block_size can be
 * processed successfully.  If it falls across the boundary between
 * two or more targets, the size of each piece it gets split into must
 * be compatible with the logical_block_size of the target processing it.
 */
static int validate_hardware_logical_block_alignment(struct dm_table *table,
                                                     struct queue_limits *limits)
{
        /*
         * This function uses arithmetic modulo the logical_block_size
         * (in units of 512-byte sectors).
         */
        unsigned short device_logical_block_size_sects =
                limits->logical_block_size >> SECTOR_SHIFT;

        /*
         * Offset of the start of the next table entry, mod logical_block_size.
         */
        unsigned short next_target_start = 0;

        /*
         * Given an aligned bio that extends beyond the end of a
         * target, how many sectors must the next target handle?
         */
        unsigned short remaining = 0;

        struct dm_target *uninitialized_var(ti);
        struct queue_limits ti_limits;
        unsigned i;

        /*
         * Check each entry in the table in turn.
         */
        for (i = 0; i < dm_table_get_num_targets(table); i++) {
                ti = dm_table_get_target(table, i);

                blk_set_stacking_limits(&ti_limits);

                /* combine all target devices' limits */
                if (ti->type->iterate_devices)
                        ti->type->iterate_devices(ti, dm_set_device_limits,
                                                  &ti_limits);

                /*
                 * If the remaining sectors fall entirely within this
                 * table entry are they compatible with its logical_block_size?
                 */
                if (remaining < ti->len &&
                    remaining & ((ti_limits.logical_block_size >>
                                  SECTOR_SHIFT) - 1))
                        break;  /* Error */

                next_target_start =
                    (unsigned short) ((next_target_start + ti->len) &
                                      (device_logical_block_size_sects - 1));
                remaining = next_target_start ?
                    device_logical_block_size_sects - next_target_start : 0;
        }

        if (remaining) {
                DMWARN("%s: table line %u (start sect %llu len %llu) "
                       "not aligned to h/w logical block size %u",
                       dm_device_name(table->md), i,
                       (unsigned long long) ti->begin,
                       (unsigned long long) ti->len,
                       limits->logical_block_size);
                return -EINVAL;
        }

        return 0;
}
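
/*
 * Worked example (editor's note): suppose limits->logical_block_size is
 * 4096, so device_logical_block_size_sects == 8.  If target 0 has
 * len == 1003 sectors, then next_target_start == 1003 % 8 == 3 and
 * remaining == 5: a 4 KiB-aligned bio may straddle the boundary and
 * leave 5 sectors for target 1.  That is only compatible with a target
 * whose own logical_block_size is 512 bytes (5 & 0 == 0); with 4096
 * (5 & 7 != 0) the loop breaks and the load fails with -EINVAL.
 */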

int dm_table_add_target(struct dm_table *t, const char *type,
                        sector_t start, sector_t len, char *params)
{
        int r = -EINVAL, argc;
        char **argv;
        struct dm_target *tgt;

        if (t->singleton) {
                DMERR("%s: target type %s must appear alone in table",
                      dm_device_name(t->md), t->targets->type->name);
                return -EINVAL;
        }

        BUG_ON(t->num_targets >= t->num_allocated);

        tgt = t->targets + t->num_targets;
        memset(tgt, 0, sizeof(*tgt));

        if (!len) {
                DMERR("%s: zero-length target", dm_device_name(t->md));
                return -EINVAL;
        }

        tgt->type = dm_get_target_type(type);
        if (!tgt->type) {
                DMERR("%s: %s: unknown target type", dm_device_name(t->md), type);
                return -EINVAL;
        }

        if (dm_target_needs_singleton(tgt->type)) {
                if (t->num_targets) {
                        tgt->error = "singleton target type must appear alone in table";
                        goto bad;
                }
                t->singleton = true;
        }

        if (dm_target_always_writeable(tgt->type) && !(t->mode & FMODE_WRITE)) {
                tgt->error = "target type may not be included in a read-only table";
                goto bad;
        }

        if (t->immutable_target_type) {
                if (t->immutable_target_type != tgt->type) {
                        tgt->error = "immutable target type cannot be mixed with other target types";
                        goto bad;
                }
        } else if (dm_target_is_immutable(tgt->type)) {
                if (t->num_targets) {
                        tgt->error = "immutable target type cannot be mixed with other target types";
                        goto bad;
                }
                t->immutable_target_type = tgt->type;
        }

        if (dm_target_has_integrity(tgt->type))
                t->integrity_added = 1;

        tgt->table = t;
        tgt->begin = start;
        tgt->len = len;
        tgt->error = "Unknown error";

        /*
         * Does this target adjoin the previous one ?
         */
        if (!adjoin(t, tgt)) {
                tgt->error = "Gap in table";
                goto bad;
        }

        r = dm_split_args(&argc, &argv, params);
        if (r) {
                tgt->error = "couldn't split parameters (insufficient memory)";
                goto bad;
        }

        r = tgt->type->ctr(tgt, argc, argv);
        kfree(argv);
        if (r)
                goto bad;

        t->highs[t->num_targets++] = tgt->begin + tgt->len - 1;

        if (!tgt->num_discard_bios && tgt->discards_supported)
                DMWARN("%s: %s: ignoring discards_supported because num_discard_bios is zero.",
                       dm_device_name(t->md), type);

        return 0;

 bad:
        DMERR("%s: %s: %s", dm_device_name(t->md), type, tgt->error);
        dm_put_target_type(tgt->type);
        return r;
}

/*
 * Target argument parsing helpers.
 */
static int validate_next_arg(const struct dm_arg *arg,
                             struct dm_arg_set *arg_set,
                             unsigned *value, char **error, unsigned grouped)
{
        const char *arg_str = dm_shift_arg(arg_set);
        char dummy;

        if (!arg_str ||
            (sscanf(arg_str, "%u%c", value, &dummy) != 1) ||
            (*value < arg->min) ||
            (*value > arg->max) ||
            (grouped && arg_set->argc < *value)) {
                *error = arg->error;
                return -EINVAL;
        }

        return 0;
}

int dm_read_arg(const struct dm_arg *arg, struct dm_arg_set *arg_set,
                unsigned *value, char **error)
{
        return validate_next_arg(arg, arg_set, value, error, 0);
}
EXPORT_SYMBOL(dm_read_arg);

int dm_read_arg_group(const struct dm_arg *arg, struct dm_arg_set *arg_set,
                      unsigned *value, char **error)
{
        return validate_next_arg(arg, arg_set, value, error, 1);
}
EXPORT_SYMBOL(dm_read_arg_group);

const char *dm_shift_arg(struct dm_arg_set *as)
{
        char *r;

        if (as->argc) {
                as->argc--;
                r = *as->argv;
                as->argv++;
                return r;
        }

        return NULL;
}
EXPORT_SYMBOL(dm_shift_arg);

void dm_consume_args(struct dm_arg_set *as, unsigned num_args)
{
        BUG_ON(as->argc < num_args);
        as->argc -= num_args;
        as->argv += num_args;
}
EXPORT_SYMBOL(dm_consume_args);
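
/*
 * Usage sketch (editor's note): a ctr typically drives these helpers as
 * below; the bounds and error text are illustrative only:
 *
 *   static const struct dm_arg _args[] = {
 *           {0, 4, "Invalid number of feature args"},
 *   };
 *   struct dm_arg_set as = { .argc = argc, .argv = argv };
 *   unsigned num_features;
 *   int r = dm_read_arg_group(_args, &as, &num_features, &ti->error);
 *
 *   if (r)
 *           return r;
 *   while (num_features--) {
 *           const char *feature = dm_shift_arg(&as);
 *           ...
 *   }
 */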

static bool __table_type_bio_based(enum dm_queue_mode table_type)
{
        return (table_type == DM_TYPE_BIO_BASED ||
                table_type == DM_TYPE_DAX_BIO_BASED ||
                table_type == DM_TYPE_NVME_BIO_BASED);
}

static bool __table_type_request_based(enum dm_queue_mode table_type)
{
        return table_type == DM_TYPE_REQUEST_BASED;
}

void dm_table_set_type(struct dm_table *t, enum dm_queue_mode type)
{
        t->type = type;
}
EXPORT_SYMBOL_GPL(dm_table_set_type);

/* validate the dax capability of the target device span */
int device_supports_dax(struct dm_target *ti, struct dm_dev *dev,
                        sector_t start, sector_t len, void *data)
{
        int blocksize = *(int *) data;

        return generic_fsdax_supported(dev->dax_dev, dev->bdev, blocksize,
                                       start, len);
}

/* Check devices support synchronous DAX */
static int device_dax_synchronous(struct dm_target *ti, struct dm_dev *dev,
                                  sector_t start, sector_t len, void *data)
{
        return dev->dax_dev && dax_synchronous(dev->dax_dev);
}

bool dm_table_supports_dax(struct dm_table *t,
                           iterate_devices_callout_fn iterate_fn, int *blocksize)
{
        struct dm_target *ti;
        unsigned i;

        /* Ensure that all targets support DAX. */
        for (i = 0; i < dm_table_get_num_targets(t); i++) {
                ti = dm_table_get_target(t, i);

                if (!ti->type->direct_access)
                        return false;

                if (!ti->type->iterate_devices ||
                    !ti->type->iterate_devices(ti, iterate_fn, blocksize))
                        return false;
        }

        return true;
}

static bool dm_table_does_not_support_partial_completion(struct dm_table *t);

struct verify_rq_based_data {
        unsigned sq_count;
        unsigned mq_count;
};

static int device_is_rq_based(struct dm_target *ti, struct dm_dev *dev,
                              sector_t start, sector_t len, void *data)
{
        struct request_queue *q = bdev_get_queue(dev->bdev);
        struct verify_rq_based_data *v = data;

        if (queue_is_mq(q))
                v->mq_count++;
        else
                v->sq_count++;

        return queue_is_mq(q);
}

static int dm_table_determine_type(struct dm_table *t)
{
        unsigned i;
        unsigned bio_based = 0, request_based = 0, hybrid = 0;
        struct verify_rq_based_data v = {.sq_count = 0, .mq_count = 0};
        struct dm_target *tgt;
        struct list_head *devices = dm_table_get_devices(t);
        enum dm_queue_mode live_md_type = dm_get_md_type(t->md);
        int page_size = PAGE_SIZE;

        if (t->type != DM_TYPE_NONE) {
                /* target already set the table's type */
                if (t->type == DM_TYPE_BIO_BASED) {
                        /* possibly upgrade to a variant of bio-based */
                        goto verify_bio_based;
                }
                BUG_ON(t->type == DM_TYPE_DAX_BIO_BASED);
                BUG_ON(t->type == DM_TYPE_NVME_BIO_BASED);
                goto verify_rq_based;
        }

        for (i = 0; i < t->num_targets; i++) {
                tgt = t->targets + i;
                if (dm_target_hybrid(tgt))
                        hybrid = 1;
                else if (dm_target_request_based(tgt))
                        request_based = 1;
                else
                        bio_based = 1;

                if (bio_based && request_based) {
                        DMERR("Inconsistent table: different target types"
                              " can't be mixed up");
                        return -EINVAL;
                }
        }

        if (hybrid && !bio_based && !request_based) {
                /*
                 * The targets can work either way.
                 * Determine the type from the live device.
                 * Default to bio-based if device is new.
                 */
                if (__table_type_request_based(live_md_type))
                        request_based = 1;
                else
                        bio_based = 1;
        }

        if (bio_based) {
verify_bio_based:
                /* We must use this table as bio-based */
                t->type = DM_TYPE_BIO_BASED;
                if (dm_table_supports_dax(t, device_supports_dax, &page_size) ||
                    (list_empty(devices) && live_md_type == DM_TYPE_DAX_BIO_BASED)) {
                        t->type = DM_TYPE_DAX_BIO_BASED;
                } else {
                        /* Check if upgrading to NVMe bio-based is valid or required */
                        tgt = dm_table_get_immutable_target(t);
                        if (tgt && !tgt->max_io_len && dm_table_does_not_support_partial_completion(t)) {
                                t->type = DM_TYPE_NVME_BIO_BASED;
                                goto verify_rq_based; /* must be stacked directly on NVMe (blk-mq) */
                        } else if (list_empty(devices) && live_md_type == DM_TYPE_NVME_BIO_BASED) {
                                t->type = DM_TYPE_NVME_BIO_BASED;
                        }
                }
                return 0;
        }

        BUG_ON(!request_based); /* No targets in this table */

        t->type = DM_TYPE_REQUEST_BASED;

verify_rq_based:
        /*
         * Request-based dm supports only tables that have a single target now.
         * To support multiple targets, request splitting support is needed,
         * and that needs lots of changes in the block-layer.
         * (e.g. request completion process for partial completion.)
         */
        if (t->num_targets > 1) {
                DMERR("%s DM doesn't support multiple targets",
                      t->type == DM_TYPE_NVME_BIO_BASED ? "nvme bio-based" : "request-based");
                return -EINVAL;
        }

        if (list_empty(devices)) {
                int srcu_idx;
                struct dm_table *live_table = dm_get_live_table(t->md, &srcu_idx);

                /* inherit live table's type */
                if (live_table)
                        t->type = live_table->type;
                dm_put_live_table(t->md, srcu_idx);
                return 0;
        }

        tgt = dm_table_get_immutable_target(t);
        if (!tgt) {
                DMERR("table load rejected: immutable target is required");
                return -EINVAL;
        } else if (tgt->max_io_len) {
                DMERR("table load rejected: immutable target that splits IO is not supported");
                return -EINVAL;
        }

        /* Non-request-stackable devices can't be used for request-based dm */
        if (!tgt->type->iterate_devices ||
            !tgt->type->iterate_devices(tgt, device_is_rq_based, &v)) {
                DMERR("table load rejected: including non-request-stackable devices");
                return -EINVAL;
        }
        if (v.sq_count > 0) {
                DMERR("table load rejected: not all devices are blk-mq request-stackable");
                return -EINVAL;
        }

        return 0;
}

enum dm_queue_mode dm_table_get_type(struct dm_table *t)
{
        return t->type;
}

struct target_type *dm_table_get_immutable_target_type(struct dm_table *t)
{
        return t->immutable_target_type;
}

struct dm_target *dm_table_get_immutable_target(struct dm_table *t)
{
        /* Immutable target is implicitly a singleton */
        if (t->num_targets > 1 ||
            !dm_target_is_immutable(t->targets[0].type))
                return NULL;

        return t->targets;
}

struct dm_target *dm_table_get_wildcard_target(struct dm_table *t)
{
        struct dm_target *ti;
        unsigned i;

        for (i = 0; i < dm_table_get_num_targets(t); i++) {
                ti = dm_table_get_target(t, i);
                if (dm_target_is_wildcard(ti->type))
                        return ti;
        }

        return NULL;
}

bool dm_table_bio_based(struct dm_table *t)
{
        return __table_type_bio_based(dm_table_get_type(t));
}

bool dm_table_request_based(struct dm_table *t)
{
        return __table_type_request_based(dm_table_get_type(t));
}

static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device *md)
{
        enum dm_queue_mode type = dm_table_get_type(t);
        unsigned per_io_data_size = 0;
        unsigned min_pool_size = 0;
        struct dm_target *ti;
        unsigned i;

        if (unlikely(type == DM_TYPE_NONE)) {
                DMWARN("no table type is set, can't allocate mempools");
                return -EINVAL;
        }

        if (__table_type_bio_based(type))
                for (i = 0; i < t->num_targets; i++) {
                        ti = t->targets + i;
                        per_io_data_size = max(per_io_data_size, ti->per_io_data_size);
                        min_pool_size = max(min_pool_size, ti->num_flush_bios);
                }

        t->mempools = dm_alloc_md_mempools(md, type, t->integrity_supported,
                                           per_io_data_size, min_pool_size);
        if (!t->mempools)
                return -ENOMEM;

        return 0;
}

void dm_table_free_md_mempools(struct dm_table *t)
{
        dm_free_md_mempools(t->mempools);
        t->mempools = NULL;
}

struct dm_md_mempools *dm_table_get_md_mempools(struct dm_table *t)
{
        return t->mempools;
}

static int setup_indexes(struct dm_table *t)
{
        int i;
        unsigned int total = 0;
        sector_t *indexes;

        /* allocate the space for *all* the indexes */
        for (i = t->depth - 2; i >= 0; i--) {
                t->counts[i] = dm_div_up(t->counts[i + 1], CHILDREN_PER_NODE);
                total += t->counts[i];
        }

        indexes = (sector_t *) dm_vcalloc(total, (unsigned long) NODE_SIZE);
        if (!indexes)
                return -ENOMEM;

        /* set up internal nodes, bottom-up */
        for (i = t->depth - 2; i >= 0; i--) {
                t->index[i] = indexes;
                indexes += (KEYS_PER_NODE * t->counts[i]);
                setup_btree_index(i, t);
        }

        return 0;
}

/*
 * Builds the btree to index the map.
 */
static int dm_table_build_index(struct dm_table *t)
{
        int r = 0;
        unsigned int leaf_nodes;

        /* how many indexes will the btree have ? */
        leaf_nodes = dm_div_up(t->num_targets, KEYS_PER_NODE);
        t->depth = 1 + int_log(leaf_nodes, CHILDREN_PER_NODE);

        /* leaf layer has already been set up */
        t->counts[t->depth - 1] = leaf_nodes;
        t->index[t->depth - 1] = t->highs;

        if (t->depth >= 2)
                r = setup_indexes(t);

        return r;
}
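
/*
 * Worked example (editor's note): with 64-byte cachelines and an 8-byte
 * sector_t, KEYS_PER_NODE == 8 and CHILDREN_PER_NODE == 9.  For a table
 * of 100 targets:
 *
 *   leaf_nodes = dm_div_up(100, 8)  = 13
 *   depth      = 1 + int_log(13, 9) = 3
 *   counts[2]  = 13  (leaf level; index[2] aliases t->highs)
 *   counts[1]  = dm_div_up(13, 9)   = 2
 *   counts[0]  = dm_div_up(2, 9)    = 1
 *
 * so setup_indexes() allocates three nodes' worth of internal index
 * space and fills it bottom-up.
 */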

static bool integrity_profile_exists(struct gendisk *disk)
{
        return !!blk_get_integrity(disk);
}

/*
 * Get a disk whose integrity profile reflects the table's profile.
 * Returns NULL if integrity support was inconsistent or unavailable.
 */
static struct gendisk * dm_table_get_integrity_disk(struct dm_table *t)
{
        struct list_head *devices = dm_table_get_devices(t);
        struct dm_dev_internal *dd = NULL;
        struct gendisk *prev_disk = NULL, *template_disk = NULL;
        unsigned i;

        for (i = 0; i < dm_table_get_num_targets(t); i++) {
                struct dm_target *ti = dm_table_get_target(t, i);
                if (!dm_target_passes_integrity(ti->type))
                        goto no_integrity;
        }

        list_for_each_entry(dd, devices, list) {
                template_disk = dd->dm_dev->bdev->bd_disk;
                if (!integrity_profile_exists(template_disk))
                        goto no_integrity;
                else if (prev_disk &&
                         blk_integrity_compare(prev_disk, template_disk) < 0)
                        goto no_integrity;
                prev_disk = template_disk;
        }

        return template_disk;

no_integrity:
        if (prev_disk)
                DMWARN("%s: integrity not set: %s and %s profile mismatch",
                       dm_device_name(t->md),
                       prev_disk->disk_name,
                       template_disk->disk_name);
        return NULL;
}

/*
 * Register the mapped device for blk_integrity support if the
 * underlying devices have an integrity profile.  But all devices may
 * not have matching profiles (checking all devices isn't reliable
 * during table load because this table may use other DM device(s) which
 * must be resumed before they will have an initialized integrity
 * profile).  Consequently, stacked DM devices force a two-stage
 * integrity profile validation: first pass during table load, final
 * pass during resume.
 */
static int dm_table_register_integrity(struct dm_table *t)
{
        struct mapped_device *md = t->md;
        struct gendisk *template_disk = NULL;

        /* If target handles integrity itself do not register it here. */
        if (t->integrity_added)
                return 0;

        template_disk = dm_table_get_integrity_disk(t);
        if (!template_disk)
                return 0;

        if (!integrity_profile_exists(dm_disk(md))) {
                t->integrity_supported = true;
                /*
                 * Register integrity profile during table load; we can do
                 * this because the final profile must match during resume.
                 */
                blk_integrity_register(dm_disk(md),
                                       blk_get_integrity(template_disk));
                return 0;
        }

        /*
         * If DM device already has an initialized integrity
         * profile the new profile should not conflict.
         */
        if (blk_integrity_compare(dm_disk(md), template_disk) < 0) {
                DMWARN("%s: conflict with existing integrity profile: "
                       "%s profile mismatch",
                       dm_device_name(t->md),
                       template_disk->disk_name);
                return 1;
        }

        /* Preserve existing integrity profile */
        t->integrity_supported = true;
        return 0;
}

/*
 * Prepares the table for use by building the indices,
 * setting the type, and allocating mempools.
 */
int dm_table_complete(struct dm_table *t)
{
        int r;

        r = dm_table_determine_type(t);
        if (r) {
                DMERR("unable to determine table type");
                return r;
        }

        r = dm_table_build_index(t);
        if (r) {
                DMERR("unable to build btrees");
                return r;
        }

        r = dm_table_register_integrity(t);
        if (r) {
                DMERR("could not register integrity profile.");
                return r;
        }

        r = dm_table_alloc_md_mempools(t, t->md);
        if (r)
                DMERR("unable to allocate mempools");

        return r;
}

static DEFINE_MUTEX(_event_lock);
void dm_table_event_callback(struct dm_table *t,
                             void (*fn)(void *), void *context)
{
        mutex_lock(&_event_lock);
        t->event_fn = fn;
        t->event_context = context;
        mutex_unlock(&_event_lock);
}

void dm_table_event(struct dm_table *t)
{
        /*
         * You can no longer call dm_table_event() from interrupt
         * context, use a bottom half instead.
         */
        BUG_ON(in_interrupt());

        mutex_lock(&_event_lock);
        if (t->event_fn)
                t->event_fn(t->event_context);
        mutex_unlock(&_event_lock);
}
EXPORT_SYMBOL(dm_table_event);

inline sector_t dm_table_get_size(struct dm_table *t)
{
        return t->num_targets ? (t->highs[t->num_targets - 1] + 1) : 0;
}
EXPORT_SYMBOL(dm_table_get_size);

struct dm_target *dm_table_get_target(struct dm_table *t, unsigned int index)
{
        if (index >= t->num_targets)
                return NULL;

        return t->targets + index;
}

/*
 * Search the btree for the correct target.
 *
 * Caller should check returned pointer with dm_target_is_valid()
 * to trap I/O beyond end of device.
 */
struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector)
{
        unsigned int l, n = 0, k = 0;
        sector_t *node;

        if (unlikely(sector >= dm_table_get_size(t)))
                return &t->targets[t->num_targets];

        for (l = 0; l < t->depth; l++) {
                n = get_child(n, k);
                node = get_node(t, l, n);

                for (k = 0; k < KEYS_PER_NODE; k++)
                        if (node[k] >= sector)
                                break;
        }

        return &t->targets[(KEYS_PER_NODE * n) + k];
}
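
/*
 * Lookup sketch (editor's note): continuing the 100-target example
 * after dm_table_build_index() above, a lookup takes depth == 3 steps:
 * scan the root node for the first key >= sector, descend through
 * get_child() into one of the two middle nodes, scan again, then land
 * in the leaf level where (KEYS_PER_NODE * n) + k indexes directly into
 * t->targets[].  Each node is one cacheline, so a lookup touches at
 * most MAX_DEPTH cachelines of index data.
 */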

static int count_device(struct dm_target *ti, struct dm_dev *dev,
                        sector_t start, sector_t len, void *data)
{
        unsigned *num_devices = data;

        (*num_devices)++;

        return 0;
}

/*
 * Check whether a table has no data devices attached using each
 * target's iterate_devices method.
 * Returns false if the result is unknown because a target doesn't
 * support iterate_devices.
 */
bool dm_table_has_no_data_devices(struct dm_table *table)
{
        struct dm_target *ti;
        unsigned i, num_devices;

        for (i = 0; i < dm_table_get_num_targets(table); i++) {
                ti = dm_table_get_target(table, i);

                if (!ti->type->iterate_devices)
                        return false;

                num_devices = 0;
                ti->type->iterate_devices(ti, count_device, &num_devices);
                if (num_devices)
                        return false;
        }

        return true;
}

static int device_is_zoned_model(struct dm_target *ti, struct dm_dev *dev,
                                 sector_t start, sector_t len, void *data)
{
        struct request_queue *q = bdev_get_queue(dev->bdev);
        enum blk_zoned_model *zoned_model = data;

        return q && blk_queue_zoned_model(q) == *zoned_model;
}

static bool dm_table_supports_zoned_model(struct dm_table *t,
                                          enum blk_zoned_model zoned_model)
{
        struct dm_target *ti;
        unsigned i;

        for (i = 0; i < dm_table_get_num_targets(t); i++) {
                ti = dm_table_get_target(t, i);

                if (zoned_model == BLK_ZONED_HM &&
                    !dm_target_supports_zoned_hm(ti->type))
                        return false;

                if (!ti->type->iterate_devices ||
                    !ti->type->iterate_devices(ti, device_is_zoned_model, &zoned_model))
                        return false;
        }

        return true;
}

static int device_matches_zone_sectors(struct dm_target *ti, struct dm_dev *dev,
                                       sector_t start, sector_t len, void *data)
{
        struct request_queue *q = bdev_get_queue(dev->bdev);
        unsigned int *zone_sectors = data;

        return q && blk_queue_zone_sectors(q) == *zone_sectors;
}

static bool dm_table_matches_zone_sectors(struct dm_table *t,
                                          unsigned int zone_sectors)
{
        struct dm_target *ti;
        unsigned i;

        for (i = 0; i < dm_table_get_num_targets(t); i++) {
                ti = dm_table_get_target(t, i);

                if (!ti->type->iterate_devices ||
                    !ti->type->iterate_devices(ti, device_matches_zone_sectors, &zone_sectors))
                        return false;
        }

        return true;
}

static int validate_hardware_zoned_model(struct dm_table *table,
                                         enum blk_zoned_model zoned_model,
                                         unsigned int zone_sectors)
{
        if (zoned_model == BLK_ZONED_NONE)
                return 0;

        if (!dm_table_supports_zoned_model(table, zoned_model)) {
                DMERR("%s: zoned model is not consistent across all devices",
                      dm_device_name(table->md));
                return -EINVAL;
        }

        /* Check zone size validity and compatibility */
        if (!zone_sectors || !is_power_of_2(zone_sectors))
                return -EINVAL;

        if (!dm_table_matches_zone_sectors(table, zone_sectors)) {
                DMERR("%s: zone sectors is not consistent across all devices",
                      dm_device_name(table->md));
                return -EINVAL;
        }

        return 0;
}

754c5fc7 MS |
1503 | /* |
1504 | * Establish the new table's queue_limits and validate them. | |
1505 | */ | |
1506 | int dm_calculate_queue_limits(struct dm_table *table, | |
1507 | struct queue_limits *limits) | |
1508 | { | |
3c120169 | 1509 | struct dm_target *ti; |
754c5fc7 | 1510 | struct queue_limits ti_limits; |
3c120169 | 1511 | unsigned i; |
dd88d313 DLM |
1512 | enum blk_zoned_model zoned_model = BLK_ZONED_NONE; |
1513 | unsigned int zone_sectors = 0; | |
754c5fc7 | 1514 | |
b1bd055d | 1515 | blk_set_stacking_limits(limits); |
754c5fc7 | 1516 | |
3c120169 | 1517 | for (i = 0; i < dm_table_get_num_targets(table); i++) { |
b1bd055d | 1518 | blk_set_stacking_limits(&ti_limits); |
754c5fc7 | 1519 | |
3c120169 | 1520 | ti = dm_table_get_target(table, i); |
754c5fc7 MS |
1521 | |
1522 | if (!ti->type->iterate_devices) | |
1523 | goto combine_limits; | |
1524 | ||
1525 | /* | |
1526 | * Combine queue limits of all the devices this target uses. | |
1527 | */ | |
1528 | ti->type->iterate_devices(ti, dm_set_device_limits, | |
1529 | &ti_limits); | |
1530 | ||
dd88d313 DLM |
1531 | if (zoned_model == BLK_ZONED_NONE && ti_limits.zoned != BLK_ZONED_NONE) { |
1532 | /* | |
1533 | * After stacking all limits, validate all devices | |
1534 | * in table support this zoned model and zone sectors. | |
1535 | */ | |
1536 | zoned_model = ti_limits.zoned; | |
1537 | zone_sectors = ti_limits.chunk_sectors; | |
1538 | } | |
1539 | ||
40bea431 MS |
1540 | /* Set I/O hints portion of queue limits */ |
1541 | if (ti->type->io_hints) | |
1542 | ti->type->io_hints(ti, &ti_limits); | |
1543 | ||
754c5fc7 MS |
1544 | /* |
1545 | * Check each device area is consistent with the target's | |
1546 | * overall queue limits. | |
1547 | */ | |
f6a1ed10 MP |
1548 | if (ti->type->iterate_devices(ti, device_area_is_invalid, |
1549 | &ti_limits)) | |
754c5fc7 MS |
1550 | return -EINVAL; |
1551 | ||
1552 | combine_limits: | |
1553 | /* | |
1554 | * Merge this target's queue limits into the overall limits | |
1555 | * for the table. | |
1556 | */ | |
1557 | if (blk_stack_limits(limits, &ti_limits, 0) < 0) | |
b27d7f16 | 1558 | DMWARN("%s: adding target device " |
754c5fc7 | 1559 | "(start sect %llu len %llu) " |
b27d7f16 | 1560 | "caused an alignment inconsistency", |
754c5fc7 MS |
1561 | dm_device_name(table->md), |
1562 | (unsigned long long) ti->begin, | |
1563 | (unsigned long long) ti->len); | |
dd88d313 DLM |
1564 | |
1565 | /* | |
1566 | * FIXME: this should likely be moved to blk_stack_limits(), would | |
1567 | * also eliminate limits->zoned stacking hack in dm_set_device_limits() | |
1568 | */ | |
1569 | if (limits->zoned == BLK_ZONED_NONE && ti_limits.zoned != BLK_ZONED_NONE) { | |
1570 | /* | |
1571 | * By default, the stacked limits zoned model is set to | |
1572 | * BLK_ZONED_NONE in blk_set_stacking_limits(). Update | |
1573 | * this model using the first target model reported | |
1574 | * that is not BLK_ZONED_NONE. This will be either the | |
1575 | * first target device zoned model or the model reported | |
1576 | * by the target .io_hints. | |
1577 | */ | |
1578 | limits->zoned = ti_limits.zoned; | |
1579 | } | |
754c5fc7 MS |
1580 | } |
1581 | ||
dd88d313 DLM |
1582 | /* |
1583 | * Verify that the zoned model and zone sectors, as determined before | |
1584 | * any .io_hints override, are the same across all devices in the table. | |
1585 | * - this is especially relevant if .io_hints is emulating a disk-managed | |
1586 | * zoned model (aka BLK_ZONED_NONE) on host-managed zoned block devices. | |
1587 | * BUT... | |
1588 | */ | |
1589 | if (limits->zoned != BLK_ZONED_NONE) { | |
1590 | /* | |
1591 | * ...IF the above limits stacking determined a zoned model | |
1592 | * validate that all of the table's devices conform to it. | |
1593 | */ | |
1594 | zoned_model = limits->zoned; | |
1595 | zone_sectors = limits->chunk_sectors; | |
1596 | } | |
1597 | if (validate_hardware_zoned_model(table, zoned_model, zone_sectors)) | |
1598 | return -EINVAL; | |
1599 | ||
754c5fc7 MS |
1600 | return validate_hardware_logical_block_alignment(table, limits); |
1601 | } | |
1602 | ||
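/*
 * Illustrative sketch, not part of dm-table.c: a hypothetical target's
 * .io_hints callback, as invoked by dm_calculate_queue_limits() above.
 * The hook runs after the target's devices have been stacked into
 * ti_limits and may tighten or override them. The function name and the
 * 64KiB chunk size are assumptions for illustration only.
 */
static void example_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
	unsigned int chunk_bytes = 64 * 1024;	/* assumed internal chunking */

	/* Advertise chunk-aligned, chunk-sized I/O as preferred. */
	blk_limits_io_min(limits, chunk_bytes);
	blk_limits_io_opt(limits, chunk_bytes);
}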
9c47008d | 1603 | /* |
25520d55 MP |
1604 | * Verify that all devices have an integrity profile that matches the |
1605 | * DM device's registered integrity profile. If the profiles don't | |
1606 | * match then unregister the DM device's integrity profile. | |
9c47008d | 1607 | */ |
25520d55 | 1608 | static void dm_table_verify_integrity(struct dm_table *t) |
9c47008d | 1609 | { |
a63a5cf8 | 1610 | struct gendisk *template_disk = NULL; |
9c47008d | 1611 | |
9b4b5a79 MB |
1612 | if (t->integrity_added) |
1613 | return; | |
1614 | ||
25520d55 MP |
1615 | if (t->integrity_supported) { |
1616 | /* | |
1617 | * Verify that the original integrity profile | |
1618 | * matches all the devices in this table. | |
1619 | */ | |
1620 | template_disk = dm_table_get_integrity_disk(t); | |
1621 | if (template_disk && | |
1622 | blk_integrity_compare(dm_disk(t->md), template_disk) >= 0) | |
1623 | return; | |
1624 | } | |
9c47008d | 1625 | |
25520d55 | 1626 | if (integrity_profile_exists(dm_disk(t->md))) { |
876fbba1 MS |
1627 | DMWARN("%s: unable to establish an integrity profile", |
1628 | dm_device_name(t->md)); | |
25520d55 MP |
1629 | blk_integrity_unregister(dm_disk(t->md)); |
1630 | } | |
9c47008d MP |
1631 | } |
1632 | ||
ed8b752b MS |
1633 | static int device_flush_capable(struct dm_target *ti, struct dm_dev *dev, |
1634 | sector_t start, sector_t len, void *data) | |
1635 | { | |
c888a8f9 | 1636 | unsigned long flush = (unsigned long) data; |
ed8b752b MS |
1637 | struct request_queue *q = bdev_get_queue(dev->bdev); |
1638 | ||
c888a8f9 | 1639 | return q && (q->queue_flags & flush); |
ed8b752b MS |
1640 | } |
1641 | ||
c888a8f9 | 1642 | static bool dm_table_supports_flush(struct dm_table *t, unsigned long flush) |
ed8b752b MS |
1643 | { |
1644 | struct dm_target *ti; | |
3c120169 | 1645 | unsigned i; |
ed8b752b MS |
1646 | |
1647 | /* | |
1648 | * Require at least one underlying device to support flushes. | |
1649 | * t->devices includes internal dm devices such as mirror logs | |
1650 | * so we need to use iterate_devices here, which targets | |
1651 | * supporting flushes must provide. | |
1652 | */ | |
3c120169 MP |
1653 | for (i = 0; i < dm_table_get_num_targets(t); i++) { |
1654 | ti = dm_table_get_target(t, i); | |
ed8b752b | 1655 | |
55a62eef | 1656 | if (!ti->num_flush_bios) |
ed8b752b MS |
1657 | continue; |
1658 | ||
0e9c24ed | 1659 | if (ti->flush_supported) |
7f61f5a0 | 1660 | return true; |
0e9c24ed | 1661 | |
ed8b752b | 1662 | if (ti->type->iterate_devices && |
c888a8f9 | 1663 | ti->type->iterate_devices(ti, device_flush_capable, (void *) flush)) |
7f61f5a0 | 1664 | return true; |
ed8b752b MS |
1665 | } |
1666 | ||
7f61f5a0 | 1667 | return false; |
ed8b752b MS |
1668 | } |
1669 | ||
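/*
 * Illustrative sketch, not part of dm-table.c: how a target constructor
 * would opt in to the flush handling probed by dm_table_supports_flush()
 * above. A target that merely forwards flushes sets num_flush_bios; one
 * that can complete flushes itself also sets flush_supported, which
 * short-circuits the per-device scan. Names here are assumptions.
 */
static int example_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	ti->num_flush_bios = 1;		/* one flush bio is sent per flush */
	ti->flush_supported = true;	/* target completes flushes itself */
	return 0;
}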
273752c9 VG |
1670 | static int device_dax_write_cache_enabled(struct dm_target *ti, |
1671 | struct dm_dev *dev, sector_t start, | |
1672 | sector_t len, void *data) | |
1673 | { | |
1674 | struct dax_device *dax_dev = dev->dax_dev; | |
1675 | ||
1676 | if (!dax_dev) | |
1677 | return false; | |
1678 | ||
1679 | if (dax_write_cache_enabled(dax_dev)) | |
1680 | return true; | |
1681 | return false; | |
1682 | } | |
1683 | ||
1684 | static int dm_table_supports_dax_write_cache(struct dm_table *t) | |
1685 | { | |
1686 | struct dm_target *ti; | |
1687 | unsigned i; | |
1688 | ||
1689 | for (i = 0; i < dm_table_get_num_targets(t); i++) { | |
1690 | ti = dm_table_get_target(t, i); | |
1691 | ||
1692 | if (ti->type->iterate_devices && | |
1693 | ti->type->iterate_devices(ti, | |
1694 | device_dax_write_cache_enabled, NULL)) | |
1695 | return true; | |
1696 | } | |
1697 | ||
1698 | return false; | |
1699 | } | |
1700 | ||
4693c966 MSB |
1701 | static int device_is_nonrot(struct dm_target *ti, struct dm_dev *dev, |
1702 | sector_t start, sector_t len, void *data) | |
1703 | { | |
1704 | struct request_queue *q = bdev_get_queue(dev->bdev); | |
1705 | ||
1706 | return q && blk_queue_nonrot(q); | |
1707 | } | |
1708 | ||
c3c4555e MB |
1709 | static int device_is_not_random(struct dm_target *ti, struct dm_dev *dev, |
1710 | sector_t start, sector_t len, void *data) | |
1711 | { | |
1712 | struct request_queue *q = bdev_get_queue(dev->bdev); | |
1713 | ||
1714 | return q && !blk_queue_add_random(q); | |
1715 | } | |
1716 | ||
1717 | static bool dm_table_all_devices_attribute(struct dm_table *t, | |
1718 | iterate_devices_callout_fn func) | |
4693c966 MSB |
1719 | { |
1720 | struct dm_target *ti; | |
3c120169 | 1721 | unsigned i; |
4693c966 | 1722 | |
3c120169 MP |
1723 | for (i = 0; i < dm_table_get_num_targets(t); i++) { |
1724 | ti = dm_table_get_target(t, i); | |
4693c966 MSB |
1725 | |
1726 | if (!ti->type->iterate_devices || | |
c3c4555e | 1727 | !ti->type->iterate_devices(ti, func, NULL)) |
7f61f5a0 | 1728 | return false; |
4693c966 MSB |
1729 | } |
1730 | ||
7f61f5a0 | 1731 | return true; |
4693c966 MSB |
1732 | } |
1733 | ||
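/*
 * Illustrative sketch, not part of dm-table.c: dm_table_all_devices_attribute()
 * turns any per-device predicate into a whole-table check, returning true only
 * if every target can iterate its devices and every device passes. A
 * hypothetical predicate in the same style as device_is_nonrot() above:
 */
static int example_device_is_nonrot_discard(struct dm_target *ti, struct dm_dev *dev,
					    sector_t start, sector_t len, void *data)
{
	struct request_queue *q = bdev_get_queue(dev->bdev);

	/* e.g. require non-rotational queues that also support discard */
	return q && blk_queue_nonrot(q) && blk_queue_discard(q);
}
/* usage: dm_table_all_devices_attribute(t, example_device_is_nonrot_discard) */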
22c11858 MS |
1734 | static int device_no_partial_completion(struct dm_target *ti, struct dm_dev *dev, |
1735 | sector_t start, sector_t len, void *data) | |
1736 | { | |
1737 | char b[BDEVNAME_SIZE]; | |
1738 | ||
1739 | /* For now, NVMe devices are the only devices of this class */ | |
99243b92 | 1740 | return (strncmp(bdevname(dev->bdev, b), "nvme", 4) == 0); |
22c11858 MS |
1741 | } |
1742 | ||
1743 | static bool dm_table_does_not_support_partial_completion(struct dm_table *t) | |
1744 | { | |
1745 | return dm_table_all_devices_attribute(t, device_no_partial_completion); | |
1746 | } | |
1747 | ||
d54eaa5a MS |
1748 | static int device_not_write_same_capable(struct dm_target *ti, struct dm_dev *dev, |
1749 | sector_t start, sector_t len, void *data) | |
1750 | { | |
1751 | struct request_queue *q = bdev_get_queue(dev->bdev); | |
1752 | ||
1753 | return q && !q->limits.max_write_same_sectors; | |
1754 | } | |
1755 | ||
1756 | static bool dm_table_supports_write_same(struct dm_table *t) | |
1757 | { | |
1758 | struct dm_target *ti; | |
3c120169 | 1759 | unsigned i; |
d54eaa5a | 1760 | |
3c120169 MP |
1761 | for (i = 0; i < dm_table_get_num_targets(t); i++) { |
1762 | ti = dm_table_get_target(t, i); | |
d54eaa5a | 1763 | |
55a62eef | 1764 | if (!ti->num_write_same_bios) |
d54eaa5a MS |
1765 | return false; |
1766 | ||
1767 | if (!ti->type->iterate_devices || | |
dc019b21 | 1768 | ti->type->iterate_devices(ti, device_not_write_same_capable, NULL)) |
d54eaa5a MS |
1769 | return false; |
1770 | } | |
1771 | ||
1772 | return true; | |
1773 | } | |
1774 | ||
ac62d620 CH |
1775 | static int device_not_write_zeroes_capable(struct dm_target *ti, struct dm_dev *dev, |
1776 | sector_t start, sector_t len, void *data) | |
1777 | { | |
1778 | struct request_queue *q = bdev_get_queue(dev->bdev); | |
1779 | ||
1780 | return q && !q->limits.max_write_zeroes_sectors; | |
1781 | } | |
1782 | ||
1783 | static bool dm_table_supports_write_zeroes(struct dm_table *t) | |
1784 | { | |
1785 | struct dm_target *ti; | |
1786 | unsigned i = 0; | |
1787 | ||
1788 | while (i < dm_table_get_num_targets(t)) { | |
1789 | ti = dm_table_get_target(t, i++); | |
1790 | ||
1791 | if (!ti->num_write_zeroes_bios) | |
1792 | return false; | |
1793 | ||
1794 | if (!ti->type->iterate_devices || | |
1795 | ti->type->iterate_devices(ti, device_not_write_zeroes_capable, NULL)) | |
1796 | return false; | |
1797 | } | |
1798 | ||
1799 | return true; | |
1800 | } | |
1801 | ||
8a74d29d MS |
1802 | static int device_not_discard_capable(struct dm_target *ti, struct dm_dev *dev, |
1803 | sector_t start, sector_t len, void *data) | |
a7ffb6a5 MP |
1804 | { |
1805 | struct request_queue *q = bdev_get_queue(dev->bdev); | |
1806 | ||
8a74d29d | 1807 | return q && !blk_queue_discard(q); |
a7ffb6a5 MP |
1808 | } |
1809 | ||
1810 | static bool dm_table_supports_discards(struct dm_table *t) | |
1811 | { | |
1812 | struct dm_target *ti; | |
3c120169 | 1813 | unsigned i; |
a7ffb6a5 | 1814 | |
3c120169 MP |
1815 | for (i = 0; i < dm_table_get_num_targets(t); i++) { |
1816 | ti = dm_table_get_target(t, i); | |
a7ffb6a5 MP |
1817 | |
1818 | if (!ti->num_discard_bios) | |
8a74d29d | 1819 | return false; |
a7ffb6a5 | 1820 | |
8a74d29d MS |
1821 | /* |
1822 | * Either the target provides discard support (as implied by setting | |
1823 | * 'discards_supported') or it relies on _all_ data devices having | |
1824 | * discard support. | |
1825 | */ | |
1826 | if (!ti->discards_supported && | |
1827 | (!ti->type->iterate_devices || | |
1828 | ti->type->iterate_devices(ti, device_not_discard_capable, NULL))) | |
1829 | return false; | |
a7ffb6a5 MP |
1830 | } |
1831 | ||
8a74d29d | 1832 | return true; |
a7ffb6a5 MP |
1833 | } |
1834 | ||
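/*
 * Illustrative sketch, not part of dm-table.c: per the either/or comment in
 * dm_table_supports_discards() above, a target may claim discard support
 * directly rather than relying on every data device. A thin-provisioning
 * style target that satisfies discards from its own metadata might set,
 * in its constructor (names assumed):
 */
static int example_thin_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	ti->num_discard_bios = 1;
	ti->discards_supported = true;	/* handled by target, not devices */
	return 0;
}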
00716545 DS |
1835 | static int device_not_secure_erase_capable(struct dm_target *ti, |
1836 | struct dm_dev *dev, sector_t start, | |
1837 | sector_t len, void *data) | |
1838 | { | |
1839 | struct request_queue *q = bdev_get_queue(dev->bdev); | |
1840 | ||
1841 | return q && !blk_queue_secure_erase(q); | |
1842 | } | |
1843 | ||
1844 | static bool dm_table_supports_secure_erase(struct dm_table *t) | |
1845 | { | |
1846 | struct dm_target *ti; | |
1847 | unsigned int i; | |
1848 | ||
1849 | for (i = 0; i < dm_table_get_num_targets(t); i++) { | |
1850 | ti = dm_table_get_target(t, i); | |
1851 | ||
1852 | if (!ti->num_secure_erase_bios) | |
1853 | return false; | |
1854 | ||
1855 | if (!ti->type->iterate_devices || | |
1856 | ti->type->iterate_devices(ti, device_not_secure_erase_capable, NULL)) | |
1857 | return false; | |
1858 | } | |
1859 | ||
1860 | return true; | |
1861 | } | |
1862 | ||
eb40c0ac ID |
1863 | static int device_requires_stable_pages(struct dm_target *ti, |
1864 | struct dm_dev *dev, sector_t start, | |
1865 | sector_t len, void *data) | |
1866 | { | |
1867 | struct request_queue *q = bdev_get_queue(dev->bdev); | |
1868 | ||
1869 | return q && bdi_cap_stable_pages_required(q->backing_dev_info); | |
1870 | } | |
1871 | ||
1872 | /* | |
1873 | * If any underlying device requires stable pages, a table must require | |
1874 | * them as well. Only targets that support iterate_devices are considered: | |
1875 | * we don't want error, zero, etc. to require stable pages. |
1876 | */ | |
1877 | static bool dm_table_requires_stable_pages(struct dm_table *t) | |
1878 | { | |
1879 | struct dm_target *ti; | |
1880 | unsigned i; | |
1881 | ||
1882 | for (i = 0; i < dm_table_get_num_targets(t); i++) { | |
1883 | ti = dm_table_get_target(t, i); | |
1884 | ||
1885 | if (ti->type->iterate_devices && | |
1886 | ti->type->iterate_devices(ti, device_requires_stable_pages, NULL)) | |
1887 | return true; | |
1888 | } | |
1889 | ||
1890 | return false; | |
1891 | } | |
1892 | ||
754c5fc7 MS |
1893 | void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, |
1894 | struct queue_limits *limits) | |
1da177e4 | 1895 | { |
519a7e16 | 1896 | bool wc = false, fua = false; |
2e9ee095 | 1897 | int page_size = PAGE_SIZE; |
ed8b752b | 1898 | |
1da177e4 | 1899 | /* |
1197764e | 1900 | * Copy table's limits to the DM device's request_queue |
1da177e4 | 1901 | */ |
754c5fc7 | 1902 | q->limits = *limits; |
c9a3f6d6 | 1903 | |
5d47c89f | 1904 | if (!dm_table_supports_discards(t)) { |
8b904b5b | 1905 | blk_queue_flag_clear(QUEUE_FLAG_DISCARD, q); |
5d47c89f MS |
1906 | /* Must also clear discard limits... */ |
1907 | q->limits.max_discard_sectors = 0; | |
1908 | q->limits.max_hw_discard_sectors = 0; | |
1909 | q->limits.discard_granularity = 0; | |
1910 | q->limits.discard_alignment = 0; | |
1911 | q->limits.discard_misaligned = 0; | |
1912 | } else | |
8b904b5b | 1913 | blk_queue_flag_set(QUEUE_FLAG_DISCARD, q); |
5ae89a87 | 1914 | |
00716545 | 1915 | if (dm_table_supports_secure_erase(t)) |
83c7c18b | 1916 | blk_queue_flag_set(QUEUE_FLAG_SECERASE, q); |
00716545 | 1917 | |
c888a8f9 | 1918 | if (dm_table_supports_flush(t, (1UL << QUEUE_FLAG_WC))) { |
519a7e16 | 1919 | wc = true; |
c888a8f9 | 1920 | if (dm_table_supports_flush(t, (1UL << QUEUE_FLAG_FUA))) |
519a7e16 | 1921 | fua = true; |
ed8b752b | 1922 | } |
519a7e16 | 1923 | blk_queue_write_cache(q, wc, fua); |
ed8b752b | 1924 | |
2e9ee095 | 1925 | if (dm_table_supports_dax(t, device_supports_dax, &page_size)) { |
8b904b5b | 1926 | blk_queue_flag_set(QUEUE_FLAG_DAX, q); |
9c50a98f | 1927 | if (dm_table_supports_dax(t, device_dax_synchronous, NULL)) |
2e9ee095 PG |
1928 | set_dax_synchronous(t->md->dax_dev); |
1929 | } | |
dbc62659 RZ |
1930 | else |
1931 | blk_queue_flag_clear(QUEUE_FLAG_DAX, q); | |
1932 | ||
273752c9 VG |
1933 | if (dm_table_supports_dax_write_cache(t)) |
1934 | dax_write_cache(t->md->dax_dev, true); | |
1935 | ||
c3c4555e MB |
1936 | /* Ensure that all underlying devices are non-rotational. */ |
1937 | if (dm_table_all_devices_attribute(t, device_is_nonrot)) | |
8b904b5b | 1938 | blk_queue_flag_set(QUEUE_FLAG_NONROT, q); |
4693c966 | 1939 | else |
8b904b5b | 1940 | blk_queue_flag_clear(QUEUE_FLAG_NONROT, q); |
4693c966 | 1941 | |
d54eaa5a MS |
1942 | if (!dm_table_supports_write_same(t)) |
1943 | q->limits.max_write_same_sectors = 0; | |
ac62d620 CH |
1944 | if (!dm_table_supports_write_zeroes(t)) |
1945 | q->limits.max_write_zeroes_sectors = 0; | |
c1a94672 | 1946 | |
25520d55 | 1947 | dm_table_verify_integrity(t); |
e6ee8c0b | 1948 | |
eb40c0ac ID |
1949 | /* |
1950 | * Some devices don't use blk_integrity but still want stable pages | |
1951 | * because they do their own checksumming. | |
1952 | */ | |
1953 | if (dm_table_requires_stable_pages(t)) | |
1954 | q->backing_dev_info->capabilities |= BDI_CAP_STABLE_WRITES; | |
1955 | else | |
1956 | q->backing_dev_info->capabilities &= ~BDI_CAP_STABLE_WRITES; | |
1957 | ||
c3c4555e MB |
1958 | /* |
1959 | * Determine whether or not this queue's I/O timings contribute | |
1960 | * to the entropy pool. Only request-based targets use this. |
1961 | * Clear QUEUE_FLAG_ADD_RANDOM if any underlying device does not | |
1962 | * have it set. | |
1963 | */ | |
1964 | if (blk_queue_add_random(q) && dm_table_all_devices_attribute(t, device_is_not_random)) | |
8b904b5b | 1965 | blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q); |
bf505456 DLM |
1966 | |
1967 | /* | |
1968 | * For a zoned target, the number of zones must be updated so that the |
1969 | * correct value is exposed in sysfs queue/nr_zones. For a BIO based |
1970 | * target, this is all that is needed. For a request based target, the | |
1971 | * queue zone bitmaps must also be updated. | |
1972 | * Use blk_revalidate_disk_zones() to handle this. | |
1973 | */ | |
1974 | if (blk_queue_is_zoned(q)) | |
1975 | blk_revalidate_disk_zones(t->md->disk); | |
c6d6e9b0 JK |
1976 | |
1977 | /* Allow reads to exceed readahead limits */ | |
1978 | q->backing_dev_info->io_pages = limits->max_sectors >> (PAGE_SHIFT - 9); | |
1da177e4 LT |
1979 | } |
1980 | ||
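/*
 * Illustrative sketch, not part of dm-table.c: the calling sequence assumed
 * by dm_calculate_queue_limits() and dm_table_set_restrictions(), loosely
 * following what dm.c does when binding a new table. Limits are computed
 * and validated first; only on success are they applied to the live
 * request_queue. The wrapper name is hypothetical.
 */
static int example_bind_table(struct dm_table *t, struct request_queue *q)
{
	struct queue_limits limits;
	int r;

	r = dm_calculate_queue_limits(t, &limits);
	if (r)
		return r;	/* inconsistent zoning or misaligned devices */

	dm_table_set_restrictions(t, q, &limits);
	return 0;
}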
1981 | unsigned int dm_table_get_num_targets(struct dm_table *t) | |
1982 | { | |
1983 | return t->num_targets; | |
1984 | } | |
1985 | ||
1986 | struct list_head *dm_table_get_devices(struct dm_table *t) | |
1987 | { | |
1988 | return &t->devices; | |
1989 | } | |
1990 | ||
aeb5d727 | 1991 | fmode_t dm_table_get_mode(struct dm_table *t) |
1da177e4 LT |
1992 | { |
1993 | return t->mode; | |
1994 | } | |
08649012 | 1995 | EXPORT_SYMBOL(dm_table_get_mode); |
1da177e4 | 1996 | |
d67ee213 MS |
1997 | enum suspend_mode { |
1998 | PRESUSPEND, | |
1999 | PRESUSPEND_UNDO, | |
2000 | POSTSUSPEND, | |
2001 | }; | |
2002 | ||
2003 | static void suspend_targets(struct dm_table *t, enum suspend_mode mode) | |
1da177e4 LT |
2004 | { |
2005 | int i = t->num_targets; | |
2006 | struct dm_target *ti = t->targets; | |
2007 | ||
1ea0654e BVA |
2008 | lockdep_assert_held(&t->md->suspend_lock); |
2009 | ||
1da177e4 | 2010 | while (i--) { |
d67ee213 MS |
2011 | switch (mode) { |
2012 | case PRESUSPEND: | |
2013 | if (ti->type->presuspend) | |
2014 | ti->type->presuspend(ti); | |
2015 | break; | |
2016 | case PRESUSPEND_UNDO: | |
2017 | if (ti->type->presuspend_undo) | |
2018 | ti->type->presuspend_undo(ti); | |
2019 | break; | |
2020 | case POSTSUSPEND: | |
1da177e4 LT |
2021 | if (ti->type->postsuspend) |
2022 | ti->type->postsuspend(ti); | |
d67ee213 MS |
2023 | break; |
2024 | } | |
1da177e4 LT |
2025 | ti++; |
2026 | } | |
2027 | } | |
2028 | ||
2029 | void dm_table_presuspend_targets(struct dm_table *t) | |
2030 | { | |
cf222b37 AK |
2031 | if (!t) |
2032 | return; | |
2033 | ||
d67ee213 MS |
2034 | suspend_targets(t, PRESUSPEND); |
2035 | } | |
2036 | ||
2037 | void dm_table_presuspend_undo_targets(struct dm_table *t) | |
2038 | { | |
2039 | if (!t) | |
2040 | return; | |
2041 | ||
2042 | suspend_targets(t, PRESUSPEND_UNDO); | |
1da177e4 LT |
2043 | } |
2044 | ||
2045 | void dm_table_postsuspend_targets(struct dm_table *t) | |
2046 | { | |
cf222b37 AK |
2047 | if (!t) |
2048 | return; | |
2049 | ||
d67ee213 | 2050 | suspend_targets(t, POSTSUSPEND); |
1da177e4 LT |
2051 | } |
2052 | ||
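/*
 * Illustrative sketch, not part of dm-table.c: the per-target hooks driven
 * by suspend_targets() above. presuspend runs before I/O is quiesced (and
 * presuspend_undo reverses it if suspension fails); postsuspend runs once
 * all I/O has drained. A hypothetical target might implement:
 */
static void example_presuspend(struct dm_target *ti)
{
	/* stop generating new internal I/O, e.g. cancel background work */
}

static void example_postsuspend(struct dm_target *ti)
{
	/* I/O is quiesced: commit in-core metadata to stable storage */
}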
8757b776 | 2053 | int dm_table_resume_targets(struct dm_table *t) |
1da177e4 | 2054 | { |
8757b776 MB |
2055 | int i, r = 0; |
2056 | ||
1ea0654e BVA |
2057 | lockdep_assert_held(&t->md->suspend_lock); |
2058 | ||
8757b776 MB |
2059 | for (i = 0; i < t->num_targets; i++) { |
2060 | struct dm_target *ti = t->targets + i; | |
2061 | ||
2062 | if (!ti->type->preresume) | |
2063 | continue; | |
2064 | ||
2065 | r = ti->type->preresume(ti); | |
7833b08e MS |
2066 | if (r) { |
2067 | DMERR("%s: %s: preresume failed, error = %d", | |
2068 | dm_device_name(t->md), ti->type->name, r); | |
8757b776 | 2069 | return r; |
7833b08e | 2070 | } |
8757b776 | 2071 | } |
1da177e4 LT |
2072 | |
2073 | for (i = 0; i < t->num_targets; i++) { | |
2074 | struct dm_target *ti = t->targets + i; | |
2075 | ||
2076 | if (ti->type->resume) | |
2077 | ti->type->resume(ti); | |
2078 | } | |
8757b776 MB |
2079 | |
2080 | return 0; | |
1da177e4 LT |
2081 | } |
2082 | ||
9d357b07 N |
2083 | void dm_table_add_target_callbacks(struct dm_table *t, struct dm_target_callbacks *cb) |
2084 | { | |
2085 | list_add(&cb->list, &t->target_callbacks); | |
2086 | } | |
2087 | EXPORT_SYMBOL_GPL(dm_table_add_target_callbacks); | |
2088 | ||
1da177e4 LT |
2089 | int dm_table_any_congested(struct dm_table *t, int bdi_bits) |
2090 | { | |
82b1519b | 2091 | struct dm_dev_internal *dd; |
afb24528 | 2092 | struct list_head *devices = dm_table_get_devices(t); |
9d357b07 | 2093 | struct dm_target_callbacks *cb; |
1da177e4 LT |
2094 | int r = 0; |
2095 | ||
afb24528 | 2096 | list_for_each_entry(dd, devices, list) { |
86f1152b | 2097 | struct request_queue *q = bdev_get_queue(dd->dm_dev->bdev); |
0c2322e4 AK |
2098 | char b[BDEVNAME_SIZE]; |
2099 | ||
2100 | if (likely(q)) | |
dc3b17cc | 2101 | r |= bdi_congested(q->backing_dev_info, bdi_bits); |
0c2322e4 AK |
2102 | else |
2103 | DMWARN_LIMIT("%s: any_congested: nonexistent device %s", | |
2104 | dm_device_name(t->md), | |
86f1152b | 2105 | bdevname(dd->dm_dev->bdev, b)); |
1da177e4 LT |
2106 | } |
2107 | ||
9d357b07 N |
2108 | list_for_each_entry(cb, &t->target_callbacks, list) |
2109 | if (cb->congested_fn) | |
2110 | r |= cb->congested_fn(cb, bdi_bits); | |
2111 | ||
1da177e4 LT |
2112 | return r; |
2113 | } | |
2114 | ||
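/*
 * Illustrative sketch, not part of dm-table.c: how target-private congestion
 * state feeds into dm_table_any_congested() above. A target registers a
 * struct dm_target_callbacks with a congested_fn, typically from its
 * constructor via dm_table_add_target_callbacks(). The state consulted
 * here is assumed; returning 0 reports "not congested".
 */
static int example_congested(struct dm_target_callbacks *cb, int bdi_bits)
{
	/* e.g. OR in congestion bits when a target-private queue is full */
	return 0;
}
/* in the target's .ctr, with cb embedded in the target's private struct:
 *	cb->congested_fn = example_congested;
 *	dm_table_add_target_callbacks(ti->table, cb);
 */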
1134e5ae MA |
2115 | struct mapped_device *dm_table_get_md(struct dm_table *t) |
2116 | { | |
1134e5ae MA |
2117 | return t->md; |
2118 | } | |
08649012 | 2119 | EXPORT_SYMBOL(dm_table_get_md); |
1134e5ae | 2120 | |
f349b0a3 MM |
2121 | const char *dm_table_device_name(struct dm_table *t) |
2122 | { | |
2123 | return dm_device_name(t->md); | |
2124 | } | |
2125 | EXPORT_SYMBOL_GPL(dm_table_device_name); | |
2126 | ||
9974fa2c MS |
2127 | void dm_table_run_md_queue_async(struct dm_table *t) |
2128 | { | |
2129 | struct mapped_device *md; | |
2130 | struct request_queue *queue; | |
9974fa2c MS |
2131 | |
2132 | if (!dm_table_request_based(t)) | |
2133 | return; | |
2134 | ||
2135 | md = dm_table_get_md(t); | |
2136 | queue = dm_get_md_queue(md); | |
6a23e05c JA |
2137 | if (queue) |
2138 | blk_mq_run_hw_queues(queue, true); | |
9974fa2c MS |
2139 | } |
2140 | EXPORT_SYMBOL(dm_table_run_md_queue_async); | |
2141 |