/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2001 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the LGPL.
 */

#ifndef _LINUX_DEVICE_MAPPER_H
#define _LINUX_DEVICE_MAPPER_H

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/dm-ioctl.h>
#include <linux/math64.h>
#include <linux/ratelimit.h>

struct dm_dev;
struct dm_target;
struct dm_table;
struct dm_report_zones_args;
struct mapped_device;
struct bio_vec;
enum dax_access_mode;

/*
 * Type of table, mapped_device's mempool and request_queue
 */
enum dm_queue_mode {
	DM_TYPE_NONE		= 0,
	DM_TYPE_BIO_BASED	= 1,
	DM_TYPE_REQUEST_BASED	= 2,
	DM_TYPE_DAX_BIO_BASED	= 3,
};

typedef enum { STATUSTYPE_INFO, STATUSTYPE_TABLE, STATUSTYPE_IMA } status_type_t;

union map_info {
	void *ptr;
};

/*
 * In the constructor the target parameter will already have the
 * table, type, begin and len fields filled in.
 */
typedef int (*dm_ctr_fn) (struct dm_target *target,
			  unsigned int argc, char **argv);

/*
 * The destructor doesn't need to free the dm_target, just
 * anything hidden in ti->private.
 */
typedef void (*dm_dtr_fn) (struct dm_target *ti);

/*
 * The map function must return:
 * < 0: error
 * = 0: The target will handle the io by resubmitting it later
 * = 1: simple remap complete
 * = 2: The target wants to push back the io
 */
typedef int (*dm_map_fn) (struct dm_target *ti, struct bio *bio);
typedef int (*dm_clone_and_map_request_fn) (struct dm_target *ti,
					    struct request *rq,
					    union map_info *map_context,
					    struct request **clone);
typedef void (*dm_release_clone_request_fn) (struct request *clone,
					     union map_info *map_context);

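/*
 * Example (an illustrative sketch, not part of this API): a minimal
 * bio-based map function in the style of dm-linear. It assumes a
 * constructor (sketched later, near dm_get_device) stored a
 * struct dm_dev * in ti->private; returning DM_MAPIO_REMAPPED
 * (defined near the end of this header) means "simple remap complete".
 *
 *	static int example_map(struct dm_target *ti, struct bio *bio)
 *	{
 *		struct dm_dev *dev = ti->private;
 *
 *		bio_set_dev(bio, dev->bdev);
 *		bio->bi_iter.bi_sector = dm_target_offset(ti, bio->bi_iter.bi_sector);
 *		return DM_MAPIO_REMAPPED;
 *	}
 */
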
/*
 * Returns:
 * < 0 : error (currently ignored)
 * 0   : ended successfully
 * 1   : for some reason the io has still not completed (eg,
 *       multipath target might want to requeue a failed io).
 * 2   : The target wants to push back the io
 */
typedef int (*dm_endio_fn) (struct dm_target *ti,
			    struct bio *bio, blk_status_t *error);
typedef int (*dm_request_endio_fn) (struct dm_target *ti,
				    struct request *clone, blk_status_t error,
				    union map_info *map_context);

typedef void (*dm_presuspend_fn) (struct dm_target *ti);
typedef void (*dm_presuspend_undo_fn) (struct dm_target *ti);
typedef void (*dm_postsuspend_fn) (struct dm_target *ti);
typedef int (*dm_preresume_fn) (struct dm_target *ti);
typedef void (*dm_resume_fn) (struct dm_target *ti);

typedef void (*dm_status_fn) (struct dm_target *ti, status_type_t status_type,
			      unsigned int status_flags, char *result, unsigned int maxlen);

typedef int (*dm_message_fn) (struct dm_target *ti, unsigned int argc, char **argv,
			      char *result, unsigned int maxlen);

typedef int (*dm_prepare_ioctl_fn) (struct dm_target *ti, struct block_device **bdev);

#ifdef CONFIG_BLK_DEV_ZONED
typedef int (*dm_report_zones_fn) (struct dm_target *ti,
				   struct dm_report_zones_args *args,
				   unsigned int nr_zones);
#else
/*
 * Define dm_report_zones_fn so that targets can set it to NULL when
 * CONFIG_BLK_DEV_ZONED is disabled. Otherwise each target would need
 * awkward #ifdefs in its target_type, etc.
 */
typedef int (*dm_report_zones_fn) (struct dm_target *dummy);
#endif

/*
 * These iteration functions are typically used to check (and combine)
 * properties of underlying devices.
 * E.g. Does at least one underlying device support flush?
 *      Does any underlying device not support WRITE_SAME?
 *
 * The callout function is called once for each contiguous section of
 * an underlying device. State can be maintained in *data.
 * Return non-zero to stop iterating through any further devices.
 */
typedef int (*iterate_devices_callout_fn) (struct dm_target *ti,
					   struct dm_dev *dev,
					   sector_t start, sector_t len,
					   void *data);

/*
 * This function must iterate through each section of device used by the
 * target until it encounters a non-zero return code, which it then returns.
 * Returns zero if no callout returned non-zero.
 */
typedef int (*dm_iterate_devices_fn) (struct dm_target *ti,
				      iterate_devices_callout_fn fn,
				      void *data);

typedef void (*dm_io_hints_fn) (struct dm_target *ti,
				struct queue_limits *limits);

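/*
 * Example (an illustrative sketch, mirroring the style of the callouts in
 * drivers/md/dm-table.c): count the contiguous device sections a target
 * uses, invoked as ti->type->iterate_devices(ti, count_device, &count).
 *
 *	static int count_device(struct dm_target *ti, struct dm_dev *dev,
 *				sector_t start, sector_t len, void *data)
 *	{
 *		unsigned int *count = data;
 *
 *		(*count)++;
 *		return 0;
 *	}
 */
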
/*
 * Returns:
 *    0: The target can handle the next I/O immediately.
 *    1: The target can't handle the next I/O immediately.
 */
typedef int (*dm_busy_fn) (struct dm_target *ti);

/*
 * Returns:
 *  < 0 : error
 * >= 0 : the number of bytes accessible at the address
 */
typedef long (*dm_dax_direct_access_fn) (struct dm_target *ti, pgoff_t pgoff,
		long nr_pages, enum dax_access_mode node, void **kaddr,
		pfn_t *pfn);
typedef int (*dm_dax_zero_page_range_fn)(struct dm_target *ti, pgoff_t pgoff,
		size_t nr_pages);

/*
 * Returns:
 * != 0 : number of bytes transferred
 * 0    : recovery write failed
 */
typedef size_t (*dm_dax_recovery_write_fn)(struct dm_target *ti, pgoff_t pgoff,
		void *addr, size_t bytes, struct iov_iter *i);

void dm_error(const char *message);

struct dm_dev {
	struct block_device *bdev;
	struct dax_device *dax_dev;
	blk_mode_t mode;
	char name[16];
};

/*
 * Constructors should call these functions to ensure destination devices
 * are opened/closed correctly.
 */
int dm_get_device(struct dm_target *ti, const char *path, blk_mode_t mode,
		  struct dm_dev **result);
void dm_put_device(struct dm_target *ti, struct dm_dev *d);

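/*
 * Example (an illustrative sketch, not part of this API): a constructor
 * for a hypothetical single-device target that opens argv[0] with the
 * table's mode and stashes the dm_dev in ti->private. The matching
 * destructor would just call dm_put_device(ti, ti->private).
 *
 *	static int example_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 *	{
 *		struct dm_dev *dev;
 *		int r;
 *
 *		if (argc != 1) {
 *			ti->error = "Invalid argument count";
 *			return -EINVAL;
 *		}
 *
 *		r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &dev);
 *		if (r) {
 *			ti->error = "Device lookup failed";
 *			return r;
 *		}
 *		ti->private = dev;
 *		return 0;
 *	}
 */
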
/*
 * Information about a target type
 */

struct target_type {
	uint64_t features;
	const char *name;
	struct module *module;
	unsigned int version[3];
	dm_ctr_fn ctr;
	dm_dtr_fn dtr;
	dm_map_fn map;
	dm_clone_and_map_request_fn clone_and_map_rq;
	dm_release_clone_request_fn release_clone_rq;
	dm_endio_fn end_io;
	dm_request_endio_fn rq_end_io;
	dm_presuspend_fn presuspend;
	dm_presuspend_undo_fn presuspend_undo;
	dm_postsuspend_fn postsuspend;
	dm_preresume_fn preresume;
	dm_resume_fn resume;
	dm_status_fn status;
	dm_message_fn message;
	dm_prepare_ioctl_fn prepare_ioctl;
	dm_report_zones_fn report_zones;
	dm_busy_fn busy;
	dm_iterate_devices_fn iterate_devices;
	dm_io_hints_fn io_hints;
	dm_dax_direct_access_fn direct_access;
	dm_dax_zero_page_range_fn dax_zero_page_range;
	dm_dax_recovery_write_fn dax_recovery_write;

	/* For internal device-mapper use. */
	struct list_head list;
};

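/*
 * Example (an illustrative sketch): a minimal target_type for the
 * hypothetical "example" target sketched around this header. The hook
 * names are assumptions; registration is typically handled by the
 * module_dm() macro near the end of this file.
 *
 *	static struct target_type example_target = {
 *		.name    = "example",
 *		.version = {1, 0, 0},
 *		.module  = THIS_MODULE,
 *		.ctr     = example_ctr,
 *		.dtr     = example_dtr,
 *		.map     = example_map,
 *	};
 */
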
/*
 * Target features
 */

/*
 * Any table that contains an instance of this target must have only one.
 */
#define DM_TARGET_SINGLETON		0x00000001
#define dm_target_needs_singleton(type) ((type)->features & DM_TARGET_SINGLETON)

/*
 * Indicates that a target does not support read-only devices.
 */
#define DM_TARGET_ALWAYS_WRITEABLE	0x00000002
#define dm_target_always_writeable(type) \
		((type)->features & DM_TARGET_ALWAYS_WRITEABLE)

/*
 * Any device that contains a table with an instance of this target may never
 * have tables containing any different target type.
 */
#define DM_TARGET_IMMUTABLE		0x00000004
#define dm_target_is_immutable(type)	((type)->features & DM_TARGET_IMMUTABLE)

/*
 * Indicates that a target may replace any target; even immutable targets.
 * .map, .map_rq, .clone_and_map_rq and .release_clone_rq are all defined.
 */
#define DM_TARGET_WILDCARD		0x00000008
#define dm_target_is_wildcard(type)	((type)->features & DM_TARGET_WILDCARD)

/*
 * A target implements its own bio data integrity.
 */
#define DM_TARGET_INTEGRITY		0x00000010
#define dm_target_has_integrity(type)	((type)->features & DM_TARGET_INTEGRITY)

/*
 * A target passes integrity data to the lower device.
 */
#define DM_TARGET_PASSES_INTEGRITY	0x00000020
#define dm_target_passes_integrity(type) ((type)->features & DM_TARGET_PASSES_INTEGRITY)

/*
 * Indicates support for zoned block devices:
 * - DM_TARGET_ZONED_HM: the target also supports host-managed zoned
 *   block devices but does not support combining different zoned models.
 * - DM_TARGET_MIXED_ZONED_MODEL: the target supports combining multiple
 *   devices with different zoned models.
 */
#ifdef CONFIG_BLK_DEV_ZONED
#define DM_TARGET_ZONED_HM		0x00000040
#define dm_target_supports_zoned_hm(type) ((type)->features & DM_TARGET_ZONED_HM)
#else
#define DM_TARGET_ZONED_HM		0x00000000
#define dm_target_supports_zoned_hm(type) (false)
#endif

/*
 * A target handles REQ_NOWAIT
 */
#define DM_TARGET_NOWAIT		0x00000080
#define dm_target_supports_nowait(type) ((type)->features & DM_TARGET_NOWAIT)

/*
 * A target supports passing through inline crypto support.
 */
#define DM_TARGET_PASSES_CRYPTO		0x00000100
#define dm_target_passes_crypto(type) ((type)->features & DM_TARGET_PASSES_CRYPTO)

#ifdef CONFIG_BLK_DEV_ZONED
#define DM_TARGET_MIXED_ZONED_MODEL	0x00000200
#define dm_target_supports_mixed_zoned_model(type) \
	((type)->features & DM_TARGET_MIXED_ZONED_MODEL)
#else
#define DM_TARGET_MIXED_ZONED_MODEL	0x00000000
#define dm_target_supports_mixed_zoned_model(type) (false)
#endif

struct dm_target {
	struct dm_table *table;
	struct target_type *type;

	/* target limits */
	sector_t begin;
	sector_t len;

	/* If non-zero, maximum size of I/O submitted to a target. */
	uint32_t max_io_len;

	/*
	 * The number of zero-length flush bios that will be submitted
	 * to the target for the purpose of flushing cache.
	 *
	 * The bio number can be accessed with dm_bio_get_target_bio_nr.
	 * It is the responsibility of the target driver to remap these bios
	 * to the real underlying devices.
	 */
	unsigned int num_flush_bios;

	/*
	 * The number of discard bios that will be submitted to the target.
	 * The bio number can be accessed with dm_bio_get_target_bio_nr.
	 */
	unsigned int num_discard_bios;

	/*
	 * The number of secure erase bios that will be submitted to the target.
	 * The bio number can be accessed with dm_bio_get_target_bio_nr.
	 */
	unsigned int num_secure_erase_bios;

	/*
	 * The number of WRITE ZEROES bios that will be submitted to the target.
	 * The bio number can be accessed with dm_bio_get_target_bio_nr.
	 */
	unsigned int num_write_zeroes_bios;

	/*
	 * The minimum number of extra bytes allocated in each io for the
	 * target to use.
	 */
	unsigned int per_io_data_size;

	/* target specific data */
	void *private;

	/* Used to provide an error string from the ctr */
	char *error;

	/*
	 * Set if this target needs to receive flushes regardless of
	 * whether or not its underlying devices have support.
	 */
	bool flush_supported:1;

	/*
	 * Set if this target needs to receive discards regardless of
	 * whether or not its underlying devices have support.
	 */
	bool discards_supported:1;

	/*
	 * Set if this target requires that discards be split on
	 * 'max_discard_sectors' boundaries.
	 */
	bool max_discard_granularity:1;

	/*
	 * Set if this target requires that secure_erases be split on
	 * 'max_secure_erase_sectors' boundaries.
	 */
	bool max_secure_erase_granularity:1;

	/*
	 * Set if this target requires that write_zeroes be split on
	 * 'max_write_zeroes_sectors' boundaries.
	 */
	bool max_write_zeroes_granularity:1;

	/*
	 * Set if we need to limit the number of in-flight bios when swapping.
	 */
	bool limit_swap_bios:1;

	/*
	 * Set if this target implements a zoned device and needs emulation of
	 * zone append operations using regular writes.
	 */
	bool emulate_zone_append:1;

	/*
	 * Set if the target will submit IO using dm_submit_bio_remap()
	 * after returning DM_MAPIO_SUBMITTED from its map function.
	 */
	bool accounts_remapped_io:1;

	/*
	 * Set if the target will submit the DM bio without first calling
	 * bio_set_dev(). NOTE: ideally a target should _not_ need this.
	 */
	bool needs_bio_set_dev:1;
};

void *dm_per_bio_data(struct bio *bio, size_t data_size);
struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size);
unsigned int dm_bio_get_target_bio_nr(const struct bio *bio);

u64 dm_start_time_ns_from_clone(struct bio *bio);

int dm_register_target(struct target_type *t);
void dm_unregister_target(struct target_type *t);

/*
 * Target argument parsing.
 */
struct dm_arg_set {
	unsigned int argc;
	char **argv;
};

/*
 * The minimum and maximum value of a numeric argument, together with
 * the error message to use if the number is found to be outside that range.
 */
struct dm_arg {
	unsigned int min;
	unsigned int max;
	char *error;
};

/*
 * Validate the next argument, either returning it as *value or, if invalid,
 * returning -EINVAL and setting *error.
 */
int dm_read_arg(const struct dm_arg *arg, struct dm_arg_set *arg_set,
		unsigned int *value, char **error);

/*
 * Process the next argument as the start of a group containing between
 * arg->min and arg->max further arguments. Either return the size as
 * *num_args or, if invalid, return -EINVAL and set *error.
 */
int dm_read_arg_group(const struct dm_arg *arg, struct dm_arg_set *arg_set,
		      unsigned int *num_args, char **error);

/*
 * Return the current argument and shift to the next.
 */
const char *dm_shift_arg(struct dm_arg_set *as);

/*
 * Move through num_args arguments.
 */
void dm_consume_args(struct dm_arg_set *as, unsigned int num_args);

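/*
 * Example (an illustrative sketch): parsing "<dev_path> <count>" inside a
 * constructor using the helpers above; the bounds and error strings are
 * hypothetical.
 *
 *	static const struct dm_arg count_arg = {1, 64, "Invalid count"};
 *
 *	struct dm_arg_set as = { .argc = argc, .argv = argv };
 *	const char *path = dm_shift_arg(&as);
 *	unsigned int count;
 *	char *err = NULL;
 *
 *	if (!path || dm_read_arg(&count_arg, &as, &count, &err)) {
 *		ti->error = err ? err : "Missing device path";
 *		return -EINVAL;
 *	}
 */
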
/*
 *----------------------------------------------------------------
 * Functions for creating and manipulating mapped devices.
 * Drop the reference with dm_put when you finish with the object.
 *----------------------------------------------------------------
 */

/*
 * DM_ANY_MINOR chooses the next available minor number.
 */
#define DM_ANY_MINOR (-1)
int dm_create(int minor, struct mapped_device **md);

/*
 * Reference counting for md.
 */
struct mapped_device *dm_get_md(dev_t dev);
void dm_get(struct mapped_device *md);
int dm_hold(struct mapped_device *md);
void dm_put(struct mapped_device *md);

/*
 * An arbitrary pointer may be stored alongside a mapped device.
 */
void dm_set_mdptr(struct mapped_device *md, void *ptr);
void *dm_get_mdptr(struct mapped_device *md);

/*
 * A device can still be used while suspended, but I/O is deferred.
 */
int dm_suspend(struct mapped_device *md, unsigned int suspend_flags);
int dm_resume(struct mapped_device *md);

/*
 * Event functions.
 */
uint32_t dm_get_event_nr(struct mapped_device *md);
int dm_wait_event(struct mapped_device *md, int event_nr);
uint32_t dm_next_uevent_seq(struct mapped_device *md);
void dm_uevent_add(struct mapped_device *md, struct list_head *elist);

/*
 * Info functions.
 */
const char *dm_device_name(struct mapped_device *md);
int dm_copy_name_and_uuid(struct mapped_device *md, char *name, char *uuid);
struct gendisk *dm_disk(struct mapped_device *md);
int dm_suspended(struct dm_target *ti);
int dm_post_suspending(struct dm_target *ti);
int dm_noflush_suspending(struct dm_target *ti);
void dm_accept_partial_bio(struct bio *bio, unsigned int n_sectors);
void dm_submit_bio_remap(struct bio *clone, struct bio *tgt_clone);
union map_info *dm_get_rq_mapinfo(struct request *rq);

#ifdef CONFIG_BLK_DEV_ZONED
struct dm_report_zones_args {
	struct dm_target *tgt;
	sector_t next_sector;

	void *orig_data;
	report_zones_cb orig_cb;
	unsigned int zone_idx;

	/* must be filled by ->report_zones before calling dm_report_zones_cb */
	sector_t start;
};
int dm_report_zones(struct block_device *bdev, sector_t start, sector_t sector,
		    struct dm_report_zones_args *args, unsigned int nr_zones);
#endif /* CONFIG_BLK_DEV_ZONED */

/*
 * Device mapper functions to parse and create devices specified by the
 * parameter "dm-mod.create="
 */
int __init dm_early_create(struct dm_ioctl *dmi,
			   struct dm_target_spec **spec_array,
			   char **target_params_array);

/*
 * Geometry functions.
 */
int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo);
int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo);

/*
 *---------------------------------------------------------------
 * Functions for manipulating device-mapper tables.
 *---------------------------------------------------------------
 */

/*
 * First create an empty table.
 */
int dm_table_create(struct dm_table **result, blk_mode_t mode,
		    unsigned int num_targets, struct mapped_device *md);

/*
 * Then call this once for each target.
 */
int dm_table_add_target(struct dm_table *t, const char *type,
			sector_t start, sector_t len, char *params);

/*
 * Target can use this to set the table's type.
 * Can only ever be called from a target's ctr.
 * Useful for "hybrid" targets (those supporting both bio-based
 * and request-based operation).
 */
void dm_table_set_type(struct dm_table *t, enum dm_queue_mode type);

/*
 * Finally call this to make the table ready for use.
 */
int dm_table_complete(struct dm_table *t);

/*
 * Destroy the table when finished.
 */
void dm_table_destroy(struct dm_table *t);

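/*
 * Example (an illustrative sketch of this in-kernel sequence, as driven by
 * callers such as the ioctl and early-create code; the target line and the
 * BLK_OPEN_* mode flags are assumptions):
 *
 *	struct dm_table *t;
 *	int r;
 *
 *	r = dm_table_create(&t, BLK_OPEN_READ | BLK_OPEN_WRITE, 1, md);
 *	if (r)
 *		return r;
 *	r = dm_table_add_target(t, "linear", 0, 1024, "/dev/sdb 0");
 *	if (!r)
 *		r = dm_table_complete(t);
 *	if (r)
 *		dm_table_destroy(t);
 */
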
/*
 * Target may require that it is never sent I/O larger than len.
 */
int __must_check dm_set_target_max_io_len(struct dm_target *ti, sector_t len);

/*
 * Table reference counting.
 */
struct dm_table *dm_get_live_table(struct mapped_device *md, int *srcu_idx);
void dm_put_live_table(struct mapped_device *md, int srcu_idx);
void dm_sync_table(struct mapped_device *md);

/*
 * Queries
 */
sector_t dm_table_get_size(struct dm_table *t);
blk_mode_t dm_table_get_mode(struct dm_table *t);
struct mapped_device *dm_table_get_md(struct dm_table *t);
const char *dm_table_device_name(struct dm_table *t);

/*
 * Trigger an event.
 */
void dm_table_event(struct dm_table *t);

/*
 * Run the queue for request-based targets.
 */
void dm_table_run_md_queue_async(struct dm_table *t);

/*
 * The device must be suspended before calling this method.
 * Returns the previous table, which the caller must destroy.
 */
struct dm_table *dm_swap_table(struct mapped_device *md,
			       struct dm_table *t);

/*
 * Table blk_crypto_profile functions
 */
void dm_destroy_crypto_profile(struct blk_crypto_profile *profile);

/*
 *---------------------------------------------------------------
 * Macros.
 *---------------------------------------------------------------
 */
#define DM_NAME "device-mapper"

#define DM_FMT(fmt) DM_NAME ": " DM_MSG_PREFIX ": " fmt "\n"

#define DMCRIT(fmt, ...) pr_crit(DM_FMT(fmt), ##__VA_ARGS__)

#define DMERR(fmt, ...) pr_err(DM_FMT(fmt), ##__VA_ARGS__)
#define DMERR_LIMIT(fmt, ...) pr_err_ratelimited(DM_FMT(fmt), ##__VA_ARGS__)
#define DMWARN(fmt, ...) pr_warn(DM_FMT(fmt), ##__VA_ARGS__)
#define DMWARN_LIMIT(fmt, ...) pr_warn_ratelimited(DM_FMT(fmt), ##__VA_ARGS__)
#define DMINFO(fmt, ...) pr_info(DM_FMT(fmt), ##__VA_ARGS__)
#define DMINFO_LIMIT(fmt, ...) pr_info_ratelimited(DM_FMT(fmt), ##__VA_ARGS__)

#define DMDEBUG(fmt, ...) pr_debug(DM_FMT(fmt), ##__VA_ARGS__)
#define DMDEBUG_LIMIT(fmt, ...) pr_debug_ratelimited(DM_FMT(fmt), ##__VA_ARGS__)

#define DMEMIT(x...) (sz += ((sz >= maxlen) ? 0 : scnprintf(result + sz, maxlen - sz, x)))

#define DMEMIT_TARGET_NAME_VERSION(y) \
	DMEMIT("target_name=%s,target_version=%u.%u.%u", \
	       (y)->name, (y)->version[0], (y)->version[1], (y)->version[2])

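/*
 * Example (an illustrative sketch): DMEMIT expects local variables named
 * 'sz', 'result' and 'maxlen' in scope, as in a target's status function.
 * The emitted strings here are hypothetical.
 *
 *	static void example_status(struct dm_target *ti, status_type_t type,
 *				   unsigned int status_flags, char *result,
 *				   unsigned int maxlen)
 *	{
 *		unsigned int sz = 0;
 *
 *		switch (type) {
 *		case STATUSTYPE_INFO:
 *			DMEMIT("0");
 *			break;
 *		case STATUSTYPE_TABLE:
 *			DMEMIT("%llu", (unsigned long long)ti->len);
 *			break;
 *		default:
 *			break;
 *		}
 *	}
 */
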
/**
 * module_dm() - Helper macro for DM targets that don't do anything
 * special in their module_init and module_exit.
 * Each module may only use this macro once, and calling it replaces
 * module_init() and module_exit().
 *
 * @name: DM target's name
 */
#define module_dm(name) \
static int __init dm_##name##_init(void) \
{ \
	return dm_register_target(&(name##_target)); \
} \
module_init(dm_##name##_init) \
static void __exit dm_##name##_exit(void) \
{ \
	dm_unregister_target(&(name##_target)); \
} \
module_exit(dm_##name##_exit)

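/*
 * For example, a module defining 'static struct target_type example_target'
 * (see the sketch earlier in this header) would register and unregister it
 * with a single line:
 *
 *	module_dm(example);
 */
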
/*
 * Definitions of return values from target end_io function.
 */
#define DM_ENDIO_DONE		0
#define DM_ENDIO_INCOMPLETE	1
#define DM_ENDIO_REQUEUE	2
#define DM_ENDIO_DELAY_REQUEUE	3

/*
 * Definitions of return values from target map function.
 */
#define DM_MAPIO_SUBMITTED	0
#define DM_MAPIO_REMAPPED	1
#define DM_MAPIO_REQUEUE	DM_ENDIO_REQUEUE
#define DM_MAPIO_DELAY_REQUEUE	DM_ENDIO_DELAY_REQUEUE
#define DM_MAPIO_KILL		4

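/*
 * Example (an illustrative sketch): an end_io hook that asks the core to
 * requeue failed bios instead of completing them with the error, as a
 * multipath-style target might.
 *
 *	static int example_end_io(struct dm_target *ti, struct bio *bio,
 *				  blk_status_t *error)
 *	{
 *		if (*error)
 *			return DM_ENDIO_REQUEUE;
 *		return DM_ENDIO_DONE;
 *	}
 */
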
#define dm_sector_div64(x, y)( \
{ \
	u64 _res; \
	(x) = div64_u64_rem(x, y, &_res); \
	_res; \
} \
)

/*
 * Ceiling(n / sz)
 */
#define dm_div_up(n, sz) (((n) + (sz) - 1) / (sz))

#define dm_sector_div_up(n, sz) ( \
{ \
	sector_t _r = ((n) + (sz) - 1); \
	sector_div(_r, (sz)); \
	_r; \
} \
)

/*
 * ceiling(n / size) * size
 */
#define dm_round_up(n, sz) (dm_div_up((n), (sz)) * (sz))

/*
 * Sector offset taken relative to the start of the target instead of
 * relative to the start of the device.
 */
#define dm_target_offset(ti, sector) ((sector) - (ti)->begin)

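/*
 * For example: dm_div_up(10, 4) == 3 and dm_round_up(10, 4) == 12;
 * and with ti->begin == 2048, dm_target_offset(ti, 2304) == 256.
 */
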
static inline sector_t to_sector(unsigned long long n)
{
	return (n >> SECTOR_SHIFT);
}

static inline unsigned long to_bytes(sector_t n)
{
	return (n << SECTOR_SHIFT);
}

#endif	/* _LINUX_DEVICE_MAPPER_H */