/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2001 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the LGPL.
 */

#ifndef _LINUX_DEVICE_MAPPER_H
#define _LINUX_DEVICE_MAPPER_H

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/dm-ioctl.h>
#include <linux/math64.h>
#include <linux/ratelimit.h>

struct dm_dev;
struct dm_target;
struct dm_table;
struct dm_report_zones_args;
struct mapped_device;
struct bio_vec;
enum dax_access_mode;

/*
 * Type of table, mapped_device's mempool and request_queue
 */
enum dm_queue_mode {
        DM_TYPE_NONE            = 0,
        DM_TYPE_BIO_BASED       = 1,
        DM_TYPE_REQUEST_BASED   = 2,
        DM_TYPE_DAX_BIO_BASED   = 3,
};

typedef enum { STATUSTYPE_INFO, STATUSTYPE_TABLE, STATUSTYPE_IMA } status_type_t;

union map_info {
        void *ptr;
};

/*
 * In the constructor the target parameter will already have the
 * table, type, begin and len fields filled in.
 */
typedef int (*dm_ctr_fn) (struct dm_target *target,
                          unsigned int argc, char **argv);

/*
 * The destructor doesn't need to free the dm_target, just
 * anything hidden in ti->private.
 */
typedef void (*dm_dtr_fn) (struct dm_target *ti);

/*
 * The map function must return:
 * < 0: error
 * = 0: The target will handle the io by resubmitting it later
 * = 1: simple remap complete
 * = 2: The target wants to push back the io
 */
typedef int (*dm_map_fn) (struct dm_target *ti, struct bio *bio);
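
/*
 * Example: a minimal bio-based map function (an illustrative sketch, not
 * part of this header). It assumes a hypothetical per-target context
 * "struct example_ctx" holding a dm_dev obtained in the ctr, and remaps
 * the bio's offset within the target straight onto that device:
 *
 *    static int example_map(struct dm_target *ti, struct bio *bio)
 *    {
 *        struct example_ctx *ec = ti->private;
 *
 *        bio_set_dev(bio, ec->dev->bdev);
 *        bio->bi_iter.bi_sector =
 *            dm_target_offset(ti, bio->bi_iter.bi_sector);
 *        return DM_MAPIO_REMAPPED;
 *    }
 *
 * dm_target_offset() and the DM_MAPIO_* values are defined at the end of
 * this header.
 */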
typedef int (*dm_clone_and_map_request_fn) (struct dm_target *ti,
                                            struct request *rq,
                                            union map_info *map_context,
                                            struct request **clone);
typedef void (*dm_release_clone_request_fn) (struct request *clone,
                                             union map_info *map_context);

/*
 * Returns:
 * < 0 : error (currently ignored)
 * 0   : ended successfully
 * 1   : for some reason the io has still not completed (eg,
 *       multipath target might want to requeue a failed io).
 * 2   : The target wants to push back the io
 */
typedef int (*dm_endio_fn) (struct dm_target *ti,
                            struct bio *bio, blk_status_t *error);
typedef int (*dm_request_endio_fn) (struct dm_target *ti,
                                    struct request *clone, blk_status_t error,
                                    union map_info *map_context);

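/*
 * Example: an end_io hook that requeues failed bios (an illustrative
 * sketch; the DM_ENDIO_* values are defined at the end of this header).
 * Targets that need no per-bio completion handling leave .end_io unset:
 *
 *    static int example_end_io(struct dm_target *ti, struct bio *bio,
 *                              blk_status_t *error)
 *    {
 *        if (*error == BLK_STS_IOERR)
 *            return DM_ENDIO_REQUEUE;
 *        return DM_ENDIO_DONE;
 *    }
 */
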
typedef void (*dm_presuspend_fn) (struct dm_target *ti);
typedef void (*dm_presuspend_undo_fn) (struct dm_target *ti);
typedef void (*dm_postsuspend_fn) (struct dm_target *ti);
typedef int (*dm_preresume_fn) (struct dm_target *ti);
typedef void (*dm_resume_fn) (struct dm_target *ti);

typedef void (*dm_status_fn) (struct dm_target *ti, status_type_t status_type,
                              unsigned int status_flags, char *result, unsigned int maxlen);

typedef int (*dm_message_fn) (struct dm_target *ti, unsigned int argc, char **argv,
                              char *result, unsigned int maxlen);

/*
 * Called with *forward == true. If it remains true, the ioctl should be
 * forwarded to bdev. If it is reset to false, the target already fully handled
 * the ioctl and the return value is the return value for the whole ioctl.
 */
typedef int (*dm_prepare_ioctl_fn) (struct dm_target *ti, struct block_device **bdev,
                                    unsigned int cmd, unsigned long arg,
                                    bool *forward);

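/*
 * Example: a prepare_ioctl implementation for a single-device target
 * (an illustrative sketch, again assuming the hypothetical
 * "struct example_ctx"). Returning 0 with *forward left true asks dm
 * core to pass the ioctl through; returning 1 tells it the target does
 * not map the whole device:
 *
 *    static int example_prepare_ioctl(struct dm_target *ti,
 *                                     struct block_device **bdev,
 *                                     unsigned int cmd, unsigned long arg,
 *                                     bool *forward)
 *    {
 *        struct example_ctx *ec = ti->private;
 *
 *        *bdev = ec->dev->bdev;
 *
 *        // Only pass ioctls through if the device sizes match exactly.
 *        if (ti->len != bdev_nr_sectors(ec->dev->bdev))
 *            return 1;
 *        return 0;
 *    }
 */
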
#ifdef CONFIG_BLK_DEV_ZONED
typedef int (*dm_report_zones_fn) (struct dm_target *ti,
                                   struct dm_report_zones_args *args,
                                   unsigned int nr_zones);
#else
/*
 * Define dm_report_zones_fn so that targets can assign to NULL if
 * CONFIG_BLK_DEV_ZONED is disabled. Otherwise each target needs to do
 * awkward #ifdefs in their target_type, etc.
 */
typedef int (*dm_report_zones_fn) (struct dm_target *dummy);
#endif

/*
 * These iteration functions are typically used to check (and combine)
 * properties of underlying devices.
 * E.g. Does at least one underlying device support flush?
 *      Does any underlying device not support WRITE_SAME?
 *
 * The callout function is called once for each contiguous section of
 * an underlying device. State can be maintained in *data.
 * Return non-zero to stop iterating through any further devices.
 */
typedef int (*iterate_devices_callout_fn) (struct dm_target *ti,
                                           struct dm_dev *dev,
                                           sector_t start, sector_t len,
                                           void *data);

/*
 * This function must iterate through each section of device used by the
 * target until it encounters a non-zero return code, which it then returns.
 * Returns zero if no callout returned non-zero.
 */
typedef int (*dm_iterate_devices_fn) (struct dm_target *ti,
                                      iterate_devices_callout_fn fn,
                                      void *data);

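/*
 * Example: combining a property across underlying devices (an
 * illustrative sketch). A callout that returns non-zero as soon as one
 * device lacks the property lets the caller check that *all* devices
 * support it:
 *
 *    static int device_not_nonrot(struct dm_target *ti, struct dm_dev *dev,
 *                                 sector_t start, sector_t len, void *data)
 *    {
 *        return !bdev_nonrot(dev->bdev);
 *    }
 *
 * and then:
 *
 *    all_nonrot = !ti->type->iterate_devices(ti, device_not_nonrot, NULL);
 */
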
typedef void (*dm_io_hints_fn) (struct dm_target *ti,
                                struct queue_limits *limits);

/*
 * Returns:
 *    0: The target can handle the next I/O immediately.
 *    1: The target can't handle the next I/O immediately.
 */
typedef int (*dm_busy_fn) (struct dm_target *ti);

/*
 * Returns:
 *  < 0 : error
 * >= 0 : the number of bytes accessible at the address
 */
typedef long (*dm_dax_direct_access_fn) (struct dm_target *ti, pgoff_t pgoff,
                long nr_pages, enum dax_access_mode node, void **kaddr,
                pfn_t *pfn);
typedef int (*dm_dax_zero_page_range_fn)(struct dm_target *ti, pgoff_t pgoff,
                size_t nr_pages);

/*
 * Returns:
 * != 0 : number of bytes transferred
 * 0    : recovery write failed
 */
typedef size_t (*dm_dax_recovery_write_fn)(struct dm_target *ti, pgoff_t pgoff,
                void *addr, size_t bytes, struct iov_iter *i);

void dm_error(const char *message);

struct dm_dev {
        struct block_device *bdev;
        struct file *bdev_file;
        struct dax_device *dax_dev;
        blk_mode_t mode;
        char name[16];
};

/*
 * Constructors should call these functions to ensure destination devices
 * are opened/closed correctly.
 */
int dm_get_device(struct dm_target *ti, const char *path, blk_mode_t mode,
                  struct dm_dev **result);
void dm_put_device(struct dm_target *ti, struct dm_dev *d);

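/*
 * Example: a constructor/destructor pair managing one underlying device
 * (an illustrative sketch; "struct example_ctx" is the hypothetical
 * per-target context used throughout these examples):
 *
 *    static int example_ctr(struct dm_target *ti, unsigned int argc,
 *                           char **argv)
 *    {
 *        struct example_ctx *ec;
 *
 *        if (argc != 1) {
 *            ti->error = "Invalid argument count";
 *            return -EINVAL;
 *        }
 *        ec = kzalloc(sizeof(*ec), GFP_KERNEL);
 *        if (!ec)
 *            return -ENOMEM;
 *        if (dm_get_device(ti, argv[0], dm_table_get_mode(ti->table),
 *                          &ec->dev)) {
 *            ti->error = "Device lookup failed";
 *            kfree(ec);
 *            return -EINVAL;
 *        }
 *        ti->private = ec;
 *        return 0;
 *    }
 *
 *    static void example_dtr(struct dm_target *ti)
 *    {
 *        struct example_ctx *ec = ti->private;
 *
 *        dm_put_device(ti, ec->dev);
 *        kfree(ec);
 *    }
 */
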
/*
 * Helper function for getting devices
 */
int dm_devt_from_path(const char *path, dev_t *dev_p);

/*
 * Information about a target type
 */

struct target_type {
        uint64_t features;
        const char *name;
        struct module *module;
        unsigned int version[3];
        dm_ctr_fn ctr;
        dm_dtr_fn dtr;
        dm_map_fn map;
        dm_clone_and_map_request_fn clone_and_map_rq;
        dm_release_clone_request_fn release_clone_rq;
        dm_endio_fn end_io;
        dm_request_endio_fn rq_end_io;
        dm_presuspend_fn presuspend;
        dm_presuspend_undo_fn presuspend_undo;
        dm_postsuspend_fn postsuspend;
        dm_preresume_fn preresume;
        dm_resume_fn resume;
        dm_status_fn status;
        dm_message_fn message;
        dm_prepare_ioctl_fn prepare_ioctl;
        dm_report_zones_fn report_zones;
        dm_busy_fn busy;
        dm_iterate_devices_fn iterate_devices;
        dm_io_hints_fn io_hints;
        dm_dax_direct_access_fn direct_access;
        dm_dax_zero_page_range_fn dax_zero_page_range;
        dm_dax_recovery_write_fn dax_recovery_write;

        /* For internal device-mapper use. */
        struct list_head list;
};

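/*
 * Example: a target_type for the hypothetical "example" target sketched
 * above. Hooks that are not needed may simply be left NULL:
 *
 *    static struct target_type example_target = {
 *        .name    = "example",
 *        .version = {1, 0, 0},
 *        .module  = THIS_MODULE,
 *        .ctr     = example_ctr,
 *        .dtr     = example_dtr,
 *        .map     = example_map,
 *    };
 */
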
/*
 * Target features
 */

/*
 * Any table that contains an instance of this target must have only one.
 */
#define DM_TARGET_SINGLETON             0x00000001
#define dm_target_needs_singleton(type) ((type)->features & DM_TARGET_SINGLETON)

/*
 * Indicates that a target does not support read-only devices.
 */
#define DM_TARGET_ALWAYS_WRITEABLE      0x00000002
#define dm_target_always_writeable(type) \
                ((type)->features & DM_TARGET_ALWAYS_WRITEABLE)

/*
 * Any device that contains a table with an instance of this target may never
 * have tables containing any different target type.
 */
#define DM_TARGET_IMMUTABLE             0x00000004
#define dm_target_is_immutable(type)    ((type)->features & DM_TARGET_IMMUTABLE)

/*
 * Indicates that a target may replace any target; even immutable targets.
 * .map, .map_rq, .clone_and_map_rq and .release_clone_rq are all defined.
 */
#define DM_TARGET_WILDCARD              0x00000008
#define dm_target_is_wildcard(type)     ((type)->features & DM_TARGET_WILDCARD)

/*
 * A target implements its own bio data integrity.
 */
#define DM_TARGET_INTEGRITY             0x00000010
#define dm_target_has_integrity(type)   ((type)->features & DM_TARGET_INTEGRITY)

/*
 * A target passes integrity data to the lower device.
 */
#define DM_TARGET_PASSES_INTEGRITY      0x00000020
#define dm_target_passes_integrity(type) ((type)->features & DM_TARGET_PASSES_INTEGRITY)

/*
 * Indicates support for zoned block devices:
 * - DM_TARGET_ZONED_HM: the target also supports host-managed zoned
 *   block devices but does not support combining different zoned models.
 * - DM_TARGET_MIXED_ZONED_MODEL: the target supports combining multiple
 *   devices with different zoned models.
 */
#ifdef CONFIG_BLK_DEV_ZONED
#define DM_TARGET_ZONED_HM              0x00000040
#define dm_target_supports_zoned_hm(type) ((type)->features & DM_TARGET_ZONED_HM)
#else
#define DM_TARGET_ZONED_HM              0x00000000
#define dm_target_supports_zoned_hm(type) (false)
#endif

/*
 * A target handles REQ_NOWAIT
 */
#define DM_TARGET_NOWAIT                0x00000080
#define dm_target_supports_nowait(type) ((type)->features & DM_TARGET_NOWAIT)

/*
 * A target supports passing through inline crypto support.
 */
#define DM_TARGET_PASSES_CRYPTO         0x00000100
#define dm_target_passes_crypto(type)   ((type)->features & DM_TARGET_PASSES_CRYPTO)

#ifdef CONFIG_BLK_DEV_ZONED
#define DM_TARGET_MIXED_ZONED_MODEL     0x00000200
#define dm_target_supports_mixed_zoned_model(type) \
        ((type)->features & DM_TARGET_MIXED_ZONED_MODEL)
#else
#define DM_TARGET_MIXED_ZONED_MODEL     0x00000000
#define dm_target_supports_mixed_zoned_model(type) (false)
#endif

#define DM_TARGET_ATOMIC_WRITES         0x00000400
#define dm_target_supports_atomic_writes(type) ((type)->features & DM_TARGET_ATOMIC_WRITES)

struct dm_target {
        struct dm_table *table;
        struct target_type *type;

        /* target limits */
        sector_t begin;
        sector_t len;

        /* If non-zero, maximum size of I/O submitted to a target. */
        uint32_t max_io_len;

        /*
         * A number of zero-length barrier bios that will be submitted
         * to the target for the purpose of flushing cache.
         *
         * The bio number can be accessed with dm_bio_get_target_bio_nr.
         * It is a responsibility of the target driver to remap these bios
         * to the real underlying devices.
         */
        unsigned int num_flush_bios;

        /*
         * The number of discard bios that will be submitted to the target.
         * The bio number can be accessed with dm_bio_get_target_bio_nr.
         */
        unsigned int num_discard_bios;

        /*
         * The number of secure erase bios that will be submitted to the target.
         * The bio number can be accessed with dm_bio_get_target_bio_nr.
         */
        unsigned int num_secure_erase_bios;

        /*
         * The number of WRITE ZEROES bios that will be submitted to the target.
         * The bio number can be accessed with dm_bio_get_target_bio_nr.
         */
        unsigned int num_write_zeroes_bios;

        /*
         * The minimum number of extra bytes allocated in each io for the
         * target to use.
         */
        unsigned int per_io_data_size;

        /* target specific data */
        void *private;

        /* Used to provide an error string from the ctr */
        char *error;

        /*
         * Set if this target needs to receive flushes regardless of
         * whether or not its underlying devices have support.
         */
        bool flush_supported:1;

        /*
         * Set if this target needs to receive discards regardless of
         * whether or not its underlying devices have support.
         */
        bool discards_supported:1;

        /*
         * Automatically set by dm-core if this target supports
         * REQ_OP_ZONE_RESET_ALL. Otherwise, this operation will be emulated
         * using REQ_OP_ZONE_RESET. Target drivers must not set this manually.
         */
        bool zone_reset_all_supported:1;

        /*
         * Set if this target requires that discards be split on
         * 'max_discard_sectors' boundaries.
         */
        bool max_discard_granularity:1;

        /*
         * Set if we need to limit the number of in-flight bios when swapping.
         */
        bool limit_swap_bios:1;

        /*
         * Set if this target implements a zoned device and needs emulation of
         * zone append operations using regular writes.
         */
        bool emulate_zone_append:1;

        /*
         * Set if the target will submit IO using dm_submit_bio_remap()
         * after returning DM_MAPIO_SUBMITTED from its map function.
         */
        bool accounts_remapped_io:1;

        /*
         * Set if the target will submit the DM bio without first calling
         * bio_set_dev(). NOTE: ideally a target should _not_ need this.
         */
        bool needs_bio_set_dev:1;

        /*
         * Set if the target supports flush optimization. If all the targets in
         * a table have flush_bypasses_map set, the dm core will not send
         * flushes to the targets via a ->map method. It will iterate over
         * dm_table->devices and send flushes to the devices directly. This
         * optimization reduces the number of flushes being sent when multiple
         * targets in a table use the same underlying device.
         *
         * This optimization may be enabled on targets that just pass the
         * flushes to the underlying devices without performing any other
         * actions on the flush request. Currently, dm-linear and dm-stripe
         * support it.
         */
        bool flush_bypasses_map:1;

        /*
         * Set if the target calls bio_integrity_alloc on bios received
         * in the map method.
         */
        bool mempool_needs_integrity:1;
};

void *dm_per_bio_data(struct bio *bio, size_t data_size);
struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size);
unsigned int dm_bio_get_target_bio_nr(const struct bio *bio);

u64 dm_start_time_ns_from_clone(struct bio *bio);

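/*
 * Example: per-bio data (an illustrative sketch; the struct name is
 * hypothetical). A target declares how much space it needs in the ctr
 * and retrieves it in its map/end_io hooks:
 *
 *    struct example_bio_data {
 *        sector_t orig_sector;
 *    };
 *
 * In the ctr:
 *
 *    ti->per_io_data_size = sizeof(struct example_bio_data);
 *
 * In the map function:
 *
 *    struct example_bio_data *bd =
 *        dm_per_bio_data(bio, sizeof(struct example_bio_data));
 *    bd->orig_sector = bio->bi_iter.bi_sector;
 */
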
int dm_register_target(struct target_type *t);
void dm_unregister_target(struct target_type *t);

/*
 * Target argument parsing.
 */
struct dm_arg_set {
        unsigned int argc;
        char **argv;
};

/*
 * The minimum and maximum value of a numeric argument, together with
 * the error message to use if the number is found to be outside that range.
 */
struct dm_arg {
        unsigned int min;
        unsigned int max;
        char *error;
};

/*
 * Validate the next argument, either returning it as *value or, if invalid,
 * returning -EINVAL and setting *error.
 */
int dm_read_arg(const struct dm_arg *arg, struct dm_arg_set *arg_set,
                unsigned int *value, char **error);

/*
 * Process the next argument as the start of a group containing between
 * arg->min and arg->max further arguments. Either return the size as
 * *num_args or, if invalid, return -EINVAL and set *error.
 */
int dm_read_arg_group(const struct dm_arg *arg, struct dm_arg_set *arg_set,
                      unsigned int *num_args, char **error);

/*
 * Return the current argument and shift to the next.
 */
const char *dm_shift_arg(struct dm_arg_set *as);

/*
 * Move through num_args arguments.
 */
void dm_consume_args(struct dm_arg_set *as, unsigned int num_args);

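/*
 * Example: parsing a bounded numeric argument inside a ctr (an
 * illustrative sketch; the bounds and error message are made up):
 *
 *    static const struct dm_arg _args[] = {
 *        {0, 1024, "invalid number of retries"},
 *    };
 *
 *    struct dm_arg_set as = { .argc = argc, .argv = argv };
 *    unsigned int retries;
 *    char *error;
 *
 *    if (dm_read_arg(_args, &as, &retries, &error)) {
 *        ti->error = error;
 *        return -EINVAL;
 *    }
 */
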
/*
 *----------------------------------------------------------------
 * Functions for creating and manipulating mapped devices.
 * Drop the reference with dm_put when you finish with the object.
 *----------------------------------------------------------------
 */

/*
 * DM_ANY_MINOR chooses the next available minor number.
 */
#define DM_ANY_MINOR (-1)
int dm_create(int minor, struct mapped_device **md);

/*
 * Reference counting for md.
 */
struct mapped_device *dm_get_md(dev_t dev);
void dm_get(struct mapped_device *md);
int dm_hold(struct mapped_device *md);
void dm_put(struct mapped_device *md);

/*
 * An arbitrary pointer may be stored alongside a mapped device.
 */
void dm_set_mdptr(struct mapped_device *md, void *ptr);
void *dm_get_mdptr(struct mapped_device *md);

/*
 * A device can still be used while suspended, but I/O is deferred.
 */
int dm_suspend(struct mapped_device *md, unsigned int suspend_flags);
int dm_resume(struct mapped_device *md);

/*
 * Event functions.
 */
uint32_t dm_get_event_nr(struct mapped_device *md);
int dm_wait_event(struct mapped_device *md, int event_nr);
uint32_t dm_next_uevent_seq(struct mapped_device *md);
void dm_uevent_add(struct mapped_device *md, struct list_head *elist);

/*
 * Info functions.
 */
const char *dm_device_name(struct mapped_device *md);
int dm_copy_name_and_uuid(struct mapped_device *md, char *name, char *uuid);
struct gendisk *dm_disk(struct mapped_device *md);
int dm_suspended(struct dm_target *ti);
int dm_post_suspending(struct dm_target *ti);
int dm_noflush_suspending(struct dm_target *ti);
void dm_accept_partial_bio(struct bio *bio, unsigned int n_sectors);
void dm_submit_bio_remap(struct bio *clone, struct bio *tgt_clone);

#ifdef CONFIG_BLK_DEV_ZONED
struct dm_report_zones_args {
        struct dm_target *tgt;
        sector_t next_sector;

        void *orig_data;
        report_zones_cb orig_cb;
        unsigned int zone_idx;

        /* must be filled by ->report_zones before calling dm_report_zones_cb */
        sector_t start;
};
int dm_report_zones(struct block_device *bdev, sector_t start, sector_t sector,
                    struct dm_report_zones_args *args, unsigned int nr_zones);
#endif /* CONFIG_BLK_DEV_ZONED */

/*
 * Device mapper functions to parse and create devices specified by the
 * parameter "dm-mod.create="
 */
int __init dm_early_create(struct dm_ioctl *dmi,
                           struct dm_target_spec **spec_array,
                           char **target_params_array);

/*
 * Geometry functions.
 */
int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo);
int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo);

/*
 *---------------------------------------------------------------
 * Functions for manipulating device-mapper tables.
 *---------------------------------------------------------------
 */

/*
 * First create an empty table.
 */
int dm_table_create(struct dm_table **result, blk_mode_t mode,
                    unsigned int num_targets, struct mapped_device *md);

/*
 * Then call this once for each target.
 */
int dm_table_add_target(struct dm_table *t, const char *type,
                        sector_t start, sector_t len, char *params);

/*
 * Target can use this to set the table's type.
 * Can only ever be called from a target's ctr.
 * Useful for "hybrid" target (supports both bio-based
 * and request-based).
 */
void dm_table_set_type(struct dm_table *t, enum dm_queue_mode type);

/*
 * Finally call this to make the table ready for use.
 */
int dm_table_complete(struct dm_table *t);

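/*
 * Example: the create/add/complete sequence for an in-kernel caller
 * (an illustrative sketch; the device name, size in sectors and error
 * handling are made up):
 *
 *    struct dm_table *t;
 *    int r;
 *
 *    r = dm_table_create(&t, BLK_OPEN_READ | BLK_OPEN_WRITE, 1, md);
 *    if (r)
 *        return r;
 *    r = dm_table_add_target(t, "linear", 0, 8192, "/dev/sda 0");
 *    if (!r)
 *        r = dm_table_complete(t);
 *    if (r)
 *        dm_table_destroy(t);
 */
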
/*
 * Destroy the table when finished.
 */
void dm_table_destroy(struct dm_table *t);

/*
 * Target may require that it is never sent I/O larger than len.
 */
int __must_check dm_set_target_max_io_len(struct dm_target *ti, sector_t len);

/*
 * Table reference counting.
 */
struct dm_table *dm_get_live_table(struct mapped_device *md, int *srcu_idx);
void dm_put_live_table(struct mapped_device *md, int srcu_idx);
void dm_sync_table(struct mapped_device *md);

/*
 * Queries
 */
sector_t dm_table_get_size(struct dm_table *t);
blk_mode_t dm_table_get_mode(struct dm_table *t);
struct mapped_device *dm_table_get_md(struct dm_table *t);
const char *dm_table_device_name(struct dm_table *t);

/*
 * Trigger an event.
 */
void dm_table_event(struct dm_table *t);

/*
 * Run the queue for request-based targets.
 */
void dm_table_run_md_queue_async(struct dm_table *t);

/*
 * The device must be suspended before calling this method.
 * Returns the previous table, which the caller must destroy.
 */
struct dm_table *dm_swap_table(struct mapped_device *md,
                               struct dm_table *t);

/*
 * Table blk_crypto_profile functions
 */
void dm_destroy_crypto_profile(struct blk_crypto_profile *profile);

/*
 *---------------------------------------------------------------
 * Macros.
 *---------------------------------------------------------------
 */
#define DM_NAME "device-mapper"

#define DM_FMT(fmt) DM_NAME ": " DM_MSG_PREFIX ": " fmt "\n"

#define DMCRIT(fmt, ...) pr_crit(DM_FMT(fmt), ##__VA_ARGS__)

#define DMERR(fmt, ...) pr_err(DM_FMT(fmt), ##__VA_ARGS__)
#define DMERR_LIMIT(fmt, ...) pr_err_ratelimited(DM_FMT(fmt), ##__VA_ARGS__)
#define DMWARN(fmt, ...) pr_warn(DM_FMT(fmt), ##__VA_ARGS__)
#define DMWARN_LIMIT(fmt, ...) pr_warn_ratelimited(DM_FMT(fmt), ##__VA_ARGS__)
#define DMINFO(fmt, ...) pr_info(DM_FMT(fmt), ##__VA_ARGS__)
#define DMINFO_LIMIT(fmt, ...) pr_info_ratelimited(DM_FMT(fmt), ##__VA_ARGS__)

#define DMDEBUG(fmt, ...) pr_debug(DM_FMT(fmt), ##__VA_ARGS__)
#define DMDEBUG_LIMIT(fmt, ...) pr_debug_ratelimited(DM_FMT(fmt), ##__VA_ARGS__)

#define DMEMIT(x...) (sz += ((sz >= maxlen) ? 0 : scnprintf(result + sz, maxlen - sz, x)))

#define DMEMIT_TARGET_NAME_VERSION(y) \
                DMEMIT("target_name=%s,target_version=%u.%u.%u", \
                       (y)->name, (y)->version[0], (y)->version[1], (y)->version[2])

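/*
 * Example: DMEMIT inside a status function (an illustrative sketch; the
 * macro relies on the local variables "sz", "result" and "maxlen" being
 * in scope, as below):
 *
 *    static void example_status(struct dm_target *ti, status_type_t type,
 *                               unsigned int status_flags, char *result,
 *                               unsigned int maxlen)
 *    {
 *        struct example_ctx *ec = ti->private;
 *        size_t sz = 0;
 *
 *        switch (type) {
 *        case STATUSTYPE_INFO:
 *            result[0] = '\0';
 *            break;
 *        case STATUSTYPE_TABLE:
 *            DMEMIT("%s", ec->dev->name);
 *            break;
 *        default:
 *            break;
 *        }
 *    }
 */
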
/**
 * module_dm() - Helper macro for DM targets that don't do anything
 * special in their module_init and module_exit.
 * Each module may only use this macro once, and calling it replaces
 * module_init() and module_exit().
 *
 * @name: DM target's name
 */
#define module_dm(name) \
static int __init dm_##name##_init(void) \
{ \
        return dm_register_target(&(name##_target)); \
} \
module_init(dm_##name##_init) \
static void __exit dm_##name##_exit(void) \
{ \
        dm_unregister_target(&(name##_target)); \
} \
module_exit(dm_##name##_exit)

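/*
 * Example: registering the hypothetical "example" target with
 * module_dm(). The macro expects a variable named example_target:
 *
 *    static struct target_type example_target = { ... };
 *    module_dm(example);
 */
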
/*
 * Definitions of return values from target end_io function.
 */
#define DM_ENDIO_DONE           0
#define DM_ENDIO_INCOMPLETE     1
#define DM_ENDIO_REQUEUE        2
#define DM_ENDIO_DELAY_REQUEUE  3

/*
 * Definitions of return values from target map function.
 */
#define DM_MAPIO_SUBMITTED      0
#define DM_MAPIO_REMAPPED       1
#define DM_MAPIO_REQUEUE        DM_ENDIO_REQUEUE
#define DM_MAPIO_DELAY_REQUEUE  DM_ENDIO_DELAY_REQUEUE
#define DM_MAPIO_KILL           4

#define dm_sector_div64(x, y)( \
{ \
        u64 _res; \
        (x) = div64_u64_rem(x, y, &_res); \
        _res; \
} \
)

/*
 * Ceiling(n / sz)
 */
#define dm_div_up(n, sz) (((n) + (sz) - 1) / (sz))

#define dm_sector_div_up(n, sz) ( \
{ \
        sector_t _r = ((n) + (sz) - 1); \
        sector_div(_r, (sz)); \
        _r; \
} \
)

/*
 * ceiling(n / size) * size
 */
#define dm_round_up(n, sz) (dm_div_up((n), (sz)) * (sz))

/*
 * Sector offset taken relative to the start of the target instead of
 * relative to the start of the device.
 */
#define dm_target_offset(ti, sector) ((sector) - (ti)->begin)

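/*
 * Example: with n = 1000 and sz = 512, dm_div_up(1000, 512) = 2 and
 * dm_round_up(1000, 512) = 1024. For a target with ti->begin = 2048,
 * a bio arriving at device sector 2304 maps to
 * dm_target_offset(ti, 2304) = 256 within the target.
 */
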
static inline sector_t to_sector(unsigned long long n)
{
        return (n >> SECTOR_SHIFT);
}

static inline unsigned long to_bytes(sector_t n)
{
        return (n << SECTOR_SHIFT);
}

#endif	/* _LINUX_DEVICE_MAPPER_H */