Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * Copyright (C) 2001 Sistina Software (UK) Limited. | |
0da336e5 | 3 | * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved. |
1da177e4 LT |
4 | * |
5 | * This file is released under the LGPL. | |
6 | */ | |
7 | ||
8 | #ifndef _LINUX_DEVICE_MAPPER_H | |
9 | #define _LINUX_DEVICE_MAPPER_H | |
10 | ||
416cd17b | 11 | #include <linux/bio.h> |
f6fccb12 | 12 | #include <linux/blkdev.h> |
6bbc923d | 13 | #include <linux/dm-ioctl.h> |
fd2ed4d2 | 14 | #include <linux/math64.h> |
71a16736 | 15 | #include <linux/ratelimit.h> |
416cd17b | 16 | |
af4874e0 | 17 | struct dm_dev; |
1da177e4 LT |
18 | struct dm_target; |
19 | struct dm_table; | |
d4100351 | 20 | struct dm_report_zones_args; |
17b2f66f | 21 | struct mapped_device; |
f6fccb12 | 22 | struct bio_vec; |
e511c4a3 | 23 | enum dax_access_mode; |
1da177e4 | 24 | |
e83068a5 MS |
25 | /* |
26 | * Type of table, mapped_device's mempool and request_queue | |
27 | */ | |
7e0d574f BVA |
28 | enum dm_queue_mode { |
29 | DM_TYPE_NONE = 0, | |
30 | DM_TYPE_BIO_BASED = 1, | |
31 | DM_TYPE_REQUEST_BASED = 2, | |
953923c0 | 32 | DM_TYPE_DAX_BIO_BASED = 3, |
7e0d574f | 33 | }; |
e83068a5 | 34 | |
91ccbbac | 35 | typedef enum { STATUSTYPE_INFO, STATUSTYPE_TABLE, STATUSTYPE_IMA } status_type_t; |
1da177e4 LT |
36 | |
37 | union map_info { | |
38 | void *ptr; | |
1da177e4 LT |
39 | }; |
40 | ||
41 | /* | |
42 | * In the constructor the target parameter will already have the | |
43 | * table, type, begin and len fields filled in. | |
44 | */ | |
45 | typedef int (*dm_ctr_fn) (struct dm_target *target, | |
46 | unsigned int argc, char **argv); | |
47 | ||
48 | /* | |
49 | * The destructor doesn't need to free the dm_target, just | |
50 | * anything hidden ti->private. | |
51 | */ | |
52 | typedef void (*dm_dtr_fn) (struct dm_target *ti); | |
53 | ||
54 | /* | |
55 | * The map function must return: | |
56 | * < 0: error | |
57 | * = 0: The target will handle the io by resubmitting it later | |
45cbcd79 | 58 | * = 1: simple remap complete |
2e93ccc1 | 59 | * = 2: The target wants to push back the io |
1da177e4 | 60 | */ |
7de3ee57 | 61 | typedef int (*dm_map_fn) (struct dm_target *ti, struct bio *bio); |
e5863d9a MS |
62 | typedef int (*dm_clone_and_map_request_fn) (struct dm_target *ti, |
63 | struct request *rq, | |
64 | union map_info *map_context, | |
65 | struct request **clone); | |
5de719e3 YY |
66 | typedef void (*dm_release_clone_request_fn) (struct request *clone, |
67 | union map_info *map_context); | |
1da177e4 LT |
68 | |
69 | /* | |
70 | * Returns: | |
71 | * < 0 : error (currently ignored) | |
72 | * 0 : ended successfully | |
73 | * 1 : for some reason the io has still not completed (eg, | |
74 | * multipath target might want to requeue a failed io). | |
2e93ccc1 | 75 | * 2 : The target wants to push back the io |
1da177e4 LT |
76 | */ |
77 | typedef int (*dm_endio_fn) (struct dm_target *ti, | |
4e4cbee9 | 78 | struct bio *bio, blk_status_t *error); |
7d76345d | 79 | typedef int (*dm_request_endio_fn) (struct dm_target *ti, |
2a842aca | 80 | struct request *clone, blk_status_t error, |
7d76345d | 81 | union map_info *map_context); |
1da177e4 LT |
82 | |
83 | typedef void (*dm_presuspend_fn) (struct dm_target *ti); | |
d67ee213 | 84 | typedef void (*dm_presuspend_undo_fn) (struct dm_target *ti); |
1da177e4 | 85 | typedef void (*dm_postsuspend_fn) (struct dm_target *ti); |
8757b776 | 86 | typedef int (*dm_preresume_fn) (struct dm_target *ti); |
1da177e4 LT |
87 | typedef void (*dm_resume_fn) (struct dm_target *ti); |
88 | ||
fd7c092e MP |
89 | typedef void (*dm_status_fn) (struct dm_target *ti, status_type_t status_type, |
90 | unsigned status_flags, char *result, unsigned maxlen); | |
1da177e4 | 91 | |
1eb5fa84 MS |
92 | typedef int (*dm_message_fn) (struct dm_target *ti, unsigned argc, char **argv, |
93 | char *result, unsigned maxlen); | |
1da177e4 | 94 | |
5bd5e8d8 | 95 | typedef int (*dm_prepare_ioctl_fn) (struct dm_target *ti, struct block_device **bdev); |
aa129a22 | 96 | |
e3290b94 | 97 | #ifdef CONFIG_BLK_DEV_ZONED |
d4100351 CH |
98 | typedef int (*dm_report_zones_fn) (struct dm_target *ti, |
99 | struct dm_report_zones_args *args, | |
100 | unsigned int nr_zones); | |
e3290b94 MS |
101 | #else |
102 | /* | |
103 | * Define dm_report_zones_fn so that targets can assign to NULL if | |
104 | * CONFIG_BLK_DEV_ZONED disabled. Otherwise each target needs to do | |
105 | * awkward #ifdefs in their target_type, etc. | |
106 | */ | |
107 | typedef int (*dm_report_zones_fn) (struct dm_target *dummy); | |
108 | #endif | |
e76239a3 | 109 | |
058ce5ca AK |
110 | /* |
111 | * These iteration functions are typically used to check (and combine) | |
112 | * properties of underlying devices. | |
113 | * E.g. Does at least one underlying device support flush? | |
114 | * Does any underlying device not support WRITE_SAME? | |
115 | * | |
116 | * The callout function is called once for each contiguous section of | |
117 | * an underlying device. State can be maintained in *data. | |
118 | * Return non-zero to stop iterating through any further devices. | |
119 | */ | |
af4874e0 MS |
120 | typedef int (*iterate_devices_callout_fn) (struct dm_target *ti, |
121 | struct dm_dev *dev, | |
5dea271b | 122 | sector_t start, sector_t len, |
af4874e0 MS |
123 | void *data); |
124 | ||
058ce5ca AK |
125 | /* |
126 | * This function must iterate through each section of device used by the | |
127 | * target until it encounters a non-zero return code, which it then returns. | |
128 | * Returns zero if no callout returned non-zero. | |
129 | */ | |
af4874e0 MS |
130 | typedef int (*dm_iterate_devices_fn) (struct dm_target *ti, |
131 | iterate_devices_callout_fn fn, | |
132 | void *data); | |
133 | ||
40bea431 MS |
134 | typedef void (*dm_io_hints_fn) (struct dm_target *ti, |
135 | struct queue_limits *limits); | |
136 | ||
7d76345d KU |
137 | /* |
138 | * Returns: | |
139 | * 0: The target can handle the next I/O immediately. | |
140 | * 1: The target can't handle the next I/O immediately. | |
141 | */ | |
142 | typedef int (*dm_busy_fn) (struct dm_target *ti); | |
143 | ||
545ed20e TK |
144 | /* |
145 | * Returns: | |
146 | * < 0 : error | |
147 | * >= 0 : the number of bytes accessible at the address | |
148 | */ | |
817bf402 | 149 | typedef long (*dm_dax_direct_access_fn) (struct dm_target *ti, pgoff_t pgoff, |
e511c4a3 JC |
150 | long nr_pages, enum dax_access_mode node, void **kaddr, |
151 | pfn_t *pfn); | |
cdf6cdcd VG |
152 | typedef int (*dm_dax_zero_page_range_fn)(struct dm_target *ti, pgoff_t pgoff, |
153 | size_t nr_pages); | |
545ed20e | 154 | |
047218ec JC |
155 | /* |
156 | * Returns: | |
157 | * != 0 : number of bytes transferred | |
158 | * 0 : recovery write failed | |
159 | */ | |
160 | typedef size_t (*dm_dax_recovery_write_fn)(struct dm_target *ti, pgoff_t pgoff, | |
161 | void *addr, size_t bytes, struct iov_iter *i); | |
162 | ||
1da177e4 LT |
163 | void dm_error(const char *message); |
164 | ||
82b1519b MP |
165 | struct dm_dev { |
166 | struct block_device *bdev; | |
817bf402 | 167 | struct dax_device *dax_dev; |
aeb5d727 | 168 | fmode_t mode; |
82b1519b MP |
169 | char name[16]; |
170 | }; | |
171 | ||
4df2bf46 D |
172 | dev_t dm_get_dev_t(const char *path); |
173 | ||
1da177e4 LT |
174 | /* |
175 | * Constructors should call these functions to ensure destination devices | |
176 | * are opened/closed correctly. | |
1da177e4 | 177 | */ |
8215d6ec | 178 | int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode, |
11f0431b | 179 | struct dm_dev **result); |
1da177e4 LT |
180 | void dm_put_device(struct dm_target *ti, struct dm_dev *d); |
181 | ||
182 | /* | |
183 | * Information about a target type | |
184 | */ | |
ab4c1424 | 185 | |
1da177e4 | 186 | struct target_type { |
ab4c1424 | 187 | uint64_t features; |
1da177e4 LT |
188 | const char *name; |
189 | struct module *module; | |
17b2f66f | 190 | unsigned version[3]; |
1da177e4 LT |
191 | dm_ctr_fn ctr; |
192 | dm_dtr_fn dtr; | |
193 | dm_map_fn map; | |
e5863d9a MS |
194 | dm_clone_and_map_request_fn clone_and_map_rq; |
195 | dm_release_clone_request_fn release_clone_rq; | |
1da177e4 | 196 | dm_endio_fn end_io; |
7d76345d | 197 | dm_request_endio_fn rq_end_io; |
1da177e4 | 198 | dm_presuspend_fn presuspend; |
d67ee213 | 199 | dm_presuspend_undo_fn presuspend_undo; |
1da177e4 | 200 | dm_postsuspend_fn postsuspend; |
8757b776 | 201 | dm_preresume_fn preresume; |
1da177e4 LT |
202 | dm_resume_fn resume; |
203 | dm_status_fn status; | |
204 | dm_message_fn message; | |
e56f81e0 | 205 | dm_prepare_ioctl_fn prepare_ioctl; |
e76239a3 | 206 | dm_report_zones_fn report_zones; |
7d76345d | 207 | dm_busy_fn busy; |
af4874e0 | 208 | dm_iterate_devices_fn iterate_devices; |
40bea431 | 209 | dm_io_hints_fn io_hints; |
817bf402 | 210 | dm_dax_direct_access_fn direct_access; |
cdf6cdcd | 211 | dm_dax_zero_page_range_fn dax_zero_page_range; |
047218ec | 212 | dm_dax_recovery_write_fn dax_recovery_write; |
45194e4f CR |
213 | |
214 | /* For internal device-mapper use. */ | |
215 | struct list_head list; | |
1da177e4 LT |
216 | }; |
217 | ||
3791e2fc AK |
218 | /* |
219 | * Target features | |
220 | */ | |
221 | ||
222 | /* | |
223 | * Any table that contains an instance of this target must have only one. | |
224 | */ | |
225 | #define DM_TARGET_SINGLETON 0x00000001 | |
226 | #define dm_target_needs_singleton(type) ((type)->features & DM_TARGET_SINGLETON) | |
227 | ||
cc6cbe14 AK |
228 | /* |
229 | * Indicates that a target does not support read-only devices. | |
230 | */ | |
231 | #define DM_TARGET_ALWAYS_WRITEABLE 0x00000002 | |
232 | #define dm_target_always_writeable(type) \ | |
233 | ((type)->features & DM_TARGET_ALWAYS_WRITEABLE) | |
234 | ||
36a0456f AK |
235 | /* |
236 | * Any device that contains a table with an instance of this target may never | |
237 | * have tables containing any different target type. | |
238 | */ | |
239 | #define DM_TARGET_IMMUTABLE 0x00000004 | |
240 | #define dm_target_is_immutable(type) ((type)->features & DM_TARGET_IMMUTABLE) | |
241 | ||
f083b09b MS |
242 | /* |
243 | * Indicates that a target may replace any target; even immutable targets. | |
244 | * .map, .map_rq, .clone_and_map_rq and .release_clone_rq are all defined. | |
245 | */ | |
246 | #define DM_TARGET_WILDCARD 0x00000008 | |
247 | #define dm_target_is_wildcard(type) ((type)->features & DM_TARGET_WILDCARD) | |
248 | ||
9b4b5a79 MB |
249 | /* |
250 | * A target implements own bio data integrity. | |
251 | */ | |
252 | #define DM_TARGET_INTEGRITY 0x00000010 | |
253 | #define dm_target_has_integrity(type) ((type)->features & DM_TARGET_INTEGRITY) | |
254 | ||
e2460f2a MP |
255 | /* |
256 | * A target passes integrity data to the lower device. | |
257 | */ | |
258 | #define DM_TARGET_PASSES_INTEGRITY 0x00000020 | |
259 | #define dm_target_passes_integrity(type) ((type)->features & DM_TARGET_PASSES_INTEGRITY) | |
260 | ||
dd88d313 | 261 | /* |
2d669ceb SK |
262 | * Indicates support for zoned block devices: |
263 | * - DM_TARGET_ZONED_HM: the target also supports host-managed zoned | |
264 | * block devices but does not support combining different zoned models. | |
265 | * - DM_TARGET_MIXED_ZONED_MODEL: the target supports combining multiple | |
266 | * devices with different zoned models. | |
dd88d313 | 267 | */ |
e3290b94 | 268 | #ifdef CONFIG_BLK_DEV_ZONED |
dd88d313 DLM |
269 | #define DM_TARGET_ZONED_HM 0x00000040 |
270 | #define dm_target_supports_zoned_hm(type) ((type)->features & DM_TARGET_ZONED_HM) | |
e3290b94 MS |
271 | #else |
272 | #define DM_TARGET_ZONED_HM 0x00000000 | |
273 | #define dm_target_supports_zoned_hm(type) (false) | |
274 | #endif | |
dd88d313 | 275 | |
6abc4946 KK |
276 | /* |
277 | * A target handles REQ_NOWAIT | |
278 | */ | |
279 | #define DM_TARGET_NOWAIT 0x00000080 | |
280 | #define dm_target_supports_nowait(type) ((type)->features & DM_TARGET_NOWAIT) | |
281 | ||
aa6ce87a ST |
282 | /* |
283 | * A target supports passing through inline crypto support. | |
284 | */ | |
285 | #define DM_TARGET_PASSES_CRYPTO 0x00000100 | |
286 | #define dm_target_passes_crypto(type) ((type)->features & DM_TARGET_PASSES_CRYPTO) | |
287 | ||
2d669ceb SK |
288 | #ifdef CONFIG_BLK_DEV_ZONED |
289 | #define DM_TARGET_MIXED_ZONED_MODEL 0x00000200 | |
290 | #define dm_target_supports_mixed_zoned_model(type) \ | |
291 | ((type)->features & DM_TARGET_MIXED_ZONED_MODEL) | |
292 | #else | |
293 | #define DM_TARGET_MIXED_ZONED_MODEL 0x00000000 | |
294 | #define dm_target_supports_mixed_zoned_model(type) (false) | |
295 | #endif | |
296 | ||
1da177e4 LT |
297 | struct dm_target { |
298 | struct dm_table *table; | |
299 | struct target_type *type; | |
300 | ||
301 | /* target limits */ | |
302 | sector_t begin; | |
303 | sector_t len; | |
304 | ||
542f9038 MS |
305 | /* If non-zero, maximum size of I/O submitted to a target. */ |
306 | uint32_t max_io_len; | |
1da177e4 | 307 | |
f9ab94ce | 308 | /* |
55a62eef | 309 | * A number of zero-length barrier bios that will be submitted |
f9ab94ce MP |
310 | * to the target for the purpose of flushing cache. |
311 | * | |
55a62eef AK |
312 | * The bio number can be accessed with dm_bio_get_target_bio_nr. |
313 | * It is a responsibility of the target driver to remap these bios | |
f9ab94ce MP |
314 | * to the real underlying devices. |
315 | */ | |
55a62eef | 316 | unsigned num_flush_bios; |
f9ab94ce | 317 | |
5ae89a87 | 318 | /* |
55a62eef AK |
319 | * The number of discard bios that will be submitted to the target. |
320 | * The bio number can be accessed with dm_bio_get_target_bio_nr. | |
5ae89a87 | 321 | */ |
55a62eef | 322 | unsigned num_discard_bios; |
5ae89a87 | 323 | |
00716545 DS |
324 | /* |
325 | * The number of secure erase bios that will be submitted to the target. | |
326 | * The bio number can be accessed with dm_bio_get_target_bio_nr. | |
327 | */ | |
328 | unsigned num_secure_erase_bios; | |
329 | ||
ac62d620 CH |
330 | /* |
331 | * The number of WRITE ZEROES bios that will be submitted to the target. | |
332 | * The bio number can be accessed with dm_bio_get_target_bio_nr. | |
333 | */ | |
334 | unsigned num_write_zeroes_bios; | |
335 | ||
c0820cf5 | 336 | /* |
30187e1d MS |
337 | * The minimum number of extra bytes allocated in each io for the |
338 | * target to use. | |
c0820cf5 | 339 | */ |
30187e1d | 340 | unsigned per_io_data_size; |
c0820cf5 | 341 | |
1da177e4 LT |
342 | /* target specific data */ |
343 | void *private; | |
344 | ||
345 | /* Used to provide an error string from the ctr */ | |
346 | char *error; | |
4c259327 | 347 | |
0e9c24ed JT |
348 | /* |
349 | * Set if this target needs to receive flushes regardless of | |
350 | * whether or not its underlying devices have support. | |
351 | */ | |
352 | bool flush_supported:1; | |
353 | ||
4c259327 MS |
354 | /* |
355 | * Set if this target needs to receive discards regardless of | |
356 | * whether or not its underlying devices have support. | |
357 | */ | |
0ac55489 | 358 | bool discards_supported:1; |
a666e5c0 MP |
359 | |
360 | /* | |
361 | * Set if we need to limit the number of in-flight bios when swapping. | |
362 | */ | |
363 | bool limit_swap_bios:1; | |
bb37d772 DLM |
364 | |
365 | /* | |
a8b9d116 | 366 | * Set if this target implements a zoned device and needs emulation of |
bb37d772 DLM |
367 | * zone append operations using regular writes. |
368 | */ | |
369 | bool emulate_zone_append:1; | |
0fbb4d93 MS |
370 | |
371 | /* | |
372 | * Set if the target will submit IO using dm_submit_bio_remap() | |
373 | * after returning DM_MAPIO_SUBMITTED from its map function. | |
374 | */ | |
375 | bool accounts_remapped_io:1; | |
9dd1cd32 MS |
376 | |
377 | /* | |
378 | * Set if the target will submit the DM bio without first calling | |
379 | * bio_set_dev(). NOTE: ideally a target should _not_ need this. | |
380 | */ | |
381 | bool needs_bio_set_dev:1; | |
1da177e4 LT |
382 | }; |
383 | ||
64f52b0e MS |
384 | void *dm_per_bio_data(struct bio *bio, size_t data_size); |
385 | struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size); | |
386 | unsigned dm_bio_get_target_bio_nr(const struct bio *bio); | |
ddbd658f | 387 | |
087615bf GKB |
388 | u64 dm_start_time_ns_from_clone(struct bio *bio); |
389 | ||
1da177e4 | 390 | int dm_register_target(struct target_type *t); |
10d3bd09 | 391 | void dm_unregister_target(struct target_type *t); |
17b2f66f | 392 | |
498f0103 MS |
393 | /* |
394 | * Target argument parsing. | |
395 | */ | |
396 | struct dm_arg_set { | |
397 | unsigned argc; | |
398 | char **argv; | |
399 | }; | |
400 | ||
401 | /* | |
402 | * The minimum and maximum value of a numeric argument, together with | |
403 | * the error message to use if the number is found to be outside that range. | |
404 | */ | |
405 | struct dm_arg { | |
406 | unsigned min; | |
407 | unsigned max; | |
408 | char *error; | |
409 | }; | |
410 | ||
411 | /* | |
412 | * Validate the next argument, either returning it as *value or, if invalid, | |
413 | * returning -EINVAL and setting *error. | |
414 | */ | |
5916a22b | 415 | int dm_read_arg(const struct dm_arg *arg, struct dm_arg_set *arg_set, |
498f0103 MS |
416 | unsigned *value, char **error); |
417 | ||
418 | /* | |
419 | * Process the next argument as the start of a group containing between | |
420 | * arg->min and arg->max further arguments. Either return the size as | |
421 | * *num_args or, if invalid, return -EINVAL and set *error. | |
422 | */ | |
5916a22b | 423 | int dm_read_arg_group(const struct dm_arg *arg, struct dm_arg_set *arg_set, |
498f0103 MS |
424 | unsigned *num_args, char **error); |
425 | ||
426 | /* | |
427 | * Return the current argument and shift to the next. | |
428 | */ | |
429 | const char *dm_shift_arg(struct dm_arg_set *as); | |
430 | ||
431 | /* | |
432 | * Move through num_args arguments. | |
433 | */ | |
434 | void dm_consume_args(struct dm_arg_set *as, unsigned num_args); | |
435 | ||
17b2f66f AK |
436 | /*----------------------------------------------------------------- |
437 | * Functions for creating and manipulating mapped devices. | |
438 | * Drop the reference with dm_put when you finish with the object. | |
439 | *---------------------------------------------------------------*/ | |
440 | ||
441 | /* | |
442 | * DM_ANY_MINOR chooses the next available minor number. | |
443 | */ | |
444 | #define DM_ANY_MINOR (-1) | |
445 | int dm_create(int minor, struct mapped_device **md); | |
446 | ||
447 | /* | |
448 | * Reference counting for md. | |
449 | */ | |
450 | struct mapped_device *dm_get_md(dev_t dev); | |
451 | void dm_get(struct mapped_device *md); | |
09ee96b2 | 452 | int dm_hold(struct mapped_device *md); |
17b2f66f AK |
453 | void dm_put(struct mapped_device *md); |
454 | ||
455 | /* | |
456 | * An arbitrary pointer may be stored alongside a mapped device. | |
457 | */ | |
458 | void dm_set_mdptr(struct mapped_device *md, void *ptr); | |
459 | void *dm_get_mdptr(struct mapped_device *md); | |
460 | ||
461 | /* | |
462 | * A device can still be used while suspended, but I/O is deferred. | |
463 | */ | |
a3d77d35 | 464 | int dm_suspend(struct mapped_device *md, unsigned suspend_flags); |
17b2f66f AK |
465 | int dm_resume(struct mapped_device *md); |
466 | ||
467 | /* | |
468 | * Event functions. | |
469 | */ | |
470 | uint32_t dm_get_event_nr(struct mapped_device *md); | |
471 | int dm_wait_event(struct mapped_device *md, int event_nr); | |
7a8c3d3b MA |
472 | uint32_t dm_next_uevent_seq(struct mapped_device *md); |
473 | void dm_uevent_add(struct mapped_device *md, struct list_head *elist); | |
17b2f66f AK |
474 | |
475 | /* | |
476 | * Info functions. | |
477 | */ | |
72d94861 | 478 | const char *dm_device_name(struct mapped_device *md); |
96a1f7db | 479 | int dm_copy_name_and_uuid(struct mapped_device *md, char *name, char *uuid); |
17b2f66f | 480 | struct gendisk *dm_disk(struct mapped_device *md); |
64dbce58 | 481 | int dm_suspended(struct dm_target *ti); |
5df96f2b | 482 | int dm_post_suspending(struct dm_target *ti); |
2e93ccc1 | 483 | int dm_noflush_suspending(struct dm_target *ti); |
1dd40c3e | 484 | void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors); |
b7f8dff0 | 485 | void dm_submit_bio_remap(struct bio *clone, struct bio *tgt_clone); |
cec47e3d | 486 | union map_info *dm_get_rq_mapinfo(struct request *rq); |
17b2f66f | 487 | |
d4100351 CH |
488 | #ifdef CONFIG_BLK_DEV_ZONED |
489 | struct dm_report_zones_args { | |
490 | struct dm_target *tgt; | |
491 | sector_t next_sector; | |
492 | ||
493 | void *orig_data; | |
494 | report_zones_cb orig_cb; | |
495 | unsigned int zone_idx; | |
496 | ||
497 | /* must be filled by ->report_zones before calling dm_report_zones_cb */ | |
498 | sector_t start; | |
499 | }; | |
912e8875 DLM |
500 | int dm_report_zones(struct block_device *bdev, sector_t start, sector_t sector, |
501 | struct dm_report_zones_args *args, unsigned int nr_zones); | |
d4100351 CH |
502 | #endif /* CONFIG_BLK_DEV_ZONED */ |
503 | ||
6bbc923d HK |
504 | /* |
505 | * Device mapper functions to parse and create devices specified by the | |
506 | * parameter "dm-mod.create=" | |
507 | */ | |
508 | int __init dm_early_create(struct dm_ioctl *dmi, | |
509 | struct dm_target_spec **spec_array, | |
510 | char **target_params_array); | |
511 | ||
f84cb8a4 MS |
512 | struct queue_limits *dm_get_queue_limits(struct mapped_device *md); |
513 | ||
17b2f66f AK |
514 | /* |
515 | * Geometry functions. | |
516 | */ | |
517 | int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo); | |
518 | int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo); | |
519 | ||
17b2f66f AK |
520 | /*----------------------------------------------------------------- |
521 | * Functions for manipulating device-mapper tables. | |
522 | *---------------------------------------------------------------*/ | |
523 | ||
524 | /* | |
525 | * First create an empty table. | |
526 | */ | |
aeb5d727 | 527 | int dm_table_create(struct dm_table **result, fmode_t mode, |
17b2f66f AK |
528 | unsigned num_targets, struct mapped_device *md); |
529 | ||
530 | /* | |
531 | * Then call this once for each target. | |
532 | */ | |
533 | int dm_table_add_target(struct dm_table *t, const char *type, | |
534 | sector_t start, sector_t len, char *params); | |
535 | ||
e83068a5 MS |
536 | /* |
537 | * Target can use this to set the table's type. | |
538 | * Can only ever be called from a target's ctr. | |
539 | * Useful for "hybrid" target (supports both bio-based | |
540 | * and request-based). | |
541 | */ | |
7e0d574f | 542 | void dm_table_set_type(struct dm_table *t, enum dm_queue_mode type); |
9d357b07 | 543 | |
17b2f66f AK |
544 | /* |
545 | * Finally call this to make the table ready for use. | |
546 | */ | |
547 | int dm_table_complete(struct dm_table *t); | |
548 | ||
f6e7baad BN |
549 | /* |
550 | * Destroy the table when finished. | |
551 | */ | |
552 | void dm_table_destroy(struct dm_table *t); | |
553 | ||
542f9038 MS |
554 | /* |
555 | * Target may require that it is never sent I/O larger than len. | |
556 | */ | |
557 | int __must_check dm_set_target_max_io_len(struct dm_target *ti, sector_t len); | |
558 | ||
17b2f66f AK |
559 | /* |
560 | * Table reference counting. | |
561 | */ | |
83d5e5b0 MP |
562 | struct dm_table *dm_get_live_table(struct mapped_device *md, int *srcu_idx); |
563 | void dm_put_live_table(struct mapped_device *md, int srcu_idx); | |
564 | void dm_sync_table(struct mapped_device *md); | |
17b2f66f AK |
565 | |
566 | /* | |
567 | * Queries | |
568 | */ | |
569 | sector_t dm_table_get_size(struct dm_table *t); | |
aeb5d727 | 570 | fmode_t dm_table_get_mode(struct dm_table *t); |
17b2f66f | 571 | struct mapped_device *dm_table_get_md(struct dm_table *t); |
f349b0a3 | 572 | const char *dm_table_device_name(struct dm_table *t); |
17b2f66f AK |
573 | |
574 | /* | |
575 | * Trigger an event. | |
576 | */ | |
577 | void dm_table_event(struct dm_table *t); | |
578 | ||
9974fa2c MS |
579 | /* |
580 | * Run the queue for request-based targets. | |
581 | */ | |
582 | void dm_table_run_md_queue_async(struct dm_table *t); | |
583 | ||
17b2f66f AK |
584 | /* |
585 | * The device must be suspended before calling this method. | |
042d2a9b | 586 | * Returns the previous table, which the caller must destroy. |
17b2f66f | 587 | */ |
042d2a9b AK |
588 | struct dm_table *dm_swap_table(struct mapped_device *md, |
589 | struct dm_table *t); | |
17b2f66f | 590 | |
aa6ce87a | 591 | /* |
cb77cb5a | 592 | * Table blk_crypto_profile functions |
aa6ce87a | 593 | */ |
cb77cb5a | 594 | void dm_destroy_crypto_profile(struct blk_crypto_profile *profile); |
aa6ce87a | 595 | |
0da336e5 AK |
596 | /*----------------------------------------------------------------- |
597 | * Macros. | |
598 | *---------------------------------------------------------------*/ | |
599 | #define DM_NAME "device-mapper" | |
600 | ||
d2c3c8dc JP |
601 | #define DM_FMT(fmt) DM_NAME ": " DM_MSG_PREFIX ": " fmt "\n" |
602 | ||
603 | #define DMCRIT(fmt, ...) pr_crit(DM_FMT(fmt), ##__VA_ARGS__) | |
604 | ||
605 | #define DMERR(fmt, ...) pr_err(DM_FMT(fmt), ##__VA_ARGS__) | |
733232f8 | 606 | #define DMERR_LIMIT(fmt, ...) pr_err_ratelimited(DM_FMT(fmt), ##__VA_ARGS__) |
d2c3c8dc | 607 | #define DMWARN(fmt, ...) pr_warn(DM_FMT(fmt), ##__VA_ARGS__) |
733232f8 | 608 | #define DMWARN_LIMIT(fmt, ...) pr_warn_ratelimited(DM_FMT(fmt), ##__VA_ARGS__) |
d2c3c8dc | 609 | #define DMINFO(fmt, ...) pr_info(DM_FMT(fmt), ##__VA_ARGS__) |
733232f8 | 610 | #define DMINFO_LIMIT(fmt, ...) pr_info_ratelimited(DM_FMT(fmt), ##__VA_ARGS__) |
0da336e5 | 611 | |
74244b59 | 612 | #define DMDEBUG(fmt, ...) pr_debug(DM_FMT(fmt), ##__VA_ARGS__) |
733232f8 | 613 | #define DMDEBUG_LIMIT(fmt, ...) pr_debug_ratelimited(DM_FMT(fmt), ##__VA_ARGS__) |
0da336e5 AK |
614 | |
615 | #define DMEMIT(x...) sz += ((sz >= maxlen) ? \ | |
616 | 0 : scnprintf(result + sz, maxlen - sz, x)) | |
617 | ||
8ec45662 TS |
618 | #define DMEMIT_TARGET_NAME_VERSION(y) \ |
619 | DMEMIT("target_name=%s,target_version=%u.%u.%u", \ | |
620 | (y)->name, (y)->version[0], (y)->version[1], (y)->version[2]) | |
621 | ||
0da336e5 AK |
622 | /* |
623 | * Definitions of return values from target end_io function. | |
624 | */ | |
7ed8578a | 625 | #define DM_ENDIO_DONE 0 |
0da336e5 AK |
626 | #define DM_ENDIO_INCOMPLETE 1 |
627 | #define DM_ENDIO_REQUEUE 2 | |
ac514ffc | 628 | #define DM_ENDIO_DELAY_REQUEUE 3 |
0da336e5 AK |
629 | |
630 | /* | |
631 | * Definitions of return values from target map function. | |
632 | */ | |
633 | #define DM_MAPIO_SUBMITTED 0 | |
634 | #define DM_MAPIO_REMAPPED 1 | |
635 | #define DM_MAPIO_REQUEUE DM_ENDIO_REQUEUE | |
ac514ffc | 636 | #define DM_MAPIO_DELAY_REQUEUE DM_ENDIO_DELAY_REQUEUE |
412445ac | 637 | #define DM_MAPIO_KILL 4 |
0da336e5 | 638 | |
fd2ed4d2 MP |
639 | #define dm_sector_div64(x, y)( \ |
640 | { \ | |
641 | u64 _res; \ | |
642 | (x) = div64_u64_rem(x, y, &_res); \ | |
643 | _res; \ | |
644 | } \ | |
645 | ) | |
646 | ||
0da336e5 AK |
647 | /* |
648 | * Ceiling(n / sz) | |
649 | */ | |
650 | #define dm_div_up(n, sz) (((n) + (sz) - 1) / (sz)) | |
651 | ||
652 | #define dm_sector_div_up(n, sz) ( \ | |
653 | { \ | |
654 | sector_t _r = ((n) + (sz) - 1); \ | |
655 | sector_div(_r, (sz)); \ | |
656 | _r; \ | |
657 | } \ | |
658 | ) | |
659 | ||
660 | /* | |
661 | * ceiling(n / size) * size | |
662 | */ | |
663 | #define dm_round_up(n, sz) (dm_div_up((n), (sz)) * (sz)) | |
664 | ||
56a67df7 MS |
665 | /* |
666 | * Sector offset taken relative to the start of the target instead of | |
667 | * relative to the start of the device. | |
668 | */ | |
669 | #define dm_target_offset(ti, sector) ((sector) - (ti)->begin) | |
670 | ||
0bdb50c5 | 671 | static inline sector_t to_sector(unsigned long long n) |
0da336e5 AK |
672 | { |
673 | return (n >> SECTOR_SHIFT); | |
674 | } | |
675 | ||
676 | static inline unsigned long to_bytes(sector_t n) | |
677 | { | |
678 | return (n << SECTOR_SHIFT); | |
679 | } | |
680 | ||
17b2f66f | 681 | #endif /* _LINUX_DEVICE_MAPPER_H */ |