/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Internal header file _only_ for device mapper core
 *
 * Copyright (C) 2016 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the LGPL.
 */

#ifndef DM_CORE_INTERNAL_H
#define DM_CORE_INTERNAL_H

#include <linux/kthread.h>
#include <linux/ktime.h>
#include <linux/blk-mq.h>
#include <linux/blk-crypto-profile.h>
#include <linux/jump_label.h>

#include <trace/events/block.h>

#include "dm.h"
#include "dm-ima.h"

#define DM_RESERVED_MAX_IOS	1024

struct dm_io;

struct dm_kobject_holder {
	struct kobject kobj;
	struct completion completion;
};

/*
 * DM core internal structures used directly by dm.c, dm-rq.c and dm-table.c.
 * DM targets must _not_ dereference a mapped_device or dm_table to directly
 * access their members!
 */

/*
 * For mempools pre-allocation at the table loading time.
 */
struct dm_md_mempools {
	struct bio_set bs;
	struct bio_set io_bs;
};

struct mapped_device {
	struct mutex suspend_lock;

	struct mutex table_devices_lock;
	struct list_head table_devices;

	/*
	 * The current mapping (struct dm_table *).
	 * Use dm_get_live_table{_fast} or take suspend_lock for
	 * dereference.
	 */
	void __rcu *map;

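	/*
	 * Illustrative sketch (not dm core code): the intended way for
	 * process context to dereference 'map' is via the helpers from
	 * include/linux/device-mapper.h:
	 *
	 *	int srcu_idx;
	 *	struct dm_table *t = dm_get_live_table(md, &srcu_idx);
	 *
	 *	...use t only through table accessors...
	 *	dm_put_live_table(md, srcu_idx);
	 */
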
	unsigned long flags;

	/* Protect queue and type against concurrent access. */
	struct mutex type_lock;
	enum dm_queue_mode type;

	int numa_node_id;
	struct request_queue *queue;

	atomic_t holders;
	atomic_t open_count;

	struct dm_target *immutable_target;
	struct target_type *immutable_target_type;

	char name[16];
	struct gendisk *disk;
	struct dax_device *dax_dev;

	wait_queue_head_t wait;
	unsigned long __percpu *pending_io;

	/* forced geometry settings */
	struct hd_geometry geometry;

	/*
	 * Processing queue (flush)
	 */
	struct workqueue_struct *wq;

	/*
	 * A list of ios that arrived while we were suspended.
	 */
	struct work_struct work;
	spinlock_t deferred_lock;
	struct bio_list deferred;

	/*
	 * A requeue work context is needed for cloning a new bio to
	 * represent each dm_io being requeued, since a dm_io may point
	 * to the original bio from the FS.
	 */
	struct work_struct requeue_work;
	struct dm_io *requeue_list;

	void *interface_ptr;

	/*
	 * Event handling.
	 */
	wait_queue_head_t eventq;
	atomic_t event_nr;
	atomic_t uevent_seq;
	struct list_head uevent_list;
	spinlock_t uevent_lock; /* Protect access to uevent_list */

	/* for blk-mq request-based DM support */
	bool init_tio_pdu:1;
	struct blk_mq_tag_set *tag_set;

	struct dm_stats stats;

	/* the number of internal suspends */
	unsigned int internal_suspend_count;

	int swap_bios;
	struct semaphore swap_bios_semaphore;
	struct mutex swap_bios_lock;

	/*
	 * io objects are allocated from here.
	 */
	struct dm_md_mempools *mempools;

	/* kobject and completion */
	struct dm_kobject_holder kobj_holder;

	struct srcu_struct io_barrier;

#ifdef CONFIG_BLK_DEV_ZONED
	unsigned int nr_zones;
	unsigned int *zwp_offset;
#endif

#ifdef CONFIG_IMA
	struct dm_ima_measurements ima;
#endif
};

/*
 * Bits for the flags field of struct mapped_device.
 */
#define DMF_BLOCK_IO_FOR_SUSPEND 0
#define DMF_SUSPENDED 1
#define DMF_FROZEN 2
#define DMF_FREEING 3
#define DMF_DELETING 4
#define DMF_NOFLUSH_SUSPENDING 5
#define DMF_DEFERRED_REMOVE 6
#define DMF_SUSPENDED_INTERNALLY 7
#define DMF_POST_SUSPENDING 8
#define DMF_EMULATE_ZONE_APPEND 9

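/*
 * Illustrative sketch: the DMF_* values are bit numbers (not masks) meant
 * for the atomic bitops on md->flags; e.g. dm_suspended_md() from dm.h is
 * essentially:
 *
 *	return test_bit(DMF_SUSPENDED, &md->flags);
 */
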
void disable_discard(struct mapped_device *md);
void disable_write_zeroes(struct mapped_device *md);

static inline sector_t dm_get_size(struct mapped_device *md)
{
	return get_capacity(md->disk);
}

static inline struct dm_stats *dm_get_stats(struct mapped_device *md)
{
	return &md->stats;
}

DECLARE_STATIC_KEY_FALSE(stats_enabled);
DECLARE_STATIC_KEY_FALSE(swap_bios_enabled);
DECLARE_STATIC_KEY_FALSE(zoned_enabled);

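/*
 * Illustrative sketch: these static keys pair with DEFINE_STATIC_KEY_FALSE()
 * definitions in dm.c, letting hot paths branch almost for free:
 *
 *	if (static_branch_unlikely(&swap_bios_enabled))
 *		...throttle on md->swap_bios_semaphore...
 *
 * while slow paths flip a key on demand with static_branch_enable().
 */
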
static inline bool dm_emulate_zone_append(struct mapped_device *md)
{
	if (blk_queue_is_zoned(md->queue))
		return test_bit(DMF_EMULATE_ZONE_APPEND, &md->flags);
	return false;
}

#define DM_TABLE_MAX_DEPTH 16

struct dm_table {
	struct mapped_device *md;
	enum dm_queue_mode type;

	/* btree table */
	unsigned int depth;
	unsigned int counts[DM_TABLE_MAX_DEPTH]; /* in nodes */
	sector_t *index[DM_TABLE_MAX_DEPTH];

	unsigned int num_targets;
	unsigned int num_allocated;
	sector_t *highs;
	struct dm_target *targets;

	struct target_type *immutable_target_type;

	bool integrity_supported:1;
	bool singleton:1;
	unsigned integrity_added:1;

	/*
	 * Indicates the rw permissions for the new logical device. This
	 * should be a combination of BLK_OPEN_READ and BLK_OPEN_WRITE.
	 */
	blk_mode_t mode;

	/* a list of devices used by this table */
	struct list_head devices;
	struct rw_semaphore devices_lock;

	/* events get handed up using this callback */
	void (*event_fn)(void *data);
	void *event_context;

	struct dm_md_mempools *mempools;

#ifdef CONFIG_BLK_INLINE_ENCRYPTION
	struct blk_crypto_profile *crypto_profile;
#endif
};

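/*
 * Illustrative sketch (simplified; dm_table_find_target() in dm-table.c is
 * the authoritative walk): 'highs' holds the last sector of each target and
 * 'index[l]' the sorted keys of btree level l, so mapping a sector to a
 * target descends one node per level:
 *
 *	for (l = 0, n = 0; l < t->depth; l++) {
 *		node = ...node n of level l, via t->index[l]...;
 *		...advance to the first key in node >= sector, descend...
 *	}
 *	...the final (node, key) position indexes into t->targets...
 */
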
static inline struct dm_target *dm_table_get_target(struct dm_table *t,
						    unsigned int index)
{
	BUG_ON(index >= t->num_targets);
	return t->targets + index;
}

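/*
 * Illustrative sketch: DM core iterates a table's targets through this
 * accessor rather than touching t->targets directly, e.g.:
 *
 *	unsigned int i;
 *
 *	for (i = 0; i < t->num_targets; i++) {
 *		struct dm_target *ti = dm_table_get_target(t, i);
 *
 *		...operate on ti...
 *	}
 */
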
/*
 * One of these is allocated per clone bio.
 */
#define DM_TIO_MAGIC 28714
struct dm_target_io {
	unsigned short magic;
	blk_short_t flags;
	unsigned int target_bio_nr;
	struct dm_io *io;
	struct dm_target *ti;
	unsigned int *len_ptr;
	sector_t old_sector;
	struct bio clone;
};
#define DM_TARGET_IO_BIO_OFFSET (offsetof(struct dm_target_io, clone))
#define DM_IO_BIO_OFFSET \
	(offsetof(struct dm_target_io, clone) + offsetof(struct dm_io, tio))

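/*
 * Illustrative sketch: the clone bio sits at the end of dm_target_io, and
 * the first dm_target_io sits at the end of dm_io, so a clone bio can be
 * mapped back to its owners with pointer arithmetic alone:
 *
 *	struct dm_target_io *tio = container_of(clone, struct dm_target_io, clone);
 *	struct dm_io *io = tio->io;
 *
 * The *_OFFSET macros give the front pad that dm core passes to
 * bioset_init() so both structures are allocated along with each clone bio.
 */
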
/*
 * dm_target_io flags
 */
enum {
	DM_TIO_INSIDE_DM_IO,
	DM_TIO_IS_DUPLICATE_BIO
};

static inline bool dm_tio_flagged(struct dm_target_io *tio, unsigned int bit)
{
	return (tio->flags & (1U << bit)) != 0;
}

static inline void dm_tio_set_flag(struct dm_target_io *tio, unsigned int bit)
{
	tio->flags |= (1U << bit);
}

static inline bool dm_tio_is_normal(struct dm_target_io *tio)
{
	return (dm_tio_flagged(tio, DM_TIO_INSIDE_DM_IO) &&
		!dm_tio_flagged(tio, DM_TIO_IS_DUPLICATE_BIO));
}

/*
 * One of these is allocated per original bio.
 * It contains the first clone used for that original.
 */
#define DM_IO_MAGIC 19577
struct dm_io {
	unsigned short magic;
	blk_short_t flags;
	spinlock_t lock;
	unsigned long start_time;
	void *data;
	struct dm_io *next;
	struct dm_stats_aux stats_aux;
	blk_status_t status;
	atomic_t io_count;
	struct mapped_device *md;

	/* The three fields below represent the mapped part of the original bio */
	struct bio *orig_bio;
	unsigned int sector_offset; /* offset to end of orig_bio */
	unsigned int sectors;

	/* last member of dm_target_io is 'struct bio' */
	struct dm_target_io tio;
};

/*
 * dm_io flags
 */
enum {
	DM_IO_ACCOUNTED,
	DM_IO_WAS_SPLIT,
	DM_IO_BLK_STAT
};

static inline bool dm_io_flagged(struct dm_io *io, unsigned int bit)
{
	return (io->flags & (1U << bit)) != 0;
}

static inline void dm_io_set_flag(struct dm_io *io, unsigned int bit)
{
	io->flags |= (1U << bit);
}

void dm_io_rewind(struct dm_io *io, struct bio_set *bs);

static inline struct completion *dm_get_completion_from_kobject(struct kobject *kobj)
{
	return &container_of(kobj, struct dm_kobject_holder, kobj)->completion;
}

unsigned int __dm_get_module_param(unsigned int *module_param, unsigned int def, unsigned int max);

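/*
 * Illustrative note: __dm_get_module_param() reads a module parameter and
 * clamps it (0 falls back to 'def', values above 'max' are capped); e.g.
 * dm.c uses it to bound reserved_bio_based_ios by DM_RESERVED_MAX_IOS:
 *
 *	return __dm_get_module_param(&reserved_bio_based_ios,
 *				     RESERVED_BIO_BASED_IOS, DM_RESERVED_MAX_IOS);
 */
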
static inline bool dm_message_test_buffer_overflow(char *result, unsigned int maxlen)
{
	return !maxlen || strlen(result) + 1 >= maxlen;
}

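/*
 * Illustrative sketch (a typical caller pattern, not a quote of any one
 * target): status/message handlers append into 'result' and then test for
 * truncation:
 *
 *	sz += scnprintf(result + sz, maxlen - sz, "%u", some_count);
 *	if (dm_message_test_buffer_overflow(result, maxlen))
 *		return 1;
 *
 * where 'sz' and 'some_count' are hypothetical locals.
 */
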
extern atomic_t dm_global_event_nr;
extern wait_queue_head_t dm_global_eventq;
void dm_issue_global_event(void);

#endif