/*
 * Internal header file _only_ for device mapper core
 *
 * Copyright (C) 2016 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the LGPL.
 */

#ifndef DM_CORE_INTERNAL_H
#define DM_CORE_INTERNAL_H

#include <linux/kthread.h>
#include <linux/ktime.h>
#include <linux/blk-mq.h>
#include <linux/blk-crypto-profile.h>
#include <linux/jump_label.h>

#include <trace/events/block.h>

#include "dm.h"
#include "dm-ima.h"

#define DM_RESERVED_MAX_IOS 1024

struct dm_io;

struct dm_kobject_holder {
	struct kobject kobj;
	struct completion completion;
};

/*
 * DM core internal structures used directly by dm.c, dm-rq.c and dm-table.c.
 * DM targets must _not_ dereference a mapped_device or dm_table to directly
 * access their members!
 */

/*
 * For mempools pre-allocation at the table loading time.
 */
struct dm_md_mempools {
	struct bio_set bs;
	struct bio_set io_bs;
};

struct mapped_device {
	struct mutex suspend_lock;

	struct mutex table_devices_lock;
	struct list_head table_devices;

	/*
	 * The current mapping (struct dm_table *).
	 * Use dm_get_live_table{_fast} or take suspend_lock for
	 * dereference.
	 */
	void __rcu *map;

	unsigned long flags;
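
	/*
	 * Illustrative sketch (not part of the original header): readers
	 * dereference the live table under SRCU, pairing dm_get_live_table()
	 * with dm_put_live_table(), e.g.:
	 *
	 *	int srcu_idx;
	 *	struct dm_table *map = dm_get_live_table(md, &srcu_idx);
	 *
	 *	if (map) {
	 *		... inspect map ...
	 *	}
	 *	dm_put_live_table(md, srcu_idx);
	 */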

	/* Protect queue and type against concurrent access. */
	struct mutex type_lock;
	enum dm_queue_mode type;

	int numa_node_id;
	struct request_queue *queue;

	atomic_t holders;
	atomic_t open_count;

	struct dm_target *immutable_target;
	struct target_type *immutable_target_type;

	char name[16];
	struct gendisk *disk;
	struct dax_device *dax_dev;

	wait_queue_head_t wait;
	unsigned long __percpu *pending_io;
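
	/*
	 * Illustrative sketch (assumed reader pattern, not part of the
	 * original header): pending_io counts in-flight bios per CPU, so a
	 * total is obtained by summing every CPU's counter:
	 *
	 *	unsigned long sum = 0;
	 *	int cpu;
	 *
	 *	for_each_possible_cpu(cpu)
	 *		sum += *per_cpu_ptr(md->pending_io, cpu);
	 */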

	/* forced geometry settings */
	struct hd_geometry geometry;

	/*
	 * Processing queue (flush)
	 */
	struct workqueue_struct *wq;

	/*
	 * A list of ios that arrived while we were suspended.
	 */
	struct work_struct work;
	spinlock_t deferred_lock;
	struct bio_list deferred;

	/*
	 * A requeue work context is needed for cloning a new bio
	 * to represent the dm_io being requeued, since each dm_io
	 * may point to the original bio from the FS.
	 */
	struct work_struct requeue_work;
	struct dm_io *requeue_list;

	void *interface_ptr;

	/*
	 * Event handling.
	 */
	wait_queue_head_t eventq;
	atomic_t event_nr;
	atomic_t uevent_seq;
	struct list_head uevent_list;
	spinlock_t uevent_lock; /* Protect access to uevent_list */

	/* for blk-mq request-based DM support */
	bool init_tio_pdu:1;
	struct blk_mq_tag_set *tag_set;

	struct dm_stats stats;

	/* the number of internal suspends */
	unsigned internal_suspend_count;

	int swap_bios;
	struct semaphore swap_bios_semaphore;
	struct mutex swap_bios_lock;

	/*
	 * io objects are allocated from here.
	 */
	struct dm_md_mempools *mempools;

	/* kobject and completion */
	struct dm_kobject_holder kobj_holder;

	struct srcu_struct io_barrier;

#ifdef CONFIG_BLK_DEV_ZONED
	unsigned int nr_zones;
	unsigned int *zwp_offset;
#endif

#ifdef CONFIG_IMA
	struct dm_ima_measurements ima;
#endif
};

/*
 * Bits for the flags field of struct mapped_device.
 */
#define DMF_BLOCK_IO_FOR_SUSPEND 0
#define DMF_SUSPENDED 1
#define DMF_FROZEN 2
#define DMF_FREEING 3
#define DMF_DELETING 4
#define DMF_NOFLUSH_SUSPENDING 5
#define DMF_DEFERRED_REMOVE 6
#define DMF_SUSPENDED_INTERNALLY 7
#define DMF_POST_SUSPENDING 8
#define DMF_EMULATE_ZONE_APPEND 9

void disable_discard(struct mapped_device *md);
void disable_write_zeroes(struct mapped_device *md);

static inline sector_t dm_get_size(struct mapped_device *md)
{
	return get_capacity(md->disk);
}

static inline struct dm_stats *dm_get_stats(struct mapped_device *md)
{
	return &md->stats;
}

DECLARE_STATIC_KEY_FALSE(stats_enabled);
DECLARE_STATIC_KEY_FALSE(swap_bios_enabled);
DECLARE_STATIC_KEY_FALSE(zoned_enabled);

static inline bool dm_emulate_zone_append(struct mapped_device *md)
{
	if (blk_queue_is_zoned(md->queue))
		return test_bit(DMF_EMULATE_ZONE_APPEND, &md->flags);
	return false;
}

#define DM_TABLE_MAX_DEPTH 16

struct dm_table {
	struct mapped_device *md;
	enum dm_queue_mode type;

	/* btree table */
	unsigned int depth;
	unsigned int counts[DM_TABLE_MAX_DEPTH]; /* in nodes */
	sector_t *index[DM_TABLE_MAX_DEPTH];

	unsigned int num_targets;
	unsigned int num_allocated;
	sector_t *highs;
	struct dm_target *targets;

	struct target_type *immutable_target_type;

	bool integrity_supported:1;
	bool singleton:1;
	unsigned integrity_added:1;

	/*
	 * Indicates the rw permissions for the new logical device.
	 * This should be a combination of FMODE_READ and FMODE_WRITE.
	 */
	fmode_t mode;

	/* a list of devices used by this table */
	struct list_head devices;

	/* events get handed up using this callback */
	void (*event_fn)(void *);
	void *event_context;

	struct dm_md_mempools *mempools;

#ifdef CONFIG_BLK_INLINE_ENCRYPTION
	struct blk_crypto_profile *crypto_profile;
#endif
};

static inline struct dm_target *dm_table_get_target(struct dm_table *t,
						    unsigned int index)
{
	BUG_ON(index >= t->num_targets);
	return t->targets + index;
}
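
/*
 * Illustrative sketch (assumed caller pattern, not part of the original
 * header): walking every target in a bound table by index:
 *
 *	unsigned int i;
 *
 *	for (i = 0; i < t->num_targets; i++) {
 *		struct dm_target *ti = dm_table_get_target(t, i);
 *
 *		... inspect ti ...
 *	}
 */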

/*
 * One of these is allocated per clone bio.
 */
#define DM_TIO_MAGIC 28714
struct dm_target_io {
	unsigned short magic;
	blk_short_t flags;
	unsigned int target_bio_nr;
	struct dm_io *io;
	struct dm_target *ti;
	unsigned int *len_ptr;
	sector_t old_sector;
	struct bio clone;
};
#define DM_TARGET_IO_BIO_OFFSET (offsetof(struct dm_target_io, clone))
#define DM_IO_BIO_OFFSET \
	(offsetof(struct dm_target_io, clone) + offsetof(struct dm_io, tio))
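
/*
 * Illustrative sketch (assumed usage, mirroring how dm.c sizes its
 * bio_sets): the offsets above are front_pad values, so the clone bio
 * handed out by a pool sits at the tail of a dm_target_io (or of a whole
 * dm_io for the first clone):
 *
 *	bioset_init(&pools->bs, pool_size, DM_TARGET_IO_BIO_OFFSET, 0);
 *	bioset_init(&pools->io_bs, pool_size, DM_IO_BIO_OFFSET, 0);
 *
 * (dm.c additionally rounds the front_pad up for per_io_data_size; treat
 * the calls above as a simplified sketch.)
 */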

/*
 * dm_target_io flags
 */
enum {
	DM_TIO_INSIDE_DM_IO,
	DM_TIO_IS_DUPLICATE_BIO
};

static inline bool dm_tio_flagged(struct dm_target_io *tio, unsigned int bit)
{
	return (tio->flags & (1U << bit)) != 0;
}

static inline void dm_tio_set_flag(struct dm_target_io *tio, unsigned int bit)
{
	tio->flags |= (1U << bit);
}

static inline bool dm_tio_is_normal(struct dm_target_io *tio)
{
	return (dm_tio_flagged(tio, DM_TIO_INSIDE_DM_IO) &&
		!dm_tio_flagged(tio, DM_TIO_IS_DUPLICATE_BIO));
}
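
/*
 * Illustrative sketch (assumed caller pattern): tio->flags is a plain,
 * non-atomic bitmask, so these helpers are only safe while the tio is not
 * shared concurrently:
 *
 *	dm_tio_set_flag(tio, DM_TIO_IS_DUPLICATE_BIO);
 *	if (dm_tio_flagged(tio, DM_TIO_IS_DUPLICATE_BIO))
 *		... treat as a duplicate clone ...
 */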

/*
 * One of these is allocated per original bio.
 * It contains the first clone used for that original.
 */
#define DM_IO_MAGIC 19577
struct dm_io {
	unsigned short magic;
	blk_short_t flags;
	spinlock_t lock;
	unsigned long start_time;
	void *data;
	struct dm_io *next;
	struct dm_stats_aux stats_aux;
	blk_status_t status;
	atomic_t io_count;
	struct mapped_device *md;

	/* These three fields represent the mapped part of the original bio */
	struct bio *orig_bio;
	unsigned int sector_offset; /* offset to end of orig_bio */
	unsigned int sectors;

	/* last member of dm_target_io is 'struct bio' */
	struct dm_target_io tio;
};
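
/*
 * Illustrative sketch (helper modeled on the container_of pattern used in
 * dm.c, shown here for clarity): because 'clone' is embedded in
 * dm_target_io, which is in turn the last member of dm_io, both owners can
 * be recovered from a clone bio without any extra back-pointers:
 *
 *	static inline struct dm_target_io *clone_to_tio(struct bio *clone)
 *	{
 *		return container_of(clone, struct dm_target_io, clone);
 *	}
 *
 * and, for a tio flagged DM_TIO_INSIDE_DM_IO:
 *
 *	struct dm_io *io = container_of(tio, struct dm_io, tio);
 */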

/*
 * dm_io flags
 */
enum {
	DM_IO_ACCOUNTED,
	DM_IO_WAS_SPLIT
};

static inline bool dm_io_flagged(struct dm_io *io, unsigned int bit)
{
	return (io->flags & (1U << bit)) != 0;
}

static inline void dm_io_set_flag(struct dm_io *io, unsigned int bit)
{
	io->flags |= (1U << bit);
}

void dm_io_rewind(struct dm_io *io, struct bio_set *bs);

static inline struct completion *dm_get_completion_from_kobject(struct kobject *kobj)
{
	return &container_of(kobj, struct dm_kobject_holder, kobj)->completion;
}

unsigned __dm_get_module_param(unsigned *module_param, unsigned def, unsigned max);

static inline bool dm_message_test_buffer_overflow(char *result, unsigned maxlen)
{
	return !maxlen || strlen(result) + 1 >= maxlen;
}
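
/*
 * Illustrative sketch (assumed caller pattern; 'sz' and 'nr_regions' are
 * hypothetical): a status/message handler filling 'result' can detect
 * truncation after each append and stop early:
 *
 *	unsigned int sz = 0;
 *
 *	sz += scnprintf(result + sz, maxlen - sz, "%u", nr_regions);
 *	if (dm_message_test_buffer_overflow(result, maxlen))
 *		... stop appending; the buffer is full ...
 */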

extern atomic_t dm_global_event_nr;
extern wait_queue_head_t dm_global_eventq;
void dm_issue_global_event(void);

#endif