/* drivers/md/dm-core.h — Linux 5.19-rc1 (linux-block.git) */
/*
 * Internal header file _only_ for device mapper core
 *
 * Copyright (C) 2016 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the LGPL.
 */

#ifndef DM_CORE_INTERNAL_H
#define DM_CORE_INTERNAL_H

#include <linux/kthread.h>
#include <linux/ktime.h>
#include <linux/blk-mq.h>
#include <linux/blk-crypto-profile.h>
#include <linux/jump_label.h>

#include <trace/events/block.h>

#include "dm.h"
#include "dm-ima.h"

#define DM_RESERVED_MAX_IOS		1024
/*
 * Pairs a kobject with a completion so a waiter can block until the
 * kobject is released (see dm_get_completion_from_kobject() below).
 */
struct dm_kobject_holder {
	struct kobject kobj;
	struct completion completion;
};
29
/*
 * DM core internal structures used directly by dm.c, dm-rq.c and dm-table.c.
 * DM targets must _not_ dereference a mapped_device or dm_table to directly
 * access their members!
 */

struct mapped_device {
	/* Serializes suspend/resume and guards 'map' for writers. */
	struct mutex suspend_lock;

	/* Devices opened on behalf of tables (see 'table_devices'). */
	struct mutex table_devices_lock;
	struct list_head table_devices;

	/*
	 * The current mapping (struct dm_table *).
	 * Use dm_get_live_table{_fast} or take suspend_lock for
	 * dereference.
	 */
	void __rcu *map;

	/* DMF_* bits, defined below this struct. */
	unsigned long flags;

	/* Protect queue and type against concurrent access. */
	struct mutex type_lock;
	enum dm_queue_mode type;

	/* NUMA node used for allocations — NUMA_NO_NODE if unset; confirm in dm.c. */
	int numa_node_id;
	struct request_queue *queue;

	atomic_t holders;
	atomic_t open_count;

	/* Set when the live table has a single immutable target. */
	struct dm_target *immutable_target;
	struct target_type *immutable_target_type;

	/* Device name, e.g. "dm-0" — presumably mirrors the gendisk name. */
	char name[16];
	struct gendisk *disk;
	struct dax_device *dax_dev;

	/* Waitqueue plus per-CPU in-flight I/O counters — NOTE(review):
	 * presumably waiters sum pending_io; confirm against dm.c. */
	wait_queue_head_t wait;
	unsigned long __percpu *pending_io;

	/* forced geometry settings */
	struct hd_geometry geometry;

	/*
	 * Processing queue (flush)
	 */
	struct workqueue_struct *wq;

	/*
	 * A list of ios that arrived while we were suspended.
	 */
	struct work_struct work;
	spinlock_t deferred_lock;
	struct bio_list deferred;

	/* Opaque pointer for the ioctl interface — see dm-ioctl.c (TODO confirm). */
	void *interface_ptr;

	/*
	 * Event handling.
	 */
	wait_queue_head_t eventq;
	atomic_t event_nr;
	atomic_t uevent_seq;
	struct list_head uevent_list;
	spinlock_t uevent_lock; /* Protect access to uevent_list */

	/* for blk-mq request-based DM support */
	bool init_tio_pdu:1;
	struct blk_mq_tag_set *tag_set;

	struct dm_stats stats;

	/* the number of internal suspends */
	unsigned internal_suspend_count;

	/* Throttling of swap bios — semaphore bounds them to 'swap_bios'. */
	int swap_bios;
	struct semaphore swap_bios_semaphore;
	struct mutex swap_bios_lock;

	/*
	 * io objects are allocated from here.
	 */
	struct bio_set io_bs;
	struct bio_set bs;

	/* kobject and completion */
	struct dm_kobject_holder kobj_holder;

	/* SRCU protecting the live table against concurrent swap. */
	struct srcu_struct io_barrier;

#ifdef CONFIG_BLK_DEV_ZONED
	/* Zone bookkeeping — zwp_offset presumably tracks zone write
	 * pointers for zone-append emulation; confirm in dm-zone.c. */
	unsigned int nr_zones;
	unsigned int *zwp_offset;
#endif

#ifdef CONFIG_IMA
	struct dm_ima_measurements ima;
#endif
};
130
/*
 * Bits for the flags field of struct mapped_device.
 */
#define DMF_BLOCK_IO_FOR_SUSPEND 0
#define DMF_SUSPENDED 1
#define DMF_FROZEN 2
#define DMF_FREEING 3
#define DMF_DELETING 4
#define DMF_NOFLUSH_SUSPENDING 5
#define DMF_DEFERRED_REMOVE 6
#define DMF_SUSPENDED_INTERNALLY 7
#define DMF_POST_SUSPENDING 8
#define DMF_EMULATE_ZONE_APPEND 9

/* Implemented in dm.c — presumably turn off the capability after the
 * underlying device fails such a request; confirm at the call sites. */
void disable_discard(struct mapped_device *md);
void disable_write_zeroes(struct mapped_device *md);
4cc96131 147
/* Return the device capacity (in 512-byte sectors, per get_capacity()). */
static inline sector_t dm_get_size(struct mapped_device *md)
{
	return get_capacity(md->disk);
}
152
/* Accessor for the device's embedded dm_stats area. */
static inline struct dm_stats *dm_get_stats(struct mapped_device *md)
{
	return &md->stats;
}
157
/*
 * Jump-label static keys, default off — presumably flipped on when a bound
 * table uses the corresponding feature, so the fast path pays nothing
 * otherwise (TODO confirm where each is enabled).
 */
DECLARE_STATIC_KEY_FALSE(stats_enabled);
DECLARE_STATIC_KEY_FALSE(swap_bios_enabled);
DECLARE_STATIC_KEY_FALSE(zoned_enabled);
161
bb37d772
DLM
162static inline bool dm_emulate_zone_append(struct mapped_device *md)
163{
164 if (blk_queue_is_zoned(md->queue))
165 return test_bit(DMF_EMULATE_ZONE_APPEND, &md->flags);
166 return false;
167}
168
#define DM_TABLE_MAX_DEPTH 16

/*
 * An immutable (once bound) mapping from device sectors to targets.
 * Lookup uses the btree of 'index'/'counts' levels; 'highs' holds each
 * target's last sector.
 */
struct dm_table {
	struct mapped_device *md;
	enum dm_queue_mode type;

	/* btree table */
	unsigned int depth;
	unsigned int counts[DM_TABLE_MAX_DEPTH]; /* in nodes */
	sector_t *index[DM_TABLE_MAX_DEPTH];

	unsigned int num_targets;
	unsigned int num_allocated;
	sector_t *highs;
	struct dm_target *targets;

	struct target_type *immutable_target_type;

	/* NOTE(review): integrity_added uses 'unsigned' while its siblings
	 * use 'bool' — inconsistent but intentional upstream; left as-is. */
	bool integrity_supported:1;
	bool singleton:1;
	unsigned integrity_added:1;

	/*
	 * Indicates the rw permissions for the new logical
	 * device. This should be a combination of FMODE_READ
	 * and FMODE_WRITE.
	 */
	fmode_t mode;

	/* a list of devices used by this table */
	struct list_head devices;

	/* events get handed up using this callback */
	void (*event_fn)(void *);
	void *event_context;

	struct dm_md_mempools *mempools;

#ifdef CONFIG_BLK_INLINE_ENCRYPTION
	struct blk_crypto_profile *crypto_profile;
#endif
};
211
/*
 * One of these is allocated per clone bio.
 */
#define DM_TIO_MAGIC 28714
struct dm_target_io {
	unsigned short magic;	/* DM_TIO_MAGIC — presumably a sanity check */
	blk_short_t flags;	/* DM_TIO_* bits, see enum below */
	unsigned int target_bio_nr;
	struct dm_io *io;	/* owning per-original-bio state */
	struct dm_target *ti;
	unsigned int *len_ptr;
	sector_t old_sector;
	/* Must remain the last member (see matching note in struct dm_io). */
	struct bio clone;
};
226
/*
 * dm_target_io flags — bit numbers into dm_target_io::flags,
 * used via dm_tio_flagged()/dm_tio_set_flag().
 */
enum {
	DM_TIO_INSIDE_DM_IO,	/* tio is the one embedded in struct dm_io */
	DM_TIO_IS_DUPLICATE_BIO	/* presumably set on extra clones; confirm in dm.c */
};
234
235static inline bool dm_tio_flagged(struct dm_target_io *tio, unsigned int bit)
236{
237 return (tio->flags & (1U << bit)) != 0;
238}
239
240static inline void dm_tio_set_flag(struct dm_target_io *tio, unsigned int bit)
241{
242 tio->flags |= (1U << bit);
243}
244
3b03f7c1
MS
245static inline bool dm_tio_is_normal(struct dm_target_io *tio)
246{
247 return (dm_tio_flagged(tio, DM_TIO_INSIDE_DM_IO) &&
248 !dm_tio_flagged(tio, DM_TIO_IS_DUPLICATE_BIO));
249}
250
/*
 * One of these is allocated per original bio.
 * It contains the first clone used for that original.
 */
#define DM_IO_MAGIC 19577
struct dm_io {
	unsigned short magic;	/* DM_IO_MAGIC — presumably a sanity check */
	blk_short_t flags;	/* DM_IO_* bits, see enum below */
	spinlock_t lock;
	unsigned long start_time;	/* presumably jiffies at submission; confirm */
	void *data;
	struct dm_io *next;
	struct dm_stats_aux stats_aux;
	blk_status_t status;
	atomic_t io_count;	/* outstanding clone count — TODO confirm semantics */
	struct mapped_device *md;

	/* The three fields represent mapped part of original bio */
	struct bio *orig_bio;
	unsigned int sector_offset; /* offset to end of orig_bio */
	unsigned int sectors;

	/* last member of dm_target_io is 'struct bio' */
	struct dm_target_io tio;
};
276
/*
 * dm_io flags — bit numbers into dm_io::flags,
 * used via dm_io_flagged()/dm_io_set_flag().
 */
enum {
	DM_IO_ACCOUNTED,	/* I/O accounting started — TODO confirm in dm.c */
	DM_IO_WAS_SPLIT		/* orig_bio was split — presumably; confirm */
};
284
285static inline bool dm_io_flagged(struct dm_io *io, unsigned int bit)
286{
287 return (io->flags & (1U << bit)) != 0;
288}
289
290static inline void dm_io_set_flag(struct dm_io *io, unsigned int bit)
291{
292 io->flags |= (1U << bit);
293}
294
4cc96131
MS
295static inline struct completion *dm_get_completion_from_kobject(struct kobject *kobj)
296{
297 return &container_of(kobj, struct dm_kobject_holder, kobj)->completion;
298}
299
unsigned __dm_get_module_param(unsigned *module_param, unsigned def, unsigned max);

/*
 * Return true when 'result' (a NUL-terminated status/message buffer) has
 * filled 'maxlen' — i.e. the string plus its terminator no longer fits.
 * A zero-sized buffer always counts as overflowed.
 */
static inline bool dm_message_test_buffer_overflow(char *result, unsigned maxlen)
{
	if (!maxlen)
		return true;

	return strlen(result) + 1 >= maxlen;
}
306
/*
 * Global (all-devices) event counter and waitqueue; dm_issue_global_event()
 * presumably bumps the counter and wakes waiters — confirm in dm.c.
 */
extern atomic_t dm_global_event_nr;
extern wait_queue_head_t dm_global_eventq;
void dm_issue_global_event(void);
4cc96131 311#endif