dm table: remove dm_table_get_num_targets() wrapper
[linux-block.git] drivers/md/dm-core.h
/*
 * Internal header file _only_ for device mapper core
 *
 * Copyright (C) 2016 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the LGPL.
 */

#ifndef DM_CORE_INTERNAL_H
#define DM_CORE_INTERNAL_H

#include <linux/kthread.h>
#include <linux/ktime.h>
#include <linux/blk-mq.h>
#include <linux/blk-crypto-profile.h>
#include <linux/jump_label.h>

#include <trace/events/block.h>

#include "dm.h"
#include "dm-ima.h"

#define DM_RESERVED_MAX_IOS 1024

struct dm_io;

struct dm_kobject_holder {
	struct kobject kobj;
	struct completion completion;
};

/*
 * DM core internal structures used directly by dm.c, dm-rq.c and dm-table.c.
 * DM targets must _not_ dereference a mapped_device or dm_table to directly
 * access their members!
 */

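/*
 * Illustrative sketch only: a target that needs the owning device should go
 * through the public API in <linux/device-mapper.h> (e.g. dm_table_get_md()
 * and dm_device_name()) rather than reaching into these structures:
 *
 *	struct mapped_device *md = dm_table_get_md(ti->table);
 *
 *	pr_debug("bound to device %s\n", dm_device_name(md));
 */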
/*
 * For mempool pre-allocation at table load time.
 */
struct dm_md_mempools {
	struct bio_set bs;
	struct bio_set io_bs;
};

struct mapped_device {
	struct mutex suspend_lock;

	struct mutex table_devices_lock;
	struct list_head table_devices;

	/*
	 * The current mapping (struct dm_table *).
	 * Use dm_get_live_table{_fast} or take suspend_lock for
	 * dereference; see the usage sketch after this struct.
	 */
	void __rcu *map;

	unsigned long flags;

	/* Protect queue and type against concurrent access. */
	struct mutex type_lock;
	enum dm_queue_mode type;

	int numa_node_id;
	struct request_queue *queue;

	atomic_t holders;
	atomic_t open_count;

	struct dm_target *immutable_target;
	struct target_type *immutable_target_type;

	char name[16];
	struct gendisk *disk;
	struct dax_device *dax_dev;

	wait_queue_head_t wait;
	unsigned long __percpu *pending_io;

	/* forced geometry settings */
	struct hd_geometry geometry;

	/*
	 * Processing queue (flush)
	 */
	struct workqueue_struct *wq;

	/*
	 * A list of ios that arrived while we were suspended.
	 */
	struct work_struct work;
	spinlock_t deferred_lock;
	struct bio_list deferred;

	/*
	 * The requeue work context is needed to clone a new bio that
	 * represents the dm_io being requeued, since each dm_io may
	 * point to the original bio from the FS.
	 */
	struct work_struct requeue_work;
	struct dm_io *requeue_list;

	void *interface_ptr;

	/*
	 * Event handling.
	 */
	wait_queue_head_t eventq;
	atomic_t event_nr;
	atomic_t uevent_seq;
	struct list_head uevent_list;
	spinlock_t uevent_lock; /* Protect access to uevent_list */

	/* for blk-mq request-based DM support */
	bool init_tio_pdu:1;
	struct blk_mq_tag_set *tag_set;

	struct dm_stats stats;

	/* the number of internal suspends */
	unsigned internal_suspend_count;

	int swap_bios;
	struct semaphore swap_bios_semaphore;
	struct mutex swap_bios_lock;

	/*
	 * io objects are allocated from here.
	 */
	struct dm_md_mempools *mempools;

	/* kobject and completion */
	struct dm_kobject_holder kobj_holder;

	struct srcu_struct io_barrier;

#ifdef CONFIG_BLK_DEV_ZONED
	unsigned int nr_zones;
	unsigned int *zwp_offset;
#endif

#ifdef CONFIG_IMA
	struct dm_ima_measurements ima;
#endif
};

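/*
 * Illustrative sketch only: readers of md->map must not dereference it
 * directly. Using the dm_get_live_table()/dm_put_live_table() helpers
 * defined in dm.c, access looks roughly like:
 *
 *	int srcu_idx;
 *	struct dm_table *t = dm_get_live_table(md, &srcu_idx);
 *
 *	if (t) {
 *		... use the table ...
 *	}
 *	dm_put_live_table(md, srcu_idx);
 */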
/*
 * Bits for the flags field of struct mapped_device.
 */
#define DMF_BLOCK_IO_FOR_SUSPEND 0
#define DMF_SUSPENDED 1
#define DMF_FROZEN 2
#define DMF_FREEING 3
#define DMF_DELETING 4
#define DMF_NOFLUSH_SUSPENDING 5
#define DMF_DEFERRED_REMOVE 6
#define DMF_SUSPENDED_INTERNALLY 7
#define DMF_POST_SUSPENDING 8
#define DMF_EMULATE_ZONE_APPEND 9

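/*
 * Illustrative sketch only: these bits are manipulated with the generic
 * bitops on md->flags, so a (hypothetical) helper checking the suspended
 * state would look roughly like:
 *
 *	static inline bool dm_md_is_suspended(struct mapped_device *md)
 *	{
 *		return test_bit(DMF_SUSPENDED, &md->flags);
 *	}
 *
 * with set_bit()/clear_bit() used on the same word during suspend/resume.
 */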
void disable_discard(struct mapped_device *md);
void disable_write_zeroes(struct mapped_device *md);

static inline sector_t dm_get_size(struct mapped_device *md)
{
	return get_capacity(md->disk);
}

static inline struct dm_stats *dm_get_stats(struct mapped_device *md)
{
	return &md->stats;
}

DECLARE_STATIC_KEY_FALSE(stats_enabled);
DECLARE_STATIC_KEY_FALSE(swap_bios_enabled);
DECLARE_STATIC_KEY_FALSE(zoned_enabled);

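/*
 * Illustrative sketch only: these static keys let the hot I/O path skip
 * rarely used features until they are first enabled, e.g. a (hypothetical)
 * caller:
 *
 *	if (static_branch_unlikely(&stats_enabled))
 *		dm_stats_account_io(...);
 *
 * They are meant to be flipped with static_branch_enable() once the
 * corresponding feature is configured.
 */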
static inline bool dm_emulate_zone_append(struct mapped_device *md)
{
	if (blk_queue_is_zoned(md->queue))
		return test_bit(DMF_EMULATE_ZONE_APPEND, &md->flags);
	return false;
}

#define DM_TABLE_MAX_DEPTH 16

struct dm_table {
	struct mapped_device *md;
	enum dm_queue_mode type;

	/* btree table (see the lookup sketch after this struct) */
	unsigned int depth;
	unsigned int counts[DM_TABLE_MAX_DEPTH]; /* in nodes */
	sector_t *index[DM_TABLE_MAX_DEPTH];

	unsigned int num_targets;
	unsigned int num_allocated;
	sector_t *highs;
	struct dm_target *targets;

	struct target_type *immutable_target_type;

	bool integrity_supported:1;
	bool singleton:1;
	unsigned integrity_added:1;

	/*
	 * Indicates the rw permissions for the new logical
	 * device. This should be a combination of FMODE_READ
	 * and FMODE_WRITE.
	 */
	fmode_t mode;

	/* a list of devices used by this table */
	struct list_head devices;

	/* events get handed up using this callback */
	void (*event_fn)(void *);
	void *event_context;

	struct dm_md_mempools *mempools;

#ifdef CONFIG_BLK_INLINE_ENCRYPTION
	struct blk_crypto_profile *crypto_profile;
#endif
};

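/*
 * Illustrative sketch only, paraphrasing the lookup in dm-table.c: the
 * "btree" above is a flattened n-ary search tree over target boundaries.
 * highs[] holds the last sector of each target and index[] holds the
 * separator keys per level, so mapping a sector to a target is a short
 * walk followed by an index into targets[] (helper names simplified):
 *
 *	unsigned int l, n = 0, k = 0;
 *	sector_t *node;
 *
 *	for (l = 0; l < t->depth; l++) {
 *		n = child_node(n, k);
 *		node = node_at(t, l, n);
 *		for (k = 0; k < KEYS_PER_NODE; k++)
 *			if (node[k] >= sector)
 *				break;
 *	}
 *	return &t->targets[(KEYS_PER_NODE * n) + k];
 */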
/*
 * One of these is allocated per clone bio.
 */
#define DM_TIO_MAGIC 28714
struct dm_target_io {
	unsigned short magic;
	blk_short_t flags;
	unsigned int target_bio_nr;
	struct dm_io *io;
	struct dm_target *ti;
	unsigned int *len_ptr;
	sector_t old_sector;
	struct bio clone;
};
#define DM_TARGET_IO_BIO_OFFSET (offsetof(struct dm_target_io, clone))
#define DM_IO_BIO_OFFSET \
	(offsetof(struct dm_target_io, clone) + offsetof(struct dm_io, tio))

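/*
 * Illustrative sketch only: the offsets above encode how a clone bio sits
 * at the tail of dm_target_io, which is in turn the last member of dm_io.
 * They are used as bio_set front_pad so per-bio data, the dm_target_io and
 * (for the first clone) the dm_io are allocated in front of the bio, along
 * the lines of (simplified, ignoring alignment):
 *
 *	bioset_init(&pools->io_bs, pool_size,
 *		    per_io_data_size + DM_IO_BIO_OFFSET, 0);
 *
 * The containing structure is then recovered from a clone bio with
 * container_of(clone, struct dm_target_io, clone).
 */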
/*
 * dm_target_io flags
 */
enum {
	DM_TIO_INSIDE_DM_IO,
	DM_TIO_IS_DUPLICATE_BIO
};

static inline bool dm_tio_flagged(struct dm_target_io *tio, unsigned int bit)
{
	return (tio->flags & (1U << bit)) != 0;
}

static inline void dm_tio_set_flag(struct dm_target_io *tio, unsigned int bit)
{
	tio->flags |= (1U << bit);
}

static inline bool dm_tio_is_normal(struct dm_target_io *tio)
{
	return (dm_tio_flagged(tio, DM_TIO_INSIDE_DM_IO) &&
		!dm_tio_flagged(tio, DM_TIO_IS_DUPLICATE_BIO));
}

/*
 * One of these is allocated per original bio.
 * It contains the first clone used for that original.
 */
#define DM_IO_MAGIC 19577
struct dm_io {
	unsigned short magic;
	blk_short_t flags;
	spinlock_t lock;
	unsigned long start_time;
	void *data;
	struct dm_io *next;
	struct dm_stats_aux stats_aux;
	blk_status_t status;
	atomic_t io_count;
	struct mapped_device *md;

	/* The three fields below describe the mapped part of the original bio */
	struct bio *orig_bio;
	unsigned int sector_offset; /* offset to end of orig_bio */
	unsigned int sectors;

	/* last member of dm_target_io is 'struct bio' */
	struct dm_target_io tio;
};

/*
 * dm_io flags
 */
enum {
	DM_IO_ACCOUNTED,
	DM_IO_WAS_SPLIT
};

static inline bool dm_io_flagged(struct dm_io *io, unsigned int bit)
{
	return (io->flags & (1U << bit)) != 0;
}

static inline void dm_io_set_flag(struct dm_io *io, unsigned int bit)
{
	io->flags |= (1U << bit);
}

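/*
 * Illustrative sketch only: when a dm_io can be observed by several clone
 * bios, these bits are tested and set under io->lock, roughly:
 *
 *	spin_lock_irqsave(&io->lock, flags);
 *	if (!dm_io_flagged(io, DM_IO_ACCOUNTED))
 *		dm_io_set_flag(io, DM_IO_ACCOUNTED);
 *	spin_unlock_irqrestore(&io->lock, flags);
 */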
void dm_io_rewind(struct dm_io *io, struct bio_set *bs);

static inline struct completion *dm_get_completion_from_kobject(struct kobject *kobj)
{
	return &container_of(kobj, struct dm_kobject_holder, kobj)->completion;
}

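/*
 * Illustrative sketch only: this pairs with struct dm_kobject_holder above.
 * A kobject release handler can signal waiters that the embedded kobject is
 * gone, along the lines of (example_kobj_release is a hypothetical name;
 * dm-sysfs.c uses this pattern):
 *
 *	static void example_kobj_release(struct kobject *kobj)
 *	{
 *		complete(dm_get_completion_from_kobject(kobj));
 *	}
 */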
unsigned __dm_get_module_param(unsigned *module_param, unsigned def, unsigned max);

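/*
 * Illustrative sketch only: __dm_get_module_param() reads a module
 * parameter, substitutes the default when it is zero and clamps it to max,
 * so per-parameter wrappers reduce to (hypothetical parameter):
 *
 *	static unsigned reserved_ios = DM_RESERVED_MAX_IOS / 4;
 *
 *	unsigned dm_get_reserved_ios(void)
 *	{
 *		return __dm_get_module_param(&reserved_ios, 256, DM_RESERVED_MAX_IOS);
 *	}
 */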
static inline bool dm_message_test_buffer_overflow(char *result, unsigned maxlen)
{
	return !maxlen || strlen(result) + 1 >= maxlen;
}

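/*
 * Illustrative sketch only: message/status handlers that build a reply in
 * 'result' can use this to notice that the buffer filled up and the reply
 * was truncated, e.g. in a hypothetical handler:
 *
 *	sz += scnprintf(result + sz, maxlen - sz, "%u ", value);
 *	if (dm_message_test_buffer_overflow(result, maxlen))
 *		return 1;
 */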
extern atomic_t dm_global_event_nr;
extern wait_queue_head_t dm_global_eventq;
void dm_issue_global_event(void);

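/*
 * Illustrative sketch only: dm_issue_global_event() (defined in dm.c) bumps
 * the global event counter and wakes any pollers, roughly:
 *
 *	atomic_inc(&dm_global_event_nr);
 *	wake_up(&dm_global_eventq);
 */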
#endif