Commit | Line | Data |
---|---|---|
4cc96131 MS |
/*
 * Internal header file _only_ for device mapper core
 *
 * Copyright (C) 2016 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the LGPL.
 */
8 | ||
9 | #ifndef DM_CORE_INTERNAL_H | |
10 | #define DM_CORE_INTERNAL_H | |
11 | ||
12 | #include <linux/kthread.h> | |
13 | #include <linux/ktime.h> | |
33bd6f06 | 14 | #include <linux/genhd.h> |
4cc96131 MS |
15 | #include <linux/blk-mq.h> |
16 | ||
17 | #include <trace/events/block.h> | |
18 | ||
19 | #include "dm.h" | |
20 | ||
21 | #define DM_RESERVED_MAX_IOS 1024 | |
22 | ||
/*
 * Pairs a kobject with a completion so that device teardown can wait
 * for the last sysfs reference to be dropped before freeing the
 * containing object (see dm_get_completion_from_kobject()).
 */
struct dm_kobject_holder {
	struct kobject kobj;
	struct completion completion;	/* signalled on kobject release */
};
27 | ||
/*
 * DM core internal structures used directly by dm.c, dm-rq.c and dm-table.c.
 * DM targets must _not_ dereference a mapped_device or dm_table to directly
 * access their members!
 */
33bd6f06 | 33 | |
struct mapped_device {
	struct mutex suspend_lock;	/* NOTE(review): presumably serializes suspend/resume and table swap — confirm against dm.c */

	struct mutex table_devices_lock;
	struct list_head table_devices;	/* devices opened via dm_get_table_device() — assumption from name; verify */

	/*
	 * The current mapping (struct dm_table *).
	 * Use dm_get_live_table{_fast} or take suspend_lock for
	 * dereference.
	 */
	void __rcu *map;

	unsigned long flags;

	/* Protect queue and type against concurrent access. */
	struct mutex type_lock;
	enum dm_queue_mode type;

	int numa_node_id;
	struct request_queue *queue;

	atomic_t holders;
	atomic_t open_count;

	struct dm_target *immutable_target;
	struct target_type *immutable_target_type;

	char name[16];
	struct gendisk *disk;
	struct dax_device *dax_dev;

	/*
	 * A list of ios that arrived while we were suspended.
	 */
	struct work_struct work;
	wait_queue_head_t wait;
	spinlock_t deferred_lock;	/* protects the deferred bio_list below — inferred from name; confirm */
	struct bio_list deferred;

	void *interface_ptr;

	/*
	 * Event handling.
	 */
	wait_queue_head_t eventq;
	atomic_t event_nr;
	atomic_t uevent_seq;
	struct list_head uevent_list;
	spinlock_t uevent_lock; /* Protect access to uevent_list */

	/* the number of internal suspends */
	unsigned internal_suspend_count;

	/*
	 * io objects are allocated from here.
	 */
	struct bio_set io_bs;
	struct bio_set bs;

	/*
	 * Processing queue (flush)
	 */
	struct workqueue_struct *wq;

	/* forced geometry settings */
	struct hd_geometry geometry;

	/* kobject and completion */
	struct dm_kobject_holder kobj_holder;

	struct dm_stats stats;

	/* for blk-mq request-based DM support */
	struct blk_mq_tag_set *tag_set;
	bool init_tio_pdu:1;

	struct srcu_struct io_barrier;
};
113 | ||
bcb44433 | 114 | void disable_discard(struct mapped_device *md); |
4cc96131 | 115 | void disable_write_same(struct mapped_device *md); |
ac62d620 | 116 | void disable_write_zeroes(struct mapped_device *md); |
4cc96131 | 117 | |
33bd6f06 MS |
118 | static inline sector_t dm_get_size(struct mapped_device *md) |
119 | { | |
120 | return get_capacity(md->disk); | |
121 | } | |
122 | ||
123 | static inline struct dm_stats *dm_get_stats(struct mapped_device *md) | |
124 | { | |
125 | return &md->stats; | |
126 | } | |
127 | ||
128 | #define DM_TABLE_MAX_DEPTH 16 | |
129 | ||
/*
 * One complete device-mapper table: the lookup btree that maps sectors
 * to targets, plus the state needed to bind it to a mapped_device.
 */
struct dm_table {
	struct mapped_device *md;
	enum dm_queue_mode type;

	/* btree table */
	unsigned int depth;
	unsigned int counts[DM_TABLE_MAX_DEPTH]; /* in nodes */
	sector_t *index[DM_TABLE_MAX_DEPTH];

	unsigned int num_targets;
	unsigned int num_allocated;	/* capacity of highs/targets arrays — inferred from name; confirm in dm-table.c */
	sector_t *highs;		/* NOTE(review): presumably the highest sector of each target — verify */
	struct dm_target *targets;

	struct target_type *immutable_target_type;

	bool integrity_supported:1;
	bool singleton:1;
	unsigned integrity_added:1;

	/*
	 * Indicates the rw permissions for the new logical
	 * device. This should be a combination of FMODE_READ
	 * and FMODE_WRITE.
	 */
	fmode_t mode;

	/* a list of devices used by this table */
	struct list_head devices;

	/* events get handed up using this callback */
	void (*event_fn)(void *);
	void *event_context;

	struct dm_md_mempools *mempools;
};
166 | ||
4cc96131 MS |
167 | static inline struct completion *dm_get_completion_from_kobject(struct kobject *kobj) |
168 | { | |
169 | return &container_of(kobj, struct dm_kobject_holder, kobj)->completion; | |
170 | } | |
171 | ||
172 | unsigned __dm_get_module_param(unsigned *module_param, unsigned def, unsigned max); | |
173 | ||
/*
 * Return true when a status/message result buffer is effectively full:
 * either there is no buffer space at all, or there is not enough room
 * left for one more character plus the terminating NUL.
 */
static inline bool dm_message_test_buffer_overflow(char *result, unsigned maxlen)
{
	if (!maxlen)
		return true;

	return strlen(result) + 1 >= maxlen;
}
178 | ||
93e6442c MP |
179 | extern atomic_t dm_global_event_nr; |
180 | extern wait_queue_head_t dm_global_eventq; | |
62e08243 | 181 | void dm_issue_global_event(void); |
93e6442c | 182 | |
4cc96131 | 183 | #endif |