/*
 * Functions related to io context handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/bootmem.h>	/* for max_pfn/max_low_pfn */
#include <linux/slab.h>

#include "blk.h"

/*
 * For io context allocations
 */
static struct kmem_cache *iocontext_cachep;

/**
 * get_io_context - increment reference count to io_context
 * @ioc: io_context to get
 *
 * Increment reference count to @ioc.
 */
void get_io_context(struct io_context *ioc)
{
	BUG_ON(atomic_long_read(&ioc->refcount) <= 0);
	atomic_long_inc(&ioc->refcount);
}
EXPORT_SYMBOL(get_io_context);

/*
 * Releasing ioc may nest into another put_io_context() leading to nested
 * fast path release. As the ioc's can't be the same, this is okay but
 * makes lockdep whine. Keep track of nesting and use it as subclass.
 */
#ifdef CONFIG_LOCKDEP
#define ioc_release_depth(q)		((q) ? (q)->ioc_release_depth : 0)
#define ioc_release_depth_inc(q)	(q)->ioc_release_depth++
#define ioc_release_depth_dec(q)	(q)->ioc_release_depth--
#else
#define ioc_release_depth(q)		0
#define ioc_release_depth_inc(q)	do { } while (0)
#define ioc_release_depth_dec(q)	do { } while (0)
#endif

/*
 * Slow path for ioc release in put_io_context(). Performs double-lock
 * dancing to unlink all cic's and then frees ioc.
 */
static void ioc_release_fn(struct work_struct *work)
{
	struct io_context *ioc = container_of(work, struct io_context,
					      release_work);
	struct request_queue *last_q = NULL;

	spin_lock_irq(&ioc->lock);

	while (!hlist_empty(&ioc->cic_list)) {
		struct cfq_io_context *cic = hlist_entry(ioc->cic_list.first,
							 struct cfq_io_context,
							 cic_list);
		struct request_queue *this_q = cic->q;

		if (this_q != last_q) {
			/*
			 * Need to switch to @this_q. Once we release
			 * @ioc->lock, it can go away along with @cic.
			 * Hold on to it.
			 */
			__blk_get_queue(this_q);

			/*
			 * blk_put_queue() might sleep thanks to kobject
			 * idiocy. Always release both locks, put and
			 * restart.
			 */
			if (last_q) {
				spin_unlock(last_q->queue_lock);
				spin_unlock_irq(&ioc->lock);
				blk_put_queue(last_q);
			} else {
				spin_unlock_irq(&ioc->lock);
			}

			last_q = this_q;
			spin_lock_irq(this_q->queue_lock);
			spin_lock(&ioc->lock);
			continue;
		}
		ioc_release_depth_inc(this_q);
		cic->exit(cic);
		cic->release(cic);
		ioc_release_depth_dec(this_q);
	}

	if (last_q) {
		spin_unlock(last_q->queue_lock);
		spin_unlock_irq(&ioc->lock);
		blk_put_queue(last_q);
	} else {
		spin_unlock_irq(&ioc->lock);
	}

	kmem_cache_free(iocontext_cachep, ioc);
}

/**
 * put_io_context - put a reference of io_context
 * @ioc: io_context to put
 * @locked_q: request_queue the caller is holding queue_lock of (hint)
 *
 * Decrement reference count of @ioc and release it if the count reaches
 * zero. If the caller is holding queue_lock of a queue, it can indicate
 * that with @locked_q. This is an optimization hint and the caller is
 * allowed to pass in %NULL even when it's holding a queue_lock.
 */
void put_io_context(struct io_context *ioc, struct request_queue *locked_q)
{
	struct request_queue *last_q = locked_q;
	unsigned long flags;

	if (ioc == NULL)
		return;

	BUG_ON(atomic_long_read(&ioc->refcount) <= 0);
	if (locked_q)
		lockdep_assert_held(locked_q->queue_lock);

	if (!atomic_long_dec_and_test(&ioc->refcount))
		return;

	/*
	 * Destroy @ioc. This is a bit messy because cic's are chained
	 * from both ioc and queue, and ioc->lock nests inside queue_lock.
	 * The inner ioc->lock should be held to walk our cic_list and then
	 * for each cic the outer matching queue_lock should be grabbed.
	 * ie. We need to do reverse-order double lock dancing.
	 *
	 * Another twist is that we are often called with one of the
	 * matching queue_locks held as indicated by @locked_q, which
	 * prevents performing double-lock dance for other queues.
	 *
	 * So, we do it in two stages. The fast path uses the queue_lock
	 * the caller is holding and, if other queues need to be accessed,
	 * uses trylock to avoid introducing locking dependency. This can
	 * handle most cases, especially if @ioc was performing IO on only
	 * single device.
	 *
	 * If trylock doesn't cut it, we defer to @ioc->release_work which
	 * can do all the double-locking dancing.
	 */
	spin_lock_irqsave_nested(&ioc->lock, flags,
				 ioc_release_depth(locked_q));

	while (!hlist_empty(&ioc->cic_list)) {
		struct cfq_io_context *cic = hlist_entry(ioc->cic_list.first,
							 struct cfq_io_context,
							 cic_list);
		struct request_queue *this_q = cic->q;

		if (this_q != last_q) {
			if (last_q && last_q != locked_q)
				spin_unlock(last_q->queue_lock);
			last_q = NULL;

			if (!spin_trylock(this_q->queue_lock))
				break;
			last_q = this_q;
			continue;
		}
		ioc_release_depth_inc(this_q);
		cic->exit(cic);
		cic->release(cic);
		ioc_release_depth_dec(this_q);
	}

	if (last_q && last_q != locked_q)
		spin_unlock(last_q->queue_lock);

	spin_unlock_irqrestore(&ioc->lock, flags);

	/* if no cic's left, we're done; otherwise, kick release_work */
	if (hlist_empty(&ioc->cic_list))
		kmem_cache_free(iocontext_cachep, ioc);
	else
		schedule_work(&ioc->release_work);
}
EXPORT_SYMBOL(put_io_context);

/* Called by the exiting task */
void exit_io_context(struct task_struct *task)
{
	struct io_context *ioc;

	/* PF_EXITING prevents new io_context from being attached to @task */
	WARN_ON_ONCE(!(current->flags & PF_EXITING));

	task_lock(task);
	ioc = task->io_context;
	task->io_context = NULL;
	task_unlock(task);

	atomic_dec(&ioc->nr_tasks);
	put_io_context(ioc, NULL);
}

void create_io_context_slowpath(struct task_struct *task, gfp_t gfp_flags,
				int node)
{
	struct io_context *ioc;

	ioc = kmem_cache_alloc_node(iocontext_cachep, gfp_flags | __GFP_ZERO,
				    node);
	if (unlikely(!ioc))
		return;

	/* initialize */
	atomic_long_set(&ioc->refcount, 1);
	atomic_set(&ioc->nr_tasks, 1);
	spin_lock_init(&ioc->lock);
	INIT_RADIX_TREE(&ioc->radix_root, GFP_ATOMIC | __GFP_HIGH);
	INIT_HLIST_HEAD(&ioc->cic_list);
	INIT_WORK(&ioc->release_work, ioc_release_fn);

	/* try to install, somebody might already have beaten us to it */
	task_lock(task);
	if (!task->io_context && !(task->flags & PF_EXITING))
		task->io_context = ioc;
	else
		kmem_cache_free(iocontext_cachep, ioc);
	task_unlock(task);
}
EXPORT_SYMBOL(create_io_context_slowpath);

/**
 * get_task_io_context - get io_context of a task
 * @task: task of interest
 * @gfp_flags: allocation flags, used if allocation is necessary
 * @node: allocation node, used if allocation is necessary
 *
 * Return io_context of @task. If it doesn't exist, it is created with
 * @gfp_flags and @node. The returned io_context has its reference count
 * incremented.
 *
 * This function always goes through task_lock() and it's better to use
 * %current->io_context + get_io_context() for %current.
 */
struct io_context *get_task_io_context(struct task_struct *task,
				       gfp_t gfp_flags, int node)
{
	struct io_context *ioc;

	might_sleep_if(gfp_flags & __GFP_WAIT);

	do {
		task_lock(task);
		ioc = task->io_context;
		if (likely(ioc)) {
			get_io_context(ioc);
			task_unlock(task);
			return ioc;
		}
		task_unlock(task);
	} while (create_io_context(task, gfp_flags, node));

	return NULL;
}
EXPORT_SYMBOL(get_task_io_context);

void ioc_set_changed(struct io_context *ioc, int which)
{
	struct cfq_io_context *cic;
	struct hlist_node *n;

	hlist_for_each_entry(cic, n, &ioc->cic_list, cic_list)
		set_bit(which, &cic->changed);
}

/**
 * ioc_ioprio_changed - notify ioprio change
 * @ioc: io_context of interest
 * @ioprio: new ioprio
 *
 * @ioc's ioprio has changed to @ioprio. Set %CIC_IOPRIO_CHANGED for all
 * cic's. iosched is responsible for checking the bit and applying it on
 * request issue path.
 */
void ioc_ioprio_changed(struct io_context *ioc, int ioprio)
{
	unsigned long flags;

	spin_lock_irqsave(&ioc->lock, flags);
	ioc->ioprio = ioprio;
	ioc_set_changed(ioc, CIC_IOPRIO_CHANGED);
	spin_unlock_irqrestore(&ioc->lock, flags);
}

/**
 * ioc_cgroup_changed - notify cgroup change
 * @ioc: io_context of interest
 *
 * @ioc's cgroup has changed. Set %CIC_CGROUP_CHANGED for all cic's.
 * iosched is responsible for checking the bit and applying it on request
 * issue path.
 */
void ioc_cgroup_changed(struct io_context *ioc)
{
	unsigned long flags;

	spin_lock_irqsave(&ioc->lock, flags);
	ioc_set_changed(ioc, CIC_CGROUP_CHANGED);
	spin_unlock_irqrestore(&ioc->lock, flags);
}

static int __init blk_ioc_init(void)
{
	iocontext_cachep = kmem_cache_create("blkdev_ioc",
			sizeof(struct io_context), 0, SLAB_PANIC, NULL);
	return 0;
}
subsys_initcall(blk_ioc_init);
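
/*
 * Illustrative usage sketch -- not part of the file above. It shows how a
 * caller might take and drop an io_context reference via the exported
 * helpers get_task_io_context() and put_io_context(). The function name,
 * GFP flags, and surrounding context are assumptions for illustration only.
 */
static void example_use_ioc(struct task_struct *task)
{
	struct io_context *ioc;

	/* Creates and installs an io_context if @task doesn't have one yet;
	 * may sleep because GFP_NOIO includes __GFP_WAIT. */
	ioc = get_task_io_context(task, GFP_NOIO, NUMA_NO_NODE);
	if (!ioc)
		return;		/* allocation failed or @task is exiting */

	/* ... use ioc here, e.g. read ioc->ioprio ... */

	/* Drop the reference; pass NULL since no queue_lock is held here. */
	put_io_context(ioc, NULL);
}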