block: simplify struct io_context refcounting
[linux-block.git] / block / blk-ioc.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to io context handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/sched/task.h>

#include "blk.h"
#include "blk-mq-sched.h"

/*
 * For io context allocations
 */
static struct kmem_cache *iocontext_cachep;

/**
 * get_io_context - increment reference count to io_context
 * @ioc: io_context to get
 *
 * Increment reference count to @ioc.
 */
static void get_io_context(struct io_context *ioc)
{
	BUG_ON(atomic_long_read(&ioc->refcount) <= 0);
	atomic_long_inc(&ioc->refcount);
}

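/*
 * RCU callback freeing an icq. The slab cache to free into was recorded in
 * @icq->__rcu_icq_cache by ioc_destroy_icq(), since the queue (and with it
 * the elevator that owns the cache pointer) may already be gone by the time
 * this callback runs.
 */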
static void icq_free_icq_rcu(struct rcu_head *head)
{
	struct io_cq *icq = container_of(head, struct io_cq, __rcu_head);

	kmem_cache_free(icq->__rcu_icq_cache, icq);
}

/*
 * Exit an icq. Called with ioc locked for blk-mq, and with both ioc
 * and queue locked for legacy.
 */
static void ioc_exit_icq(struct io_cq *icq)
{
	struct elevator_type *et = icq->q->elevator->type;

	if (icq->flags & ICQ_EXITED)
		return;

	if (et->ops.exit_icq)
		et->ops.exit_icq(icq);

	icq->flags |= ICQ_EXITED;
}

/*
 * Release an icq. Called with ioc locked for blk-mq, and with both ioc
 * and queue locked for legacy.
 */
static void ioc_destroy_icq(struct io_cq *icq)
{
	struct io_context *ioc = icq->ioc;
	struct request_queue *q = icq->q;
	struct elevator_type *et = q->elevator->type;

	lockdep_assert_held(&ioc->lock);

	radix_tree_delete(&ioc->icq_tree, icq->q->id);
	hlist_del_init(&icq->ioc_node);
	list_del_init(&icq->q_node);

	/*
	 * Both setting lookup hint to and clearing it from @icq are done
	 * under queue_lock. If it's not pointing to @icq now, it never
	 * will. Hint assignment itself can race safely.
	 */
	if (rcu_access_pointer(ioc->icq_hint) == icq)
		rcu_assign_pointer(ioc->icq_hint, NULL);

	ioc_exit_icq(icq);

	/*
	 * @icq->q might have gone away by the time RCU callback runs
	 * making it impossible to determine icq_cache. Record it in @icq.
	 */
	icq->__rcu_icq_cache = et->icq_cache;
	icq->flags |= ICQ_DESTROYED;
	call_rcu(&icq->__rcu_head, icq_free_icq_rcu);
}

/*
 * Slow path for ioc release in put_io_context(). Performs double-lock
 * dancing to unlink all icq's and then frees ioc.
 */
static void ioc_release_fn(struct work_struct *work)
{
	struct io_context *ioc = container_of(work, struct io_context,
					      release_work);
	spin_lock_irq(&ioc->lock);

	while (!hlist_empty(&ioc->icq_list)) {
		struct io_cq *icq = hlist_entry(ioc->icq_list.first,
						struct io_cq, ioc_node);
		struct request_queue *q = icq->q;

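		/*
		 * The nesting order is queue_lock first, then ioc->lock.
		 * Since ioc->lock is already held, only try-lock the
		 * queue_lock here and fall back to re-acquiring both in
		 * the correct order below.
		 */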
		if (spin_trylock(&q->queue_lock)) {
			ioc_destroy_icq(icq);
			spin_unlock(&q->queue_lock);
		} else {
			/* Make sure q and icq cannot be freed. */
			rcu_read_lock();

			/* Re-acquire the locks in the correct order. */
			spin_unlock(&ioc->lock);
			spin_lock(&q->queue_lock);
			spin_lock(&ioc->lock);

			/*
			 * The icq may have been destroyed when the ioc lock
			 * was released.
			 */
			if (!(icq->flags & ICQ_DESTROYED))
				ioc_destroy_icq(icq);

			spin_unlock(&q->queue_lock);
			rcu_read_unlock();
		}
	}

	spin_unlock_irq(&ioc->lock);

	kmem_cache_free(iocontext_cachep, ioc);
}

/**
 * put_io_context - put a reference to an io_context
 * @ioc: io_context to put
 *
 * Decrement reference count of @ioc and release it if the count reaches
 * zero.
 */
void put_io_context(struct io_context *ioc)
{
	unsigned long flags;
	bool free_ioc = false;

	if (ioc == NULL)
		return;

	BUG_ON(atomic_long_read(&ioc->refcount) <= 0);

	/*
	 * Releasing ioc requires reverse order double locking and we may
	 * already be holding a queue_lock. Do it asynchronously from wq.
	 */
	if (atomic_long_dec_and_test(&ioc->refcount)) {
		spin_lock_irqsave(&ioc->lock, flags);
		if (!hlist_empty(&ioc->icq_list))
			queue_work(system_power_efficient_wq,
				   &ioc->release_work);
		else
			free_ioc = true;
		spin_unlock_irqrestore(&ioc->lock, flags);
	}

	if (free_ioc)
		kmem_cache_free(iocontext_cachep, ioc);
}
EXPORT_SYMBOL_GPL(put_io_context);

/**
 * put_io_context_active - put active reference on ioc
 * @ioc: ioc of interest
 *
 * Put an active reference to an ioc. If the active reference count reaches
 * zero after the put, @ioc can never issue further IOs and the io
 * schedulers are notified.
 */
static void put_io_context_active(struct io_context *ioc)
{
	struct io_cq *icq;

	if (!atomic_dec_and_test(&ioc->active_ref))
		return;

	spin_lock_irq(&ioc->lock);
	hlist_for_each_entry(icq, &ioc->icq_list, ioc_node) {
		if (icq->flags & ICQ_EXITED)
			continue;

		ioc_exit_icq(icq);
	}
	spin_unlock_irq(&ioc->lock);

	put_io_context(ioc);
}

/* Called by the exiting task */
void exit_io_context(struct task_struct *task)
{
	struct io_context *ioc;

	task_lock(task);
	ioc = task->io_context;
	task->io_context = NULL;
	task_unlock(task);

	put_io_context_active(ioc);
}

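/*
 * Destroy all icqs on a list that was previously spliced off a request
 * queue's ->icq_list. Each icq is torn down under its ioc lock; icqs found
 * already destroyed by another path are skipped. The RCU read-side section
 * keeps the icq memory from being freed while its flags are inspected.
 */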
static void __ioc_clear_queue(struct list_head *icq_list)
{
	unsigned long flags;

	rcu_read_lock();
	while (!list_empty(icq_list)) {
		struct io_cq *icq = list_entry(icq_list->next,
					       struct io_cq, q_node);
		struct io_context *ioc = icq->ioc;

		spin_lock_irqsave(&ioc->lock, flags);
		if (icq->flags & ICQ_DESTROYED) {
			spin_unlock_irqrestore(&ioc->lock, flags);
			continue;
		}
		ioc_destroy_icq(icq);
		spin_unlock_irqrestore(&ioc->lock, flags);
	}
	rcu_read_unlock();
}

/**
 * ioc_clear_queue - break any ioc association with the specified queue
 * @q: request_queue being cleared
 *
 * Walk @q->icq_list and exit all io_cq's.
 */
void ioc_clear_queue(struct request_queue *q)
{
	LIST_HEAD(icq_list);

	spin_lock_irq(&q->queue_lock);
	list_splice_init(&q->icq_list, &icq_list);
	spin_unlock_irq(&q->queue_lock);

	__ioc_clear_queue(&icq_list);
}

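/*
 * Allocate and initialize a new, not yet installed io_context. The caller
 * owns both the initial refcount and the initial active reference.
 */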
static struct io_context *alloc_io_context(gfp_t gfp_flags, int node)
{
	struct io_context *ioc;

	ioc = kmem_cache_alloc_node(iocontext_cachep, gfp_flags | __GFP_ZERO,
				    node);
	if (unlikely(!ioc))
		return NULL;

	atomic_long_set(&ioc->refcount, 1);
	atomic_set(&ioc->active_ref, 1);
	spin_lock_init(&ioc->lock);
	INIT_RADIX_TREE(&ioc->icq_tree, GFP_ATOMIC);
	INIT_HLIST_HEAD(&ioc->icq_list);
	INIT_WORK(&ioc->release_work, ioc_release_fn);
	return ioc;
}

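/*
 * Allocate an io_context and try to install it as @task->io_context. On
 * success the installed context is returned with an extra reference held
 * for the caller; NULL is returned if the allocation failed or if an
 * exiting task other than %current can no longer take one.
 */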
static struct io_context *create_task_io_context(struct task_struct *task,
		gfp_t gfp_flags, int node)
{
	struct io_context *ioc;

	ioc = alloc_io_context(gfp_flags, node);
	if (!ioc)
		return NULL;

	/*
	 * Try to install. ioc shouldn't be installed if someone else
	 * already did or @task, which isn't %current, is exiting. Note
	 * that we need to allow ioc creation on exiting %current as exit
	 * path may issue IOs from e.g. exit_files(). The exit path is
	 * responsible for not issuing IO after exit_io_context().
	 */
	task_lock(task);
	if (!task->io_context &&
	    (task == current || !(task->flags & PF_EXITING)))
		task->io_context = ioc;
	else
		kmem_cache_free(iocontext_cachep, ioc);

	ioc = task->io_context;
	if (ioc)
		get_io_context(ioc);
	task_unlock(task);
	return ioc;
}

/**
 * get_task_io_context - get io_context of a task
 * @task: task of interest
 * @gfp_flags: allocation flags, used if allocation is necessary
 * @node: allocation node, used if allocation is necessary
 *
 * Return io_context of @task. If it doesn't exist, it is created with
 * @gfp_flags and @node. The returned io_context has its reference count
 * incremented.
 *
 * This function always goes through task_lock() and it's better to use
 * %current->io_context + get_io_context() for %current.
 */
struct io_context *get_task_io_context(struct task_struct *task,
				       gfp_t gfp_flags, int node)
{
	struct io_context *ioc;

	might_sleep_if(gfpflags_allow_blocking(gfp_flags));

	task_lock(task);
	ioc = task->io_context;
	if (unlikely(!ioc)) {
		task_unlock(task);
		return create_task_io_context(task, gfp_flags, node);
	}
	get_io_context(ioc);
	task_unlock(task);
	return ioc;
}

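/*
 * Set up ->io_context for a freshly forked task. With CLONE_IO the parent's
 * context is shared (its active reference count is bumped); otherwise a new
 * context is allocated only when the parent carries a non-default ioprio
 * that the child needs to inherit.
 */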
int __copy_io(unsigned long clone_flags, struct task_struct *tsk)
{
	struct io_context *ioc = current->io_context;

	/*
	 * Share io context with parent, if CLONE_IO is set
	 */
	if (clone_flags & CLONE_IO) {
		atomic_inc(&ioc->active_ref);
		tsk->io_context = ioc;
	} else if (ioprio_valid(ioc->ioprio)) {
		tsk->io_context = alloc_io_context(GFP_KERNEL, NUMA_NO_NODE);
		if (!tsk->io_context)
			return -ENOMEM;
		tsk->io_context->ioprio = ioc->ioprio;
	}

	return 0;
}

/**
 * ioc_lookup_icq - lookup io_cq from ioc
 * @q: the associated request_queue
 *
 * Look up the io_cq associated with %current's io_context and @q. Must be
 * called with @q->queue_lock held.
 */
struct io_cq *ioc_lookup_icq(struct request_queue *q)
{
	struct io_context *ioc = current->io_context;
	struct io_cq *icq;

	lockdep_assert_held(&q->queue_lock);

	/*
	 * icq's are indexed from @ioc using radix tree and hint pointer,
	 * both of which are protected with RCU. All removals are done
	 * holding both q and ioc locks, and we're holding q lock - if we
	 * find an icq which points to us, it's guaranteed to be valid.
	 */
	rcu_read_lock();
	icq = rcu_dereference(ioc->icq_hint);
	if (icq && icq->q == q)
		goto out;

	icq = radix_tree_lookup(&ioc->icq_tree, q->id);
	if (icq && icq->q == q)
		rcu_assign_pointer(ioc->icq_hint, icq);	/* allowed to race */
	else
		icq = NULL;
out:
	rcu_read_unlock();
	return icq;
}
EXPORT_SYMBOL(ioc_lookup_icq);

/**
 * ioc_create_icq - create and link io_cq
 * @q: request_queue of interest
 *
 * Make sure an io_cq linking %current's io_context and @q exists. If the
 * icq doesn't exist, it is created using GFP_ATOMIC.
 *
 * The caller is responsible for ensuring the io_context won't go away and
 * that @q is alive and will stay alive until this function returns.
 */
static struct io_cq *ioc_create_icq(struct request_queue *q)
{
	struct io_context *ioc = current->io_context;
	struct elevator_type *et = q->elevator->type;
	struct io_cq *icq;

	/* allocate stuff */
	icq = kmem_cache_alloc_node(et->icq_cache, GFP_ATOMIC | __GFP_ZERO,
				    q->node);
	if (!icq)
		return NULL;

	if (radix_tree_maybe_preload(GFP_ATOMIC) < 0) {
		kmem_cache_free(et->icq_cache, icq);
		return NULL;
	}

	icq->ioc = ioc;
	icq->q = q;
	INIT_LIST_HEAD(&icq->q_node);
	INIT_HLIST_NODE(&icq->ioc_node);

	/* lock both q and ioc and try to link @icq */
	spin_lock_irq(&q->queue_lock);
	spin_lock(&ioc->lock);

	if (likely(!radix_tree_insert(&ioc->icq_tree, q->id, icq))) {
		hlist_add_head(&icq->ioc_node, &ioc->icq_list);
		list_add(&icq->q_node, &q->icq_list);
		if (et->ops.init_icq)
			et->ops.init_icq(icq);
	} else {
		kmem_cache_free(et->icq_cache, icq);
		icq = ioc_lookup_icq(q);
		if (!icq)
			printk(KERN_ERR "cfq: icq link failed!\n");
	}

	spin_unlock(&ioc->lock);
	spin_unlock_irq(&q->queue_lock);
	radix_tree_preload_end();
	return icq;
}

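/*
 * Find or create the icq tying %current's io_context to @q, creating the
 * io_context itself first when the task does not have one yet. Returns the
 * icq with a reference held on its io_context, or NULL on allocation
 * failure.
 */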
struct io_cq *ioc_find_get_icq(struct request_queue *q)
{
	struct io_context *ioc = current->io_context;
	struct io_cq *icq = NULL;

	if (unlikely(!ioc)) {
		ioc = create_task_io_context(current, GFP_ATOMIC, q->node);
		if (!ioc)
			return NULL;
	} else {
		get_io_context(ioc);

		spin_lock_irq(&q->queue_lock);
		icq = ioc_lookup_icq(q);
		spin_unlock_irq(&q->queue_lock);
	}

	if (!icq) {
		icq = ioc_create_icq(q);
		if (!icq) {
			put_io_context(ioc);
			return NULL;
		}
	}
	return icq;
}
EXPORT_SYMBOL_GPL(ioc_find_get_icq);

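/* Create the slab cache backing all io_context allocations. */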
static int __init blk_ioc_init(void)
{
	iocontext_cachep = kmem_cache_create("blkdev_ioc",
			sizeof(struct io_context), 0, SLAB_PANIC, NULL);
	return 0;
}
subsys_initcall(blk_ioc_init);