// SPDX-License-Identifier: GPL-2.0
/* io_uring/notif.h — zero-copy send notification infrastructure */

#include <linux/net.h>
#include <linux/uio.h>
#include <net/sock.h>
#include <linux/nospec.h>

#include "rsrc.h"
/* batch size when splicing cached notifiers — confirm at use site */
#define IO_NOTIF_SPLICE_BATCH	32
/* hard cap on the number of registered notification slots */
#define IORING_MAX_NOTIF_SLOTS	(1U << 10)
eb42cebb
PB
13struct io_notif {
14 struct ubuf_info uarg;
15 struct io_ring_ctx *ctx;
68ef5578 16 struct io_rsrc_node *rsrc_node;
eb42cebb 17
e58d498e
PB
18 /* complete via tw if ->task is non-NULL, fallback to wq otherwise */
19 struct task_struct *task;
20
eb42cebb
PB
21 /* cqe->user_data, io_notif_slot::tag if not overridden */
22 u64 tag;
23 /* see struct io_notif_slot::seq */
24 u32 seq;
eb4a299b
PB
25 /* hook into ctx->notif_list and ctx->notif_list_locked */
26 struct list_head cache_node;
eb42cebb 27
6a9ce66f
PB
28 unsigned long account_pages;
29
eb42cebb
PB
30 union {
31 struct callback_head task_work;
32 struct work_struct commit_work;
33 };
34};
35
36struct io_notif_slot {
37 /*
38 * Current/active notifier. A slot holds only one active notifier at a
39 * time and keeps one reference to it. Flush releases the reference and
40 * lazily replaces it with a new notifier.
41 */
42 struct io_notif *notif;
43
44 /*
45 * Default ->user_data for this slot notifiers CQEs
46 */
47 u64 tag;
48 /*
49 * Notifiers of a slot live in generations, we create a new notifier
50 * only after flushing the previous one. Track the sequential number
51 * for all notifiers and copy it into notifiers's cqe->cflags
52 */
53 u32 seq;
54};
55
bc24d6bd
PB
56int io_notif_register(struct io_ring_ctx *ctx,
57 void __user *arg, unsigned int size);
eb42cebb 58int io_notif_unregister(struct io_ring_ctx *ctx);
eb4a299b 59void io_notif_cache_purge(struct io_ring_ctx *ctx);
eb42cebb 60
63809137 61void io_notif_slot_flush(struct io_notif_slot *slot);
eb42cebb
PB
62struct io_notif *io_alloc_notif(struct io_ring_ctx *ctx,
63 struct io_notif_slot *slot);
64
65static inline struct io_notif *io_get_notif(struct io_ring_ctx *ctx,
66 struct io_notif_slot *slot)
67{
68 if (!slot->notif)
69 slot->notif = io_alloc_notif(ctx, slot);
70 return slot->notif;
71}
72
73static inline struct io_notif_slot *io_get_notif_slot(struct io_ring_ctx *ctx,
cb309ae4 74 unsigned idx)
eb42cebb
PB
75 __must_hold(&ctx->uring_lock)
76{
77 if (idx >= ctx->nr_notif_slots)
78 return NULL;
79 idx = array_index_nospec(idx, ctx->nr_notif_slots);
80 return &ctx->notif_slots[idx];
81}
63809137
PB
82
83static inline void io_notif_slot_flush_submit(struct io_notif_slot *slot,
84 unsigned int issue_flags)
85{
86 if (!(issue_flags & IO_URING_F_UNLOCKED)) {
87 slot->notif->task = current;
88 io_get_task_refs(1);
89 }
90 io_notif_slot_flush(slot);
91}
6a9ce66f
PB
92
93static inline int io_notif_account_mem(struct io_notif *notif, unsigned len)
94{
95 struct io_ring_ctx *ctx = notif->ctx;
96 unsigned nr_pages = (len >> PAGE_SHIFT) + 2;
97 int ret;
98
99 if (ctx->user) {
100 ret = __io_account_mem(ctx->user, nr_pages);
101 if (ret)
102 return ret;
103 notif->account_pages += nr_pages;
104 }
105 return 0;
106}