/*
 * io_uring/notif.c — zero-copy send notification slots.
 * (linux-block.git, as of commit "io_uring: make io_kiocb_to_cmd() typesafe")
 */
1#include <linux/kernel.h>
2#include <linux/errno.h>
3#include <linux/file.h>
4#include <linux/slab.h>
5#include <linux/net.h>
6#include <linux/io_uring.h>
7
8#include "io_uring.h"
9#include "notif.h"
68ef5578 10#include "rsrc.h"
eb42cebb 11
/*
 * Task-work completion handler for a notification request: return any
 * pages previously accounted against the ring's user, then post the
 * notification's CQE via the normal request completion path.
 */
static void __io_notif_complete_tw(struct io_kiocb *notif, bool *locked)
{
	struct io_notif_data *nd = io_notif_to_data(notif);
	struct io_ring_ctx *ctx = notif->ctx;

	/* undo per-user memory accounting, if any was charged */
	if (nd->account_pages && ctx->user) {
		__io_unaccount_mem(ctx->user, nd->account_pages);
		nd->account_pages = 0;
	}
	io_req_task_complete(notif, locked);
}
23
14b146b6
PB
/*
 * Complete a notification inline. Caller holds the ring mutex, so hand
 * locked=true to the shared task-work completion helper.
 */
static inline void io_notif_complete(struct io_kiocb *notif)
	__must_hold(&notif->ctx->uring_lock)
{
	bool locked = true;

	__io_notif_complete_tw(notif, &locked);
}
31
/*
 * skb zero-copy completion callback. Drops one reference on the
 * notification's ubuf_info; on the final put, queue completion through
 * task work (we may be running from softirq context here).
 * @success is ignored — completion is posted either way.
 */
static void io_uring_tx_zerocopy_callback(struct sk_buff *skb,
					  struct ubuf_info *uarg,
					  bool success)
{
	struct io_notif_data *nd = container_of(uarg, struct io_notif_data, uarg);
	struct io_kiocb *notif = cmd_to_io_kiocb(nd);

	if (refcount_dec_and_test(&uarg->refcnt)) {
		notif->io_task_work.func = __io_notif_complete_tw;
		io_req_task_work_add(notif);
	}
}
44
/*
 * Allocate and initialize a notification request bound to @slot.
 * The CQE is pre-filled with the slot's tag as user_data and a
 * per-slot sequence number in flags. Returns NULL if request
 * allocation fails.
 */
struct io_kiocb *io_alloc_notif(struct io_ring_ctx *ctx,
				struct io_notif_slot *slot)
	__must_hold(&ctx->uring_lock)
{
	struct io_kiocb *notif;
	struct io_notif_data *nd;

	if (unlikely(!io_alloc_req_refill(ctx)))
		return NULL;
	notif = io_alloc_req(ctx);
	/* notifications reuse the request machinery but carry no opcode work */
	notif->opcode = IORING_OP_NOP;
	notif->flags = 0;
	notif->file = NULL;
	notif->task = current;
	io_get_task_refs(1);
	notif->rsrc_node = NULL;
	io_req_set_rsrc_node(notif, ctx, 0);
	notif->cqe.user_data = slot->tag;
	notif->cqe.flags = slot->seq++;
	notif->cqe.res = 0;

	nd = io_notif_to_data(notif);
	nd->account_pages = 0;
	nd->uarg.flags = SKBFL_ZEROCOPY_FRAG | SKBFL_DONT_ORPHAN;
	nd->uarg.callback = io_uring_tx_zerocopy_callback;
	/* master ref owned by io_notif_slot, will be dropped on flush */
	refcount_set(&nd->uarg.refcnt, 1);
	return notif;
}
74
63809137 75void io_notif_slot_flush(struct io_notif_slot *slot)
eb42cebb
PB
76 __must_hold(&ctx->uring_lock)
77{
14b146b6
PB
78 struct io_kiocb *notif = slot->notif;
79 struct io_notif_data *nd = io_notif_to_data(notif);
eb42cebb
PB
80
81 slot->notif = NULL;
82
eb42cebb 83 /* drop slot's master ref */
14b146b6 84 if (refcount_dec_and_test(&nd->uarg.refcnt))
eb42cebb
PB
85 io_notif_complete(notif);
86}
87
/*
 * Tear down all registered notification slots. For each slot still
 * holding a notification, drop the slot's master reference; on the
 * final put, queue completion through task work rather than completing
 * inline. Frees the slot array itself last.
 *
 * Returns 0 on success, -ENXIO if no slots are registered.
 */
__cold int io_notif_unregister(struct io_ring_ctx *ctx)
	__must_hold(&ctx->uring_lock)
{
	int i;

	if (!ctx->notif_slots)
		return -ENXIO;

	for (i = 0; i < ctx->nr_notif_slots; i++) {
		struct io_notif_slot *slot = &ctx->notif_slots[i];
		struct io_kiocb *notif = slot->notif;
		struct io_notif_data *nd;

		if (!notif)
			continue;
		nd = io_notif_to_data(notif);
		slot->notif = NULL;
		/* others may still hold refs (e.g. in-flight sends) */
		if (!refcount_dec_and_test(&nd->uarg.refcnt))
			continue;
		notif->io_task_work.func = __io_notif_complete_tw;
		io_req_task_work_add(notif);
	}

	kvfree(ctx->notif_slots);
	ctx->notif_slots = NULL;
	ctx->nr_notif_slots = 0;
	return 0;
}
116
117__cold int io_notif_register(struct io_ring_ctx *ctx,
118 void __user *arg, unsigned int size)
119 __must_hold(&ctx->uring_lock)
120{
121 struct io_uring_notification_slot __user *slots;
122 struct io_uring_notification_slot slot;
123 struct io_uring_notification_register reg;
124 unsigned i;
125
126 if (ctx->nr_notif_slots)
127 return -EBUSY;
128 if (size != sizeof(reg))
129 return -EINVAL;
130 if (copy_from_user(&reg, arg, sizeof(reg)))
131 return -EFAULT;
132 if (!reg.nr_slots || reg.nr_slots > IORING_MAX_NOTIF_SLOTS)
133 return -EINVAL;
134 if (reg.resv || reg.resv2 || reg.resv3)
135 return -EINVAL;
136
137 slots = u64_to_user_ptr(reg.data);
138 ctx->notif_slots = kvcalloc(reg.nr_slots, sizeof(ctx->notif_slots[0]),
139 GFP_KERNEL_ACCOUNT);
140 if (!ctx->notif_slots)
141 return -ENOMEM;
142
143 for (i = 0; i < reg.nr_slots; i++, ctx->nr_notif_slots++) {
144 struct io_notif_slot *notif_slot = &ctx->notif_slots[i];
145
146 if (copy_from_user(&slot, &slots[i], sizeof(slot))) {
147 io_notif_unregister(ctx);
148 return -EFAULT;
149 }
150 if (slot.resv[0] | slot.resv[1] | slot.resv[2]) {
151 io_notif_unregister(ctx);
152 return -EINVAL;
153 }
154 notif_slot->tag = slot.tag;
155 }
eb42cebb 156 return 0;
e58d498e 157}