// SPDX-License-Identifier: GPL-2.0
/* XDP sockets
 *
 * AF_XDP sockets allow a channel between XDP programs and userspace
 * applications.
 * Copyright(c) 2018 Intel Corporation.
 *
 * Author(s): Björn Töpel <bjorn.topel@intel.com>
 *            Magnus Karlsson <magnus.karlsson@intel.com>
 */

#define pr_fmt(fmt) "AF_XDP: %s: " fmt, __func__

#include <linux/if_xdp.h>
#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/socket.h>
#include <linux/file.h>
#include <linux/uaccess.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/rculist.h>
#include <net/xdp_sock.h>
#include <net/xdp.h>

#include "xsk_queue.h"
#include "xdp_umem.h"
#include "xsk.h"

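/* Editorial note on the two definitions below: TX_BATCH_SIZE bounds how
 * many descriptors xsk_generic_xmit() sends per call before returning
 * -EAGAIN, so a single sendmsg() cannot monopolize the CPU.
 * xskmap_flush_list collects, per CPU, the sockets that received packets
 * during the current softirq; __xsk_map_flush() then publishes each
 * socket's RX ring and wakes its readers exactly once, typically at the
 * end of the driver's NAPI poll.
 */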
#define TX_BATCH_SIZE 16

static DEFINE_PER_CPU(struct list_head, xskmap_flush_list);

bool xsk_is_setup_for_bpf_map(struct xdp_sock *xs)
{
        return READ_ONCE(xs->rx) && READ_ONCE(xs->umem) &&
                READ_ONCE(xs->umem->fq);
}

bool xsk_umem_has_addrs(struct xdp_umem *umem, u32 cnt)
{
        return xskq_cons_has_entries(umem->fq, cnt);
}
EXPORT_SYMBOL(xsk_umem_has_addrs);

bool xsk_umem_peek_addr(struct xdp_umem *umem, u64 *addr)
{
        return xskq_cons_peek_addr(umem->fq, addr, umem);
}
EXPORT_SYMBOL(xsk_umem_peek_addr);

void xsk_umem_release_addr(struct xdp_umem *umem)
{
        xskq_cons_release(umem->fq);
}
EXPORT_SYMBOL(xsk_umem_release_addr);

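/* The need_wakeup flags implement an optional protocol (opted into with
 * the XDP_USE_NEED_WAKEUP bind flag) between the driver and userspace:
 * the driver sets XDP_RING_NEED_WAKEUP on the fill or TX ring when it
 * has run dry and needs a syscall (poll() or sendmsg()) to make
 * progress. While the flag is clear, userspace can skip the syscall
 * entirely, which is the main win for busy-polling applications.
 */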
void xsk_set_rx_need_wakeup(struct xdp_umem *umem)
{
        if (umem->need_wakeup & XDP_WAKEUP_RX)
                return;

        umem->fq->ring->flags |= XDP_RING_NEED_WAKEUP;
        umem->need_wakeup |= XDP_WAKEUP_RX;
}
EXPORT_SYMBOL(xsk_set_rx_need_wakeup);

void xsk_set_tx_need_wakeup(struct xdp_umem *umem)
{
        struct xdp_sock *xs;

        if (umem->need_wakeup & XDP_WAKEUP_TX)
                return;

        rcu_read_lock();
        list_for_each_entry_rcu(xs, &umem->xsk_tx_list, list) {
                xs->tx->ring->flags |= XDP_RING_NEED_WAKEUP;
        }
        rcu_read_unlock();

        umem->need_wakeup |= XDP_WAKEUP_TX;
}
EXPORT_SYMBOL(xsk_set_tx_need_wakeup);

void xsk_clear_rx_need_wakeup(struct xdp_umem *umem)
{
        if (!(umem->need_wakeup & XDP_WAKEUP_RX))
                return;

        umem->fq->ring->flags &= ~XDP_RING_NEED_WAKEUP;
        umem->need_wakeup &= ~XDP_WAKEUP_RX;
}
EXPORT_SYMBOL(xsk_clear_rx_need_wakeup);

void xsk_clear_tx_need_wakeup(struct xdp_umem *umem)
{
        struct xdp_sock *xs;

        if (!(umem->need_wakeup & XDP_WAKEUP_TX))
                return;

        rcu_read_lock();
        list_for_each_entry_rcu(xs, &umem->xsk_tx_list, list) {
                xs->tx->ring->flags &= ~XDP_RING_NEED_WAKEUP;
        }
        rcu_read_unlock();

        umem->need_wakeup &= ~XDP_WAKEUP_TX;
}
EXPORT_SYMBOL(xsk_clear_tx_need_wakeup);

bool xsk_umem_uses_need_wakeup(struct xdp_umem *umem)
{
        return umem->flags & XDP_UMEM_USES_NEED_WAKEUP;
}
EXPORT_SYMBOL(xsk_umem_uses_need_wakeup);

/* If a buffer crosses a page boundary, we need to do 2 memcpy's, one for
 * each page. This is only required in copy mode.
 */
static void __xsk_rcv_memcpy(struct xdp_umem *umem, u64 addr, void *from_buf,
                             u32 len, u32 metalen)
{
        void *to_buf = xdp_umem_get_data(umem, addr);

        addr = xsk_umem_add_offset_to_addr(addr);
        if (xskq_cons_crosses_non_contig_pg(umem, addr, len + metalen)) {
                void *next_pg_addr = umem->pages[(addr >> PAGE_SHIFT) + 1].addr;
                u64 page_start = addr & ~(PAGE_SIZE - 1);
                u64 first_len = PAGE_SIZE - (addr - page_start);

                memcpy(to_buf, from_buf, first_len);
                memcpy(next_pg_addr, from_buf + first_len,
                       len + metalen - first_len);

                return;
        }

        memcpy(to_buf, from_buf, len + metalen);
}

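/* Copy-mode receive: pop a frame address off the fill queue, copy the
 * packet (and any XDP metadata in front of it) into the umem, and post
 * a descriptor on the RX ring. In unaligned chunk mode the destination
 * may straddle two non-contiguous pages; e.g. a 300-byte copy starting
 * 100 bytes before a page boundary becomes a 100-byte plus a 200-byte
 * memcpy in __xsk_rcv_memcpy() above.
 */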
static int __xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
{
        u64 offset = xs->umem->headroom;
        u64 addr, memcpy_addr;
        void *from_buf;
        u32 metalen;
        int err;

        if (!xskq_cons_peek_addr(xs->umem->fq, &addr, xs->umem) ||
            len > xs->umem->chunk_size_nohr - XDP_PACKET_HEADROOM) {
                xs->rx_dropped++;
                return -ENOSPC;
        }

        if (unlikely(xdp_data_meta_unsupported(xdp))) {
                from_buf = xdp->data;
                metalen = 0;
        } else {
                from_buf = xdp->data_meta;
                metalen = xdp->data - xdp->data_meta;
        }

        memcpy_addr = xsk_umem_adjust_offset(xs->umem, addr, offset);
        __xsk_rcv_memcpy(xs->umem, memcpy_addr, from_buf, len, metalen);

        offset += metalen;
        addr = xsk_umem_adjust_offset(xs->umem, addr, offset);
        err = xskq_prod_reserve_desc(xs->rx, addr, len);
        if (!err) {
                xskq_cons_release(xs->umem->fq);
                xdp_return_buff(xdp);
                return 0;
        }

        xs->rx_dropped++;
        return err;
}

static int __xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
{
        int err = xskq_prod_reserve_desc(xs->rx, xdp->handle, len);

        if (err)
                xs->rx_dropped++;

        return err;
}

static bool xsk_is_bound(struct xdp_sock *xs)
{
        if (READ_ONCE(xs->state) == XSK_BOUND) {
                /* Matches smp_wmb() in bind(). */
                smp_rmb();
                return true;
        }
        return false;
}

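/* Entry point from the XDP redirect path. A packet is accepted only if
 * it arrived on the device and queue this socket is bound to. The
 * zero-copy path simply posts the existing umem handle on the RX ring;
 * the copy path goes through __xsk_rcv() above.
 */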
static int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
        u32 len;

        if (!xsk_is_bound(xs))
                return -EINVAL;

        if (xs->dev != xdp->rxq->dev || xs->queue_id != xdp->rxq->queue_index)
                return -EINVAL;

        len = xdp->data_end - xdp->data;

        return (xdp->rxq->mem.type == MEM_TYPE_ZERO_COPY) ?
                __xsk_rcv_zc(xs, xdp, len) : __xsk_rcv(xs, xdp, len);
}

static void xsk_flush(struct xdp_sock *xs)
{
        xskq_prod_submit(xs->rx);
        __xskq_cons_release(xs->umem->fq);
        sock_def_readable(&xs->sk);
}

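/* Receive entry point for generic (skb-mode) XDP. Unlike xsk_rcv(),
 * this copies and publishes the descriptor immediately rather than
 * deferring to a flush, and it takes rx_lock since this path can run
 * concurrently for the same socket.
 */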
int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
        u32 metalen = xdp->data - xdp->data_meta;
        u32 len = xdp->data_end - xdp->data;
        u64 offset = xs->umem->headroom;
        void *buffer;
        u64 addr;
        int err;

        spin_lock_bh(&xs->rx_lock);

        if (xs->dev != xdp->rxq->dev || xs->queue_id != xdp->rxq->queue_index) {
                err = -EINVAL;
                goto out_unlock;
        }

        if (!xskq_cons_peek_addr(xs->umem->fq, &addr, xs->umem) ||
            len > xs->umem->chunk_size_nohr - XDP_PACKET_HEADROOM) {
                err = -ENOSPC;
                goto out_drop;
        }

        addr = xsk_umem_adjust_offset(xs->umem, addr, offset);
        buffer = xdp_umem_get_data(xs->umem, addr);
        memcpy(buffer, xdp->data_meta, len + metalen);

        addr = xsk_umem_adjust_offset(xs->umem, addr, metalen);
        err = xskq_prod_reserve_desc(xs->rx, addr, len);
        if (err)
                goto out_drop;

        xskq_cons_release(xs->umem->fq);
        xskq_prod_submit(xs->rx);

        spin_unlock_bh(&xs->rx_lock);

        xs->sk.sk_data_ready(&xs->sk);
        return 0;

out_drop:
        xs->rx_dropped++;
out_unlock:
        spin_unlock_bh(&xs->rx_lock);
        return err;
}

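/* Called from the XDP_REDIRECT fast path for each packet targeting an
 * XSKMAP entry. Sockets that received something are queued on the
 * per-CPU flush list (flush_node.prev doubles as an "already queued"
 * marker), and their RX rings are published in one batch when the
 * driver later calls __xsk_map_flush().
 */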
int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp)
{
        struct list_head *flush_list = this_cpu_ptr(&xskmap_flush_list);
        int err;

        err = xsk_rcv(xs, xdp);
        if (err)
                return err;

        if (!xs->flush_node.prev)
                list_add(&xs->flush_node, flush_list);

        return 0;
}

void __xsk_map_flush(void)
{
        struct list_head *flush_list = this_cpu_ptr(&xskmap_flush_list);
        struct xdp_sock *xs, *tmp;

        list_for_each_entry_safe(xs, tmp, flush_list, flush_node) {
                xsk_flush(xs);
                __list_del_clearprev(&xs->flush_node);
        }
}

void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries)
{
        xskq_prod_submit_n(umem->cq, nb_entries);
}
EXPORT_SYMBOL(xsk_umem_complete_tx);

void xsk_umem_consume_tx_done(struct xdp_umem *umem)
{
        struct xdp_sock *xs;

        rcu_read_lock();
        list_for_each_entry_rcu(xs, &umem->xsk_tx_list, list) {
                __xskq_cons_release(xs->tx);
                xs->sk.sk_write_space(&xs->sk);
        }
        rcu_read_unlock();
}
EXPORT_SYMBOL(xsk_umem_consume_tx_done);

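/* Descriptor hand-off for zero-copy drivers: a driver is expected to
 * call xsk_umem_consume_tx() for each frame it can queue to hardware,
 * xsk_umem_consume_tx_done() when it has finished picking descriptors,
 * and xsk_umem_complete_tx() once the hardware has actually sent them.
 * Iterating over xsk_tx_list services every socket sharing the umem.
 */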
bool xsk_umem_consume_tx(struct xdp_umem *umem, struct xdp_desc *desc)
{
        struct xdp_sock *xs;

        rcu_read_lock();
        list_for_each_entry_rcu(xs, &umem->xsk_tx_list, list) {
                if (!xskq_cons_peek_desc(xs->tx, desc, umem))
                        continue;

                /* This is the backpressure mechanism for the Tx path.
                 * Reserve space in the completion queue and only proceed
                 * if there is space in it. This avoids having to implement
                 * any buffering in the Tx path.
                 */
                if (xskq_prod_reserve_addr(umem->cq, desc->addr))
                        goto out;

                xskq_cons_release(xs->tx);
                rcu_read_unlock();
                return true;
        }

out:
        rcu_read_unlock();
        return false;
}
EXPORT_SYMBOL(xsk_umem_consume_tx);

static int xsk_wakeup(struct xdp_sock *xs, u8 flags)
{
        struct net_device *dev = xs->dev;
        int err;

        rcu_read_lock();
        err = dev->netdev_ops->ndo_xsk_wakeup(dev, xs->queue_id, flags);
        rcu_read_unlock();

        return err;
}

static int xsk_zc_xmit(struct xdp_sock *xs)
{
        return xsk_wakeup(xs, XDP_WAKEUP_TX);
}

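/* skb destructor for copy-mode TX. It may run from hard-IRQ context
 * when the driver frees the skb, hence the irqsave lock. The frame
 * address stashed in destructor_arg at xmit time is posted on the
 * completion queue so userspace can reuse the umem frame.
 */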
static void xsk_destruct_skb(struct sk_buff *skb)
{
        u64 addr = (u64)(long)skb_shinfo(skb)->destructor_arg;
        struct xdp_sock *xs = xdp_sk(skb->sk);
        unsigned long flags;

        spin_lock_irqsave(&xs->tx_completion_lock, flags);
        xskq_prod_submit_addr(xs->umem->cq, addr);
        spin_unlock_irqrestore(&xs->tx_completion_lock, flags);

        sock_wfree(skb);
}

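/* Copy-mode TX: for each descriptor on the TX ring, allocate an skb,
 * copy the umem payload into it and hand it directly to the bound
 * queue with dev_direct_xmit(). At most TX_BATCH_SIZE descriptors are
 * handled per call; on -EAGAIN userspace simply calls sendmsg() again.
 */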
static int xsk_generic_xmit(struct sock *sk)
{
        struct xdp_sock *xs = xdp_sk(sk);
        u32 max_batch = TX_BATCH_SIZE;
        bool sent_frame = false;
        struct xdp_desc desc;
        struct sk_buff *skb;
        int err = 0;

        mutex_lock(&xs->mutex);

        if (xs->queue_id >= xs->dev->real_num_tx_queues)
                goto out;

        while (xskq_cons_peek_desc(xs->tx, &desc, xs->umem)) {
                char *buffer;
                u64 addr;
                u32 len;

                if (max_batch-- == 0) {
                        err = -EAGAIN;
                        goto out;
                }

                len = desc.len;
                skb = sock_alloc_send_skb(sk, len, 1, &err);
                if (unlikely(!skb)) {
                        err = -EAGAIN;
                        goto out;
                }

                skb_put(skb, len);
                addr = desc.addr;
                buffer = xdp_umem_get_data(xs->umem, addr);
                err = skb_store_bits(skb, 0, buffer, len);
                /* This is the backpressure mechanism for the Tx path.
                 * Reserve space in the completion queue and only proceed
                 * if there is space in it. This avoids having to implement
                 * any buffering in the Tx path.
                 */
                if (unlikely(err) || xskq_prod_reserve(xs->umem->cq)) {
                        kfree_skb(skb);
                        goto out;
                }

                skb->dev = xs->dev;
                skb->priority = sk->sk_priority;
                skb->mark = sk->sk_mark;
                skb_shinfo(skb)->destructor_arg = (void *)(long)desc.addr;
                skb->destructor = xsk_destruct_skb;

                err = dev_direct_xmit(skb, xs->queue_id);
                xskq_cons_release(xs->tx);
                /* Ignore NET_XMIT_CN as packet might have been sent */
                if (err == NET_XMIT_DROP || err == NETDEV_TX_BUSY) {
                        /* SKB completed but not sent */
                        err = -EBUSY;
                        goto out;
                }

                sent_frame = true;
        }

out:
        if (sent_frame)
                sk->sk_write_space(sk);

        mutex_unlock(&xs->mutex);
        return err;
}

static int __xsk_sendmsg(struct sock *sk)
{
        struct xdp_sock *xs = xdp_sk(sk);

        if (unlikely(!(xs->dev->flags & IFF_UP)))
                return -ENETDOWN;
        if (unlikely(!xs->tx))
                return -ENOBUFS;

        return xs->zc ? xsk_zc_xmit(xs) : xsk_generic_xmit(sk);
}

static int xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
{
        bool need_wait = !(m->msg_flags & MSG_DONTWAIT);
        struct sock *sk = sock->sk;
        struct xdp_sock *xs = xdp_sk(sk);

        if (unlikely(!xsk_is_bound(xs)))
                return -ENXIO;
        if (unlikely(need_wait))
                return -EOPNOTSUPP;

        return __xsk_sendmsg(sk);
}

static __poll_t xsk_poll(struct file *file, struct socket *sock,
                         struct poll_table_struct *wait)
{
        __poll_t mask = datagram_poll(file, sock, wait);
        struct sock *sk = sock->sk;
        struct xdp_sock *xs = xdp_sk(sk);
        struct xdp_umem *umem;

        if (unlikely(!xsk_is_bound(xs)))
                return mask;

        umem = xs->umem;

        if (umem->need_wakeup) {
                if (xs->zc)
                        xsk_wakeup(xs, umem->need_wakeup);
                else
                        /* Poll needs to drive Tx also in copy mode */
                        __xsk_sendmsg(sk);
        }

        if (xs->rx && !xskq_prod_is_empty(xs->rx))
                mask |= EPOLLIN | EPOLLRDNORM;
        if (xs->tx && !xskq_cons_is_full(xs->tx))
                mask |= EPOLLOUT | EPOLLWRNORM;

        return mask;
}

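/* All four ring types are created here. Sizes must be a power of two
 * so ring indices can wrap with a simple mask, and the smp_wmb() pairs
 * with the smp_rmb() in xsk_mmap() so that a queue is fully initialized
 * before userspace can map it.
 */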
static int xsk_init_queue(u32 entries, struct xsk_queue **queue,
                          bool umem_queue)
{
        struct xsk_queue *q;

        if (entries == 0 || *queue || !is_power_of_2(entries))
                return -EINVAL;

        q = xskq_create(entries, umem_queue);
        if (!q)
                return -ENOMEM;

        /* Make sure queue is ready before it can be seen by others */
        smp_wmb();
        WRITE_ONCE(*queue, q);
        return 0;
}

static void xsk_unbind_dev(struct xdp_sock *xs)
{
        struct net_device *dev = xs->dev;

        if (xs->state != XSK_BOUND)
                return;
        WRITE_ONCE(xs->state, XSK_UNBOUND);

        /* Wait for driver to stop using the xdp socket. */
        xdp_del_sk_umem(xs->umem, xs);
        xs->dev = NULL;
        synchronize_net();
        dev_put(dev);
}

static struct xsk_map *xsk_get_map_list_entry(struct xdp_sock *xs,
                                              struct xdp_sock ***map_entry)
{
        struct xsk_map *map = NULL;
        struct xsk_map_node *node;

        *map_entry = NULL;

        spin_lock_bh(&xs->map_list_lock);
        node = list_first_entry_or_null(&xs->map_list, struct xsk_map_node,
                                        node);
        if (node) {
                WARN_ON(xsk_map_inc(node->map));
                map = node->map;
                *map_entry = node->map_entry;
        }
        spin_unlock_bh(&xs->map_list_lock);
        return map;
}

static void xsk_delete_from_maps(struct xdp_sock *xs)
{
        /* This function removes the current XDP socket from all the
         * maps it resides in. We need to take extra care here, due to
         * the two locks involved. Each map has a lock synchronizing
         * updates to the entries, and each socket has a lock that
         * synchronizes access to the list of maps (map_list). For
         * deadlock avoidance the locks need to be taken in the order
         * "map lock"->"socket map list lock". We start off by
         * accessing the socket map list, and take a reference to the
         * map to guarantee existence between the
         * xsk_get_map_list_entry() and xsk_map_try_sock_delete()
         * calls. Then we ask the map to remove the socket, which
         * tries to remove the socket from the map. Note that there
         * might be updates to the map between
         * xsk_get_map_list_entry() and xsk_map_try_sock_delete().
         */
        struct xdp_sock **map_entry = NULL;
        struct xsk_map *map;

        while ((map = xsk_get_map_list_entry(xs, &map_entry))) {
                xsk_map_try_sock_delete(map, xs, map_entry);
                xsk_map_put(map);
        }
}

static int xsk_release(struct socket *sock)
{
        struct sock *sk = sock->sk;
        struct xdp_sock *xs = xdp_sk(sk);
        struct net *net;

        if (!sk)
                return 0;

        net = sock_net(sk);

        mutex_lock(&net->xdp.lock);
        sk_del_node_init_rcu(sk);
        mutex_unlock(&net->xdp.lock);

        local_bh_disable();
        sock_prot_inuse_add(net, sk->sk_prot, -1);
        local_bh_enable();

        xsk_delete_from_maps(xs);
        mutex_lock(&xs->mutex);
        xsk_unbind_dev(xs);
        mutex_unlock(&xs->mutex);

        xskq_destroy(xs->rx);
        xskq_destroy(xs->tx);

        sock_orphan(sk);
        sock->sk = NULL;

        sk_refcnt_debug_release(sk);
        sock_put(sk);

        return 0;
}

static struct socket *xsk_lookup_xsk_from_fd(int fd)
{
        struct socket *sock;
        int err;

        sock = sockfd_lookup(fd, &err);
        if (!sock)
                return ERR_PTR(-ENOTSOCK);

        if (sock->sk->sk_family != PF_XDP) {
                sockfd_put(sock);
                return ERR_PTR(-ENOPROTOOPT);
        }

        return sock;
}

/* Check if umem pages are contiguous.
 * In zero-copy mode, use the DMA address to do the page contiguity check.
 * For all other modes we use addr (kernel virtual address).
 * Store the result in the low bits of addr.
 */
static void xsk_check_page_contiguity(struct xdp_umem *umem, u32 flags)
{
        struct xdp_umem_page *pgs = umem->pages;
        int i, is_contig;

        for (i = 0; i < umem->npgs - 1; i++) {
                is_contig = (flags & XDP_ZEROCOPY) ?
                        (pgs[i].dma + PAGE_SIZE == pgs[i + 1].dma) :
                        (pgs[i].addr + PAGE_SIZE == pgs[i + 1].addr);
                pgs[i].addr += is_contig << XSK_NEXT_PG_CONTIG_SHIFT;
        }
}

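/* bind() attaches the socket to a <netdev, queue_id> pair. Either the
 * socket borrows the umem of an already bound socket (XDP_SHARED_UMEM,
 * which may not be combined with any other flag), or it registers its
 * own umem with the driver, possibly in zero-copy mode. The state
 * transition READY -> BOUND happens under xs->mutex, with the smp_wmb()
 * below pairing against the smp_rmb() in xsk_is_bound().
 */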
static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
{
        struct sockaddr_xdp *sxdp = (struct sockaddr_xdp *)addr;
        struct sock *sk = sock->sk;
        struct xdp_sock *xs = xdp_sk(sk);
        struct net_device *dev;
        u32 flags, qid;
        int err = 0;

        if (addr_len < sizeof(struct sockaddr_xdp))
                return -EINVAL;
        if (sxdp->sxdp_family != AF_XDP)
                return -EINVAL;

        flags = sxdp->sxdp_flags;
        if (flags & ~(XDP_SHARED_UMEM | XDP_COPY | XDP_ZEROCOPY |
                      XDP_USE_NEED_WAKEUP))
                return -EINVAL;

        rtnl_lock();
        mutex_lock(&xs->mutex);
        if (xs->state != XSK_READY) {
                err = -EBUSY;
                goto out_release;
        }

        dev = dev_get_by_index(sock_net(sk), sxdp->sxdp_ifindex);
        if (!dev) {
                err = -ENODEV;
                goto out_release;
        }

        if (!xs->rx && !xs->tx) {
                err = -EINVAL;
                goto out_unlock;
        }

        qid = sxdp->sxdp_queue_id;

        if (flags & XDP_SHARED_UMEM) {
                struct xdp_sock *umem_xs;
                struct socket *sock;

                if ((flags & XDP_COPY) || (flags & XDP_ZEROCOPY) ||
                    (flags & XDP_USE_NEED_WAKEUP)) {
                        /* Cannot specify flags for shared sockets. */
                        err = -EINVAL;
                        goto out_unlock;
                }

                if (xs->umem) {
                        /* We already have our own. */
                        err = -EINVAL;
                        goto out_unlock;
                }

                sock = xsk_lookup_xsk_from_fd(sxdp->sxdp_shared_umem_fd);
                if (IS_ERR(sock)) {
                        err = PTR_ERR(sock);
                        goto out_unlock;
                }

                umem_xs = xdp_sk(sock->sk);
                if (!xsk_is_bound(umem_xs)) {
                        err = -EBADF;
                        sockfd_put(sock);
                        goto out_unlock;
                }
                if (umem_xs->dev != dev || umem_xs->queue_id != qid) {
                        err = -EINVAL;
                        sockfd_put(sock);
                        goto out_unlock;
                }

                xdp_get_umem(umem_xs->umem);
                WRITE_ONCE(xs->umem, umem_xs->umem);
                sockfd_put(sock);
        } else if (!xs->umem || !xdp_umem_validate_queues(xs->umem)) {
                err = -EINVAL;
                goto out_unlock;
        } else {
                /* This xsk has its own umem. */
                xskq_set_umem(xs->umem->fq, xs->umem->size,
                              xs->umem->chunk_mask);
                xskq_set_umem(xs->umem->cq, xs->umem->size,
                              xs->umem->chunk_mask);

                err = xdp_umem_assign_dev(xs->umem, dev, qid, flags);
                if (err)
                        goto out_unlock;

                xsk_check_page_contiguity(xs->umem, flags);
        }

        xs->dev = dev;
        xs->zc = xs->umem->zc;
        xs->queue_id = qid;
        xskq_set_umem(xs->rx, xs->umem->size, xs->umem->chunk_mask);
        xskq_set_umem(xs->tx, xs->umem->size, xs->umem->chunk_mask);
        xdp_add_sk_umem(xs->umem, xs);

out_unlock:
        if (err) {
                dev_put(dev);
        } else {
                /* Matches smp_rmb() in bind() for shared umem
                 * sockets, and xsk_is_bound().
                 */
                smp_wmb();
                WRITE_ONCE(xs->state, XSK_BOUND);
        }
out_release:
        mutex_unlock(&xs->mutex);
        rtnl_unlock();
        return err;
}

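/* Layout of struct xdp_umem_reg before the flags member was appended.
 * XDP_UMEM_REG accepts this shorter size so binaries built against the
 * older ABI keep working.
 */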
struct xdp_umem_reg_v1 {
        __u64 addr; /* Start of packet data area */
        __u64 len; /* Length of packet data area */
        __u32 chunk_size;
        __u32 headroom;
};

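/* The handlers below imply the usual userspace setup order; a rough
 * sketch with error handling and the shared-umem case omitted (mr and
 * entries are illustrative variables):
 *
 *      fd = socket(AF_XDP, SOCK_RAW, 0);
 *      setsockopt(fd, SOL_XDP, XDP_UMEM_REG, &mr, sizeof(mr));
 *      setsockopt(fd, SOL_XDP, XDP_UMEM_FILL_RING, &entries, sizeof(entries));
 *      setsockopt(fd, SOL_XDP, XDP_UMEM_COMPLETION_RING, &entries, sizeof(entries));
 *      setsockopt(fd, SOL_XDP, XDP_RX_RING, &entries, sizeof(entries));
 *      setsockopt(fd, SOL_XDP, XDP_TX_RING, &entries, sizeof(entries));
 *
 * then mmap() each ring using the offsets reported by XDP_MMAP_OFFSETS
 * and finally bind() the socket to a <netdev, queue_id> pair.
 */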
static int xsk_setsockopt(struct socket *sock, int level, int optname,
                          char __user *optval, unsigned int optlen)
{
        struct sock *sk = sock->sk;
        struct xdp_sock *xs = xdp_sk(sk);
        int err;

        if (level != SOL_XDP)
                return -ENOPROTOOPT;

        switch (optname) {
        case XDP_RX_RING:
        case XDP_TX_RING:
        {
                struct xsk_queue **q;
                int entries;

                if (optlen < sizeof(entries))
                        return -EINVAL;
                if (copy_from_user(&entries, optval, sizeof(entries)))
                        return -EFAULT;

                mutex_lock(&xs->mutex);
                if (xs->state != XSK_READY) {
                        mutex_unlock(&xs->mutex);
                        return -EBUSY;
                }
                q = (optname == XDP_TX_RING) ? &xs->tx : &xs->rx;
                err = xsk_init_queue(entries, q, false);
                if (!err && optname == XDP_TX_RING)
                        /* Tx needs to be explicitly woken up the first time */
                        xs->tx->ring->flags |= XDP_RING_NEED_WAKEUP;
                mutex_unlock(&xs->mutex);
                return err;
        }
        case XDP_UMEM_REG:
        {
                size_t mr_size = sizeof(struct xdp_umem_reg);
                struct xdp_umem_reg mr = {};
                struct xdp_umem *umem;

                if (optlen < sizeof(struct xdp_umem_reg_v1))
                        return -EINVAL;
                else if (optlen < sizeof(mr))
                        mr_size = sizeof(struct xdp_umem_reg_v1);

                if (copy_from_user(&mr, optval, mr_size))
                        return -EFAULT;

                mutex_lock(&xs->mutex);
                if (xs->state != XSK_READY || xs->umem) {
                        mutex_unlock(&xs->mutex);
                        return -EBUSY;
                }

                umem = xdp_umem_create(&mr);
                if (IS_ERR(umem)) {
                        mutex_unlock(&xs->mutex);
                        return PTR_ERR(umem);
                }

                /* Make sure umem is ready before it can be seen by others */
                smp_wmb();
                WRITE_ONCE(xs->umem, umem);
                mutex_unlock(&xs->mutex);
                return 0;
        }
        case XDP_UMEM_FILL_RING:
        case XDP_UMEM_COMPLETION_RING:
        {
                struct xsk_queue **q;
                int entries;

                if (copy_from_user(&entries, optval, sizeof(entries)))
                        return -EFAULT;

                mutex_lock(&xs->mutex);
                if (xs->state != XSK_READY) {
                        mutex_unlock(&xs->mutex);
                        return -EBUSY;
                }
                if (!xs->umem) {
                        mutex_unlock(&xs->mutex);
                        return -EINVAL;
                }

                q = (optname == XDP_UMEM_FILL_RING) ? &xs->umem->fq :
                        &xs->umem->cq;
                err = xsk_init_queue(entries, q, true);
                mutex_unlock(&xs->mutex);
                return err;
        }
        default:
                break;
        }

        return -ENOPROTOOPT;
}

static void xsk_enter_rxtx_offsets(struct xdp_ring_offset_v1 *ring)
{
        ring->producer = offsetof(struct xdp_rxtx_ring, ptrs.producer);
        ring->consumer = offsetof(struct xdp_rxtx_ring, ptrs.consumer);
        ring->desc = offsetof(struct xdp_rxtx_ring, desc);
}

static void xsk_enter_umem_offsets(struct xdp_ring_offset_v1 *ring)
{
        ring->producer = offsetof(struct xdp_umem_ring, ptrs.producer);
        ring->consumer = offsetof(struct xdp_umem_ring, ptrs.consumer);
        ring->desc = offsetof(struct xdp_umem_ring, desc);
}

static int xsk_getsockopt(struct socket *sock, int level, int optname,
                          char __user *optval, int __user *optlen)
{
        struct sock *sk = sock->sk;
        struct xdp_sock *xs = xdp_sk(sk);
        int len;

        if (level != SOL_XDP)
                return -ENOPROTOOPT;

        if (get_user(len, optlen))
                return -EFAULT;
        if (len < 0)
                return -EINVAL;

        switch (optname) {
        case XDP_STATISTICS:
        {
                struct xdp_statistics stats;

                if (len < sizeof(stats))
                        return -EINVAL;

                mutex_lock(&xs->mutex);
                stats.rx_dropped = xs->rx_dropped;
                stats.rx_invalid_descs = xskq_nb_invalid_descs(xs->rx);
                stats.tx_invalid_descs = xskq_nb_invalid_descs(xs->tx);
                mutex_unlock(&xs->mutex);

                if (copy_to_user(optval, &stats, sizeof(stats)))
                        return -EFAULT;
                if (put_user(sizeof(stats), optlen))
                        return -EFAULT;

                return 0;
        }
        case XDP_MMAP_OFFSETS:
        {
                struct xdp_mmap_offsets off;
                struct xdp_mmap_offsets_v1 off_v1;
                bool flags_supported = true;
                void *to_copy;

                if (len < sizeof(off_v1))
                        return -EINVAL;
                else if (len < sizeof(off))
                        flags_supported = false;

                if (flags_supported) {
                        /* xdp_ring_offset is identical to xdp_ring_offset_v1
                         * except for the flags field added to the end.
                         */
                        xsk_enter_rxtx_offsets((struct xdp_ring_offset_v1 *)
                                               &off.rx);
                        xsk_enter_rxtx_offsets((struct xdp_ring_offset_v1 *)
                                               &off.tx);
                        xsk_enter_umem_offsets((struct xdp_ring_offset_v1 *)
                                               &off.fr);
                        xsk_enter_umem_offsets((struct xdp_ring_offset_v1 *)
                                               &off.cr);
                        off.rx.flags = offsetof(struct xdp_rxtx_ring,
                                                ptrs.flags);
                        off.tx.flags = offsetof(struct xdp_rxtx_ring,
                                                ptrs.flags);
                        off.fr.flags = offsetof(struct xdp_umem_ring,
                                                ptrs.flags);
                        off.cr.flags = offsetof(struct xdp_umem_ring,
                                                ptrs.flags);

                        len = sizeof(off);
                        to_copy = &off;
                } else {
                        xsk_enter_rxtx_offsets(&off_v1.rx);
                        xsk_enter_rxtx_offsets(&off_v1.tx);
                        xsk_enter_umem_offsets(&off_v1.fr);
                        xsk_enter_umem_offsets(&off_v1.cr);

                        len = sizeof(off_v1);
                        to_copy = &off_v1;
                }

                if (copy_to_user(optval, to_copy, len))
                        return -EFAULT;
                if (put_user(len, optlen))
                        return -EFAULT;

                return 0;
        }
        case XDP_OPTIONS:
        {
                struct xdp_options opts = {};

                if (len < sizeof(opts))
                        return -EINVAL;

                mutex_lock(&xs->mutex);
                if (xs->zc)
                        opts.flags |= XDP_OPTIONS_ZEROCOPY;
                mutex_unlock(&xs->mutex);

                len = sizeof(opts);
                if (copy_to_user(optval, &opts, len))
                        return -EFAULT;
                if (put_user(len, optlen))
                        return -EFAULT;

                return 0;
        }
        default:
                break;
        }

        return -EOPNOTSUPP;
}

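/* Rings are exposed to userspace by mmap()ing the socket fd with a
 * magic page offset (XDP_PGOFF_RX_RING, XDP_PGOFF_TX_RING,
 * XDP_UMEM_PGOFF_FILL_RING or XDP_UMEM_PGOFF_COMPLETION_RING) that
 * selects the ring. Mapping is only allowed while the socket is still
 * in XSK_READY state, i.e. before bind().
 */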
static int xsk_mmap(struct file *file, struct socket *sock,
                    struct vm_area_struct *vma)
{
        loff_t offset = (loff_t)vma->vm_pgoff << PAGE_SHIFT;
        unsigned long size = vma->vm_end - vma->vm_start;
        struct xdp_sock *xs = xdp_sk(sock->sk);
        struct xsk_queue *q = NULL;
        struct xdp_umem *umem;
        unsigned long pfn;
        struct page *qpg;

        if (READ_ONCE(xs->state) != XSK_READY)
                return -EBUSY;

        if (offset == XDP_PGOFF_RX_RING) {
                q = READ_ONCE(xs->rx);
        } else if (offset == XDP_PGOFF_TX_RING) {
                q = READ_ONCE(xs->tx);
        } else {
                umem = READ_ONCE(xs->umem);
                if (!umem)
                        return -EINVAL;

                /* Matches the smp_wmb() in XDP_UMEM_REG */
                smp_rmb();
                if (offset == XDP_UMEM_PGOFF_FILL_RING)
                        q = READ_ONCE(umem->fq);
                else if (offset == XDP_UMEM_PGOFF_COMPLETION_RING)
                        q = READ_ONCE(umem->cq);
        }

        if (!q)
                return -EINVAL;

        /* Matches the smp_wmb() in xsk_init_queue */
        smp_rmb();
        qpg = virt_to_head_page(q->ring);
        if (size > page_size(qpg))
                return -EINVAL;

        pfn = virt_to_phys(q->ring) >> PAGE_SHIFT;
        return remap_pfn_range(vma, vma->vm_start, pfn,
                               size, vma->vm_page_prot);
}

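/* Netdev notifier: on NETDEV_UNREGISTER, every socket bound to the
 * disappearing device is unbound and its error set to ENETDOWN so that
 * userspace notices the device is gone.
 */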
static int xsk_notifier(struct notifier_block *this,
                        unsigned long msg, void *ptr)
{
        struct net_device *dev = netdev_notifier_info_to_dev(ptr);
        struct net *net = dev_net(dev);
        struct sock *sk;

        switch (msg) {
        case NETDEV_UNREGISTER:
                mutex_lock(&net->xdp.lock);
                sk_for_each(sk, &net->xdp.list) {
                        struct xdp_sock *xs = xdp_sk(sk);

                        mutex_lock(&xs->mutex);
                        if (xs->dev == dev) {
                                sk->sk_err = ENETDOWN;
                                if (!sock_flag(sk, SOCK_DEAD))
                                        sk->sk_error_report(sk);

                                xsk_unbind_dev(xs);

                                /* Clear device references in umem. */
                                xdp_umem_clear_dev(xs->umem);
                        }
                        mutex_unlock(&xs->mutex);
                }
                mutex_unlock(&net->xdp.lock);
                break;
        }
        return NOTIFY_DONE;
}

static struct proto xsk_proto = {
        .name =         "XDP",
        .owner =        THIS_MODULE,
        .obj_size =     sizeof(struct xdp_sock),
};

static const struct proto_ops xsk_proto_ops = {
        .family         = PF_XDP,
        .owner          = THIS_MODULE,
        .release        = xsk_release,
        .bind           = xsk_bind,
        .connect        = sock_no_connect,
        .socketpair     = sock_no_socketpair,
        .accept         = sock_no_accept,
        .getname        = sock_no_getname,
        .poll           = xsk_poll,
        .ioctl          = sock_no_ioctl,
        .listen         = sock_no_listen,
        .shutdown       = sock_no_shutdown,
        .setsockopt     = xsk_setsockopt,
        .getsockopt     = xsk_getsockopt,
        .sendmsg        = xsk_sendmsg,
        .recvmsg        = sock_no_recvmsg,
        .mmap           = xsk_mmap,
        .sendpage       = sock_no_sendpage,
};

static void xsk_destruct(struct sock *sk)
{
        struct xdp_sock *xs = xdp_sk(sk);

        if (!sock_flag(sk, SOCK_DEAD))
                return;

        xdp_put_umem(xs->umem);

        sk_refcnt_debug_dec(sk);
}

static int xsk_create(struct net *net, struct socket *sock, int protocol,
                      int kern)
{
        struct sock *sk;
        struct xdp_sock *xs;

        if (!ns_capable(net->user_ns, CAP_NET_RAW))
                return -EPERM;
        if (sock->type != SOCK_RAW)
                return -ESOCKTNOSUPPORT;

        if (protocol)
                return -EPROTONOSUPPORT;

        sock->state = SS_UNCONNECTED;

        sk = sk_alloc(net, PF_XDP, GFP_KERNEL, &xsk_proto, kern);
        if (!sk)
                return -ENOBUFS;

        sock->ops = &xsk_proto_ops;

        sock_init_data(sock, sk);

        sk->sk_family = PF_XDP;

        sk->sk_destruct = xsk_destruct;
        sk_refcnt_debug_inc(sk);

        sock_set_flag(sk, SOCK_RCU_FREE);

        xs = xdp_sk(sk);
        xs->state = XSK_READY;
        mutex_init(&xs->mutex);
        spin_lock_init(&xs->rx_lock);
        spin_lock_init(&xs->tx_completion_lock);

        INIT_LIST_HEAD(&xs->map_list);
        spin_lock_init(&xs->map_list_lock);

        mutex_lock(&net->xdp.lock);
        sk_add_node_rcu(sk, &net->xdp.list);
        mutex_unlock(&net->xdp.lock);

        local_bh_disable();
        sock_prot_inuse_add(net, &xsk_proto, 1);
        local_bh_enable();

        return 0;
}

static const struct net_proto_family xsk_family_ops = {
        .family = PF_XDP,
        .create = xsk_create,
        .owner  = THIS_MODULE,
};

static struct notifier_block xsk_netdev_notifier = {
        .notifier_call = xsk_notifier,
};

static int __net_init xsk_net_init(struct net *net)
{
        mutex_init(&net->xdp.lock);
        INIT_HLIST_HEAD(&net->xdp.list);
        return 0;
}

static void __net_exit xsk_net_exit(struct net *net)
{
        WARN_ON_ONCE(!hlist_empty(&net->xdp.list));
}

static struct pernet_operations xsk_net_ops = {
        .init = xsk_net_init,
        .exit = xsk_net_exit,
};

static int __init xsk_init(void)
{
        int err, cpu;

        err = proto_register(&xsk_proto, 0 /* no slab */);
        if (err)
                goto out;

        err = sock_register(&xsk_family_ops);
        if (err)
                goto out_proto;

        err = register_pernet_subsys(&xsk_net_ops);
        if (err)
                goto out_sk;

        err = register_netdevice_notifier(&xsk_netdev_notifier);
        if (err)
                goto out_pernet;

        for_each_possible_cpu(cpu)
                INIT_LIST_HEAD(&per_cpu(xskmap_flush_list, cpu));
        return 0;

out_pernet:
        unregister_pernet_subsys(&xsk_net_ops);
out_sk:
        sock_unregister(PF_XDP);
out_proto:
        proto_unregister(&xsk_proto);
out:
        return err;
}

fs_initcall(xsk_init);