1// SPDX-License-Identifier: GPL-2.0
2/* XDP sockets
3 *
4 * AF_XDP sockets allows a channel between XDP programs and userspace
5 * applications.
6 * Copyright(c) 2018 Intel Corporation.
7 *
8 * Author(s): Björn Töpel <bjorn.topel@intel.com>
9 * Magnus Karlsson <magnus.karlsson@intel.com>
10 */
11
12#define pr_fmt(fmt) "AF_XDP: %s: " fmt, __func__
13
14#include <linux/if_xdp.h>
15#include <linux/init.h>
16#include <linux/sched/mm.h>
17#include <linux/sched/signal.h>
18#include <linux/sched/task.h>
19#include <linux/socket.h>
20#include <linux/file.h>
21#include <linux/uaccess.h>
22#include <linux/net.h>
23#include <linux/netdevice.h>
 24#include <linux/rculist.h>
 25#include <linux/vmalloc.h>
 26#include <net/xdp_sock_drv.h>
 27#include <net/busy_poll.h>
 28#include <net/netdev_rx_queue.h>
 29#include <net/xdp.h>
 30
 31#include "xsk_queue.h"
 32#include "xdp_umem.h"
 33#include "xsk.h"
 34
 35#define TX_BATCH_SIZE 32
 36#define MAX_PER_SOCKET_BUDGET (TX_BATCH_SIZE)
 37
38static DEFINE_PER_CPU(struct list_head, xskmap_flush_list);
39
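/* Descriptive note (added): the helpers below implement the XDP_USE_NEED_WAKEUP
 * feature for drivers. They set or clear XDP_RING_NEED_WAKEUP on the fill ring
 * (Rx) or on every Tx ring sharing the pool, caching the state in
 * pool->cached_need_wakeup so the ring flag is only written on transitions.
 */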
 40void xsk_set_rx_need_wakeup(struct xsk_buff_pool *pool)
 41{
 42 if (pool->cached_need_wakeup & XDP_WAKEUP_RX)
 43 return;
 44
 45 pool->fq->ring->flags |= XDP_RING_NEED_WAKEUP;
 46 pool->cached_need_wakeup |= XDP_WAKEUP_RX;
 47}
 48EXPORT_SYMBOL(xsk_set_rx_need_wakeup);
 49
 50void xsk_set_tx_need_wakeup(struct xsk_buff_pool *pool)
 51{
 52 struct xdp_sock *xs;
 53
 54 if (pool->cached_need_wakeup & XDP_WAKEUP_TX)
 55 return;
 56
 57 rcu_read_lock();
 58 list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
 59 xs->tx->ring->flags |= XDP_RING_NEED_WAKEUP;
 60 }
 61 rcu_read_unlock();
 62
 63 pool->cached_need_wakeup |= XDP_WAKEUP_TX;
 64}
 65EXPORT_SYMBOL(xsk_set_tx_need_wakeup);
66
 67void xsk_clear_rx_need_wakeup(struct xsk_buff_pool *pool)
 68{
 69 if (!(pool->cached_need_wakeup & XDP_WAKEUP_RX))
 70 return;
 71
 72 pool->fq->ring->flags &= ~XDP_RING_NEED_WAKEUP;
 73 pool->cached_need_wakeup &= ~XDP_WAKEUP_RX;
 74}
 75EXPORT_SYMBOL(xsk_clear_rx_need_wakeup);
 76
 77void xsk_clear_tx_need_wakeup(struct xsk_buff_pool *pool)
 78{
 79 struct xdp_sock *xs;
 80
 81 if (!(pool->cached_need_wakeup & XDP_WAKEUP_TX))
 82 return;
 83
 84 rcu_read_lock();
 85 list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
 86 xs->tx->ring->flags &= ~XDP_RING_NEED_WAKEUP;
 87 }
 88 rcu_read_unlock();
 89
 90 pool->cached_need_wakeup &= ~XDP_WAKEUP_TX;
 91}
 92EXPORT_SYMBOL(xsk_clear_tx_need_wakeup);
 93
 94bool xsk_uses_need_wakeup(struct xsk_buff_pool *pool)
 95{
 96 return pool->uses_need_wakeup;
 97}
 98EXPORT_SYMBOL(xsk_uses_need_wakeup);
 99
100struct xsk_buff_pool *xsk_get_pool_from_qid(struct net_device *dev,
101 u16 queue_id)
102{
103 if (queue_id < dev->real_num_rx_queues)
104 return dev->_rx[queue_id].pool;
105 if (queue_id < dev->real_num_tx_queues)
106 return dev->_tx[queue_id].pool;
107
108 return NULL;
109}
110EXPORT_SYMBOL(xsk_get_pool_from_qid);
111
112void xsk_clear_pool_at_qid(struct net_device *dev, u16 queue_id)
113{
 114 if (queue_id < dev->num_rx_queues)
 115 dev->_rx[queue_id].pool = NULL;
 116 if (queue_id < dev->num_tx_queues)
117 dev->_tx[queue_id].pool = NULL;
118}
119
120/* The buffer pool is stored both in the _rx struct and the _tx struct as we do
121 * not know if the device has more tx queues than rx, or the opposite.
122 * This might also change during run time.
123 */
124int xsk_reg_pool_at_qid(struct net_device *dev, struct xsk_buff_pool *pool,
125 u16 queue_id)
126{
127 if (queue_id >= max_t(unsigned int,
128 dev->real_num_rx_queues,
129 dev->real_num_tx_queues))
130 return -EINVAL;
131
132 if (queue_id < dev->real_num_rx_queues)
133 dev->_rx[queue_id].pool = pool;
134 if (queue_id < dev->real_num_tx_queues)
135 dev->_tx[queue_id].pool = pool;
136
137 return 0;
138}
139
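/* Descriptive note (added): __xsk_rcv_zc() posts one zero-copy buffer to the
 * socket's Rx ring. On success the xskb is released back to the pool; on
 * failure rx_queue_full is bumped and the error is returned to the caller.
 */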
 140static int __xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff_xsk *xskb, u32 len,
 141 u32 flags)
 142{
 143 u64 addr;
 144 int err;
 145
 146 addr = xp_get_handle(xskb);
 147 err = xskq_prod_reserve_desc(xs->rx, addr, len, flags);
 148 if (err) {
 149 xs->rx_queue_full++;
 150 return err;
 151 }
 152
 153 xp_release(xskb);
 154 return 0;
 155}
 156
157static int xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
158{
159 struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);
160 u32 frags = xdp_buff_has_frags(xdp);
161 struct xdp_buff_xsk *pos, *tmp;
162 struct list_head *xskb_list;
163 u32 contd = 0;
164 int err;
165
166 if (frags)
167 contd = XDP_PKT_CONTD;
168
169 err = __xsk_rcv_zc(xs, xskb, len, contd);
170 if (err)
171 goto err;
172 if (likely(!frags))
173 return 0;
174
175 xskb_list = &xskb->pool->xskb_list;
176 list_for_each_entry_safe(pos, tmp, xskb_list, xskb_list_node) {
177 if (list_is_singular(xskb_list))
178 contd = 0;
179 len = pos->xdp.data_end - pos->xdp.data;
180 err = __xsk_rcv_zc(xs, pos, len, contd);
181 if (err)
 182 goto err;
183 list_del(&pos->xskb_list_node);
184 }
 185
186 return 0;
187err:
188 xsk_buff_free(xdp);
 189 return err;
190}
191
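/* Descriptive note (added): the copy-mode helpers below choose the copy start
 * (data_meta when metadata is supported, otherwise data) and then copy into
 * pool-sized destination buffers, walking the source frags as each is drained.
 */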
192static void *xsk_copy_xdp_start(struct xdp_buff *from)
193{
194 if (unlikely(xdp_data_meta_unsupported(from)))
195 return from->data;
196 else
197 return from->data_meta;
198}
 199
200static u32 xsk_copy_xdp(void *to, void **from, u32 to_len,
201 u32 *from_len, skb_frag_t **frag, u32 rem)
202{
203 u32 copied = 0;
204
205 while (1) {
206 u32 copy_len = min_t(u32, *from_len, to_len);
207
208 memcpy(to, *from, copy_len);
209 copied += copy_len;
210 if (rem == copied)
211 return copied;
212
213 if (*from_len == copy_len) {
214 *from = skb_frag_address(*frag);
215 *from_len = skb_frag_size((*frag)++);
216 } else {
217 *from += copy_len;
218 *from_len -= copy_len;
219 }
220 if (to_len == copy_len)
221 return copied;
222
223 to_len -= copy_len;
224 to += copy_len;
225 }
 226}
 227
 228static int __xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
 229{
230 u32 frame_size = xsk_pool_get_rx_frame_size(xs->pool);
231 void *copy_from = xsk_copy_xdp_start(xdp), *copy_to;
232 u32 from_len, meta_len, rem, num_desc;
 233 struct xdp_buff_xsk *xskb;
 234 struct xdp_buff *xsk_xdp;
235 skb_frag_t *frag;
236
237 from_len = xdp->data_end - copy_from;
238 meta_len = xdp->data - copy_from;
239 rem = len + meta_len;
240
241 if (len <= frame_size && !xdp_buff_has_frags(xdp)) {
242 int err;
243
244 xsk_xdp = xsk_buff_alloc(xs->pool);
245 if (!xsk_xdp) {
246 xs->rx_dropped++;
247 return -ENOMEM;
248 }
249 memcpy(xsk_xdp->data - meta_len, copy_from, rem);
250 xskb = container_of(xsk_xdp, struct xdp_buff_xsk, xdp);
251 err = __xsk_rcv_zc(xs, xskb, len, 0);
252 if (err) {
253 xsk_buff_free(xsk_xdp);
254 return err;
255 }
256
257 return 0;
258 }
 259
260 num_desc = (len - 1) / frame_size + 1;
261
262 if (!xsk_buff_can_alloc(xs->pool, num_desc)) {
 263 xs->rx_dropped++;
 264 return -ENOMEM;
 265 }
266 if (xskq_prod_nb_free(xs->rx, num_desc) < num_desc) {
267 xs->rx_queue_full++;
268 return -ENOBUFS;
269 }
 270
271 if (xdp_buff_has_frags(xdp)) {
272 struct skb_shared_info *sinfo;
273
274 sinfo = xdp_get_shared_info_from_buff(xdp);
275 frag = &sinfo->frags[0];
 276 }
277
278 do {
279 u32 to_len = frame_size + meta_len;
280 u32 copied;
281
282 xsk_xdp = xsk_buff_alloc(xs->pool);
283 copy_to = xsk_xdp->data - meta_len;
284
285 copied = xsk_copy_xdp(copy_to, &copy_from, to_len, &from_len, &frag, rem);
286 rem -= copied;
287
288 xskb = container_of(xsk_xdp, struct xdp_buff_xsk, xdp);
289 __xsk_rcv_zc(xs, xskb, copied - meta_len, rem ? XDP_PKT_CONTD : 0);
290 meta_len = 0;
291 } while (rem);
292
 293 return 0;
294}
295
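/* Descriptive note (added): the Tx ring is reported writeable only while it is
 * at most half full, which throttles sk_write_space()/EPOLLOUT wakeups.
 */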
296static bool xsk_tx_writeable(struct xdp_sock *xs)
297{
298 if (xskq_cons_present_entries(xs->tx) > xs->tx->nentries / 2)
299 return false;
300
301 return true;
302}
303
304static bool xsk_is_bound(struct xdp_sock *xs)
305{
306 if (READ_ONCE(xs->state) == XSK_BOUND) {
307 /* Matches smp_wmb() in bind(). */
308 smp_rmb();
309 return true;
310 }
311 return false;
312}
313
 314static int xsk_rcv_check(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
 315{
 316 if (!xsk_is_bound(xs))
 317 return -ENXIO;
 318
 319 if (xs->dev != xdp->rxq->dev || xs->queue_id != xdp->rxq->queue_index)
 320 return -EINVAL;
 321
 322 if (len > xsk_pool_get_rx_frame_size(xs->pool) && !xs->sg) {
323 xs->rx_dropped++;
324 return -ENOSPC;
325 }
326
 327 sk_mark_napi_id_once_xdp(&xs->sk, xdp);
 328 return 0;
329}
330
 331static void xsk_flush(struct xdp_sock *xs)
 332{
 333 xskq_prod_submit(xs->rx);
 334 __xskq_cons_release(xs->pool->fq);
 335 sock_def_readable(&xs->sk);
336}
337
338int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
339{
 340 u32 len = xdp_get_buff_len(xdp);
341 int err;
342
 343 spin_lock_bh(&xs->rx_lock);
 344 err = xsk_rcv_check(xs, xdp, len);
 345 if (!err) {
 346 err = __xsk_rcv(xs, xdp, len);
 347 xsk_flush(xs);
 348 }
 349 spin_unlock_bh(&xs->rx_lock);
350 return err;
351}
352
353static int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
354{
 355 u32 len = xdp_get_buff_len(xdp);
 356 int err;
 357
 358 err = xsk_rcv_check(xs, xdp, len);
359 if (err)
360 return err;
361
362 if (xdp->rxq->mem.type == MEM_TYPE_XSK_BUFF_POOL) {
363 len = xdp->data_end - xdp->data;
 364 return xsk_rcv_zc(xs, xdp, len);
365 }
366
 367 err = __xsk_rcv(xs, xdp, len);
368 if (!err)
369 xdp_return_buff(xdp);
370 return err;
371}
372
 373int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp)
 374{
 375 struct list_head *flush_list = this_cpu_ptr(&xskmap_flush_list);
376 int err;
377
 378 err = xsk_rcv(xs, xdp);
379 if (err)
380 return err;
381
382 if (!xs->flush_node.prev)
383 list_add(&xs->flush_node, flush_list);
384
385 return 0;
386}
387
 388void __xsk_map_flush(void)
 389{
 390 struct list_head *flush_list = this_cpu_ptr(&xskmap_flush_list);
391 struct xdp_sock *xs, *tmp;
392
393 list_for_each_entry_safe(xs, tmp, flush_list, flush_node) {
394 xsk_flush(xs);
395 __list_del_clearprev(&xs->flush_node);
396 }
397}
398
399#ifdef CONFIG_DEBUG_NET
400bool xsk_map_check_flush(void)
401{
402 if (list_empty(this_cpu_ptr(&xskmap_flush_list)))
403 return false;
404 __xsk_map_flush();
405 return true;
406}
407#endif
408
 409void xsk_tx_completed(struct xsk_buff_pool *pool, u32 nb_entries)
 410{
 411 xskq_prod_submit_n(pool->cq, nb_entries);
 412}
 413EXPORT_SYMBOL(xsk_tx_completed);
 414
 415void xsk_tx_release(struct xsk_buff_pool *pool)
416{
417 struct xdp_sock *xs;
418
419 rcu_read_lock();
 420 list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
 421 __xskq_cons_release(xs->tx);
 422 if (xsk_tx_writeable(xs))
 423 xs->sk.sk_write_space(&xs->sk);
424 }
425 rcu_read_unlock();
426}
 427EXPORT_SYMBOL(xsk_tx_release);
 428
 429bool xsk_tx_peek_desc(struct xsk_buff_pool *pool, struct xdp_desc *desc)
 430{
 431 bool budget_exhausted = false;
 432 struct xdp_sock *xs;
 433
 434 rcu_read_lock();
 435again:
 436 list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
437 if (xs->tx_budget_spent >= MAX_PER_SOCKET_BUDGET) {
438 budget_exhausted = true;
439 continue;
440 }
441
 442 if (!xskq_cons_peek_desc(xs->tx, desc, pool)) {
443 if (xskq_has_descs(xs->tx))
444 xskq_cons_release(xs->tx);
 445 continue;
 446 }
 447
448 xs->tx_budget_spent++;
449
 450 /* This is the backpressure mechanism for the Tx path.
451 * Reserve space in the completion queue and only proceed
452 * if there is space in it. This avoids having to implement
453 * any buffering in the Tx path.
454 */
 455 if (xskq_prod_reserve_addr(pool->cq, desc->addr))
456 goto out;
457
 458 xskq_cons_release(xs->tx);
459 rcu_read_unlock();
460 return true;
461 }
462
463 if (budget_exhausted) {
464 list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list)
465 xs->tx_budget_spent = 0;
466
467 budget_exhausted = false;
468 goto again;
469 }
470
471out:
472 rcu_read_unlock();
473 return false;
474}
 475EXPORT_SYMBOL(xsk_tx_peek_desc);
 476
 477static u32 xsk_tx_peek_release_fallback(struct xsk_buff_pool *pool, u32 max_entries)
 478{
 479 struct xdp_desc *descs = pool->tx_descs;
480 u32 nb_pkts = 0;
481
482 while (nb_pkts < max_entries && xsk_tx_peek_desc(pool, &descs[nb_pkts]))
483 nb_pkts++;
484
485 xsk_tx_release(pool);
486 return nb_pkts;
487}
488
 489u32 xsk_tx_peek_release_desc_batch(struct xsk_buff_pool *pool, u32 nb_pkts)
 490{
 491 struct xdp_sock *xs;
492
493 rcu_read_lock();
494 if (!list_is_singular(&pool->xsk_tx_list)) {
495 /* Fallback to the non-batched version */
496 rcu_read_unlock();
 497 return xsk_tx_peek_release_fallback(pool, nb_pkts);
498 }
499
500 xs = list_first_or_null_rcu(&pool->xsk_tx_list, struct xdp_sock, tx_list);
501 if (!xs) {
502 nb_pkts = 0;
503 goto out;
504 }
505
 506 nb_pkts = xskq_cons_nb_entries(xs->tx, nb_pkts);
507
508 /* This is the backpressure mechanism for the Tx path. Try to
509 * reserve space in the completion queue for all packets, but
510 * if there are fewer slots available, just process that many
511 * packets. This avoids having to implement any buffering in
512 * the Tx path.
513 */
 514 nb_pkts = xskq_prod_nb_free(pool->cq, nb_pkts);
515 if (!nb_pkts)
516 goto out;
517
518 nb_pkts = xskq_cons_read_desc_batch(xs->tx, pool, nb_pkts);
519 if (!nb_pkts) {
520 xs->tx->queue_empty_descs++;
521 goto out;
522 }
523
 524 __xskq_cons_release(xs->tx);
 525 xskq_prod_write_addr_batch(pool->cq, pool->tx_descs, nb_pkts);
526 xs->sk.sk_write_space(&xs->sk);
527
528out:
529 rcu_read_unlock();
530 return nb_pkts;
531}
532EXPORT_SYMBOL(xsk_tx_peek_release_desc_batch);
533
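/* Descriptive note (added): xsk_wakeup() kicks the driver via ndo_xsk_wakeup()
 * for this queue; the zero-copy paths use it when user space needs a wakeup.
 */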
 534static int xsk_wakeup(struct xdp_sock *xs, u8 flags)
 535{
 536 struct net_device *dev = xs->dev;
 537
 538 return dev->netdev_ops->ndo_xsk_wakeup(dev, xs->queue_id, flags);
539}
540
541static int xsk_cq_reserve_addr_locked(struct xdp_sock *xs, u64 addr)
542{
543 unsigned long flags;
544 int ret;
545
546 spin_lock_irqsave(&xs->pool->cq_lock, flags);
547 ret = xskq_prod_reserve_addr(xs->pool->cq, addr);
548 spin_unlock_irqrestore(&xs->pool->cq_lock, flags);
549
550 return ret;
551}
552
553static void xsk_cq_submit_locked(struct xdp_sock *xs, u32 n)
554{
555 unsigned long flags;
556
557 spin_lock_irqsave(&xs->pool->cq_lock, flags);
558 xskq_prod_submit_n(xs->pool->cq, n);
559 spin_unlock_irqrestore(&xs->pool->cq_lock, flags);
560}
561
562static void xsk_cq_cancel_locked(struct xdp_sock *xs, u32 n)
 563{
 564 unsigned long flags;
 565
 566 spin_lock_irqsave(&xs->pool->cq_lock, flags);
 567 xskq_prod_cancel_n(xs->pool->cq, n);
 568 spin_unlock_irqrestore(&xs->pool->cq_lock, flags);
 569}
 570
571static u32 xsk_get_num_desc(struct sk_buff *skb)
572{
573 return skb ? (long)skb_shinfo(skb)->destructor_arg : 0;
574}
575
576static void xsk_destruct_skb(struct sk_buff *skb)
577{
578 struct xsk_tx_metadata_compl *compl = &skb_shinfo(skb)->xsk_meta;
579
580 if (compl->tx_timestamp) {
581 /* sw completion timestamp, not a real one */
582 *compl->tx_timestamp = ktime_get_tai_fast_ns();
583 }
584
 585 xsk_cq_submit_locked(xdp_sk(skb->sk), xsk_get_num_desc(skb));
586 sock_wfree(skb);
587}
588
589static void xsk_set_destructor_arg(struct sk_buff *skb)
590{
591 long num = xsk_get_num_desc(xdp_sk(skb->sk)->skb) + 1;
592
593 skb_shinfo(skb)->destructor_arg = (void *)num;
594}
595
596static void xsk_consume_skb(struct sk_buff *skb)
597{
598 struct xdp_sock *xs = xdp_sk(skb->sk);
599
600 skb->destructor = sock_wfree;
601 xsk_cq_cancel_locked(xs, xsk_get_num_desc(skb));
602 /* Free skb without triggering the perf drop trace */
603 consume_skb(skb);
604 xs->skb = NULL;
605}
606
607static void xsk_drop_skb(struct sk_buff *skb)
608{
609 xdp_sk(skb->sk)->tx->invalid_descs += xsk_get_num_desc(skb);
610 xsk_consume_skb(skb);
611}
612
613static struct sk_buff *xsk_build_skb_zerocopy(struct xdp_sock *xs,
614 struct xdp_desc *desc)
615{
616 struct xsk_buff_pool *pool = xs->pool;
617 u32 hr, len, ts, offset, copy, copied;
 618 struct sk_buff *skb = xs->skb;
619 struct page *page;
620 void *buffer;
621 int err, i;
622 u64 addr;
623
624 if (!skb) {
625 hr = max(NET_SKB_PAD, L1_CACHE_ALIGN(xs->dev->needed_headroom));
 626
627 skb = sock_alloc_send_skb(&xs->sk, hr, 1, &err);
628 if (unlikely(!skb))
629 return ERR_PTR(err);
 630
631 skb_reserve(skb, hr);
632 }
633
634 addr = desc->addr;
635 len = desc->len;
636 ts = pool->unaligned ? len : pool->chunk_size;
637
638 buffer = xsk_buff_raw_get_data(pool, addr);
639 offset = offset_in_page(buffer);
640 addr = buffer - pool->addrs;
641
642 for (copied = 0, i = skb_shinfo(skb)->nr_frags; copied < len; i++) {
643 if (unlikely(i >= MAX_SKB_FRAGS))
 644 return ERR_PTR(-EOVERFLOW);
 645
646 page = pool->umem->pgs[addr >> PAGE_SHIFT];
647 get_page(page);
648
649 copy = min_t(u32, PAGE_SIZE - offset, len - copied);
650 skb_fill_page_desc(skb, i, page, offset, copy);
651
652 copied += copy;
653 addr += copy;
654 offset = 0;
655 }
656
657 skb->len += len;
658 skb->data_len += len;
659 skb->truesize += ts;
660
661 refcount_add(ts, &xs->sk.sk_wmem_alloc);
662
663 return skb;
664}
665
666static struct sk_buff *xsk_build_skb(struct xdp_sock *xs,
667 struct xdp_desc *desc)
668{
 669 struct xsk_tx_metadata *meta = NULL;
 670 struct net_device *dev = xs->dev;
 671 struct sk_buff *skb = xs->skb;
 672 bool first_frag = false;
 673 int err;
674
675 if (dev->priv_flags & IFF_TX_SKB_NO_LINEAR) {
676 skb = xsk_build_skb_zerocopy(xs, desc);
677 if (IS_ERR(skb)) {
678 err = PTR_ERR(skb);
679 goto free_err;
680 }
681 } else {
682 u32 hr, tr, len;
683 void *buffer;
 684
 685 buffer = xsk_buff_raw_get_data(xs->pool, desc->addr);
686 len = desc->len;
687
688 if (!skb) {
689 hr = max(NET_SKB_PAD, L1_CACHE_ALIGN(dev->needed_headroom));
690 tr = dev->needed_tailroom;
691 skb = sock_alloc_send_skb(&xs->sk, hr + len + tr, 1, &err);
692 if (unlikely(!skb))
693 goto free_err;
 694
695 skb_reserve(skb, hr);
696 skb_put(skb, len);
 697
 698 err = skb_store_bits(skb, 0, buffer, len);
699 if (unlikely(err)) {
700 kfree_skb(skb);
 701 goto free_err;
 702 }
703
704 first_frag = true;
705 } else {
706 int nr_frags = skb_shinfo(skb)->nr_frags;
707 struct page *page;
708 u8 *vaddr;
709
710 if (unlikely(nr_frags == (MAX_SKB_FRAGS - 1) && xp_mb_desc(desc))) {
 711 err = -EOVERFLOW;
712 goto free_err;
713 }
714
715 page = alloc_page(xs->sk.sk_allocation);
716 if (unlikely(!page)) {
717 err = -EAGAIN;
718 goto free_err;
719 }
720
721 vaddr = kmap_local_page(page);
722 memcpy(vaddr, buffer, len);
723 kunmap_local(vaddr);
724
725 skb_add_rx_frag(skb, nr_frags, page, 0, len, 0);
 726 }
727
728 if (first_frag && desc->options & XDP_TX_METADATA) {
729 if (unlikely(xs->pool->tx_metadata_len == 0)) {
730 err = -EINVAL;
731 goto free_err;
732 }
733
734 meta = buffer - xs->pool->tx_metadata_len;
735 if (unlikely(!xsk_buff_valid_tx_metadata(meta))) {
736 err = -EINVAL;
737 goto free_err;
738 }
739
740 if (meta->flags & XDP_TXMD_FLAGS_CHECKSUM) {
741 if (unlikely(meta->request.csum_start +
742 meta->request.csum_offset +
743 sizeof(__sum16) > len)) {
744 err = -EINVAL;
745 goto free_err;
746 }
747
748 skb->csum_start = hr + meta->request.csum_start;
749 skb->csum_offset = meta->request.csum_offset;
750 skb->ip_summed = CHECKSUM_PARTIAL;
751
752 if (unlikely(xs->pool->tx_sw_csum)) {
753 err = skb_checksum_help(skb);
754 if (err)
755 goto free_err;
756 }
757 }
758 }
759 }
760
761 skb->dev = dev;
 762 skb->priority = READ_ONCE(xs->sk.sk_priority);
 763 skb->mark = READ_ONCE(xs->sk.sk_mark);
 764 skb->destructor = xsk_destruct_skb;
 765 xsk_tx_metadata_to_compl(meta, &skb_shinfo(skb)->xsk_meta);
 766 xsk_set_destructor_arg(skb);
767
768 return skb;
769
770free_err:
771 if (err == -EOVERFLOW) {
772 /* Drop the packet */
773 xsk_set_destructor_arg(xs->skb);
774 xsk_drop_skb(xs->skb);
 775 xskq_cons_release(xs->tx);
776 } else {
777 /* Let application retry */
778 xsk_cq_cancel_locked(xs, 1);
779 }
780
781 return ERR_PTR(err);
782}
783
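/* Descriptive note (added): copy-mode (SKB) transmit path. Descriptors are
 * peeked from the Tx ring, turned into skbs and pushed out through
 * __dev_direct_xmit(); completions are reported via the completion queue from
 * the skb destructor.
 */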
 784static int __xsk_generic_xmit(struct sock *sk)
 785{
 786 struct xdp_sock *xs = xdp_sk(sk);
 787 u32 max_batch = TX_BATCH_SIZE;
788 bool sent_frame = false;
789 struct xdp_desc desc;
790 struct sk_buff *skb;
791 int err = 0;
792
793 mutex_lock(&xs->mutex);
794
795 /* Since we dropped the RCU read lock, the socket state might have changed. */
796 if (unlikely(!xsk_is_bound(xs))) {
797 err = -ENXIO;
798 goto out;
799 }
800
801 if (xs->queue_id >= xs->dev->real_num_tx_queues)
802 goto out;
803
 804 while (xskq_cons_peek_desc(xs->tx, &desc, xs->pool)) {
805 if (max_batch-- == 0) {
806 err = -EAGAIN;
807 goto out;
808 }
809
 810 /* This is the backpressure mechanism for the Tx path.
811 * Reserve space in the completion queue and only proceed
812 * if there is space in it. This avoids having to implement
813 * any buffering in the Tx path.
814 */
 815 if (xsk_cq_reserve_addr_locked(xs, desc.addr))
 816 goto out;
 817
818 skb = xsk_build_skb(xs, &desc);
819 if (IS_ERR(skb)) {
820 err = PTR_ERR(skb);
 821 if (err != -EOVERFLOW)
822 goto out;
823 err = 0;
824 continue;
825 }
826
827 xskq_cons_release(xs->tx);
828
829 if (xp_mb_desc(&desc)) {
830 xs->skb = skb;
831 continue;
832 }
833
 834 err = __dev_direct_xmit(skb, xs->queue_id);
835 if (err == NETDEV_TX_BUSY) {
836 /* Tell user-space to retry the send */
 837 xskq_cons_cancel_n(xs->tx, xsk_get_num_desc(skb));
 838 xsk_consume_skb(skb);
839 err = -EAGAIN;
840 goto out;
841 }
842
 843 /* Ignore NET_XMIT_CN as packet might have been sent */
 844 if (err == NET_XMIT_DROP) {
845 /* SKB completed but not sent */
846 err = -EBUSY;
 847 xs->skb = NULL;
848 goto out;
849 }
850
851 sent_frame = true;
 852 xs->skb = NULL;
853 }
854
855 if (xskq_has_descs(xs->tx)) {
856 if (xs->skb)
857 xsk_drop_skb(xs->skb);
858 xskq_cons_release(xs->tx);
859 }
 860
861out:
862 if (sent_frame)
863 if (xsk_tx_writeable(xs))
864 sk->sk_write_space(sk);
865
866 mutex_unlock(&xs->mutex);
867 return err;
868}
869
 870static int xsk_generic_xmit(struct sock *sk)
 871{
 872 int ret;
 873
874 /* Drop the RCU lock since the SKB path might sleep. */
875 rcu_read_unlock();
 876 ret = __xsk_generic_xmit(sk);
 877 /* Reacquire RCU lock before going into common code. */
878 rcu_read_lock();
879
880 return ret;
881}
882
883static bool xsk_no_wakeup(struct sock *sk)
884{
885#ifdef CONFIG_NET_RX_BUSY_POLL
886 /* Prefer busy-polling, skip the wakeup. */
887 return READ_ONCE(sk->sk_prefer_busy_poll) && READ_ONCE(sk->sk_ll_usec) &&
888 READ_ONCE(sk->sk_napi_id) >= MIN_NAPI_ID;
889#else
890 return false;
891#endif
892}
893
894static int xsk_check_common(struct xdp_sock *xs)
895{
896 if (unlikely(!xsk_is_bound(xs)))
897 return -ENXIO;
898 if (unlikely(!(xs->dev->flags & IFF_UP)))
899 return -ENETDOWN;
900
901 return 0;
902}
903
 904static int __xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
 905{
 906 bool need_wait = !(m->msg_flags & MSG_DONTWAIT);
907 struct sock *sk = sock->sk;
908 struct xdp_sock *xs = xdp_sk(sk);
 909 struct xsk_buff_pool *pool;
 910 int err;
 911
912 err = xsk_check_common(xs);
913 if (err)
914 return err;
 915 if (unlikely(need_wait))
 916 return -EOPNOTSUPP;
917 if (unlikely(!xs->tx))
918 return -ENOBUFS;
 919
920 if (sk_can_busy_loop(sk)) {
921 if (xs->zc)
922 __sk_mark_napi_id_once(sk, xsk_pool_get_napi_id(xs->pool));
 923 sk_busy_loop(sk, 1); /* only support non-blocking sockets */
 924 }
 925
 926 if (xs->zc && xsk_no_wakeup(sk))
927 return 0;
928
 929 pool = xs->pool;
930 if (pool->cached_need_wakeup & XDP_WAKEUP_TX) {
931 if (xs->zc)
932 return xsk_wakeup(xs, XDP_WAKEUP_TX);
933 return xsk_generic_xmit(sk);
934 }
 935 return 0;
936}
937
938static int xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
939{
940 int ret;
941
942 rcu_read_lock();
943 ret = __xsk_sendmsg(sock, m, total_len);
944 rcu_read_unlock();
945
946 return ret;
947}
948
949static int __xsk_recvmsg(struct socket *sock, struct msghdr *m, size_t len, int flags)
950{
951 bool need_wait = !(flags & MSG_DONTWAIT);
952 struct sock *sk = sock->sk;
953 struct xdp_sock *xs = xdp_sk(sk);
 954 int err;
 955
956 err = xsk_check_common(xs);
957 if (err)
958 return err;
959 if (unlikely(!xs->rx))
960 return -ENOBUFS;
961 if (unlikely(need_wait))
962 return -EOPNOTSUPP;
963
964 if (sk_can_busy_loop(sk))
965 sk_busy_loop(sk, 1); /* only support non-blocking sockets */
966
967 if (xsk_no_wakeup(sk))
968 return 0;
969
970 if (xs->pool->cached_need_wakeup & XDP_WAKEUP_RX && xs->zc)
971 return xsk_wakeup(xs, XDP_WAKEUP_RX);
972 return 0;
973}
974
975static int xsk_recvmsg(struct socket *sock, struct msghdr *m, size_t len, int flags)
976{
977 int ret;
978
979 rcu_read_lock();
980 ret = __xsk_recvmsg(sock, m, len, flags);
981 rcu_read_unlock();
982
983 return ret;
984}
985
 986static __poll_t xsk_poll(struct file *file, struct socket *sock,
 987 struct poll_table_struct *wait)
 988{
 989 __poll_t mask = 0;
990 struct sock *sk = sock->sk;
991 struct xdp_sock *xs = xdp_sk(sk);
 992 struct xsk_buff_pool *pool;
 993
994 sock_poll_wait(file, sock, wait);
995
 996 rcu_read_lock();
 997 if (xsk_check_common(xs))
 998 goto out;
 999
 1000 pool = xs->pool;
 1001
 1002 if (pool->cached_need_wakeup) {
 1003 if (xs->zc)
 1004 xsk_wakeup(xs, pool->cached_need_wakeup);
 1005 else if (xs->tx)
 1006 /* Poll needs to drive Tx also in copy mode */
 1007 xsk_generic_xmit(sk);
 1008 }
 1009
 1010 if (xs->rx && !xskq_prod_is_empty(xs->rx))
 1011 mask |= EPOLLIN | EPOLLRDNORM;
 1012 if (xs->tx && xsk_tx_writeable(xs))
 1013 mask |= EPOLLOUT | EPOLLWRNORM;
 1014out:
 1015 rcu_read_unlock();
1016 return mask;
1017}
1018
1019static int xsk_init_queue(u32 entries, struct xsk_queue **queue,
1020 bool umem_queue)
1021{
1022 struct xsk_queue *q;
1023
1024 if (entries == 0 || *queue || !is_power_of_2(entries))
1025 return -EINVAL;
1026
 1027 q = xskq_create(entries, umem_queue);
1028 if (!q)
1029 return -ENOMEM;
1030
1031 /* Make sure queue is ready before it can be seen by others */
1032 smp_wmb();
 1033 WRITE_ONCE(*queue, q);
1034 return 0;
1035}
1036
1037static void xsk_unbind_dev(struct xdp_sock *xs)
1038{
1039 struct net_device *dev = xs->dev;
1040
 1041 if (xs->state != XSK_BOUND)
 1042 return;
 1043 WRITE_ONCE(xs->state, XSK_UNBOUND);
1044
1045 /* Wait for driver to stop using the xdp socket. */
 1046 xp_del_xsk(xs->pool, xs);
1047 synchronize_net();
1048 dev_put(dev);
1049}
1050
 1051static struct xsk_map *xsk_get_map_list_entry(struct xdp_sock *xs,
 1052 struct xdp_sock __rcu ***map_entry)
1053{
1054 struct xsk_map *map = NULL;
1055 struct xsk_map_node *node;
1056
1057 *map_entry = NULL;
1058
1059 spin_lock_bh(&xs->map_list_lock);
1060 node = list_first_entry_or_null(&xs->map_list, struct xsk_map_node,
1061 node);
1062 if (node) {
 1063 bpf_map_inc(&node->map->map);
1064 map = node->map;
1065 *map_entry = node->map_entry;
1066 }
1067 spin_unlock_bh(&xs->map_list_lock);
1068 return map;
1069}
1070
1071static void xsk_delete_from_maps(struct xdp_sock *xs)
1072{
1073 /* This function removes the current XDP socket from all the
1074 * maps it resides in. We need to take extra care here, due to
1075 * the two locks involved. Each map has a lock synchronizing
1076 * updates to the entries, and each socket has a lock that
1077 * synchronizes access to the list of maps (map_list). For
1078 * deadlock avoidance the locks need to be taken in the order
1079 * "map lock"->"socket map list lock". We start off by
1080 * accessing the socket map list, and take a reference to the
1081 * map to guarantee existence between the
1082 * xsk_get_map_list_entry() and xsk_map_try_sock_delete()
1083 * calls. Then we ask the map to remove the socket, which
1084 * tries to remove the socket from the map. Note that there
1085 * might be updates to the map between
1086 * xsk_get_map_list_entry() and xsk_map_try_sock_delete().
1087 */
 1088 struct xdp_sock __rcu **map_entry = NULL;
1089 struct xsk_map *map;
1090
1091 while ((map = xsk_get_map_list_entry(xs, &map_entry))) {
1092 xsk_map_try_sock_delete(map, xs, map_entry);
 1093 bpf_map_put(&map->map);
1094 }
1095}
1096
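/* Descriptive note (added): socket teardown drops any partially built Tx skb,
 * leaves all XSKMAPs, unbinds from the device and destroys the per-socket
 * queues.
 */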
1097static int xsk_release(struct socket *sock)
1098{
1099 struct sock *sk = sock->sk;
 1100 struct xdp_sock *xs = xdp_sk(sk);
1101 struct net *net;
1102
1103 if (!sk)
1104 return 0;
1105
1106 net = sock_net(sk);
1107
1108 if (xs->skb)
1109 xsk_drop_skb(xs->skb);
1110
1111 mutex_lock(&net->xdp.lock);
1112 sk_del_node_init_rcu(sk);
1113 mutex_unlock(&net->xdp.lock);
1114
 1115 sock_prot_inuse_add(net, sk->sk_prot, -1);
 1116
 1117 xsk_delete_from_maps(xs);
 1118 mutex_lock(&xs->mutex);
 1119 xsk_unbind_dev(xs);
 1120 mutex_unlock(&xs->mutex);
 1121
1122 xskq_destroy(xs->rx);
1123 xskq_destroy(xs->tx);
1124 xskq_destroy(xs->fq_tmp);
1125 xskq_destroy(xs->cq_tmp);
 1126
1127 sock_orphan(sk);
1128 sock->sk = NULL;
1129
1130 sock_put(sk);
1131
1132 return 0;
1133}
1134
1135static struct socket *xsk_lookup_xsk_from_fd(int fd)
1136{
1137 struct socket *sock;
1138 int err;
1139
1140 sock = sockfd_lookup(fd, &err);
1141 if (!sock)
1142 return ERR_PTR(-ENOTSOCK);
1143
1144 if (sock->sk->sk_family != PF_XDP) {
1145 sockfd_put(sock);
1146 return ERR_PTR(-ENOPROTOOPT);
1147 }
1148
1149 return sock;
1150}
1151
1152static bool xsk_validate_queues(struct xdp_sock *xs)
1153{
1154 return xs->fq_tmp && xs->cq_tmp;
1155}
1156
1157static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
1158{
1159 struct sockaddr_xdp *sxdp = (struct sockaddr_xdp *)addr;
1160 struct sock *sk = sock->sk;
 1161 struct xdp_sock *xs = xdp_sk(sk);
 1162 struct net_device *dev;
 1163 int bound_dev_if;
 1164 u32 flags, qid;
1165 int err = 0;
1166
1167 if (addr_len < sizeof(struct sockaddr_xdp))
1168 return -EINVAL;
1169 if (sxdp->sxdp_family != AF_XDP)
1170 return -EINVAL;
1171
 1172 flags = sxdp->sxdp_flags;
 1173 if (flags & ~(XDP_SHARED_UMEM | XDP_COPY | XDP_ZEROCOPY |
 1174 XDP_USE_NEED_WAKEUP | XDP_USE_SG))
1175 return -EINVAL;
1176
1177 bound_dev_if = READ_ONCE(sk->sk_bound_dev_if);
1178 if (bound_dev_if && bound_dev_if != sxdp->sxdp_ifindex)
1179 return -EINVAL;
1180
 1181 rtnl_lock();
 1182 mutex_lock(&xs->mutex);
 1183 if (xs->state != XSK_READY) {
1184 err = -EBUSY;
1185 goto out_release;
1186 }
1187
1188 dev = dev_get_by_index(sock_net(sk), sxdp->sxdp_ifindex);
1189 if (!dev) {
1190 err = -ENODEV;
1191 goto out_release;
1192 }
1193
 1194 if (!xs->rx && !xs->tx) {
1195 err = -EINVAL;
1196 goto out_unlock;
1197 }
1198
 1199 qid = sxdp->sxdp_queue_id;
1200
1201 if (flags & XDP_SHARED_UMEM) {
1202 struct xdp_sock *umem_xs;
1203 struct socket *sock;
1204
 1205 if ((flags & XDP_COPY) || (flags & XDP_ZEROCOPY) ||
 1206 (flags & XDP_USE_NEED_WAKEUP) || (flags & XDP_USE_SG)) {
1207 /* Cannot specify flags for shared sockets. */
1208 err = -EINVAL;
1209 goto out_unlock;
1210 }
1211
1212 if (xs->umem) {
1213 /* We have already our own. */
1214 err = -EINVAL;
1215 goto out_unlock;
1216 }
1217
1218 sock = xsk_lookup_xsk_from_fd(sxdp->sxdp_shared_umem_fd);
1219 if (IS_ERR(sock)) {
1220 err = PTR_ERR(sock);
1221 goto out_unlock;
1222 }
1223
1224 umem_xs = xdp_sk(sock->sk);
42fddcc7 1225 if (!xsk_is_bound(umem_xs)) {
965a9909
MK
1226 err = -EBADF;
1227 sockfd_put(sock);
1228 goto out_unlock;
 1229 }
 1230
1231 if (umem_xs->queue_id != qid || umem_xs->dev != dev) {
1232 /* Share the umem with another socket on another qid
1233 * and/or device.
1234 */
1235 xs->pool = xp_create_and_assign_umem(xs,
1236 umem_xs->umem);
1237 if (!xs->pool) {
 1238 err = -ENOMEM;
1239 sockfd_put(sock);
1240 goto out_unlock;
1241 }
1242
1243 err = xp_assign_dev_shared(xs->pool, umem_xs, dev,
1244 qid);
1245 if (err) {
1246 xp_destroy(xs->pool);
 1247 xs->pool = NULL;
1248 sockfd_put(sock);
1249 goto out_unlock;
1250 }
1251 } else {
1252 /* Share the buffer pool with the other socket. */
1253 if (xs->fq_tmp || xs->cq_tmp) {
1254 /* Do not allow setting your own fq or cq. */
1255 err = -EINVAL;
1256 sockfd_put(sock);
1257 goto out_unlock;
1258 }
1259
1260 xp_get_pool(umem_xs->pool);
1261 xs->pool = umem_xs->pool;
1262
1263 /* If underlying shared umem was created without Tx
1264 * ring, allocate Tx descs array that Tx batching API
1265 * utilizes
1266 */
1267 if (xs->tx && !xs->pool->tx_descs) {
1268 err = xp_alloc_tx_descs(xs->pool, xs);
1269 if (err) {
1270 xp_put_pool(xs->pool);
 1271 xs->pool = NULL;
1272 sockfd_put(sock);
1273 goto out_unlock;
1274 }
1275 }
1276 }
1277
 1278 xdp_get_umem(umem_xs->umem);
 1279 WRITE_ONCE(xs->umem, umem_xs->umem);
 1280 sockfd_put(sock);
 1281 } else if (!xs->umem || !xsk_validate_queues(xs)) {
1282 err = -EINVAL;
1283 goto out_unlock;
1284 } else {
1285 /* This xsk has its own umem. */
1286 xs->pool = xp_create_and_assign_umem(xs, xs->umem);
1287 if (!xs->pool) {
1288 err = -ENOMEM;
 1289 goto out_unlock;
1290 }
1291
1292 err = xp_assign_dev(xs->pool, dev, qid, flags);
1293 if (err) {
1294 xp_destroy(xs->pool);
1295 xs->pool = NULL;
1296 goto out_unlock;
1297 }
1298 }
1299
1300 /* FQ and CQ are now owned by the buffer pool and cleaned up with it. */
1301 xs->fq_tmp = NULL;
1302 xs->cq_tmp = NULL;
1303
 1304 xs->dev = dev;
 1305 xs->zc = xs->umem->zc;
 1306 xs->sg = !!(xs->umem->flags & XDP_UMEM_SG_FLAG);
 1307 xs->queue_id = qid;
 1308 xp_add_xsk(xs->pool, xs);
1309
1310out_unlock:
 1311 if (err) {
 1312 dev_put(dev);
1313 } else {
1314 /* Matches smp_rmb() in bind() for shared umem
1315 * sockets, and xsk_is_bound().
1316 */
1317 smp_wmb();
1318 WRITE_ONCE(xs->state, XSK_BOUND);
1319 }
1320out_release:
1321 mutex_unlock(&xs->mutex);
 1322 rtnl_unlock();
1323 return err;
1324}
1325
1326struct xdp_umem_reg_v1 {
1327 __u64 addr; /* Start of packet data area */
1328 __u64 len; /* Length of packet data area */
1329 __u32 chunk_size;
1330 __u32 headroom;
1331};
1332
1333struct xdp_umem_reg_v2 {
1334 __u64 addr; /* Start of packet data area */
1335 __u64 len; /* Length of packet data area */
1336 __u32 chunk_size;
1337 __u32 headroom;
1338 __u32 flags;
1339};
1340
 1341static int xsk_setsockopt(struct socket *sock, int level, int optname,
 1342 sockptr_t optval, unsigned int optlen)
1343{
1344 struct sock *sk = sock->sk;
1345 struct xdp_sock *xs = xdp_sk(sk);
1346 int err;
1347
1348 if (level != SOL_XDP)
1349 return -ENOPROTOOPT;
1350
1351 switch (optname) {
 1352 case XDP_RX_RING:
 1353 case XDP_TX_RING:
1354 {
1355 struct xsk_queue **q;
1356 int entries;
1357
1358 if (optlen < sizeof(entries))
1359 return -EINVAL;
 1360 if (copy_from_sockptr(&entries, optval, sizeof(entries)))
1361 return -EFAULT;
1362
1363 mutex_lock(&xs->mutex);
1364 if (xs->state != XSK_READY) {
1365 mutex_unlock(&xs->mutex);
1366 return -EBUSY;
1367 }
 1368 q = (optname == XDP_TX_RING) ? &xs->tx : &xs->rx;
 1369 err = xsk_init_queue(entries, q, false);
1370 if (!err && optname == XDP_TX_RING)
1371 /* Tx needs to be explicitly woken up the first time */
1372 xs->tx->ring->flags |= XDP_RING_NEED_WAKEUP;
1373 mutex_unlock(&xs->mutex);
1374 return err;
1375 }
1376 case XDP_UMEM_REG:
1377 {
1378 size_t mr_size = sizeof(struct xdp_umem_reg);
1379 struct xdp_umem_reg mr = {};
1380 struct xdp_umem *umem;
1381
1382 if (optlen < sizeof(struct xdp_umem_reg_v1))
1383 return -EINVAL;
 1384 else if (optlen < sizeof(struct xdp_umem_reg_v2))
 1385 mr_size = sizeof(struct xdp_umem_reg_v1);
1386 else if (optlen < sizeof(mr))
1387 mr_size = sizeof(struct xdp_umem_reg_v2);
 1388
 1389 if (copy_from_sockptr(&mr, optval, mr_size))
1390 return -EFAULT;
1391
1392 mutex_lock(&xs->mutex);
 1393 if (xs->state != XSK_READY || xs->umem) {
1394 mutex_unlock(&xs->mutex);
1395 return -EBUSY;
1396 }
 1397
1398 umem = xdp_umem_create(&mr);
1399 if (IS_ERR(umem)) {
 1400 mutex_unlock(&xs->mutex);
 1401 return PTR_ERR(umem);
1402 }
1403
1404 /* Make sure umem is ready before it can be seen by others */
1405 smp_wmb();
 1406 WRITE_ONCE(xs->umem, umem);
1407 mutex_unlock(&xs->mutex);
1408 return 0;
1409 }
 1410 case XDP_UMEM_FILL_RING:
 1411 case XDP_UMEM_COMPLETION_RING:
1412 {
1413 struct xsk_queue **q;
1414 int entries;
1415
 1416 if (copy_from_sockptr(&entries, optval, sizeof(entries)))
1417 return -EFAULT;
1418
1419 mutex_lock(&xs->mutex);
1420 if (xs->state != XSK_READY) {
1421 mutex_unlock(&xs->mutex);
1422 return -EBUSY;
1423 }
 1424
1425 q = (optname == XDP_UMEM_FILL_RING) ? &xs->fq_tmp :
1426 &xs->cq_tmp;
 1427 err = xsk_init_queue(entries, q, true);
1428 mutex_unlock(&xs->mutex);
1429 return err;
1430 }
1431 default:
1432 break;
1433 }
1434
1435 return -ENOPROTOOPT;
1436}
1437
1438static void xsk_enter_rxtx_offsets(struct xdp_ring_offset_v1 *ring)
1439{
1440 ring->producer = offsetof(struct xdp_rxtx_ring, ptrs.producer);
1441 ring->consumer = offsetof(struct xdp_rxtx_ring, ptrs.consumer);
1442 ring->desc = offsetof(struct xdp_rxtx_ring, desc);
1443}
1444
1445static void xsk_enter_umem_offsets(struct xdp_ring_offset_v1 *ring)
1446{
1447 ring->producer = offsetof(struct xdp_umem_ring, ptrs.producer);
1448 ring->consumer = offsetof(struct xdp_umem_ring, ptrs.consumer);
1449 ring->desc = offsetof(struct xdp_umem_ring, desc);
1450}
1451
1452struct xdp_statistics_v1 {
1453 __u64 rx_dropped;
1454 __u64 rx_invalid_descs;
1455 __u64 tx_invalid_descs;
1456};
1457
1458static int xsk_getsockopt(struct socket *sock, int level, int optname,
1459 char __user *optval, int __user *optlen)
1460{
1461 struct sock *sk = sock->sk;
1462 struct xdp_sock *xs = xdp_sk(sk);
1463 int len;
1464
1465 if (level != SOL_XDP)
1466 return -ENOPROTOOPT;
1467
1468 if (get_user(len, optlen))
1469 return -EFAULT;
1470 if (len < 0)
1471 return -EINVAL;
1472
1473 switch (optname) {
1474 case XDP_STATISTICS:
1475 {
 1476 struct xdp_statistics stats = {};
1477 bool extra_stats = true;
1478 size_t stats_size;
 1479
 1480 if (len < sizeof(struct xdp_statistics_v1)) {
 1481 return -EINVAL;
1482 } else if (len < sizeof(stats)) {
1483 extra_stats = false;
1484 stats_size = sizeof(struct xdp_statistics_v1);
1485 } else {
1486 stats_size = sizeof(stats);
1487 }
1488
1489 mutex_lock(&xs->mutex);
1490 stats.rx_dropped = xs->rx_dropped;
1491 if (extra_stats) {
1492 stats.rx_ring_full = xs->rx_queue_full;
1493 stats.rx_fill_ring_empty_descs =
 1494 xs->pool ? xskq_nb_queue_empty_descs(xs->pool->fq) : 0;
1495 stats.tx_ring_empty_descs = xskq_nb_queue_empty_descs(xs->tx);
1496 } else {
1497 stats.rx_dropped += xs->rx_queue_full;
1498 }
1499 stats.rx_invalid_descs = xskq_nb_invalid_descs(xs->rx);
1500 stats.tx_invalid_descs = xskq_nb_invalid_descs(xs->tx);
1501 mutex_unlock(&xs->mutex);
1502
 1503 if (copy_to_user(optval, &stats, stats_size))
 1504 return -EFAULT;
 1505 if (put_user(stats_size, optlen))
1506 return -EFAULT;
1507
1508 return 0;
1509 }
1510 case XDP_MMAP_OFFSETS:
1511 {
1512 struct xdp_mmap_offsets off;
1513 struct xdp_mmap_offsets_v1 off_v1;
1514 bool flags_supported = true;
1515 void *to_copy;
 1516
 1517 if (len < sizeof(off_v1))
 1518 return -EINVAL;
1519 else if (len < sizeof(off))
1520 flags_supported = false;
1521
1522 if (flags_supported) {
1523 /* xdp_ring_offset is identical to xdp_ring_offset_v1
1524 * except for the flags field added to the end.
1525 */
1526 xsk_enter_rxtx_offsets((struct xdp_ring_offset_v1 *)
1527 &off.rx);
1528 xsk_enter_rxtx_offsets((struct xdp_ring_offset_v1 *)
1529 &off.tx);
1530 xsk_enter_umem_offsets((struct xdp_ring_offset_v1 *)
1531 &off.fr);
1532 xsk_enter_umem_offsets((struct xdp_ring_offset_v1 *)
1533 &off.cr);
1534 off.rx.flags = offsetof(struct xdp_rxtx_ring,
1535 ptrs.flags);
1536 off.tx.flags = offsetof(struct xdp_rxtx_ring,
1537 ptrs.flags);
1538 off.fr.flags = offsetof(struct xdp_umem_ring,
1539 ptrs.flags);
1540 off.cr.flags = offsetof(struct xdp_umem_ring,
1541 ptrs.flags);
1542
1543 len = sizeof(off);
1544 to_copy = &off;
1545 } else {
1546 xsk_enter_rxtx_offsets(&off_v1.rx);
1547 xsk_enter_rxtx_offsets(&off_v1.tx);
1548 xsk_enter_umem_offsets(&off_v1.fr);
1549 xsk_enter_umem_offsets(&off_v1.cr);
1550
1551 len = sizeof(off_v1);
1552 to_copy = &off_v1;
1553 }
 1554
 1555 if (copy_to_user(optval, to_copy, len))
1556 return -EFAULT;
1557 if (put_user(len, optlen))
1558 return -EFAULT;
1559
1560 return 0;
1561 }
1562 case XDP_OPTIONS:
1563 {
1564 struct xdp_options opts = {};
1565
1566 if (len < sizeof(opts))
1567 return -EINVAL;
1568
1569 mutex_lock(&xs->mutex);
1570 if (xs->zc)
1571 opts.flags |= XDP_OPTIONS_ZEROCOPY;
1572 mutex_unlock(&xs->mutex);
1573
1574 len = sizeof(opts);
1575 if (copy_to_user(optval, &opts, len))
1576 return -EFAULT;
1577 if (put_user(len, optlen))
1578 return -EFAULT;
1579
1580 return 0;
1581 }
1582 default:
1583 break;
1584 }
1585
1586 return -EOPNOTSUPP;
1587}
1588
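/* Descriptive note (added): xsk_mmap() maps one of the four rings into user
 * space; the page offset in vm_pgoff selects between the Rx, Tx, fill and
 * completion rings.
 */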
1589static int xsk_mmap(struct file *file, struct socket *sock,
1590 struct vm_area_struct *vma)
1591{
 1592 loff_t offset = (loff_t)vma->vm_pgoff << PAGE_SHIFT;
1593 unsigned long size = vma->vm_end - vma->vm_start;
1594 struct xdp_sock *xs = xdp_sk(sock->sk);
 1595 int state = READ_ONCE(xs->state);
 1596 struct xsk_queue *q = NULL;
 1597
 1598 if (state != XSK_READY && state != XSK_BOUND)
1599 return -EBUSY;
1600
 1601 if (offset == XDP_PGOFF_RX_RING) {
 1602 q = READ_ONCE(xs->rx);
 1603 } else if (offset == XDP_PGOFF_TX_RING) {
 1604 q = READ_ONCE(xs->tx);
 1605 } else {
1606 /* Matches the smp_wmb() in XDP_UMEM_REG */
1607 smp_rmb();
 1608 if (offset == XDP_UMEM_PGOFF_FILL_RING)
1609 q = state == XSK_READY ? READ_ONCE(xs->fq_tmp) :
1610 READ_ONCE(xs->pool->fq);
 1611 else if (offset == XDP_UMEM_PGOFF_COMPLETION_RING)
1612 q = state == XSK_READY ? READ_ONCE(xs->cq_tmp) :
1613 READ_ONCE(xs->pool->cq);
 1614 }
1615
1616 if (!q)
1617 return -EINVAL;
1618
1619 /* Matches the smp_wmb() in xsk_init_queue */
1620 smp_rmb();
 1621 if (size > q->ring_vmalloc_size)
1622 return -EINVAL;
1623
 1624 return remap_vmalloc_range(vma, q->ring, 0);
1625}
1626
1627static int xsk_notifier(struct notifier_block *this,
1628 unsigned long msg, void *ptr)
1629{
1630 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
1631 struct net *net = dev_net(dev);
1632 struct sock *sk;
1633
1634 switch (msg) {
1635 case NETDEV_UNREGISTER:
1636 mutex_lock(&net->xdp.lock);
1637 sk_for_each(sk, &net->xdp.list) {
1638 struct xdp_sock *xs = xdp_sk(sk);
1639
1640 mutex_lock(&xs->mutex);
1641 if (xs->dev == dev) {
1642 sk->sk_err = ENETDOWN;
1643 if (!sock_flag(sk, SOCK_DEAD))
 1644 sk_error_report(sk);
1645
1646 xsk_unbind_dev(xs);
1647
1648 /* Clear device references. */
1649 xp_clear_dev(xs->pool);
1650 }
1651 mutex_unlock(&xs->mutex);
1652 }
1653 mutex_unlock(&net->xdp.lock);
1654 break;
1655 }
1656 return NOTIFY_DONE;
1657}
1658
1659static struct proto xsk_proto = {
1660 .name = "XDP",
1661 .owner = THIS_MODULE,
1662 .obj_size = sizeof(struct xdp_sock),
1663};
1664
1665static const struct proto_ops xsk_proto_ops = {
1666 .family = PF_XDP,
1667 .owner = THIS_MODULE,
1668 .release = xsk_release,
1669 .bind = xsk_bind,
1670 .connect = sock_no_connect,
1671 .socketpair = sock_no_socketpair,
1672 .accept = sock_no_accept,
1673 .getname = sock_no_getname,
 1674 .poll = xsk_poll,
1675 .ioctl = sock_no_ioctl,
1676 .listen = sock_no_listen,
1677 .shutdown = sock_no_shutdown,
1678 .setsockopt = xsk_setsockopt,
1679 .getsockopt = xsk_getsockopt,
1680 .sendmsg = xsk_sendmsg,
 1681 .recvmsg = xsk_recvmsg,
 1682 .mmap = xsk_mmap,
1683};
1684
1685static void xsk_destruct(struct sock *sk)
1686{
1687 struct xdp_sock *xs = xdp_sk(sk);
1688
1689 if (!sock_flag(sk, SOCK_DEAD))
1690 return;
1691
 1692 if (!xp_put_pool(xs->pool))
 1693 xdp_put_umem(xs->umem, !xs->pool);
1694}
1695
1696static int xsk_create(struct net *net, struct socket *sock, int protocol,
1697 int kern)
1698{
 1699 struct xdp_sock *xs;
 1700 struct sock *sk;
1701
1702 if (!ns_capable(net->user_ns, CAP_NET_RAW))
1703 return -EPERM;
1704 if (sock->type != SOCK_RAW)
1705 return -ESOCKTNOSUPPORT;
1706
1707 if (protocol)
1708 return -EPROTONOSUPPORT;
1709
1710 sock->state = SS_UNCONNECTED;
1711
1712 sk = sk_alloc(net, PF_XDP, GFP_KERNEL, &xsk_proto, kern);
1713 if (!sk)
1714 return -ENOBUFS;
1715
1716 sock->ops = &xsk_proto_ops;
1717
1718 sock_init_data(sock, sk);
1719
1720 sk->sk_family = PF_XDP;
1721
 1722 sk->sk_destruct = xsk_destruct;
 1723
1724 sock_set_flag(sk, SOCK_RCU_FREE);
1725
 1726 xs = xdp_sk(sk);
 1727 xs->state = XSK_READY;
 1728 mutex_init(&xs->mutex);
 1729 spin_lock_init(&xs->rx_lock);
 1730
1731 INIT_LIST_HEAD(&xs->map_list);
1732 spin_lock_init(&xs->map_list_lock);
1733
1734 mutex_lock(&net->xdp.lock);
1735 sk_add_node_rcu(sk, &net->xdp.list);
1736 mutex_unlock(&net->xdp.lock);
1737
 1738 sock_prot_inuse_add(net, &xsk_proto, 1);
1739
1740 return 0;
1741}
1742
1743static const struct net_proto_family xsk_family_ops = {
1744 .family = PF_XDP,
1745 .create = xsk_create,
1746 .owner = THIS_MODULE,
1747};
1748
1749static struct notifier_block xsk_netdev_notifier = {
1750 .notifier_call = xsk_notifier,
1751};
1752
1753static int __net_init xsk_net_init(struct net *net)
1754{
1755 mutex_init(&net->xdp.lock);
1756 INIT_HLIST_HEAD(&net->xdp.list);
1757 return 0;
1758}
1759
1760static void __net_exit xsk_net_exit(struct net *net)
1761{
1762 WARN_ON_ONCE(!hlist_empty(&net->xdp.list));
1763}
1764
1765static struct pernet_operations xsk_net_ops = {
1766 .init = xsk_net_init,
1767 .exit = xsk_net_exit,
1768};
1769
1770static int __init xsk_init(void)
1771{
 1772 int err, cpu;
1773
1774 err = proto_register(&xsk_proto, 0 /* no slab */);
1775 if (err)
1776 goto out;
1777
1778 err = sock_register(&xsk_family_ops);
1779 if (err)
1780 goto out_proto;
1781
1782 err = register_pernet_subsys(&xsk_net_ops);
1783 if (err)
1784 goto out_sk;
1785
1786 err = register_netdevice_notifier(&xsk_netdev_notifier);
1787 if (err)
1788 goto out_pernet;
1789
1790 for_each_possible_cpu(cpu)
1791 INIT_LIST_HEAD(&per_cpu(xskmap_flush_list, cpu));
1792 return 0;
1793
1794out_pernet:
1795 unregister_pernet_subsys(&xsk_net_ops);
1796out_sk:
1797 sock_unregister(PF_XDP);
1798out_proto:
1799 proto_unregister(&xsk_proto);
1800out:
1801 return err;
1802}
1803
1804fs_initcall(xsk_init);