// SPDX-License-Identifier: GPL-2.0
/* XDP sockets
 *
 * AF_XDP sockets allow a channel between XDP programs and userspace
 * applications.
 * Copyright(c) 2018 Intel Corporation.
 *
 * Author(s): Björn Töpel <bjorn.topel@intel.com>
 *	      Magnus Karlsson <magnus.karlsson@intel.com>
 */

#define pr_fmt(fmt) "AF_XDP: %s: " fmt, __func__

#include <linux/if_xdp.h>
#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/socket.h>
#include <linux/file.h>
#include <linux/uaccess.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/rculist.h>
#include <net/xdp_sock_drv.h>
#include <net/xdp.h>

#include "xsk_queue.h"
#include "xdp_umem.h"
#include "xsk.h"

#define TX_BATCH_SIZE 16

static DEFINE_PER_CPU(struct list_head, xskmap_flush_list);

void xsk_set_rx_need_wakeup(struct xsk_buff_pool *pool)
{
	if (pool->cached_need_wakeup & XDP_WAKEUP_RX)
		return;

	pool->fq->ring->flags |= XDP_RING_NEED_WAKEUP;
	pool->cached_need_wakeup |= XDP_WAKEUP_RX;
}
EXPORT_SYMBOL(xsk_set_rx_need_wakeup);

void xsk_set_tx_need_wakeup(struct xsk_buff_pool *pool)
{
	struct xdp_sock *xs;

	if (pool->cached_need_wakeup & XDP_WAKEUP_TX)
		return;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
		xs->tx->ring->flags |= XDP_RING_NEED_WAKEUP;
	}
	rcu_read_unlock();

	pool->cached_need_wakeup |= XDP_WAKEUP_TX;
}
EXPORT_SYMBOL(xsk_set_tx_need_wakeup);

void xsk_clear_rx_need_wakeup(struct xsk_buff_pool *pool)
{
	if (!(pool->cached_need_wakeup & XDP_WAKEUP_RX))
		return;

	pool->fq->ring->flags &= ~XDP_RING_NEED_WAKEUP;
	pool->cached_need_wakeup &= ~XDP_WAKEUP_RX;
}
EXPORT_SYMBOL(xsk_clear_rx_need_wakeup);

void xsk_clear_tx_need_wakeup(struct xsk_buff_pool *pool)
{
	struct xdp_sock *xs;

	if (!(pool->cached_need_wakeup & XDP_WAKEUP_TX))
		return;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
		xs->tx->ring->flags &= ~XDP_RING_NEED_WAKEUP;
	}
	rcu_read_unlock();

	pool->cached_need_wakeup &= ~XDP_WAKEUP_TX;
}
EXPORT_SYMBOL(xsk_clear_tx_need_wakeup);

bool xsk_uses_need_wakeup(struct xsk_buff_pool *pool)
{
	return pool->uses_need_wakeup;
}
EXPORT_SYMBOL(xsk_uses_need_wakeup);

struct xsk_buff_pool *xsk_get_pool_from_qid(struct net_device *dev,
					    u16 queue_id)
{
	if (queue_id < dev->real_num_rx_queues)
		return dev->_rx[queue_id].pool;
	if (queue_id < dev->real_num_tx_queues)
		return dev->_tx[queue_id].pool;

	return NULL;
}
EXPORT_SYMBOL(xsk_get_pool_from_qid);

void xsk_clear_pool_at_qid(struct net_device *dev, u16 queue_id)
{
	if (queue_id < dev->real_num_rx_queues)
		dev->_rx[queue_id].pool = NULL;
	if (queue_id < dev->real_num_tx_queues)
		dev->_tx[queue_id].pool = NULL;
}

/* The buffer pool is stored both in the _rx struct and the _tx struct as we do
 * not know if the device has more tx queues than rx, or the opposite.
 * This might also change during run time.
 */
int xsk_reg_pool_at_qid(struct net_device *dev, struct xsk_buff_pool *pool,
			u16 queue_id)
{
	if (queue_id >= max_t(unsigned int,
			      dev->real_num_rx_queues,
			      dev->real_num_tx_queues))
		return -EINVAL;

	if (queue_id < dev->real_num_rx_queues)
		dev->_rx[queue_id].pool = pool;
	if (queue_id < dev->real_num_tx_queues)
		dev->_tx[queue_id].pool = pool;

	return 0;
}

void xp_release(struct xdp_buff_xsk *xskb)
{
	xskb->pool->free_heads[xskb->pool->free_heads_cnt++] = xskb;
}

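/* Compute the ring handle for a buffer from its original (chunk) address and
 * the offset of the current data pointer within the buffer, including the
 * pool headroom. In aligned mode the offset is simply added to the address;
 * in unaligned mode it is carried in the upper bits of the 64-bit handle via
 * XSK_UNALIGNED_BUF_OFFSET_SHIFT.
 */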
static u64 xp_get_handle(struct xdp_buff_xsk *xskb)
{
	u64 offset = xskb->xdp.data - xskb->xdp.data_hard_start;

	offset += xskb->pool->headroom;
	if (!xskb->pool->unaligned)
		return xskb->orig_addr + offset;
	return xskb->orig_addr + (offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT);
}

static int __xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
{
	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);
	u64 addr;
	int err;

	addr = xp_get_handle(xskb);
	err = xskq_prod_reserve_desc(xs->rx, addr, len);
	if (err) {
		xs->rx_queue_full++;
		return err;
	}

	xp_release(xskb);
	return 0;
}

static void xsk_copy_xdp(struct xdp_buff *to, struct xdp_buff *from, u32 len)
{
	void *from_buf, *to_buf;
	u32 metalen;

	if (unlikely(xdp_data_meta_unsupported(from))) {
		from_buf = from->data;
		to_buf = to->data;
		metalen = 0;
	} else {
		from_buf = from->data_meta;
		metalen = from->data - from->data_meta;
		to_buf = to->data - metalen;
	}

	memcpy(to_buf, from_buf, len + metalen);
}

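/* Copy-mode receive: allocate a buffer from the socket's pool, copy the frame
 * (and any metadata) into it and post it on the Rx ring. Used when the packet
 * does not already live in an XSK buffer pool.
 */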
static int __xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len,
		     bool explicit_free)
{
	struct xdp_buff *xsk_xdp;
	int err;

	if (len > xsk_pool_get_rx_frame_size(xs->pool)) {
		xs->rx_dropped++;
		return -ENOSPC;
	}

	xsk_xdp = xsk_buff_alloc(xs->pool);
	if (!xsk_xdp) {
		xs->rx_dropped++;
		return -ENOSPC;
	}

	xsk_copy_xdp(xsk_xdp, xdp, len);
	err = __xsk_rcv_zc(xs, xsk_xdp, len);
	if (err) {
		xsk_buff_free(xsk_xdp);
		return err;
	}
	if (explicit_free)
		xdp_return_buff(xdp);
	return 0;
}

static bool xsk_is_bound(struct xdp_sock *xs)
{
	if (READ_ONCE(xs->state) == XSK_BOUND) {
		/* Matches smp_wmb() in bind(). */
		smp_rmb();
		return true;
	}
	return false;
}

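/* Common receive entry point: check that the socket is bound to the receiving
 * device and queue, then take the zero-copy path if the frame already comes
 * from an XSK buffer pool, or fall back to the copy path otherwise.
 */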
static int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp,
		   bool explicit_free)
{
	u32 len;

	if (!xsk_is_bound(xs))
		return -EINVAL;

	if (xs->dev != xdp->rxq->dev || xs->queue_id != xdp->rxq->queue_index)
		return -EINVAL;

	len = xdp->data_end - xdp->data;

	return xdp->rxq->mem.type == MEM_TYPE_XSK_BUFF_POOL ?
		__xsk_rcv_zc(xs, xdp, len) :
		__xsk_rcv(xs, xdp, len, explicit_free);
}

static void xsk_flush(struct xdp_sock *xs)
{
	xskq_prod_submit(xs->rx);
	__xskq_cons_release(xs->pool->fq);
	sock_def_readable(&xs->sk);
}

int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	int err;

	spin_lock_bh(&xs->rx_lock);
	err = xsk_rcv(xs, xdp, false);
	xsk_flush(xs);
	spin_unlock_bh(&xs->rx_lock);
	return err;
}

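/* Called from the XDP_REDIRECT path. The socket is queued on a per-CPU flush
 * list so that Rx ring producer updates and wakeups are batched in
 * __xsk_map_flush(), which runs once the redirect batch is done (typically at
 * the end of a NAPI poll).
 */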
int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	struct list_head *flush_list = this_cpu_ptr(&xskmap_flush_list);
	int err;

	err = xsk_rcv(xs, xdp, true);
	if (err)
		return err;

	if (!xs->flush_node.prev)
		list_add(&xs->flush_node, flush_list);

	return 0;
}

void __xsk_map_flush(void)
{
	struct list_head *flush_list = this_cpu_ptr(&xskmap_flush_list);
	struct xdp_sock *xs, *tmp;

	list_for_each_entry_safe(xs, tmp, flush_list, flush_node) {
		xsk_flush(xs);
		__list_del_clearprev(&xs->flush_node);
	}
}

void xsk_tx_completed(struct xsk_buff_pool *pool, u32 nb_entries)
{
	xskq_prod_submit_n(pool->cq, nb_entries);
}
EXPORT_SYMBOL(xsk_tx_completed);

void xsk_tx_release(struct xsk_buff_pool *pool)
{
	struct xdp_sock *xs;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
		__xskq_cons_release(xs->tx);
		xs->sk.sk_write_space(&xs->sk);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(xsk_tx_release);

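/* Used by zero-copy drivers to fetch the next descriptor to send from any
 * socket sharing this pool. A completion queue slot is reserved before the
 * descriptor is handed out, so completing the packet later cannot fail.
 */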
bool xsk_tx_peek_desc(struct xsk_buff_pool *pool, struct xdp_desc *desc)
{
	struct xdp_sock *xs;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
		if (!xskq_cons_peek_desc(xs->tx, desc, pool)) {
			xs->tx->queue_empty_descs++;
			continue;
		}

		/* This is the backpressure mechanism for the Tx path.
		 * Reserve space in the completion queue and only proceed
		 * if there is space in it. This avoids having to implement
		 * any buffering in the Tx path.
		 */
		if (xskq_prod_reserve_addr(pool->cq, desc->addr))
			goto out;

		xskq_cons_release(xs->tx);
		rcu_read_unlock();
		return true;
	}

out:
	rcu_read_unlock();
	return false;
}
EXPORT_SYMBOL(xsk_tx_peek_desc);

static int xsk_wakeup(struct xdp_sock *xs, u8 flags)
{
	struct net_device *dev = xs->dev;
	int err;

	rcu_read_lock();
	err = dev->netdev_ops->ndo_xsk_wakeup(dev, xs->queue_id, flags);
	rcu_read_unlock();

	return err;
}

static int xsk_zc_xmit(struct xdp_sock *xs)
{
	return xsk_wakeup(xs, XDP_WAKEUP_TX);
}

static void xsk_destruct_skb(struct sk_buff *skb)
{
	u64 addr = (u64)(long)skb_shinfo(skb)->destructor_arg;
	struct xdp_sock *xs = xdp_sk(skb->sk);
	unsigned long flags;

	spin_lock_irqsave(&xs->tx_completion_lock, flags);
	xskq_prod_submit_addr(xs->pool->cq, addr);
	spin_unlock_irqrestore(&xs->tx_completion_lock, flags);

	sock_wfree(skb);
}

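/* Copy-mode transmit: for each Tx descriptor, allocate an skb, copy the frame
 * out of the umem and hand the skb straight to the driver queue with
 * dev_direct_xmit(). The descriptor is completed from the skb destructor.
 */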
static int xsk_generic_xmit(struct sock *sk)
{
	struct xdp_sock *xs = xdp_sk(sk);
	u32 max_batch = TX_BATCH_SIZE;
	bool sent_frame = false;
	struct xdp_desc desc;
	struct sk_buff *skb;
	int err = 0;

	mutex_lock(&xs->mutex);

	if (xs->queue_id >= xs->dev->real_num_tx_queues)
		goto out;

	while (xskq_cons_peek_desc(xs->tx, &desc, xs->pool)) {
		char *buffer;
		u64 addr;
		u32 len;

		if (max_batch-- == 0) {
			err = -EAGAIN;
			goto out;
		}

		len = desc.len;
		skb = sock_alloc_send_skb(sk, len, 1, &err);
		if (unlikely(!skb))
			goto out;

		skb_put(skb, len);
		addr = desc.addr;
		buffer = xsk_buff_raw_get_data(xs->pool, addr);
		err = skb_store_bits(skb, 0, buffer, len);
		/* This is the backpressure mechanism for the Tx path.
		 * Reserve space in the completion queue and only proceed
		 * if there is space in it. This avoids having to implement
		 * any buffering in the Tx path.
		 */
		if (unlikely(err) || xskq_prod_reserve(xs->pool->cq)) {
			kfree_skb(skb);
			goto out;
		}

		skb->dev = xs->dev;
		skb->priority = sk->sk_priority;
		skb->mark = sk->sk_mark;
		skb_shinfo(skb)->destructor_arg = (void *)(long)desc.addr;
		skb->destructor = xsk_destruct_skb;

		/* Hinder dev_direct_xmit from freeing the packet and
		 * therefore completing it in the destructor
		 */
		refcount_inc(&skb->users);
		err = dev_direct_xmit(skb, xs->queue_id);
		if (err == NETDEV_TX_BUSY) {
			/* Tell user-space to retry the send */
			skb->destructor = sock_wfree;
			/* Free skb without triggering the perf drop trace */
			consume_skb(skb);
			err = -EAGAIN;
			goto out;
		}

		xskq_cons_release(xs->tx);
		/* Ignore NET_XMIT_CN as packet might have been sent */
		if (err == NET_XMIT_DROP) {
			/* SKB completed but not sent */
			kfree_skb(skb);
			err = -EBUSY;
			goto out;
		}

		consume_skb(skb);
		sent_frame = true;
	}

	xs->tx->queue_empty_descs++;

out:
	if (sent_frame)
		sk->sk_write_space(sk);

	mutex_unlock(&xs->mutex);
	return err;
}

static int __xsk_sendmsg(struct sock *sk)
{
	struct xdp_sock *xs = xdp_sk(sk);

	if (unlikely(!(xs->dev->flags & IFF_UP)))
		return -ENETDOWN;
	if (unlikely(!xs->tx))
		return -ENOBUFS;

	return xs->zc ? xsk_zc_xmit(xs) : xsk_generic_xmit(sk);
}

static int xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
{
	bool need_wait = !(m->msg_flags & MSG_DONTWAIT);
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);

	if (unlikely(!xsk_is_bound(xs)))
		return -ENXIO;
	if (unlikely(need_wait))
		return -EOPNOTSUPP;

	return __xsk_sendmsg(sk);
}

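/* Besides the usual datagram poll, kick the driver (zero-copy) or run the
 * copy-mode Tx path directly when a wakeup has been requested, and report
 * readability/writability from the Rx and Tx ring fill levels.
 */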
static __poll_t xsk_poll(struct file *file, struct socket *sock,
			 struct poll_table_struct *wait)
{
	__poll_t mask = datagram_poll(file, sock, wait);
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct xsk_buff_pool *pool;

	if (unlikely(!xsk_is_bound(xs)))
		return mask;

	pool = xs->pool;

	if (pool->cached_need_wakeup) {
		if (xs->zc)
			xsk_wakeup(xs, pool->cached_need_wakeup);
		else
			/* Poll needs to drive Tx also in copy mode */
			__xsk_sendmsg(sk);
	}

	if (xs->rx && !xskq_prod_is_empty(xs->rx))
		mask |= EPOLLIN | EPOLLRDNORM;
	if (xs->tx && !xskq_cons_is_full(xs->tx))
		mask |= EPOLLOUT | EPOLLWRNORM;

	return mask;
}

static int xsk_init_queue(u32 entries, struct xsk_queue **queue,
			  bool umem_queue)
{
	struct xsk_queue *q;

	if (entries == 0 || *queue || !is_power_of_2(entries))
		return -EINVAL;

	q = xskq_create(entries, umem_queue);
	if (!q)
		return -ENOMEM;

	/* Make sure queue is ready before it can be seen by others */
	smp_wmb();
	WRITE_ONCE(*queue, q);
	return 0;
}

static void xsk_unbind_dev(struct xdp_sock *xs)
{
	struct net_device *dev = xs->dev;

	if (xs->state != XSK_BOUND)
		return;
	WRITE_ONCE(xs->state, XSK_UNBOUND);

	/* Wait for driver to stop using the xdp socket. */
	xp_del_xsk(xs->pool, xs);
	xs->dev = NULL;
	synchronize_net();
	dev_put(dev);
}

static struct xsk_map *xsk_get_map_list_entry(struct xdp_sock *xs,
					      struct xdp_sock ***map_entry)
{
	struct xsk_map *map = NULL;
	struct xsk_map_node *node;

	*map_entry = NULL;

	spin_lock_bh(&xs->map_list_lock);
	node = list_first_entry_or_null(&xs->map_list, struct xsk_map_node,
					node);
	if (node) {
		WARN_ON(xsk_map_inc(node->map));
		map = node->map;
		*map_entry = node->map_entry;
	}
	spin_unlock_bh(&xs->map_list_lock);
	return map;
}

static void xsk_delete_from_maps(struct xdp_sock *xs)
{
	/* This function removes the current XDP socket from all the
	 * maps it resides in. We need to take extra care here, due to
	 * the two locks involved. Each map has a lock synchronizing
	 * updates to the entries, and each socket has a lock that
	 * synchronizes access to the list of maps (map_list). For
	 * deadlock avoidance the locks need to be taken in the order
	 * "map lock"->"socket map list lock". We start off by
	 * accessing the socket map list, and take a reference to the
	 * map to guarantee existence between the
	 * xsk_get_map_list_entry() and xsk_map_try_sock_delete()
	 * calls. Then we ask the map to remove the socket, which
	 * tries to remove the socket from the map. Note that there
	 * might be updates to the map between
	 * xsk_get_map_list_entry() and xsk_map_try_sock_delete().
	 */
	struct xdp_sock **map_entry = NULL;
	struct xsk_map *map;

	while ((map = xsk_get_map_list_entry(xs, &map_entry))) {
		xsk_map_try_sock_delete(map, xs, map_entry);
		xsk_map_put(map);
	}
}

static int xsk_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct net *net;

	if (!sk)
		return 0;

	net = sock_net(sk);

	mutex_lock(&net->xdp.lock);
	sk_del_node_init_rcu(sk);
	mutex_unlock(&net->xdp.lock);

	local_bh_disable();
	sock_prot_inuse_add(net, sk->sk_prot, -1);
	local_bh_enable();

	xsk_delete_from_maps(xs);
	mutex_lock(&xs->mutex);
	xsk_unbind_dev(xs);
	mutex_unlock(&xs->mutex);

	xskq_destroy(xs->rx);
	xskq_destroy(xs->tx);
	xskq_destroy(xs->fq_tmp);
	xskq_destroy(xs->cq_tmp);

	sock_orphan(sk);
	sock->sk = NULL;

	sk_refcnt_debug_release(sk);
	sock_put(sk);

	return 0;
}

static struct socket *xsk_lookup_xsk_from_fd(int fd)
{
	struct socket *sock;
	int err;

	sock = sockfd_lookup(fd, &err);
	if (!sock)
		return ERR_PTR(-ENOTSOCK);

	if (sock->sk->sk_family != PF_XDP) {
		sockfd_put(sock);
		return ERR_PTR(-ENOPROTOOPT);
	}

	return sock;
}

static bool xsk_validate_queues(struct xdp_sock *xs)
{
	return xs->fq_tmp && xs->cq_tmp;
}

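/* Bind the socket to a device and queue id. Without XDP_SHARED_UMEM the
 * socket must have registered its own umem plus fill and completion rings;
 * with XDP_SHARED_UMEM it adopts the umem (and, for the same device/queue,
 * the buffer pool) of the socket given by sxdp_shared_umem_fd.
 */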
static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
{
	struct sockaddr_xdp *sxdp = (struct sockaddr_xdp *)addr;
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct net_device *dev;
	u32 flags, qid;
	int err = 0;

	if (addr_len < sizeof(struct sockaddr_xdp))
		return -EINVAL;
	if (sxdp->sxdp_family != AF_XDP)
		return -EINVAL;

	flags = sxdp->sxdp_flags;
	if (flags & ~(XDP_SHARED_UMEM | XDP_COPY | XDP_ZEROCOPY |
		      XDP_USE_NEED_WAKEUP))
		return -EINVAL;

	rtnl_lock();
	mutex_lock(&xs->mutex);
	if (xs->state != XSK_READY) {
		err = -EBUSY;
		goto out_release;
	}

	dev = dev_get_by_index(sock_net(sk), sxdp->sxdp_ifindex);
	if (!dev) {
		err = -ENODEV;
		goto out_release;
	}

	if (!xs->rx && !xs->tx) {
		err = -EINVAL;
		goto out_unlock;
	}

	qid = sxdp->sxdp_queue_id;

	if (flags & XDP_SHARED_UMEM) {
		struct xdp_sock *umem_xs;
		struct socket *sock;

		if ((flags & XDP_COPY) || (flags & XDP_ZEROCOPY) ||
		    (flags & XDP_USE_NEED_WAKEUP)) {
			/* Cannot specify flags for shared sockets. */
			err = -EINVAL;
			goto out_unlock;
		}

		if (xs->umem) {
			/* We have already our own. */
			err = -EINVAL;
			goto out_unlock;
		}

		sock = xsk_lookup_xsk_from_fd(sxdp->sxdp_shared_umem_fd);
		if (IS_ERR(sock)) {
			err = PTR_ERR(sock);
			goto out_unlock;
		}

		umem_xs = xdp_sk(sock->sk);
		if (!xsk_is_bound(umem_xs)) {
			err = -EBADF;
			sockfd_put(sock);
			goto out_unlock;
		}

		if (umem_xs->queue_id != qid || umem_xs->dev != dev) {
			/* Share the umem with another socket on another qid
			 * and/or device.
			 */
			xs->pool = xp_create_and_assign_umem(xs,
							     umem_xs->umem);
			if (!xs->pool) {
				err = -ENOMEM;
				sockfd_put(sock);
				goto out_unlock;
			}

			err = xp_assign_dev_shared(xs->pool, umem_xs->umem,
						   dev, qid);
			if (err) {
				xp_destroy(xs->pool);
				xs->pool = NULL;
				sockfd_put(sock);
				goto out_unlock;
			}
		} else {
			/* Share the buffer pool with the other socket. */
			if (xs->fq_tmp || xs->cq_tmp) {
				/* Do not allow setting your own fq or cq. */
				err = -EINVAL;
				sockfd_put(sock);
				goto out_unlock;
			}

			xp_get_pool(umem_xs->pool);
			xs->pool = umem_xs->pool;
		}

		xdp_get_umem(umem_xs->umem);
		WRITE_ONCE(xs->umem, umem_xs->umem);
		sockfd_put(sock);
	} else if (!xs->umem || !xsk_validate_queues(xs)) {
		err = -EINVAL;
		goto out_unlock;
	} else {
		/* This xsk has its own umem. */
		xs->pool = xp_create_and_assign_umem(xs, xs->umem);
		if (!xs->pool) {
			err = -ENOMEM;
			goto out_unlock;
		}

		err = xp_assign_dev(xs->pool, dev, qid, flags);
		if (err) {
			xp_destroy(xs->pool);
			xs->pool = NULL;
			goto out_unlock;
		}
	}

	xs->dev = dev;
	xs->zc = xs->umem->zc;
	xs->queue_id = qid;
	xp_add_xsk(xs->pool, xs);

out_unlock:
	if (err) {
		dev_put(dev);
	} else {
		/* Matches smp_rmb() in bind() for shared umem
		 * sockets, and xsk_is_bound().
		 */
		smp_wmb();
		WRITE_ONCE(xs->state, XSK_BOUND);
	}
out_release:
	mutex_unlock(&xs->mutex);
	rtnl_unlock();
	return err;
}

struct xdp_umem_reg_v1 {
	__u64 addr; /* Start of packet data area */
	__u64 len; /* Length of packet data area */
	__u32 chunk_size;
	__u32 headroom;
};

static int xsk_setsockopt(struct socket *sock, int level, int optname,
			  sockptr_t optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	int err;

	if (level != SOL_XDP)
		return -ENOPROTOOPT;

	switch (optname) {
	case XDP_RX_RING:
	case XDP_TX_RING:
	{
		struct xsk_queue **q;
		int entries;

		if (optlen < sizeof(entries))
			return -EINVAL;
		if (copy_from_sockptr(&entries, optval, sizeof(entries)))
			return -EFAULT;

		mutex_lock(&xs->mutex);
		if (xs->state != XSK_READY) {
			mutex_unlock(&xs->mutex);
			return -EBUSY;
		}
		q = (optname == XDP_TX_RING) ? &xs->tx : &xs->rx;
		err = xsk_init_queue(entries, q, false);
		if (!err && optname == XDP_TX_RING)
			/* Tx needs to be explicitly woken up the first time */
			xs->tx->ring->flags |= XDP_RING_NEED_WAKEUP;
		mutex_unlock(&xs->mutex);
		return err;
	}
	case XDP_UMEM_REG:
	{
		size_t mr_size = sizeof(struct xdp_umem_reg);
		struct xdp_umem_reg mr = {};
		struct xdp_umem *umem;

		if (optlen < sizeof(struct xdp_umem_reg_v1))
			return -EINVAL;
		else if (optlen < sizeof(mr))
			mr_size = sizeof(struct xdp_umem_reg_v1);

		if (copy_from_sockptr(&mr, optval, mr_size))
			return -EFAULT;

		mutex_lock(&xs->mutex);
		if (xs->state != XSK_READY || xs->umem) {
			mutex_unlock(&xs->mutex);
			return -EBUSY;
		}

		umem = xdp_umem_create(&mr);
		if (IS_ERR(umem)) {
			mutex_unlock(&xs->mutex);
			return PTR_ERR(umem);
		}

		/* Make sure umem is ready before it can be seen by others */
		smp_wmb();
		WRITE_ONCE(xs->umem, umem);
		mutex_unlock(&xs->mutex);
		return 0;
	}
	case XDP_UMEM_FILL_RING:
	case XDP_UMEM_COMPLETION_RING:
	{
		struct xsk_queue **q;
		int entries;

		if (copy_from_sockptr(&entries, optval, sizeof(entries)))
			return -EFAULT;

		mutex_lock(&xs->mutex);
		if (xs->state != XSK_READY) {
			mutex_unlock(&xs->mutex);
			return -EBUSY;
		}

		q = (optname == XDP_UMEM_FILL_RING) ? &xs->fq_tmp :
			&xs->cq_tmp;
		err = xsk_init_queue(entries, q, true);
		mutex_unlock(&xs->mutex);
		return err;
	}
	default:
		break;
	}

	return -ENOPROTOOPT;
}

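/* Fill in the legacy ring offset layout. The v1 structures predate the flags
 * member, so these helpers only report the producer, consumer and descriptor
 * offsets; the flags offsets are added separately when the caller of
 * XDP_MMAP_OFFSETS supplies a buffer large enough for the current layout.
 */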
static void xsk_enter_rxtx_offsets(struct xdp_ring_offset_v1 *ring)
{
	ring->producer = offsetof(struct xdp_rxtx_ring, ptrs.producer);
	ring->consumer = offsetof(struct xdp_rxtx_ring, ptrs.consumer);
	ring->desc = offsetof(struct xdp_rxtx_ring, desc);
}

static void xsk_enter_umem_offsets(struct xdp_ring_offset_v1 *ring)
{
	ring->producer = offsetof(struct xdp_umem_ring, ptrs.producer);
	ring->consumer = offsetof(struct xdp_umem_ring, ptrs.consumer);
	ring->desc = offsetof(struct xdp_umem_ring, desc);
}

struct xdp_statistics_v1 {
	__u64 rx_dropped;
	__u64 rx_invalid_descs;
	__u64 tx_invalid_descs;
};

static int xsk_getsockopt(struct socket *sock, int level, int optname,
			  char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	int len;

	if (level != SOL_XDP)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;
	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case XDP_STATISTICS:
	{
		struct xdp_statistics stats = {};
		bool extra_stats = true;
		size_t stats_size;

		if (len < sizeof(struct xdp_statistics_v1)) {
			return -EINVAL;
		} else if (len < sizeof(stats)) {
			extra_stats = false;
			stats_size = sizeof(struct xdp_statistics_v1);
		} else {
			stats_size = sizeof(stats);
		}

		mutex_lock(&xs->mutex);
		stats.rx_dropped = xs->rx_dropped;
		if (extra_stats) {
			stats.rx_ring_full = xs->rx_queue_full;
			stats.rx_fill_ring_empty_descs =
				xs->pool ? xskq_nb_queue_empty_descs(xs->pool->fq) : 0;
			stats.tx_ring_empty_descs = xskq_nb_queue_empty_descs(xs->tx);
		} else {
			stats.rx_dropped += xs->rx_queue_full;
		}
		stats.rx_invalid_descs = xskq_nb_invalid_descs(xs->rx);
		stats.tx_invalid_descs = xskq_nb_invalid_descs(xs->tx);
		mutex_unlock(&xs->mutex);

		if (copy_to_user(optval, &stats, stats_size))
			return -EFAULT;
		if (put_user(stats_size, optlen))
			return -EFAULT;

		return 0;
	}
	case XDP_MMAP_OFFSETS:
	{
		struct xdp_mmap_offsets off;
		struct xdp_mmap_offsets_v1 off_v1;
		bool flags_supported = true;
		void *to_copy;

		if (len < sizeof(off_v1))
			return -EINVAL;
		else if (len < sizeof(off))
			flags_supported = false;

		if (flags_supported) {
			/* xdp_ring_offset is identical to xdp_ring_offset_v1
			 * except for the flags field added to the end.
			 */
			xsk_enter_rxtx_offsets((struct xdp_ring_offset_v1 *)
					       &off.rx);
			xsk_enter_rxtx_offsets((struct xdp_ring_offset_v1 *)
					       &off.tx);
			xsk_enter_umem_offsets((struct xdp_ring_offset_v1 *)
					       &off.fr);
			xsk_enter_umem_offsets((struct xdp_ring_offset_v1 *)
					       &off.cr);
			off.rx.flags = offsetof(struct xdp_rxtx_ring,
						ptrs.flags);
			off.tx.flags = offsetof(struct xdp_rxtx_ring,
						ptrs.flags);
			off.fr.flags = offsetof(struct xdp_umem_ring,
						ptrs.flags);
			off.cr.flags = offsetof(struct xdp_umem_ring,
						ptrs.flags);

			len = sizeof(off);
			to_copy = &off;
		} else {
			xsk_enter_rxtx_offsets(&off_v1.rx);
			xsk_enter_rxtx_offsets(&off_v1.tx);
			xsk_enter_umem_offsets(&off_v1.fr);
			xsk_enter_umem_offsets(&off_v1.cr);

			len = sizeof(off_v1);
			to_copy = &off_v1;
		}

		if (copy_to_user(optval, to_copy, len))
			return -EFAULT;
		if (put_user(len, optlen))
			return -EFAULT;

		return 0;
	}
	case XDP_OPTIONS:
	{
		struct xdp_options opts = {};

		if (len < sizeof(opts))
			return -EINVAL;

		mutex_lock(&xs->mutex);
		if (xs->zc)
			opts.flags |= XDP_OPTIONS_ZEROCOPY;
		mutex_unlock(&xs->mutex);

		len = sizeof(opts);
		if (copy_to_user(optval, &opts, len))
			return -EFAULT;
		if (put_user(len, optlen))
			return -EFAULT;

		return 0;
	}
	default:
		break;
	}

	return -EOPNOTSUPP;
}

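/* Map one of the four rings (Rx, Tx, fill or completion) into user space.
 * The ring is selected by the page offset passed to mmap(), and the socket
 * must still be in the XSK_READY state, i.e. not yet bound.
 */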
static int xsk_mmap(struct file *file, struct socket *sock,
		    struct vm_area_struct *vma)
{
	loff_t offset = (loff_t)vma->vm_pgoff << PAGE_SHIFT;
	unsigned long size = vma->vm_end - vma->vm_start;
	struct xdp_sock *xs = xdp_sk(sock->sk);
	struct xsk_queue *q = NULL;
	unsigned long pfn;
	struct page *qpg;

	if (READ_ONCE(xs->state) != XSK_READY)
		return -EBUSY;

	if (offset == XDP_PGOFF_RX_RING) {
		q = READ_ONCE(xs->rx);
	} else if (offset == XDP_PGOFF_TX_RING) {
		q = READ_ONCE(xs->tx);
	} else {
		/* Matches the smp_wmb() in XDP_UMEM_REG */
		smp_rmb();
		if (offset == XDP_UMEM_PGOFF_FILL_RING)
			q = READ_ONCE(xs->fq_tmp);
		else if (offset == XDP_UMEM_PGOFF_COMPLETION_RING)
			q = READ_ONCE(xs->cq_tmp);
	}

	if (!q)
		return -EINVAL;

	/* Matches the smp_wmb() in xsk_init_queue */
	smp_rmb();
	qpg = virt_to_head_page(q->ring);
	if (size > page_size(qpg))
		return -EINVAL;

	pfn = virt_to_phys(q->ring) >> PAGE_SHIFT;
	return remap_pfn_range(vma, vma->vm_start, pfn,
			       size, vma->vm_page_prot);
}

static int xsk_notifier(struct notifier_block *this,
			unsigned long msg, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct net *net = dev_net(dev);
	struct sock *sk;

	switch (msg) {
	case NETDEV_UNREGISTER:
		mutex_lock(&net->xdp.lock);
		sk_for_each(sk, &net->xdp.list) {
			struct xdp_sock *xs = xdp_sk(sk);

			mutex_lock(&xs->mutex);
			if (xs->dev == dev) {
				sk->sk_err = ENETDOWN;
				if (!sock_flag(sk, SOCK_DEAD))
					sk->sk_error_report(sk);

				xsk_unbind_dev(xs);

				/* Clear device references. */
				xp_clear_dev(xs->pool);
			}
			mutex_unlock(&xs->mutex);
		}
		mutex_unlock(&net->xdp.lock);
		break;
	}
	return NOTIFY_DONE;
}

static struct proto xsk_proto = {
	.name =		"XDP",
	.owner =	THIS_MODULE,
	.obj_size =	sizeof(struct xdp_sock),
};

static const struct proto_ops xsk_proto_ops = {
	.family		= PF_XDP,
	.owner		= THIS_MODULE,
	.release	= xsk_release,
	.bind		= xsk_bind,
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.getname	= sock_no_getname,
	.poll		= xsk_poll,
	.ioctl		= sock_no_ioctl,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= xsk_setsockopt,
	.getsockopt	= xsk_getsockopt,
	.sendmsg	= xsk_sendmsg,
	.recvmsg	= sock_no_recvmsg,
	.mmap		= xsk_mmap,
	.sendpage	= sock_no_sendpage,
};

static void xsk_destruct(struct sock *sk)
{
	struct xdp_sock *xs = xdp_sk(sk);

	if (!sock_flag(sk, SOCK_DEAD))
		return;

	if (!xp_put_pool(xs->pool))
		xdp_put_umem(xs->umem);

	sk_refcnt_debug_dec(sk);
}

static int xsk_create(struct net *net, struct socket *sock, int protocol,
		      int kern)
{
	struct xdp_sock *xs;
	struct sock *sk;

	if (!ns_capable(net->user_ns, CAP_NET_RAW))
		return -EPERM;
	if (sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	if (protocol)
		return -EPROTONOSUPPORT;

	sock->state = SS_UNCONNECTED;

	sk = sk_alloc(net, PF_XDP, GFP_KERNEL, &xsk_proto, kern);
	if (!sk)
		return -ENOBUFS;

	sock->ops = &xsk_proto_ops;

	sock_init_data(sock, sk);

	sk->sk_family = PF_XDP;

	sk->sk_destruct = xsk_destruct;
	sk_refcnt_debug_inc(sk);

	sock_set_flag(sk, SOCK_RCU_FREE);

	xs = xdp_sk(sk);
	xs->state = XSK_READY;
	mutex_init(&xs->mutex);
	spin_lock_init(&xs->rx_lock);
	spin_lock_init(&xs->tx_completion_lock);

	INIT_LIST_HEAD(&xs->map_list);
	spin_lock_init(&xs->map_list_lock);

	mutex_lock(&net->xdp.lock);
	sk_add_node_rcu(sk, &net->xdp.list);
	mutex_unlock(&net->xdp.lock);

	local_bh_disable();
	sock_prot_inuse_add(net, &xsk_proto, 1);
	local_bh_enable();

	return 0;
}

static const struct net_proto_family xsk_family_ops = {
	.family = PF_XDP,
	.create = xsk_create,
	.owner	= THIS_MODULE,
};

static struct notifier_block xsk_netdev_notifier = {
	.notifier_call	= xsk_notifier,
};

static int __net_init xsk_net_init(struct net *net)
{
	mutex_init(&net->xdp.lock);
	INIT_HLIST_HEAD(&net->xdp.list);
	return 0;
}

static void __net_exit xsk_net_exit(struct net *net)
{
	WARN_ON_ONCE(!hlist_empty(&net->xdp.list));
}

static struct pernet_operations xsk_net_ops = {
	.init = xsk_net_init,
	.exit = xsk_net_exit,
};

static int __init xsk_init(void)
{
	int err, cpu;

	err = proto_register(&xsk_proto, 0 /* no slab */);
	if (err)
		goto out;

	err = sock_register(&xsk_family_ops);
	if (err)
		goto out_proto;

	err = register_pernet_subsys(&xsk_net_ops);
	if (err)
		goto out_sk;

	err = register_netdevice_notifier(&xsk_netdev_notifier);
	if (err)
		goto out_pernet;

	for_each_possible_cpu(cpu)
		INIT_LIST_HEAD(&per_cpu(xskmap_flush_list, cpu));
	return 0;

out_pernet:
	unregister_pernet_subsys(&xsk_net_ops);
out_sk:
	sock_unregister(PF_XDP);
out_proto:
	proto_unregister(&xsk_proto);
out:
	return err;
}

fs_initcall(xsk_init);