// SPDX-License-Identifier: GPL-2.0
/* XDP sockets
 *
 * AF_XDP sockets allow a channel between XDP programs and userspace
 * applications.
 * Copyright(c) 2018 Intel Corporation.
 *
 * Author(s): Björn Töpel <bjorn.topel@intel.com>
 *	      Magnus Karlsson <magnus.karlsson@intel.com>
 */
#define pr_fmt(fmt) "AF_XDP: %s: " fmt, __func__

#include <linux/if_xdp.h>
#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/socket.h>
#include <linux/file.h>
#include <linux/uaccess.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/rculist.h>
#include <net/xdp_sock.h>
#include <net/xdp.h>

#include "xsk_queue.h"
#include "xdp_umem.h"

#define TX_BATCH_SIZE 16
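
/* Background note (summarizing the code below, not from the original file):
 * an AF_XDP socket moves packets over four single-producer/single-consumer
 * rings that index into one shared, user-mapped umem. Userspace posts umem
 * addresses on the FILL ring for the kernel to receive into, reads completed
 * frames from the RX ring, posts frames on the TX ring, and reclaims
 * transmitted addresses from the COMPLETION ring. TX_BATCH_SIZE bounds how
 * many descriptors a single sendmsg() pass through the copy path below may
 * consume before returning -EAGAIN.
 */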
bool xsk_is_setup_for_bpf_map(struct xdp_sock *xs)
{
	return READ_ONCE(xs->rx) && READ_ONCE(xs->umem) &&
		READ_ONCE(xs->umem->fq);
}
u64 *xsk_umem_peek_addr(struct xdp_umem *umem, u64 *addr)
{
	return xskq_peek_addr(umem->fq, addr);
}
EXPORT_SYMBOL(xsk_umem_peek_addr);

void xsk_umem_discard_addr(struct xdp_umem *umem)
{
	xskq_discard_addr(umem->fq);
}
EXPORT_SYMBOL(xsk_umem_discard_addr);
static int __xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
{
	void *to_buf, *from_buf;
	u32 metalen;
	u64 addr;
	int err;

	if (!xskq_peek_addr(xs->umem->fq, &addr) ||
	    len > xs->umem->chunk_size_nohr - XDP_PACKET_HEADROOM) {
		xs->rx_dropped++;
		return -ENOSPC;
	}

	addr += xs->umem->headroom;

	if (unlikely(xdp_data_meta_unsupported(xdp))) {
		from_buf = xdp->data;
		metalen = 0;
	} else {
		from_buf = xdp->data_meta;
		metalen = xdp->data - xdp->data_meta;
	}

	to_buf = xdp_umem_get_data(xs->umem, addr);
	memcpy(to_buf, from_buf, len + metalen);
	addr += metalen;
	err = xskq_produce_batch_desc(xs->rx, addr, len);
	if (!err) {
		xskq_discard_addr(xs->umem->fq);
		xdp_return_buff(xdp);
		return 0;
	}

	xs->rx_dropped++;
	return err;
}
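
/* Copy-mode RX in a nutshell: __xsk_rcv() claims one umem chunk from the
 * FILL ring, copies the packet plus any XDP metadata that precedes
 * xdp->data into it, and publishes an <addr, len> descriptor on the RX
 * ring. addr is bumped past the metadata so the descriptor points at the
 * packet itself, with the metadata sitting immediately before it. On
 * failure the frame is counted in rx_dropped and the FILL entry is left
 * unconsumed for the next frame.
 */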
static int __xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
{
	int err = xskq_produce_batch_desc(xs->rx, (u64)xdp->handle, len);

	if (err)
		xs->rx_dropped++;

	return err;
}
int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	u32 len;

	if (xs->dev != xdp->rxq->dev || xs->queue_id != xdp->rxq->queue_index)
		return -EINVAL;

	len = xdp->data_end - xdp->data;

	return (xdp->rxq->mem.type == MEM_TYPE_ZERO_COPY) ?
		__xsk_rcv_zc(xs, xdp, len) : __xsk_rcv(xs, xdp, len);
}

void xsk_flush(struct xdp_sock *xs)
{
	xskq_produce_flush_desc(xs->rx);
	xs->sk.sk_data_ready(&xs->sk);
}
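
/* xsk_rcv()/xsk_flush() are the driver-facing RX entry points: xsk_rcv()
 * queues descriptors (zero-copy or copy, depending on rxq->mem.type)
 * without making them visible, and xsk_flush() later publishes the
 * producer index and wakes the socket via sk_data_ready(), typically once
 * per NAPI batch, so userspace observes whole batches rather than single
 * frames.
 */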
int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	u32 metalen = xdp->data - xdp->data_meta;
	u32 len = xdp->data_end - xdp->data;
	void *buffer;
	u64 addr;
	int err;

	if (xs->dev != xdp->rxq->dev || xs->queue_id != xdp->rxq->queue_index)
		return -EINVAL;

	if (!xskq_peek_addr(xs->umem->fq, &addr) ||
	    len > xs->umem->chunk_size_nohr - XDP_PACKET_HEADROOM) {
		xs->rx_dropped++;
		return -ENOSPC;
	}

	addr += xs->umem->headroom;

	buffer = xdp_umem_get_data(xs->umem, addr);
	memcpy(buffer, xdp->data_meta, len + metalen);
	addr += metalen;
	err = xskq_produce_batch_desc(xs->rx, addr, len);
	if (!err) {
		xskq_discard_addr(xs->umem->fq);
		xsk_flush(xs);
		return 0;
	}

	xs->rx_dropped++;
	return err;
}
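
/* xsk_generic_rcv() is the skb/generic-XDP variant of the copy path: it
 * runs outside a driver flush context, so it performs its own xsk_flush()
 * on success instead of batching. The bound against
 * chunk_size_nohr - XDP_PACKET_HEADROOM mirrors __xsk_rcv(): a frame must
 * fit in one umem chunk after the mandatory headroom is reserved.
 */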
void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries)
{
	xskq_produce_flush_addr_n(umem->cq, nb_entries);
}
EXPORT_SYMBOL(xsk_umem_complete_tx);

void xsk_umem_consume_tx_done(struct xdp_umem *umem)
{
	struct xdp_sock *xs;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &umem->xsk_list, list) {
		xs->sk.sk_write_space(&xs->sk);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(xsk_umem_consume_tx_done);
bool xsk_umem_consume_tx(struct xdp_umem *umem, dma_addr_t *dma, u32 *len)
{
	struct xdp_desc desc;
	struct xdp_sock *xs;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &umem->xsk_list, list) {
		if (!xskq_peek_desc(xs->tx, &desc))
			continue;

		if (xskq_produce_addr_lazy(umem->cq, desc.addr))
			goto out;

		*dma = xdp_umem_get_dma(umem, desc.addr);
		*len = desc.len;

		xskq_discard_desc(xs->tx);
		rcu_read_unlock();
		return true;
	}

out:
	rcu_read_unlock();
	return false;
}
EXPORT_SYMBOL(xsk_umem_consume_tx);
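
/* Zero-copy TX contract with drivers: xsk_umem_consume_tx() hands out one
 * TX descriptor at a time, translated to a DMA address, and eagerly
 * reserves (but does not yet publish) the matching COMPLETION ring slot,
 * so posting the completion later cannot fail. The driver calls
 * xsk_umem_complete_tx() from its TX-completion handler to publish those
 * entries, and xsk_umem_consume_tx_done() to wake sockets waiting for
 * ring space.
 */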
static int xsk_zc_xmit(struct sock *sk)
{
	struct xdp_sock *xs = xdp_sk(sk);
	struct net_device *dev = xs->dev;

	return dev->netdev_ops->ndo_xsk_async_xmit(dev, xs->queue_id);
}
static void xsk_destruct_skb(struct sk_buff *skb)
{
	u64 addr = (u64)(long)skb_shinfo(skb)->destructor_arg;
	struct xdp_sock *xs = xdp_sk(skb->sk);
	unsigned long flags;

	spin_lock_irqsave(&xs->tx_completion_lock, flags);
	WARN_ON_ONCE(xskq_produce_addr(xs->umem->cq, addr));
	spin_unlock_irqrestore(&xs->tx_completion_lock, flags);

	sock_wfree(skb);
}
static int xsk_generic_xmit(struct sock *sk, struct msghdr *m,
			    size_t total_len)
{
	u32 max_batch = TX_BATCH_SIZE;
	struct xdp_sock *xs = xdp_sk(sk);
	bool sent_frame = false;
	struct xdp_desc desc;
	struct sk_buff *skb;
	int err = 0;

	mutex_lock(&xs->mutex);

	while (xskq_peek_desc(xs->tx, &desc)) {
		char *buffer;
		u64 addr;
		u32 len;

		if (max_batch-- == 0) {
			err = -EAGAIN;
			goto out;
		}

		if (xskq_reserve_addr(xs->umem->cq))
			goto out;

		if (xs->queue_id >= xs->dev->real_num_tx_queues)
			goto out;

		len = desc.len;
		skb = sock_alloc_send_skb(sk, len, 1, &err);
		if (unlikely(!skb)) {
			err = -EAGAIN;
			goto out;
		}

		skb_put(skb, len);
		addr = desc.addr;
		buffer = xdp_umem_get_data(xs->umem, addr);
		err = skb_store_bits(skb, 0, buffer, len);
		if (unlikely(err)) {
			kfree_skb(skb);
			goto out;
		}

		skb->dev = xs->dev;
		skb->priority = sk->sk_priority;
		skb->mark = sk->sk_mark;
		skb_shinfo(skb)->destructor_arg = (void *)(long)addr;
		skb->destructor = xsk_destruct_skb;

		err = dev_direct_xmit(skb, xs->queue_id);
		xskq_discard_desc(xs->tx);
		/* Ignore NET_XMIT_CN as packet might have been sent */
		if (err == NET_XMIT_DROP || err == NETDEV_TX_BUSY) {
			/* SKB completed but not sent */
			err = -EBUSY;
			goto out;
		}

		sent_frame = true;
	}

out:
	if (sent_frame)
		sk->sk_write_space(sk);

	mutex_unlock(&xs->mutex);
	return err;
}
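
/* Copy-mode TX notes: each descriptor is copied into a freshly allocated
 * skb and sent with dev_direct_xmit() on the socket's queue. The umem
 * address travels in destructor_arg so that xsk_destruct_skb() can post
 * the completion when the skb is finally freed, which is why a COMPLETION
 * slot is reserved up front with xskq_reserve_addr(). NET_XMIT_CN is
 * deliberately not treated as an error above: the packet may still have
 * been transmitted.
 */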
static int xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
{
	bool need_wait = !(m->msg_flags & MSG_DONTWAIT);
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);

	if (unlikely(!xs->dev))
		return -ENXIO;
	if (unlikely(!(xs->dev->flags & IFF_UP)))
		return -ENETDOWN;
	if (unlikely(!xs->tx))
		return -ENOBUFS;
	if (need_wait)
		return -EOPNOTSUPP;

	return (xs->zc) ? xsk_zc_xmit(sk) : xsk_generic_xmit(sk, m, total_len);
}
static unsigned int xsk_poll(struct file *file, struct socket *sock,
			     struct poll_table_struct *wait)
{
	unsigned int mask = datagram_poll(file, sock, wait);
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);

	if (xs->rx && !xskq_empty_desc(xs->rx))
		mask |= POLLIN | POLLRDNORM;
	if (xs->tx && !xskq_full_desc(xs->tx))
		mask |= POLLOUT | POLLWRNORM;

	return mask;
}
static int xsk_init_queue(u32 entries, struct xsk_queue **queue,
			  bool umem_queue)
{
	struct xsk_queue *q;

	if (entries == 0 || *queue || !is_power_of_2(entries))
		return -EINVAL;

	q = xskq_create(entries, umem_queue);
	if (!q)
		return -ENOMEM;

	/* Make sure queue is ready before it can be seen by others */
	smp_wmb();
	*queue = q;
	return 0;
}
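
/* The smp_wmb() in xsk_init_queue() pairs with the smp_rmb() in xsk_mmap()
 * below: once *queue is observed non-NULL, the ring memory behind it is
 * guaranteed to be fully initialized before userspace maps it.
 */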
static void xsk_unbind_dev(struct xdp_sock *xs)
{
	struct net_device *dev = xs->dev;

	if (!dev || xs->state != XSK_BOUND)
		return;

	xs->state = XSK_UNBOUND;

	/* Wait for driver to stop using the xdp socket. */
	xdp_del_sk_umem(xs->umem, xs);
	xs->dev = NULL;
	synchronize_net();
	dev_put(dev);
}
static int xsk_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct net *net;

	if (!sk)
		return 0;

	net = sock_net(sk);

	mutex_lock(&net->xdp.lock);
	sk_del_node_init_rcu(sk);
	mutex_unlock(&net->xdp.lock);

	local_bh_disable();
	sock_prot_inuse_add(net, sk->sk_prot, -1);
	local_bh_enable();

	xsk_unbind_dev(xs);

	xskq_destroy(xs->rx);
	xskq_destroy(xs->tx);

	sock_orphan(sk);
	sock->sk = NULL;

	sk_refcnt_debug_release(sk);
	sock_put(sk);

	return 0;
}
static struct socket *xsk_lookup_xsk_from_fd(int fd)
{
	struct socket *sock;
	int err;

	sock = sockfd_lookup(fd, &err);
	if (!sock)
		return ERR_PTR(-ENOTSOCK);

	if (sock->sk->sk_family != PF_XDP) {
		sockfd_put(sock);
		return ERR_PTR(-ENOPROTOOPT);
	}

	return sock;
}
static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
{
	struct sockaddr_xdp *sxdp = (struct sockaddr_xdp *)addr;
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct net_device *dev;
	u32 flags, qid;
	int err = 0;

	if (addr_len < sizeof(struct sockaddr_xdp))
		return -EINVAL;
	if (sxdp->sxdp_family != AF_XDP)
		return -EINVAL;

	flags = sxdp->sxdp_flags;
	if (flags & ~(XDP_SHARED_UMEM | XDP_COPY | XDP_ZEROCOPY))
		return -EINVAL;

	mutex_lock(&xs->mutex);
	if (xs->state != XSK_READY) {
		err = -EBUSY;
		goto out_release;
	}

	dev = dev_get_by_index(sock_net(sk), sxdp->sxdp_ifindex);
	if (!dev) {
		err = -ENODEV;
		goto out_release;
	}

	if (!xs->rx && !xs->tx) {
		err = -EINVAL;
		goto out_unlock;
	}

	qid = sxdp->sxdp_queue_id;

	if (flags & XDP_SHARED_UMEM) {
		struct xdp_sock *umem_xs;
		struct socket *sock;

		if ((flags & XDP_COPY) || (flags & XDP_ZEROCOPY)) {
			/* Cannot specify flags for shared sockets. */
			err = -EINVAL;
			goto out_unlock;
		}

		if (xs->umem) {
			/* We have already our own. */
			err = -EINVAL;
			goto out_unlock;
		}

		sock = xsk_lookup_xsk_from_fd(sxdp->sxdp_shared_umem_fd);
		if (IS_ERR(sock)) {
			err = PTR_ERR(sock);
			goto out_unlock;
		}

		umem_xs = xdp_sk(sock->sk);
		if (!umem_xs->umem) {
			/* No umem to inherit. */
			err = -EBADF;
			sockfd_put(sock);
			goto out_unlock;
		} else if (umem_xs->dev != dev || umem_xs->queue_id != qid) {
			err = -EINVAL;
			sockfd_put(sock);
			goto out_unlock;
		}

		xdp_get_umem(umem_xs->umem);
		xs->umem = umem_xs->umem;
		sockfd_put(sock);
	} else if (!xs->umem || !xdp_umem_validate_queues(xs->umem)) {
		err = -EINVAL;
		goto out_unlock;
	} else {
		/* This xsk has its own umem. */
		xskq_set_umem(xs->umem->fq, xs->umem->size,
			      xs->umem->chunk_mask);
		xskq_set_umem(xs->umem->cq, xs->umem->size,
			      xs->umem->chunk_mask);

		err = xdp_umem_assign_dev(xs->umem, dev, qid, flags);
		if (err)
			goto out_unlock;
	}

	xs->dev = dev;
	xs->zc = xs->umem->zc;
	xs->queue_id = qid;
	xskq_set_umem(xs->rx, xs->umem->size, xs->umem->chunk_mask);
	xskq_set_umem(xs->tx, xs->umem->size, xs->umem->chunk_mask);
	xdp_add_sk_umem(xs->umem, xs);

out_unlock:
	if (err)
		dev_put(dev);
	else
		xs->state = XSK_BOUND;
out_release:
	mutex_unlock(&xs->mutex);
	return err;
}
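
/* Bind semantics: a socket needs at least one of the RX/TX rings plus a
 * umem before bind(). With XDP_SHARED_UMEM the umem (and its fill and
 * completion rings) is inherited from another AF_XDP socket already bound
 * to the same <dev, queue_id>, and XDP_COPY/XDP_ZEROCOPY must not be
 * given. An illustrative userspace call (not part of this file):
 *
 *	struct sockaddr_xdp sxdp = {
 *		.sxdp_family	= AF_XDP,
 *		.sxdp_ifindex	= if_nametoindex("eth0"),
 *		.sxdp_queue_id	= 0,
 *		.sxdp_flags	= XDP_COPY,
 *	};
 *	bind(fd, (struct sockaddr *)&sxdp, sizeof(sxdp));
 */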
static int xsk_setsockopt(struct socket *sock, int level, int optname,
			  char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	int err;

	if (level != SOL_XDP)
		return -ENOPROTOOPT;

	switch (optname) {
	case XDP_RX_RING:
	case XDP_TX_RING:
	{
		struct xsk_queue **q;
		int entries;

		if (optlen < sizeof(entries))
			return -EINVAL;
		if (copy_from_user(&entries, optval, sizeof(entries)))
			return -EFAULT;

		mutex_lock(&xs->mutex);
		if (xs->state != XSK_READY) {
			mutex_unlock(&xs->mutex);
			return -EBUSY;
		}
		q = (optname == XDP_TX_RING) ? &xs->tx : &xs->rx;
		err = xsk_init_queue(entries, q, false);
		mutex_unlock(&xs->mutex);
		return err;
	}
	case XDP_UMEM_REG:
	{
		struct xdp_umem_reg mr;
		struct xdp_umem *umem;

		if (copy_from_user(&mr, optval, sizeof(mr)))
			return -EFAULT;

		mutex_lock(&xs->mutex);
		if (xs->state != XSK_READY || xs->umem) {
			mutex_unlock(&xs->mutex);
			return -EBUSY;
		}

		umem = xdp_umem_create(&mr);
		if (IS_ERR(umem)) {
			mutex_unlock(&xs->mutex);
			return PTR_ERR(umem);
		}

		/* Make sure umem is ready before it can be seen by others */
		smp_wmb();
		xs->umem = umem;
		mutex_unlock(&xs->mutex);
		return 0;
	}
	case XDP_UMEM_FILL_RING:
	case XDP_UMEM_COMPLETION_RING:
	{
		struct xsk_queue **q;
		int entries;

		if (copy_from_user(&entries, optval, sizeof(entries)))
			return -EFAULT;

		mutex_lock(&xs->mutex);
		if (xs->state != XSK_READY) {
			mutex_unlock(&xs->mutex);
			return -EBUSY;
		}
		if (!xs->umem) {
			mutex_unlock(&xs->mutex);
			return -EINVAL;
		}

		q = (optname == XDP_UMEM_FILL_RING) ? &xs->umem->fq :
			&xs->umem->cq;
		err = xsk_init_queue(entries, q, true);
		mutex_unlock(&xs->mutex);
		return err;
	}
	default:
		break;
	}

	return -ENOPROTOOPT;
}
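
/* Typical userspace setup order implied by the checks above (illustrative
 * sketch, not part of this file; NUM_FRAMES/FRAME_SIZE are hypothetical
 * application constants): register the umem first, then size each ring
 * with a power-of-two entry count. Every call is rejected with -EBUSY
 * once the socket has left XSK_READY.
 *
 *	struct xdp_umem_reg mr = {
 *		.addr		= (__u64)(uintptr_t)bufs,
 *		.len		= NUM_FRAMES * FRAME_SIZE,
 *		.chunk_size	= FRAME_SIZE,
 *		.headroom	= 0,
 *	};
 *	int entries = 2048;
 *
 *	setsockopt(fd, SOL_XDP, XDP_UMEM_REG, &mr, sizeof(mr));
 *	setsockopt(fd, SOL_XDP, XDP_UMEM_FILL_RING, &entries, sizeof(entries));
 *	setsockopt(fd, SOL_XDP, XDP_UMEM_COMPLETION_RING, &entries,
 *		   sizeof(entries));
 *	setsockopt(fd, SOL_XDP, XDP_RX_RING, &entries, sizeof(entries));
 *	setsockopt(fd, SOL_XDP, XDP_TX_RING, &entries, sizeof(entries));
 */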
static int xsk_getsockopt(struct socket *sock, int level, int optname,
			  char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	int len;

	if (level != SOL_XDP)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;
	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case XDP_STATISTICS:
	{
		struct xdp_statistics stats;

		if (len < sizeof(stats))
			return -EINVAL;

		mutex_lock(&xs->mutex);
		stats.rx_dropped = xs->rx_dropped;
		stats.rx_invalid_descs = xskq_nb_invalid_descs(xs->rx);
		stats.tx_invalid_descs = xskq_nb_invalid_descs(xs->tx);
		mutex_unlock(&xs->mutex);

		if (copy_to_user(optval, &stats, sizeof(stats)))
			return -EFAULT;
		if (put_user(sizeof(stats), optlen))
			return -EFAULT;

		return 0;
	}
	case XDP_MMAP_OFFSETS:
	{
		struct xdp_mmap_offsets off;

		if (len < sizeof(off))
			return -EINVAL;

		off.rx.producer = offsetof(struct xdp_rxtx_ring, ptrs.producer);
		off.rx.consumer = offsetof(struct xdp_rxtx_ring, ptrs.consumer);
		off.rx.desc	= offsetof(struct xdp_rxtx_ring, desc);
		off.tx.producer = offsetof(struct xdp_rxtx_ring, ptrs.producer);
		off.tx.consumer = offsetof(struct xdp_rxtx_ring, ptrs.consumer);
		off.tx.desc	= offsetof(struct xdp_rxtx_ring, desc);

		off.fr.producer = offsetof(struct xdp_umem_ring, ptrs.producer);
		off.fr.consumer = offsetof(struct xdp_umem_ring, ptrs.consumer);
		off.fr.desc	= offsetof(struct xdp_umem_ring, desc);
		off.cr.producer = offsetof(struct xdp_umem_ring, ptrs.producer);
		off.cr.consumer = offsetof(struct xdp_umem_ring, ptrs.consumer);
		off.cr.desc	= offsetof(struct xdp_umem_ring, desc);

		len = sizeof(off);
		if (copy_to_user(optval, &off, len))
			return -EFAULT;
		if (put_user(len, optlen))
			return -EFAULT;

		return 0;
	}
	default:
		break;
	}

	return -EOPNOTSUPP;
}
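
/* XDP_MMAP_OFFSETS tells userspace where the producer/consumer indices and
 * the descriptor array live inside each mmap()ed ring. Illustrative use
 * (not part of this file):
 *
 *	struct xdp_mmap_offsets off;
 *	socklen_t optlen = sizeof(off);
 *	void *map;
 *
 *	getsockopt(fd, SOL_XDP, XDP_MMAP_OFFSETS, &off, &optlen);
 *	map = mmap(NULL, off.rx.desc + entries * sizeof(struct xdp_desc),
 *		   PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, fd,
 *		   XDP_PGOFF_RX_RING);
 *	producer = (__u32 *)((char *)map + off.rx.producer);
 *	consumer = (__u32 *)((char *)map + off.rx.consumer);
 */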
static int xsk_mmap(struct file *file, struct socket *sock,
		    struct vm_area_struct *vma)
{
	loff_t offset = (loff_t)vma->vm_pgoff << PAGE_SHIFT;
	unsigned long size = vma->vm_end - vma->vm_start;
	struct xdp_sock *xs = xdp_sk(sock->sk);
	struct xsk_queue *q = NULL;
	struct xdp_umem *umem;
	unsigned long pfn;
	struct page *qpg;

	if (xs->state != XSK_READY)
		return -EBUSY;

	if (offset == XDP_PGOFF_RX_RING) {
		q = READ_ONCE(xs->rx);
	} else if (offset == XDP_PGOFF_TX_RING) {
		q = READ_ONCE(xs->tx);
	} else {
		umem = READ_ONCE(xs->umem);
		if (!umem)
			return -EINVAL;

		/* Matches the smp_wmb() in XDP_UMEM_REG */
		smp_rmb();
		if (offset == XDP_UMEM_PGOFF_FILL_RING)
			q = READ_ONCE(umem->fq);
		else if (offset == XDP_UMEM_PGOFF_COMPLETION_RING)
			q = READ_ONCE(umem->cq);
	}

	if (!q)
		return -EINVAL;

	/* Matches the smp_wmb() in xsk_init_queue */
	smp_rmb();
	qpg = virt_to_head_page(q->ring);
	if (size > (PAGE_SIZE << compound_order(qpg)))
		return -EINVAL;

	pfn = virt_to_phys(q->ring) >> PAGE_SHIFT;
	return remap_pfn_range(vma, vma->vm_start, pfn,
			       size, vma->vm_page_prot);
}
static int xsk_notifier(struct notifier_block *this,
			unsigned long msg, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct net *net = dev_net(dev);
	struct sock *sk;

	switch (msg) {
	case NETDEV_UNREGISTER:
		mutex_lock(&net->xdp.lock);
		sk_for_each(sk, &net->xdp.list) {
			struct xdp_sock *xs = xdp_sk(sk);

			mutex_lock(&xs->mutex);
			if (xs->dev == dev) {
				sk->sk_err = ENETDOWN;
				if (!sock_flag(sk, SOCK_DEAD))
					sk->sk_error_report(sk);

				xsk_unbind_dev(xs);

				/* Clear device references in umem. */
				xdp_umem_clear_dev(xs->umem);
			}
			mutex_unlock(&xs->mutex);
		}
		mutex_unlock(&net->xdp.lock);
		break;
	}
	return NOTIFY_DONE;
}
static struct proto xsk_proto = {
	.name =		"XDP",
	.owner =	THIS_MODULE,
	.obj_size =	sizeof(struct xdp_sock),
};

static const struct proto_ops xsk_proto_ops = {
	.family		= PF_XDP,
	.owner		= THIS_MODULE,
	.release	= xsk_release,
	.bind		= xsk_bind,
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.getname	= sock_no_getname,
	.poll		= xsk_poll,
	.ioctl		= sock_no_ioctl,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= xsk_setsockopt,
	.getsockopt	= xsk_getsockopt,
	.sendmsg	= xsk_sendmsg,
	.recvmsg	= sock_no_recvmsg,
	.mmap		= xsk_mmap,
	.sendpage	= sock_no_sendpage,
};
static void xsk_destruct(struct sock *sk)
{
	struct xdp_sock *xs = xdp_sk(sk);

	if (!sock_flag(sk, SOCK_DEAD))
		return;

	xdp_put_umem(xs->umem);

	sk_refcnt_debug_dec(sk);
}
static int xsk_create(struct net *net, struct socket *sock, int protocol,
		      int kern)
{
	struct sock *sk;
	struct xdp_sock *xs;

	if (!ns_capable(net->user_ns, CAP_NET_RAW))
		return -EPERM;
	if (sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	if (protocol)
		return -EPROTONOSUPPORT;

	sock->state = SS_UNCONNECTED;

	sk = sk_alloc(net, PF_XDP, GFP_KERNEL, &xsk_proto, kern);
	if (!sk)
		return -ENOBUFS;

	sock->ops = &xsk_proto_ops;

	sock_init_data(sock, sk);

	sk->sk_family = PF_XDP;

	sk->sk_destruct = xsk_destruct;
	sk_refcnt_debug_inc(sk);

	sock_set_flag(sk, SOCK_RCU_FREE);

	xs = xdp_sk(sk);
	xs->state = XSK_READY;
	mutex_init(&xs->mutex);
	spin_lock_init(&xs->tx_completion_lock);

	mutex_lock(&net->xdp.lock);
	sk_add_node_rcu(sk, &net->xdp.list);
	mutex_unlock(&net->xdp.lock);

	local_bh_disable();
	sock_prot_inuse_add(net, &xsk_proto, 1);
	local_bh_enable();

	return 0;
}
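
/* Note on lifetime: SOCK_RCU_FREE defers freeing of the sock until an RCU
 * grace period has passed, which lets XDP programs that looked the socket
 * up in an xskmap finish their datapath access safely. The per-netns
 * xdp.list populated here is what xsk_notifier() walks on
 * NETDEV_UNREGISTER.
 */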
static const struct net_proto_family xsk_family_ops = {
	.family = PF_XDP,
	.create = xsk_create,
	.owner	= THIS_MODULE,
};

static struct notifier_block xsk_netdev_notifier = {
	.notifier_call = xsk_notifier,
};
static int __net_init xsk_net_init(struct net *net)
{
	mutex_init(&net->xdp.lock);
	INIT_HLIST_HEAD(&net->xdp.list);
	return 0;
}

static void __net_exit xsk_net_exit(struct net *net)
{
	WARN_ON_ONCE(!hlist_empty(&net->xdp.list));
}

static struct pernet_operations xsk_net_ops = {
	.init = xsk_net_init,
	.exit = xsk_net_exit,
};
static int __init xsk_init(void)
{
	int err;

	err = proto_register(&xsk_proto, 0 /* no slab */);
	if (err)
		goto out;

	err = sock_register(&xsk_family_ops);
	if (err)
		goto out_proto;

	err = register_pernet_subsys(&xsk_net_ops);
	if (err)
		goto out_sk;

	err = register_netdevice_notifier(&xsk_netdev_notifier);
	if (err)
		goto out_pernet;

	return 0;

out_pernet:
	unregister_pernet_subsys(&xsk_net_ops);
out_sk:
	sock_unregister(PF_XDP);
out_proto:
	proto_unregister(&xsk_proto);
out:
	return err;
}

fs_initcall(xsk_init);