Commit | Line | Data |
---|---|---|
351e1581 HZ |
1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | /* Copyright (c) 2019, Microsoft Corporation. | |
3 | * | |
4 | * Author: | |
5 | * Haiyang Zhang <haiyangz@microsoft.com> | |
6 | */ | |
7 | ||
8 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | |
9 | ||
10 | #include <linux/netdevice.h> | |
11 | #include <linux/etherdevice.h> | |
12 | #include <linux/ethtool.h> | |
13 | #include <linux/bpf.h> | |
14 | #include <linux/bpf_trace.h> | |
15 | #include <linux/kernel.h> | |
16 | #include <net/xdp.h> | |
17 | ||
18 | #include <linux/mutex.h> | |
19 | #include <linux/rtnetlink.h> | |
20 | ||
21 | #include "hyperv_net.h" | |
22 | ||
23 | u32 netvsc_run_xdp(struct net_device *ndev, struct netvsc_channel *nvchan, | |
24 | struct xdp_buff *xdp) | |
25 | { | |
26 | void *data = nvchan->rsc.data[0]; | |
27 | u32 len = nvchan->rsc.len[0]; | |
28 | struct page *page = NULL; | |
29 | struct bpf_prog *prog; | |
30 | u32 act = XDP_PASS; | |
31 | ||
32 | xdp->data_hard_start = NULL; | |
33 | ||
34 | rcu_read_lock(); | |
35 | prog = rcu_dereference(nvchan->bpf_prog); | |
36 | ||
37 | if (!prog) | |
38 | goto out; | |
39 | ||
40 | /* allocate page buffer for data */ | |
41 | page = alloc_page(GFP_ATOMIC); | |
42 | if (!page) { | |
43 | act = XDP_DROP; | |
44 | goto out; | |
45 | } | |
46 | ||
43b5169d | 47 | xdp_init_buff(xdp, PAGE_SIZE, &nvchan->xdp_rxq); |
351e1581 HZ |
48 | xdp->data_hard_start = page_address(page); |
49 | xdp->data = xdp->data_hard_start + NETVSC_XDP_HDRM; | |
50 | xdp_set_data_meta_invalid(xdp); | |
51 | xdp->data_end = xdp->data + len; | |
351e1581 HZ |
52 | |
53 | memcpy(xdp->data, data, len); | |
54 | ||
55 | act = bpf_prog_run_xdp(prog, xdp); | |
56 | ||
57 | switch (act) { | |
58 | case XDP_PASS: | |
59 | case XDP_TX: | |
60 | case XDP_DROP: | |
61 | break; | |
62 | ||
63 | case XDP_ABORTED: | |
64 | trace_xdp_exception(ndev, prog, act); | |
65 | break; | |
66 | ||
67 | default: | |
68 | bpf_warn_invalid_xdp_action(act); | |
69 | } | |
70 | ||
71 | out: | |
72 | rcu_read_unlock(); | |
73 | ||
74 | if (page && act != XDP_PASS && act != XDP_TX) { | |
75 | __free_page(page); | |
76 | xdp->data_hard_start = NULL; | |
77 | } | |
78 | ||
79 | return act; | |
80 | } | |
81 | ||
82 | unsigned int netvsc_xdp_fraglen(unsigned int len) | |
83 | { | |
84 | return SKB_DATA_ALIGN(len) + | |
85 | SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); | |
86 | } | |
87 | ||
88 | struct bpf_prog *netvsc_xdp_get(struct netvsc_device *nvdev) | |
89 | { | |
90 | return rtnl_dereference(nvdev->chan_table[0].bpf_prog); | |
91 | } | |
92 | ||
93 | int netvsc_xdp_set(struct net_device *dev, struct bpf_prog *prog, | |
94 | struct netlink_ext_ack *extack, | |
95 | struct netvsc_device *nvdev) | |
96 | { | |
97 | struct bpf_prog *old_prog; | |
98 | int buf_max, i; | |
99 | ||
100 | old_prog = netvsc_xdp_get(nvdev); | |
101 | ||
102 | if (!old_prog && !prog) | |
103 | return 0; | |
104 | ||
105 | buf_max = NETVSC_XDP_HDRM + netvsc_xdp_fraglen(dev->mtu + ETH_HLEN); | |
106 | if (prog && buf_max > PAGE_SIZE) { | |
107 | netdev_err(dev, "XDP: mtu:%u too large, buf_max:%u\n", | |
108 | dev->mtu, buf_max); | |
109 | NL_SET_ERR_MSG_MOD(extack, "XDP: mtu too large"); | |
110 | ||
111 | return -EOPNOTSUPP; | |
112 | } | |
113 | ||
114 | if (prog && (dev->features & NETIF_F_LRO)) { | |
115 | netdev_err(dev, "XDP: not support LRO\n"); | |
116 | NL_SET_ERR_MSG_MOD(extack, "XDP: not support LRO"); | |
117 | ||
118 | return -EOPNOTSUPP; | |
119 | } | |
120 | ||
121 | if (prog) | |
184367dc | 122 | bpf_prog_add(prog, nvdev->num_chn - 1); |
351e1581 HZ |
123 | |
124 | for (i = 0; i < nvdev->num_chn; i++) | |
125 | rcu_assign_pointer(nvdev->chan_table[i].bpf_prog, prog); | |
126 | ||
127 | if (old_prog) | |
128 | for (i = 0; i < nvdev->num_chn; i++) | |
129 | bpf_prog_put(old_prog); | |
130 | ||
131 | return 0; | |
132 | } | |
133 | ||
134 | int netvsc_vf_setxdp(struct net_device *vf_netdev, struct bpf_prog *prog) | |
135 | { | |
136 | struct netdev_bpf xdp; | |
137 | bpf_op_t ndo_bpf; | |
184367dc | 138 | int ret; |
351e1581 HZ |
139 | |
140 | ASSERT_RTNL(); | |
141 | ||
142 | if (!vf_netdev) | |
143 | return 0; | |
144 | ||
145 | ndo_bpf = vf_netdev->netdev_ops->ndo_bpf; | |
146 | if (!ndo_bpf) | |
147 | return 0; | |
148 | ||
149 | memset(&xdp, 0, sizeof(xdp)); | |
150 | ||
184367dc HZ |
151 | if (prog) |
152 | bpf_prog_inc(prog); | |
153 | ||
351e1581 HZ |
154 | xdp.command = XDP_SETUP_PROG; |
155 | xdp.prog = prog; | |
156 | ||
184367dc HZ |
157 | ret = ndo_bpf(vf_netdev, &xdp); |
158 | ||
159 | if (ret && prog) | |
160 | bpf_prog_put(prog); | |
161 | ||
162 | return ret; | |
351e1581 HZ |
163 | } |
164 | ||
351e1581 HZ |
165 | int netvsc_bpf(struct net_device *dev, struct netdev_bpf *bpf) |
166 | { | |
167 | struct net_device_context *ndevctx = netdev_priv(dev); | |
168 | struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev); | |
169 | struct net_device *vf_netdev = rtnl_dereference(ndevctx->vf_netdev); | |
170 | struct netlink_ext_ack *extack = bpf->extack; | |
171 | int ret; | |
172 | ||
173 | if (!nvdev || nvdev->destroy) { | |
e8407fde | 174 | return -ENODEV; |
351e1581 HZ |
175 | } |
176 | ||
177 | switch (bpf->command) { | |
178 | case XDP_SETUP_PROG: | |
179 | ret = netvsc_xdp_set(dev, bpf->prog, extack, nvdev); | |
180 | ||
181 | if (ret) | |
182 | return ret; | |
183 | ||
184 | ret = netvsc_vf_setxdp(vf_netdev, bpf->prog); | |
185 | ||
186 | if (ret) { | |
187 | netdev_err(dev, "vf_setxdp failed:%d\n", ret); | |
188 | NL_SET_ERR_MSG_MOD(extack, "vf_setxdp failed"); | |
189 | ||
190 | netvsc_xdp_set(dev, NULL, extack, nvdev); | |
191 | } | |
192 | ||
193 | return ret; | |
194 | ||
351e1581 HZ |
195 | default: |
196 | return -EINVAL; | |
197 | } | |
198 | } |