// SPDX-License-Identifier: GPL-2.0
/* XDP sockets
 *
 * AF_XDP sockets allow a channel between XDP programs and userspace
 * applications.
 * Copyright(c) 2018 Intel Corporation.
 *
 * Author(s): Björn Töpel <bjorn.topel@intel.com>
 *            Magnus Karlsson <magnus.karlsson@intel.com>
 */

#define pr_fmt(fmt) "AF_XDP: %s: " fmt, __func__

#include <linux/if_xdp.h>
#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/socket.h>
#include <linux/file.h>
#include <linux/uaccess.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/rculist.h>
#include <net/xdp_sock.h>
#include <net/xdp.h>

#include "xsk_queue.h"
#include "xdp_umem.h"
#include "xsk.h"

#define TX_BATCH_SIZE 16

static DEFINE_PER_CPU(struct list_head, xskmap_flush_list);

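/* A socket entered into an XSKMAP must be fully set up for the Rx path:
 * both an Rx ring and a umem with a fill ring are required before an XDP
 * program may redirect packets to it.
 */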
bool xsk_is_setup_for_bpf_map(struct xdp_sock *xs)
{
        return READ_ONCE(xs->rx) && READ_ONCE(xs->umem) &&
                READ_ONCE(xs->umem->fq);
}

bool xsk_umem_has_addrs(struct xdp_umem *umem, u32 cnt)
{
        return xskq_cons_has_entries(umem->fq, cnt);
}
EXPORT_SYMBOL(xsk_umem_has_addrs);

bool xsk_umem_peek_addr(struct xdp_umem *umem, u64 *addr)
{
        return xskq_cons_peek_addr(umem->fq, addr, umem);
}
EXPORT_SYMBOL(xsk_umem_peek_addr);

void xsk_umem_release_addr(struct xdp_umem *umem)
{
        xskq_cons_release(umem->fq);
}
EXPORT_SYMBOL(xsk_umem_release_addr);

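/* The need_wakeup flags below are mirrored into the ring flags words so
 * that userspace can tell, without a syscall, whether it must kick the
 * kernel via sendmsg() or poll(). Rx wakeups are signalled on the fill
 * ring, Tx wakeups on every Tx ring sharing the umem.
 */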
void xsk_set_rx_need_wakeup(struct xdp_umem *umem)
{
        if (umem->need_wakeup & XDP_WAKEUP_RX)
                return;

        umem->fq->ring->flags |= XDP_RING_NEED_WAKEUP;
        umem->need_wakeup |= XDP_WAKEUP_RX;
}
EXPORT_SYMBOL(xsk_set_rx_need_wakeup);

void xsk_set_tx_need_wakeup(struct xdp_umem *umem)
{
        struct xdp_sock *xs;

        if (umem->need_wakeup & XDP_WAKEUP_TX)
                return;

        rcu_read_lock();
        list_for_each_entry_rcu(xs, &umem->xsk_list, list) {
                xs->tx->ring->flags |= XDP_RING_NEED_WAKEUP;
        }
        rcu_read_unlock();

        umem->need_wakeup |= XDP_WAKEUP_TX;
}
EXPORT_SYMBOL(xsk_set_tx_need_wakeup);

void xsk_clear_rx_need_wakeup(struct xdp_umem *umem)
{
        if (!(umem->need_wakeup & XDP_WAKEUP_RX))
                return;

        umem->fq->ring->flags &= ~XDP_RING_NEED_WAKEUP;
        umem->need_wakeup &= ~XDP_WAKEUP_RX;
}
EXPORT_SYMBOL(xsk_clear_rx_need_wakeup);

void xsk_clear_tx_need_wakeup(struct xdp_umem *umem)
{
        struct xdp_sock *xs;

        if (!(umem->need_wakeup & XDP_WAKEUP_TX))
                return;

        rcu_read_lock();
        list_for_each_entry_rcu(xs, &umem->xsk_list, list) {
                xs->tx->ring->flags &= ~XDP_RING_NEED_WAKEUP;
        }
        rcu_read_unlock();

        umem->need_wakeup &= ~XDP_WAKEUP_TX;
}
EXPORT_SYMBOL(xsk_clear_tx_need_wakeup);

bool xsk_umem_uses_need_wakeup(struct xdp_umem *umem)
{
        return umem->flags & XDP_UMEM_USES_NEED_WAKEUP;
}
EXPORT_SYMBOL(xsk_umem_uses_need_wakeup);

/* If a buffer crosses a page boundary, we need to do 2 memcpy's, one for
 * each page. This is only required in copy mode.
 */
static void __xsk_rcv_memcpy(struct xdp_umem *umem, u64 addr, void *from_buf,
                             u32 len, u32 metalen)
{
        void *to_buf = xdp_umem_get_data(umem, addr);

        addr = xsk_umem_add_offset_to_addr(addr);
        if (xskq_cons_crosses_non_contig_pg(umem, addr, len + metalen)) {
                void *next_pg_addr = umem->pages[(addr >> PAGE_SHIFT) + 1].addr;
                u64 page_start = addr & ~(PAGE_SIZE - 1);
                u64 first_len = PAGE_SIZE - (addr - page_start);

                memcpy(to_buf, from_buf, first_len + metalen);
                memcpy(next_pg_addr, from_buf + first_len, len - first_len);

                return;
        }

        memcpy(to_buf, from_buf, len + metalen);
}

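/* Copy-mode Rx: grab a buffer from the fill ring, copy the frame (plus any
 * metadata preceding xdp->data) into it and post a descriptor on the Rx
 * ring. The xdp_buff is returned to its allocator on success.
 */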
static int __xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
{
        u64 offset = xs->umem->headroom;
        u64 addr, memcpy_addr;
        void *from_buf;
        u32 metalen;
        int err;

        if (!xskq_cons_peek_addr(xs->umem->fq, &addr, xs->umem) ||
            len > xs->umem->chunk_size_nohr - XDP_PACKET_HEADROOM) {
                xs->rx_dropped++;
                return -ENOSPC;
        }

        if (unlikely(xdp_data_meta_unsupported(xdp))) {
                from_buf = xdp->data;
                metalen = 0;
        } else {
                from_buf = xdp->data_meta;
                metalen = xdp->data - xdp->data_meta;
        }

        memcpy_addr = xsk_umem_adjust_offset(xs->umem, addr, offset);
        __xsk_rcv_memcpy(xs->umem, memcpy_addr, from_buf, len, metalen);

        offset += metalen;
        addr = xsk_umem_adjust_offset(xs->umem, addr, offset);
        err = xskq_prod_reserve_desc(xs->rx, addr, len);
        if (!err) {
                xskq_cons_release(xs->umem->fq);
                xdp_return_buff(xdp);
                return 0;
        }

        xs->rx_dropped++;
        return err;
}

static int __xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
{
        int err = xskq_prod_reserve_desc(xs->rx, xdp->handle, len);

        if (err)
                xs->rx_dropped++;

        return err;
}

static bool xsk_is_bound(struct xdp_sock *xs)
{
        if (READ_ONCE(xs->state) == XSK_BOUND) {
                /* Matches smp_wmb() in bind(). */
                smp_rmb();
                return true;
        }
        return false;
}

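/* Common Rx entry point for the redirect path: validate that the socket is
 * bound to this device and queue, then hand the frame to the zero-copy or
 * copy receive routine depending on the memory type of the Rx queue.
 */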
static int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
        u32 len;

        if (!xsk_is_bound(xs))
                return -EINVAL;

        if (xs->dev != xdp->rxq->dev || xs->queue_id != xdp->rxq->queue_index)
                return -EINVAL;

        len = xdp->data_end - xdp->data;

        return (xdp->rxq->mem.type == MEM_TYPE_ZERO_COPY) ?
                __xsk_rcv_zc(xs, xdp, len) : __xsk_rcv(xs, xdp, len);
}

static void xsk_flush(struct xdp_sock *xs)
{
        xskq_prod_submit(xs->rx);
        __xskq_cons_release(xs->umem->fq);
        sock_def_readable(&xs->sk);
}

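/* Rx entry point for generic (skb-based) XDP. This path is not serialized
 * by NAPI the way the native path is, so rx_lock protects the producer
 * side, and the Rx ring is submitted immediately rather than from a flush.
 */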
int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
        u32 metalen = xdp->data - xdp->data_meta;
        u32 len = xdp->data_end - xdp->data;
        u64 offset = xs->umem->headroom;
        void *buffer;
        u64 addr;
        int err;

        spin_lock_bh(&xs->rx_lock);

        if (xs->dev != xdp->rxq->dev || xs->queue_id != xdp->rxq->queue_index) {
                err = -EINVAL;
                goto out_unlock;
        }

        if (!xskq_cons_peek_addr(xs->umem->fq, &addr, xs->umem) ||
            len > xs->umem->chunk_size_nohr - XDP_PACKET_HEADROOM) {
                err = -ENOSPC;
                goto out_drop;
        }

        addr = xsk_umem_adjust_offset(xs->umem, addr, offset);
        buffer = xdp_umem_get_data(xs->umem, addr);
        memcpy(buffer, xdp->data_meta, len + metalen);

        addr = xsk_umem_adjust_offset(xs->umem, addr, metalen);
        err = xskq_prod_reserve_desc(xs->rx, addr, len);
        if (err)
                goto out_drop;

        xskq_cons_release(xs->umem->fq);
        xskq_prod_submit(xs->rx);

        spin_unlock_bh(&xs->rx_lock);

        xs->sk.sk_data_ready(&xs->sk);
        return 0;

out_drop:
        xs->rx_dropped++;
out_unlock:
        spin_unlock_bh(&xs->rx_lock);
        return err;
}

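/* Called from the XSKMAP redirect path. Received frames are only reserved
 * on the Rx ring here; the socket is queued on a per-cpu flush list and
 * the ring is made visible to userspace in __xsk_map_flush() once the
 * redirect batch completes.
 */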
int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp)
{
        struct list_head *flush_list = this_cpu_ptr(&xskmap_flush_list);
        int err;

        err = xsk_rcv(xs, xdp);
        if (err)
                return err;

        if (!xs->flush_node.prev)
                list_add(&xs->flush_node, flush_list);

        return 0;
}

void __xsk_map_flush(void)
{
        struct list_head *flush_list = this_cpu_ptr(&xskmap_flush_list);
        struct xdp_sock *xs, *tmp;

        list_for_each_entry_safe(xs, tmp, flush_list, flush_node) {
                xsk_flush(xs);
                __list_del_clearprev(&xs->flush_node);
        }
}

void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries)
{
        xskq_prod_submit_n(umem->cq, nb_entries);
}
EXPORT_SYMBOL(xsk_umem_complete_tx);

void xsk_umem_consume_tx_done(struct xdp_umem *umem)
{
        struct xdp_sock *xs;

        rcu_read_lock();
        list_for_each_entry_rcu(xs, &umem->xsk_list, list) {
                __xskq_cons_release(xs->tx);
                xs->sk.sk_write_space(&xs->sk);
        }
        rcu_read_unlock();
}
EXPORT_SYMBOL(xsk_umem_consume_tx_done);

bool xsk_umem_consume_tx(struct xdp_umem *umem, struct xdp_desc *desc)
{
        struct xdp_sock *xs;

        rcu_read_lock();
        list_for_each_entry_rcu(xs, &umem->xsk_list, list) {
                if (!xskq_cons_peek_desc(xs->tx, desc, umem))
                        continue;

                /* This is the backpressure mechanism for the Tx path.
                 * Reserve space in the completion queue and only proceed
                 * if there is space in it. This avoids having to implement
                 * any buffering in the Tx path.
                 */
                if (xskq_prod_reserve_addr(umem->cq, desc->addr))
                        goto out;

                xskq_cons_release(xs->tx);
                rcu_read_unlock();
                return true;
        }

out:
        rcu_read_unlock();
        return false;
}
EXPORT_SYMBOL(xsk_umem_consume_tx);

static int xsk_wakeup(struct xdp_sock *xs, u8 flags)
{
        struct net_device *dev = xs->dev;
        int err;

        rcu_read_lock();
        err = dev->netdev_ops->ndo_xsk_wakeup(dev, xs->queue_id, flags);
        rcu_read_unlock();

        return err;
}

static int xsk_zc_xmit(struct xdp_sock *xs)
{
        return xsk_wakeup(xs, XDP_WAKEUP_TX);
}

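/* skb destructor for copy-mode Tx: once the stack is done with the skb,
 * complete the descriptor by putting its address on the completion ring.
 * The space was already reserved in xsk_generic_xmit().
 */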
static void xsk_destruct_skb(struct sk_buff *skb)
{
        u64 addr = (u64)(long)skb_shinfo(skb)->destructor_arg;
        struct xdp_sock *xs = xdp_sk(skb->sk);
        unsigned long flags;

        spin_lock_irqsave(&xs->tx_completion_lock, flags);
        xskq_prod_submit_addr(xs->umem->cq, addr);
        spin_unlock_irqrestore(&xs->tx_completion_lock, flags);

        sock_wfree(skb);
}

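/* Copy-mode Tx: drain up to TX_BATCH_SIZE descriptors from the Tx ring,
 * copy each frame into a freshly allocated skb and hand it straight to
 * the driver with dev_direct_xmit().
 */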
static int xsk_generic_xmit(struct sock *sk)
{
        struct xdp_sock *xs = xdp_sk(sk);
        u32 max_batch = TX_BATCH_SIZE;
        bool sent_frame = false;
        struct xdp_desc desc;
        struct sk_buff *skb;
        int err = 0;

        mutex_lock(&xs->mutex);

        if (xs->queue_id >= xs->dev->real_num_tx_queues)
                goto out;

        while (xskq_cons_peek_desc(xs->tx, &desc, xs->umem)) {
                char *buffer;
                u64 addr;
                u32 len;

                if (max_batch-- == 0) {
                        err = -EAGAIN;
                        goto out;
                }

                len = desc.len;
                skb = sock_alloc_send_skb(sk, len, 1, &err);
                if (unlikely(!skb)) {
                        err = -EAGAIN;
                        goto out;
                }

                skb_put(skb, len);
                addr = desc.addr;
                buffer = xdp_umem_get_data(xs->umem, addr);
                err = skb_store_bits(skb, 0, buffer, len);
                /* This is the backpressure mechanism for the Tx path.
                 * Reserve space in the completion queue and only proceed
                 * if there is space in it. This avoids having to implement
                 * any buffering in the Tx path.
                 */
                if (unlikely(err) || xskq_prod_reserve(xs->umem->cq)) {
                        kfree_skb(skb);
                        goto out;
                }

                skb->dev = xs->dev;
                skb->priority = sk->sk_priority;
                skb->mark = sk->sk_mark;
                skb_shinfo(skb)->destructor_arg = (void *)(long)desc.addr;
                skb->destructor = xsk_destruct_skb;

                err = dev_direct_xmit(skb, xs->queue_id);
                xskq_cons_release(xs->tx);
                /* Ignore NET_XMIT_CN as packet might have been sent */
                if (err == NET_XMIT_DROP || err == NETDEV_TX_BUSY) {
                        /* SKB completed but not sent */
                        err = -EBUSY;
                        goto out;
                }

                sent_frame = true;
        }

out:
        if (sent_frame)
                sk->sk_write_space(sk);

        mutex_unlock(&xs->mutex);
        return err;
}

static int __xsk_sendmsg(struct sock *sk)
{
        struct xdp_sock *xs = xdp_sk(sk);

        if (unlikely(!(xs->dev->flags & IFF_UP)))
                return -ENETDOWN;
        if (unlikely(!xs->tx))
                return -ENOBUFS;

        return xs->zc ? xsk_zc_xmit(xs) : xsk_generic_xmit(sk);
}

static int xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
{
        bool need_wait = !(m->msg_flags & MSG_DONTWAIT);
        struct sock *sk = sock->sk;
        struct xdp_sock *xs = xdp_sk(sk);

        if (unlikely(!xsk_is_bound(xs)))
                return -ENXIO;
        if (unlikely(need_wait))
                return -EOPNOTSUPP;

        return __xsk_sendmsg(sk);
}

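/* poll() also acts as a kick: when the driver has asked for a wakeup, a
 * zero-copy socket pokes the driver and a copy-mode socket drives the Tx
 * path itself, before the ring states are translated into poll flags.
 */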
static __poll_t xsk_poll(struct file *file, struct socket *sock,
                         struct poll_table_struct *wait)
{
        __poll_t mask = datagram_poll(file, sock, wait);
        struct sock *sk = sock->sk;
        struct xdp_sock *xs = xdp_sk(sk);
        struct xdp_umem *umem;

        if (unlikely(!xsk_is_bound(xs)))
                return mask;

        umem = xs->umem;

        if (umem->need_wakeup) {
                if (xs->zc)
                        xsk_wakeup(xs, umem->need_wakeup);
                else
                        /* Poll needs to drive Tx also in copy mode */
                        __xsk_sendmsg(sk);
        }

        if (xs->rx && !xskq_prod_is_empty(xs->rx))
                mask |= EPOLLIN | EPOLLRDNORM;
        if (xs->tx && !xskq_cons_is_full(xs->tx))
                mask |= EPOLLOUT | EPOLLWRNORM;

        return mask;
}

static int xsk_init_queue(u32 entries, struct xsk_queue **queue,
                          bool umem_queue)
{
        struct xsk_queue *q;

        if (entries == 0 || *queue || !is_power_of_2(entries))
                return -EINVAL;

        q = xskq_create(entries, umem_queue);
        if (!q)
                return -ENOMEM;

        /* Make sure queue is ready before it can be seen by others */
        smp_wmb();
        WRITE_ONCE(*queue, q);
        return 0;
}

static void xsk_unbind_dev(struct xdp_sock *xs)
{
        struct net_device *dev = xs->dev;

        if (xs->state != XSK_BOUND)
                return;
        WRITE_ONCE(xs->state, XSK_UNBOUND);

        /* Wait for driver to stop using the xdp socket. */
        xdp_del_sk_umem(xs->umem, xs);
        xs->dev = NULL;
        synchronize_net();
        dev_put(dev);
}

static struct xsk_map *xsk_get_map_list_entry(struct xdp_sock *xs,
                                              struct xdp_sock ***map_entry)
{
        struct xsk_map *map = NULL;
        struct xsk_map_node *node;

        *map_entry = NULL;

        spin_lock_bh(&xs->map_list_lock);
        node = list_first_entry_or_null(&xs->map_list, struct xsk_map_node,
                                        node);
        if (node) {
                WARN_ON(xsk_map_inc(node->map));
                map = node->map;
                *map_entry = node->map_entry;
        }
        spin_unlock_bh(&xs->map_list_lock);
        return map;
}

static void xsk_delete_from_maps(struct xdp_sock *xs)
{
        /* This function removes the current XDP socket from all the
         * maps it resides in. We need to take extra care here, due to
         * the two locks involved. Each map has a lock synchronizing
         * updates to the entries, and each socket has a lock that
         * synchronizes access to the list of maps (map_list). For
         * deadlock avoidance the locks need to be taken in the order
         * "map lock"->"socket map list lock". We start off by
         * accessing the socket map list, and take a reference to the
         * map to guarantee existence between the
         * xsk_get_map_list_entry() and xsk_map_try_sock_delete()
         * calls. Then we ask the map to remove the socket, which
         * tries to remove the socket from the map. Note that there
         * might be updates to the map between
         * xsk_get_map_list_entry() and xsk_map_try_sock_delete().
         */
        struct xdp_sock **map_entry = NULL;
        struct xsk_map *map;

        while ((map = xsk_get_map_list_entry(xs, &map_entry))) {
                xsk_map_try_sock_delete(map, xs, map_entry);
                xsk_map_put(map);
        }
}

static int xsk_release(struct socket *sock)
{
        struct sock *sk = sock->sk;
        struct xdp_sock *xs = xdp_sk(sk);
        struct net *net;

        if (!sk)
                return 0;

        net = sock_net(sk);

        mutex_lock(&net->xdp.lock);
        sk_del_node_init_rcu(sk);
        mutex_unlock(&net->xdp.lock);

        local_bh_disable();
        sock_prot_inuse_add(net, sk->sk_prot, -1);
        local_bh_enable();

        xsk_delete_from_maps(xs);
        mutex_lock(&xs->mutex);
        xsk_unbind_dev(xs);
        mutex_unlock(&xs->mutex);

        xskq_destroy(xs->rx);
        xskq_destroy(xs->tx);

        sock_orphan(sk);
        sock->sk = NULL;

        sk_refcnt_debug_release(sk);
        sock_put(sk);

        return 0;
}

static struct socket *xsk_lookup_xsk_from_fd(int fd)
{
        struct socket *sock;
        int err;

        sock = sockfd_lookup(fd, &err);
        if (!sock)
                return ERR_PTR(-ENOTSOCK);

        if (sock->sk->sk_family != PF_XDP) {
                sockfd_put(sock);
                return ERR_PTR(-ENOPROTOOPT);
        }

        return sock;
}

/* Check if umem pages are contiguous.
 * In zero-copy mode, use the DMA address to do the page contiguity check.
 * For all other modes we use addr (kernel virtual address).
 * Store the result in the low bits of addr.
 */
static void xsk_check_page_contiguity(struct xdp_umem *umem, u32 flags)
{
        struct xdp_umem_page *pgs = umem->pages;
        int i, is_contig;

        for (i = 0; i < umem->npgs - 1; i++) {
                is_contig = (flags & XDP_ZEROCOPY) ?
                        (pgs[i].dma + PAGE_SIZE == pgs[i + 1].dma) :
                        (pgs[i].addr + PAGE_SIZE == pgs[i + 1].addr);
                pgs[i].addr += is_contig << XSK_NEXT_PG_CONTIG_SHIFT;
        }
}

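/* Bind the socket to a device and queue id. With XDP_SHARED_UMEM the umem
 * (including its fill and completion rings) is borrowed from an already
 * bound socket on the same <dev, queue> pair; otherwise the socket's own
 * umem is assigned to the device.
 */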
static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
{
        struct sockaddr_xdp *sxdp = (struct sockaddr_xdp *)addr;
        struct sock *sk = sock->sk;
        struct xdp_sock *xs = xdp_sk(sk);
        struct net_device *dev;
        u32 flags, qid;
        int err = 0;

        if (addr_len < sizeof(struct sockaddr_xdp))
                return -EINVAL;
        if (sxdp->sxdp_family != AF_XDP)
                return -EINVAL;

        flags = sxdp->sxdp_flags;
        if (flags & ~(XDP_SHARED_UMEM | XDP_COPY | XDP_ZEROCOPY |
                      XDP_USE_NEED_WAKEUP))
                return -EINVAL;

        rtnl_lock();
        mutex_lock(&xs->mutex);
        if (xs->state != XSK_READY) {
                err = -EBUSY;
                goto out_release;
        }

        dev = dev_get_by_index(sock_net(sk), sxdp->sxdp_ifindex);
        if (!dev) {
                err = -ENODEV;
                goto out_release;
        }

        if (!xs->rx && !xs->tx) {
                err = -EINVAL;
                goto out_unlock;
        }

        qid = sxdp->sxdp_queue_id;

        if (flags & XDP_SHARED_UMEM) {
                struct xdp_sock *umem_xs;
                struct socket *sock;

                if ((flags & XDP_COPY) || (flags & XDP_ZEROCOPY) ||
                    (flags & XDP_USE_NEED_WAKEUP)) {
                        /* Cannot specify flags for shared sockets. */
                        err = -EINVAL;
                        goto out_unlock;
                }

                if (xs->umem) {
                        /* We already have our own. */
                        err = -EINVAL;
                        goto out_unlock;
                }

                sock = xsk_lookup_xsk_from_fd(sxdp->sxdp_shared_umem_fd);
                if (IS_ERR(sock)) {
                        err = PTR_ERR(sock);
                        goto out_unlock;
                }

                umem_xs = xdp_sk(sock->sk);
                if (!xsk_is_bound(umem_xs)) {
                        err = -EBADF;
                        sockfd_put(sock);
                        goto out_unlock;
                }
                if (umem_xs->dev != dev || umem_xs->queue_id != qid) {
                        err = -EINVAL;
                        sockfd_put(sock);
                        goto out_unlock;
                }

                xdp_get_umem(umem_xs->umem);
                WRITE_ONCE(xs->umem, umem_xs->umem);
                sockfd_put(sock);
        } else if (!xs->umem || !xdp_umem_validate_queues(xs->umem)) {
                err = -EINVAL;
                goto out_unlock;
        } else {
                /* This xsk has its own umem. */
                xskq_set_umem(xs->umem->fq, xs->umem->size,
                              xs->umem->chunk_mask);
                xskq_set_umem(xs->umem->cq, xs->umem->size,
                              xs->umem->chunk_mask);

                err = xdp_umem_assign_dev(xs->umem, dev, qid, flags);
                if (err)
                        goto out_unlock;

                xsk_check_page_contiguity(xs->umem, flags);
        }

        xs->dev = dev;
        xs->zc = xs->umem->zc;
        xs->queue_id = qid;
        xskq_set_umem(xs->rx, xs->umem->size, xs->umem->chunk_mask);
        xskq_set_umem(xs->tx, xs->umem->size, xs->umem->chunk_mask);
        xdp_add_sk_umem(xs->umem, xs);

out_unlock:
        if (err) {
                dev_put(dev);
        } else {
                /* Matches smp_rmb() in bind() for shared umem
                 * sockets, and xsk_is_bound().
                 */
                smp_wmb();
                WRITE_ONCE(xs->state, XSK_BOUND);
        }
out_release:
        mutex_unlock(&xs->mutex);
        rtnl_unlock();
        return err;
}

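/* Layout of struct xdp_umem_reg before the flags member was added, kept so
 * that XDP_UMEM_REG keeps working for applications compiled against the
 * old ABI.
 */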
struct xdp_umem_reg_v1 {
        __u64 addr; /* Start of packet data area */
        __u64 len; /* Length of packet data area */
        __u32 chunk_size;
        __u32 headroom;
};

static int xsk_setsockopt(struct socket *sock, int level, int optname,
                          char __user *optval, unsigned int optlen)
{
        struct sock *sk = sock->sk;
        struct xdp_sock *xs = xdp_sk(sk);
        int err;

        if (level != SOL_XDP)
                return -ENOPROTOOPT;

        switch (optname) {
        case XDP_RX_RING:
        case XDP_TX_RING:
        {
                struct xsk_queue **q;
                int entries;

                if (optlen < sizeof(entries))
                        return -EINVAL;
                if (copy_from_user(&entries, optval, sizeof(entries)))
                        return -EFAULT;

                mutex_lock(&xs->mutex);
                if (xs->state != XSK_READY) {
                        mutex_unlock(&xs->mutex);
                        return -EBUSY;
                }
                q = (optname == XDP_TX_RING) ? &xs->tx : &xs->rx;
                err = xsk_init_queue(entries, q, false);
                if (!err && optname == XDP_TX_RING)
                        /* Tx needs to be explicitly woken up the first time */
                        xs->tx->ring->flags |= XDP_RING_NEED_WAKEUP;
                mutex_unlock(&xs->mutex);
                return err;
        }
        case XDP_UMEM_REG:
        {
                size_t mr_size = sizeof(struct xdp_umem_reg);
                struct xdp_umem_reg mr = {};
                struct xdp_umem *umem;

                if (optlen < sizeof(struct xdp_umem_reg_v1))
                        return -EINVAL;
                else if (optlen < sizeof(mr))
                        mr_size = sizeof(struct xdp_umem_reg_v1);

                if (copy_from_user(&mr, optval, mr_size))
                        return -EFAULT;

                mutex_lock(&xs->mutex);
                if (xs->state != XSK_READY || xs->umem) {
                        mutex_unlock(&xs->mutex);
                        return -EBUSY;
                }

                umem = xdp_umem_create(&mr);
                if (IS_ERR(umem)) {
                        mutex_unlock(&xs->mutex);
                        return PTR_ERR(umem);
                }

                /* Make sure umem is ready before it can be seen by others */
                smp_wmb();
                WRITE_ONCE(xs->umem, umem);
                mutex_unlock(&xs->mutex);
                return 0;
        }
        case XDP_UMEM_FILL_RING:
        case XDP_UMEM_COMPLETION_RING:
        {
                struct xsk_queue **q;
                int entries;

                if (copy_from_user(&entries, optval, sizeof(entries)))
                        return -EFAULT;

                mutex_lock(&xs->mutex);
                if (xs->state != XSK_READY) {
                        mutex_unlock(&xs->mutex);
                        return -EBUSY;
                }
                if (!xs->umem) {
                        mutex_unlock(&xs->mutex);
                        return -EINVAL;
                }

                q = (optname == XDP_UMEM_FILL_RING) ? &xs->umem->fq :
                        &xs->umem->cq;
                err = xsk_init_queue(entries, q, true);
                mutex_unlock(&xs->mutex);
                return err;
        }
        default:
                break;
        }

        return -ENOPROTOOPT;
}

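/* Helpers filling in ring offsets in the layout expected by applications
 * compiled against struct xdp_mmap_offsets_v1, which lacks the flags
 * offset (see XDP_MMAP_OFFSETS below).
 */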
static void xsk_enter_rxtx_offsets(struct xdp_ring_offset_v1 *ring)
{
        ring->producer = offsetof(struct xdp_rxtx_ring, ptrs.producer);
        ring->consumer = offsetof(struct xdp_rxtx_ring, ptrs.consumer);
        ring->desc = offsetof(struct xdp_rxtx_ring, desc);
}

static void xsk_enter_umem_offsets(struct xdp_ring_offset_v1 *ring)
{
        ring->producer = offsetof(struct xdp_umem_ring, ptrs.producer);
        ring->consumer = offsetof(struct xdp_umem_ring, ptrs.consumer);
        ring->desc = offsetof(struct xdp_umem_ring, desc);
}

static int xsk_getsockopt(struct socket *sock, int level, int optname,
                          char __user *optval, int __user *optlen)
{
        struct sock *sk = sock->sk;
        struct xdp_sock *xs = xdp_sk(sk);
        int len;

        if (level != SOL_XDP)
                return -ENOPROTOOPT;

        if (get_user(len, optlen))
                return -EFAULT;
        if (len < 0)
                return -EINVAL;

        switch (optname) {
        case XDP_STATISTICS:
        {
                struct xdp_statistics stats;

                if (len < sizeof(stats))
                        return -EINVAL;

                mutex_lock(&xs->mutex);
                stats.rx_dropped = xs->rx_dropped;
                stats.rx_invalid_descs = xskq_nb_invalid_descs(xs->rx);
                stats.tx_invalid_descs = xskq_nb_invalid_descs(xs->tx);
                mutex_unlock(&xs->mutex);

                if (copy_to_user(optval, &stats, sizeof(stats)))
                        return -EFAULT;
                if (put_user(sizeof(stats), optlen))
                        return -EFAULT;

                return 0;
        }
        case XDP_MMAP_OFFSETS:
        {
                struct xdp_mmap_offsets off;
                struct xdp_mmap_offsets_v1 off_v1;
                bool flags_supported = true;
                void *to_copy;

                if (len < sizeof(off_v1))
                        return -EINVAL;
                else if (len < sizeof(off))
                        flags_supported = false;

                if (flags_supported) {
                        /* xdp_ring_offset is identical to xdp_ring_offset_v1
                         * except for the flags field added to the end.
                         */
                        xsk_enter_rxtx_offsets((struct xdp_ring_offset_v1 *)
                                               &off.rx);
                        xsk_enter_rxtx_offsets((struct xdp_ring_offset_v1 *)
                                               &off.tx);
                        xsk_enter_umem_offsets((struct xdp_ring_offset_v1 *)
                                               &off.fr);
                        xsk_enter_umem_offsets((struct xdp_ring_offset_v1 *)
                                               &off.cr);
                        off.rx.flags = offsetof(struct xdp_rxtx_ring,
                                                ptrs.flags);
                        off.tx.flags = offsetof(struct xdp_rxtx_ring,
                                                ptrs.flags);
                        off.fr.flags = offsetof(struct xdp_umem_ring,
                                                ptrs.flags);
                        off.cr.flags = offsetof(struct xdp_umem_ring,
                                                ptrs.flags);

                        len = sizeof(off);
                        to_copy = &off;
                } else {
                        xsk_enter_rxtx_offsets(&off_v1.rx);
                        xsk_enter_rxtx_offsets(&off_v1.tx);
                        xsk_enter_umem_offsets(&off_v1.fr);
                        xsk_enter_umem_offsets(&off_v1.cr);

                        len = sizeof(off_v1);
                        to_copy = &off_v1;
                }

                if (copy_to_user(optval, to_copy, len))
                        return -EFAULT;
                if (put_user(len, optlen))
                        return -EFAULT;

                return 0;
        }
        case XDP_OPTIONS:
        {
                struct xdp_options opts = {};

                if (len < sizeof(opts))
                        return -EINVAL;

                mutex_lock(&xs->mutex);
                if (xs->zc)
                        opts.flags |= XDP_OPTIONS_ZEROCOPY;
                mutex_unlock(&xs->mutex);

                len = sizeof(opts);
                if (copy_to_user(optval, &opts, len))
                        return -EFAULT;
                if (put_user(len, optlen))
                        return -EFAULT;

                return 0;
        }
        default:
                break;
        }

        return -EOPNOTSUPP;
}

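/* mmap() maps one of the four rings into userspace; the page offset
 * encodes which ring (Rx, Tx, fill or completion) is requested.
 */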
static int xsk_mmap(struct file *file, struct socket *sock,
                    struct vm_area_struct *vma)
{
        loff_t offset = (loff_t)vma->vm_pgoff << PAGE_SHIFT;
        unsigned long size = vma->vm_end - vma->vm_start;
        struct xdp_sock *xs = xdp_sk(sock->sk);
        struct xsk_queue *q = NULL;
        struct xdp_umem *umem;
        unsigned long pfn;
        struct page *qpg;

        if (READ_ONCE(xs->state) != XSK_READY)
                return -EBUSY;

        if (offset == XDP_PGOFF_RX_RING) {
                q = READ_ONCE(xs->rx);
        } else if (offset == XDP_PGOFF_TX_RING) {
                q = READ_ONCE(xs->tx);
        } else {
                umem = READ_ONCE(xs->umem);
                if (!umem)
                        return -EINVAL;

                /* Matches the smp_wmb() in XDP_UMEM_REG */
                smp_rmb();
                if (offset == XDP_UMEM_PGOFF_FILL_RING)
                        q = READ_ONCE(umem->fq);
                else if (offset == XDP_UMEM_PGOFF_COMPLETION_RING)
                        q = READ_ONCE(umem->cq);
        }

        if (!q)
                return -EINVAL;

        /* Matches the smp_wmb() in xsk_init_queue */
        smp_rmb();
        qpg = virt_to_head_page(q->ring);
        if (size > page_size(qpg))
                return -EINVAL;

        pfn = virt_to_phys(q->ring) >> PAGE_SHIFT;
        return remap_pfn_range(vma, vma->vm_start, pfn,
                               size, vma->vm_page_prot);
}

static int xsk_notifier(struct notifier_block *this,
                        unsigned long msg, void *ptr)
{
        struct net_device *dev = netdev_notifier_info_to_dev(ptr);
        struct net *net = dev_net(dev);
        struct sock *sk;

        switch (msg) {
        case NETDEV_UNREGISTER:
                mutex_lock(&net->xdp.lock);
                sk_for_each(sk, &net->xdp.list) {
                        struct xdp_sock *xs = xdp_sk(sk);

                        mutex_lock(&xs->mutex);
                        if (xs->dev == dev) {
                                sk->sk_err = ENETDOWN;
                                if (!sock_flag(sk, SOCK_DEAD))
                                        sk->sk_error_report(sk);

                                xsk_unbind_dev(xs);

                                /* Clear device references in umem. */
                                xdp_umem_clear_dev(xs->umem);
                        }
                        mutex_unlock(&xs->mutex);
                }
                mutex_unlock(&net->xdp.lock);
                break;
        }
        return NOTIFY_DONE;
}

static struct proto xsk_proto = {
        .name =         "XDP",
        .owner =        THIS_MODULE,
        .obj_size =     sizeof(struct xdp_sock),
};

static const struct proto_ops xsk_proto_ops = {
        .family         = PF_XDP,
        .owner          = THIS_MODULE,
        .release        = xsk_release,
        .bind           = xsk_bind,
        .connect        = sock_no_connect,
        .socketpair     = sock_no_socketpair,
        .accept         = sock_no_accept,
        .getname        = sock_no_getname,
        .poll           = xsk_poll,
        .ioctl          = sock_no_ioctl,
        .listen         = sock_no_listen,
        .shutdown       = sock_no_shutdown,
        .setsockopt     = xsk_setsockopt,
        .getsockopt     = xsk_getsockopt,
        .sendmsg        = xsk_sendmsg,
        .recvmsg        = sock_no_recvmsg,
        .mmap           = xsk_mmap,
        .sendpage       = sock_no_sendpage,
};

static void xsk_destruct(struct sock *sk)
{
        struct xdp_sock *xs = xdp_sk(sk);

        if (!sock_flag(sk, SOCK_DEAD))
                return;

        xdp_put_umem(xs->umem);

        sk_refcnt_debug_dec(sk);
}

static int xsk_create(struct net *net, struct socket *sock, int protocol,
                      int kern)
{
        struct sock *sk;
        struct xdp_sock *xs;

        if (!ns_capable(net->user_ns, CAP_NET_RAW))
                return -EPERM;
        if (sock->type != SOCK_RAW)
                return -ESOCKTNOSUPPORT;

        if (protocol)
                return -EPROTONOSUPPORT;

        sock->state = SS_UNCONNECTED;

        sk = sk_alloc(net, PF_XDP, GFP_KERNEL, &xsk_proto, kern);
        if (!sk)
                return -ENOBUFS;

        sock->ops = &xsk_proto_ops;

        sock_init_data(sock, sk);

        sk->sk_family = PF_XDP;

        sk->sk_destruct = xsk_destruct;
        sk_refcnt_debug_inc(sk);

        sock_set_flag(sk, SOCK_RCU_FREE);

        xs = xdp_sk(sk);
        xs->state = XSK_READY;
        mutex_init(&xs->mutex);
        spin_lock_init(&xs->rx_lock);
        spin_lock_init(&xs->tx_completion_lock);

        INIT_LIST_HEAD(&xs->map_list);
        spin_lock_init(&xs->map_list_lock);

        mutex_lock(&net->xdp.lock);
        sk_add_node_rcu(sk, &net->xdp.list);
        mutex_unlock(&net->xdp.lock);

        local_bh_disable();
        sock_prot_inuse_add(net, &xsk_proto, 1);
        local_bh_enable();

        return 0;
}

static const struct net_proto_family xsk_family_ops = {
        .family = PF_XDP,
        .create = xsk_create,
        .owner  = THIS_MODULE,
};

static struct notifier_block xsk_netdev_notifier = {
        .notifier_call  = xsk_notifier,
};

static int __net_init xsk_net_init(struct net *net)
{
        mutex_init(&net->xdp.lock);
        INIT_HLIST_HEAD(&net->xdp.list);
        return 0;
}

static void __net_exit xsk_net_exit(struct net *net)
{
        WARN_ON_ONCE(!hlist_empty(&net->xdp.list));
}

static struct pernet_operations xsk_net_ops = {
        .init = xsk_net_init,
        .exit = xsk_net_exit,
};

static int __init xsk_init(void)
{
        int err, cpu;

        err = proto_register(&xsk_proto, 0 /* no slab */);
        if (err)
                goto out;

        err = sock_register(&xsk_family_ops);
        if (err)
                goto out_proto;

        err = register_pernet_subsys(&xsk_net_ops);
        if (err)
                goto out_sk;

        err = register_netdevice_notifier(&xsk_netdev_notifier);
        if (err)
                goto out_pernet;

        for_each_possible_cpu(cpu)
                INIT_LIST_HEAD(&per_cpu(xskmap_flush_list, cpu));
        return 0;

out_pernet:
        unregister_pernet_subsys(&xsk_net_ops);
out_sk:
        sock_unregister(PF_XDP);
out_proto:
        proto_unregister(&xsk_proto);
out:
        return err;
}

fs_initcall(xsk_init);