net: Convert protocol error handlers from void to int
net/ipv4/tcp_bpf.c
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2017 - 2018 Covalent IO, Inc. http://covalent.io */

#include <linux/skmsg.h>
#include <linux/filter.h>
#include <linux/bpf.h>
#include <linux/init.h>
#include <linux/wait.h>

#include <net/inet_common.h>

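/* Report whether BPF-redirected data is queued on the psock ingress
 * list. Installed as the ->stream_memory_read hook below so poll()
 * sees sockmap ingress data as readable.
 */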
static bool tcp_bpf_stream_read(const struct sock *sk)
{
        struct sk_psock *psock;
        bool empty = true;

        rcu_read_lock();
        psock = sk_psock(sk);
        if (likely(psock))
                empty = list_empty(&psock->ingress_msg);
        rcu_read_unlock();
        return !empty;
}

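/* Wait until ingress data appears on the psock, the regular receive
 * queue becomes non-empty, or the timeout expires.
 */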
static int tcp_bpf_wait_data(struct sock *sk, struct sk_psock *psock,
                             int flags, long timeo, int *err)
{
        DEFINE_WAIT_FUNC(wait, woken_wake_function);
        int ret;

        add_wait_queue(sk_sleep(sk), &wait);
        sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
        ret = sk_wait_event(sk, &timeo,
                            !list_empty(&psock->ingress_msg) ||
                            !skb_queue_empty(&sk->sk_receive_queue), &wait);
        sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
        remove_wait_queue(sk_sleep(sk), &wait);
        return ret;
}

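/* Copy up to @len bytes from the psock ingress list into @msg. With
 * MSG_PEEK the data is left queued; otherwise consumed bytes are
 * uncharged from the socket and fully drained messages are freed.
 */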
int __tcp_bpf_recvmsg(struct sock *sk, struct sk_psock *psock,
                      struct msghdr *msg, int len, int flags)
{
        struct iov_iter *iter = &msg->msg_iter;
        int peek = flags & MSG_PEEK;
        int i, ret, copied = 0;
        struct sk_msg *msg_rx;

        msg_rx = list_first_entry_or_null(&psock->ingress_msg,
                                          struct sk_msg, list);

        while (copied != len) {
                struct scatterlist *sge;

                if (unlikely(!msg_rx))
                        break;

                i = msg_rx->sg.start;
                do {
                        struct page *page;
                        int copy;

                        sge = sk_msg_elem(msg_rx, i);
                        copy = sge->length;
                        page = sg_page(sge);
                        if (copied + copy > len)
                                copy = len - copied;
                        ret = copy_page_to_iter(page, sge->offset, copy, iter);
                        if (ret != copy) {
                                msg_rx->sg.start = i;
                                return -EFAULT;
                        }

                        copied += copy;
                        if (likely(!peek)) {
                                sge->offset += copy;
                                sge->length -= copy;
                                sk_mem_uncharge(sk, copy);
                                msg_rx->sg.size -= copy;

                                if (!sge->length) {
                                        sk_msg_iter_var_next(i);
                                        if (!msg_rx->skb)
                                                put_page(page);
                                }
                        } else {
                                sk_msg_iter_var_next(i);
                        }

                        if (copied == len)
                                break;
                } while (i != msg_rx->sg.end);

                if (unlikely(peek)) {
                        msg_rx = list_next_entry(msg_rx, list);
                        continue;
                }

                msg_rx->sg.start = i;
                if (!sge->length && msg_rx->sg.start == msg_rx->sg.end) {
                        list_del(&msg_rx->list);
                        if (msg_rx->skb)
                                consume_skb(msg_rx->skb);
                        kfree(msg_rx);
                }
                msg_rx = list_first_entry_or_null(&psock->ingress_msg,
                                                  struct sk_msg, list);
        }

        return copied;
}
EXPORT_SYMBOL_GPL(__tcp_bpf_recvmsg);

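/* recvmsg hook for sockmap sockets: serve BPF ingress data first and
 * fall back to tcp_recvmsg() when only the regular receive queue has
 * data or no psock is attached.
 */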
int tcp_bpf_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
                    int nonblock, int flags, int *addr_len)
{
        struct sk_psock *psock;
        int copied, ret;

        if (unlikely(flags & MSG_ERRQUEUE))
                return inet_recv_error(sk, msg, len, addr_len);
        if (!skb_queue_empty(&sk->sk_receive_queue))
                return tcp_recvmsg(sk, msg, len, nonblock, flags, addr_len);

        psock = sk_psock_get(sk);
        if (unlikely(!psock))
                return tcp_recvmsg(sk, msg, len, nonblock, flags, addr_len);
        lock_sock(sk);
msg_bytes_ready:
        copied = __tcp_bpf_recvmsg(sk, psock, msg, len, flags);
        if (!copied) {
                int data, err = 0;
                long timeo;

                timeo = sock_rcvtimeo(sk, nonblock);
                data = tcp_bpf_wait_data(sk, psock, flags, timeo, &err);
                if (data) {
                        if (skb_queue_empty(&sk->sk_receive_queue))
                                goto msg_bytes_ready;
                        release_sock(sk);
                        sk_psock_put(sk, psock);
                        return tcp_recvmsg(sk, msg, len, nonblock, flags, addr_len);
                }
                if (err) {
                        ret = err;
                        goto out;
                }
                copied = -EAGAIN;
        }
        ret = copied;
out:
        release_sock(sk);
        sk_psock_put(sk, psock);
        return ret;
}

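/* Redirect up to @apply_bytes of @msg to @sk's own ingress queue by
 * transferring scatterlist elements into a newly allocated sk_msg,
 * charging the receiving socket and waking its readers.
 */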
static int bpf_tcp_ingress(struct sock *sk, struct sk_psock *psock,
                           struct sk_msg *msg, u32 apply_bytes, int flags)
{
        bool apply = apply_bytes;
        struct scatterlist *sge;
        u32 size, copied = 0;
        struct sk_msg *tmp;
        int i, ret = 0;

        tmp = kzalloc(sizeof(*tmp), __GFP_NOWARN | GFP_KERNEL);
        if (unlikely(!tmp))
                return -ENOMEM;

        lock_sock(sk);
        tmp->sg.start = msg->sg.start;
        i = msg->sg.start;
        do {
                sge = sk_msg_elem(msg, i);
                size = (apply && apply_bytes < sge->length) ?
                        apply_bytes : sge->length;
                if (!sk_wmem_schedule(sk, size)) {
                        if (!copied)
                                ret = -ENOMEM;
                        break;
                }

                sk_mem_charge(sk, size);
                sk_msg_xfer(tmp, msg, i, size);
                copied += size;
                if (sge->length)
                        get_page(sk_msg_page(tmp, i));
                sk_msg_iter_var_next(i);
                tmp->sg.end = i;
                if (apply) {
                        apply_bytes -= size;
                        if (!apply_bytes)
                                break;
                }
        } while (i != msg->sg.end);

        if (!ret) {
                msg->sg.start = i;
                msg->sg.size -= apply_bytes;
                sk_psock_queue_msg(psock, tmp);
                sk->sk_data_ready(sk);
        } else {
                sk_msg_free(sk, tmp);
                kfree(tmp);
        }

        release_sock(sk);
        return ret;
}

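/* Push scatterlist elements of @msg out through the TCP stack with
 * do_tcp_sendpages(), honouring @apply_bytes when set. The caller must
 * hold the socket lock.
 */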
static int tcp_bpf_push(struct sock *sk, struct sk_msg *msg, u32 apply_bytes,
                        int flags, bool uncharge)
{
        bool apply = apply_bytes;
        struct scatterlist *sge;
        struct page *page;
        int size, ret = 0;
        u32 off;

        while (1) {
                sge = sk_msg_elem(msg, msg->sg.start);
                size = (apply && apply_bytes < sge->length) ?
                        apply_bytes : sge->length;
                off = sge->offset;
                page = sg_page(sge);

                tcp_rate_check_app_limited(sk);
retry:
                ret = do_tcp_sendpages(sk, page, off, size, flags);
                if (ret <= 0)
                        return ret;
                if (apply)
                        apply_bytes -= ret;
                msg->sg.size -= ret;
                sge->offset += ret;
                sge->length -= ret;
                if (uncharge)
                        sk_mem_uncharge(sk, ret);
                if (ret != size) {
                        size -= ret;
                        off += ret;
                        goto retry;
                }
                if (!sge->length) {
                        put_page(page);
                        sk_msg_iter_next(msg, start);
                        sg_init_table(sge, 1);
                        if (msg->sg.start == msg->sg.end)
                                break;
                }
                if (apply && !apply_bytes)
                        break;
        }

        return 0;
}

static int tcp_bpf_push_locked(struct sock *sk, struct sk_msg *msg,
                               u32 apply_bytes, int flags, bool uncharge)
{
        int ret;

        lock_sock(sk);
        ret = tcp_bpf_push(sk, msg, apply_bytes, flags, uncharge);
        release_sock(sk);
        return ret;
}

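/* Send a redirected @msg to @sk, either to its ingress queue or out
 * through its transmit path, depending on the ingress flag set on the
 * msg by the verdict program.
 */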
int tcp_bpf_sendmsg_redir(struct sock *sk, struct sk_msg *msg,
                          u32 bytes, int flags)
{
        bool ingress = sk_msg_to_ingress(msg);
        struct sk_psock *psock = sk_psock_get(sk);
        int ret;

        if (unlikely(!psock)) {
                sk_msg_free(sk, msg);
                return 0;
        }
        ret = ingress ? bpf_tcp_ingress(sk, psock, msg, bytes, flags) :
                        tcp_bpf_push_locked(sk, msg, bytes, flags, false);
        sk_psock_put(sk, psock);
        return ret;
}
EXPORT_SYMBOL_GPL(tcp_bpf_sendmsg_redir);

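/* Run the BPF msg verdict program, if it has not been evaluated yet
 * for this msg, and act on the result: pass data into the TCP stack,
 * redirect it to another socket, or drop it. Also implements cork
 * buffering and the apply_bytes limit.
 */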
static int tcp_bpf_send_verdict(struct sock *sk, struct sk_psock *psock,
                                struct sk_msg *msg, int *copied, int flags)
{
        bool cork = false, enospc = msg->sg.start == msg->sg.end;
        struct sock *sk_redir;
        u32 tosend;
        int ret;

more_data:
        if (psock->eval == __SK_NONE)
                psock->eval = sk_psock_msg_verdict(sk, psock, msg);

        if (msg->cork_bytes &&
            msg->cork_bytes > msg->sg.size && !enospc) {
                psock->cork_bytes = msg->cork_bytes - msg->sg.size;
                if (!psock->cork) {
                        psock->cork = kzalloc(sizeof(*psock->cork),
                                              GFP_ATOMIC | __GFP_NOWARN);
                        if (!psock->cork)
                                return -ENOMEM;
                }
                memcpy(psock->cork, msg, sizeof(*msg));
                return 0;
        }

        tosend = msg->sg.size;
        if (psock->apply_bytes && psock->apply_bytes < tosend)
                tosend = psock->apply_bytes;

        switch (psock->eval) {
        case __SK_PASS:
                ret = tcp_bpf_push(sk, msg, tosend, flags, true);
                if (unlikely(ret)) {
                        *copied -= sk_msg_free(sk, msg);
                        break;
                }
                sk_msg_apply_bytes(psock, tosend);
                break;
        case __SK_REDIRECT:
                sk_redir = psock->sk_redir;
                sk_msg_apply_bytes(psock, tosend);
                if (psock->cork) {
                        cork = true;
                        psock->cork = NULL;
                }
                sk_msg_return(sk, msg, tosend);
                release_sock(sk);
                ret = tcp_bpf_sendmsg_redir(sk_redir, msg, tosend, flags);
                lock_sock(sk);
                if (unlikely(ret < 0)) {
                        int free = sk_msg_free_nocharge(sk, msg);

                        if (!cork)
                                *copied -= free;
                }
                if (cork) {
                        sk_msg_free(sk, msg);
                        kfree(msg);
                        msg = NULL;
                        ret = 0;
                }
                break;
        case __SK_DROP:
        default:
                sk_msg_free_partial(sk, msg, tosend);
                sk_msg_apply_bytes(psock, tosend);
                *copied -= tosend;
                return -EACCES;
        }

        if (likely(!ret)) {
                if (!psock->apply_bytes) {
                        psock->eval = __SK_NONE;
                        if (psock->sk_redir) {
                                sock_put(psock->sk_redir);
                                psock->sk_redir = NULL;
                        }
                }
                if (msg &&
                    msg->sg.data[msg->sg.start].page_link &&
                    msg->sg.data[msg->sg.start].length)
                        goto more_data;
        }
        return ret;
}

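/* sendmsg hook: copy user data into an sk_msg and let
 * tcp_bpf_send_verdict() decide what happens to it.
 */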
static int tcp_bpf_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
{
        struct sk_msg tmp, *msg_tx = NULL;
        int flags = msg->msg_flags | MSG_NO_SHARED_FRAGS;
        int copied = 0, err = 0;
        struct sk_psock *psock;
        long timeo;

        psock = sk_psock_get(sk);
        if (unlikely(!psock))
                return tcp_sendmsg(sk, msg, size);

        lock_sock(sk);
        timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
        while (msg_data_left(msg)) {
                bool enospc = false;
                u32 copy, osize;

                if (sk->sk_err) {
                        err = -sk->sk_err;
                        goto out_err;
                }

                copy = msg_data_left(msg);
                if (!sk_stream_memory_free(sk))
                        goto wait_for_sndbuf;
                if (psock->cork) {
                        msg_tx = psock->cork;
                } else {
                        msg_tx = &tmp;
                        sk_msg_init(msg_tx);
                }

                osize = msg_tx->sg.size;
                err = sk_msg_alloc(sk, msg_tx, msg_tx->sg.size + copy, msg_tx->sg.end - 1);
                if (err) {
                        if (err != -ENOSPC)
                                goto wait_for_memory;
                        enospc = true;
                        copy = msg_tx->sg.size - osize;
                }

                err = sk_msg_memcopy_from_iter(sk, &msg->msg_iter, msg_tx,
                                               copy);
                if (err < 0) {
                        sk_msg_trim(sk, msg_tx, osize);
                        goto out_err;
                }

                copied += copy;
                if (psock->cork_bytes) {
                        if (size > psock->cork_bytes)
                                psock->cork_bytes = 0;
                        else
                                psock->cork_bytes -= size;
                        if (psock->cork_bytes && !enospc)
                                goto out_err;
                        /* All cork bytes are accounted, rerun the prog. */
                        psock->eval = __SK_NONE;
                        psock->cork_bytes = 0;
                }

                err = tcp_bpf_send_verdict(sk, psock, msg_tx, &copied, flags);
                if (unlikely(err < 0))
                        goto out_err;
                continue;
wait_for_sndbuf:
                set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
                err = sk_stream_wait_memory(sk, &timeo);
                if (err) {
                        if (msg_tx && msg_tx != psock->cork)
                                sk_msg_free(sk, msg_tx);
                        goto out_err;
                }
        }
out_err:
        if (err < 0)
                err = sk_stream_error(sk, msg->msg_flags, err);
        release_sock(sk);
        sk_psock_put(sk, psock);
        return copied ? copied : err;
}

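/* sendpage hook: add the page to an sk_msg, honouring cork semantics,
 * and run the verdict path once all cork bytes are accounted.
 */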
static int tcp_bpf_sendpage(struct sock *sk, struct page *page, int offset,
                            size_t size, int flags)
{
        struct sk_msg tmp, *msg = NULL;
        int err = 0, copied = 0;
        struct sk_psock *psock;
        bool enospc = false;

        psock = sk_psock_get(sk);
        if (unlikely(!psock))
                return tcp_sendpage(sk, page, offset, size, flags);

        lock_sock(sk);
        if (psock->cork) {
                msg = psock->cork;
        } else {
                msg = &tmp;
                sk_msg_init(msg);
        }

        /* Catch case where ring is full and sendpage is stalled. */
        if (unlikely(sk_msg_full(msg)))
                goto out_err;

        sk_msg_page_add(msg, page, size, offset);
        sk_mem_charge(sk, size);
        copied = size;
        if (sk_msg_full(msg))
                enospc = true;
        if (psock->cork_bytes) {
                if (size > psock->cork_bytes)
                        psock->cork_bytes = 0;
                else
                        psock->cork_bytes -= size;
                if (psock->cork_bytes && !enospc)
                        goto out_err;
                /* All cork bytes are accounted, rerun the prog. */
                psock->eval = __SK_NONE;
                psock->cork_bytes = 0;
        }

        err = tcp_bpf_send_verdict(sk, psock, msg, &copied, flags);
out_err:
        release_sock(sk);
        sk_psock_put(sk, psock);
        return copied ? copied : err;
}

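/* Drop sockmap state from @sk: free a pending cork buffer, purge
 * queued ingress messages and release all psock links.
 */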
static void tcp_bpf_remove(struct sock *sk, struct sk_psock *psock)
{
        struct sk_psock_link *link;

        sk_psock_cork_free(psock);
        __sk_psock_purge_ingress_msg(psock);
        while ((link = sk_psock_link_pop(psock))) {
                sk_psock_unlink(sk, link);
                sk_psock_free_link(link);
        }
}

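/* The unhash and close replacements below tear down psock state and
 * then chain to the callbacks saved when the proto was swapped in.
 */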
static void tcp_bpf_unhash(struct sock *sk)
{
        void (*saved_unhash)(struct sock *sk);
        struct sk_psock *psock;

        rcu_read_lock();
        psock = sk_psock(sk);
        if (unlikely(!psock)) {
                rcu_read_unlock();
                if (sk->sk_prot->unhash)
                        sk->sk_prot->unhash(sk);
                return;
        }

        saved_unhash = psock->saved_unhash;
        tcp_bpf_remove(sk, psock);
        rcu_read_unlock();
        saved_unhash(sk);
}

static void tcp_bpf_close(struct sock *sk, long timeout)
{
        void (*saved_close)(struct sock *sk, long timeout);
        struct sk_psock *psock;

        lock_sock(sk);
        rcu_read_lock();
        psock = sk_psock(sk);
        if (unlikely(!psock)) {
                rcu_read_unlock();
                release_sock(sk);
                return sk->sk_prot->close(sk, timeout);
        }

        saved_close = psock->saved_close;
        tcp_bpf_remove(sk, psock);
        rcu_read_unlock();
        release_sock(sk);
        saved_close(sk, timeout);
}

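/* One proto variant per address family and configuration: TCP_BPF_BASE
 * overrides only the receive-side hooks, TCP_BPF_TX additionally
 * overrides sendmsg/sendpage for msg verdict programs.
 */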
enum {
        TCP_BPF_IPV4,
        TCP_BPF_IPV6,
        TCP_BPF_NUM_PROTS,
};

enum {
        TCP_BPF_BASE,
        TCP_BPF_TX,
        TCP_BPF_NUM_CFGS,
};

static struct proto *tcpv6_prot_saved __read_mostly;
static DEFINE_SPINLOCK(tcpv6_prot_lock);
static struct proto tcp_bpf_prots[TCP_BPF_NUM_PROTS][TCP_BPF_NUM_CFGS];

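/* Clone @base and override the hooks for both configurations; the TX
 * variant inherits all BASE overrides and adds the send-side ones.
 */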
static void tcp_bpf_rebuild_protos(struct proto prot[TCP_BPF_NUM_CFGS],
                                   struct proto *base)
{
        prot[TCP_BPF_BASE] = *base;
        prot[TCP_BPF_BASE].unhash = tcp_bpf_unhash;
        prot[TCP_BPF_BASE].close = tcp_bpf_close;
        prot[TCP_BPF_BASE].recvmsg = tcp_bpf_recvmsg;
        prot[TCP_BPF_BASE].stream_memory_read = tcp_bpf_stream_read;

        prot[TCP_BPF_TX] = prot[TCP_BPF_BASE];
        prot[TCP_BPF_TX].sendmsg = tcp_bpf_sendmsg;
        prot[TCP_BPF_TX].sendpage = tcp_bpf_sendpage;
}

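/* tcpv6_prot is not known at initcall time (IPv6 may be a module), so
 * the IPv6 proto variants are built lazily on first use and cached
 * behind tcpv6_prot_lock.
 */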
static void tcp_bpf_check_v6_needs_rebuild(struct sock *sk, struct proto *ops)
{
        if (sk->sk_family == AF_INET6 &&
            unlikely(ops != smp_load_acquire(&tcpv6_prot_saved))) {
                spin_lock_bh(&tcpv6_prot_lock);
                if (likely(ops != tcpv6_prot_saved)) {
                        tcp_bpf_rebuild_protos(tcp_bpf_prots[TCP_BPF_IPV6], ops);
                        smp_store_release(&tcpv6_prot_saved, ops);
                }
                spin_unlock_bh(&tcpv6_prot_lock);
        }
}

static int __init tcp_bpf_v4_build_proto(void)
{
        tcp_bpf_rebuild_protos(tcp_bpf_prots[TCP_BPF_IPV4], &tcp_prot);
        return 0;
}
core_initcall(tcp_bpf_v4_build_proto);

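/* Pick the proto variant matching the socket family and the attached
 * program types, and swap it in while saving the original callbacks.
 */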
static void tcp_bpf_update_sk_prot(struct sock *sk, struct sk_psock *psock)
{
        int family = sk->sk_family == AF_INET6 ? TCP_BPF_IPV6 : TCP_BPF_IPV4;
        int config = psock->progs.msg_parser ? TCP_BPF_TX : TCP_BPF_BASE;

        sk_psock_update_proto(sk, psock, &tcp_bpf_prots[family][config]);
}

static void tcp_bpf_reinit_sk_prot(struct sock *sk, struct sk_psock *psock)
{
        int family = sk->sk_family == AF_INET6 ? TCP_BPF_IPV6 : TCP_BPF_IPV4;
        int config = psock->progs.msg_parser ? TCP_BPF_TX : TCP_BPF_BASE;

        /* Reinit occurs when program types change e.g. TCP_BPF_TX is removed
         * or added requiring sk_prot hook updates. We keep original saved
         * hooks in this case.
         */
        sk->sk_prot = &tcp_bpf_prots[family][config];
}

static int tcp_bpf_assert_proto_ops(struct proto *ops)
{
        /* In order to avoid retpoline, we make assumptions when we call
         * into ops if e.g. a psock is not present. Make sure they are
         * indeed valid assumptions.
         */
        return ops->recvmsg == tcp_recvmsg &&
               ops->sendmsg == tcp_sendmsg &&
               ops->sendpage == tcp_sendpage ? 0 : -ENOTSUPP;
}

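/* Called with the socket lock held when the set of attached programs
 * changes on an already-converted socket.
 */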
void tcp_bpf_reinit(struct sock *sk)
{
        struct sk_psock *psock;

        sock_owned_by_me(sk);

        rcu_read_lock();
        psock = sk_psock(sk);
        tcp_bpf_reinit_sk_prot(sk, psock);
        rcu_read_unlock();
}

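/* Convert @sk to the sockmap proto. Fails if no psock is attached, if
 * the socket was already converted, or if its proto ops are not the
 * plain TCP ones assumed by tcp_bpf_assert_proto_ops().
 */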
int tcp_bpf_init(struct sock *sk)
{
        struct proto *ops = READ_ONCE(sk->sk_prot);
        struct sk_psock *psock;

        sock_owned_by_me(sk);

        rcu_read_lock();
        psock = sk_psock(sk);
        if (unlikely(!psock || psock->sk_proto ||
                     tcp_bpf_assert_proto_ops(ops))) {
                rcu_read_unlock();
                return -EINVAL;
        }
        tcp_bpf_check_v6_needs_rebuild(sk, ops);
        tcp_bpf_update_sk_prot(sk, psock);
        rcu_read_unlock();
        return 0;
}