// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2017 - 2018 Covalent IO, Inc. http://covalent.io */

#include <linux/skmsg.h>
#include <linux/skbuff.h>
#include <linux/scatterlist.h>

#include <net/sock.h>
#include <net/tcp.h>
#include <net/tls.h>

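/* msg->sg is a small ring of scatterlist entries: sg.start and sg.end
 * wrap around the ring, so sg.end < sg.start is a valid (wrapped) state.
 * The helper below decides whether it is safe to coalesce new bytes into
 * the last element: the element that began the current operation
 * (elem_first_coalesce) must still lie inside the live start..end
 * window, checked for the unwrapped and wrapped cases respectively.
 */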
static bool sk_msg_try_coalesce_ok(struct sk_msg *msg, int elem_first_coalesce)
{
	if (msg->sg.end > msg->sg.start &&
	    elem_first_coalesce < msg->sg.end)
		return true;

	if (msg->sg.end < msg->sg.start &&
	    (elem_first_coalesce > msg->sg.start ||
	     elem_first_coalesce < msg->sg.end))
		return true;

	return false;
}

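/* Grow @msg to hold @len bytes in total, allocating from the socket's
 * page frag and charging the socket's send-buffer accounting. On -ENOMEM
 * the message is trimmed back to its original size; on -ENOSPC (sg ring
 * full) the space allocated so far is kept. A minimal usage sketch
 * (illustrative only; sk, bytes and error handling are assumed):
 *
 *	struct sk_msg msg;
 *
 *	sk_msg_init(&msg);
 *	err = sk_msg_alloc(sk, &msg, bytes, msg.sg.start);
 */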
int sk_msg_alloc(struct sock *sk, struct sk_msg *msg, int len,
		 int elem_first_coalesce)
{
	struct page_frag *pfrag = sk_page_frag(sk);
	u32 osize = msg->sg.size;
	int ret = 0;

	len -= msg->sg.size;
	while (len > 0) {
		struct scatterlist *sge;
		u32 orig_offset;
		int use, i;

		if (!sk_page_frag_refill(sk, pfrag)) {
			ret = -ENOMEM;
			goto msg_trim;
		}

		orig_offset = pfrag->offset;
		use = min_t(int, len, pfrag->size - orig_offset);
		if (!sk_wmem_schedule(sk, use)) {
			ret = -ENOMEM;
			goto msg_trim;
		}

		i = msg->sg.end;
		sk_msg_iter_var_prev(i);
		sge = &msg->sg.data[i];

		if (sk_msg_try_coalesce_ok(msg, elem_first_coalesce) &&
		    sg_page(sge) == pfrag->page &&
		    sge->offset + sge->length == orig_offset) {
			sge->length += use;
		} else {
			if (sk_msg_full(msg)) {
				ret = -ENOSPC;
				break;
			}

			sge = &msg->sg.data[msg->sg.end];
			sg_unmark_end(sge);
			sg_set_page(sge, pfrag->page, use, orig_offset);
			get_page(pfrag->page);
			sk_msg_iter_next(msg, end);
		}

		sk_mem_charge(sk, use);
		msg->sg.size += use;
		pfrag->offset += use;
		len -= use;
	}

	return ret;

msg_trim:
	sk_msg_trim(sk, msg, osize);
	return ret;
}
EXPORT_SYMBOL_GPL(sk_msg_alloc);

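/* Clone @len bytes starting at offset @off of @src into @dst. No data is
 * copied: destination entries reference the same pages, adjacent tail
 * entries are coalesced, and the socket is charged for the cloned bytes.
 * Returns -ENOSPC if @src runs out of data or @dst runs out of slots.
 */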
int sk_msg_clone(struct sock *sk, struct sk_msg *dst, struct sk_msg *src,
		 u32 off, u32 len)
{
	int i = src->sg.start;
	struct scatterlist *sge = sk_msg_elem(src, i);
	struct scatterlist *sgd = NULL;
	u32 sge_len, sge_off;

	while (off) {
		if (sge->length > off)
			break;
		off -= sge->length;
		sk_msg_iter_var_next(i);
		if (i == src->sg.end && off)
			return -ENOSPC;
		sge = sk_msg_elem(src, i);
	}

	while (len) {
		sge_len = sge->length - off;
		if (sge_len > len)
			sge_len = len;

		if (dst->sg.end)
			sgd = sk_msg_elem(dst, dst->sg.end - 1);

		if (sgd &&
		    (sg_page(sge) == sg_page(sgd)) &&
		    (sg_virt(sge) + off == sg_virt(sgd) + sgd->length)) {
			sgd->length += sge_len;
			dst->sg.size += sge_len;
		} else if (!sk_msg_full(dst)) {
			sge_off = sge->offset + off;
			sk_msg_page_add(dst, sg_page(sge), sge_len, sge_off);
		} else {
			return -ENOSPC;
		}

		off = 0;
		len -= sge_len;
		sk_mem_charge(sk, sge_len);
		sk_msg_iter_var_next(i);
		if (i == src->sg.end && len)
			return -ENOSPC;
		sge = sk_msg_elem(src, i);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(sk_msg_clone);

void sk_msg_return_zero(struct sock *sk, struct sk_msg *msg, int bytes)
{
	int i = msg->sg.start;

	do {
		struct scatterlist *sge = sk_msg_elem(msg, i);

		if (bytes < sge->length) {
			sge->length -= bytes;
			sge->offset += bytes;
			sk_mem_uncharge(sk, bytes);
			break;
		}

		sk_mem_uncharge(sk, sge->length);
		bytes -= sge->length;
		sge->length = 0;
		sge->offset = 0;
		sk_msg_iter_var_next(i);
	} while (bytes && i != msg->sg.end);
	msg->sg.start = i;
}
EXPORT_SYMBOL_GPL(sk_msg_return_zero);

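/* Unlike sk_msg_return_zero() above, this walks the live elements up to
 * sg.end and only reverses the memory accounting; it neither advances
 * sg.start nor clears the elements themselves.
 */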
void sk_msg_return(struct sock *sk, struct sk_msg *msg, int bytes)
{
	int i = msg->sg.start;

	do {
		struct scatterlist *sge = &msg->sg.data[i];
		int uncharge = (bytes < sge->length) ? bytes : sge->length;

		sk_mem_uncharge(sk, uncharge);
		bytes -= uncharge;
		sk_msg_iter_var_next(i);
	} while (i != msg->sg.end);
}
EXPORT_SYMBOL_GPL(sk_msg_return);

static int sk_msg_free_elem(struct sock *sk, struct sk_msg *msg, u32 i,
			    bool charge)
{
	struct scatterlist *sge = sk_msg_elem(msg, i);
	u32 len = sge->length;

	/* When the skb owns the memory we free it from the consume_skb()
	 * path.
	 */
	if (!msg->skb) {
		if (charge)
			sk_mem_uncharge(sk, len);
		put_page(sg_page(sge));
	}
	memset(sge, 0, sizeof(*sge));
	return len;
}

static int __sk_msg_free(struct sock *sk, struct sk_msg *msg, u32 i,
			 bool charge)
{
	struct scatterlist *sge = sk_msg_elem(msg, i);
	int freed = 0;

	while (msg->sg.size) {
		msg->sg.size -= sge->length;
		freed += sk_msg_free_elem(sk, msg, i, charge);
		sk_msg_iter_var_next(i);
		sk_msg_check_to_free(msg, i, msg->sg.size);
		sge = sk_msg_elem(msg, i);
	}
	consume_skb(msg->skb);
	sk_msg_init(msg);
	return freed;
}

int sk_msg_free_nocharge(struct sock *sk, struct sk_msg *msg)
{
	return __sk_msg_free(sk, msg, msg->sg.start, false);
}
EXPORT_SYMBOL_GPL(sk_msg_free_nocharge);

int sk_msg_free(struct sock *sk, struct sk_msg *msg)
{
	return __sk_msg_free(sk, msg, msg->sg.start, true);
}
EXPORT_SYMBOL_GPL(sk_msg_free);

static void __sk_msg_free_partial(struct sock *sk, struct sk_msg *msg,
				  u32 bytes, bool charge)
{
	struct scatterlist *sge;
	u32 i = msg->sg.start;

	while (bytes) {
		sge = sk_msg_elem(msg, i);
		if (!sge->length)
			break;
		if (bytes < sge->length) {
			if (charge)
				sk_mem_uncharge(sk, bytes);
			sge->length -= bytes;
			sge->offset += bytes;
			msg->sg.size -= bytes;
			break;
		}

		msg->sg.size -= sge->length;
		bytes -= sge->length;
		sk_msg_free_elem(sk, msg, i, charge);
		sk_msg_iter_var_next(i);
		sk_msg_check_to_free(msg, i, bytes);
	}
	msg->sg.start = i;
}

void sk_msg_free_partial(struct sock *sk, struct sk_msg *msg, u32 bytes)
{
	__sk_msg_free_partial(sk, msg, bytes, true);
}
EXPORT_SYMBOL_GPL(sk_msg_free_partial);

void sk_msg_free_partial_nocharge(struct sock *sk, struct sk_msg *msg,
				  u32 bytes)
{
	__sk_msg_free_partial(sk, msg, bytes, false);
}

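/* Shrink @msg to @len bytes, freeing whole scatterlist elements from the
 * tail and truncating the last partial one. curr/copybreak are pulled
 * back when they would otherwise point past the new end of the message.
 */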
void sk_msg_trim(struct sock *sk, struct sk_msg *msg, int len)
{
	int trim = msg->sg.size - len;
	u32 i = msg->sg.end;

	if (trim <= 0) {
		WARN_ON(trim < 0);
		return;
	}

	sk_msg_iter_var_prev(i);
	msg->sg.size = len;
	while (msg->sg.data[i].length &&
	       trim >= msg->sg.data[i].length) {
		trim -= msg->sg.data[i].length;
		sk_msg_free_elem(sk, msg, i, true);
		sk_msg_iter_var_prev(i);
		if (!trim)
			goto out;
	}

	msg->sg.data[i].length -= trim;
	sk_mem_uncharge(sk, trim);
	/* Adjust copybreak if it falls into the trimmed part of last buf */
	if (msg->sg.curr == i && msg->sg.copybreak > msg->sg.data[i].length)
		msg->sg.copybreak = msg->sg.data[i].length;
out:
	sk_msg_iter_var_next(i);
	msg->sg.end = i;

	/* If we trim data a full sg elem before the curr pointer, update
	 * copybreak and curr so that any future copy operations start at
	 * the new copy location.
	 * However, trimmed data that has not yet been used in a copy op
	 * does not require an update.
	 */
	if (!msg->sg.size) {
		msg->sg.curr = msg->sg.start;
		msg->sg.copybreak = 0;
	} else if (sk_msg_iter_dist(msg->sg.start, msg->sg.curr) >=
		   sk_msg_iter_dist(msg->sg.start, msg->sg.end)) {
		sk_msg_iter_var_prev(i);
		msg->sg.curr = i;
		msg->sg.copybreak = msg->sg.data[i].length;
	}
}
EXPORT_SYMBOL_GPL(sk_msg_trim);

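/* Pin user pages from @from and link them into @msg without copying.
 * Fails with -EFAULT once all MAX_MSG_FRAGS slots are used. On error the
 * iterator is reverted; per the comment at the out label, the caller is
 * expected to trim @msg itself if needed.
 */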
int sk_msg_zerocopy_from_iter(struct sock *sk, struct iov_iter *from,
			      struct sk_msg *msg, u32 bytes)
{
	int i, maxpages, ret = 0, num_elems = sk_msg_elem_used(msg);
	const int to_max_pages = MAX_MSG_FRAGS;
	struct page *pages[MAX_MSG_FRAGS];
	ssize_t orig, copied, use, offset;

	orig = msg->sg.size;
	while (bytes > 0) {
		i = 0;
		maxpages = to_max_pages - num_elems;
		if (maxpages == 0) {
			ret = -EFAULT;
			goto out;
		}

		copied = iov_iter_get_pages2(from, pages, bytes, maxpages,
					     &offset);
		if (copied <= 0) {
			ret = -EFAULT;
			goto out;
		}

		bytes -= copied;
		msg->sg.size += copied;

		while (copied) {
			use = min_t(int, copied, PAGE_SIZE - offset);
			sg_set_page(&msg->sg.data[msg->sg.end],
				    pages[i], use, offset);
			sg_unmark_end(&msg->sg.data[msg->sg.end]);
			sk_mem_charge(sk, use);

			offset = 0;
			copied -= use;
			sk_msg_iter_next(msg, end);
			num_elems++;
			i++;
		}
		/* When zerocopy is mixed with sk_msg_*copy* operations we
		 * may have a copybreak set; in this case clear it and
		 * prefer the zerocopy remainder when possible.
		 */
		msg->sg.copybreak = 0;
		msg->sg.curr = msg->sg.end;
	}
out:
	/* Revert iov_iter updates, msg will need to use 'trim' later if it
	 * also needs to be cleared.
	 */
	if (ret)
		iov_iter_revert(from, msg->sg.size - orig);
	return ret;
}
EXPORT_SYMBOL_GPL(sk_msg_zerocopy_from_iter);

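/* Copy @bytes from @from into the already-allocated elements of @msg,
 * resuming at sg.curr/copybreak where a previous copy left off. The
 * nocache copy variant is used when the route advertises
 * NETIF_F_NOCACHE_COPY.
 */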
int sk_msg_memcopy_from_iter(struct sock *sk, struct iov_iter *from,
			     struct sk_msg *msg, u32 bytes)
{
	int ret = -ENOSPC, i = msg->sg.curr;
	struct scatterlist *sge;
	u32 copy, buf_size;
	void *to;

	do {
		sge = sk_msg_elem(msg, i);
		/* This is possible if a trim operation shrunk the buffer */
		if (msg->sg.copybreak >= sge->length) {
			msg->sg.copybreak = 0;
			sk_msg_iter_var_next(i);
			if (i == msg->sg.end)
				break;
			sge = sk_msg_elem(msg, i);
		}

		buf_size = sge->length - msg->sg.copybreak;
		copy = (buf_size > bytes) ? bytes : buf_size;
		to = sg_virt(sge) + msg->sg.copybreak;
		msg->sg.copybreak += copy;
		if (sk->sk_route_caps & NETIF_F_NOCACHE_COPY)
			ret = copy_from_iter_nocache(to, copy, from);
		else
			ret = copy_from_iter(to, copy, from);
		if (ret != copy) {
			ret = -EFAULT;
			goto out;
		}
		bytes -= copy;
		if (!bytes)
			break;
		msg->sg.copybreak = 0;
		sk_msg_iter_var_next(i);
	} while (i != msg->sg.end);
out:
	msg->sg.curr = i;
	return ret;
}
EXPORT_SYMBOL_GPL(sk_msg_memcopy_from_iter);

/* Receive sk_msg from psock->ingress_msg to @msg. */
int sk_msg_recvmsg(struct sock *sk, struct sk_psock *psock, struct msghdr *msg,
		   int len, int flags)
{
	struct iov_iter *iter = &msg->msg_iter;
	int peek = flags & MSG_PEEK;
	struct sk_msg *msg_rx;
	int i, copied = 0;

	msg_rx = sk_psock_peek_msg(psock);
	while (copied != len) {
		struct scatterlist *sge;

		if (unlikely(!msg_rx))
			break;

		i = msg_rx->sg.start;
		do {
			struct page *page;
			int copy;

			sge = sk_msg_elem(msg_rx, i);
			copy = sge->length;
			page = sg_page(sge);
			if (copied + copy > len)
				copy = len - copied;
			copy = copy_page_to_iter(page, sge->offset, copy, iter);
			if (!copy)
				return copied ? copied : -EFAULT;

			copied += copy;
			if (likely(!peek)) {
				sge->offset += copy;
				sge->length -= copy;
				if (!msg_rx->skb)
					sk_mem_uncharge(sk, copy);
				msg_rx->sg.size -= copy;

				if (!sge->length) {
					sk_msg_iter_var_next(i);
					if (!msg_rx->skb)
						put_page(page);
				}
			} else {
				/* Let's not optimize the peek case: if
				 * copy_page_to_iter() didn't copy the
				 * entire length, just break out.
				 */
				if (copy != sge->length)
					return copied;
				sk_msg_iter_var_next(i);
			}

			if (copied == len)
				break;
		} while ((i != msg_rx->sg.end) && !sg_is_last(sge));

		if (unlikely(peek)) {
			msg_rx = sk_psock_next_msg(psock, msg_rx);
			if (!msg_rx)
				break;
			continue;
		}

		msg_rx->sg.start = i;
		if (!sge->length && (i == msg_rx->sg.end || sg_is_last(sge))) {
			msg_rx = sk_psock_dequeue_msg(psock);
			kfree_sk_msg(msg_rx);
		}
		msg_rx = sk_psock_peek_msg(psock);
	}

	return copied;
}
EXPORT_SYMBOL_GPL(sk_msg_recvmsg);

bool sk_msg_is_readable(struct sock *sk)
{
	struct sk_psock *psock;
	bool empty = true;

	rcu_read_lock();
	psock = sk_psock(sk);
	if (likely(psock))
		empty = list_empty(&psock->ingress_msg);
	rcu_read_unlock();
	return !empty;
}
EXPORT_SYMBOL_GPL(sk_msg_is_readable);

static struct sk_msg *alloc_sk_msg(void)
{
	struct sk_msg *msg;

	msg = kzalloc(sizeof(*msg), __GFP_NOWARN | GFP_KERNEL);
	if (unlikely(!msg))
		return NULL;
	sg_init_marker(msg->sg.data, NR_MSG_FRAG_IDS);
	return msg;
}

static struct sk_msg *sk_psock_create_ingress_msg(struct sock *sk,
						  struct sk_buff *skb)
{
	if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf)
		return NULL;

	if (!sk_rmem_schedule(sk, skb, skb->truesize))
		return NULL;

	return alloc_sk_msg();
}

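/* Map @len bytes of @skb starting at @off into @msg's scatterlist and
 * queue the message on the psock's ingress list, waking the receiver.
 * Returns -EAGAIN when the skb needs linearizing but that fails, so the
 * caller can retry later.
 */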
static int sk_psock_skb_ingress_enqueue(struct sk_buff *skb,
					u32 off, u32 len,
					struct sk_psock *psock,
					struct sock *sk,
					struct sk_msg *msg)
{
	int num_sge, copied;

	num_sge = skb_to_sgvec(skb, msg->sg.data, off, len);
	if (num_sge < 0) {
		/* skb linearize may fail with ENOMEM, but let's simply try
		 * again later if this happens. Under memory pressure we
		 * don't want to drop the skb. We need to linearize the skb
		 * so that the mapping in skb_to_sgvec cannot error.
		 */
		if (skb_linearize(skb))
			return -EAGAIN;

		num_sge = skb_to_sgvec(skb, msg->sg.data, off, len);
		if (unlikely(num_sge < 0))
			return num_sge;
	}

	copied = len;
	msg->sg.start = 0;
	msg->sg.size = copied;
	msg->sg.end = num_sge;
	msg->skb = skb;

	sk_psock_queue_msg(psock, msg);
	sk_psock_data_ready(sk, psock);
	return copied;
}

static int sk_psock_skb_ingress_self(struct sk_psock *psock, struct sk_buff *skb,
				     u32 off, u32 len);

static int sk_psock_skb_ingress(struct sk_psock *psock, struct sk_buff *skb,
				u32 off, u32 len)
{
	struct sock *sk = psock->sk;
	struct sk_msg *msg;
	int err;

	/* If we are receiving on the same sock skb->sk is already assigned,
	 * so skip memory accounting and the owner transition since they are
	 * already set correctly.
	 */
	if (unlikely(skb->sk == sk))
		return sk_psock_skb_ingress_self(psock, skb, off, len);
	msg = sk_psock_create_ingress_msg(sk, skb);
	if (!msg)
		return -EAGAIN;

	/* This will transition ownership of the data from the socket where
	 * the BPF program was run initiating the redirect to the socket
	 * we will eventually receive this data on. The data will be released
	 * from consume_skb() found in __tcp_bpf_recvmsg() after it's been
	 * copied into user buffers.
	 */
	skb_set_owner_r(skb, sk);
	err = sk_psock_skb_ingress_enqueue(skb, off, len, psock, sk, msg);
	if (err < 0)
		kfree(msg);
	return err;
}

/* Puts an skb on the ingress queue of the socket already assigned to the
 * skb. In this case we do not need to check memory limits or call
 * skb_set_owner_r() because the skb is already accounted for here.
 */
static int sk_psock_skb_ingress_self(struct sk_psock *psock, struct sk_buff *skb,
				     u32 off, u32 len)
{
	struct sk_msg *msg = alloc_sk_msg();
	struct sock *sk = psock->sk;
	int err;

	if (unlikely(!msg))
		return -EAGAIN;
	skb_set_owner_r(skb, sk);
	err = sk_psock_skb_ingress_enqueue(skb, off, len, psock, sk, msg);
	if (err < 0)
		kfree(msg);
	return err;
}

static int sk_psock_handle_skb(struct sk_psock *psock, struct sk_buff *skb,
			       u32 off, u32 len, bool ingress)
{
	if (!ingress) {
		if (!sock_writeable(psock->sk))
			return -EAGAIN;
		return skb_send_sock(psock->sk, skb, off, len);
	}
	return sk_psock_skb_ingress(psock, skb, off, len);
}

static void sk_psock_skb_state(struct sk_psock *psock,
			       struct sk_psock_work_state *state,
			       struct sk_buff *skb,
			       int len, int off)
{
	spin_lock_bh(&psock->ingress_lock);
	if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
		state->skb = skb;
		state->len = len;
		state->off = off;
	} else {
		sock_drop(psock->sk, skb);
	}
	spin_unlock_bh(&psock->ingress_lock);
}

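/* Workqueue callback that drains psock->ingress_skb. An skb that could
 * not be fully handled (-EAGAIN) is parked in psock->work_state so the
 * next run resumes at the saved offset instead of replaying data.
 */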
static void sk_psock_backlog(struct work_struct *work)
{
	struct sk_psock *psock = container_of(work, struct sk_psock, work);
	struct sk_psock_work_state *state = &psock->work_state;
	struct sk_buff *skb = NULL;
	bool ingress;
	u32 len, off;
	int ret;

	mutex_lock(&psock->work_mutex);
	if (unlikely(state->skb)) {
		spin_lock_bh(&psock->ingress_lock);
		skb = state->skb;
		len = state->len;
		off = state->off;
		state->skb = NULL;
		spin_unlock_bh(&psock->ingress_lock);
	}
	if (skb)
		goto start;

	while ((skb = skb_dequeue(&psock->ingress_skb))) {
		len = skb->len;
		off = 0;
		if (skb_bpf_strparser(skb)) {
			struct strp_msg *stm = strp_msg(skb);

			off = stm->offset;
			len = stm->full_len;
		}
start:
		ingress = skb_bpf_ingress(skb);
		skb_bpf_redirect_clear(skb);
		do {
			ret = -EIO;
			if (!sock_flag(psock->sk, SOCK_DEAD))
				ret = sk_psock_handle_skb(psock, skb, off,
							  len, ingress);
			if (ret <= 0) {
				if (ret == -EAGAIN) {
					sk_psock_skb_state(psock, state, skb,
							   len, off);
					goto end;
				}
				/* Hard errors break pipe and stop xmit. */
				sk_psock_report_error(psock, ret ? -ret : EPIPE);
				sk_psock_clear_state(psock, SK_PSOCK_TX_ENABLED);
				sock_drop(psock->sk, skb);
				goto end;
			}
			off += ret;
			len -= ret;
		} while (len);

		if (!ingress)
			kfree_skb(skb);
	}
end:
	mutex_unlock(&psock->work_mutex);
}

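/* Allocate a psock and attach it to @sk via sk_user_data, saving the
 * socket's original proto callbacks so they can be restored when the
 * psock is dropped. The allocation is GFP_ATOMIC since callers may hold
 * locks that forbid sleeping. Returns an ERR_PTR on failure, e.g.
 * -EBUSY when sk_user_data is already taken.
 */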
struct sk_psock *sk_psock_init(struct sock *sk, int node)
{
	struct sk_psock *psock;
	struct proto *prot;

	write_lock_bh(&sk->sk_callback_lock);

	if (sk_is_inet(sk) && inet_csk_has_ulp(sk)) {
		psock = ERR_PTR(-EINVAL);
		goto out;
	}

	if (sk->sk_user_data) {
		psock = ERR_PTR(-EBUSY);
		goto out;
	}

	psock = kzalloc_node(sizeof(*psock), GFP_ATOMIC | __GFP_NOWARN, node);
	if (!psock) {
		psock = ERR_PTR(-ENOMEM);
		goto out;
	}

	prot = READ_ONCE(sk->sk_prot);
	psock->sk = sk;
	psock->eval = __SK_NONE;
	psock->sk_proto = prot;
	psock->saved_unhash = prot->unhash;
	psock->saved_destroy = prot->destroy;
	psock->saved_close = prot->close;
	psock->saved_write_space = sk->sk_write_space;

	INIT_LIST_HEAD(&psock->link);
	spin_lock_init(&psock->link_lock);

	INIT_WORK(&psock->work, sk_psock_backlog);
	mutex_init(&psock->work_mutex);
	INIT_LIST_HEAD(&psock->ingress_msg);
	spin_lock_init(&psock->ingress_lock);
	skb_queue_head_init(&psock->ingress_skb);

	sk_psock_set_state(psock, SK_PSOCK_TX_ENABLED);
	refcount_set(&psock->refcnt, 1);

	__rcu_assign_sk_user_data_with_flags(sk, psock,
					     SK_USER_DATA_NOCOPY |
					     SK_USER_DATA_PSOCK);
	sock_hold(sk);

out:
	write_unlock_bh(&sk->sk_callback_lock);
	return psock;
}
EXPORT_SYMBOL_GPL(sk_psock_init);

struct sk_psock_link *sk_psock_link_pop(struct sk_psock *psock)
{
	struct sk_psock_link *link;

	spin_lock_bh(&psock->link_lock);
	link = list_first_entry_or_null(&psock->link, struct sk_psock_link,
					list);
	if (link)
		list_del(&link->list);
	spin_unlock_bh(&psock->link_lock);
	return link;
}

static void __sk_psock_purge_ingress_msg(struct sk_psock *psock)
{
	struct sk_msg *msg, *tmp;

	list_for_each_entry_safe(msg, tmp, &psock->ingress_msg, list) {
		list_del(&msg->list);
		sk_msg_free(psock->sk, msg);
		kfree(msg);
	}
}

static void __sk_psock_zap_ingress(struct sk_psock *psock)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&psock->ingress_skb)) != NULL) {
		skb_bpf_redirect_clear(skb);
		sock_drop(psock->sk, skb);
	}
	kfree_skb(psock->work_state.skb);
	/* We null the skb here to ensure that calls to sk_psock_backlog
	 * do not pick up the free'd skb.
	 */
	psock->work_state.skb = NULL;
	__sk_psock_purge_ingress_msg(psock);
}

static void sk_psock_link_destroy(struct sk_psock *psock)
{
	struct sk_psock_link *link, *tmp;

	list_for_each_entry_safe(link, tmp, &psock->link, list) {
		list_del(&link->list);
		sk_psock_free_link(link);
	}
}

void sk_psock_stop(struct sk_psock *psock, bool wait)
{
	spin_lock_bh(&psock->ingress_lock);
	sk_psock_clear_state(psock, SK_PSOCK_TX_ENABLED);
	sk_psock_cork_free(psock);
	__sk_psock_zap_ingress(psock);
	spin_unlock_bh(&psock->ingress_lock);

	if (wait)
		cancel_work_sync(&psock->work);
}

static void sk_psock_done_strp(struct sk_psock *psock);

static void sk_psock_destroy(struct work_struct *work)
{
	struct sk_psock *psock = container_of(to_rcu_work(work),
					      struct sk_psock, rwork);
	/* No sk_callback_lock since already detached. */

	sk_psock_done_strp(psock);

	cancel_work_sync(&psock->work);
	mutex_destroy(&psock->work_mutex);

	psock_progs_drop(&psock->progs);

	sk_psock_link_destroy(psock);
	sk_psock_cork_free(psock);

	if (psock->sk_redir)
		sock_put(psock->sk_redir);
	sock_put(psock->sk);
	kfree(psock);
}

void sk_psock_drop(struct sock *sk, struct sk_psock *psock)
{
	write_lock_bh(&sk->sk_callback_lock);
	sk_psock_restore_proto(sk, psock);
	rcu_assign_sk_user_data(sk, NULL);
	if (psock->progs.stream_parser)
		sk_psock_stop_strp(sk, psock);
	else if (psock->progs.stream_verdict || psock->progs.skb_verdict)
		sk_psock_stop_verdict(sk, psock);
	write_unlock_bh(&sk->sk_callback_lock);

	sk_psock_stop(psock, false);

	INIT_RCU_WORK(&psock->rwork, sk_psock_destroy);
	queue_rcu_work(system_wq, &psock->rwork);
}
EXPORT_SYMBOL_GPL(sk_psock_drop);

static int sk_psock_map_verd(int verdict, bool redir)
{
	switch (verdict) {
	case SK_PASS:
		return redir ? __SK_REDIRECT : __SK_PASS;
	case SK_DROP:
	default:
		break;
	}

	return __SK_DROP;
}

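/* Run the attached msg_parser program on @msg and map its return code to
 * an internal verdict. For __SK_REDIRECT, a reference to the chosen
 * target socket is stashed in psock->sk_redir for the caller to use.
 */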
int sk_psock_msg_verdict(struct sock *sk, struct sk_psock *psock,
			 struct sk_msg *msg)
{
	struct bpf_prog *prog;
	int ret;

	rcu_read_lock();
	prog = READ_ONCE(psock->progs.msg_parser);
	if (unlikely(!prog)) {
		ret = __SK_PASS;
		goto out;
	}

	sk_msg_compute_data_pointers(msg);
	msg->sk = sk;
	ret = bpf_prog_run_pin_on_cpu(prog, msg);
	ret = sk_psock_map_verd(ret, msg->sk_redir);
	psock->apply_bytes = msg->apply_bytes;
	if (ret == __SK_REDIRECT) {
		if (psock->sk_redir)
			sock_put(psock->sk_redir);
		psock->sk_redir = msg->sk_redir;
		if (!psock->sk_redir) {
			ret = __SK_DROP;
			goto out;
		}
		sock_hold(psock->sk_redir);
	}
out:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(sk_psock_msg_verdict);

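/* Hand @skb to the socket chosen by the BPF program: queue it on the
 * target psock's ingress_skb list and kick its backlog work. Drops are
 * charged to the source socket @from.
 */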
static int sk_psock_skb_redirect(struct sk_psock *from, struct sk_buff *skb)
{
	struct sk_psock *psock_other;
	struct sock *sk_other;

	sk_other = skb_bpf_redirect_fetch(skb);
	/* This error indicates a buggy BPF program: it returned a redirect
	 * verdict but then didn't set a redirect socket.
	 */
	if (unlikely(!sk_other)) {
		skb_bpf_redirect_clear(skb);
		sock_drop(from->sk, skb);
		return -EIO;
	}
	psock_other = sk_psock(sk_other);
	/* This error indicates the socket is being torn down or had another
	 * error that caused the pipe to break. We can't send a packet on
	 * a socket that is in this state so we drop the skb.
	 */
	if (!psock_other || sock_flag(sk_other, SOCK_DEAD)) {
		skb_bpf_redirect_clear(skb);
		sock_drop(from->sk, skb);
		return -EIO;
	}
	spin_lock_bh(&psock_other->ingress_lock);
	if (!sk_psock_test_state(psock_other, SK_PSOCK_TX_ENABLED)) {
		spin_unlock_bh(&psock_other->ingress_lock);
		skb_bpf_redirect_clear(skb);
		sock_drop(from->sk, skb);
		return -EIO;
	}

	skb_queue_tail(&psock_other->ingress_skb, skb);
	schedule_work(&psock_other->work);
	spin_unlock_bh(&psock_other->ingress_lock);
	return 0;
}

static void sk_psock_tls_verdict_apply(struct sk_buff *skb,
				       struct sk_psock *from, int verdict)
{
	switch (verdict) {
	case __SK_REDIRECT:
		sk_psock_skb_redirect(from, skb);
		break;
	case __SK_PASS:
	case __SK_DROP:
	default:
		break;
	}
}

int sk_psock_tls_strp_read(struct sk_psock *psock, struct sk_buff *skb)
{
	struct bpf_prog *prog;
	int ret = __SK_PASS;

	rcu_read_lock();
	prog = READ_ONCE(psock->progs.stream_verdict);
	if (likely(prog)) {
		skb->sk = psock->sk;
		skb_dst_drop(skb);
		skb_bpf_redirect_clear(skb);
		ret = bpf_prog_run_pin_on_cpu(prog, skb);
		ret = sk_psock_map_verd(ret, skb_bpf_redirect_fetch(skb));
		skb->sk = NULL;
	}
	sk_psock_tls_verdict_apply(skb, psock, ret);
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(sk_psock_tls_strp_read);

static int sk_psock_verdict_apply(struct sk_psock *psock, struct sk_buff *skb,
				  int verdict)
{
	struct sock *sk_other;
	int err = 0;
	u32 len, off;

	switch (verdict) {
	case __SK_PASS:
		err = -EIO;
		sk_other = psock->sk;
		if (sock_flag(sk_other, SOCK_DEAD) ||
		    !sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
			skb_bpf_redirect_clear(skb);
			goto out_free;
		}

		skb_bpf_set_ingress(skb);

		/* If the queue is empty then we can submit directly
		 * into the msg queue. If it's not empty we have to
		 * queue work, otherwise we may get OOO data. If
		 * sk_psock_skb_ingress() errors, the error is handled
		 * by retrying later from the workqueue.
		 */
		if (skb_queue_empty(&psock->ingress_skb)) {
			len = skb->len;
			off = 0;
			if (skb_bpf_strparser(skb)) {
				struct strp_msg *stm = strp_msg(skb);

				off = stm->offset;
				len = stm->full_len;
			}
			err = sk_psock_skb_ingress_self(psock, skb, off, len);
		}
		if (err < 0) {
			spin_lock_bh(&psock->ingress_lock);
			if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
				skb_queue_tail(&psock->ingress_skb, skb);
				schedule_work(&psock->work);
				err = 0;
			}
			spin_unlock_bh(&psock->ingress_lock);
			if (err < 0) {
				skb_bpf_redirect_clear(skb);
				goto out_free;
			}
		}
		break;
	case __SK_REDIRECT:
		err = sk_psock_skb_redirect(psock, skb);
		break;
	case __SK_DROP:
	default:
out_free:
		sock_drop(psock->sk, skb);
	}

	return err;
}

static void sk_psock_write_space(struct sock *sk)
{
	struct sk_psock *psock;
	void (*write_space)(struct sock *sk) = NULL;

	rcu_read_lock();
	psock = sk_psock(sk);
	if (likely(psock)) {
		if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED))
			schedule_work(&psock->work);
		write_space = psock->saved_write_space;
	}
	rcu_read_unlock();
	if (write_space)
		write_space(sk);
}

#if IS_ENABLED(CONFIG_BPF_STREAM_PARSER)
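/* Stream parser path: the BPF stream_parser program returns message
 * boundaries, and each parsed message is then passed to the
 * stream_verdict program to decide pass, drop, or redirect.
 */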
static void sk_psock_strp_read(struct strparser *strp, struct sk_buff *skb)
{
	struct sk_psock *psock;
	struct bpf_prog *prog;
	int ret = __SK_DROP;
	struct sock *sk;

	rcu_read_lock();
	sk = strp->sk;
	psock = sk_psock(sk);
	if (unlikely(!psock)) {
		sock_drop(sk, skb);
		goto out;
	}
	prog = READ_ONCE(psock->progs.stream_verdict);
	if (likely(prog)) {
		skb->sk = sk;
		skb_dst_drop(skb);
		skb_bpf_redirect_clear(skb);
		ret = bpf_prog_run_pin_on_cpu(prog, skb);
		if (ret == SK_PASS)
			skb_bpf_set_strparser(skb);
		ret = sk_psock_map_verd(ret, skb_bpf_redirect_fetch(skb));
		skb->sk = NULL;
	}
	sk_psock_verdict_apply(psock, skb, ret);
out:
	rcu_read_unlock();
}

static int sk_psock_strp_read_done(struct strparser *strp, int err)
{
	return err;
}

static int sk_psock_strp_parse(struct strparser *strp, struct sk_buff *skb)
{
	struct sk_psock *psock = container_of(strp, struct sk_psock, strp);
	struct bpf_prog *prog;
	int ret = skb->len;

	rcu_read_lock();
	prog = READ_ONCE(psock->progs.stream_parser);
	if (likely(prog)) {
		skb->sk = psock->sk;
		ret = bpf_prog_run_pin_on_cpu(prog, skb);
		skb->sk = NULL;
	}
	rcu_read_unlock();
	return ret;
}

/* Called with socket lock held. */
static void sk_psock_strp_data_ready(struct sock *sk)
{
	struct sk_psock *psock;

	rcu_read_lock();
	psock = sk_psock(sk);
	if (likely(psock)) {
		if (tls_sw_has_ctx_rx(sk)) {
			psock->saved_data_ready(sk);
		} else {
			write_lock_bh(&sk->sk_callback_lock);
			strp_data_ready(&psock->strp);
			write_unlock_bh(&sk->sk_callback_lock);
		}
	}
	rcu_read_unlock();
}

int sk_psock_init_strp(struct sock *sk, struct sk_psock *psock)
{
	static const struct strp_callbacks cb = {
		.rcv_msg = sk_psock_strp_read,
		.read_sock_done = sk_psock_strp_read_done,
		.parse_msg = sk_psock_strp_parse,
	};

	return strp_init(&psock->strp, sk, &cb);
}

void sk_psock_start_strp(struct sock *sk, struct sk_psock *psock)
{
	if (psock->saved_data_ready)
		return;

	psock->saved_data_ready = sk->sk_data_ready;
	sk->sk_data_ready = sk_psock_strp_data_ready;
	sk->sk_write_space = sk_psock_write_space;
}

void sk_psock_stop_strp(struct sock *sk, struct sk_psock *psock)
{
	psock_set_prog(&psock->progs.stream_parser, NULL);

	if (!psock->saved_data_ready)
		return;

	sk->sk_data_ready = psock->saved_data_ready;
	psock->saved_data_ready = NULL;
	strp_stop(&psock->strp);
}

static void sk_psock_done_strp(struct sk_psock *psock)
{
	/* Parser has been stopped */
	if (psock->progs.stream_parser)
		strp_done(&psock->strp);
}
#else
static void sk_psock_done_strp(struct sk_psock *psock)
{
}
#endif /* CONFIG_BPF_STREAM_PARSER */

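/* Verdict-only path (no stream parser): each skb handed over by the
 * protocol's ->read_skb() is run through the stream_verdict (or
 * skb_verdict) program directly from the data-ready callback.
 */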
static int sk_psock_verdict_recv(struct sock *sk, struct sk_buff *skb)
{
	struct sk_psock *psock;
	struct bpf_prog *prog;
	int ret = __SK_DROP;
	int len = skb->len;

	skb_get(skb);

	rcu_read_lock();
	psock = sk_psock(sk);
	if (unlikely(!psock)) {
		len = 0;
		sock_drop(sk, skb);
		goto out;
	}
	prog = READ_ONCE(psock->progs.stream_verdict);
	if (!prog)
		prog = READ_ONCE(psock->progs.skb_verdict);
	if (likely(prog)) {
		skb_dst_drop(skb);
		skb_bpf_redirect_clear(skb);
		ret = bpf_prog_run_pin_on_cpu(prog, skb);
		ret = sk_psock_map_verd(ret, skb_bpf_redirect_fetch(skb));
	}
	ret = sk_psock_verdict_apply(psock, skb, ret);
	if (ret < 0)
		len = ret;
out:
	rcu_read_unlock();
	return len;
}

static void sk_psock_verdict_data_ready(struct sock *sk)
{
	struct socket *sock = sk->sk_socket;

	if (unlikely(!sock || !sock->ops || !sock->ops->read_skb))
		return;
	sock->ops->read_skb(sk, sk_psock_verdict_recv);
}

void sk_psock_start_verdict(struct sock *sk, struct sk_psock *psock)
{
	if (psock->saved_data_ready)
		return;

	psock->saved_data_ready = sk->sk_data_ready;
	sk->sk_data_ready = sk_psock_verdict_data_ready;
	sk->sk_write_space = sk_psock_write_space;
}

void sk_psock_stop_verdict(struct sock *sk, struct sk_psock *psock)
{
	psock_set_prog(&psock->progs.stream_verdict, NULL);
	psock_set_prog(&psock->progs.skb_verdict, NULL);

	if (!psock->saved_data_ready)
		return;

	sk->sk_data_ready = psock->saved_data_ready;
	psock->saved_data_ready = NULL;
}