// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2017 - 2018 Covalent IO, Inc. http://covalent.io */

#include <linux/skmsg.h>
#include <linux/skbuff.h>
#include <linux/scatterlist.h>

#include <net/sock.h>
#include <net/tcp.h>
#include <net/tls.h>

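/* The sg ring may wrap, so sg.end can sit below sg.start.  Coalescing
 * into the element before sg.end is only allowed once the ring holds an
 * element at or past elem_first_coalesce, in either the linear
 * (end > start) or the wrapped (end < start) layout.
 */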
static bool sk_msg_try_coalesce_ok(struct sk_msg *msg, int elem_first_coalesce)
{
	if (msg->sg.end > msg->sg.start &&
	    elem_first_coalesce < msg->sg.end)
		return true;

	if (msg->sg.end < msg->sg.start &&
	    (elem_first_coalesce > msg->sg.start ||
	     elem_first_coalesce < msg->sg.end))
		return true;

	return false;
}

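/* Grow @msg to @len bytes total, charging the extra bytes to @sk and
 * backing them with the socket's page_frag.  New bytes are merged into
 * the tail element when sk_msg_try_coalesce_ok() permits, otherwise a
 * fresh sg element is consumed.  Returns 0, -ENOMEM under memory
 * pressure, or -ENOSPC once the sg ring is full.
 */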
int sk_msg_alloc(struct sock *sk, struct sk_msg *msg, int len,
		 int elem_first_coalesce)
{
	struct page_frag *pfrag = sk_page_frag(sk);
	int ret = 0;

	len -= msg->sg.size;
	while (len > 0) {
		struct scatterlist *sge;
		u32 orig_offset;
		int use, i;

		if (!sk_page_frag_refill(sk, pfrag))
			return -ENOMEM;

		orig_offset = pfrag->offset;
		use = min_t(int, len, pfrag->size - orig_offset);
		if (!sk_wmem_schedule(sk, use))
			return -ENOMEM;

		i = msg->sg.end;
		sk_msg_iter_var_prev(i);
		sge = &msg->sg.data[i];

		if (sk_msg_try_coalesce_ok(msg, elem_first_coalesce) &&
		    sg_page(sge) == pfrag->page &&
		    sge->offset + sge->length == orig_offset) {
			sge->length += use;
		} else {
			if (sk_msg_full(msg)) {
				ret = -ENOSPC;
				break;
			}

			sge = &msg->sg.data[msg->sg.end];
			sg_unmark_end(sge);
			sg_set_page(sge, pfrag->page, use, orig_offset);
			get_page(pfrag->page);
			sk_msg_iter_next(msg, end);
		}

		sk_mem_charge(sk, use);
		msg->sg.size += use;
		pfrag->offset += use;
		len -= use;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(sk_msg_alloc);

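/* Clone @len bytes at offset @off from @src into @dst by referencing the
 * same pages rather than copying; @sk is charged for the cloned bytes,
 * and adjacent ranges on one page are merged into the tail of @dst.
 * Returns 0, or -ENOSPC if @src runs out of data or @dst runs out of
 * elements.
 */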
int sk_msg_clone(struct sock *sk, struct sk_msg *dst, struct sk_msg *src,
		 u32 off, u32 len)
{
	int i = src->sg.start;
	struct scatterlist *sge = sk_msg_elem(src, i);
	struct scatterlist *sgd = NULL;
	u32 sge_len, sge_off;

	while (off) {
		if (sge->length > off)
			break;
		off -= sge->length;
		sk_msg_iter_var_next(i);
		if (i == src->sg.end && off)
			return -ENOSPC;
		sge = sk_msg_elem(src, i);
	}

	while (len) {
		sge_len = sge->length - off;
		if (sge_len > len)
			sge_len = len;

		if (dst->sg.end)
			sgd = sk_msg_elem(dst, dst->sg.end - 1);

		if (sgd &&
		    (sg_page(sge) == sg_page(sgd)) &&
		    (sg_virt(sge) + off == sg_virt(sgd) + sgd->length)) {
			sgd->length += sge_len;
			dst->sg.size += sge_len;
		} else if (!sk_msg_full(dst)) {
			sge_off = sge->offset + off;
			sk_msg_page_add(dst, sg_page(sge), sge_len, sge_off);
		} else {
			return -ENOSPC;
		}

		off = 0;
		len -= sge_len;
		sk_mem_charge(sk, sge_len);
		sk_msg_iter_var_next(i);
		if (i == src->sg.end && len)
			return -ENOSPC;
		sge = sk_msg_elem(src, i);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(sk_msg_clone);

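/* Uncharge @bytes from @sk and advance msg->sg.start past fully consumed
 * elements, zeroing their length and offset; a partially consumed
 * element is shrunk in place.  Page references are not dropped here.
 */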
void sk_msg_return_zero(struct sock *sk, struct sk_msg *msg, int bytes)
{
	int i = msg->sg.start;

	do {
		struct scatterlist *sge = sk_msg_elem(msg, i);

		if (bytes < sge->length) {
			sge->length -= bytes;
			sge->offset += bytes;
			sk_mem_uncharge(sk, bytes);
			break;
		}

		sk_mem_uncharge(sk, sge->length);
		bytes -= sge->length;
		sge->length = 0;
		sge->offset = 0;
		sk_msg_iter_var_next(i);
	} while (bytes && i != msg->sg.end);
	msg->sg.start = i;
}
EXPORT_SYMBOL_GPL(sk_msg_return_zero);

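/* Uncharge @bytes from @sk across the elements of @msg; unlike
 * sk_msg_return_zero() the ring and its elements are left untouched.
 */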
void sk_msg_return(struct sock *sk, struct sk_msg *msg, int bytes)
{
	int i = msg->sg.start;

	do {
		struct scatterlist *sge = &msg->sg.data[i];
		int uncharge = (bytes < sge->length) ? bytes : sge->length;

		sk_mem_uncharge(sk, uncharge);
		bytes -= uncharge;
		sk_msg_iter_var_next(i);
	} while (i != msg->sg.end);
}
EXPORT_SYMBOL_GPL(sk_msg_return);

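/* Free a single sg element: optionally uncharge its bytes from @sk and
 * drop the page reference unless the msg is backed by an skb, in which
 * case the skb owns the pages.  Returns the freed length.
 */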
static int sk_msg_free_elem(struct sock *sk, struct sk_msg *msg, u32 i,
			    bool charge)
{
	struct scatterlist *sge = sk_msg_elem(msg, i);
	u32 len = sge->length;

	if (charge)
		sk_mem_uncharge(sk, len);
	if (!msg->skb)
		put_page(sg_page(sge));
	memset(sge, 0, sizeof(*sge));
	return len;
}

static int __sk_msg_free(struct sock *sk, struct sk_msg *msg, u32 i,
			 bool charge)
{
	struct scatterlist *sge = sk_msg_elem(msg, i);
	int freed = 0;

	while (msg->sg.size) {
		msg->sg.size -= sge->length;
		freed += sk_msg_free_elem(sk, msg, i, charge);
		sk_msg_iter_var_next(i);
		sk_msg_check_to_free(msg, i, msg->sg.size);
		sge = sk_msg_elem(msg, i);
	}
	consume_skb(msg->skb);
	sk_msg_init(msg);
	return freed;
}

int sk_msg_free_nocharge(struct sock *sk, struct sk_msg *msg)
{
	return __sk_msg_free(sk, msg, msg->sg.start, false);
}
EXPORT_SYMBOL_GPL(sk_msg_free_nocharge);

int sk_msg_free(struct sock *sk, struct sk_msg *msg)
{
	return __sk_msg_free(sk, msg, msg->sg.start, true);
}
EXPORT_SYMBOL_GPL(sk_msg_free);

static void __sk_msg_free_partial(struct sock *sk, struct sk_msg *msg,
				  u32 bytes, bool charge)
{
	struct scatterlist *sge;
	u32 i = msg->sg.start;

	while (bytes) {
		sge = sk_msg_elem(msg, i);
		if (!sge->length)
			break;
		if (bytes < sge->length) {
			if (charge)
				sk_mem_uncharge(sk, bytes);
			sge->length -= bytes;
			sge->offset += bytes;
			msg->sg.size -= bytes;
			break;
		}

		msg->sg.size -= sge->length;
		bytes -= sge->length;
		sk_msg_free_elem(sk, msg, i, charge);
		sk_msg_iter_var_next(i);
		sk_msg_check_to_free(msg, i, bytes);
	}
	msg->sg.start = i;
}

void sk_msg_free_partial(struct sock *sk, struct sk_msg *msg, u32 bytes)
{
	__sk_msg_free_partial(sk, msg, bytes, true);
}
EXPORT_SYMBOL_GPL(sk_msg_free_partial);

void sk_msg_free_partial_nocharge(struct sock *sk, struct sk_msg *msg,
				  u32 bytes)
{
	__sk_msg_free_partial(sk, msg, bytes, false);
}

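/* Trim @msg down to @len bytes, freeing whole sg elements from the tail
 * and uncharging the trimmed bytes from @sk.  curr and copybreak are
 * pulled back if they pointed into the trimmed region.
 */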
void sk_msg_trim(struct sock *sk, struct sk_msg *msg, int len)
{
	int trim = msg->sg.size - len;
	u32 i = msg->sg.end;

	if (trim <= 0) {
		WARN_ON(trim < 0);
		return;
	}

	sk_msg_iter_var_prev(i);
	msg->sg.size = len;
	while (msg->sg.data[i].length &&
	       trim >= msg->sg.data[i].length) {
		trim -= msg->sg.data[i].length;
		sk_msg_free_elem(sk, msg, i, true);
		sk_msg_iter_var_prev(i);
		if (!trim)
			goto out;
	}

	msg->sg.data[i].length -= trim;
	sk_mem_uncharge(sk, trim);
	/* Adjust copybreak if it falls into the trimmed part of last buf */
	if (msg->sg.curr == i && msg->sg.copybreak > msg->sg.data[i].length)
		msg->sg.copybreak = msg->sg.data[i].length;
out:
	sk_msg_iter_var_next(i);
	msg->sg.end = i;

	/* If we trim data a full sg elem before the curr pointer, update
	 * copybreak and curr so that any future copy operations start at
	 * the new copy location.
	 * However, trimmed data that has not yet been used in a copy op
	 * does not require an update.
	 */
	if (!msg->sg.size) {
		msg->sg.curr = msg->sg.start;
		msg->sg.copybreak = 0;
	} else if (sk_msg_iter_dist(msg->sg.start, msg->sg.curr) >=
		   sk_msg_iter_dist(msg->sg.start, msg->sg.end)) {
		sk_msg_iter_var_prev(i);
		msg->sg.curr = i;
		msg->sg.copybreak = msg->sg.data[i].length;
	}
}
EXPORT_SYMBOL_GPL(sk_msg_trim);

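/* Pin user pages from @from and link them directly into @msg (zerocopy),
 * charging @sk for each page slice.  On failure the iov_iter is
 * reverted; the caller is expected to trim @msg back if it also needs
 * to be cleared.
 */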
int sk_msg_zerocopy_from_iter(struct sock *sk, struct iov_iter *from,
			      struct sk_msg *msg, u32 bytes)
{
	int i, maxpages, ret = 0, num_elems = sk_msg_elem_used(msg);
	const int to_max_pages = MAX_MSG_FRAGS;
	struct page *pages[MAX_MSG_FRAGS];
	ssize_t orig, copied, use, offset;

	orig = msg->sg.size;
	while (bytes > 0) {
		i = 0;
		maxpages = to_max_pages - num_elems;
		if (maxpages == 0) {
			ret = -EFAULT;
			goto out;
		}

		copied = iov_iter_get_pages(from, pages, bytes, maxpages,
					    &offset);
		if (copied <= 0) {
			ret = -EFAULT;
			goto out;
		}

		iov_iter_advance(from, copied);
		bytes -= copied;
		msg->sg.size += copied;

		while (copied) {
			use = min_t(int, copied, PAGE_SIZE - offset);
			sg_set_page(&msg->sg.data[msg->sg.end],
				    pages[i], use, offset);
			sg_unmark_end(&msg->sg.data[msg->sg.end]);
			sk_mem_charge(sk, use);

			offset = 0;
			copied -= use;
			sk_msg_iter_next(msg, end);
			num_elems++;
			i++;
		}
		/* When zerocopy is mixed with sk_msg_*copy* operations we
		 * may have a copybreak set; in that case clear it and
		 * prefer the zerocopy remainder when possible.
		 */
		msg->sg.copybreak = 0;
		msg->sg.curr = msg->sg.end;
	}
out:
	/* Revert iov_iter updates; msg will need to use 'trim' later if it
	 * also needs to be cleared.
	 */
	if (ret)
		iov_iter_revert(from, msg->sg.size - orig);
	return ret;
}
EXPORT_SYMBOL_GPL(sk_msg_zerocopy_from_iter);

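/* Copy @bytes from @from into the already allocated elements of @msg,
 * resuming at the msg->sg.curr and msg->sg.copybreak left behind by a
 * previous partial copy.  Returns a negative errno (-ENOSPC if the msg
 * runs out of elements, -EFAULT on a faulting copy) or a non-negative
 * value on success.
 */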
int sk_msg_memcopy_from_iter(struct sock *sk, struct iov_iter *from,
			     struct sk_msg *msg, u32 bytes)
{
	int ret = -ENOSPC, i = msg->sg.curr;
	struct scatterlist *sge;
	u32 copy, buf_size;
	void *to;

	do {
		sge = sk_msg_elem(msg, i);
		/* This is possible if a trim operation shrunk the buffer */
		if (msg->sg.copybreak >= sge->length) {
			msg->sg.copybreak = 0;
			sk_msg_iter_var_next(i);
			if (i == msg->sg.end)
				break;
			sge = sk_msg_elem(msg, i);
		}

		buf_size = sge->length - msg->sg.copybreak;
		copy = (buf_size > bytes) ? bytes : buf_size;
		to = sg_virt(sge) + msg->sg.copybreak;
		msg->sg.copybreak += copy;
		if (sk->sk_route_caps & NETIF_F_NOCACHE_COPY)
			ret = copy_from_iter_nocache(to, copy, from);
		else
			ret = copy_from_iter(to, copy, from);
		if (ret != copy) {
			ret = -EFAULT;
			goto out;
		}
		bytes -= copy;
		if (!bytes)
			break;
		msg->sg.copybreak = 0;
		sk_msg_iter_var_next(i);
	} while (i != msg->sg.end);
out:
	msg->sg.curr = i;
	return ret;
}
EXPORT_SYMBOL_GPL(sk_msg_memcopy_from_iter);

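/* Wrap @skb in a freshly allocated sk_msg, charge the receive memory to
 * the psock's socket, queue the msg on the ingress_msg list and notify
 * the socket that data is ready.  Returns the number of bytes queued or
 * a negative error.
 */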
static int sk_psock_skb_ingress(struct sk_psock *psock, struct sk_buff *skb)
{
	struct sock *sk = psock->sk;
	int copied = 0, num_sge;
	struct sk_msg *msg;

	msg = kzalloc(sizeof(*msg), __GFP_NOWARN | GFP_ATOMIC);
	if (unlikely(!msg))
		return -EAGAIN;
	if (!sk_rmem_schedule(sk, skb, skb->len)) {
		kfree(msg);
		return -EAGAIN;
	}

	sk_msg_init(msg);
	num_sge = skb_to_sgvec(skb, msg->sg.data, 0, skb->len);
	if (unlikely(num_sge < 0)) {
		kfree(msg);
		return num_sge;
	}

	sk_mem_charge(sk, skb->len);
	copied = skb->len;
	msg->sg.start = 0;
	msg->sg.size = copied;
	msg->sg.end = num_sge;
	msg->skb = skb;

	sk_psock_queue_msg(psock, msg);
	sk_psock_data_ready(sk, psock);
	return copied;
}

static int sk_psock_handle_skb(struct sk_psock *psock, struct sk_buff *skb,
			       u32 off, u32 len, bool ingress)
{
	if (ingress)
		return sk_psock_skb_ingress(psock, skb);
	else
		return skb_send_sock_locked(psock->sk, skb, off, len);
}

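/* Work callback that drains psock->ingress_skb, queueing each skb to the
 * local socket or transmitting it toward the redirect target.  On
 * -EAGAIN the current skb and offset are saved in work_state so the next
 * run resumes mid-skb; hard errors report the error and disable xmit.
 */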
static void sk_psock_backlog(struct work_struct *work)
{
	struct sk_psock *psock = container_of(work, struct sk_psock, work);
	struct sk_psock_work_state *state = &psock->work_state;
	struct sk_buff *skb;
	bool ingress;
	u32 len, off;
	int ret;

	/* Lock sock to avoid losing sk_socket during loop. */
	lock_sock(psock->sk);
	if (state->skb) {
		skb = state->skb;
		len = state->len;
		off = state->off;
		state->skb = NULL;
		goto start;
	}

	while ((skb = skb_dequeue(&psock->ingress_skb))) {
		len = skb->len;
		off = 0;
start:
		ingress = tcp_skb_bpf_ingress(skb);
		do {
			ret = -EIO;
			if (likely(psock->sk->sk_socket))
				ret = sk_psock_handle_skb(psock, skb, off,
							  len, ingress);
			if (ret <= 0) {
				if (ret == -EAGAIN) {
					state->skb = skb;
					state->len = len;
					state->off = off;
					goto end;
				}
				/* Hard errors break pipe and stop xmit. */
				sk_psock_report_error(psock, ret ? -ret : EPIPE);
				sk_psock_clear_state(psock, SK_PSOCK_TX_ENABLED);
				kfree_skb(skb);
				goto end;
			}
			off += ret;
			len -= ret;
		} while (len);

		if (!ingress)
			kfree_skb(skb);
	}
end:
	release_sock(psock->sk);
}

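/* Allocate a psock and attach it to @sk via sk_user_data, saving the
 * socket's original proto callbacks for later restore.  Fails with
 * -EINVAL if a ULP is already attached and -EBUSY if sk_user_data is
 * already taken.  Returns the psock or an ERR_PTR().
 */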
struct sk_psock *sk_psock_init(struct sock *sk, int node)
{
	struct sk_psock *psock;
	struct proto *prot;

	write_lock_bh(&sk->sk_callback_lock);

	if (inet_csk_has_ulp(sk)) {
		psock = ERR_PTR(-EINVAL);
		goto out;
	}

	if (sk->sk_user_data) {
		psock = ERR_PTR(-EBUSY);
		goto out;
	}

	psock = kzalloc_node(sizeof(*psock), GFP_ATOMIC | __GFP_NOWARN, node);
	if (!psock) {
		psock = ERR_PTR(-ENOMEM);
		goto out;
	}

	prot = READ_ONCE(sk->sk_prot);
	psock->sk = sk;
	psock->eval = __SK_NONE;
	psock->sk_proto = prot;
	psock->saved_unhash = prot->unhash;
	psock->saved_close = prot->close;
	psock->saved_write_space = sk->sk_write_space;

	INIT_LIST_HEAD(&psock->link);
	spin_lock_init(&psock->link_lock);

	INIT_WORK(&psock->work, sk_psock_backlog);
	INIT_LIST_HEAD(&psock->ingress_msg);
	skb_queue_head_init(&psock->ingress_skb);

	sk_psock_set_state(psock, SK_PSOCK_TX_ENABLED);
	refcount_set(&psock->refcnt, 1);

	rcu_assign_sk_user_data_nocopy(sk, psock);
	sock_hold(sk);

out:
	write_unlock_bh(&sk->sk_callback_lock);
	return psock;
}
EXPORT_SYMBOL_GPL(sk_psock_init);

struct sk_psock_link *sk_psock_link_pop(struct sk_psock *psock)
{
	struct sk_psock_link *link;

	spin_lock_bh(&psock->link_lock);
	link = list_first_entry_or_null(&psock->link, struct sk_psock_link,
					list);
	if (link)
		list_del(&link->list);
	spin_unlock_bh(&psock->link_lock);
	return link;
}

void __sk_psock_purge_ingress_msg(struct sk_psock *psock)
{
	struct sk_msg *msg, *tmp;

	list_for_each_entry_safe(msg, tmp, &psock->ingress_msg, list) {
		list_del(&msg->list);
		sk_msg_free(psock->sk, msg);
		kfree(msg);
	}
}

static void sk_psock_zap_ingress(struct sk_psock *psock)
{
	__skb_queue_purge(&psock->ingress_skb);
	__sk_psock_purge_ingress_msg(psock);
}

static void sk_psock_link_destroy(struct sk_psock *psock)
{
	struct sk_psock_link *link, *tmp;

	list_for_each_entry_safe(link, tmp, &psock->link, list) {
		list_del(&link->list);
		sk_psock_free_link(link);
	}
}

static void sk_psock_destroy_deferred(struct work_struct *gc)
{
	struct sk_psock *psock = container_of(gc, struct sk_psock, gc);

	/* No sk_callback_lock since already detached. */

	/* Parser has been stopped */
	if (psock->progs.skb_parser)
		strp_done(&psock->parser.strp);

	cancel_work_sync(&psock->work);

	psock_progs_drop(&psock->progs);

	sk_psock_link_destroy(psock);
	sk_psock_cork_free(psock);
	sk_psock_zap_ingress(psock);

	if (psock->sk_redir)
		sock_put(psock->sk_redir);
	sock_put(psock->sk);
	kfree(psock);
}

void sk_psock_destroy(struct rcu_head *rcu)
{
	struct sk_psock *psock = container_of(rcu, struct sk_psock, rcu);

	INIT_WORK(&psock->gc, sk_psock_destroy_deferred);
	schedule_work(&psock->gc);
}
EXPORT_SYMBOL_GPL(sk_psock_destroy);

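/* Detach the psock from @sk: restore the original proto callbacks,
 * clear sk_user_data, stop the strparser, and free the psock after an
 * RCU grace period via sk_psock_destroy().
 */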
void sk_psock_drop(struct sock *sk, struct sk_psock *psock)
{
	sk_psock_cork_free(psock);
	sk_psock_zap_ingress(psock);

	write_lock_bh(&sk->sk_callback_lock);
	sk_psock_restore_proto(sk, psock);
	rcu_assign_sk_user_data(sk, NULL);
	if (psock->progs.skb_parser)
		sk_psock_stop_strp(sk, psock);
	write_unlock_bh(&sk->sk_callback_lock);
	sk_psock_clear_state(psock, SK_PSOCK_TX_ENABLED);

	call_rcu(&psock->rcu, sk_psock_destroy);
}
EXPORT_SYMBOL_GPL(sk_psock_drop);

static int sk_psock_map_verd(int verdict, bool redir)
{
	switch (verdict) {
	case SK_PASS:
		return redir ? __SK_REDIRECT : __SK_PASS;
	case SK_DROP:
	default:
		break;
	}

	return __SK_DROP;
}

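/* Run the attached msg_parser BPF program on @msg and map its verdict.
 * On __SK_REDIRECT a reference to the chosen target socket is held in
 * psock->sk_redir; a missing target downgrades the verdict to __SK_DROP.
 */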
int sk_psock_msg_verdict(struct sock *sk, struct sk_psock *psock,
			 struct sk_msg *msg)
{
	struct bpf_prog *prog;
	int ret;

	rcu_read_lock();
	prog = READ_ONCE(psock->progs.msg_parser);
	if (unlikely(!prog)) {
		ret = __SK_PASS;
		goto out;
	}

	sk_msg_compute_data_pointers(msg);
	msg->sk = sk;
	ret = bpf_prog_run_pin_on_cpu(prog, msg);
	ret = sk_psock_map_verd(ret, msg->sk_redir);
	psock->apply_bytes = msg->apply_bytes;
	if (ret == __SK_REDIRECT) {
		if (psock->sk_redir)
			sock_put(psock->sk_redir);
		psock->sk_redir = msg->sk_redir;
		if (!psock->sk_redir) {
			ret = __SK_DROP;
			goto out;
		}
		sock_hold(psock->sk_redir);
	}
out:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(sk_psock_msg_verdict);

static int sk_psock_bpf_run(struct sk_psock *psock, struct bpf_prog *prog,
			    struct sk_buff *skb)
{
	int ret;

	skb->sk = psock->sk;
	bpf_compute_data_end_sk_skb(skb);
	ret = bpf_prog_run_pin_on_cpu(prog, skb);
	/* strparser clones the skb before handing it to an upper layer,
	 * meaning skb_orphan has been called. We NULL sk on the way out
	 * to ensure we don't trigger a BUG_ON() in skb/sk operations
	 * later and because we are not charging the memory of this skb
	 * to any socket yet.
	 */
	skb->sk = NULL;
	return ret;
}

static struct sk_psock *sk_psock_from_strp(struct strparser *strp)
{
	struct sk_psock_parser *parser;

	parser = container_of(strp, struct sk_psock_parser, strp);
	return container_of(parser, struct sk_psock, parser);
}

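/* Deliver @skb to the socket chosen by a BPF redirect verdict.  The skb
 * is dropped if the target psock is gone, marked dead, or has no room
 * (sk_rcvbuf for ingress, writeability for egress); otherwise it is
 * queued and the target's backlog work is scheduled.
 */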
static void sk_psock_skb_redirect(struct sk_buff *skb)
{
	struct sk_psock *psock_other;
	struct sock *sk_other;
	bool ingress;

	sk_other = tcp_skb_bpf_redirect_fetch(skb);
	if (unlikely(!sk_other)) {
		kfree_skb(skb);
		return;
	}
	psock_other = sk_psock(sk_other);
	if (!psock_other || sock_flag(sk_other, SOCK_DEAD) ||
	    !sk_psock_test_state(psock_other, SK_PSOCK_TX_ENABLED)) {
		kfree_skb(skb);
		return;
	}

	ingress = tcp_skb_bpf_ingress(skb);
	if ((!ingress && sock_writeable(sk_other)) ||
	    (ingress &&
	     atomic_read(&sk_other->sk_rmem_alloc) <=
	     sk_other->sk_rcvbuf)) {
		if (!ingress)
			skb_set_owner_w(skb, sk_other);
		skb_queue_tail(&psock_other->ingress_skb, skb);
		schedule_work(&psock_other->work);
	} else {
		kfree_skb(skb);
	}
}

static void sk_psock_tls_verdict_apply(struct sk_buff *skb, int verdict)
{
	switch (verdict) {
	case __SK_REDIRECT:
		sk_psock_skb_redirect(skb);
		break;
	case __SK_PASS:
	case __SK_DROP:
	default:
		break;
	}
}

int sk_psock_tls_strp_read(struct sk_psock *psock, struct sk_buff *skb)
{
	struct bpf_prog *prog;
	int ret = __SK_PASS;

	rcu_read_lock();
	prog = READ_ONCE(psock->progs.skb_verdict);
	if (likely(prog)) {
		tcp_skb_bpf_redirect_clear(skb);
		ret = sk_psock_bpf_run(psock, prog, skb);
		ret = sk_psock_map_verd(ret, tcp_skb_bpf_redirect_fetch(skb));
	}
	sk_psock_tls_verdict_apply(skb, ret);
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(sk_psock_tls_strp_read);

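/* Apply an skb verdict: __SK_PASS queues the skb for local ingress
 * (subject to sk_rcvbuf), __SK_REDIRECT hands it to the target socket,
 * and anything else frees it.
 */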
static void sk_psock_verdict_apply(struct sk_psock *psock,
				   struct sk_buff *skb, int verdict)
{
	struct sock *sk_other;

	switch (verdict) {
	case __SK_PASS:
		sk_other = psock->sk;
		if (sock_flag(sk_other, SOCK_DEAD) ||
		    !sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
			goto out_free;
		}
		if (atomic_read(&sk_other->sk_rmem_alloc) <=
		    sk_other->sk_rcvbuf) {
			struct tcp_skb_cb *tcp = TCP_SKB_CB(skb);

			tcp->bpf.flags |= BPF_F_INGRESS;
			skb_queue_tail(&psock->ingress_skb, skb);
			schedule_work(&psock->work);
			break;
		}
		goto out_free;
	case __SK_REDIRECT:
		sk_psock_skb_redirect(skb);
		break;
	case __SK_DROP:
	default:
out_free:
		kfree_skb(skb);
	}
}

static void sk_psock_strp_read(struct strparser *strp, struct sk_buff *skb)
{
	struct sk_psock *psock;
	struct bpf_prog *prog;
	int ret = __SK_DROP;
	struct sock *sk;

	rcu_read_lock();
	sk = strp->sk;
	psock = sk_psock(sk);
	if (unlikely(!psock)) {
		kfree_skb(skb);
		goto out;
	}
	prog = READ_ONCE(psock->progs.skb_verdict);
	if (likely(prog)) {
		skb_orphan(skb);
		tcp_skb_bpf_redirect_clear(skb);
		ret = sk_psock_bpf_run(psock, prog, skb);
		ret = sk_psock_map_verd(ret, tcp_skb_bpf_redirect_fetch(skb));
	}
	sk_psock_verdict_apply(psock, skb, ret);
out:
	rcu_read_unlock();
}

static int sk_psock_strp_read_done(struct strparser *strp, int err)
{
	return err;
}

static int sk_psock_strp_parse(struct strparser *strp, struct sk_buff *skb)
{
	struct sk_psock *psock = sk_psock_from_strp(strp);
	struct bpf_prog *prog;
	int ret = skb->len;

	rcu_read_lock();
	prog = READ_ONCE(psock->progs.skb_parser);
	if (likely(prog))
		ret = sk_psock_bpf_run(psock, prog, skb);
	rcu_read_unlock();
	return ret;
}

/* Called with socket lock held. */
static void sk_psock_strp_data_ready(struct sock *sk)
{
	struct sk_psock *psock;

	rcu_read_lock();
	psock = sk_psock(sk);
	if (likely(psock)) {
		if (tls_sw_has_ctx_rx(sk)) {
			psock->parser.saved_data_ready(sk);
		} else {
			write_lock_bh(&sk->sk_callback_lock);
			strp_data_ready(&psock->parser.strp);
			write_unlock_bh(&sk->sk_callback_lock);
		}
	}
	rcu_read_unlock();
}

static void sk_psock_write_space(struct sock *sk)
{
	struct sk_psock *psock;
	void (*write_space)(struct sock *sk) = NULL;

	rcu_read_lock();
	psock = sk_psock(sk);
	if (likely(psock)) {
		if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED))
			schedule_work(&psock->work);
		write_space = psock->saved_write_space;
	}
	rcu_read_unlock();
	if (write_space)
		write_space(sk);
}

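/* strparser plumbing: sk_psock_init_strp() registers the parse and
 * verdict callbacks, sk_psock_start_strp() swaps in our sk_data_ready
 * and sk_write_space hooks, and sk_psock_stop_strp() restores the saved
 * data_ready callback and stops the parser.
 */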
int sk_psock_init_strp(struct sock *sk, struct sk_psock *psock)
{
	static const struct strp_callbacks cb = {
		.rcv_msg = sk_psock_strp_read,
		.read_sock_done = sk_psock_strp_read_done,
		.parse_msg = sk_psock_strp_parse,
	};

	psock->parser.enabled = false;
	return strp_init(&psock->parser.strp, sk, &cb);
}

void sk_psock_start_strp(struct sock *sk, struct sk_psock *psock)
{
	struct sk_psock_parser *parser = &psock->parser;

	if (parser->enabled)
		return;

	parser->saved_data_ready = sk->sk_data_ready;
	sk->sk_data_ready = sk_psock_strp_data_ready;
	sk->sk_write_space = sk_psock_write_space;
	parser->enabled = true;
}

void sk_psock_stop_strp(struct sock *sk, struct sk_psock *psock)
{
	struct sk_psock_parser *parser = &psock->parser;

	if (!parser->enabled)
		return;

	sk->sk_data_ready = parser->saved_data_ready;
	parser->saved_data_ready = NULL;
	strp_stop(&parser->strp);
	parser->enabled = false;
}