// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2017 - 2018 Covalent IO, Inc. http://covalent.io */

#include <linux/skmsg.h>
#include <linux/skbuff.h>
#include <linux/scatterlist.h>

#include <net/sock.h>
#include <net/tcp.h>
#include <net/tls.h>
#include <trace/events/sock.h>

static bool sk_msg_try_coalesce_ok(struct sk_msg *msg, int elem_first_coalesce)
{
	if (msg->sg.end > msg->sg.start &&
	    elem_first_coalesce < msg->sg.end)
		return true;

	if (msg->sg.end < msg->sg.start &&
	    (elem_first_coalesce > msg->sg.start ||
	     elem_first_coalesce < msg->sg.end))
		return true;

	return false;
}

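/* sk_msg_alloc - grow @msg so that it holds @len bytes of memory backed
 * by the socket's page_frag. New bytes are coalesced into the last sg
 * element when the page_frag continues it (and @elem_first_coalesce
 * permits), otherwise a new element is appended. On allocation or
 * accounting failure the msg is trimmed back to its original size and
 * -ENOMEM is returned; -ENOSPC is returned when the sg ring fills up
 * first.
 *
 * Hypothetical caller sketch (not from this file), growing a msg before
 * copying @copy more bytes in, roughly as a tcp_bpf-style sendmsg path
 * might do:
 *
 *	err = sk_msg_alloc(sk, msg, msg->sg.size + copy, msg->sg.end - 1);
 *	if (err)
 *		goto wait_for_memory;
 */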
int sk_msg_alloc(struct sock *sk, struct sk_msg *msg, int len,
		 int elem_first_coalesce)
{
	struct page_frag *pfrag = sk_page_frag(sk);
	u32 osize = msg->sg.size;
	int ret = 0;

	len -= msg->sg.size;
	while (len > 0) {
		struct scatterlist *sge;
		u32 orig_offset;
		int use, i;

		if (!sk_page_frag_refill(sk, pfrag)) {
			ret = -ENOMEM;
			goto msg_trim;
		}

		orig_offset = pfrag->offset;
		use = min_t(int, len, pfrag->size - orig_offset);
		if (!sk_wmem_schedule(sk, use)) {
			ret = -ENOMEM;
			goto msg_trim;
		}

		i = msg->sg.end;
		sk_msg_iter_var_prev(i);
		sge = &msg->sg.data[i];

		if (sk_msg_try_coalesce_ok(msg, elem_first_coalesce) &&
		    sg_page(sge) == pfrag->page &&
		    sge->offset + sge->length == orig_offset) {
			sge->length += use;
		} else {
			if (sk_msg_full(msg)) {
				ret = -ENOSPC;
				break;
			}

			sge = &msg->sg.data[msg->sg.end];
			sg_unmark_end(sge);
			sg_set_page(sge, pfrag->page, use, orig_offset);
			get_page(pfrag->page);
			sk_msg_iter_next(msg, end);
		}

		sk_mem_charge(sk, use);
		msg->sg.size += use;
		pfrag->offset += use;
		len -= use;
	}

	return ret;

msg_trim:
	sk_msg_trim(sk, msg, osize);
	return ret;
}
EXPORT_SYMBOL_GPL(sk_msg_alloc);

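/* sk_msg_clone - clone @len bytes starting at offset @off of @src into
 * the tail of @dst. Pages are shared rather than copied: contiguous
 * ranges are coalesced into @dst's last element, otherwise a new element
 * referencing the same page is added. Returns -ENOSPC when @src runs out
 * of data or @dst runs out of free elements.
 */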
int sk_msg_clone(struct sock *sk, struct sk_msg *dst, struct sk_msg *src,
		 u32 off, u32 len)
{
	int i = src->sg.start;
	struct scatterlist *sge = sk_msg_elem(src, i);
	struct scatterlist *sgd = NULL;
	u32 sge_len, sge_off;

	while (off) {
		if (sge->length > off)
			break;
		off -= sge->length;
		sk_msg_iter_var_next(i);
		if (i == src->sg.end && off)
			return -ENOSPC;
		sge = sk_msg_elem(src, i);
	}

	while (len) {
		sge_len = sge->length - off;
		if (sge_len > len)
			sge_len = len;

		if (dst->sg.end)
			sgd = sk_msg_elem(dst, dst->sg.end - 1);

		if (sgd &&
		    (sg_page(sge) == sg_page(sgd)) &&
		    (sg_virt(sge) + off == sg_virt(sgd) + sgd->length)) {
			sgd->length += sge_len;
			dst->sg.size += sge_len;
		} else if (!sk_msg_full(dst)) {
			sge_off = sge->offset + off;
			sk_msg_page_add(dst, sg_page(sge), sge_len, sge_off);
		} else {
			return -ENOSPC;
		}

		off = 0;
		len -= sge_len;
		sk_mem_charge(sk, sge_len);
		sk_msg_iter_var_next(i);
		if (i == src->sg.end && len)
			return -ENOSPC;
		sge = sk_msg_elem(src, i);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(sk_msg_clone);

void sk_msg_return_zero(struct sock *sk, struct sk_msg *msg, int bytes)
{
	int i = msg->sg.start;

	do {
		struct scatterlist *sge = sk_msg_elem(msg, i);

		if (bytes < sge->length) {
			sge->length -= bytes;
			sge->offset += bytes;
			sk_mem_uncharge(sk, bytes);
			break;
		}

		sk_mem_uncharge(sk, sge->length);
		bytes -= sge->length;
		sge->length = 0;
		sge->offset = 0;
		sk_msg_iter_var_next(i);
	} while (bytes && i != msg->sg.end);
	msg->sg.start = i;
}
EXPORT_SYMBOL_GPL(sk_msg_return_zero);

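/* sk_msg_return - uncharge up to @bytes of @msg from the socket's memory
 * accounting. Unlike sk_msg_return_zero(), this walks every element
 * without zeroing them or advancing sg.start.
 */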
void sk_msg_return(struct sock *sk, struct sk_msg *msg, int bytes)
{
	int i = msg->sg.start;

	do {
		struct scatterlist *sge = &msg->sg.data[i];
		int uncharge = (bytes < sge->length) ? bytes : sge->length;

		sk_mem_uncharge(sk, uncharge);
		bytes -= uncharge;
		sk_msg_iter_var_next(i);
	} while (i != msg->sg.end);
}
EXPORT_SYMBOL_GPL(sk_msg_return);

static int sk_msg_free_elem(struct sock *sk, struct sk_msg *msg, u32 i,
			    bool charge)
{
	struct scatterlist *sge = sk_msg_elem(msg, i);
	u32 len = sge->length;

	/* When the skb owns the memory we free it from the consume_skb() path. */
	if (!msg->skb) {
		if (charge)
			sk_mem_uncharge(sk, len);
		put_page(sg_page(sge));
	}
	memset(sge, 0, sizeof(*sge));
	return len;
}

static int __sk_msg_free(struct sock *sk, struct sk_msg *msg, u32 i,
			 bool charge)
{
	struct scatterlist *sge = sk_msg_elem(msg, i);
	int freed = 0;

	while (msg->sg.size) {
		msg->sg.size -= sge->length;
		freed += sk_msg_free_elem(sk, msg, i, charge);
		sk_msg_iter_var_next(i);
		sk_msg_check_to_free(msg, i, msg->sg.size);
		sge = sk_msg_elem(msg, i);
	}
	consume_skb(msg->skb);
	sk_msg_init(msg);
	return freed;
}

int sk_msg_free_nocharge(struct sock *sk, struct sk_msg *msg)
{
	return __sk_msg_free(sk, msg, msg->sg.start, false);
}
EXPORT_SYMBOL_GPL(sk_msg_free_nocharge);

int sk_msg_free(struct sock *sk, struct sk_msg *msg)
{
	return __sk_msg_free(sk, msg, msg->sg.start, true);
}
EXPORT_SYMBOL_GPL(sk_msg_free);

static void __sk_msg_free_partial(struct sock *sk, struct sk_msg *msg,
				  u32 bytes, bool charge)
{
	struct scatterlist *sge;
	u32 i = msg->sg.start;

	while (bytes) {
		sge = sk_msg_elem(msg, i);
		if (!sge->length)
			break;
		if (bytes < sge->length) {
			if (charge)
				sk_mem_uncharge(sk, bytes);
			sge->length -= bytes;
			sge->offset += bytes;
			msg->sg.size -= bytes;
			break;
		}

		msg->sg.size -= sge->length;
		bytes -= sge->length;
		sk_msg_free_elem(sk, msg, i, charge);
		sk_msg_iter_var_next(i);
		sk_msg_check_to_free(msg, i, bytes);
	}
	msg->sg.start = i;
}

void sk_msg_free_partial(struct sock *sk, struct sk_msg *msg, u32 bytes)
{
	__sk_msg_free_partial(sk, msg, bytes, true);
}
EXPORT_SYMBOL_GPL(sk_msg_free_partial);

void sk_msg_free_partial_nocharge(struct sock *sk, struct sk_msg *msg,
				  u32 bytes)
{
	__sk_msg_free_partial(sk, msg, bytes, false);
}

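/* sk_msg_trim - shrink @msg from the tail so that it holds @len bytes.
 * Whole elements past @len are freed; the last remaining element is
 * shortened, and sg.curr/sg.copybreak are pulled back when they point
 * into the trimmed region.
 */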
void sk_msg_trim(struct sock *sk, struct sk_msg *msg, int len)
{
	int trim = msg->sg.size - len;
	u32 i = msg->sg.end;

	if (trim <= 0) {
		WARN_ON(trim < 0);
		return;
	}

	sk_msg_iter_var_prev(i);
	msg->sg.size = len;
	while (msg->sg.data[i].length &&
	       trim >= msg->sg.data[i].length) {
		trim -= msg->sg.data[i].length;
		sk_msg_free_elem(sk, msg, i, true);
		sk_msg_iter_var_prev(i);
		if (!trim)
			goto out;
	}

	msg->sg.data[i].length -= trim;
	sk_mem_uncharge(sk, trim);
	/* Adjust copybreak if it falls into the trimmed part of last buf */
	if (msg->sg.curr == i && msg->sg.copybreak > msg->sg.data[i].length)
		msg->sg.copybreak = msg->sg.data[i].length;
out:
	sk_msg_iter_var_next(i);
	msg->sg.end = i;

	/* If we trim data a full sg elem before the curr pointer, update
	 * copybreak and curr so that any future copy operations start at
	 * the new copy location.
	 * However, trimmed data that has not yet been used in a copy op
	 * does not require an update.
	 */
	if (!msg->sg.size) {
		msg->sg.curr = msg->sg.start;
		msg->sg.copybreak = 0;
	} else if (sk_msg_iter_dist(msg->sg.start, msg->sg.curr) >=
		   sk_msg_iter_dist(msg->sg.start, msg->sg.end)) {
		sk_msg_iter_var_prev(i);
		msg->sg.curr = i;
		msg->sg.copybreak = msg->sg.data[i].length;
	}
}
EXPORT_SYMBOL_GPL(sk_msg_trim);

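/* sk_msg_zerocopy_from_iter - pin up to @bytes of user pages from @from
 * and link them into @msg without copying. On failure the iov_iter is
 * reverted; the caller is expected to trim @msg back itself if needed.
 */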
int sk_msg_zerocopy_from_iter(struct sock *sk, struct iov_iter *from,
			      struct sk_msg *msg, u32 bytes)
{
	int i, maxpages, ret = 0, num_elems = sk_msg_elem_used(msg);
	const int to_max_pages = MAX_MSG_FRAGS;
	struct page *pages[MAX_MSG_FRAGS];
	ssize_t orig, copied, use, offset;

	orig = msg->sg.size;
	while (bytes > 0) {
		i = 0;
		maxpages = to_max_pages - num_elems;
		if (maxpages == 0) {
			ret = -EFAULT;
			goto out;
		}

		copied = iov_iter_get_pages2(from, pages, bytes, maxpages,
					     &offset);
		if (copied <= 0) {
			ret = -EFAULT;
			goto out;
		}

		bytes -= copied;
		msg->sg.size += copied;

		while (copied) {
			use = min_t(int, copied, PAGE_SIZE - offset);
			sg_set_page(&msg->sg.data[msg->sg.end],
				    pages[i], use, offset);
			sg_unmark_end(&msg->sg.data[msg->sg.end]);
			sk_mem_charge(sk, use);

			offset = 0;
			copied -= use;
			sk_msg_iter_next(msg, end);
			num_elems++;
			i++;
		}
		/* When zerocopy is mixed with sk_msg_*copy* operations we
		 * may have a copybreak set; in this case clear it and prefer
		 * the zerocopy remainder when possible.
		 */
		msg->sg.copybreak = 0;
		msg->sg.curr = msg->sg.end;
	}
out:
	/* Revert iov_iter updates; msg will need to use 'trim' later if it
	 * also needs to be cleared.
	 */
	if (ret)
		iov_iter_revert(from, msg->sg.size - orig);
	return ret;
}
EXPORT_SYMBOL_GPL(sk_msg_zerocopy_from_iter);

int sk_msg_memcopy_from_iter(struct sock *sk, struct iov_iter *from,
			     struct sk_msg *msg, u32 bytes)
{
	int ret = -ENOSPC, i = msg->sg.curr;
	struct scatterlist *sge;
	u32 copy, buf_size;
	void *to;

	do {
		sge = sk_msg_elem(msg, i);
		/* This is possible if a trim operation shrunk the buffer */
		if (msg->sg.copybreak >= sge->length) {
			msg->sg.copybreak = 0;
			sk_msg_iter_var_next(i);
			if (i == msg->sg.end)
				break;
			sge = sk_msg_elem(msg, i);
		}

		buf_size = sge->length - msg->sg.copybreak;
		copy = (buf_size > bytes) ? bytes : buf_size;
		to = sg_virt(sge) + msg->sg.copybreak;
		msg->sg.copybreak += copy;
		if (sk->sk_route_caps & NETIF_F_NOCACHE_COPY)
			ret = copy_from_iter_nocache(to, copy, from);
		else
			ret = copy_from_iter(to, copy, from);
		if (ret != copy) {
			ret = -EFAULT;
			goto out;
		}
		bytes -= copy;
		if (!bytes)
			break;
		msg->sg.copybreak = 0;
		sk_msg_iter_var_next(i);
	} while (i != msg->sg.end);
out:
	msg->sg.curr = i;
	return ret;
}
EXPORT_SYMBOL_GPL(sk_msg_memcopy_from_iter);

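/* Hypothetical caller sketch (not from this file): a protocol recvmsg
 * hook typically invokes this under the socket lock, e.g.
 *
 *	lock_sock(sk);
 *	copied = sk_msg_recvmsg(sk, psock, msg, len, flags);
 *	release_sock(sk);
 */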
/* Receive sk_msg from psock->ingress_msg to @msg. */
int sk_msg_recvmsg(struct sock *sk, struct sk_psock *psock, struct msghdr *msg,
		   int len, int flags)
{
	struct iov_iter *iter = &msg->msg_iter;
	int peek = flags & MSG_PEEK;
	struct sk_msg *msg_rx;
	int i, copied = 0;

	msg_rx = sk_psock_peek_msg(psock);
	while (copied != len) {
		struct scatterlist *sge;

		if (unlikely(!msg_rx))
			break;

		i = msg_rx->sg.start;
		do {
			struct page *page;
			int copy;

			sge = sk_msg_elem(msg_rx, i);
			copy = sge->length;
			page = sg_page(sge);
			if (copied + copy > len)
				copy = len - copied;
			copy = copy_page_to_iter(page, sge->offset, copy, iter);
			if (!copy) {
				copied = copied ? copied : -EFAULT;
				goto out;
			}

			copied += copy;
			if (likely(!peek)) {
				sge->offset += copy;
				sge->length -= copy;
				if (!msg_rx->skb)
					sk_mem_uncharge(sk, copy);
				msg_rx->sg.size -= copy;

				if (!sge->length) {
					sk_msg_iter_var_next(i);
					if (!msg_rx->skb)
						put_page(page);
				}
			} else {
				/* Let's not optimize the peek case: if
				 * copy_page_to_iter() didn't copy the entire
				 * length, just break.
				 */
				if (copy != sge->length)
					goto out;
				sk_msg_iter_var_next(i);
			}

			if (copied == len)
				break;
		} while ((i != msg_rx->sg.end) && !sg_is_last(sge));

		if (unlikely(peek)) {
			msg_rx = sk_psock_next_msg(psock, msg_rx);
			if (!msg_rx)
				break;
			continue;
		}

		msg_rx->sg.start = i;
		if (!sge->length && (i == msg_rx->sg.end || sg_is_last(sge))) {
			msg_rx = sk_psock_dequeue_msg(psock);
			kfree_sk_msg(msg_rx);
		}
		msg_rx = sk_psock_peek_msg(psock);
	}
out:
	return copied;
}
EXPORT_SYMBOL_GPL(sk_msg_recvmsg);

bool sk_msg_is_readable(struct sock *sk)
{
	struct sk_psock *psock;
	bool empty = true;

	rcu_read_lock();
	psock = sk_psock(sk);
	if (likely(psock))
		empty = list_empty(&psock->ingress_msg);
	rcu_read_unlock();
	return !empty;
}
EXPORT_SYMBOL_GPL(sk_msg_is_readable);

static struct sk_msg *alloc_sk_msg(gfp_t gfp)
{
	struct sk_msg *msg;

	msg = kzalloc(sizeof(*msg), gfp | __GFP_NOWARN);
	if (unlikely(!msg))
		return NULL;
	sg_init_marker(msg->sg.data, NR_MSG_FRAG_IDS);
	return msg;
}

static struct sk_msg *sk_psock_create_ingress_msg(struct sock *sk,
						  struct sk_buff *skb)
{
	if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf)
		return NULL;

	if (!sk_rmem_schedule(sk, skb, skb->truesize))
		return NULL;

	return alloc_sk_msg(GFP_KERNEL);
}

static int sk_psock_skb_ingress_enqueue(struct sk_buff *skb,
					u32 off, u32 len,
					struct sk_psock *psock,
					struct sock *sk,
					struct sk_msg *msg)
{
	int num_sge, copied;

	num_sge = skb_to_sgvec(skb, msg->sg.data, off, len);
	if (num_sge < 0) {
		/* skb linearize may fail with ENOMEM, but let's simply try
		 * again later if this happens. Under memory pressure we
		 * don't want to drop the skb. We need to linearize the skb
		 * so that the mapping in skb_to_sgvec cannot error.
		 */
		if (skb_linearize(skb))
			return -EAGAIN;

		num_sge = skb_to_sgvec(skb, msg->sg.data, off, len);
		if (unlikely(num_sge < 0))
			return num_sge;
	}

	copied = len;
	msg->sg.start = 0;
	msg->sg.size = copied;
	msg->sg.end = num_sge;
	msg->skb = skb;

	sk_psock_queue_msg(psock, msg);
	sk_psock_data_ready(sk, psock);
	return copied;
}

static int sk_psock_skb_ingress_self(struct sk_psock *psock, struct sk_buff *skb,
				     u32 off, u32 len);

static int sk_psock_skb_ingress(struct sk_psock *psock, struct sk_buff *skb,
				u32 off, u32 len)
{
	struct sock *sk = psock->sk;
	struct sk_msg *msg;
	int err;

	/* If we are receiving on the same sock skb->sk is already assigned,
	 * skip memory accounting and owner transition since it is already
	 * set correctly.
	 */
	if (unlikely(skb->sk == sk))
		return sk_psock_skb_ingress_self(psock, skb, off, len);
	msg = sk_psock_create_ingress_msg(sk, skb);
	if (!msg)
		return -EAGAIN;

	/* This will transition ownership of the data from the socket where
	 * the BPF program was run initiating the redirect to the socket
	 * we will eventually receive this data on. The data will be released
	 * from consume_skb() found in __tcp_bpf_recvmsg() after it has been
	 * copied into user buffers.
	 */
	skb_set_owner_r(skb, sk);
	err = sk_psock_skb_ingress_enqueue(skb, off, len, psock, sk, msg);
	if (err < 0)
		kfree(msg);
	return err;
}

/* Puts an skb on the ingress queue of the socket already assigned to the
 * skb. In this case we do not need to check memory limits or skb_set_owner_r
 * because the skb is already accounted for here.
 */
static int sk_psock_skb_ingress_self(struct sk_psock *psock, struct sk_buff *skb,
				     u32 off, u32 len)
{
	struct sk_msg *msg = alloc_sk_msg(GFP_ATOMIC);
	struct sock *sk = psock->sk;
	int err;

	if (unlikely(!msg))
		return -EAGAIN;
	skb_set_owner_r(skb, sk);
	err = sk_psock_skb_ingress_enqueue(skb, off, len, psock, sk, msg);
	if (err < 0)
		kfree(msg);
	return err;
}

static int sk_psock_handle_skb(struct sk_psock *psock, struct sk_buff *skb,
			       u32 off, u32 len, bool ingress)
{
	int err = 0;

	if (!ingress) {
		if (!sock_writeable(psock->sk))
			return -EAGAIN;
		return skb_send_sock(psock->sk, skb, off, len);
	}
	skb_get(skb);
	err = sk_psock_skb_ingress(psock, skb, off, len);
	if (err < 0)
		kfree_skb(skb);
	return err;
}

static void sk_psock_skb_state(struct sk_psock *psock,
			       struct sk_psock_work_state *state,
			       int len, int off)
{
	spin_lock_bh(&psock->ingress_lock);
	if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
		state->len = len;
		state->off = off;
	}
	spin_unlock_bh(&psock->ingress_lock);
}

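/* sk_psock_backlog - delayed-work handler that drains psock->ingress_skb.
 * Each skb is either transmitted (egress) or turned into an ingress msg;
 * partial progress is saved in psock->work_state so an -EAGAIN retry can
 * resume at the right offset, while hard errors break the pipe and stop
 * transmission.
 */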
static void sk_psock_backlog(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct sk_psock *psock = container_of(dwork, struct sk_psock, work);
	struct sk_psock_work_state *state = &psock->work_state;
	struct sk_buff *skb = NULL;
	u32 len = 0, off = 0;
	bool ingress;
	int ret;

	mutex_lock(&psock->work_mutex);
	if (unlikely(state->len)) {
		len = state->len;
		off = state->off;
	}

	while ((skb = skb_peek(&psock->ingress_skb))) {
		len = skb->len;
		off = 0;
		if (skb_bpf_strparser(skb)) {
			struct strp_msg *stm = strp_msg(skb);

			off = stm->offset;
			len = stm->full_len;
		}
		ingress = skb_bpf_ingress(skb);
		skb_bpf_redirect_clear(skb);
		do {
			ret = -EIO;
			if (!sock_flag(psock->sk, SOCK_DEAD))
				ret = sk_psock_handle_skb(psock, skb, off,
							  len, ingress);
			if (ret <= 0) {
				if (ret == -EAGAIN) {
					sk_psock_skb_state(psock, state, len, off);

					/* Delay slightly to prioritize any
					 * other work that might be here.
					 */
					if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED))
						schedule_delayed_work(&psock->work, 1);
					goto end;
				}
				/* Hard errors break pipe and stop xmit. */
				sk_psock_report_error(psock, ret ? -ret : EPIPE);
				sk_psock_clear_state(psock, SK_PSOCK_TX_ENABLED);
				goto end;
			}
			off += ret;
			len -= ret;
		} while (len);

		skb = skb_dequeue(&psock->ingress_skb);
		kfree_skb(skb);
	}
end:
	mutex_unlock(&psock->work_mutex);
}

struct sk_psock *sk_psock_init(struct sock *sk, int node)
{
	struct sk_psock *psock;
	struct proto *prot;

	write_lock_bh(&sk->sk_callback_lock);

	if (sk_is_inet(sk) && inet_csk_has_ulp(sk)) {
		psock = ERR_PTR(-EINVAL);
		goto out;
	}

	if (sk->sk_user_data) {
		psock = ERR_PTR(-EBUSY);
		goto out;
	}

	psock = kzalloc_node(sizeof(*psock), GFP_ATOMIC | __GFP_NOWARN, node);
	if (!psock) {
		psock = ERR_PTR(-ENOMEM);
		goto out;
	}

	prot = READ_ONCE(sk->sk_prot);
	psock->sk = sk;
	psock->eval = __SK_NONE;
	psock->sk_proto = prot;
	psock->saved_unhash = prot->unhash;
	psock->saved_destroy = prot->destroy;
	psock->saved_close = prot->close;
	psock->saved_write_space = sk->sk_write_space;

	INIT_LIST_HEAD(&psock->link);
	spin_lock_init(&psock->link_lock);

	INIT_DELAYED_WORK(&psock->work, sk_psock_backlog);
	mutex_init(&psock->work_mutex);
	INIT_LIST_HEAD(&psock->ingress_msg);
	spin_lock_init(&psock->ingress_lock);
	skb_queue_head_init(&psock->ingress_skb);

	sk_psock_set_state(psock, SK_PSOCK_TX_ENABLED);
	refcount_set(&psock->refcnt, 1);

	__rcu_assign_sk_user_data_with_flags(sk, psock,
					     SK_USER_DATA_NOCOPY |
					     SK_USER_DATA_PSOCK);
	sock_hold(sk);

out:
	write_unlock_bh(&sk->sk_callback_lock);
	return psock;
}
EXPORT_SYMBOL_GPL(sk_psock_init);

struct sk_psock_link *sk_psock_link_pop(struct sk_psock *psock)
{
	struct sk_psock_link *link;

	spin_lock_bh(&psock->link_lock);
	link = list_first_entry_or_null(&psock->link, struct sk_psock_link,
					list);
	if (link)
		list_del(&link->list);
	spin_unlock_bh(&psock->link_lock);
	return link;
}

static void __sk_psock_purge_ingress_msg(struct sk_psock *psock)
{
	struct sk_msg *msg, *tmp;

	list_for_each_entry_safe(msg, tmp, &psock->ingress_msg, list) {
		list_del(&msg->list);
		sk_msg_free(psock->sk, msg);
		kfree(msg);
	}
}

static void __sk_psock_zap_ingress(struct sk_psock *psock)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&psock->ingress_skb)) != NULL) {
		skb_bpf_redirect_clear(skb);
		sock_drop(psock->sk, skb);
	}
	__sk_psock_purge_ingress_msg(psock);
}

static void sk_psock_link_destroy(struct sk_psock *psock)
{
	struct sk_psock_link *link, *tmp;

	list_for_each_entry_safe(link, tmp, &psock->link, list) {
		list_del(&link->list);
		sk_psock_free_link(link);
	}
}

void sk_psock_stop(struct sk_psock *psock)
{
	spin_lock_bh(&psock->ingress_lock);
	sk_psock_clear_state(psock, SK_PSOCK_TX_ENABLED);
	sk_psock_cork_free(psock);
	spin_unlock_bh(&psock->ingress_lock);
}

static void sk_psock_done_strp(struct sk_psock *psock);

static void sk_psock_destroy(struct work_struct *work)
{
	struct sk_psock *psock = container_of(to_rcu_work(work),
					      struct sk_psock, rwork);
	/* No sk_callback_lock since already detached. */

	sk_psock_done_strp(psock);

	cancel_delayed_work_sync(&psock->work);
	__sk_psock_zap_ingress(psock);
	mutex_destroy(&psock->work_mutex);

	psock_progs_drop(&psock->progs);

	sk_psock_link_destroy(psock);
	sk_psock_cork_free(psock);

	if (psock->sk_redir)
		sock_put(psock->sk_redir);
	if (psock->sk_pair)
		sock_put(psock->sk_pair);
	sock_put(psock->sk);
	kfree(psock);
}

void sk_psock_drop(struct sock *sk, struct sk_psock *psock)
{
	write_lock_bh(&sk->sk_callback_lock);
	sk_psock_restore_proto(sk, psock);
	rcu_assign_sk_user_data(sk, NULL);
	if (psock->progs.stream_parser)
		sk_psock_stop_strp(sk, psock);
	else if (psock->progs.stream_verdict || psock->progs.skb_verdict)
		sk_psock_stop_verdict(sk, psock);
	write_unlock_bh(&sk->sk_callback_lock);

	sk_psock_stop(psock);

	INIT_RCU_WORK(&psock->rwork, sk_psock_destroy);
	queue_rcu_work(system_wq, &psock->rwork);
}
EXPORT_SYMBOL_GPL(sk_psock_drop);

static int sk_psock_map_verd(int verdict, bool redir)
{
	switch (verdict) {
	case SK_PASS:
		return redir ? __SK_REDIRECT : __SK_PASS;
	case SK_DROP:
	default:
		break;
	}

	return __SK_DROP;
}

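/* sk_psock_msg_verdict - run the attached msg_parser BPF program on @msg
 * and map its return code to an internal verdict. On __SK_REDIRECT any
 * old psock->sk_redir reference is dropped and a new one is taken on the
 * socket selected by the program; a redirect verdict without a target
 * socket degrades to __SK_DROP.
 */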
int sk_psock_msg_verdict(struct sock *sk, struct sk_psock *psock,
			 struct sk_msg *msg)
{
	struct bpf_prog *prog;
	int ret;

	rcu_read_lock();
	prog = READ_ONCE(psock->progs.msg_parser);
	if (unlikely(!prog)) {
		ret = __SK_PASS;
		goto out;
	}

	sk_msg_compute_data_pointers(msg);
	msg->sk = sk;
	ret = bpf_prog_run_pin_on_cpu(prog, msg);
	ret = sk_psock_map_verd(ret, msg->sk_redir);
	psock->apply_bytes = msg->apply_bytes;
	if (ret == __SK_REDIRECT) {
		if (psock->sk_redir) {
			sock_put(psock->sk_redir);
			psock->sk_redir = NULL;
		}
		if (!msg->sk_redir) {
			ret = __SK_DROP;
			goto out;
		}
		psock->redir_ingress = sk_msg_to_ingress(msg);
		psock->sk_redir = msg->sk_redir;
		sock_hold(psock->sk_redir);
	}
out:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(sk_psock_msg_verdict);

static int sk_psock_skb_redirect(struct sk_psock *from, struct sk_buff *skb)
{
	struct sk_psock *psock_other;
	struct sock *sk_other;

	sk_other = skb_bpf_redirect_fetch(skb);
	/* This error indicates a buggy BPF program: it returned a redirect
	 * return code, but then didn't set a redirect interface.
	 */
	if (unlikely(!sk_other)) {
		skb_bpf_redirect_clear(skb);
		sock_drop(from->sk, skb);
		return -EIO;
	}
	psock_other = sk_psock(sk_other);
	/* This error indicates the socket is being torn down or had another
	 * error that caused the pipe to break. We can't send a packet on
	 * a socket that is in this state, so we drop the skb.
	 */
	if (!psock_other || sock_flag(sk_other, SOCK_DEAD)) {
		skb_bpf_redirect_clear(skb);
		sock_drop(from->sk, skb);
		return -EIO;
	}
	spin_lock_bh(&psock_other->ingress_lock);
	if (!sk_psock_test_state(psock_other, SK_PSOCK_TX_ENABLED)) {
		spin_unlock_bh(&psock_other->ingress_lock);
		skb_bpf_redirect_clear(skb);
		sock_drop(from->sk, skb);
		return -EIO;
	}

	skb_queue_tail(&psock_other->ingress_skb, skb);
	schedule_delayed_work(&psock_other->work, 0);
	spin_unlock_bh(&psock_other->ingress_lock);
	return 0;
}

static void sk_psock_tls_verdict_apply(struct sk_buff *skb,
				       struct sk_psock *from, int verdict)
{
	switch (verdict) {
	case __SK_REDIRECT:
		sk_psock_skb_redirect(from, skb);
		break;
	case __SK_PASS:
	case __SK_DROP:
	default:
		break;
	}
}

int sk_psock_tls_strp_read(struct sk_psock *psock, struct sk_buff *skb)
{
	struct bpf_prog *prog;
	int ret = __SK_PASS;

	rcu_read_lock();
	prog = READ_ONCE(psock->progs.stream_verdict);
	if (likely(prog)) {
		skb->sk = psock->sk;
		skb_dst_drop(skb);
		skb_bpf_redirect_clear(skb);
		ret = bpf_prog_run_pin_on_cpu(prog, skb);
		ret = sk_psock_map_verd(ret, skb_bpf_redirect_fetch(skb));
		skb->sk = NULL;
	}
	sk_psock_tls_verdict_apply(skb, psock, ret);
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(sk_psock_tls_strp_read);

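/* sk_psock_verdict_apply - act on the BPF verdict for @skb: queue it to
 * the local ingress path (__SK_PASS), hand it to sk_psock_skb_redirect()
 * (__SK_REDIRECT), or drop it. Returns a negative errno when the skb
 * could be neither queued nor redirected.
 */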
static int sk_psock_verdict_apply(struct sk_psock *psock, struct sk_buff *skb,
				  int verdict)
{
	struct sock *sk_other;
	int err = 0;
	u32 len, off;

	switch (verdict) {
	case __SK_PASS:
		err = -EIO;
		sk_other = psock->sk;
		if (sock_flag(sk_other, SOCK_DEAD) ||
		    !sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED))
			goto out_free;

		skb_bpf_set_ingress(skb);

		/* If the queue is empty then we can submit directly
		 * into the msg queue. If it's not empty we have to
		 * queue work, otherwise we may get OOO data. Errors
		 * from sk_psock_skb_ingress() are handled by retrying
		 * later from the workqueue.
		 */
		if (skb_queue_empty(&psock->ingress_skb)) {
			len = skb->len;
			off = 0;
			if (skb_bpf_strparser(skb)) {
				struct strp_msg *stm = strp_msg(skb);

				off = stm->offset;
				len = stm->full_len;
			}
			err = sk_psock_skb_ingress_self(psock, skb, off, len);
		}
		if (err < 0) {
			spin_lock_bh(&psock->ingress_lock);
			if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
				skb_queue_tail(&psock->ingress_skb, skb);
				schedule_delayed_work(&psock->work, 0);
				err = 0;
			}
			spin_unlock_bh(&psock->ingress_lock);
			if (err < 0)
				goto out_free;
		}
		break;
	case __SK_REDIRECT:
		tcp_eat_skb(psock->sk, skb);
		err = sk_psock_skb_redirect(psock, skb);
		break;
	case __SK_DROP:
	default:
out_free:
		skb_bpf_redirect_clear(skb);
		tcp_eat_skb(psock->sk, skb);
		sock_drop(psock->sk, skb);
	}

	return err;
}

static void sk_psock_write_space(struct sock *sk)
{
	struct sk_psock *psock;
	void (*write_space)(struct sock *sk) = NULL;

	rcu_read_lock();
	psock = sk_psock(sk);
	if (likely(psock)) {
		if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED))
			schedule_delayed_work(&psock->work, 0);
		write_space = psock->saved_write_space;
	}
	rcu_read_unlock();
	if (write_space)
		write_space(sk);
}

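/* Strparser-backed receive path: when a stream_parser program is
 * attached, strparser carves the byte stream into messages (parse_msg
 * returns each message length) and the stream verdict program then runs
 * on each parsed message in sk_psock_strp_read(). Without a parser, the
 * lighter verdict-only path further below reads skbs straight off the
 * socket via ->read_skb().
 */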
#if IS_ENABLED(CONFIG_BPF_STREAM_PARSER)
static void sk_psock_strp_read(struct strparser *strp, struct sk_buff *skb)
{
	struct sk_psock *psock;
	struct bpf_prog *prog;
	int ret = __SK_DROP;
	struct sock *sk;

	rcu_read_lock();
	sk = strp->sk;
	psock = sk_psock(sk);
	if (unlikely(!psock)) {
		sock_drop(sk, skb);
		goto out;
	}
	prog = READ_ONCE(psock->progs.stream_verdict);
	if (likely(prog)) {
		skb->sk = sk;
		skb_dst_drop(skb);
		skb_bpf_redirect_clear(skb);
		ret = bpf_prog_run_pin_on_cpu(prog, skb);
		skb_bpf_set_strparser(skb);
		ret = sk_psock_map_verd(ret, skb_bpf_redirect_fetch(skb));
		skb->sk = NULL;
	}
	sk_psock_verdict_apply(psock, skb, ret);
out:
	rcu_read_unlock();
}

static int sk_psock_strp_read_done(struct strparser *strp, int err)
{
	return err;
}

static int sk_psock_strp_parse(struct strparser *strp, struct sk_buff *skb)
{
	struct sk_psock *psock = container_of(strp, struct sk_psock, strp);
	struct bpf_prog *prog;
	int ret = skb->len;

	rcu_read_lock();
	prog = READ_ONCE(psock->progs.stream_parser);
	if (likely(prog)) {
		skb->sk = psock->sk;
		ret = bpf_prog_run_pin_on_cpu(prog, skb);
		skb->sk = NULL;
	}
	rcu_read_unlock();
	return ret;
}

/* Called with socket lock held. */
static void sk_psock_strp_data_ready(struct sock *sk)
{
	struct sk_psock *psock;

	trace_sk_data_ready(sk);

	rcu_read_lock();
	psock = sk_psock(sk);
	if (likely(psock)) {
		if (tls_sw_has_ctx_rx(sk)) {
			psock->saved_data_ready(sk);
		} else {
			write_lock_bh(&sk->sk_callback_lock);
			strp_data_ready(&psock->strp);
			write_unlock_bh(&sk->sk_callback_lock);
		}
	}
	rcu_read_unlock();
}

int sk_psock_init_strp(struct sock *sk, struct sk_psock *psock)
{
	int ret;

	static const struct strp_callbacks cb = {
		.rcv_msg	= sk_psock_strp_read,
		.read_sock_done	= sk_psock_strp_read_done,
		.parse_msg	= sk_psock_strp_parse,
	};

	ret = strp_init(&psock->strp, sk, &cb);
	if (!ret)
		sk_psock_set_state(psock, SK_PSOCK_RX_STRP_ENABLED);

	return ret;
}

void sk_psock_start_strp(struct sock *sk, struct sk_psock *psock)
{
	if (psock->saved_data_ready)
		return;

	psock->saved_data_ready = sk->sk_data_ready;
	sk->sk_data_ready = sk_psock_strp_data_ready;
	sk->sk_write_space = sk_psock_write_space;
}

void sk_psock_stop_strp(struct sock *sk, struct sk_psock *psock)
{
	psock_set_prog(&psock->progs.stream_parser, NULL);

	if (!psock->saved_data_ready)
		return;

	sk->sk_data_ready = psock->saved_data_ready;
	psock->saved_data_ready = NULL;
	strp_stop(&psock->strp);
}

static void sk_psock_done_strp(struct sk_psock *psock)
{
	/* Parser has been stopped */
	if (sk_psock_test_state(psock, SK_PSOCK_RX_STRP_ENABLED))
		strp_done(&psock->strp);
}
#else
static void sk_psock_done_strp(struct sk_psock *psock)
{
}
#endif /* CONFIG_BPF_STREAM_PARSER */

static int sk_psock_verdict_recv(struct sock *sk, struct sk_buff *skb)
{
	struct sk_psock *psock;
	struct bpf_prog *prog;
	int ret = __SK_DROP;
	int len = skb->len;

	rcu_read_lock();
	psock = sk_psock(sk);
	if (unlikely(!psock)) {
		len = 0;
		tcp_eat_skb(sk, skb);
		sock_drop(sk, skb);
		goto out;
	}
	prog = READ_ONCE(psock->progs.stream_verdict);
	if (!prog)
		prog = READ_ONCE(psock->progs.skb_verdict);
	if (likely(prog)) {
		skb_dst_drop(skb);
		skb_bpf_redirect_clear(skb);
		ret = bpf_prog_run_pin_on_cpu(prog, skb);
		ret = sk_psock_map_verd(ret, skb_bpf_redirect_fetch(skb));
	}
	ret = sk_psock_verdict_apply(psock, skb, ret);
	if (ret < 0)
		len = ret;
out:
	rcu_read_unlock();
	return len;
}

static void sk_psock_verdict_data_ready(struct sock *sk)
{
	struct socket *sock = sk->sk_socket;
	const struct proto_ops *ops;
	int copied;

	trace_sk_data_ready(sk);

	if (unlikely(!sock))
		return;
	ops = READ_ONCE(sock->ops);
	if (!ops || !ops->read_skb)
		return;
	copied = ops->read_skb(sk, sk_psock_verdict_recv);
	if (copied >= 0) {
		struct sk_psock *psock;

		rcu_read_lock();
		psock = sk_psock(sk);
		if (psock)
			sk_psock_data_ready(sk, psock);
		rcu_read_unlock();
	}
}

void sk_psock_start_verdict(struct sock *sk, struct sk_psock *psock)
{
	if (psock->saved_data_ready)
		return;

	psock->saved_data_ready = sk->sk_data_ready;
	sk->sk_data_ready = sk_psock_verdict_data_ready;
	sk->sk_write_space = sk_psock_write_space;
}

void sk_psock_stop_verdict(struct sock *sk, struct sk_psock *psock)
{
	psock_set_prog(&psock->progs.stream_verdict, NULL);
	psock_set_prog(&psock->progs.skb_verdict, NULL);

	if (!psock->saved_data_ready)
		return;

	sk->sk_data_ready = psock->saved_data_ready;
	psock->saved_data_ready = NULL;
}
ef565928 | 1255 | } |