// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel Connection Multiplexor
 *
 * Copyright (c) 2016 Tom Herbert <tom@herbertland.com>
 */

#include <linux/bpf.h>
#include <linux/errno.h>
#include <linux/errqueue.h>
#include <linux/file.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/poll.h>
#include <linux/rculist.h>
#include <linux/skbuff.h>
#include <linux/socket.h>
#include <linux/uaccess.h>
#include <linux/workqueue.h>
#include <linux/syscalls.h>
#include <linux/sched/signal.h>

#include <net/kcm.h>
#include <net/netns/generic.h>
#include <net/sock.h>
#include <uapi/linux/kcm.h>

unsigned int kcm_net_id;

static struct kmem_cache *kcm_psockp __read_mostly;
static struct kmem_cache *kcm_muxp __read_mostly;
static struct workqueue_struct *kcm_wq;

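/* Helper casts: struct kcm_sock embeds struct sock as its first member, and
 * per-message transmit state is kept in the skb control block (cb).
 */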
static inline struct kcm_sock *kcm_sk(const struct sock *sk)
{
        return (struct kcm_sock *)sk;
}

static inline struct kcm_tx_msg *kcm_tx_msg(struct sk_buff *skb)
{
        return (struct kcm_tx_msg *)skb->cb;
}

static void report_csk_error(struct sock *csk, int err)
{
        csk->sk_err = EPIPE;
        csk->sk_error_report(csk);
}

static void kcm_abort_tx_psock(struct kcm_psock *psock, int err,
                               bool wakeup_kcm)
{
        struct sock *csk = psock->sk;
        struct kcm_mux *mux = psock->mux;

        /* Unrecoverable error in transmit */

        spin_lock_bh(&mux->lock);

        if (psock->tx_stopped) {
                spin_unlock_bh(&mux->lock);
                return;
        }

        psock->tx_stopped = 1;
        KCM_STATS_INCR(psock->stats.tx_aborts);

        if (!psock->tx_kcm) {
                /* Take off psocks_avail list */
                list_del(&psock->psock_avail_list);
        } else if (wakeup_kcm) {
                /* In this case psock is being aborted while outside of
                 * write_msgs and psock is reserved. Schedule tx_work
                 * to handle the failure there. Need to commit tx_stopped
                 * before queuing work.
                 */
                smp_mb();

                queue_work(kcm_wq, &psock->tx_kcm->tx_work);
        }

        spin_unlock_bh(&mux->lock);

        /* Report error on lower socket */
        report_csk_error(csk, err);
}

/* RX mux lock held. */
static void kcm_update_rx_mux_stats(struct kcm_mux *mux,
                                    struct kcm_psock *psock)
{
        STRP_STATS_ADD(mux->stats.rx_bytes,
                       psock->strp.stats.bytes -
                       psock->saved_rx_bytes);
        mux->stats.rx_msgs +=
                psock->strp.stats.msgs - psock->saved_rx_msgs;
        psock->saved_rx_msgs = psock->strp.stats.msgs;
        psock->saved_rx_bytes = psock->strp.stats.bytes;
}

static void kcm_update_tx_mux_stats(struct kcm_mux *mux,
                                    struct kcm_psock *psock)
{
        KCM_STATS_ADD(mux->stats.tx_bytes,
                      psock->stats.tx_bytes - psock->saved_tx_bytes);
        mux->stats.tx_msgs +=
                psock->stats.tx_msgs - psock->saved_tx_msgs;
        psock->saved_tx_msgs = psock->stats.tx_msgs;
        psock->saved_tx_bytes = psock->stats.tx_bytes;
}

static int kcm_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);

/* KCM is ready to receive messages on its queue-- either the KCM is new or
 * has become unblocked after being blocked on full socket buffer. Queue any
 * pending ready messages on a psock. RX mux lock held.
 */
static void kcm_rcv_ready(struct kcm_sock *kcm)
{
        struct kcm_mux *mux = kcm->mux;
        struct kcm_psock *psock;
        struct sk_buff *skb;

        if (unlikely(kcm->rx_wait || kcm->rx_psock || kcm->rx_disabled))
                return;

        while (unlikely((skb = __skb_dequeue(&mux->rx_hold_queue)))) {
                if (kcm_queue_rcv_skb(&kcm->sk, skb)) {
                        /* Assuming buffer limit has been reached */
                        skb_queue_head(&mux->rx_hold_queue, skb);
                        WARN_ON(!sk_rmem_alloc_get(&kcm->sk));
                        return;
                }
        }

        while (!list_empty(&mux->psocks_ready)) {
                psock = list_first_entry(&mux->psocks_ready, struct kcm_psock,
                                         psock_ready_list);

                if (kcm_queue_rcv_skb(&kcm->sk, psock->ready_rx_msg)) {
                        /* Assuming buffer limit has been reached */
                        WARN_ON(!sk_rmem_alloc_get(&kcm->sk));
                        return;
                }

                /* Consumed the ready message on the psock. Schedule rx_work to
                 * get more messages.
                 */
                list_del(&psock->psock_ready_list);
                psock->ready_rx_msg = NULL;

                /* Commit clearing of ready_rx_msg for queuing work */
                smp_mb();

                strp_unpause(&psock->strp);
                strp_check_rcv(&psock->strp);
        }

        /* Buffer limit is okay now, add to ready list */
        list_add_tail(&kcm->wait_rx_list,
                      &kcm->mux->kcm_rx_waiters);
        kcm->rx_wait = true;
}

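/* skb destructor for messages on a kcm receive queue: uncharge the memory
 * and, once the queue has drained below sk_rcvlowat, mark the kcm socket
 * ready to receive again.
 */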
static void kcm_rfree(struct sk_buff *skb)
{
        struct sock *sk = skb->sk;
        struct kcm_sock *kcm = kcm_sk(sk);
        struct kcm_mux *mux = kcm->mux;
        unsigned int len = skb->truesize;

        sk_mem_uncharge(sk, len);
        atomic_sub(len, &sk->sk_rmem_alloc);

        /* For reading rx_wait and rx_psock without holding lock */
        smp_mb__after_atomic();

        if (!kcm->rx_wait && !kcm->rx_psock &&
            sk_rmem_alloc_get(sk) < sk->sk_rcvlowat) {
                spin_lock_bh(&mux->rx_lock);
                kcm_rcv_ready(kcm);
                spin_unlock_bh(&mux->rx_lock);
        }
}

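/* Charge an skb to a kcm socket's receive queue, enforcing the receive
 * buffer and memory accounting limits, and signal that data is ready.
 */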
static int kcm_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
        struct sk_buff_head *list = &sk->sk_receive_queue;

        if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
                return -ENOMEM;

        if (!sk_rmem_schedule(sk, skb, skb->truesize))
                return -ENOBUFS;

        skb->dev = NULL;

        skb_orphan(skb);
        skb->sk = sk;
        skb->destructor = kcm_rfree;
        atomic_add(skb->truesize, &sk->sk_rmem_alloc);
        sk_mem_charge(sk, skb->truesize);

        skb_queue_tail(list, skb);

        if (!sock_flag(sk, SOCK_DEAD))
                sk->sk_data_ready(sk);

        return 0;
}

/* Requeue received messages for a kcm socket to other kcm sockets. This is
 * called when a kcm socket is receive disabled.
 * RX mux lock held.
 */
static void requeue_rx_msgs(struct kcm_mux *mux, struct sk_buff_head *head)
{
        struct sk_buff *skb;
        struct kcm_sock *kcm;

        while ((skb = __skb_dequeue(head))) {
                /* Reset destructor to avoid calling kcm_rcv_ready */
                skb->destructor = sock_rfree;
                skb_orphan(skb);
try_again:
                if (list_empty(&mux->kcm_rx_waiters)) {
                        skb_queue_tail(&mux->rx_hold_queue, skb);
                        continue;
                }

                kcm = list_first_entry(&mux->kcm_rx_waiters,
                                       struct kcm_sock, wait_rx_list);

                if (kcm_queue_rcv_skb(&kcm->sk, skb)) {
                        /* Should mean socket buffer full */
                        list_del(&kcm->wait_rx_list);
                        kcm->rx_wait = false;

                        /* Commit rx_wait to read in kcm_free */
                        smp_wmb();

                        goto try_again;
                }
        }
}

/* Lower sock lock held */
static struct kcm_sock *reserve_rx_kcm(struct kcm_psock *psock,
                                       struct sk_buff *head)
{
        struct kcm_mux *mux = psock->mux;
        struct kcm_sock *kcm;

        WARN_ON(psock->ready_rx_msg);

        if (psock->rx_kcm)
                return psock->rx_kcm;

        spin_lock_bh(&mux->rx_lock);

        if (psock->rx_kcm) {
                spin_unlock_bh(&mux->rx_lock);
                return psock->rx_kcm;
        }

        kcm_update_rx_mux_stats(mux, psock);

        if (list_empty(&mux->kcm_rx_waiters)) {
                psock->ready_rx_msg = head;
                strp_pause(&psock->strp);
                list_add_tail(&psock->psock_ready_list,
                              &mux->psocks_ready);
                spin_unlock_bh(&mux->rx_lock);
                return NULL;
        }

        kcm = list_first_entry(&mux->kcm_rx_waiters,
                               struct kcm_sock, wait_rx_list);
        list_del(&kcm->wait_rx_list);
        kcm->rx_wait = false;

        psock->rx_kcm = kcm;
        kcm->rx_psock = psock;

        spin_unlock_bh(&mux->rx_lock);

        return kcm;
}

static void kcm_done(struct kcm_sock *kcm);

static void kcm_done_work(struct work_struct *w)
{
        kcm_done(container_of(w, struct kcm_sock, done_work));
}

/* Lower sock held */
static void unreserve_rx_kcm(struct kcm_psock *psock,
                             bool rcv_ready)
{
        struct kcm_sock *kcm = psock->rx_kcm;
        struct kcm_mux *mux = psock->mux;

        if (!kcm)
                return;

        spin_lock_bh(&mux->rx_lock);

        psock->rx_kcm = NULL;
        kcm->rx_psock = NULL;

        /* Commit kcm->rx_psock before sk_rmem_alloc_get to sync with
         * kcm_rfree
         */
        smp_mb();

        if (unlikely(kcm->done)) {
                spin_unlock_bh(&mux->rx_lock);

                /* Need to run kcm_done in a task since we need to acquire
                 * callback locks which may already be held here.
                 */
                INIT_WORK(&kcm->done_work, kcm_done_work);
                schedule_work(&kcm->done_work);
                return;
        }

        if (unlikely(kcm->rx_disabled)) {
                requeue_rx_msgs(mux, &kcm->sk.sk_receive_queue);
        } else if (rcv_ready || unlikely(!sk_rmem_alloc_get(&kcm->sk))) {
                /* Check for degenerative race with rx_wait that all
                 * data was dequeued (accounted for in kcm_rfree).
                 */
                kcm_rcv_ready(kcm);
        }
        spin_unlock_bh(&mux->rx_lock);
}

/* Lower sock lock held */
static void psock_data_ready(struct sock *sk)
{
        struct kcm_psock *psock;

        read_lock_bh(&sk->sk_callback_lock);

        psock = (struct kcm_psock *)sk->sk_user_data;
        if (likely(psock))
                strp_data_ready(&psock->strp);

        read_unlock_bh(&sk->sk_callback_lock);
}

/* Called with lower sock held */
static void kcm_rcv_strparser(struct strparser *strp, struct sk_buff *skb)
{
        struct kcm_psock *psock = container_of(strp, struct kcm_psock, strp);
        struct kcm_sock *kcm;

try_queue:
        kcm = reserve_rx_kcm(psock, skb);
        if (!kcm) {
                /* Unable to reserve a KCM, message is held in psock and strp
                 * is paused.
                 */
                return;
        }

        if (kcm_queue_rcv_skb(&kcm->sk, skb)) {
                /* Should mean socket buffer full */
                unreserve_rx_kcm(psock, false);
                goto try_queue;
        }
}

static int kcm_parse_func_strparser(struct strparser *strp, struct sk_buff *skb)
{
        struct kcm_psock *psock = container_of(strp, struct kcm_psock, strp);
        struct bpf_prog *prog = psock->bpf_prog;

        return BPF_PROG_RUN(prog, skb);
}

static int kcm_read_sock_done(struct strparser *strp, int err)
{
        struct kcm_psock *psock = container_of(strp, struct kcm_psock, strp);

        unreserve_rx_kcm(psock, true);

        return err;
}

static void psock_state_change(struct sock *sk)
{
        /* TCP only does an EPOLLIN for a half close. Do an EPOLLHUP here
         * since the application will normally not poll with EPOLLIN
         * on the TCP sockets.
         */

        report_csk_error(sk, EPIPE);
}

static void psock_write_space(struct sock *sk)
{
        struct kcm_psock *psock;
        struct kcm_mux *mux;
        struct kcm_sock *kcm;

        read_lock_bh(&sk->sk_callback_lock);

        psock = (struct kcm_psock *)sk->sk_user_data;
        if (unlikely(!psock))
                goto out;
        mux = psock->mux;

        spin_lock_bh(&mux->lock);

        /* Check if the socket is reserved; if so, a kcm is waiting to send */
        kcm = psock->tx_kcm;
        if (kcm && !unlikely(kcm->tx_stopped))
                queue_work(kcm_wq, &kcm->tx_work);

        spin_unlock_bh(&mux->lock);
out:
        read_unlock_bh(&sk->sk_callback_lock);
}

static void unreserve_psock(struct kcm_sock *kcm);

/* kcm sock is locked. */
static struct kcm_psock *reserve_psock(struct kcm_sock *kcm)
{
        struct kcm_mux *mux = kcm->mux;
        struct kcm_psock *psock;

        psock = kcm->tx_psock;

        smp_rmb(); /* Must read tx_psock before tx_wait */

        if (psock) {
                WARN_ON(kcm->tx_wait);
                if (unlikely(psock->tx_stopped))
                        unreserve_psock(kcm);
                else
                        return kcm->tx_psock;
        }

        spin_lock_bh(&mux->lock);

        /* Check again under lock to see if a psock was reserved for this
         * kcm via psock_now_avail.
         */
        psock = kcm->tx_psock;
        if (unlikely(psock)) {
                WARN_ON(kcm->tx_wait);
                spin_unlock_bh(&mux->lock);
                return kcm->tx_psock;
        }

        if (!list_empty(&mux->psocks_avail)) {
                psock = list_first_entry(&mux->psocks_avail,
                                         struct kcm_psock,
                                         psock_avail_list);
                list_del(&psock->psock_avail_list);
                if (kcm->tx_wait) {
                        list_del(&kcm->wait_psock_list);
                        kcm->tx_wait = false;
                }
                kcm->tx_psock = psock;
                psock->tx_kcm = kcm;
                KCM_STATS_INCR(psock->stats.reserved);
        } else if (!kcm->tx_wait) {
                list_add_tail(&kcm->wait_psock_list,
                              &mux->kcm_tx_waiters);
                kcm->tx_wait = true;
        }

        spin_unlock_bh(&mux->lock);

        return psock;
}

/* mux lock held */
static void psock_now_avail(struct kcm_psock *psock)
{
        struct kcm_mux *mux = psock->mux;
        struct kcm_sock *kcm;

        if (list_empty(&mux->kcm_tx_waiters)) {
                list_add_tail(&psock->psock_avail_list,
                              &mux->psocks_avail);
        } else {
                kcm = list_first_entry(&mux->kcm_tx_waiters,
                                       struct kcm_sock,
                                       wait_psock_list);
                list_del(&kcm->wait_psock_list);
                kcm->tx_wait = false;
                psock->tx_kcm = kcm;

                /* Commit before changing tx_psock since that is read in
                 * reserve_psock before queuing work.
                 */
                smp_mb();

                kcm->tx_psock = psock;
                KCM_STATS_INCR(psock->stats.reserved);
                queue_work(kcm_wq, &kcm->tx_work);
        }
}

/* kcm sock is locked. */
static void unreserve_psock(struct kcm_sock *kcm)
{
        struct kcm_psock *psock;
        struct kcm_mux *mux = kcm->mux;

        spin_lock_bh(&mux->lock);

        psock = kcm->tx_psock;

        if (WARN_ON(!psock)) {
                spin_unlock_bh(&mux->lock);
                return;
        }

        smp_rmb(); /* Read tx_psock before tx_wait */

        kcm_update_tx_mux_stats(mux, psock);

        WARN_ON(kcm->tx_wait);

        kcm->tx_psock = NULL;
        psock->tx_kcm = NULL;
        KCM_STATS_INCR(psock->stats.unreserved);

        if (unlikely(psock->tx_stopped)) {
                if (psock->done) {
                        /* Deferred free */
                        list_del(&psock->psock_list);
                        mux->psocks_cnt--;
                        sock_put(psock->sk);
                        fput(psock->sk->sk_socket->file);
                        kmem_cache_free(kcm_psockp, psock);
                }

                /* Don't put back on available list */

                spin_unlock_bh(&mux->lock);

                return;
        }

        psock_now_avail(psock);

        spin_unlock_bh(&mux->lock);
}

static void kcm_report_tx_retry(struct kcm_sock *kcm)
{
        struct kcm_mux *mux = kcm->mux;

        spin_lock_bh(&mux->lock);
        KCM_STATS_INCR(mux->stats.tx_retries);
        spin_unlock_bh(&mux->lock);
}

/* Write any messages ready on the kcm socket. Called with kcm sock lock
 * held. Return bytes actually sent or error.
 */
static int kcm_write_msgs(struct kcm_sock *kcm)
{
        struct sock *sk = &kcm->sk;
        struct kcm_psock *psock;
        struct sk_buff *skb, *head;
        struct kcm_tx_msg *txm;
        unsigned short fragidx, frag_offset;
        unsigned int sent, total_sent = 0;
        int ret = 0;

        kcm->tx_wait_more = false;
        psock = kcm->tx_psock;
        if (unlikely(psock && psock->tx_stopped)) {
                /* A reserved psock was aborted asynchronously. Unreserve
                 * it and we'll retry the message.
                 */
                unreserve_psock(kcm);
                kcm_report_tx_retry(kcm);
                if (skb_queue_empty(&sk->sk_write_queue))
                        return 0;

                kcm_tx_msg(skb_peek(&sk->sk_write_queue))->sent = 0;

        } else if (skb_queue_empty(&sk->sk_write_queue)) {
                return 0;
        }

        head = skb_peek(&sk->sk_write_queue);
        txm = kcm_tx_msg(head);

        if (txm->sent) {
                /* Send of first skbuff in queue already in progress */
                if (WARN_ON(!psock)) {
                        ret = -EINVAL;
                        goto out;
                }
                sent = txm->sent;
                frag_offset = txm->frag_offset;
                fragidx = txm->fragidx;
                skb = txm->frag_skb;

                goto do_frag;
        }

try_again:
        psock = reserve_psock(kcm);
        if (!psock)
                goto out;

        do {
                skb = head;
                txm = kcm_tx_msg(head);
                sent = 0;

do_frag_list:
                if (WARN_ON(!skb_shinfo(skb)->nr_frags)) {
                        ret = -EINVAL;
                        goto out;
                }

                for (fragidx = 0; fragidx < skb_shinfo(skb)->nr_frags;
                     fragidx++) {
                        skb_frag_t *frag;

                        frag_offset = 0;
do_frag:
                        frag = &skb_shinfo(skb)->frags[fragidx];
                        if (WARN_ON(!skb_frag_size(frag))) {
                                ret = -EINVAL;
                                goto out;
                        }

                        ret = kernel_sendpage(psock->sk->sk_socket,
                                              skb_frag_page(frag),
                                              skb_frag_off(frag) + frag_offset,
                                              skb_frag_size(frag) - frag_offset,
                                              MSG_DONTWAIT);
                        if (ret <= 0) {
                                if (ret == -EAGAIN) {
                                        /* Save state to try again when there's
                                         * write space on the socket
                                         */
                                        txm->sent = sent;
                                        txm->frag_offset = frag_offset;
                                        txm->fragidx = fragidx;
                                        txm->frag_skb = skb;

                                        ret = 0;
                                        goto out;
                                }

                                /* Hard failure in sending message, abort this
                                 * psock since it has lost framing
                                 * synchronization and retry sending the
                                 * message from the beginning.
                                 */
                                kcm_abort_tx_psock(psock, ret ? -ret : EPIPE,
                                                   true);
                                unreserve_psock(kcm);

                                txm->sent = 0;
                                kcm_report_tx_retry(kcm);
                                ret = 0;

                                goto try_again;
                        }

                        sent += ret;
                        frag_offset += ret;
                        KCM_STATS_ADD(psock->stats.tx_bytes, ret);
                        if (frag_offset < skb_frag_size(frag)) {
                                /* Not finished with this frag */
                                goto do_frag;
                        }
                }

                if (skb == head) {
                        if (skb_has_frag_list(skb)) {
                                skb = skb_shinfo(skb)->frag_list;
                                goto do_frag_list;
                        }
                } else if (skb->next) {
                        skb = skb->next;
                        goto do_frag_list;
                }

                /* Successfully sent the whole packet, account for it. */
                skb_dequeue(&sk->sk_write_queue);
                kfree_skb(head);
                sk->sk_wmem_queued -= sent;
                total_sent += sent;
                KCM_STATS_INCR(psock->stats.tx_msgs);
        } while ((head = skb_peek(&sk->sk_write_queue)));
out:
        if (!head) {
                /* Done with all queued messages. */
                WARN_ON(!skb_queue_empty(&sk->sk_write_queue));
                unreserve_psock(kcm);
        }

        /* Check if write space is available */
        sk->sk_write_space(sk);

        return total_sent ? : ret;
}

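/* Work handler: resume transmission when a psock becomes writable or an
 * asynchronous tx abort needs to be handled.
 */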
static void kcm_tx_work(struct work_struct *w)
{
        struct kcm_sock *kcm = container_of(w, struct kcm_sock, tx_work);
        struct sock *sk = &kcm->sk;
        int err;

        lock_sock(sk);

        /* Primarily for SOCK_DGRAM sockets, also handle asynchronous tx
         * aborts
         */
        err = kcm_write_msgs(kcm);
        if (err < 0) {
                /* Hard failure in write, report error on KCM socket */
                pr_warn("KCM: Hard failure on kcm_write_msgs %d\n", err);
                report_csk_error(&kcm->sk, -err);
                goto out;
        }

        /* Primarily for SOCK_SEQPACKET sockets */
        if (likely(sk->sk_socket) &&
            test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
                clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
                sk->sk_write_space(sk);
        }

out:
        release_sock(sk);
}

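/* Flush a batch: write out any messages held back by MSG_BATCH */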
static void kcm_push(struct kcm_sock *kcm)
{
        if (kcm->tx_wait_more)
                kcm_write_msgs(kcm);
}

static ssize_t kcm_sendpage(struct socket *sock, struct page *page,
                            int offset, size_t size, int flags)
{
        struct sock *sk = sock->sk;
        struct kcm_sock *kcm = kcm_sk(sk);
        struct sk_buff *skb = NULL, *head = NULL;
        long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
        bool eor;
        int err = 0;
        int i;

        if (flags & MSG_SENDPAGE_NOTLAST)
                flags |= MSG_MORE;

        /* No MSG_EOR from splice, only look at MSG_MORE */
        eor = !(flags & MSG_MORE);

        lock_sock(sk);

        sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);

        err = -EPIPE;
        if (sk->sk_err)
                goto out_error;

        if (kcm->seq_skb) {
                /* Previously opened message */
                head = kcm->seq_skb;
                skb = kcm_tx_msg(head)->last_skb;
                i = skb_shinfo(skb)->nr_frags;

                if (skb_can_coalesce(skb, i, page, offset)) {
                        skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], size);
                        skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
                        goto coalesced;
                }

                if (i >= MAX_SKB_FRAGS) {
                        struct sk_buff *tskb;

                        tskb = alloc_skb(0, sk->sk_allocation);
                        while (!tskb) {
                                kcm_push(kcm);
                                err = sk_stream_wait_memory(sk, &timeo);
                                if (err)
                                        goto out_error;

                                tskb = alloc_skb(0, sk->sk_allocation);
                        }

                        if (head == skb)
                                skb_shinfo(head)->frag_list = tskb;
                        else
                                skb->next = tskb;

                        skb = tskb;
                        skb->ip_summed = CHECKSUM_UNNECESSARY;
                        i = 0;
                }
        } else {
                /* Call the sk_stream functions to manage the sndbuf mem. */
                if (!sk_stream_memory_free(sk)) {
                        kcm_push(kcm);
                        set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
                        err = sk_stream_wait_memory(sk, &timeo);
                        if (err)
                                goto out_error;
                }

                head = alloc_skb(0, sk->sk_allocation);
                while (!head) {
                        kcm_push(kcm);
                        err = sk_stream_wait_memory(sk, &timeo);
                        if (err)
                                goto out_error;

                        head = alloc_skb(0, sk->sk_allocation);
                }

                skb = head;
                i = 0;
        }

        get_page(page);
        skb_fill_page_desc(skb, i, page, offset, size);
        skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;

coalesced:
        skb->len += size;
        skb->data_len += size;
        skb->truesize += size;
        sk->sk_wmem_queued += size;
        sk_mem_charge(sk, size);

        if (head != skb) {
                head->len += size;
                head->data_len += size;
                head->truesize += size;
        }

        if (eor) {
                bool not_busy = skb_queue_empty(&sk->sk_write_queue);

                /* Message complete, queue it on send buffer */
                __skb_queue_tail(&sk->sk_write_queue, head);
                kcm->seq_skb = NULL;
                KCM_STATS_INCR(kcm->stats.tx_msgs);

                if (flags & MSG_BATCH) {
                        kcm->tx_wait_more = true;
                } else if (kcm->tx_wait_more || not_busy) {
                        err = kcm_write_msgs(kcm);
                        if (err < 0) {
                                /* We got a hard error in write_msgs but have
                                 * already queued this message. Report an error
                                 * in the socket, but don't affect return value
                                 * from sendmsg
                                 */
                                pr_warn("KCM: Hard failure on kcm_write_msgs\n");
                                report_csk_error(&kcm->sk, -err);
                        }
                }
        } else {
                /* Message not complete, save state */
                kcm->seq_skb = head;
                kcm_tx_msg(head)->last_skb = skb;
        }

        KCM_STATS_ADD(kcm->stats.tx_bytes, size);

        release_sock(sk);
        return size;

out_error:
        kcm_push(kcm);

        err = sk_stream_error(sk, flags, err);

        /* make sure we wake any epoll edge trigger waiter */
        if (unlikely(skb_queue_len(&sk->sk_write_queue) == 0 && err == -EAGAIN))
                sk->sk_write_space(sk);

        release_sock(sk);
        return err;
}

static int kcm_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
{
        struct sock *sk = sock->sk;
        struct kcm_sock *kcm = kcm_sk(sk);
        struct sk_buff *skb = NULL, *head = NULL;
        size_t copy, copied = 0;
        long timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
        int eor = (sock->type == SOCK_DGRAM) ?
                  !(msg->msg_flags & MSG_MORE) : !!(msg->msg_flags & MSG_EOR);
        int err = -EPIPE;

        lock_sock(sk);

        /* Per tcp_sendmsg this should be in poll */
        sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);

        if (sk->sk_err)
                goto out_error;

        if (kcm->seq_skb) {
                /* Previously opened message */
                head = kcm->seq_skb;
                skb = kcm_tx_msg(head)->last_skb;
                goto start;
        }

        /* Call the sk_stream functions to manage the sndbuf mem. */
        if (!sk_stream_memory_free(sk)) {
                kcm_push(kcm);
                set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
                err = sk_stream_wait_memory(sk, &timeo);
                if (err)
                        goto out_error;
        }

        if (msg_data_left(msg)) {
                /* New message, alloc head skb */
                head = alloc_skb(0, sk->sk_allocation);
                while (!head) {
                        kcm_push(kcm);
                        err = sk_stream_wait_memory(sk, &timeo);
                        if (err)
                                goto out_error;

                        head = alloc_skb(0, sk->sk_allocation);
                }

                skb = head;

                /* Set ip_summed to CHECKSUM_UNNECESSARY to avoid calling
                 * csum_and_copy_from_iter from skb_do_copy_data_nocache.
                 */
                skb->ip_summed = CHECKSUM_UNNECESSARY;
        }

start:
        while (msg_data_left(msg)) {
                bool merge = true;
                int i = skb_shinfo(skb)->nr_frags;
                struct page_frag *pfrag = sk_page_frag(sk);

                if (!sk_page_frag_refill(sk, pfrag))
                        goto wait_for_memory;

                if (!skb_can_coalesce(skb, i, pfrag->page,
                                      pfrag->offset)) {
                        if (i == MAX_SKB_FRAGS) {
                                struct sk_buff *tskb;

                                tskb = alloc_skb(0, sk->sk_allocation);
                                if (!tskb)
                                        goto wait_for_memory;

                                if (head == skb)
                                        skb_shinfo(head)->frag_list = tskb;
                                else
                                        skb->next = tskb;

                                skb = tskb;
                                skb->ip_summed = CHECKSUM_UNNECESSARY;
                                continue;
                        }
                        merge = false;
                }

                copy = min_t(int, msg_data_left(msg),
                             pfrag->size - pfrag->offset);

                if (!sk_wmem_schedule(sk, copy))
                        goto wait_for_memory;

                err = skb_copy_to_page_nocache(sk, &msg->msg_iter, skb,
                                               pfrag->page,
                                               pfrag->offset,
                                               copy);
                if (err)
                        goto out_error;

                /* Update the skb. */
                if (merge) {
                        skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
                } else {
                        skb_fill_page_desc(skb, i, pfrag->page,
                                           pfrag->offset, copy);
                        get_page(pfrag->page);
                }

                pfrag->offset += copy;
                copied += copy;
                if (head != skb) {
                        head->len += copy;
                        head->data_len += copy;
                }

                continue;

wait_for_memory:
                kcm_push(kcm);
                err = sk_stream_wait_memory(sk, &timeo);
                if (err)
                        goto out_error;
        }

        if (eor) {
                bool not_busy = skb_queue_empty(&sk->sk_write_queue);

                if (head) {
                        /* Message complete, queue it on send buffer */
                        __skb_queue_tail(&sk->sk_write_queue, head);
                        kcm->seq_skb = NULL;
                        KCM_STATS_INCR(kcm->stats.tx_msgs);
                }

                if (msg->msg_flags & MSG_BATCH) {
                        kcm->tx_wait_more = true;
                } else if (kcm->tx_wait_more || not_busy) {
                        err = kcm_write_msgs(kcm);
                        if (err < 0) {
                                /* We got a hard error in write_msgs but have
                                 * already queued this message. Report an error
                                 * in the socket, but don't affect return value
                                 * from sendmsg
                                 */
                                pr_warn("KCM: Hard failure on kcm_write_msgs\n");
                                report_csk_error(&kcm->sk, -err);
                        }
                }
        } else {
                /* Message not complete, save state */
partial_message:
                if (head) {
                        kcm->seq_skb = head;
                        kcm_tx_msg(head)->last_skb = skb;
                }
        }

        KCM_STATS_ADD(kcm->stats.tx_bytes, copied);

        release_sock(sk);
        return copied;

out_error:
        kcm_push(kcm);

        if (copied && sock->type == SOCK_SEQPACKET) {
                /* Wrote some bytes before encountering an
                 * error, return partial success.
                 */
                goto partial_message;
        }

        if (head != kcm->seq_skb)
                kfree_skb(head);

        err = sk_stream_error(sk, msg->msg_flags, err);

        /* make sure we wake any epoll edge trigger waiter */
        if (unlikely(skb_queue_len(&sk->sk_write_queue) == 0 && err == -EAGAIN))
                sk->sk_write_space(sk);

        release_sock(sk);
        return err;
}

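/* Block until a message is available on the receive queue, honoring the
 * timeout, socket errors, and pending signals.
 */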
static struct sk_buff *kcm_wait_data(struct sock *sk, int flags,
                                     long timeo, int *err)
{
        struct sk_buff *skb;

        while (!(skb = skb_peek(&sk->sk_receive_queue))) {
                if (sk->sk_err) {
                        *err = sock_error(sk);
                        return NULL;
                }

                if (sock_flag(sk, SOCK_DONE))
                        return NULL;

                if ((flags & MSG_DONTWAIT) || !timeo) {
                        *err = -EAGAIN;
                        return NULL;
                }

                sk_wait_data(sk, &timeo, NULL);

                /* Handle signals */
                if (signal_pending(current)) {
                        *err = sock_intr_errno(timeo);
                        return NULL;
                }
        }

        return skb;
}

static int kcm_recvmsg(struct socket *sock, struct msghdr *msg,
                       size_t len, int flags)
{
        struct sock *sk = sock->sk;
        struct kcm_sock *kcm = kcm_sk(sk);
        int err = 0;
        long timeo;
        struct strp_msg *stm;
        int copied = 0;
        struct sk_buff *skb;

        timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

        lock_sock(sk);

        skb = kcm_wait_data(sk, flags, timeo, &err);
        if (!skb)
                goto out;

        /* Okay, have a message on the receive queue */

        stm = strp_msg(skb);

        if (len > stm->full_len)
                len = stm->full_len;

        err = skb_copy_datagram_msg(skb, stm->offset, msg, len);
        if (err < 0)
                goto out;

        copied = len;
        if (likely(!(flags & MSG_PEEK))) {
                KCM_STATS_ADD(kcm->stats.rx_bytes, copied);
                if (copied < stm->full_len) {
                        if (sock->type == SOCK_DGRAM) {
                                /* Truncated message */
                                msg->msg_flags |= MSG_TRUNC;
                                goto msg_finished;
                        }
                        stm->offset += copied;
                        stm->full_len -= copied;
                } else {
msg_finished:
                        /* Finished with message */
                        msg->msg_flags |= MSG_EOR;
                        KCM_STATS_INCR(kcm->stats.rx_msgs);
                        skb_unlink(skb, &sk->sk_receive_queue);
                        kfree_skb(skb);
                }
        }

out:
        release_sock(sk);

        return copied ? : err;
}

static ssize_t kcm_splice_read(struct socket *sock, loff_t *ppos,
                               struct pipe_inode_info *pipe, size_t len,
                               unsigned int flags)
{
        struct sock *sk = sock->sk;
        struct kcm_sock *kcm = kcm_sk(sk);
        long timeo;
        struct strp_msg *stm;
        int err = 0;
        ssize_t copied;
        struct sk_buff *skb;

        /* Only support splice for SOCK_SEQPACKET */

        timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

        lock_sock(sk);

        skb = kcm_wait_data(sk, flags, timeo, &err);
        if (!skb)
                goto err_out;

        /* Okay, have a message on the receive queue */

        stm = strp_msg(skb);

        if (len > stm->full_len)
                len = stm->full_len;

        copied = skb_splice_bits(skb, sk, stm->offset, pipe, len, flags);
        if (copied < 0) {
                err = copied;
                goto err_out;
        }

        KCM_STATS_ADD(kcm->stats.rx_bytes, copied);

        stm->offset += copied;
        stm->full_len -= copied;

        /* We have no way to return MSG_EOR. If all the bytes have been
         * read we still leave the message in the receive socket buffer.
         * A subsequent recvmsg needs to be done to return MSG_EOR and
         * finish reading the message.
         */

        release_sock(sk);

        return copied;

err_out:
        release_sock(sk);

        return err;
}

/* kcm sock lock held */
static void kcm_recv_disable(struct kcm_sock *kcm)
{
        struct kcm_mux *mux = kcm->mux;

        if (kcm->rx_disabled)
                return;

        spin_lock_bh(&mux->rx_lock);

        kcm->rx_disabled = 1;

        /* If a psock is reserved we'll do cleanup in unreserve */
        if (!kcm->rx_psock) {
                if (kcm->rx_wait) {
                        list_del(&kcm->wait_rx_list);
                        kcm->rx_wait = false;
                }

                requeue_rx_msgs(mux, &kcm->sk.sk_receive_queue);
        }

        spin_unlock_bh(&mux->rx_lock);
}

/* kcm sock lock held */
static void kcm_recv_enable(struct kcm_sock *kcm)
{
        struct kcm_mux *mux = kcm->mux;

        if (!kcm->rx_disabled)
                return;

        spin_lock_bh(&mux->rx_lock);

        kcm->rx_disabled = 0;
        kcm_rcv_ready(kcm);

        spin_unlock_bh(&mux->rx_lock);
}

static int kcm_setsockopt(struct socket *sock, int level, int optname,
                          char __user *optval, unsigned int optlen)
{
        struct kcm_sock *kcm = kcm_sk(sock->sk);
        int val, valbool;
        int err = 0;

        if (level != SOL_KCM)
                return -ENOPROTOOPT;

        if (optlen < sizeof(int))
                return -EINVAL;

        if (get_user(val, (int __user *)optval))
                return -EINVAL;

        valbool = val ? 1 : 0;

        switch (optname) {
        case KCM_RECV_DISABLE:
                lock_sock(&kcm->sk);
                if (valbool)
                        kcm_recv_disable(kcm);
                else
                        kcm_recv_enable(kcm);
                release_sock(&kcm->sk);
                break;
        default:
                err = -ENOPROTOOPT;
        }

        return err;
}

static int kcm_getsockopt(struct socket *sock, int level, int optname,
                          char __user *optval, int __user *optlen)
{
        struct kcm_sock *kcm = kcm_sk(sock->sk);
        int val, len;

        if (level != SOL_KCM)
                return -ENOPROTOOPT;

        if (get_user(len, optlen))
                return -EFAULT;

        len = min_t(unsigned int, len, sizeof(int));
        if (len < 0)
                return -EINVAL;

        switch (optname) {
        case KCM_RECV_DISABLE:
                val = kcm->rx_disabled;
                break;
        default:
                return -ENOPROTOOPT;
        }

        if (put_user(len, optlen))
                return -EFAULT;
        if (copy_to_user(optval, &val, len))
                return -EFAULT;
        return 0;
}

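/* Initialize a new kcm socket and link it into its mux at the lowest
 * unused index.
 */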
static void init_kcm_sock(struct kcm_sock *kcm, struct kcm_mux *mux)
{
        struct kcm_sock *tkcm;
        struct list_head *head;
        int index = 0;

        /* For SOCK_SEQPACKET sock type, datagram_poll checks the sk_state, so
         * we set sk_state, otherwise epoll_wait always returns right away with
         * EPOLLHUP
         */
        kcm->sk.sk_state = TCP_ESTABLISHED;

        /* Add to mux's kcm sockets list */
        kcm->mux = mux;
        spin_lock_bh(&mux->lock);

        head = &mux->kcm_socks;
        list_for_each_entry(tkcm, &mux->kcm_socks, kcm_sock_list) {
                if (tkcm->index != index)
                        break;
                head = &tkcm->kcm_sock_list;
                index++;
        }

        list_add(&kcm->kcm_sock_list, head);
        kcm->index = index;

        mux->kcm_socks_cnt++;
        spin_unlock_bh(&mux->lock);

        INIT_WORK(&kcm->tx_work, kcm_tx_work);

        spin_lock_bh(&mux->rx_lock);
        kcm_rcv_ready(kcm);
        spin_unlock_bh(&mux->rx_lock);
}

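/* Attach a connected TCP socket to the mux as a new psock: initialize the
 * strparser on it and take over the socket's callbacks.
 */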
static int kcm_attach(struct socket *sock, struct socket *csock,
                      struct bpf_prog *prog)
{
        struct kcm_sock *kcm = kcm_sk(sock->sk);
        struct kcm_mux *mux = kcm->mux;
        struct sock *csk;
        struct kcm_psock *psock = NULL, *tpsock;
        struct list_head *head;
        int index = 0;
        static const struct strp_callbacks cb = {
                .rcv_msg = kcm_rcv_strparser,
                .parse_msg = kcm_parse_func_strparser,
                .read_sock_done = kcm_read_sock_done,
        };
        int err = 0;

        csk = csock->sk;
        if (!csk)
                return -EINVAL;

        lock_sock(csk);

        /* Only allow TCP sockets to be attached for now */
        if ((csk->sk_family != AF_INET && csk->sk_family != AF_INET6) ||
            csk->sk_protocol != IPPROTO_TCP) {
                err = -EOPNOTSUPP;
                goto out;
        }

        /* Don't allow listeners or closed sockets */
        if (csk->sk_state == TCP_LISTEN || csk->sk_state == TCP_CLOSE) {
                err = -EOPNOTSUPP;
                goto out;
        }

        psock = kmem_cache_zalloc(kcm_psockp, GFP_KERNEL);
        if (!psock) {
                err = -ENOMEM;
                goto out;
        }

        psock->mux = mux;
        psock->sk = csk;
        psock->bpf_prog = prog;

        err = strp_init(&psock->strp, csk, &cb);
        if (err) {
                kmem_cache_free(kcm_psockp, psock);
                goto out;
        }

        write_lock_bh(&csk->sk_callback_lock);

        /* Check if sk_user_data is already used by KCM or someone else.
         * Must be done under lock to prevent race conditions.
         */
        if (csk->sk_user_data) {
                write_unlock_bh(&csk->sk_callback_lock);
                strp_stop(&psock->strp);
                strp_done(&psock->strp);
                kmem_cache_free(kcm_psockp, psock);
                err = -EALREADY;
                goto out;
        }

        psock->save_data_ready = csk->sk_data_ready;
        psock->save_write_space = csk->sk_write_space;
        psock->save_state_change = csk->sk_state_change;
        csk->sk_user_data = psock;
        csk->sk_data_ready = psock_data_ready;
        csk->sk_write_space = psock_write_space;
        csk->sk_state_change = psock_state_change;

        write_unlock_bh(&csk->sk_callback_lock);

        sock_hold(csk);

        /* Finished initialization, now add the psock to the MUX. */
        spin_lock_bh(&mux->lock);
        head = &mux->psocks;
        list_for_each_entry(tpsock, &mux->psocks, psock_list) {
                if (tpsock->index != index)
                        break;
                head = &tpsock->psock_list;
                index++;
        }

        list_add(&psock->psock_list, head);
        psock->index = index;

        KCM_STATS_INCR(mux->stats.psock_attach);
        mux->psocks_cnt++;
        psock_now_avail(psock);
        spin_unlock_bh(&mux->lock);

        /* Schedule RX work in case there are already bytes queued */
        strp_check_rcv(&psock->strp);

out:
        release_sock(csk);

        return err;
}

static int kcm_attach_ioctl(struct socket *sock, struct kcm_attach *info)
{
        struct socket *csock;
        struct bpf_prog *prog;
        int err;

        csock = sockfd_lookup(info->fd, &err);
        if (!csock)
                return -ENOENT;

        prog = bpf_prog_get_type(info->bpf_fd, BPF_PROG_TYPE_SOCKET_FILTER);
        if (IS_ERR(prog)) {
                err = PTR_ERR(prog);
                goto out;
        }

        err = kcm_attach(sock, csock, prog);
        if (err) {
                bpf_prog_put(prog);
                goto out;
        }

        /* Keep reference on file also */

        return 0;
out:
        fput(csock->file);
        return err;
}

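/* Detach a psock from its mux: restore the TCP socket's saved callbacks,
 * stop the strparser, and free the psock (deferred to the tx path if the
 * psock is still reserved).
 */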
static void kcm_unattach(struct kcm_psock *psock)
{
        struct sock *csk = psock->sk;
        struct kcm_mux *mux = psock->mux;

        lock_sock(csk);

        /* Stop getting callbacks from TCP socket. After this there should
         * be no way to reserve a kcm for this psock.
         */
        write_lock_bh(&csk->sk_callback_lock);
        csk->sk_user_data = NULL;
        csk->sk_data_ready = psock->save_data_ready;
        csk->sk_write_space = psock->save_write_space;
        csk->sk_state_change = psock->save_state_change;
        strp_stop(&psock->strp);

        if (WARN_ON(psock->rx_kcm)) {
                write_unlock_bh(&csk->sk_callback_lock);
                release_sock(csk);
                return;
        }

        spin_lock_bh(&mux->rx_lock);

        /* Stop receiver activities. After this point psock should not be
         * able to get onto ready list either through callbacks or work.
         */
        if (psock->ready_rx_msg) {
                list_del(&psock->psock_ready_list);
                kfree_skb(psock->ready_rx_msg);
                psock->ready_rx_msg = NULL;
                KCM_STATS_INCR(mux->stats.rx_ready_drops);
        }

        spin_unlock_bh(&mux->rx_lock);

        write_unlock_bh(&csk->sk_callback_lock);

        /* Call strp_done without sock lock */
        release_sock(csk);
        strp_done(&psock->strp);
        lock_sock(csk);

        bpf_prog_put(psock->bpf_prog);

        spin_lock_bh(&mux->lock);

        aggregate_psock_stats(&psock->stats, &mux->aggregate_psock_stats);
        save_strp_stats(&psock->strp, &mux->aggregate_strp_stats);

        KCM_STATS_INCR(mux->stats.psock_unattach);

        if (psock->tx_kcm) {
                /* psock was reserved. Just mark it finished and we will clean
                 * up in the kcm paths, we need kcm lock which can not be
                 * acquired here.
                 */
                KCM_STATS_INCR(mux->stats.psock_unattach_rsvd);
                spin_unlock_bh(&mux->lock);

                /* We are unattaching a socket that is reserved. Abort the
                 * socket since we may be out of sync in sending on it. We need
                 * to do this without the mux lock.
                 */
                kcm_abort_tx_psock(psock, EPIPE, false);

                spin_lock_bh(&mux->lock);
                if (!psock->tx_kcm) {
                        /* psock was unreserved in the window the mux was
                         * unlocked
                         */
                        goto no_reserved;
                }
                psock->done = 1;

                /* Commit done before queuing work to process it */
                smp_mb();

                /* Queue tx work to make sure psock->done is handled */
                queue_work(kcm_wq, &psock->tx_kcm->tx_work);
                spin_unlock_bh(&mux->lock);
        } else {
no_reserved:
                if (!psock->tx_stopped)
                        list_del(&psock->psock_avail_list);
                list_del(&psock->psock_list);
                mux->psocks_cnt--;
                spin_unlock_bh(&mux->lock);

                sock_put(csk);
                fput(csk->sk_socket->file);
                kmem_cache_free(kcm_psockp, psock);
        }

        release_sock(csk);
}

static int kcm_unattach_ioctl(struct socket *sock, struct kcm_unattach *info)
{
        struct kcm_sock *kcm = kcm_sk(sock->sk);
        struct kcm_mux *mux = kcm->mux;
        struct kcm_psock *psock;
        struct socket *csock;
        struct sock *csk;
        int err;

        csock = sockfd_lookup(info->fd, &err);
        if (!csock)
                return -ENOENT;

        csk = csock->sk;
        if (!csk) {
                err = -EINVAL;
                goto out;
        }

        err = -ENOENT;

        spin_lock_bh(&mux->lock);

        list_for_each_entry(psock, &mux->psocks, psock_list) {
                if (psock->sk != csk)
                        continue;

                /* Found the matching psock */

                if (psock->unattaching || WARN_ON(psock->done)) {
                        err = -EALREADY;
                        break;
                }

                psock->unattaching = 1;

                spin_unlock_bh(&mux->lock);

                /* kcm_unattach takes the lower socket lock itself */
                kcm_unattach(psock);

                err = 0;
                goto out;
        }

        spin_unlock_bh(&mux->lock);

out:
        fput(csock->file);
        return err;
}

static struct proto kcm_proto = {
        .name = "KCM",
        .owner = THIS_MODULE,
        .obj_size = sizeof(struct kcm_sock),
};

/* Clone a kcm socket. */
static struct file *kcm_clone(struct socket *osock)
{
        struct socket *newsock;
        struct sock *newsk;

        newsock = sock_alloc();
        if (!newsock)
                return ERR_PTR(-ENFILE);

        newsock->type = osock->type;
        newsock->ops = osock->ops;

        __module_get(newsock->ops->owner);

        newsk = sk_alloc(sock_net(osock->sk), PF_KCM, GFP_KERNEL,
                         &kcm_proto, false);
        if (!newsk) {
                sock_release(newsock);
                return ERR_PTR(-ENOMEM);
        }
        sock_init_data(newsock, newsk);
        init_kcm_sock(kcm_sk(newsk), kcm_sk(osock->sk)->mux);

        return sock_alloc_file(newsock, 0, osock->sk->sk_prot_creator->name);
}

static int kcm_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
        int err;

        switch (cmd) {
        case SIOCKCMATTACH: {
                struct kcm_attach info;

                if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
                        return -EFAULT;

                err = kcm_attach_ioctl(sock, &info);

                break;
        }
        case SIOCKCMUNATTACH: {
                struct kcm_unattach info;

                if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
                        return -EFAULT;

                err = kcm_unattach_ioctl(sock, &info);

                break;
        }
        case SIOCKCMCLONE: {
                struct kcm_clone info;
                struct file *file;

                info.fd = get_unused_fd_flags(0);
                if (unlikely(info.fd < 0))
                        return info.fd;

                file = kcm_clone(sock);
                if (IS_ERR(file)) {
                        put_unused_fd(info.fd);
                        return PTR_ERR(file);
                }
                if (copy_to_user((void __user *)arg, &info,
                                 sizeof(info))) {
                        put_unused_fd(info.fd);
                        fput(file);
                        return -EFAULT;
                }
                fd_install(info.fd, file);
                err = 0;
                break;
        }
        default:
                err = -ENOIOCTLCMD;
                break;
        }

        return err;
}

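/* RCU callback to free a mux once all readers are done with it */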
static void free_mux(struct rcu_head *rcu)
{
        struct kcm_mux *mux = container_of(rcu,
                                           struct kcm_mux, rcu);

        kmem_cache_free(kcm_muxp, mux);
}

static void release_mux(struct kcm_mux *mux)
{
        struct kcm_net *knet = mux->knet;
        struct kcm_psock *psock, *tmp_psock;

        /* Release psocks */
        list_for_each_entry_safe(psock, tmp_psock,
                                 &mux->psocks, psock_list) {
                if (!WARN_ON(psock->unattaching))
                        kcm_unattach(psock);
        }

        if (WARN_ON(mux->psocks_cnt))
                return;

        __skb_queue_purge(&mux->rx_hold_queue);

        mutex_lock(&knet->mutex);
        aggregate_mux_stats(&mux->stats, &knet->aggregate_mux_stats);
        aggregate_psock_stats(&mux->aggregate_psock_stats,
                              &knet->aggregate_psock_stats);
        aggregate_strp_stats(&mux->aggregate_strp_stats,
                             &knet->aggregate_strp_stats);
        list_del_rcu(&mux->kcm_mux_list);
        knet->count--;
        mutex_unlock(&knet->mutex);

        call_rcu(&mux->rcu, free_mux);
}

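/* Final teardown of a kcm socket: requeue pending receive messages to other
 * kcm sockets, detach from the mux, and release the mux if this was its
 * last socket.
 */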
static void kcm_done(struct kcm_sock *kcm)
{
        struct kcm_mux *mux = kcm->mux;
        struct sock *sk = &kcm->sk;
        int socks_cnt;

        spin_lock_bh(&mux->rx_lock);
        if (kcm->rx_psock) {
                /* Cleanup in unreserve_rx_kcm */
                WARN_ON(kcm->done);
                kcm->rx_disabled = 1;
                kcm->done = 1;
                spin_unlock_bh(&mux->rx_lock);
                return;
        }

        if (kcm->rx_wait) {
                list_del(&kcm->wait_rx_list);
                kcm->rx_wait = false;
        }
        /* Move any pending receive messages to other kcm sockets */
        requeue_rx_msgs(mux, &sk->sk_receive_queue);

        spin_unlock_bh(&mux->rx_lock);

        if (WARN_ON(sk_rmem_alloc_get(sk)))
                return;

        /* Detach from MUX */
        spin_lock_bh(&mux->lock);

        list_del(&kcm->kcm_sock_list);
        mux->kcm_socks_cnt--;
        socks_cnt = mux->kcm_socks_cnt;

        spin_unlock_bh(&mux->lock);

        if (!socks_cnt) {
                /* We are done with the mux now. */
                release_mux(mux);
        }

        WARN_ON(kcm->rx_wait);

        sock_put(&kcm->sk);
}

/* Close a KCM socket. If this is the last KCM socket on the MUX, destroy
 * the MUX.
 */
static int kcm_release(struct socket *sock)
{
        struct sock *sk = sock->sk;
        struct kcm_sock *kcm;
        struct kcm_mux *mux;
        struct kcm_psock *psock;

        if (!sk)
                return 0;

        kcm = kcm_sk(sk);
        mux = kcm->mux;

        sock_orphan(sk);
        kfree_skb(kcm->seq_skb);

        lock_sock(sk);
        /* Purge queue under lock to avoid race condition with tx_work trying
         * to act when queue is nonempty. If tx_work runs after this point
         * it will just return.
         */
        __skb_queue_purge(&sk->sk_write_queue);

        /* Set tx_stopped. This is checked when psock is bound to a kcm and we
         * get a writespace callback. This prevents further work being queued
         * from the callback (unbinding the psock occurs after canceling work).
         */
        kcm->tx_stopped = 1;

        release_sock(sk);

        spin_lock_bh(&mux->lock);
        if (kcm->tx_wait) {
                /* Take off tx_wait list, after this point there should be no
                 * way that a psock will be assigned to this kcm.
                 */
                list_del(&kcm->wait_psock_list);
                kcm->tx_wait = false;
        }
        spin_unlock_bh(&mux->lock);

        /* Cancel work. After this point there should be no outside references
         * to the kcm socket.
         */
        cancel_work_sync(&kcm->tx_work);

        lock_sock(sk);
        psock = kcm->tx_psock;
        if (psock) {
                /* A psock was reserved, so we need to kill it since it
                 * may already have some bytes queued from a message. We
                 * need to do this after removing kcm from tx_wait list.
                 */
                kcm_abort_tx_psock(psock, EPIPE, false);
                unreserve_psock(kcm);
        }
        release_sock(sk);

        WARN_ON(kcm->tx_wait);
        WARN_ON(kcm->tx_psock);

        sock->sk = NULL;

        kcm_done(kcm);

        return 0;
}

static const struct proto_ops kcm_dgram_ops = {
        .family = PF_KCM,
        .owner = THIS_MODULE,
        .release = kcm_release,
        .bind = sock_no_bind,
        .connect = sock_no_connect,
        .socketpair = sock_no_socketpair,
        .accept = sock_no_accept,
        .getname = sock_no_getname,
        .poll = datagram_poll,
        .ioctl = kcm_ioctl,
        .listen = sock_no_listen,
        .shutdown = sock_no_shutdown,
        .setsockopt = kcm_setsockopt,
        .getsockopt = kcm_getsockopt,
        .sendmsg = kcm_sendmsg,
        .recvmsg = kcm_recvmsg,
        .mmap = sock_no_mmap,
        .sendpage = kcm_sendpage,
};

static const struct proto_ops kcm_seqpacket_ops = {
        .family = PF_KCM,
        .owner = THIS_MODULE,
        .release = kcm_release,
        .bind = sock_no_bind,
        .connect = sock_no_connect,
        .socketpair = sock_no_socketpair,
        .accept = sock_no_accept,
        .getname = sock_no_getname,
        .poll = datagram_poll,
        .ioctl = kcm_ioctl,
        .listen = sock_no_listen,
        .shutdown = sock_no_shutdown,
        .setsockopt = kcm_setsockopt,
        .getsockopt = kcm_getsockopt,
        .sendmsg = kcm_sendmsg,
        .recvmsg = kcm_recvmsg,
        .mmap = sock_no_mmap,
        .sendpage = kcm_sendpage,
        .splice_read = kcm_splice_read,
};

/* Create proto operation for kcm sockets */
static int kcm_create(struct net *net, struct socket *sock,
                      int protocol, int kern)
{
        struct kcm_net *knet = net_generic(net, kcm_net_id);
        struct sock *sk;
        struct kcm_mux *mux;

        switch (sock->type) {
        case SOCK_DGRAM:
                sock->ops = &kcm_dgram_ops;
                break;
        case SOCK_SEQPACKET:
                sock->ops = &kcm_seqpacket_ops;
                break;
        default:
                return -ESOCKTNOSUPPORT;
        }

        if (protocol != KCMPROTO_CONNECTED)
                return -EPROTONOSUPPORT;

        sk = sk_alloc(net, PF_KCM, GFP_KERNEL, &kcm_proto, kern);
        if (!sk)
                return -ENOMEM;

        /* Allocate a kcm mux, shared between KCM sockets */
        mux = kmem_cache_zalloc(kcm_muxp, GFP_KERNEL);
        if (!mux) {
                sk_free(sk);
                return -ENOMEM;
        }

        spin_lock_init(&mux->lock);
        spin_lock_init(&mux->rx_lock);
        INIT_LIST_HEAD(&mux->kcm_socks);
        INIT_LIST_HEAD(&mux->kcm_rx_waiters);
        INIT_LIST_HEAD(&mux->kcm_tx_waiters);

        INIT_LIST_HEAD(&mux->psocks);
        INIT_LIST_HEAD(&mux->psocks_ready);
        INIT_LIST_HEAD(&mux->psocks_avail);

        mux->knet = knet;

        /* Add new MUX to list */
        mutex_lock(&knet->mutex);
        list_add_rcu(&mux->kcm_mux_list, &knet->mux_list);
        knet->count++;
        mutex_unlock(&knet->mutex);

        skb_queue_head_init(&mux->rx_hold_queue);

        /* Init KCM socket */
        sock_init_data(sock, sk);
        init_kcm_sock(kcm_sk(sk), mux);

        return 0;
}

static const struct net_proto_family kcm_family_ops = {
        .family = PF_KCM,
        .create = kcm_create,
        .owner = THIS_MODULE,
};

static __net_init int kcm_init_net(struct net *net)
{
        struct kcm_net *knet = net_generic(net, kcm_net_id);

        INIT_LIST_HEAD_RCU(&knet->mux_list);
        mutex_init(&knet->mutex);

        return 0;
}

static __net_exit void kcm_exit_net(struct net *net)
{
        struct kcm_net *knet = net_generic(net, kcm_net_id);

        /* All KCM sockets should be closed at this point, which should mean
         * that all multiplexors and psocks have been destroyed.
         */
        WARN_ON(!list_empty(&knet->mux_list));
}

static struct pernet_operations kcm_net_ops = {
        .init = kcm_init_net,
        .exit = kcm_exit_net,
        .id   = &kcm_net_id,
        .size = sizeof(struct kcm_net),
};

static int __init kcm_init(void)
{
        int err = -ENOMEM;

        kcm_muxp = kmem_cache_create("kcm_mux_cache",
                                     sizeof(struct kcm_mux), 0,
                                     SLAB_HWCACHE_ALIGN, NULL);
        if (!kcm_muxp)
                goto fail;

        kcm_psockp = kmem_cache_create("kcm_psock_cache",
                                       sizeof(struct kcm_psock), 0,
                                       SLAB_HWCACHE_ALIGN, NULL);
        if (!kcm_psockp)
                goto fail;

        kcm_wq = create_singlethread_workqueue("kkcmd");
        if (!kcm_wq)
                goto fail;

        err = proto_register(&kcm_proto, 1);
        if (err)
                goto fail;

        err = register_pernet_device(&kcm_net_ops);
        if (err)
                goto net_ops_fail;

        err = sock_register(&kcm_family_ops);
        if (err)
                goto sock_register_fail;

        err = kcm_proc_init();
        if (err)
                goto proc_init_fail;

        return 0;

proc_init_fail:
        sock_unregister(PF_KCM);

sock_register_fail:
        unregister_pernet_device(&kcm_net_ops);

net_ops_fail:
        proto_unregister(&kcm_proto);

fail:
        kmem_cache_destroy(kcm_muxp);
        kmem_cache_destroy(kcm_psockp);

        if (kcm_wq)
                destroy_workqueue(kcm_wq);

        return err;
}

static void __exit kcm_exit(void)
{
        kcm_proc_exit();
        sock_unregister(PF_KCM);
        unregister_pernet_device(&kcm_net_ops);
        proto_unregister(&kcm_proto);
        destroy_workqueue(kcm_wq);

        kmem_cache_destroy(kcm_muxp);
        kmem_cache_destroy(kcm_psockp);
}

module_init(kcm_init);
module_exit(kcm_exit);

MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_KCM);