// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel Connection Multiplexor
 *
 * Copyright (c) 2016 Tom Herbert <tom@herbertland.com>
 */

#include <linux/bpf.h>
#include <linux/errno.h>
#include <linux/errqueue.h>
#include <linux/file.h>
#include <linux/filter.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/poll.h>
#include <linux/rculist.h>
#include <linux/skbuff.h>
#include <linux/socket.h>
#include <linux/uaccess.h>
#include <linux/workqueue.h>
#include <linux/syscalls.h>
#include <linux/sched/signal.h>

#include <net/kcm.h>
#include <net/netns/generic.h>
#include <net/sock.h>
#include <uapi/linux/kcm.h>
#include <trace/events/sock.h>

unsigned int kcm_net_id;
static struct kmem_cache *kcm_psockp __read_mostly;
static struct kmem_cache *kcm_muxp __read_mostly;
static struct workqueue_struct *kcm_wq;
static inline struct kcm_sock *kcm_sk(const struct sock *sk)
{
	return (struct kcm_sock *)sk;
}

static inline struct kcm_tx_msg *kcm_tx_msg(struct sk_buff *skb)
{
	return (struct kcm_tx_msg *)skb->cb;
}
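
/* Per-message transmit state lives in the skb control buffer (skb->cb).
 * For reference, struct kcm_tx_msg in include/net/kcm.h carries roughly
 * the following fields (sketch from the corresponding header; see it for
 * the authoritative definition):
 *
 *	unsigned int sent;	  -- bytes of this message already sent
 *	unsigned int fragidx;	  -- page frag to resume sending at
 *	unsigned int frag_offset; -- offset within that frag
 *	unsigned int msg_flags;
 *	struct sk_buff *frag_skb; -- skb in the frag_list being sent
 *	struct sk_buff *last_skb; -- tail skb for appending more data
 */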
static void report_csk_error(struct sock *csk, int err)
{
	csk->sk_err = err;
	sk_error_report(csk);
}
static void kcm_abort_tx_psock(struct kcm_psock *psock, int err,
			       bool wakeup_kcm)
{
	struct sock *csk = psock->sk;
	struct kcm_mux *mux = psock->mux;

	/* Unrecoverable error in transmit */

	spin_lock_bh(&mux->lock);

	if (psock->tx_stopped) {
		spin_unlock_bh(&mux->lock);
		return;
	}

	psock->tx_stopped = 1;
	KCM_STATS_INCR(psock->stats.tx_aborts);

	if (!psock->tx_kcm) {
		/* Take off psocks_avail list */
		list_del(&psock->psock_avail_list);
	} else if (wakeup_kcm) {
		/* In this case psock is being aborted while outside of
		 * write_msgs and psock is reserved. Schedule tx_work
		 * to handle the failure there. Need to commit tx_stopped
		 * before queuing work.
		 */
		smp_mb();

		queue_work(kcm_wq, &psock->tx_kcm->tx_work);
	}

	spin_unlock_bh(&mux->lock);

	/* Report error on lower socket */
	report_csk_error(csk, err);
}
/* RX mux lock held. */
static void kcm_update_rx_mux_stats(struct kcm_mux *mux,
				    struct kcm_psock *psock)
{
	STRP_STATS_ADD(mux->stats.rx_bytes,
		       psock->strp.stats.bytes -
		       psock->saved_rx_bytes);
	mux->stats.rx_msgs +=
		psock->strp.stats.msgs - psock->saved_rx_msgs;
	psock->saved_rx_msgs = psock->strp.stats.msgs;
	psock->saved_rx_bytes = psock->strp.stats.bytes;
}
static void kcm_update_tx_mux_stats(struct kcm_mux *mux,
				    struct kcm_psock *psock)
{
	KCM_STATS_ADD(mux->stats.tx_bytes,
		      psock->stats.tx_bytes - psock->saved_tx_bytes);
	mux->stats.tx_msgs +=
		psock->stats.tx_msgs - psock->saved_tx_msgs;
	psock->saved_tx_msgs = psock->stats.tx_msgs;
	psock->saved_tx_bytes = psock->stats.tx_bytes;
}

static int kcm_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
/* KCM is ready to receive messages on its queue-- either the KCM is new or
 * has become unblocked after being blocked on full socket buffer. Queue any
 * pending ready messages on a psock. RX mux lock held.
 */
static void kcm_rcv_ready(struct kcm_sock *kcm)
{
	struct kcm_mux *mux = kcm->mux;
	struct kcm_psock *psock;
	struct sk_buff *skb;

	if (unlikely(kcm->rx_wait || kcm->rx_psock || kcm->rx_disabled))
		return;

	while (unlikely((skb = __skb_dequeue(&mux->rx_hold_queue)))) {
		if (kcm_queue_rcv_skb(&kcm->sk, skb)) {
			/* Assuming buffer limit has been reached */
			skb_queue_head(&mux->rx_hold_queue, skb);
			WARN_ON(!sk_rmem_alloc_get(&kcm->sk));
			return;
		}
	}

	while (!list_empty(&mux->psocks_ready)) {
		psock = list_first_entry(&mux->psocks_ready, struct kcm_psock,
					 psock_ready_list);

		if (kcm_queue_rcv_skb(&kcm->sk, psock->ready_rx_msg)) {
			/* Assuming buffer limit has been reached */
			WARN_ON(!sk_rmem_alloc_get(&kcm->sk));
			return;
		}

		/* Consumed the ready message on the psock. Schedule rx_work to
		 * get more messages.
		 */
		list_del(&psock->psock_ready_list);
		psock->ready_rx_msg = NULL;
		/* Commit clearing of ready_rx_msg for queuing work */
		smp_mb();

		strp_unpause(&psock->strp);
		strp_check_rcv(&psock->strp);
	}

	/* Buffer limit is okay now, add to ready list */
	list_add_tail(&kcm->wait_rx_list,
		      &kcm->mux->kcm_rx_waiters);
	/* paired with lockless reads in kcm_rfree() */
	WRITE_ONCE(kcm->rx_wait, true);
}
static void kcm_rfree(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	struct kcm_sock *kcm = kcm_sk(sk);
	struct kcm_mux *mux = kcm->mux;
	unsigned int len = skb->truesize;

	sk_mem_uncharge(sk, len);
	atomic_sub(len, &sk->sk_rmem_alloc);

	/* For reading rx_wait and rx_psock without holding lock */
	smp_mb__after_atomic();

	if (!READ_ONCE(kcm->rx_wait) && !READ_ONCE(kcm->rx_psock) &&
	    sk_rmem_alloc_get(sk) < sk->sk_rcvlowat) {
		spin_lock_bh(&mux->rx_lock);
		kcm_rcv_ready(kcm);
		spin_unlock_bh(&mux->rx_lock);
	}
}
static int kcm_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	struct sk_buff_head *list = &sk->sk_receive_queue;

	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
		return -ENOMEM;

	if (!sk_rmem_schedule(sk, skb, skb->truesize))
		return -ENOBUFS;

	skb->dev = NULL;

	skb_orphan(skb);
	skb->sk = sk;
	skb->destructor = kcm_rfree;
	atomic_add(skb->truesize, &sk->sk_rmem_alloc);
	sk_mem_charge(sk, skb->truesize);

	skb_queue_tail(list, skb);

	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk);

	return 0;
}
/* Requeue received messages for a kcm socket to other kcm sockets. This is
 * called when a kcm socket is receive disabled.
 *
 * RX mux lock held.
 */
static void requeue_rx_msgs(struct kcm_mux *mux, struct sk_buff_head *head)
{
	struct sk_buff *skb;
	struct kcm_sock *kcm;

	while ((skb = skb_dequeue(head))) {
		/* Reset destructor to avoid calling kcm_rcv_ready */
		skb->destructor = sock_rfree;
		skb_orphan(skb);
try_again:
		if (list_empty(&mux->kcm_rx_waiters)) {
			skb_queue_tail(&mux->rx_hold_queue, skb);
			continue;
		}

		kcm = list_first_entry(&mux->kcm_rx_waiters,
				       struct kcm_sock, wait_rx_list);

		if (kcm_queue_rcv_skb(&kcm->sk, skb)) {
			/* Should mean socket buffer full */
			list_del(&kcm->wait_rx_list);
			/* paired with lockless reads in kcm_rfree() */
			WRITE_ONCE(kcm->rx_wait, false);

			/* Commit rx_wait to read in kcm_free */
			smp_wmb();

			goto try_again;
		}
	}
}
/* Lower sock lock held */
static struct kcm_sock *reserve_rx_kcm(struct kcm_psock *psock,
				       struct sk_buff *head)
{
	struct kcm_mux *mux = psock->mux;
	struct kcm_sock *kcm;

	WARN_ON(psock->ready_rx_msg);

	if (psock->rx_kcm)
		return psock->rx_kcm;

	spin_lock_bh(&mux->rx_lock);

	if (psock->rx_kcm) {
		spin_unlock_bh(&mux->rx_lock);
		return psock->rx_kcm;
	}

	kcm_update_rx_mux_stats(mux, psock);

	if (list_empty(&mux->kcm_rx_waiters)) {
		psock->ready_rx_msg = head;
		strp_pause(&psock->strp);
		list_add_tail(&psock->psock_ready_list,
			      &mux->psocks_ready);
		spin_unlock_bh(&mux->rx_lock);
		return NULL;
	}

	kcm = list_first_entry(&mux->kcm_rx_waiters,
			       struct kcm_sock, wait_rx_list);
	list_del(&kcm->wait_rx_list);
	/* paired with lockless reads in kcm_rfree() */
	WRITE_ONCE(kcm->rx_wait, false);

	psock->rx_kcm = kcm;
	/* paired with lockless reads in kcm_rfree() */
	WRITE_ONCE(kcm->rx_psock, psock);

	spin_unlock_bh(&mux->rx_lock);

	return kcm;
}
static void kcm_done(struct kcm_sock *kcm);

static void kcm_done_work(struct work_struct *w)
{
	kcm_done(container_of(w, struct kcm_sock, done_work));
}
/* Lower sock held */
static void unreserve_rx_kcm(struct kcm_psock *psock,
			     bool rcv_ready)
{
	struct kcm_sock *kcm = psock->rx_kcm;
	struct kcm_mux *mux = psock->mux;

	if (!kcm)
		return;

	spin_lock_bh(&mux->rx_lock);

	psock->rx_kcm = NULL;
	/* paired with lockless reads in kcm_rfree() */
	WRITE_ONCE(kcm->rx_psock, NULL);

	/* Commit kcm->rx_psock before sk_rmem_alloc_get to sync with
	 * kcm_rfree
	 */
	smp_mb();

	if (unlikely(kcm->done)) {
		spin_unlock_bh(&mux->rx_lock);

		/* Need to run kcm_done in a task since we need to acquire
		 * callback locks which may already be held here.
		 */
		INIT_WORK(&kcm->done_work, kcm_done_work);
		schedule_work(&kcm->done_work);
		return;
	}

	if (unlikely(kcm->rx_disabled)) {
		requeue_rx_msgs(mux, &kcm->sk.sk_receive_queue);
	} else if (rcv_ready || unlikely(!sk_rmem_alloc_get(&kcm->sk))) {
		/* Check for degenerative race with rx_wait that all
		 * data was dequeued (accounted for in kcm_rfree).
		 */
		kcm_rcv_ready(kcm);
	}

	spin_unlock_bh(&mux->rx_lock);
}
/* Lower sock lock held */
static void psock_data_ready(struct sock *sk)
{
	struct kcm_psock *psock;

	trace_sk_data_ready(sk);

	read_lock_bh(&sk->sk_callback_lock);

	psock = (struct kcm_psock *)sk->sk_user_data;
	if (likely(psock))
		strp_data_ready(&psock->strp);

	read_unlock_bh(&sk->sk_callback_lock);
}
/* Called with lower sock held */
static void kcm_rcv_strparser(struct strparser *strp, struct sk_buff *skb)
{
	struct kcm_psock *psock = container_of(strp, struct kcm_psock, strp);
	struct kcm_sock *kcm;

try_queue:
	kcm = reserve_rx_kcm(psock, skb);
	if (!kcm) {
		/* Unable to reserve a KCM, message is held in psock and strp
		 * is paused.
		 */
		return;
	}

	if (kcm_queue_rcv_skb(&kcm->sk, skb)) {
		/* Should mean socket buffer full */
		unreserve_rx_kcm(psock, false);
		goto try_queue;
	}
}
static int kcm_parse_func_strparser(struct strparser *strp, struct sk_buff *skb)
{
	struct kcm_psock *psock = container_of(strp, struct kcm_psock, strp);
	struct bpf_prog *prog = psock->bpf_prog;
	int res;

	res = bpf_prog_run_pin_on_cpu(prog, skb);
	return res;
}
static int kcm_read_sock_done(struct strparser *strp, int err)
{
	struct kcm_psock *psock = container_of(strp, struct kcm_psock, strp);

	unreserve_rx_kcm(psock, true);

	return err;
}
static void psock_state_change(struct sock *sk)
{
	/* TCP only does an EPOLLIN for a half close. Do an EPOLLHUP here
	 * since application will normally not poll with EPOLLIN
	 * on the TCP sockets.
	 */

	report_csk_error(sk, EPIPE);
}
static void psock_write_space(struct sock *sk)
{
	struct kcm_psock *psock;
	struct kcm_mux *mux;
	struct kcm_sock *kcm;

	read_lock_bh(&sk->sk_callback_lock);

	psock = (struct kcm_psock *)sk->sk_user_data;
	if (unlikely(!psock))
		goto out;
	mux = psock->mux;

	spin_lock_bh(&mux->lock);

	/* Check if the socket is reserved; if so, someone is waiting to send. */
	kcm = psock->tx_kcm;
	if (kcm && !unlikely(kcm->tx_stopped))
		queue_work(kcm_wq, &kcm->tx_work);

	spin_unlock_bh(&mux->lock);
out:
	read_unlock_bh(&sk->sk_callback_lock);
}
static void unreserve_psock(struct kcm_sock *kcm);

/* kcm sock is locked. */
static struct kcm_psock *reserve_psock(struct kcm_sock *kcm)
{
	struct kcm_mux *mux = kcm->mux;
	struct kcm_psock *psock;

	psock = kcm->tx_psock;

	smp_rmb(); /* Must read tx_psock before tx_wait */

	if (psock) {
		WARN_ON(kcm->tx_wait);
		if (unlikely(psock->tx_stopped))
			unreserve_psock(kcm);
		else
			return kcm->tx_psock;
	}

	spin_lock_bh(&mux->lock);

	/* Check again under lock to see if psock was reserved for this
	 * psock via psock_unreserve.
	 */
	psock = kcm->tx_psock;
	if (unlikely(psock)) {
		WARN_ON(kcm->tx_wait);
		spin_unlock_bh(&mux->lock);
		return kcm->tx_psock;
	}

	if (!list_empty(&mux->psocks_avail)) {
		psock = list_first_entry(&mux->psocks_avail,
					 struct kcm_psock,
					 psock_avail_list);
		list_del(&psock->psock_avail_list);
		if (kcm->tx_wait) {
			list_del(&kcm->wait_psock_list);
			kcm->tx_wait = false;
		}
		kcm->tx_psock = psock;
		psock->tx_kcm = kcm;
		KCM_STATS_INCR(psock->stats.reserved);
	} else if (!kcm->tx_wait) {
		list_add_tail(&kcm->wait_psock_list,
			      &mux->kcm_tx_waiters);
		kcm->tx_wait = true;
	}

	spin_unlock_bh(&mux->lock);

	return psock;
}
/* mux lock held */
static void psock_now_avail(struct kcm_psock *psock)
{
	struct kcm_mux *mux = psock->mux;
	struct kcm_sock *kcm;

	if (list_empty(&mux->kcm_tx_waiters)) {
		list_add_tail(&psock->psock_avail_list,
			      &mux->psocks_avail);
	} else {
		kcm = list_first_entry(&mux->kcm_tx_waiters,
				       struct kcm_sock,
				       wait_psock_list);
		list_del(&kcm->wait_psock_list);
		kcm->tx_wait = false;
		psock->tx_kcm = kcm;

		/* Commit before changing tx_psock since that is read in
		 * reserve_psock before queuing work.
		 */
		smp_mb();

		kcm->tx_psock = psock;
		KCM_STATS_INCR(psock->stats.reserved);
		queue_work(kcm_wq, &kcm->tx_work);
	}
}
/* kcm sock is locked. */
static void unreserve_psock(struct kcm_sock *kcm)
{
	struct kcm_psock *psock;
	struct kcm_mux *mux = kcm->mux;

	spin_lock_bh(&mux->lock);

	psock = kcm->tx_psock;

	if (WARN_ON(!psock)) {
		spin_unlock_bh(&mux->lock);
		return;
	}

	smp_rmb(); /* Read tx_psock before tx_wait */

	kcm_update_tx_mux_stats(mux, psock);

	WARN_ON(kcm->tx_wait);

	kcm->tx_psock = NULL;
	psock->tx_kcm = NULL;
	KCM_STATS_INCR(psock->stats.unreserved);

	if (unlikely(psock->tx_stopped)) {
		if (psock->done) {
			/* Deferred free */
			list_del(&psock->psock_list);
			mux->psocks_cnt--;
			sock_put(psock->sk);
			fput(psock->sk->sk_socket->file);
			kmem_cache_free(kcm_psockp, psock);
		}

		/* Don't put back on available list */

		spin_unlock_bh(&mux->lock);

		return;
	}

	psock_now_avail(psock);

	spin_unlock_bh(&mux->lock);
}
static void kcm_report_tx_retry(struct kcm_sock *kcm)
{
	struct kcm_mux *mux = kcm->mux;

	spin_lock_bh(&mux->lock);
	KCM_STATS_INCR(mux->stats.tx_retries);
	spin_unlock_bh(&mux->lock);
}
/* Write any messages ready on the kcm socket. Called with kcm sock lock
 * held. Return bytes actually sent or error.
 */
static int kcm_write_msgs(struct kcm_sock *kcm)
{
	struct sock *sk = &kcm->sk;
	struct kcm_psock *psock;
	struct sk_buff *skb, *head;
	struct kcm_tx_msg *txm;
	unsigned short fragidx, frag_offset;
	unsigned int sent, total_sent = 0;
	int ret = 0;

	kcm->tx_wait_more = false;
	psock = kcm->tx_psock;
	if (unlikely(psock && psock->tx_stopped)) {
		/* A reserved psock was aborted asynchronously. Unreserve
		 * it and we'll retry the message.
		 */
		unreserve_psock(kcm);
		kcm_report_tx_retry(kcm);
		if (skb_queue_empty(&sk->sk_write_queue))
			return 0;

		kcm_tx_msg(skb_peek(&sk->sk_write_queue))->sent = 0;

	} else if (skb_queue_empty(&sk->sk_write_queue)) {
		return 0;
	}

	head = skb_peek(&sk->sk_write_queue);
	txm = kcm_tx_msg(head);

	if (txm->sent) {
		/* Send of first skbuff in queue already in progress */
		if (WARN_ON(!psock)) {
			ret = -EINVAL;
			goto out;
		}
		sent = txm->sent;
		frag_offset = txm->frag_offset;
		fragidx = txm->fragidx;
		skb = txm->frag_skb;

		goto do_frag;
	}

try_again:
	psock = reserve_psock(kcm);
	if (!psock)
		goto out;

	do {
		skb = head;
		txm = kcm_tx_msg(head);
		sent = 0;

do_frag_list:
		if (WARN_ON(!skb_shinfo(skb)->nr_frags)) {
			ret = -EINVAL;
			goto out;
		}

		for (fragidx = 0; fragidx < skb_shinfo(skb)->nr_frags;
		     fragidx++) {
			skb_frag_t *frag;

			frag_offset = 0;
do_frag:
			frag = &skb_shinfo(skb)->frags[fragidx];
			if (WARN_ON(!skb_frag_size(frag))) {
				ret = -EINVAL;
				goto out;
			}

			ret = kernel_sendpage(psock->sk->sk_socket,
					      skb_frag_page(frag),
					      skb_frag_off(frag) + frag_offset,
					      skb_frag_size(frag) - frag_offset,
					      MSG_DONTWAIT);
			if (ret <= 0) {
				if (ret == -EAGAIN) {
					/* Save state to try again when there's
					 * write space on the socket
					 */
					txm->sent = sent;
					txm->frag_offset = frag_offset;
					txm->fragidx = fragidx;
					txm->frag_skb = skb;

					ret = 0;
					goto out;
				}

				/* Hard failure in sending message, abort this
				 * psock since it has lost framing
				 * synchronization and retry sending the
				 * message from the beginning.
				 */
				kcm_abort_tx_psock(psock, ret ? -ret : EPIPE,
						   true);
				unreserve_psock(kcm);

				txm->sent = 0;
				kcm_report_tx_retry(kcm);
				ret = 0;

				goto try_again;
			}

			sent += ret;
			frag_offset += ret;
			KCM_STATS_ADD(psock->stats.tx_bytes, ret);
			if (frag_offset < skb_frag_size(frag)) {
				/* Not finished with this frag */
				goto do_frag;
			}
		}

		if (skb == head) {
			if (skb_has_frag_list(skb)) {
				skb = skb_shinfo(skb)->frag_list;
				goto do_frag_list;
			}
		} else if (skb->next) {
			skb = skb->next;
			goto do_frag_list;
		}

		/* Successfully sent the whole packet, account for it. */
		skb_dequeue(&sk->sk_write_queue);
		kfree_skb(head);
		sk->sk_wmem_queued -= sent;
		total_sent += sent;
		KCM_STATS_INCR(psock->stats.tx_msgs);
	} while ((head = skb_peek(&sk->sk_write_queue)));

	/* Done with all queued messages. */
	WARN_ON(!skb_queue_empty(&sk->sk_write_queue));
	unreserve_psock(kcm);

out:
	/* Check if write space is available */
	sk->sk_write_space(sk);

	return total_sent ? : ret;
}
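
/* Worked example of the -EAGAIN resume state above (illustrative numbers
 * only): take a message whose skb has three page frags, where frag 0
 * (500 bytes) is fully accepted, and the next kernel_sendpage() call on
 * the 1000-byte frag 1 takes 100 bytes and a further call returns
 * -EAGAIN. At that point txm->sent == 600, txm->fragidx == 1,
 * txm->frag_offset == 100 and txm->frag_skb points at the skb being
 * walked. When write space returns, tx_work reruns kcm_write_msgs(),
 * which jumps straight to do_frag and resumes with the remaining 900
 * bytes of frag 1.
 */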
static void kcm_tx_work(struct work_struct *w)
{
	struct kcm_sock *kcm = container_of(w, struct kcm_sock, tx_work);
	struct sock *sk = &kcm->sk;
	int err;

	lock_sock(sk);

	/* Primarily for SOCK_DGRAM sockets, also handle asynchronous tx
	 * aborts
	 */
	err = kcm_write_msgs(kcm);
	if (err < 0) {
		/* Hard failure in write, report error on KCM socket */
		pr_warn("KCM: Hard failure on kcm_write_msgs %d\n", err);
		report_csk_error(&kcm->sk, -err);
		goto out;
	}

	/* Primarily for SOCK_SEQPACKET sockets */
	if (likely(sk->sk_socket) &&
	    test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
		clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		sk->sk_write_space(sk);
	}

out:
	release_sock(sk);
}
static void kcm_push(struct kcm_sock *kcm)
{
	if (kcm->tx_wait_more)
		kcm_write_msgs(kcm);
}
static ssize_t kcm_sendpage(struct socket *sock, struct page *page,
			    int offset, size_t size, int flags)
{
	struct sock *sk = sock->sk;
	struct kcm_sock *kcm = kcm_sk(sk);
	struct sk_buff *skb = NULL, *head = NULL;
	long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
	bool eor;
	int err = 0;
	int i;

	if (flags & MSG_SENDPAGE_NOTLAST)
		flags |= MSG_MORE;

	/* No MSG_EOR from splice, only look at MSG_MORE */
	eor = !(flags & MSG_MORE);

	lock_sock(sk);

	sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);

	err = -EPIPE;
	if (sk->sk_err)
		goto out_error;

	if (kcm->seq_skb) {
		/* Previously opened message */
		head = kcm->seq_skb;
		skb = kcm_tx_msg(head)->last_skb;
		i = skb_shinfo(skb)->nr_frags;

		if (skb_can_coalesce(skb, i, page, offset)) {
			skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], size);
			skb_shinfo(skb)->flags |= SKBFL_SHARED_FRAG;
			goto coalesced;
		}

		if (i >= MAX_SKB_FRAGS) {
			struct sk_buff *tskb;

			tskb = alloc_skb(0, sk->sk_allocation);
			while (!tskb) {
				kcm_push(kcm);
				err = sk_stream_wait_memory(sk, &timeo);
				if (err)
					goto out_error;

				tskb = alloc_skb(0, sk->sk_allocation);
			}

			if (head == skb)
				skb_shinfo(head)->frag_list = tskb;
			else
				skb->next = tskb;

			skb = tskb;
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			i = 0;
		}
	} else {
		/* Call the sk_stream functions to manage the sndbuf mem. */
		if (!sk_stream_memory_free(sk)) {
			kcm_push(kcm);
			set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
			err = sk_stream_wait_memory(sk, &timeo);
			if (err)
				goto out_error;
		}

		head = alloc_skb(0, sk->sk_allocation);
		while (!head) {
			kcm_push(kcm);
			err = sk_stream_wait_memory(sk, &timeo);
			if (err)
				goto out_error;

			head = alloc_skb(0, sk->sk_allocation);
		}

		skb = head;
		i = 0;
	}

	get_page(page);
	skb_fill_page_desc_noacc(skb, i, page, offset, size);
	skb_shinfo(skb)->flags |= SKBFL_SHARED_FRAG;

coalesced:
	skb->len += size;
	skb->data_len += size;
	skb->truesize += size;
	sk->sk_wmem_queued += size;
	sk_mem_charge(sk, size);

	if (head != skb) {
		head->len += size;
		head->data_len += size;
		head->truesize += size;
	}

	if (eor) {
		bool not_busy = skb_queue_empty(&sk->sk_write_queue);

		/* Message complete, queue it on send buffer */
		__skb_queue_tail(&sk->sk_write_queue, head);
		kcm->seq_skb = NULL;
		KCM_STATS_INCR(kcm->stats.tx_msgs);

		if (flags & MSG_BATCH) {
			kcm->tx_wait_more = true;
		} else if (kcm->tx_wait_more || not_busy) {
			err = kcm_write_msgs(kcm);
			if (err < 0) {
				/* We got a hard error in write_msgs but have
				 * already queued this message. Report an error
				 * in the socket, but don't affect return value
				 * from sendmsg
				 */
				pr_warn("KCM: Hard failure on kcm_write_msgs\n");
				report_csk_error(&kcm->sk, -err);
			}
		}
	} else {
		/* Message not complete, save state */
		kcm->seq_skb = head;
		kcm_tx_msg(head)->last_skb = skb;
	}

	KCM_STATS_ADD(kcm->stats.tx_bytes, size);

	release_sock(sk);
	return size;

out_error:
	kcm_push(kcm);

	err = sk_stream_error(sk, flags, err);

	/* make sure we wake any epoll edge trigger waiter */
	if (unlikely(skb_queue_len(&sk->sk_write_queue) == 0 && err == -EAGAIN))
		sk->sk_write_space(sk);

	release_sock(sk);
	return err;
}
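
/* Userspace usage sketch (illustrative, not part of this file): the
 * sendpage path above is exercised by zero-copy writes such as
 * sendfile(2) or splice(2) onto a KCM socket. MSG_EOR cannot be
 * expressed through splice, so message boundaries come from the absence
 * of MSG_MORE, e.g. (kcmfd is assumed to be a KCM socket with at least
 * one TCP socket attached):
 *
 *	off_t off = 0;
 *	ssize_t n = sendfile(kcmfd, filefd, &off, msg_len);
 */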
static int kcm_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct kcm_sock *kcm = kcm_sk(sk);
	struct sk_buff *skb = NULL, *head = NULL;
	size_t copy, copied = 0;
	long timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
	int eor = (sock->type == SOCK_DGRAM) ?
		  !(msg->msg_flags & MSG_MORE) : !!(msg->msg_flags & MSG_EOR);
	int err = -EPIPE;

	lock_sock(sk);

	/* Per tcp_sendmsg this should be in poll */
	sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);

	if (sk->sk_err)
		goto out_error;

	if (kcm->seq_skb) {
		/* Previously opened message */
		head = kcm->seq_skb;
		skb = kcm_tx_msg(head)->last_skb;
		goto start;
	}

	/* Call the sk_stream functions to manage the sndbuf mem. */
	if (!sk_stream_memory_free(sk)) {
		kcm_push(kcm);
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		err = sk_stream_wait_memory(sk, &timeo);
		if (err)
			goto out_error;
	}

	if (msg_data_left(msg)) {
		/* New message, alloc head skb */
		head = alloc_skb(0, sk->sk_allocation);
		while (!head) {
			kcm_push(kcm);
			err = sk_stream_wait_memory(sk, &timeo);
			if (err)
				goto out_error;

			head = alloc_skb(0, sk->sk_allocation);
		}

		skb = head;

		/* Set ip_summed to CHECKSUM_UNNECESSARY to avoid calling
		 * csum_and_copy_from_iter from skb_do_copy_data_nocache.
		 */
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

start:
	while (msg_data_left(msg)) {
		bool merge = true;
		int i = skb_shinfo(skb)->nr_frags;
		struct page_frag *pfrag = sk_page_frag(sk);

		if (!sk_page_frag_refill(sk, pfrag))
			goto wait_for_memory;

		if (!skb_can_coalesce(skb, i, pfrag->page,
				      pfrag->offset)) {
			if (i == MAX_SKB_FRAGS) {
				struct sk_buff *tskb;

				tskb = alloc_skb(0, sk->sk_allocation);
				if (!tskb)
					goto wait_for_memory;

				if (head == skb)
					skb_shinfo(head)->frag_list = tskb;
				else
					skb->next = tskb;

				skb = tskb;
				skb->ip_summed = CHECKSUM_UNNECESSARY;
				continue;
			}
			merge = false;
		}

		copy = min_t(int, msg_data_left(msg),
			     pfrag->size - pfrag->offset);

		if (!sk_wmem_schedule(sk, copy))
			goto wait_for_memory;

		err = skb_copy_to_page_nocache(sk, &msg->msg_iter, skb,
					       pfrag->page,
					       pfrag->offset,
					       copy);
		if (err)
			goto out_error;

		/* Update the skb. */
		if (merge) {
			skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
		} else {
			skb_fill_page_desc(skb, i, pfrag->page,
					   pfrag->offset, copy);
			get_page(pfrag->page);
		}

		pfrag->offset += copy;
		copied += copy;
		if (head != skb) {
			head->len += copy;
			head->data_len += copy;
		}

		continue;

wait_for_memory:
		kcm_push(kcm);
		err = sk_stream_wait_memory(sk, &timeo);
		if (err)
			goto out_error;
	}

	if (eor) {
		bool not_busy = skb_queue_empty(&sk->sk_write_queue);

		if (head) {
			/* Message complete, queue it on send buffer */
			__skb_queue_tail(&sk->sk_write_queue, head);
			kcm->seq_skb = NULL;
			KCM_STATS_INCR(kcm->stats.tx_msgs);
		}

		if (msg->msg_flags & MSG_BATCH) {
			kcm->tx_wait_more = true;
		} else if (kcm->tx_wait_more || not_busy) {
			err = kcm_write_msgs(kcm);
			if (err < 0) {
				/* We got a hard error in write_msgs but have
				 * already queued this message. Report an error
				 * in the socket, but don't affect return value
				 * from sendmsg
				 */
				pr_warn("KCM: Hard failure on kcm_write_msgs\n");
				report_csk_error(&kcm->sk, -err);
			}
		}
	} else {
		/* Message not complete, save state */
partial_message:
		if (head) {
			kcm->seq_skb = head;
			kcm_tx_msg(head)->last_skb = skb;
		}
	}

	KCM_STATS_ADD(kcm->stats.tx_bytes, copied);

	release_sock(sk);
	return copied;

out_error:
	kcm_push(kcm);

	if (copied && sock->type == SOCK_SEQPACKET) {
		/* Wrote some bytes before encountering an
		 * error, return partial success.
		 */
		goto partial_message;
	}

	if (head != kcm->seq_skb)
		kfree_skb(head);

	err = sk_stream_error(sk, msg->msg_flags, err);

	/* make sure we wake any epoll edge trigger waiter */
	if (unlikely(skb_queue_len(&sk->sk_write_queue) == 0 && err == -EAGAIN))
		sk->sk_write_space(sk);

	release_sock(sk);
	return err;
}
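
/* Userspace usage sketch (illustrative, not part of this file): each
 * sendmsg() on a KCM socket submits one framed message, and MSG_BATCH
 * lets a sender queue several messages before transmission is kicked.
 * The 2-byte length header below is only an assumption; the real framing
 * is whatever the attached BPF parser expects:
 *
 *	struct my_hdr { __be16 len; } hdr = { .len = htons(plen) };
 *	struct iovec iov[2] = {
 *		{ .iov_base = &hdr, .iov_len = sizeof(hdr) },
 *		{ .iov_base = payload, .iov_len = plen },
 *	};
 *	struct msghdr mh = { .msg_iov = iov, .msg_iovlen = 2 };
 *
 *	sendmsg(kcmfd, &mh, MSG_BATCH);	-- queue, defer transmit
 *	sendmsg(kcmfd, &mh, 0);		-- queue and flush the batch
 */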
static int kcm_recvmsg(struct socket *sock, struct msghdr *msg,
		       size_t len, int flags)
{
	struct sock *sk = sock->sk;
	struct kcm_sock *kcm = kcm_sk(sk);
	int err = 0;
	struct strp_msg *stm;
	int copied = 0;
	struct sk_buff *skb;

	skb = skb_recv_datagram(sk, flags, &err);
	if (!skb)
		goto out;

	/* Okay, have a message on the receive queue */

	stm = strp_msg(skb);

	if (len > stm->full_len)
		len = stm->full_len;

	err = skb_copy_datagram_msg(skb, stm->offset, msg, len);
	if (err < 0)
		goto out;

	copied = len;
	if (likely(!(flags & MSG_PEEK))) {
		KCM_STATS_ADD(kcm->stats.rx_bytes, copied);
		if (copied < stm->full_len) {
			if (sock->type == SOCK_DGRAM) {
				/* Truncated message */
				msg->msg_flags |= MSG_TRUNC;
				goto msg_finished;
			}
			stm->offset += copied;
			stm->full_len -= copied;
		} else {
msg_finished:
			/* Finished with message */
			msg->msg_flags |= MSG_EOR;
			KCM_STATS_INCR(kcm->stats.rx_msgs);
		}
	}

out:
	skb_free_datagram(sk, skb);
	return copied ? : err;
}
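
/* Userspace usage sketch (illustrative, not part of this file): each
 * successful receive returns at most one parsed message. On SOCK_DGRAM
 * a short buffer truncates and discards the remainder (MSG_TRUNC); on
 * SOCK_SEQPACKET the remainder is kept and MSG_EOR marks completion:
 *
 *	char buf[65536];
 *	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
 *	struct msghdr mh = { .msg_iov = &iov, .msg_iovlen = 1 };
 *	ssize_t n = recvmsg(kcmfd, &mh, 0);
 *	if (n >= 0 && !(mh.msg_flags & MSG_EOR))
 *		;	-- message continues; read again to finish it
 */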
static ssize_t kcm_splice_read(struct socket *sock, loff_t *ppos,
			       struct pipe_inode_info *pipe, size_t len,
			       unsigned int flags)
{
	struct sock *sk = sock->sk;
	struct kcm_sock *kcm = kcm_sk(sk);
	struct strp_msg *stm;
	int err = 0;
	ssize_t copied;
	struct sk_buff *skb;

	/* Only support splice for SOCK_SEQPACKET */

	skb = skb_recv_datagram(sk, flags, &err);
	if (!skb)
		goto err_out;

	/* Okay, have a message on the receive queue */

	stm = strp_msg(skb);

	if (len > stm->full_len)
		len = stm->full_len;

	copied = skb_splice_bits(skb, sk, stm->offset, pipe, len, flags);
	if (copied < 0) {
		err = copied;
		goto err_out;
	}

	KCM_STATS_ADD(kcm->stats.rx_bytes, copied);

	stm->offset += copied;
	stm->full_len -= copied;

	/* We have no way to return MSG_EOR. If all the bytes have been
	 * read we still leave the message in the receive socket buffer.
	 * A subsequent recvmsg needs to be done to return MSG_EOR and
	 * finish reading the message.
	 */

	skb_free_datagram(sk, skb);
	return copied;

err_out:
	skb_free_datagram(sk, skb);
	return err;
}
/* kcm sock lock held */
static void kcm_recv_disable(struct kcm_sock *kcm)
{
	struct kcm_mux *mux = kcm->mux;

	if (kcm->rx_disabled)
		return;

	spin_lock_bh(&mux->rx_lock);

	kcm->rx_disabled = 1;

	/* If a psock is reserved we'll do cleanup in unreserve */
	if (!kcm->rx_psock) {
		if (kcm->rx_wait) {
			list_del(&kcm->wait_rx_list);
			/* paired with lockless reads in kcm_rfree() */
			WRITE_ONCE(kcm->rx_wait, false);
		}

		requeue_rx_msgs(mux, &kcm->sk.sk_receive_queue);
	}

	spin_unlock_bh(&mux->rx_lock);
}
/* kcm sock lock held */
static void kcm_recv_enable(struct kcm_sock *kcm)
{
	struct kcm_mux *mux = kcm->mux;

	if (!kcm->rx_disabled)
		return;

	spin_lock_bh(&mux->rx_lock);

	kcm->rx_disabled = 0;
	kcm_rcv_ready(kcm);

	spin_unlock_bh(&mux->rx_lock);
}
static int kcm_setsockopt(struct socket *sock, int level, int optname,
			  sockptr_t optval, unsigned int optlen)
{
	struct kcm_sock *kcm = kcm_sk(sock->sk);
	int val, valbool;
	int err = 0;

	if (level != SOL_KCM)
		return -ENOPROTOOPT;

	if (optlen < sizeof(int))
		return -EINVAL;

	if (copy_from_sockptr(&val, optval, sizeof(int)))
		return -EFAULT;

	valbool = val ? 1 : 0;

	switch (optname) {
	case KCM_RECV_DISABLE:
		lock_sock(&kcm->sk);
		if (valbool)
			kcm_recv_disable(kcm);
		else
			kcm_recv_enable(kcm);
		release_sock(&kcm->sk);
		break;
	default:
		err = -ENOPROTOOPT;
	}

	return err;
}
static int kcm_getsockopt(struct socket *sock, int level, int optname,
			  char __user *optval, int __user *optlen)
{
	struct kcm_sock *kcm = kcm_sk(sock->sk);
	int val, len;

	if (level != SOL_KCM)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;

	len = min_t(unsigned int, len, sizeof(int));
	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case KCM_RECV_DISABLE:
		val = kcm->rx_disabled;
		break;
	default:
		return -ENOPROTOOPT;
	}

	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, len))
		return -EFAULT;
	return 0;
}
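
/* Userspace usage sketch (illustrative, not part of this file):
 * KCM_RECV_DISABLE takes an int at SOL_KCM; while set, messages that
 * would have been steered to this socket are requeued to other KCM
 * sockets on the mux:
 *
 *	int on = 1;
 *	setsockopt(kcmfd, SOL_KCM, KCM_RECV_DISABLE, &on, sizeof(on));
 */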
static void init_kcm_sock(struct kcm_sock *kcm, struct kcm_mux *mux)
{
	struct kcm_sock *tkcm;
	struct list_head *head;
	int index = 0;

	/* For SOCK_SEQPACKET sock type, datagram_poll checks the sk_state, so
	 * we set sk_state, otherwise epoll_wait always returns right away with
	 * EPOLLHUP
	 */
	kcm->sk.sk_state = TCP_ESTABLISHED;

	/* Add to mux's kcm sockets list */
	kcm->mux = mux;
	spin_lock_bh(&mux->lock);

	head = &mux->kcm_socks;
	list_for_each_entry(tkcm, &mux->kcm_socks, kcm_sock_list) {
		if (tkcm->index != index)
			break;
		head = &tkcm->kcm_sock_list;
		index++;
	}

	list_add(&kcm->kcm_sock_list, head);
	kcm->index = index;

	mux->kcm_socks_cnt++;
	spin_unlock_bh(&mux->lock);

	INIT_WORK(&kcm->tx_work, kcm_tx_work);

	spin_lock_bh(&mux->rx_lock);
	kcm_rcv_ready(kcm);
	spin_unlock_bh(&mux->rx_lock);
}
static int kcm_attach(struct socket *sock, struct socket *csock,
		      struct bpf_prog *prog)
{
	struct kcm_sock *kcm = kcm_sk(sock->sk);
	struct kcm_mux *mux = kcm->mux;
	struct sock *csk;
	struct kcm_psock *psock = NULL, *tpsock;
	struct list_head *head;
	int index = 0;
	static const struct strp_callbacks cb = {
		.rcv_msg = kcm_rcv_strparser,
		.parse_msg = kcm_parse_func_strparser,
		.read_sock_done = kcm_read_sock_done,
	};
	int err = 0;

	csk = csock->sk;
	if (!csk)
		return -EINVAL;

	lock_sock(csk);

	/* Only allow TCP sockets to be attached for now */
	if ((csk->sk_family != AF_INET && csk->sk_family != AF_INET6) ||
	    csk->sk_protocol != IPPROTO_TCP) {
		err = -EOPNOTSUPP;
		goto out;
	}

	/* Don't allow listeners or closed sockets */
	if (csk->sk_state == TCP_LISTEN || csk->sk_state == TCP_CLOSE) {
		err = -EOPNOTSUPP;
		goto out;
	}

	psock = kmem_cache_zalloc(kcm_psockp, GFP_KERNEL);
	if (!psock) {
		err = -ENOMEM;
		goto out;
	}

	psock->mux = mux;
	psock->sk = csk;
	psock->bpf_prog = prog;

	write_lock_bh(&csk->sk_callback_lock);

	/* Check if sk_user_data is already used by KCM or someone else.
	 * Must be done under lock to prevent race conditions.
	 */
	if (csk->sk_user_data) {
		write_unlock_bh(&csk->sk_callback_lock);
		kmem_cache_free(kcm_psockp, psock);
		err = -EALREADY;
		goto out;
	}

	err = strp_init(&psock->strp, csk, &cb);
	if (err) {
		write_unlock_bh(&csk->sk_callback_lock);
		kmem_cache_free(kcm_psockp, psock);
		goto out;
	}

	psock->save_data_ready = csk->sk_data_ready;
	psock->save_write_space = csk->sk_write_space;
	psock->save_state_change = csk->sk_state_change;
	csk->sk_user_data = psock;
	csk->sk_data_ready = psock_data_ready;
	csk->sk_write_space = psock_write_space;
	csk->sk_state_change = psock_state_change;

	write_unlock_bh(&csk->sk_callback_lock);

	sock_hold(csk);

	/* Finished initialization, now add the psock to the MUX. */
	spin_lock_bh(&mux->lock);
	head = &mux->psocks;
	list_for_each_entry(tpsock, &mux->psocks, psock_list) {
		if (tpsock->index != index)
			break;
		head = &tpsock->psock_list;
		index++;
	}

	list_add(&psock->psock_list, head);
	psock->index = index;

	KCM_STATS_INCR(mux->stats.psock_attach);
	mux->psocks_cnt++;
	psock_now_avail(psock);
	spin_unlock_bh(&mux->lock);

	/* Schedule RX work in case there are already bytes queued */
	strp_check_rcv(&psock->strp);

out:
	release_sock(csk);

	return err;
}
static int kcm_attach_ioctl(struct socket *sock, struct kcm_attach *info)
{
	struct socket *csock;
	struct bpf_prog *prog;
	int err;

	csock = sockfd_lookup(info->fd, &err);
	if (!csock)
		return -ENOENT;

	prog = bpf_prog_get_type(info->bpf_fd, BPF_PROG_TYPE_SOCKET_FILTER);
	if (IS_ERR(prog)) {
		err = PTR_ERR(prog);
		goto out;
	}

	err = kcm_attach(sock, csock, prog);
	if (err) {
		bpf_prog_put(prog);
		goto out;
	}

	/* Keep reference on file also */

	return 0;
out:
	sockfd_put(csock);
	return err;
}
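
/* Userspace usage sketch (illustrative, not part of this file): attach
 * an established TCP socket plus a message-parsing BPF program to the
 * multiplexor. bpf_fd is assumed to come from bpf(BPF_PROG_LOAD, ...)
 * with type BPF_PROG_TYPE_SOCKET_FILTER, returning the total message
 * length when run over a candidate message:
 *
 *	struct kcm_attach attach = {
 *		.fd = tcpfd,		-- connected TCP socket
 *		.bpf_fd = bpf_fd,	-- parser program
 *	};
 *	if (ioctl(kcmfd, SIOCKCMATTACH, &attach) < 0)
 *		perror("SIOCKCMATTACH");
 */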
static void kcm_unattach(struct kcm_psock *psock)
{
	struct sock *csk = psock->sk;
	struct kcm_mux *mux = psock->mux;

	lock_sock(csk);

	/* Stop getting callbacks from TCP socket. After this there should
	 * be no way to reserve a kcm for this psock.
	 */
	write_lock_bh(&csk->sk_callback_lock);
	csk->sk_user_data = NULL;
	csk->sk_data_ready = psock->save_data_ready;
	csk->sk_write_space = psock->save_write_space;
	csk->sk_state_change = psock->save_state_change;
	strp_stop(&psock->strp);

	if (WARN_ON(psock->rx_kcm)) {
		write_unlock_bh(&csk->sk_callback_lock);
		release_sock(csk);
		return;
	}

	spin_lock_bh(&mux->rx_lock);

	/* Stop receiver activities. After this point psock should not be
	 * able to get onto ready list either through callbacks or work.
	 */
	if (psock->ready_rx_msg) {
		list_del(&psock->psock_ready_list);
		kfree_skb(psock->ready_rx_msg);
		psock->ready_rx_msg = NULL;
		KCM_STATS_INCR(mux->stats.rx_ready_drops);
	}

	spin_unlock_bh(&mux->rx_lock);

	write_unlock_bh(&csk->sk_callback_lock);

	/* Call strp_done without sock lock */
	release_sock(csk);
	strp_done(&psock->strp);
	lock_sock(csk);

	bpf_prog_put(psock->bpf_prog);

	spin_lock_bh(&mux->lock);

	aggregate_psock_stats(&psock->stats, &mux->aggregate_psock_stats);
	save_strp_stats(&psock->strp, &mux->aggregate_strp_stats);

	KCM_STATS_INCR(mux->stats.psock_unattach);

	if (psock->tx_kcm) {
		/* psock was reserved. Just mark it finished and we will clean
		 * up in the kcm paths, we need kcm lock which can not be
		 * acquired here.
		 */
		KCM_STATS_INCR(mux->stats.psock_unattach_rsvd);
		spin_unlock_bh(&mux->lock);

		/* We are unattaching a socket that is reserved. Abort the
		 * socket since we may be out of sync in sending on it. We need
		 * to do this without the mux lock.
		 */
		kcm_abort_tx_psock(psock, EPIPE, false);

		spin_lock_bh(&mux->lock);
		if (!psock->tx_kcm) {
			/* psock now unreserved in window mux was unlocked */
			goto no_reserved;
		}
		psock->done = 1;

		/* Commit done before queuing work to process it */
		smp_mb();

		/* Queue tx work to make sure psock->done is handled */
		queue_work(kcm_wq, &psock->tx_kcm->tx_work);
		spin_unlock_bh(&mux->lock);
	} else {
no_reserved:
		if (!psock->tx_stopped)
			list_del(&psock->psock_avail_list);
		list_del(&psock->psock_list);
		mux->psocks_cnt--;
		spin_unlock_bh(&mux->lock);

		sock_put(csk);
		fput(csk->sk_socket->file);
		kmem_cache_free(kcm_psockp, psock);
	}

	release_sock(csk);
}
static int kcm_unattach_ioctl(struct socket *sock, struct kcm_unattach *info)
{
	struct kcm_sock *kcm = kcm_sk(sock->sk);
	struct kcm_mux *mux = kcm->mux;
	struct kcm_psock *psock;
	struct socket *csock;
	struct sock *csk;
	int err;

	csock = sockfd_lookup(info->fd, &err);
	if (!csock)
		return -ENOENT;

	csk = csock->sk;
	if (!csk) {
		err = -EINVAL;
		goto out;
	}

	err = -ENOENT;

	spin_lock_bh(&mux->lock);

	list_for_each_entry(psock, &mux->psocks, psock_list) {
		if (psock->sk != csk)
			continue;

		/* Found the matching psock */

		if (psock->unattaching || WARN_ON(psock->done)) {
			err = -EALREADY;
			break;
		}

		psock->unattaching = 1;

		spin_unlock_bh(&mux->lock);

		/* Lower socket lock should already be held */
		kcm_unattach(psock);

		err = 0;
		goto out;
	}

	spin_unlock_bh(&mux->lock);

out:
	sockfd_put(csock);
	return err;
}
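
/* Userspace usage sketch (illustrative, not part of this file):
 *
 *	struct kcm_unattach unattach = { .fd = tcpfd };
 *	ioctl(kcmfd, SIOCKCMUNATTACH, &unattach);
 */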
static struct proto kcm_proto = {
	.name = "KCM",
	.owner = THIS_MODULE,
	.obj_size = sizeof(struct kcm_sock),
};
/* Clone a kcm socket. */
static struct file *kcm_clone(struct socket *osock)
{
	struct socket *newsock;
	struct sock *newsk;

	newsock = sock_alloc();
	if (!newsock)
		return ERR_PTR(-ENFILE);

	newsock->type = osock->type;
	newsock->ops = osock->ops;

	__module_get(newsock->ops->owner);

	newsk = sk_alloc(sock_net(osock->sk), PF_KCM, GFP_KERNEL,
			 &kcm_proto, false);
	if (!newsk) {
		sock_release(newsock);
		return ERR_PTR(-ENOMEM);
	}
	sock_init_data(newsock, newsk);
	init_kcm_sock(kcm_sk(newsk), kcm_sk(osock->sk)->mux);

	return sock_alloc_file(newsock, 0, osock->sk->sk_prot_creator->name);
}
static int kcm_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	int err;

	switch (cmd) {
	case SIOCKCMATTACH: {
		struct kcm_attach info;

		if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
			return -EFAULT;

		err = kcm_attach_ioctl(sock, &info);

		break;
	}
	case SIOCKCMUNATTACH: {
		struct kcm_unattach info;

		if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
			return -EFAULT;

		err = kcm_unattach_ioctl(sock, &info);

		break;
	}
	case SIOCKCMCLONE: {
		struct kcm_clone info;
		struct file *file;

		info.fd = get_unused_fd_flags(0);
		if (unlikely(info.fd < 0))
			return -EBADF;

		file = kcm_clone(sock);
		if (IS_ERR(file)) {
			put_unused_fd(info.fd);
			return PTR_ERR(file);
		}
		if (copy_to_user((void __user *)arg, &info,
				 sizeof(info))) {
			put_unused_fd(info.fd);
			fput(file);
			return -EFAULT;
		}
		fd_install(info.fd, file);
		err = 0;
		break;
	}
	default:
		err = -ENOIOCTLCMD;
		break;
	}

	return err;
}
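
/* Userspace usage sketch (illustrative, not part of this file):
 * SIOCKCMCLONE returns a new KCM socket on the same mux; the fd field
 * of struct kcm_clone is an output here:
 *
 *	struct kcm_clone clone = { 0 };
 *	if (ioctl(kcmfd, SIOCKCMCLONE, &clone) == 0)
 *		;	-- clone.fd is a second KCM socket sharing the mux
 */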
static void free_mux(struct rcu_head *rcu)
{
	struct kcm_mux *mux = container_of(rcu,
	    struct kcm_mux, rcu);

	kmem_cache_free(kcm_muxp, mux);
}

static void release_mux(struct kcm_mux *mux)
{
	struct kcm_net *knet = mux->knet;
	struct kcm_psock *psock, *tmp_psock;

	/* Release psocks */
	list_for_each_entry_safe(psock, tmp_psock,
				 &mux->psocks, psock_list) {
		if (!WARN_ON(psock->unattaching))
			kcm_unattach(psock);
	}

	if (WARN_ON(mux->psocks_cnt))
		return;

	__skb_queue_purge(&mux->rx_hold_queue);

	mutex_lock(&knet->mutex);
	aggregate_mux_stats(&mux->stats, &knet->aggregate_mux_stats);
	aggregate_psock_stats(&mux->aggregate_psock_stats,
			      &knet->aggregate_psock_stats);
	aggregate_strp_stats(&mux->aggregate_strp_stats,
			     &knet->aggregate_strp_stats);
	list_del_rcu(&mux->kcm_mux_list);
	knet->count--;
	mutex_unlock(&knet->mutex);

	call_rcu(&mux->rcu, free_mux);
}
static void kcm_done(struct kcm_sock *kcm)
{
	struct kcm_mux *mux = kcm->mux;
	struct sock *sk = &kcm->sk;
	int socks_cnt;

	spin_lock_bh(&mux->rx_lock);
	if (kcm->rx_psock) {
		/* Cleanup in unreserve_rx_kcm */
		WARN_ON(kcm->done);
		kcm->rx_disabled = 1;
		kcm->done = 1;
		spin_unlock_bh(&mux->rx_lock);
		return;
	}

	if (kcm->rx_wait) {
		list_del(&kcm->wait_rx_list);
		/* paired with lockless reads in kcm_rfree() */
		WRITE_ONCE(kcm->rx_wait, false);
	}
	/* Move any pending receive messages to other kcm sockets */
	requeue_rx_msgs(mux, &sk->sk_receive_queue);

	spin_unlock_bh(&mux->rx_lock);

	if (WARN_ON(sk_rmem_alloc_get(sk)))
		return;

	/* Detach from MUX */
	spin_lock_bh(&mux->lock);

	list_del(&kcm->kcm_sock_list);
	mux->kcm_socks_cnt--;
	socks_cnt = mux->kcm_socks_cnt;

	spin_unlock_bh(&mux->lock);

	if (!socks_cnt) {
		/* We are done with the mux now. */
		release_mux(mux);
	}

	WARN_ON(kcm->rx_wait);

	sock_put(&kcm->sk);
}
/* Close a KCM socket.
 * If this is the last KCM socket on the MUX, destroy the MUX.
 */
static int kcm_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct kcm_sock *kcm;
	struct kcm_mux *mux;
	struct kcm_psock *psock;

	if (!sk)
		return 0;

	kcm = kcm_sk(sk);
	mux = kcm->mux;

	lock_sock(sk);
	sock_orphan(sk);
	kfree_skb(kcm->seq_skb);

	/* Purge queue under lock to avoid race condition with tx_work trying
	 * to act when queue is nonempty. If tx_work runs after this point
	 * it will just return.
	 */
	__skb_queue_purge(&sk->sk_write_queue);

	/* Set tx_stopped. This is checked when psock is bound to a kcm and we
	 * get a writespace callback. This prevents further work being queued
	 * from the callback (unbinding the psock occurs after canceling work).
	 */
	kcm->tx_stopped = 1;

	release_sock(sk);

	spin_lock_bh(&mux->lock);
	if (kcm->tx_wait) {
		/* Take off tx_wait list, after this point there should be no way
		 * that a psock will be assigned to this kcm.
		 */
		list_del(&kcm->wait_psock_list);
		kcm->tx_wait = false;
	}
	spin_unlock_bh(&mux->lock);

	/* Cancel work. After this point there should be no outside references
	 * to the kcm socket.
	 */
	cancel_work_sync(&kcm->tx_work);

	lock_sock(sk);
	psock = kcm->tx_psock;
	if (psock) {
		/* A psock was reserved, so we need to kill it since it
		 * may already have some bytes queued from a message. We
		 * need to do this after removing kcm from tx_wait list.
		 */
		kcm_abort_tx_psock(psock, EPIPE, false);
		unreserve_psock(kcm);
	}
	release_sock(sk);

	WARN_ON(kcm->tx_wait);
	WARN_ON(kcm->tx_psock);

	sock->sk = NULL;

	kcm_done(kcm);

	return 0;
}
static const struct proto_ops kcm_dgram_ops = {
	.family = PF_KCM,
	.owner = THIS_MODULE,
	.release = kcm_release,
	.bind = sock_no_bind,
	.connect = sock_no_connect,
	.socketpair = sock_no_socketpair,
	.accept = sock_no_accept,
	.getname = sock_no_getname,
	.poll = datagram_poll,
	.ioctl = kcm_ioctl,
	.listen = sock_no_listen,
	.shutdown = sock_no_shutdown,
	.setsockopt = kcm_setsockopt,
	.getsockopt = kcm_getsockopt,
	.sendmsg = kcm_sendmsg,
	.recvmsg = kcm_recvmsg,
	.mmap = sock_no_mmap,
	.sendpage = kcm_sendpage,
};

static const struct proto_ops kcm_seqpacket_ops = {
	.family = PF_KCM,
	.owner = THIS_MODULE,
	.release = kcm_release,
	.bind = sock_no_bind,
	.connect = sock_no_connect,
	.socketpair = sock_no_socketpair,
	.accept = sock_no_accept,
	.getname = sock_no_getname,
	.poll = datagram_poll,
	.ioctl = kcm_ioctl,
	.listen = sock_no_listen,
	.shutdown = sock_no_shutdown,
	.setsockopt = kcm_setsockopt,
	.getsockopt = kcm_getsockopt,
	.sendmsg = kcm_sendmsg,
	.recvmsg = kcm_recvmsg,
	.mmap = sock_no_mmap,
	.sendpage = kcm_sendpage,
	.splice_read = kcm_splice_read,
};
/* Create proto operation for kcm sockets */
static int kcm_create(struct net *net, struct socket *sock,
		      int protocol, int kern)
{
	struct kcm_net *knet = net_generic(net, kcm_net_id);
	struct sock *sk;
	struct kcm_mux *mux;

	switch (sock->type) {
	case SOCK_DGRAM:
		sock->ops = &kcm_dgram_ops;
		break;
	case SOCK_SEQPACKET:
		sock->ops = &kcm_seqpacket_ops;
		break;
	default:
		return -ESOCKTNOSUPPORT;
	}

	if (protocol != KCMPROTO_CONNECTED)
		return -EPROTONOSUPPORT;

	sk = sk_alloc(net, PF_KCM, GFP_KERNEL, &kcm_proto, kern);
	if (!sk)
		return -ENOMEM;

	/* Allocate a kcm mux, shared between KCM sockets */
	mux = kmem_cache_zalloc(kcm_muxp, GFP_KERNEL);
	if (!mux) {
		sk_free(sk);
		return -ENOMEM;
	}

	spin_lock_init(&mux->lock);
	spin_lock_init(&mux->rx_lock);
	INIT_LIST_HEAD(&mux->kcm_socks);
	INIT_LIST_HEAD(&mux->kcm_rx_waiters);
	INIT_LIST_HEAD(&mux->kcm_tx_waiters);

	INIT_LIST_HEAD(&mux->psocks);
	INIT_LIST_HEAD(&mux->psocks_ready);
	INIT_LIST_HEAD(&mux->psocks_avail);

	mux->knet = knet;

	/* Add new MUX to list */
	mutex_lock(&knet->mutex);
	list_add_rcu(&mux->kcm_mux_list, &knet->mux_list);
	knet->count++;
	mutex_unlock(&knet->mutex);

	skb_queue_head_init(&mux->rx_hold_queue);

	/* Init KCM socket */
	sock_init_data(sock, sk);
	init_kcm_sock(kcm_sk(sk), mux);

	return 0;
}
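
/* Userspace usage sketch (illustrative, not part of this file): a new
 * KCM socket implicitly creates its own multiplexor:
 *
 *	int kcmfd = socket(AF_KCM, SOCK_SEQPACKET, KCMPROTO_CONNECTED);
 */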
static const struct net_proto_family kcm_family_ops = {
	.family = PF_KCM,
	.create = kcm_create,
	.owner = THIS_MODULE,
};

static __net_init int kcm_init_net(struct net *net)
{
	struct kcm_net *knet = net_generic(net, kcm_net_id);

	INIT_LIST_HEAD_RCU(&knet->mux_list);
	mutex_init(&knet->mutex);

	return 0;
}

static __net_exit void kcm_exit_net(struct net *net)
{
	struct kcm_net *knet = net_generic(net, kcm_net_id);

	/* All KCM sockets should be closed at this point, which should mean
	 * that all multiplexors and psocks have been destroyed.
	 */
	WARN_ON(!list_empty(&knet->mux_list));
}

static struct pernet_operations kcm_net_ops = {
	.init = kcm_init_net,
	.exit = kcm_exit_net,
	.id   = &kcm_net_id,
	.size = sizeof(struct kcm_net),
};
static int __init kcm_init(void)
{
	int err = -ENOMEM;

	kcm_muxp = kmem_cache_create("kcm_mux_cache",
				     sizeof(struct kcm_mux), 0,
				     SLAB_HWCACHE_ALIGN, NULL);
	if (!kcm_muxp)
		goto fail;

	kcm_psockp = kmem_cache_create("kcm_psock_cache",
				       sizeof(struct kcm_psock), 0,
				       SLAB_HWCACHE_ALIGN, NULL);
	if (!kcm_psockp)
		goto fail;

	kcm_wq = create_singlethread_workqueue("kkcmd");
	if (!kcm_wq)
		goto fail;

	err = proto_register(&kcm_proto, 1);
	if (err)
		goto fail;

	err = register_pernet_device(&kcm_net_ops);
	if (err)
		goto net_ops_fail;

	err = sock_register(&kcm_family_ops);
	if (err)
		goto sock_register_fail;

	err = kcm_proc_init();
	if (err)
		goto proc_init_fail;

	return 0;

proc_init_fail:
	sock_unregister(PF_KCM);

sock_register_fail:
	unregister_pernet_device(&kcm_net_ops);

net_ops_fail:
	proto_unregister(&kcm_proto);

fail:
	kmem_cache_destroy(kcm_muxp);
	kmem_cache_destroy(kcm_psockp);

	if (kcm_wq)
		destroy_workqueue(kcm_wq);

	return err;
}
static void __exit kcm_exit(void)
{
	kcm_proc_exit();
	sock_unregister(PF_KCM);
	unregister_pernet_device(&kcm_net_ops);
	proto_unregister(&kcm_proto);
	destroy_workqueue(kcm_wq);

	kmem_cache_destroy(kcm_muxp);
	kmem_cache_destroy(kcm_psockp);
}

module_init(kcm_init);
module_exit(kcm_exit);

MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_KCM);