// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * NET4:	Implementation of BSD Unix domain sockets.
 *
 * Authors:	Alan Cox, <alan@lxorguk.ukuu.org.uk>
 *
 * Fixes:
 *	Linus Torvalds	:	Assorted bug cures.
 *	Niibe Yutaka	:	async I/O support.
 *	Carsten Paeth	:	PF_UNIX check, address fixes.
 *	Alan Cox	:	Limit size of allocated blocks.
 *	Alan Cox	:	Fixed the stupid socketpair bug.
 *	Alan Cox	:	BSD compatibility fine tuning.
 *	Alan Cox	:	Fixed a bug in connect when interrupted.
 *	Alan Cox	:	Sorted out a proper draft version of
 *				file descriptor passing hacked up from
 *				BSD stdio.
 *	Marty Leisner	:	Fixes to fd passing.
 *	Nick Nevin	:	recvmsg bugfix.
 *	Alan Cox	:	Started proper garbage collector.
 *	Heiko Eißfeldt	:	Missing verify_area check.
 *	Alan Cox	:	Started POSIXisms.
 *	Andreas Schwab	:	Replace inode by dentry for proper
 *				reference counting.
 *	Kirk Petersen	:	Made this a module.
 *	Christoph Rohland :	Elegant non-blocking accept/connect algorithm.
 *	Alexey Kuznetsov :	Repaired (I hope) bugs introduced
 *				by the above two patches.
 *	Andrea Arcangeli :	If possible we block in connect(2)
 *				if the max backlog of the listen socket
 *				has been reached. This won't break
 *				old apps and it will avoid huge amounts
 *				of sockets hashed (for unix_gc()
 *				performance reasons).
 *				Security fix that limits the max
 *				number of socks to 2*max_files and
 *				the number of skbs queueable in the
 *				dgram receiver.
 *	Artur Skawina	:	Hash function optimizations.
 *	Alexey Kuznetsov :	Full scale SMP. Lots of bugs are introduced 8)
 *	Malcolm Beattie	:	Set peercred for socketpair.
 *	Michal Ostrowski :	Module initialization cleanup.
 *	Arnaldo C. Melo	:	Remove MOD_{INC,DEC}_USE_COUNT;
 *				the core infrastructure is doing that
 *				for all net proto families now (2.5.69+).
 *
 * Known differences from reference BSD that was tested:
 *
 *	[TO FIX]
 *	ECONNREFUSED is not returned from one end of a connected() socket to the
 *		other the moment one end closes.
 *	fstat() doesn't return st_dev=0, and gives the blksize as high water mark
 *		and a fake inode identifier (nor the BSD first socket fstat twice bug).
 *	[FIXED]
 *	accept() returns a path name even if the connecting socket has closed
 *		in the meantime (BSD loses the path and gives up).
 *	accept() returns 0 length path for an unbound connector. BSD returns 16
 *		and a null first byte in the path (but not for gethost/peername - BSD bug ??)
 *	socketpair(...SOCK_RAW..) doesn't panic the kernel.
 *	BSD af_unix apparently has connect forgetting to block properly.
 *		(need to check this with the POSIX spec in detail)
 *
 * Differences from 2.0.0-11-... (ANK)
 *	Bug fixes and improvements.
 *		- client shutdown killed server socket.
 *		- removed all useless cli/sti pairs.
 *
 *	Semantic changes/extensions.
 *		- generic control message passing.
 *		- SCM_CREDENTIALS control message.
 *		- "Abstract" (not FS based) socket bindings.
 *		  Abstract names are sequences of bytes (not zero terminated)
 *		  starting with 0, so that this name space does not intersect
 *		  with BSD names.
 */
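
/* Illustrative userspace sketch (not part of the kernel source): binding a
 * socket in the abstract namespace described above.  The name starts with a
 * NUL byte and the address length covers exactly the name bytes used, so no
 * trailing NUL is needed:
 *
 *	struct sockaddr_un sun = { .sun_family = AF_UNIX };
 *	int fd = socket(AF_UNIX, SOCK_STREAM, 0);
 *
 *	sun.sun_path[0] = '\0';				// abstract marker
 *	memcpy(sun.sun_path + 1, "example", 7);		// name bytes
 *	bind(fd, (struct sockaddr *)&sun,
 *	     offsetof(struct sockaddr_un, sun_path) + 1 + 7);
 */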
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/sched/signal.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/dcache.h>
#include <linux/namei.h>
#include <linux/socket.h>
#include <linux/un.h>
#include <linux/fcntl.h>
#include <linux/filter.h>
#include <linux/termios.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/in.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <net/af_unix.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <net/scm.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/rtnetlink.h>
#include <linux/mount.h>
#include <net/checksum.h>
#include <linux/security.h>
#include <linux/freezer.h>
#include <linux/file.h>
#include <linux/btf_ids.h>

#include "scm.h"

static atomic_long_t unix_nr_socks;
static struct hlist_head bsd_socket_buckets[UNIX_HASH_SIZE / 2];
static spinlock_t bsd_socket_locks[UNIX_HASH_SIZE / 2];

/* SMP locking strategy:
 *    the hash table is protected by a spinlock;
 *    each socket's state is protected by a separate spinlock.
 */

static unsigned int unix_unbound_hash(struct sock *sk)
{
	unsigned long hash = (unsigned long)sk;

	hash ^= hash >> 16;
	hash ^= hash >> 8;
	hash ^= sk->sk_type;

	return hash & UNIX_HASH_MOD;
}

static unsigned int unix_bsd_hash(struct inode *i)
{
	return i->i_ino & UNIX_HASH_MOD;
}

static unsigned int unix_abstract_hash(struct sockaddr_un *sunaddr,
				       int addr_len, int type)
{
	__wsum csum = csum_partial(sunaddr, addr_len, 0);
	unsigned int hash;

	hash = (__force unsigned int)csum_fold(csum);
	hash ^= hash >> 8;
	hash ^= type;

	return UNIX_HASH_MOD + 1 + (hash & UNIX_HASH_MOD);
}
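
/* Layout note derived from the three helpers above: unbound sockets hash
 * into buckets [0, UNIX_HASH_MOD] of the per-netns table, abstract sockets
 * into [UNIX_HASH_MOD + 1, 2 * UNIX_HASH_MOD + 1], and file-system sockets
 * are additionally indexed by inode number in bsd_socket_buckets, so the
 * two name spaces never collide in a lookup.
 */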

static void unix_table_double_lock(struct net *net,
				   unsigned int hash1, unsigned int hash2)
{
	if (hash1 == hash2) {
		spin_lock(&net->unx.table.locks[hash1]);
		return;
	}

	if (hash1 > hash2)
		swap(hash1, hash2);

	spin_lock(&net->unx.table.locks[hash1]);
	spin_lock_nested(&net->unx.table.locks[hash2], SINGLE_DEPTH_NESTING);
}

static void unix_table_double_unlock(struct net *net,
				     unsigned int hash1, unsigned int hash2)
{
	if (hash1 == hash2) {
		spin_unlock(&net->unx.table.locks[hash1]);
		return;
	}

	spin_unlock(&net->unx.table.locks[hash1]);
	spin_unlock(&net->unx.table.locks[hash2]);
}

#ifdef CONFIG_SECURITY_NETWORK
static void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{
	UNIXCB(skb).secid = scm->secid;
}

static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{
	scm->secid = UNIXCB(skb).secid;
}

static inline bool unix_secdata_eq(struct scm_cookie *scm, struct sk_buff *skb)
{
	return (scm->secid == UNIXCB(skb).secid);
}
#else
static inline void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{
}

static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{
}

static inline bool unix_secdata_eq(struct scm_cookie *scm, struct sk_buff *skb)
{
	return true;
}
#endif /* CONFIG_SECURITY_NETWORK */

#define unix_peer(sk) (unix_sk(sk)->peer)

static inline int unix_our_peer(struct sock *sk, struct sock *osk)
{
	return unix_peer(osk) == sk;
}

static inline int unix_may_send(struct sock *sk, struct sock *osk)
{
	return unix_peer(osk) == NULL || unix_our_peer(sk, osk);
}

static inline int unix_recvq_full(const struct sock *sk)
{
	return skb_queue_len(&sk->sk_receive_queue) > sk->sk_max_ack_backlog;
}

static inline int unix_recvq_full_lockless(const struct sock *sk)
{
	return skb_queue_len_lockless(&sk->sk_receive_queue) >
		READ_ONCE(sk->sk_max_ack_backlog);
}

struct sock *unix_peer_get(struct sock *s)
{
	struct sock *peer;

	unix_state_lock(s);
	peer = unix_peer(s);
	if (peer)
		sock_hold(peer);
	unix_state_unlock(s);
	return peer;
}
EXPORT_SYMBOL_GPL(unix_peer_get);

static struct unix_address *unix_create_addr(struct sockaddr_un *sunaddr,
					     int addr_len)
{
	struct unix_address *addr;

	addr = kmalloc(sizeof(*addr) + addr_len, GFP_KERNEL);
	if (!addr)
		return NULL;

	refcount_set(&addr->refcnt, 1);
	addr->len = addr_len;
	memcpy(addr->name, sunaddr, addr_len);

	return addr;
}

static inline void unix_release_addr(struct unix_address *addr)
{
	if (refcount_dec_and_test(&addr->refcnt))
		kfree(addr);
}

/*
 * Check unix socket name:
 *	- it must not be of zero length.
 *	- if it does not start with a zero byte, it must be NUL-terminated (FS object).
 *	- if it starts with a zero byte, it is an abstract name.
 */

static int unix_validate_addr(struct sockaddr_un *sunaddr, int addr_len)
{
	if (addr_len <= offsetof(struct sockaddr_un, sun_path) ||
	    addr_len > sizeof(*sunaddr))
		return -EINVAL;

	if (sunaddr->sun_family != AF_UNIX)
		return -EINVAL;

	return 0;
}

static void unix_mkname_bsd(struct sockaddr_un *sunaddr, int addr_len)
{
	/* This may look like an off-by-one error but it is a bit more
	 * subtle.  108 is the longest valid AF_UNIX path for a binding.
	 * sun_path[108] doesn't as such exist.  However in kernel space
	 * we are guaranteed that it is a valid memory location in our
	 * kernel address buffer because syscall functions always pass
	 * a pointer of struct sockaddr_storage, which has a bigger buffer
	 * than 108.
	 */
	((char *)sunaddr)[addr_len] = 0;
}

static void __unix_remove_socket(struct sock *sk)
{
	sk_del_node_init(sk);
}

static void __unix_insert_socket(struct net *net, struct sock *sk)
{
	DEBUG_NET_WARN_ON_ONCE(!sk_unhashed(sk));
	sk_add_node(sk, &net->unx.table.buckets[sk->sk_hash]);
}

static void __unix_set_addr_hash(struct net *net, struct sock *sk,
				 struct unix_address *addr, unsigned int hash)
{
	__unix_remove_socket(sk);
	smp_store_release(&unix_sk(sk)->addr, addr);

	sk->sk_hash = hash;
	__unix_insert_socket(net, sk);
}

static void unix_remove_socket(struct net *net, struct sock *sk)
{
	spin_lock(&net->unx.table.locks[sk->sk_hash]);
	__unix_remove_socket(sk);
	spin_unlock(&net->unx.table.locks[sk->sk_hash]);
}

static void unix_insert_unbound_socket(struct net *net, struct sock *sk)
{
	spin_lock(&net->unx.table.locks[sk->sk_hash]);
	__unix_insert_socket(net, sk);
	spin_unlock(&net->unx.table.locks[sk->sk_hash]);
}

static void unix_insert_bsd_socket(struct sock *sk)
{
	spin_lock(&bsd_socket_locks[sk->sk_hash]);
	sk_add_bind_node(sk, &bsd_socket_buckets[sk->sk_hash]);
	spin_unlock(&bsd_socket_locks[sk->sk_hash]);
}

static void unix_remove_bsd_socket(struct sock *sk)
{
	if (!hlist_unhashed(&sk->sk_bind_node)) {
		spin_lock(&bsd_socket_locks[sk->sk_hash]);
		__sk_del_bind_node(sk);
		spin_unlock(&bsd_socket_locks[sk->sk_hash]);

		sk_node_init(&sk->sk_bind_node);
	}
}

static struct sock *__unix_find_socket_byname(struct net *net,
					      struct sockaddr_un *sunname,
					      int len, unsigned int hash)
{
	struct sock *s;

	sk_for_each(s, &net->unx.table.buckets[hash]) {
		struct unix_sock *u = unix_sk(s);

		if (u->addr->len == len &&
		    !memcmp(u->addr->name, sunname, len))
			return s;
	}
	return NULL;
}

static inline struct sock *unix_find_socket_byname(struct net *net,
						   struct sockaddr_un *sunname,
						   int len, unsigned int hash)
{
	struct sock *s;

	spin_lock(&net->unx.table.locks[hash]);
	s = __unix_find_socket_byname(net, sunname, len, hash);
	if (s)
		sock_hold(s);
	spin_unlock(&net->unx.table.locks[hash]);
	return s;
}

static struct sock *unix_find_socket_byinode(struct inode *i)
{
	unsigned int hash = unix_bsd_hash(i);
	struct sock *s;

	spin_lock(&bsd_socket_locks[hash]);
	sk_for_each_bound(s, &bsd_socket_buckets[hash]) {
		struct dentry *dentry = unix_sk(s)->path.dentry;

		if (dentry && d_backing_inode(dentry) == i) {
			sock_hold(s);
			spin_unlock(&bsd_socket_locks[hash]);
			return s;
		}
	}
	spin_unlock(&bsd_socket_locks[hash]);
	return NULL;
}

/* Support code for asymmetrically connected dgram sockets
 *
 * If a datagram socket is connected to a socket not itself connected
 * to the first socket (eg, /dev/log), clients may only enqueue more
 * messages if the present receive queue of the server socket is not
 * "too large".  This means there's a second writeability condition
 * poll and sendmsg need to test.  The dgram recv code will do a wake
 * up on the peer_wait wait queue of a socket upon reception of a
 * datagram which needs to be propagated to sleeping would-be writers
 * since these might not have sent anything so far.  This can't be
 * accomplished via poll_wait because the lifetime of the server
 * socket might be less than that of its clients if these break their
 * association with it or if the server socket is closed while clients
 * are still connected to it and there's no way to inform "a polling
 * implementation" that it should let go of a certain wait queue.
 *
 * In order to propagate a wake up, a wait_queue_entry_t of the client
 * socket is enqueued on the peer_wait queue of the server socket
 * whose wake function does a wake_up on the ordinary client socket
 * wait queue.  This connection is established whenever a write (or
 * poll for write) hits the flow control condition and is broken when
 * the association to the server socket is dissolved or after a wake up
 * was relayed.
 */
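
/* Illustrative userspace sketch (not part of the kernel source): the relay
 * described above is what makes a blocked dgram sender wake up when the
 * busy peer drains its queue:
 *
 *	struct pollfd pfd = { .fd = client_fd, .events = POLLOUT };
 *
 *	poll(&pfd, 1, -1);			// sleeps on the peer_wait relay
 *	if (pfd.revents & POLLOUT)
 *		send(client_fd, buf, len, 0);	// queue has room again
 *
 * (client_fd, buf and len are assumed to be set up elsewhere.)
 */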

static int unix_dgram_peer_wake_relay(wait_queue_entry_t *q, unsigned mode, int flags,
				      void *key)
{
	struct unix_sock *u;
	wait_queue_head_t *u_sleep;

	u = container_of(q, struct unix_sock, peer_wake);

	__remove_wait_queue(&unix_sk(u->peer_wake.private)->peer_wait,
			    q);
	u->peer_wake.private = NULL;

	/* relaying can only happen while the wq still exists */
	u_sleep = sk_sleep(&u->sk);
	if (u_sleep)
		wake_up_interruptible_poll(u_sleep, key_to_poll(key));

	return 0;
}

static int unix_dgram_peer_wake_connect(struct sock *sk, struct sock *other)
{
	struct unix_sock *u, *u_other;
	int rc;

	u = unix_sk(sk);
	u_other = unix_sk(other);
	rc = 0;
	spin_lock(&u_other->peer_wait.lock);

	if (!u->peer_wake.private) {
		u->peer_wake.private = other;
		__add_wait_queue(&u_other->peer_wait, &u->peer_wake);

		rc = 1;
	}

	spin_unlock(&u_other->peer_wait.lock);
	return rc;
}

static void unix_dgram_peer_wake_disconnect(struct sock *sk,
					    struct sock *other)
{
	struct unix_sock *u, *u_other;

	u = unix_sk(sk);
	u_other = unix_sk(other);
	spin_lock(&u_other->peer_wait.lock);

	if (u->peer_wake.private == other) {
		__remove_wait_queue(&u_other->peer_wait, &u->peer_wake);
		u->peer_wake.private = NULL;
	}

	spin_unlock(&u_other->peer_wait.lock);
}

static void unix_dgram_peer_wake_disconnect_wakeup(struct sock *sk,
						   struct sock *other)
{
	unix_dgram_peer_wake_disconnect(sk, other);
	wake_up_interruptible_poll(sk_sleep(sk),
				   EPOLLOUT |
				   EPOLLWRNORM |
				   EPOLLWRBAND);
}

/* preconditions:
 *	- unix_peer(sk) == other
 *	- association is stable
 */
static int unix_dgram_peer_wake_me(struct sock *sk, struct sock *other)
{
	int connected;

	connected = unix_dgram_peer_wake_connect(sk, other);

	/* If other is SOCK_DEAD, we want to make sure we signal
	 * POLLOUT, such that a subsequent write() can get a
	 * -ECONNREFUSED.  Otherwise, if we haven't queued any skbs
	 * to other and it's full, we will hang waiting for POLLOUT.
	 */
	if (unix_recvq_full_lockless(other) && !sock_flag(other, SOCK_DEAD))
		return 1;

	if (connected)
		unix_dgram_peer_wake_disconnect(sk, other);

	return 0;
}

static int unix_writable(const struct sock *sk)
{
	return sk->sk_state != TCP_LISTEN &&
	       (refcount_read(&sk->sk_wmem_alloc) << 2) <= sk->sk_sndbuf;
}

static void unix_write_space(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	if (unix_writable(sk)) {
		wq = rcu_dereference(sk->sk_wq);
		if (skwq_has_sleeper(wq))
			wake_up_interruptible_sync_poll(&wq->wait,
				EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND);
		sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
	}
	rcu_read_unlock();
}

/* When a dgram socket disconnects (or changes its peer), we clear its receive
 * queue of packets that arrived from the previous peer.  First, this allows
 * flow control based only on wmem_alloc; second, an sk connected to a peer
 * may receive messages only from that peer. */
static void unix_dgram_disconnected(struct sock *sk, struct sock *other)
{
	if (!skb_queue_empty(&sk->sk_receive_queue)) {
		skb_queue_purge(&sk->sk_receive_queue);
		wake_up_interruptible_all(&unix_sk(sk)->peer_wait);

		/* If one link of a bidirectional dgram pipe is
		 * disconnected, we signal error.  Messages are lost.
		 * Do not do this when the peer was not connected to us.
		 */
		if (!sock_flag(other, SOCK_DEAD) && unix_peer(other) == sk) {
			other->sk_err = ECONNRESET;
			sk_error_report(other);
		}
	}
	other->sk_state = TCP_CLOSE;
}

static void unix_sock_destructor(struct sock *sk)
{
	struct unix_sock *u = unix_sk(sk);

	skb_queue_purge(&sk->sk_receive_queue);

	DEBUG_NET_WARN_ON_ONCE(refcount_read(&sk->sk_wmem_alloc));
	DEBUG_NET_WARN_ON_ONCE(!sk_unhashed(sk));
	DEBUG_NET_WARN_ON_ONCE(sk->sk_socket);
	if (!sock_flag(sk, SOCK_DEAD)) {
		pr_info("Attempt to release alive unix socket: %p\n", sk);
		return;
	}

	if (u->addr)
		unix_release_addr(u->addr);

	atomic_long_dec(&unix_nr_socks);
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
#ifdef UNIX_REFCNT_DEBUG
	pr_debug("UNIX %p is destroyed, %ld are still alive.\n", sk,
		atomic_long_read(&unix_nr_socks));
#endif
}

static void unix_release_sock(struct sock *sk, int embrion)
{
	struct unix_sock *u = unix_sk(sk);
	struct sock *skpair;
	struct sk_buff *skb;
	struct path path;
	int state;

	unix_remove_socket(sock_net(sk), sk);
	unix_remove_bsd_socket(sk);

	/* Clear state */
	unix_state_lock(sk);
	sock_orphan(sk);
	sk->sk_shutdown = SHUTDOWN_MASK;
	path	     = u->path;
	u->path.dentry = NULL;
	u->path.mnt = NULL;
	state = sk->sk_state;
	sk->sk_state = TCP_CLOSE;

	skpair = unix_peer(sk);
	unix_peer(sk) = NULL;

	unix_state_unlock(sk);

#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
	if (u->oob_skb) {
		kfree_skb(u->oob_skb);
		u->oob_skb = NULL;
	}
#endif

	wake_up_interruptible_all(&u->peer_wait);

	if (skpair != NULL) {
		if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) {
			unix_state_lock(skpair);
			/* No more writes */
			skpair->sk_shutdown = SHUTDOWN_MASK;
			if (!skb_queue_empty(&sk->sk_receive_queue) || embrion)
				skpair->sk_err = ECONNRESET;
			unix_state_unlock(skpair);
			skpair->sk_state_change(skpair);
			sk_wake_async(skpair, SOCK_WAKE_WAITD, POLL_HUP);
		}

		unix_dgram_peer_wake_disconnect(sk, skpair);
		sock_put(skpair); /* It may now die */
	}

	/* Try to flush out this socket. Throw out buffers at least */

	while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
		if (state == TCP_LISTEN)
			unix_release_sock(skb->sk, 1);
		/* passed fds are erased in the kfree_skb hook */
		UNIXCB(skb).consumed = skb->len;
		kfree_skb(skb);
	}

	if (path.dentry)
		path_put(&path);

	sock_put(sk);

	/* ---- Socket is dead now and most probably destroyed ---- */

	/*
	 * Fixme: BSD difference: In BSD all sockets connected to us get
	 *	  ECONNRESET and we die on the spot.  In Linux we behave
	 *	  like files and pipes do and wait for the last
	 *	  dereference.
	 *
	 * Can't we simply set sock->err?
	 *
	 *	  What does the above comment talk about? --ANK(980817)
	 */

	if (unix_tot_inflight)
		unix_gc();		/* Garbage collect fds */
}

static void init_peercred(struct sock *sk)
{
	const struct cred *old_cred;
	struct pid *old_pid;

	spin_lock(&sk->sk_peer_lock);
	old_pid = sk->sk_peer_pid;
	old_cred = sk->sk_peer_cred;
	sk->sk_peer_pid  = get_pid(task_tgid(current));
	sk->sk_peer_cred = get_current_cred();
	spin_unlock(&sk->sk_peer_lock);

	put_pid(old_pid);
	put_cred(old_cred);
}

static void copy_peercred(struct sock *sk, struct sock *peersk)
{
	const struct cred *old_cred;
	struct pid *old_pid;

	if (sk < peersk) {
		spin_lock(&sk->sk_peer_lock);
		spin_lock_nested(&peersk->sk_peer_lock, SINGLE_DEPTH_NESTING);
	} else {
		spin_lock(&peersk->sk_peer_lock);
		spin_lock_nested(&sk->sk_peer_lock, SINGLE_DEPTH_NESTING);
	}
	old_pid = sk->sk_peer_pid;
	old_cred = sk->sk_peer_cred;
	sk->sk_peer_pid  = get_pid(peersk->sk_peer_pid);
	sk->sk_peer_cred = get_cred(peersk->sk_peer_cred);

	spin_unlock(&sk->sk_peer_lock);
	spin_unlock(&peersk->sk_peer_lock);

	put_pid(old_pid);
	put_cred(old_cred);
}
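
/* Illustrative userspace sketch (not part of the kernel source): the
 * credentials recorded by init_peercred()/copy_peercred() are what
 * SO_PEERCRED reports on the other end of the connection:
 *
 *	struct ucred peer;
 *	socklen_t len = sizeof(peer);
 *
 *	if (!getsockopt(fd, SOL_SOCKET, SO_PEERCRED, &peer, &len))
 *		printf("pid=%d uid=%d gid=%d\n", peer.pid, peer.uid, peer.gid);
 */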

static int unix_listen(struct socket *sock, int backlog)
{
	int err;
	struct sock *sk = sock->sk;
	struct unix_sock *u = unix_sk(sk);

	err = -EOPNOTSUPP;
	if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
		goto out;	/* Only stream/seqpacket sockets accept */
	err = -EINVAL;
	if (!u->addr)
		goto out;	/* No listens on an unbound socket */
	unix_state_lock(sk);
	if (sk->sk_state != TCP_CLOSE && sk->sk_state != TCP_LISTEN)
		goto out_unlock;
	if (backlog > sk->sk_max_ack_backlog)
		wake_up_interruptible_all(&u->peer_wait);
	sk->sk_max_ack_backlog	= backlog;
	sk->sk_state		= TCP_LISTEN;
	/* set credentials so connect can copy them */
	init_peercred(sk);
	err = 0;

out_unlock:
	unix_state_unlock(sk);
out:
	return err;
}

static int unix_release(struct socket *);
static int unix_bind(struct socket *, struct sockaddr *, int);
static int unix_stream_connect(struct socket *, struct sockaddr *,
			       int addr_len, int flags);
static int unix_socketpair(struct socket *, struct socket *);
static int unix_accept(struct socket *, struct socket *, int, bool);
static int unix_getname(struct socket *, struct sockaddr *, int);
static __poll_t unix_poll(struct file *, struct socket *, poll_table *);
static __poll_t unix_dgram_poll(struct file *, struct socket *,
				poll_table *);
static int unix_ioctl(struct socket *, unsigned int, unsigned long);
#ifdef CONFIG_COMPAT
static int unix_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
#endif
static int unix_shutdown(struct socket *, int);
static int unix_stream_sendmsg(struct socket *, struct msghdr *, size_t);
static int unix_stream_recvmsg(struct socket *, struct msghdr *, size_t, int);
static ssize_t unix_stream_sendpage(struct socket *, struct page *, int offset,
				    size_t size, int flags);
static ssize_t unix_stream_splice_read(struct socket *, loff_t *ppos,
				       struct pipe_inode_info *, size_t size,
				       unsigned int flags);
static int unix_dgram_sendmsg(struct socket *, struct msghdr *, size_t);
static int unix_dgram_recvmsg(struct socket *, struct msghdr *, size_t, int);
static int unix_read_skb(struct sock *sk, skb_read_actor_t recv_actor);
static int unix_stream_read_skb(struct sock *sk, skb_read_actor_t recv_actor);
static int unix_dgram_connect(struct socket *, struct sockaddr *,
			      int, int);
static int unix_seqpacket_sendmsg(struct socket *, struct msghdr *, size_t);
static int unix_seqpacket_recvmsg(struct socket *, struct msghdr *, size_t,
				  int);

static int unix_set_peek_off(struct sock *sk, int val)
{
	struct unix_sock *u = unix_sk(sk);

	if (mutex_lock_interruptible(&u->iolock))
		return -EINTR;

	sk->sk_peek_off = val;
	mutex_unlock(&u->iolock);

	return 0;
}
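
/* Illustrative userspace sketch (not part of the kernel source): once
 * SO_PEEK_OFF is enabled, successive MSG_PEEK reads walk forward through
 * the unread data instead of re-reading it from the start:
 *
 *	int off = 0;
 *
 *	setsockopt(fd, SOL_SOCKET, SO_PEEK_OFF, &off, sizeof(off));
 *	recv(fd, buf, 16, MSG_PEEK);	// peeks bytes 0..15
 *	recv(fd, buf, 16, MSG_PEEK);	// peeks bytes 16..31
 */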

#ifdef CONFIG_PROC_FS
static int unix_count_nr_fds(struct sock *sk)
{
	struct sk_buff *skb;
	struct unix_sock *u;
	int nr_fds = 0;

	spin_lock(&sk->sk_receive_queue.lock);
	skb = skb_peek(&sk->sk_receive_queue);
	while (skb) {
		u = unix_sk(skb->sk);
		nr_fds += atomic_read(&u->scm_stat.nr_fds);
		skb = skb_peek_next(skb, &sk->sk_receive_queue);
	}
	spin_unlock(&sk->sk_receive_queue.lock);

	return nr_fds;
}

static void unix_show_fdinfo(struct seq_file *m, struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct unix_sock *u;
	int nr_fds;

	if (sk) {
		u = unix_sk(sock->sk);
		if (sock->type == SOCK_DGRAM) {
			nr_fds = atomic_read(&u->scm_stat.nr_fds);
			goto out_print;
		}

		unix_state_lock(sk);
		if (sk->sk_state != TCP_LISTEN)
			nr_fds = atomic_read(&u->scm_stat.nr_fds);
		else
			nr_fds = unix_count_nr_fds(sk);
		unix_state_unlock(sk);
out_print:
		seq_printf(m, "scm_fds: %u\n", nr_fds);
	}
}
#else
#define unix_show_fdinfo NULL
#endif

static const struct proto_ops unix_stream_ops = {
	.family =	PF_UNIX,
	.owner =	THIS_MODULE,
	.release =	unix_release,
	.bind =		unix_bind,
	.connect =	unix_stream_connect,
	.socketpair =	unix_socketpair,
	.accept =	unix_accept,
	.getname =	unix_getname,
	.poll =		unix_poll,
	.ioctl =	unix_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl =	unix_compat_ioctl,
#endif
	.listen =	unix_listen,
	.shutdown =	unix_shutdown,
	.sendmsg =	unix_stream_sendmsg,
	.recvmsg =	unix_stream_recvmsg,
	.read_skb =	unix_stream_read_skb,
	.mmap =		sock_no_mmap,
	.sendpage =	unix_stream_sendpage,
	.splice_read =	unix_stream_splice_read,
	.set_peek_off =	unix_set_peek_off,
	.show_fdinfo =	unix_show_fdinfo,
};

static const struct proto_ops unix_dgram_ops = {
	.family =	PF_UNIX,
	.owner =	THIS_MODULE,
	.release =	unix_release,
	.bind =		unix_bind,
	.connect =	unix_dgram_connect,
	.socketpair =	unix_socketpair,
	.accept =	sock_no_accept,
	.getname =	unix_getname,
	.poll =		unix_dgram_poll,
	.ioctl =	unix_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl =	unix_compat_ioctl,
#endif
	.listen =	sock_no_listen,
	.shutdown =	unix_shutdown,
	.sendmsg =	unix_dgram_sendmsg,
	.read_skb =	unix_read_skb,
	.recvmsg =	unix_dgram_recvmsg,
	.mmap =		sock_no_mmap,
	.sendpage =	sock_no_sendpage,
	.set_peek_off =	unix_set_peek_off,
	.show_fdinfo =	unix_show_fdinfo,
};

static const struct proto_ops unix_seqpacket_ops = {
	.family =	PF_UNIX,
	.owner =	THIS_MODULE,
	.release =	unix_release,
	.bind =		unix_bind,
	.connect =	unix_stream_connect,
	.socketpair =	unix_socketpair,
	.accept =	unix_accept,
	.getname =	unix_getname,
	.poll =		unix_dgram_poll,
	.ioctl =	unix_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl =	unix_compat_ioctl,
#endif
	.listen =	unix_listen,
	.shutdown =	unix_shutdown,
	.sendmsg =	unix_seqpacket_sendmsg,
	.recvmsg =	unix_seqpacket_recvmsg,
	.mmap =		sock_no_mmap,
	.sendpage =	sock_no_sendpage,
	.set_peek_off =	unix_set_peek_off,
	.show_fdinfo =	unix_show_fdinfo,
};

static void unix_close(struct sock *sk, long timeout)
{
	/* Nothing to do here, unix socket does not need a ->close().
	 * This is merely for sockmap.
	 */
}

static void unix_unhash(struct sock *sk)
{
	/* Nothing to do here, unix socket does not need a ->unhash().
	 * This is merely for sockmap.
	 */
}

struct proto unix_dgram_proto = {
	.name			= "UNIX",
	.owner			= THIS_MODULE,
	.obj_size		= sizeof(struct unix_sock),
	.close			= unix_close,
#ifdef CONFIG_BPF_SYSCALL
	.psock_update_sk_prot	= unix_dgram_bpf_update_proto,
#endif
};

struct proto unix_stream_proto = {
	.name			= "UNIX-STREAM",
	.owner			= THIS_MODULE,
	.obj_size		= sizeof(struct unix_sock),
	.close			= unix_close,
	.unhash			= unix_unhash,
#ifdef CONFIG_BPF_SYSCALL
	.psock_update_sk_prot	= unix_stream_bpf_update_proto,
#endif
};

static struct sock *unix_create1(struct net *net, struct socket *sock, int kern, int type)
{
	struct unix_sock *u;
	struct sock *sk;
	int err;

	atomic_long_inc(&unix_nr_socks);
	if (atomic_long_read(&unix_nr_socks) > 2 * get_max_files()) {
		err = -ENFILE;
		goto err;
	}

	if (type == SOCK_STREAM)
		sk = sk_alloc(net, PF_UNIX, GFP_KERNEL, &unix_stream_proto, kern);
	else /* dgram and seqpacket */
		sk = sk_alloc(net, PF_UNIX, GFP_KERNEL, &unix_dgram_proto, kern);

	if (!sk) {
		err = -ENOMEM;
		goto err;
	}

	sock_init_data(sock, sk);

	sk->sk_hash		= unix_unbound_hash(sk);
	sk->sk_allocation	= GFP_KERNEL_ACCOUNT;
	sk->sk_write_space	= unix_write_space;
	sk->sk_max_ack_backlog	= net->unx.sysctl_max_dgram_qlen;
	sk->sk_destruct		= unix_sock_destructor;
	u = unix_sk(sk);
	u->path.dentry = NULL;
	u->path.mnt = NULL;
	spin_lock_init(&u->lock);
	atomic_long_set(&u->inflight, 0);
	INIT_LIST_HEAD(&u->link);
	mutex_init(&u->iolock); /* single task reading lock */
	mutex_init(&u->bindlock); /* single task binding lock */
	init_waitqueue_head(&u->peer_wait);
	init_waitqueue_func_entry(&u->peer_wake, unix_dgram_peer_wake_relay);
	memset(&u->scm_stat, 0, sizeof(struct scm_stat));
	unix_insert_unbound_socket(net, sk);

	sock_prot_inuse_add(net, sk->sk_prot, 1);

	return sk;

err:
	atomic_long_dec(&unix_nr_socks);
	return ERR_PTR(err);
}

static int unix_create(struct net *net, struct socket *sock, int protocol,
		       int kern)
{
	struct sock *sk;

	if (protocol && protocol != PF_UNIX)
		return -EPROTONOSUPPORT;

	sock->state = SS_UNCONNECTED;

	switch (sock->type) {
	case SOCK_STREAM:
		sock->ops = &unix_stream_ops;
		break;
		/*
		 * Believe it or not BSD has AF_UNIX, SOCK_RAW though
		 * nothing uses it.
		 */
	case SOCK_RAW:
		sock->type = SOCK_DGRAM;
		fallthrough;
	case SOCK_DGRAM:
		sock->ops = &unix_dgram_ops;
		break;
	case SOCK_SEQPACKET:
		sock->ops = &unix_seqpacket_ops;
		break;
	default:
		return -ESOCKTNOSUPPORT;
	}

	sk = unix_create1(net, sock, kern, sock->type);
	if (IS_ERR(sk))
		return PTR_ERR(sk);

	return 0;
}

static int unix_release(struct socket *sock)
{
	struct sock *sk = sock->sk;

	if (!sk)
		return 0;

	sk->sk_prot->close(sk, 0);
	unix_release_sock(sk, 0);
	sock->sk = NULL;

	return 0;
}

static struct sock *unix_find_bsd(struct sockaddr_un *sunaddr, int addr_len,
				  int type)
{
	struct inode *inode;
	struct path path;
	struct sock *sk;
	int err;

	unix_mkname_bsd(sunaddr, addr_len);
	err = kern_path(sunaddr->sun_path, LOOKUP_FOLLOW, &path);
	if (err)
		goto fail;

	err = path_permission(&path, MAY_WRITE);
	if (err)
		goto path_put;

	err = -ECONNREFUSED;
	inode = d_backing_inode(path.dentry);
	if (!S_ISSOCK(inode->i_mode))
		goto path_put;

	sk = unix_find_socket_byinode(inode);
	if (!sk)
		goto path_put;

	err = -EPROTOTYPE;
	if (sk->sk_type == type) {
		touch_atime(&path);
		path_put(&path);
		return sk;
	}

	sock_put(sk);

path_put:
	path_put(&path);
fail:
	return ERR_PTR(err);
}

static struct sock *unix_find_abstract(struct net *net,
				       struct sockaddr_un *sunaddr,
				       int addr_len, int type)
{
	unsigned int hash = unix_abstract_hash(sunaddr, addr_len, type);
	struct dentry *dentry;
	struct sock *sk;

	sk = unix_find_socket_byname(net, sunaddr, addr_len, hash);
	if (!sk)
		return ERR_PTR(-ECONNREFUSED);

	dentry = unix_sk(sk)->path.dentry;
	if (dentry)
		touch_atime(&unix_sk(sk)->path);

	return sk;
}

static struct sock *unix_find_other(struct net *net,
				    struct sockaddr_un *sunaddr,
				    int addr_len, int type)
{
	struct sock *sk;

	if (sunaddr->sun_path[0])
		sk = unix_find_bsd(sunaddr, addr_len, type);
	else
		sk = unix_find_abstract(net, sunaddr, addr_len, type);

	return sk;
}

static int unix_autobind(struct sock *sk)
{
	unsigned int new_hash, old_hash = sk->sk_hash;
	struct unix_sock *u = unix_sk(sk);
	struct net *net = sock_net(sk);
	struct unix_address *addr;
	u32 lastnum, ordernum;
	int err;

	err = mutex_lock_interruptible(&u->bindlock);
	if (err)
		return err;

	if (u->addr)
		goto out;

	err = -ENOMEM;
	addr = kzalloc(sizeof(*addr) +
		       offsetof(struct sockaddr_un, sun_path) + 16, GFP_KERNEL);
	if (!addr)
		goto out;

	addr->len = offsetof(struct sockaddr_un, sun_path) + 6;
	addr->name->sun_family = AF_UNIX;
	refcount_set(&addr->refcnt, 1);

	ordernum = get_random_u32();
	lastnum = ordernum & 0xFFFFF;
retry:
	ordernum = (ordernum + 1) & 0xFFFFF;
	sprintf(addr->name->sun_path + 1, "%05x", ordernum);

	new_hash = unix_abstract_hash(addr->name, addr->len, sk->sk_type);
	unix_table_double_lock(net, old_hash, new_hash);

	if (__unix_find_socket_byname(net, addr->name, addr->len, new_hash)) {
		unix_table_double_unlock(net, old_hash, new_hash);

		/* __unix_find_socket_byname() may take a long time if many
		 * names are already in use.
		 */
		cond_resched();

		if (ordernum == lastnum) {
			/* Give up if all names seem to be in use. */
			err = -ENOSPC;
			unix_release_addr(addr);
			goto out;
		}

		goto retry;
	}

	__unix_set_addr_hash(net, sk, addr, new_hash);
	unix_table_double_unlock(net, old_hash, new_hash);
	err = 0;

out:	mutex_unlock(&u->bindlock);
	return err;
}
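
/* Illustrative userspace sketch (not part of the kernel source): binding
 * with only the address family triggers the autobind above, which picks a
 * free 5-hex-digit abstract name visible through getsockname():
 *
 *	struct sockaddr_un sun = { .sun_family = AF_UNIX };
 *	socklen_t len = sizeof(sun);
 *
 *	bind(fd, (struct sockaddr *)&sun,
 *	     offsetof(struct sockaddr_un, sun_path));
 *	getsockname(fd, (struct sockaddr *)&sun, &len);	// e.g. "\0a1b2c"
 */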

static int unix_bind_bsd(struct sock *sk, struct sockaddr_un *sunaddr,
			 int addr_len)
{
	umode_t mode = S_IFSOCK |
	       (SOCK_INODE(sk->sk_socket)->i_mode & ~current_umask());
	unsigned int new_hash, old_hash = sk->sk_hash;
	struct unix_sock *u = unix_sk(sk);
	struct net *net = sock_net(sk);
	struct mnt_idmap *idmap;
	struct unix_address *addr;
	struct dentry *dentry;
	struct path parent;
	int err;

	unix_mkname_bsd(sunaddr, addr_len);
	addr_len = strlen(sunaddr->sun_path) +
		offsetof(struct sockaddr_un, sun_path) + 1;

	addr = unix_create_addr(sunaddr, addr_len);
	if (!addr)
		return -ENOMEM;

	/*
	 * Get the parent directory, calculate the hash for the last
	 * component.
	 */
	dentry = kern_path_create(AT_FDCWD, addr->name->sun_path, &parent, 0);
	if (IS_ERR(dentry)) {
		err = PTR_ERR(dentry);
		goto out;
	}

	/*
	 * All right, let's create it.
	 */
	idmap = mnt_idmap(parent.mnt);
	err = security_path_mknod(&parent, dentry, mode, 0);
	if (!err)
		err = vfs_mknod(idmap, d_inode(parent.dentry), dentry, mode, 0);
	if (err)
		goto out_path;
	err = mutex_lock_interruptible(&u->bindlock);
	if (err)
		goto out_unlink;
	if (u->addr)
		goto out_unlock;

	new_hash = unix_bsd_hash(d_backing_inode(dentry));
	unix_table_double_lock(net, old_hash, new_hash);
	u->path.mnt = mntget(parent.mnt);
	u->path.dentry = dget(dentry);
	__unix_set_addr_hash(net, sk, addr, new_hash);
	unix_table_double_unlock(net, old_hash, new_hash);
	unix_insert_bsd_socket(sk);
	mutex_unlock(&u->bindlock);
	done_path_create(&parent, dentry);
	return 0;

out_unlock:
	mutex_unlock(&u->bindlock);
	err = -EINVAL;
out_unlink:
	/* failed after successful mknod?  unlink what we'd created... */
	vfs_unlink(idmap, d_inode(parent.dentry), dentry, NULL);
out_path:
	done_path_create(&parent, dentry);
out:
	unix_release_addr(addr);
	return err == -EEXIST ? -EADDRINUSE : err;
}

static int unix_bind_abstract(struct sock *sk, struct sockaddr_un *sunaddr,
			      int addr_len)
{
	unsigned int new_hash, old_hash = sk->sk_hash;
	struct unix_sock *u = unix_sk(sk);
	struct net *net = sock_net(sk);
	struct unix_address *addr;
	int err;

	addr = unix_create_addr(sunaddr, addr_len);
	if (!addr)
		return -ENOMEM;

	err = mutex_lock_interruptible(&u->bindlock);
	if (err)
		goto out;

	if (u->addr) {
		err = -EINVAL;
		goto out_mutex;
	}

	new_hash = unix_abstract_hash(addr->name, addr->len, sk->sk_type);
	unix_table_double_lock(net, old_hash, new_hash);

	if (__unix_find_socket_byname(net, addr->name, addr->len, new_hash))
		goto out_spin;

	__unix_set_addr_hash(net, sk, addr, new_hash);
	unix_table_double_unlock(net, old_hash, new_hash);
	mutex_unlock(&u->bindlock);
	return 0;

out_spin:
	unix_table_double_unlock(net, old_hash, new_hash);
	err = -EADDRINUSE;
out_mutex:
	mutex_unlock(&u->bindlock);
out:
	unix_release_addr(addr);
	return err;
}

static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
	struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
	struct sock *sk = sock->sk;
	int err;

	if (addr_len == offsetof(struct sockaddr_un, sun_path) &&
	    sunaddr->sun_family == AF_UNIX)
		return unix_autobind(sk);

	err = unix_validate_addr(sunaddr, addr_len);
	if (err)
		return err;

	if (sunaddr->sun_path[0])
		err = unix_bind_bsd(sk, sunaddr, addr_len);
	else
		err = unix_bind_abstract(sk, sunaddr, addr_len);

	return err;
}

static void unix_state_double_lock(struct sock *sk1, struct sock *sk2)
{
	if (unlikely(sk1 == sk2) || !sk2) {
		unix_state_lock(sk1);
		return;
	}

	if (sk1 < sk2) {
		unix_state_lock(sk1);
		unix_state_lock_nested(sk2);
	} else {
		unix_state_lock(sk2);
		unix_state_lock_nested(sk1);
	}
}

static void unix_state_double_unlock(struct sock *sk1, struct sock *sk2)
{
	if (unlikely(sk1 == sk2) || !sk2) {
		unix_state_unlock(sk1);
		return;
	}

	unix_state_unlock(sk1);
	unix_state_unlock(sk2);
}

static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr,
			      int alen, int flags)
{
	struct sockaddr_un *sunaddr = (struct sockaddr_un *)addr;
	struct sock *sk = sock->sk;
	struct sock *other;
	int err;

	err = -EINVAL;
	if (alen < offsetofend(struct sockaddr, sa_family))
		goto out;

	if (addr->sa_family != AF_UNSPEC) {
		err = unix_validate_addr(sunaddr, alen);
		if (err)
			goto out;

		if (test_bit(SOCK_PASSCRED, &sock->flags) &&
		    !unix_sk(sk)->addr) {
			err = unix_autobind(sk);
			if (err)
				goto out;
		}

restart:
		other = unix_find_other(sock_net(sk), sunaddr, alen, sock->type);
		if (IS_ERR(other)) {
			err = PTR_ERR(other);
			goto out;
		}

		unix_state_double_lock(sk, other);

		/* Apparently VFS overslept socket death. Retry. */
		if (sock_flag(other, SOCK_DEAD)) {
			unix_state_double_unlock(sk, other);
			sock_put(other);
			goto restart;
		}

		err = -EPERM;
		if (!unix_may_send(sk, other))
			goto out_unlock;

		err = security_unix_may_send(sk->sk_socket, other->sk_socket);
		if (err)
			goto out_unlock;

		sk->sk_state = other->sk_state = TCP_ESTABLISHED;
	} else {
		/*
		 * 1003.1g breaking connected state with AF_UNSPEC
		 */
		other = NULL;
		unix_state_double_lock(sk, other);
	}

	/*
	 * If it was connected, reconnect.
	 */
	if (unix_peer(sk)) {
		struct sock *old_peer = unix_peer(sk);

		unix_peer(sk) = other;
		if (!other)
			sk->sk_state = TCP_CLOSE;
		unix_dgram_peer_wake_disconnect_wakeup(sk, old_peer);

		unix_state_double_unlock(sk, other);

		if (other != old_peer)
			unix_dgram_disconnected(sk, old_peer);
		sock_put(old_peer);
	} else {
		unix_peer(sk) = other;
		unix_state_double_unlock(sk, other);
	}

	return 0;

out_unlock:
	unix_state_double_unlock(sk, other);
	sock_put(other);
out:
	return err;
}

static long unix_wait_for_peer(struct sock *other, long timeo)
	__releases(&unix_sk(other)->lock)
{
	struct unix_sock *u = unix_sk(other);
	int sched;
	DEFINE_WAIT(wait);

	prepare_to_wait_exclusive(&u->peer_wait, &wait, TASK_INTERRUPTIBLE);

	sched = !sock_flag(other, SOCK_DEAD) &&
		!(other->sk_shutdown & RCV_SHUTDOWN) &&
		unix_recvq_full(other);

	unix_state_unlock(other);

	if (sched)
		timeo = schedule_timeout(timeo);

	finish_wait(&u->peer_wait, &wait);
	return timeo;
}

static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
			       int addr_len, int flags)
{
	struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
	struct sock *sk = sock->sk, *newsk = NULL, *other = NULL;
	struct unix_sock *u = unix_sk(sk), *newu, *otheru;
	struct net *net = sock_net(sk);
	struct sk_buff *skb = NULL;
	long timeo;
	int err;
	int st;

	err = unix_validate_addr(sunaddr, addr_len);
	if (err)
		goto out;

	if (test_bit(SOCK_PASSCRED, &sock->flags) && !u->addr) {
		err = unix_autobind(sk);
		if (err)
			goto out;
	}

	timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);

	/* First of all allocate resources.
	 * If we do it after the state is locked,
	 * we will have to recheck everything again in any case.
	 */

	/* create new sock for complete connection */
	newsk = unix_create1(net, NULL, 0, sock->type);
	if (IS_ERR(newsk)) {
		err = PTR_ERR(newsk);
		newsk = NULL;
		goto out;
	}

	err = -ENOMEM;

	/* Allocate skb for sending to listening sock */
	skb = sock_wmalloc(newsk, 1, 0, GFP_KERNEL);
	if (skb == NULL)
		goto out;

restart:
	/* Find listening sock. */
	other = unix_find_other(net, sunaddr, addr_len, sk->sk_type);
	if (IS_ERR(other)) {
		err = PTR_ERR(other);
		other = NULL;
		goto out;
	}

	/* Latch state of peer */
	unix_state_lock(other);

	/* Apparently VFS overslept socket death. Retry. */
	if (sock_flag(other, SOCK_DEAD)) {
		unix_state_unlock(other);
		sock_put(other);
		goto restart;
	}

	err = -ECONNREFUSED;
	if (other->sk_state != TCP_LISTEN)
		goto out_unlock;
	if (other->sk_shutdown & RCV_SHUTDOWN)
		goto out_unlock;

	if (unix_recvq_full(other)) {
		err = -EAGAIN;
		if (!timeo)
			goto out_unlock;

		timeo = unix_wait_for_peer(other, timeo);

		err = sock_intr_errno(timeo);
		if (signal_pending(current))
			goto out;
		sock_put(other);
		goto restart;
	}

	/* Latch our state.
	 *
	 * It is a tricky place.  We need to grab our state lock and cannot
	 * drop the lock on the peer.  It is dangerous because deadlock is
	 * possible.  The connect-to-self case and simultaneous
	 * attempts to connect are eliminated by checking the socket
	 * state.  other is TCP_LISTEN; if sk is TCP_LISTEN, we
	 * check this before attempting to grab the lock.
	 *
	 * Well, and we have to recheck the state after the socket is locked.
	 */
	st = sk->sk_state;

	switch (st) {
	case TCP_CLOSE:
		/* This is ok... continue with connect */
		break;
	case TCP_ESTABLISHED:
		/* Socket is already connected */
		err = -EISCONN;
		goto out_unlock;
	default:
		err = -EINVAL;
		goto out_unlock;
	}

	unix_state_lock_nested(sk);

	if (sk->sk_state != st) {
		unix_state_unlock(sk);
		unix_state_unlock(other);
		sock_put(other);
		goto restart;
	}

	err = security_unix_stream_connect(sk, other, newsk);
	if (err) {
		unix_state_unlock(sk);
		goto out_unlock;
	}

	/* The way is open! Fastly set all the necessary fields... */

	sock_hold(sk);
	unix_peer(newsk)	= sk;
	newsk->sk_state		= TCP_ESTABLISHED;
	newsk->sk_type		= sk->sk_type;
	init_peercred(newsk);
	newu = unix_sk(newsk);
	RCU_INIT_POINTER(newsk->sk_wq, &newu->peer_wq);
	otheru = unix_sk(other);

	/* copy address information from listening to new sock
	 *
	 * The contents of *(otheru->addr) and otheru->path
	 * are seen fully set up here, since we have found
	 * otheru in hash under its lock.  Insertion into the
	 * hash chain we'd found it in had been done in an
	 * earlier critical area protected by the chain's lock,
	 * the same one where we'd set *(otheru->addr) contents,
	 * as well as otheru->path and otheru->addr itself.
	 *
	 * Using smp_store_release() here to set newu->addr
	 * is enough to make those stores, as well as stores
	 * to newu->path visible to anyone who gets newu->addr
	 * by smp_load_acquire().  IOW, the same guarantees
	 * as for unix_sock instances bound in unix_bind() or
	 * in unix_autobind().
	 */
	if (otheru->path.dentry) {
		path_get(&otheru->path);
		newu->path = otheru->path;
	}
	refcount_inc(&otheru->addr->refcnt);
	smp_store_release(&newu->addr, otheru->addr);

	/* Set credentials */
	copy_peercred(sk, other);

	sock->state	= SS_CONNECTED;
	sk->sk_state	= TCP_ESTABLISHED;
	sock_hold(newsk);

	smp_mb__after_atomic();	/* sock_hold() does an atomic_inc() */
	unix_peer(sk)	= newsk;

	unix_state_unlock(sk);

	/* take ten and send info to listening sock */
	spin_lock(&other->sk_receive_queue.lock);
	__skb_queue_tail(&other->sk_receive_queue, skb);
	spin_unlock(&other->sk_receive_queue.lock);
	unix_state_unlock(other);
	other->sk_data_ready(other);
	sock_put(other);
	return 0;

out_unlock:
	if (other)
		unix_state_unlock(other);

out:
	kfree_skb(skb);
	if (newsk)
		unix_release_sock(newsk, 0);
	if (other)
		sock_put(other);
	return err;
}

static int unix_socketpair(struct socket *socka, struct socket *sockb)
{
	struct sock *ska = socka->sk, *skb = sockb->sk;

	/* Join our sockets back to back */
	sock_hold(ska);
	sock_hold(skb);
	unix_peer(ska) = skb;
	unix_peer(skb) = ska;
	init_peercred(ska);
	init_peercred(skb);

	ska->sk_state = TCP_ESTABLISHED;
	skb->sk_state = TCP_ESTABLISHED;
	socka->state  = SS_CONNECTED;
	sockb->state  = SS_CONNECTED;
	return 0;
}
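
/* Illustrative userspace sketch (not part of the kernel source): the pair
 * built above is symmetric and already connected, with peer credentials
 * set on both ends:
 *
 *	char buf[4];
 *	int sv[2];
 *
 *	if (socketpair(AF_UNIX, SOCK_STREAM, 0, sv) == 0) {
 *		write(sv[0], "ping", 4);
 *		read(sv[1], buf, sizeof(buf));	// reads "ping"
 *	}
 */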

static void unix_sock_inherit_flags(const struct socket *old,
				    struct socket *new)
{
	if (test_bit(SOCK_PASSCRED, &old->flags))
		set_bit(SOCK_PASSCRED, &new->flags);
	if (test_bit(SOCK_PASSSEC, &old->flags))
		set_bit(SOCK_PASSSEC, &new->flags);
}

static int unix_accept(struct socket *sock, struct socket *newsock, int flags,
		       bool kern)
{
	struct sock *sk = sock->sk;
	struct sock *tsk;
	struct sk_buff *skb;
	int err;

	err = -EOPNOTSUPP;
	if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
		goto out;

	err = -EINVAL;
	if (sk->sk_state != TCP_LISTEN)
		goto out;

	/* If socket state is TCP_LISTEN it cannot change (for now...),
	 * so that no locks are necessary.
	 */

	skb = skb_recv_datagram(sk, (flags & O_NONBLOCK) ? MSG_DONTWAIT : 0,
				&err);
	if (!skb) {
		/* This means receive shutdown. */
		if (err == 0)
			err = -EINVAL;
		goto out;
	}

	tsk = skb->sk;
	skb_free_datagram(sk, skb);
	wake_up_interruptible(&unix_sk(sk)->peer_wait);

	/* attach accepted sock to socket */
	unix_state_lock(tsk);
	newsock->state = SS_CONNECTED;
	unix_sock_inherit_flags(sock, newsock);
	sock_graft(tsk, newsock);
	unix_state_unlock(tsk);
	return 0;

out:
	return err;
}

static int unix_getname(struct socket *sock, struct sockaddr *uaddr, int peer)
{
	struct sock *sk = sock->sk;
	struct unix_address *addr;
	DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, uaddr);
	int err = 0;

	if (peer) {
		sk = unix_peer_get(sk);

		err = -ENOTCONN;
		if (!sk)
			goto out;
		err = 0;
	} else {
		sock_hold(sk);
	}

	addr = smp_load_acquire(&unix_sk(sk)->addr);
	if (!addr) {
		sunaddr->sun_family = AF_UNIX;
		sunaddr->sun_path[0] = 0;
		err = offsetof(struct sockaddr_un, sun_path);
	} else {
		err = addr->len;
		memcpy(sunaddr, addr->name, addr->len);
	}
	sock_put(sk);
out:
	return err;
}

static void unix_peek_fds(struct scm_cookie *scm, struct sk_buff *skb)
{
	scm->fp = scm_fp_dup(UNIXCB(skb).fp);

	/*
	 * Garbage collection of unix sockets starts by selecting a set of
	 * candidate sockets which have reference only from being in flight
	 * (total_refs == inflight_refs).  This condition is checked once during
	 * the candidate collection phase, and candidates are marked as such, so
	 * that non-candidates can later be ignored.  While inflight_refs is
	 * protected by unix_gc_lock, total_refs (file count) is not, hence this
	 * is an instantaneous decision.
	 *
	 * Once a candidate, however, the socket must not be reinstalled into a
	 * file descriptor while the garbage collection is in progress.
	 *
	 * If the above conditions are met, then the directed graph of
	 * candidates (*) does not change while unix_gc_lock is held.
	 *
	 * Any operations that change the file count through file descriptors
	 * (dup, close, sendmsg) do not change the graph since candidates are
	 * not installed in fds.
	 *
	 * Dequeueing a candidate via recvmsg would install it into an fd, but
	 * that takes unix_gc_lock to decrement the inflight count, so it's
	 * serialized with garbage collection.
	 *
	 * MSG_PEEK is special in that it does not change the inflight count,
	 * yet does install the socket into an fd.  The following lock/unlock
	 * pair is to ensure serialization with garbage collection.  It must be
	 * done between incrementing the file count and installing the file into
	 * an fd.
	 *
	 * If garbage collection starts after the barrier provided by the
	 * lock/unlock, then it will see the elevated refcount and not mark this
	 * as a candidate.  If a garbage collection is already in progress
	 * before the file count was incremented, then the lock/unlock pair will
	 * ensure that garbage collection is finished before progressing to
	 * installing the fd.
	 *
	 * (*) A -> B where B is on the queue of A or B is on the queue of C
	 * which is on the queue of listening socket A.
	 */
	spin_lock(&unix_gc_lock);
	spin_unlock(&unix_gc_lock);
}
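
/* Illustrative userspace sketch (not part of the kernel source): the fd
 * passing that the GC serialization above protects is done with an
 * SCM_RIGHTS control message (sock_fd and fd_to_pass are assumed to exist):
 *
 *	char data = 'x', ctrl[CMSG_SPACE(sizeof(int))];
 *	struct iovec iov = { .iov_base = &data, .iov_len = 1 };
 *	struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1,
 *			      .msg_control = ctrl,
 *			      .msg_controllen = sizeof(ctrl) };
 *	struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
 *
 *	cmsg->cmsg_level = SOL_SOCKET;
 *	cmsg->cmsg_type = SCM_RIGHTS;
 *	cmsg->cmsg_len = CMSG_LEN(sizeof(int));
 *	memcpy(CMSG_DATA(cmsg), &fd_to_pass, sizeof(int));
 *	sendmsg(sock_fd, &msg, 0);
 */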

static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool send_fds)
{
	int err = 0;

	UNIXCB(skb).pid  = get_pid(scm->pid);
	UNIXCB(skb).uid = scm->creds.uid;
	UNIXCB(skb).gid = scm->creds.gid;
	UNIXCB(skb).fp = NULL;
	unix_get_secdata(scm, skb);
	if (scm->fp && send_fds)
		err = unix_attach_fds(scm, skb);

	skb->destructor = unix_destruct_scm;
	return err;
}

static bool unix_passcred_enabled(const struct socket *sock,
				  const struct sock *other)
{
	return test_bit(SOCK_PASSCRED, &sock->flags) ||
	       !other->sk_socket ||
	       test_bit(SOCK_PASSCRED, &other->sk_socket->flags);
}

/*
 * Some apps rely on write() giving SCM_CREDENTIALS.
 * We include credentials if the source or destination socket
 * asserted SOCK_PASSCRED.
 */
static void maybe_add_creds(struct sk_buff *skb, const struct socket *sock,
			    const struct sock *other)
{
	if (UNIXCB(skb).pid)
		return;
	if (unix_passcred_enabled(sock, other)) {
		UNIXCB(skb).pid  = get_pid(task_tgid(current));
		current_uid_gid(&UNIXCB(skb).uid, &UNIXCB(skb).gid);
	}
}

static int maybe_init_creds(struct scm_cookie *scm,
			    struct socket *socket,
			    const struct sock *other)
{
	int err;
	struct msghdr msg = { .msg_controllen = 0 };

	err = scm_send(socket, &msg, scm, false);
	if (err)
		return err;

	if (unix_passcred_enabled(socket, other)) {
		scm->pid = get_pid(task_tgid(current));
		current_uid_gid(&scm->creds.uid, &scm->creds.gid);
	}
	return err;
}

static bool unix_skb_scm_eq(struct sk_buff *skb,
			    struct scm_cookie *scm)
{
	return UNIXCB(skb).pid == scm->pid &&
	       uid_eq(UNIXCB(skb).uid, scm->creds.uid) &&
	       gid_eq(UNIXCB(skb).gid, scm->creds.gid) &&
	       unix_secdata_eq(scm, skb);
}

static void scm_stat_add(struct sock *sk, struct sk_buff *skb)
{
	struct scm_fp_list *fp = UNIXCB(skb).fp;
	struct unix_sock *u = unix_sk(sk);

	if (unlikely(fp && fp->count))
		atomic_add(fp->count, &u->scm_stat.nr_fds);
}

static void scm_stat_del(struct sock *sk, struct sk_buff *skb)
{
	struct scm_fp_list *fp = UNIXCB(skb).fp;
	struct unix_sock *u = unix_sk(sk);

	if (unlikely(fp && fp->count))
		atomic_sub(fp->count, &u->scm_stat.nr_fds);
}

/*
 *	Send AF_UNIX data.
 */

static int unix_dgram_sendmsg(struct socket *sock, struct msghdr *msg,
			      size_t len)
{
	DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, msg->msg_name);
	struct sock *sk = sock->sk, *other = NULL;
	struct unix_sock *u = unix_sk(sk);
	struct scm_cookie scm;
	struct sk_buff *skb;
	int data_len = 0;
	int sk_locked;
	long timeo;
	int err;

	wait_for_unix_gc();
	err = scm_send(sock, msg, &scm, false);
	if (err < 0)
		return err;

	err = -EOPNOTSUPP;
	if (msg->msg_flags&MSG_OOB)
		goto out;

	if (msg->msg_namelen) {
		err = unix_validate_addr(sunaddr, msg->msg_namelen);
		if (err)
			goto out;
	} else {
		sunaddr = NULL;
		err = -ENOTCONN;
		other = unix_peer_get(sk);
		if (!other)
			goto out;
	}

	if (test_bit(SOCK_PASSCRED, &sock->flags) && !u->addr) {
		err = unix_autobind(sk);
		if (err)
			goto out;
	}

	err = -EMSGSIZE;
	if (len > sk->sk_sndbuf - 32)
		goto out;

	if (len > SKB_MAX_ALLOC) {
		data_len = min_t(size_t,
				 len - SKB_MAX_ALLOC,
				 MAX_SKB_FRAGS * PAGE_SIZE);
		data_len = PAGE_ALIGN(data_len);

		BUILD_BUG_ON(SKB_MAX_ALLOC < PAGE_SIZE);
	}

	skb = sock_alloc_send_pskb(sk, len - data_len, data_len,
				   msg->msg_flags & MSG_DONTWAIT, &err,
				   PAGE_ALLOC_COSTLY_ORDER);
	if (skb == NULL)
		goto out;

	err = unix_scm_to_skb(&scm, skb, true);
	if (err < 0)
		goto out_free;

	skb_put(skb, len - data_len);
	skb->data_len = data_len;
	skb->len = len;
	err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, len);
	if (err)
		goto out_free;

	timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);

restart:
	if (!other) {
		err = -ECONNRESET;
		if (sunaddr == NULL)
			goto out_free;

		other = unix_find_other(sock_net(sk), sunaddr, msg->msg_namelen,
					sk->sk_type);
		if (IS_ERR(other)) {
			err = PTR_ERR(other);
			other = NULL;
			goto out_free;
		}
	}

	if (sk_filter(other, skb) < 0) {
		/* Toss the packet but do not return any error to the sender */
		err = len;
		goto out_free;
	}

	sk_locked = 0;
	unix_state_lock(other);
restart_locked:
	err = -EPERM;
	if (!unix_may_send(sk, other))
		goto out_unlock;

	if (unlikely(sock_flag(other, SOCK_DEAD))) {
		/*
		 *	Check with 1003.1g - what should
		 *	datagram error
		 */
		unix_state_unlock(other);
		sock_put(other);

		if (!sk_locked)
			unix_state_lock(sk);

		err = 0;
		if (sk->sk_type == SOCK_SEQPACKET) {
			/* We are here only when racing with unix_release_sock()
			 * is clearing @other.  Never change state to TCP_CLOSE,
			 * unlike SOCK_DGRAM wants.
			 */
			unix_state_unlock(sk);
			err = -EPIPE;
		} else if (unix_peer(sk) == other) {
			unix_peer(sk) = NULL;
			unix_dgram_peer_wake_disconnect_wakeup(sk, other);

			sk->sk_state = TCP_CLOSE;
			unix_state_unlock(sk);

			unix_dgram_disconnected(sk, other);
			sock_put(other);
			err = -ECONNREFUSED;
		} else {
			unix_state_unlock(sk);
		}

		other = NULL;
		if (err)
			goto out_free;
		goto restart;
	}

	err = -EPIPE;
	if (other->sk_shutdown & RCV_SHUTDOWN)
		goto out_unlock;

	if (sk->sk_type != SOCK_SEQPACKET) {
		err = security_unix_may_send(sk->sk_socket, other->sk_socket);
		if (err)
			goto out_unlock;
	}

	/* other == sk && unix_peer(other) != sk if
	 * - unix_peer(sk) == NULL, destination address bound to sk
	 * - unix_peer(sk) == sk by time of get but disconnected before lock
	 */
	if (other != sk &&
	    unlikely(unix_peer(other) != sk &&
	    unix_recvq_full_lockless(other))) {
		if (timeo) {
			timeo = unix_wait_for_peer(other, timeo);

			err = sock_intr_errno(timeo);
			if (signal_pending(current))
				goto out_free;

			goto restart;
		}

		if (!sk_locked) {
			unix_state_unlock(other);
			unix_state_double_lock(sk, other);
		}

		if (unix_peer(sk) != other ||
		    unix_dgram_peer_wake_me(sk, other)) {
			err = -EAGAIN;
			sk_locked = 1;
			goto out_unlock;
		}

		if (!sk_locked) {
			sk_locked = 1;
			goto restart_locked;
		}
	}

	if (unlikely(sk_locked))
		unix_state_unlock(sk);

	if (sock_flag(other, SOCK_RCVTSTAMP))
		__net_timestamp(skb);
	maybe_add_creds(skb, sock, other);
	scm_stat_add(other, skb);
	skb_queue_tail(&other->sk_receive_queue, skb);
	unix_state_unlock(other);
	other->sk_data_ready(other);
	sock_put(other);
	scm_destroy(&scm);
	return len;

out_unlock:
	if (sk_locked)
		unix_state_unlock(sk);
	unix_state_unlock(other);
out_free:
	kfree_skb(skb);
out:
	if (other)
		sock_put(other);
	scm_destroy(&scm);
	return err;
}

/* We use paged skbs for stream sockets, and limit occupancy to 32768
 * bytes, and a minimum of a full page.
 */
#define UNIX_SKB_FRAGS_SZ (PAGE_SIZE << get_order(32768))

#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
static int queue_oob(struct socket *sock, struct msghdr *msg, struct sock *other)
{
	struct unix_sock *ousk = unix_sk(other);
	struct sk_buff *skb;
	int err = 0;

	skb = sock_alloc_send_skb(sock->sk, 1, msg->msg_flags & MSG_DONTWAIT, &err);

	if (!skb)
		return err;

	skb_put(skb, 1);
	err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, 1);
	if (err) {
		kfree_skb(skb);
		return err;
	}

	unix_state_lock(other);

	if (sock_flag(other, SOCK_DEAD) ||
	    (other->sk_shutdown & RCV_SHUTDOWN)) {
		unix_state_unlock(other);
		kfree_skb(skb);
		return -EPIPE;
	}

	maybe_add_creds(skb, sock, other);
	skb_get(skb);

	if (ousk->oob_skb)
		consume_skb(ousk->oob_skb);

	WRITE_ONCE(ousk->oob_skb, skb);

	scm_stat_add(other, skb);
	skb_queue_tail(&other->sk_receive_queue, skb);
	sk_send_sigurg(other);
	unix_state_unlock(other);
	other->sk_data_ready(other);

	return err;
}
#endif
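
/* Illustrative userspace sketch (not part of the kernel source): the single
 * out-of-band byte queued by queue_oob() is fetched with MSG_OOB on the
 * receiving end (sv[] is assumed to be a connected stream socketpair):
 *
 *	char c;
 *
 *	send(sv[0], "ab", 2, 0);
 *	send(sv[0], "c", 1, MSG_OOB);	// 'c' becomes the OOB byte
 *	recv(sv[1], &c, 1, MSG_OOB);	// fetches 'c' ahead of the stream
 */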

static int unix_stream_sendmsg(struct socket *sock, struct msghdr *msg,
			       size_t len)
{
	struct sock *sk = sock->sk;
	struct sock *other = NULL;
	int err, size;
	struct sk_buff *skb;
	int sent = 0;
	struct scm_cookie scm;
	bool fds_sent = false;
	int data_len;

	wait_for_unix_gc();
	err = scm_send(sock, msg, &scm, false);
	if (err < 0)
		return err;

	err = -EOPNOTSUPP;
	if (msg->msg_flags & MSG_OOB) {
#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
		if (len)
			len--;
		else
#endif
			goto out_err;
	}

	if (msg->msg_namelen) {
		err = sk->sk_state == TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP;
		goto out_err;
	} else {
		err = -ENOTCONN;
		other = unix_peer(sk);
		if (!other)
			goto out_err;
	}

	if (sk->sk_shutdown & SEND_SHUTDOWN)
		goto pipe_err;

	while (sent < len) {
		size = len - sent;

		/* Keep two messages in the pipe so it schedules better */
		size = min_t(int, size, (sk->sk_sndbuf >> 1) - 64);

		/* allow fallback to order-0 allocations */
		size = min_t(int, size, SKB_MAX_HEAD(0) + UNIX_SKB_FRAGS_SZ);

		data_len = max_t(int, 0, size - SKB_MAX_HEAD(0));

		data_len = min_t(size_t, size, PAGE_ALIGN(data_len));

		skb = sock_alloc_send_pskb(sk, size - data_len, data_len,
					   msg->msg_flags & MSG_DONTWAIT, &err,
					   get_order(UNIX_SKB_FRAGS_SZ));
		if (!skb)
			goto out_err;

		/* Only send the fds in the first buffer */
		err = unix_scm_to_skb(&scm, skb, !fds_sent);
		if (err < 0) {
			kfree_skb(skb);
			goto out_err;
		}
		fds_sent = true;

		skb_put(skb, size - data_len);
		skb->data_len = data_len;
		skb->len = size;
		err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, size);
		if (err) {
			kfree_skb(skb);
			goto out_err;
		}

		unix_state_lock(other);

		if (sock_flag(other, SOCK_DEAD) ||
		    (other->sk_shutdown & RCV_SHUTDOWN))
			goto pipe_err_free;

		maybe_add_creds(skb, sock, other);
		scm_stat_add(other, skb);
		skb_queue_tail(&other->sk_receive_queue, skb);
		unix_state_unlock(other);
		other->sk_data_ready(other);
		sent += size;
	}

#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
	if (msg->msg_flags & MSG_OOB) {
		err = queue_oob(sock, msg, other);
		if (err)
			goto out_err;
		sent++;
	}
#endif

	scm_destroy(&scm);

	return sent;

pipe_err_free:
	unix_state_unlock(other);
	kfree_skb(skb);
pipe_err:
	if (sent == 0 && !(msg->msg_flags&MSG_NOSIGNAL))
		send_sig(SIGPIPE, current, 0);
	err = -EPIPE;
out_err:
	scm_destroy(&scm);
	return sent ? : err;
}

static ssize_t unix_stream_sendpage(struct socket *socket, struct page *page,
				    int offset, size_t size, int flags)
{
	int err;
	bool send_sigpipe = false;
	bool init_scm = true;
	struct scm_cookie scm;
	struct sock *other, *sk = socket->sk;
	struct sk_buff *skb, *newskb = NULL, *tail = NULL;

	if (flags & MSG_OOB)
		return -EOPNOTSUPP;

	other = unix_peer(sk);
	if (!other || sk->sk_state != TCP_ESTABLISHED)
		return -ENOTCONN;

	if (false) {
alloc_skb:
		unix_state_unlock(other);
		mutex_unlock(&unix_sk(other)->iolock);
		newskb = sock_alloc_send_pskb(sk, 0, 0, flags & MSG_DONTWAIT,
					      &err, 0);
		if (!newskb)
			goto err;
	}

	/* we must acquire iolock as we modify already present
	 * skbs in the sk_receive_queue and mess with skb->len
	 */
	err = mutex_lock_interruptible(&unix_sk(other)->iolock);
	if (err) {
		err = flags & MSG_DONTWAIT ? -EAGAIN : -ERESTARTSYS;
		goto err;
	}

	if (sk->sk_shutdown & SEND_SHUTDOWN) {
		err = -EPIPE;
		send_sigpipe = true;
		goto err_unlock;
	}

	unix_state_lock(other);

	if (sock_flag(other, SOCK_DEAD) ||
	    other->sk_shutdown & RCV_SHUTDOWN) {
		err = -EPIPE;
		send_sigpipe = true;
		goto err_state_unlock;
	}

	if (init_scm) {
		err = maybe_init_creds(&scm, socket, other);
		if (err)
			goto err_state_unlock;
		init_scm = false;
	}

	skb = skb_peek_tail(&other->sk_receive_queue);
	if (tail && tail == skb) {
		skb = newskb;
	} else if (!skb || !unix_skb_scm_eq(skb, &scm)) {
		if (newskb) {
			skb = newskb;
		} else {
			tail = skb;
			goto alloc_skb;
		}
	} else if (newskb) {
		/* this is the fast path: we don't necessarily need to
		 * call kfree_skb(); even with newskb == NULL
		 * this does no harm
		 */
		consume_skb(newskb);
		newskb = NULL;
	}

	if (skb_append_pagefrags(skb, page, offset, size)) {
		tail = skb;
		goto alloc_skb;
	}

	skb->len += size;
	skb->data_len += size;
	skb->truesize += size;
	refcount_add(size, &sk->sk_wmem_alloc);

	if (newskb) {
		err = unix_scm_to_skb(&scm, skb, false);
		if (err)
			goto err_state_unlock;
		spin_lock(&other->sk_receive_queue.lock);
		__skb_queue_tail(&other->sk_receive_queue, newskb);
		spin_unlock(&other->sk_receive_queue.lock);
	}

	unix_state_unlock(other);
	mutex_unlock(&unix_sk(other)->iolock);

	other->sk_data_ready(other);
	scm_destroy(&scm);
	return size;

err_state_unlock:
	unix_state_unlock(other);
err_unlock:
	mutex_unlock(&unix_sk(other)->iolock);
err:
	kfree_skb(newskb);
	if (send_sigpipe && !(flags & MSG_NOSIGNAL))
		send_sig(SIGPIPE, current, 0);
	if (!init_scm)
		scm_destroy(&scm);
	return err;
}

static int unix_seqpacket_sendmsg(struct socket *sock, struct msghdr *msg,
				  size_t len)
{
	int err;
	struct sock *sk = sock->sk;

	err = sock_error(sk);
	if (err)
		return err;

	if (sk->sk_state != TCP_ESTABLISHED)
		return -ENOTCONN;

	if (msg->msg_namelen)
		msg->msg_namelen = 0;

	return unix_dgram_sendmsg(sock, msg, len);
}

static int unix_seqpacket_recvmsg(struct socket *sock, struct msghdr *msg,
				  size_t size, int flags)
{
	struct sock *sk = sock->sk;

	if (sk->sk_state != TCP_ESTABLISHED)
		return -ENOTCONN;

	return unix_dgram_recvmsg(sock, msg, size, flags);
}
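
/* Illustrative userspace sketch (not part of the kernel source): because
 * the two wrappers above reuse the datagram paths, SOCK_SEQPACKET keeps
 * record boundaries on a connected socket:
 *
 *	char buf[16];
 *
 *	send(fd, "one", 3, 0);
 *	send(fd, "two", 3, 0);
 *	recv(fd, buf, sizeof(buf), 0);	// returns 3 ("one"), not 6
 */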

static void unix_copy_addr(struct msghdr *msg, struct sock *sk)
{
	struct unix_address *addr = smp_load_acquire(&unix_sk(sk)->addr);

	if (addr) {
		msg->msg_namelen = addr->len;
		memcpy(msg->msg_name, addr->name, addr->len);
	}
}

int __unix_dgram_recvmsg(struct sock *sk, struct msghdr *msg, size_t size,
			 int flags)
{
	struct scm_cookie scm;
	struct socket *sock = sk->sk_socket;
	struct unix_sock *u = unix_sk(sk);
	struct sk_buff *skb, *last;
	long timeo;
	int skip;
	int err;

	err = -EOPNOTSUPP;
	if (flags&MSG_OOB)
		goto out;

	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

	do {
		mutex_lock(&u->iolock);

		skip = sk_peek_offset(sk, flags);
		skb = __skb_try_recv_datagram(sk, &sk->sk_receive_queue, flags,
					      &skip, &err, &last);
		if (skb) {
			if (!(flags & MSG_PEEK))
				scm_stat_del(sk, skb);
			break;
		}

		mutex_unlock(&u->iolock);

		if (err != -EAGAIN)
			break;
	} while (timeo &&
		 !__skb_wait_for_more_packets(sk, &sk->sk_receive_queue,
					      &err, &timeo, last));

	if (!skb) { /* implies iolock unlocked */
		unix_state_lock(sk);
		/* Signal EOF on disconnected non-blocking SEQPACKET socket. */
		if (sk->sk_type == SOCK_SEQPACKET && err == -EAGAIN &&
		    (sk->sk_shutdown & RCV_SHUTDOWN))
			err = 0;
		unix_state_unlock(sk);
		goto out;
	}

	if (wq_has_sleeper(&u->peer_wait))
		wake_up_interruptible_sync_poll(&u->peer_wait,
						EPOLLOUT | EPOLLWRNORM |
						EPOLLWRBAND);

	if (msg->msg_name)
		unix_copy_addr(msg, skb->sk);

	if (size > skb->len - skip)
		size = skb->len - skip;
	else if (size < skb->len - skip)
		msg->msg_flags |= MSG_TRUNC;

	err = skb_copy_datagram_msg(skb, skip, msg, size);
	if (err)
		goto out_free;

	if (sock_flag(sk, SOCK_RCVTSTAMP))
		__sock_recv_timestamp(msg, sk, skb);

	memset(&scm, 0, sizeof(scm));

	scm_set_cred(&scm, UNIXCB(skb).pid, UNIXCB(skb).uid, UNIXCB(skb).gid);
	unix_set_secdata(&scm, skb);

	if (!(flags & MSG_PEEK)) {
		if (UNIXCB(skb).fp)
			unix_detach_fds(&scm, skb);

		sk_peek_offset_bwd(sk, skb->len);
	} else {
		/* It is questionable: on PEEK we could:
		 * - not return fds - good, but too simple 8)
		 * - return fds, and not return them on read (old strategy,
		 *   apparently wrong)
		 * - clone fds (I chose it for now, it is the most universal
		 *   solution)
		 *
		 * POSIX 1003.1g does not actually define this clearly
		 * at all.  POSIX 1003.1g doesn't define a lot of things
		 * clearly, however!
		 */

		sk_peek_offset_fwd(sk, size);

		if (UNIXCB(skb).fp)
			unix_peek_fds(&scm, skb);
	}
	err = (flags & MSG_TRUNC) ? skb->len - skip : size;

	scm_recv(sock, msg, &scm, flags);

out_free:
	skb_free_datagram(sk, skb);
	mutex_unlock(&u->iolock);
out:
	return err;
}

static int unix_dgram_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
			      int flags)
{
	struct sock *sk = sock->sk;

#ifdef CONFIG_BPF_SYSCALL
	const struct proto *prot = READ_ONCE(sk->sk_prot);

	if (prot != &unix_dgram_proto)
		return prot->recvmsg(sk, msg, size, flags, NULL);
#endif
	return __unix_dgram_recvmsg(sk, msg, size, flags);
}

static int unix_read_skb(struct sock *sk, skb_read_actor_t recv_actor)
{
	struct unix_sock *u = unix_sk(sk);
	struct sk_buff *skb;
	int err, copied;

	mutex_lock(&u->iolock);
	skb = skb_recv_datagram(sk, MSG_DONTWAIT, &err);
	mutex_unlock(&u->iolock);
	if (!skb)
		return err;

	copied = recv_actor(sk, skb);
	kfree_skb(skb);

	return copied;
}

/*
 *	Sleep until more data has arrived. But check for races..
 */
static long unix_stream_data_wait(struct sock *sk, long timeo,
				  struct sk_buff *last, unsigned int last_len,
				  bool freezable)
{
	unsigned int state = TASK_INTERRUPTIBLE | freezable * TASK_FREEZABLE;
	struct sk_buff *tail;
	DEFINE_WAIT(wait);

	unix_state_lock(sk);

	for (;;) {
		prepare_to_wait(sk_sleep(sk), &wait, state);

		tail = skb_peek_tail(&sk->sk_receive_queue);
		if (tail != last ||
		    (tail && tail->len != last_len) ||
		    sk->sk_err ||
		    (sk->sk_shutdown & RCV_SHUTDOWN) ||
		    signal_pending(current) ||
		    !timeo)
			break;

		sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
		unix_state_unlock(sk);
		timeo = schedule_timeout(timeo);
		unix_state_lock(sk);

		if (sock_flag(sk, SOCK_DEAD))
			break;

		sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
	}

	finish_wait(sk_sleep(sk), &wait);
	unix_state_unlock(sk);
	return timeo;
}

static unsigned int unix_skb_len(const struct sk_buff *skb)
{
	return skb->len - UNIXCB(skb).consumed;
}

struct unix_stream_read_state {
	int (*recv_actor)(struct sk_buff *, int, int,
			  struct unix_stream_read_state *);
	struct socket *socket;
	struct msghdr *msg;
	struct pipe_inode_info *pipe;
	size_t size;
	int flags;
	unsigned int splice_flags;
};
#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
static int unix_stream_recv_urg(struct unix_stream_read_state *state)
{
	struct socket *sock = state->socket;
	struct sock *sk = sock->sk;
	struct unix_sock *u = unix_sk(sk);
	int chunk = 1;
	struct sk_buff *oob_skb;

	mutex_lock(&u->iolock);
	unix_state_lock(sk);

	if (sock_flag(sk, SOCK_URGINLINE) || !u->oob_skb) {
		unix_state_unlock(sk);
		mutex_unlock(&u->iolock);
		return -EINVAL;
	}

	oob_skb = u->oob_skb;

	if (!(state->flags & MSG_PEEK))
		WRITE_ONCE(u->oob_skb, NULL);

	unix_state_unlock(sk);

	chunk = state->recv_actor(oob_skb, 0, chunk, state);

	if (!(state->flags & MSG_PEEK)) {
		UNIXCB(oob_skb).consumed += 1;
		kfree_skb(oob_skb);
	}

	mutex_unlock(&u->iolock);

	if (chunk < 0)
		return -EFAULT;

	state->msg->msg_flags |= MSG_OOB;
	return 1;
}
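/* Example (illustrative sketch, not part of this file): with
 * CONFIG_AF_UNIX_OOB enabled, stream sockets support a single byte of
 * out-of-band data, modelled on TCP urgent data. Hypothetical userspace
 * usage over a connected socketpair:
 *
 *	char c;
 *
 *	send(a, "x", 1, MSG_OOB);	// sender marks one OOB byte
 *	recv(b, &c, 1, MSG_OOB);	// reader fetches it out of band
 */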
static struct sk_buff *manage_oob(struct sk_buff *skb, struct sock *sk,
				  int flags, int copied)
{
	struct unix_sock *u = unix_sk(sk);

	if (!unix_skb_len(skb) && !(flags & MSG_PEEK)) {
		skb_unlink(skb, &sk->sk_receive_queue);
		consume_skb(skb);
		skb = NULL;
	} else {
		if (skb == u->oob_skb) {
			if (copied) {
				skb = NULL;
			} else if (sock_flag(sk, SOCK_URGINLINE)) {
				if (!(flags & MSG_PEEK)) {
					WRITE_ONCE(u->oob_skb, NULL);
					consume_skb(skb);
				}
			} else if (!(flags & MSG_PEEK)) {
				skb_unlink(skb, &sk->sk_receive_queue);
				consume_skb(skb);
				skb = skb_peek(&sk->sk_receive_queue);
			}
		}
	}
	return skb;
}
#endif
static int unix_stream_read_skb(struct sock *sk, skb_read_actor_t recv_actor)
{
	if (unlikely(sk->sk_state != TCP_ESTABLISHED))
		return -ENOTCONN;

	return unix_read_skb(sk, recv_actor);
}
static int unix_stream_read_generic(struct unix_stream_read_state *state,
				    bool freezable)
{
	struct scm_cookie scm;
	struct socket *sock = state->socket;
	struct sock *sk = sock->sk;
	struct unix_sock *u = unix_sk(sk);
	int copied = 0;
	int flags = state->flags;
	int noblock = flags & MSG_DONTWAIT;
	bool check_creds = false;
	int target;
	int err = 0;
	long timeo;
	int skip;
	size_t size = state->size;
	unsigned int last_len;

	if (unlikely(sk->sk_state != TCP_ESTABLISHED)) {
		err = -EINVAL;
		goto out;
	}

	if (unlikely(flags & MSG_OOB)) {
		err = -EOPNOTSUPP;
#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
		err = unix_stream_recv_urg(state);
#endif
		goto out;
	}

	target = sock_rcvlowat(sk, flags & MSG_WAITALL, size);
	timeo = sock_rcvtimeo(sk, noblock);

	memset(&scm, 0, sizeof(scm));

	/* Lock the socket to prevent queue disordering
	 * while we sleep in memcpy_to_msg().
	 */
	mutex_lock(&u->iolock);

	skip = max(sk_peek_offset(sk, flags), 0);

	do {
		int chunk;
		bool drop_skb;
		struct sk_buff *skb, *last;

redo:
		unix_state_lock(sk);
		if (sock_flag(sk, SOCK_DEAD)) {
			err = -ECONNRESET;
			goto unlock;
		}
		last = skb = skb_peek(&sk->sk_receive_queue);
		last_len = last ? last->len : 0;

#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
		if (skb) {
			skb = manage_oob(skb, sk, flags, copied);
			if (!skb && copied) {
				unix_state_unlock(sk);
				break;
			}
		}
#endif
again:
		if (skb == NULL) {
			if (copied >= target)
				goto unlock;

			/*
			 *	POSIX 1003.1g mandates this order.
			 */

			err = sock_error(sk);
			if (err)
				goto unlock;
			if (sk->sk_shutdown & RCV_SHUTDOWN)
				goto unlock;

			unix_state_unlock(sk);
			if (!timeo) {
				err = -EAGAIN;
				break;
			}

			mutex_unlock(&u->iolock);

			timeo = unix_stream_data_wait(sk, timeo, last,
						      last_len, freezable);

			if (signal_pending(current)) {
				err = sock_intr_errno(timeo);
				scm_destroy(&scm);
				goto out;
			}

			mutex_lock(&u->iolock);
			goto redo;
unlock:
			unix_state_unlock(sk);
			break;
		}

		while (skip >= unix_skb_len(skb)) {
			skip -= unix_skb_len(skb);
			last = skb;
			last_len = skb->len;
			skb = skb_peek_next(skb, &sk->sk_receive_queue);
			if (!skb)
				goto again;
		}

		unix_state_unlock(sk);

		if (check_creds) {
			/* Never glue messages from different writers */
			if (!unix_skb_scm_eq(skb, &scm))
				break;
		} else if (test_bit(SOCK_PASSCRED, &sock->flags)) {
			/* Copy credentials */
			scm_set_cred(&scm, UNIXCB(skb).pid, UNIXCB(skb).uid, UNIXCB(skb).gid);
			unix_set_secdata(&scm, skb);
			check_creds = true;
		}

		/* Copy address just once */
		if (state->msg && state->msg->msg_name) {
			DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr,
					 state->msg->msg_name);
			unix_copy_addr(state->msg, skb->sk);
			sunaddr = NULL;
		}

		chunk = min_t(unsigned int, unix_skb_len(skb) - skip, size);
		skb_get(skb);
		chunk = state->recv_actor(skb, skip, chunk, state);
		drop_skb = !unix_skb_len(skb);
		/* skb is only safe to use if !drop_skb */
		consume_skb(skb);
		if (chunk < 0) {
			if (copied == 0)
				copied = -EFAULT;
			break;
		}
		copied += chunk;
		size -= chunk;

		if (drop_skb) {
			/* the skb was touched by a concurrent reader;
			 * we should not expect anything from this skb
			 * anymore and assume it invalid - we can be
			 * sure it was dropped from the socket queue
			 *
			 * let's report a short read
			 */
			err = 0;
			break;
		}

		/* Mark read part of skb as used */
		if (!(flags & MSG_PEEK)) {
			UNIXCB(skb).consumed += chunk;

			sk_peek_offset_bwd(sk, chunk);

			if (UNIXCB(skb).fp) {
				scm_stat_del(sk, skb);
				unix_detach_fds(&scm, skb);
			}

			if (unix_skb_len(skb))
				break;

			skb_unlink(skb, &sk->sk_receive_queue);
			consume_skb(skb);

			if (scm.fp)
				break;
		} else {
			/* It is questionable, see note in unix_dgram_recvmsg.
			 */
			if (UNIXCB(skb).fp)
				unix_peek_fds(&scm, skb);

			sk_peek_offset_fwd(sk, chunk);

			if (scm.fp)
				break;

			skip = 0;
			last = skb;
			last_len = skb->len;
			unix_state_lock(sk);
			skb = skb_peek_next(skb, &sk->sk_receive_queue);
			if (skb)
				goto again;
			unix_state_unlock(sk);
			break;
		}
	} while (size);

	mutex_unlock(&u->iolock);
	if (state->msg)
		scm_recv(sock, state->msg, &scm, flags);
	else
		scm_destroy(&scm);
out:
	return copied ? : err;
}
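/* Example (illustrative sketch, not part of this file): the target/timeo
 * logic above honours SO_RCVLOWAT and MSG_WAITALL, so a blocking reader
 * can ask not to be woken until enough bytes are queued. Hypothetical
 * userspace usage:
 *
 *	int lowat = 4096;
 *
 *	setsockopt(fd, SOL_SOCKET, SO_RCVLOWAT, &lowat, sizeof(lowat));
 *	n = recv(fd, buf, sizeof(buf), 0);	// sleeps until ~4 KiB, EOF,
 *						// or an error is pending
 */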
static int unix_stream_read_actor(struct sk_buff *skb,
				  int skip, int chunk,
				  struct unix_stream_read_state *state)
{
	int ret;

	ret = skb_copy_datagram_msg(skb, UNIXCB(skb).consumed + skip,
				    state->msg, chunk);
	return ret ?: chunk;
}
int __unix_stream_recvmsg(struct sock *sk, struct msghdr *msg,
			  size_t size, int flags)
{
	struct unix_stream_read_state state = {
		.recv_actor = unix_stream_read_actor,
		.socket = sk->sk_socket,
		.msg = msg,
		.size = size,
		.flags = flags
	};

	return unix_stream_read_generic(&state, true);
}
static int unix_stream_recvmsg(struct socket *sock, struct msghdr *msg,
			       size_t size, int flags)
{
	struct unix_stream_read_state state = {
		.recv_actor = unix_stream_read_actor,
		.socket = sock,
		.msg = msg,
		.size = size,
		.flags = flags
	};

#ifdef CONFIG_BPF_SYSCALL
	struct sock *sk = sock->sk;
	const struct proto *prot = READ_ONCE(sk->sk_prot);

	if (prot != &unix_stream_proto)
		return prot->recvmsg(sk, msg, size, flags, NULL);
#endif
	return unix_stream_read_generic(&state, true);
}
static int unix_stream_splice_actor(struct sk_buff *skb,
				    int skip, int chunk,
				    struct unix_stream_read_state *state)
{
	return skb_splice_bits(skb, state->socket->sk,
			       UNIXCB(skb).consumed + skip,
			       state->pipe, chunk, state->splice_flags);
}
static ssize_t unix_stream_splice_read(struct socket *sock, loff_t *ppos,
				       struct pipe_inode_info *pipe,
				       size_t size, unsigned int flags)
{
	struct unix_stream_read_state state = {
		.recv_actor = unix_stream_splice_actor,
		.socket = sock,
		.pipe = pipe,
		.size = size,
		.splice_flags = flags,
	};

	if (unlikely(*ppos))
		return -ESPIPE;

	if (sock->file->f_flags & O_NONBLOCK ||
	    flags & SPLICE_F_NONBLOCK)
		state.flags = MSG_DONTWAIT;

	return unix_stream_read_generic(&state, false);
}
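/* Example (illustrative sketch, not part of this file): the splice path
 * above lets userspace move stream data into a pipe without an extra
 * copy through a user buffer. Hypothetical usage:
 *
 *	int p[2];
 *
 *	pipe(p);
 *	splice(sock_fd, NULL, p[1], NULL, 65536, SPLICE_F_NONBLOCK);
 */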
static int unix_shutdown(struct socket *sock, int mode)
{
	struct sock *sk = sock->sk;
	struct sock *other;

	if (mode < SHUT_RD || mode > SHUT_RDWR)
		return -EINVAL;
	/* This maps:
	 * SHUT_RD   (0) -> RCV_SHUTDOWN  (1)
	 * SHUT_WR   (1) -> SEND_SHUTDOWN (2)
	 * SHUT_RDWR (2) -> SHUTDOWN_MASK (3)
	 */
	++mode;

	unix_state_lock(sk);
	sk->sk_shutdown |= mode;
	other = unix_peer(sk);
	if (other)
		sock_hold(other);
	unix_state_unlock(sk);
	sk->sk_state_change(sk);

	if (other &&
	    (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET)) {
		int peer_mode = 0;
		const struct proto *prot = READ_ONCE(other->sk_prot);

		if (prot->unhash)
			prot->unhash(other);
		if (mode&RCV_SHUTDOWN)
			peer_mode |= SEND_SHUTDOWN;
		if (mode&SEND_SHUTDOWN)
			peer_mode |= RCV_SHUTDOWN;
		unix_state_lock(other);
		other->sk_shutdown |= peer_mode;
		unix_state_unlock(other);
		other->sk_state_change(other);
		if (peer_mode == SHUTDOWN_MASK)
			sk_wake_async(other, SOCK_WAKE_WAITD, POLL_HUP);
		else if (peer_mode & RCV_SHUTDOWN)
			sk_wake_async(other, SOCK_WAKE_WAITD, POLL_IN);
	}
	if (other)
		sock_put(other);

	return 0;
}
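/* Example (illustrative sketch, not part of this file): because the
 * peer's shutdown mask is mirrored above, a SHUT_WR on one end surfaces
 * as EOF on the other. Hypothetical usage over a connected socketpair:
 *
 *	shutdown(a, SHUT_WR);		// a: no more writes from this end
 *	n = read(b, buf, sizeof(buf));	// b: drains queued data, then n == 0
 */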
long unix_inq_len(struct sock *sk)
{
	struct sk_buff *skb;
	long amount = 0;

	if (sk->sk_state == TCP_LISTEN)
		return -EINVAL;

	spin_lock(&sk->sk_receive_queue.lock);
	if (sk->sk_type == SOCK_STREAM ||
	    sk->sk_type == SOCK_SEQPACKET) {
		skb_queue_walk(&sk->sk_receive_queue, skb)
			amount += unix_skb_len(skb);
	} else {
		skb = skb_peek(&sk->sk_receive_queue);
		if (skb)
			amount = skb->len;
	}
	spin_unlock(&sk->sk_receive_queue.lock);

	return amount;
}
EXPORT_SYMBOL_GPL(unix_inq_len);

long unix_outq_len(struct sock *sk)
{
	return sk_wmem_alloc_get(sk);
}
EXPORT_SYMBOL_GPL(unix_outq_len);
static int unix_open_file(struct sock *sk)
{
	struct path path;
	struct file *f;
	int fd;

	if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	if (!smp_load_acquire(&unix_sk(sk)->addr))
		return -ENOENT;

	path = unix_sk(sk)->path;
	if (!path.dentry)
		return -ENOENT;

	path_get(&path);

	fd = get_unused_fd_flags(O_CLOEXEC);
	if (fd < 0)
		goto out;

	f = dentry_open(&path, O_PATH, current_cred());
	if (IS_ERR(f)) {
		put_unused_fd(fd);
		fd = PTR_ERR(f);
		goto out;
	}

	fd_install(fd, f);
out:
	path_put(&path);

	return fd;
}
static int unix_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	struct sock *sk = sock->sk;
	long amount = 0;
	int err;

	switch (cmd) {
	case SIOCOUTQ:
		amount = unix_outq_len(sk);
		err = put_user(amount, (int __user *)arg);
		break;
	case SIOCINQ:
		amount = unix_inq_len(sk);
		if (amount < 0)
			err = amount;
		else
			err = put_user(amount, (int __user *)arg);
		break;
	case SIOCUNIXFILE:
		err = unix_open_file(sk);
		break;
#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
	case SIOCATMARK:
		{
			struct sk_buff *skb;
			int answ = 0;

			skb = skb_peek(&sk->sk_receive_queue);
			if (skb && skb == READ_ONCE(unix_sk(sk)->oob_skb))
				answ = 1;
			err = put_user(answ, (int __user *)arg);
		}
		break;
#endif
	default:
		err = -ENOIOCTLCMD;
		break;
	}
	return err;
}
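/* Example (illustrative sketch, not part of this file): the queries
 * exposed above, as a hypothetical userspace caller would issue them:
 *
 *	int queued, unsent, at_mark;
 *
 *	ioctl(fd, SIOCINQ, &queued);	 // unread bytes in receive queue
 *	ioctl(fd, SIOCOUTQ, &unsent);	 // bytes not yet consumed by peer
 *	ioctl(fd, SIOCATMARK, &at_mark); // is the next byte the OOB mark?
 */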
#ifdef CONFIG_COMPAT
static int unix_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	return unix_ioctl(sock, cmd, (unsigned long)compat_ptr(arg));
}
#endif
static __poll_t unix_poll(struct file *file, struct socket *sock, poll_table *wait)
{
	struct sock *sk = sock->sk;
	__poll_t mask;

	sock_poll_wait(file, sock, wait);
	mask = 0;

	/* exceptional events? */
	if (sk->sk_err)
		mask |= EPOLLERR;
	if (sk->sk_shutdown == SHUTDOWN_MASK)
		mask |= EPOLLHUP;
	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;

	/* readable? */
	if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
		mask |= EPOLLIN | EPOLLRDNORM;
	if (sk_is_readable(sk))
		mask |= EPOLLIN | EPOLLRDNORM;
#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
	if (READ_ONCE(unix_sk(sk)->oob_skb))
		mask |= EPOLLPRI;
#endif

	/* Connection-based need to check for termination and startup */
	if ((sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) &&
	    sk->sk_state == TCP_CLOSE)
		mask |= EPOLLHUP;

	/*
	 * we set writable also when the other side has shut down the
	 * connection. This prevents stuck sockets.
	 */
	if (unix_writable(sk))
		mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;

	return mask;
}
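/* Example (illustrative sketch, not part of this file): a reader can
 * multiplex on the events set above; EPOLLPRI maps to POLLPRI and signals
 * pending out-of-band data. Hypothetical usage:
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN | POLLPRI };
 *	char oob;
 *
 *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLPRI))
 *		recv(fd, &oob, 1, MSG_OOB);
 */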
static __poll_t unix_dgram_poll(struct file *file, struct socket *sock,
				poll_table *wait)
{
	struct sock *sk = sock->sk, *other;
	unsigned int writable;
	__poll_t mask;

	sock_poll_wait(file, sock, wait);
	mask = 0;

	/* exceptional events? */
	if (sk->sk_err || !skb_queue_empty_lockless(&sk->sk_error_queue))
		mask |= EPOLLERR |
			(sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0);

	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;
	if (sk->sk_shutdown == SHUTDOWN_MASK)
		mask |= EPOLLHUP;

	/* readable? */
	if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
		mask |= EPOLLIN | EPOLLRDNORM;
	if (sk_is_readable(sk))
		mask |= EPOLLIN | EPOLLRDNORM;

	/* Connection-based need to check for termination and startup */
	if (sk->sk_type == SOCK_SEQPACKET) {
		if (sk->sk_state == TCP_CLOSE)
			mask |= EPOLLHUP;
		/* connection hasn't started yet? */
		if (sk->sk_state == TCP_SYN_SENT)
			return mask;
	}

	/* No write status requested, avoid expensive OUT tests. */
	if (!(poll_requested_events(wait) & (EPOLLWRBAND|EPOLLWRNORM|EPOLLOUT)))
		return mask;

	writable = unix_writable(sk);
	if (writable) {
		unix_state_lock(sk);

		other = unix_peer(sk);
		if (other && unix_peer(other) != sk &&
		    unix_recvq_full_lockless(other) &&
		    unix_dgram_peer_wake_me(sk, other))
			writable = 0;

		unix_state_unlock(sk);
	}

	if (writable)
		mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;
	else
		sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);

	return mask;
}
#ifdef CONFIG_PROC_FS

#define BUCKET_SPACE (BITS_PER_LONG - (UNIX_HASH_BITS + 1) - 1)

#define get_bucket(x) ((x) >> BUCKET_SPACE)
#define get_offset(x) ((x) & ((1UL << BUCKET_SPACE) - 1))
#define set_bucket_offset(b, o) ((b) << BUCKET_SPACE | (o))
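/* Example (illustrative only): the macros above pack a hash bucket and an
 * in-bucket offset into one seq_file position. Assuming a 64-bit build
 * with UNIX_HASH_BITS == 8, BUCKET_SPACE is 64 - 9 - 1 = 54, so:
 *
 *	loff_t pos = set_bucket_offset(3, 7);	// pos == 3UL << 54 | 7
 *
 *	get_bucket(pos);	// 3
 *	get_offset(pos);	// 7
 */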
static struct sock *unix_from_bucket(struct seq_file *seq, loff_t *pos)
{
	unsigned long offset = get_offset(*pos);
	unsigned long bucket = get_bucket(*pos);
	unsigned long count = 0;
	struct sock *sk;

	for (sk = sk_head(&seq_file_net(seq)->unx.table.buckets[bucket]);
	     sk; sk = sk_next(sk)) {
		if (++count == offset)
			break;
	}

	return sk;
}

static struct sock *unix_get_first(struct seq_file *seq, loff_t *pos)
{
	unsigned long bucket = get_bucket(*pos);
	struct net *net = seq_file_net(seq);
	struct sock *sk;

	while (bucket < UNIX_HASH_SIZE) {
		spin_lock(&net->unx.table.locks[bucket]);

		sk = unix_from_bucket(seq, pos);
		if (sk)
			return sk;

		spin_unlock(&net->unx.table.locks[bucket]);

		*pos = set_bucket_offset(++bucket, 1);
	}

	return NULL;
}

static struct sock *unix_get_next(struct seq_file *seq, struct sock *sk,
				  loff_t *pos)
{
	unsigned long bucket = get_bucket(*pos);

	sk = sk_next(sk);
	if (sk)
		return sk;

	spin_unlock(&seq_file_net(seq)->unx.table.locks[bucket]);

	*pos = set_bucket_offset(++bucket, 1);

	return unix_get_first(seq, pos);
}
static void *unix_seq_start(struct seq_file *seq, loff_t *pos)
{
	if (!*pos)
		return SEQ_START_TOKEN;

	return unix_get_first(seq, pos);
}

static void *unix_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;

	if (v == SEQ_START_TOKEN)
		return unix_get_first(seq, pos);

	return unix_get_next(seq, v, pos);
}

static void unix_seq_stop(struct seq_file *seq, void *v)
{
	struct sock *sk = v;

	if (sk)
		spin_unlock(&seq_file_net(seq)->unx.table.locks[sk->sk_hash]);
}
static int unix_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_puts(seq, "Num       RefCount Protocol Flags    Type St "
			 "Inode Path\n");
	else {
		struct sock *s = v;
		struct unix_sock *u = unix_sk(s);

		unix_state_lock(s);

		seq_printf(seq, "%pK: %08X %08X %08X %04X %02X %5lu",
			s,
			refcount_read(&s->sk_refcnt),
			0,
			s->sk_state == TCP_LISTEN ? __SO_ACCEPTCON : 0,
			s->sk_type,
			s->sk_socket ?
			(s->sk_state == TCP_ESTABLISHED ? SS_CONNECTED : SS_UNCONNECTED) :
			(s->sk_state == TCP_ESTABLISHED ? SS_CONNECTING : SS_DISCONNECTING),
			sock_i_ino(s));

		if (u->addr) {	/* under a hash table lock here */
			int i, len;

			seq_putc(seq, ' ');

			i = 0;
			len = u->addr->len -
				offsetof(struct sockaddr_un, sun_path);
			if (u->addr->name->sun_path[0]) {
				len--;
			} else {
				seq_putc(seq, '@');
				i++;
			}
			for ( ; i < len; i++)
				seq_putc(seq, u->addr->name->sun_path[i] ?:
					 '@');
		}
		unix_state_unlock(s);
		seq_putc(seq, '\n');
	}

	return 0;
}
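/* Example (illustrative only, values hypothetical): the resulting
 * /proc/net/unix lines look roughly like this; a leading '@' marks an
 * abstract (non-filesystem) address, and embedded NULs print as '@':
 *
 *	Num       RefCount Protocol Flags    Type St Inode Path
 *	0000000000000000: 00000002 00000000 00010000 0001 01 23456 /run/demo.sock
 *	0000000000000000: 00000003 00000000 00000000 0001 03 23457 @demo-abstract
 */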
static const struct seq_operations unix_seq_ops = {
	.start  = unix_seq_start,
	.next   = unix_seq_next,
	.stop   = unix_seq_stop,
	.show   = unix_seq_show,
};
#if IS_BUILTIN(CONFIG_UNIX) && defined(CONFIG_BPF_SYSCALL)
struct bpf_unix_iter_state {
	struct seq_net_private p;
	unsigned int cur_sk;
	unsigned int end_sk;
	unsigned int max_sk;
	struct sock **batch;
	bool st_bucket_done;
};

struct bpf_iter__unix {
	__bpf_md_ptr(struct bpf_iter_meta *, meta);
	__bpf_md_ptr(struct unix_sock *, unix_sk);
	uid_t uid __aligned(8);
};

static int unix_prog_seq_show(struct bpf_prog *prog, struct bpf_iter_meta *meta,
			      struct unix_sock *unix_sk, uid_t uid)
{
	struct bpf_iter__unix ctx;

	meta->seq_num--;  /* skip SEQ_START_TOKEN */
	ctx.meta = meta;
	ctx.unix_sk = unix_sk;
	ctx.uid = uid;
	return bpf_iter_run_prog(prog, &ctx);
}
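/* Example (illustrative sketch in the style of the kernel's BPF
 * selftests, not part of this file): a minimal iterator program that
 * could be attached to this target. The program name is hypothetical:
 *
 *	SEC("iter/unix")
 *	int dump_unix(struct bpf_iter__unix *ctx)
 *	{
 *		struct unix_sock *unix_sk = ctx->unix_sk;
 *
 *		if (!unix_sk)
 *			return 0;
 *		BPF_SEQ_PRINTF(ctx->meta->seq, "uid=%u\n", ctx->uid);
 *		return 0;
 *	}
 */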
static int bpf_iter_unix_hold_batch(struct seq_file *seq, struct sock *start_sk)
{
	struct bpf_unix_iter_state *iter = seq->private;
	unsigned int expected = 1;
	struct sock *sk;

	sock_hold(start_sk);
	iter->batch[iter->end_sk++] = start_sk;

	for (sk = sk_next(start_sk); sk; sk = sk_next(sk)) {
		if (iter->end_sk < iter->max_sk) {
			sock_hold(sk);
			iter->batch[iter->end_sk++] = sk;
		}

		expected++;
	}

	spin_unlock(&seq_file_net(seq)->unx.table.locks[start_sk->sk_hash]);

	return expected;
}

static void bpf_iter_unix_put_batch(struct bpf_unix_iter_state *iter)
{
	while (iter->cur_sk < iter->end_sk)
		sock_put(iter->batch[iter->cur_sk++]);
}
static int bpf_iter_unix_realloc_batch(struct bpf_unix_iter_state *iter,
				       unsigned int new_batch_sz)
{
	struct sock **new_batch;

	new_batch = kvmalloc(sizeof(*new_batch) * new_batch_sz,
			     GFP_USER | __GFP_NOWARN);
	if (!new_batch)
		return -ENOMEM;

	bpf_iter_unix_put_batch(iter);
	kvfree(iter->batch);
	iter->batch = new_batch;
	iter->max_sk = new_batch_sz;

	return 0;
}
static struct sock *bpf_iter_unix_batch(struct seq_file *seq,
					loff_t *pos)
{
	struct bpf_unix_iter_state *iter = seq->private;
	unsigned int expected;
	bool resized = false;
	struct sock *sk;

	if (iter->st_bucket_done)
		*pos = set_bucket_offset(get_bucket(*pos) + 1, 1);

again:
	/* Get a new batch */
	iter->cur_sk = 0;
	iter->end_sk = 0;

	sk = unix_get_first(seq, pos);
	if (!sk)
		return NULL; /* Done */

	expected = bpf_iter_unix_hold_batch(seq, sk);

	if (iter->end_sk == expected) {
		iter->st_bucket_done = true;
		return sk;
	}

	if (!resized && !bpf_iter_unix_realloc_batch(iter, expected * 3 / 2)) {
		resized = true;
		goto again;
	}

	return sk;
}

static void *bpf_iter_unix_seq_start(struct seq_file *seq, loff_t *pos)
{
	if (!*pos)
		return SEQ_START_TOKEN;

	/* bpf iter does not support lseek, so it always
	 * continues from where it was stop()-ped.
	 */
	return bpf_iter_unix_batch(seq, pos);
}
static void *bpf_iter_unix_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct bpf_unix_iter_state *iter = seq->private;
	struct sock *sk;

	/* Whenever seq_next() is called, the iter->cur_sk is
	 * done with seq_show(), so advance to the next sk in
	 * the batch.
	 */
	if (iter->cur_sk < iter->end_sk)
		sock_put(iter->batch[iter->cur_sk++]);

	++*pos;

	if (iter->cur_sk < iter->end_sk)
		sk = iter->batch[iter->cur_sk];
	else
		sk = bpf_iter_unix_batch(seq, pos);

	return sk;
}
static int bpf_iter_unix_seq_show(struct seq_file *seq, void *v)
{
	struct bpf_iter_meta meta;
	struct bpf_prog *prog;
	struct sock *sk = v;
	uid_t uid;
	bool slow;
	int ret;

	if (v == SEQ_START_TOKEN)
		return 0;

	slow = lock_sock_fast(sk);

	if (unlikely(sk_unhashed(sk))) {
		ret = SEQ_SKIP;
		goto unlock;
	}

	uid = from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk));
	meta.seq = seq;
	prog = bpf_iter_get_info(&meta, false);
	ret = unix_prog_seq_show(prog, &meta, v, uid);
unlock:
	unlock_sock_fast(sk, slow);
	return ret;
}
static void bpf_iter_unix_seq_stop(struct seq_file *seq, void *v)
{
	struct bpf_unix_iter_state *iter = seq->private;
	struct bpf_iter_meta meta;
	struct bpf_prog *prog;

	if (!v) {
		meta.seq = seq;
		prog = bpf_iter_get_info(&meta, true);
		if (prog)
			(void)unix_prog_seq_show(prog, &meta, v, 0);
	}

	if (iter->cur_sk < iter->end_sk)
		bpf_iter_unix_put_batch(iter);
}
static const struct seq_operations bpf_iter_unix_seq_ops = {
	.start	= bpf_iter_unix_seq_start,
	.next	= bpf_iter_unix_seq_next,
	.stop	= bpf_iter_unix_seq_stop,
	.show	= bpf_iter_unix_seq_show,
};
#endif
static const struct net_proto_family unix_family_ops = {
	.family = PF_UNIX,
	.create = unix_create,
	.owner	= THIS_MODULE,
};
static int __net_init unix_net_init(struct net *net)
{
	int i;

	net->unx.sysctl_max_dgram_qlen = 10;
	if (unix_sysctl_register(net))
		goto out;

#ifdef CONFIG_PROC_FS
	if (!proc_create_net("unix", 0, net->proc_net, &unix_seq_ops,
			     sizeof(struct seq_net_private)))
		goto err_sysctl;
#endif

	net->unx.table.locks = kvmalloc_array(UNIX_HASH_SIZE,
					      sizeof(spinlock_t), GFP_KERNEL);
	if (!net->unx.table.locks)
		goto err_proc;

	net->unx.table.buckets = kvmalloc_array(UNIX_HASH_SIZE,
						sizeof(struct hlist_head),
						GFP_KERNEL);
	if (!net->unx.table.buckets)
		goto free_locks;

	for (i = 0; i < UNIX_HASH_SIZE; i++) {
		spin_lock_init(&net->unx.table.locks[i]);
		INIT_HLIST_HEAD(&net->unx.table.buckets[i]);
	}

	return 0;

free_locks:
	kvfree(net->unx.table.locks);
err_proc:
#ifdef CONFIG_PROC_FS
	remove_proc_entry("unix", net->proc_net);
err_sysctl:
#endif
	unix_sysctl_unregister(net);
out:
	return -ENOMEM;
}
static void __net_exit unix_net_exit(struct net *net)
{
	kvfree(net->unx.table.buckets);
	kvfree(net->unx.table.locks);
	unix_sysctl_unregister(net);
	remove_proc_entry("unix", net->proc_net);
}
static struct pernet_operations unix_net_ops = {
	.init = unix_net_init,
	.exit = unix_net_exit,
};
#if IS_BUILTIN(CONFIG_UNIX) && defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
DEFINE_BPF_ITER_FUNC(unix, struct bpf_iter_meta *meta,
		     struct unix_sock *unix_sk, uid_t uid)

#define INIT_BATCH_SZ 16
static int bpf_iter_init_unix(void *priv_data, struct bpf_iter_aux_info *aux)
{
	struct bpf_unix_iter_state *iter = priv_data;
	int err;

	err = bpf_iter_init_seq_net(priv_data, aux);
	if (err)
		return err;

	err = bpf_iter_unix_realloc_batch(iter, INIT_BATCH_SZ);
	if (err) {
		bpf_iter_fini_seq_net(priv_data);
		return err;
	}

	return 0;
}

static void bpf_iter_fini_unix(void *priv_data)
{
	struct bpf_unix_iter_state *iter = priv_data;

	bpf_iter_fini_seq_net(priv_data);
	kvfree(iter->batch);
}
static const struct bpf_iter_seq_info unix_seq_info = {
	.seq_ops		= &bpf_iter_unix_seq_ops,
	.init_seq_private	= bpf_iter_init_unix,
	.fini_seq_private	= bpf_iter_fini_unix,
	.seq_priv_size		= sizeof(struct bpf_unix_iter_state),
};
static const struct bpf_func_proto *
bpf_iter_unix_get_func_proto(enum bpf_func_id func_id,
			     const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_setsockopt:
		return &bpf_sk_setsockopt_proto;
	case BPF_FUNC_getsockopt:
		return &bpf_sk_getsockopt_proto;
	default:
		return NULL;
	}
}
static struct bpf_iter_reg unix_reg_info = {
	.target			= "unix",
	.ctx_arg_info_size	= 1,
	.ctx_arg_info		= {
		{ offsetof(struct bpf_iter__unix, unix_sk),
		  PTR_TO_BTF_ID_OR_NULL },
	},
	.get_func_proto		= bpf_iter_unix_get_func_proto,
	.seq_info		= &unix_seq_info,
};
static void __init bpf_iter_register(void)
{
	unix_reg_info.ctx_arg_info[0].btf_id = btf_sock_ids[BTF_SOCK_TYPE_UNIX];
	if (bpf_iter_reg_target(&unix_reg_info))
		pr_warn("Warning: could not register bpf iterator unix\n");
}
#endif
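/* Example (illustrative sketch, not part of this file): once the target
 * is registered, a compiled iterator object can be pinned and read like
 * a file (bpftool syntax as of recent releases; paths hypothetical):
 *
 *	# bpftool iter pin ./unix_iter.o /sys/fs/bpf/unix
 *	# cat /sys/fs/bpf/unix
 */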
static int __init af_unix_init(void)
{
	int i, rc = -1;

	BUILD_BUG_ON(sizeof(struct unix_skb_parms) > sizeof_field(struct sk_buff, cb));

	for (i = 0; i < UNIX_HASH_SIZE / 2; i++) {
		spin_lock_init(&bsd_socket_locks[i]);
		INIT_HLIST_HEAD(&bsd_socket_buckets[i]);
	}

	rc = proto_register(&unix_dgram_proto, 1);
	if (rc != 0) {
		pr_crit("%s: Cannot create unix_sock SLAB cache!\n", __func__);
		goto out;
	}

	rc = proto_register(&unix_stream_proto, 1);
	if (rc != 0) {
		pr_crit("%s: Cannot create unix_sock SLAB cache!\n", __func__);
		proto_unregister(&unix_dgram_proto);
		goto out;
	}

	sock_register(&unix_family_ops);
	register_pernet_subsys(&unix_net_ops);
	unix_bpf_build_proto();

#if IS_BUILTIN(CONFIG_UNIX) && defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
	bpf_iter_register();
#endif

out:
	return rc;
}
static void __exit af_unix_exit(void)
{
	sock_unregister(PF_UNIX);
	proto_unregister(&unix_dgram_proto);
	proto_unregister(&unix_stream_proto);
	unregister_pernet_subsys(&unix_net_ops);
}
/* Earlier than device_initcall() so that other drivers invoking
 * request_module() don't end up in a loop when modprobe tries
 * to use a UNIX socket. But later than subsys_initcall() because
 * we depend on stuff initialised there.
 */
fs_initcall(af_unix_init);
module_exit(af_unix_exit);

MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_UNIX);