[linux-2.6-block.git] / net / unix / af_unix.c
1/*
2 * NET4: Implementation of BSD Unix domain sockets.
3 *
113aa838 4 * Authors: Alan Cox, <alan@lxorguk.ukuu.org.uk>
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 *
11 * Fixes:
12 * Linus Torvalds : Assorted bug cures.
13 * Niibe Yutaka : async I/O support.
14 * Carsten Paeth : PF_UNIX check, address fixes.
15 * Alan Cox : Limit size of allocated blocks.
16 * Alan Cox : Fixed the stupid socketpair bug.
17 * Alan Cox : BSD compatibility fine tuning.
18 * Alan Cox : Fixed a bug in connect when interrupted.
19 * Alan Cox : Sorted out a proper draft version of
20 * file descriptor passing hacked up from
21 * Mike Shaver's work.
22 * Marty Leisner : Fixes to fd passing
23 * Nick Nevin : recvmsg bugfix.
24 * Alan Cox : Started proper garbage collector
25 * Heiko EiBfeldt : Missing verify_area check
26 * Alan Cox : Started POSIXisms
27 * Andreas Schwab : Replace inode by dentry for proper
28 * reference counting
29 * Kirk Petersen : Made this a module
30 * Christoph Rohland : Elegant non-blocking accept/connect algorithm.
31 * Lots of bug fixes.
32 * Alexey Kuznetsov : Repaired (I hope) bugs introduced
33 * by the above two patches.
34 * Andrea Arcangeli : If possible we block in connect(2)
35 * if the max backlog of the listen socket
36 * has been reached. This won't break
37 * old apps and it will avoid huge amounts
38 * of socks hashed (this is for unix_gc()
39 * performance reasons).
40 * Security fix that limits the max
41 * number of socks to 2*max_files and
42 * the number of skb queueable in the
43 * dgram receiver.
44 * Artur Skawina : Hash function optimizations
45 * Alexey Kuznetsov : Full scale SMP. Lot of bugs are introduced 8)
46 * Malcolm Beattie : Set peercred for socketpair
47 * Michal Ostrowski : Module initialization cleanup.
48 * Arnaldo C. Melo : Remove MOD_{INC,DEC}_USE_COUNT,
49 * the core infrastructure is doing that
50 * for all net proto families now (2.5.69+)
51 *
52 *
53 * Known differences from reference BSD that was tested:
54 *
55 * [TO FIX]
56 * ECONNREFUSED is not returned from one end of a connected() socket to the
57 * other the moment one end closes.
58 * fstat() doesn't return st_dev=0, and give the blksize as high water mark
59 * and a fake inode identifier (nor the BSD first socket fstat twice bug).
60 * [NOT TO FIX]
61 * accept() returns a path name even if the connecting socket has closed
62 * in the meantime (BSD loses the path and gives up).
63 * accept() returns 0 length path for an unbound connector. BSD returns 16
64 * and a null first byte in the path (but not for gethost/peername - BSD bug ??)
65 * socketpair(...SOCK_RAW..) doesn't panic the kernel.
66 * BSD af_unix apparently has connect forgetting to block properly.
67 * (need to check this with the POSIX spec in detail)
68 *
69 * Differences from 2.0.0-11-... (ANK)
70 * Bug fixes and improvements.
71 * - client shutdown killed server socket.
72 * - removed all useless cli/sti pairs.
73 *
74 * Semantic changes/extensions.
75 * - generic control message passing.
76 * - SCM_CREDENTIALS control message.
77 * - "Abstract" (not FS based) socket bindings.
78 * Abstract names are sequences of bytes (not zero terminated)
79 * started by 0, so that this name space does not intersect
80 * with BSD names.
81 */
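/*
 * Illustrative userspace sketch (example only, not part of this file;
 * assumes the standard glibc socket headers): binding an "abstract"
 * AF_UNIX name as described above. The name starts with a NUL byte and
 * is counted by length, so it never collides with filesystem paths.
 */
#if 0	/* example only */
#include <stddef.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <sys/un.h>

static int bind_abstract(const char *name)
{
	struct sockaddr_un sun;
	socklen_t len;
	int fd = socket(AF_UNIX, SOCK_STREAM, 0);

	if (fd < 0)
		return -1;
	memset(&sun, 0, sizeof(sun));
	sun.sun_family = AF_UNIX;
	sun.sun_path[0] = '\0';			/* abstract namespace marker */
	strncpy(sun.sun_path + 1, name, sizeof(sun.sun_path) - 2);
	/* the address length covers sun_family, the leading NUL and the name */
	len = offsetof(struct sockaddr_un, sun_path) + 1 + strlen(name);
	if (bind(fd, (struct sockaddr *)&sun, len) < 0) {
		close(fd);
		return -1;
	}
	return fd;
}
#endif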
82
83#include <linux/module.h>
1da177e4 84#include <linux/kernel.h>
85#include <linux/signal.h>
86#include <linux/sched.h>
87#include <linux/errno.h>
88#include <linux/string.h>
89#include <linux/stat.h>
90#include <linux/dcache.h>
91#include <linux/namei.h>
92#include <linux/socket.h>
93#include <linux/un.h>
94#include <linux/fcntl.h>
95#include <linux/termios.h>
96#include <linux/sockios.h>
97#include <linux/net.h>
98#include <linux/in.h>
99#include <linux/fs.h>
100#include <linux/slab.h>
101#include <asm/uaccess.h>
102#include <linux/skbuff.h>
103#include <linux/netdevice.h>
457c4cbc 104#include <net/net_namespace.h>
1da177e4 105#include <net/sock.h>
c752f073 106#include <net/tcp_states.h>
107#include <net/af_unix.h>
108#include <linux/proc_fs.h>
109#include <linux/seq_file.h>
110#include <net/scm.h>
111#include <linux/init.h>
112#include <linux/poll.h>
113#include <linux/rtnetlink.h>
114#include <linux/mount.h>
115#include <net/checksum.h>
116#include <linux/security.h>
117
118static struct hlist_head unix_socket_table[UNIX_HASH_SIZE + 1];
119static DEFINE_SPINLOCK(unix_table_lock);
120static atomic_t unix_nr_socks = ATOMIC_INIT(0);
121
122#define unix_sockets_unbound (&unix_socket_table[UNIX_HASH_SIZE])
123
124#define UNIX_ABSTRACT(sk) (unix_sk(sk)->addr->hash != UNIX_HASH_SIZE)
125
877ce7c1 126#ifdef CONFIG_SECURITY_NETWORK
dc49c1f9 127static void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
877ce7c1 128{
dc49c1f9 129 memcpy(UNIXSID(skb), &scm->secid, sizeof(u32));
130}
131
132static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
133{
dc49c1f9 134 scm->secid = *UNIXSID(skb);
135}
136#else
dc49c1f9 137static inline void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
138{ }
139
140static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
141{ }
142#endif /* CONFIG_SECURITY_NETWORK */
143
144/*
145 * SMP locking strategy:
fbe9cc4a 146 * hash table is protected with spinlock unix_table_lock
147 * each socket state is protected by separate rwlock.
148 */
149
44bb9363 150static inline unsigned unix_hash_fold(__wsum n)
1da177e4 151{
44bb9363 152 unsigned hash = (__force unsigned)n;
153 hash ^= hash>>16;
154 hash ^= hash>>8;
155 return hash&(UNIX_HASH_SIZE-1);
156}
157
158#define unix_peer(sk) (unix_sk(sk)->peer)
159
160static inline int unix_our_peer(struct sock *sk, struct sock *osk)
161{
162 return unix_peer(osk) == sk;
163}
164
165static inline int unix_may_send(struct sock *sk, struct sock *osk)
166{
167 return (unix_peer(osk) == NULL || unix_our_peer(sk, osk));
168}
169
170static inline int unix_recvq_full(struct sock const *sk)
171{
172 return skb_queue_len(&sk->sk_receive_queue) > sk->sk_max_ack_backlog;
173}
174
175static struct sock *unix_peer_get(struct sock *s)
176{
177 struct sock *peer;
178
1c92b4e5 179 unix_state_lock(s);
180 peer = unix_peer(s);
181 if (peer)
182 sock_hold(peer);
1c92b4e5 183 unix_state_unlock(s);
184 return peer;
185}
186
187static inline void unix_release_addr(struct unix_address *addr)
188{
189 if (atomic_dec_and_test(&addr->refcnt))
190 kfree(addr);
191}
192
193/*
194 * Check unix socket name:
195 * - should not be zero length.
196 * - if it does not start with a zero byte, it should be NULL terminated (FS object)
197 * - if it starts with a zero byte, it is an abstract name.
198 */
ac7bfa62 199
200static int unix_mkname(struct sockaddr_un * sunaddr, int len, unsigned *hashp)
201{
202 if (len <= sizeof(short) || len > sizeof(*sunaddr))
203 return -EINVAL;
204 if (!sunaddr || sunaddr->sun_family != AF_UNIX)
205 return -EINVAL;
206 if (sunaddr->sun_path[0]) {
207 /*
208 * This may look like an off by one error but it is a bit more
209 * subtle. 108 is the longest valid AF_UNIX path for a binding.
210 * sun_path[108] doesn't as such exist. However in kernel space
211 * we are guaranteed that it is a valid memory location in our
212 * kernel address buffer.
213 */
214 ((char *)sunaddr)[len]=0;
215 len = strlen(sunaddr->sun_path)+1+sizeof(short);
216 return len;
217 }
218
219 *hashp = unix_hash_fold(csum_partial((char*)sunaddr, len, 0));
220 return len;
221}
222
223static void __unix_remove_socket(struct sock *sk)
224{
225 sk_del_node_init(sk);
226}
227
228static void __unix_insert_socket(struct hlist_head *list, struct sock *sk)
229{
547b792c 230 WARN_ON(!sk_unhashed(sk));
231 sk_add_node(sk, list);
232}
233
234static inline void unix_remove_socket(struct sock *sk)
235{
fbe9cc4a 236 spin_lock(&unix_table_lock);
1da177e4 237 __unix_remove_socket(sk);
fbe9cc4a 238 spin_unlock(&unix_table_lock);
239}
240
241static inline void unix_insert_socket(struct hlist_head *list, struct sock *sk)
242{
fbe9cc4a 243 spin_lock(&unix_table_lock);
1da177e4 244 __unix_insert_socket(list, sk);
fbe9cc4a 245 spin_unlock(&unix_table_lock);
246}
247
248static struct sock *__unix_find_socket_byname(struct net *net,
249 struct sockaddr_un *sunname,
250 int len, int type, unsigned hash)
251{
252 struct sock *s;
253 struct hlist_node *node;
254
255 sk_for_each(s, node, &unix_socket_table[hash ^ type]) {
256 struct unix_sock *u = unix_sk(s);
257
878628fb 258 if (!net_eq(sock_net(s), net))
259 continue;
260
261 if (u->addr->len == len &&
262 !memcmp(u->addr->name, sunname, len))
263 goto found;
264 }
265 s = NULL;
266found:
267 return s;
268}
269
270static inline struct sock *unix_find_socket_byname(struct net *net,
271 struct sockaddr_un *sunname,
272 int len, int type,
273 unsigned hash)
274{
275 struct sock *s;
276
fbe9cc4a 277 spin_lock(&unix_table_lock);
097e66c5 278 s = __unix_find_socket_byname(net, sunname, len, type, hash);
279 if (s)
280 sock_hold(s);
fbe9cc4a 281 spin_unlock(&unix_table_lock);
282 return s;
283}
284
097e66c5 285static struct sock *unix_find_socket_byinode(struct net *net, struct inode *i)
286{
287 struct sock *s;
288 struct hlist_node *node;
289
fbe9cc4a 290 spin_lock(&unix_table_lock);
291 sk_for_each(s, node,
292 &unix_socket_table[i->i_ino & (UNIX_HASH_SIZE - 1)]) {
293 struct dentry *dentry = unix_sk(s)->dentry;
294
878628fb 295 if (!net_eq(sock_net(s), net))
296 continue;
297
298 if(dentry && dentry->d_inode == i)
299 {
300 sock_hold(s);
301 goto found;
302 }
303 }
304 s = NULL;
305found:
fbe9cc4a 306 spin_unlock(&unix_table_lock);
307 return s;
308}
309
310static inline int unix_writable(struct sock *sk)
311{
312 return (atomic_read(&sk->sk_wmem_alloc) << 2) <= sk->sk_sndbuf;
313}
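/*
 * Worked example for the test above (illustrative numbers only): with an
 * sk_sndbuf of, say, 212992 bytes, the socket counts as writable while
 * sk_wmem_alloc <= 212992 / 4 = 53248 bytes are queued for transmission,
 * since (wmem_alloc << 2) <= sndbuf is the same as wmem_alloc <= sndbuf / 4.
 */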
314
315static void unix_write_space(struct sock *sk)
316{
317 read_lock(&sk->sk_callback_lock);
318 if (unix_writable(sk)) {
319 if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
71e20f18 320 wake_up_interruptible_sync(sk->sk_sleep);
8d8ad9d7 321 sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
322 }
323 read_unlock(&sk->sk_callback_lock);
324}
325
326/* When dgram socket disconnects (or changes its peer), we clear its receive
327 * queue of packets that arrived from the previous peer. First, this allows
328 * flow control based only on wmem_alloc; second, an sk connected to a peer
329 * may receive messages only from that peer. */
330static void unix_dgram_disconnected(struct sock *sk, struct sock *other)
331{
b03efcfb 332 if (!skb_queue_empty(&sk->sk_receive_queue)) {
333 skb_queue_purge(&sk->sk_receive_queue);
334 wake_up_interruptible_all(&unix_sk(sk)->peer_wait);
335
336 /* If one link of bidirectional dgram pipe is disconnected,
337 * we signal error. Messages are lost. Do not do this
338 * when the peer was not connected to us.
339 */
340 if (!sock_flag(other, SOCK_DEAD) && unix_peer(other) == sk) {
341 other->sk_err = ECONNRESET;
342 other->sk_error_report(other);
343 }
344 }
345}
346
347static void unix_sock_destructor(struct sock *sk)
348{
349 struct unix_sock *u = unix_sk(sk);
350
351 skb_queue_purge(&sk->sk_receive_queue);
352
353 WARN_ON(atomic_read(&sk->sk_wmem_alloc));
354 WARN_ON(!sk_unhashed(sk));
355 WARN_ON(sk->sk_socket);
356 if (!sock_flag(sk, SOCK_DEAD)) {
357 printk("Attempt to release alive unix socket: %p\n", sk);
358 return;
359 }
360
361 if (u->addr)
362 unix_release_addr(u->addr);
363
364 atomic_dec(&unix_nr_socks);
365#ifdef UNIX_REFCNT_DEBUG
366 printk(KERN_DEBUG "UNIX %p is destroyed, %d are still alive.\n", sk, atomic_read(&unix_nr_socks));
367#endif
368}
369
370static int unix_release_sock (struct sock *sk, int embrion)
371{
372 struct unix_sock *u = unix_sk(sk);
373 struct dentry *dentry;
374 struct vfsmount *mnt;
375 struct sock *skpair;
376 struct sk_buff *skb;
377 int state;
378
379 unix_remove_socket(sk);
380
381 /* Clear state */
1c92b4e5 382 unix_state_lock(sk);
383 sock_orphan(sk);
384 sk->sk_shutdown = SHUTDOWN_MASK;
385 dentry = u->dentry;
386 u->dentry = NULL;
387 mnt = u->mnt;
388 u->mnt = NULL;
389 state = sk->sk_state;
390 sk->sk_state = TCP_CLOSE;
1c92b4e5 391 unix_state_unlock(sk);
392
393 wake_up_interruptible_all(&u->peer_wait);
394
395 skpair=unix_peer(sk);
396
397 if (skpair!=NULL) {
398 if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) {
1c92b4e5 399 unix_state_lock(skpair);
400 /* No more writes */
401 skpair->sk_shutdown = SHUTDOWN_MASK;
402 if (!skb_queue_empty(&sk->sk_receive_queue) || embrion)
403 skpair->sk_err = ECONNRESET;
1c92b4e5 404 unix_state_unlock(skpair);
405 skpair->sk_state_change(skpair);
406 read_lock(&skpair->sk_callback_lock);
8d8ad9d7 407 sk_wake_async(skpair, SOCK_WAKE_WAITD, POLL_HUP);
408 read_unlock(&skpair->sk_callback_lock);
409 }
410 sock_put(skpair); /* It may now die */
411 unix_peer(sk) = NULL;
412 }
413
414 /* Try to flush out this socket. Throw out buffers at least */
415
416 while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
417 if (state==TCP_LISTEN)
418 unix_release_sock(skb->sk, 1);
419 /* passed fds are erased in the kfree_skb hook */
420 kfree_skb(skb);
421 }
422
423 if (dentry) {
424 dput(dentry);
425 mntput(mnt);
426 }
427
428 sock_put(sk);
429
430 /* ---- Socket is dead now and most probably destroyed ---- */
431
432 /*
433 * Fixme: BSD difference: In BSD all sockets connected to us get
434 * ECONNRESET and we die on the spot. In Linux we behave
435 * like files and pipes do and wait for the last
436 * dereference.
437 *
438 * Can't we simply set sock->err?
439 *
441 * What does the above comment talk about? --ANK(980817)
441 */
442
9305cfa4 443 if (unix_tot_inflight)
ac7bfa62 444 unix_gc(); /* Garbage collect fds */
445
446 return 0;
447}
448
449static int unix_listen(struct socket *sock, int backlog)
450{
451 int err;
452 struct sock *sk = sock->sk;
453 struct unix_sock *u = unix_sk(sk);
454
455 err = -EOPNOTSUPP;
456 if (sock->type!=SOCK_STREAM && sock->type!=SOCK_SEQPACKET)
457 goto out; /* Only stream/seqpacket sockets accept */
458 err = -EINVAL;
459 if (!u->addr)
460 goto out; /* No listens on an unbound socket */
1c92b4e5 461 unix_state_lock(sk);
462 if (sk->sk_state != TCP_CLOSE && sk->sk_state != TCP_LISTEN)
463 goto out_unlock;
464 if (backlog > sk->sk_max_ack_backlog)
465 wake_up_interruptible_all(&u->peer_wait);
466 sk->sk_max_ack_backlog = backlog;
467 sk->sk_state = TCP_LISTEN;
468 /* set credentials so connect can copy them */
b488893a 469 sk->sk_peercred.pid = task_tgid_vnr(current);
19d65624 470 current_euid_egid(&sk->sk_peercred.uid, &sk->sk_peercred.gid);
471 err = 0;
472
473out_unlock:
1c92b4e5 474 unix_state_unlock(sk);
475out:
476 return err;
477}
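/*
 * Illustrative userspace sketch (example only, assumes glibc and
 * _GNU_SOURCE for struct ucred): the credentials stored in sk_peercred
 * above are what the connected peer reads back with SO_PEERCRED.
 */
#if 0	/* example only */
#define _GNU_SOURCE
#include <stdio.h>
#include <sys/socket.h>

static void show_peer(int connected_fd)
{
	struct ucred cred;
	socklen_t len = sizeof(cred);

	if (getsockopt(connected_fd, SOL_SOCKET, SO_PEERCRED, &cred, &len) == 0)
		printf("peer pid=%d uid=%d gid=%d\n",
		       (int)cred.pid, (int)cred.uid, (int)cred.gid);
}
#endif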
478
479static int unix_release(struct socket *);
480static int unix_bind(struct socket *, struct sockaddr *, int);
481static int unix_stream_connect(struct socket *, struct sockaddr *,
482 int addr_len, int flags);
483static int unix_socketpair(struct socket *, struct socket *);
484static int unix_accept(struct socket *, struct socket *, int);
485static int unix_getname(struct socket *, struct sockaddr *, int *, int);
486static unsigned int unix_poll(struct file *, struct socket *, poll_table *);
487static unsigned int unix_dgram_poll(struct file *, struct socket *,
488 poll_table *);
489static int unix_ioctl(struct socket *, unsigned int, unsigned long);
490static int unix_shutdown(struct socket *, int);
491static int unix_stream_sendmsg(struct kiocb *, struct socket *,
492 struct msghdr *, size_t);
493static int unix_stream_recvmsg(struct kiocb *, struct socket *,
494 struct msghdr *, size_t, int);
495static int unix_dgram_sendmsg(struct kiocb *, struct socket *,
496 struct msghdr *, size_t);
497static int unix_dgram_recvmsg(struct kiocb *, struct socket *,
498 struct msghdr *, size_t, int);
499static int unix_dgram_connect(struct socket *, struct sockaddr *,
500 int, int);
501static int unix_seqpacket_sendmsg(struct kiocb *, struct socket *,
502 struct msghdr *, size_t);
503
90ddc4f0 504static const struct proto_ops unix_stream_ops = {
505 .family = PF_UNIX,
506 .owner = THIS_MODULE,
507 .release = unix_release,
508 .bind = unix_bind,
509 .connect = unix_stream_connect,
510 .socketpair = unix_socketpair,
511 .accept = unix_accept,
512 .getname = unix_getname,
513 .poll = unix_poll,
514 .ioctl = unix_ioctl,
515 .listen = unix_listen,
516 .shutdown = unix_shutdown,
517 .setsockopt = sock_no_setsockopt,
518 .getsockopt = sock_no_getsockopt,
519 .sendmsg = unix_stream_sendmsg,
520 .recvmsg = unix_stream_recvmsg,
521 .mmap = sock_no_mmap,
522 .sendpage = sock_no_sendpage,
523};
524
90ddc4f0 525static const struct proto_ops unix_dgram_ops = {
526 .family = PF_UNIX,
527 .owner = THIS_MODULE,
528 .release = unix_release,
529 .bind = unix_bind,
530 .connect = unix_dgram_connect,
531 .socketpair = unix_socketpair,
532 .accept = sock_no_accept,
533 .getname = unix_getname,
ec0d215f 534 .poll = unix_dgram_poll,
535 .ioctl = unix_ioctl,
536 .listen = sock_no_listen,
537 .shutdown = unix_shutdown,
538 .setsockopt = sock_no_setsockopt,
539 .getsockopt = sock_no_getsockopt,
540 .sendmsg = unix_dgram_sendmsg,
541 .recvmsg = unix_dgram_recvmsg,
542 .mmap = sock_no_mmap,
543 .sendpage = sock_no_sendpage,
544};
545
90ddc4f0 546static const struct proto_ops unix_seqpacket_ops = {
547 .family = PF_UNIX,
548 .owner = THIS_MODULE,
549 .release = unix_release,
550 .bind = unix_bind,
551 .connect = unix_stream_connect,
552 .socketpair = unix_socketpair,
553 .accept = unix_accept,
554 .getname = unix_getname,
ec0d215f 555 .poll = unix_dgram_poll,
556 .ioctl = unix_ioctl,
557 .listen = unix_listen,
558 .shutdown = unix_shutdown,
559 .setsockopt = sock_no_setsockopt,
560 .getsockopt = sock_no_getsockopt,
561 .sendmsg = unix_seqpacket_sendmsg,
562 .recvmsg = unix_dgram_recvmsg,
563 .mmap = sock_no_mmap,
564 .sendpage = sock_no_sendpage,
565};
566
567static struct proto unix_proto = {
568 .name = "UNIX",
569 .owner = THIS_MODULE,
570 .obj_size = sizeof(struct unix_sock),
571};
572
573/*
574 * AF_UNIX sockets do not interact with hardware, hence they
575 * dont trigger interrupts - so it's safe for them to have
576 * bh-unsafe locking for their sk_receive_queue.lock. Split off
577 * this special lock-class by reinitializing the spinlock key:
578 */
579static struct lock_class_key af_unix_sk_receive_queue_lock_key;
580
1b8d7ae4 581static struct sock * unix_create1(struct net *net, struct socket *sock)
582{
583 struct sock *sk = NULL;
584 struct unix_sock *u;
585
586 atomic_inc(&unix_nr_socks);
587 if (atomic_read(&unix_nr_socks) > 2 * get_max_files())
588 goto out;
589
6257ff21 590 sk = sk_alloc(net, PF_UNIX, GFP_KERNEL, &unix_proto);
591 if (!sk)
592 goto out;
593
1da177e4 594 sock_init_data(sock,sk);
595 lockdep_set_class(&sk->sk_receive_queue.lock,
596 &af_unix_sk_receive_queue_lock_key);
597
598 sk->sk_write_space = unix_write_space;
a0a53c8b 599 sk->sk_max_ack_backlog = net->unx.sysctl_max_dgram_qlen;
600 sk->sk_destruct = unix_sock_destructor;
601 u = unix_sk(sk);
602 u->dentry = NULL;
603 u->mnt = NULL;
fd19f329 604 spin_lock_init(&u->lock);
516e0cc5 605 atomic_long_set(&u->inflight, 0);
1fd05ba5 606 INIT_LIST_HEAD(&u->link);
57b47a53 607 mutex_init(&u->readlock); /* single task reading lock */
608 init_waitqueue_head(&u->peer_wait);
609 unix_insert_socket(unix_sockets_unbound, sk);
610out:
611 if (sk == NULL)
612 atomic_dec(&unix_nr_socks);
613 return sk;
614}
615
1b8d7ae4 616static int unix_create(struct net *net, struct socket *sock, int protocol)
617{
618 if (protocol && protocol != PF_UNIX)
619 return -EPROTONOSUPPORT;
620
621 sock->state = SS_UNCONNECTED;
622
623 switch (sock->type) {
624 case SOCK_STREAM:
625 sock->ops = &unix_stream_ops;
626 break;
627 /*
628 * Believe it or not BSD has AF_UNIX, SOCK_RAW though
629 * nothing uses it.
630 */
631 case SOCK_RAW:
632 sock->type=SOCK_DGRAM;
633 case SOCK_DGRAM:
634 sock->ops = &unix_dgram_ops;
635 break;
636 case SOCK_SEQPACKET:
637 sock->ops = &unix_seqpacket_ops;
638 break;
639 default:
640 return -ESOCKTNOSUPPORT;
641 }
642
1b8d7ae4 643 return unix_create1(net, sock) ? 0 : -ENOMEM;
644}
645
646static int unix_release(struct socket *sock)
647{
648 struct sock *sk = sock->sk;
649
650 if (!sk)
651 return 0;
652
653 sock->sk = NULL;
654
655 return unix_release_sock (sk, 0);
656}
657
658static int unix_autobind(struct socket *sock)
659{
660 struct sock *sk = sock->sk;
3b1e0a65 661 struct net *net = sock_net(sk);
662 struct unix_sock *u = unix_sk(sk);
663 static u32 ordernum = 1;
664 struct unix_address * addr;
665 int err;
666
57b47a53 667 mutex_lock(&u->readlock);
668
669 err = 0;
670 if (u->addr)
671 goto out;
672
673 err = -ENOMEM;
0da974f4 674 addr = kzalloc(sizeof(*addr) + sizeof(short) + 16, GFP_KERNEL);
675 if (!addr)
676 goto out;
677
678 addr->name->sun_family = AF_UNIX;
679 atomic_set(&addr->refcnt, 1);
680
681retry:
682 addr->len = sprintf(addr->name->sun_path+1, "%05x", ordernum) + 1 + sizeof(short);
683 addr->hash = unix_hash_fold(csum_partial((void*)addr->name, addr->len, 0));
684
fbe9cc4a 685 spin_lock(&unix_table_lock);
686 ordernum = (ordernum+1)&0xFFFFF;
687
097e66c5 688 if (__unix_find_socket_byname(net, addr->name, addr->len, sock->type,
1da177e4 689 addr->hash)) {
fbe9cc4a 690 spin_unlock(&unix_table_lock);
691 /* Sanity yield. It is unusual case, but yet... */
692 if (!(ordernum&0xFF))
693 yield();
694 goto retry;
695 }
696 addr->hash ^= sk->sk_type;
697
698 __unix_remove_socket(sk);
699 u->addr = addr;
700 __unix_insert_socket(&unix_socket_table[addr->hash], sk);
fbe9cc4a 701 spin_unlock(&unix_table_lock);
702 err = 0;
703
57b47a53 704out: mutex_unlock(&u->readlock);
705 return err;
706}
707
708static struct sock *unix_find_other(struct net *net,
709 struct sockaddr_un *sunname, int len,
710 int type, unsigned hash, int *error)
711{
712 struct sock *u;
421748ec 713 struct path path;
1da177e4 714 int err = 0;
ac7bfa62 715
1da177e4 716 if (sunname->sun_path[0]) {
717 struct inode *inode;
718 err = kern_path(sunname->sun_path, LOOKUP_FOLLOW, &path);
719 if (err)
720 goto fail;
721 inode = path.dentry->d_inode;
722 err = inode_permission(inode, MAY_WRITE);
723 if (err)
724 goto put_fail;
725
726 err = -ECONNREFUSED;
421748ec 727 if (!S_ISSOCK(inode->i_mode))
1da177e4 728 goto put_fail;
421748ec 729 u = unix_find_socket_byinode(net, inode);
730 if (!u)
731 goto put_fail;
732
733 if (u->sk_type == type)
421748ec 734 touch_atime(path.mnt, path.dentry);
1da177e4 735
421748ec 736 path_put(&path);
737
738 err=-EPROTOTYPE;
739 if (u->sk_type != type) {
740 sock_put(u);
741 goto fail;
742 }
743 } else {
744 err = -ECONNREFUSED;
097e66c5 745 u=unix_find_socket_byname(net, sunname, len, type, hash);
746 if (u) {
747 struct dentry *dentry;
748 dentry = unix_sk(u)->dentry;
749 if (dentry)
750 touch_atime(unix_sk(u)->mnt, dentry);
751 } else
752 goto fail;
753 }
754 return u;
755
756put_fail:
421748ec 757 path_put(&path);
758fail:
759 *error=err;
760 return NULL;
761}
762
763
764static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
765{
766 struct sock *sk = sock->sk;
3b1e0a65 767 struct net *net = sock_net(sk);
768 struct unix_sock *u = unix_sk(sk);
769 struct sockaddr_un *sunaddr=(struct sockaddr_un *)uaddr;
770 struct dentry * dentry = NULL;
771 struct nameidata nd;
772 int err;
773 unsigned hash;
774 struct unix_address *addr;
775 struct hlist_head *list;
776
777 err = -EINVAL;
778 if (sunaddr->sun_family != AF_UNIX)
779 goto out;
780
781 if (addr_len==sizeof(short)) {
782 err = unix_autobind(sock);
783 goto out;
784 }
785
786 err = unix_mkname(sunaddr, addr_len, &hash);
787 if (err < 0)
788 goto out;
789 addr_len = err;
790
57b47a53 791 mutex_lock(&u->readlock);
792
793 err = -EINVAL;
794 if (u->addr)
795 goto out_up;
796
797 err = -ENOMEM;
798 addr = kmalloc(sizeof(*addr)+addr_len, GFP_KERNEL);
799 if (!addr)
800 goto out_up;
801
802 memcpy(addr->name, sunaddr, addr_len);
803 addr->len = addr_len;
804 addr->hash = hash ^ sk->sk_type;
805 atomic_set(&addr->refcnt, 1);
806
807 if (sunaddr->sun_path[0]) {
808 unsigned int mode;
809 err = 0;
810 /*
811 * Get the parent directory, calculate the hash for last
812 * component.
813 */
814 err = path_lookup(sunaddr->sun_path, LOOKUP_PARENT, &nd);
815 if (err)
816 goto out_mknod_parent;
817
818 dentry = lookup_create(&nd, 0);
819 err = PTR_ERR(dentry);
820 if (IS_ERR(dentry))
821 goto out_mknod_unlock;
f81a0bff 822
823 /*
824 * All right, let's create it.
825 */
826 mode = S_IFSOCK |
827 (SOCK_INODE(sock)->i_mode & ~current->fs->umask);
828 err = mnt_want_write(nd.path.mnt);
829 if (err)
830 goto out_mknod_dput;
4ac91378 831 err = vfs_mknod(nd.path.dentry->d_inode, dentry, mode, 0);
463c3197 832 mnt_drop_write(nd.path.mnt);
833 if (err)
834 goto out_mknod_dput;
835 mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
836 dput(nd.path.dentry);
837 nd.path.dentry = dentry;
838
839 addr->hash = UNIX_HASH_SIZE;
840 }
841
fbe9cc4a 842 spin_lock(&unix_table_lock);
843
844 if (!sunaddr->sun_path[0]) {
845 err = -EADDRINUSE;
097e66c5 846 if (__unix_find_socket_byname(net, sunaddr, addr_len,
847 sk->sk_type, hash)) {
848 unix_release_addr(addr);
849 goto out_unlock;
850 }
851
852 list = &unix_socket_table[addr->hash];
853 } else {
854 list = &unix_socket_table[dentry->d_inode->i_ino & (UNIX_HASH_SIZE-1)];
855 u->dentry = nd.path.dentry;
856 u->mnt = nd.path.mnt;
857 }
858
859 err = 0;
860 __unix_remove_socket(sk);
861 u->addr = addr;
862 __unix_insert_socket(list, sk);
863
864out_unlock:
fbe9cc4a 865 spin_unlock(&unix_table_lock);
1da177e4 866out_up:
57b47a53 867 mutex_unlock(&u->readlock);
868out:
869 return err;
870
871out_mknod_dput:
872 dput(dentry);
873out_mknod_unlock:
4ac91378 874 mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
1d957f9b 875 path_put(&nd.path);
876out_mknod_parent:
877 if (err==-EEXIST)
878 err=-EADDRINUSE;
879 unix_release_addr(addr);
880 goto out_up;
881}
882
883static void unix_state_double_lock(struct sock *sk1, struct sock *sk2)
884{
885 if (unlikely(sk1 == sk2) || !sk2) {
886 unix_state_lock(sk1);
887 return;
888 }
889 if (sk1 < sk2) {
890 unix_state_lock(sk1);
891 unix_state_lock_nested(sk2);
892 } else {
893 unix_state_lock(sk2);
894 unix_state_lock_nested(sk1);
895 }
896}
897
898static void unix_state_double_unlock(struct sock *sk1, struct sock *sk2)
899{
900 if (unlikely(sk1 == sk2) || !sk2) {
901 unix_state_unlock(sk1);
902 return;
903 }
904 unix_state_unlock(sk1);
905 unix_state_unlock(sk2);
906}
907
908static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr,
909 int alen, int flags)
910{
911 struct sock *sk = sock->sk;
3b1e0a65 912 struct net *net = sock_net(sk);
913 struct sockaddr_un *sunaddr=(struct sockaddr_un*)addr;
914 struct sock *other;
915 unsigned hash;
916 int err;
917
918 if (addr->sa_family != AF_UNSPEC) {
919 err = unix_mkname(sunaddr, alen, &hash);
920 if (err < 0)
921 goto out;
922 alen = err;
923
924 if (test_bit(SOCK_PASSCRED, &sock->flags) &&
925 !unix_sk(sk)->addr && (err = unix_autobind(sock)) != 0)
926 goto out;
927
278a3de5 928restart:
097e66c5 929 other=unix_find_other(net, sunaddr, alen, sock->type, hash, &err);
930 if (!other)
931 goto out;
932
933 unix_state_double_lock(sk, other);
934
935 /* Apparently VFS overslept socket death. Retry. */
936 if (sock_flag(other, SOCK_DEAD)) {
937 unix_state_double_unlock(sk, other);
938 sock_put(other);
939 goto restart;
940 }
941
942 err = -EPERM;
943 if (!unix_may_send(sk, other))
944 goto out_unlock;
945
946 err = security_unix_may_send(sk->sk_socket, other->sk_socket);
947 if (err)
948 goto out_unlock;
949
950 } else {
951 /*
952 * 1003.1g breaking connected state with AF_UNSPEC
953 */
954 other = NULL;
278a3de5 955 unix_state_double_lock(sk, other);
956 }
957
958 /*
959 * If it was connected, reconnect.
960 */
961 if (unix_peer(sk)) {
962 struct sock *old_peer = unix_peer(sk);
963 unix_peer(sk)=other;
278a3de5 964 unix_state_double_unlock(sk, other);
965
966 if (other != old_peer)
967 unix_dgram_disconnected(sk, old_peer);
968 sock_put(old_peer);
969 } else {
970 unix_peer(sk)=other;
278a3de5 971 unix_state_double_unlock(sk, other);
1da177e4 972 }
ac7bfa62 973 return 0;
974
975out_unlock:
278a3de5 976 unix_state_double_unlock(sk, other);
977 sock_put(other);
978out:
979 return err;
980}
981
982static long unix_wait_for_peer(struct sock *other, long timeo)
983{
984 struct unix_sock *u = unix_sk(other);
985 int sched;
986 DEFINE_WAIT(wait);
987
988 prepare_to_wait_exclusive(&u->peer_wait, &wait, TASK_INTERRUPTIBLE);
989
990 sched = !sock_flag(other, SOCK_DEAD) &&
991 !(other->sk_shutdown & RCV_SHUTDOWN) &&
3c73419c 992 unix_recvq_full(other);
1da177e4 993
1c92b4e5 994 unix_state_unlock(other);
995
996 if (sched)
997 timeo = schedule_timeout(timeo);
998
999 finish_wait(&u->peer_wait, &wait);
1000 return timeo;
1001}
1002
1003static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
1004 int addr_len, int flags)
1005{
1006 struct sockaddr_un *sunaddr=(struct sockaddr_un *)uaddr;
1007 struct sock *sk = sock->sk;
3b1e0a65 1008 struct net *net = sock_net(sk);
1009 struct unix_sock *u = unix_sk(sk), *newu, *otheru;
1010 struct sock *newsk = NULL;
1011 struct sock *other = NULL;
1012 struct sk_buff *skb = NULL;
1013 unsigned hash;
1014 int st;
1015 int err;
1016 long timeo;
1017
1018 err = unix_mkname(sunaddr, addr_len, &hash);
1019 if (err < 0)
1020 goto out;
1021 addr_len = err;
1022
1023 if (test_bit(SOCK_PASSCRED, &sock->flags)
1024 && !u->addr && (err = unix_autobind(sock)) != 0)
1025 goto out;
1026
1027 timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);
1028
1029 /* First of all allocate resources.
1030 If we will make it after state is locked,
1031 we will have to recheck all again in any case.
1032 */
1033
1034 err = -ENOMEM;
1035
1036 /* create new sock for complete connection */
3b1e0a65 1037 newsk = unix_create1(sock_net(sk), NULL);
1038 if (newsk == NULL)
1039 goto out;
1040
1041 /* Allocate skb for sending to listening sock */
1042 skb = sock_wmalloc(newsk, 1, 0, GFP_KERNEL);
1043 if (skb == NULL)
1044 goto out;
1045
1046restart:
1047 /* Find listening sock. */
097e66c5 1048 other = unix_find_other(net, sunaddr, addr_len, sk->sk_type, hash, &err);
1049 if (!other)
1050 goto out;
1051
1052 /* Latch state of peer */
1c92b4e5 1053 unix_state_lock(other);
1054
1055 /* Apparently VFS overslept socket death. Retry. */
1056 if (sock_flag(other, SOCK_DEAD)) {
1c92b4e5 1057 unix_state_unlock(other);
1058 sock_put(other);
1059 goto restart;
1060 }
1061
1062 err = -ECONNREFUSED;
1063 if (other->sk_state != TCP_LISTEN)
1064 goto out_unlock;
1065
3c73419c 1066 if (unix_recvq_full(other)) {
1067 err = -EAGAIN;
1068 if (!timeo)
1069 goto out_unlock;
1070
1071 timeo = unix_wait_for_peer(other, timeo);
1072
1073 err = sock_intr_errno(timeo);
1074 if (signal_pending(current))
1075 goto out;
1076 sock_put(other);
1077 goto restart;
ac7bfa62 1078 }
1079
1080 /* Latch our state.
1081
1082 It is tricky place. We need to grab write lock and cannot
1083 drop lock on peer. It is dangerous because deadlock is
1084 possible. Connect to self case and simultaneous
1085 attempt to connect are eliminated by checking socket
1086 state. other is TCP_LISTEN, if sk is TCP_LISTEN we
1087 check this before attempt to grab lock.
1088
1089 Well, and we have to recheck the state after socket locked.
1090 */
1091 st = sk->sk_state;
1092
1093 switch (st) {
1094 case TCP_CLOSE:
1095 /* This is ok... continue with connect */
1096 break;
1097 case TCP_ESTABLISHED:
1098 /* Socket is already connected */
1099 err = -EISCONN;
1100 goto out_unlock;
1101 default:
1102 err = -EINVAL;
1103 goto out_unlock;
1104 }
1105
1c92b4e5 1106 unix_state_lock_nested(sk);
1107
1108 if (sk->sk_state != st) {
1109 unix_state_unlock(sk);
1110 unix_state_unlock(other);
1111 sock_put(other);
1112 goto restart;
1113 }
1114
1115 err = security_unix_stream_connect(sock, other->sk_socket, newsk);
1116 if (err) {
1c92b4e5 1117 unix_state_unlock(sk);
1118 goto out_unlock;
1119 }
1120
1121 /* The way is open! Fastly set all the necessary fields... */
1122
1123 sock_hold(sk);
1124 unix_peer(newsk) = sk;
1125 newsk->sk_state = TCP_ESTABLISHED;
1126 newsk->sk_type = sk->sk_type;
b488893a 1127 newsk->sk_peercred.pid = task_tgid_vnr(current);
19d65624 1128 current_euid_egid(&newsk->sk_peercred.uid, &newsk->sk_peercred.gid);
1129 newu = unix_sk(newsk);
1130 newsk->sk_sleep = &newu->peer_wait;
1131 otheru = unix_sk(other);
1132
1133 /* copy address information from listening to new sock*/
1134 if (otheru->addr) {
1135 atomic_inc(&otheru->addr->refcnt);
1136 newu->addr = otheru->addr;
1137 }
1138 if (otheru->dentry) {
1139 newu->dentry = dget(otheru->dentry);
1140 newu->mnt = mntget(otheru->mnt);
1141 }
1142
1143 /* Set credentials */
1144 sk->sk_peercred = other->sk_peercred;
1145
1146 sock->state = SS_CONNECTED;
1147 sk->sk_state = TCP_ESTABLISHED;
1148 sock_hold(newsk);
1149
1150 smp_mb__after_atomic_inc(); /* sock_hold() does an atomic_inc() */
1151 unix_peer(sk) = newsk;
1da177e4 1152
1c92b4e5 1153 unix_state_unlock(sk);
1154
1155 /* take ten and send info to listening sock */
1156 spin_lock(&other->sk_receive_queue.lock);
1157 __skb_queue_tail(&other->sk_receive_queue, skb);
1da177e4 1158 spin_unlock(&other->sk_receive_queue.lock);
1c92b4e5 1159 unix_state_unlock(other);
1160 other->sk_data_ready(other, 0);
1161 sock_put(other);
1162 return 0;
1163
1164out_unlock:
1165 if (other)
1c92b4e5 1166 unix_state_unlock(other);
1167
1168out:
1169 if (skb)
1170 kfree_skb(skb);
1171 if (newsk)
1172 unix_release_sock(newsk, 0);
1173 if (other)
1174 sock_put(other);
1175 return err;
1176}
1177
1178static int unix_socketpair(struct socket *socka, struct socket *sockb)
1179{
1180 struct sock *ska=socka->sk, *skb = sockb->sk;
1181
1182 /* Join our sockets back to back */
1183 sock_hold(ska);
1184 sock_hold(skb);
1185 unix_peer(ska)=skb;
1186 unix_peer(skb)=ska;
b488893a 1187 ska->sk_peercred.pid = skb->sk_peercred.pid = task_tgid_vnr(current);
1188 current_euid_egid(&skb->sk_peercred.uid, &skb->sk_peercred.gid);
1189 ska->sk_peercred.uid = skb->sk_peercred.uid;
1190 ska->sk_peercred.gid = skb->sk_peercred.gid;
1191
1192 if (ska->sk_type != SOCK_DGRAM) {
1193 ska->sk_state = TCP_ESTABLISHED;
1194 skb->sk_state = TCP_ESTABLISHED;
1195 socka->state = SS_CONNECTED;
1196 sockb->state = SS_CONNECTED;
1197 }
1198 return 0;
1199}
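/*
 * Illustrative userspace sketch (example only): the back-to-back peering
 * set up above is exactly what socketpair(2) exposes - both ends start out
 * connected, and either end can be handed to a child process.
 */
#if 0	/* example only */
#include <sys/socket.h>
#include <unistd.h>

static int pair_demo(void)
{
	int fds[2];
	char buf[4];

	if (socketpair(AF_UNIX, SOCK_STREAM, 0, fds) < 0)
		return -1;
	if (write(fds[0], "ping", 4) != 4 || read(fds[1], buf, 4) != 4) {
		close(fds[0]);
		close(fds[1]);
		return -1;
	}
	close(fds[0]);
	close(fds[1]);
	return 0;
}
#endif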
1200
1201static int unix_accept(struct socket *sock, struct socket *newsock, int flags)
1202{
1203 struct sock *sk = sock->sk;
1204 struct sock *tsk;
1205 struct sk_buff *skb;
1206 int err;
1207
1208 err = -EOPNOTSUPP;
1209 if (sock->type!=SOCK_STREAM && sock->type!=SOCK_SEQPACKET)
1210 goto out;
1211
1212 err = -EINVAL;
1213 if (sk->sk_state != TCP_LISTEN)
1214 goto out;
1215
1216 /* If socket state is TCP_LISTEN it cannot change (for now...),
1217 * so that no locks are necessary.
1218 */
1219
1220 skb = skb_recv_datagram(sk, 0, flags&O_NONBLOCK, &err);
1221 if (!skb) {
1222 /* This means receive shutdown. */
1223 if (err == 0)
1224 err = -EINVAL;
1225 goto out;
1226 }
1227
1228 tsk = skb->sk;
1229 skb_free_datagram(sk, skb);
1230 wake_up_interruptible(&unix_sk(sk)->peer_wait);
1231
1232 /* attach accepted sock to socket */
1c92b4e5 1233 unix_state_lock(tsk);
1234 newsock->state = SS_CONNECTED;
1235 sock_graft(tsk, newsock);
1c92b4e5 1236 unix_state_unlock(tsk);
1237 return 0;
1238
1239out:
1240 return err;
1241}
1242
1243
1244static int unix_getname(struct socket *sock, struct sockaddr *uaddr, int *uaddr_len, int peer)
1245{
1246 struct sock *sk = sock->sk;
1247 struct unix_sock *u;
1248 struct sockaddr_un *sunaddr=(struct sockaddr_un *)uaddr;
1249 int err = 0;
1250
1251 if (peer) {
1252 sk = unix_peer_get(sk);
1253
1254 err = -ENOTCONN;
1255 if (!sk)
1256 goto out;
1257 err = 0;
1258 } else {
1259 sock_hold(sk);
1260 }
1261
1262 u = unix_sk(sk);
1c92b4e5 1263 unix_state_lock(sk);
1264 if (!u->addr) {
1265 sunaddr->sun_family = AF_UNIX;
1266 sunaddr->sun_path[0] = 0;
1267 *uaddr_len = sizeof(short);
1268 } else {
1269 struct unix_address *addr = u->addr;
1270
1271 *uaddr_len = addr->len;
1272 memcpy(sunaddr, addr->name, *uaddr_len);
1273 }
1c92b4e5 1274 unix_state_unlock(sk);
1275 sock_put(sk);
1276out:
1277 return err;
1278}
1279
1280static void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb)
1281{
1282 int i;
1283
1284 scm->fp = UNIXCB(skb).fp;
1285 skb->destructor = sock_wfree;
1286 UNIXCB(skb).fp = NULL;
1287
1288 for (i=scm->fp->count-1; i>=0; i--)
1289 unix_notinflight(scm->fp->fp[i]);
1290}
1291
1292static void unix_destruct_fds(struct sk_buff *skb)
1293{
1294 struct scm_cookie scm;
1295 memset(&scm, 0, sizeof(scm));
1296 unix_detach_fds(&scm, skb);
1297
1298 /* Alas, it calls VFS */
1299 /* So fscking what? fput() had been SMP-safe since the last Summer */
1300 scm_destroy(&scm);
1301 sock_wfree(skb);
1302}
1303
6209344f 1304static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
1305{
1306 int i;
1307
1308 /*
1309 * Need to duplicate file references for the sake of garbage
1310 * collection. Otherwise a socket in the fps might become a
1311 * candidate for GC while the skb is not yet queued.
1312 */
1313 UNIXCB(skb).fp = scm_fp_dup(scm->fp);
1314 if (!UNIXCB(skb).fp)
1315 return -ENOMEM;
1316
1317 for (i=scm->fp->count-1; i>=0; i--)
1318 unix_inflight(scm->fp->fp[i]);
1da177e4 1319 skb->destructor = unix_destruct_fds;
6209344f 1320 return 0;
1321}
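/*
 * Illustrative userspace sketch (example only): what unix_attach_fds()
 * above services on the kernel side - handing an open file descriptor to
 * the peer with an SCM_RIGHTS control message.
 */
#if 0	/* example only */
#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>

static int send_fd(int sock, int fd_to_pass)
{
	char dummy = '*';
	struct iovec iov = { .iov_base = &dummy, .iov_len = 1 };
	union {
		struct cmsghdr align;
		char buf[CMSG_SPACE(sizeof(int))];
	} u;
	struct msghdr msg = { 0 };
	struct cmsghdr *cmsg;

	memset(&u, 0, sizeof(u));
	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;
	msg.msg_control = u.buf;
	msg.msg_controllen = sizeof(u.buf);
	cmsg = CMSG_FIRSTHDR(&msg);
	cmsg->cmsg_level = SOL_SOCKET;
	cmsg->cmsg_type = SCM_RIGHTS;
	cmsg->cmsg_len = CMSG_LEN(sizeof(int));
	memcpy(CMSG_DATA(cmsg), &fd_to_pass, sizeof(int));
	return sendmsg(sock, &msg, 0) == 1 ? 0 : -1;
}
#endif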
1322
1323/*
1324 * Send AF_UNIX data.
1325 */
1326
1327static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock,
1328 struct msghdr *msg, size_t len)
1329{
1330 struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
1331 struct sock *sk = sock->sk;
3b1e0a65 1332 struct net *net = sock_net(sk);
1333 struct unix_sock *u = unix_sk(sk);
1334 struct sockaddr_un *sunaddr=msg->msg_name;
1335 struct sock *other = NULL;
1336 int namelen = 0; /* fake GCC */
1337 int err;
1338 unsigned hash;
1339 struct sk_buff *skb;
1340 long timeo;
1341 struct scm_cookie tmp_scm;
1342
1343 if (NULL == siocb->scm)
1344 siocb->scm = &tmp_scm;
5f23b734 1345 wait_for_unix_gc();
1346 err = scm_send(sock, msg, siocb->scm);
1347 if (err < 0)
1348 return err;
1349
1350 err = -EOPNOTSUPP;
1351 if (msg->msg_flags&MSG_OOB)
1352 goto out;
1353
1354 if (msg->msg_namelen) {
1355 err = unix_mkname(sunaddr, msg->msg_namelen, &hash);
1356 if (err < 0)
1357 goto out;
1358 namelen = err;
1359 } else {
1360 sunaddr = NULL;
1361 err = -ENOTCONN;
1362 other = unix_peer_get(sk);
1363 if (!other)
1364 goto out;
1365 }
1366
1367 if (test_bit(SOCK_PASSCRED, &sock->flags)
1368 && !u->addr && (err = unix_autobind(sock)) != 0)
1369 goto out;
1370
1371 err = -EMSGSIZE;
1372 if (len > sk->sk_sndbuf - 32)
1373 goto out;
1374
1375 skb = sock_alloc_send_skb(sk, len, msg->msg_flags&MSG_DONTWAIT, &err);
1376 if (skb==NULL)
1377 goto out;
1378
1379 memcpy(UNIXCREDS(skb), &siocb->scm->creds, sizeof(struct ucred));
1380 if (siocb->scm->fp) {
1381 err = unix_attach_fds(siocb->scm, skb);
1382 if (err)
1383 goto out_free;
1384 }
dc49c1f9 1385 unix_get_secdata(siocb->scm, skb);
877ce7c1 1386
badff6d0 1387 skb_reset_transport_header(skb);
1388 err = memcpy_fromiovec(skb_put(skb,len), msg->msg_iov, len);
1389 if (err)
1390 goto out_free;
1391
1392 timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
1393
1394restart:
1395 if (!other) {
1396 err = -ECONNRESET;
1397 if (sunaddr == NULL)
1398 goto out_free;
1399
097e66c5 1400 other = unix_find_other(net, sunaddr, namelen, sk->sk_type,
1401 hash, &err);
1402 if (other==NULL)
1403 goto out_free;
1404 }
1405
1c92b4e5 1406 unix_state_lock(other);
1407 err = -EPERM;
1408 if (!unix_may_send(sk, other))
1409 goto out_unlock;
1410
1411 if (sock_flag(other, SOCK_DEAD)) {
1412 /*
1413 * Check with 1003.1g - what should
1414 * datagram error
1415 */
1c92b4e5 1416 unix_state_unlock(other);
1417 sock_put(other);
1418
1419 err = 0;
1c92b4e5 1420 unix_state_lock(sk);
1421 if (unix_peer(sk) == other) {
1422 unix_peer(sk)=NULL;
1c92b4e5 1423 unix_state_unlock(sk);
1424
1425 unix_dgram_disconnected(sk, other);
1426 sock_put(other);
1427 err = -ECONNREFUSED;
1428 } else {
1c92b4e5 1429 unix_state_unlock(sk);
1430 }
1431
1432 other = NULL;
1433 if (err)
1434 goto out_free;
1435 goto restart;
1436 }
1437
1438 err = -EPIPE;
1439 if (other->sk_shutdown & RCV_SHUTDOWN)
1440 goto out_unlock;
1441
1442 if (sk->sk_type != SOCK_SEQPACKET) {
1443 err = security_unix_may_send(sk->sk_socket, other->sk_socket);
1444 if (err)
1445 goto out_unlock;
1446 }
1447
3c73419c 1448 if (unix_peer(other) != sk && unix_recvq_full(other)) {
1449 if (!timeo) {
1450 err = -EAGAIN;
1451 goto out_unlock;
1452 }
1453
1454 timeo = unix_wait_for_peer(other, timeo);
1455
1456 err = sock_intr_errno(timeo);
1457 if (signal_pending(current))
1458 goto out_free;
1459
1460 goto restart;
1461 }
1462
1463 skb_queue_tail(&other->sk_receive_queue, skb);
1c92b4e5 1464 unix_state_unlock(other);
1465 other->sk_data_ready(other, len);
1466 sock_put(other);
1467 scm_destroy(siocb->scm);
1468 return len;
1469
1470out_unlock:
1c92b4e5 1471 unix_state_unlock(other);
1472out_free:
1473 kfree_skb(skb);
1474out:
1475 if (other)
1476 sock_put(other);
1477 scm_destroy(siocb->scm);
1478 return err;
1479}
1480
ac7bfa62 1481
1da177e4
LT
1482static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
1483 struct msghdr *msg, size_t len)
1484{
1485 struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
1486 struct sock *sk = sock->sk;
1487 struct sock *other = NULL;
1488 struct sockaddr_un *sunaddr=msg->msg_name;
1489 int err,size;
1490 struct sk_buff *skb;
1491 int sent=0;
1492 struct scm_cookie tmp_scm;
1493
1494 if (NULL == siocb->scm)
1495 siocb->scm = &tmp_scm;
5f23b734 1496 wait_for_unix_gc();
1497 err = scm_send(sock, msg, siocb->scm);
1498 if (err < 0)
1499 return err;
1500
1501 err = -EOPNOTSUPP;
1502 if (msg->msg_flags&MSG_OOB)
1503 goto out_err;
1504
1505 if (msg->msg_namelen) {
1506 err = sk->sk_state == TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP;
1507 goto out_err;
1508 } else {
1509 sunaddr = NULL;
1510 err = -ENOTCONN;
830a1e5c 1511 other = unix_peer(sk);
1512 if (!other)
1513 goto out_err;
1514 }
1515
1516 if (sk->sk_shutdown & SEND_SHUTDOWN)
1517 goto pipe_err;
1518
1519 while(sent < len)
1520 {
1521 /*
1522 * Optimisation for the fact that under 0.01% of X
1523 * messages typically need breaking up.
1524 */
1525
e9df7d7f 1526 size = len-sent;
1527
1528 /* Keep two messages in the pipe so it schedules better */
1529 if (size > ((sk->sk_sndbuf >> 1) - 64))
1530 size = (sk->sk_sndbuf >> 1) - 64;
1531
1532 if (size > SKB_MAX_ALLOC)
1533 size = SKB_MAX_ALLOC;
ac7bfa62 1534
1535 /*
1536 * Grab a buffer
1537 */
ac7bfa62 1538
1539 skb=sock_alloc_send_skb(sk,size,msg->msg_flags&MSG_DONTWAIT, &err);
1540
1541 if (skb==NULL)
1542 goto out_err;
1543
1544 /*
1545 * If you pass two values to the sock_alloc_send_skb
1546 * it tries to grab the large buffer with GFP_NOFS
1547 * (which can fail easily), and if it fails grab the
1548 * fallback size buffer which is under a page and will
1549 * succeed. [Alan]
1550 */
1551 size = min_t(int, size, skb_tailroom(skb));
1552
1553 memcpy(UNIXCREDS(skb), &siocb->scm->creds, sizeof(struct ucred));
1554 if (siocb->scm->fp) {
1555 err = unix_attach_fds(siocb->scm, skb);
1556 if (err) {
1557 kfree_skb(skb);
1558 goto out_err;
1559 }
1560 }
1561
1562 if ((err = memcpy_fromiovec(skb_put(skb,size), msg->msg_iov, size)) != 0) {
1563 kfree_skb(skb);
1564 goto out_err;
1565 }
1566
1c92b4e5 1567 unix_state_lock(other);
1568
1569 if (sock_flag(other, SOCK_DEAD) ||
1570 (other->sk_shutdown & RCV_SHUTDOWN))
1571 goto pipe_err_free;
1572
1573 skb_queue_tail(&other->sk_receive_queue, skb);
1c92b4e5 1574 unix_state_unlock(other);
1575 other->sk_data_ready(other, size);
1576 sent+=size;
1577 }
1578
1579 scm_destroy(siocb->scm);
1580 siocb->scm = NULL;
1581
1582 return sent;
1583
1584pipe_err_free:
1c92b4e5 1585 unix_state_unlock(other);
1586 kfree_skb(skb);
1587pipe_err:
1588 if (sent==0 && !(msg->msg_flags&MSG_NOSIGNAL))
1589 send_sig(SIGPIPE,current,0);
1590 err = -EPIPE;
1591out_err:
1592 scm_destroy(siocb->scm);
1593 siocb->scm = NULL;
1594 return sent ? : err;
1595}
1596
1597static int unix_seqpacket_sendmsg(struct kiocb *kiocb, struct socket *sock,
1598 struct msghdr *msg, size_t len)
1599{
1600 int err;
1601 struct sock *sk = sock->sk;
ac7bfa62 1602
1603 err = sock_error(sk);
1604 if (err)
1605 return err;
1606
1607 if (sk->sk_state != TCP_ESTABLISHED)
1608 return -ENOTCONN;
1609
1610 if (msg->msg_namelen)
1611 msg->msg_namelen = 0;
1612
1613 return unix_dgram_sendmsg(kiocb, sock, msg, len);
1614}
ac7bfa62 1615
1616static void unix_copy_addr(struct msghdr *msg, struct sock *sk)
1617{
1618 struct unix_sock *u = unix_sk(sk);
1619
1620 msg->msg_namelen = 0;
1621 if (u->addr) {
1622 msg->msg_namelen = u->addr->len;
1623 memcpy(msg->msg_name, u->addr->name, u->addr->len);
1624 }
1625}
1626
1627static int unix_dgram_recvmsg(struct kiocb *iocb, struct socket *sock,
1628 struct msghdr *msg, size_t size,
1629 int flags)
1630{
1631 struct sock_iocb *siocb = kiocb_to_siocb(iocb);
1632 struct scm_cookie tmp_scm;
1633 struct sock *sk = sock->sk;
1634 struct unix_sock *u = unix_sk(sk);
1635 int noblock = flags & MSG_DONTWAIT;
1636 struct sk_buff *skb;
1637 int err;
1638
1639 err = -EOPNOTSUPP;
1640 if (flags&MSG_OOB)
1641 goto out;
1642
1643 msg->msg_namelen = 0;
1644
57b47a53 1645 mutex_lock(&u->readlock);
1646
1647 skb = skb_recv_datagram(sk, flags, noblock, &err);
1648 if (!skb) {
1649 unix_state_lock(sk);
1650 /* Signal EOF on disconnected non-blocking SEQPACKET socket. */
1651 if (sk->sk_type == SOCK_SEQPACKET && err == -EAGAIN &&
1652 (sk->sk_shutdown & RCV_SHUTDOWN))
1653 err = 0;
1654 unix_state_unlock(sk);
1da177e4 1655 goto out_unlock;
0a112258 1656 }
1da177e4 1657
71e20f18 1658 wake_up_interruptible_sync(&u->peer_wait);
1659
1660 if (msg->msg_name)
1661 unix_copy_addr(msg, skb->sk);
1662
1663 if (size > skb->len)
1664 size = skb->len;
1665 else if (size < skb->len)
1666 msg->msg_flags |= MSG_TRUNC;
1667
1668 err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, size);
1669 if (err)
1670 goto out_free;
1671
1672 if (!siocb->scm) {
1673 siocb->scm = &tmp_scm;
1674 memset(&tmp_scm, 0, sizeof(tmp_scm));
1675 }
1676 siocb->scm->creds = *UNIXCREDS(skb);
877ce7c1 1677 unix_set_secdata(siocb->scm, skb);
1678
1679 if (!(flags & MSG_PEEK))
1680 {
1681 if (UNIXCB(skb).fp)
1682 unix_detach_fds(siocb->scm, skb);
1683 }
ac7bfa62 1684 else
1685 {
1686 /* It is questionable: on PEEK we could:
1687 - do not return fds - good, but too simple 8)
1688 - return fds, and do not return them on read (old strategy,
1689 apparently wrong)
1690 - clone fds (I chose it for now, it is the most universal
1691 solution)
1692
1693 POSIX 1003.1g does not actually define this clearly
1694 at all. POSIX 1003.1g doesn't define a lot of things
1695 clearly however!
1696
1697 */
1698 if (UNIXCB(skb).fp)
1699 siocb->scm->fp = scm_fp_dup(UNIXCB(skb).fp);
1700 }
1701 err = size;
1702
1703 scm_recv(sock, msg, siocb->scm, flags);
1704
1705out_free:
1706 skb_free_datagram(sk,skb);
1707out_unlock:
57b47a53 1708 mutex_unlock(&u->readlock);
1709out:
1710 return err;
1711}
1712
1713/*
1714 * Sleep until data has arrived. But check for races..
1715 */
ac7bfa62 1716
1717static long unix_stream_data_wait(struct sock * sk, long timeo)
1718{
1719 DEFINE_WAIT(wait);
1720
1c92b4e5 1721 unix_state_lock(sk);
1722
1723 for (;;) {
1724 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
1725
b03efcfb 1726 if (!skb_queue_empty(&sk->sk_receive_queue) ||
1727 sk->sk_err ||
1728 (sk->sk_shutdown & RCV_SHUTDOWN) ||
1729 signal_pending(current) ||
1730 !timeo)
1731 break;
1732
1733 set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
1c92b4e5 1734 unix_state_unlock(sk);
1da177e4 1735 timeo = schedule_timeout(timeo);
1c92b4e5 1736 unix_state_lock(sk);
1737 clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
1738 }
1739
1740 finish_wait(sk->sk_sleep, &wait);
1c92b4e5 1741 unix_state_unlock(sk);
1742 return timeo;
1743}
1744
1745
1746
1747static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
1748 struct msghdr *msg, size_t size,
1749 int flags)
1750{
1751 struct sock_iocb *siocb = kiocb_to_siocb(iocb);
1752 struct scm_cookie tmp_scm;
1753 struct sock *sk = sock->sk;
1754 struct unix_sock *u = unix_sk(sk);
1755 struct sockaddr_un *sunaddr=msg->msg_name;
1756 int copied = 0;
1757 int check_creds = 0;
1758 int target;
1759 int err = 0;
1760 long timeo;
1761
1762 err = -EINVAL;
1763 if (sk->sk_state != TCP_ESTABLISHED)
1764 goto out;
1765
1766 err = -EOPNOTSUPP;
1767 if (flags&MSG_OOB)
1768 goto out;
1769
1770 target = sock_rcvlowat(sk, flags&MSG_WAITALL, size);
1771 timeo = sock_rcvtimeo(sk, flags&MSG_DONTWAIT);
1772
1773 msg->msg_namelen = 0;
1774
1775 /* Lock the socket to prevent queue disordering
1776 * while we sleep in memcpy_tomsg
1777 */
1778
1779 if (!siocb->scm) {
1780 siocb->scm = &tmp_scm;
1781 memset(&tmp_scm, 0, sizeof(tmp_scm));
1782 }
1783
57b47a53 1784 mutex_lock(&u->readlock);
1785
1786 do
1787 {
1788 int chunk;
1789 struct sk_buff *skb;
1790
3c0d2f37 1791 unix_state_lock(sk);
1792 skb = skb_dequeue(&sk->sk_receive_queue);
1793 if (skb==NULL)
1794 {
1795 if (copied >= target)
3c0d2f37 1796 goto unlock;
1797
1798 /*
1799 * POSIX 1003.1g mandates this order.
1800 */
ac7bfa62 1801
1da177e4 1802 if ((err = sock_error(sk)) != 0)
3c0d2f37 1803 goto unlock;
1da177e4 1804 if (sk->sk_shutdown & RCV_SHUTDOWN)
1805 goto unlock;
1806
1807 unix_state_unlock(sk);
1808 err = -EAGAIN;
1809 if (!timeo)
1810 break;
57b47a53 1811 mutex_unlock(&u->readlock);
1812
1813 timeo = unix_stream_data_wait(sk, timeo);
1814
1815 if (signal_pending(current)) {
1816 err = sock_intr_errno(timeo);
1817 goto out;
1818 }
57b47a53 1819 mutex_lock(&u->readlock);
1da177e4 1820 continue;
1821 unlock:
1822 unix_state_unlock(sk);
1823 break;
1da177e4 1824 }
3c0d2f37 1825 unix_state_unlock(sk);
1826
1827 if (check_creds) {
1828 /* Never glue messages from different writers */
1829 if (memcmp(UNIXCREDS(skb), &siocb->scm->creds, sizeof(siocb->scm->creds)) != 0) {
1830 skb_queue_head(&sk->sk_receive_queue, skb);
1831 break;
1832 }
1833 } else {
1834 /* Copy credentials */
1835 siocb->scm->creds = *UNIXCREDS(skb);
1836 check_creds = 1;
1837 }
1838
1839 /* Copy address just once */
1840 if (sunaddr)
1841 {
1842 unix_copy_addr(msg, skb->sk);
1843 sunaddr = NULL;
1844 }
1845
1846 chunk = min_t(unsigned int, skb->len, size);
1847 if (memcpy_toiovec(msg->msg_iov, skb->data, chunk)) {
1848 skb_queue_head(&sk->sk_receive_queue, skb);
1849 if (copied == 0)
1850 copied = -EFAULT;
1851 break;
1852 }
1853 copied += chunk;
1854 size -= chunk;
1855
1856 /* Mark read part of skb as used */
1857 if (!(flags & MSG_PEEK))
1858 {
1859 skb_pull(skb, chunk);
1860
1861 if (UNIXCB(skb).fp)
1862 unix_detach_fds(siocb->scm, skb);
1863
1864 /* put the skb back if we didn't use it up.. */
1865 if (skb->len)
1866 {
1867 skb_queue_head(&sk->sk_receive_queue, skb);
1868 break;
1869 }
1870
1871 kfree_skb(skb);
1872
1873 if (siocb->scm->fp)
1874 break;
1875 }
1876 else
1877 {
1878 /* It is questionable, see note in unix_dgram_recvmsg.
1879 */
1880 if (UNIXCB(skb).fp)
1881 siocb->scm->fp = scm_fp_dup(UNIXCB(skb).fp);
1882
1883 /* put message back and return */
1884 skb_queue_head(&sk->sk_receive_queue, skb);
1885 break;
1886 }
1887 } while (size);
1888
57b47a53 1889 mutex_unlock(&u->readlock);
1890 scm_recv(sock, msg, siocb->scm, flags);
1891out:
1892 return copied ? : err;
1893}
1894
1895static int unix_shutdown(struct socket *sock, int mode)
1896{
1897 struct sock *sk = sock->sk;
1898 struct sock *other;
1899
1900 mode = (mode+1)&(RCV_SHUTDOWN|SEND_SHUTDOWN);
1901
1902 if (mode) {
1c92b4e5 1903 unix_state_lock(sk);
1904 sk->sk_shutdown |= mode;
1905 other=unix_peer(sk);
1906 if (other)
1907 sock_hold(other);
1c92b4e5 1908 unix_state_unlock(sk);
1909 sk->sk_state_change(sk);
1910
1911 if (other &&
1912 (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET)) {
1913
1914 int peer_mode = 0;
1915
1916 if (mode&RCV_SHUTDOWN)
1917 peer_mode |= SEND_SHUTDOWN;
1918 if (mode&SEND_SHUTDOWN)
1919 peer_mode |= RCV_SHUTDOWN;
1c92b4e5 1920 unix_state_lock(other);
1da177e4 1921 other->sk_shutdown |= peer_mode;
1c92b4e5 1922 unix_state_unlock(other);
1923 other->sk_state_change(other);
1924 read_lock(&other->sk_callback_lock);
1925 if (peer_mode == SHUTDOWN_MASK)
8d8ad9d7 1926 sk_wake_async(other, SOCK_WAKE_WAITD, POLL_HUP);
1da177e4 1927 else if (peer_mode & RCV_SHUTDOWN)
8d8ad9d7 1928 sk_wake_async(other, SOCK_WAKE_WAITD, POLL_IN);
1929 read_unlock(&other->sk_callback_lock);
1930 }
1931 if (other)
1932 sock_put(other);
1933 }
1934 return 0;
1935}
1936
1937static int unix_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
1938{
1939 struct sock *sk = sock->sk;
1940 long amount=0;
1941 int err;
1942
1943 switch(cmd)
1944 {
1945 case SIOCOUTQ:
1946 amount = atomic_read(&sk->sk_wmem_alloc);
1947 err = put_user(amount, (int __user *)arg);
1948 break;
1949 case SIOCINQ:
1950 {
1951 struct sk_buff *skb;
1952
1953 if (sk->sk_state == TCP_LISTEN) {
1954 err = -EINVAL;
1955 break;
1956 }
1957
1958 spin_lock(&sk->sk_receive_queue.lock);
1959 if (sk->sk_type == SOCK_STREAM ||
1960 sk->sk_type == SOCK_SEQPACKET) {
1961 skb_queue_walk(&sk->sk_receive_queue, skb)
1962 amount += skb->len;
1963 } else {
1964 skb = skb_peek(&sk->sk_receive_queue);
1965 if (skb)
1966 amount=skb->len;
1967 }
1968 spin_unlock(&sk->sk_receive_queue.lock);
1969 err = put_user(amount, (int __user *)arg);
1970 break;
1971 }
1972
1973 default:
b5e5fa5e 1974 err = -ENOIOCTLCMD;
1975 break;
1976 }
1977 return err;
1978}
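/*
 * Illustrative userspace sketch (example only): the SIOCINQ case handled
 * above is how an application asks how many bytes are queued for reading
 * on an AF_UNIX socket.
 */
#if 0	/* example only */
#include <sys/ioctl.h>
#include <linux/sockios.h>

static int bytes_readable(int sock)
{
	int n = 0;

	return ioctl(sock, SIOCINQ, &n) < 0 ? -1 : n;
}
#endif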
1979
1980static unsigned int unix_poll(struct file * file, struct socket *sock, poll_table *wait)
1981{
1982 struct sock *sk = sock->sk;
1983 unsigned int mask;
1984
1985 poll_wait(file, sk->sk_sleep, wait);
1986 mask = 0;
1987
1988 /* exceptional events? */
1989 if (sk->sk_err)
1990 mask |= POLLERR;
1991 if (sk->sk_shutdown == SHUTDOWN_MASK)
1992 mask |= POLLHUP;
1993 if (sk->sk_shutdown & RCV_SHUTDOWN)
1994 mask |= POLLRDHUP;
1995
1996 /* readable? */
1997 if (!skb_queue_empty(&sk->sk_receive_queue) ||
1998 (sk->sk_shutdown & RCV_SHUTDOWN))
1999 mask |= POLLIN | POLLRDNORM;
2000
2001 /* Connection-based need to check for termination and startup */
2002 if ((sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) && sk->sk_state == TCP_CLOSE)
2003 mask |= POLLHUP;
2004
2005 /*
2006 * we set writable also when the other side has shut down the
2007 * connection. This prevents stuck sockets.
2008 */
2009 if (unix_writable(sk))
2010 mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
2011
2012 return mask;
2013}
2014
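/*
 * poll() for datagram sockets.  In addition to the checks done by
 * unix_poll() this clears writability when a connected peer that is not
 * connected back to us has a full receive queue; in that case the poller is
 * also registered on the peer's peer_wait queue and SOCK_ASYNC_NOSPACE is
 * set so the caller is woken up when space becomes available.
 */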
static unsigned int unix_dgram_poll(struct file *file, struct socket *sock,
				    poll_table *wait)
{
	struct sock *sk = sock->sk, *other;
	unsigned int mask, writable;

	poll_wait(file, sk->sk_sleep, wait);
	mask = 0;

	/* exceptional events? */
	if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
		mask |= POLLERR;
	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= POLLRDHUP;
	if (sk->sk_shutdown == SHUTDOWN_MASK)
		mask |= POLLHUP;

	/* readable? */
	if (!skb_queue_empty(&sk->sk_receive_queue) ||
	    (sk->sk_shutdown & RCV_SHUTDOWN))
		mask |= POLLIN | POLLRDNORM;

	/* Connection-based need to check for termination and startup */
	if (sk->sk_type == SOCK_SEQPACKET) {
		if (sk->sk_state == TCP_CLOSE)
			mask |= POLLHUP;
		/* connection hasn't started yet? */
		if (sk->sk_state == TCP_SYN_SENT)
			return mask;
	}

	/* writable? */
	writable = unix_writable(sk);
	if (writable) {
		other = unix_peer_get(sk);
		if (other) {
			if (unix_peer(other) != sk) {
				poll_wait(file, &unix_sk(other)->peer_wait,
					  wait);
				if (unix_recvq_full(other))
					writable = 0;
			}

			sock_put(other);
		}
	}

	if (writable)
		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
	else
		set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

	return mask;
}

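/*
 * /proc/net/unix support: walk every chain of unix_socket_table (buckets
 * 0..UNIX_HASH_SIZE) to enumerate all AF_UNIX sockets for the seq_file
 * interface below.
 */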
#ifdef CONFIG_PROC_FS
static struct sock *first_unix_socket(int *i)
{
	for (*i = 0; *i <= UNIX_HASH_SIZE; (*i)++) {
		if (!hlist_empty(&unix_socket_table[*i]))
			return __sk_head(&unix_socket_table[*i]);
	}
	return NULL;
}

static struct sock *next_unix_socket(int *i, struct sock *s)
{
	struct sock *next = sk_next(s);
	/* More in this chain? */
	if (next)
		return next;
	/* Look for next non-empty chain. */
	for ((*i)++; *i <= UNIX_HASH_SIZE; (*i)++) {
		if (!hlist_empty(&unix_socket_table[*i]))
			return __sk_head(&unix_socket_table[*i]);
	}
	return NULL;
}

struct unix_iter_state {
	struct seq_net_private p;
	int i;
};

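/*
 * seq_file iterator: unix_seq_start() takes unix_table_lock and maps the
 * requested position onto a socket belonging to the current network
 * namespace, unix_seq_next() advances while skipping sockets from other
 * namespaces, and unix_seq_stop() drops the lock.
 */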
static struct sock *unix_seq_idx(struct seq_file *seq, loff_t pos)
{
	struct unix_iter_state *iter = seq->private;
	loff_t off = 0;
	struct sock *s;

	for (s = first_unix_socket(&iter->i); s; s = next_unix_socket(&iter->i, s)) {
		if (sock_net(s) != seq_file_net(seq))
			continue;
		if (off == pos)
			return s;
		++off;
	}
	return NULL;
}

static void *unix_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(unix_table_lock)
{
	spin_lock(&unix_table_lock);
	return *pos ? unix_seq_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}

static void *unix_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct unix_iter_state *iter = seq->private;
	struct sock *sk = v;

	++*pos;

	if (v == SEQ_START_TOKEN)
		sk = first_unix_socket(&iter->i);
	else
		sk = next_unix_socket(&iter->i, sk);
	while (sk && (sock_net(sk) != seq_file_net(seq)))
		sk = next_unix_socket(&iter->i, sk);
	return sk;
}

static void unix_seq_stop(struct seq_file *seq, void *v)
	__releases(unix_table_lock)
{
	spin_unlock(&unix_table_lock);
}

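/*
 * Emit one line per socket: a header row for SEQ_START_TOKEN, otherwise the
 * reference count, flags, type, state and inode, followed by the bound path
 * (prefixed with '@' for abstract names).
 */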
static int unix_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_puts(seq, "Num       RefCount Protocol Flags    Type St "
			 "Inode Path\n");
	else {
		struct sock *s = v;
		struct unix_sock *u = unix_sk(s);

		unix_state_lock(s);

		seq_printf(seq, "%p: %08X %08X %08X %04X %02X %5lu",
			s,
			atomic_read(&s->sk_refcnt),
			0,
			s->sk_state == TCP_LISTEN ? __SO_ACCEPTCON : 0,
			s->sk_type,
			s->sk_socket ?
			(s->sk_state == TCP_ESTABLISHED ? SS_CONNECTED : SS_UNCONNECTED) :
			(s->sk_state == TCP_ESTABLISHED ? SS_CONNECTING : SS_DISCONNECTING),
			sock_i_ino(s));

		if (u->addr) {
			int i, len;

			seq_putc(seq, ' ');

			i = 0;
			len = u->addr->len - sizeof(short);
			if (!UNIX_ABSTRACT(s))
				len--;
			else {
				seq_putc(seq, '@');
				i++;
			}
			for ( ; i < len; i++)
				seq_putc(seq, u->addr->name->sun_path[i]);
		}
		unix_state_unlock(s);
		seq_putc(seq, '\n');
	}

	return 0;
}

static const struct seq_operations unix_seq_ops = {
	.start = unix_seq_start,
	.next  = unix_seq_next,
	.stop  = unix_seq_stop,
	.show  = unix_seq_show,
};

static int unix_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &unix_seq_ops,
			    sizeof(struct unix_iter_state));
}

static const struct file_operations unix_seq_fops = {
	.owner		= THIS_MODULE,
	.open		= unix_seq_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_net,
};

#endif

static struct net_proto_family unix_family_ops = {
	.family = PF_UNIX,
	.create = unix_create,
	.owner	= THIS_MODULE,
};

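/*
 * Per network namespace setup: set the default datagram backlog, register
 * the sysctl table and create the /proc/net/unix entry; unix_net_exit()
 * undoes both.
 */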
static int unix_net_init(struct net *net)
{
	int error = -ENOMEM;

	net->unx.sysctl_max_dgram_qlen = 10;
	if (unix_sysctl_register(net))
		goto out;

#ifdef CONFIG_PROC_FS
	if (!proc_net_fops_create(net, "unix", 0, &unix_seq_fops)) {
		unix_sysctl_unregister(net);
		goto out;
	}
#endif
	error = 0;
out:
	return error;
}

static void unix_net_exit(struct net *net)
{
	unix_sysctl_unregister(net);
	proc_net_remove(net, "unix");
}

static struct pernet_operations unix_net_ops = {
	.init = unix_net_init,
	.exit = unix_net_exit,
};

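/*
 * Module init: register the unix_proto protocol (with its slab cache), the
 * PF_UNIX socket family and the per-namespace operations above; the exit
 * path unregisters them in the same order.
 */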
static int __init af_unix_init(void)
{
	int rc = -1;
	struct sk_buff *dummy_skb;

	BUILD_BUG_ON(sizeof(struct unix_skb_parms) > sizeof(dummy_skb->cb));

	rc = proto_register(&unix_proto, 1);
	if (rc != 0) {
		printk(KERN_CRIT "%s: Cannot create unix_sock SLAB cache!\n",
		       __func__);
		goto out;
	}

	sock_register(&unix_family_ops);
	register_pernet_subsys(&unix_net_ops);
out:
	return rc;
}

static void __exit af_unix_exit(void)
{
	sock_unregister(PF_UNIX);
	proto_unregister(&unix_proto);
	unregister_pernet_subsys(&unix_net_ops);
}

/* Earlier than device_initcall() so that other drivers invoking
   request_module() don't end up in a loop when modprobe tries
   to use a UNIX socket. But later than subsys_initcall() because
   we depend on stuff initialised there */
fs_initcall(af_unix_init);
module_exit(af_unix_exit);

MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_UNIX);