net/unix/af_unix.c
1 /*
2  * NET4:        Implementation of BSD Unix domain sockets.
3  *
4  * Authors:     Alan Cox, <alan.cox@linux.org>
5  *
6  *              This program is free software; you can redistribute it and/or
7  *              modify it under the terms of the GNU General Public License
8  *              as published by the Free Software Foundation; either version
9  *              2 of the License, or (at your option) any later version.
10  *
11  * Version:     $Id: af_unix.c,v 1.133 2002/02/08 03:57:19 davem Exp $
12  *
13  * Fixes:
14  *              Linus Torvalds  :       Assorted bug cures.
15  *              Niibe Yutaka    :       async I/O support.
16  *              Carsten Paeth   :       PF_UNIX check, address fixes.
17  *              Alan Cox        :       Limit size of allocated blocks.
18  *              Alan Cox        :       Fixed the stupid socketpair bug.
19  *              Alan Cox        :       BSD compatibility fine tuning.
20  *              Alan Cox        :       Fixed a bug in connect when interrupted.
21  *              Alan Cox        :       Sorted out a proper draft version of
22  *                                      file descriptor passing hacked up from
23  *                                      Mike Shaver's work.
24  *              Marty Leisner   :       Fixes to fd passing
25  *              Nick Nevin      :       recvmsg bugfix.
26  *              Alan Cox        :       Started proper garbage collector
27  *              Heiko EiBfeldt  :       Missing verify_area check
28  *              Alan Cox        :       Started POSIXisms
29  *              Andreas Schwab  :       Replace inode by dentry for proper
30  *                                      reference counting
31  *              Kirk Petersen   :       Made this a module
32  *          Christoph Rohland   :       Elegant non-blocking accept/connect algorithm.
33  *                                      Lots of bug fixes.
 34  *           Alexey Kuznetsov   :       Repaired (I hope) bugs introduced
 35  *                                      by the above two patches.
36  *           Andrea Arcangeli   :       If possible we block in connect(2)
37  *                                      if the max backlog of the listen socket
 38  *                                      has been reached. This won't break
 39  *                                      old apps and it will avoid huge amounts
 40  *                                      of socks hashed (this is for unix_gc()
 41  *                                      performance reasons).
42  *                                      Security fix that limits the max
43  *                                      number of socks to 2*max_files and
44  *                                      the number of skb queueable in the
45  *                                      dgram receiver.
46  *              Artur Skawina   :       Hash function optimizations
 47  *           Alexey Kuznetsov   :       Full scale SMP. Lots of bugs are introduced 8)
48  *            Malcolm Beattie   :       Set peercred for socketpair
49  *           Michal Ostrowski   :       Module initialization cleanup.
50  *           Arnaldo C. Melo    :       Remove MOD_{INC,DEC}_USE_COUNT,
51  *                                      the core infrastructure is doing that
52  *                                      for all net proto families now (2.5.69+)
53  *
54  *
55  * Known differences from reference BSD that was tested:
56  *
57  *      [TO FIX]
58  *      ECONNREFUSED is not returned from one end of a connected() socket to the
59  *              other the moment one end closes.
 60  *      fstat() doesn't return st_dev=0, and gives the blksize as a high water mark
61  *              and a fake inode identifier (nor the BSD first socket fstat twice bug).
62  *      [NOT TO FIX]
63  *      accept() returns a path name even if the connecting socket has closed
64  *              in the meantime (BSD loses the path and gives up).
65  *      accept() returns 0 length path for an unbound connector. BSD returns 16
66  *              and a null first byte in the path (but not for gethost/peername - BSD bug ??)
67  *      socketpair(...SOCK_RAW..) doesn't panic the kernel.
68  *      BSD af_unix apparently has connect forgetting to block properly.
69  *              (need to check this with the POSIX spec in detail)
70  *
71  * Differences from 2.0.0-11-... (ANK)
72  *      Bug fixes and improvements.
73  *              - client shutdown killed server socket.
74  *              - removed all useless cli/sti pairs.
75  *
76  *      Semantic changes/extensions.
77  *              - generic control message passing.
78  *              - SCM_CREDENTIALS control message.
79  *              - "Abstract" (not FS based) socket bindings.
80  *                Abstract names are sequences of bytes (not zero terminated)
 81  *                starting with a 0 byte, so that this name space does not
 82  *                intersect with BSD names.
83  */
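/*
 * Editorial sketch (not part of the original file): binding a socket in the
 * abstract namespace described above, as seen from userspace. The name
 * "example" and its length are arbitrary; the points of interest are the
 * leading 0 byte in sun_path and an address length that covers only the
 * family field plus the name bytes (no trailing NUL).
 *
 *   #include <stddef.h>
 *   #include <string.h>
 *   #include <sys/socket.h>
 *   #include <sys/un.h>
 *
 *   int fd = socket(AF_UNIX, SOCK_DGRAM, 0);
 *   struct sockaddr_un a;
 *   memset(&a, 0, sizeof(a));
 *   a.sun_family = AF_UNIX;
 *   a.sun_path[0] = 0;                        // abstract namespace marker
 *   memcpy(a.sun_path + 1, "example", 7);     // name bytes, not 0-terminated
 *   bind(fd, (struct sockaddr *)&a,
 *        offsetof(struct sockaddr_un, sun_path) + 1 + 7);
 */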
84
85 #include <linux/module.h>
86 #include <linux/kernel.h>
87 #include <linux/signal.h>
88 #include <linux/sched.h>
89 #include <linux/errno.h>
90 #include <linux/string.h>
91 #include <linux/stat.h>
92 #include <linux/dcache.h>
93 #include <linux/namei.h>
94 #include <linux/socket.h>
95 #include <linux/un.h>
96 #include <linux/fcntl.h>
97 #include <linux/termios.h>
98 #include <linux/sockios.h>
99 #include <linux/net.h>
100 #include <linux/in.h>
101 #include <linux/fs.h>
102 #include <linux/slab.h>
103 #include <asm/uaccess.h>
104 #include <linux/skbuff.h>
105 #include <linux/netdevice.h>
106 #include <net/sock.h>
107 #include <net/tcp_states.h>
108 #include <net/af_unix.h>
109 #include <linux/proc_fs.h>
110 #include <linux/seq_file.h>
111 #include <net/scm.h>
112 #include <linux/init.h>
113 #include <linux/poll.h>
114 #include <linux/rtnetlink.h>
115 #include <linux/mount.h>
116 #include <net/checksum.h>
117 #include <linux/security.h>
118
119 int sysctl_unix_max_dgram_qlen __read_mostly = 10;
120
121 static struct hlist_head unix_socket_table[UNIX_HASH_SIZE + 1];
122 static DEFINE_SPINLOCK(unix_table_lock);
123 static atomic_t unix_nr_socks = ATOMIC_INIT(0);
124
125 #define unix_sockets_unbound    (&unix_socket_table[UNIX_HASH_SIZE])
126
127 #define UNIX_ABSTRACT(sk)       (unix_sk(sk)->addr->hash != UNIX_HASH_SIZE)
128
129 static struct sock *first_unix_socket(int *i)
130 {
131         for (*i = 0; *i <= UNIX_HASH_SIZE; (*i)++) {
132                 if (!hlist_empty(&unix_socket_table[*i]))
133                         return __sk_head(&unix_socket_table[*i]);
134         }
135         return NULL;
136 }
137
138 static struct sock *next_unix_socket(int *i, struct sock *s)
139 {
140         struct sock *next = sk_next(s);
141         /* More in this chain? */
142         if (next)
143                 return next;
144         /* Look for next non-empty chain. */
145         for ((*i)++; *i <= UNIX_HASH_SIZE; (*i)++) {
146                 if (!hlist_empty(&unix_socket_table[*i]))
147                         return __sk_head(&unix_socket_table[*i]);
148         }
149         return NULL;
150 }
151
152 #define forall_unix_sockets(i, s) \
153         for (s = first_unix_socket(&(i)); s; s = next_unix_socket(&(i),(s)))
154
155 #ifdef CONFIG_SECURITY_NETWORK
156 static void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
157 {
158         memcpy(UNIXSID(skb), &scm->secid, sizeof(u32));
159 }
160
161 static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
162 {
163         scm->secid = *UNIXSID(skb);
164 }
165 #else
166 static inline void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
167 { }
168
169 static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
170 { }
171 #endif /* CONFIG_SECURITY_NETWORK */
172
173 /*
174  *  SMP locking strategy:
175  *    hash table is protected with spinlock unix_table_lock
176  *    each socket state is protected by separate rwlock.
177  */
178
179 static inline unsigned unix_hash_fold(__wsum n)
180 {
181         unsigned hash = (__force unsigned)n;
182         hash ^= hash>>16;
183         hash ^= hash>>8;
184         return hash&(UNIX_HASH_SIZE-1);
185 }
186
187 #define unix_peer(sk) (unix_sk(sk)->peer)
188
189 static inline int unix_our_peer(struct sock *sk, struct sock *osk)
190 {
191         return unix_peer(osk) == sk;
192 }
193
194 static inline int unix_may_send(struct sock *sk, struct sock *osk)
195 {
196         return (unix_peer(osk) == NULL || unix_our_peer(sk, osk));
197 }
198
199 static struct sock *unix_peer_get(struct sock *s)
200 {
201         struct sock *peer;
202
203         unix_state_lock(s);
204         peer = unix_peer(s);
205         if (peer)
206                 sock_hold(peer);
207         unix_state_unlock(s);
208         return peer;
209 }
210
211 static inline void unix_release_addr(struct unix_address *addr)
212 {
213         if (atomic_dec_and_test(&addr->refcnt))
214                 kfree(addr);
215 }
216
217 /*
218  *      Check unix socket name:
 219  *              - should not be zero length.
 220  *              - if it does not start with a zero byte, it should be NULL terminated (FS object)
 221  *              - if it starts with a zero byte, it is an abstract name.
222  */
223
224 static int unix_mkname(struct sockaddr_un * sunaddr, int len, unsigned *hashp)
225 {
226         if (len <= sizeof(short) || len > sizeof(*sunaddr))
227                 return -EINVAL;
228         if (!sunaddr || sunaddr->sun_family != AF_UNIX)
229                 return -EINVAL;
230         if (sunaddr->sun_path[0]) {
231                 /*
232                  * This may look like an off by one error but it is a bit more
233                  * subtle. 108 is the longest valid AF_UNIX path for a binding.
 234                  * sun_path[108] doesn't as such exist.  However, in kernel space
235                  * we are guaranteed that it is a valid memory location in our
236                  * kernel address buffer.
237                  */
238                 ((char *)sunaddr)[len]=0;
239                 len = strlen(sunaddr->sun_path)+1+sizeof(short);
240                 return len;
241         }
242
243         *hashp = unix_hash_fold(csum_partial((char*)sunaddr, len, 0));
244         return len;
245 }
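/*
 * Editorial note, worked example: for a filesystem binding with
 * sun_family == AF_UNIX and sun_path == "/tmp/x" (a hypothetical path),
 * unix_mkname() NUL-terminates the copy and returns
 * strlen("/tmp/x") + 1 + sizeof(short) = 6 + 1 + 2 = 9 bytes.
 * For an abstract name (sun_path[0] == 0) the caller's len is returned
 * unchanged and *hashp is filled from a checksum of the whole address.
 */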
246
247 static void __unix_remove_socket(struct sock *sk)
248 {
249         sk_del_node_init(sk);
250 }
251
252 static void __unix_insert_socket(struct hlist_head *list, struct sock *sk)
253 {
254         BUG_TRAP(sk_unhashed(sk));
255         sk_add_node(sk, list);
256 }
257
258 static inline void unix_remove_socket(struct sock *sk)
259 {
260         spin_lock(&unix_table_lock);
261         __unix_remove_socket(sk);
262         spin_unlock(&unix_table_lock);
263 }
264
265 static inline void unix_insert_socket(struct hlist_head *list, struct sock *sk)
266 {
267         spin_lock(&unix_table_lock);
268         __unix_insert_socket(list, sk);
269         spin_unlock(&unix_table_lock);
270 }
271
272 static struct sock *__unix_find_socket_byname(struct sockaddr_un *sunname,
273                                               int len, int type, unsigned hash)
274 {
275         struct sock *s;
276         struct hlist_node *node;
277
278         sk_for_each(s, node, &unix_socket_table[hash ^ type]) {
279                 struct unix_sock *u = unix_sk(s);
280
281                 if (u->addr->len == len &&
282                     !memcmp(u->addr->name, sunname, len))
283                         goto found;
284         }
285         s = NULL;
286 found:
287         return s;
288 }
289
290 static inline struct sock *unix_find_socket_byname(struct sockaddr_un *sunname,
291                                                    int len, int type,
292                                                    unsigned hash)
293 {
294         struct sock *s;
295
296         spin_lock(&unix_table_lock);
297         s = __unix_find_socket_byname(sunname, len, type, hash);
298         if (s)
299                 sock_hold(s);
300         spin_unlock(&unix_table_lock);
301         return s;
302 }
303
304 static struct sock *unix_find_socket_byinode(struct inode *i)
305 {
306         struct sock *s;
307         struct hlist_node *node;
308
309         spin_lock(&unix_table_lock);
310         sk_for_each(s, node,
311                     &unix_socket_table[i->i_ino & (UNIX_HASH_SIZE - 1)]) {
312                 struct dentry *dentry = unix_sk(s)->dentry;
313
314                 if(dentry && dentry->d_inode == i)
315                 {
316                         sock_hold(s);
317                         goto found;
318                 }
319         }
320         s = NULL;
321 found:
322         spin_unlock(&unix_table_lock);
323         return s;
324 }
325
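/*
 * Editorial note: a socket counts as writable while the memory charged to
 * sk_wmem_alloc stays below a quarter of sk_sndbuf (hence the << 2 below).
 */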
326 static inline int unix_writable(struct sock *sk)
327 {
328         return (atomic_read(&sk->sk_wmem_alloc) << 2) <= sk->sk_sndbuf;
329 }
330
331 static void unix_write_space(struct sock *sk)
332 {
333         read_lock(&sk->sk_callback_lock);
334         if (unix_writable(sk)) {
335                 if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
336                         wake_up_interruptible(sk->sk_sleep);
337                 sk_wake_async(sk, 2, POLL_OUT);
338         }
339         read_unlock(&sk->sk_callback_lock);
340 }
341
 342 /* When a dgram socket disconnects (or changes its peer), we clear its receive
 343  * queue of packets that arrived from the previous peer. First, this allows us to
 344  * do flow control based only on wmem_alloc; second, an sk connected to a peer
 345  * may receive messages only from that peer. */
346 static void unix_dgram_disconnected(struct sock *sk, struct sock *other)
347 {
348         if (!skb_queue_empty(&sk->sk_receive_queue)) {
349                 skb_queue_purge(&sk->sk_receive_queue);
350                 wake_up_interruptible_all(&unix_sk(sk)->peer_wait);
351
 352                 /* If one link of a bidirectional dgram pipe is disconnected,
 353                  * we signal an error. Messages are lost. Do not do this
 354                  * when the peer was not connected to us.
 355                  */
356                 if (!sock_flag(other, SOCK_DEAD) && unix_peer(other) == sk) {
357                         other->sk_err = ECONNRESET;
358                         other->sk_error_report(other);
359                 }
360         }
361 }
362
363 static void unix_sock_destructor(struct sock *sk)
364 {
365         struct unix_sock *u = unix_sk(sk);
366
367         skb_queue_purge(&sk->sk_receive_queue);
368
369         BUG_TRAP(!atomic_read(&sk->sk_wmem_alloc));
370         BUG_TRAP(sk_unhashed(sk));
371         BUG_TRAP(!sk->sk_socket);
372         if (!sock_flag(sk, SOCK_DEAD)) {
373                 printk("Attempt to release alive unix socket: %p\n", sk);
374                 return;
375         }
376
377         if (u->addr)
378                 unix_release_addr(u->addr);
379
380         atomic_dec(&unix_nr_socks);
381 #ifdef UNIX_REFCNT_DEBUG
382         printk(KERN_DEBUG "UNIX %p is destroyed, %d are still alive.\n", sk, atomic_read(&unix_nr_socks));
383 #endif
384 }
385
386 static int unix_release_sock (struct sock *sk, int embrion)
387 {
388         struct unix_sock *u = unix_sk(sk);
389         struct dentry *dentry;
390         struct vfsmount *mnt;
391         struct sock *skpair;
392         struct sk_buff *skb;
393         int state;
394
395         unix_remove_socket(sk);
396
397         /* Clear state */
398         unix_state_lock(sk);
399         sock_orphan(sk);
400         sk->sk_shutdown = SHUTDOWN_MASK;
401         dentry       = u->dentry;
402         u->dentry    = NULL;
403         mnt          = u->mnt;
404         u->mnt       = NULL;
405         state = sk->sk_state;
406         sk->sk_state = TCP_CLOSE;
407         unix_state_unlock(sk);
408
409         wake_up_interruptible_all(&u->peer_wait);
410
411         skpair=unix_peer(sk);
412
413         if (skpair!=NULL) {
414                 if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) {
415                         unix_state_lock(skpair);
416                         /* No more writes */
417                         skpair->sk_shutdown = SHUTDOWN_MASK;
418                         if (!skb_queue_empty(&sk->sk_receive_queue) || embrion)
419                                 skpair->sk_err = ECONNRESET;
420                         unix_state_unlock(skpair);
421                         skpair->sk_state_change(skpair);
422                         read_lock(&skpair->sk_callback_lock);
423                         sk_wake_async(skpair,1,POLL_HUP);
424                         read_unlock(&skpair->sk_callback_lock);
425                 }
426                 sock_put(skpair); /* It may now die */
427                 unix_peer(sk) = NULL;
428         }
429
430         /* Try to flush out this socket. Throw out buffers at least */
431
432         while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
433                 if (state==TCP_LISTEN)
434                         unix_release_sock(skb->sk, 1);
435                 /* passed fds are erased in the kfree_skb hook        */
436                 kfree_skb(skb);
437         }
438
439         if (dentry) {
440                 dput(dentry);
441                 mntput(mnt);
442         }
443
444         sock_put(sk);
445
446         /* ---- Socket is dead now and most probably destroyed ---- */
447
448         /*
 449          * Fixme: BSD difference: In BSD all sockets connected to us get
450          *        ECONNRESET and we die on the spot. In Linux we behave
451          *        like files and pipes do and wait for the last
452          *        dereference.
453          *
454          * Can't we simply set sock->err?
455          *
 456          *        What does the above comment talk about? --ANK(980817)
457          */
458
459         if (atomic_read(&unix_tot_inflight))
460                 unix_gc();              /* Garbage collect fds */
461
462         return 0;
463 }
464
465 static int unix_listen(struct socket *sock, int backlog)
466 {
467         int err;
468         struct sock *sk = sock->sk;
469         struct unix_sock *u = unix_sk(sk);
470
471         err = -EOPNOTSUPP;
472         if (sock->type!=SOCK_STREAM && sock->type!=SOCK_SEQPACKET)
473                 goto out;                       /* Only stream/seqpacket sockets accept */
474         err = -EINVAL;
475         if (!u->addr)
476                 goto out;                       /* No listens on an unbound socket */
477         unix_state_lock(sk);
478         if (sk->sk_state != TCP_CLOSE && sk->sk_state != TCP_LISTEN)
479                 goto out_unlock;
480         if (backlog > sk->sk_max_ack_backlog)
481                 wake_up_interruptible_all(&u->peer_wait);
482         sk->sk_max_ack_backlog  = backlog;
483         sk->sk_state            = TCP_LISTEN;
484         /* set credentials so connect can copy them */
485         sk->sk_peercred.pid     = current->tgid;
486         sk->sk_peercred.uid     = current->euid;
487         sk->sk_peercred.gid     = current->egid;
488         err = 0;
489
490 out_unlock:
491         unix_state_unlock(sk);
492 out:
493         return err;
494 }
495
496 static int unix_release(struct socket *);
497 static int unix_bind(struct socket *, struct sockaddr *, int);
498 static int unix_stream_connect(struct socket *, struct sockaddr *,
499                                int addr_len, int flags);
500 static int unix_socketpair(struct socket *, struct socket *);
501 static int unix_accept(struct socket *, struct socket *, int);
502 static int unix_getname(struct socket *, struct sockaddr *, int *, int);
503 static unsigned int unix_poll(struct file *, struct socket *, poll_table *);
504 static int unix_ioctl(struct socket *, unsigned int, unsigned long);
505 static int unix_shutdown(struct socket *, int);
506 static int unix_stream_sendmsg(struct kiocb *, struct socket *,
507                                struct msghdr *, size_t);
508 static int unix_stream_recvmsg(struct kiocb *, struct socket *,
509                                struct msghdr *, size_t, int);
510 static int unix_dgram_sendmsg(struct kiocb *, struct socket *,
511                               struct msghdr *, size_t);
512 static int unix_dgram_recvmsg(struct kiocb *, struct socket *,
513                               struct msghdr *, size_t, int);
514 static int unix_dgram_connect(struct socket *, struct sockaddr *,
515                               int, int);
516 static int unix_seqpacket_sendmsg(struct kiocb *, struct socket *,
517                                   struct msghdr *, size_t);
518
519 static const struct proto_ops unix_stream_ops = {
520         .family =       PF_UNIX,
521         .owner =        THIS_MODULE,
522         .release =      unix_release,
523         .bind =         unix_bind,
524         .connect =      unix_stream_connect,
525         .socketpair =   unix_socketpair,
526         .accept =       unix_accept,
527         .getname =      unix_getname,
528         .poll =         unix_poll,
529         .ioctl =        unix_ioctl,
530         .listen =       unix_listen,
531         .shutdown =     unix_shutdown,
532         .setsockopt =   sock_no_setsockopt,
533         .getsockopt =   sock_no_getsockopt,
534         .sendmsg =      unix_stream_sendmsg,
535         .recvmsg =      unix_stream_recvmsg,
536         .mmap =         sock_no_mmap,
537         .sendpage =     sock_no_sendpage,
538 };
539
540 static const struct proto_ops unix_dgram_ops = {
541         .family =       PF_UNIX,
542         .owner =        THIS_MODULE,
543         .release =      unix_release,
544         .bind =         unix_bind,
545         .connect =      unix_dgram_connect,
546         .socketpair =   unix_socketpair,
547         .accept =       sock_no_accept,
548         .getname =      unix_getname,
549         .poll =         datagram_poll,
550         .ioctl =        unix_ioctl,
551         .listen =       sock_no_listen,
552         .shutdown =     unix_shutdown,
553         .setsockopt =   sock_no_setsockopt,
554         .getsockopt =   sock_no_getsockopt,
555         .sendmsg =      unix_dgram_sendmsg,
556         .recvmsg =      unix_dgram_recvmsg,
557         .mmap =         sock_no_mmap,
558         .sendpage =     sock_no_sendpage,
559 };
560
561 static const struct proto_ops unix_seqpacket_ops = {
562         .family =       PF_UNIX,
563         .owner =        THIS_MODULE,
564         .release =      unix_release,
565         .bind =         unix_bind,
566         .connect =      unix_stream_connect,
567         .socketpair =   unix_socketpair,
568         .accept =       unix_accept,
569         .getname =      unix_getname,
570         .poll =         datagram_poll,
571         .ioctl =        unix_ioctl,
572         .listen =       unix_listen,
573         .shutdown =     unix_shutdown,
574         .setsockopt =   sock_no_setsockopt,
575         .getsockopt =   sock_no_getsockopt,
576         .sendmsg =      unix_seqpacket_sendmsg,
577         .recvmsg =      unix_dgram_recvmsg,
578         .mmap =         sock_no_mmap,
579         .sendpage =     sock_no_sendpage,
580 };
581
582 static struct proto unix_proto = {
583         .name     = "UNIX",
584         .owner    = THIS_MODULE,
585         .obj_size = sizeof(struct unix_sock),
586 };
587
588 /*
589  * AF_UNIX sockets do not interact with hardware, hence they
 590  * don't trigger interrupts - so it's safe for them to have
591  * bh-unsafe locking for their sk_receive_queue.lock. Split off
592  * this special lock-class by reinitializing the spinlock key:
593  */
594 static struct lock_class_key af_unix_sk_receive_queue_lock_key;
595
596 static struct sock * unix_create1(struct socket *sock)
597 {
598         struct sock *sk = NULL;
599         struct unix_sock *u;
600
601         if (atomic_read(&unix_nr_socks) >= 2*get_max_files())
602                 goto out;
603
604         sk = sk_alloc(PF_UNIX, GFP_KERNEL, &unix_proto, 1);
605         if (!sk)
606                 goto out;
607
608         atomic_inc(&unix_nr_socks);
609
610         sock_init_data(sock,sk);
611         lockdep_set_class(&sk->sk_receive_queue.lock,
612                                 &af_unix_sk_receive_queue_lock_key);
613
614         sk->sk_write_space      = unix_write_space;
615         sk->sk_max_ack_backlog  = sysctl_unix_max_dgram_qlen;
616         sk->sk_destruct         = unix_sock_destructor;
617         u         = unix_sk(sk);
618         u->dentry = NULL;
619         u->mnt    = NULL;
620         spin_lock_init(&u->lock);
621         atomic_set(&u->inflight, 0);
622         INIT_LIST_HEAD(&u->link);
623         mutex_init(&u->readlock); /* single task reading lock */
624         init_waitqueue_head(&u->peer_wait);
625         unix_insert_socket(unix_sockets_unbound, sk);
626 out:
627         return sk;
628 }
629
630 static int unix_create(struct socket *sock, int protocol)
631 {
632         if (protocol && protocol != PF_UNIX)
633                 return -EPROTONOSUPPORT;
634
635         sock->state = SS_UNCONNECTED;
636
637         switch (sock->type) {
638         case SOCK_STREAM:
639                 sock->ops = &unix_stream_ops;
640                 break;
641                 /*
642                  *      Believe it or not BSD has AF_UNIX, SOCK_RAW though
643                  *      nothing uses it.
644                  */
645         case SOCK_RAW:
646                 sock->type=SOCK_DGRAM;
647         case SOCK_DGRAM:
648                 sock->ops = &unix_dgram_ops;
649                 break;
650         case SOCK_SEQPACKET:
651                 sock->ops = &unix_seqpacket_ops;
652                 break;
653         default:
654                 return -ESOCKTNOSUPPORT;
655         }
656
657         return unix_create1(sock) ? 0 : -ENOMEM;
658 }
659
660 static int unix_release(struct socket *sock)
661 {
662         struct sock *sk = sock->sk;
663
664         if (!sk)
665                 return 0;
666
667         sock->sk = NULL;
668
669         return unix_release_sock (sk, 0);
670 }
671
672 static int unix_autobind(struct socket *sock)
673 {
674         struct sock *sk = sock->sk;
675         struct unix_sock *u = unix_sk(sk);
676         static u32 ordernum = 1;
677         struct unix_address * addr;
678         int err;
679
680         mutex_lock(&u->readlock);
681
682         err = 0;
683         if (u->addr)
684                 goto out;
685
686         err = -ENOMEM;
687         addr = kzalloc(sizeof(*addr) + sizeof(short) + 16, GFP_KERNEL);
688         if (!addr)
689                 goto out;
690
691         addr->name->sun_family = AF_UNIX;
692         atomic_set(&addr->refcnt, 1);
693
694 retry:
695         addr->len = sprintf(addr->name->sun_path+1, "%05x", ordernum) + 1 + sizeof(short);
696         addr->hash = unix_hash_fold(csum_partial((void*)addr->name, addr->len, 0));
697
698         spin_lock(&unix_table_lock);
699         ordernum = (ordernum+1)&0xFFFFF;
700
701         if (__unix_find_socket_byname(addr->name, addr->len, sock->type,
702                                       addr->hash)) {
703                 spin_unlock(&unix_table_lock);
 704                 /* Sanity yield. It is an unusual case, but yet... */
705                 if (!(ordernum&0xFF))
706                         yield();
707                 goto retry;
708         }
709         addr->hash ^= sk->sk_type;
710
711         __unix_remove_socket(sk);
712         u->addr = addr;
713         __unix_insert_socket(&unix_socket_table[addr->hash], sk);
714         spin_unlock(&unix_table_lock);
715         err = 0;
716
717 out:    mutex_unlock(&u->readlock);
718         return err;
719 }
720
721 static struct sock *unix_find_other(struct sockaddr_un *sunname, int len,
722                                     int type, unsigned hash, int *error)
723 {
724         struct sock *u;
725         struct nameidata nd;
726         int err = 0;
727
728         if (sunname->sun_path[0]) {
729                 err = path_lookup(sunname->sun_path, LOOKUP_FOLLOW, &nd);
730                 if (err)
731                         goto fail;
732                 err = vfs_permission(&nd, MAY_WRITE);
733                 if (err)
734                         goto put_fail;
735
736                 err = -ECONNREFUSED;
737                 if (!S_ISSOCK(nd.dentry->d_inode->i_mode))
738                         goto put_fail;
739                 u=unix_find_socket_byinode(nd.dentry->d_inode);
740                 if (!u)
741                         goto put_fail;
742
743                 if (u->sk_type == type)
744                         touch_atime(nd.mnt, nd.dentry);
745
746                 path_release(&nd);
747
748                 err=-EPROTOTYPE;
749                 if (u->sk_type != type) {
750                         sock_put(u);
751                         goto fail;
752                 }
753         } else {
754                 err = -ECONNREFUSED;
755                 u=unix_find_socket_byname(sunname, len, type, hash);
756                 if (u) {
757                         struct dentry *dentry;
758                         dentry = unix_sk(u)->dentry;
759                         if (dentry)
760                                 touch_atime(unix_sk(u)->mnt, dentry);
761                 } else
762                         goto fail;
763         }
764         return u;
765
766 put_fail:
767         path_release(&nd);
768 fail:
769         *error=err;
770         return NULL;
771 }
772
773
774 static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
775 {
776         struct sock *sk = sock->sk;
777         struct unix_sock *u = unix_sk(sk);
778         struct sockaddr_un *sunaddr=(struct sockaddr_un *)uaddr;
779         struct dentry * dentry = NULL;
780         struct nameidata nd;
781         int err;
782         unsigned hash;
783         struct unix_address *addr;
784         struct hlist_head *list;
785
786         err = -EINVAL;
787         if (sunaddr->sun_family != AF_UNIX)
788                 goto out;
789
790         if (addr_len==sizeof(short)) {
791                 err = unix_autobind(sock);
792                 goto out;
793         }
794
795         err = unix_mkname(sunaddr, addr_len, &hash);
796         if (err < 0)
797                 goto out;
798         addr_len = err;
799
800         mutex_lock(&u->readlock);
801
802         err = -EINVAL;
803         if (u->addr)
804                 goto out_up;
805
806         err = -ENOMEM;
807         addr = kmalloc(sizeof(*addr)+addr_len, GFP_KERNEL);
808         if (!addr)
809                 goto out_up;
810
811         memcpy(addr->name, sunaddr, addr_len);
812         addr->len = addr_len;
813         addr->hash = hash ^ sk->sk_type;
814         atomic_set(&addr->refcnt, 1);
815
816         if (sunaddr->sun_path[0]) {
817                 unsigned int mode;
818                 err = 0;
819                 /*
820                  * Get the parent directory, calculate the hash for last
821                  * component.
822                  */
823                 err = path_lookup(sunaddr->sun_path, LOOKUP_PARENT, &nd);
824                 if (err)
825                         goto out_mknod_parent;
826
827                 dentry = lookup_create(&nd, 0);
828                 err = PTR_ERR(dentry);
829                 if (IS_ERR(dentry))
830                         goto out_mknod_unlock;
831
832                 /*
833                  * All right, let's create it.
834                  */
835                 mode = S_IFSOCK |
836                        (SOCK_INODE(sock)->i_mode & ~current->fs->umask);
837                 err = vfs_mknod(nd.dentry->d_inode, dentry, mode, 0);
838                 if (err)
839                         goto out_mknod_dput;
840                 mutex_unlock(&nd.dentry->d_inode->i_mutex);
841                 dput(nd.dentry);
842                 nd.dentry = dentry;
843
844                 addr->hash = UNIX_HASH_SIZE;
845         }
846
847         spin_lock(&unix_table_lock);
848
849         if (!sunaddr->sun_path[0]) {
850                 err = -EADDRINUSE;
851                 if (__unix_find_socket_byname(sunaddr, addr_len,
852                                               sk->sk_type, hash)) {
853                         unix_release_addr(addr);
854                         goto out_unlock;
855                 }
856
857                 list = &unix_socket_table[addr->hash];
858         } else {
859                 list = &unix_socket_table[dentry->d_inode->i_ino & (UNIX_HASH_SIZE-1)];
860                 u->dentry = nd.dentry;
861                 u->mnt    = nd.mnt;
862         }
863
864         err = 0;
865         __unix_remove_socket(sk);
866         u->addr = addr;
867         __unix_insert_socket(list, sk);
868
869 out_unlock:
870         spin_unlock(&unix_table_lock);
871 out_up:
872         mutex_unlock(&u->readlock);
873 out:
874         return err;
875
876 out_mknod_dput:
877         dput(dentry);
878 out_mknod_unlock:
879         mutex_unlock(&nd.dentry->d_inode->i_mutex);
880         path_release(&nd);
881 out_mknod_parent:
882         if (err==-EEXIST)
883                 err=-EADDRINUSE;
884         unix_release_addr(addr);
885         goto out_up;
886 }
887
888 static void unix_state_double_lock(struct sock *sk1, struct sock *sk2)
889 {
890         if (unlikely(sk1 == sk2) || !sk2) {
891                 unix_state_lock(sk1);
892                 return;
893         }
894         if (sk1 < sk2) {
895                 unix_state_lock(sk1);
896                 unix_state_lock_nested(sk2);
897         } else {
898                 unix_state_lock(sk2);
899                 unix_state_lock_nested(sk1);
900         }
901 }
902
903 static void unix_state_double_unlock(struct sock *sk1, struct sock *sk2)
904 {
905         if (unlikely(sk1 == sk2) || !sk2) {
906                 unix_state_unlock(sk1);
907                 return;
908         }
909         unix_state_unlock(sk1);
910         unix_state_unlock(sk2);
911 }
912
913 static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr,
914                               int alen, int flags)
915 {
916         struct sock *sk = sock->sk;
917         struct sockaddr_un *sunaddr=(struct sockaddr_un*)addr;
918         struct sock *other;
919         unsigned hash;
920         int err;
921
922         if (addr->sa_family != AF_UNSPEC) {
923                 err = unix_mkname(sunaddr, alen, &hash);
924                 if (err < 0)
925                         goto out;
926                 alen = err;
927
928                 if (test_bit(SOCK_PASSCRED, &sock->flags) &&
929                     !unix_sk(sk)->addr && (err = unix_autobind(sock)) != 0)
930                         goto out;
931
932 restart:
933                 other=unix_find_other(sunaddr, alen, sock->type, hash, &err);
934                 if (!other)
935                         goto out;
936
937                 unix_state_double_lock(sk, other);
938
939                 /* Apparently VFS overslept socket death. Retry. */
940                 if (sock_flag(other, SOCK_DEAD)) {
941                         unix_state_double_unlock(sk, other);
942                         sock_put(other);
943                         goto restart;
944                 }
945
946                 err = -EPERM;
947                 if (!unix_may_send(sk, other))
948                         goto out_unlock;
949
950                 err = security_unix_may_send(sk->sk_socket, other->sk_socket);
951                 if (err)
952                         goto out_unlock;
953
954         } else {
955                 /*
956                  *      1003.1g breaking connected state with AF_UNSPEC
957                  */
958                 other = NULL;
959                 unix_state_double_lock(sk, other);
960         }
961
962         /*
963          * If it was connected, reconnect.
964          */
965         if (unix_peer(sk)) {
966                 struct sock *old_peer = unix_peer(sk);
967                 unix_peer(sk)=other;
968                 unix_state_double_unlock(sk, other);
969
970                 if (other != old_peer)
971                         unix_dgram_disconnected(sk, old_peer);
972                 sock_put(old_peer);
973         } else {
974                 unix_peer(sk)=other;
975                 unix_state_double_unlock(sk, other);
976         }
977         return 0;
978
979 out_unlock:
980         unix_state_double_unlock(sk, other);
981         sock_put(other);
982 out:
983         return err;
984 }
985
986 static long unix_wait_for_peer(struct sock *other, long timeo)
987 {
988         struct unix_sock *u = unix_sk(other);
989         int sched;
990         DEFINE_WAIT(wait);
991
992         prepare_to_wait_exclusive(&u->peer_wait, &wait, TASK_INTERRUPTIBLE);
993
994         sched = !sock_flag(other, SOCK_DEAD) &&
995                 !(other->sk_shutdown & RCV_SHUTDOWN) &&
996                 (skb_queue_len(&other->sk_receive_queue) >
997                  other->sk_max_ack_backlog);
998
999         unix_state_unlock(other);
1000
1001         if (sched)
1002                 timeo = schedule_timeout(timeo);
1003
1004         finish_wait(&u->peer_wait, &wait);
1005         return timeo;
1006 }
1007
1008 static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
1009                                int addr_len, int flags)
1010 {
1011         struct sockaddr_un *sunaddr=(struct sockaddr_un *)uaddr;
1012         struct sock *sk = sock->sk;
1013         struct unix_sock *u = unix_sk(sk), *newu, *otheru;
1014         struct sock *newsk = NULL;
1015         struct sock *other = NULL;
1016         struct sk_buff *skb = NULL;
1017         unsigned hash;
1018         int st;
1019         int err;
1020         long timeo;
1021
1022         err = unix_mkname(sunaddr, addr_len, &hash);
1023         if (err < 0)
1024                 goto out;
1025         addr_len = err;
1026
1027         if (test_bit(SOCK_PASSCRED, &sock->flags)
1028                 && !u->addr && (err = unix_autobind(sock)) != 0)
1029                 goto out;
1030
1031         timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);
1032
1033         /* First of all allocate resources.
 1034            If we do it after the state is locked,
 1035            we will have to recheck everything again in any case.
1036          */
1037
1038         err = -ENOMEM;
1039
1040         /* create new sock for complete connection */
1041         newsk = unix_create1(NULL);
1042         if (newsk == NULL)
1043                 goto out;
1044
1045         /* Allocate skb for sending to listening sock */
1046         skb = sock_wmalloc(newsk, 1, 0, GFP_KERNEL);
1047         if (skb == NULL)
1048                 goto out;
1049
1050 restart:
1051         /*  Find listening sock. */
1052         other = unix_find_other(sunaddr, addr_len, sk->sk_type, hash, &err);
1053         if (!other)
1054                 goto out;
1055
1056         /* Latch state of peer */
1057         unix_state_lock(other);
1058
1059         /* Apparently VFS overslept socket death. Retry. */
1060         if (sock_flag(other, SOCK_DEAD)) {
1061                 unix_state_unlock(other);
1062                 sock_put(other);
1063                 goto restart;
1064         }
1065
1066         err = -ECONNREFUSED;
1067         if (other->sk_state != TCP_LISTEN)
1068                 goto out_unlock;
1069
1070         if (skb_queue_len(&other->sk_receive_queue) >
1071             other->sk_max_ack_backlog) {
1072                 err = -EAGAIN;
1073                 if (!timeo)
1074                         goto out_unlock;
1075
1076                 timeo = unix_wait_for_peer(other, timeo);
1077
1078                 err = sock_intr_errno(timeo);
1079                 if (signal_pending(current))
1080                         goto out;
1081                 sock_put(other);
1082                 goto restart;
1083         }
1084
1085         /* Latch our state.
1086
 1087            This is a tricky place. We need to grab the write lock and cannot
 1088            drop the lock on the peer. It is dangerous because a deadlock is
 1089            possible. The connect-to-self case and simultaneous
 1090            attempts to connect are eliminated by checking the socket
 1091            state. other is TCP_LISTEN; if sk were TCP_LISTEN we would have
 1092            checked this before attempting to grab the lock.
 1093
 1094            Well, and we have to recheck the state after the socket is locked.
1095          */
1096         st = sk->sk_state;
1097
1098         switch (st) {
1099         case TCP_CLOSE:
1100                 /* This is ok... continue with connect */
1101                 break;
1102         case TCP_ESTABLISHED:
1103                 /* Socket is already connected */
1104                 err = -EISCONN;
1105                 goto out_unlock;
1106         default:
1107                 err = -EINVAL;
1108                 goto out_unlock;
1109         }
1110
1111         unix_state_lock_nested(sk);
1112
1113         if (sk->sk_state != st) {
1114                 unix_state_unlock(sk);
1115                 unix_state_unlock(other);
1116                 sock_put(other);
1117                 goto restart;
1118         }
1119
1120         err = security_unix_stream_connect(sock, other->sk_socket, newsk);
1121         if (err) {
1122                 unix_state_unlock(sk);
1123                 goto out_unlock;
1124         }
1125
 1126         /* The way is open! Quickly set all the necessary fields... */
1127
1128         sock_hold(sk);
1129         unix_peer(newsk)        = sk;
1130         newsk->sk_state         = TCP_ESTABLISHED;
1131         newsk->sk_type          = sk->sk_type;
1132         newsk->sk_peercred.pid  = current->tgid;
1133         newsk->sk_peercred.uid  = current->euid;
1134         newsk->sk_peercred.gid  = current->egid;
1135         newu = unix_sk(newsk);
1136         newsk->sk_sleep         = &newu->peer_wait;
1137         otheru = unix_sk(other);
1138
 1139         /* copy address information from listening to new sock */
1140         if (otheru->addr) {
1141                 atomic_inc(&otheru->addr->refcnt);
1142                 newu->addr = otheru->addr;
1143         }
1144         if (otheru->dentry) {
1145                 newu->dentry    = dget(otheru->dentry);
1146                 newu->mnt       = mntget(otheru->mnt);
1147         }
1148
1149         /* Set credentials */
1150         sk->sk_peercred = other->sk_peercred;
1151
1152         sock->state     = SS_CONNECTED;
1153         sk->sk_state    = TCP_ESTABLISHED;
1154         sock_hold(newsk);
1155
1156         smp_mb__after_atomic_inc();     /* sock_hold() does an atomic_inc() */
1157         unix_peer(sk)   = newsk;
1158
1159         unix_state_unlock(sk);
1160
 1161         /* take ten and send info to listening sock */
1162         spin_lock(&other->sk_receive_queue.lock);
1163         __skb_queue_tail(&other->sk_receive_queue, skb);
1164         spin_unlock(&other->sk_receive_queue.lock);
1165         unix_state_unlock(other);
1166         other->sk_data_ready(other, 0);
1167         sock_put(other);
1168         return 0;
1169
1170 out_unlock:
1171         if (other)
1172                 unix_state_unlock(other);
1173
1174 out:
1175         if (skb)
1176                 kfree_skb(skb);
1177         if (newsk)
1178                 unix_release_sock(newsk, 0);
1179         if (other)
1180                 sock_put(other);
1181         return err;
1182 }
1183
1184 static int unix_socketpair(struct socket *socka, struct socket *sockb)
1185 {
1186         struct sock *ska=socka->sk, *skb = sockb->sk;
1187
1188         /* Join our sockets back to back */
1189         sock_hold(ska);
1190         sock_hold(skb);
1191         unix_peer(ska)=skb;
1192         unix_peer(skb)=ska;
1193         ska->sk_peercred.pid = skb->sk_peercred.pid = current->tgid;
1194         ska->sk_peercred.uid = skb->sk_peercred.uid = current->euid;
1195         ska->sk_peercred.gid = skb->sk_peercred.gid = current->egid;
1196
1197         if (ska->sk_type != SOCK_DGRAM) {
1198                 ska->sk_state = TCP_ESTABLISHED;
1199                 skb->sk_state = TCP_ESTABLISHED;
1200                 socka->state  = SS_CONNECTED;
1201                 sockb->state  = SS_CONNECTED;
1202         }
1203         return 0;
1204 }
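/*
 * Editorial sketch (not part of the original file): the userspace view of
 * the back-to-back join performed above. Both descriptors come back already
 * connected, so no bind/listen/accept is involved.
 *
 *   #include <sys/socket.h>
 *   #include <unistd.h>
 *
 *   int sv[2];
 *   if (socketpair(AF_UNIX, SOCK_STREAM, 0, sv) == 0)
 *           write(sv[0], "hi", 2);      // becomes readable on sv[1]
 */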
1205
1206 static int unix_accept(struct socket *sock, struct socket *newsock, int flags)
1207 {
1208         struct sock *sk = sock->sk;
1209         struct sock *tsk;
1210         struct sk_buff *skb;
1211         int err;
1212
1213         err = -EOPNOTSUPP;
1214         if (sock->type!=SOCK_STREAM && sock->type!=SOCK_SEQPACKET)
1215                 goto out;
1216
1217         err = -EINVAL;
1218         if (sk->sk_state != TCP_LISTEN)
1219                 goto out;
1220
1221         /* If socket state is TCP_LISTEN it cannot change (for now...),
1222          * so that no locks are necessary.
1223          */
1224
1225         skb = skb_recv_datagram(sk, 0, flags&O_NONBLOCK, &err);
1226         if (!skb) {
1227                 /* This means receive shutdown. */
1228                 if (err == 0)
1229                         err = -EINVAL;
1230                 goto out;
1231         }
1232
1233         tsk = skb->sk;
1234         skb_free_datagram(sk, skb);
1235         wake_up_interruptible(&unix_sk(sk)->peer_wait);
1236
1237         /* attach accepted sock to socket */
1238         unix_state_lock(tsk);
1239         newsock->state = SS_CONNECTED;
1240         sock_graft(tsk, newsock);
1241         unix_state_unlock(tsk);
1242         return 0;
1243
1244 out:
1245         return err;
1246 }
1247
1248
1249 static int unix_getname(struct socket *sock, struct sockaddr *uaddr, int *uaddr_len, int peer)
1250 {
1251         struct sock *sk = sock->sk;
1252         struct unix_sock *u;
1253         struct sockaddr_un *sunaddr=(struct sockaddr_un *)uaddr;
1254         int err = 0;
1255
1256         if (peer) {
1257                 sk = unix_peer_get(sk);
1258
1259                 err = -ENOTCONN;
1260                 if (!sk)
1261                         goto out;
1262                 err = 0;
1263         } else {
1264                 sock_hold(sk);
1265         }
1266
1267         u = unix_sk(sk);
1268         unix_state_lock(sk);
1269         if (!u->addr) {
1270                 sunaddr->sun_family = AF_UNIX;
1271                 sunaddr->sun_path[0] = 0;
1272                 *uaddr_len = sizeof(short);
1273         } else {
1274                 struct unix_address *addr = u->addr;
1275
1276                 *uaddr_len = addr->len;
1277                 memcpy(sunaddr, addr->name, *uaddr_len);
1278         }
1279         unix_state_unlock(sk);
1280         sock_put(sk);
1281 out:
1282         return err;
1283 }
1284
1285 static void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb)
1286 {
1287         int i;
1288
1289         scm->fp = UNIXCB(skb).fp;
1290         skb->destructor = sock_wfree;
1291         UNIXCB(skb).fp = NULL;
1292
1293         for (i=scm->fp->count-1; i>=0; i--)
1294                 unix_notinflight(scm->fp->fp[i]);
1295 }
1296
1297 static void unix_destruct_fds(struct sk_buff *skb)
1298 {
1299         struct scm_cookie scm;
1300         memset(&scm, 0, sizeof(scm));
1301         unix_detach_fds(&scm, skb);
1302
1303         /* Alas, it calls VFS */
 1304         /* So fscking what? fput() has been SMP-safe since last summer */
1305         scm_destroy(&scm);
1306         sock_wfree(skb);
1307 }
1308
1309 static void unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
1310 {
1311         int i;
1312         for (i=scm->fp->count-1; i>=0; i--)
1313                 unix_inflight(scm->fp->fp[i]);
1314         UNIXCB(skb).fp = scm->fp;
1315         skb->destructor = unix_destruct_fds;
1316         scm->fp = NULL;
1317 }
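/*
 * Editorial sketch (not part of the original file): the userspace side of
 * the fd passing that unix_attach_fds()/unix_detach_fds() account for. A
 * descriptor travels as SCM_RIGHTS ancillary data; "sock" and "fd_to_pass"
 * are assumed to be an already connected AF_UNIX socket and the fd to send.
 *
 *   #include <string.h>
 *   #include <sys/socket.h>
 *   #include <sys/uio.h>
 *
 *   char cbuf[CMSG_SPACE(sizeof(int))];
 *   char dummy = 'x';
 *   struct iovec iov = { &dummy, 1 };
 *   struct msghdr msg;
 *   struct cmsghdr *cmsg;
 *
 *   memset(&msg, 0, sizeof(msg));
 *   msg.msg_iov = &iov;
 *   msg.msg_iovlen = 1;
 *   msg.msg_control = cbuf;
 *   msg.msg_controllen = sizeof(cbuf);
 *   cmsg = CMSG_FIRSTHDR(&msg);
 *   cmsg->cmsg_level = SOL_SOCKET;
 *   cmsg->cmsg_type = SCM_RIGHTS;
 *   cmsg->cmsg_len = CMSG_LEN(sizeof(int));
 *   memcpy(CMSG_DATA(cmsg), &fd_to_pass, sizeof(int));
 *   sendmsg(sock, &msg, 0);
 */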
1318
1319 /*
1320  *      Send AF_UNIX data.
1321  */
1322
1323 static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock,
1324                               struct msghdr *msg, size_t len)
1325 {
1326         struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
1327         struct sock *sk = sock->sk;
1328         struct unix_sock *u = unix_sk(sk);
1329         struct sockaddr_un *sunaddr=msg->msg_name;
1330         struct sock *other = NULL;
1331         int namelen = 0; /* fake GCC */
1332         int err;
1333         unsigned hash;
1334         struct sk_buff *skb;
1335         long timeo;
1336         struct scm_cookie tmp_scm;
1337
1338         if (NULL == siocb->scm)
1339                 siocb->scm = &tmp_scm;
1340         err = scm_send(sock, msg, siocb->scm);
1341         if (err < 0)
1342                 return err;
1343
1344         err = -EOPNOTSUPP;
1345         if (msg->msg_flags&MSG_OOB)
1346                 goto out;
1347
1348         if (msg->msg_namelen) {
1349                 err = unix_mkname(sunaddr, msg->msg_namelen, &hash);
1350                 if (err < 0)
1351                         goto out;
1352                 namelen = err;
1353         } else {
1354                 sunaddr = NULL;
1355                 err = -ENOTCONN;
1356                 other = unix_peer_get(sk);
1357                 if (!other)
1358                         goto out;
1359         }
1360
1361         if (test_bit(SOCK_PASSCRED, &sock->flags)
1362                 && !u->addr && (err = unix_autobind(sock)) != 0)
1363                 goto out;
1364
1365         err = -EMSGSIZE;
1366         if (len > sk->sk_sndbuf - 32)
1367                 goto out;
1368
1369         skb = sock_alloc_send_skb(sk, len, msg->msg_flags&MSG_DONTWAIT, &err);
1370         if (skb==NULL)
1371                 goto out;
1372
1373         memcpy(UNIXCREDS(skb), &siocb->scm->creds, sizeof(struct ucred));
1374         if (siocb->scm->fp)
1375                 unix_attach_fds(siocb->scm, skb);
1376         unix_get_secdata(siocb->scm, skb);
1377
1378         skb_reset_transport_header(skb);
1379         err = memcpy_fromiovec(skb_put(skb,len), msg->msg_iov, len);
1380         if (err)
1381                 goto out_free;
1382
1383         timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
1384
1385 restart:
1386         if (!other) {
1387                 err = -ECONNRESET;
1388                 if (sunaddr == NULL)
1389                         goto out_free;
1390
1391                 other = unix_find_other(sunaddr, namelen, sk->sk_type,
1392                                         hash, &err);
1393                 if (other==NULL)
1394                         goto out_free;
1395         }
1396
1397         unix_state_lock(other);
1398         err = -EPERM;
1399         if (!unix_may_send(sk, other))
1400                 goto out_unlock;
1401
1402         if (sock_flag(other, SOCK_DEAD)) {
1403                 /*
 1404                  *      Check with 1003.1g - what should the
 1405                  *      datagram error be?
1406                  */
1407                 unix_state_unlock(other);
1408                 sock_put(other);
1409
1410                 err = 0;
1411                 unix_state_lock(sk);
1412                 if (unix_peer(sk) == other) {
1413                         unix_peer(sk)=NULL;
1414                         unix_state_unlock(sk);
1415
1416                         unix_dgram_disconnected(sk, other);
1417                         sock_put(other);
1418                         err = -ECONNREFUSED;
1419                 } else {
1420                         unix_state_unlock(sk);
1421                 }
1422
1423                 other = NULL;
1424                 if (err)
1425                         goto out_free;
1426                 goto restart;
1427         }
1428
1429         err = -EPIPE;
1430         if (other->sk_shutdown & RCV_SHUTDOWN)
1431                 goto out_unlock;
1432
1433         if (sk->sk_type != SOCK_SEQPACKET) {
1434                 err = security_unix_may_send(sk->sk_socket, other->sk_socket);
1435                 if (err)
1436                         goto out_unlock;
1437         }
1438
1439         if (unix_peer(other) != sk &&
1440             (skb_queue_len(&other->sk_receive_queue) >
1441              other->sk_max_ack_backlog)) {
1442                 if (!timeo) {
1443                         err = -EAGAIN;
1444                         goto out_unlock;
1445                 }
1446
1447                 timeo = unix_wait_for_peer(other, timeo);
1448
1449                 err = sock_intr_errno(timeo);
1450                 if (signal_pending(current))
1451                         goto out_free;
1452
1453                 goto restart;
1454         }
1455
1456         skb_queue_tail(&other->sk_receive_queue, skb);
1457         unix_state_unlock(other);
1458         other->sk_data_ready(other, len);
1459         sock_put(other);
1460         scm_destroy(siocb->scm);
1461         return len;
1462
1463 out_unlock:
1464         unix_state_unlock(other);
1465 out_free:
1466         kfree_skb(skb);
1467 out:
1468         if (other)
1469                 sock_put(other);
1470         scm_destroy(siocb->scm);
1471         return err;
1472 }
1473
1474
1475 static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
1476                                struct msghdr *msg, size_t len)
1477 {
1478         struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
1479         struct sock *sk = sock->sk;
1480         struct sock *other = NULL;
1481         struct sockaddr_un *sunaddr=msg->msg_name;
1482         int err,size;
1483         struct sk_buff *skb;
1484         int sent=0;
1485         struct scm_cookie tmp_scm;
1486
1487         if (NULL == siocb->scm)
1488                 siocb->scm = &tmp_scm;
1489         err = scm_send(sock, msg, siocb->scm);
1490         if (err < 0)
1491                 return err;
1492
1493         err = -EOPNOTSUPP;
1494         if (msg->msg_flags&MSG_OOB)
1495                 goto out_err;
1496
1497         if (msg->msg_namelen) {
1498                 err = sk->sk_state == TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP;
1499                 goto out_err;
1500         } else {
1501                 sunaddr = NULL;
1502                 err = -ENOTCONN;
1503                 other = unix_peer(sk);
1504                 if (!other)
1505                         goto out_err;
1506         }
1507
1508         if (sk->sk_shutdown & SEND_SHUTDOWN)
1509                 goto pipe_err;
1510
1511         while(sent < len)
1512         {
1513                 /*
1514                  *      Optimisation for the fact that under 0.01% of X
1515                  *      messages typically need breaking up.
1516                  */
1517
1518                 size = len-sent;
1519
1520                 /* Keep two messages in the pipe so it schedules better */
1521                 if (size > ((sk->sk_sndbuf >> 1) - 64))
1522                         size = (sk->sk_sndbuf >> 1) - 64;
1523
1524                 if (size > SKB_MAX_ALLOC)
1525                         size = SKB_MAX_ALLOC;
1526
1527                 /*
1528                  *      Grab a buffer
1529                  */
1530
1531                 skb=sock_alloc_send_skb(sk,size,msg->msg_flags&MSG_DONTWAIT, &err);
1532
1533                 if (skb==NULL)
1534                         goto out_err;
1535
1536                 /*
 1537                  *      If you pass two values to sock_alloc_send_skb
 1538                  *      it tries to grab the large buffer with GFP_NOFS
 1539                  *      (which can fail easily), and if that fails it grabs the
 1540                  *      fallback size buffer, which is under a page and will
1541                  *      succeed. [Alan]
1542                  */
1543                 size = min_t(int, size, skb_tailroom(skb));
1544
1545                 memcpy(UNIXCREDS(skb), &siocb->scm->creds, sizeof(struct ucred));
1546                 if (siocb->scm->fp)
1547                         unix_attach_fds(siocb->scm, skb);
1548
1549                 if ((err = memcpy_fromiovec(skb_put(skb,size), msg->msg_iov, size)) != 0) {
1550                         kfree_skb(skb);
1551                         goto out_err;
1552                 }
1553
1554                 unix_state_lock(other);
1555
1556                 if (sock_flag(other, SOCK_DEAD) ||
1557                     (other->sk_shutdown & RCV_SHUTDOWN))
1558                         goto pipe_err_free;
1559
1560                 skb_queue_tail(&other->sk_receive_queue, skb);
1561                 unix_state_unlock(other);
1562                 other->sk_data_ready(other, size);
1563                 sent+=size;
1564         }
1565
1566         scm_destroy(siocb->scm);
1567         siocb->scm = NULL;
1568
1569         return sent;
1570
1571 pipe_err_free:
1572         unix_state_unlock(other);
1573         kfree_skb(skb);
1574 pipe_err:
1575         if (sent==0 && !(msg->msg_flags&MSG_NOSIGNAL))
1576                 send_sig(SIGPIPE,current,0);
1577         err = -EPIPE;
1578 out_err:
1579         scm_destroy(siocb->scm);
1580         siocb->scm = NULL;
1581         return sent ? : err;
1582 }
1583
1584 static int unix_seqpacket_sendmsg(struct kiocb *kiocb, struct socket *sock,
1585                                   struct msghdr *msg, size_t len)
1586 {
1587         int err;
1588         struct sock *sk = sock->sk;
1589
1590         err = sock_error(sk);
1591         if (err)
1592                 return err;
1593
1594         if (sk->sk_state != TCP_ESTABLISHED)
1595                 return -ENOTCONN;
1596
1597         if (msg->msg_namelen)
1598                 msg->msg_namelen = 0;
1599
1600         return unix_dgram_sendmsg(kiocb, sock, msg, len);
1601 }
1602
1603 static void unix_copy_addr(struct msghdr *msg, struct sock *sk)
1604 {
1605         struct unix_sock *u = unix_sk(sk);
1606
1607         msg->msg_namelen = 0;
1608         if (u->addr) {
1609                 msg->msg_namelen = u->addr->len;
1610                 memcpy(msg->msg_name, u->addr->name, u->addr->len);
1611         }
1612 }
1613
1614 static int unix_dgram_recvmsg(struct kiocb *iocb, struct socket *sock,
1615                               struct msghdr *msg, size_t size,
1616                               int flags)
1617 {
1618         struct sock_iocb *siocb = kiocb_to_siocb(iocb);
1619         struct scm_cookie tmp_scm;
1620         struct sock *sk = sock->sk;
1621         struct unix_sock *u = unix_sk(sk);
1622         int noblock = flags & MSG_DONTWAIT;
1623         struct sk_buff *skb;
1624         int err;
1625
1626         err = -EOPNOTSUPP;
1627         if (flags&MSG_OOB)
1628                 goto out;
1629
1630         msg->msg_namelen = 0;
1631
1632         mutex_lock(&u->readlock);
1633
1634         skb = skb_recv_datagram(sk, flags, noblock, &err);
1635         if (!skb)
1636                 goto out_unlock;
1637
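        /*
         * A sender blocked in unix_dgram_sendmsg() because this socket's
         * receive queue was full may be able to proceed now that an skb
         * has been taken off the queue, so wake up peer_wait.
         */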
1638         wake_up_interruptible(&u->peer_wait);
1639
1640         if (msg->msg_name)
1641                 unix_copy_addr(msg, skb->sk);
1642
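        /*
         * Datagram semantics: a read shorter than the datagram truncates
         * it and sets MSG_TRUNC; the remainder is not kept for a later
         * read (unless MSG_PEEK was used).
         */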
1643         if (size > skb->len)
1644                 size = skb->len;
1645         else if (size < skb->len)
1646                 msg->msg_flags |= MSG_TRUNC;
1647
1648         err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, size);
1649         if (err)
1650                 goto out_free;
1651
1652         if (!siocb->scm) {
1653                 siocb->scm = &tmp_scm;
1654                 memset(&tmp_scm, 0, sizeof(tmp_scm));
1655         }
1656         siocb->scm->creds = *UNIXCREDS(skb);
1657         unix_set_secdata(siocb->scm, skb);
1658
1659         if (!(flags & MSG_PEEK))
1660         {
1661                 if (UNIXCB(skb).fp)
1662                         unix_detach_fds(siocb->scm, skb);
1663         }
1664         else
1665         {
1666                 /* It is questionable what to do on PEEK; we could:
1667                    - not return fds - good, but too simple 8)
1668                    - return fds, and not return them on read (old strategy,
1669                      apparently wrong)
1670                    - clone fds (I chose this for now, as the most
1671                      universal solution)
1672
1673                    POSIX 1003.1g does not actually define this clearly
1674                    at all - but then POSIX 1003.1g doesn't define a lot
1675                    of things clearly!
1676
1677                 */
1678                 if (UNIXCB(skb).fp)
1679                         siocb->scm->fp = scm_fp_dup(UNIXCB(skb).fp);
1680         }
1681         err = size;
1682
1683         scm_recv(sock, msg, siocb->scm, flags);
1684
1685 out_free:
1686         skb_free_datagram(sk,skb);
1687 out_unlock:
1688         mutex_unlock(&u->readlock);
1689 out:
1690         return err;
1691 }
1692
1693 /*
1694  *      Sleep until data has arrived. But check for races.
1695  */
1696
1697 static long unix_stream_data_wait(struct sock * sk, long timeo)
1698 {
1699         DEFINE_WAIT(wait);
1700
1701         unix_state_lock(sk);
1702
1703         for (;;) {
1704                 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
1705
1706                 if (!skb_queue_empty(&sk->sk_receive_queue) ||
1707                     sk->sk_err ||
1708                     (sk->sk_shutdown & RCV_SHUTDOWN) ||
1709                     signal_pending(current) ||
1710                     !timeo)
1711                         break;
1712
1713                 set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
1714                 unix_state_unlock(sk);
1715                 timeo = schedule_timeout(timeo);
1716                 unix_state_lock(sk);
1717                 clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
1718         }
1719
1720         finish_wait(sk->sk_sleep, &wait);
1721         unix_state_unlock(sk);
1722         return timeo;
1723 }
1724
1725
1726
1727 static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
1728                                struct msghdr *msg, size_t size,
1729                                int flags)
1730 {
1731         struct sock_iocb *siocb = kiocb_to_siocb(iocb);
1732         struct scm_cookie tmp_scm;
1733         struct sock *sk = sock->sk;
1734         struct unix_sock *u = unix_sk(sk);
1735         struct sockaddr_un *sunaddr=msg->msg_name;
1736         int copied = 0;
1737         int check_creds = 0;
1738         int target;
1739         int err = 0;
1740         long timeo;
1741
1742         err = -EINVAL;
1743         if (sk->sk_state != TCP_ESTABLISHED)
1744                 goto out;
1745
1746         err = -EOPNOTSUPP;
1747         if (flags&MSG_OOB)
1748                 goto out;
1749
1750         target = sock_rcvlowat(sk, flags&MSG_WAITALL, size);
1751         timeo = sock_rcvtimeo(sk, flags&MSG_DONTWAIT);
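        /*
         * With MSG_WAITALL the low-water mark becomes the full request,
         * so the copy loop below keeps going until "size" bytes have been
         * gathered (or an error, shutdown or signal intervenes); otherwise
         * a single queued skb is normally enough to return.
         */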
1752
1753         msg->msg_namelen = 0;
1754
1755         /* Lock the socket to prevent the queue from being reordered
1756          * while we sleep in memcpy_toiovec
1757          */
1758
1759         if (!siocb->scm) {
1760                 siocb->scm = &tmp_scm;
1761                 memset(&tmp_scm, 0, sizeof(tmp_scm));
1762         }
1763
1764         mutex_lock(&u->readlock);
1765
1766         do
1767         {
1768                 int chunk;
1769                 struct sk_buff *skb;
1770
1771                 unix_state_lock(sk);
1772                 skb = skb_dequeue(&sk->sk_receive_queue);
1773                 if (skb==NULL)
1774                 {
1775                         if (copied >= target)
1776                                 goto unlock;
1777
1778                         /*
1779                          *      POSIX 1003.1g mandates this order.
1780                          */
1781
1782                         if ((err = sock_error(sk)) != 0)
1783                                 goto unlock;
1784                         if (sk->sk_shutdown & RCV_SHUTDOWN)
1785                                 goto unlock;
1786
1787                         unix_state_unlock(sk);
1788                         err = -EAGAIN;
1789                         if (!timeo)
1790                                 break;
1791                         mutex_unlock(&u->readlock);
1792
1793                         timeo = unix_stream_data_wait(sk, timeo);
1794
1795                         if (signal_pending(current)) {
1796                                 err = sock_intr_errno(timeo);
1797                                 goto out;
1798                         }
1799                         mutex_lock(&u->readlock);
1800                         continue;
1801  unlock:
1802                         unix_state_unlock(sk);
1803                         break;
1804                 }
1805                 unix_state_unlock(sk);
1806
1807                 if (check_creds) {
1808                         /* Never glue messages from different writers */
1809                         if (memcmp(UNIXCREDS(skb), &siocb->scm->creds, sizeof(siocb->scm->creds)) != 0) {
1810                                 skb_queue_head(&sk->sk_receive_queue, skb);
1811                                 break;
1812                         }
1813                 } else {
1814                         /* Copy credentials */
1815                         siocb->scm->creds = *UNIXCREDS(skb);
1816                         check_creds = 1;
1817                 }
1818
1819                 /* Copy address just once */
1820                 if (sunaddr)
1821                 {
1822                         unix_copy_addr(msg, skb->sk);
1823                         sunaddr = NULL;
1824                 }
1825
1826                 chunk = min_t(unsigned int, skb->len, size);
1827                 if (memcpy_toiovec(msg->msg_iov, skb->data, chunk)) {
1828                         skb_queue_head(&sk->sk_receive_queue, skb);
1829                         if (copied == 0)
1830                                 copied = -EFAULT;
1831                         break;
1832                 }
1833                 copied += chunk;
1834                 size -= chunk;
1835
1836                 /* Mark read part of skb as used */
1837                 if (!(flags & MSG_PEEK))
1838                 {
1839                         skb_pull(skb, chunk);
1840
1841                         if (UNIXCB(skb).fp)
1842                                 unix_detach_fds(siocb->scm, skb);
1843
1844                         /* put the skb back if we didn't use it up.. */
1845                         if (skb->len)
1846                         {
1847                                 skb_queue_head(&sk->sk_receive_queue, skb);
1848                                 break;
1849                         }
1850
1851                         kfree_skb(skb);
1852
1853                         if (siocb->scm->fp)
1854                                 break;
1855                 }
1856                 else
1857                 {
1858                         /* It is questionable, see note in unix_dgram_recvmsg.
1859                          */
1860                         if (UNIXCB(skb).fp)
1861                                 siocb->scm->fp = scm_fp_dup(UNIXCB(skb).fp);
1862
1863                         /* put message back and return */
1864                         skb_queue_head(&sk->sk_receive_queue, skb);
1865                         break;
1866                 }
1867         } while (size);
1868
1869         mutex_unlock(&u->readlock);
1870         scm_recv(sock, msg, siocb->scm, flags);
1871 out:
1872         return copied ? : err;
1873 }
1874
1875 static int unix_shutdown(struct socket *sock, int mode)
1876 {
1877         struct sock *sk = sock->sk;
1878         struct sock *other;
1879
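        /*
         * Map the shutdown() "how" argument onto the internal bits:
         * SHUT_RD (0) -> RCV_SHUTDOWN, SHUT_WR (1) -> SEND_SHUTDOWN,
         * SHUT_RDWR (2) -> both.
         */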
1880         mode = (mode+1)&(RCV_SHUTDOWN|SEND_SHUTDOWN);
1881
1882         if (mode) {
1883                 unix_state_lock(sk);
1884                 sk->sk_shutdown |= mode;
1885                 other=unix_peer(sk);
1886                 if (other)
1887                         sock_hold(other);
1888                 unix_state_unlock(sk);
1889                 sk->sk_state_change(sk);
1890
1891                 if (other &&
1892                         (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET)) {
1893
1894                         int peer_mode = 0;
1895
1896                         if (mode&RCV_SHUTDOWN)
1897                                 peer_mode |= SEND_SHUTDOWN;
1898                         if (mode&SEND_SHUTDOWN)
1899                                 peer_mode |= RCV_SHUTDOWN;
1900                         unix_state_lock(other);
1901                         other->sk_shutdown |= peer_mode;
1902                         unix_state_unlock(other);
1903                         other->sk_state_change(other);
1904                         read_lock(&other->sk_callback_lock);
1905                         if (peer_mode == SHUTDOWN_MASK)
1906                                 sk_wake_async(other,1,POLL_HUP);
1907                         else if (peer_mode & RCV_SHUTDOWN)
1908                                 sk_wake_async(other,1,POLL_IN);
1909                         read_unlock(&other->sk_callback_lock);
1910                 }
1911                 if (other)
1912                         sock_put(other);
1913         }
1914         return 0;
1915 }
1916
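/*
 * SIOCOUTQ reports the bytes still allocated to the send side and SIOCINQ
 * the bytes queued for reading (the sum of all queued skbs for stream and
 * seqpacket sockets, the length of the first datagram otherwise).  A rough
 * sketch of how user space might query this (illustrative only):
 *
 *      int queued = 0;
 *      if (ioctl(fd, SIOCINQ, &queued) == 0)
 *              printf("%d bytes ready to read\n", queued);
 */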
1917 static int unix_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
1918 {
1919         struct sock *sk = sock->sk;
1920         long amount=0;
1921         int err;
1922
1923         switch(cmd)
1924         {
1925                 case SIOCOUTQ:
1926                         amount = atomic_read(&sk->sk_wmem_alloc);
1927                         err = put_user(amount, (int __user *)arg);
1928                         break;
1929                 case SIOCINQ:
1930                 {
1931                         struct sk_buff *skb;
1932
1933                         if (sk->sk_state == TCP_LISTEN) {
1934                                 err = -EINVAL;
1935                                 break;
1936                         }
1937
1938                         spin_lock(&sk->sk_receive_queue.lock);
1939                         if (sk->sk_type == SOCK_STREAM ||
1940                             sk->sk_type == SOCK_SEQPACKET) {
1941                                 skb_queue_walk(&sk->sk_receive_queue, skb)
1942                                         amount += skb->len;
1943                         } else {
1944                                 skb = skb_peek(&sk->sk_receive_queue);
1945                                 if (skb)
1946                                         amount=skb->len;
1947                         }
1948                         spin_unlock(&sk->sk_receive_queue.lock);
1949                         err = put_user(amount, (int __user *)arg);
1950                         break;
1951                 }
1952
1953                 default:
1954                         err = -ENOIOCTLCMD;
1955                         break;
1956         }
1957         return err;
1958 }
1959
1960 static unsigned int unix_poll(struct file * file, struct socket *sock, poll_table *wait)
1961 {
1962         struct sock *sk = sock->sk;
1963         unsigned int mask;
1964
1965         poll_wait(file, sk->sk_sleep, wait);
1966         mask = 0;
1967
1968         /* exceptional events? */
1969         if (sk->sk_err)
1970                 mask |= POLLERR;
1971         if (sk->sk_shutdown == SHUTDOWN_MASK)
1972                 mask |= POLLHUP;
1973         if (sk->sk_shutdown & RCV_SHUTDOWN)
1974                 mask |= POLLRDHUP;
1975
1976         /* readable? */
1977         if (!skb_queue_empty(&sk->sk_receive_queue) ||
1978             (sk->sk_shutdown & RCV_SHUTDOWN))
1979                 mask |= POLLIN | POLLRDNORM;
1980
1981         /* Connection-based need to check for termination and startup */
1982         if ((sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) && sk->sk_state == TCP_CLOSE)
1983                 mask |= POLLHUP;
1984
1985         /*
1986          * we set writable also when the other side has shut down the
1987          * connection. This prevents stuck sockets.
1988          */
1989         if (unix_writable(sk))
1990                 mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
1991
1992         return mask;
1993 }
1994
1995
1996 #ifdef CONFIG_PROC_FS
1997 static struct sock *unix_seq_idx(int *iter, loff_t pos)
1998 {
1999         loff_t off = 0;
2000         struct sock *s;
2001
2002         for (s = first_unix_socket(iter); s; s = next_unix_socket(iter, s)) {
2003                 if (off == pos)
2004                         return s;
2005                 ++off;
2006         }
2007         return NULL;
2008 }
2009
2010
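/*
 * The seq_file iterator uses (void *)1 as a start-of-table marker so that
 * the first ->show() call emits the column header; subsequent entries are
 * struct sock pointers walked under unix_table_lock.
 */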
2011 static void *unix_seq_start(struct seq_file *seq, loff_t *pos)
2012 {
2013         spin_lock(&unix_table_lock);
2014         return *pos ? unix_seq_idx(seq->private, *pos - 1) : ((void *) 1);
2015 }
2016
2017 static void *unix_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2018 {
2019         ++*pos;
2020
2021         if (v == (void *)1)
2022                 return first_unix_socket(seq->private);
2023         return next_unix_socket(seq->private, v);
2024 }
2025
2026 static void unix_seq_stop(struct seq_file *seq, void *v)
2027 {
2028         spin_unlock(&unix_table_lock);
2029 }
2030
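/*
 * Each /proc/net/unix entry looks roughly like (illustrative values):
 *
 *      00000000: 00000002 00000000 00010000 0001 01  1234 /tmp/example.sock
 *
 * i.e. kernel address, refcount, protocol (always 0), flags, type, state,
 * inode and, if bound, the socket path ('@' prefix for abstract names).
 */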
2031 static int unix_seq_show(struct seq_file *seq, void *v)
2032 {
2033
2034         if (v == (void *)1)
2035                 seq_puts(seq, "Num       RefCount Protocol Flags    Type St "
2036                          "Inode Path\n");
2037         else {
2038                 struct sock *s = v;
2039                 struct unix_sock *u = unix_sk(s);
2040                 unix_state_lock(s);
2041
2042                 seq_printf(seq, "%p: %08X %08X %08X %04X %02X %5lu",
2043                         s,
2044                         atomic_read(&s->sk_refcnt),
2045                         0,
2046                         s->sk_state == TCP_LISTEN ? __SO_ACCEPTCON : 0,
2047                         s->sk_type,
2048                         s->sk_socket ?
2049                         (s->sk_state == TCP_ESTABLISHED ? SS_CONNECTED : SS_UNCONNECTED) :
2050                         (s->sk_state == TCP_ESTABLISHED ? SS_CONNECTING : SS_DISCONNECTING),
2051                         sock_i_ino(s));
2052
2053                 if (u->addr) {
2054                         int i, len;
2055                         seq_putc(seq, ' ');
2056
2057                         i = 0;
2058                         len = u->addr->len - sizeof(short);
2059                         if (!UNIX_ABSTRACT(s))
2060                                 len--;
2061                         else {
2062                                 seq_putc(seq, '@');
2063                                 i++;
2064                         }
2065                         for ( ; i < len; i++)
2066                                 seq_putc(seq, u->addr->name->sun_path[i]);
2067                 }
2068                 unix_state_unlock(s);
2069                 seq_putc(seq, '\n');
2070         }
2071
2072         return 0;
2073 }
2074
2075 static const struct seq_operations unix_seq_ops = {
2076         .start  = unix_seq_start,
2077         .next   = unix_seq_next,
2078         .stop   = unix_seq_stop,
2079         .show   = unix_seq_show,
2080 };
2081
2082
2083 static int unix_seq_open(struct inode *inode, struct file *file)
2084 {
2085         struct seq_file *seq;
2086         int rc = -ENOMEM;
2087         int *iter = kmalloc(sizeof(int), GFP_KERNEL);
2088
2089         if (!iter)
2090                 goto out;
2091
2092         rc = seq_open(file, &unix_seq_ops);
2093         if (rc)
2094                 goto out_kfree;
2095
2096         seq          = file->private_data;
2097         seq->private = iter;
2098         *iter = 0;
2099 out:
2100         return rc;
2101 out_kfree:
2102         kfree(iter);
2103         goto out;
2104 }
2105
2106 static const struct file_operations unix_seq_fops = {
2107         .owner          = THIS_MODULE,
2108         .open           = unix_seq_open,
2109         .read           = seq_read,
2110         .llseek         = seq_lseek,
2111         .release        = seq_release_private,
2112 };
2113
2114 #endif
2115
2116 static struct net_proto_family unix_family_ops = {
2117         .family = PF_UNIX,
2118         .create = unix_create,
2119         .owner  = THIS_MODULE,
2120 };
2121
2122 static int __init af_unix_init(void)
2123 {
2124         int rc = -1;
2125         struct sk_buff *dummy_skb;
2126
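        /*
         * unix_skb_parms is stored in skb->cb, a small fixed-size control
         * buffer (48 bytes at the time of writing), so make sure it still
         * fits there.
         */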
2127         BUILD_BUG_ON(sizeof(struct unix_skb_parms) > sizeof(dummy_skb->cb));
2128
2129         rc = proto_register(&unix_proto, 1);
2130         if (rc != 0) {
2131                 printk(KERN_CRIT "%s: Cannot create unix_sock SLAB cache!\n",
2132                        __FUNCTION__);
2133                 goto out;
2134         }
2135
2136         sock_register(&unix_family_ops);
2137 #ifdef CONFIG_PROC_FS
2138         proc_net_fops_create("unix", 0, &unix_seq_fops);
2139 #endif
2140         unix_sysctl_register();
2141 out:
2142         return rc;
2143 }
2144
2145 static void __exit af_unix_exit(void)
2146 {
2147         sock_unregister(PF_UNIX);
2148         unix_sysctl_unregister();
2149         proc_net_remove("unix");
2150         proto_unregister(&unix_proto);
2151 }
2152
2153 module_init(af_unix_init);
2154 module_exit(af_unix_exit);
2155
2156 MODULE_LICENSE("GPL");
2157 MODULE_ALIAS_NETPROTO(PF_UNIX);