net/unix/garbage.c
/*
 * NET3:	Garbage Collector For AF_UNIX sockets
 *
 * Garbage Collector:
 *	Copyright (C) Barak A. Pearlmutter.
 *	Released under the GPL version 2 or later.
 *
 * Chopped about by Alan Cox 22/3/96 to make it fit the AF_UNIX socket problem.
 * If it doesn't work blame me, it worked when Barak sent it.
 *
 * Assumptions:
 *
 *  - object w/ a bit
 *  - free list
 *
 * Current optimizations:
 *
 *  - explicit stack instead of recursion
 *  - tail recurse on first born instead of immediate push/pop
 *  - we gather the stuff that should not be killed into a tree
 *    and the stack is just a path from the root to the current pointer.
 *
 * Future optimizations:
 *
 *  - don't just push entire root set; process in place
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 * Fixes:
 *	Alan Cox	07 Sept	1997	Vmalloc internal stack as needed.
 *					Cope with changing max_files.
 *	Al Viro		11 Oct 1998
 *		Graph may have cycles. That is, we can send the descriptor
 *		of foo to bar and vice versa. Current code chokes on that.
 *		Fix: move SCM_RIGHTS ones into the separate list and then
 *		skb_free() them all instead of doing explicit fput's.
 *		Another problem: since fput() may block, somebody may
 *		create a new unix_socket while we are in the middle of the
 *		sweep phase. Fix: revert the logic wrt MARKED. Mark everything
 *		upon the beginning and unmark non-junk ones.
 *
 *		[12 Oct 1998] AAARGH! New code purges all SCM_RIGHTS
 *		sent to connect()'ed but still not accept()'ed sockets.
 *		Fixed. Old code had a slightly different problem here:
 *		an extra fput() when we passed the descriptor via such a
 *		socket and then closed it (the descriptor). That would happen
 *		on each unix_gc() until the accept(). Since the struct file in
 *		question would go to the free list and might be reused...
 *		That might be the reason for random oopses on filp_close()
 *		in unrelated processes.
 *
 *	AV		28 Feb 1999
 *		Kill the explicit allocation of the stack. Now we keep the
 *		tree with its root in a dummy node + a pointer (gc_current)
 *		to one of the nodes. The stack is represented as the path
 *		from gc_current to the dummy. Unmark now means "add to tree".
 *		Push == "make it a son of gc_current". Pop == "move
 *		gc_current to parent". We keep only pointers to parents
 *		(->gc_tree).
 *	AV		1 Mar 1999
 *		Damn. Added missing check for ->dead in listen queues scanning.
 *
 *	Miklos Szeredi	25 Jun 2007
 *		Reimplement with a cycle collecting algorithm. This should
 *		solve several problems with the previous code, like being racy
 *		wrt receive and holding up unrelated socket operations.
 */

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/un.h>
#include <linux/net.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/file.h>
#include <linux/proc_fs.h>
#include <linux/mutex.h>

#include <net/sock.h>
#include <net/af_unix.h>
#include <net/scm.h>
#include <net/tcp_states.h>

/* Internal data structures and random procedures: */

static LIST_HEAD(gc_inflight_list);
static LIST_HEAD(gc_candidates);
static DEFINE_SPINLOCK(unix_gc_lock);

unsigned int unix_tot_inflight;

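/*
 * unix_get_socket - if the file refers to an AF_UNIX socket, return the
 * corresponding struct sock, otherwise return NULL.
 */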
static struct sock *unix_get_socket(struct file *filp)
{
	struct sock *u_sock = NULL;
	struct inode *inode = filp->f_path.dentry->d_inode;

	/*
	 * Socket ?
	 */
	if (S_ISSOCK(inode->i_mode)) {
		struct socket *sock = SOCKET_I(inode);
		struct sock *s = sock->sk;

		/*
		 * PF_UNIX ?
		 */
		if (s && sock->ops && sock->ops->family == PF_UNIX)
			u_sock = s;
	}
	return u_sock;
}

/*
 * Keep an in-flight count for the file descriptor if it refers to an
 * AF_UNIX socket.
 */

void unix_inflight(struct file *fp)
{
	struct sock *s = unix_get_socket(fp);
	if (s) {
		struct unix_sock *u = unix_sk(s);
		spin_lock(&unix_gc_lock);
		if (atomic_long_inc_return(&u->inflight) == 1) {
			BUG_ON(!list_empty(&u->link));
			list_add_tail(&u->link, &gc_inflight_list);
		} else {
			BUG_ON(list_empty(&u->link));
		}
		unix_tot_inflight++;
		spin_unlock(&unix_gc_lock);
	}
}

void unix_notinflight(struct file *fp)
{
	struct sock *s = unix_get_socket(fp);
	if (s) {
		struct unix_sock *u = unix_sk(s);
		spin_lock(&unix_gc_lock);
		BUG_ON(list_empty(&u->link));
		if (atomic_long_dec_and_test(&u->inflight))
			list_del_init(&u->link);
		unix_tot_inflight--;
		spin_unlock(&unix_gc_lock);
	}
}

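/*
 * Note: unix_inflight() and unix_notinflight() are expected to be called
 * in matched pairs, once per passed descriptor, by the code that attaches
 * SCM_RIGHTS descriptors to an skb and the code that later detaches or
 * destroys them (the unix_attach_fds()/unix_detach_fds() paths in
 * af_unix.c), so the counts balance out once the skb carrying the
 * descriptors has been consumed or dropped.
 */
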
static inline struct sk_buff *sock_queue_head(struct sock *sk)
{
	return (struct sk_buff *) &sk->sk_receive_queue;
}

#define receive_queue_for_each_skb(sk, next, skb) \
	for (skb = sock_queue_head(sk)->next, next = skb->next; \
	     skb != sock_queue_head(sk); skb = next, next = skb->next)

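/*
 * scan_inflight - walk every skb queued on x's receive queue (the
 * receive_queue_for_each_skb() helper above essentially open-codes
 * skb_queue_walk_safe(), so the current skb may be unlinked while
 * walking) and call 'func' on each AF_UNIX socket found among the file
 * descriptors attached to those skbs.  If 'hitlist' is non-NULL, every
 * skb that carried at least one such descriptor is moved from the
 * receive queue onto 'hitlist'.
 */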
static void scan_inflight(struct sock *x, void (*func)(struct unix_sock *),
			  struct sk_buff_head *hitlist)
{
	struct sk_buff *skb;
	struct sk_buff *next;

	spin_lock(&x->sk_receive_queue.lock);
	receive_queue_for_each_skb(x, next, skb) {
		/*
		 * Do we have file descriptors ?
		 */
		if (UNIXCB(skb).fp) {
			bool hit = false;
			/*
			 * Process the descriptors of this socket
			 */
			int nfd = UNIXCB(skb).fp->count;
			struct file **fp = UNIXCB(skb).fp->fp;
			while (nfd--) {
				/*
				 * Get the socket the fd matches, if any
				 */
				struct sock *sk = unix_get_socket(*fp++);
				if (sk) {
					hit = true;
					func(unix_sk(sk));
				}
			}
			if (hit && hitlist != NULL) {
				__skb_unlink(skb, &x->sk_receive_queue);
				__skb_queue_tail(hitlist, skb);
			}
		}
	}
	spin_unlock(&x->sk_receive_queue.lock);
}

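/*
 * scan_children - like scan_inflight(), but if 'x' is a listening
 * socket, scan the receive queues of its not-yet-accepted (embryo)
 * sockets instead, since any descriptors queued there are still
 * reachable through 'x'.
 */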
static void scan_children(struct sock *x, void (*func)(struct unix_sock *),
			  struct sk_buff_head *hitlist)
{
	if (x->sk_state != TCP_LISTEN)
		scan_inflight(x, func, hitlist);
	else {
		struct sk_buff *skb;
		struct sk_buff *next;
		struct unix_sock *u;
		LIST_HEAD(embryos);

		/*
		 * For a listening socket collect the queued embryos
		 * and perform a scan on them as well.
		 */
		spin_lock(&x->sk_receive_queue.lock);
		receive_queue_for_each_skb(x, next, skb) {
			u = unix_sk(skb->sk);

			/*
			 * An embryo cannot be in-flight, so it's safe
			 * to use the list link.
			 */
			BUG_ON(!list_empty(&u->link));
			list_add_tail(&u->link, &embryos);
		}
		spin_unlock(&x->sk_receive_queue.lock);

		while (!list_empty(&embryos)) {
			u = list_entry(embryos.next, struct unix_sock, link);
			scan_inflight(&u->sk, func, hitlist);
			list_del_init(&u->link);
		}
	}
}

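/*
 * The three helpers below are the callbacks handed to scan_children()
 * by unix_gc(): dec_inflight() and inc_inflight() adjust a child's
 * in-flight count during the candidate-pruning passes, while
 * inc_inflight_move_tail() additionally requeues a still-candidate
 * child so that it gets rescanned.
 */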
static void dec_inflight(struct unix_sock *usk)
{
	atomic_long_dec(&usk->inflight);
}

static void inc_inflight(struct unix_sock *usk)
{
	atomic_long_inc(&usk->inflight);
}

static void inc_inflight_move_tail(struct unix_sock *u)
{
	atomic_long_inc(&u->inflight);
	/*
	 * If this is still a candidate, move it to the end of the
	 * list, so that it's checked even if it was already passed
	 * over
	 */
	if (u->gc_candidate)
		list_move_tail(&u->link, &gc_candidates);
}

/* The external entry point: unix_gc() */

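/*
 * For example, if sockets A and B have each been sent into the other's
 * receive queue via SCM_RIGHTS and both user-space descriptors are then
 * closed, each file's only remaining references are the in-flight ones,
 * so both sockets become candidates.  Dropping the internal references
 * leaves both in-flight counts at zero, nothing in the restore pass
 * brings them back, and the skbs forming the cycle end up on the
 * hitlist, whose purge finally releases both sockets.
 */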
void unix_gc(void)
{
	static bool gc_in_progress = false;

	struct unix_sock *u;
	struct unix_sock *next;
	struct sk_buff_head hitlist;
	struct list_head cursor;

	spin_lock(&unix_gc_lock);

	/* Avoid a recursive GC. */
	if (gc_in_progress)
		goto out;

	gc_in_progress = true;
	/*
	 * First, select candidates for garbage collection.  Only
	 * in-flight sockets are considered, and from those only the
	 * ones which don't have any external reference.
	 *
	 * Holding unix_gc_lock will protect these candidates from
	 * being detached, and hence from gaining an external
	 * reference.  This also means that, since there are no
	 * possible receivers, the receive queues of these sockets are
	 * static during the GC, even though the dequeue is done
	 * before the detach without atomicity guarantees.
	 */
	list_for_each_entry_safe(u, next, &gc_inflight_list, link) {
		long total_refs;
		long inflight_refs;

		total_refs = file_count(u->sk.sk_socket->file);
		inflight_refs = atomic_long_read(&u->inflight);

		BUG_ON(inflight_refs < 1);
		BUG_ON(total_refs < inflight_refs);
		if (total_refs == inflight_refs) {
			list_move_tail(&u->link, &gc_candidates);
			u->gc_candidate = 1;
		}
	}

	/*
	 * Now remove all internal in-flight references to children of
	 * the candidates.
	 */
	list_for_each_entry(u, &gc_candidates, link)
		scan_children(&u->sk, dec_inflight, NULL);

	/*
	 * Restore the references for children of all candidates which
	 * still have remaining references.  Do this recursively, so
	 * that only the ones forming cyclic references remain.
	 *
	 * Use a "cursor" link to make the list traversal safe, even
	 * though elements might be moved about.
	 */
	list_add(&cursor, &gc_candidates);
	while (cursor.next != &gc_candidates) {
		u = list_entry(cursor.next, struct unix_sock, link);

		/* Move cursor to after the current position. */
		list_move(&cursor, &u->link);

		if (atomic_long_read(&u->inflight) > 0) {
			list_move_tail(&u->link, &gc_inflight_list);
			u->gc_candidate = 0;
			scan_children(&u->sk, inc_inflight_move_tail, NULL);
		}
	}
	list_del(&cursor);

	/*
	 * Now gc_candidates contains only garbage.  Restore the
	 * original inflight counters for these as well, and remove
	 * the skbuffs which are creating the cycle(s).
	 */
	skb_queue_head_init(&hitlist);
	list_for_each_entry(u, &gc_candidates, link)
		scan_children(&u->sk, inc_inflight, &hitlist);

	spin_unlock(&unix_gc_lock);

	/* Here we are. Hitlist is filled. Die. */
	__skb_queue_purge(&hitlist);

	spin_lock(&unix_gc_lock);

	/* All candidates should have been detached by now. */
	BUG_ON(!list_empty(&gc_candidates));
	gc_in_progress = false;

 out:
	spin_unlock(&unix_gc_lock);
}