/*
 * NET3:	Garbage Collector For AF_UNIX sockets
 *
 * Garbage Collector:
 *	Copyright (C) Barak A. Pearlmutter.
 *	Released under the GPL version 2 or later.
 *
 * Chopped about by Alan Cox 22/3/96 to make it fit the AF_UNIX socket problem.
 * If it doesn't work blame me, it worked when Barak sent it.
 *
 * Assumptions:
 *
 *  - object w/ a bit
 *  - free list
 *
 * Current optimizations:
 *
 *  - explicit stack instead of recursion
 *  - tail recurse on first born instead of immediate push/pop
 *  - we gather the stuff that should not be killed into tree
 *    and stack is just a path from root to the current pointer.
 *
 * Future optimizations:
 *
 *  - don't just push entire root set; process in place
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 * Fixes:
 *	Alan Cox	07 Sept 1997	Vmalloc internal stack as needed.
 *					Cope with changing max_files.
 *	Al Viro		11 Oct 1998
 *		Graph may have cycles. That is, we can send the descriptor
 *		of foo to bar and vice versa. Current code chokes on that.
 *		Fix: move SCM_RIGHTS ones into the separate list and then
 *		skb_free() them all instead of doing explicit fput's.
 *		Another problem: since fput() may block somebody may
 *		create a new unix_socket when we are in the middle of sweep
 *		phase. Fix: revert the logic wrt MARKED. Mark everything
 *		upon the beginning and unmark non-junk ones.
 *
 *		[12 Oct 1998] AAARGH! New code purges all SCM_RIGHTS
 *		sent to connect()'ed but still not accept()'ed sockets.
 *		Fixed. Old code had slightly different problem here:
 *		extra fput() in situation when we passed the descriptor via
 *		such socket and closed it (descriptor). That would happen on
 *		each unix_gc() until the accept(). Since the struct file in
 *		question would go to the free list and might be reused...
 *		That might be the reason of random oopses on filp_close()
 *		in unrelated processes.
 *
 *	AV		28 Feb 1999
 *		Kill the explicit allocation of stack. Now we keep the tree
 *		with root in dummy + pointer (gc_current) to one of the nodes.
 *		Stack is represented as path from gc_current to dummy. Unmark
 *		now means "add to tree". Push == "make it a son of gc_current".
 *		Pop == "move gc_current to parent". We keep only pointers to
 *		parents (->gc_tree).
 *	AV		1 Mar 1999
 *		Damn. Added missing check for ->dead in listen queues
 *		scanning.
 *
 *	Miklos Szeredi	25 Jun 2007
 *		Reimplement with a cycle collecting algorithm. This should
 *		solve several problems with the previous code, like being racy
 *		wrt receive and holding up unrelated socket operations.
 */
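
/*
 * Illustration (hypothetical userspace sketch, not part of this file):
 * the kind of unreachable cycle this collector reclaims is created by
 * passing one end of a socketpair over its peer with SCM_RIGHTS and
 * then closing both descriptors:
 *
 *	int sv[2];
 *	char dummy = 'x';
 *	char ctl[CMSG_SPACE(sizeof(int))] = { 0 };
 *	struct iovec iov = { .iov_base = &dummy, .iov_len = 1 };
 *	struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1,
 *			      .msg_control = ctl,
 *			      .msg_controllen = sizeof(ctl) };
 *	struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
 *
 *	socketpair(AF_UNIX, SOCK_DGRAM, 0, sv);
 *	cmsg->cmsg_level = SOL_SOCKET;
 *	cmsg->cmsg_type = SCM_RIGHTS;
 *	cmsg->cmsg_len = CMSG_LEN(sizeof(int));
 *	memcpy(CMSG_DATA(cmsg), &sv[0], sizeof(int));
 *	sendmsg(sv[1], &msg, 0);
 *	close(sv[0]);
 *	close(sv[1]);
 *
 * The skb now queued on sv[0]'s receive queue holds a reference to
 * sv[0]'s struct file, and that file is the only thing keeping the
 * queue (and hence the skb) alive: a cycle with no remaining
 * user-space reference, invisible to plain reference counting.
 */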

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/un.h>
#include <linux/net.h>
#include <linux/fs.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/file.h>
#include <linux/proc_fs.h>
#include <linux/mutex.h>
#include <linux/wait.h>

#include <net/sock.h>
#include <net/af_unix.h>
#include <net/scm.h>
#include <net/tcp_states.h>

/* Internal data structures and random procedures: */

static LIST_HEAD(gc_inflight_list);
static LIST_HEAD(gc_candidates);
static DEFINE_SPINLOCK(unix_gc_lock);
static DECLARE_WAIT_QUEUE_HEAD(unix_gc_wait);

unsigned int unix_tot_inflight;

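/* Map a file to the AF_UNIX socket it refers to.  Returns NULL if the
 * file is not a socket, is not an AF_UNIX socket, or is an O_PATH
 * descriptor (FMODE_PATH), which cannot be used for socket I/O.
 */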
struct sock *unix_get_socket(struct file *filp)
{
	struct sock *u_sock = NULL;
	struct inode *inode = file_inode(filp);

	/* Socket ? */
	if (S_ISSOCK(inode->i_mode) && !(filp->f_mode & FMODE_PATH)) {
		struct socket *sock = SOCKET_I(inode);
		struct sock *s = sock->sk;

		/* PF_UNIX ? */
		if (s && sock->ops && sock->ops->family == PF_UNIX)
			u_sock = s;
	}
	return u_sock;
}

/* Keep track of the in-flight count of a file descriptor if it is
 * for an AF_UNIX socket.
 */

void unix_inflight(struct file *fp)
{
	struct sock *s = unix_get_socket(fp);

	if (s) {
		struct unix_sock *u = unix_sk(s);

		spin_lock(&unix_gc_lock);

		if (atomic_long_inc_return(&u->inflight) == 1) {
			BUG_ON(!list_empty(&u->link));
			list_add_tail(&u->link, &gc_inflight_list);
		} else {
			BUG_ON(list_empty(&u->link));
		}
		unix_tot_inflight++;
		spin_unlock(&unix_gc_lock);
	}
}

void unix_notinflight(struct file *fp)
{
	struct sock *s = unix_get_socket(fp);

	if (s) {
		struct unix_sock *u = unix_sk(s);

		spin_lock(&unix_gc_lock);
		BUG_ON(list_empty(&u->link));

		if (atomic_long_dec_and_test(&u->inflight))
			list_del_init(&u->link);
		unix_tot_inflight--;
		spin_unlock(&unix_gc_lock);
	}
}
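
/* unix_inflight() and unix_notinflight() bracket the life of an
 * in-flight reference; the accounting must mirror every descriptor
 * attached to and detached from an skb, since the candidate test in
 * unix_gc() relies on file_count() being fully explained by
 * ->inflight for unreachable sockets.
 */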

static void scan_inflight(struct sock *x, void (*func)(struct unix_sock *),
			  struct sk_buff_head *hitlist)
{
	struct sk_buff *skb;
	struct sk_buff *next;

	spin_lock(&x->sk_receive_queue.lock);
	skb_queue_walk_safe(&x->sk_receive_queue, skb, next) {
		/* Do we have file descriptors ? */
		if (UNIXCB(skb).fp) {
			bool hit = false;
			/* Process the descriptors of this socket */
			int nfd = UNIXCB(skb).fp->count;
			struct file **fp = UNIXCB(skb).fp->fp;

			while (nfd--) {
				/* Get the socket the fd refers to, if any */
				struct sock *sk = unix_get_socket(*fp++);

				if (sk) {
					struct unix_sock *u = unix_sk(sk);

					/* Ignore non-candidates, they could
					 * have been added to the queues after
					 * starting the garbage collection
					 */
					if (test_bit(UNIX_GC_CANDIDATE, &u->gc_flags)) {
						hit = true;

						func(u);
					}
				}
			}
			if (hit && hitlist != NULL) {
				__skb_unlink(skb, &x->sk_receive_queue);
				__skb_queue_tail(hitlist, skb);
			}
		}
	}
	spin_unlock(&x->sk_receive_queue.lock);
}

static void scan_children(struct sock *x, void (*func)(struct unix_sock *),
			  struct sk_buff_head *hitlist)
{
	if (x->sk_state != TCP_LISTEN) {
		scan_inflight(x, func, hitlist);
	} else {
		struct sk_buff *skb;
		struct sk_buff *next;
		struct unix_sock *u;
		LIST_HEAD(embryos);

		/* For a listening socket collect the queued embryos
		 * and perform a scan on them as well.
		 */
		spin_lock(&x->sk_receive_queue.lock);
		skb_queue_walk_safe(&x->sk_receive_queue, skb, next) {
			u = unix_sk(skb->sk);

			/* An embryo cannot be in-flight, so it's safe
			 * to use the list link.
			 */
			BUG_ON(!list_empty(&u->link));
			list_add_tail(&u->link, &embryos);
		}
		spin_unlock(&x->sk_receive_queue.lock);

		while (!list_empty(&embryos)) {
			u = list_entry(embryos.next, struct unix_sock, link);
			scan_inflight(&u->sk, func, hitlist);
			list_del_init(&u->link);
		}
	}
}
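
/* In both scanners a non-NULL hitlist makes the walk destructive: any
 * skb whose attached descriptors reference a GC candidate is unlinked
 * from its receive queue and queued on the hitlist for later purging.
 */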

static void dec_inflight(struct unix_sock *usk)
{
	atomic_long_dec(&usk->inflight);
}

static void inc_inflight(struct unix_sock *usk)
{
	atomic_long_inc(&usk->inflight);
}

static void inc_inflight_move_tail(struct unix_sock *u)
{
	atomic_long_inc(&u->inflight);
	/* If this still might be part of a cycle, move it to the end
	 * of the list, so that it's checked even if it was already
	 * passed over
	 */
	if (test_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags))
		list_move_tail(&u->link, &gc_candidates);
}
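
/* Moving a restored socket to the tail of gc_candidates guarantees the
 * cursor walk in unix_gc() will look at it again: a node that was
 * already passed over can become reachable when one of its referrers
 * is restored, so it must be rescanned.
 */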

static bool gc_in_progress;
#define UNIX_INFLIGHT_TRIGGER_GC 16000

void wait_for_unix_gc(void)
{
	/* If the number of in-flight sockets is insane,
	 * force a garbage collect right now.
	 */
	if (unix_tot_inflight > UNIX_INFLIGHT_TRIGGER_GC && !gc_in_progress)
		unix_gc();
	wait_event(unix_gc_wait, gc_in_progress == false);
}
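
/* A sketch of the collection algorithm implemented by unix_gc(), in
 * terms of the helpers above:
 *
 *  1. Move every in-flight socket whose entire file refcount is
 *     explained by in-flight references onto gc_candidates.
 *  2. For each candidate, subtract the references held by skbs queued
 *     on candidates (scan_children with dec_inflight).
 *  3. Any candidate whose count is still positive is reachable from
 *     outside; restore it and, transitively, everything it references
 *     (inc_inflight_move_tail).
 *  4. Whatever is still on gc_candidates is garbage kept alive only by
 *     cycles: restore its counters (inc_inflight) and free the skbs
 *     forming the cycles via the hitlist.
 */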

/* The external entry point: unix_gc() */
void unix_gc(void)
{
	struct unix_sock *u;
	struct unix_sock *next;
	struct sk_buff_head hitlist;
	struct list_head cursor;
	LIST_HEAD(not_cycle_list);

	spin_lock(&unix_gc_lock);

	/* Avoid a recursive GC. */
	if (gc_in_progress)
		goto out;

	gc_in_progress = true;
	/* First, select candidates for garbage collection.  Only
	 * in-flight sockets are considered, and from those only the
	 * ones which don't have any external reference.
	 *
	 * Holding unix_gc_lock will protect these candidates from
	 * being detached, and hence from gaining an external
	 * reference.  Since there are no possible receivers, all
	 * buffers currently on the candidates' queues stay there
	 * during the garbage collection.
	 *
	 * We also know that no new candidate can be added onto the
	 * receive queues.  Other, non-candidate sockets _can_ be
	 * added to the queues, so we must make sure only to touch
	 * candidates.
	 */
	list_for_each_entry_safe(u, next, &gc_inflight_list, link) {
		long total_refs;
		long inflight_refs;

		total_refs = file_count(u->sk.sk_socket->file);
		inflight_refs = atomic_long_read(&u->inflight);

		BUG_ON(inflight_refs < 1);
		BUG_ON(total_refs < inflight_refs);
		if (total_refs == inflight_refs) {
			list_move_tail(&u->link, &gc_candidates);
			__set_bit(UNIX_GC_CANDIDATE, &u->gc_flags);
			__set_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags);
		}
	}

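	/* A worked instance of the test above: a socket whose file has
	 * file_count == 2 but inflight == 1 has one reference from an
	 * skb in flight, so the other must be an external one (e.g. a
	 * user-space descriptor) and the socket stays off the candidate
	 * list.  Only when every reference is in-flight can the socket
	 * possibly belong to an unreachable cycle.
	 */
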
	/* Now remove all internal in-flight references to children of
	 * the candidates.
	 */
	list_for_each_entry(u, &gc_candidates, link)
		scan_children(&u->sk, dec_inflight, NULL);

	/* Restore the references for children of all candidates which
	 * still have remaining references.  Do this recursively, so
	 * that only the sockets forming cyclic references remain.
	 *
	 * Use a "cursor" link to make the list traversal safe, even
	 * though elements might be moved about.
	 */
	list_add(&cursor, &gc_candidates);
	while (cursor.next != &gc_candidates) {
		u = list_entry(cursor.next, struct unix_sock, link);

		/* Move cursor to after the current position. */
		list_move(&cursor, &u->link);

		if (atomic_long_read(&u->inflight) > 0) {
			list_move_tail(&u->link, &not_cycle_list);
			__clear_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags);
			scan_children(&u->sk, inc_inflight_move_tail, NULL);
		}
	}
	list_del(&cursor);

	/* not_cycle_list contains those sockets which do not make up a
	 * cycle.  Restore these to the inflight list.
	 */
	while (!list_empty(&not_cycle_list)) {
		u = list_entry(not_cycle_list.next, struct unix_sock, link);
		__clear_bit(UNIX_GC_CANDIDATE, &u->gc_flags);
		list_move_tail(&u->link, &gc_inflight_list);
	}

	/* Now gc_candidates contains only garbage.  Restore original
	 * inflight counters for these as well, and remove the skbuffs
	 * which are creating the cycle(s).
	 */
	skb_queue_head_init(&hitlist);
	list_for_each_entry(u, &gc_candidates, link)
		scan_children(&u->sk, inc_inflight, &hitlist);

	spin_unlock(&unix_gc_lock);

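	/* unix_gc_lock is deliberately dropped before the purge:
	 * freeing an skb that carries descriptors ends up in
	 * unix_notinflight(), which takes unix_gc_lock itself, so
	 * purging under the lock would deadlock.
	 */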
	/* Here we are.  Hitlist is filled.  Die. */
	__skb_queue_purge(&hitlist);

	spin_lock(&unix_gc_lock);

	/* All candidates should have been detached by now. */
	BUG_ON(!list_empty(&gc_candidates));
	gc_in_progress = false;
	wake_up(&unix_gc_wait);

 out:
	spin_unlock(&unix_gc_lock);
}