// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * NET3:	Garbage Collector For AF_UNIX sockets
 *
 * Garbage Collector:
 *	Copyright (C) Barak A. Pearlmutter.
 *
 * Chopped about by Alan Cox 22/3/96 to make it fit the AF_UNIX socket problem.
 * If it doesn't work blame me, it worked when Barak sent it.
 *
 * Assumptions:
 *
 *  - object w/ a bit
 *  - free list
 *
 * Current optimizations:
 *
 *  - explicit stack instead of recursion
 *  - tail recurse on first born instead of immediate push/pop
 *  - we gather the stuff that should not be killed into tree
 *    and stack is just a path from root to the current pointer.
 *
 * Future optimizations:
 *
 *  - don't just push entire root set; process in place
 *
 * Fixes:
 *	Alan Cox	07 Sept 1997	Vmalloc internal stack as needed.
 *					Cope with changing max_files.
 *	Al Viro		11 Oct 1998
 *		Graph may have cycles. That is, we can send the descriptor
 *		of foo to bar and vice versa. Current code chokes on that.
 *		Fix: move SCM_RIGHTS ones into the separate list and then
 *		skb_free() them all instead of doing explicit fput's.
 *		Another problem: since fput() may block somebody may
 *		create a new unix_socket when we are in the middle of sweep
 *		phase. Fix: revert the logic wrt MARKED. Mark everything
 *		upon the beginning and unmark non-junk ones.
 *
 *		[12 Oct 1998] AAARGH! New code purges all SCM_RIGHTS
 *		sent to connect()'ed but still not accept()'ed sockets.
 *		Fixed. Old code had slightly different problem here:
 *		extra fput() in situation when we passed the descriptor via
 *		such socket and closed it (descriptor). That would happen on
 *		each unix_gc() until the accept(). Since the struct file in
 *		question would go to the free list and might be reused...
 *		That might be the reason of random oopses on filp_close()
 *		in unrelated processes.
 *
 *	AV		28 Feb 1999
 *		Kill the explicit allocation of stack. Now we keep the tree
 *		with root in dummy + pointer (gc_current) to one of the nodes.
 *		Stack is represented as path from gc_current to dummy. Unmark
 *		now means "add to tree". Push == "make it a son of gc_current".
 *		Pop == "move gc_current to parent". We keep only pointers to
 *		parents (->gc_tree).
 *	AV		1 Mar 1999
 *		Damn. Added missing check for ->dead in listen queues scanning.
 *
 *	Miklos Szeredi 25 Jun 2007
 *		Reimplement with a cycle collecting algorithm. This should
 *		solve several problems with the previous code, like being racy
 *		wrt receive and holding up unrelated socket operations.
 */
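
/* The cycle collecting algorithm below works in three phases,
 * sketched here in outline; unix_gc() is the authoritative reference:
 *
 *  1. Candidate selection: an in-flight socket whose file reference
 *     count equals its in-flight count is reachable only through
 *     SCM_RIGHTS messages, so it is moved to gc_candidates.
 *
 *  2. Reachability: the in-flight references contributed by the
 *     candidates themselves are subtracted; any candidate whose count
 *     stays positive is externally reachable after all, and the
 *     references it contributes are restored, recursively.
 *
 *  3. Sweep: sockets still on gc_candidates form reference cycles;
 *     the skbs carrying those references are moved to a hitlist and
 *     purged, dropping the last references and freeing the sockets.
 */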

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/un.h>
#include <linux/net.h>
#include <linux/fs.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/file.h>
#include <linux/proc_fs.h>
#include <linux/mutex.h>
#include <linux/wait.h>

#include <net/sock.h>
#include <net/af_unix.h>
#include <net/scm.h>
#include <net/tcp_states.h>

#include "scm.h"

/* Internal data structures and random procedures: */

/* In-flight sockets with no external references, found by unix_gc(). */
static LIST_HEAD(gc_candidates);
/* Woken up in unix_gc() once a collection run has completed. */
static DECLARE_WAIT_QUEUE_HEAD(unix_gc_wait);

static void scan_inflight(struct sock *x, void (*func)(struct unix_sock *),
                          struct sk_buff_head *hitlist)
{
        struct sk_buff *skb;
        struct sk_buff *next;

        spin_lock(&x->sk_receive_queue.lock);
        skb_queue_walk_safe(&x->sk_receive_queue, skb, next) {
                /* Do we have file descriptors? */
                if (UNIXCB(skb).fp) {
                        bool hit = false;
                        /* Process the descriptors of this socket */
                        int nfd = UNIXCB(skb).fp->count;
                        struct file **fp = UNIXCB(skb).fp->fp;

                        while (nfd--) {
                                /* Get the socket the fd refers to,
                                 * if it refers to one at all
                                 */
                                struct sock *sk = unix_get_socket(*fp++);

                                if (sk) {
                                        struct unix_sock *u = unix_sk(sk);

                                        /* Ignore non-candidates; they could
                                         * have been added to the queues after
                                         * starting the garbage collection
                                         */
                                        if (test_bit(UNIX_GC_CANDIDATE, &u->gc_flags)) {
                                                hit = true;

                                                func(u);
                                        }
                                }
                        }
                        if (hit && hitlist != NULL) {
                                __skb_unlink(skb, &x->sk_receive_queue);
                                __skb_queue_tail(hitlist, skb);
                        }
                }
        }
        spin_unlock(&x->sk_receive_queue.lock);
}

static void scan_children(struct sock *x, void (*func)(struct unix_sock *),
                          struct sk_buff_head *hitlist)
{
        if (x->sk_state != TCP_LISTEN) {
                scan_inflight(x, func, hitlist);
        } else {
                struct sk_buff *skb;
                struct sk_buff *next;
                struct unix_sock *u;
                LIST_HEAD(embryos);

                /* For a listening socket collect the queued embryos
                 * and perform a scan on them as well.
                 */
                spin_lock(&x->sk_receive_queue.lock);
                skb_queue_walk_safe(&x->sk_receive_queue, skb, next) {
                        u = unix_sk(skb->sk);

                        /* An embryo cannot be in-flight, so it's safe
                         * to use the list link.
                         */
                        BUG_ON(!list_empty(&u->link));
                        list_add_tail(&u->link, &embryos);
                }
                spin_unlock(&x->sk_receive_queue.lock);

                while (!list_empty(&embryos)) {
                        u = list_entry(embryos.next, struct unix_sock, link);
                        scan_inflight(&u->sk, func, hitlist);
                        list_del_init(&u->link);
                }
        }
}

static void dec_inflight(struct unix_sock *usk)
{
        atomic_long_dec(&usk->inflight);
}

static void inc_inflight(struct unix_sock *usk)
{
        atomic_long_inc(&usk->inflight);
}

static void inc_inflight_move_tail(struct unix_sock *u)
{
        atomic_long_inc(&u->inflight);
        /* If this still might be part of a cycle, move it to the end
         * of the list, so that it's checked even if it was already
         * passed over
         */
        if (test_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags))
                list_move_tail(&u->link, &gc_candidates);
}

static bool gc_in_progress;
#define UNIX_INFLIGHT_TRIGGER_GC 16000

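/* Throttle senders: wait until any collection in progress has
 * finished, and if an unreasonable number of descriptors is already
 * in flight, synchronously run a collection first.
 */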
void wait_for_unix_gc(void)
{
        /* If the number of inflight sockets is insane,
         * force a garbage collect right now.
         * Paired with the WRITE_ONCE() in unix_inflight(),
         * unix_notinflight() and gc_in_progress().
         */
        if (READ_ONCE(unix_tot_inflight) > UNIX_INFLIGHT_TRIGGER_GC &&
            !READ_ONCE(gc_in_progress))
                unix_gc();
        wait_event(unix_gc_wait, gc_in_progress == false);
}

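/* For illustration, the kind of cycle this collector reclaims can be
 * built entirely from user space. A minimal sketch (error handling
 * omitted) that leaves a socket holding the only reference to itself:
 *
 *	int sv[2];
 *	char buf[CMSG_SPACE(sizeof(int))] = {};
 *	struct msghdr msg = { .msg_control = buf,
 *			      .msg_controllen = sizeof(buf) };
 *	struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
 *
 *	socketpair(AF_UNIX, SOCK_DGRAM, 0, sv);
 *	cmsg->cmsg_level = SOL_SOCKET;
 *	cmsg->cmsg_type = SCM_RIGHTS;
 *	cmsg->cmsg_len = CMSG_LEN(sizeof(int));
 *	memcpy(CMSG_DATA(cmsg), &sv[0], sizeof(int));
 *	sendmsg(sv[1], &msg, 0);   // sv[0] now rides in its own queue
 *	close(sv[0]);
 *	close(sv[1]);
 *
 * After the two close() calls no fd table entry references either
 * socket, but the queued skb still pins sv[0]'s file: its
 * total_refs == inflight_refs == 1, so unix_gc() selects it as a
 * candidate and the sweep below frees it.
 */
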
/* The external entry point: unix_gc() */
void unix_gc(void)
{
        struct unix_sock *u;
        struct unix_sock *next;
        struct sk_buff_head hitlist;
        struct list_head cursor;
        LIST_HEAD(not_cycle_list);

        spin_lock(&unix_gc_lock);

        /* Avoid a recursive GC. */
        if (gc_in_progress)
                goto out;

        /* Paired with READ_ONCE() in wait_for_unix_gc(). */
        WRITE_ONCE(gc_in_progress, true);

        /* First, select candidates for garbage collection. Only
         * in-flight sockets are considered, and from those only the
         * ones which don't have any external reference.
         *
         * Holding unix_gc_lock will protect these candidates from
         * being detached, and hence from gaining an external
         * reference. Since there are no possible receivers, all
         * buffers currently on the candidates' queues stay there
         * during the garbage collection.
         *
         * We also know that no new candidate can be added onto the
         * receive queues. Other, non-candidate sockets _can_ be
         * added to the queues, so we must make sure only to touch
         * candidates.
         */
        list_for_each_entry_safe(u, next, &gc_inflight_list, link) {
                long total_refs;
                long inflight_refs;

                total_refs = file_count(u->sk.sk_socket->file);
                inflight_refs = atomic_long_read(&u->inflight);

                BUG_ON(inflight_refs < 1);
                BUG_ON(total_refs < inflight_refs);
                if (total_refs == inflight_refs) {
                        list_move_tail(&u->link, &gc_candidates);
                        __set_bit(UNIX_GC_CANDIDATE, &u->gc_flags);
                        __set_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags);
                }
        }

        /* Now remove all internal in-flight references to children of
         * the candidates.
         */
        list_for_each_entry(u, &gc_candidates, link)
                scan_children(&u->sk, dec_inflight, NULL);

        /* Restore the references for children of all candidates that
         * still have remaining references. Do this recursively, so
         * that only the ones forming cyclic references remain.
         *
         * Use a "cursor" link to make the list traversal safe, even
         * though elements might be moved about.
         */
        list_add(&cursor, &gc_candidates);
        while (cursor.next != &gc_candidates) {
                u = list_entry(cursor.next, struct unix_sock, link);

                /* Move cursor to after the current position. */
                list_move(&cursor, &u->link);

                if (atomic_long_read(&u->inflight) > 0) {
                        list_move_tail(&u->link, &not_cycle_list);
                        __clear_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags);
                        scan_children(&u->sk, inc_inflight_move_tail, NULL);
                }
        }
        list_del(&cursor);

        /* Now gc_candidates contains only garbage. Restore the original
         * inflight counters for these as well, and remove the skbuffs
         * which are creating the cycle(s).
         */
        skb_queue_head_init(&hitlist);
        list_for_each_entry(u, &gc_candidates, link)
                scan_children(&u->sk, inc_inflight, &hitlist);

        /* not_cycle_list contains those sockets which do not make up a
         * cycle. Restore these to the inflight list.
         */
        while (!list_empty(&not_cycle_list)) {
                u = list_entry(not_cycle_list.next, struct unix_sock, link);
                __clear_bit(UNIX_GC_CANDIDATE, &u->gc_flags);
                list_move_tail(&u->link, &gc_inflight_list);
        }

        spin_unlock(&unix_gc_lock);

        /* Here we are. Hitlist is filled. Die. */
        __skb_queue_purge(&hitlist);

        spin_lock(&unix_gc_lock);

        /* All candidates should have been detached by now. */
        BUG_ON(!list_empty(&gc_candidates));

        /* Paired with READ_ONCE() in wait_for_unix_gc(). */
        WRITE_ONCE(gc_in_progress, false);

        wake_up(&unix_gc_wait);

 out:
        spin_unlock(&unix_gc_lock);
}