// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * NET3:	Garbage Collector For AF_UNIX sockets
 *
 * Garbage Collector:
 *	Copyright (C) Barak A. Pearlmutter.
 *
 * Chopped about by Alan Cox 22/3/96 to make it fit the AF_UNIX socket problem.
 * If it doesn't work blame me, it worked when Barak sent it.
 *
 * Assumptions:
 *
 *  - object w/ a bit
 *  - free list
 *
 * Current optimizations:
 *
 *  - explicit stack instead of recursion
 *  - tail recurse on first born instead of immediate push/pop
 *  - we gather the stuff that should not be killed into tree
 *    and stack is just a path from root to the current pointer.
 *
 * Future optimizations:
 *
 *  - don't just push entire root set; process in place
 *
 * Fixes:
 *	Alan Cox	07 Sept 1997	Vmalloc internal stack as needed.
 *					Cope with changing max_files.
 *	Al Viro		11 Oct 1998
 *		Graph may have cycles. That is, we can send the descriptor
 *		of foo to bar and vice versa. Current code chokes on that.
 *		Fix: move SCM_RIGHTS ones into the separate list and then
 *		skb_free() them all instead of doing explicit fput's.
 *		Another problem: since fput() may block, somebody may
 *		create a new unix_socket while we are in the middle of the
 *		sweep phase. Fix: revert the logic wrt MARKED. Mark
 *		everything upon the beginning and unmark non-junk ones.
 *
 *		[12 Oct 1998] AAARGH! New code purges all SCM_RIGHTS
 *		sent to connect()'ed but still not accept()'ed sockets.
 *		Fixed. Old code had a slightly different problem here:
 *		an extra fput() when we passed the descriptor via such a
 *		socket and then closed it (the descriptor). That would
 *		happen on each unix_gc() until the accept(). Since the
 *		struct file in question would go to the free list and
 *		might be reused... That might be the reason for random
 *		oopses on filp_close() in unrelated processes.
 *
 *	AV		28 Feb 1999
 *		Kill the explicit allocation of stack. Now we keep the tree
 *		with root in dummy + pointer (gc_current) to one of the nodes.
 *		Stack is represented as path from gc_current to dummy. Unmark
 *		now means "add to tree". Push == "make it a son of gc_current".
 *		Pop == "move gc_current to parent". We keep only pointers to
 *		parents (->gc_tree).
 *	AV		1 Mar 1999
 *		Damn. Added missing check for ->dead in listen queues
 *		scanning.
 *
 *	Miklos Szeredi	25 Jun 2007
 *		Reimplement with a cycle collecting algorithm. This should
 *		solve several problems with the previous code, like being
 *		racy wrt receive and holding up unrelated socket operations.
 */
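/* Example of how such a cycle is created from userspace (illustrative
 * sketch, not part of the kernel build; error handling omitted): each
 * end of a socketpair is passed over the other end via SCM_RIGHTS and
 * both descriptors are then closed.  The two sockets now hold the only
 * references to each other, so only this collector can reclaim them.
 *
 *	#include <string.h>
 *	#include <sys/socket.h>
 *	#include <sys/uio.h>
 *	#include <unistd.h>
 *
 *	static void pass_fd(int via, int fd)
 *	{
 *		union {		// aligned control buffer, as in cmsg(3)
 *			char buf[CMSG_SPACE(sizeof(int))];
 *			struct cmsghdr align;
 *		} cbuf;
 *		char data = 'x';
 *		struct iovec iov = { .iov_base = &data, .iov_len = 1 };
 *		struct msghdr mh = {
 *			.msg_iov = &iov, .msg_iovlen = 1,
 *			.msg_control = cbuf.buf,
 *			.msg_controllen = sizeof(cbuf.buf),
 *		};
 *		struct cmsghdr *cm = CMSG_FIRSTHDR(&mh);
 *
 *		cm->cmsg_level = SOL_SOCKET;
 *		cm->cmsg_type = SCM_RIGHTS;
 *		cm->cmsg_len = CMSG_LEN(sizeof(int));
 *		memcpy(CMSG_DATA(cm), &fd, sizeof(int));
 *		sendmsg(via, &mh, 0);
 *	}
 *
 *	int main(void)
 *	{
 *		int sv[2];
 *
 *		socketpair(AF_UNIX, SOCK_STREAM, 0, sv);
 *		pass_fd(sv[0], sv[1]);	// sv[1] is now in flight on sv[0]
 *		pass_fd(sv[1], sv[0]);	// sv[0] is now in flight on sv[1]
 *		close(sv[0]);
 *		close(sv[1]);		// cycle: no external references left
 *		return 0;
 *	}
 */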
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/un.h>
#include <linux/net.h>
#include <linux/fs.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/file.h>
#include <linux/proc_fs.h>
#include <linux/mutex.h>
#include <linux/wait.h>

#include <net/sock.h>
#include <net/af_unix.h>
#include <net/scm.h>
#include <net/tcp_states.h>

struct unix_sock *unix_get_socket(struct file *filp)
{
	struct inode *inode = file_inode(filp);

	/* Socket ? */
	if (S_ISSOCK(inode->i_mode) && !(filp->f_mode & FMODE_PATH)) {
		struct socket *sock = SOCKET_I(inode);
		const struct proto_ops *ops;
		struct sock *sk = sock->sk;

		ops = READ_ONCE(sock->ops);

		/* PF_UNIX ? */
		if (sk && ops && ops->family == PF_UNIX)
			return unix_sk(sk);
	}

	return NULL;
}

DEFINE_SPINLOCK(unix_gc_lock);
unsigned int unix_tot_inflight;
static LIST_HEAD(gc_candidates);
static LIST_HEAD(gc_inflight_list);

/* Keep count of the number of times a file descriptor is in flight,
 * if it refers to an AF_UNIX socket.
 */
void unix_inflight(struct user_struct *user, struct file *filp)
{
	struct unix_sock *u = unix_get_socket(filp);

	spin_lock(&unix_gc_lock);

	if (u) {
		if (!u->inflight) {
			WARN_ON_ONCE(!list_empty(&u->link));
			list_add_tail(&u->link, &gc_inflight_list);
		} else {
			WARN_ON_ONCE(list_empty(&u->link));
		}
		u->inflight++;

		/* Paired with READ_ONCE() in wait_for_unix_gc() */
		WRITE_ONCE(unix_tot_inflight, unix_tot_inflight + 1);
	}

	WRITE_ONCE(user->unix_inflight, user->unix_inflight + 1);

	spin_unlock(&unix_gc_lock);
}

void unix_notinflight(struct user_struct *user, struct file *filp)
{
	struct unix_sock *u = unix_get_socket(filp);

	spin_lock(&unix_gc_lock);

	if (u) {
		WARN_ON_ONCE(!u->inflight);
		WARN_ON_ONCE(list_empty(&u->link));

		u->inflight--;
		if (!u->inflight)
			list_del_init(&u->link);

		/* Paired with READ_ONCE() in wait_for_unix_gc() */
		WRITE_ONCE(unix_tot_inflight, unix_tot_inflight - 1);
	}

	WRITE_ONCE(user->unix_inflight, user->unix_inflight - 1);

	spin_unlock(&unix_gc_lock);
}

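/* Apply @func to every candidate unix_sock referenced by an SCM_RIGHTS
 * cookie on @x's receive queue.  If @hitlist is non-NULL, any skb whose
 * descriptors reference at least one candidate is unlinked from the
 * queue onto @hitlist for later purging.
 */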
static void scan_inflight(struct sock *x, void (*func)(struct unix_sock *),
			  struct sk_buff_head *hitlist)
{
	struct sk_buff *skb;
	struct sk_buff *next;

	spin_lock(&x->sk_receive_queue.lock);
	skb_queue_walk_safe(&x->sk_receive_queue, skb, next) {
		/* Do we have file descriptors ? */
		if (UNIXCB(skb).fp) {
			bool hit = false;
			/* Process the descriptors of this socket */
			int nfd = UNIXCB(skb).fp->count;
			struct file **fp = UNIXCB(skb).fp->fp;

			while (nfd--) {
				/* Get the socket the fd matches if it indeed does so */
				struct unix_sock *u = unix_get_socket(*fp++);

				/* Ignore non-candidates; they could have been
				 * added to the queues after starting the
				 * garbage collection.
				 */
				if (u && test_bit(UNIX_GC_CANDIDATE, &u->gc_flags)) {
					hit = true;

					func(u);
				}
			}
			if (hit && hitlist != NULL) {
				__skb_unlink(skb, &x->sk_receive_queue);
				__skb_queue_tail(hitlist, skb);
			}
		}
	}
	spin_unlock(&x->sk_receive_queue.lock);
}

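/* Like scan_inflight(), but for a listening socket scan the receive
 * queues of its not-yet-accept()ed embryos rather than its own.
 */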
static void scan_children(struct sock *x, void (*func)(struct unix_sock *),
			  struct sk_buff_head *hitlist)
{
	if (x->sk_state != TCP_LISTEN) {
		scan_inflight(x, func, hitlist);
	} else {
		struct sk_buff *skb;
		struct sk_buff *next;
		struct unix_sock *u;
		LIST_HEAD(embryos);

		/* For a listening socket collect the queued embryos
		 * and perform a scan on them as well.
		 */
		spin_lock(&x->sk_receive_queue.lock);
		skb_queue_walk_safe(&x->sk_receive_queue, skb, next) {
			u = unix_sk(skb->sk);

			/* An embryo cannot be in-flight, so it's safe
			 * to use the list link.
			 */
			WARN_ON_ONCE(!list_empty(&u->link));
			list_add_tail(&u->link, &embryos);
		}
		spin_unlock(&x->sk_receive_queue.lock);

		while (!list_empty(&embryos)) {
			u = list_entry(embryos.next, struct unix_sock, link);
			scan_inflight(&u->sk, func, hitlist);
			list_del_init(&u->link);
		}
	}
}

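/* Callbacks for scan_children(): __unix_gc() below uses them to bias
 * and then restore the candidates' in-flight counts while searching
 * for cycles.
 */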
static void dec_inflight(struct unix_sock *usk)
{
	usk->inflight--;
}

static void inc_inflight(struct unix_sock *usk)
{
	usk->inflight++;
}

static void inc_inflight_move_tail(struct unix_sock *u)
{
	u->inflight++;

	/* If this still might be part of a cycle, move it to the end
	 * of the list, so that it's checked even if it was already
	 * passed over
	 */
	if (test_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags))
		list_move_tail(&u->link, &gc_candidates);
}

static bool gc_in_progress;

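/* The actual collector, run from a workqueue: find candidates that are
 * reachable only from other in-flight sockets (i.e. SCM_RIGHTS cycles)
 * and purge the skbs pinning them.
 */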
static void __unix_gc(struct work_struct *work)
{
	struct sk_buff_head hitlist;
	struct unix_sock *u, *next;
	LIST_HEAD(not_cycle_list);
	struct list_head cursor;

	spin_lock(&unix_gc_lock);

	/* First, select candidates for garbage collection. Only
	 * in-flight sockets are considered, and from those only the
	 * ones which don't have any external reference.
	 *
	 * Holding unix_gc_lock will protect these candidates from
	 * being detached, and hence from gaining an external
	 * reference. Since there are no possible receivers, all
	 * buffers currently on the candidates' queues stay there
	 * during the garbage collection.
	 *
	 * We also know that no new candidate can be added onto the
	 * receive queues. Other, non-candidate sockets _can_ be
	 * added to the queues, so we must make sure only to touch
	 * candidates.
	 *
	 * Embryos, though never candidates themselves, affect which
	 * candidates are reachable by the garbage collector. Before
	 * being added to a listener's queue, an embryo may already
	 * receive data carrying SCM_RIGHTS, potentially making the
	 * passed socket a candidate that is not yet reachable by the
	 * collector. It becomes reachable once the embryo is
	 * enqueued. Therefore, we must ensure that no SCM-laden
	 * embryo appears in a (candidate) listener's queue between
	 * consecutive scan_children() calls.
	 */
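	/* Example: an end of a socketpair that was sent over its peer
	 * and then close()d has file_count() == 1 with ->inflight == 1;
	 * every reference to it sits in a not-yet-received skb, so it
	 * becomes a candidate.
	 */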
	list_for_each_entry_safe(u, next, &gc_inflight_list, link) {
		struct sock *sk = &u->sk;
		long total_refs;

		total_refs = file_count(sk->sk_socket->file);

		WARN_ON_ONCE(!u->inflight);
		WARN_ON_ONCE(total_refs < u->inflight);
		if (total_refs == u->inflight) {
			list_move_tail(&u->link, &gc_candidates);
			__set_bit(UNIX_GC_CANDIDATE, &u->gc_flags);
			__set_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags);

			if (sk->sk_state == TCP_LISTEN) {
				unix_state_lock_nested(sk, U_LOCK_GC_LISTENER);
				unix_state_unlock(sk);
			}
		}
	}

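	/* The lock/unlock on listeners above pairs with the state lock
	 * in unix_stream_connect(): after it, any embryo that received
	 * SCM_RIGHTS before this scan is visible on its listener's
	 * queue for the scan_children() calls below.
	 */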
	/* Now remove all internal in-flight references to children of
	 * the candidates.
	 */
	list_for_each_entry(u, &gc_candidates, link)
		scan_children(&u->sk, dec_inflight, NULL);
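	/* At this point a candidate's ->inflight counts only the
	 * references coming from outside the candidate set; a non-zero
	 * value means it is still reachable through some non-candidate
	 * socket.
	 */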
	/* Restore the references for children of all candidates which
	 * still have remaining references. Do this recursively, so
	 * that only those forming cyclic references remain.
	 *
	 * Use a "cursor" link to make the list traversal safe, even
	 * though elements might be moved about.
	 */
	list_add(&cursor, &gc_candidates);
	while (cursor.next != &gc_candidates) {
		u = list_entry(cursor.next, struct unix_sock, link);

		/* Move cursor to after the current position. */
		list_move(&cursor, &u->link);

		if (u->inflight) {
			list_move_tail(&u->link, &not_cycle_list);
			__clear_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags);
			scan_children(&u->sk, inc_inflight_move_tail, NULL);
		}
	}
	list_del(&cursor);

	/* Now gc_candidates contains only garbage. Restore original
	 * inflight counters for these as well, and remove the skbuffs
	 * which are creating the cycle(s).
	 */
	skb_queue_head_init(&hitlist);
	list_for_each_entry(u, &gc_candidates, link) {
		scan_children(&u->sk, inc_inflight, &hitlist);

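		/* MSG_OOB holds an extra reference on its skb via
		 * u->oob_skb; drop it here so an OOB skb that is part
		 * of a cycle does not outlive the purge below.
		 */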
#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
		if (u->oob_skb) {
			kfree_skb(u->oob_skb);
			u->oob_skb = NULL;
		}
#endif
	}

	/* not_cycle_list contains those sockets which do not make up a
	 * cycle. Restore these to the inflight list.
	 */
	while (!list_empty(&not_cycle_list)) {
		u = list_entry(not_cycle_list.next, struct unix_sock, link);
		__clear_bit(UNIX_GC_CANDIDATE, &u->gc_flags);
		list_move_tail(&u->link, &gc_inflight_list);
	}

	spin_unlock(&unix_gc_lock);

	/* Here we are. Hitlist is filled. Die. */
	__skb_queue_purge(&hitlist);

	spin_lock(&unix_gc_lock);

	/* All candidates should have been detached by now. */
	WARN_ON_ONCE(!list_empty(&gc_candidates));

	/* Paired with READ_ONCE() in wait_for_unix_gc(). */
	WRITE_ONCE(gc_in_progress, false);

	spin_unlock(&unix_gc_lock);
}

static DECLARE_WORK(unix_gc_work, __unix_gc);

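/* Kick the collector on the unbound system workqueue; callers (e.g.
 * the socket release path) do not wait for the collection to finish.
 */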
void unix_gc(void)
{
	WRITE_ONCE(gc_in_progress, true);
	queue_work(system_unbound_wq, &unix_gc_work);
}

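/* Force a collection once this many descriptors are in flight
 * system-wide, and throttle an individual user once it alone has
 * SCM_MAX_FD * 8 descriptors in flight.
 */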
#define UNIX_INFLIGHT_TRIGGER_GC 16000
#define UNIX_INFLIGHT_SANE_USER (SCM_MAX_FD * 8)

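/* Called from the sendmsg paths before new descriptors are queued, so
 * that a sender flooding the machine with in-flight sockets ends up
 * waiting behind its own garbage.
 */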
void wait_for_unix_gc(struct scm_fp_list *fpl)
{
	/* If the number of in-flight sockets is insane, force a
	 * garbage collection right now.
	 *
	 * Paired with the WRITE_ONCE() in unix_inflight(),
	 * unix_notinflight(), and __unix_gc().
	 */
	if (READ_ONCE(unix_tot_inflight) > UNIX_INFLIGHT_TRIGGER_GC &&
	    !READ_ONCE(gc_in_progress))
		unix_gc();

	/* Penalise users who want to send AF_UNIX sockets
	 * but whose sockets have not been received yet.
	 */
	if (!fpl || !fpl->count_unix ||
	    READ_ONCE(fpl->user->unix_inflight) < UNIX_INFLIGHT_SANE_USER)
		return;

	if (READ_ONCE(gc_in_progress))
		flush_work(&unix_gc_work);
}