// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * NET3:        Garbage Collector For AF_UNIX sockets
 *
 * Garbage Collector:
 *      Copyright (C) Barak A. Pearlmutter.
 *
 * Chopped about by Alan Cox 22/3/96 to make it fit the AF_UNIX socket problem.
 * If it doesn't work blame me, it worked when Barak sent it.
 *
 * Assumptions:
 *
 *  - object w/ a bit
 *  - free list
 *
 * Current optimizations:
 *
 *  - explicit stack instead of recursion
 *  - tail recurse on first born instead of immediate push/pop
 *  - we gather the stuff that should not be killed into tree
 *    and stack is just a path from root to the current pointer.
 *
 *  Future optimizations:
 *
 *  - don't just push entire root set; process in place
 *
 *  Fixes:
 *      Alan Cox        07 Sept 1997    Vmalloc internal stack as needed.
 *                                      Cope with changing max_files.
 *      Al Viro         11 Oct 1998
 *              Graph may have cycles. That is, we can send the descriptor
 *              of foo to bar and vice versa. Current code chokes on that.
 *              Fix: move SCM_RIGHTS ones into the separate list and then
 *              skb_free() them all instead of doing explicit fput's.
 *              Another problem: since fput() may block somebody may
 *              create a new unix_socket when we are in the middle of sweep
 *              phase. Fix: revert the logic wrt MARKED. Mark everything
 *              upon the beginning and unmark non-junk ones.
 *
 *              [12 Oct 1998] AAARGH! New code purges all SCM_RIGHTS
 *              sent to connect()'ed but still not accept()'ed sockets.
 *              Fixed. Old code had slightly different problem here:
 *              extra fput() in situation when we passed the descriptor via
 *              such socket and closed it (descriptor). That would happen on
 *              each unix_gc() until the accept(). Since the struct file in
 *              question would go to the free list and might be reused...
 *              That might be the reason of random oopses on filp_close()
 *              in unrelated processes.
 *
 *      AV              28 Feb 1999
 *              Kill the explicit allocation of stack. Now we keep the tree
 *              with root in dummy + pointer (gc_current) to one of the nodes.
 *              Stack is represented as path from gc_current to dummy. Unmark
 *              now means "add to tree". Push == "make it a son of gc_current".
 *              Pop == "move gc_current to parent". We keep only pointers to
 *              parents (->gc_tree).
 *      AV              1 Mar 1999
 *              Damn. Added missing check for ->dead in listen queues scanning.
 *
 *      Miklos Szeredi 25 Jun 2007
 *              Reimplement with a cycle collecting algorithm. This should
 *              solve several problems with the previous code, like being racy
 *              wrt receive and holding up unrelated socket operations.
 */

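/* Overview of the inflight graph maintained below:
 *
 *  - Each AF_UNIX socket whose fd is in flight (queued in an SCM_RIGHTS
 *    message) is represented by a struct unix_vertex.
 *  - Each in-flight fd adds a struct unix_edge from the socket being
 *    passed (predecessor) to the receiving socket (successor).
 *  - Garbage collection groups the vertices into strongly connected
 *    components (SCCs).  An SCC whose sockets are referenced only by
 *    messages queued inside the same SCC is unreachable from user space,
 *    so its queued skbs can be purged.
 */
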
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/un.h>
#include <linux/net.h>
#include <linux/fs.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/file.h>
#include <linux/proc_fs.h>
#include <linux/mutex.h>
#include <linux/wait.h>

#include <net/sock.h>
#include <net/af_unix.h>
#include <net/scm.h>
#include <net/tcp_states.h>

struct unix_sock *unix_get_socket(struct file *filp)
{
        struct inode *inode = file_inode(filp);

        /* Socket ? */
        if (S_ISSOCK(inode->i_mode) && !(filp->f_mode & FMODE_PATH)) {
                struct socket *sock = SOCKET_I(inode);
                const struct proto_ops *ops;
                struct sock *sk = sock->sk;

                ops = READ_ONCE(sock->ops);

                /* PF_UNIX ? */
                if (sk && ops && ops->family == PF_UNIX)
                        return unix_sk(sk);
        }

        return NULL;
}

static struct unix_vertex *unix_edge_successor(struct unix_edge *edge)
{
        /* If an embryo socket has a fd,
         * the listener indirectly holds the fd's refcnt.
         */
        if (edge->successor->listener)
                return unix_sk(edge->successor->listener)->vertex;

        return edge->successor->vertex;
}

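/* unix_graph_maybe_cyclic: a cyclic reference may exist somewhere in the
 * graph, so the next GC run must actually walk it.
 * unix_graph_grouped: the vertices are still grouped into the SCCs found
 * by the previous walk, so the next GC run can use unix_walk_scc_fast().
 */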
static bool unix_graph_maybe_cyclic;
static bool unix_graph_grouped;

static void unix_update_graph(struct unix_vertex *vertex)
{
        /* If the receiver socket is not inflight, no cyclic
         * reference could be formed.
         */
        if (!vertex)
                return;

        unix_graph_maybe_cyclic = true;
        unix_graph_grouped = false;
}

static LIST_HEAD(unix_unvisited_vertices);

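/* A vertex's index encodes its state.  MARK1 and MARK2 alternate between
 * "not yet visited" and "already grouped into an SCC"; unix_walk_scc()
 * swaps the two marks after a full walk instead of resetting every vertex.
 * Indices >= UNIX_VERTEX_INDEX_START are the visit order assigned while
 * the vertex sits on the DFS stack.
 */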
enum unix_vertex_index {
        UNIX_VERTEX_INDEX_MARK1,
        UNIX_VERTEX_INDEX_MARK2,
        UNIX_VERTEX_INDEX_START,
};

static unsigned long unix_vertex_unvisited_index = UNIX_VERTEX_INDEX_MARK1;

static void unix_add_edge(struct scm_fp_list *fpl, struct unix_edge *edge)
{
        struct unix_vertex *vertex = edge->predecessor->vertex;

        if (!vertex) {
                vertex = list_first_entry(&fpl->vertices, typeof(*vertex), entry);
                vertex->index = unix_vertex_unvisited_index;
                vertex->out_degree = 0;
                INIT_LIST_HEAD(&vertex->edges);
                INIT_LIST_HEAD(&vertex->scc_entry);

                list_move_tail(&vertex->entry, &unix_unvisited_vertices);
                edge->predecessor->vertex = vertex;
        }

        vertex->out_degree++;
        list_add_tail(&edge->vertex_entry, &vertex->edges);

        unix_update_graph(unix_edge_successor(edge));
}

static void unix_del_edge(struct scm_fp_list *fpl, struct unix_edge *edge)
{
        struct unix_vertex *vertex = edge->predecessor->vertex;

        if (!fpl->dead)
                unix_update_graph(unix_edge_successor(edge));

        list_del(&edge->vertex_entry);
        vertex->out_degree--;

        if (!vertex->out_degree) {
                edge->predecessor->vertex = NULL;
                list_move_tail(&vertex->entry, &fpl->vertices);
        }
}

static void unix_free_vertices(struct scm_fp_list *fpl)
{
        struct unix_vertex *vertex, *next_vertex;

        list_for_each_entry_safe(vertex, next_vertex, &fpl->vertices, entry) {
                list_del(&vertex->entry);
                kfree(vertex);
        }
}

static DEFINE_SPINLOCK(unix_gc_lock);
unsigned int unix_tot_inflight;

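/* Register an SCM_RIGHTS payload in the graph: for every AF_UNIX fd in
 * @fpl, record one edge from the passed socket (predecessor) to @receiver
 * (successor) and update the inflight counters.
 */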
void unix_add_edges(struct scm_fp_list *fpl, struct unix_sock *receiver)
{
        int i = 0, j = 0;

        spin_lock(&unix_gc_lock);

        if (!fpl->count_unix)
                goto out;

        do {
                struct unix_sock *inflight = unix_get_socket(fpl->fp[j++]);
                struct unix_edge *edge;

                if (!inflight)
                        continue;

                edge = fpl->edges + i++;
                edge->predecessor = inflight;
                edge->successor = receiver;

                unix_add_edge(fpl, edge);
        } while (i < fpl->count_unix);

        receiver->scm_stat.nr_unix_fds += fpl->count_unix;
        WRITE_ONCE(unix_tot_inflight, unix_tot_inflight + fpl->count_unix);
out:
        WRITE_ONCE(fpl->user->unix_inflight, fpl->user->unix_inflight + fpl->count);

        spin_unlock(&unix_gc_lock);

        fpl->inflight = true;

        unix_free_vertices(fpl);
}

void unix_del_edges(struct scm_fp_list *fpl)
{
        struct unix_sock *receiver;
        int i = 0;

        spin_lock(&unix_gc_lock);

        if (!fpl->count_unix)
                goto out;

        do {
                struct unix_edge *edge = fpl->edges + i++;

                unix_del_edge(fpl, edge);
        } while (i < fpl->count_unix);

        if (!fpl->dead) {
                receiver = fpl->edges[0].successor;
                receiver->scm_stat.nr_unix_fds -= fpl->count_unix;
        }
        WRITE_ONCE(unix_tot_inflight, unix_tot_inflight - fpl->count_unix);
out:
        WRITE_ONCE(fpl->user->unix_inflight, fpl->user->unix_inflight - fpl->count);

        spin_unlock(&unix_gc_lock);

        fpl->inflight = false;
}

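/* Called when an embryo socket leaves its listener (accept()).  While
 * ->listener is set, unix_edge_successor() resolves edges queued on the
 * embryo to the listener's vertex; clearing it moves those successors to
 * the embryo's own vertex, so unix_update_graph() is told about the
 * change unless no fd is queued on the embryo.
 */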
void unix_update_edges(struct unix_sock *receiver)
{
        /* nr_unix_fds is only updated under unix_state_lock().
         * If it's 0 here, the embryo socket is not part of the
         * inflight graph, and GC will not see it, so no lock needed.
         */
        if (!receiver->scm_stat.nr_unix_fds) {
                receiver->listener = NULL;
        } else {
                spin_lock(&unix_gc_lock);
                unix_update_graph(unix_sk(receiver->listener)->vertex);
                receiver->listener = NULL;
                spin_unlock(&unix_gc_lock);
        }
}

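/* Preallocate the per-fd vertices and the edge array up front, so that
 * unix_add_edges() never needs to allocate while unix_gc_lock is held.
 */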
int unix_prepare_fpl(struct scm_fp_list *fpl)
{
        struct unix_vertex *vertex;
        int i;

        if (!fpl->count_unix)
                return 0;

        for (i = 0; i < fpl->count_unix; i++) {
                vertex = kmalloc(sizeof(*vertex), GFP_KERNEL);
                if (!vertex)
                        goto err;

                list_add(&vertex->entry, &fpl->vertices);
        }

        fpl->edges = kvmalloc_array(fpl->count_unix, sizeof(*fpl->edges),
                                    GFP_KERNEL_ACCOUNT);
        if (!fpl->edges)
                goto err;

        return 0;

err:
        unix_free_vertices(fpl);
        return -ENOMEM;
}

void unix_destroy_fpl(struct scm_fp_list *fpl)
{
        if (fpl->inflight)
                unix_del_edges(fpl);

        kvfree(fpl->edges);
        unix_free_vertices(fpl);
}

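/* A vertex is dead iff both conditions hold:
 *
 *   1. every receiver of its fd is an inflight socket in the same SCC, and
 *   2. the file's refcount equals the vertex's out_degree, i.e. the only
 *      remaining references to the socket are those inflight fds.
 *
 * Such a socket can no longer be reached from user space.
 */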
static bool unix_vertex_dead(struct unix_vertex *vertex)
{
        struct unix_edge *edge;
        struct unix_sock *u;
        long total_ref;

        list_for_each_entry(edge, &vertex->edges, vertex_entry) {
                struct unix_vertex *next_vertex = unix_edge_successor(edge);

                /* The vertex's fd can be received by a non-inflight socket. */
                if (!next_vertex)
                        return false;

                /* The vertex's fd can be received by an inflight socket in
                 * another SCC.
                 */
                if (next_vertex->scc_index != vertex->scc_index)
                        return false;
        }

        /* No receiver exists out of the same SCC. */

        edge = list_first_entry(&vertex->edges, typeof(*edge), vertex_entry);
        u = edge->predecessor;
        total_ref = file_count(u->sk.sk_socket->file);

        /* If not close()d, total_ref > out_degree. */
        if (total_ref != vertex->out_degree)
                return false;

        return true;
}

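/* Lockdep subclasses for sk_receive_queue.lock.  unix_collect_skb() takes
 * a listener's queue lock and then an embryo's queue lock; annotating the
 * inner lock with U_RECVQ_LOCK_EMBRYO tells lockdep that this nesting is
 * intentional and is never inverted.
 */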
enum unix_recv_queue_lock_class {
        U_RECVQ_LOCK_NORMAL,
        U_RECVQ_LOCK_EMBRYO,
};

static void unix_collect_queue(struct unix_sock *u, struct sk_buff_head *hitlist)
{
        skb_queue_splice_init(&u->sk.sk_receive_queue, hitlist);

#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
        if (u->oob_skb) {
                WARN_ON_ONCE(skb_unref(u->oob_skb));
                u->oob_skb = NULL;
        }
#endif
}

static void unix_collect_skb(struct list_head *scc, struct sk_buff_head *hitlist)
{
        struct unix_vertex *vertex;

        list_for_each_entry_reverse(vertex, scc, scc_entry) {
                struct sk_buff_head *queue;
                struct unix_edge *edge;
                struct unix_sock *u;

                edge = list_first_entry(&vertex->edges, typeof(*edge), vertex_entry);
                u = edge->predecessor;
                queue = &u->sk.sk_receive_queue;

                spin_lock(&queue->lock);

                if (u->sk.sk_state == TCP_LISTEN) {
                        struct sk_buff *skb;

                        skb_queue_walk(queue, skb) {
                                struct sk_buff_head *embryo_queue = &skb->sk->sk_receive_queue;

                                /* listener -> embryo order, the inversion never happens. */
                                spin_lock_nested(&embryo_queue->lock, U_RECVQ_LOCK_EMBRYO);
                                unix_collect_queue(unix_sk(skb->sk), hitlist);
                                spin_unlock(&embryo_queue->lock);
                        }
                } else {
                        unix_collect_queue(u, hitlist);
                }

                spin_unlock(&queue->lock);
        }
}

static bool unix_scc_cyclic(struct list_head *scc)
{
        struct unix_vertex *vertex;
        struct unix_edge *edge;

        /* SCC containing multiple vertices ? */
        if (!list_is_singular(scc))
                return true;

        vertex = list_first_entry(scc, typeof(*vertex), scc_entry);

        /* Self-reference or an embryo-listener circle ? */
        list_for_each_entry(edge, &vertex->edges, vertex_entry) {
                if (unix_edge_successor(edge) == vertex)
                        return true;
        }

        return false;
}

static LIST_HEAD(unix_visited_vertices);
static unsigned long unix_vertex_grouped_index = UNIX_VERTEX_INDEX_MARK2;

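/* Tarjan-style SCC detection, implemented iteratively.  Starting from
 * @vertex, walk the graph depth-first using edge_stack instead of
 * recursion.  A vertex whose index still equals its scc_index after its
 * edges are explored is the root of an SCC, and every vertex above it on
 * vertex_stack belongs to that SCC.  Dead SCCs are handed to
 * unix_collect_skb(); surviving ones may mark the graph as maybe-cyclic.
 */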
static void __unix_walk_scc(struct unix_vertex *vertex, unsigned long *last_index,
                            struct sk_buff_head *hitlist)
{
        LIST_HEAD(vertex_stack);
        struct unix_edge *edge;
        LIST_HEAD(edge_stack);

next_vertex:
        /* Push vertex to vertex_stack and mark it as on-stack
         * (index >= UNIX_VERTEX_INDEX_START).
         * The vertex will be popped when finalising SCC later.
         */
        list_add(&vertex->scc_entry, &vertex_stack);

        vertex->index = *last_index;
        vertex->scc_index = *last_index;
        (*last_index)++;

        /* Explore neighbour vertices (receivers of the current vertex's fd). */
        list_for_each_entry(edge, &vertex->edges, vertex_entry) {
                struct unix_vertex *next_vertex = unix_edge_successor(edge);

                if (!next_vertex)
                        continue;

                if (next_vertex->index == unix_vertex_unvisited_index) {
                        /* Iterative deepening depth first search
                         *
                         *   1. Push a forward edge to edge_stack and set
                         *      the successor to vertex for the next iteration.
                         */
                        list_add(&edge->stack_entry, &edge_stack);

                        vertex = next_vertex;
                        goto next_vertex;

                        /*   2. Pop the edge directed to the current vertex
                         *      and restore the ancestor for backtracking.
                         */
prev_vertex:
                        edge = list_first_entry(&edge_stack, typeof(*edge), stack_entry);
                        list_del_init(&edge->stack_entry);

                        next_vertex = vertex;
                        vertex = edge->predecessor->vertex;

                        /* If the successor has a smaller scc_index, two vertices
                         * are in the same SCC, so propagate the smaller scc_index
                         * to skip SCC finalisation.
                         */
                        vertex->scc_index = min(vertex->scc_index, next_vertex->scc_index);
                } else if (next_vertex->index != unix_vertex_grouped_index) {
                        /* Loop detected by a back/cross edge.
                         *
                         * The successor is on vertex_stack, so two vertices are in
                         * the same SCC.  If the successor has a smaller *scc_index*,
                         * propagate it to skip SCC finalisation.
                         */
                        vertex->scc_index = min(vertex->scc_index, next_vertex->scc_index);
                } else {
                        /* The successor was already grouped as another SCC */
                }
        }

        if (vertex->index == vertex->scc_index) {
                struct list_head scc;
                bool scc_dead = true;

                /* SCC finalised.
                 *
                 * If the scc_index was not updated, all the vertices above on
                 * vertex_stack are in the same SCC.  Group them using scc_entry.
                 */
                __list_cut_position(&scc, &vertex_stack, &vertex->scc_entry);

                list_for_each_entry_reverse(vertex, &scc, scc_entry) {
                        /* Don't restart DFS from this vertex in unix_walk_scc(). */
                        list_move_tail(&vertex->entry, &unix_visited_vertices);

                        /* Mark vertex as off-stack. */
                        vertex->index = unix_vertex_grouped_index;

                        if (scc_dead)
                                scc_dead = unix_vertex_dead(vertex);
                }

                if (scc_dead)
                        unix_collect_skb(&scc, hitlist);
                else if (!unix_graph_maybe_cyclic)
                        unix_graph_maybe_cyclic = unix_scc_cyclic(&scc);

                list_del(&scc);
        }

        /* Need backtracking ? */
        if (!list_empty(&edge_stack))
                goto prev_vertex;
}

static void unix_walk_scc(struct sk_buff_head *hitlist)
{
        unsigned long last_index = UNIX_VERTEX_INDEX_START;

        unix_graph_maybe_cyclic = false;

        /* Visit every vertex exactly once.
         * __unix_walk_scc() moves visited vertices to unix_visited_vertices.
         */
        while (!list_empty(&unix_unvisited_vertices)) {
                struct unix_vertex *vertex;

                vertex = list_first_entry(&unix_unvisited_vertices, typeof(*vertex), entry);
                __unix_walk_scc(vertex, &last_index, hitlist);
        }

        list_replace_init(&unix_visited_vertices, &unix_unvisited_vertices);
        swap(unix_vertex_unvisited_index, unix_vertex_grouped_index);

        unix_graph_grouped = true;
}

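/* Fast path used when the graph has not changed since the last full walk
 * (unix_graph_grouped == true): each SCC found previously is still linked
 * through scc_entry, so it is only re-checked for deadness instead of
 * re-running the SCC search.
 */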
static void unix_walk_scc_fast(struct sk_buff_head *hitlist)
{
        unix_graph_maybe_cyclic = false;

        while (!list_empty(&unix_unvisited_vertices)) {
                struct unix_vertex *vertex;
                struct list_head scc;
                bool scc_dead = true;

                vertex = list_first_entry(&unix_unvisited_vertices, typeof(*vertex), entry);
                list_add(&scc, &vertex->scc_entry);

                list_for_each_entry_reverse(vertex, &scc, scc_entry) {
                        list_move_tail(&vertex->entry, &unix_visited_vertices);

                        if (scc_dead)
                                scc_dead = unix_vertex_dead(vertex);
                }

                if (scc_dead)
                        unix_collect_skb(&scc, hitlist);
                else if (!unix_graph_maybe_cyclic)
                        unix_graph_maybe_cyclic = unix_scc_cyclic(&scc);

                list_del(&scc);
        }

        list_replace_init(&unix_visited_vertices, &unix_unvisited_vertices);
}

static bool gc_in_progress;

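/* Actual garbage collection, run from a workqueue.  Candidate skbs are
 * moved onto a private hitlist under unix_gc_lock and purged after the
 * lock is dropped; fpl->dead is set first so that unix_del_edges(),
 * invoked from the skb destructors during the purge, does not touch
 * receivers that are themselves being collected.
 */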
static void __unix_gc(struct work_struct *work)
{
        struct sk_buff_head hitlist;
        struct sk_buff *skb;

        spin_lock(&unix_gc_lock);

        if (!unix_graph_maybe_cyclic) {
                spin_unlock(&unix_gc_lock);
                goto skip_gc;
        }

        __skb_queue_head_init(&hitlist);

        if (unix_graph_grouped)
                unix_walk_scc_fast(&hitlist);
        else
                unix_walk_scc(&hitlist);

        spin_unlock(&unix_gc_lock);

        skb_queue_walk(&hitlist, skb) {
                if (UNIXCB(skb).fp)
                        UNIXCB(skb).fp->dead = true;
        }

        __skb_queue_purge(&hitlist);
skip_gc:
        WRITE_ONCE(gc_in_progress, false);
}

static DECLARE_WORK(unix_gc_work, __unix_gc);

void unix_gc(void)
{
        WRITE_ONCE(gc_in_progress, true);
        queue_work(system_unbound_wq, &unix_gc_work);
}

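/* Thresholds used by wait_for_unix_gc(): once the total number of inflight
 * AF_UNIX fds exceeds UNIX_INFLIGHT_TRIGGER_GC, a GC run is kicked off, and
 * a user with at least UNIX_INFLIGHT_SANE_USER not-yet-received fds has to
 * wait for that run to finish before sending more.
 */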
#define UNIX_INFLIGHT_TRIGGER_GC 16000
#define UNIX_INFLIGHT_SANE_USER (SCM_MAX_FD * 8)

void wait_for_unix_gc(struct scm_fp_list *fpl)
{
        /* If the number of inflight sockets is insane,
         * force a garbage collection right now.
         *
         * Paired with the WRITE_ONCE() in unix_add_edges(),
         * unix_del_edges(), and __unix_gc().
         */
        if (READ_ONCE(unix_tot_inflight) > UNIX_INFLIGHT_TRIGGER_GC &&
            !READ_ONCE(gc_in_progress))
                unix_gc();

        /* Penalise users who want to send AF_UNIX sockets
         * but whose sockets have not been received yet.
         */
        if (!fpl || !fpl->count_unix ||
            READ_ONCE(fpl->user->unix_inflight) < UNIX_INFLIGHT_SANE_USER)
                return;

        if (READ_ONCE(gc_in_progress))
                flush_work(&unix_gc_work);
}