/*
 * NET3:	Garbage Collector For AF_UNIX sockets
 *
 * Garbage Collector:
 *	Copyright (C) Barak A. Pearlmutter.
 *	Released under the GPL version 2 or later.
 *
 * Chopped about by Alan Cox 22/3/96 to make it fit the AF_UNIX socket problem.
 * If it doesn't work blame me, it worked when Barak sent it.
 *
 * Assumptions:
 *
 *  - object w/ a bit
 *  - free list
 *
 * Current optimizations:
 *
 *  - explicit stack instead of recursion
 *  - tail recurse on first born instead of immediate push/pop
 *  - we gather the stuff that should not be killed into tree
 *    and stack is just a path from root to the current pointer.
 *
 *  Future optimizations:
 *
 *  - don't just push entire root set; process in place
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *  Fixes:
 *	Alan Cox	07 Sept	1997	Vmalloc internal stack as needed.
 *					Cope with changing max_files.
 *	Al Viro		11 Oct 1998
 *		Graph may have cycles. That is, we can send the descriptor
 *		of foo to bar and vice versa. Current code chokes on that.
 *		Fix: move SCM_RIGHTS ones into the separate list and then
 *		skb_free() them all instead of doing explicit fput's.
 *		Another problem: since fput() may block somebody may
 *		create a new unix_socket when we are in the middle of sweep
 *		phase. Fix: revert the logic wrt MARKED. Mark everything
 *		upon the beginning and unmark non-junk ones.
 *
 *		[12 Oct 1998] AAARGH! New code purges all SCM_RIGHTS
 *		sent to connect()'ed but still not accept()'ed sockets.
 *		Fixed. Old code had slightly different problem here:
 *		extra fput() in situation when we passed the descriptor via
 *		such socket and closed it (descriptor). That would happen on
 *		each unix_gc() until the accept(). Since the struct file in
 *		question would go to the free list and might be reused...
 *		That might be the reason of random oopses on filp_close()
 *		in unrelated processes.
 *
 *	AV		28 Feb 1999
 *		Kill the explicit allocation of stack. Now we keep the tree
 *		with root in dummy + pointer (gc_current) to one of the nodes.
 *		Stack is represented as path from gc_current to dummy. Unmark
 *		now means "add to tree". Push == "make it a son of gc_current".
 *		Pop == "move gc_current to parent". We keep only pointers to
 *		parents (->gc_tree).
 *	AV		1 Mar 1999
 *		Damn. Added missing check for ->dead in listen queues
 *		scanning.
 *
 *	Miklos Szeredi 25 Jun 2007
 *		Reimplement with a cycle collecting algorithm. This should
 *		solve several problems with the previous code, like being racy
 *		wrt receive and holding up unrelated socket operations.
 */
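
/* Illustrative sketch of the pattern this collector exists for: each end of
 * a socketpair can be sent in-flight over the pair via SCM_RIGHTS and then
 * closed, leaving two struct files that are referenced only by skbs on each
 * other's receive queues. Roughly, in userspace (send_fd(sock, fd) being a
 * hypothetical helper that sendmsg()s fd over sock with SCM_RIGHTS):
 *
 *	int sv[2];
 *	socketpair(AF_UNIX, SOCK_DGRAM, 0, sv);
 *	send_fd(sv[0], sv[0]);	// sv[0]'s file now held by an skb on sv[1]
 *	send_fd(sv[1], sv[1]);	// sv[1]'s file now held by an skb on sv[0]
 *	close(sv[0]);
 *	close(sv[1]);
 *	// no process references either socket any more, yet the reference
 *	// cycle keeps both alive: only unix_gc() below can reclaim them
 */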

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/un.h>
#include <linux/net.h>
#include <linux/fs.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/file.h>
#include <linux/proc_fs.h>
#include <linux/mutex.h>
#include <linux/wait.h>

#include <net/sock.h>
#include <net/af_unix.h>
#include <net/scm.h>
#include <net/tcp_states.h>

#include "scm.h"

/* Internal data structures and random procedures: */

static LIST_HEAD(gc_candidates);
static DECLARE_WAIT_QUEUE_HEAD(unix_gc_wait);

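/* Walk the receive queue of socket 'x' and invoke 'func' on every GC
 * candidate referenced by an SCM_RIGHTS descriptor queued there. If
 * 'hitlist' is given, any skb whose descriptors hit a candidate is
 * unlinked onto it, so the caller can free the skb and with it the
 * references it holds.
 */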
static void scan_inflight(struct sock *x, void (*func)(struct unix_sock *),
			  struct sk_buff_head *hitlist)
{
	struct sk_buff *skb;
	struct sk_buff *next;

	spin_lock(&x->sk_receive_queue.lock);
	skb_queue_walk_safe(&x->sk_receive_queue, skb, next) {
		/* Do we have file descriptors? */
		if (UNIXCB(skb).fp) {
			bool hit = false;
			/* Process the descriptors of this socket */
			int nfd = UNIXCB(skb).fp->count;
			struct file **fp = UNIXCB(skb).fp->fp;

			while (nfd--) {
				/* Get the socket this fd refers to, if it is
				 * indeed a socket
				 */
				struct sock *sk = unix_get_socket(*fp++);

				if (sk) {
					struct unix_sock *u = unix_sk(sk);

					/* Ignore non-candidates, they could
					 * have been added to the queues after
					 * starting the garbage collection
					 */
					if (test_bit(UNIX_GC_CANDIDATE, &u->gc_flags)) {
						hit = true;

						func(u);
					}
				}
			}
			if (hit && hitlist != NULL) {
				__skb_unlink(skb, &x->sk_receive_queue);
				__skb_queue_tail(hitlist, skb);
			}
		}
	}
	spin_unlock(&x->sk_receive_queue.lock);
}

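/* Apply 'func' to every candidate referenced from 'x'. For a non-listening
 * socket that means scanning its own receive queue; for a listening socket
 * it means scanning the receive queues of the not-yet-accept()ed embryo
 * sockets queued on it.
 */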
static void scan_children(struct sock *x, void (*func)(struct unix_sock *),
			  struct sk_buff_head *hitlist)
{
	if (x->sk_state != TCP_LISTEN) {
		scan_inflight(x, func, hitlist);
	} else {
		struct sk_buff *skb;
		struct sk_buff *next;
		struct unix_sock *u;
		LIST_HEAD(embryos);

		/* For a listening socket collect the queued embryos
		 * and perform a scan on them as well.
		 */
		spin_lock(&x->sk_receive_queue.lock);
		skb_queue_walk_safe(&x->sk_receive_queue, skb, next) {
			u = unix_sk(skb->sk);

			/* An embryo cannot be in-flight, so it's safe
			 * to use the list link.
			 */
			BUG_ON(!list_empty(&u->link));
			list_add_tail(&u->link, &embryos);
		}
		spin_unlock(&x->sk_receive_queue.lock);

		while (!list_empty(&embryos)) {
			u = list_entry(embryos.next, struct unix_sock, link);
			scan_inflight(&u->sk, func, hitlist);
			list_del_init(&u->link);
		}
	}
}

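/* Small callbacks fed to scan_children()/scan_inflight(): they adjust the
 * per-socket in-flight counters during the subtract/restore passes of the
 * cycle detection in unix_gc() below.
 */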
static void dec_inflight(struct unix_sock *usk)
{
	atomic_long_dec(&usk->inflight);
}

static void inc_inflight(struct unix_sock *usk)
{
	atomic_long_inc(&usk->inflight);
}

static void inc_inflight_move_tail(struct unix_sock *u)
{
	atomic_long_inc(&u->inflight);
	/* If this still might be part of a cycle, move it to the end
	 * of the list, so that it's checked even if it was already
	 * passed over
	 */
	if (test_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags))
		list_move_tail(&u->link, &gc_candidates);
}

static bool gc_in_progress;
#define UNIX_INFLIGHT_TRIGGER_GC 16000

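/* Called from the AF_UNIX sendmsg paths so that senders cannot outrun the
 * collector: if more than UNIX_INFLIGHT_TRIGGER_GC descriptors are in
 * flight, kick off a collection synchronously, and in any case wait for a
 * running collection to finish before queueing more SCM_RIGHTS messages.
 */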
void wait_for_unix_gc(void)
{
	/* If the number of in-flight sockets is insane,
	 * force a garbage collect right now.
	 */
	if (unix_tot_inflight > UNIX_INFLIGHT_TRIGGER_GC && !gc_in_progress)
		unix_gc();
	wait_event(unix_gc_wait, gc_in_progress == false);
}

/* The external entry point: unix_gc() */
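/* Roughly, the collector below runs in three passes under unix_gc_lock:
 *
 *   1. Mark: every in-flight socket whose only references come from
 *      in-flight skbs becomes a candidate.
 *   2. Scan: subtract the candidates' internal in-flight references;
 *      any candidate whose count stays positive is reachable from
 *      outside, so it (and, recursively, everything it references)
 *      is pulled back out of the candidate set.
 *   3. Sweep: whatever remains is unreachable garbage; its queued
 *      skbs are collected on a hitlist and purged outside the lock.
 */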
void unix_gc(void)
{
	struct unix_sock *u;
	struct unix_sock *next;
	struct sk_buff_head hitlist;
	struct list_head cursor;
	LIST_HEAD(not_cycle_list);

	spin_lock(&unix_gc_lock);

	/* Avoid a recursive GC. */
	if (gc_in_progress)
		goto out;

	gc_in_progress = true;
	/* First, select candidates for garbage collection. Only
	 * in-flight sockets are considered, and from those only ones
	 * which don't have any external reference.
	 *
	 * Holding unix_gc_lock will protect these candidates from
	 * being detached, and hence from gaining an external
	 * reference. Since there are no possible receivers, all
	 * buffers currently on the candidates' queues stay there
	 * during the garbage collection.
	 *
	 * We also know that no new candidate can be added onto the
	 * receive queues. Other, non-candidate sockets _can_ be
	 * added to the queues, so we must make sure only to touch
	 * candidates.
	 */
	list_for_each_entry_safe(u, next, &gc_inflight_list, link) {
		long total_refs;
		long inflight_refs;

		total_refs = file_count(u->sk.sk_socket->file);
		inflight_refs = atomic_long_read(&u->inflight);

		BUG_ON(inflight_refs < 1);
		BUG_ON(total_refs < inflight_refs);
		if (total_refs == inflight_refs) {
			list_move_tail(&u->link, &gc_candidates);
			__set_bit(UNIX_GC_CANDIDATE, &u->gc_flags);
			__set_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags);
		}
	}

	/* Now remove all internal in-flight references to children of
	 * the candidates.
	 */
	list_for_each_entry(u, &gc_candidates, link)
		scan_children(&u->sk, dec_inflight, NULL);

	/* Restore the references for children of all candidates which
	 * have remaining references. Do this recursively, so that only
	 * those which form cyclic references remain.
	 *
	 * Use a "cursor" link, to make the list traversal safe, even
	 * though elements might be moved about.
	 */
	list_add(&cursor, &gc_candidates);
	while (cursor.next != &gc_candidates) {
		u = list_entry(cursor.next, struct unix_sock, link);

		/* Move cursor to after the current position. */
		list_move(&cursor, &u->link);

		if (atomic_long_read(&u->inflight) > 0) {
			list_move_tail(&u->link, &not_cycle_list);
			__clear_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags);
			scan_children(&u->sk, inc_inflight_move_tail, NULL);
		}
	}
	list_del(&cursor);

	/* Now gc_candidates contains only garbage. Restore original
	 * inflight counters for these as well, and remove the skbuffs
	 * which are creating the cycle(s).
	 */
	skb_queue_head_init(&hitlist);
	list_for_each_entry(u, &gc_candidates, link)
		scan_children(&u->sk, inc_inflight, &hitlist);

	/* not_cycle_list contains those sockets which do not make up a
	 * cycle. Restore these to the inflight list.
	 */
	while (!list_empty(&not_cycle_list)) {
		u = list_entry(not_cycle_list.next, struct unix_sock, link);
		__clear_bit(UNIX_GC_CANDIDATE, &u->gc_flags);
		list_move_tail(&u->link, &gc_inflight_list);
	}

	spin_unlock(&unix_gc_lock);

	/* Here we are. Hitlist is filled. Die. */
	__skb_queue_purge(&hitlist);

	spin_lock(&unix_gc_lock);

	/* All candidates should have been detached by now. */
	BUG_ON(!list_empty(&gc_candidates));
	gc_in_progress = false;
	wake_up(&unix_gc_wait);

 out:
	spin_unlock(&unix_gc_lock);
}