source: src/linux/universal/linux-3.18/net/unix/garbage.c @ 31885

/*
 * NET3:        Garbage Collector For AF_UNIX sockets
 *
 * Garbage Collector:
 *      Copyright (C) Barak A. Pearlmutter.
 *      Released under the GPL version 2 or later.
 *
 * Chopped about by Alan Cox 22/3/96 to make it fit the AF_UNIX socket problem.
 * If it doesn't work blame me, it worked when Barak sent it.
 *
 * Assumptions:
 *
 *  - object w/ a bit
 *  - free list
 *
 * Current optimizations:
 *
 *  - explicit stack instead of recursion
 *  - tail recurse on first born instead of immediate push/pop
 *  - we gather the stuff that should not be killed into tree
 *    and stack is just a path from root to the current pointer.
 *
 *  Future optimizations:
 *
 *  - don't just push entire root set; process in place
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 *
 *  Fixes:
 *      Alan Cox        07 Sept 1997    Vmalloc internal stack as needed.
 *                                      Cope with changing max_files.
 *      Al Viro         11 Oct 1998
 *              Graph may have cycles. That is, we can send the descriptor
 *              of foo to bar and vice versa. Current code chokes on that.
 *              Fix: move SCM_RIGHTS ones into the separate list and then
 *              skb_free() them all instead of doing explicit fput's.
 *              Another problem: since fput() may block somebody may
 *              create a new unix_socket when we are in the middle of sweep
 *              phase. Fix: revert the logic wrt MARKED. Mark everything
 *              upon the beginning and unmark non-junk ones.
 *
 *              [12 Oct 1998] AAARGH! New code purges all SCM_RIGHTS
 *              sent to connect()'ed but still not accept()'ed sockets.
 *              Fixed. Old code had slightly different problem here:
 *              extra fput() in situation when we passed the descriptor via
 *              such socket and closed it (descriptor). That would happen on
 *              each unix_gc() until the accept(). Since the struct file in
 *              question would go to the free list and might be reused...
 *              That might be the reason for random oopses on filp_close()
 *              in unrelated processes.
 *
 *      AV              28 Feb 1999
 *              Kill the explicit allocation of stack. Now we keep the tree
 *              with root in dummy + pointer (gc_current) to one of the nodes.
 *              Stack is represented as path from gc_current to dummy. Unmark
 *              now means "add to tree". Push == "make it a son of gc_current".
 *              Pop == "move gc_current to parent". We keep only pointers to
 *              parents (->gc_tree).
 *      AV              1 Mar 1999
 *              Damn. Added missing check for ->dead in listen queues scanning.
 *
 *      Miklos Szeredi 25 Jun 2007
 *              Reimplement with a cycle collecting algorithm. This should
 *              solve several problems with the previous code, like being racy
 *              wrt receive and holding up unrelated socket operations.
 */
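
/*
 * The collector below (Miklos Szeredi's 2007 rewrite) works in three
 * phases, mirrored by the step comments in unix_gc():
 *
 *  1. Select candidates: in-flight sockets whose only remaining file
 *     references are the in-flight ones, i.e. user space can no longer
 *     touch them.
 *  2. Subtract the references candidates hold on each other, then
 *     restore counts for anything still reachable from a non-candidate;
 *     whatever stays unreferenced is cyclic garbage.
 *  3. Unlink the garbage-carrying skbs into a hitlist and purge it
 *     outside the lock, dropping the final file references.
 */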

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/un.h>
#include <linux/net.h>
#include <linux/fs.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/file.h>
#include <linux/proc_fs.h>
#include <linux/mutex.h>
#include <linux/wait.h>

#include <net/sock.h>
#include <net/af_unix.h>
#include <net/scm.h>
#include <net/tcp_states.h>

/* Internal data structures and random procedures: */

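/*
 * gc_inflight_list holds every AF_UNIX socket currently referenced by
 * at least one in-flight SCM_RIGHTS skb; gc_candidates is the working
 * list used by unix_gc().  Both lists and unix_tot_inflight are
 * protected by unix_gc_lock.
 */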
static LIST_HEAD(gc_inflight_list);
static LIST_HEAD(gc_candidates);
static DEFINE_SPINLOCK(unix_gc_lock);
static DECLARE_WAIT_QUEUE_HEAD(unix_gc_wait);

unsigned int unix_tot_inflight;


struct sock *unix_get_socket(struct file *filp)
{
        struct sock *u_sock = NULL;
        struct inode *inode = file_inode(filp);

        /*
         *      Socket ?
         */
        if (S_ISSOCK(inode->i_mode) && !(filp->f_mode & FMODE_PATH)) {
                struct socket *sock = SOCKET_I(inode);
                struct sock *s = sock->sk;

                /*
                 *      PF_UNIX ?
                 */
                if (s && sock->ops && sock->ops->family == PF_UNIX)
                        u_sock = s;
        }
        return u_sock;
}
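
/*
 * Note that descriptors opened with O_PATH (FMODE_PATH) are ignored
 * above: such an fd cannot be used to send or receive on the socket,
 * so presumably it does not need to take part in in-flight accounting.
 */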

/*
 *      Keep track of the number of times an fd referring to an AF_UNIX
 *      socket is in flight.
 */

void unix_inflight(struct file *fp)
{
        struct sock *s = unix_get_socket(fp);

        spin_lock(&unix_gc_lock);

        if (s) {
                struct unix_sock *u = unix_sk(s);

                if (atomic_long_inc_return(&u->inflight) == 1) {
                        BUG_ON(!list_empty(&u->link));
                        list_add_tail(&u->link, &gc_inflight_list);
                } else {
                        BUG_ON(list_empty(&u->link));
                }
                unix_tot_inflight++;
        }
        fp->f_cred->user->unix_inflight++;
        spin_unlock(&unix_gc_lock);
}
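
/*
 * For example, a sendmsg() that attaches three fds via SCM_RIGHTS ends
 * up here once per fd (via unix_attach_fds() in af_unix.c, in this
 * kernel): only fds that resolve to AF_UNIX sockets bump u->inflight
 * and unix_tot_inflight, but every fd bumps the per-user counter.
 */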

void unix_notinflight(struct file *fp)
{
        struct sock *s = unix_get_socket(fp);

        spin_lock(&unix_gc_lock);

        if (s) {
                struct unix_sock *u = unix_sk(s);

                BUG_ON(!atomic_long_read(&u->inflight));
                BUG_ON(list_empty(&u->link));
                if (atomic_long_dec_and_test(&u->inflight))
                        list_del_init(&u->link);
                unix_tot_inflight--;
        }
        fp->f_cred->user->unix_inflight--;
        spin_unlock(&unix_gc_lock);
}

static void scan_inflight(struct sock *x, void (*func)(struct unix_sock *),
                          struct sk_buff_head *hitlist)
{
        struct sk_buff *skb;
        struct sk_buff *next;

        spin_lock(&x->sk_receive_queue.lock);
        skb_queue_walk_safe(&x->sk_receive_queue, skb, next) {
                /*
                 *      Do we have file descriptors ?
                 */
                if (UNIXCB(skb).fp) {
                        bool hit = false;
                        /*
                         *      Process the descriptors of this socket
                         */
                        int nfd = UNIXCB(skb).fp->count;
                        struct file **fp = UNIXCB(skb).fp->fp;
                        while (nfd--) {
                                /*
                                 *      Get the socket this fd refers to,
                                 *      if it is indeed a socket
                                 */
                                struct sock *sk = unix_get_socket(*fp++);
                                if (sk) {
                                        struct unix_sock *u = unix_sk(sk);

                                        /*
                                         * Ignore non-candidates, they could
                                         * have been added to the queues after
                                         * starting the garbage collection
                                         */
                                        if (test_bit(UNIX_GC_CANDIDATE, &u->gc_flags)) {
                                                hit = true;
                                                func(u);
                                        }
                                }
                        }
                        if (hit && hitlist != NULL) {
                                __skb_unlink(skb, &x->sk_receive_queue);
                                __skb_queue_tail(hitlist, skb);
                        }
                }
        }
        spin_unlock(&x->sk_receive_queue.lock);
}
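
/*
 * 'func' is one of dec_inflight, inc_inflight or inc_inflight_move_tail
 * below; a non-NULL hitlist is only passed in the final phase of
 * unix_gc(), when the skbs forming the cycles are unlinked for purging.
 */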

static void scan_children(struct sock *x, void (*func)(struct unix_sock *),
                          struct sk_buff_head *hitlist)
{
        if (x->sk_state != TCP_LISTEN)
                scan_inflight(x, func, hitlist);
        else {
                struct sk_buff *skb;
                struct sk_buff *next;
                struct unix_sock *u;
                LIST_HEAD(embryos);

                /*
                 * For a listening socket collect the queued embryos
                 * and perform a scan on them as well.
                 */
                spin_lock(&x->sk_receive_queue.lock);
                skb_queue_walk_safe(&x->sk_receive_queue, skb, next) {
                        u = unix_sk(skb->sk);

                        /*
                         * An embryo cannot be in-flight, so it's safe
                         * to use the list link.
                         */
                        BUG_ON(!list_empty(&u->link));
                        list_add_tail(&u->link, &embryos);
                }
                spin_unlock(&x->sk_receive_queue.lock);

                while (!list_empty(&embryos)) {
                        u = list_entry(embryos.next, struct unix_sock, link);
                        scan_inflight(&u->sk, func, hitlist);
                        list_del_init(&u->link);
                }
        }
}

static void dec_inflight(struct unix_sock *usk)
{
        atomic_long_dec(&usk->inflight);
}

static void inc_inflight(struct unix_sock *usk)
{
        atomic_long_inc(&usk->inflight);
}

static void inc_inflight_move_tail(struct unix_sock *u)
{
        atomic_long_inc(&u->inflight);
        /*
         * If this still might be part of a cycle, move it to the end
         * of the list, so that it's checked even if it was already
         * passed over
         */
        if (test_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags))
                list_move_tail(&u->link, &gc_candidates);
}
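
/*
 * Moving the socket to the tail of gc_candidates is what lets the
 * cursor loop in unix_gc() converge: a candidate whose count was just
 * restored is guaranteed to be revisited, so reachability propagates
 * through arbitrarily long chains of candidates.
 */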

static bool gc_in_progress;
#define UNIX_INFLIGHT_TRIGGER_GC 16000

void wait_for_unix_gc(void)
{
        /*
         * If the number of in-flight sockets is insane,
         * force a garbage collection right now.
         */
        if (unix_tot_inflight > UNIX_INFLIGHT_TRIGGER_GC && !gc_in_progress)
                unix_gc();
        wait_event(unix_gc_wait, gc_in_progress == false);
}
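
/*
 * In this kernel the sendmsg() paths in af_unix.c call
 * wait_for_unix_gc() before queueing, so a sender flooding fds into
 * flight ends up paying for the collection it provokes.
 */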

/* The external entry point: unix_gc() */
void unix_gc(void)
{
        struct unix_sock *u;
        struct unix_sock *next;
        struct sk_buff_head hitlist;
        struct list_head cursor;
        LIST_HEAD(not_cycle_list);

        spin_lock(&unix_gc_lock);

        /* Avoid a recursive GC. */
        if (gc_in_progress)
                goto out;

        gc_in_progress = true;
        /*
         * First, select candidates for garbage collection.  Only
         * in-flight sockets are considered, and from those only ones
         * which don't have any external reference.
         *
         * Holding unix_gc_lock will protect these candidates from
         * being detached, and hence from gaining an external
         * reference.  Since there are no possible receivers, all
         * buffers currently on the candidates' queues stay there
         * during the garbage collection.
         *
         * We also know that no new candidate can be added onto the
         * receive queues.  Other, non-candidate sockets _can_ be
         * added to the queues, so we must make sure only to touch
         * candidates.
         */
        list_for_each_entry_safe(u, next, &gc_inflight_list, link) {
                long total_refs;
                long inflight_refs;

                total_refs = file_count(u->sk.sk_socket->file);
                inflight_refs = atomic_long_read(&u->inflight);

                BUG_ON(inflight_refs < 1);
                BUG_ON(total_refs < inflight_refs);
                if (total_refs == inflight_refs) {
                        list_move_tail(&u->link, &gc_candidates);
                        __set_bit(UNIX_GC_CANDIDATE, &u->gc_flags);
                        __set_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags);
                }
        }
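
        /*
         * Example of a candidate: socket A is passed over socket B via
         * SCM_RIGHTS and the sender then close()s A.  The skb queued on
         * B now holds A's only file reference, so total_refs ==
         * inflight_refs == 1 and A moves onto gc_candidates.
         */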

        /*
         * Now remove all internal in-flight references to children of
         * the candidates.
         */
        list_for_each_entry(u, &gc_candidates, link)
                scan_children(&u->sk, dec_inflight, NULL);

        /*
         * Restore the references for children of all candidates which
         * still have remaining references.  Do this recursively, so
         * that only those forming cyclic references remain.
         *
         * Use a "cursor" link to make the list traversal safe, even
         * though elements might be moved about.
         */
        list_add(&cursor, &gc_candidates);
        while (cursor.next != &gc_candidates) {
                u = list_entry(cursor.next, struct unix_sock, link);

                /* Move cursor to after the current position. */
                list_move(&cursor, &u->link);

                if (atomic_long_read(&u->inflight) > 0) {
                        list_move_tail(&u->link, &not_cycle_list);
                        __clear_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags);
                        scan_children(&u->sk, inc_inflight_move_tail, NULL);
                }
        }
        list_del(&cursor);
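
        /*
         * Invariant at this point: every candidate reachable from a
         * socket with an external reference has been moved to
         * not_cycle_list and had its counts restored; whatever is left
         * on gc_candidates is reachable only from other garbage, i.e.
         * it consists purely of cycles.
         */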

        /* Now gc_candidates contains only garbage.  Restore original
         * inflight counters for these as well, and remove the skbuffs
         * which are creating the cycle(s).
         */
        skb_queue_head_init(&hitlist);
        list_for_each_entry(u, &gc_candidates, link)
                scan_children(&u->sk, inc_inflight, &hitlist);

        /*
         * not_cycle_list contains those sockets which do not make up a
         * cycle.  Restore these to the inflight list.
         */
        while (!list_empty(&not_cycle_list)) {
                u = list_entry(not_cycle_list.next, struct unix_sock, link);
                __clear_bit(UNIX_GC_CANDIDATE, &u->gc_flags);
                list_move_tail(&u->link, &gc_inflight_list);
        }

        spin_unlock(&unix_gc_lock);

        /* Here we are. Hitlist is filled. Die. */
        __skb_queue_purge(&hitlist);

        spin_lock(&unix_gc_lock);

        /* All candidates should have been detached by now. */
        BUG_ON(!list_empty(&gc_candidates));
        gc_in_progress = false;
        wake_up(&unix_gc_wait);

 out:
        spin_unlock(&unix_gc_lock);
}