source: src/linux/universal/linux-4.9/fs/pnode.c @ 31662

Last change on this file: r31662, checked in by brainslayer, 6 weeks ago

use new squashfs in all kernels

File size: 12.2 KB
/*
 *  linux/fs/pnode.c
 *
 * (C) Copyright IBM Corporation 2005.
 *      Released under GPL v2.
 *      Author : Ram Pai (linuxram@us.ibm.com)
 *
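 * See Documentation/filesystems/sharedsubtree.txt for the semantics
 * of the shared-subtree propagation types (shared, slave, private,
 * unbindable) implemented here.
 *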
 */
#include <linux/mnt_namespace.h>
#include <linux/mount.h>
#include <linux/fs.h>
#include <linux/nsproxy.h>
#include "internal.h"
#include "pnode.h"

/* return the next shared peer mount of @p */
static inline struct mount *next_peer(struct mount *p)
{
        return list_entry(p->mnt_share.next, struct mount, mnt_share);
}

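/* return the first mount on @p's list of slave mounts */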
static inline struct mount *first_slave(struct mount *p)
{
        return list_entry(p->mnt_slave_list.next, struct mount, mnt_slave);
}

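/* return the mount that follows @p on its master's list of slaves */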
static inline struct mount *next_slave(struct mount *p)
{
        return list_entry(p->mnt_slave.next, struct mount, mnt_slave);
}

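/*
 * Walk @mnt's peer group and return the first peer that lives in
 * namespace @ns and whose root is reachable from @root, or NULL if
 * there is none.
 */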
static struct mount *get_peer_under_root(struct mount *mnt,
                                         struct mnt_namespace *ns,
                                         const struct path *root)
{
        struct mount *m = mnt;

        do {
                /* Check the namespace first for optimization */
                if (m->mnt_ns == ns && is_path_reachable(m, m->mnt.mnt_root, root))
                        return m;

                m = next_peer(m);
        } while (m != mnt);

        return NULL;
}

/*
 * Get ID of closest dominating peer group having a representative
 * under the given root.
 *
 * Caller must hold namespace_sem
 */
int get_dominating_id(struct mount *mnt, const struct path *root)
{
        struct mount *m;

        for (m = mnt->mnt_master; m != NULL; m = m->mnt_master) {
                struct mount *d = get_peer_under_root(m, mnt->mnt_ns, root);
                if (d)
                        return d->mnt_group_id;
        }

        return 0;
}

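/*
 * Turn @mnt into a slave: detach it from its peer group (releasing the
 * group id if it was the last member) and hang it, together with its
 * former slaves, off a new master.  The preferred master is a peer
 * with the same root dentry; failing that, any peer; failing that,
 * @mnt's old master.  With no candidate at all, @mnt's slaves are set
 * free and @mnt ends up with a NULL ->mnt_master.
 */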
static int do_make_slave(struct mount *mnt)
{
        struct mount *peer_mnt = mnt, *master = mnt->mnt_master;
        struct mount *slave_mnt;

        /*
         * slave 'mnt' to a peer mount that has the
         * same root dentry. If none is available then
         * slave it to anything that is available.
         */
        while ((peer_mnt = next_peer(peer_mnt)) != mnt &&
               peer_mnt->mnt.mnt_root != mnt->mnt.mnt_root) ;

        if (peer_mnt == mnt) {
                peer_mnt = next_peer(mnt);
                if (peer_mnt == mnt)
                        peer_mnt = NULL;
        }
        if (mnt->mnt_group_id && IS_MNT_SHARED(mnt) &&
            list_empty(&mnt->mnt_share))
                mnt_release_group_id(mnt);

        list_del_init(&mnt->mnt_share);
        mnt->mnt_group_id = 0;

        if (peer_mnt)
                master = peer_mnt;

        if (master) {
                list_for_each_entry(slave_mnt, &mnt->mnt_slave_list, mnt_slave)
                        slave_mnt->mnt_master = master;
                list_move(&mnt->mnt_slave, &master->mnt_slave_list);
                list_splice(&mnt->mnt_slave_list, master->mnt_slave_list.prev);
                INIT_LIST_HEAD(&mnt->mnt_slave_list);
        } else {
                struct list_head *p = &mnt->mnt_slave_list;
                while (!list_empty(p)) {
                        slave_mnt = list_first_entry(p,
                                        struct mount, mnt_slave);
                        list_del_init(&slave_mnt->mnt_slave);
                        slave_mnt->mnt_master = NULL;
                }
        }
        mnt->mnt_master = master;
        CLEAR_MNT_SHARED(mnt);
        return 0;
}

/*
 * vfsmount lock must be held for write
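 *
 * Reached from do_change_type() in fs/namespace.c when userspace runs
 * e.g. "mount --make-shared /mnt" (MS_SHARED) or "mount --make-rslave
 * /mnt" (MS_SLAVE, applied recursively), and from umount_tree() with
 * MS_PRIVATE.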
 */
void change_mnt_propagation(struct mount *mnt, int type)
{
        if (type == MS_SHARED) {
                set_mnt_shared(mnt);
                return;
        }
        do_make_slave(mnt);
        if (type != MS_SLAVE) {
                list_del_init(&mnt->mnt_slave);
                mnt->mnt_master = NULL;
                if (type == MS_UNBINDABLE)
                        mnt->mnt.mnt_flags |= MNT_UNBINDABLE;
                else
                        mnt->mnt.mnt_flags &= ~MNT_UNBINDABLE;
        }
}

/*
 * get the next mount in the propagation tree.
 * @m: the mount seen last
 * @origin: the original mount from where the tree walk initiated
 *
 * Note that peer groups form contiguous segments of slave lists.
 * We rely on that in propagate_one() to be able to find out if
 * a vfsmount found while iterating with propagation_next() is
 * a peer of one we'd found earlier.
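 *
 * The walk is depth-first: a mount's slaves are visited before its
 * next peer, and when a slave list is exhausted the walk backs up to
 * the master.  It terminates (returns NULL) once it would wrap around
 * to @origin.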
 */
static struct mount *propagation_next(struct mount *m,
                                         struct mount *origin)
{
        /* are there any slaves of this mount? */
        if (!IS_MNT_NEW(m) && !list_empty(&m->mnt_slave_list))
                return first_slave(m);

        while (1) {
                struct mount *master = m->mnt_master;

                if (master == origin->mnt_master) {
                        struct mount *next = next_peer(m);
                        return (next == origin) ? NULL : next;
                } else if (m->mnt_slave.next != &master->mnt_slave_list)
                        return next_slave(m);

                /* back at master */
                m = master;
        }
}

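/*
 * Advance from the peer group containing @m to a mount in the next
 * peer group of the propagation tree, in the same depth-first order
 * propagation_next() uses.  Returns NULL when the walk gets back to
 * @origin's group.
 */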
static struct mount *next_group(struct mount *m, struct mount *origin)
{
        while (1) {
                while (1) {
                        struct mount *next;
                        if (!IS_MNT_NEW(m) && !list_empty(&m->mnt_slave_list))
                                return first_slave(m);
                        next = next_peer(m);
                        if (m->mnt_group_id == origin->mnt_group_id) {
                                if (next == origin)
                                        return NULL;
                        } else if (m->mnt_slave.next != &next->mnt_slave)
                                break;
                        m = next;
                }
                /* m is the last peer */
                while (1) {
                        struct mount *master = m->mnt_master;
                        if (m->mnt_slave.next != &master->mnt_slave_list)
                                return next_slave(m);
                        m = next_peer(master);
                        if (master->mnt_group_id == origin->mnt_group_id)
                                break;
                        if (master->mnt_slave.next == &m->mnt_slave)
                                break;
                        m = master;
                }
                if (m == origin)
                        return NULL;
        }
}

/* all accesses are serialized by namespace_sem */
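/*
 * State shared between the propagate_one() calls of a single
 * propagate_mnt() run: first_source is the original source mount;
 * last_dest/last_source are the most recent destination and the copy
 * created for it, which lets propagate_one() decide whether the next
 * copy should be a peer or a slave of the previous one; dest_master is
 * the master of the original destination; mp is the mountpoint being
 * propagated and list collects the newly created mounts.
 */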
static struct user_namespace *user_ns;
static struct mount *last_dest, *first_source, *last_source, *dest_master;
static struct mountpoint *mp;
static struct hlist_head *list;

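/* true if @m1 and @m2 belong to the same peer group (nonzero group id) */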
static inline bool peers(struct mount *m1, struct mount *m2)
{
        return m1->mnt_group_id == m2->mnt_group_id && m1->mnt_group_id;
}

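/*
 * Create one propagated copy: clone last_source and mount the clone on
 * @m at mp.  If @m is a peer of the previous destination, the clone
 * becomes a peer of the previous copy; otherwise last_source is walked
 * up its master chain until a copy matching @m's position in the
 * propagation tree is found, and the clone is slaved to it.
 */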
static int propagate_one(struct mount *m)
{
        struct mount *child;
        int type;
        /* skip ones added by this propagate_mnt() */
        if (IS_MNT_NEW(m))
                return 0;
        /* skip if mountpoint isn't covered by it */
        if (!is_subdir(mp->m_dentry, m->mnt.mnt_root))
                return 0;
        if (peers(m, last_dest)) {
                type = CL_MAKE_SHARED;
        } else {
                struct mount *n, *p;
                bool done;
                for (n = m; ; n = p) {
                        p = n->mnt_master;
                        if (p == dest_master || IS_MNT_MARKED(p))
                                break;
                }
                do {
                        struct mount *parent = last_source->mnt_parent;
                        if (last_source == first_source)
                                break;
                        done = parent->mnt_master == p;
                        if (done && peers(n, parent))
                                break;
                        last_source = last_source->mnt_master;
                } while (!done);

                type = CL_SLAVE;
                /* beginning of peer group among the slaves? */
                if (IS_MNT_SHARED(m))
                        type |= CL_MAKE_SHARED;
        }

        /* Notice when we are propagating across user namespaces */
        if (m->mnt_ns->user_ns != user_ns)
                type |= CL_UNPRIVILEGED;
        child = copy_tree(last_source, last_source->mnt.mnt_root, type);
        if (IS_ERR(child))
                return PTR_ERR(child);
        child->mnt.mnt_flags &= ~MNT_LOCKED;
        mnt_set_mountpoint(m, mp, child);
        last_dest = m;
        last_source = child;
        if (m->mnt_master != dest_master) {
                read_seqlock_excl(&mount_lock);
                SET_MNT_MARK(m->mnt_master);
                read_sequnlock_excl(&mount_lock);
        }
        hlist_add_head(&child->mnt_hash, list);
        return count_mounts(m->mnt_ns, child);
}

/*
 * mount 'source_mnt' under the destination 'dest_mnt' at the
 * mountpoint 'dest_mp', and propagate that mount to all the peer
 * and slave mounts of 'dest_mnt'.
 * Link all the new mounts into a propagation tree headed at
 * source_mnt.  Also link all the new mounts using ->mnt_list
 * headed at source_mnt's ->mnt_list.
 *
 * @dest_mnt: destination mount.
 * @dest_mp: destination mountpoint.
 * @source_mnt: source mount.
 * @tree_list: list of heads of trees to be attached.
 */
int propagate_mnt(struct mount *dest_mnt, struct mountpoint *dest_mp,
                    struct mount *source_mnt, struct hlist_head *tree_list)
{
        struct mount *m, *n;
        int ret = 0;

        /*
         * we don't want to bother passing tons of arguments to
         * propagate_one(); everything is serialized by namespace_sem,
         * so globals will do just fine.
         */
        user_ns = current->nsproxy->mnt_ns->user_ns;
        last_dest = dest_mnt;
        first_source = source_mnt;
        last_source = source_mnt;
        mp = dest_mp;
        list = tree_list;
        dest_master = dest_mnt->mnt_master;

        /* all peers of dest_mnt, except dest_mnt itself */
        for (n = next_peer(dest_mnt); n != dest_mnt; n = next_peer(n)) {
                ret = propagate_one(n);
                if (ret)
                        goto out;
        }

        /* all slave groups */
        for (m = next_group(dest_mnt, dest_mnt); m;
                        m = next_group(m, dest_mnt)) {
                /* everything in that slave group */
                n = m;
                do {
                        ret = propagate_one(n);
                        if (ret)
                                goto out;
                        n = next_peer(n);
                } while (n != m);
        }
out:
        read_seqlock_excl(&mount_lock);
        hlist_for_each_entry(n, tree_list, mnt_hash) {
                m = n->mnt_parent;
                if (m->mnt_master != dest_mnt->mnt_master)
                        CLEAR_MNT_MARK(m->mnt_master);
        }
        read_sequnlock_excl(&mount_lock);
        return ret;
}

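/*
 * A "topper" is a child mount that completely covers its parent: the
 * parent's only child, mounted on the parent's root dentry.  When a
 * propagated unmount removes a mount that has a topper, the topper is
 * moved up to take its place; see __propagate_umount().
 */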
static struct mount *find_topper(struct mount *mnt)
{
        /* If there is exactly one mount covering mnt completely return it. */
        struct mount *child;

        if (!list_is_singular(&mnt->mnt_mounts))
                return NULL;

        child = list_first_entry(&mnt->mnt_mounts, struct mount, mnt_child);
        if (child->mnt_mountpoint != mnt->mnt.mnt_root)
                return NULL;

        return child;
}

/*
 * return true if the refcount is greater than count
 */
static inline int do_refcount_check(struct mount *mnt, int count)
{
        return mnt_get_count(mnt) > count;
}

/*
 * check if the mount 'mnt' can be unmounted successfully.
 * @mnt: the mount to be checked for unmount
 * NOTE: unmounting 'mnt' would naturally propagate to all
 * other mounts its parent propagates to.
 * Check if any of these mounts that **do not have submounts**
 * have more references than 'refcnt'. If so return busy.
 *
 * vfsmount lock must be held for write
 */
int propagate_mount_busy(struct mount *mnt, int refcnt)
{
        struct mount *m, *child, *topper;
        struct mount *parent = mnt->mnt_parent;

        if (mnt == parent)
                return do_refcount_check(mnt, refcnt);

        /*
         * quickly check if the current mount can be unmounted.
         * If not, we don't have to go checking for all other
         * mounts
         */
        if (!list_empty(&mnt->mnt_mounts) || do_refcount_check(mnt, refcnt))
                return 1;

        for (m = propagation_next(parent, parent); m;
                        m = propagation_next(m, parent)) {
                int count = 1;
                child = __lookup_mnt(&m->mnt, mnt->mnt_mountpoint);
                if (!child)
                        continue;

                /* Is there exactly one mount on the child that covers
                 * it completely whose reference should be ignored?
                 */
                topper = find_topper(child);
                if (topper)
                        count += 1;
                else if (!list_empty(&child->mnt_mounts))
                        continue;

                if (do_refcount_check(child, count))
                        return 1;
        }
        return 0;
}

397
398/*
399 * Clear MNT_LOCKED when it can be shown to be safe.
400 *
401 * mount_lock lock must be held for write
402 */
403void propagate_mount_unlock(struct mount *mnt)
404{
405        struct mount *parent = mnt->mnt_parent;
406        struct mount *m, *child;
407
408        BUG_ON(parent == mnt);
409
410        for (m = propagation_next(parent, parent); m;
411                        m = propagation_next(m, parent)) {
412                child = __lookup_mnt(&m->mnt, mnt->mnt_mountpoint);
413                if (child)
414                        child->mnt.mnt_flags &= ~MNT_LOCKED;
415        }
416}
417
/*
 * Mark all mounts that the MNT_LOCKED logic will allow to be unmounted.
 */
static void mark_umount_candidates(struct mount *mnt)
{
        struct mount *parent = mnt->mnt_parent;
        struct mount *m;

        BUG_ON(parent == mnt);

        for (m = propagation_next(parent, parent); m;
                        m = propagation_next(m, parent)) {
                struct mount *child = __lookup_mnt(&m->mnt,
                                                mnt->mnt_mountpoint);
                if (!child || (child->mnt.mnt_flags & MNT_UMOUNT))
                        continue;
                if (!IS_MNT_LOCKED(child) || IS_MNT_MARKED(m)) {
                        SET_MNT_MARK(child);
                }
        }
}

/*
 * NOTE: unmounting 'mnt' naturally propagates to all other mounts its
 * parent propagates to.
 */
static void __propagate_umount(struct mount *mnt)
{
        struct mount *parent = mnt->mnt_parent;
        struct mount *m;

        BUG_ON(parent == mnt);

        for (m = propagation_next(parent, parent); m;
                        m = propagation_next(m, parent)) {
                struct mount *topper;
                struct mount *child = __lookup_mnt(&m->mnt,
                                                mnt->mnt_mountpoint);
                /*
                 * umount the child only if the child has no children
                 * and the child is marked safe to unmount.
                 */
                if (!child || !IS_MNT_MARKED(child))
                        continue;
                CLEAR_MNT_MARK(child);

                /* If there is exactly one mount covering all of child
                 * replace child with that mount.
                 */
                topper = find_topper(child);
                if (topper)
                        mnt_change_mountpoint(child->mnt_parent, child->mnt_mp,
                                              topper);

                if (list_empty(&child->mnt_mounts)) {
                        list_del_init(&child->mnt_child);
                        child->mnt.mnt_flags |= MNT_UMOUNT;
                        list_move_tail(&child->mnt_list, &mnt->mnt_list);
                }
        }
}

/*
 * collect all mounts that receive propagation from the mount in @list,
 * and return these additional mounts in the same list.
 * @list: the list of mounts to be unmounted.
 *
 * vfsmount lock must be held for write
 */
int propagate_umount(struct list_head *list)
{
        struct mount *mnt;

        list_for_each_entry_reverse(mnt, list, mnt_list)
                mark_umount_candidates(mnt);

        list_for_each_entry(mnt, list, mnt_list)
                __propagate_umount(mnt);
        return 0;
}