source: src/linux/universal/linux-4.9/kernel/cgroup.c @ 31885

/*
 *  Generic process-grouping system.
 *
 *  Based originally on the cpuset system, extracted by Paul Menage
 *  Copyright (C) 2006 Google, Inc
 *
 *  Notifications support
 *  Copyright (C) 2009 Nokia Corporation
 *  Author: Kirill A. Shutemov
 *
 *  Copyright notices from the original cpuset code:
 *  --------------------------------------------------
 *  Copyright (C) 2003 BULL SA.
 *  Copyright (C) 2004-2006 Silicon Graphics, Inc.
 *
 *  Portions derived from Patrick Mochel's sysfs code.
 *  sysfs is Copyright (c) 2001-3 Patrick Mochel
 *
 *  2003-10-10 Written by Simon Derr.
 *  2003-10-22 Updates by Stephen Hemminger.
 *  2004 May-July Rework by Paul Jackson.
 *  ---------------------------------------------------
 *
 *  This file is subject to the terms and conditions of the GNU General Public
 *  License.  See the file COPYING in the main directory of the Linux
 *  distribution for more details.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cgroup.h>
#include <linux/cred.h>
#include <linux/ctype.h>
#include <linux/errno.h>
#include <linux/init_task.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/magic.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/mount.h>
#include <linux/pagemap.h>
#include <linux/proc_fs.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/percpu-rwsem.h>
#include <linux/string.h>
#include <linux/sort.h>
#include <linux/kmod.h>
#include <linux/delayacct.h>
#include <linux/cgroupstats.h>
#include <linux/hashtable.h>
#include <linux/pid_namespace.h>
#include <linux/idr.h>
#include <linux/vmalloc.h> /* TODO: replace with more sophisticated array */
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include <linux/cpuset.h>
#include <linux/proc_ns.h>
#include <linux/nsproxy.h>
#include <linux/file.h>
#include <net/sock.h>

#define CREATE_TRACE_POINTS
#include <trace/events/cgroup.h>

/*
 * pidlists linger the following amount before being destroyed.  The goal
 * is avoiding frequent destruction in the middle of consecutive read
 * calls.  Expiring in the middle is a performance problem, not a
 * correctness one.  1 sec should be enough.
 */
#define CGROUP_PIDLIST_DESTROY_DELAY    HZ

#define CGROUP_FILE_NAME_MAX            (MAX_CGROUP_TYPE_NAMELEN +      \
                                         MAX_CFTYPE_NAME + 2)
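
/*
 * The "+ 2" covers the '.' separator and the terminating NUL:
 * cgroup_file_name() below builds names of the form "<subsys>.<file>",
 * e.g. "memory.limit_in_bytes".
 */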

/*
 * cgroup_mutex is the master lock.  Any modification to cgroup or its
 * hierarchy must be performed while holding it.
 *
 * css_set_lock protects task->cgroups pointer, the list of css_set
 * objects, and the chain of tasks off each css_set.
 *
 * These locks are exported if CONFIG_PROVE_RCU so that accessors in
 * cgroup.h can use them for lockdep annotations.
 */
#ifdef CONFIG_PROVE_RCU
DEFINE_MUTEX(cgroup_mutex);
DEFINE_SPINLOCK(css_set_lock);
EXPORT_SYMBOL_GPL(cgroup_mutex);
EXPORT_SYMBOL_GPL(css_set_lock);
#else
static DEFINE_MUTEX(cgroup_mutex);
static DEFINE_SPINLOCK(css_set_lock);
#endif

/*
 * Protects cgroup_idr and css_idr so that IDs can be released without
 * grabbing cgroup_mutex.
 */
static DEFINE_SPINLOCK(cgroup_idr_lock);

/*
 * Protects cgroup_file->kn for !self csses.  It synchronizes notifications
 * against file removal/re-creation across css hiding.
 */
static DEFINE_SPINLOCK(cgroup_file_kn_lock);

/*
 * Protects cgroup_root->release_agent_path.  Modifying it also requires
 * cgroup_mutex.  Reading requires either cgroup_mutex or this spinlock.
 */
static DEFINE_SPINLOCK(release_agent_path_lock);

struct percpu_rw_semaphore cgroup_threadgroup_rwsem;

#define cgroup_assert_mutex_or_rcu_locked()                             \
        RCU_LOCKDEP_WARN(!rcu_read_lock_held() &&                       \
                           !lockdep_is_held(&cgroup_mutex),             \
                           "cgroup_mutex or RCU read lock required");

/*
 * cgroup destruction makes heavy use of work items and there can be a lot
 * of concurrent destructions.  Use a separate workqueue so that cgroup
 * destruction work items don't end up filling up max_active of system_wq
 * which may lead to deadlock.
 */
static struct workqueue_struct *cgroup_destroy_wq;

/*
 * pidlist destructions need to be flushed on cgroup destruction.  Use a
 * separate workqueue as flush domain.
 */
static struct workqueue_struct *cgroup_pidlist_destroy_wq;

/* generate an array of cgroup subsystem pointers */
#define SUBSYS(_x) [_x ## _cgrp_id] = &_x ## _cgrp_subsys,
static struct cgroup_subsys *cgroup_subsys[] = {
#include <linux/cgroup_subsys.h>
};
#undef SUBSYS

/* array of cgroup subsystem names */
#define SUBSYS(_x) [_x ## _cgrp_id] = #_x,
static const char *cgroup_subsys_name[] = {
#include <linux/cgroup_subsys.h>
};
#undef SUBSYS

/* array of static_keys for cgroup_subsys_enabled() and cgroup_subsys_on_dfl() */
#define SUBSYS(_x)                                                              \
        DEFINE_STATIC_KEY_TRUE(_x ## _cgrp_subsys_enabled_key);                 \
        DEFINE_STATIC_KEY_TRUE(_x ## _cgrp_subsys_on_dfl_key);                  \
        EXPORT_SYMBOL_GPL(_x ## _cgrp_subsys_enabled_key);                      \
        EXPORT_SYMBOL_GPL(_x ## _cgrp_subsys_on_dfl_key);
#include <linux/cgroup_subsys.h>
#undef SUBSYS

#define SUBSYS(_x) [_x ## _cgrp_id] = &_x ## _cgrp_subsys_enabled_key,
static struct static_key_true *cgroup_subsys_enabled_key[] = {
#include <linux/cgroup_subsys.h>
};
#undef SUBSYS

#define SUBSYS(_x) [_x ## _cgrp_id] = &_x ## _cgrp_subsys_on_dfl_key,
static struct static_key_true *cgroup_subsys_on_dfl_key[] = {
#include <linux/cgroup_subsys.h>
};
#undef SUBSYS

/*
 * The default hierarchy, reserved for the subsystems that are otherwise
 * unattached - it never has more than a single cgroup, and all tasks are
 * part of that cgroup.
 */
struct cgroup_root cgrp_dfl_root;
EXPORT_SYMBOL_GPL(cgrp_dfl_root);

/*
 * The default hierarchy always exists but is hidden until mounted for the
 * first time.  This is for backward compatibility.
 */
static bool cgrp_dfl_visible;

/* Controllers blocked by the commandline in v1 */
static u16 cgroup_no_v1_mask;

/* some controllers are not supported in the default hierarchy */
static u16 cgrp_dfl_inhibit_ss_mask;

/* some controllers are implicitly enabled on the default hierarchy */
static unsigned long cgrp_dfl_implicit_ss_mask;

/* The list of hierarchy roots */

static LIST_HEAD(cgroup_roots);
static int cgroup_root_count;

/* hierarchy ID allocation and mapping, protected by cgroup_mutex */
static DEFINE_IDR(cgroup_hierarchy_idr);

/*
 * Assign a monotonically increasing serial number to csses.  It guarantees
 * cgroups with bigger numbers are newer than those with smaller numbers.
 * Also, as csses are always appended to the parent's ->children list, it
 * guarantees that sibling csses are always sorted in the ascending serial
 * number order on the list.  Protected by cgroup_mutex.
 */
static u64 css_serial_nr_next = 1;

/*
 * These bitmask flags indicate whether tasks in the fork and exit paths have
 * fork/exit handlers to call. This avoids us having to do extra work in the
 * fork/exit path to check which subsystems have fork/exit callbacks.
 */
static u16 have_fork_callback __read_mostly;
static u16 have_exit_callback __read_mostly;
static u16 have_free_callback __read_mostly;

/* cgroup namespace for init task */
struct cgroup_namespace init_cgroup_ns = {
        .count          = { .counter = 2, },
        .user_ns        = &init_user_ns,
        .ns.ops         = &cgroupns_operations,
        .ns.inum        = PROC_CGROUP_INIT_INO,
        .root_cset      = &init_css_set,
};

/* Ditto for the can_fork callback. */
static u16 have_canfork_callback __read_mostly;

static struct file_system_type cgroup2_fs_type;
static struct cftype cgroup_dfl_base_files[];
static struct cftype cgroup_legacy_base_files[];

static int rebind_subsystems(struct cgroup_root *dst_root, u16 ss_mask);
static void cgroup_lock_and_drain_offline(struct cgroup *cgrp);
static int cgroup_apply_control(struct cgroup *cgrp);
static void cgroup_finalize_control(struct cgroup *cgrp, int ret);
static void css_task_iter_advance(struct css_task_iter *it);
static int cgroup_destroy_locked(struct cgroup *cgrp);
static struct cgroup_subsys_state *css_create(struct cgroup *cgrp,
                                              struct cgroup_subsys *ss);
static void css_release(struct percpu_ref *ref);
static void kill_css(struct cgroup_subsys_state *css);
static int cgroup_addrm_files(struct cgroup_subsys_state *css,
                              struct cgroup *cgrp, struct cftype cfts[],
                              bool is_add);

/**
 * cgroup_ssid_enabled - cgroup subsys enabled test by subsys ID
 * @ssid: subsys ID of interest
 *
 * cgroup_subsys_enabled() can only be used with literal subsys names, which
 * is fine for individual subsystems but unsuitable for cgroup core.  This
 * is a slower static_key_enabled() based test indexed by @ssid.
 */
static bool cgroup_ssid_enabled(int ssid)
{
        if (CGROUP_SUBSYS_COUNT == 0)
                return false;

        return static_key_enabled(cgroup_subsys_enabled_key[ssid]);
}

static bool cgroup_ssid_no_v1(int ssid)
{
        return cgroup_no_v1_mask & (1 << ssid);
}

/**
 * cgroup_on_dfl - test whether a cgroup is on the default hierarchy
 * @cgrp: the cgroup of interest
 *
 * The default hierarchy is the v2 interface of cgroup and this function
 * can be used to test whether a cgroup is on the default hierarchy for
 * cases where a subsystem should behave differently depending on the
 * interface version.
 *
 * The set of behaviors which change on the default hierarchy are still
 * being determined and the mount option is prefixed with __DEVEL__.
 *
 * List of changed behaviors:
 *
 * - Mount options "noprefix", "xattr", "clone_children", "release_agent"
 *   and "name" are disallowed.
 *
 * - When mounting an existing superblock, mount options should match.
 *
 * - Remount is disallowed.
 *
 * - rename(2) is disallowed.
 *
 * - "tasks" is removed.  Everything should be at process granularity.  Use
 *   "cgroup.procs" instead.
 *
 * - "cgroup.procs" is not sorted.  pids will be unique unless they got
 *   recycled in between reads.
 *
 * - "release_agent" and "notify_on_release" are removed.  Replacement
 *   notification mechanism will be implemented.
 *
 * - "cgroup.clone_children" is removed.
 *
 * - "cgroup.subtree_populated" is available.  Its value is 0 if the cgroup
 *   and its descendants contain no task; otherwise, 1.  The file also
 *   generates a kernfs notification which can be monitored through poll and
 *   [di]notify when the value of the file changes.
 *
 * - cpuset: tasks will be kept in empty cpusets when hotplug happens and
 *   take masks of ancestors with non-empty cpus/mems, instead of being
 *   moved to an ancestor.
 *
 * - cpuset: a task can be moved into an empty cpuset, and again it takes
 *   masks of ancestors.
 *
 * - memcg: use_hierarchy is on by default and the cgroup file for the flag
 *   is not created.
 *
 * - blkcg: blk-throttle becomes properly hierarchical.
 *
 * - debug: disallowed on the default hierarchy.
 */
static bool cgroup_on_dfl(const struct cgroup *cgrp)
{
        return cgrp->root == &cgrp_dfl_root;
}

/* IDR wrappers which synchronize using cgroup_idr_lock */
static int cgroup_idr_alloc(struct idr *idr, void *ptr, int start, int end,
                            gfp_t gfp_mask)
{
        int ret;

        idr_preload(gfp_mask);
        spin_lock_bh(&cgroup_idr_lock);
        ret = idr_alloc(idr, ptr, start, end, gfp_mask & ~__GFP_DIRECT_RECLAIM);
        spin_unlock_bh(&cgroup_idr_lock);
        idr_preload_end();
        return ret;
}

static void *cgroup_idr_replace(struct idr *idr, void *ptr, int id)
{
        void *ret;

        spin_lock_bh(&cgroup_idr_lock);
        ret = idr_replace(idr, ptr, id);
        spin_unlock_bh(&cgroup_idr_lock);
        return ret;
}

static void cgroup_idr_remove(struct idr *idr, int id)
{
        spin_lock_bh(&cgroup_idr_lock);
        idr_remove(idr, id);
        spin_unlock_bh(&cgroup_idr_lock);
}

static struct cgroup *cgroup_parent(struct cgroup *cgrp)
{
        struct cgroup_subsys_state *parent_css = cgrp->self.parent;

        if (parent_css)
                return container_of(parent_css, struct cgroup, self);
        return NULL;
}

/* subsystems visibly enabled on a cgroup */
static u16 cgroup_control(struct cgroup *cgrp)
{
        struct cgroup *parent = cgroup_parent(cgrp);
        u16 root_ss_mask = cgrp->root->subsys_mask;

        if (parent)
                return parent->subtree_control;

        if (cgroup_on_dfl(cgrp))
                root_ss_mask &= ~(cgrp_dfl_inhibit_ss_mask |
                                  cgrp_dfl_implicit_ss_mask);
        return root_ss_mask;
}

/* subsystems enabled on a cgroup */
static u16 cgroup_ss_mask(struct cgroup *cgrp)
{
        struct cgroup *parent = cgroup_parent(cgrp);

        if (parent)
                return parent->subtree_ss_mask;

        return cgrp->root->subsys_mask;
}

/**
 * cgroup_css - obtain a cgroup's css for the specified subsystem
 * @cgrp: the cgroup of interest
 * @ss: the subsystem of interest (%NULL returns @cgrp->self)
 *
 * Return @cgrp's css (cgroup_subsys_state) associated with @ss.  This
 * function must be called either under cgroup_mutex or rcu_read_lock() and
 * the caller is responsible for pinning the returned css if it wants to
 * keep accessing it outside the said locks.  This function may return
 * %NULL if @cgrp doesn't have @ss enabled.
 */
static struct cgroup_subsys_state *cgroup_css(struct cgroup *cgrp,
                                              struct cgroup_subsys *ss)
{
        if (ss)
                return rcu_dereference_check(cgrp->subsys[ss->id],
                                        lockdep_is_held(&cgroup_mutex));
        else
                return &cgrp->self;
}

/**
 * cgroup_e_css - obtain a cgroup's effective css for the specified subsystem
 * @cgrp: the cgroup of interest
 * @ss: the subsystem of interest (%NULL returns @cgrp->self)
 *
 * Similar to cgroup_css() but returns the effective css, which is defined
 * as the matching css of the nearest ancestor including self which has @ss
 * enabled.  If @ss is associated with the hierarchy @cgrp is on, this
 * function is guaranteed to return non-NULL css.
 */
static struct cgroup_subsys_state *cgroup_e_css(struct cgroup *cgrp,
                                                struct cgroup_subsys *ss)
{
        lockdep_assert_held(&cgroup_mutex);

        if (!ss)
                return &cgrp->self;

        /*
         * This function is used while updating css associations and thus
         * can't test the csses directly.  Test ss_mask.
         */
        while (!(cgroup_ss_mask(cgrp) & (1 << ss->id))) {
                cgrp = cgroup_parent(cgrp);
                if (!cgrp)
                        return NULL;
        }

        return cgroup_css(cgrp, ss);
}

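/*
 * For example (illustrative): in a hierarchy /A/B where @ss is enabled
 * for A but not propagated to B through A's subtree_control, B has no
 * css of its own for @ss; the walk above returns A's css, which is what
 * effectively governs B's tasks.
 */
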
/**
 * cgroup_get_e_css - get a cgroup's effective css for the specified subsystem
 * @cgrp: the cgroup of interest
 * @ss: the subsystem of interest
 *
 * Find and get the effective css of @cgrp for @ss.  The effective css is
 * defined as the matching css of the nearest ancestor including self which
 * has @ss enabled.  If @ss is not mounted on the hierarchy @cgrp is on,
 * the root css is returned, so this function always returns a valid css.
 * The returned css must be put using css_put().
 */
struct cgroup_subsys_state *cgroup_get_e_css(struct cgroup *cgrp,
                                             struct cgroup_subsys *ss)
{
        struct cgroup_subsys_state *css;

        rcu_read_lock();

        do {
                css = cgroup_css(cgrp, ss);

                if (css && css_tryget_online(css))
                        goto out_unlock;
                cgrp = cgroup_parent(cgrp);
        } while (cgrp);

        css = init_css_set.subsys[ss->id];
        css_get(css);
out_unlock:
        rcu_read_unlock();
        return css;
}

/* convenient tests for these bits */
static inline bool cgroup_is_dead(const struct cgroup *cgrp)
{
        return !(cgrp->self.flags & CSS_ONLINE);
}

static void cgroup_get(struct cgroup *cgrp)
{
        WARN_ON_ONCE(cgroup_is_dead(cgrp));
        css_get(&cgrp->self);
}

static bool cgroup_tryget(struct cgroup *cgrp)
{
        return css_tryget(&cgrp->self);
}

struct cgroup_subsys_state *of_css(struct kernfs_open_file *of)
{
        struct cgroup *cgrp = of->kn->parent->priv;
        struct cftype *cft = of_cft(of);

        /*
         * This is an open and unprotected implementation of cgroup_css().
         * seq_css() is only called from a kernfs file operation which has
         * an active reference on the file.  Because all the subsystem
         * files are drained before a css is disassociated with a cgroup,
         * the matching css from the cgroup's subsys table is guaranteed to
         * be and stay valid until the enclosing operation is complete.
         */
        if (cft->ss)
                return rcu_dereference_raw(cgrp->subsys[cft->ss->id]);
        else
                return &cgrp->self;
}
EXPORT_SYMBOL_GPL(of_css);

static int notify_on_release(const struct cgroup *cgrp)
{
        return test_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags);
}

/**
 * for_each_css - iterate all css's of a cgroup
 * @css: the iteration cursor
 * @ssid: the index of the subsystem, CGROUP_SUBSYS_COUNT after reaching the end
 * @cgrp: the target cgroup to iterate css's of
 *
 * Should be called under cgroup_[tree_]mutex.
 */
#define for_each_css(css, ssid, cgrp)                                   \
        for ((ssid) = 0; (ssid) < CGROUP_SUBSYS_COUNT; (ssid)++)        \
                if (!((css) = rcu_dereference_check(                    \
                                (cgrp)->subsys[(ssid)],                 \
                                lockdep_is_held(&cgroup_mutex)))) { }   \
                else

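/*
 * Illustrative usage (a minimal sketch, not taken from this file):
 *
 *	struct cgroup_subsys_state *css;
 *	int ssid;
 *
 *	for_each_css(css, ssid, cgrp)
 *		pr_info("subsys %d has a css on this cgroup\n", ssid);
 *
 * The trailing "else" in the macro makes the loop body run only for
 * slots where @cgrp actually has a css attached.
 */
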
/**
 * for_each_e_css - iterate all effective css's of a cgroup
 * @css: the iteration cursor
 * @ssid: the index of the subsystem, CGROUP_SUBSYS_COUNT after reaching the end
 * @cgrp: the target cgroup to iterate css's of
 *
 * Should be called under cgroup_[tree_]mutex.
 */
#define for_each_e_css(css, ssid, cgrp)                                 \
        for ((ssid) = 0; (ssid) < CGROUP_SUBSYS_COUNT; (ssid)++)        \
                if (!((css) = cgroup_e_css(cgrp, cgroup_subsys[(ssid)]))) \
                        ;                                               \
                else

/**
 * for_each_subsys - iterate all enabled cgroup subsystems
 * @ss: the iteration cursor
 * @ssid: the index of @ss, CGROUP_SUBSYS_COUNT after reaching the end
 */
#define for_each_subsys(ss, ssid)                                       \
        for ((ssid) = 0; (ssid) < CGROUP_SUBSYS_COUNT &&                \
             (((ss) = cgroup_subsys[ssid]) || true); (ssid)++)

/**
 * do_each_subsys_mask - filter for_each_subsys with a bitmask
 * @ss: the iteration cursor
 * @ssid: the index of @ss, CGROUP_SUBSYS_COUNT after reaching the end
 * @ss_mask: the bitmask
 *
 * The block will only run for cases where the ssid-th bit (1 << ssid) of
 * @ss_mask is set.
 */
#define do_each_subsys_mask(ss, ssid, ss_mask) do {                     \
        unsigned long __ss_mask = (ss_mask);                            \
        if (!CGROUP_SUBSYS_COUNT) { /* to avoid spurious gcc warning */ \
                (ssid) = 0;                                             \
                break;                                                  \
        }                                                               \
        for_each_set_bit(ssid, &__ss_mask, CGROUP_SUBSYS_COUNT) {       \
                (ss) = cgroup_subsys[ssid];                             \
                {

#define while_each_subsys_mask()                                        \
                }                                                       \
        }                                                               \
} while (false)

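/*
 * Illustrative usage (a minimal sketch; "enabled_mask" is a hypothetical
 * u16 subsystem bitmask):
 *
 *	struct cgroup_subsys *ss;
 *	int ssid;
 *
 *	do_each_subsys_mask(ss, ssid, enabled_mask) {
 *		pr_info("subsys %s is in the mask\n", ss->name);
 *	} while_each_subsys_mask();
 *
 * The macro pair expands to a loop over only the set bits, as
 * rebind_subsystems() below demonstrates.
 */
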
/* iterate across the hierarchies */
#define for_each_root(root)                                             \
        list_for_each_entry((root), &cgroup_roots, root_list)

/* iterate over child cgrps, lock should be held throughout iteration */
#define cgroup_for_each_live_child(child, cgrp)                         \
        list_for_each_entry((child), &(cgrp)->self.children, self.sibling) \
                if (({ lockdep_assert_held(&cgroup_mutex);              \
                       cgroup_is_dead(child); }))                       \
                        ;                                               \
                else

/* walk live descendants in preorder */
#define cgroup_for_each_live_descendant_pre(dsct, d_css, cgrp)          \
        css_for_each_descendant_pre((d_css), cgroup_css((cgrp), NULL))  \
                if (({ lockdep_assert_held(&cgroup_mutex);              \
                       (dsct) = (d_css)->cgroup;                        \
                       cgroup_is_dead(dsct); }))                        \
                        ;                                               \
                else

/* walk live descendants in postorder */
#define cgroup_for_each_live_descendant_post(dsct, d_css, cgrp)         \
        css_for_each_descendant_post((d_css), cgroup_css((cgrp), NULL)) \
                if (({ lockdep_assert_held(&cgroup_mutex);              \
                       (dsct) = (d_css)->cgroup;                        \
                       cgroup_is_dead(dsct); }))                        \
                        ;                                               \
                else


static void cgroup_release_agent(struct work_struct *work);
static void check_for_release(struct cgroup *cgrp);

/*
 * A cgroup can be associated with multiple css_sets as different tasks may
 * belong to different cgroups on different hierarchies.  In the other
 * direction, a css_set is naturally associated with multiple cgroups.
 * This M:N relationship is represented by the following link structure
 * which exists for each association and allows traversing the associations
 * from both sides.
 */
struct cgrp_cset_link {
        /* the cgroup and css_set this link associates */
        struct cgroup           *cgrp;
        struct css_set          *cset;

        /* list of cgrp_cset_links anchored at cgrp->cset_links */
        struct list_head        cset_link;

        /* list of cgrp_cset_links anchored at css_set->cgrp_links */
        struct list_head        cgrp_link;
};

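/*
 * A rough sketch of the resulting topology (illustrative only):
 *
 *	cgroup A --cset_links--> link --cgrp_link--> css_set 1
 *	cgroup A --cset_links--> link --cgrp_link--> css_set 2
 *	cgroup B --cset_links--> link --cgrp_link--> css_set 1
 *
 * Each link belongs to exactly one cgroup and one css_set and sits on a
 * list in each, so the associations can be walked from either side.
 */
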
/*
 * The default css_set - used by init and its children prior to any
 * hierarchies being mounted. It contains a pointer to the root state
 * for each subsystem. Also used to anchor the list of css_sets. Not
 * reference-counted, to improve performance when child cgroups
 * haven't been created.
 */
struct css_set init_css_set = {
        .refcount               = ATOMIC_INIT(1),
        .cgrp_links             = LIST_HEAD_INIT(init_css_set.cgrp_links),
        .tasks                  = LIST_HEAD_INIT(init_css_set.tasks),
        .mg_tasks               = LIST_HEAD_INIT(init_css_set.mg_tasks),
        .mg_preload_node        = LIST_HEAD_INIT(init_css_set.mg_preload_node),
        .mg_node                = LIST_HEAD_INIT(init_css_set.mg_node),
        .task_iters             = LIST_HEAD_INIT(init_css_set.task_iters),
};

static int css_set_count        = 1;    /* 1 for init_css_set */

/**
 * css_set_populated - does a css_set contain any tasks?
 * @cset: target css_set
 */
static bool css_set_populated(struct css_set *cset)
{
        lockdep_assert_held(&css_set_lock);

        return !list_empty(&cset->tasks) || !list_empty(&cset->mg_tasks);
}

/**
 * cgroup_update_populated - update populated count of a cgroup
 * @cgrp: the target cgroup
 * @populated: inc or dec populated count
 *
 * One of the css_sets associated with @cgrp is either getting its first
 * task or losing the last.  Update @cgrp->populated_cnt accordingly.  The
 * count is propagated towards root so that a given cgroup's populated_cnt
 * is zero iff the cgroup and all its descendants don't contain any tasks.
 *
 * @cgrp's interface file "cgroup.populated" is zero if
 * @cgrp->populated_cnt is zero and 1 otherwise.  When @cgrp->populated_cnt
 * changes from or to zero, userland is notified that the content of the
 * interface file has changed.  This can be used to detect when @cgrp and
 * its descendants become populated or empty.
 */
static void cgroup_update_populated(struct cgroup *cgrp, bool populated)
{
        lockdep_assert_held(&css_set_lock);

        do {
                bool trigger;

                if (populated)
                        trigger = !cgrp->populated_cnt++;
                else
                        trigger = !--cgrp->populated_cnt;

                if (!trigger)
                        break;

                check_for_release(cgrp);
                cgroup_file_notify(&cgrp->events_file);

                cgrp = cgroup_parent(cgrp);
        } while (cgrp);
}

/**
 * css_set_update_populated - update populated state of a css_set
 * @cset: target css_set
 * @populated: whether @cset is populated or depopulated
 *
 * @cset is either getting the first task or losing the last.  Update the
 * ->populated_cnt of all associated cgroups accordingly.
 */
static void css_set_update_populated(struct css_set *cset, bool populated)
{
        struct cgrp_cset_link *link;

        lockdep_assert_held(&css_set_lock);

        list_for_each_entry(link, &cset->cgrp_links, cgrp_link)
                cgroup_update_populated(link->cgrp, populated);
}

/**
 * css_set_move_task - move a task from one css_set to another
 * @task: task being moved
 * @from_cset: css_set @task currently belongs to (may be NULL)
 * @to_cset: new css_set @task is being moved to (may be NULL)
 * @use_mg_tasks: move to @to_cset->mg_tasks instead of ->tasks
 *
 * Move @task from @from_cset to @to_cset.  If @task didn't belong to any
 * css_set, @from_cset can be NULL.  If @task is being disassociated
 * instead of moved, @to_cset can be NULL.
 *
 * This function automatically handles populated_cnt updates and
 * css_task_iter adjustments but the caller is responsible for managing
 * @from_cset and @to_cset's reference counts.
 */
static void css_set_move_task(struct task_struct *task,
                              struct css_set *from_cset, struct css_set *to_cset,
                              bool use_mg_tasks)
{
        lockdep_assert_held(&css_set_lock);

        if (to_cset && !css_set_populated(to_cset))
                css_set_update_populated(to_cset, true);

        if (from_cset) {
                struct css_task_iter *it, *pos;

                WARN_ON_ONCE(list_empty(&task->cg_list));

                /*
                 * @task is leaving, advance task iterators which are
                 * pointing to it so that they can resume at the next
                 * position.  Advancing an iterator might remove it from
                 * the list, use safe walk.  See css_task_iter_advance*()
                 * for details.
                 */
                list_for_each_entry_safe(it, pos, &from_cset->task_iters,
                                         iters_node)
                        if (it->task_pos == &task->cg_list)
                                css_task_iter_advance(it);

                list_del_init(&task->cg_list);
                if (!css_set_populated(from_cset))
                        css_set_update_populated(from_cset, false);
        } else {
                WARN_ON_ONCE(!list_empty(&task->cg_list));
        }

        if (to_cset) {
                /*
                 * We are synchronized through cgroup_threadgroup_rwsem
                 * against PF_EXITING setting such that we can't race
                 * against cgroup_exit() changing the css_set to
                 * init_css_set and dropping the old one.
                 */
                WARN_ON_ONCE(task->flags & PF_EXITING);

                rcu_assign_pointer(task->cgroups, to_cset);
                list_add_tail(&task->cg_list, use_mg_tasks ? &to_cset->mg_tasks :
                                                             &to_cset->tasks);
        }
}

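/*
 * Typical call shapes (illustrative only):
 *
 *	css_set_move_task(task, NULL, cset, false);	attach a fresh task
 *	css_set_move_task(task, from, to, true);	stage for migration
 *	css_set_move_task(task, cset, NULL, false);	detach on exit
 *
 * All of these run under css_set_lock; the cset refcounts remain the
 * caller's responsibility as noted above.
 */
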
/*
 * hash table for css_sets.  This improves the performance of finding
 * an existing css_set.  This hash doesn't (currently) take into
 * account cgroups in empty hierarchies.
 */
#define CSS_SET_HASH_BITS       7
static DEFINE_HASHTABLE(css_set_table, CSS_SET_HASH_BITS);

static unsigned long css_set_hash(struct cgroup_subsys_state *css[])
{
        unsigned long key = 0UL;
        struct cgroup_subsys *ss;
        int i;

        for_each_subsys(ss, i)
                key += (unsigned long)css[i];
        key = (key >> 16) ^ key;

        return key;
}

static void put_css_set_locked(struct css_set *cset)
{
        struct cgrp_cset_link *link, *tmp_link;
        struct cgroup_subsys *ss;
        int ssid;

        lockdep_assert_held(&css_set_lock);

        if (!atomic_dec_and_test(&cset->refcount))
                return;

        /* This css_set is dead.  Unlink it and release cgroup and css refs */
        for_each_subsys(ss, ssid) {
                list_del(&cset->e_cset_node[ssid]);
                css_put(cset->subsys[ssid]);
        }
        hash_del(&cset->hlist);
        css_set_count--;

        list_for_each_entry_safe(link, tmp_link, &cset->cgrp_links, cgrp_link) {
                list_del(&link->cset_link);
                list_del(&link->cgrp_link);
                if (cgroup_parent(link->cgrp))
                        cgroup_put(link->cgrp);
                kfree(link);
        }

        kfree_rcu(cset, rcu_head);
}

static void put_css_set(struct css_set *cset)
{
        unsigned long flags;

        /*
         * Ensure that the refcount doesn't hit zero while any readers
         * can see it.  Similar to atomic_dec_and_lock(), but for a
         * spinlock acquired with irqs disabled.
         */
        if (atomic_add_unless(&cset->refcount, -1, 1))
                return;

        spin_lock_irqsave(&css_set_lock, flags);
        put_css_set_locked(cset);
        spin_unlock_irqrestore(&css_set_lock, flags);
}

/*
 * refcounted get/put for css_set objects
 */
static inline void get_css_set(struct css_set *cset)
{
        atomic_inc(&cset->refcount);
}

/**
 * compare_css_sets - helper function for find_existing_css_set().
 * @cset: candidate css_set being tested
 * @old_cset: existing css_set for a task
 * @new_cgrp: cgroup that's being entered by the task
 * @template: desired set of css pointers in css_set (pre-calculated)
 *
 * Returns true if "cset" matches "old_cset" except for the hierarchy
 * which "new_cgrp" belongs to, for which it should match "new_cgrp".
 */
static bool compare_css_sets(struct css_set *cset,
                             struct css_set *old_cset,
                             struct cgroup *new_cgrp,
                             struct cgroup_subsys_state *template[])
{
        struct list_head *l1, *l2;

        /*
         * On the default hierarchy, there can be csets which are
         * associated with the same set of cgroups but different csses.
         * Let's first ensure that csses match.
         */
        if (memcmp(template, cset->subsys, sizeof(cset->subsys)))
                return false;

        /*
         * Compare cgroup pointers in order to distinguish between
         * different cgroups in hierarchies.  As different cgroups may
         * share the same effective css, this comparison is always
         * necessary.
         */
        l1 = &cset->cgrp_links;
        l2 = &old_cset->cgrp_links;
        while (1) {
                struct cgrp_cset_link *link1, *link2;
                struct cgroup *cgrp1, *cgrp2;

                l1 = l1->next;
                l2 = l2->next;
                /* See if we reached the end - both lists are of equal length. */
                if (l1 == &cset->cgrp_links) {
                        BUG_ON(l2 != &old_cset->cgrp_links);
                        break;
                } else {
                        BUG_ON(l2 == &old_cset->cgrp_links);
                }
                /* Locate the cgroups associated with these links. */
                link1 = list_entry(l1, struct cgrp_cset_link, cgrp_link);
                link2 = list_entry(l2, struct cgrp_cset_link, cgrp_link);
                cgrp1 = link1->cgrp;
                cgrp2 = link2->cgrp;
                /* Hierarchies should be linked in the same order. */
                BUG_ON(cgrp1->root != cgrp2->root);

                /*
                 * If this hierarchy is the hierarchy of the cgroup
                 * that's changing, then we need to check that this
                 * css_set points to the new cgroup; if it's any other
                 * hierarchy, then this css_set should point to the
                 * same cgroup as the old css_set.
                 */
                if (cgrp1->root == new_cgrp->root) {
                        if (cgrp1 != new_cgrp)
                                return false;
                } else {
                        if (cgrp1 != cgrp2)
                                return false;
                }
        }
        return true;
}

/**
 * find_existing_css_set - init css array and find the matching css_set
 * @old_cset: the css_set that we're using before the cgroup transition
 * @cgrp: the cgroup that we're moving into
 * @template: out param for the new set of csses, should be clear on entry
 */
static struct css_set *find_existing_css_set(struct css_set *old_cset,
                                        struct cgroup *cgrp,
                                        struct cgroup_subsys_state *template[])
{
        struct cgroup_root *root = cgrp->root;
        struct cgroup_subsys *ss;
        struct css_set *cset;
        unsigned long key;
        int i;

        /*
         * Build the set of subsystem state objects that we want to see in the
         * new css_set.  While subsystems can change globally, the entries here
         * won't change, so no need for locking.
         */
        for_each_subsys(ss, i) {
                if (root->subsys_mask & (1UL << i)) {
                        /*
                         * @ss is in this hierarchy, so we want the
                         * effective css from @cgrp.
                         */
                        template[i] = cgroup_e_css(cgrp, ss);
                } else {
                        /*
                         * @ss is not in this hierarchy, so we don't want
                         * to change the css.
                         */
                        template[i] = old_cset->subsys[i];
                }
        }

        key = css_set_hash(template);
        hash_for_each_possible(css_set_table, cset, hlist, key) {
                if (!compare_css_sets(cset, old_cset, cgrp, template))
                        continue;

                /* This css_set matches what we need */
                return cset;
        }

        /* No existing css_set matched */
        return NULL;
}

static void free_cgrp_cset_links(struct list_head *links_to_free)
{
        struct cgrp_cset_link *link, *tmp_link;

        list_for_each_entry_safe(link, tmp_link, links_to_free, cset_link) {
                list_del(&link->cset_link);
                kfree(link);
        }
}

/**
 * allocate_cgrp_cset_links - allocate cgrp_cset_links
 * @count: the number of links to allocate
 * @tmp_links: list_head the allocated links are put on
 *
 * Allocate @count cgrp_cset_link structures and chain them on @tmp_links
 * through ->cset_link.  Returns 0 on success or -errno.
 */
static int allocate_cgrp_cset_links(int count, struct list_head *tmp_links)
{
        struct cgrp_cset_link *link;
        int i;

        INIT_LIST_HEAD(tmp_links);

        for (i = 0; i < count; i++) {
                link = kzalloc(sizeof(*link), GFP_KERNEL);
                if (!link) {
                        free_cgrp_cset_links(tmp_links);
                        return -ENOMEM;
                }
                list_add(&link->cset_link, tmp_links);
        }
        return 0;
}

/**
 * link_css_set - a helper function to link a css_set to a cgroup
 * @tmp_links: cgrp_cset_link objects allocated by allocate_cgrp_cset_links()
 * @cset: the css_set to be linked
 * @cgrp: the destination cgroup
 */
static void link_css_set(struct list_head *tmp_links, struct css_set *cset,
                         struct cgroup *cgrp)
{
        struct cgrp_cset_link *link;

        BUG_ON(list_empty(tmp_links));

        if (cgroup_on_dfl(cgrp))
                cset->dfl_cgrp = cgrp;

        link = list_first_entry(tmp_links, struct cgrp_cset_link, cset_link);
        link->cset = cset;
        link->cgrp = cgrp;

        /*
         * Always add links to the tail of the lists so that the lists are
         * in chronological order.
         */
        list_move_tail(&link->cset_link, &cgrp->cset_links);
        list_add_tail(&link->cgrp_link, &cset->cgrp_links);

        if (cgroup_parent(cgrp))
                cgroup_get(cgrp);
}

/**
 * find_css_set - return a new css_set with one cgroup updated
 * @old_cset: the baseline css_set
 * @cgrp: the cgroup to be updated
 *
 * Return a new css_set that's equivalent to @old_cset, but with @cgrp
 * substituted into the appropriate hierarchy.
 */
static struct css_set *find_css_set(struct css_set *old_cset,
                                    struct cgroup *cgrp)
{
        struct cgroup_subsys_state *template[CGROUP_SUBSYS_COUNT] = { };
        struct css_set *cset;
        struct list_head tmp_links;
        struct cgrp_cset_link *link;
        struct cgroup_subsys *ss;
        unsigned long key;
        int ssid;

        lockdep_assert_held(&cgroup_mutex);

        /* First see if we already have a css_set that matches
         * the desired set */
        spin_lock_irq(&css_set_lock);
        cset = find_existing_css_set(old_cset, cgrp, template);
        if (cset)
                get_css_set(cset);
        spin_unlock_irq(&css_set_lock);

        if (cset)
                return cset;

        cset = kzalloc(sizeof(*cset), GFP_KERNEL);
        if (!cset)
                return NULL;

        /* Allocate all the cgrp_cset_link objects that we'll need */
        if (allocate_cgrp_cset_links(cgroup_root_count, &tmp_links) < 0) {
                kfree(cset);
                return NULL;
        }

        atomic_set(&cset->refcount, 1);
        INIT_LIST_HEAD(&cset->cgrp_links);
        INIT_LIST_HEAD(&cset->tasks);
        INIT_LIST_HEAD(&cset->mg_tasks);
        INIT_LIST_HEAD(&cset->mg_preload_node);
        INIT_LIST_HEAD(&cset->mg_node);
        INIT_LIST_HEAD(&cset->task_iters);
        INIT_HLIST_NODE(&cset->hlist);

        /* Copy the set of subsystem state objects generated in
         * find_existing_css_set() */
        memcpy(cset->subsys, template, sizeof(cset->subsys));

        spin_lock_irq(&css_set_lock);
        /* Add reference counts and links from the new css_set. */
        list_for_each_entry(link, &old_cset->cgrp_links, cgrp_link) {
                struct cgroup *c = link->cgrp;

                if (c->root == cgrp->root)
                        c = cgrp;
                link_css_set(&tmp_links, cset, c);
        }

        BUG_ON(!list_empty(&tmp_links));

        css_set_count++;

        /* Add @cset to the hash table */
        key = css_set_hash(cset->subsys);
        hash_add(css_set_table, &cset->hlist, key);

        for_each_subsys(ss, ssid) {
                struct cgroup_subsys_state *css = cset->subsys[ssid];

                list_add_tail(&cset->e_cset_node[ssid],
                              &css->cgroup->e_csets[ssid]);
                css_get(css);
        }

        spin_unlock_irq(&css_set_lock);

        return cset;
}

static struct cgroup_root *cgroup_root_from_kf(struct kernfs_root *kf_root)
{
        struct cgroup *root_cgrp = kf_root->kn->priv;

        return root_cgrp->root;
}

static int cgroup_init_root_id(struct cgroup_root *root)
{
        int id;

        lockdep_assert_held(&cgroup_mutex);

        id = idr_alloc_cyclic(&cgroup_hierarchy_idr, root, 0, 0, GFP_KERNEL);
        if (id < 0)
                return id;

        root->hierarchy_id = id;
        return 0;
}

static void cgroup_exit_root_id(struct cgroup_root *root)
{
        lockdep_assert_held(&cgroup_mutex);

        idr_remove(&cgroup_hierarchy_idr, root->hierarchy_id);
}

static void cgroup_free_root(struct cgroup_root *root)
{
        if (root) {
                idr_destroy(&root->cgroup_idr);
                kfree(root);
        }
}

static void cgroup_destroy_root(struct cgroup_root *root)
{
        struct cgroup *cgrp = &root->cgrp;
        struct cgrp_cset_link *link, *tmp_link;

        trace_cgroup_destroy_root(root);

        cgroup_lock_and_drain_offline(&cgrp_dfl_root.cgrp);

        BUG_ON(atomic_read(&root->nr_cgrps));
        BUG_ON(!list_empty(&cgrp->self.children));

        /* Rebind all subsystems back to the default hierarchy */
        WARN_ON(rebind_subsystems(&cgrp_dfl_root, root->subsys_mask));

        /*
         * Release all the links from cset_links to this hierarchy's
         * root cgroup
         */
        spin_lock_irq(&css_set_lock);

        list_for_each_entry_safe(link, tmp_link, &cgrp->cset_links, cset_link) {
                list_del(&link->cset_link);
                list_del(&link->cgrp_link);
                kfree(link);
        }

        spin_unlock_irq(&css_set_lock);

        if (!list_empty(&root->root_list)) {
                list_del(&root->root_list);
                cgroup_root_count--;
        }

        cgroup_exit_root_id(root);

        mutex_unlock(&cgroup_mutex);

        kernfs_destroy_root(root->kf_root);
        cgroup_free_root(root);
}

/*
 * look up cgroup associated with current task's cgroup namespace on the
 * specified hierarchy
 */
static struct cgroup *
current_cgns_cgroup_from_root(struct cgroup_root *root)
{
        struct cgroup *res = NULL;
        struct css_set *cset;

        lockdep_assert_held(&css_set_lock);

        rcu_read_lock();

        cset = current->nsproxy->cgroup_ns->root_cset;
        if (cset == &init_css_set) {
                res = &root->cgrp;
        } else {
                struct cgrp_cset_link *link;

                list_for_each_entry(link, &cset->cgrp_links, cgrp_link) {
                        struct cgroup *c = link->cgrp;

                        if (c->root == root) {
                                res = c;
                                break;
                        }
                }
        }
        rcu_read_unlock();

        BUG_ON(!res);
        return res;
}

/* look up cgroup associated with given css_set on the specified hierarchy */
static struct cgroup *cset_cgroup_from_root(struct css_set *cset,
                                            struct cgroup_root *root)
{
        struct cgroup *res = NULL;

        lockdep_assert_held(&cgroup_mutex);
        lockdep_assert_held(&css_set_lock);

        if (cset == &init_css_set) {
                res = &root->cgrp;
        } else {
                struct cgrp_cset_link *link;

                list_for_each_entry(link, &cset->cgrp_links, cgrp_link) {
                        struct cgroup *c = link->cgrp;

                        if (c->root == root) {
                                res = c;
                                break;
                        }
                }
        }

        BUG_ON(!res);
        return res;
}

/*
 * Return the cgroup for "task" from the given hierarchy. Must be
 * called with cgroup_mutex and css_set_lock held.
 */
static struct cgroup *task_cgroup_from_root(struct task_struct *task,
                                            struct cgroup_root *root)
{
        /*
         * No need to lock the task - since we hold cgroup_mutex the
         * task can't change groups, so the only thing that can happen
         * is that it exits and its css is set back to init_css_set.
         */
        return cset_cgroup_from_root(task_css_set(task), root);
}

/*
 * A task must hold cgroup_mutex to modify cgroups.
 *
 * Any task can increment and decrement the count field without lock.
 * So in general, code holding cgroup_mutex can't rely on the count
 * field not changing.  However, if the count goes to zero, then only
 * cgroup_attach_task() can increment it again.  Because a count of zero
 * means that no tasks are currently attached, there is no way for an
 * attached task to fork (the other way to increment the count).  So code
 * holding cgroup_mutex can safely assume that if the count is zero, it
 * will stay zero.  Similarly, if a task holds cgroup_mutex on a cgroup
 * with zero count, it knows that the cgroup won't be removed, as
 * cgroup_rmdir() needs that mutex.
 *
 * A cgroup can only be deleted if both its 'count' of using tasks is
 * zero and its list of 'children' cgroups is empty.  Since all tasks in
 * the system use _some_ cgroup, and since there is always at least one
 * task in the system (init, pid == 1), the root cgroup always has either
 * child cgroups or using tasks.  So we don't need a special hack to
 * ensure that the root cgroup cannot be deleted.
 *
 * P.S.  One more locking exception.  RCU is used to guard the
 * update of a task's cgroup pointer by cgroup_attach_task()
 */

static struct kernfs_syscall_ops cgroup_kf_syscall_ops;
static const struct file_operations proc_cgroupstats_operations;

static char *cgroup_file_name(struct cgroup *cgrp, const struct cftype *cft,
                              char *buf)
{
        struct cgroup_subsys *ss = cft->ss;

        if (cft->ss && !(cft->flags & CFTYPE_NO_PREFIX) &&
            !(cgrp->root->flags & CGRP_ROOT_NOPREFIX))
                snprintf(buf, CGROUP_FILE_NAME_MAX, "%s.%s",
                         cgroup_on_dfl(cgrp) ? ss->name : ss->legacy_name,
                         cft->name);
        else
                strncpy(buf, cft->name, CGROUP_FILE_NAME_MAX);
        return buf;
}

/**
 * cgroup_file_mode - deduce file mode of a control file
 * @cft: the control file in question
 *
 * S_IRUGO for read, S_IWUSR for write.
 */
static umode_t cgroup_file_mode(const struct cftype *cft)
{
        umode_t mode = 0;

        if (cft->read_u64 || cft->read_s64 || cft->seq_show)
                mode |= S_IRUGO;

        if (cft->write_u64 || cft->write_s64 || cft->write) {
                if (cft->flags & CFTYPE_WORLD_WRITABLE)
                        mode |= S_IWUGO;
                else
                        mode |= S_IWUSR;
        }

        return mode;
}

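/*
 * For example (illustrative): a cft providing only ->read_u64 yields
 * 0444; one with ->read_u64 and ->write yields 0644, or 0666 if
 * CFTYPE_WORLD_WRITABLE is set.
 */
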
/**
 * cgroup_calc_subtree_ss_mask - calculate subtree_ss_mask
 * @subtree_control: the new subtree_control mask to consider
 * @this_ss_mask: available subsystems
 *
 * On the default hierarchy, a subsystem may request other subsystems to be
 * enabled together through its ->depends_on mask.  In such cases, more
 * subsystems than specified in "cgroup.subtree_control" may be enabled.
 *
 * This function calculates which subsystems need to be enabled if
 * @subtree_control is to be applied while restricted to @this_ss_mask.
 */
static u16 cgroup_calc_subtree_ss_mask(u16 subtree_control, u16 this_ss_mask)
{
        u16 cur_ss_mask = subtree_control;
        struct cgroup_subsys *ss;
        int ssid;

        lockdep_assert_held(&cgroup_mutex);

        cur_ss_mask |= cgrp_dfl_implicit_ss_mask;

        while (true) {
                u16 new_ss_mask = cur_ss_mask;

                do_each_subsys_mask(ss, ssid, cur_ss_mask) {
                        new_ss_mask |= ss->depends_on;
                } while_each_subsys_mask();

                /*
                 * Mask out subsystems which aren't available.  This can
                 * happen only if some depended-upon subsystems were bound
                 * to non-default hierarchies.
                 */
                new_ss_mask &= this_ss_mask;

                if (new_ss_mask == cur_ss_mask)
                        break;
                cur_ss_mask = new_ss_mask;
        }

        return cur_ss_mask;
}

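/*
 * Worked example (hypothetical subsystems): if A depends on B and B
 * depends on C, enabling A in "cgroup.subtree_control" expands as
 *
 *	{A} -> {A,B} -> {A,B,C}
 *
 * with one loop iteration per step, stopping once the mask is stable:
 * a transitive closure over ->depends_on, clamped to @this_ss_mask.
 */
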
/**
 * cgroup_kn_unlock - unlocking helper for cgroup kernfs methods
 * @kn: the kernfs_node being serviced
 *
 * This helper undoes cgroup_kn_lock_live() and should be invoked before
 * the method finishes if locking succeeded.  Note that once this function
 * returns the cgroup returned by cgroup_kn_lock_live() may become
 * inaccessible any time.  If the caller intends to continue to access the
 * cgroup, it should pin it before invoking this function.
 */
static void cgroup_kn_unlock(struct kernfs_node *kn)
{
        struct cgroup *cgrp;

        if (kernfs_type(kn) == KERNFS_DIR)
                cgrp = kn->priv;
        else
                cgrp = kn->parent->priv;

        mutex_unlock(&cgroup_mutex);

        kernfs_unbreak_active_protection(kn);
        cgroup_put(cgrp);
}

/**
 * cgroup_kn_lock_live - locking helper for cgroup kernfs methods
 * @kn: the kernfs_node being serviced
 * @drain_offline: perform offline draining on the cgroup
 *
 * This helper is to be used by a cgroup kernfs method currently servicing
 * @kn.  It breaks the active protection, performs cgroup locking and
 * verifies that the associated cgroup is alive.  Returns the cgroup if
 * alive; otherwise, %NULL.  A successful return should be undone by a
 * matching cgroup_kn_unlock() invocation.  If @drain_offline is %true, the
 * cgroup is drained of offlining csses before return.
 *
 * Any cgroup kernfs method implementation which requires locking the
 * associated cgroup should use this helper.  It avoids nesting cgroup
 * locking under kernfs active protection and allows all kernfs operations
 * including self-removal.
 */
static struct cgroup *cgroup_kn_lock_live(struct kernfs_node *kn,
                                          bool drain_offline)
{
        struct cgroup *cgrp;

        if (kernfs_type(kn) == KERNFS_DIR)
                cgrp = kn->priv;
        else
                cgrp = kn->parent->priv;

        /*
         * We're gonna grab cgroup_mutex which nests outside kernfs
         * active_ref.  cgroup liveness check alone provides enough
         * protection against removal.  Ensure @cgrp stays accessible and
         * break the active_ref protection.
         */
        if (!cgroup_tryget(cgrp))
                return NULL;
        kernfs_break_active_protection(kn);

        if (drain_offline)
                cgroup_lock_and_drain_offline(cgrp);
        else
                mutex_lock(&cgroup_mutex);

        if (!cgroup_is_dead(cgrp))
                return cgrp;

        cgroup_kn_unlock(kn);
        return NULL;
}

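/*
 * Typical caller shape (a minimal sketch, not taken from this file;
 * "some_cgroup_write" and "nbytes" are hypothetical):
 *
 *	static ssize_t some_cgroup_write(struct kernfs_open_file *of, ...)
 *	{
 *		struct cgroup *cgrp = cgroup_kn_lock_live(of->kn, false);
 *
 *		if (!cgrp)
 *			return -ENODEV;
 *		... operate on cgrp under cgroup_mutex ...
 *		cgroup_kn_unlock(of->kn);
 *		return nbytes;
 *	}
 */
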
static void cgroup_rm_file(struct cgroup *cgrp, const struct cftype *cft)
{
        char name[CGROUP_FILE_NAME_MAX];

        lockdep_assert_held(&cgroup_mutex);

        if (cft->file_offset) {
                struct cgroup_subsys_state *css = cgroup_css(cgrp, cft->ss);
                struct cgroup_file *cfile = (void *)css + cft->file_offset;

                spin_lock_irq(&cgroup_file_kn_lock);
                cfile->kn = NULL;
                spin_unlock_irq(&cgroup_file_kn_lock);
        }

        kernfs_remove_by_name(cgrp->kn, cgroup_file_name(cgrp, cft, name));
}

/**
 * css_clear_dir - remove subsys files in a cgroup directory
 * @css: target css
 */
static void css_clear_dir(struct cgroup_subsys_state *css)
{
        struct cgroup *cgrp = css->cgroup;
        struct cftype *cfts;

        if (!(css->flags & CSS_VISIBLE))
                return;

        css->flags &= ~CSS_VISIBLE;

        list_for_each_entry(cfts, &css->ss->cfts, node)
                cgroup_addrm_files(css, cgrp, cfts, false);
}
1517
1518/**
1519 * css_populate_dir - create subsys files in a cgroup directory
1520 * @css: target css
1521 *
1522 * On failure, no file is added.
1523 */
1524static int css_populate_dir(struct cgroup_subsys_state *css)
1525{
1526        struct cgroup *cgrp = css->cgroup;
1527        struct cftype *cfts, *failed_cfts;
1528        int ret;
1529
1530        if ((css->flags & CSS_VISIBLE) || !cgrp->kn)
1531                return 0;
1532
1533        if (!css->ss) {
1534                if (cgroup_on_dfl(cgrp))
1535                        cfts = cgroup_dfl_base_files;
1536                else
1537                        cfts = cgroup_legacy_base_files;
1538
1539                return cgroup_addrm_files(&cgrp->self, cgrp, cfts, true);
1540        }
1541
1542        list_for_each_entry(cfts, &css->ss->cfts, node) {
1543                ret = cgroup_addrm_files(css, cgrp, cfts, true);
1544                if (ret < 0) {
1545                        failed_cfts = cfts;
1546                        goto err;
1547                }
1548        }
1549
1550        css->flags |= CSS_VISIBLE;
1551
1552        return 0;
1553err:
1554        list_for_each_entry(cfts, &css->ss->cfts, node) {
1555                if (cfts == failed_cfts)
1556                        break;
1557                cgroup_addrm_files(css, cgrp, cfts, false);
1558        }
1559        return ret;
1560}
1561
1562static int rebind_subsystems(struct cgroup_root *dst_root, u16 ss_mask)
1563{
1564        struct cgroup *dcgrp = &dst_root->cgrp;
1565        struct cgroup_subsys *ss;
1566        int ssid, i, ret;
1567
1568        lockdep_assert_held(&cgroup_mutex);
1569
1570        do_each_subsys_mask(ss, ssid, ss_mask) {
1571                /*
1572                 * If @ss has non-root csses attached to it, can't move.
1573                 * If @ss is an implicit controller, it is exempt from this
1574                 * rule and can be stolen.
1575                 */
1576                if (css_next_child(NULL, cgroup_css(&ss->root->cgrp, ss)) &&
1577                    !ss->implicit_on_dfl)
1578                        return -EBUSY;
1579
1580                /* can't move between two non-dummy roots either */
1581                if (ss->root != &cgrp_dfl_root && dst_root != &cgrp_dfl_root)
1582                        return -EBUSY;
1583        } while_each_subsys_mask();
1584
1585        do_each_subsys_mask(ss, ssid, ss_mask) {
1586                struct cgroup_root *src_root = ss->root;
1587                struct cgroup *scgrp = &src_root->cgrp;
1588                struct cgroup_subsys_state *css = cgroup_css(scgrp, ss);
1589                struct css_set *cset;
1590
1591                WARN_ON(!css || cgroup_css(dcgrp, ss));
1592
1593                /* disable from the source */
1594                src_root->subsys_mask &= ~(1 << ssid);
1595                WARN_ON(cgroup_apply_control(scgrp));
1596                cgroup_finalize_control(scgrp, 0);
1597
1598                /* rebind */
1599                RCU_INIT_POINTER(scgrp->subsys[ssid], NULL);
1600                rcu_assign_pointer(dcgrp->subsys[ssid], css);
1601                ss->root = dst_root;
1602                css->cgroup = dcgrp;
1603
1604                spin_lock_irq(&css_set_lock);
1605                hash_for_each(css_set_table, i, cset, hlist)
1606                        list_move_tail(&cset->e_cset_node[ss->id],
1607                                       &dcgrp->e_csets[ss->id]);
1608                spin_unlock_irq(&css_set_lock);
1609
1610                /* default hierarchy doesn't enable controllers by default */
1611                dst_root->subsys_mask |= 1 << ssid;
1612                if (dst_root == &cgrp_dfl_root) {
1613                        static_branch_enable(cgroup_subsys_on_dfl_key[ssid]);
1614                } else {
1615                        dcgrp->subtree_control |= 1 << ssid;
1616                        static_branch_disable(cgroup_subsys_on_dfl_key[ssid]);
1617                }
1618
1619                ret = cgroup_apply_control(dcgrp);
1620                if (ret)
1621                        pr_warn("partial failure to rebind %s controller (err=%d)\n",
1622                                ss->name, ret);
1623
1624                if (ss->bind)
1625                        ss->bind(css);
1626        } while_each_subsys_mask();
1627
1628        kernfs_activate(dcgrp->kn);
1629        return 0;
1630}
1631
1632static int cgroup_show_path(struct seq_file *sf, struct kernfs_node *kf_node,
1633                            struct kernfs_root *kf_root)
1634{
1635        int len = 0;
1636        char *buf = NULL;
1637        struct cgroup_root *kf_cgroot = cgroup_root_from_kf(kf_root);
1638        struct cgroup *ns_cgroup;
1639
1640        buf = kmalloc(PATH_MAX, GFP_KERNEL);
1641        if (!buf)
1642                return -ENOMEM;
1643
1644        spin_lock_irq(&css_set_lock);
1645        ns_cgroup = current_cgns_cgroup_from_root(kf_cgroot);
1646        len = kernfs_path_from_node(kf_node, ns_cgroup->kn, buf, PATH_MAX);
1647        spin_unlock_irq(&css_set_lock);
1648
1649        if (len >= PATH_MAX)
1650                len = -ERANGE;
1651        else if (len > 0) {
1652                seq_escape(sf, buf, " \t\n\\");
1653                len = 0;
1654        }
1655        kfree(buf);
1656        return len;
1657}
1658
1659static int cgroup_show_options(struct seq_file *seq,
1660                               struct kernfs_root *kf_root)
1661{
1662        struct cgroup_root *root = cgroup_root_from_kf(kf_root);
1663        struct cgroup_subsys *ss;
1664        int ssid;
1665
1666        if (root != &cgrp_dfl_root)
1667                for_each_subsys(ss, ssid)
1668                        if (root->subsys_mask & (1 << ssid))
1669                                seq_show_option(seq, ss->legacy_name, NULL);
1670        if (root->flags & CGRP_ROOT_NOPREFIX)
1671                seq_puts(seq, ",noprefix");
1672        if (root->flags & CGRP_ROOT_XATTR)
1673                seq_puts(seq, ",xattr");
1674
1675        spin_lock(&release_agent_path_lock);
1676        if (strlen(root->release_agent_path))
1677                seq_show_option(seq, "release_agent",
1678                                root->release_agent_path);
1679        spin_unlock(&release_agent_path_lock);
1680
1681        if (test_bit(CGRP_CPUSET_CLONE_CHILDREN, &root->cgrp.flags))
1682                seq_puts(seq, ",clone_children");
1683        if (strlen(root->name))
1684                seq_show_option(seq, "name", root->name);
1685        return 0;
1686}
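
/*
 * For example, a v1 hierarchy mounted with the cpu and cpuacct
 * controllers and a release agent would produce a /proc/mounts options
 * string along the lines of (illustrative; exact flags vary):
 *
 *	rw,cpu,cpuacct,release_agent=/sbin/agent
 */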
1687
1688struct cgroup_sb_opts {
1689        u16 subsys_mask;
1690        unsigned int flags;
1691        char *release_agent;
1692        bool cpuset_clone_children;
1693        char *name;
1694        /* User explicitly requested empty subsystem */
1695        bool none;
1696};
1697
1698static int parse_cgroupfs_options(char *data, struct cgroup_sb_opts *opts)
1699{
1700        char *token, *o = data;
1701        bool all_ss = false, one_ss = false;
1702        u16 mask = U16_MAX;
1703        struct cgroup_subsys *ss;
1704        int nr_opts = 0;
1705        int i;
1706
1707#ifdef CONFIG_CPUSETS
1708        mask = ~((u16)1 << cpuset_cgrp_id);
1709#endif
1710
1711        memset(opts, 0, sizeof(*opts));
1712
1713        while ((token = strsep(&o, ",")) != NULL) {
1714                nr_opts++;
1715
1716                if (!*token)
1717                        return -EINVAL;
1718                if (!strcmp(token, "none")) {
1719                        /* Explicitly have no subsystems */
1720                        opts->none = true;
1721                        continue;
1722                }
1723                if (!strcmp(token, "all")) {
1724                        /* Mutually exclusive option 'all' + subsystem name */
1725                        if (one_ss)
1726                                return -EINVAL;
1727                        all_ss = true;
1728                        continue;
1729                }
1730                if (!strcmp(token, "noprefix")) {
1731                        opts->flags |= CGRP_ROOT_NOPREFIX;
1732                        continue;
1733                }
1734                if (!strcmp(token, "clone_children")) {
1735                        opts->cpuset_clone_children = true;
1736                        continue;
1737                }
1738                if (!strcmp(token, "xattr")) {
1739                        opts->flags |= CGRP_ROOT_XATTR;
1740                        continue;
1741                }
1742                if (!strncmp(token, "release_agent=", 14)) {
1743                        /* Specifying two release agents is forbidden */
1744                        if (opts->release_agent)
1745                                return -EINVAL;
1746                        opts->release_agent =
1747                                kstrndup(token + 14, PATH_MAX - 1, GFP_KERNEL);
1748                        if (!opts->release_agent)
1749                                return -ENOMEM;
1750                        continue;
1751                }
1752                if (!strncmp(token, "name=", 5)) {
1753                        const char *name = token + 5;
1754                        /* Can't specify an empty name */
1755                        if (!strlen(name))
1756                                return -EINVAL;
1757                        /* Must match [\w.-]+ */
1758                        for (i = 0; i < strlen(name); i++) {
1759                                char c = name[i];
1760                                if (isalnum(c))
1761                                        continue;
1762                                if ((c == '.') || (c == '-') || (c == '_'))
1763                                        continue;
1764                                return -EINVAL;
1765                        }
1766                        /* Specifying two names is forbidden */
1767                        if (opts->name)
1768                                return -EINVAL;
1769                        opts->name = kstrndup(name,
1770                                              MAX_CGROUP_ROOT_NAMELEN - 1,
1771                                              GFP_KERNEL);
1772                        if (!opts->name)
1773                                return -ENOMEM;
1774
1775                        continue;
1776                }
1777
1778                for_each_subsys(ss, i) {
1779                        if (strcmp(token, ss->legacy_name))
1780                                continue;
1781                        if (!cgroup_ssid_enabled(i))
1782                                continue;
1783                        if (cgroup_ssid_no_v1(i))
1784                                continue;
1785
1786                        /* Mutually exclusive option 'all' + subsystem name */
1787                        if (all_ss)
1788                                return -EINVAL;
1789                        opts->subsys_mask |= (1 << i);
1790                        one_ss = true;
1791
1792                        break;
1793                }
1794                if (i == CGROUP_SUBSYS_COUNT)
1795                        return -ENOENT;
1796        }
1797
1798        /*
1799         * If the 'all' option was specified, select all the subsystems;
1800         * otherwise, if none of 'none', 'name=' or a subsystem name was
1801         * specified, default to 'all'.
1802         */
1803        if (all_ss || (!one_ss && !opts->none && !opts->name))
1804                for_each_subsys(ss, i)
1805                        if (cgroup_ssid_enabled(i) && !cgroup_ssid_no_v1(i))
1806                                opts->subsys_mask |= (1 << i);
1807
1808        /*
1809         * A hierarchy has to be specified either by name or by subsystems
1810         * (so all empty hierarchies must have a name).
1811         */
1812        if (!opts->subsys_mask && !opts->name)
1813                return -EINVAL;
1814
1815        /*
1816         * Option noprefix was introduced just for backward compatibility
1817         * with the old cpuset, so we allow noprefix only if mounting just
1818         * the cpuset subsystem.
1819         */
1820        if ((opts->flags & CGRP_ROOT_NOPREFIX) && (opts->subsys_mask & mask))
1821                return -EINVAL;
1822
1823        /* Can't specify "none" and some subsystems */
1824        if (opts->subsys_mask && opts->none)
1825                return -EINVAL;
1826
1827        return 0;
1828}
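
/*
 * For example (illustrative), "-o cpuset,noprefix,name=mygrp" parses
 * into subsys_mask with only the cpuset bit set, CGRP_ROOT_NOPREFIX in
 * flags and name pointing at a copy of "mygrp", while
 * "-o cpu,memory,noprefix" is rejected with -EINVAL because noprefix
 * is only allowed when cpuset is the sole subsystem.
 */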
1829
1830static int cgroup_remount(struct kernfs_root *kf_root, int *flags, char *data)
1831{
1832        int ret = 0;
1833        struct cgroup_root *root = cgroup_root_from_kf(kf_root);
1834        struct cgroup_sb_opts opts;
1835        u16 added_mask, removed_mask;
1836
1837        if (root == &cgrp_dfl_root) {
1838                pr_err("remount is not allowed\n");
1839                return -EINVAL;
1840        }
1841
1842        cgroup_lock_and_drain_offline(&cgrp_dfl_root.cgrp);
1843
1844        /* See what subsystems are wanted */
1845        ret = parse_cgroupfs_options(data, &opts);
1846        if (ret)
1847                goto out_unlock;
1848
1849        if (opts.subsys_mask != root->subsys_mask || opts.release_agent)
1850                pr_warn("option changes via remount are deprecated (pid=%d comm=%s)\n",
1851                        task_tgid_nr(current), current->comm);
1852
1853        added_mask = opts.subsys_mask & ~root->subsys_mask;
1854        removed_mask = root->subsys_mask & ~opts.subsys_mask;
1855
1856        /* Don't allow flags or name to change at remount */
1857        if ((opts.flags ^ root->flags) ||
1858            (opts.name && strcmp(opts.name, root->name))) {
1859                pr_err("option or name mismatch, new: 0x%x \"%s\", old: 0x%x \"%s\"\n",
1860                       opts.flags, opts.name ?: "", root->flags, root->name);
1861                ret = -EINVAL;
1862                goto out_unlock;
1863        }
1864
1865        /* remounting is not allowed for populated hierarchies */
1866        if (!list_empty(&root->cgrp.self.children)) {
1867                ret = -EBUSY;
1868                goto out_unlock;
1869        }
1870
1871        ret = rebind_subsystems(root, added_mask);
1872        if (ret)
1873                goto out_unlock;
1874
1875        WARN_ON(rebind_subsystems(&cgrp_dfl_root, removed_mask));
1876
1877        if (opts.release_agent) {
1878                spin_lock(&release_agent_path_lock);
1879                strcpy(root->release_agent_path, opts.release_agent);
1880                spin_unlock(&release_agent_path_lock);
1881        }
1882
1883        trace_cgroup_remount(root);
1884
1885 out_unlock:
1886        kfree(opts.release_agent);
1887        kfree(opts.name);
1888        mutex_unlock(&cgroup_mutex);
1889        return ret;
1890}
1891
1892/*
1893 * To reduce the fork() overhead for systems that are not actually using
1894 * their cgroups capability, we don't maintain the lists running through
1895 * each css_set to its tasks until we see the list actually used - in other
1896 * words after the first mount.
1897 */
1898static bool use_task_css_set_links __read_mostly;
1899
1900static void cgroup_enable_task_cg_lists(void)
1901{
1902        struct task_struct *p, *g;
1903
1904        spin_lock_irq(&css_set_lock);
1905
1906        if (use_task_css_set_links)
1907                goto out_unlock;
1908
1909        use_task_css_set_links = true;
1910
1911        /*
1912         * We need tasklist_lock because RCU is not safe against
1913         * while_each_thread(). Besides, a forking task that has passed
1914         * cgroup_post_fork() without seeing use_task_css_set_links = 1
1915         * is not guaranteed to have its child immediately visible in the
1916         * tasklist if we walk through it with RCU.
1917         */
1918        read_lock(&tasklist_lock);
1919        do_each_thread(g, p) {
1920                WARN_ON_ONCE(!list_empty(&p->cg_list) ||
1921                             task_css_set(p) != &init_css_set);
1922
1923                 * We should check if the process is exiting, otherwise
1924                 * we will race with cgroup_exit() and the list entry
1925                 * won't be deleted even though the process has exited.
1926                 * entry won't be deleted though the process has exited.
1927                 * Do it while holding siglock so that we don't end up
1928                 * racing against cgroup_exit().
1929                 *
1930                 * Interrupts were already disabled while acquiring
1931                 * the css_set_lock, so we do not need to disable it
1932                 * again when acquiring the sighand->siglock here.
1933                 */
1934                spin_lock(&p->sighand->siglock);
1935                if (!(p->flags & PF_EXITING)) {
1936                        struct css_set *cset = task_css_set(p);
1937
1938                        if (!css_set_populated(cset))
1939                                css_set_update_populated(cset, true);
1940                        list_add_tail(&p->cg_list, &cset->tasks);
1941                        get_css_set(cset);
1942                }
1943                spin_unlock(&p->sighand->siglock);
1944        } while_each_thread(g, p);
1945        read_unlock(&tasklist_lock);
1946out_unlock:
1947        spin_unlock_irq(&css_set_lock);
1948}
1949
1950static void init_cgroup_housekeeping(struct cgroup *cgrp)
1951{
1952        struct cgroup_subsys *ss;
1953        int ssid;
1954
1955        INIT_LIST_HEAD(&cgrp->self.sibling);
1956        INIT_LIST_HEAD(&cgrp->self.children);
1957        INIT_LIST_HEAD(&cgrp->cset_links);
1958        INIT_LIST_HEAD(&cgrp->pidlists);
1959        mutex_init(&cgrp->pidlist_mutex);
1960        cgrp->self.cgroup = cgrp;
1961        cgrp->self.flags |= CSS_ONLINE;
1962
1963        for_each_subsys(ss, ssid)
1964                INIT_LIST_HEAD(&cgrp->e_csets[ssid]);
1965
1966        init_waitqueue_head(&cgrp->offline_waitq);
1967        INIT_WORK(&cgrp->release_agent_work, cgroup_release_agent);
1968}
1969
1970static void init_cgroup_root(struct cgroup_root *root,
1971                             struct cgroup_sb_opts *opts)
1972{
1973        struct cgroup *cgrp = &root->cgrp;
1974
1975        INIT_LIST_HEAD(&root->root_list);
1976        atomic_set(&root->nr_cgrps, 1);
1977        cgrp->root = root;
1978        init_cgroup_housekeeping(cgrp);
1979        idr_init(&root->cgroup_idr);
1980
1981        root->flags = opts->flags;
1982        if (opts->release_agent)
1983                strcpy(root->release_agent_path, opts->release_agent);
1984        if (opts->name)
1985                strcpy(root->name, opts->name);
1986        if (opts->cpuset_clone_children)
1987                set_bit(CGRP_CPUSET_CLONE_CHILDREN, &root->cgrp.flags);
1988}
1989
1990static int cgroup_setup_root(struct cgroup_root *root, u16 ss_mask)
1991{
1992        LIST_HEAD(tmp_links);
1993        struct cgroup *root_cgrp = &root->cgrp;
1994        struct css_set *cset;
1995        int i, ret;
1996
1997        lockdep_assert_held(&cgroup_mutex);
1998
1999        ret = cgroup_idr_alloc(&root->cgroup_idr, root_cgrp, 1, 2, GFP_KERNEL);
2000        if (ret < 0)
2001                goto out;
2002        root_cgrp->id = ret;
2003        root_cgrp->ancestor_ids[0] = ret;
2004
2005        ret = percpu_ref_init(&root_cgrp->self.refcnt, css_release, 0,
2006                              GFP_KERNEL);
2007        if (ret)
2008                goto out;
2009
2010        /*
2011         * We're accessing css_set_count without locking css_set_lock here,
2012         * but that's OK - it can only be increased by someone holding
2013         * cgroup_lock, and that's us.  Later rebinding may disable
2014         * controllers on the default hierarchy and thus create new csets,
2015         * which can't be more than the existing ones.  Allocate 2x.
2016         */
2017        ret = allocate_cgrp_cset_links(2 * css_set_count, &tmp_links);
2018        if (ret)
2019                goto cancel_ref;
2020
2021        ret = cgroup_init_root_id(root);
2022        if (ret)
2023                goto cancel_ref;
2024
2025        root->kf_root = kernfs_create_root(&cgroup_kf_syscall_ops,
2026                                           KERNFS_ROOT_CREATE_DEACTIVATED,
2027                                           root_cgrp);
2028        if (IS_ERR(root->kf_root)) {
2029                ret = PTR_ERR(root->kf_root);
2030                goto exit_root_id;
2031        }
2032        root_cgrp->kn = root->kf_root->kn;
2033
2034        ret = css_populate_dir(&root_cgrp->self);
2035        if (ret)
2036                goto destroy_root;
2037
2038        ret = rebind_subsystems(root, ss_mask);
2039        if (ret)
2040                goto destroy_root;
2041
2042        trace_cgroup_setup_root(root);
2043
2044        /*
2045         * There must be no failure case after here, since rebinding takes
2046         * care of subsystems' refcounts, which are explicitly dropped in
2047         * the failure exit path.
2048         */
2049        list_add(&root->root_list, &cgroup_roots);
2050        cgroup_root_count++;
2051
2052        /*
2053         * Link the root cgroup in this hierarchy into all the css_set
2054         * objects.
2055         */
2056        spin_lock_irq(&css_set_lock);
2057        hash_for_each(css_set_table, i, cset, hlist) {
2058                link_css_set(&tmp_links, cset, root_cgrp);
2059                if (css_set_populated(cset))
2060                        cgroup_update_populated(root_cgrp, true);
2061        }
2062        spin_unlock_irq(&css_set_lock);
2063
2064        BUG_ON(!list_empty(&root_cgrp->self.children));
2065        BUG_ON(atomic_read(&root->nr_cgrps) != 1);
2066
2067        kernfs_activate(root_cgrp->kn);
2068        ret = 0;
2069        goto out;
2070
2071destroy_root:
2072        kernfs_destroy_root(root->kf_root);
2073        root->kf_root = NULL;
2074exit_root_id:
2075        cgroup_exit_root_id(root);
2076cancel_ref:
2077        percpu_ref_exit(&root_cgrp->self.refcnt);
2078out:
2079        free_cgrp_cset_links(&tmp_links);
2080        return ret;
2081}
2082
2083static struct dentry *cgroup_mount(struct file_system_type *fs_type,
2084                         int flags, const char *unused_dev_name,
2085                         void *data)
2086{
2087        bool is_v2 = fs_type == &cgroup2_fs_type;
2088        struct super_block *pinned_sb = NULL;
2089        struct cgroup_namespace *ns = current->nsproxy->cgroup_ns;
2090        struct cgroup_subsys *ss;
2091        struct cgroup_root *root;
2092        struct cgroup_sb_opts opts;
2093        struct dentry *dentry;
2094        int ret;
2095        int i;
2096        bool new_sb;
2097
2098        get_cgroup_ns(ns);
2099
2100        /* Check if the caller has permission to mount. */
2101        if (!ns_capable(ns->user_ns, CAP_SYS_ADMIN)) {
2102                put_cgroup_ns(ns);
2103                return ERR_PTR(-EPERM);
2104        }
2105
2106        /*
2107         * The first time anyone tries to mount a cgroup, enable the list
2108         * linking each css_set to its tasks and fix up all existing tasks.
2109         */
2110        if (!use_task_css_set_links)
2111                cgroup_enable_task_cg_lists();
2112
2113        if (is_v2) {
2114                if (data) {
2115                        pr_err("cgroup2: unknown option \"%s\"\n", (char *)data);
2116                        put_cgroup_ns(ns);
2117                        return ERR_PTR(-EINVAL);
2118                }
2119                cgrp_dfl_visible = true;
2120                root = &cgrp_dfl_root;
2121                cgroup_get(&root->cgrp);
2122                goto out_mount;
2123        }
2124
2125        cgroup_lock_and_drain_offline(&cgrp_dfl_root.cgrp);
2126
2127        /* First find the desired set of subsystems */
2128        ret = parse_cgroupfs_options(data, &opts);
2129        if (ret)
2130                goto out_unlock;
2131
2132        /*
2133         * Destruction of cgroup root is asynchronous, so subsystems may
2134         * still be dying after the previous unmount.  Let's drain the
2135         * dying subsystems.  We just need to ensure that the ones
2136         * unmounted previously finish dying and don't care about new ones
2137         * starting.  Testing ref liveness is good enough.
2138         */
2139        for_each_subsys(ss, i) {
2140                if (!(opts.subsys_mask & (1 << i)) ||
2141                    ss->root == &cgrp_dfl_root)
2142                        continue;
2143
2144                if (!percpu_ref_tryget_live(&ss->root->cgrp.self.refcnt)) {
2145                        mutex_unlock(&cgroup_mutex);
2146                        msleep(10);
2147                        ret = restart_syscall();
2148                        goto out_free;
2149                }
2150                cgroup_put(&ss->root->cgrp);
2151        }
2152
2153        for_each_root(root) {
2154                bool name_match = false;
2155
2156                if (root == &cgrp_dfl_root)
2157                        continue;
2158
2159                /*
2160                 * If we asked for a name then it must match.  Also, if
2161                 * name matches but subsys_mask doesn't, we should fail.
2162                 * Remember whether name matched.
2163                 */
2164                if (opts.name) {
2165                        if (strcmp(opts.name, root->name))
2166                                continue;
2167                        name_match = true;
2168                }
2169
2170                /*
2171                 * If we asked for subsystems (or explicitly for no
2172                 * subsystems) then they must match.
2173                 */
2174                if ((opts.subsys_mask || opts.none) &&
2175                    (opts.subsys_mask != root->subsys_mask)) {
2176                        if (!name_match)
2177                                continue;
2178                        ret = -EBUSY;
2179                        goto out_unlock;
2180                }
2181
2182                if (root->flags ^ opts.flags)
2183                        pr_warn("new mount options do not match the existing superblock, will be ignored\n");
2184
2185                /*
2186                 * We want to reuse @root whose lifetime is governed by its
2187                 * ->cgrp.  Let's check whether @root is alive and keep it
2188                 * that way.  As cgroup_kill_sb() can happen anytime, we
2189                 * want to block it by pinning the sb so that @root doesn't
2190                 * get killed before mount is complete.
2191                 *
2192                 * With the sb pinned, tryget_live can reliably indicate
2193                 * whether @root can be reused.  If it's being killed,
2194                 * drain it.  We can use wait_queue for the wait but this
2195                 * path is super cold.  Let's just sleep a bit and retry.
2196                 */
2197                pinned_sb = kernfs_pin_sb(root->kf_root, NULL);
2198                if (IS_ERR(pinned_sb) ||
2199                    !percpu_ref_tryget_live(&root->cgrp.self.refcnt)) {
2200                        mutex_unlock(&cgroup_mutex);
2201                        if (!IS_ERR_OR_NULL(pinned_sb))
2202                                deactivate_super(pinned_sb);
2203                        msleep(10);
2204                        ret = restart_syscall();
2205                        goto out_free;
2206                }
2207
2208                ret = 0;
2209                goto out_unlock;
2210        }
2211
2212        /*
2213         * No such thing, create a new one.  name= matching without subsys
2214         * specification is allowed for already existing hierarchies but we
2215         * can't create a new one without subsys specification.
2216         */
2217        if (!opts.subsys_mask && !opts.none) {
2218                ret = -EINVAL;
2219                goto out_unlock;
2220        }
2221
2222        /* Hierarchies may only be created in the initial cgroup namespace. */
2223        if (ns != &init_cgroup_ns) {
2224                ret = -EPERM;
2225                goto out_unlock;
2226        }
2227
2228        root = kzalloc(sizeof(*root), GFP_KERNEL);
2229        if (!root) {
2230                ret = -ENOMEM;
2231                goto out_unlock;
2232        }
2233
2234        init_cgroup_root(root, &opts);
2235
2236        ret = cgroup_setup_root(root, opts.subsys_mask);
2237        if (ret)
2238                cgroup_free_root(root);
2239
2240out_unlock:
2241        mutex_unlock(&cgroup_mutex);
2242out_free:
2243        kfree(opts.release_agent);
2244        kfree(opts.name);
2245
2246        if (ret) {
2247                put_cgroup_ns(ns);
2248                return ERR_PTR(ret);
2249        }
2250out_mount:
2251        dentry = kernfs_mount(fs_type, flags, root->kf_root,
2252                              is_v2 ? CGROUP2_SUPER_MAGIC : CGROUP_SUPER_MAGIC,
2253                              &new_sb);
2254
2255        /*
2256         * In non-init cgroup namespace, instead of root cgroup's
2257         * dentry, we return the dentry corresponding to the
2258         * cgroupns->root_cgrp.
2259         */
2260        if (!IS_ERR(dentry) && ns != &init_cgroup_ns) {
2261                struct dentry *nsdentry;
2262                struct cgroup *cgrp;
2263
2264                mutex_lock(&cgroup_mutex);
2265                spin_lock_irq(&css_set_lock);
2266
2267                cgrp = cset_cgroup_from_root(ns->root_cset, root);
2268
2269                spin_unlock_irq(&css_set_lock);
2270                mutex_unlock(&cgroup_mutex);
2271
2272                nsdentry = kernfs_node_dentry(cgrp->kn, dentry->d_sb);
2273                dput(dentry);
2274                dentry = nsdentry;
2275        }
2276
2277        if (IS_ERR(dentry) || !new_sb)
2278                cgroup_put(&root->cgrp);
2279
2280        /*
2281         * If @pinned_sb, we're reusing an existing root and holding an
2282         * extra ref on its sb.  Mount is complete.  Put the extra ref.
2283         */
2284        if (pinned_sb) {
2285                WARN_ON(new_sb);
2286                deactivate_super(pinned_sb);
2287        }
2288
2289        put_cgroup_ns(ns);
2290        return dentry;
2291}
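
/*
 * Illustrative userspace usage (assumed commands): both filesystem
 * types funnel into this entry point.
 *
 *	# v1 named hierarchy with two controllers
 *	mount -t cgroup -o cpu,cpuacct,name=mygrp none /mnt/cg1
 *
 *	# v2 unified hierarchy; any -o option is rejected
 *	mount -t cgroup2 none /mnt/unified
 */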
2292
2293static void cgroup_kill_sb(struct super_block *sb)
2294{
2295        struct kernfs_root *kf_root = kernfs_root_from_sb(sb);
2296        struct cgroup_root *root = cgroup_root_from_kf(kf_root);
2297
2298        /*
2299         * If @root doesn't have any mounts or children, start killing it.
2300         * This prevents new mounts by disabling percpu_ref_tryget_live().
2301         * cgroup_mount() may wait for @root's release.
2302         *
2303         * And don't kill the default root.
2304         */
2305        if (!list_empty(&root->cgrp.self.children) ||
2306            root == &cgrp_dfl_root)
2307                cgroup_put(&root->cgrp);
2308        else
2309                percpu_ref_kill(&root->cgrp.self.refcnt);
2310
2311        kernfs_kill_sb(sb);
2312}
2313
2314static struct file_system_type cgroup_fs_type = {
2315        .name = "cgroup",
2316        .mount = cgroup_mount,
2317        .kill_sb = cgroup_kill_sb,
2318        .fs_flags = FS_USERNS_MOUNT,
2319};
2320
2321static struct file_system_type cgroup2_fs_type = {
2322        .name = "cgroup2",
2323        .mount = cgroup_mount,
2324        .kill_sb = cgroup_kill_sb,
2325        .fs_flags = FS_USERNS_MOUNT,
2326};
2327
2328static int cgroup_path_ns_locked(struct cgroup *cgrp, char *buf, size_t buflen,
2329                                 struct cgroup_namespace *ns)
2330{
2331        struct cgroup *root = cset_cgroup_from_root(ns->root_cset, cgrp->root);
2332
2333        return kernfs_path_from_node(cgrp->kn, root->kn, buf, buflen);
2334}
2335
2336int cgroup_path_ns(struct cgroup *cgrp, char *buf, size_t buflen,
2337                   struct cgroup_namespace *ns)
2338{
2339        int ret;
2340
2341        mutex_lock(&cgroup_mutex);
2342        spin_lock_irq(&css_set_lock);
2343
2344        ret = cgroup_path_ns_locked(cgrp, buf, buflen, ns);
2345
2346        spin_unlock_irq(&css_set_lock);
2347        mutex_unlock(&cgroup_mutex);
2348
2349        return ret;
2350}
2351EXPORT_SYMBOL_GPL(cgroup_path_ns);
2352
2353/**
2354 * task_cgroup_path - cgroup path of a task in the first cgroup hierarchy
2355 * @task: target task
2356 * @buf: the buffer to write the path into
2357 * @buflen: the length of the buffer
2358 *
2359 * Determine @task's cgroup on the first (the one with the lowest non-zero
2360 * hierarchy_id) cgroup hierarchy and copy its path into @buf.  This
2361 * function grabs cgroup_mutex and shouldn't be used inside locks used by
2362 * cgroup controller callbacks.
2363 *
2364 * Return value is the same as kernfs_path().
2365 */
2366int task_cgroup_path(struct task_struct *task, char *buf, size_t buflen)
2367{
2368        struct cgroup_root *root;
2369        struct cgroup *cgrp;
2370        int hierarchy_id = 1;
2371        int ret;
2372
2373        mutex_lock(&cgroup_mutex);
2374        spin_lock_irq(&css_set_lock);
2375
2376        root = idr_get_next(&cgroup_hierarchy_idr, &hierarchy_id);
2377
2378        if (root) {
2379                cgrp = task_cgroup_from_root(task, root);
2380                ret = cgroup_path_ns_locked(cgrp, buf, buflen, &init_cgroup_ns);
2381        } else {
2382                /* if no hierarchy exists, everyone is in "/" */
2383                ret = strlcpy(buf, "/", buflen);
2384        }
2385
2386        spin_unlock_irq(&css_set_lock);
2387        mutex_unlock(&cgroup_mutex);
2388        return ret;
2389}
2390EXPORT_SYMBOL_GPL(task_cgroup_path);
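
/*
 * A minimal usage sketch (illustrative) - callers pass a buffer and
 * treat the return value like kernfs_path()'s, i.e. the path length on
 * success or a negative errno:
 *
 *	char *buf = kmalloc(PATH_MAX, GFP_KERNEL);
 *	int ret;
 *
 *	if (buf) {
 *		ret = task_cgroup_path(current, buf, PATH_MAX);
 *		if (ret >= 0 && ret < PATH_MAX)
 *			pr_info("cgroup: %s\n", buf);
 *		kfree(buf);
 *	}
 */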
2391
2392/* used to track tasks and other necessary states during migration */
2393struct cgroup_taskset {
2394        /* the src and dst cset list running through cset->mg_node */
2395        struct list_head        src_csets;
2396        struct list_head        dst_csets;
2397
2398        /* the subsys currently being processed */
2399        int                     ssid;
2400
2401        /*
2402         * Fields for cgroup_taskset_*() iteration.
2403         *
2404         * Before migration is committed, the target migration tasks are on
2405         * ->mg_tasks of the csets on ->src_csets.  After, on ->mg_tasks of
2406         * the csets on ->dst_csets.  ->csets points to either ->src_csets
2407         * or ->dst_csets depending on whether migration is committed.
2408         *
2409         * ->cur_cset and ->cur_task point to the current task position
2410         * during iteration.
2411         */
2412        struct list_head        *csets;
2413        struct css_set          *cur_cset;
2414        struct task_struct      *cur_task;
2415};
2416
2417#define CGROUP_TASKSET_INIT(tset)       (struct cgroup_taskset){        \
2418        .src_csets              = LIST_HEAD_INIT(tset.src_csets),       \
2419        .dst_csets              = LIST_HEAD_INIT(tset.dst_csets),       \
2420        .csets                  = &tset.src_csets,                      \
2421}
2422
2423/**
2424 * cgroup_taskset_add - try to add a migration target task to a taskset
2425 * @task: target task
2426 * @tset: target taskset
2427 *
2428 * Add @task, which is a migration target, to @tset.  This function becomes
2429 * a noop if @task doesn't need to be migrated.  @task's css_set should have
2430 * been added as a migration source and @task->cg_list will be moved from
2431 * the css_set's tasks list to its mg_tasks list.
2432 */
2433static void cgroup_taskset_add(struct task_struct *task,
2434                               struct cgroup_taskset *tset)
2435{
2436        struct css_set *cset;
2437
2438        lockdep_assert_held(&css_set_lock);
2439
2440        /* @task either already exited or can't exit until the end */
2441        if (task->flags & PF_EXITING)
2442                return;
2443
2444        /* leave @task alone if post_fork() hasn't linked it yet */
2445        if (list_empty(&task->cg_list))
2446                return;
2447
2448        cset = task_css_set(task);
2449        if (!cset->mg_src_cgrp)
2450                return;
2451
2452        list_move_tail(&task->cg_list, &cset->mg_tasks);
2453        if (list_empty(&cset->mg_node))
2454                list_add_tail(&cset->mg_node, &tset->src_csets);
2455        if (list_empty(&cset->mg_dst_cset->mg_node))
2456                list_move_tail(&cset->mg_dst_cset->mg_node,
2457                               &tset->dst_csets);
2458}
2459
2460/**
2461 * cgroup_taskset_first - reset taskset and return the first task
2462 * @tset: taskset of interest
2463 * @dst_cssp: output variable for the destination css
2464 *
2465 * @tset iteration is initialized and the first task is returned.
2466 */
2467struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset,
2468                                         struct cgroup_subsys_state **dst_cssp)
2469{
2470        tset->cur_cset = list_first_entry(tset->csets, struct css_set, mg_node);
2471        tset->cur_task = NULL;
2472
2473        return cgroup_taskset_next(tset, dst_cssp);
2474}
2475
2476/**
2477 * cgroup_taskset_next - iterate to the next task in taskset
2478 * @tset: taskset of interest
2479 * @dst_cssp: output variable for the destination css
2480 *
2481 * Return the next task in @tset.  Iteration must have been initialized
2482 * with cgroup_taskset_first().
2483 */
2484struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset,
2485                                        struct cgroup_subsys_state **dst_cssp)
2486{
2487        struct css_set *cset = tset->cur_cset;
2488        struct task_struct *task = tset->cur_task;
2489
2490        while (&cset->mg_node != tset->csets) {
2491                if (!task)
2492                        task = list_first_entry(&cset->mg_tasks,
2493                                                struct task_struct, cg_list);
2494                else
2495                        task = list_next_entry(task, cg_list);
2496
2497                if (&task->cg_list != &cset->mg_tasks) {
2498                        tset->cur_cset = cset;
2499                        tset->cur_task = task;
2500
2501                        /*
2502                         * This function may be called both before and
2503                         * after cgroup_taskset_migrate().  The two cases
2504                         * can be distinguished by looking at whether @cset
2505                         * has its ->mg_dst_cset set.
2506                         */
2507                        if (cset->mg_dst_cset)
2508                                *dst_cssp = cset->mg_dst_cset->subsys[tset->ssid];
2509                        else
2510                                *dst_cssp = cset->subsys[tset->ssid];
2511
2512                        return task;
2513                }
2514
2515                cset = list_next_entry(cset, mg_node);
2516                task = NULL;
2517        }
2518
2519        return NULL;
2520}
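
/*
 * An illustrative sketch: a controller's ->attach() callback walking the
 * taskset with cgroup_taskset_for_each() from include/linux/cgroup.h,
 * which wraps the first/next iterators above (example_attach() is
 * invented for illustration).
 *
 *	static void example_attach(struct cgroup_taskset *tset)
 *	{
 *		struct task_struct *task;
 *		struct cgroup_subsys_state *css;
 *
 *		cgroup_taskset_for_each(task, css, tset) {
 *			... adjust per-task state towards css ...
 *		}
 *	}
 */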
2521
2522/**
2523 * cgroup_taskset_migrate - migrate a taskset
2524 * @tset: target taskset
2525 * @root: cgroup root the migration is taking place on
2526 *
2527 * Migrate tasks in @tset as setup by migration preparation functions.
2528 * This function fails iff one of the ->can_attach callbacks fails and
2529 * guarantees that either all or none of the tasks in @tset are migrated.
2530 * @tset is consumed regardless of success.
2531 */
2532static int cgroup_taskset_migrate(struct cgroup_taskset *tset,
2533                                  struct cgroup_root *root)
2534{
2535        struct cgroup_subsys *ss;
2536        struct task_struct *task, *tmp_task;
2537        struct css_set *cset, *tmp_cset;
2538        int ssid, failed_ssid, ret;
2539
2540        /* methods shouldn't be called if no task is actually migrating */
2541        if (list_empty(&tset->src_csets))
2542                return 0;
2543
2544        /* check that we can legitimately attach to the cgroup */
2545        do_each_subsys_mask(ss, ssid, root->subsys_mask) {
2546                if (ss->can_attach) {
2547                        tset->ssid = ssid;
2548                        ret = ss->can_attach(tset);
2549                        if (ret) {
2550                                failed_ssid = ssid;
2551                                goto out_cancel_attach;
2552                        }
2553                }
2554        } while_each_subsys_mask();
2555
2556        /*
2557         * Now that we're guaranteed success, proceed to move all tasks to
2558         * the new cgroup.  There are no failure cases after here, so this
2559         * is the commit point.
2560         */
2561        spin_lock_irq(&css_set_lock);
2562        list_for_each_entry(cset, &tset->src_csets, mg_node) {
2563                list_for_each_entry_safe(task, tmp_task, &cset->mg_tasks, cg_list) {
2564                        struct css_set *from_cset = task_css_set(task);
2565                        struct css_set *to_cset = cset->mg_dst_cset;
2566
2567                        get_css_set(to_cset);
2568                        css_set_move_task(task, from_cset, to_cset, true);
2569                        put_css_set_locked(from_cset);
2570                }
2571        }
2572        spin_unlock_irq(&css_set_lock);
2573
2574        /*
2575         * Migration is committed, all target tasks are now on dst_csets.
2576         * Nothing is sensitive to fork() after this point.  Notify
2577         * controllers that migration is complete.
2578         */
2579        tset->csets = &tset->dst_csets;
2580
2581        do_each_subsys_mask(ss, ssid, root->subsys_mask) {
2582                if (ss->attach) {
2583                        tset->ssid = ssid;
2584                        ss->attach(tset);
2585                }
2586        } while_each_subsys_mask();
2587
2588        ret = 0;
2589        goto out_release_tset;
2590
2591out_cancel_attach:
2592        do_each_subsys_mask(ss, ssid, root->subsys_mask) {
2593                if (ssid == failed_ssid)
2594                        break;
2595                if (ss->cancel_attach) {
2596                        tset->ssid = ssid;
2597                        ss->cancel_attach(tset);
2598                }
2599        } while_each_subsys_mask();
2600out_release_tset:
2601        spin_lock_irq(&css_set_lock);
2602        list_splice_init(&tset->dst_csets, &tset->src_csets);
2603        list_for_each_entry_safe(cset, tmp_cset, &tset->src_csets, mg_node) {
2604                list_splice_tail_init(&cset->mg_tasks, &cset->tasks);
2605                list_del_init(&cset->mg_node);
2606        }
2607        spin_unlock_irq(&css_set_lock);
2608        return ret;
2609}
2610
2611/**
2612 * cgroup_may_migrate_to - verify whether a cgroup can be migration destination
2613 * @dst_cgrp: destination cgroup to test
2614 *
2615 * On the default hierarchy, except for the root, subtree_control must be
2616 * zero for migration destination cgroups with tasks so that child cgroups
2617 * don't compete against tasks.
2618 */
2619static bool cgroup_may_migrate_to(struct cgroup *dst_cgrp)
2620{
2621        return !cgroup_on_dfl(dst_cgrp) || !cgroup_parent(dst_cgrp) ||
2622                !dst_cgrp->subtree_control;
2623}
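
/*
 * For example (illustrative shell session), on the default hierarchy:
 *
 *	echo +memory > /sys/fs/cgroup/parent/cgroup.subtree_control
 *	echo $$ > /sys/fs/cgroup/parent/cgroup.procs	# fails with EBUSY
 *
 * because a non-root cgroup with a non-empty subtree_control delegates
 * controllers to its children and thus may not host tasks itself.
 */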
2624
2625/**
2626 * cgroup_migrate_finish - cleanup after attach
2627 * @preloaded_csets: list of preloaded css_sets
2628 *
2629 * Undo cgroup_migrate_add_src() and cgroup_migrate_prepare_dst().  See
2630 * those functions for details.
2631 */
2632static void cgroup_migrate_finish(struct list_head *preloaded_csets)
2633{
2634        struct css_set *cset, *tmp_cset;
2635
2636        lockdep_assert_held(&cgroup_mutex);
2637
2638        spin_lock_irq(&css_set_lock);
2639        list_for_each_entry_safe(cset, tmp_cset, preloaded_csets, mg_preload_node) {
2640                cset->mg_src_cgrp = NULL;
2641                cset->mg_dst_cgrp = NULL;
2642                cset->mg_dst_cset = NULL;
2643                list_del_init(&cset->mg_preload_node);
2644                put_css_set_locked(cset);
2645        }
2646        spin_unlock_irq(&css_set_lock);
2647}
2648
2649/**
2650 * cgroup_migrate_add_src - add a migration source css_set
2651 * @src_cset: the source css_set to add
2652 * @dst_cgrp: the destination cgroup
2653 * @preloaded_csets: list of preloaded css_sets
2654 *
2655 * Tasks belonging to @src_cset are about to be migrated to @dst_cgrp.  Pin
2656 * @src_cset and add it to @preloaded_csets, which should later be cleaned
2657 * up by cgroup_migrate_finish().
2658 *
2659 * This function may be called without holding cgroup_threadgroup_rwsem
2660 * even if the target is a process.  Threads may be created and destroyed
2661 * but as long as cgroup_mutex is not dropped, no new css_set can be put
2662 * into play and the preloaded css_sets are guaranteed to cover all
2663 * migrations.
2664 */
2665static void cgroup_migrate_add_src(struct css_set *src_cset,
2666                                   struct cgroup *dst_cgrp,
2667                                   struct list_head *preloaded_csets)
2668{
2669        struct cgroup *src_cgrp;
2670
2671        lockdep_assert_held(&cgroup_mutex);
2672        lockdep_assert_held(&css_set_lock);
2673
2674        /*
2675         * If ->dead, @src_cset is associated with one or more dead cgroups
2676         * and doesn't contain any migratable tasks.  Ignore it early so
2677         * that the rest of migration path doesn't get confused by it.
2678         */
2679        if (src_cset->dead)
2680                return;
2681
2682        src_cgrp = cset_cgroup_from_root(src_cset, dst_cgrp->root);
2683
2684        if (!list_empty(&src_cset->mg_preload_node))
2685                return;
2686
2687        WARN_ON(src_cset->mg_src_cgrp);
2688        WARN_ON(src_cset->mg_dst_cgrp);
2689        WARN_ON(!list_empty(&src_cset->mg_tasks));
2690        WARN_ON(!list_empty(&src_cset->mg_node));
2691
2692        src_cset->mg_src_cgrp = src_cgrp;
2693        src_cset->mg_dst_cgrp = dst_cgrp;
2694        get_css_set(src_cset);
2695        list_add(&src_cset->mg_preload_node, preloaded_csets);
2696}
2697
2698/**
2699 * cgroup_migrate_prepare_dst - prepare destination css_sets for migration
2700 * @preloaded_csets: list of preloaded source css_sets
2701 *
2702 * Tasks are about to be moved and all the source css_sets have been
2703 * preloaded to @preloaded_csets.  This function looks up and pins all
2704 * destination css_sets, links each to its source, and appends them to
2705 * @preloaded_csets.
2706 *
2707 * This function must be called after cgroup_migrate_add_src() has been
2708 * called on each migration source css_set.  After migration is performed
2709 * using cgroup_migrate(), cgroup_migrate_finish() must be called on
2710 * @preloaded_csets.
2711 */
2712static int cgroup_migrate_prepare_dst(struct list_head *preloaded_csets)
2713{
2714        LIST_HEAD(csets);
2715        struct css_set *src_cset, *tmp_cset;
2716
2717        lockdep_assert_held(&cgroup_mutex);
2718
2719        /* look up the dst cset for each src cset and link it to src */
2720        list_for_each_entry_safe(src_cset, tmp_cset, preloaded_csets, mg_preload_node) {
2721                struct css_set *dst_cset;
2722
2723                dst_cset = find_css_set(src_cset, src_cset->mg_dst_cgrp);
2724                if (!dst_cset)
2725                        goto err;
2726
2727                WARN_ON_ONCE(src_cset->mg_dst_cset || dst_cset->mg_dst_cset);
2728
2729                /*
2730                 * If src cset equals dst, it's a noop.  Drop the src.
2731                 * cgroup_migrate() will skip the cset too.  Note that we
2732                 * can't handle src == dst as some nodes are used by both.
2733                 */
2734                if (src_cset == dst_cset) {
2735                        src_cset->mg_src_cgrp = NULL;
2736                        src_cset->mg_dst_cgrp = NULL;
2737                        list_del_init(&src_cset->mg_preload_node);
2738                        put_css_set(src_cset);
2739                        put_css_set(dst_cset);
2740                        continue;
2741                }
2742
2743                src_cset->mg_dst_cset = dst_cset;
2744
2745                if (list_empty(&dst_cset->mg_preload_node))
2746                        list_add(&dst_cset->mg_preload_node, &csets);
2747                else
2748                        put_css_set(dst_cset);
2749        }
2750
2751        list_splice_tail(&csets, preloaded_csets);
2752        return 0;
2753err:
2754        cgroup_migrate_finish(&csets);
2755        return -ENOMEM;
2756}
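
/*
 * Putting the pieces together, a migration runs roughly as below
 * (illustrative sketch; cgroup_attach_task() further down is the real
 * sequence for a single task):
 *
 *	LIST_HEAD(preloaded_csets);
 *	int ret;
 *
 *	spin_lock_irq(&css_set_lock);
 *	cgroup_migrate_add_src(task_css_set(task), dst_cgrp,
 *			       &preloaded_csets);
 *	spin_unlock_irq(&css_set_lock);
 *
 *	ret = cgroup_migrate_prepare_dst(&preloaded_csets);
 *	if (!ret)
 *		ret = cgroup_migrate(task, false, dst_cgrp->root);
 *	cgroup_migrate_finish(&preloaded_csets);
 */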
2757
2758/**
2759 * cgroup_migrate - migrate a process or task to a cgroup
2760 * @leader: the leader of the process or the task to migrate
2761 * @threadgroup: whether @leader points to the whole process or a single task
2762 * @root: cgroup root migration is taking place on
2763 *
2764 * Migrate a process or task denoted by @leader.  If migrating a process,
2765 * the caller must be holding cgroup_threadgroup_rwsem.  The caller is also
2766 * responsible for invoking cgroup_migrate_add_src() and
2767 * cgroup_migrate_prepare_dst() on the targets before invoking this
2768 * function and following up with cgroup_migrate_finish().
2769 *
2770 * As long as a controller's ->can_attach() doesn't fail, this function is
2771 * guaranteed to succeed.  This means that, excluding ->can_attach()
2772 * failure, when migrating multiple targets, the success or failure can be
2773 * decided for all targets by invoking cgroup_migrate_prepare_dst() before
2774 * actually starting the migration.
2775 */
2776static int cgroup_migrate(struct task_struct *leader, bool threadgroup,
2777                          struct cgroup_root *root)
2778{
2779        struct cgroup_taskset tset = CGROUP_TASKSET_INIT(tset);
2780        struct task_struct *task;
2781
2782        /*
2783         * Prevent freeing of tasks while we take a snapshot. Tasks that are
2784         * already PF_EXITING could be freed from underneath us unless we
2785         * take an rcu_read_lock.
2786         */
2787        spin_lock_irq(&css_set_lock);
2788        rcu_read_lock();
2789        task = leader;
2790        do {
2791                cgroup_taskset_add(task, &tset);
2792                if (!threadgroup)
2793                        break;
2794        } while_each_thread(leader, task);
2795        rcu_read_unlock();
2796        spin_unlock_irq(&css_set_lock);
2797
2798        return cgroup_taskset_migrate(&tset, root);
2799}
2800
2801/**
2802 * cgroup_attach_task - attach a task or a whole threadgroup to a cgroup
2803 * @dst_cgrp: the cgroup to attach to
2804 * @leader: the task or the leader of the threadgroup to be attached
2805 * @threadgroup: attach the whole threadgroup?
2806 *
2807 * Call holding cgroup_mutex and cgroup_threadgroup_rwsem.
2808 */
2809static int cgroup_attach_task(struct cgroup *dst_cgrp,
2810                              struct task_struct *leader, bool threadgroup)
2811{
2812        LIST_HEAD(preloaded_csets);
2813        struct task_struct *task;
2814        int ret;
2815
2816        if (!cgroup_may_migrate_to(dst_cgrp))
2817                return -EBUSY;
2818
2819        /* look up all src csets */
2820        spin_lock_irq(&css_set_lock);
2821        rcu_read_lock();
2822        task = leader;
2823        do {
2824                cgroup_migrate_add_src(task_css_set(task), dst_cgrp,
2825                                       &preloaded_csets);
2826                if (!threadgroup)
2827                        break;
2828        } while_each_thread(leader, task);
2829        rcu_read_unlock();
2830        spin_unlock_irq(&css_set_lock);
2831
2832        /* prepare dst csets and commit */
2833        ret = cgroup_migrate_prepare_dst(&preloaded_csets);
2834        if (!ret)
2835                ret = cgroup_migrate(leader, threadgroup, dst_cgrp->root);
2836
2837        cgroup_migrate_finish(&preloaded_csets);
2838
2839        if (!ret)
2840                trace_cgroup_attach_task(dst_cgrp, leader, threadgroup);
2841
2842        return ret;
2843}
2844
2845static int cgroup_procs_write_permission(struct task_struct *task,
2846                                         struct cgroup *dst_cgrp,
2847                                         struct kernfs_open_file *of)
2848{
2849        const struct cred *cred = current_cred();
2850        const struct cred *tcred = get_task_cred(task);
2851        int ret = 0;
2852
2853        /*
2854         * even if we're attaching all tasks in the thread group, we only
2855         * need to check permissions on one of them.
2856         */
2857        if (!uid_eq(cred->euid, GLOBAL_ROOT_UID) &&
2858            !uid_eq(cred->euid, tcred->uid) &&
2859            !uid_eq(cred->euid, tcred->suid))
2860                ret = -EACCES;
2861
2862        if (!ret && cgroup_on_dfl(dst_cgrp)) {
2863                struct super_block *sb = of->file->f_path.dentry->d_sb;
2864                struct cgroup *cgrp;
2865                struct inode *inode;
2866
2867                spin_lock_irq(&css_set_lock);
2868                cgrp = task_cgroup_from_root(task, &cgrp_dfl_root);
2869                spin_unlock_irq(&css_set_lock);
2870
2871                while (!cgroup_is_descendant(dst_cgrp, cgrp))
2872                        cgrp = cgroup_parent(cgrp);
2873
2874                ret = -ENOMEM;
2875                inode = kernfs_get_inode(sb, cgrp->procs_file.kn);
2876                if (inode) {
2877                        ret = inode_permission(inode, MAY_WRITE);
2878                        iput(inode);
2879                }
2880        }
2881
2882        put_cred(tcred);
2883        return ret;
2884}
2885
2886/*
2887 * Find the task_struct of the task to attach by vpid and pass it along to the
2888 * function to attach either it or all tasks in its threadgroup.  Locks
2889 * cgroup_mutex and cgroup_threadgroup_rwsem.
2890 */
2891static ssize_t __cgroup_procs_write(struct kernfs_open_file *of, char *buf,
2892                                    size_t nbytes, loff_t off, bool threadgroup)
2893{
2894        struct task_struct *tsk;
2895        struct cgroup_subsys *ss;
2896        struct cgroup *cgrp;
2897        pid_t pid;
2898        int ssid, ret;
2899
2900        if (kstrtoint(strstrip(buf), 0, &pid) || pid < 0)
2901                return -EINVAL;
2902
2903        cgrp = cgroup_kn_lock_live(of->kn, false);
2904        if (!cgrp)
2905                return -ENODEV;
2906
2907        percpu_down_write(&cgroup_threadgroup_rwsem);
2908        rcu_read_lock();
2909        if (pid) {
2910                tsk = find_task_by_vpid(pid);
2911                if (!tsk) {
2912                        ret = -ESRCH;
2913                        goto out_unlock_rcu;
2914                }
2915        } else {
2916                tsk = current;
2917        }
2918
2919        if (threadgroup)
2920                tsk = tsk->group_leader;
2921
2922        /*
2923         * kthreads may acquire PF_NO_SETAFFINITY during initialization.
2924         * If userland migrates such a kthread to a non-root cgroup, it can
2925         * become trapped in a cpuset, or an RT kthread may end up in a
2926         * cgroup with no rt_runtime allocated.  Just say no.
2927         */
2928        if (tsk->no_cgroup_migration || (tsk->flags & PF_NO_SETAFFINITY)) {
2929                ret = -EINVAL;
2930                goto out_unlock_rcu;
2931        }
2932
2933        get_task_struct(tsk);
2934        rcu_read_unlock();
2935
2936        ret = cgroup_procs_write_permission(tsk, cgrp, of);
2937        if (!ret)
2938                ret = cgroup_attach_task(cgrp, tsk, threadgroup);
2939
2940        put_task_struct(tsk);
2941        goto out_unlock_threadgroup;
2942
2943out_unlock_rcu:
2944        rcu_read_unlock();
2945out_unlock_threadgroup:
2946        percpu_up_write(&cgroup_threadgroup_rwsem);
2947        for_each_subsys(ss, ssid)
2948                if (ss->post_attach)
2949                        ss->post_attach();
2950        cgroup_kn_unlock(of->kn);
2951        return ret ?: nbytes;
2952}
2953
2954/**
2955 * cgroup_attach_task_all - attach task 'tsk' to all cgroups of task 'from'
2956 * @from: attach to all cgroups of a given task
2957 * @tsk: the task to be attached
2958 */
2959int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk)
2960{
2961        struct cgroup_root *root;
2962        int retval = 0;
2963
2964        mutex_lock(&cgroup_mutex);
2965        percpu_down_write(&cgroup_threadgroup_rwsem);
2966        for_each_root(root) {
2967                struct cgroup *from_cgrp;
2968
2969                if (root == &cgrp_dfl_root)
2970                        continue;
2971
2972                spin_lock_irq(&css_set_lock);
2973                from_cgrp = task_cgroup_from_root(from, root);
2974                spin_unlock_irq(&css_set_lock);
2975
2976                retval = cgroup_attach_task(from_cgrp, tsk, false);
2977                if (retval)
2978                        break;
2979        }
2980        percpu_up_write(&cgroup_threadgroup_rwsem);
2981        mutex_unlock(&cgroup_mutex);
2982
2983        return retval;
2984}
2985EXPORT_SYMBOL_GPL(cgroup_attach_task_all);
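
/*
 * A sketch of the intended use of cgroup_attach_task_all(): a kthread
 * doing work on behalf of a userspace process can be moved into all of
 * that process' cgroups so its work is accounted there.  The names below
 * are hypothetical (drivers/vhost does something very similar for its
 * worker thread):
 *
 *	worker = kthread_create(example_worker_fn, dev, "example-worker");
 *	if (!IS_ERR(worker))
 *		cgroup_attach_task_all(current, worker);
 */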
2986
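/*
 * Illustrative only: "tasks" moves a single thread while "cgroup.procs"
 * moves the whole threadgroup via the group leader.  From userspace
 * (the mount point below is an assumption, not fixed by the kernel):
 *
 *	int fd = open("/sys/fs/cgroup/cpu/mygrp/cgroup.procs", O_WRONLY);
 *	dprintf(fd, "%d", pid);		(writing "0" attaches current)
 *	close(fd);
 */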
2987static ssize_t cgroup_tasks_write(struct kernfs_open_file *of,
2988                                  char *buf, size_t nbytes, loff_t off)
2989{
2990        return __cgroup_procs_write(of, buf, nbytes, off, false);
2991}
2992
2993static ssize_t cgroup_procs_write(struct kernfs_open_file *of,
2994                                  char *buf, size_t nbytes, loff_t off)
2995{
2996        return __cgroup_procs_write(of, buf, nbytes, off, true);
2997}
2998
2999static ssize_t cgroup_release_agent_write(struct kernfs_open_file *of,
3000                                          char *buf, size_t nbytes, loff_t off)
3001{
3002        struct cgroup *cgrp;
3003
3004        BUILD_BUG_ON(sizeof(cgrp->root->release_agent_path) < PATH_MAX);
3005
3006        cgrp = cgroup_kn_lock_live(of->kn, false);
3007        if (!cgrp)
3008                return -ENODEV;
3009        spin_lock(&release_agent_path_lock);
3010        strlcpy(cgrp->root->release_agent_path, strstrip(buf),
3011                sizeof(cgrp->root->release_agent_path));
3012        spin_unlock(&release_agent_path_lock);
3013        cgroup_kn_unlock(of->kn);
3014        return nbytes;
3015}
3016
3017static int cgroup_release_agent_show(struct seq_file *seq, void *v)
3018{
3019        struct cgroup *cgrp = seq_css(seq)->cgroup;
3020
3021        spin_lock(&release_agent_path_lock);
3022        seq_puts(seq, cgrp->root->release_agent_path);
3023        spin_unlock(&release_agent_path_lock);
3024        seq_putc(seq, '\n');
3025        return 0;
3026}
3027
3028static int cgroup_sane_behavior_show(struct seq_file *seq, void *v)
3029{
3030        seq_puts(seq, "0\n");
3031        return 0;
3032}
3033
3034static void cgroup_print_ss_mask(struct seq_file *seq, u16 ss_mask)
3035{
3036        struct cgroup_subsys *ss;
3037        bool printed = false;
3038        int ssid;
3039
3040        do_each_subsys_mask(ss, ssid, ss_mask) {
3041                if (printed)
3042                        seq_putc(seq, ' ');
3043                seq_printf(seq, "%s", ss->name);
3044                printed = true;
3045        } while_each_subsys_mask();
3046        if (printed)
3047                seq_putc(seq, '\n');
3048}
3049
3050/* show controllers which are enabled from the parent */
3051static int cgroup_controllers_show(struct seq_file *seq, void *v)
3052{
3053        struct cgroup *cgrp = seq_css(seq)->cgroup;
3054
3055        cgroup_print_ss_mask(seq, cgroup_control(cgrp));
3056        return 0;
3057}
3058
3059/* show controllers which are enabled for a given cgroup's children */
3060static int cgroup_subtree_control_show(struct seq_file *seq, void *v)
3061{
3062        struct cgroup *cgrp = seq_css(seq)->cgroup;
3063
3064        cgroup_print_ss_mask(seq, cgrp->subtree_control);
3065        return 0;
3066}
3067
3068/**
3069 * cgroup_update_dfl_csses - update css assoc of a subtree in default hierarchy
3070 * @cgrp: root of the subtree to update csses for
3071 *
3072 * @cgrp's control masks have changed and its subtree's css associations
3073 * need to be updated accordingly.  This function looks up all css_sets
3074 * which are attached to the subtree, creates the matching updated css_sets
3075 * and migrates the tasks to the new ones.
3076 */
3077static int cgroup_update_dfl_csses(struct cgroup *cgrp)
3078{
3079        LIST_HEAD(preloaded_csets);
3080        struct cgroup_taskset tset = CGROUP_TASKSET_INIT(tset);
3081        struct cgroup_subsys_state *d_css;
3082        struct cgroup *dsct;
3083        struct css_set *src_cset;
3084        int ret;
3085
3086        lockdep_assert_held(&cgroup_mutex);
3087
3088        percpu_down_write(&cgroup_threadgroup_rwsem);
3089
3090        /* look up all csses currently attached to @cgrp's subtree */
3091        spin_lock_irq(&css_set_lock);
3092        cgroup_for_each_live_descendant_pre(dsct, d_css, cgrp) {
3093                struct cgrp_cset_link *link;
3094
3095                list_for_each_entry(link, &dsct->cset_links, cset_link)
3096                        cgroup_migrate_add_src(link->cset, dsct,
3097                                               &preloaded_csets);
3098        }
3099        spin_unlock_irq(&css_set_lock);
3100
3101        /* NULL dst indicates self on default hierarchy */
3102        ret = cgroup_migrate_prepare_dst(&preloaded_csets);
3103        if (ret)
3104                goto out_finish;
3105
3106        spin_lock_irq(&css_set_lock);
3107        list_for_each_entry(src_cset, &preloaded_csets, mg_preload_node) {
3108                struct task_struct *task, *ntask;
3109
3110                /* src_csets precede dst_csets, break on the first dst_cset */
3111                if (!src_cset->mg_src_cgrp)
3112                        break;
3113
3114                /* all tasks in src_csets need to be migrated */
3115                list_for_each_entry_safe(task, ntask, &src_cset->tasks, cg_list)
3116                        cgroup_taskset_add(task, &tset);
3117        }
3118        spin_unlock_irq(&css_set_lock);
3119
3120        ret = cgroup_taskset_migrate(&tset, cgrp->root);
3121out_finish:
3122        cgroup_migrate_finish(&preloaded_csets);
3123        percpu_up_write(&cgroup_threadgroup_rwsem);
3124        return ret;
3125}
3126
3127/**
3128 * cgroup_lock_and_drain_offline - lock cgroup_mutex and drain offlined csses
3129 * @cgrp: root of the target subtree
3130 *
3131 * Because css offlining is asynchronous, userland may try to re-enable a
3132 * controller while the previous css is still around.  This function grabs
3133 * cgroup_mutex and drains the previous css instances of @cgrp's subtree.
3134 */
3135static void cgroup_lock_and_drain_offline(struct cgroup *cgrp)
3136        __acquires(&cgroup_mutex)
3137{
3138        struct cgroup *dsct;
3139        struct cgroup_subsys_state *d_css;
3140        struct cgroup_subsys *ss;
3141        int ssid;
3142
3143restart:
3144        mutex_lock(&cgroup_mutex);
3145
3146        cgroup_for_each_live_descendant_post(dsct, d_css, cgrp) {
3147                for_each_subsys(ss, ssid) {
3148                        struct cgroup_subsys_state *css = cgroup_css(dsct, ss);
3149                        DEFINE_WAIT(wait);
3150
3151                        if (!css || !percpu_ref_is_dying(&css->refcnt))
3152                                continue;
3153
3154                        cgroup_get(dsct);
3155                        prepare_to_wait(&dsct->offline_waitq, &wait,
3156                                        TASK_UNINTERRUPTIBLE);
3157
3158                        mutex_unlock(&cgroup_mutex);
3159                        schedule();
3160                        finish_wait(&dsct->offline_waitq, &wait);
3161
3162                        cgroup_put(dsct);
3163                        goto restart;
3164                }
3165        }
3166}
3167
3168/**
3169 * cgroup_save_control - save control masks of a subtree
3170 * @cgrp: root of the target subtree
3171 *
3172 * Save ->subtree_control and ->subtree_ss_mask to the respective old_
3173 * prefixed fields for @cgrp's subtree including @cgrp itself.
3174 */
3175static void cgroup_save_control(struct cgroup *cgrp)
3176{
3177        struct cgroup *dsct;
3178        struct cgroup_subsys_state *d_css;
3179
3180        cgroup_for_each_live_descendant_pre(dsct, d_css, cgrp) {
3181                dsct->old_subtree_control = dsct->subtree_control;
3182                dsct->old_subtree_ss_mask = dsct->subtree_ss_mask;
3183        }
3184}
3185
3186/**
3187 * cgroup_propagate_control - refresh control masks of a subtree
3188 * @cgrp: root of the target subtree
3189 *
3190 * For @cgrp and its subtree, ensure ->subtree_ss_mask matches
3191 * ->subtree_control and propagate controller availability through the
3192 * subtree so that descendants don't have unavailable controllers enabled.
3193 */
3194static void cgroup_propagate_control(struct cgroup *cgrp)
3195{
3196        struct cgroup *dsct;
3197        struct cgroup_subsys_state *d_css;
3198
3199        cgroup_for_each_live_descendant_pre(dsct, d_css, cgrp) {
3200                dsct->subtree_control &= cgroup_control(dsct);
3201                dsct->subtree_ss_mask =
3202                        cgroup_calc_subtree_ss_mask(dsct->subtree_control,
3203                                                    cgroup_ss_mask(dsct));
3204        }
3205}
3206
3207/**
3208 * cgroup_restore_control - restore control masks of a subtree
3209 * @cgrp: root of the target subtree
3210 *
3211 * Restore ->subtree_control and ->subtree_ss_mask from the respective old_
3212 * prefixed fields for @cgrp's subtree including @cgrp itself.
3213 */
3214static void cgroup_restore_control(struct cgroup *cgrp)
3215{
3216        struct cgroup *dsct;
3217        struct cgroup_subsys_state *d_css;
3218
3219        cgroup_for_each_live_descendant_post(dsct, d_css, cgrp) {
3220                dsct->subtree_control = dsct->old_subtree_control;
3221                dsct->subtree_ss_mask = dsct->old_subtree_ss_mask;
3222        }
3223}
3224
3225static bool css_visible(struct cgroup_subsys_state *css)
3226{
3227        struct cgroup_subsys *ss = css->ss;
3228        struct cgroup *cgrp = css->cgroup;
3229
3230        if (cgroup_control(cgrp) & (1 << ss->id))
3231                return true;
3232        if (!(cgroup_ss_mask(cgrp) & (1 << ss->id)))
3233                return false;
3234        return cgroup_on_dfl(cgrp) && ss->implicit_on_dfl;
3235}
3236
3237/**
3238 * cgroup_apply_control_enable - enable or show csses according to control
3239 * @cgrp: root of the target subtree
3240 *
3241 * Walk @cgrp's subtree and create new csses or make the existing ones
3242 * visible.  A css is created invisible if it's being implicitly enabled
3243 * through dependency.  An invisible css is made visible when the userland
3244 * explicitly enables it.
3245 *
3246 * Returns 0 on success, -errno on failure.  On failure, csses which have
3247 * been processed already aren't cleaned up.  The caller is responsible for
3248 * cleaning up with cgroup_apply_control_disable().
3249 */
3250static int cgroup_apply_control_enable(struct cgroup *cgrp)
3251{
3252        struct cgroup *dsct;
3253        struct cgroup_subsys_state *d_css;
3254        struct cgroup_subsys *ss;
3255        int ssid, ret;
3256
3257        cgroup_for_each_live_descendant_pre(dsct, d_css, cgrp) {
3258                for_each_subsys(ss, ssid) {
3259                        struct cgroup_subsys_state *css = cgroup_css(dsct, ss);
3260
3261                        WARN_ON_ONCE(css && percpu_ref_is_dying(&css->refcnt));
3262
3263                        if (!(cgroup_ss_mask(dsct) & (1 << ss->id)))
3264                                continue;
3265
3266                        if (!css) {
3267                                css = css_create(dsct, ss);
3268                                if (IS_ERR(css))
3269                                        return PTR_ERR(css);
3270                        }
3271
3272                        if (css_visible(css)) {
3273                                ret = css_populate_dir(css);
3274                                if (ret)
3275                                        return ret;
3276                        }
3277                }
3278        }
3279
3280        return 0;
3281}
3282
3283/**
3284 * cgroup_apply_control_disable - kill or hide csses according to control
3285 * @cgrp: root of the target subtree
3286 *
3287 * Walk @cgrp's subtree and kill and hide csses so that they match
3288 * cgroup_ss_mask() and cgroup_visible_mask().
3289 *
3290 * A css is hidden when the userland requests it to be disabled while other
3291 * subsystems are still depending on it.  The css must not actively control
3292 * resources and must be in the vanilla state if it's made visible again later.
3293 * Controllers which may be depended upon should provide ->css_reset() for
3294 * this purpose.
3295 */
3296static void cgroup_apply_control_disable(struct cgroup *cgrp)
3297{
3298        struct cgroup *dsct;
3299        struct cgroup_subsys_state *d_css;
3300        struct cgroup_subsys *ss;
3301        int ssid;
3302
3303        cgroup_for_each_live_descendant_post(dsct, d_css, cgrp) {
3304                for_each_subsys(ss, ssid) {
3305                        struct cgroup_subsys_state *css = cgroup_css(dsct, ss);
3306
3307                        WARN_ON_ONCE(css && percpu_ref_is_dying(&css->refcnt));
3308
3309                        if (!css)
3310                                continue;
3311
3312                        if (css->parent &&
3313                            !(cgroup_ss_mask(dsct) & (1 << ss->id))) {
3314                                kill_css(css);
3315                        } else if (!css_visible(css)) {
3316                                css_clear_dir(css);
3317                                if (ss->css_reset)
3318                                        ss->css_reset(css);
3319                        }
3320                }
3321        }
3322}
3323
3324/**
3325 * cgroup_apply_control - apply control mask updates to the subtree
3326 * @cgrp: root of the target subtree
3327 *
3328 * Subsystems can be enabled and disabled in a subtree using the following
3329 * steps.
3330 *
3331 * 1. Call cgroup_save_control() to stash the current state.
3332 * 2. Update ->subtree_control masks in the subtree as desired.
3333 * 3. Call cgroup_apply_control() to apply the changes.
3334 * 4. Optionally perform other related operations.
3335 * 5. Call cgroup_finalize_control() to finish up.
3336 *
3337 * This function implements step 3 and propagates the mask changes
3338 * throughout @cgrp's subtree, updates csses accordingly and performs
3339 * process migrations.
3340 */
3341static int cgroup_apply_control(struct cgroup *cgrp)
3342{
3343        int ret;
3344
3345        cgroup_propagate_control(cgrp);
3346
3347        ret = cgroup_apply_control_enable(cgrp);
3348        if (ret)
3349                return ret;
3350
3351        /*
3352         * At this point, cgroup_e_css() results reflect the new csses,
3353         * allowing the following cgroup_update_dfl_csses() to properly update
3354         * css associations of all tasks in the subtree.
3355         */
3356        ret = cgroup_update_dfl_csses(cgrp);
3357        if (ret)
3358                return ret;
3359
3360        return 0;
3361}
3362
3363/**
3364 * cgroup_finalize_control - finalize control mask update
3365 * @cgrp: root of the target subtree
3366 * @ret: the result of the update
3367 *
3368 * Finalize control mask update.  See cgroup_apply_control() for more info.
3369 */
3370static void cgroup_finalize_control(struct cgroup *cgrp, int ret)
3371{
3372        if (ret) {
3373                cgroup_restore_control(cgrp);
3374                cgroup_propagate_control(cgrp);
3375        }
3376
3377        cgroup_apply_control_disable(cgrp);
3378}
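
/*
 * A condensed sketch of the save/apply/finalize sequence described above,
 * as used by cgroup_subtree_control_write() below (error handling and
 * locking elided):
 *
 *	cgroup_save_control(cgrp);		(step 1)
 *	cgrp->subtree_control |= enable;	(step 2)
 *	cgrp->subtree_control &= ~disable;
 *	ret = cgroup_apply_control(cgrp);	(step 3)
 *	...					(step 4, optional)
 *	cgroup_finalize_control(cgrp, ret);	(step 5)
 */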
3379
3380/* change the enabled child controllers for a cgroup in the default hierarchy */
3381static ssize_t cgroup_subtree_control_write(struct kernfs_open_file *of,
3382                                            char *buf, size_t nbytes,
3383                                            loff_t off)
3384{
3385        u16 enable = 0, disable = 0;
3386        struct cgroup *cgrp, *child;
3387        struct cgroup_subsys *ss;
3388        char *tok;
3389        int ssid, ret;
3390
3391        /*
3392         * Parse input - space separated list of subsystem names prefixed
3393         * with either + or -.
3394         */
3395        buf = strstrip(buf);
3396        while ((tok = strsep(&buf, " "))) {
3397                if (tok[0] == '\0')
3398                        continue;
3399                do_each_subsys_mask(ss, ssid, ~cgrp_dfl_inhibit_ss_mask) {
3400                        if (!cgroup_ssid_enabled(ssid) ||
3401                            strcmp(tok + 1, ss->name))
3402                                continue;
3403
3404                        if (*tok == '+') {
3405                                enable |= 1 << ssid;
3406                                disable &= ~(1 << ssid);
3407                        } else if (*tok == '-') {
3408                                disable |= 1 << ssid;
3409                                enable &= ~(1 << ssid);
3410                        } else {
3411                                return -EINVAL;
3412                        }
3413                        break;
3414                } while_each_subsys_mask();
3415                if (ssid == CGROUP_SUBSYS_COUNT)
3416                        return -EINVAL;
3417        }
3418
3419        cgrp = cgroup_kn_lock_live(of->kn, true);
3420        if (!cgrp)
3421                return -ENODEV;
3422
3423        for_each_subsys(ss, ssid) {
3424                if (enable & (1 << ssid)) {
3425                        if (cgrp->subtree_control & (1 << ssid)) {
3426                                enable &= ~(1 << ssid);
3427                                continue;
3428                        }
3429
3430                        if (!(cgroup_control(cgrp) & (1 << ssid))) {
3431                                ret = -ENOENT;
3432                                goto out_unlock;
3433                        }
3434                } else if (disable & (1 << ssid)) {
3435                        if (!(cgrp->subtree_control & (1 << ssid))) {
3436                                disable &= ~(1 << ssid);
3437                                continue;
3438                        }
3439
3440                        /* a child has it enabled? */
3441                        cgroup_for_each_live_child(child, cgrp) {
3442                                if (child->subtree_control & (1 << ssid)) {
3443                                        ret = -EBUSY;
3444                                        goto out_unlock;
3445                                }
3446                        }
3447                }
3448        }
3449
3450        if (!enable && !disable) {
3451                ret = 0;
3452                goto out_unlock;
3453        }
3454
3455        /*
3456         * Except for the root, subtree_control must be zero for a cgroup
3457         * with tasks so that child cgroups don't compete against tasks.
3458         */
3459        if (enable && cgroup_parent(cgrp)) {
3460                struct cgrp_cset_link *link;
3461
3462                /*
3463                 * Because namespaces pin csets too, @cgrp->cset_links
3464                 * might not be empty even when @cgrp is empty.  Walk and
3465                 * verify each cset.
3466                 */
3467                spin_lock_irq(&css_set_lock);
3468
3469                ret = 0;
3470                list_for_each_entry(link, &cgrp->cset_links, cset_link) {
3471                        if (css_set_populated(link->cset)) {
3472                                ret = -EBUSY;
3473                                break;
3474                        }
3475                }
3476
3477                spin_unlock_irq(&css_set_lock);
3478
3479                if (ret)
3480                        goto out_unlock;
3481        }
3482
3483        /* save and update control masks and prepare csses */
3484        cgroup_save_control(cgrp);
3485
3486        cgrp->subtree_control |= enable;
3487        cgrp->subtree_control &= ~disable;
3488
3489        ret = cgroup_apply_control(cgrp);
3490
3491        cgroup_finalize_control(cgrp, ret);
3492
3493        kernfs_activate(cgrp->kn);
3494        ret = 0;
3495out_unlock:
3496        cgroup_kn_unlock(of->kn);
3497        return ret ?: nbytes;
3498}
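
/*
 * Illustrative only: userspace drives the write handler above with space
 * separated "+name"/"-name" tokens, e.g. (path assumed):
 *
 *	echo "+memory -io" > /sys/fs/cgroup/mygrp/cgroup.subtree_control
 *
 * which sets the memory bit in @enable and the io bit in @disable before
 * the control masks are applied.
 */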
3499
3500static int cgroup_events_show(struct seq_file *seq, void *v)
3501{
3502        seq_printf(seq, "populated %d\n",
3503                   cgroup_is_populated(seq_css(seq)->cgroup));
3504        return 0;
3505}
3506
3507static ssize_t cgroup_file_write(struct kernfs_open_file *of, char *buf,
3508                                 size_t nbytes, loff_t off)
3509{
3510        struct cgroup *cgrp = of->kn->parent->priv;
3511        struct cftype *cft = of->kn->priv;
3512        struct cgroup_subsys_state *css;
3513        int ret;
3514
3515        if (cft->write)
3516                return cft->write(of, buf, nbytes, off);
3517
3518        /*
3519         * kernfs guarantees that a file isn't deleted with operations in
3520         * flight, which means that the matching css is and stays alive and
3521         * doesn't need to be pinned.  The RCU locking is not necessary
3522         * either.  It's just for the convenience of using cgroup_css().
3523         */
3524        rcu_read_lock();
3525        css = cgroup_css(cgrp, cft->ss);
3526        rcu_read_unlock();
3527
3528        if (cft->write_u64) {
3529                unsigned long long v;
3530                ret = kstrtoull(buf, 0, &v);
3531                if (!ret)
3532                        ret = cft->write_u64(css, cft, v);
3533        } else if (cft->write_s64) {
3534                long long v;
3535                ret = kstrtoll(buf, 0, &v);
3536                if (!ret)
3537                        ret = cft->write_s64(css, cft, v);
3538        } else {
3539                ret = -EINVAL;
3540        }
3541
3542        return ret ?: nbytes;
3543}
3544
3545static void *cgroup_seqfile_start(struct seq_file *seq, loff_t *ppos)
3546{
3547        return seq_cft(seq)->seq_start(seq, ppos);
3548}
3549
3550static void *cgroup_seqfile_next(struct seq_file *seq, void *v, loff_t *ppos)
3551{
3552        return seq_cft(seq)->seq_next(seq, v, ppos);
3553}
3554
3555static void cgroup_seqfile_stop(struct seq_file *seq, void *v)
3556{
3557        seq_cft(seq)->seq_stop(seq, v);
3558}
3559
3560static int cgroup_seqfile_show(struct seq_file *m, void *arg)
3561{
3562        struct cftype *cft = seq_cft(m);
3563        struct cgroup_subsys_state *css = seq_css(m);
3564
3565        if (cft->seq_show)
3566                return cft->seq_show(m, arg);
3567
3568        if (cft->read_u64)
3569                seq_printf(m, "%llu\n", cft->read_u64(css, cft));
3570        else if (cft->read_s64)
3571                seq_printf(m, "%lld\n", cft->read_s64(css, cft));
3572        else
3573                return -EINVAL;
3574        return 0;
3575}
3576
3577static struct kernfs_ops cgroup_kf_single_ops = {
3578        .atomic_write_len       = PAGE_SIZE,
3579        .write                  = cgroup_file_write,
3580        .seq_show               = cgroup_seqfile_show,
3581};
3582
3583static struct kernfs_ops cgroup_kf_ops = {
3584        .atomic_write_len       = PAGE_SIZE,
3585        .write                  = cgroup_file_write,
3586        .seq_start              = cgroup_seqfile_start,
3587        .seq_next               = cgroup_seqfile_next,
3588        .seq_stop               = cgroup_seqfile_stop,
3589        .seq_show               = cgroup_seqfile_show,
3590};
3591
3592/*
3593 * cgroup_rename - Only allow simple rename of directories in place.
3594 */
3595static int cgroup_rename(struct kernfs_node *kn, struct kernfs_node *new_parent,
3596                         const char *new_name_str)
3597{
3598        struct cgroup *cgrp = kn->priv;
3599        int ret;
3600
3601        if (kernfs_type(kn) != KERNFS_DIR)
3602                return -ENOTDIR;
3603        if (kn->parent != new_parent)
3604                return -EIO;
3605
3606        /*
3607         * This isn't a proper migration and its usefulness is very
3608         * limited.  Disallow on the default hierarchy.
3609         */
3610        if (cgroup_on_dfl(cgrp))
3611                return -EPERM;
3612
3613        /*
3614         * We're gonna grab cgroup_mutex which nests outside kernfs
3615         * active_ref.  kernfs_rename() doesn't require active_ref
3616         * protection.  Break them before grabbing cgroup_mutex.
3617         */
3618        kernfs_break_active_protection(new_parent);
3619        kernfs_break_active_protection(kn);
3620
3621        mutex_lock(&cgroup_mutex);
3622
3623        ret = kernfs_rename(kn, new_parent, new_name_str);
3624        if (!ret)
3625                trace_cgroup_rename(cgrp);
3626
3627        mutex_unlock(&cgroup_mutex);
3628
3629        kernfs_unbreak_active_protection(kn);
3630        kernfs_unbreak_active_protection(new_parent);
3631        return ret;
3632}
3633
3634/* set uid and gid of cgroup dirs and files to that of the creator */
3635static int cgroup_kn_set_ugid(struct kernfs_node *kn)
3636{
3637        struct iattr iattr = { .ia_valid = ATTR_UID | ATTR_GID,
3638                               .ia_uid = current_fsuid(),
3639                               .ia_gid = current_fsgid(), };
3640
3641        if (uid_eq(iattr.ia_uid, GLOBAL_ROOT_UID) &&
3642            gid_eq(iattr.ia_gid, GLOBAL_ROOT_GID))
3643                return 0;
3644
3645        return kernfs_setattr(kn, &iattr);
3646}
3647
3648static int cgroup_add_file(struct cgroup_subsys_state *css, struct cgroup *cgrp,
3649                           struct cftype *cft)
3650{
3651        char name[CGROUP_FILE_NAME_MAX];
3652        struct kernfs_node *kn;
3653        struct lock_class_key *key = NULL;
3654        int ret;
3655
3656#ifdef CONFIG_DEBUG_LOCK_ALLOC
3657        key = &cft->lockdep_key;
3658#endif
3659        kn = __kernfs_create_file(cgrp->kn, cgroup_file_name(cgrp, cft, name),
3660                                  cgroup_file_mode(cft), 0, cft->kf_ops, cft,
3661                                  NULL, key);
3662        if (IS_ERR(kn))
3663                return PTR_ERR(kn);
3664
3665        ret = cgroup_kn_set_ugid(kn);
3666        if (ret) {
3667                kernfs_remove(kn);
3668                return ret;
3669        }
3670
3671        if (cft->file_offset) {
3672                struct cgroup_file *cfile = (void *)css + cft->file_offset;
3673
3674                spin_lock_irq(&cgroup_file_kn_lock);
3675                cfile->kn = kn;
3676                spin_unlock_irq(&cgroup_file_kn_lock);
3677        }
3678
3679        return 0;
3680}
3681
3682/**
3683 * cgroup_addrm_files - add or remove files to a cgroup directory
3684 * @css: the target css
3685 * @cgrp: the target cgroup (usually css->cgroup)
3686 * @cfts: array of cftypes to be added
3687 * @is_add: whether to add or remove
3688 *
3689 * Depending on @is_add, add or remove files defined by @cfts on @cgrp.
3690 * For removals, this function never fails.
3691 */
3692static int cgroup_addrm_files(struct cgroup_subsys_state *css,
3693                              struct cgroup *cgrp, struct cftype cfts[],
3694                              bool is_add)
3695{
3696        struct cftype *cft, *cft_end = NULL;
3697        int ret = 0;
3698
3699        lockdep_assert_held(&cgroup_mutex);
3700
3701restart:
3702        for (cft = cfts; cft != cft_end && cft->name[0] != '\0'; cft++) {
3703                /* does cft->flags tell us to skip this file on @cgrp? */
3704                if ((cft->flags & __CFTYPE_ONLY_ON_DFL) && !cgroup_on_dfl(cgrp))
3705                        continue;
3706                if ((cft->flags & __CFTYPE_NOT_ON_DFL) && cgroup_on_dfl(cgrp))
3707                        continue;
3708                if ((cft->flags & CFTYPE_NOT_ON_ROOT) && !cgroup_parent(cgrp))
3709                        continue;
3710                if ((cft->flags & CFTYPE_ONLY_ON_ROOT) && cgroup_parent(cgrp))
3711                        continue;
3712
3713                if (is_add) {
3714                        ret = cgroup_add_file(css, cgrp, cft);
3715                        if (ret) {
3716                                pr_warn("%s: failed to add %s, err=%d\n",
3717                                        __func__, cft->name, ret);
3718                                cft_end = cft;
3719                                is_add = false;
3720                                goto restart;
3721                        }
3722                } else {
3723                        cgroup_rm_file(cgrp, cft);
3724                }
3725        }
3726        return ret;
3727}
3728
3729static int cgroup_apply_cftypes(struct cftype *cfts, bool is_add)
3730{
3731        LIST_HEAD(pending);
3732        struct cgroup_subsys *ss = cfts[0].ss;
3733        struct cgroup *root = &ss->root->cgrp;
3734        struct cgroup_subsys_state *css;
3735        int ret = 0;
3736
3737        lockdep_assert_held(&cgroup_mutex);
3738
3739        /* add/rm files for all cgroups created before */
3740        css_for_each_descendant_pre(css, cgroup_css(root, ss)) {
3741                struct cgroup *cgrp = css->cgroup;
3742
3743                if (!(css->flags & CSS_VISIBLE))
3744                        continue;
3745
3746                ret = cgroup_addrm_files(css, cgrp, cfts, is_add);
3747                if (ret)
3748                        break;
3749        }
3750
3751        if (is_add && !ret)
3752                kernfs_activate(root->kn);
3753        return ret;
3754}
3755
3756static void cgroup_exit_cftypes(struct cftype *cfts)
3757{
3758        struct cftype *cft;
3759
3760        for (cft = cfts; cft->name[0] != '\0'; cft++) {
3761                /* free copy for custom atomic_write_len, see cgroup_init_cftypes() */
3762                if (cft->max_write_len && cft->max_write_len != PAGE_SIZE)
3763                        kfree(cft->kf_ops);
3764                cft->kf_ops = NULL;
3765                cft->ss = NULL;
3766
3767                /* revert flags set by cgroup core while adding @cfts */
3768                cft->flags &= ~(__CFTYPE_ONLY_ON_DFL | __CFTYPE_NOT_ON_DFL);
3769        }
3770}
3771
3772static int cgroup_init_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
3773{
3774        struct cftype *cft;
3775
3776        for (cft = cfts; cft->name[0] != '\0'; cft++) {
3777                struct kernfs_ops *kf_ops;
3778
3779                WARN_ON(cft->ss || cft->kf_ops);
3780
3781                if (cft->seq_start)
3782                        kf_ops = &cgroup_kf_ops;
3783                else
3784                        kf_ops = &cgroup_kf_single_ops;
3785
3786                /*
3787                 * Ugh... if @cft wants a custom max_write_len, we need to
3788                 * make a copy of kf_ops to set its atomic_write_len.
3789                 */
3790                if (cft->max_write_len && cft->max_write_len != PAGE_SIZE) {
3791                        kf_ops = kmemdup(kf_ops, sizeof(*kf_ops), GFP_KERNEL);
3792                        if (!kf_ops) {
3793                                cgroup_exit_cftypes(cfts);
3794                                return -ENOMEM;
3795                        }
3796                        kf_ops->atomic_write_len = cft->max_write_len;
3797                }
3798
3799                cft->kf_ops = kf_ops;
3800                cft->ss = ss;
3801        }
3802
3803        return 0;
3804}
3805
3806static int cgroup_rm_cftypes_locked(struct cftype *cfts)
3807{
3808        lockdep_assert_held(&cgroup_mutex);
3809
3810        if (!cfts || !cfts[0].ss)
3811                return -ENOENT;
3812
3813        list_del(&cfts->node);
3814        cgroup_apply_cftypes(cfts, false);
3815        cgroup_exit_cftypes(cfts);
3816        return 0;
3817}
3818
3819/**
3820 * cgroup_rm_cftypes - remove an array of cftypes from a subsystem
3821 * @cfts: zero-length name terminated array of cftypes
3822 *
3823 * Unregister @cfts.  Files described by @cfts are removed from all
3824 * existing cgroups and all future cgroups won't have them either.  This
3825 * function can be called anytime whether @cfts' subsys is attached or not.
3826 *
3827 * Returns 0 on successful unregistration, -ENOENT if @cfts is not
3828 * registered.
3829 */
3830int cgroup_rm_cftypes(struct cftype *cfts)
3831{
3832        int ret;
3833
3834        mutex_lock(&cgroup_mutex);
3835        ret = cgroup_rm_cftypes_locked(cfts);
3836        mutex_unlock(&cgroup_mutex);
3837        return ret;
3838}
3839
3840/**
3841 * cgroup_add_cftypes - add an array of cftypes to a subsystem
3842 * @ss: target cgroup subsystem
3843 * @cfts: zero-length name terminated array of cftypes
3844 *
3845 * Register @cfts to @ss.  Files described by @cfts are created for all
3846 * existing cgroups to which @ss is attached and all future cgroups will
3847 * have them too.  This function can be called anytime whether @ss is
3848 * attached or not.
3849 *
3850 * Returns 0 on successful registration, -errno on failure.  Note that this
3851 * function currently returns 0 as long as @cfts registration is successful
3852 * even if some file creation attempts on existing cgroups fail.
3853 */
3854static int cgroup_add_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
3855{
3856        int ret;
3857
3858        if (!cgroup_ssid_enabled(ss->id))
3859                return 0;
3860
3861        if (!cfts || cfts[0].name[0] == '\0')
3862                return 0;
3863
3864        ret = cgroup_init_cftypes(ss, cfts);
3865        if (ret)
3866                return ret;
3867
3868        mutex_lock(&cgroup_mutex);
3869
3870        list_add_tail(&cfts->node, &ss->cfts);
3871        ret = cgroup_apply_cftypes(cfts, true);
3872        if (ret)
3873                cgroup_rm_cftypes_locked(cfts);
3874
3875        mutex_unlock(&cgroup_mutex);
3876        return ret;
3877}
3878
3879/**
3880 * cgroup_add_dfl_cftypes - add an array of cftypes for default hierarchy
3881 * @ss: target cgroup subsystem
3882 * @cfts: zero-length name terminated array of cftypes
3883 *
3884 * Similar to cgroup_add_cftypes() but the added files are only used for
3885 * the default hierarchy.
3886 */
3887int cgroup_add_dfl_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
3888{
3889        struct cftype *cft;
3890
3891        for (cft = cfts; cft && cft->name[0] != '\0'; cft++)
3892                cft->flags |= __CFTYPE_ONLY_ON_DFL;
3893        return cgroup_add_cftypes(ss, cfts);
3894}
3895
3896/**
3897 * cgroup_add_legacy_cftypes - add an array of cftypes for legacy hierarchies
3898 * @ss: target cgroup subsystem
3899 * @cfts: zero-length name terminated array of cftypes
3900 *
3901 * Similar to cgroup_add_cftypes() but the added files are only used for
3902 * the legacy hierarchies.
3903 */
3904int cgroup_add_legacy_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
3905{
3906        struct cftype *cft;
3907
3908        for (cft = cfts; cft && cft->name[0] != '\0'; cft++)
3909                cft->flags |= __CFTYPE_NOT_ON_DFL;
3910        return cgroup_add_cftypes(ss, cfts);
3911}
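
/*
 * A minimal sketch of registering files with the helpers above.  All
 * "example" names are hypothetical; note the zero-length name entry
 * terminating the array:
 *
 *	static u64 example_read_u64(struct cgroup_subsys_state *css,
 *				    struct cftype *cft)
 *	{
 *		return 0;
 *	}
 *
 *	static struct cftype example_files[] = {
 *		{
 *			.name = "example.value",
 *			.read_u64 = example_read_u64,
 *		},
 *		{ },	(terminator)
 *	};
 *
 *	cgroup_add_legacy_cftypes(&example_cgrp_subsys, example_files);
 */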
3912
3913/**
3914 * cgroup_file_notify - generate a file modified event for a cgroup_file
3915 * @cfile: target cgroup_file
3916 *
3917 * @cfile must have been obtained by setting cftype->file_offset.
3918 */
3919void cgroup_file_notify(struct cgroup_file *cfile)
3920{
3921        unsigned long flags;
3922
3923        spin_lock_irqsave(&cgroup_file_kn_lock, flags);
3924        if (cfile->kn)
3925                kernfs_notify(cfile->kn);
3926        spin_unlock_irqrestore(&cgroup_file_kn_lock, flags);
3927}
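
/*
 * A sketch of the cftype->file_offset + cgroup_file_notify() pattern
 * (names hypothetical, read callbacks omitted).  A controller embeds a
 * cgroup_file in the struct containing its css and can then wake pollers
 * of the matching file:
 *
 *	struct example_css {
 *		struct cgroup_subsys_state css;		(must come first)
 *		struct cgroup_file events_file;
 *	};
 *
 *	static struct cftype example_files[] = {
 *		{
 *			.name = "example.events",
 *			.file_offset = offsetof(struct example_css,
 *						events_file),
 *		},
 *		{ },
 *	};
 *
 * and, when an event fires:  cgroup_file_notify(&ecss->events_file);
 */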
3928
3929/**
3930 * cgroup_task_count - count the number of tasks in a cgroup.
3931 * @cgrp: the cgroup in question
3932 *
3933 * Return the number of tasks in the cgroup.  The returned number can be
3934 * higher than the actual number of tasks due to css_set references from
3935 * namespace roots and temporary usages.
3936 */
3937static int cgroup_task_count(const struct cgroup *cgrp)
3938{
3939        int count = 0;
3940        struct cgrp_cset_link *link;
3941
3942        spin_lock_irq(&css_set_lock);
3943        list_for_each_entry(link, &cgrp->cset_links, cset_link)
3944                count += atomic_read(&link->cset->refcount);
3945        spin_unlock_irq(&css_set_lock);
3946        return count;
3947}
3948
3949/**
3950 * css_next_child - find the next child of a given css
3951 * @pos: the current position (%NULL to initiate traversal)
3952 * @parent: css whose children to walk
3953 *
3954 * This function returns the next child of @parent and should be called
3955 * under either cgroup_mutex or RCU read lock.  The only requirement is
3956 * that @parent and @pos are accessible.  The next sibling is guaranteed to
3957 * be returned regardless of their states.
3958 *
3959 * If a subsystem synchronizes ->css_online() and the start of iteration, a
3960 * css which finished ->css_online() is guaranteed to be visible in the
3961 * future iterations and will stay visible until the last reference is put.
3962 * A css which hasn't finished ->css_online() or already finished
3963 * ->css_offline() may show up during traversal.  It's each subsystem's
3964 * responsibility to synchronize against on/offlining.
3965 */
3966struct cgroup_subsys_state *css_next_child(struct cgroup_subsys_state *pos,
3967                                           struct cgroup_subsys_state *parent)
3968{
3969        struct cgroup_subsys_state *next;
3970
3971        cgroup_assert_mutex_or_rcu_locked();
3972
3973        /*
3974         * @pos could already have been unlinked from the sibling list.
3975         * Once a cgroup is removed, its ->sibling.next is no longer
3976         * updated when its next sibling changes.  CSS_RELEASED is set when
3977         * @pos is taken off list, at which time its next pointer is valid,
3978         * and, as releases are serialized, the one pointed to by the next
3979         * pointer is guaranteed to not have started release yet.  This
3980         * implies that if we observe !CSS_RELEASED on @pos in this RCU
3981         * critical section, the one pointed to by its next pointer is
3982         * guaranteed to not have finished its RCU grace period even if we
3983 * have dropped rcu_read_lock() in between iterations.
3984         *
3985         * If @pos has CSS_RELEASED set, its next pointer can't be
3986         * dereferenced; however, as each css is given a monotonically
3987         * increasing unique serial number and always appended to the
3988         * sibling list, the next one can be found by walking the parent's
3989         * children until the first css with higher serial number than
3990         * @pos's.  While this path can be slower, it happens iff iteration
3991         * races against release and the race window is very small.
3992         */
3993        if (!pos) {
3994                next = list_entry_rcu(parent->children.next, struct cgroup_subsys_state, sibling);
3995        } else if (likely(!(pos->flags & CSS_RELEASED))) {
3996                next = list_entry_rcu(pos->sibling.next, struct cgroup_subsys_state, sibling);
3997        } else {
3998                list_for_each_entry_rcu(next, &parent->children, sibling)
3999                        if (next->serial_nr > pos->serial_nr)
4000                                break;
4001        }
4002
4003        /*
4004         * @next, if not pointing to the head, can be dereferenced and is
4005         * the next sibling.
4006         */
4007        if (&next->sibling != &parent->children)
4008                return next;
4009        return NULL;
4010}
4011
4012/**
4013 * css_next_descendant_pre - find the next descendant for pre-order walk
4014 * @pos: the current position (%NULL to initiate traversal)
4015 * @root: css whose descendants to walk
4016 *
4017 * To be used by css_for_each_descendant_pre().  Find the next descendant
4018 * to visit for pre-order traversal of @root's descendants.  @root is
4019 * included in the iteration and the first node to be visited.
4020 *
4021 * While this function requires cgroup_mutex or RCU read locking, it
4022 * doesn't require the whole traversal to be contained in a single critical
4023 * section.  This function will return the correct next descendant as long
4024 * as both @pos and @root are accessible and @pos is a descendant of @root.
4025 *
4026 * If a subsystem synchronizes ->css_online() and the start of iteration, a
4027 * css which finished ->css_online() is guaranteed to be visible in the
4028 * future iterations and will stay visible until the last reference is put.
4029 * A css which hasn't finished ->css_online() or already finished
4030 * ->css_offline() may show up during traversal.  It's each subsystem's
4031 * responsibility to synchronize against on/offlining.
4032 */
4033struct cgroup_subsys_state *
4034css_next_descendant_pre(struct cgroup_subsys_state *pos,
4035                        struct cgroup_subsys_state *root)
4036{
4037        struct cgroup_subsys_state *next;
4038
4039        cgroup_assert_mutex_or_rcu_locked();
4040
4041        /* if first iteration, visit @root */
4042        if (!pos)
4043                return root;
4044
4045        /* visit the first child if exists */
4046        next = css_next_child(NULL, pos);
4047        if (next)
4048                return next;
4049
4050        /* no child, visit my or the closest ancestor's next sibling */
4051        while (pos != root) {
4052                next = css_next_child(pos, pos->parent);
4053                if (next)
4054                        return next;
4055                pos = pos->parent;
4056        }
4057
4058        return NULL;
4059}
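
/*
 * Typical pre-order usage, e.g. counting online descendants under RCU
 * (illustrative; css_for_each_descendant_pre() wraps this function):
 *
 *	struct cgroup_subsys_state *pos;
 *	int nr_online = 0;
 *
 *	rcu_read_lock();
 *	css_for_each_descendant_pre(pos, root_css)
 *		if (pos->flags & CSS_ONLINE)
 *			nr_online++;
 *	rcu_read_unlock();
 */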
4060
4061/**
4062 * css_rightmost_descendant - return the rightmost descendant of a css
4063 * @pos: css of interest
4064 *
4065 * Return the rightmost descendant of @pos.  If there's no descendant, @pos
4066 * is returned.  This can be used during pre-order traversal to skip
4067 * subtree of @pos.
4068 *
4069 * While this function requires cgroup_mutex or RCU read locking, it
4070 * doesn't require the whole traversal to be contained in a single critical
4071 * section.  This function will return the correct rightmost descendant as
4072 * long as @pos is accessible.
4073 */
4074struct cgroup_subsys_state *
4075css_rightmost_descendant(struct cgroup_subsys_state *pos)
4076{
4077        struct cgroup_subsys_state *last, *tmp;
4078
4079        cgroup_assert_mutex_or_rcu_locked();
4080
4081        do {
4082                last = pos;
4083                /* ->prev isn't RCU safe, walk ->next till the end */
4084                pos = NULL;
4085                css_for_each_child(tmp, last)
4086                        pos = tmp;
4087        } while (pos);
4088
4089        return last;
4090}
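
/*
 * During a pre-order walk, an entire subtree can be skipped by jumping to
 * its rightmost descendant, e.g. (the predicate is hypothetical):
 *
 *	css_for_each_descendant_pre(pos, root_css) {
 *		if (example_should_skip(pos)) {
 *			pos = css_rightmost_descendant(pos);
 *			continue;
 *		}
 *		...
 *	}
 */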
4091
4092static struct cgroup_subsys_state *
4093css_leftmost_descendant(struct cgroup_subsys_state *pos)
4094{
4095        struct cgroup_subsys_state *last;
4096
4097        do {
4098                last = pos;
4099                pos = css_next_child(NULL, pos);
4100        } while (pos);
4101
4102        return last;
4103}
4104
4105/**
4106 * css_next_descendant_post - find the next descendant for post-order walk
4107 * @pos: the current position (%NULL to initiate traversal)
4108 * @root: css whose descendants to walk
4109 *
4110 * To be used by css_for_each_descendant_post().  Find the next descendant
4111 * to visit for post-order traversal of @root's descendants.  @root is
4112 * included in the iteration and the last node to be visited.
4113 *
4114 * While this function requires cgroup_mutex or RCU read locking, it
4115 * doesn't require the whole traversal to be contained in a single critical
4116 * section.  This function will return the correct next descendant as long
4117 * as both @pos and @root are accessible and @pos is a descendant of
4118 * @root.
4119 *
4120 * If a subsystem synchronizes ->css_online() and the start of iteration, a
4121 * css which finished ->css_online() is guaranteed to be visible in the
4122 * future iterations and will stay visible until the last reference is put.
4123 * A css which hasn't finished ->css_online() or already finished
4124 * ->css_offline() may show up during traversal.  It's each subsystem's
4125 * responsibility to synchronize against on/offlining.
4126 */
4127struct cgroup_subsys_state *
4128css_next_descendant_post(struct cgroup_subsys_state *pos,
4129                         struct cgroup_subsys_state *root)
4130{
4131        struct cgroup_subsys_state *next;
4132
4133        cgroup_assert_mutex_or_rcu_locked();
4134
4135        /* if first iteration, visit leftmost descendant which may be @root */
4136        if (!pos)
4137                return css_leftmost_descendant(root);
4138
4139        /* if we visited @root, we're done */
4140        if (pos == root)
4141                return NULL;
4142
4143        /* if there's an unvisited sibling, visit its leftmost descendant */
4144        next = css_next_child(pos, pos->parent);
4145        if (next)
4146                return css_leftmost_descendant(next);
4147
4148        /* no sibling left, visit parent */
4149        return pos->parent;
4150}
4151
4152/**
4153 * css_has_online_children - does a css have online children
4154 * @css: the target css
4155 *
4156 * Returns %true if @css has any online children; otherwise, %false.  This
4157 * function can be called from any context but the caller is responsible
4158 * for synchronizing against on/offlining as necessary.
4159 */
4160bool css_has_online_children(struct cgroup_subsys_state *css)
4161{
4162        struct cgroup_subsys_state *child;
4163        bool ret = false;
4164
4165        rcu_read_lock();
4166        css_for_each_child(child, css) {
4167                if (child->flags & CSS_ONLINE) {
4168                        ret = true;
4169                        break;
4170                }
4171        }
4172        rcu_read_unlock();
4173        return ret;
4174}
4175
4176/**
4177 * css_task_iter_advance_css_set - advance a task iterator to the next css_set
4178 * @it: the iterator to advance
4179 *
4180 * Advance @it to the next css_set to walk.
4181 */
4182static void css_task_iter_advance_css_set(struct css_task_iter *it)
4183{
4184        struct list_head *l = it->cset_pos;
4185        struct cgrp_cset_link *link;
4186        struct css_set *cset;
4187
4188        lockdep_assert_held(&css_set_lock);
4189
4190        /* Advance to the next non-empty css_set */
4191        do {
4192                l = l->next;
4193                if (l == it->cset_head) {
4194                        it->cset_pos = NULL;
4195                        it->task_pos = NULL;
4196                        return;
4197                }
4198
4199                if (it->ss) {
4200                        cset = container_of(l, struct css_set,
4201                                            e_cset_node[it->ss->id]);
4202                } else {
4203                        link = list_entry(l, struct cgrp_cset_link, cset_link);
4204                        cset = link->cset;
4205                }
4206        } while (!css_set_populated(cset));
4207
4208        it->cset_pos = l;
4209
4210        if (!list_empty(&cset->tasks))
4211                it->task_pos = cset->tasks.next;
4212        else
4213                it->task_pos = cset->mg_tasks.next;
4214
4215        it->tasks_head = &cset->tasks;
4216        it->mg_tasks_head = &cset->mg_tasks;
4217
4218        /*
4219         * We don't keep css_sets locked across iteration steps and thus
4220         * need to take steps to ensure that iteration can be resumed after
4221         * the lock is re-acquired.  Iteration is performed at two levels -
4222         * css_sets and tasks in them.
4223         *
4224         * Once created, a css_set never leaves its cgroup lists, so a
4225         * pinned css_set is guaranteed to stay put and we can resume
4226         * iteration afterwards.
4227         *
4228         * Tasks may leave @cset across iteration steps.  This is resolved
4229         * by registering each iterator with the css_set currently being
4230         * walked and making css_set_move_task() advance iterators whose
4231         * next task is leaving.
4232         */
4233        if (it->cur_cset) {
4234                list_del(&it->iters_node);
4235                put_css_set_locked(it->cur_cset);
4236        }
4237        get_css_set(cset);
4238        it->cur_cset = cset;
4239        list_add(&it->iters_node, &cset->task_iters);
4240}
4241
4242static void css_task_iter_advance(struct css_task_iter *it)
4243{
4244        struct list_head *l = it->task_pos;
4245
4246        lockdep_assert_held(&css_set_lock);
4247        WARN_ON_ONCE(!l);
4248
4249        /*
4250         * Advance iterator to find next entry.  cset->tasks is consumed
4251         * first and then ->mg_tasks.  After ->mg_tasks, we move onto the
4252         * next cset.
4253         */
4254        l = l->next;
4255
4256        if (l == it->tasks_head)
4257                l = it->mg_tasks_head->next;
4258
4259        if (l == it->mg_tasks_head)
4260                css_task_iter_advance_css_set(it);
4261        else
4262                it->task_pos = l;
4263}
4264
4265/**
4266 * css_task_iter_start - initiate task iteration
4267 * @css: the css to walk tasks of
4268 * @it: the task iterator to use
4269 *
4270 * Initiate iteration through the tasks of @css.  The caller can call
4271 * css_task_iter_next() to walk through the tasks until the function
4272 * returns NULL.  On completion of iteration, css_task_iter_end() must be
4273 * called.
4274 */
4275void css_task_iter_start(struct cgroup_subsys_state *css,
4276                         struct css_task_iter *it)
4277{
4278        /* no one should try to iterate before mounting cgroups */
4279        WARN_ON_ONCE(!use_task_css_set_links);
4280
4281        memset(it, 0, sizeof(*it));
4282
4283        spin_lock_irq(&css_set_lock);
4284
4285        it->ss = css->ss;
4286
4287        if (it->ss)
4288                it->cset_pos = &css->cgroup->e_csets[css->ss->id];
4289        else
4290                it->cset_pos = &css->cgroup->cset_links;
4291
4292        it->cset_head = it->cset_pos;
4293
4294        css_task_iter_advance_css_set(it);
4295
4296        spin_unlock_irq(&css_set_lock);
4297}
4298
4299/**
4300 * css_task_iter_next - return the next task for the iterator
4301 * @it: the task iterator being iterated
4302 *
4303 * The "next" function for task iteration.  @it should have been
4304 * initialized via css_task_iter_start().  Returns NULL when the iteration
4305 * reaches the end.
4306 */
4307struct task_struct *css_task_iter_next(struct css_task_iter *it)
4308{
4309        if (it->cur_task) {
4310                put_task_struct(it->cur_task);
4311                it->cur_task = NULL;
4312        }
4313
4314        spin_lock_irq(&css_set_lock);
4315
4316        if (it->task_pos) {
4317                it->cur_task = list_entry(it->task_pos, struct task_struct,
4318                                          cg_list);
4319                get_task_struct(it->cur_task);
4320                css_task_iter_advance(it);
4321        }
4322
4323        spin_unlock_irq(&css_set_lock);
4324
4325        return it->cur_task;
4326}
4327
4328/**
4329 * css_task_iter_end - finish task iteration
4330 * @it: the task iterator to finish
4331 *
4332 * Finish task iteration started by css_task_iter_start().
4333 */
4334void css_task_iter_end(struct css_task_iter *it)
4335{
4336        if (it->cur_cset) {
4337                spin_lock_irq(&css_set_lock);
4338                list_del(&it->iters_node);
4339                put_css_set_locked(it->cur_cset);
4340                spin_unlock_irq(&css_set_lock);
4341        }
4342
4343        if (it->cur_task)
4344                put_task_struct(it->cur_task);
4345}
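
/*
 * The canonical iteration pattern (cgroup_transfer_tasks() below instead
 * restarts the iterator for every task so it can migrate between steps):
 *
 *	struct css_task_iter it;
 *	struct task_struct *task;
 *
 *	css_task_iter_start(css, &it);
 *	while ((task = css_task_iter_next(&it)))
 *		example_process(task);	(hypothetical per-task work)
 *	css_task_iter_end(&it);
 */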
4346
4347/**
4348 * cgroup_transfer_tasks - move tasks from one cgroup to another
4349 * @to: cgroup to which the tasks will be moved
4350 * @from: cgroup in which the tasks currently reside
4351 *
4352 * Locking rules between cgroup_post_fork() and the migration path
4353 * guarantee that, if a task is forking while being migrated, the new child
4354 * is guaranteed to be either visible in the source cgroup after the
4355 * parent's migration is complete or put into the target cgroup.  No task
4356 * can slip out of migration through forking.
4357 */
4358int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from)
4359{
4360        LIST_HEAD(preloaded_csets);
4361        struct cgrp_cset_link *link;
4362        struct css_task_iter it;
4363        struct task_struct *task;
4364        int ret;
4365
4366        if (!cgroup_may_migrate_to(to))
4367                return -EBUSY;
4368
4369        mutex_lock(&cgroup_mutex);
4370
4371        percpu_down_write(&cgroup_threadgroup_rwsem);
4372
4373        /* all tasks in @from are being moved, all csets are source */
4374        spin_lock_irq(&css_set_lock);
4375        list_for_each_entry(link, &from->cset_links, cset_link)
4376                cgroup_migrate_add_src(link->cset, to, &preloaded_csets);
4377        spin_unlock_irq(&css_set_lock);
4378
4379        ret = cgroup_migrate_prepare_dst(&preloaded_csets);
4380        if (ret)
4381                goto out_err;
4382
4383        /*
4384         * Migrate tasks one-by-one until @from is empty.  This fails iff
4385         * ->can_attach() fails.
4386         */
4387        do {
4388                css_task_iter_start(&from->self, &it);
4389                task = css_task_iter_next(&it);
4390                if (task)
4391                        get_task_struct(task);
4392                css_task_iter_end(&it);
4393
4394                if (task) {
4395                        ret = cgroup_migrate(task, false, to->root);
4396                        if (!ret)
4397                                trace_cgroup_transfer_tasks(to, task, false);
4398                        put_task_struct(task);
4399                }
4400        } while (task && !ret);
4401out_err:
4402        cgroup_migrate_finish(&preloaded_csets);
4403        percpu_up_write(&cgroup_threadgroup_rwsem);
4404        mutex_unlock(&cgroup_mutex);
4405        return ret;
4406}
4407
4408/*
4409 * Stuff for reading the 'tasks'/'procs' files.
4410 *
4411 * Reading this file can return large amounts of data if a cgroup has
4412 * *lots* of attached tasks. So it may need several calls to read(),
4413 * but we cannot guarantee that the information we produce is correct
4414 * unless we produce it entirely atomically.
4416 */
4417
4418/* which pidlist file are we talking about? */
4419enum cgroup_filetype {
4420        CGROUP_FILE_PROCS,
4421        CGROUP_FILE_TASKS,
4422};
4423
4424/*
4425 * A pidlist is a list of pids that virtually represents the contents of one
4426 * of the cgroup files ("procs" or "tasks"). We keep a list of such pidlists,
4427 * a pair (one each for procs, tasks) for each pid namespace that's relevant
4428 * to the cgroup.
4429 */
4430struct cgroup_pidlist {
4431        /*
4432         * used to find which pidlist is wanted. doesn't change as long as
4433         * this particular list stays in the list.
4434         */
4435        struct { enum cgroup_filetype type; struct pid_namespace *ns; } key;
4436        /* array of xids */
4437        pid_t *list;
4438        /* how many elements the above list has */
4439        int length;
4440        /* each of these stored in a list by its cgroup */
4441        struct list_head links;
4442        /* pointer to the cgroup we belong to, for list removal purposes */
4443        struct cgroup *owner;
4444        /* for delayed destruction */
4445        struct delayed_work destroy_dwork;
4446};
4447
4448/*
4449 * The following two functions "fix" the issue where there are more pids
4450 * than kmalloc will give memory for; in such cases, we use vmalloc/vfree.
4451 * TODO: replace with a kernel-wide solution to this problem
4452 */
4453#define PIDLIST_TOO_LARGE(c) ((c) * sizeof(pid_t) > (PAGE_SIZE * 2))
4454static void *pidlist_allocate(int count)
4455{
4456        if (PIDLIST_TOO_LARGE(count))
4457                return vmalloc(count * sizeof(pid_t));
4458        else
4459                return kmalloc(count * sizeof(pid_t), GFP_KERNEL);
4460}
4461
4462static void pidlist_free(void *p)
4463{
4464        kvfree(p);
4465}
4466
4467/*
4468 * Used to destroy all pidlists lingering waiting for destroy timer.  None
4469 * should be left afterwards.
4470 */
4471static void cgroup_pidlist_destroy_all(struct cgroup *cgrp)
4472{
4473        struct cgroup_pidlist *l, *tmp_l;
4474
4475        mutex_lock(&cgrp->pidlist_mutex);
4476        list_for_each_entry_safe(l, tmp_l, &cgrp->pidlists, links)
4477                mod_delayed_work(cgroup_pidlist_destroy_wq, &l->destroy_dwork, 0);
4478        mutex_unlock(&cgrp->pidlist_mutex);
4479
4480        flush_workqueue(cgroup_pidlist_destroy_wq);
4481        BUG_ON(!list_empty(&cgrp->pidlists));
4482}
4483
4484static void cgroup_pidlist_destroy_work_fn(struct work_struct *work)
4485{
4486        struct delayed_work *dwork = to_delayed_work(work);
4487        struct cgroup_pidlist *l = container_of(dwork, struct cgroup_pidlist,
4488                                                destroy_dwork);
4489        struct cgroup_pidlist *tofree = NULL;
4490
4491        mutex_lock(&l->owner->pidlist_mutex);
4492
4493        /*
4494         * Destroy iff we didn't get queued again.  The state won't change
4495         * as destroy_dwork can only be queued while locked.
4496         */
4497        if (!delayed_work_pending(dwork)) {
4498                list_del(&l->links);
4499                pidlist_free(l->list);
4500                put_pid_ns(l->key.ns);
4501                tofree = l;
4502        }
4503
4504        mutex_unlock(&l->owner->pidlist_mutex);
4505        kfree(tofree);
4506}
4507
4508/*
4509 * pidlist_uniq - given a sorted pid list, strip out all duplicate entries
4510 * Returns the number of unique elements.
4511 */
4512static int pidlist_uniq(pid_t *list, int length)
4513{
4514        int src, dest = 1;
4515
4516        /*
4517         * we presume the 0th element is unique, so src starts at 1. handle
4518         * the trivial edge cases first; lists of length 0 or 1 need no work
4519         */
4520        if (length == 0 || length == 1)
4521                return length;
4522        /* src and dest walk down the list; dest counts unique elements */
4523        for (src = 1; src < length; src++) {
4524                /* find next unique element */
4525                while (list[src] == list[src-1]) {
4526                        src++;
4527                        if (src == length)
4528                                goto after;
4529                }
4530                /* dest always points to where the next unique element goes */
4531                list[dest] = list[src];
4532                dest++;
4533        }
4534after:
4535        return dest;
4536}
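
/*
 * Worked example (input must already be sorted, as done by the callers
 * below): list = {1, 1, 2, 3, 3}, length = 5.  src skips the duplicate
 * 1, writes 2 to list[1] and 3 to list[2], then runs into the trailing
 * duplicate 3 and stops; the function returns 3 and the first three
 * entries are {1, 2, 3}.
 */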
4537
4538/*
4539 * The two pid files - tasks and cgroup.procs - guaranteed that the result
4540 * is sorted, which forced this whole pidlist fiasco.  As pid order is
4541 * different per namespace, each namespace needs a differently sorted list,
4542 * making it impossible to use, for example, a single rbtree of member
4543 * tasks sorted by task pointer.  As pidlists can be fairly large,
4544 * allocating one per open file is dangerous, so cgroup had to implement a
4545 * shared pool of pidlists keyed by cgroup and namespace.
4546 *
4547 * All this extra complexity was caused by the original implementation
4548 * committing to an entirely unnecessary property.  In the long term, we
4549 * want to do away with it.  Explicitly scramble sort order if on the
4550 * default hierarchy so that no such expectation exists in the new
4551 * interface.
4552 *
4553 * Scrambling is done by swapping every two consecutive bits, which is a
4554 * non-identity one-to-one mapping that disturbs sort order sufficiently.
4555 */
4556static pid_t pid_fry(pid_t pid)
4557{
4558        unsigned a = pid & 0x55555555;
4559        unsigned b = pid & 0xAAAAAAAA;
4560
4561        return (a << 1) | (b >> 1);
4562}
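
/*
 * For instance, pid 6 (0b0110) gives a = 0b0100 and b = 0b0010, so
 * pid_fry(6) = (0b0100 << 1) | (0b0010 >> 1) = 0b1001 = 9.  Swapping
 * the same bit pairs again restores the input, so pid_fry() is its own
 * inverse.
 */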
4563
4564static pid_t cgroup_pid_fry(struct cgroup *cgrp, pid_t pid)
4565{
4566        if (cgroup_on_dfl(cgrp))
4567                return pid_fry(pid);
4568        else
4569                return pid;
4570}
4571
4572static int cmppid(const void *a, const void *b)
4573{
4574        return *(pid_t *)a - *(pid_t *)b;
4575}
4576
4577static int fried_cmppid(const void *a, const void *b)
4578{
4579        return pid_fry(*(pid_t *)a) - pid_fry(*(pid_t *)b);
4580}
4581
4582static struct cgroup_pidlist *cgroup_pidlist_find(struct cgroup *cgrp,
4583                                                  enum cgroup_filetype type)
4584{
4585        struct cgroup_pidlist *l;
4586        /* don't need task_nsproxy() if we're looking at ourselves */
4587        struct pid_namespace *ns = task_active_pid_ns(current);
4588
4589        lockdep_assert_held(&cgrp->pidlist_mutex);
4590
4591        list_for_each_entry(l, &cgrp->pidlists, links)
4592                if (l->key.type == type && l->key.ns == ns)
4593                        return l;
4594        return NULL;
4595}
4596
4597/*
4598 * Find the appropriate pidlist for our purpose (given procs vs tasks),
4599 * creating a new one if none exists.  Must be called with
4600 * cgrp->pidlist_mutex held.  Returns NULL if we're out of
4601 * memory.
4602 */
4603static struct cgroup_pidlist *cgroup_pidlist_find_create(struct cgroup *cgrp,
4604                                                enum cgroup_filetype type)
4605{
4606        struct cgroup_pidlist *l;
4607
4608        lockdep_assert_held(&cgrp->pidlist_mutex);
4609
4610        l = cgroup_pidlist_find(cgrp, type);
4611        if (l)
4612                return l;
4613
4614        /* entry not found; create a new one */
4615        l = kzalloc(sizeof(struct cgroup_pidlist), GFP_KERNEL);
4616        if (!l)
4617                return l;
4618
4619        INIT_DELAYED_WORK(&l->destroy_dwork, cgroup_pidlist_destroy_work_fn);
4620        l->key.type = type;
4621        /* don't need task_nsproxy() if we're looking at ourselves */
4622        l->key.ns = get_pid_ns(task_active_pid_ns(current));
4623        l->owner = cgrp;
4624        list_add(&l->links, &cgrp->pidlists);
4625        return l;
4626}
4627
4628/*
4629 * Load a cgroup's pidarray with either procs' tgids or tasks' pids
4630 */
4631static int pidlist_array_load(struct cgroup *cgrp, enum cgroup_filetype type,
4632                              struct cgroup_pidlist **lp)
4633{
4634        pid_t *array;
4635        int length;
4636        int pid, n = 0; /* used for populating the array */
4637        struct css_task_iter it;
4638        struct task_struct *tsk;
4639        struct cgroup_pidlist *l;
4640
4641        lockdep_assert_held(&cgrp->pidlist_mutex);
4642
4643        /*
4644         * If cgroup gets more users after we read count, we won't have
4645         * enough space - tough.  This race is indistinguishable to the
4646         * caller from the case that the additional cgroup users didn't
4647         * show up until sometime later on.
4648         */
4649        length = cgroup_task_count(cgrp);
4650        array = pidlist_allocate(length);
4651        if (!array)
4652                return -ENOMEM;
4653        /* now, populate the array */
4654        css_task_iter_start(&cgrp->self, &it);
4655        while ((tsk = css_task_iter_next(&it))) {
4656                if (unlikely(n == length))
4657                        break;
4658                /* get tgid or pid for procs or tasks file respectively */
4659                if (type == CGROUP_FILE_PROCS)
4660                        pid = task_tgid_vnr(tsk);
4661                else
4662                        pid = task_pid_vnr(tsk);
4663                if (pid > 0) /* make sure to only use valid results */
4664                        array[n++] = pid;
4665        }
4666        css_task_iter_end(&it);
4667        length = n;
4668        /* now sort & (if procs) strip out duplicates */
4669        if (cgroup_on_dfl(cgrp))
4670                sort(array, length, sizeof(pid_t), fried_cmppid, NULL);
4671        else
4672                sort(array, length, sizeof(pid_t), cmppid, NULL);
4673        if (type == CGROUP_FILE_PROCS)
4674                length = pidlist_uniq(array, length);
4675
4676        l = cgroup_pidlist_find_create(cgrp, type);
4677        if (!l) {
4678                pidlist_free(array);
4679                return -ENOMEM;
4680        }
4681
4682        /* store array, freeing old if necessary */
4683        pidlist_free(l->list);
4684        l->list = array;
4685        l->length = length;
4686        *lp = l;
4687        return 0;
4688}
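
/*
 * To summarize the load path above: the array is sized from
 * cgroup_task_count(), filled from the css task iterator, sorted (in
 * fried order on the default hierarchy) and, for the procs file,
 * de-duplicated, since every thread of a process contributes the same
 * tgid.
 */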
4689
4690/**
4691 * cgroupstats_build - build and fill cgroupstats
4692 * @stats: cgroupstats to fill information into
4693 * @dentry: A dentry entry belonging to the cgroup for which stats have
4694 * been requested.
4695 *
4696 * Build and fill cgroupstats so that taskstats can export it to user
4697 * space.
4698 */
4699int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry)
4700{
4701        struct kernfs_node *kn = kernfs_node_from_dentry(dentry);
4702        struct cgroup *cgrp;
4703        struct css_task_iter it;
4704        struct task_struct *tsk;
4705
4706        /* it should be a kernfs_node belonging to cgroupfs and be a directory */
4707        if (dentry->d_sb->s_type != &cgroup_fs_type || !kn ||
4708            kernfs_type(kn) != KERNFS_DIR)
4709                return -EINVAL;
4710
4711        mutex_lock(&cgroup_mutex);
4712
4713        /*
4714         * We aren't being called from kernfs and there's no guarantee on
4715         * @kn->priv's validity.  For this and css_tryget_online_from_dir(),
4716         * @kn->priv is RCU safe.  Let's do the RCU dancing.
4717         */
4718        rcu_read_lock();
4719        cgrp = rcu_dereference(kn->priv);
4720        if (!cgrp || cgroup_is_dead(cgrp)) {
4721                rcu_read_unlock();
4722                mutex_unlock(&cgroup_mutex);
4723                return -ENOENT;
4724        }
4725        rcu_read_unlock();
4726
4727        css_task_iter_start(&cgrp->self, &it);
4728        while ((tsk = css_task_iter_next(&it))) {
4729                switch (tsk->state) {
4730                case TASK_RUNNING:
4731                        stats->nr_running++;
4732                        break;
4733                case TASK_INTERRUPTIBLE:
4734                        stats->nr_sleeping++;
4735                        break;
4736                case TASK_UNINTERRUPTIBLE:
4737                        stats->nr_uninterruptible++;
4738                        break;
4739                case TASK_STOPPED:
4740                        stats->nr_stopped++;
4741                        break;
4742                default:
4743                        if (delayacct_is_task_waiting_on_io(tsk))
4744                                stats->nr_io_wait++;
4745                        break;
4746                }
4747        }
4748        css_task_iter_end(&it);
4749
4750        mutex_unlock(&cgroup_mutex);
4751        return 0;
4752}
4753
4754
4755/*
4756 * seq_file methods for the tasks/procs files. The seq_file position is the
4757 * next pid to display; the seq_file iterator is a pointer to the pid
4758 * in the pidlist's ->list array.
4759 */
4760
4761static void *cgroup_pidlist_start(struct seq_file *s, loff_t *pos)
4762{
4763        /*
4764         * Initially we receive a position value that corresponds to
4765         * one more than the last pid shown (or 0 on the first call or
4766         * after a seek to the start). Use a binary search to find the
4767         * next pid to display, if any.
4768         */
4769        struct kernfs_open_file *of = s->private;
4770        struct cgroup *cgrp = seq_css(s)->cgroup;
4771        struct cgroup_pidlist *l;
4772        enum cgroup_filetype type = seq_cft(s)->private;
4773        int index = 0, pid = *pos;
4774        int *iter, ret;
4775
4776        mutex_lock(&cgrp->pidlist_mutex);
4777
4778        /*
4779         * !NULL @of->priv indicates that this isn't the first start()
4780         * after open.  If the matching pidlist is around, we can use that.
4781         * Look for it.  Note that @of->priv can't be used directly.  It
4782         * could already have been destroyed.
4783         */
4784        if (of->priv)
4785                of->priv = cgroup_pidlist_find(cgrp, type);
4786
4787        /*
4788         * Either this is the first start() after open or the matching
4789         * pidlist has been destroyed in between.  Create a new one.
4790         */
4791        if (!of->priv) {
4792                ret = pidlist_array_load(cgrp, type,
4793                                         (struct cgroup_pidlist **)&of->priv);
4794                if (ret)
4795                        return ERR_PTR(ret);
4796        }
4797        l = of->priv;
4798
4799        if (pid) {
4800                int end = l->length;
4801
4802                while (index < end) {
4803                        int mid = (index + end) / 2;
4804                        if (cgroup_pid_fry(cgrp, l->list[mid]) == pid) {
4805                                index = mid;
4806                                break;
4807                        } else if (cgroup_pid_fry(cgrp, l->list[mid]) <= pid)
4808                                index = mid + 1;
4809                        else
4810                                end = mid;
4811                }
4812        }
4813        /* If we're off the end of the array, we're done */
4814        if (index >= l->length)
4815                return NULL;
4816        /* Update the abstract position to be the actual pid that we found */
4817        iter = l->list + index;
4818        *pos = cgroup_pid_fry(cgrp, *iter);
4819        return iter;
4820}
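
/*
 * Illustration of the binary search above (on a legacy hierarchy, where
 * cgroup_pid_fry() is the identity): with a pid list of {3, 7, 12} and
 * *pos == 8, the search settles on index 2 and the read resumes at pid
 * 12; with *pos == 7 it hits an exact match and resumes at pid 7.
 */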
4821
4822static void cgroup_pidlist_stop(struct seq_file *s, void *v)
4823{
4824        struct kernfs_open_file *of = s->private;
4825        struct cgroup_pidlist *l = of->priv;
4826
4827        if (l)
4828                mod_delayed_work(cgroup_pidlist_destroy_wq, &l->destroy_dwork,
4829                                 CGROUP_PIDLIST_DESTROY_DELAY);
4830        mutex_unlock(&seq_css(s)->cgroup->pidlist_mutex);
4831}
4832
4833static void *cgroup_pidlist_next(struct seq_file *s, void *v, loff_t *pos)
4834{
4835        struct kernfs_open_file *of = s->private;
4836        struct cgroup_pidlist *l = of->priv;
4837        pid_t *p = v;
4838        pid_t *end = l->list + l->length;
4839        /*
4840         * Advance to the next pid in the array. If this goes off the
4841         * end, we're done
4842         */
4843        p++;
4844        if (p >= end) {
4845                return NULL;
4846        } else {
4847                *pos = cgroup_pid_fry(seq_css(s)->cgroup, *p);
4848                return p;
4849        }
4850}
4851
4852static int cgroup_pidlist_show(struct seq_file *s, void *v)
4853{
4854        seq_printf(s, "%d\n", *(int *)v);
4855
4856        return 0;
4857}
4858
4859static u64 cgroup_read_notify_on_release(struct cgroup_subsys_state *css,
4860                                         struct cftype *cft)
4861{
4862        return notify_on_release(css->cgroup);
4863}
4864
4865static int cgroup_write_notify_on_release(struct cgroup_subsys_state *css,
4866                                          struct cftype *cft, u64 val)
4867{
4868        if (val)
4869                set_bit(CGRP_NOTIFY_ON_RELEASE, &css->cgroup->flags);
4870        else
4871                clear_bit(CGRP_NOTIFY_ON_RELEASE, &css->cgroup->flags);
4872        return 0;
4873}
4874
4875static u64 cgroup_clone_children_read(struct cgroup_subsys_state *css,
4876                                      struct cftype *cft)
4877{
4878        return test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags);
4879}
4880
4881static int cgroup_clone_children_write(struct cgroup_subsys_state *css,
4882                                       struct cftype *cft, u64 val)
4883{
4884        if (val)
4885                set_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags);
4886        else
4887                clear_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags);
4888        return 0;
4889}
4890
4891/* cgroup core interface files for the default hierarchy */
4892static struct cftype cgroup_dfl_base_files[] = {
4893        {
4894                .name = "cgroup.procs",
4895                .file_offset = offsetof(struct cgroup, procs_file),
4896                .seq_start = cgroup_pidlist_start,
4897                .seq_next = cgroup_pidlist_next,
4898                .seq_stop = cgroup_pidlist_stop,
4899                .seq_show = cgroup_pidlist_show,
4900                .private = CGROUP_FILE_PROCS,
4901                .write = cgroup_procs_write,
4902        },
4903        {
4904                .name = "cgroup.controllers",
4905                .seq_show = cgroup_controllers_show,
4906        },
4907        {
4908                .name = "cgroup.subtree_control",
4909                .seq_show = cgroup_subtree_control_show,
4910                .write = cgroup_subtree_control_write,
4911        },
4912        {
4913                .name = "cgroup.events",
4914                .flags = CFTYPE_NOT_ON_ROOT,
4915                .file_offset = offsetof(struct cgroup, events_file),
4916                .seq_show = cgroup_events_show,
4917        },
4918        { }     /* terminate */
4919};
4920
4921/* cgroup core interface files for the legacy hierarchies */
4922static struct cftype cgroup_legacy_base_files[] = {
4923        {
4924                .name = "cgroup.procs",
4925                .seq_start = cgroup_pidlist_start,
4926                .seq_next = cgroup_pidlist_next,
4927                .seq_stop = cgroup_pidlist_stop,
4928                .seq_show = cgroup_pidlist_show,
4929                .private = CGROUP_FILE_PROCS,
4930                .write = cgroup_procs_write,
4931        },
4932        {
4933                .name = "cgroup.clone_children",
4934                .read_u64 = cgroup_clone_children_read,
4935                .write_u64 = cgroup_clone_children_write,
4936        },
4937        {
4938                .name = "cgroup.sane_behavior",
4939                .flags = CFTYPE_ONLY_ON_ROOT,
4940                .seq_show = cgroup_sane_behavior_show,
4941        },
4942        {
4943                .name = "tasks",
4944                .seq_start = cgroup_pidlist_start,
4945                .seq_next = cgroup_pidlist_next,
4946                .seq_stop = cgroup_pidlist_stop,
4947                .seq_show = cgroup_pidlist_show,
4948                .private = CGROUP_FILE_TASKS,
4949                .write = cgroup_tasks_write,
4950        },
4951        {
4952                .name = "notify_on_release",
4953                .read_u64 = cgroup_read_notify_on_release,
4954                .write_u64 = cgroup_write_notify_on_release,
4955        },
4956        {
4957                .name = "release_agent",
4958                .flags = CFTYPE_ONLY_ON_ROOT,
4959                .seq_show = cgroup_release_agent_show,
4960                .write = cgroup_release_agent_write,
4961                .max_write_len = PATH_MAX - 1,
4962        },
4963        { }     /* terminate */
4964};
4965
4966/*
4967 * css destruction is four-stage process.
4968 *
4969 * 1. Destruction starts.  Killing of the percpu_ref is initiated.
4970 *    Implemented in kill_css().
4971 *
4972 * 2. When the percpu_ref is confirmed to be visible as killed on all CPUs
4973 *    and thus css_tryget_online() is guaranteed to fail, the css can be
4974 *    offlined by invoking offline_css().  After offlining, the base ref is
4975 *    put.  Implemented in css_killed_work_fn().
4976 *
4977 * 3. When the percpu_ref reaches zero, the only possible remaining
4978 *    accessors are inside RCU read sections.  css_release() schedules the
4979 *    RCU callback.
4980 *
4981 * 4. After the grace period, the css can be freed.  Implemented in
4982 *    css_free_work_fn().
4983 *
4984 * It is actually hairier because steps 2 and 4 require process context
4985 * and thus involve punting to css->destroy_work, adding two additional
4986 * steps to the already complex sequence.
4987 */
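
/*
 * A rough sketch of how the four stages map onto the functions below;
 * the two workqueue bounces are the extra steps mentioned above:
 *
 *   kill_css()
 *     percpu_ref_kill_and_confirm()
 *       -> css_killed_ref_fn()       (ref confirmed killed)
 *          -> css_killed_work_fn()   (punted to cgroup_destroy_wq)
 *               offline_css(), css_put()
 *   css_release()                    (refcnt reaches zero)
 *     -> css_release_work_fn()       (punted to cgroup_destroy_wq)
 *          call_rcu(css_free_rcu_fn)
 *            -> css_free_work_fn()   (after the RCU grace period)
 */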
4988static void css_free_work_fn(struct work_struct *work)
4989{
4990        struct cgroup_subsys_state *css =
4991                container_of(work, struct cgroup_subsys_state, destroy_work);
4992        struct cgroup_subsys *ss = css->ss;
4993        struct cgroup *cgrp = css->cgroup;
4994
4995        percpu_ref_exit(&css->refcnt);
4996
4997        if (ss) {
4998                /* css free path */
4999                struct cgroup_subsys_state *parent = css->parent;
5000                int id = css->id;
5001
5002                ss->css_free(css);
5003                cgroup_idr_remove(&ss->css_idr, id);
5004                cgroup_put(cgrp);
5005
5006                if (parent)
5007                        css_put(parent);
5008        } else {
5009                /* cgroup free path */
5010                atomic_dec(&cgrp->root->nr_cgrps);
5011                cgroup_pidlist_destroy_all(cgrp);
5012                cancel_work_sync(&cgrp->release_agent_work);
5013
5014                if (cgroup_parent(cgrp)) {
5015                        /*
5016                         * We get a ref to the parent, and put the ref when
5017                         * this cgroup is being freed, so it's guaranteed
5018                         * that the parent won't be destroyed before its
5019                         * children.
5020                         */
5021                        cgroup_put(cgroup_parent(cgrp));
5022                        kernfs_put(cgrp->kn);
5023                        kfree(cgrp);
5024                } else {
5025                        /*
5026                         * This is root cgroup's refcnt reaching zero,
5027                         * which indicates that the root should be
5028                         * released.
5029                         */
5030                        cgroup_destroy_root(cgrp->root);
5031                }
5032        }
5033}
5034
5035static void css_free_rcu_fn(struct rcu_head *rcu_head)
5036{
5037        struct cgroup_subsys_state *css =
5038                container_of(rcu_head, struct cgroup_subsys_state, rcu_head);
5039
5040        INIT_WORK(&css->destroy_work, css_free_work_fn);
5041        queue_work(cgroup_destroy_wq, &css->destroy_work);
5042}
5043
5044static void css_release_work_fn(struct work_struct *work)
5045{
5046        struct cgroup_subsys_state *css =
5047                container_of(work, struct cgroup_subsys_state, destroy_work);
5048        struct cgroup_subsys *ss = css->ss;
5049        struct cgroup *cgrp = css->cgroup;
5050
5051        mutex_lock(&cgroup_mutex);
5052
5053        css->flags |= CSS_RELEASED;
5054        list_del_rcu(&css->sibling);
5055
5056        if (ss) {
5057                /* css release path */
5058                cgroup_idr_replace(&ss->css_idr, NULL, css->id);
5059                if (ss->css_released)
5060                        ss->css_released(css);
5061        } else {
5062                /* cgroup release path */
5063                trace_cgroup_release(cgrp);
5064
5065                cgroup_idr_remove(&cgrp->root->cgroup_idr, cgrp->id);
5066                cgrp->id = -1;
5067
5068                /*
5069                 * There are two control paths which try to determine
5070                 * cgroup from dentry without going through kernfs -
5071                 * cgroupstats_build() and css_tryget_online_from_dir().
5072                 * Those are supported by RCU protecting clearing of
5073                 * cgrp->kn->priv backpointer.
5074                 */
5075                if (cgrp->kn)
5076                        RCU_INIT_POINTER(*(void __rcu __force **)&cgrp->kn->priv,
5077                                         NULL);
5078        }
5079
5080        mutex_unlock(&cgroup_mutex);
5081
5082        call_rcu(&css->rcu_head, css_free_rcu_fn);
5083}
5084
5085static void css_release(struct percpu_ref *ref)
5086{
5087        struct cgroup_subsys_state *css =
5088                container_of(ref, struct cgroup_subsys_state, refcnt);
5089
5090        INIT_WORK(&css->destroy_work, css_release_work_fn);
5091        queue_work(cgroup_destroy_wq, &css->destroy_work);
5092}
5093
5094static void init_and_link_css(struct cgroup_subsys_state *css,
5095                              struct cgroup_subsys *ss, struct cgroup *cgrp)
5096{
5097        lockdep_assert_held(&cgroup_mutex);
5098
5099        cgroup_get(cgrp);
5100
5101        memset(css, 0, sizeof(*css));
5102        css->cgroup = cgrp;
5103        css->ss = ss;
5104        css->id = -1;
5105        INIT_LIST_HEAD(&css->sibling);
5106        INIT_LIST_HEAD(&css->children);
5107        css->serial_nr = css_serial_nr_next++;
5108        atomic_set(&css->online_cnt, 0);
5109
5110        if (cgroup_parent(cgrp)) {
5111                css->parent = cgroup_css(cgroup_parent(cgrp), ss);
5112                css_get(css->parent);
5113        }
5114
5115        BUG_ON(cgroup_css(cgrp, ss));
5116}
5117
5118/* invoke ->css_online() on a new CSS and mark it online if successful */
5119static int online_css(struct cgroup_subsys_state *css)
5120{
5121        struct cgroup_subsys *ss = css->ss;
5122        int ret = 0;
5123
5124        lockdep_assert_held(&cgroup_mutex);
5125
5126        if (ss->css_online)
5127                ret = ss->css_online(css);
5128        if (!ret) {
5129                css->flags |= CSS_ONLINE;
5130                rcu_assign_pointer(css->cgroup->subsys[ss->id], css);
5131
5132                atomic_inc(&css->online_cnt);
5133                if (css->parent)
5134                        atomic_inc(&css->parent->online_cnt);
5135        }
5136        return ret;
5137}
5138
5139/* if the CSS is online, invoke ->css_offline() on it and mark it offline */
5140static void offline_css(struct cgroup_subsys_state *css)
5141{
5142        struct cgroup_subsys *ss = css->ss;
5143
5144        lockdep_assert_held(&cgroup_mutex);
5145
5146        if (!(css->flags & CSS_ONLINE))
5147                return;
5148
5149        if (ss->css_reset)
5150                ss->css_reset(css);
5151
5152        if (ss->css_offline)
5153                ss->css_offline(css);
5154
5155        css->flags &= ~CSS_ONLINE;
5156        RCU_INIT_POINTER(css->cgroup->subsys[ss->id], NULL);
5157
5158        wake_up_all(&css->cgroup->offline_waitq);
5159}
5160
5161/**
5162 * css_create - create a cgroup_subsys_state
5163 * @cgrp: the cgroup new css will be associated with
5164 * @ss: the subsys of new css
5165 *
5166 * Create a new css associated with @cgrp - @ss pair.  On success, the new
5167 * css is online and installed in @cgrp.  This function doesn't create the
5168 * interface files.  Returns the new css on success, ERR_PTR(-errno) on failure.
5169 */
5170static struct cgroup_subsys_state *css_create(struct cgroup *cgrp,
5171                                              struct cgroup_subsys *ss)
5172{
5173        struct cgroup *parent = cgroup_parent(cgrp);
5174        struct cgroup_subsys_state *parent_css = cgroup_css(parent, ss);
5175        struct cgroup_subsys_state *css;
5176        int err;
5177
5178        lockdep_assert_held(&cgroup_mutex);
5179
5180        css = ss->css_alloc(parent_css);
5181        if (!css)
5182                css = ERR_PTR(-ENOMEM);
5183        if (IS_ERR(css))
5184                return css;
5185
5186        init_and_link_css(css, ss, cgrp);
5187
5188        err = percpu_ref_init(&css->refcnt, css_release, 0, GFP_KERNEL);
5189        if (err)
5190                goto err_free_css;
5191
5192        err = cgroup_idr_alloc(&ss->css_idr, NULL, 2, 0, GFP_KERNEL);
5193        if (err < 0)
5194                goto err_free_css;
5195        css->id = err;
5196
5197        /* @css is ready to be brought online now, make it visible */
5198        list_add_tail_rcu(&css->sibling, &parent_css->children);
5199        cgroup_idr_replace(&ss->css_idr, css, css->id);
5200
5201        err = online_css(css);
5202        if (err)
5203                goto err_list_del;
5204
5205        if (ss->broken_hierarchy && !ss->warned_broken_hierarchy &&
5206            cgroup_parent(parent)) {
5207                pr_warn("%s (%d) created nested cgroup for controller \"%s\" which has incomplete hierarchy support. Nested cgroups may change behavior in the future.\n",
5208                        current->comm, current->pid, ss->name);
5209                if (!strcmp(ss->name, "memory"))
5210                        pr_warn("\"memory\" requires setting use_hierarchy to 1 on the root\n");
5211                ss->warned_broken_hierarchy = true;
5212        }
5213
5214        return css;
5215
5216err_list_del:
5217        list_del_rcu(&css->sibling);
5218err_free_css:
5219        call_rcu(&css->rcu_head, css_free_rcu_fn);
5220        return ERR_PTR(err);
5221}
5222
5223/*
5224 * The returned cgroup is fully initialized including its control mask, but
5225 * it isn't associated with its kernfs_node and doesn't have the control
5226 * mask applied.
5227 */
5228static struct cgroup *cgroup_create(struct cgroup *parent)
5229{
5230        struct cgroup_root *root = parent->root;
5231        struct cgroup *cgrp, *tcgrp;
5232        int level = parent->level + 1;
5233        int ret;
5234
5235        /* allocate the cgroup and its ID, 0 is reserved for the root */
5236        cgrp = kzalloc(sizeof(*cgrp) +
5237                       sizeof(cgrp->ancestor_ids[0]) * (level + 1), GFP_KERNEL);
5238        if (!cgrp)
5239                return ERR_PTR(-ENOMEM);
5240
5241        ret = percpu_ref_init(&cgrp->self.refcnt, css_release, 0, GFP_KERNEL);
5242        if (ret)
5243                goto out_free_cgrp;
5244
5245        /*
5246         * Temporarily set the pointer to NULL, so idr_find() won't return
5247         * a half-baked cgroup.
5248         */
5249        cgrp->id = cgroup_idr_alloc(&root->cgroup_idr, NULL, 2, 0, GFP_KERNEL);
5250        if (cgrp->id < 0) {
5251                ret = -ENOMEM;
5252                goto out_cancel_ref;
5253        }
5254
5255        init_cgroup_housekeeping(cgrp);
5256
5257        cgrp->self.parent = &parent->self;
5258        cgrp->root = root;
5259        cgrp->level = level;
5260
5261        for (tcgrp = cgrp; tcgrp; tcgrp = cgroup_parent(tcgrp))
5262                cgrp->ancestor_ids[tcgrp->level] = tcgrp->id;
5263
5264        if (notify_on_release(parent))
5265                set_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags);
5266
5267        if (test_bit(CGRP_CPUSET_CLONE_CHILDREN, &parent->flags))
5268                set_bit(CGRP_CPUSET_CLONE_CHILDREN, &cgrp->flags);
5269
5270        cgrp->self.serial_nr = css_serial_nr_next++;
5271
5272        /* allocation complete, commit to creation */
5273        list_add_tail_rcu(&cgrp->self.sibling, &cgroup_parent(cgrp)->self.children);
5274        atomic_inc(&root->nr_cgrps);
5275        cgroup_get(parent);
5276
5277        /*
5278         * @cgrp is now fully operational.  If something fails after this
5279         * point, it'll be released via the normal destruction path.
5280         */
5281        cgroup_idr_replace(&root->cgroup_idr, cgrp, cgrp->id);
5282
5283        /*
5284         * On the default hierarchy, a child doesn't automatically inherit
5285         * subtree_control from the parent.  Each is configured manually.
5286         */
5287        if (!cgroup_on_dfl(cgrp))
5288                cgrp->subtree_control = cgroup_control(cgrp);
5289
5290        cgroup_propagate_control(cgrp);
5291
5292        return cgrp;
5293
5294out_cancel_ref:
5295        percpu_ref_exit(&cgrp->self.refcnt);
5296out_free_cgrp:
5297        kfree(cgrp);
5298        return ERR_PTR(ret);
5299}
5300
5301static int cgroup_mkdir(struct kernfs_node *parent_kn, const char *name,
5302                        umode_t mode)
5303{
5304        struct cgroup *parent, *cgrp;
5305        struct kernfs_node *kn;
5306        int ret;
5307
5308        /* do not accept '\n' to prevent making /proc/<pid>/cgroup unparsable */
5309        if (strchr(name, '\n'))
5310                return -EINVAL;
5311
5312        parent = cgroup_kn_lock_live(parent_kn, false);
5313        if (!parent)
5314                return -ENODEV;
5315
5316        cgrp = cgroup_create(parent);
5317        if (IS_ERR(cgrp)) {
5318                ret = PTR_ERR(cgrp);
5319                goto out_unlock;
5320        }
5321
5322        /* create the directory */
5323        kn = kernfs_create_dir(parent->kn, name, mode, cgrp);
5324        if (IS_ERR(kn)) {
5325                ret = PTR_ERR(kn);
5326                goto out_destroy;
5327        }
5328        cgrp->kn = kn;
5329
5330        /*
5331         * This extra ref will be put in css_free_work_fn() and guarantees
5332         * that @cgrp->kn is always accessible.
5333         */
5334        kernfs_get(kn);
5335
5336        ret = cgroup_kn_set_ugid(kn);
5337        if (ret)
5338                goto out_destroy;
5339
5340        ret = css_populate_dir(&cgrp->self);
5341        if (ret)
5342                goto out_destroy;
5343
5344        ret = cgroup_apply_control_enable(cgrp);
5345        if (ret)
5346                goto out_destroy;
5347
5348        trace_cgroup_mkdir(cgrp);
5349
5350        /* let's create and online css's */
5351        kernfs_activate(kn);
5352
5353        ret = 0;
5354        goto out_unlock;
5355
5356out_destroy:
5357        cgroup_destroy_locked(cgrp);
5358out_unlock:
5359        cgroup_kn_unlock(parent_kn);
5360        return ret;
5361}
5362
5363/*
5364 * This is called when the refcnt of a css is confirmed to be killed.
5365 * css_tryget_online() is now guaranteed to fail.  Tell the subsystem to
5366 * initiate destruction and put the css ref from kill_css().
5367 */
5368static void css_killed_work_fn(struct work_struct *work)
5369{
5370        struct cgroup_subsys_state *css =
5371                container_of(work, struct cgroup_subsys_state, destroy_work);
5372
5373        mutex_lock(&cgroup_mutex);
5374
5375        do {
5376                offline_css(css);
5377                css_put(css);
5378                /* @css can't go away while we're holding cgroup_mutex */
5379                css = css->parent;
5380        } while (css && atomic_dec_and_test(&css->online_cnt));
5381
5382        mutex_unlock(&cgroup_mutex);
5383}
5384
5385/* css kill confirmation processing requires process context, bounce */
5386static void css_killed_ref_fn(struct percpu_ref *ref)
5387{
5388        struct cgroup_subsys_state *css =
5389                container_of(ref, struct cgroup_subsys_state, refcnt);
5390
5391        if (atomic_dec_and_test(&css->online_cnt)) {
5392                INIT_WORK(&css->destroy_work, css_killed_work_fn);
5393                queue_work(cgroup_destroy_wq, &css->destroy_work);
5394        }
5395}
5396
5397/**
5398 * kill_css - destroy a css
5399 * @css: css to destroy
5400 *
5401 * This function initiates destruction of @css by removing cgroup interface
5402 * files and putting its base reference.  ->css_offline() will be invoked
5403 * asynchronously once css_tryget_online() is guaranteed to fail and when
5404 * the reference count reaches zero, @css will be released.
5405 */
5406static void kill_css(struct cgroup_subsys_state *css)
5407{
5408        lockdep_assert_held(&cgroup_mutex);
5409
5410        /*
5411         * This must happen before the css is disassociated from its cgroup.
5412         * See seq_css() for details.
5413         */
5414        css_clear_dir(css);
5415
5416        /*
5417         * Killing would put the base ref, but we need to keep it alive
5418         * until after ->css_offline().
5419         */
5420        css_get(css);
5421
5422        /*
5423         * cgroup core guarantees that, by the time ->css_offline() is
5424         * invoked, no new css reference will be given out via
5425         * css_tryget_online().  We can't simply call percpu_ref_kill() and
5426         * proceed to offlining css's because percpu_ref_kill() doesn't
5427         * guarantee that the ref is seen as killed on all CPUs on return.
5428         *
5429         * Use percpu_ref_kill_and_confirm() to get notifications as each
5430         * css is confirmed to be seen as killed on all CPUs.
5431         */
5432        percpu_ref_kill_and_confirm(&css->refcnt, css_killed_ref_fn);
5433}
5434
5435/**
5436 * cgroup_destroy_locked - the first stage of cgroup destruction
5437 * @cgrp: cgroup to be destroyed
5438 *
5439 * css's make use of percpu refcnts whose killing latency shouldn't be
5440 * exposed to userland and are RCU protected.  Also, cgroup core needs to
5441 * guarantee that css_tryget_online() won't succeed by the time
5442 * ->css_offline() is invoked.  To satisfy all the requirements,
5443 * destruction is implemented in the following two steps.
5444 *
5445 * s1. Verify @cgrp can be destroyed and mark it dying.  Remove all
5446 *     userland visible parts and start killing the percpu refcnts of
5447 *     css's.  Set up so that the next stage will be kicked off once all
5448 *     the percpu refcnts are confirmed to be killed.
5449 *
5450 * s2. Invoke ->css_offline(), mark the cgroup dead and proceed with the
5451 *     rest of destruction.  Once all cgroup references are gone, the
5452 *     cgroup is RCU-freed.
5453 *
5454 * This function implements s1.  After this step, @cgrp is gone as far as
5455 * the userland is concerned and a new cgroup with the same name may be
5456 * created.  As cgroup doesn't care about the names internally, this
5457 * doesn't cause any problem.
5458 */
5459static int cgroup_destroy_locked(struct cgroup *cgrp)
5460        __releases(&cgroup_mutex) __acquires(&cgroup_mutex)
5461{
5462        struct cgroup_subsys_state *css;
5463        struct cgrp_cset_link *link;
5464        int ssid;
5465
5466        lockdep_assert_held(&cgroup_mutex);
5467
5468        /*
5469         * Only migration can raise populated from zero and we're already
5470         * holding cgroup_mutex.
5471         */
5472        if (cgroup_is_populated(cgrp))
5473                return -EBUSY;
5474
5475        /*
5476         * Make sure there are no live children.  We can't test emptiness of
5477         * ->self.children as dead children linger on it while being
5478         * drained; otherwise, "rmdir parent/child parent" may fail.
5479         */
5480        if (css_has_online_children(&cgrp->self))
5481                return -EBUSY;
5482
5483        /*
5484         * Mark @cgrp and the associated csets dead.  The former prevents
5485         * further task migration and child creation by disabling
5486         * cgroup_lock_live_group().  The latter makes the csets ignored by
5487         * the migration path.
5488         */
5489        cgrp->self.flags &= ~CSS_ONLINE;
5490
5491        spin_lock_irq(&css_set_lock);
5492        list_for_each_entry(link, &cgrp->cset_links, cset_link)
5493                link->cset->dead = true;
5494        spin_unlock_irq(&css_set_lock);
5495
5496        /* initiate massacre of all css's */
5497        for_each_css(css, ssid, cgrp)
5498                kill_css(css);
5499
5500        /*
5501         * Remove @cgrp directory along with the base files.  @cgrp has an
5502         * extra ref on its kn.
5503         */
5504        kernfs_remove(cgrp->kn);
5505
5506        check_for_release(cgroup_parent(cgrp));
5507
5508        /* put the base reference */
5509        percpu_ref_kill(&cgrp->self.refcnt);
5510
5511        return 0;
5512}
5513
5514static int cgroup_rmdir(struct kernfs_node *kn)
5515{
5516        struct cgroup *cgrp;
5517        int ret = 0;
5518
5519        cgrp = cgroup_kn_lock_live(kn, false);
5520        if (!cgrp)
5521                return 0;
5522
5523        ret = cgroup_destroy_locked(cgrp);
5524
5525        if (!ret)
5526                trace_cgroup_rmdir(cgrp);
5527
5528        cgroup_kn_unlock(kn);
5529        return ret;
5530}
5531
5532static struct kernfs_syscall_ops cgroup_kf_syscall_ops = {
5533        .remount_fs             = cgroup_remount,
5534        .show_options           = cgroup_show_options,
5535        .mkdir                  = cgroup_mkdir,
5536        .rmdir                  = cgroup_rmdir,
5537        .rename                 = cgroup_rename,
5538        .show_path              = cgroup_show_path,
5539};
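
/*
 * kernfs dispatches directory syscalls on a cgroup mount through the
 * table above: for example, "mkdir foo" inside a mounted hierarchy ends
 * up in cgroup_mkdir() and the matching "rmdir foo" in cgroup_rmdir().
 */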
5540
5541static void __init cgroup_init_subsys(struct cgroup_subsys *ss, bool early)
5542{
5543        struct cgroup_subsys_state *css;
5544
5545        pr_debug("Initializing cgroup subsys %s\n", ss->name);
5546
5547        mutex_lock(&cgroup_mutex);
5548
5549        idr_init(&ss->css_idr);
5550        INIT_LIST_HEAD(&ss->cfts);
5551
5552        /* Create the root cgroup state for this subsystem */
5553        ss->root = &cgrp_dfl_root;
5554        css = ss->css_alloc(cgroup_css(&cgrp_dfl_root.cgrp, ss));
5555        /* We don't handle early failures gracefully */
5556        BUG_ON(IS_ERR(css));
5557        init_and_link_css(css, ss, &cgrp_dfl_root.cgrp);
5558
5559        /*
5560         * Root csses are never destroyed and we can't initialize
5561         * percpu_ref during early init.  Disable refcnting.
5562         */
5563        css->flags |= CSS_NO_REF;
5564
5565        if (early) {
5566                /* allocation can't be done safely during early init */
5567                css->id = 1;
5568        } else {
5569                css->id = cgroup_idr_alloc(&ss->css_idr, css, 1, 2, GFP_KERNEL);
5570                BUG_ON(css->id < 0);
5571        }
5572
5573        /* Update the init_css_set to contain a subsys
5574         * pointer to this state - since the subsystem is
5575         * newly registered, all tasks and hence the
5576         * init_css_set is in the subsystem's root cgroup. */
5577        init_css_set.subsys[ss->id] = css;
5578
5579        have_fork_callback |= (bool)ss->fork << ss->id;
5580        have_exit_callback |= (bool)ss->exit << ss->id;
5581        have_free_callback |= (bool)ss->free << ss->id;
5582        have_canfork_callback |= (bool)ss->can_fork << ss->id;
5583
5584        /* At system boot, before all subsystems have been
5585         * registered, no tasks have been forked, so we don't
5586         * need to invoke fork callbacks here. */
5587        BUG_ON(!list_empty(&init_task.tasks));
5588
5589        BUG_ON(online_css(css));
5590
5591        mutex_unlock(&cgroup_mutex);
5592}
5593
5594/**
5595 * cgroup_init_early - cgroup initialization at system boot
5596 *
5597 * Initialize cgroups at system boot, and initialize any
5598 * subsystems that request early init.
5599 */
5600int __init cgroup_init_early(void)
5601{
5602        static struct cgroup_sb_opts __initdata opts;
5603        struct cgroup_subsys *ss;
5604        int i;
5605
5606        init_cgroup_root(&cgrp_dfl_root, &opts);
5607        cgrp_dfl_root.cgrp.self.flags |= CSS_NO_REF;
5608
5609        RCU_INIT_POINTER(init_task.cgroups, &init_css_set);
5610
5611        for_each_subsys(ss, i) {
5612                WARN(!ss->css_alloc || !ss->css_free || ss->name || ss->id,
5613                     "invalid cgroup_subsys %d:%s css_alloc=%p css_free=%p id:name=%d:%s\n",
5614                     i, cgroup_subsys_name[i], ss->css_alloc, ss->css_free,
5615                     ss->id, ss->name);
5616                WARN(strlen(cgroup_subsys_name[i]) > MAX_CGROUP_TYPE_NAMELEN,
5617                     "cgroup_subsys_name %s too long\n", cgroup_subsys_name[i]);
5618
5619                ss->id = i;
5620                ss->name = cgroup_subsys_name[i];
5621                if (!ss->legacy_name)
5622                        ss->legacy_name = cgroup_subsys_name[i];
5623
5624                if (ss->early_init)
5625                        cgroup_init_subsys(ss, true);
5626        }
5627        return 0;
5628}
5629
5630static u16 cgroup_disable_mask __initdata;
5631
5632/**
5633 * cgroup_init - cgroup initialization
5634 *
5635 * Register cgroup filesystem and /proc file, and initialize
5636 * any subsystems that didn't request early init.
5637 */
5638int __init cgroup_init(void)
5639{
5640        struct cgroup_subsys *ss;
5641        int ssid;
5642
5643        BUILD_BUG_ON(CGROUP_SUBSYS_COUNT > 16);
5644        BUG_ON(percpu_init_rwsem(&cgroup_threadgroup_rwsem));
5645        BUG_ON(cgroup_init_cftypes(NULL, cgroup_dfl_base_files));
5646        BUG_ON(cgroup_init_cftypes(NULL, cgroup_legacy_base_files));
5647
5648        /*
5649         * The latency of the synchronize_sched() is too high for cgroups,
5650         * avoid it at the cost of forcing all readers into the slow path.
5651         */
5652        rcu_sync_enter_start(&cgroup_threadgroup_rwsem.rss);
5653
5654        get_user_ns(init_cgroup_ns.user_ns);
5655
5656        mutex_lock(&cgroup_mutex);
5657
5658        /*
5659         * Add init_css_set to the hash table so that dfl_root can link to
5660         * it during init.
5661         */
5662        hash_add(css_set_table, &init_css_set.hlist,
5663                 css_set_hash(init_css_set.subsys));
5664
5665        BUG_ON(cgroup_setup_root(&cgrp_dfl_root, 0));
5666
5667        mutex_unlock(&cgroup_mutex);
5668
5669        for_each_subsys(ss, ssid) {
5670                if (ss->early_init) {
5671                        struct cgroup_subsys_state *css =
5672                                init_css_set.subsys[ss->id];
5673
5674                        css->id = cgroup_idr_alloc(&ss->css_idr, css, 1, 2,
5675                                                   GFP_KERNEL);
5676                        BUG_ON(css->id < 0);
5677                } else {
5678                        cgroup_init_subsys(ss, false);
5679                }
5680
5681                list_add_tail(&init_css_set.e_cset_node[ssid],
5682                              &cgrp_dfl_root.cgrp.e_csets[ssid]);
5683
5684                /*
5685                 * Setting dfl_root subsys_mask needs to consider the
5686                 * disabled flag and cftype registration needs kmalloc,
5687                 * both of which aren't available during early_init.
5688                 */
5689                if (cgroup_disable_mask & (1 << ssid)) {
5690                        static_branch_disable(cgroup_subsys_enabled_key[ssid]);
5691                        printk(KERN_INFO "Disabling %s control group subsystem\n",
5692                               ss->name);
5693                        continue;
5694                }
5695
5696                if (cgroup_ssid_no_v1(ssid))
5697                        printk(KERN_INFO "Disabling %s control group subsystem in v1 mounts\n",
5698                               ss->name);
5699
5700                cgrp_dfl_root.subsys_mask |= 1 << ss->id;
5701
5702                if (ss->implicit_on_dfl)
5703                        cgrp_dfl_implicit_ss_mask |= 1 << ss->id;
5704                else if (!ss->dfl_cftypes)
5705                        cgrp_dfl_inhibit_ss_mask |= 1 << ss->id;
5706
5707                if (ss->dfl_cftypes == ss->legacy_cftypes) {
5708                        WARN_ON(cgroup_add_cftypes(ss, ss->dfl_cftypes));
5709                } else {
5710                        WARN_ON(cgroup_add_dfl_cftypes(ss, ss->dfl_cftypes));
5711                        WARN_ON(cgroup_add_legacy_cftypes(ss, ss->legacy_cftypes));
5712                }
5713
5714                if (ss->bind)
5715                        ss->bind(init_css_set.subsys[ssid]);
5716        }
5717
5718        /* init_css_set.subsys[] has been updated, re-hash */
5719        hash_del(&init_css_set.hlist);
5720        hash_add(css_set_table, &init_css_set.hlist,
5721                 css_set_hash(init_css_set.subsys));
5722
5723        WARN_ON(sysfs_create_mount_point(fs_kobj, "cgroup"));
5724        WARN_ON(register_filesystem(&cgroup_fs_type));
5725        WARN_ON(register_filesystem(&cgroup2_fs_type));
5726        WARN_ON(!proc_create("cgroups", 0, NULL, &proc_cgroupstats_operations));
5727
5728        return 0;
5729}
5730
5731static int __init cgroup_wq_init(void)
5732{
5733        /*
5734         * There isn't much point in executing the destruction path in
5735         * parallel.  A good chunk is serialized with cgroup_mutex anyway.
5736         * Use 1 for @max_active.
5737         *
5738         * We would prefer to do this in cgroup_init() above, but that
5739         * is called before init_workqueues(): so leave this until after.
5740         */
5741        cgroup_destroy_wq = alloc_workqueue("cgroup_destroy", 0, 1);
5742        BUG_ON(!cgroup_destroy_wq);
5743
5744        /*
5745         * Used to destroy pidlists; kept separate so it can serve as a flush domain.
5746         * Cap @max_active to 1 too.
5747         */
5748        cgroup_pidlist_destroy_wq = alloc_workqueue("cgroup_pidlist_destroy",
5749                                                    0, 1);
5750        BUG_ON(!cgroup_pidlist_destroy_wq);
5751
5752        return 0;
5753}
5754core_initcall(cgroup_wq_init);
5755
5756/*
5757 * proc_cgroup_show()
5758 *  - Print task's cgroup paths into seq_file, one line for each hierarchy
5759 *  - Used for /proc/<pid>/cgroup.
5760 */
5761int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns,
5762                     struct pid *pid, struct task_struct *tsk)
5763{
5764        char *buf;
5765        int retval;
5766        struct cgroup_root *root;
5767
5768        retval = -ENOMEM;
5769        buf = kmalloc(PATH_MAX, GFP_KERNEL);
5770        if (!buf)
5771                goto out;
5772
5773        mutex_lock(&cgroup_mutex);
5774        spin_lock_irq(&css_set_lock);
5775
5776        for_each_root(root) {
5777                struct cgroup_subsys *ss;
5778                struct cgroup *cgrp;
5779                int ssid, count = 0;
5780
5781                if (root == &cgrp_dfl_root && !cgrp_dfl_visible)
5782                        continue;
5783
5784                seq_printf(m, "%d:", root->hierarchy_id);
5785                if (root != &cgrp_dfl_root)
5786                        for_each_subsys(ss, ssid)
5787                                if (root->subsys_mask & (1 << ssid))
5788                                        seq_printf(m, "%s%s", count++ ? "," : "",
5789                                                   ss->legacy_name);
5790                if (strlen(root->name))
5791                        seq_printf(m, "%sname=%s", count ? "," : "",
5792                                   root->name);
5793                seq_putc(m, ':');
5794
5795                cgrp = task_cgroup_from_root(tsk, root);
5796
5797                /*
5798                 * On traditional hierarchies, all zombie tasks show up as
5799                 * belonging to the root cgroup.  On the default hierarchy,
5800                 * while a zombie doesn't show up in "cgroup.procs" and
5801                 * thus can't be migrated, its /proc/PID/cgroup keeps
5802                 * reporting the cgroup it belonged to before exiting.  If
5803                 * the cgroup is removed before the zombie is reaped,
5804                 * " (deleted)" is appended to the cgroup path.
5805                 */
5806                if (cgroup_on_dfl(cgrp) || !(tsk->flags & PF_EXITING)) {
5807                        retval = cgroup_path_ns_locked(cgrp, buf, PATH_MAX,
5808                                                current->nsproxy->cgroup_ns);
5809                        if (retval >= PATH_MAX)
5810                                retval = -ENAMETOOLONG;
5811                        if (retval < 0)
5812                                goto out_unlock;
5813
5814                        seq_puts(m, buf);
5815                } else {
5816                        seq_puts(m, "/");
5817                }
5818
5819                if (cgroup_on_dfl(cgrp) && cgroup_is_dead(cgrp))
5820                        seq_puts(m, " (deleted)\n");
5821                else
5822                        seq_putc(m, '\n');
5823        }
5824
5825        retval = 0;
5826out_unlock:
5827        spin_unlock_irq(&css_set_lock);
5828        mutex_unlock(&cgroup_mutex);
5829        kfree(buf);
5830out:
5831        return retval;
5832}
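
/*
 * Sample /proc/<pid>/cgroup output produced by the loop above (hierarchy
 * ids, controllers and paths are system-dependent):
 *
 *   5:cpu,cpuacct:/user.slice
 *   2:name=systemd:/user.slice/user-1000.slice/session-2.scope
 *   0::/user.slice
 *
 * The "0::" line is the default hierarchy, which prints no controller
 * names; a named hierarchy adds "name=<name>" as shown.
 */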
5833
5834/* Display information about each subsystem and each hierarchy */
5835static int proc_cgroupstats_show(struct seq_file *m, void *v)
5836{
5837        struct cgroup_subsys *ss;
5838        int i;
5839
5840        seq_puts(m, "#subsys_name\thierarchy\tnum_cgroups\tenabled\n");
5841        /*
5842         * ideally we don't want subsystems moving around while we do this.
5843         * cgroup_mutex is also necessary to guarantee an atomic snapshot of
5844         * subsys/hierarchy state.
5845         */
5846        mutex_lock(&cgroup_mutex);
5847
5848        for_each_subsys(ss, i)
5849                seq_printf(m, "%s\t%d\t%d\t%d\n",
5850                           ss->legacy_name, ss->root->hierarchy_id,
5851                           atomic_read(&ss->root->nr_cgrps),
5852                           cgroup_ssid_enabled(i));
5853
5854        mutex_unlock(&cgroup_mutex);
5855        return 0;
5856}
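
/*
 * Sample /proc/cgroups output produced by the loop above (counts and
 * hierarchy ids are system-dependent):
 *
 *   #subsys_name    hierarchy       num_cgroups     enabled
 *   cpuset          2               1               1
 *   cpu             5               63              1
 *   memory          9               104             1
 */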
5857
5858static int cgroupstats_open(struct inode *inode, struct file *file)
5859{
5860        return single_open(file, proc_cgroupstats_show, NULL);
5861}
5862
5863static const struct file_operations proc_cgroupstats_operations = {
5864        .open = cgroupstats_open,
5865        .read = seq_read,
5866        .llseek = seq_lseek,
5867        .release = single_release,
5868};
5869
5870/**
5871 * cgroup_fork - initialize cgroup related fields during copy_process()
5872 * @child: pointer to task_struct of the newly forked child process.
5873 *
5874 * A task is associated with the init_css_set until cgroup_post_fork()
5875 * attaches it to the parent's css_set.  Empty cg_list indicates that
5876 * @child isn't holding a reference to its css_set.
5877 */
5878void cgroup_fork(struct task_struct *child)
5879{
5880        RCU_INIT_POINTER(child->cgroups, &init_css_set);
5881        INIT_LIST_HEAD(&child->cg_list);
5882}
5883
5884/**
5885 * cgroup_can_fork - called on a new task before the process is exposed
5886 * @child: the task in question.
5887 *
5888 * This calls the subsystem can_fork() callbacks. If the can_fork() callback
5889 * returns an error, the fork aborts with that error code.  This allows
5890 * a cgroup subsystem to conditionally allow or deny new forks.
5891 */
5892int cgroup_can_fork(struct task_struct *child)
5893{
5894        struct cgroup_subsys *ss;
5895        int i, j, ret;
5896
5897        do_each_subsys_mask(ss, i, have_canfork_callback) {
5898                ret = ss->can_fork(child);
5899                if (ret)
5900                        goto out_revert;
5901        } while_each_subsys_mask();
5902
5903        return 0;
5904
5905out_revert:
5906        for_each_subsys(ss, j) {
5907                if (j >= i)
5908                        break;
5909                if (ss->cancel_fork)
5910                        ss->cancel_fork(child);
5911        }
5912
5913        return ret;
5914}
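
/*
 * Note the partial-revert semantics above: if, say, the third can_fork()
 * callback fails, cancel_fork() is invoked only for the first two
 * subsystems (where provided); the failing subsystem itself is not
 * reverted.
 */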
5915
5916/**
5917 * cgroup_cancel_fork - called if a fork failed after cgroup_can_fork()
5918 * @child: the task in question
5919 *
5920 * This calls the cancel_fork() callbacks if a fork failed *after*
5921 * cgroup_can_fork() succeeded.
5922 */
5923void cgroup_cancel_fork(struct task_struct *child)
5924{
5925        struct cgroup_subsys *ss;
5926        int i;
5927
5928        for_each_subsys(ss, i)
5929                if (ss->cancel_fork)
5930                        ss->cancel_fork(child);
5931}
5932
5933/**
5934 * cgroup_post_fork - called on a new task after adding it to the task list
5935 * @child: the task in question
5936 *
5937 * Adds the task to the list running through its css_set if necessary and
5938 * calls the subsystem fork() callbacks.  This has to happen after the task
5939 * is visible on the task list in case we race with the first call to
5940 * css_task_iter_start() - to guarantee that the new task ends up on its
5941 * list.
5942 */
5943void cgroup_post_fork(struct task_struct *child)
5944{
5945        struct cgroup_subsys *ss;
5946        int i;
5947
5948        /*
5949         * This may race against cgroup_enable_task_cg_lists().  As that
5950         * function sets use_task_css_set_links before grabbing
5951         * tasklist_lock and we just went through tasklist_lock to add
5952         * @child, it's guaranteed that either we see the set
5953         * use_task_css_set_links or cgroup_enable_task_cg_lists() sees
5954         * @child during its iteration.
5955         *
5956         * If we won the race, @child is associated with %current's
5957         * css_set.  Grabbing css_set_lock guarantees both that the
5958         * association is stable, and, on completion of the parent's
5959         * migration, @child is visible in the source of migration or
5960         * already in the destination cgroup.  This guarantee is necessary
5961         * when implementing operations which need to migrate all tasks of
5962         * a cgroup to another.
5963         *
5964         * Note that if we lose to cgroup_enable_task_cg_lists(), @child
5965         * will remain in init_css_set.  This is safe because all tasks are
5966         * in the init_css_set before cg_links is enabled and there's no
5967         * operation which transfers all tasks out of init_css_set.
5968         */
5969        if (use_task_css_set_links) {
5970                struct css_set *cset;
5971
5972                spin_lock_irq(&css_set_lock);
5973                cset = task_css_set(current);
5974                if (list_empty(&child->cg_list)) {
5975                        get_css_set(cset);
5976                        css_set_move_task(child, NULL, cset, false);
5977                }
5978                spin_unlock_irq(&css_set_lock);
5979        }
5980
5981        /*
5982         * Call ss->fork().  This must happen after @child is linked on
5983         * css_set; otherwise, @child might change state between ->fork()
5984         * and addition to css_set.
5985         */
5986        do_each_subsys_mask(ss, i, have_fork_callback) {
5987                ss->fork(child);
5988        } while_each_subsys_mask();
5989}

/**
 * cgroup_exit - detach cgroup from exiting task
 * @tsk: pointer to task_struct of exiting process
 *
 * Description: Detach cgroup from @tsk and release it.
 *
 * Note that cgroups marked notify_on_release force every task in
 * them to take the global cgroup_mutex when exiting.
 * This could impact scaling on very large systems.  Be reluctant to
 * use notify_on_release cgroups where very high task exit scaling
 * is required on large systems.
 *
 * We set the exiting task's cgroup to the root cgroup (top_cgroup).  We
 * call cgroup_exit() while the task is still competent to handle
 * notify_on_release(), then leave the task attached to the root cgroup in
 * each hierarchy for the remainder of its exit.  No need to bother with
 * init_css_set refcnting.  init_css_set never goes away and we can't race
 * with the migration path - PF_EXITING is visible to the migration path.
 */
void cgroup_exit(struct task_struct *tsk)
{
        struct cgroup_subsys *ss;
        struct css_set *cset;
        int i;

        /*
         * Unlink @tsk from its css_set.  As the migration path can't race
         * with us, we can check css_set and cg_list without synchronization.
         */
        cset = task_css_set(tsk);

        if (!list_empty(&tsk->cg_list)) {
                spin_lock_irq(&css_set_lock);
                css_set_move_task(tsk, cset, NULL, false);
                spin_unlock_irq(&css_set_lock);
        } else {
                get_css_set(cset);
        }

        /* see cgroup_post_fork() for details */
        do_each_subsys_mask(ss, i, have_exit_callback) {
                ss->exit(tsk);
        } while_each_subsys_mask();
}

void cgroup_free(struct task_struct *task)
{
        struct css_set *cset = task_css_set(task);
        struct cgroup_subsys *ss;
        int ssid;

        do_each_subsys_mask(ss, ssid, have_free_callback) {
                ss->free(task);
        } while_each_subsys_mask();

        put_css_set(cset);
}

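/*
 * A cgroup is eligible for release-agent notification only when all four
 * conditions below hold: notify_on_release is set, the cgroup is no
 * longer populated, it has no online children, and it isn't already dead.
 */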
static void check_for_release(struct cgroup *cgrp)
{
        if (notify_on_release(cgrp) && !cgroup_is_populated(cgrp) &&
            !css_has_online_children(&cgrp->self) && !cgroup_is_dead(cgrp))
                schedule_work(&cgrp->release_agent_work);
}

/*
 * Notify userspace when a cgroup is released, by running the
 * configured release agent with the name of the cgroup (path
 * relative to the root of cgroup file system) as the argument.
 *
 * Most likely, this user command will try to rmdir this cgroup.
 *
 * This races with the possibility that some other task will be
 * attached to this cgroup before it is removed, or that some other
 * user task will 'mkdir' a child cgroup of this cgroup.  That's ok.
 * The presumed 'rmdir' will fail quietly if this cgroup is in use
 * again, and this cgroup will be reprieved from its death sentence,
 * to continue to serve a useful existence.  Next time it's released,
 * we will get notified again, if it still has 'notify_on_release' set.
 *
 * The final arg to call_usermodehelper() is UMH_WAIT_EXEC, which
 * means only wait until the task is successfully execve()'d.  The
 * separate release agent task is forked by call_usermodehelper(),
 * then control in this thread returns here, without waiting for the
 * release agent task.  We don't bother to wait because the caller of
 * this routine has no use for the exit status of the release agent
 * task, so no sense holding our caller up for that.
 */
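/*
 * For illustration (the agent path below is hypothetical): with
 * release_agent set to /sbin/cgroup-release, emptying a cgroup mounted
 * at /sys/fs/cgroup/memory/mygrp results in roughly
 *
 *   /sbin/cgroup-release /mygrp
 *
 * being run with the minimal HOME/PATH environment set up below.
 */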
static void cgroup_release_agent(struct work_struct *work)
{
        struct cgroup *cgrp =
                container_of(work, struct cgroup, release_agent_work);
        char *pathbuf = NULL, *agentbuf = NULL;
        char *argv[3], *envp[3];
        int ret;

        mutex_lock(&cgroup_mutex);

        pathbuf = kmalloc(PATH_MAX, GFP_KERNEL);
        agentbuf = kstrdup(cgrp->root->release_agent_path, GFP_KERNEL);
        if (!pathbuf || !agentbuf)
                goto out;

        spin_lock_irq(&css_set_lock);
        ret = cgroup_path_ns_locked(cgrp, pathbuf, PATH_MAX, &init_cgroup_ns);
        spin_unlock_irq(&css_set_lock);
        if (ret < 0 || ret >= PATH_MAX)
                goto out;

        argv[0] = agentbuf;
        argv[1] = pathbuf;
        argv[2] = NULL;

        /* minimal command environment */
        envp[0] = "HOME=/";
        envp[1] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";
        envp[2] = NULL;

        mutex_unlock(&cgroup_mutex);
        call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC);
        goto out_free;
out:
        mutex_unlock(&cgroup_mutex);
out_free:
        kfree(agentbuf);
        kfree(pathbuf);
}

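/*
 * Parse the cgroup_disable= boot parameter, e.g. "cgroup_disable=memory",
 * naming one or more controllers (by current or legacy name) that should
 * be left permanently disabled.
 */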
static int __init cgroup_disable(char *str)
{
        struct cgroup_subsys *ss;
        char *token;
        int i;

        while ((token = strsep(&str, ",")) != NULL) {
                if (!*token)
                        continue;

                for_each_subsys(ss, i) {
                        if (strcmp(token, ss->name) &&
                            strcmp(token, ss->legacy_name))
                                continue;
                        cgroup_disable_mask |= 1 << i;
                }
        }
        return 1;
}
__setup("cgroup_disable=", cgroup_disable);

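/*
 * Parse the cgroup_no_v1= boot parameter, e.g. "cgroup_no_v1=memory" or
 * "cgroup_no_v1=all", to prevent the named controllers (or every
 * controller) from being mounted on a v1 hierarchy.
 */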
static int __init cgroup_no_v1(char *str)
{
        struct cgroup_subsys *ss;
        char *token;
        int i;

        while ((token = strsep(&str, ",")) != NULL) {
                if (!*token)
                        continue;

                if (!strcmp(token, "all")) {
                        cgroup_no_v1_mask = U16_MAX;
                        break;
                }

                for_each_subsys(ss, i) {
                        if (strcmp(token, ss->name) &&
                            strcmp(token, ss->legacy_name))
                                continue;

                        cgroup_no_v1_mask |= 1 << i;
                }
        }
        return 1;
}
__setup("cgroup_no_v1=", cgroup_no_v1);

/**
 * css_tryget_online_from_dir - get corresponding css from a cgroup dentry
 * @dentry: directory dentry of interest
 * @ss: subsystem of interest
 *
 * If @dentry is a directory for a cgroup which has @ss enabled on it, try
 * to get the corresponding css and return it.  If such css doesn't exist
 * or can't be pinned, an ERR_PTR value is returned.
 */
struct cgroup_subsys_state *css_tryget_online_from_dir(struct dentry *dentry,
                                                       struct cgroup_subsys *ss)
{
        struct kernfs_node *kn = kernfs_node_from_dentry(dentry);
        struct file_system_type *s_type = dentry->d_sb->s_type;
        struct cgroup_subsys_state *css = NULL;
        struct cgroup *cgrp;

        /* is @dentry a cgroup dir? */
        if ((s_type != &cgroup_fs_type && s_type != &cgroup2_fs_type) ||
            !kn || kernfs_type(kn) != KERNFS_DIR)
                return ERR_PTR(-EBADF);

        rcu_read_lock();

        /*
         * This path doesn't originate from kernfs and @kn could already
         * have been or be removed at any point.  @kn->priv is RCU
         * protected for this access.  See css_release_work_fn() for details.
         */
        cgrp = rcu_dereference(kn->priv);
        if (cgrp)
                css = cgroup_css(cgrp, ss);

        if (!css || !css_tryget_online(css))
                css = ERR_PTR(-ENOENT);

        rcu_read_unlock();
        return css;
}

/**
 * css_from_id - lookup css by id
 * @id: the cgroup id
 * @ss: cgroup subsys to be looked into
 *
 * Returns the css if there's a valid one with @id, otherwise returns NULL.
 * Should be called under rcu_read_lock().
 */
struct cgroup_subsys_state *css_from_id(int id, struct cgroup_subsys *ss)
{
        WARN_ON_ONCE(!rcu_read_lock_held());
        return idr_find(&ss->css_idr, id);
}

/**
 * cgroup_get_from_path - lookup and get a cgroup from its default hierarchy path
 * @path: path on the default hierarchy
 *
 * Find the cgroup at @path on the default hierarchy, increment its
 * reference count and return it.  Returns a pointer to the found cgroup
 * on success, ERR_PTR(-ENOENT) if @path doesn't exist and
 * ERR_PTR(-ENOTDIR) if @path points to a non-directory.  The caller is
 * responsible for dropping the reference with cgroup_put().
 */
struct cgroup *cgroup_get_from_path(const char *path)
{
        struct kernfs_node *kn;
        struct cgroup *cgrp;

        mutex_lock(&cgroup_mutex);

        kn = kernfs_walk_and_get(cgrp_dfl_root.cgrp.kn, path);
        if (kn) {
                if (kernfs_type(kn) == KERNFS_DIR) {
                        cgrp = kn->priv;
                        cgroup_get(cgrp);
                } else {
                        cgrp = ERR_PTR(-ENOTDIR);
                }
                kernfs_put(kn);
        } else {
                cgrp = ERR_PTR(-ENOENT);
        }

        mutex_unlock(&cgroup_mutex);
        return cgrp;
}
EXPORT_SYMBOL_GPL(cgroup_get_from_path);

/**
 * cgroup_get_from_fd - get a cgroup pointer from a fd
 * @fd: fd obtained by open(cgroup2_dir)
 *
 * Find the cgroup from a fd which should be obtained by opening a cgroup
 * directory.  Returns a pointer to the cgroup on success, or an ERR_PTR
 * if the cgroup cannot be found.  The caller is responsible for dropping
 * the reference with cgroup_put().
 */
struct cgroup *cgroup_get_from_fd(int fd)
{
        struct cgroup_subsys_state *css;
        struct cgroup *cgrp;
        struct file *f;

        f = fget_raw(fd);
        if (!f)
                return ERR_PTR(-EBADF);

        css = css_tryget_online_from_dir(f->f_path.dentry, NULL);
        fput(f);
        if (IS_ERR(css))
                return ERR_CAST(css);

        cgrp = css->cgroup;
        if (!cgroup_on_dfl(cgrp)) {
                cgroup_put(cgrp);
                return ERR_PTR(-EBADF);
        }

        return cgrp;
}
EXPORT_SYMBOL_GPL(cgroup_get_from_fd);

/*
 * sock->sk_cgrp_data handling.  For more info, see sock_cgroup_data
 * definition in cgroup-defs.h.
 */
#ifdef CONFIG_SOCK_CGROUP_DATA

#if defined(CONFIG_CGROUP_NET_PRIO) || defined(CONFIG_CGROUP_NET_CLASSID)

DEFINE_SPINLOCK(cgroup_sk_update_lock);
static bool cgroup_sk_alloc_disabled __read_mostly;

void cgroup_sk_alloc_disable(void)
{
        if (cgroup_sk_alloc_disabled)
                return;
        pr_info("cgroup: disabling cgroup2 socket matching due to net_prio or net_cls activation\n");
        cgroup_sk_alloc_disabled = true;
}

#else

#define cgroup_sk_alloc_disabled        false

#endif

void cgroup_sk_alloc(struct sock_cgroup_data *skcd)
{
        if (cgroup_sk_alloc_disabled)
                return;

        /*
         * Socket clone path: @skcd already points at the parent socket's
         * cgroup; just grab an extra reference for the clone.
         */
        if (skcd->val) {
                cgroup_get(sock_cgroup_ptr(skcd));
                return;
        }

        rcu_read_lock();

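        /*
         * Retry loop: cgroup_tryget() can fail if the css_set's default
         * cgroup is concurrently being destroyed; re-read the task's
         * css_set and try again until a reference is pinned.
         */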
        while (true) {
                struct css_set *cset;

                cset = task_css_set(current);
                if (likely(cgroup_tryget(cset->dfl_cgrp))) {
                        skcd->val = (unsigned long)cset->dfl_cgrp;
                        break;
                }
                cpu_relax();
        }

        rcu_read_unlock();
}

void cgroup_sk_free(struct sock_cgroup_data *skcd)
{
        cgroup_put(sock_cgroup_ptr(skcd));
}

#endif  /* CONFIG_SOCK_CGROUP_DATA */

/* cgroup namespaces */

static struct ucounts *inc_cgroup_namespaces(struct user_namespace *ns)
{
        return inc_ucount(ns, current_euid(), UCOUNT_CGROUP_NAMESPACES);
}

static void dec_cgroup_namespaces(struct ucounts *ucounts)
{
        dec_ucount(ucounts, UCOUNT_CGROUP_NAMESPACES);
}

static struct cgroup_namespace *alloc_cgroup_ns(void)
{
        struct cgroup_namespace *new_ns;
        int ret;

        new_ns = kzalloc(sizeof(struct cgroup_namespace), GFP_KERNEL);
        if (!new_ns)
                return ERR_PTR(-ENOMEM);
        ret = ns_alloc_inum(&new_ns->ns);
        if (ret) {
                kfree(new_ns);
                return ERR_PTR(ret);
        }
        atomic_set(&new_ns->count, 1);
        new_ns->ns.ops = &cgroupns_operations;
        return new_ns;
}

void free_cgroup_ns(struct cgroup_namespace *ns)
{
        put_css_set(ns->root_cset);
        dec_cgroup_namespaces(ns->ucounts);
        put_user_ns(ns->user_ns);
        ns_free_inum(&ns->ns);
        kfree(ns);
}
EXPORT_SYMBOL(free_cgroup_ns);

struct cgroup_namespace *copy_cgroup_ns(unsigned long flags,
                                        struct user_namespace *user_ns,
                                        struct cgroup_namespace *old_ns)
{
        struct cgroup_namespace *new_ns;
        struct ucounts *ucounts;
        struct css_set *cset;

        BUG_ON(!old_ns);

        if (!(flags & CLONE_NEWCGROUP)) {
                get_cgroup_ns(old_ns);
                return old_ns;
        }

        /* Allow only sysadmin to create cgroup namespace. */
        if (!ns_capable(user_ns, CAP_SYS_ADMIN))
                return ERR_PTR(-EPERM);

        ucounts = inc_cgroup_namespaces(user_ns);
        if (!ucounts)
                return ERR_PTR(-ENOSPC);

        /* It is not safe to take cgroup_mutex here */
        spin_lock_irq(&css_set_lock);
        cset = task_css_set(current);
        get_css_set(cset);
        spin_unlock_irq(&css_set_lock);

        new_ns = alloc_cgroup_ns();
        if (IS_ERR(new_ns)) {
                put_css_set(cset);
                dec_cgroup_namespaces(ucounts);
                return new_ns;
        }

        new_ns->user_ns = get_user_ns(user_ns);
        new_ns->ucounts = ucounts;
        new_ns->root_cset = cset;

        return new_ns;
}

static inline struct cgroup_namespace *to_cg_ns(struct ns_common *ns)
{
        return container_of(ns, struct cgroup_namespace, ns);
}

static int cgroupns_install(struct nsproxy *nsproxy, struct ns_common *ns)
{
        struct cgroup_namespace *cgroup_ns = to_cg_ns(ns);

        if (!ns_capable(current_user_ns(), CAP_SYS_ADMIN) ||
            !ns_capable(cgroup_ns->user_ns, CAP_SYS_ADMIN))
                return -EPERM;

        /* Don't need to do anything if we are attaching to our own cgroupns. */
        if (cgroup_ns == nsproxy->cgroup_ns)
                return 0;

        get_cgroup_ns(cgroup_ns);
        put_cgroup_ns(nsproxy->cgroup_ns);
        nsproxy->cgroup_ns = cgroup_ns;

        return 0;
}

static struct ns_common *cgroupns_get(struct task_struct *task)
{
        struct cgroup_namespace *ns = NULL;
        struct nsproxy *nsproxy;

        task_lock(task);
        nsproxy = task->nsproxy;
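        /* nsproxy may already be NULL if @task is exiting. */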
        if (nsproxy) {
                ns = nsproxy->cgroup_ns;
                get_cgroup_ns(ns);
        }
        task_unlock(task);

        return ns ? &ns->ns : NULL;
}

static void cgroupns_put(struct ns_common *ns)
{
        put_cgroup_ns(to_cg_ns(ns));
}

static struct user_namespace *cgroupns_owner(struct ns_common *ns)
{
        return to_cg_ns(ns)->user_ns;
}

const struct proc_ns_operations cgroupns_operations = {
        .name           = "cgroup",
        .type           = CLONE_NEWCGROUP,
        .get            = cgroupns_get,
        .put            = cgroupns_put,
        .install        = cgroupns_install,
        .owner          = cgroupns_owner,
};

static __init int cgroup_namespaces_init(void)
{
        return 0;
}
subsys_initcall(cgroup_namespaces_init);

#ifdef CONFIG_CGROUP_DEBUG
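/*
 * The debug controller exposes internal bookkeeping (css_set pointers,
 * refcounts, cset<->cgroup links) through cgroupfs files to help debug
 * the cgroup core itself.  It is only built with CONFIG_CGROUP_DEBUG.
 */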
static struct cgroup_subsys_state *
debug_css_alloc(struct cgroup_subsys_state *parent_css)
{
        struct cgroup_subsys_state *css = kzalloc(sizeof(*css), GFP_KERNEL);

        if (!css)
                return ERR_PTR(-ENOMEM);

        return css;
}

static void debug_css_free(struct cgroup_subsys_state *css)
{
        kfree(css);
}

static u64 debug_taskcount_read(struct cgroup_subsys_state *css,
                                struct cftype *cft)
{
        return cgroup_task_count(css->cgroup);
}

static u64 current_css_set_read(struct cgroup_subsys_state *css,
                                struct cftype *cft)
{
        return (u64)(unsigned long)current->cgroups;
}

static u64 current_css_set_refcount_read(struct cgroup_subsys_state *css,
                                         struct cftype *cft)
{
        u64 count;

        rcu_read_lock();
        count = atomic_read(&task_css_set(current)->refcount);
        rcu_read_unlock();
        return count;
}

static int current_css_set_cg_links_read(struct seq_file *seq, void *v)
{
        struct cgrp_cset_link *link;
        struct css_set *cset;
        char *name_buf;

        name_buf = kmalloc(NAME_MAX + 1, GFP_KERNEL);
        if (!name_buf)
                return -ENOMEM;

        spin_lock_irq(&css_set_lock);
        rcu_read_lock();
        cset = rcu_dereference(current->cgroups);
        list_for_each_entry(link, &cset->cgrp_links, cgrp_link) {
                struct cgroup *c = link->cgrp;

                cgroup_name(c, name_buf, NAME_MAX + 1);
                seq_printf(seq, "Root %d group %s\n",
                           c->root->hierarchy_id, name_buf);
        }
        rcu_read_unlock();
        spin_unlock_irq(&css_set_lock);
        kfree(name_buf);
        return 0;
}

#define MAX_TASKS_SHOWN_PER_CSS 25
static int cgroup_css_links_read(struct seq_file *seq, void *v)
{
        struct cgroup_subsys_state *css = seq_css(seq);
        struct cgrp_cset_link *link;

        spin_lock_irq(&css_set_lock);
        list_for_each_entry(link, &css->cgroup->cset_links, cset_link) {
                struct css_set *cset = link->cset;
                struct task_struct *task;
                int count = 0;

                seq_printf(seq, "css_set %p\n", cset);

                list_for_each_entry(task, &cset->tasks, cg_list) {
                        if (count++ > MAX_TASKS_SHOWN_PER_CSS)
                                goto overflow;
                        seq_printf(seq, "  task %d\n", task_pid_vnr(task));
                }

                list_for_each_entry(task, &cset->mg_tasks, cg_list) {
                        if (count++ > MAX_TASKS_SHOWN_PER_CSS)
                                goto overflow;
                        seq_printf(seq, "  task %d\n", task_pid_vnr(task));
                }
                continue;
        overflow:
                seq_puts(seq, "  ...\n");
        }
        spin_unlock_irq(&css_set_lock);
        return 0;
}

static u64 releasable_read(struct cgroup_subsys_state *css, struct cftype *cft)
{
        return (!cgroup_is_populated(css->cgroup) &&
                !css_has_online_children(&css->cgroup->self));
}

static struct cftype debug_files[] = {
        {
                .name = "taskcount",
                .read_u64 = debug_taskcount_read,
        },

        {
                .name = "current_css_set",
                .read_u64 = current_css_set_read,
        },

        {
                .name = "current_css_set_refcount",
                .read_u64 = current_css_set_refcount_read,
        },

        {
                .name = "current_css_set_cg_links",
                .seq_show = current_css_set_cg_links_read,
        },

        {
                .name = "cgroup_css_links",
                .seq_show = cgroup_css_links_read,
        },

        {
                .name = "releasable",
                .read_u64 = releasable_read,
        },

        { }     /* terminate */
};

struct cgroup_subsys debug_cgrp_subsys = {
        .css_alloc = debug_css_alloc,
        .css_free = debug_css_free,
        .legacy_cftypes = debug_files,
};
#endif /* CONFIG_CGROUP_DEBUG */