/*
 *  Generic process-grouping system.
 *
 *  Based originally on the cpuset system, extracted by Paul Menage
 *  Copyright (C) 2006 Google, Inc
 *
 *  Notifications support
 *  Copyright (C) 2009 Nokia Corporation
 *  Author: Kirill A. Shutemov
 *
 *  Copyright notices from the original cpuset code:
 *  --------------------------------------------------
 *  Copyright (C) 2003 BULL SA.
 *  Copyright (C) 2004-2006 Silicon Graphics, Inc.
 *
 *  Portions derived from Patrick Mochel's sysfs code.
 *  sysfs is Copyright (c) 2001-3 Patrick Mochel
 *
 *  2003-10-10 Written by Simon Derr.
 *  2003-10-22 Updates by Stephen Hemminger.
 *  2004 May-July Rework by Paul Jackson.
 *  ---------------------------------------------------
 *
 *  This file is subject to the terms and conditions of the GNU General Public
 *  License.  See the file COPYING in the main directory of the Linux
 *  distribution for more details.
 */

#include <linux/cgroup.h>
#include <linux/ctype.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/mount.h>
#include <linux/pagemap.h>
#include <linux/proc_fs.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/backing-dev.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/magic.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/sort.h>
#include <linux/kmod.h>
#include <linux/module.h>
#include <linux/delayacct.h>
#include <linux/cgroupstats.h>
#include <linux/hash.h>
#include <linux/namei.h>
#include <linux/pid_namespace.h>
#include <linux/idr.h>
#include <linux/vmalloc.h> /* TODO: replace with more sophisticated array */
#include <linux/eventfd.h>
#include <linux/poll.h>

#include <asm/atomic.h>

static DEFINE_MUTEX(cgroup_mutex);

/*
 * Generate an array of cgroup subsystem pointers. At boot time, this is
 * populated up to CGROUP_BUILTIN_SUBSYS_COUNT, and modular subsystems are
 * registered after that. The mutable section of this array is protected by
 * cgroup_mutex.
 */
#define SUBSYS(_x) &_x ## _subsys,
static struct cgroup_subsys *subsys[CGROUP_SUBSYS_COUNT] = {
#include <linux/cgroup_subsys.h>
};
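
/*
 * Illustrative expansion (a sketch, assuming a config where only the cpuset
 * and cpu scheduler controllers are enabled): if <linux/cgroup_subsys.h>
 * contains SUBSYS(cpuset) and SUBSYS(cpu_cgroup), the array above expands to
 *
 *	static struct cgroup_subsys *subsys[CGROUP_SUBSYS_COUNT] = {
 *		&cpuset_subsys,
 *		&cpu_cgroup_subsys,
 *	};
 *
 * i.e. one pointer per SUBSYS() line, in header order.
 */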

#define MAX_CGROUP_ROOT_NAMELEN 64

/*
 * A cgroupfs_root represents the root of a cgroup hierarchy,
 * and may be associated with a superblock to form an active
 * hierarchy
 */
struct cgroupfs_root {
	struct super_block *sb;

	/*
	 * The bitmask of subsystems intended to be attached to this
	 * hierarchy
	 */
	unsigned long subsys_bits;

	/* Unique id for this hierarchy. */
	int hierarchy_id;

	/* The bitmask of subsystems currently attached to this hierarchy */
	unsigned long actual_subsys_bits;

	/* A list running through the attached subsystems */
	struct list_head subsys_list;

	/* The root cgroup for this hierarchy */
	struct cgroup top_cgroup;

	/* Tracks how many cgroups are currently defined in hierarchy.*/
	int number_of_cgroups;

	/* A list running through the active hierarchies */
	struct list_head root_list;

	/* Hierarchy-specific flags */
	unsigned long flags;

	/* The path to use for release notifications. */
	char release_agent_path[PATH_MAX];

	/* The name for this hierarchy - may be empty */
	char name[MAX_CGROUP_ROOT_NAMELEN];
};

/*
 * The "rootnode" hierarchy is the "dummy hierarchy", reserved for the
 * subsystems that are otherwise unattached - it never has more than a
 * single cgroup, and all tasks are part of that cgroup.
 */
static struct cgroupfs_root rootnode;

/*
 * CSS ID -- ID per subsys's Cgroup Subsys State (CSS). Used only when
 * cgroup_subsys->use_id != 0.
 */
#define CSS_ID_MAX	(65535)
struct css_id {
	/*
	 * The css to which this ID points. This pointer is set to a valid
	 * value after the cgroup is populated. If the cgroup is removed,
	 * this will be NULL. This pointer is expected to be RCU-safe
	 * because destroy() is called after synchronize_rcu(). But for safe
	 * use, css_is_removed() or css_tryget() should be used to avoid
	 * races.
	 */
	struct cgroup_subsys_state __rcu *css;
	/*
	 * ID of this css.
	 */
	unsigned short id;
	/*
	 * Depth in the hierarchy to which this ID belongs.
	 */
	unsigned short depth;
	/*
	 * ID is freed by RCU (and the lookup routine is RCU-safe).
	 */
	struct rcu_head rcu_head;
	/*
	 * The hierarchy of IDs this ID belongs to.
	 */
	unsigned short stack[0]; /* Array of length (depth+1) */
};
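
/*
 * Example (a sketch, not actual kernel data): for a css at depth 2 whose
 * ancestor IDs are 1 and 5 and whose own ID is 9, stack[] holds { 1, 5, 9 },
 * i.e. stack[n] is the ID of the ancestor at depth n and stack[depth] is
 * the ID itself. An "is ancestor" test then reduces to one comparison:
 *
 *	child_id->stack[parent_id->depth] == parent_id->id
 */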

/*
 * cgroup_event represents events which userspace wants to receive.
 */
struct cgroup_event {
	/*
	 * Cgroup which the event belongs to.
	 */
	struct cgroup *cgrp;
	/*
	 * Control file which the event is associated with.
	 */
	struct cftype *cft;
	/*
	 * eventfd to signal userspace about the event.
	 */
	struct eventfd_ctx *eventfd;
	/*
	 * Each of these is stored in a list by the cgroup.
	 */
	struct list_head list;
	/*
	 * All fields below are needed to unregister the event when
	 * userspace closes the eventfd.
	 */
	poll_table pt;
	wait_queue_head_t *wqh;
	wait_queue_t wait;
	struct work_struct remove;
};

/* The list of hierarchy roots */

static LIST_HEAD(roots);
static int root_count;

static DEFINE_IDA(hierarchy_ida);
static int next_hierarchy_id;
static DEFINE_SPINLOCK(hierarchy_id_lock);

/* dummytop is a shorthand for the dummy hierarchy's top cgroup */
#define dummytop (&rootnode.top_cgroup)

/* This flag indicates whether tasks in the fork and exit paths should
 * check for fork/exit handlers to call. This avoids us having to do
 * extra work in the fork/exit path if none of the subsystems need to
 * be called.
 */
static int need_forkexit_callback __read_mostly;

#ifdef CONFIG_PROVE_LOCKING
int cgroup_lock_is_held(void)
{
	return lockdep_is_held(&cgroup_mutex);
}
#else /* #ifdef CONFIG_PROVE_LOCKING */
int cgroup_lock_is_held(void)
{
	return mutex_is_locked(&cgroup_mutex);
}
#endif /* #else #ifdef CONFIG_PROVE_LOCKING */

EXPORT_SYMBOL_GPL(cgroup_lock_is_held);

/* convenient tests for these bits */
inline int cgroup_is_removed(const struct cgroup *cgrp)
{
	return test_bit(CGRP_REMOVED, &cgrp->flags);
}

/* bits in struct cgroupfs_root flags field */
enum {
	ROOT_NOPREFIX, /* mounted subsystems have no named prefix */
};

static int cgroup_is_releasable(const struct cgroup *cgrp)
{
	const int bits =
		(1 << CGRP_RELEASABLE) |
		(1 << CGRP_NOTIFY_ON_RELEASE);
	return (cgrp->flags & bits) == bits;
}

static int notify_on_release(const struct cgroup *cgrp)
{
	return test_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags);
}

static int clone_children(const struct cgroup *cgrp)
{
	return test_bit(CGRP_CLONE_CHILDREN, &cgrp->flags);
}

/*
 * for_each_subsys() allows you to iterate on each subsystem attached to
 * an active hierarchy
 */
#define for_each_subsys(_root, _ss) \
list_for_each_entry(_ss, &_root->subsys_list, sibling)

/* for_each_active_root() allows you to iterate across the active hierarchies */
#define for_each_active_root(_root) \
list_for_each_entry(_root, &roots, root_list)
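
/*
 * Typical usage of the two iterators above (illustrative): walk every
 * subsystem bound to every active hierarchy. Both lists are modified only
 * under cgroup_mutex, so callers must hold it.
 *
 *	struct cgroupfs_root *root;
 *	struct cgroup_subsys *ss;
 *
 *	for_each_active_root(root)
 *		for_each_subsys(root, ss)
 *			printk("%s bound to hierarchy %d\n",
 *			       ss->name, root->hierarchy_id);
 */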

/* the list of cgroups eligible for automatic release. Protected by
 * release_list_lock */
static LIST_HEAD(release_list);
static DEFINE_SPINLOCK(release_list_lock);
static void cgroup_release_agent(struct work_struct *work);
static DECLARE_WORK(release_agent_work, cgroup_release_agent);
static void check_for_release(struct cgroup *cgrp);

/* Link structure for associating css_set objects with cgroups */
struct cg_cgroup_link {
	/*
	 * List running through cg_cgroup_links associated with a
	 * cgroup, anchored on cgroup->css_sets
	 */
	struct list_head cgrp_link_list;
	struct cgroup *cgrp;
	/*
	 * List running through cg_cgroup_links pointing at a
	 * single css_set object, anchored on css_set->cg_links
	 */
	struct list_head cg_link_list;
	struct css_set *cg;
};

/* The default css_set - used by init and its children prior to any
 * hierarchies being mounted. It contains a pointer to the root state
 * for each subsystem. Also used to anchor the list of css_sets. Not
 * reference-counted, to improve performance when child cgroups
 * haven't been created.
 */

static struct css_set init_css_set;
static struct cg_cgroup_link init_css_set_link;

static int cgroup_init_idr(struct cgroup_subsys *ss,
			   struct cgroup_subsys_state *css);

/* css_set_lock protects the list of css_set objects, and the
 * chain of tasks off each css_set.  Nests outside task->alloc_lock
 * due to cgroup_iter_start() */
static DEFINE_RWLOCK(css_set_lock);
static int css_set_count;

/*
 * hash table for cgroup groups. This improves the performance of finding
 * an existing css_set. This hash doesn't (currently) take into
 * account cgroups in empty hierarchies.
 */
#define CSS_SET_HASH_BITS	7
#define CSS_SET_TABLE_SIZE	(1 << CSS_SET_HASH_BITS)
static struct hlist_head css_set_table[CSS_SET_TABLE_SIZE];

static struct hlist_head *css_set_hash(struct cgroup_subsys_state *css[])
{
	int i;
	int index;
	unsigned long tmp = 0UL;

	for (i = 0; i < CGROUP_SUBSYS_COUNT; i++)
		tmp += (unsigned long)css[i];
	tmp = (tmp >> 16) ^ tmp;

	index = hash_long(tmp, CSS_SET_HASH_BITS);

	return &css_set_table[index];
}

static void free_css_set_rcu(struct rcu_head *obj)
{
	struct css_set *cg = container_of(obj, struct css_set, rcu_head);
	kfree(cg);
}

/* We don't maintain the lists running through each css_set to its
 * tasks until after the first call to cgroup_iter_start(). This
 * reduces the fork()/exit() overhead for people who have cgroups
 * compiled into their kernel but not actually in use. */
static int use_task_css_set_links __read_mostly;

static void __put_css_set(struct css_set *cg, int taskexit)
{
	struct cg_cgroup_link *link;
	struct cg_cgroup_link *saved_link;
	/*
	 * Ensure that the refcount doesn't hit zero while any readers
	 * can see it. Similar to atomic_dec_and_lock(), but for an
	 * rwlock
	 */
	if (atomic_add_unless(&cg->refcount, -1, 1))
		return;
	write_lock(&css_set_lock);
	if (!atomic_dec_and_test(&cg->refcount)) {
		write_unlock(&css_set_lock);
		return;
	}

	/* This css_set is dead. unlink it and release cgroup refcounts */
	hlist_del(&cg->hlist);
	css_set_count--;

	list_for_each_entry_safe(link, saved_link, &cg->cg_links,
				 cg_link_list) {
		struct cgroup *cgrp = link->cgrp;
		list_del(&link->cg_link_list);
		list_del(&link->cgrp_link_list);
		if (atomic_dec_and_test(&cgrp->count) &&
		    notify_on_release(cgrp)) {
			if (taskexit)
				set_bit(CGRP_RELEASABLE, &cgrp->flags);
			check_for_release(cgrp);
		}

		kfree(link);
	}

	write_unlock(&css_set_lock);
	call_rcu(&cg->rcu_head, free_css_set_rcu);
}

/*
 * refcounted get/put for css_set objects
 */
static inline void get_css_set(struct css_set *cg)
{
	atomic_inc(&cg->refcount);
}

static inline void put_css_set(struct css_set *cg)
{
	__put_css_set(cg, 0);
}

static inline void put_css_set_taskexit(struct css_set *cg)
{
	__put_css_set(cg, 1);
}

/*
 * compare_css_sets - helper function for find_existing_css_set().
 * @cg: candidate css_set being tested
 * @old_cg: existing css_set for a task
 * @new_cgrp: cgroup that's being entered by the task
 * @template: desired set of css pointers in css_set (pre-calculated)
 *
 * Returns true if "cg" matches "old_cg" except for the hierarchy
 * which "new_cgrp" belongs to, for which it should match "new_cgrp".
 */
static bool compare_css_sets(struct css_set *cg,
			     struct css_set *old_cg,
			     struct cgroup *new_cgrp,
			     struct cgroup_subsys_state *template[])
{
	struct list_head *l1, *l2;

	if (memcmp(template, cg->subsys, sizeof(cg->subsys))) {
		/* Not all subsystems matched */
		return false;
	}

	/*
	 * Compare cgroup pointers in order to distinguish between
	 * different cgroups in hierarchies with no subsystems. We
	 * could get by with just this check alone (and skip the
	 * memcmp above) but on most setups the memcmp check will
	 * avoid the need for this more expensive check on almost all
	 * candidates.
	 */

	l1 = &cg->cg_links;
	l2 = &old_cg->cg_links;
	while (1) {
		struct cg_cgroup_link *cgl1, *cgl2;
		struct cgroup *cg1, *cg2;

		l1 = l1->next;
		l2 = l2->next;
		/* See if we reached the end - both lists are equal length. */
		if (l1 == &cg->cg_links) {
			BUG_ON(l2 != &old_cg->cg_links);
			break;
		} else {
			BUG_ON(l2 == &old_cg->cg_links);
		}
		/* Locate the cgroups associated with these links. */
		cgl1 = list_entry(l1, struct cg_cgroup_link, cg_link_list);
		cgl2 = list_entry(l2, struct cg_cgroup_link, cg_link_list);
		cg1 = cgl1->cgrp;
		cg2 = cgl2->cgrp;
		/* Hierarchies should be linked in the same order. */
		BUG_ON(cg1->root != cg2->root);

		/*
		 * If this hierarchy is the hierarchy of the cgroup
		 * that's changing, then we need to check that this
		 * css_set points to the new cgroup; if it's any other
		 * hierarchy, then this css_set should point to the
		 * same cgroup as the old css_set.
		 */
		if (cg1->root == new_cgrp->root) {
			if (cg1 != new_cgrp)
				return false;
		} else {
			if (cg1 != cg2)
				return false;
		}
	}
	return true;
}

/*
 * find_existing_css_set() is a helper for
 * find_css_set(), and checks to see whether an existing
 * css_set is suitable.
 *
 * oldcg: the cgroup group that we're using before the cgroup
 * transition
 *
 * cgrp: the cgroup that we're moving into
 *
 * template: location in which to build the desired set of subsystem
 * state objects for the new cgroup group
 */
static struct css_set *find_existing_css_set(
	struct css_set *oldcg,
	struct cgroup *cgrp,
	struct cgroup_subsys_state *template[])
{
	int i;
	struct cgroupfs_root *root = cgrp->root;
	struct hlist_head *hhead;
	struct hlist_node *node;
	struct css_set *cg;

	/*
	 * Build the set of subsystem state objects that we want to see in the
	 * new css_set. While subsystems can change globally, the entries here
	 * won't change, so no need for locking.
	 */
	for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
		if (root->subsys_bits & (1UL << i)) {
			/* Subsystem is in this hierarchy. So we want
			 * the subsystem state from the new
			 * cgroup */
			template[i] = cgrp->subsys[i];
		} else {
			/* Subsystem is not in this hierarchy, so we
			 * don't want to change the subsystem state */
			template[i] = oldcg->subsys[i];
		}
	}

	hhead = css_set_hash(template);
	hlist_for_each_entry(cg, node, hhead, hlist) {
		if (!compare_css_sets(cg, oldcg, cgrp, template))
			continue;

		/* This css_set matches what we need */
		return cg;
	}

	/* No existing cgroup group matched */
	return NULL;
}

static void free_cg_links(struct list_head *tmp)
{
	struct cg_cgroup_link *link;
	struct cg_cgroup_link *saved_link;

	list_for_each_entry_safe(link, saved_link, tmp, cgrp_link_list) {
		list_del(&link->cgrp_link_list);
		kfree(link);
	}
}

/*
 * allocate_cg_links() allocates "count" cg_cgroup_link structures
 * and chains them on tmp through their cgrp_link_list fields. Returns 0 on
 * success or a negative error
 */
static int allocate_cg_links(int count, struct list_head *tmp)
{
	struct cg_cgroup_link *link;
	int i;
	INIT_LIST_HEAD(tmp);
	for (i = 0; i < count; i++) {
		link = kmalloc(sizeof(*link), GFP_KERNEL);
		if (!link) {
			free_cg_links(tmp);
			return -ENOMEM;
		}
		list_add(&link->cgrp_link_list, tmp);
	}
	return 0;
}

/**
 * link_css_set - a helper function to link a css_set to a cgroup
 * @tmp_cg_links: cg_cgroup_link objects allocated by allocate_cg_links()
 * @cg: the css_set to be linked
 * @cgrp: the destination cgroup
 */
static void link_css_set(struct list_head *tmp_cg_links,
			 struct css_set *cg, struct cgroup *cgrp)
{
	struct cg_cgroup_link *link;

	BUG_ON(list_empty(tmp_cg_links));
	link = list_first_entry(tmp_cg_links, struct cg_cgroup_link,
				cgrp_link_list);
	link->cg = cg;
	link->cgrp = cgrp;
	atomic_inc(&cgrp->count);
	list_move(&link->cgrp_link_list, &cgrp->css_sets);
	/*
	 * Always add links to the tail of the list so that the list
	 * is sorted by order of hierarchy creation
	 */
	list_add_tail(&link->cg_link_list, &cg->cg_links);
}

/*
 * find_css_set() takes an existing cgroup group and a
 * cgroup object, and returns a css_set object that's
 * equivalent to the old group, but with the given cgroup
 * substituted into the appropriate hierarchy. Must be called with
 * cgroup_mutex held
 */
static struct css_set *find_css_set(
	struct css_set *oldcg, struct cgroup *cgrp)
{
	struct css_set *res;
	struct cgroup_subsys_state *template[CGROUP_SUBSYS_COUNT];

	struct list_head tmp_cg_links;

	struct hlist_head *hhead;
	struct cg_cgroup_link *link;

	/* First see if we already have a cgroup group that matches
	 * the desired set */
	read_lock(&css_set_lock);
	res = find_existing_css_set(oldcg, cgrp, template);
	if (res)
		get_css_set(res);
	read_unlock(&css_set_lock);

	if (res)
		return res;

	res = kmalloc(sizeof(*res), GFP_KERNEL);
	if (!res)
		return NULL;

	/* Allocate all the cg_cgroup_link objects that we'll need */
	if (allocate_cg_links(root_count, &tmp_cg_links) < 0) {
		kfree(res);
		return NULL;
	}

	atomic_set(&res->refcount, 1);
	INIT_LIST_HEAD(&res->cg_links);
	INIT_LIST_HEAD(&res->tasks);
	INIT_HLIST_NODE(&res->hlist);

	/* Copy the set of subsystem state objects generated in
	 * find_existing_css_set() */
	memcpy(res->subsys, template, sizeof(res->subsys));

	write_lock(&css_set_lock);
	/* Add reference counts and links from the new css_set. */
	list_for_each_entry(link, &oldcg->cg_links, cg_link_list) {
		struct cgroup *c = link->cgrp;
		if (c->root == cgrp->root)
			c = cgrp;
		link_css_set(&tmp_cg_links, res, c);
	}

	BUG_ON(!list_empty(&tmp_cg_links));

	css_set_count++;

	/* Add this cgroup group to the hash table */
	hhead = css_set_hash(res->subsys);
	hlist_add_head(&res->hlist, hhead);

	write_unlock(&css_set_lock);

	return res;
}
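
/*
 * Usage sketch for find_css_set() (this mirrors the attach path further
 * down in this file): pin the task's current css_set, compute the
 * destination set, then switch the task over:
 *
 *	newcg = find_css_set(oldcg, cgrp);    (returns with a reference held)
 *	if (!newcg)
 *		return -ENOMEM;
 *	rcu_assign_pointer(tsk->cgroups, newcg);
 *	put_css_set(oldcg);
 */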

/*
 * Return the cgroup for "task" from the given hierarchy. Must be
 * called with cgroup_mutex held.
 */
static struct cgroup *task_cgroup_from_root(struct task_struct *task,
					    struct cgroupfs_root *root)
{
	struct css_set *css;
	struct cgroup *res = NULL;

	BUG_ON(!mutex_is_locked(&cgroup_mutex));
	read_lock(&css_set_lock);
	/*
	 * No need to lock the task - since we hold cgroup_mutex the
	 * task can't change groups, so the only thing that can happen
	 * is that it exits and its css is set back to init_css_set.
	 */
	css = task->cgroups;
	if (css == &init_css_set) {
		res = &root->top_cgroup;
	} else {
		struct cg_cgroup_link *link;
		list_for_each_entry(link, &css->cg_links, cg_link_list) {
			struct cgroup *c = link->cgrp;
			if (c->root == root) {
				res = c;
				break;
			}
		}
	}
	read_unlock(&css_set_lock);
	BUG_ON(!res);
	return res;
}

/*
 * There is one global cgroup mutex. We also require taking
 * task_lock() when dereferencing a task's cgroup subsys pointers.
 * See "The task_lock() exception", at the end of this comment.
 *
 * A task must hold cgroup_mutex to modify cgroups.
 *
 * Any task can increment and decrement the count field without lock.
 * So in general, code holding cgroup_mutex can't rely on the count
 * field not changing.  However, if the count goes to zero, then only
 * cgroup_attach_task() can increment it again.  Because a count of zero
 * means that no tasks are currently attached, therefore there is no
 * way a task attached to that cgroup can fork (the other way to
 * increment the count).  So code holding cgroup_mutex can safely
 * assume that if the count is zero, it will stay zero. Similarly, if
 * a task holds cgroup_mutex on a cgroup with zero count, it
 * knows that the cgroup won't be removed, as cgroup_rmdir()
 * needs that mutex.
 *
 * The fork and exit callbacks cgroup_fork() and cgroup_exit(), don't
 * (usually) take cgroup_mutex.  These are the two most performance
 * critical pieces of code here.  The exception occurs on cgroup_exit(),
 * when a task in a notify_on_release cgroup exits.  Then cgroup_mutex
 * is taken, and if the cgroup count is zero, a usermode call is made
 * to the release agent with the name of the cgroup (path relative to
 * the root of cgroup file system) as the argument.
 *
 * A cgroup can only be deleted if both its 'count' of using tasks
 * is zero, and its list of 'children' cgroups is empty.  Since all
 * tasks in the system use _some_ cgroup, and since there is always at
 * least one task in the system (init, pid == 1), therefore, top_cgroup
 * always has either children cgroups and/or using tasks.  So we don't
 * need a special hack to ensure that top_cgroup cannot be deleted.
 *
 *	The task_lock() exception
 *
 * The need for this exception arises from the action of
 * cgroup_attach_task(), which overwrites one task's cgroup pointer with
 * another.  It does so using cgroup_mutex, however there are
 * several performance critical places that need to reference
 * task->cgroup without the expense of grabbing a system global
 * mutex.  Therefore except as noted below, when dereferencing or, as
 * in cgroup_attach_task(), modifying a task's cgroup pointer we use
 * task_lock(), which acts on a spinlock (task->alloc_lock) already in
 * the task_struct routinely used for such matters.
 *
 * P.S.  One more locking exception.  RCU is used to guard the
 * update of a task's cgroup pointer by cgroup_attach_task()
 */
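
/*
 * A sketch of the task_lock() exception described above, matching what
 * cgroup_attach_task() does below: task->cgroups may only be dereferenced
 * under task_lock() (or RCU), and a reference must be taken before the
 * lock is dropped:
 *
 *	task_lock(tsk);
 *	cg = tsk->cgroups;
 *	get_css_set(cg);
 *	task_unlock(tsk);
 */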

/**
 * cgroup_lock - lock out any changes to cgroup structures
 *
 */
void cgroup_lock(void)
{
	mutex_lock(&cgroup_mutex);
}
EXPORT_SYMBOL_GPL(cgroup_lock);

/**
 * cgroup_unlock - release lock on cgroup changes
 *
 * Undo the lock taken in a previous cgroup_lock() call.
 */
void cgroup_unlock(void)
{
	mutex_unlock(&cgroup_mutex);
}
EXPORT_SYMBOL_GPL(cgroup_unlock);

/*
 * A couple of forward declarations required, due to cyclic reference loop:
 * cgroup_mkdir -> cgroup_create -> cgroup_populate_dir ->
 * cgroup_add_file -> cgroup_create_file -> cgroup_dir_inode_operations
 * -> cgroup_mkdir.
 */

static int cgroup_mkdir(struct inode *dir, struct dentry *dentry, int mode);
static struct dentry *cgroup_lookup(struct inode *, struct dentry *, struct nameidata *);
static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry);
static int cgroup_populate_dir(struct cgroup *cgrp);
static const struct inode_operations cgroup_dir_inode_operations;
static const struct file_operations proc_cgroupstats_operations;

static struct backing_dev_info cgroup_backing_dev_info = {
	.name		= "cgroup",
	.capabilities	= BDI_CAP_NO_ACCT_AND_WRITEBACK,
};

static int alloc_css_id(struct cgroup_subsys *ss,
			struct cgroup *parent, struct cgroup *child);

static struct inode *cgroup_new_inode(mode_t mode, struct super_block *sb)
{
	struct inode *inode = new_inode(sb);

	if (inode) {
		inode->i_ino = get_next_ino();
		inode->i_mode = mode;
		inode->i_uid = current_fsuid();
		inode->i_gid = current_fsgid();
		inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
		inode->i_mapping->backing_dev_info = &cgroup_backing_dev_info;
	}
	return inode;
}

/*
 * Call subsys's pre_destroy handler.
 * This is called before css refcnt check.
 */
static int cgroup_call_pre_destroy(struct cgroup *cgrp)
{
	struct cgroup_subsys *ss;
	int ret = 0;

	for_each_subsys(cgrp->root, ss)
		if (ss->pre_destroy) {
			ret = ss->pre_destroy(ss, cgrp);
			if (ret)
				break;
		}

	return ret;
}

static void free_cgroup_rcu(struct rcu_head *obj)
{
	struct cgroup *cgrp = container_of(obj, struct cgroup, rcu_head);

	kfree(cgrp);
}

static void cgroup_diput(struct dentry *dentry, struct inode *inode)
{
	/* is dentry a directory? if so, kfree() associated cgroup */
	if (S_ISDIR(inode->i_mode)) {
		struct cgroup *cgrp = dentry->d_fsdata;
		struct cgroup_subsys *ss;
		BUG_ON(!(cgroup_is_removed(cgrp)));
		/* It's possible for external users to be holding css
		 * reference counts on a cgroup; css_put() needs to
		 * be able to access the cgroup after decrementing
		 * the reference count in order to know if it needs to
		 * queue the cgroup to be handled by the release
		 * agent */
		synchronize_rcu();

		mutex_lock(&cgroup_mutex);
		/*
		 * Release the subsystem state objects.
		 */
		for_each_subsys(cgrp->root, ss)
			ss->destroy(ss, cgrp);

		cgrp->root->number_of_cgroups--;
		mutex_unlock(&cgroup_mutex);

		/*
		 * Drop the active superblock reference that we took when we
		 * created the cgroup
		 */
		deactivate_super(cgrp->root->sb);

		/*
		 * if we're getting rid of the cgroup, refcount should ensure
		 * that there are no pidlists left.
		 */
		BUG_ON(!list_empty(&cgrp->pidlists));

		call_rcu(&cgrp->rcu_head, free_cgroup_rcu);
	}
	iput(inode);
}

static int cgroup_delete(const struct dentry *d)
{
	return 1;
}

static void remove_dir(struct dentry *d)
{
	struct dentry *parent = dget(d->d_parent);

	d_delete(d);
	simple_rmdir(parent->d_inode, d);
	dput(parent);
}

static void cgroup_clear_directory(struct dentry *dentry)
{
	struct list_head *node;

	BUG_ON(!mutex_is_locked(&dentry->d_inode->i_mutex));
	spin_lock(&dentry->d_lock);
	node = dentry->d_subdirs.next;
	while (node != &dentry->d_subdirs) {
		struct dentry *d = list_entry(node, struct dentry, d_u.d_child);

		spin_lock_nested(&d->d_lock, DENTRY_D_LOCK_NESTED);
		list_del_init(node);
		if (d->d_inode) {
			/* This should never be called on a cgroup
			 * directory with child cgroups */
			BUG_ON(d->d_inode->i_mode & S_IFDIR);
			dget_dlock(d);
			spin_unlock(&d->d_lock);
			spin_unlock(&dentry->d_lock);
			d_delete(d);
			simple_unlink(dentry->d_inode, d);
			dput(d);
			spin_lock(&dentry->d_lock);
		} else
			spin_unlock(&d->d_lock);
		node = dentry->d_subdirs.next;
	}
	spin_unlock(&dentry->d_lock);
}

/*
 * NOTE: the dentry must have been dget()'ed
 */
static void cgroup_d_remove_dir(struct dentry *dentry)
{
	struct dentry *parent;

	cgroup_clear_directory(dentry);

	parent = dentry->d_parent;
	spin_lock(&parent->d_lock);
	spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
	list_del_init(&dentry->d_u.d_child);
	spin_unlock(&dentry->d_lock);
	spin_unlock(&parent->d_lock);
	remove_dir(dentry);
}

/*
 * A queue for waiters to do rmdir() on a cgroup. A task will sleep when
 * cgroup->count == 0 && list_empty(&cgroup->children) && some subsys still
 * holds a reference to css->refcnt. In general, this refcnt is expected to
 * go down to zero soon.
 *
 * The CGRP_WAIT_ON_RMDIR flag is set under the cgroup's inode->i_mutex.
 */
DECLARE_WAIT_QUEUE_HEAD(cgroup_rmdir_waitq);

static void cgroup_wakeup_rmdir_waiter(struct cgroup *cgrp)
{
	if (unlikely(test_and_clear_bit(CGRP_WAIT_ON_RMDIR, &cgrp->flags)))
		wake_up_all(&cgroup_rmdir_waitq);
}

void cgroup_exclude_rmdir(struct cgroup_subsys_state *css)
{
	css_get(css);
}

void cgroup_release_and_wakeup_rmdir(struct cgroup_subsys_state *css)
{
	cgroup_wakeup_rmdir_waiter(css->cgroup);
	css_put(css);
}

/*
 * Call with cgroup_mutex held. Drops reference counts on modules, including
 * any duplicate ones that parse_cgroupfs_options took. If this function
 * returns an error, no reference counts are touched.
 */
static int rebind_subsystems(struct cgroupfs_root *root,
			      unsigned long final_bits)
{
	unsigned long added_bits, removed_bits;
	struct cgroup *cgrp = &root->top_cgroup;
	int i;

	BUG_ON(!mutex_is_locked(&cgroup_mutex));

	removed_bits = root->actual_subsys_bits & ~final_bits;
	added_bits = final_bits & ~root->actual_subsys_bits;
	/* Check that any added subsystems are currently free */
	for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
		unsigned long bit = 1UL << i;
		struct cgroup_subsys *ss = subsys[i];
		if (!(bit & added_bits))
			continue;
		/*
		 * Nobody should tell us to do a subsys that doesn't exist:
		 * parse_cgroupfs_options should catch that case and refcounts
		 * ensure that subsystems won't disappear once selected.
		 */
		BUG_ON(ss == NULL);
		if (ss->root != &rootnode) {
			/* Subsystem isn't free */
			return -EBUSY;
		}
	}

	/* Currently we don't handle adding/removing subsystems when
	 * any child cgroups exist. This is theoretically supportable
	 * but involves complex error handling, so it's being left until
	 * later */
	if (root->number_of_cgroups > 1)
		return -EBUSY;

	/* Process each subsystem */
	for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
		struct cgroup_subsys *ss = subsys[i];
		unsigned long bit = 1UL << i;
		if (bit & added_bits) {
			/* We're binding this subsystem to this hierarchy */
			BUG_ON(ss == NULL);
			BUG_ON(cgrp->subsys[i]);
			BUG_ON(!dummytop->subsys[i]);
			BUG_ON(dummytop->subsys[i]->cgroup != dummytop);
			mutex_lock(&ss->hierarchy_mutex);
			cgrp->subsys[i] = dummytop->subsys[i];
			cgrp->subsys[i]->cgroup = cgrp;
			list_move(&ss->sibling, &root->subsys_list);
			ss->root = root;
			if (ss->bind)
				ss->bind(ss, cgrp);
			mutex_unlock(&ss->hierarchy_mutex);
			/* refcount was already taken, and we're keeping it */
		} else if (bit & removed_bits) {
			/* We're removing this subsystem */
			BUG_ON(ss == NULL);
			BUG_ON(cgrp->subsys[i] != dummytop->subsys[i]);
			BUG_ON(cgrp->subsys[i]->cgroup != cgrp);
			mutex_lock(&ss->hierarchy_mutex);
			if (ss->bind)
				ss->bind(ss, dummytop);
			dummytop->subsys[i]->cgroup = dummytop;
			cgrp->subsys[i] = NULL;
			subsys[i]->root = &rootnode;
			list_move(&ss->sibling, &rootnode.subsys_list);
			mutex_unlock(&ss->hierarchy_mutex);
			/* subsystem is now free - drop reference on module */
			module_put(ss->module);
		} else if (bit & final_bits) {
			/* Subsystem state should already exist */
			BUG_ON(ss == NULL);
			BUG_ON(!cgrp->subsys[i]);
			/*
			 * a refcount was taken, but we already had one, so
			 * drop the extra reference.
			 */
			module_put(ss->module);
#ifdef CONFIG_MODULE_UNLOAD
			BUG_ON(ss->module && !module_refcount(ss->module));
#endif
		} else {
			/* Subsystem state shouldn't exist */
			BUG_ON(cgrp->subsys[i]);
		}
	}
	root->subsys_bits = root->actual_subsys_bits = final_bits;
	synchronize_rcu();

	return 0;
}

static int cgroup_show_options(struct seq_file *seq, struct vfsmount *vfs)
{
	struct cgroupfs_root *root = vfs->mnt_sb->s_fs_info;
	struct cgroup_subsys *ss;

	mutex_lock(&cgroup_mutex);
	for_each_subsys(root, ss)
		seq_printf(seq, ",%s", ss->name);
	if (test_bit(ROOT_NOPREFIX, &root->flags))
		seq_puts(seq, ",noprefix");
	if (strlen(root->release_agent_path))
		seq_printf(seq, ",release_agent=%s", root->release_agent_path);
	if (clone_children(&root->top_cgroup))
		seq_puts(seq, ",clone_children");
	if (strlen(root->name))
		seq_printf(seq, ",name=%s", root->name);
	mutex_unlock(&cgroup_mutex);
	return 0;
}

struct cgroup_sb_opts {
	unsigned long subsys_bits;
	unsigned long flags;
	char *release_agent;
	bool clone_children;
	char *name;
	/* User explicitly requested empty subsystem */
	bool none;

	struct cgroupfs_root *new_root;

};

/*
 * Convert a hierarchy specifier into a bitmask of subsystems and flags. Call
 * with cgroup_mutex held to protect the subsys[] array. This function takes
 * refcounts on subsystems to be used, unless it returns error, in which case
 * no refcounts are taken.
 */
static int parse_cgroupfs_options(char *data, struct cgroup_sb_opts *opts)
{
	char *token, *o = data;
	bool all_ss = false, one_ss = false;
	unsigned long mask = (unsigned long)-1;
	int i;
	bool module_pin_failed = false;

	BUG_ON(!mutex_is_locked(&cgroup_mutex));

#ifdef CONFIG_CPUSETS
	mask = ~(1UL << cpuset_subsys_id);
#endif

	memset(opts, 0, sizeof(*opts));

	while ((token = strsep(&o, ",")) != NULL) {
		if (!*token)
			return -EINVAL;
		if (!strcmp(token, "none")) {
			/* Explicitly have no subsystems */
			opts->none = true;
			continue;
		}
		if (!strcmp(token, "all")) {
			/* Mutually exclusive option 'all' + subsystem name */
			if (one_ss)
				return -EINVAL;
			all_ss = true;
			continue;
		}
		if (!strcmp(token, "noprefix")) {
			set_bit(ROOT_NOPREFIX, &opts->flags);
			continue;
		}
		if (!strcmp(token, "clone_children")) {
			opts->clone_children = true;
			continue;
		}
		if (!strncmp(token, "release_agent=", 14)) {
			/* Specifying two release agents is forbidden */
			if (opts->release_agent)
				return -EINVAL;
			opts->release_agent =
				kstrndup(token + 14, PATH_MAX - 1, GFP_KERNEL);
			if (!opts->release_agent)
				return -ENOMEM;
			continue;
		}
		if (!strncmp(token, "name=", 5)) {
			const char *name = token + 5;
			/* Can't specify an empty name */
			if (!strlen(name))
				return -EINVAL;
			/* Must match [\w.-]+ */
			for (i = 0; i < strlen(name); i++) {
				char c = name[i];
				if (isalnum(c))
					continue;
				if ((c == '.') || (c == '-') || (c == '_'))
					continue;
				return -EINVAL;
			}
			/* Specifying two names is forbidden */
			if (opts->name)
				return -EINVAL;
			opts->name = kstrndup(name,
					      MAX_CGROUP_ROOT_NAMELEN - 1,
					      GFP_KERNEL);
			if (!opts->name)
				return -ENOMEM;

			continue;
		}

		for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
			struct cgroup_subsys *ss = subsys[i];
			if (ss == NULL)
				continue;
			if (strcmp(token, ss->name))
				continue;
			if (ss->disabled)
				continue;

			/* Mutually exclusive option 'all' + subsystem name */
			if (all_ss)
				return -EINVAL;
			set_bit(i, &opts->subsys_bits);
			one_ss = true;

			break;
		}
		if (i == CGROUP_SUBSYS_COUNT)
			return -ENOENT;
	}

	/*
	 * If the 'all' option was specified select all the subsystems;
	 * otherwise, if none of 'all', 'none' and the subsystem name
	 * options were specified, default to 'all'.
	 */
	if (all_ss || (!all_ss && !one_ss && !opts->none)) {
		for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
			struct cgroup_subsys *ss = subsys[i];
			if (ss == NULL)
				continue;
			if (ss->disabled)
				continue;
			set_bit(i, &opts->subsys_bits);
		}
	}

	/* Consistency checks */

	/*
	 * Option noprefix was introduced just for backward compatibility
	 * with the old cpuset, so we allow noprefix only if mounting just
	 * the cpuset subsystem.
	 */
	if (test_bit(ROOT_NOPREFIX, &opts->flags) &&
	    (opts->subsys_bits & mask))
		return -EINVAL;


	/* Can't specify "none" and some subsystems */
	if (opts->subsys_bits && opts->none)
		return -EINVAL;

	/*
	 * We either have to specify by name or by subsystems. (So all
	 * empty hierarchies must have a name).
	 */
	if (!opts->subsys_bits && !opts->name)
		return -EINVAL;

	/*
	 * Grab references on all the modules we'll need, so the subsystems
	 * don't dance around before rebind_subsystems attaches them. This may
	 * take duplicate reference counts on a subsystem that's already used,
	 * but rebind_subsystems handles this case.
	 */
	for (i = CGROUP_BUILTIN_SUBSYS_COUNT; i < CGROUP_SUBSYS_COUNT; i++) {
		unsigned long bit = 1UL << i;

		if (!(bit & opts->subsys_bits))
			continue;
		if (!try_module_get(subsys[i]->module)) {
			module_pin_failed = true;
			break;
		}
	}
	if (module_pin_failed) {
		/*
		 * oops, one of the modules was going away. this means that we
		 * raced with a module_delete call, and to the user this is
		 * essentially a "subsystem doesn't exist" case.
		 */
		for (i--; i >= CGROUP_BUILTIN_SUBSYS_COUNT; i--) {
			/* drop refcounts only on the ones we took */
			unsigned long bit = 1UL << i;

			if (!(bit & opts->subsys_bits))
				continue;
			module_put(subsys[i]->module);
		}
		return -ENOENT;
	}

	return 0;
}
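
/*
 * Example option strings accepted by parse_cgroupfs_options() (illustrative;
 * the set of subsystem names depends on the kernel configuration, and the
 * release agent path shown is hypothetical):
 *
 *	"cpuset"                     - mount just the cpuset subsystem
 *	"cpuset,noprefix"            - legacy cpuset-style file names
 *	"none,name=mygroup"          - a named hierarchy with no subsystems
 *	"all,release_agent=/sbin/ra" - all subsystems plus a release agent
 */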

static void drop_parsed_module_refcounts(unsigned long subsys_bits)
{
	int i;
	for (i = CGROUP_BUILTIN_SUBSYS_COUNT; i < CGROUP_SUBSYS_COUNT; i++) {
		unsigned long bit = 1UL << i;

		if (!(bit & subsys_bits))
			continue;
		module_put(subsys[i]->module);
	}
}

static int cgroup_remount(struct super_block *sb, int *flags, char *data)
{
	int ret = 0;
	struct cgroupfs_root *root = sb->s_fs_info;
	struct cgroup *cgrp = &root->top_cgroup;
	struct cgroup_sb_opts opts;

	mutex_lock(&cgrp->dentry->d_inode->i_mutex);
	mutex_lock(&cgroup_mutex);

	/* See what subsystems are wanted */
	ret = parse_cgroupfs_options(data, &opts);
	if (ret)
		goto out_unlock;

	/* Don't allow flags or name to change at remount */
	if (opts.flags != root->flags ||
	    (opts.name && strcmp(opts.name, root->name))) {
		ret = -EINVAL;
		drop_parsed_module_refcounts(opts.subsys_bits);
		goto out_unlock;
	}

	ret = rebind_subsystems(root, opts.subsys_bits);
	if (ret) {
		drop_parsed_module_refcounts(opts.subsys_bits);
		goto out_unlock;
	}

	/* (re)populate subsystem files */
	cgroup_populate_dir(cgrp);

	if (opts.release_agent)
		strcpy(root->release_agent_path, opts.release_agent);
 out_unlock:
	kfree(opts.release_agent);
	kfree(opts.name);
	mutex_unlock(&cgroup_mutex);
	mutex_unlock(&cgrp->dentry->d_inode->i_mutex);
	return ret;
}

static const struct super_operations cgroup_ops = {
	.statfs = simple_statfs,
	.drop_inode = generic_delete_inode,
	.show_options = cgroup_show_options,
	.remount_fs = cgroup_remount,
};

static void init_cgroup_housekeeping(struct cgroup *cgrp)
{
	INIT_LIST_HEAD(&cgrp->sibling);
	INIT_LIST_HEAD(&cgrp->children);
	INIT_LIST_HEAD(&cgrp->css_sets);
	INIT_LIST_HEAD(&cgrp->release_list);
	INIT_LIST_HEAD(&cgrp->pidlists);
	mutex_init(&cgrp->pidlist_mutex);
	INIT_LIST_HEAD(&cgrp->event_list);
	spin_lock_init(&cgrp->event_list_lock);
}

static void init_cgroup_root(struct cgroupfs_root *root)
{
	struct cgroup *cgrp = &root->top_cgroup;
	INIT_LIST_HEAD(&root->subsys_list);
	INIT_LIST_HEAD(&root->root_list);
	root->number_of_cgroups = 1;
	cgrp->root = root;
	cgrp->top_cgroup = cgrp;
	init_cgroup_housekeeping(cgrp);
}

static bool init_root_id(struct cgroupfs_root *root)
{
	int ret = 0;

	do {
		if (!ida_pre_get(&hierarchy_ida, GFP_KERNEL))
			return false;
		spin_lock(&hierarchy_id_lock);
		/* Try to allocate the next unused ID */
		ret = ida_get_new_above(&hierarchy_ida, next_hierarchy_id,
					&root->hierarchy_id);
		if (ret == -ENOSPC)
			/* Try again starting from 0 */
			ret = ida_get_new(&hierarchy_ida, &root->hierarchy_id);
		if (!ret) {
			next_hierarchy_id = root->hierarchy_id + 1;
		} else if (ret != -EAGAIN) {
			/* Can only get here if the 31-bit IDR is full ... */
			BUG_ON(ret);
		}
		spin_unlock(&hierarchy_id_lock);
	} while (ret);
	return true;
}
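
/*
 * init_root_id() above follows the classic two-step IDA idiom: preload
 * with ida_pre_get(), then retry the allocation for as long as it returns
 * -EAGAIN (a racing allocator consumed the preloaded memory). A minimal
 * sketch of the same pattern:
 *
 *	do {
 *		if (!ida_pre_get(&ida, GFP_KERNEL))
 *			return -ENOMEM;
 *		ret = ida_get_new(&ida, &id);
 *	} while (ret == -EAGAIN);
 */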

static int cgroup_test_super(struct super_block *sb, void *data)
{
	struct cgroup_sb_opts *opts = data;
	struct cgroupfs_root *root = sb->s_fs_info;

	/* If we asked for a name then it must match */
	if (opts->name && strcmp(opts->name, root->name))
		return 0;

	/*
	 * If we asked for subsystems (or explicitly for no
	 * subsystems) then they must match
	 */
	if ((opts->subsys_bits || opts->none)
	    && (opts->subsys_bits != root->subsys_bits))
		return 0;

	return 1;
}

static struct cgroupfs_root *cgroup_root_from_opts(struct cgroup_sb_opts *opts)
{
	struct cgroupfs_root *root;

	if (!opts->subsys_bits && !opts->none)
		return NULL;

	root = kzalloc(sizeof(*root), GFP_KERNEL);
	if (!root)
		return ERR_PTR(-ENOMEM);

	if (!init_root_id(root)) {
		kfree(root);
		return ERR_PTR(-ENOMEM);
	}
	init_cgroup_root(root);

	root->subsys_bits = opts->subsys_bits;
	root->flags = opts->flags;
	if (opts->release_agent)
		strcpy(root->release_agent_path, opts->release_agent);
	if (opts->name)
		strcpy(root->name, opts->name);
	if (opts->clone_children)
		set_bit(CGRP_CLONE_CHILDREN, &root->top_cgroup.flags);
	return root;
}

static void cgroup_drop_root(struct cgroupfs_root *root)
{
	if (!root)
		return;

	BUG_ON(!root->hierarchy_id);
	spin_lock(&hierarchy_id_lock);
	ida_remove(&hierarchy_ida, root->hierarchy_id);
	spin_unlock(&hierarchy_id_lock);
	kfree(root);
}

static int cgroup_set_super(struct super_block *sb, void *data)
{
	int ret;
	struct cgroup_sb_opts *opts = data;

	/* If we don't have a new root, we can't set up a new sb */
	if (!opts->new_root)
		return -EINVAL;

	BUG_ON(!opts->subsys_bits && !opts->none);

	ret = set_anon_super(sb, NULL);
	if (ret)
		return ret;

	sb->s_fs_info = opts->new_root;
	opts->new_root->sb = sb;

	sb->s_blocksize = PAGE_CACHE_SIZE;
	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
	sb->s_magic = CGROUP_SUPER_MAGIC;
	sb->s_op = &cgroup_ops;

	return 0;
}

static int cgroup_get_rootdir(struct super_block *sb)
{
	static const struct dentry_operations cgroup_dops = {
		.d_iput = cgroup_diput,
		.d_delete = cgroup_delete,
	};

	struct inode *inode =
		cgroup_new_inode(S_IFDIR | S_IRUGO | S_IXUGO | S_IWUSR, sb);
	struct dentry *dentry;

	if (!inode)
		return -ENOMEM;

	inode->i_fop = &simple_dir_operations;
	inode->i_op = &cgroup_dir_inode_operations;
	/* directories start off with i_nlink == 2 (for "." entry) */
	inc_nlink(inode);
	dentry = d_alloc_root(inode);
	if (!dentry) {
		iput(inode);
		return -ENOMEM;
	}
	sb->s_root = dentry;
	/* for everything else we want ->d_op set */
	sb->s_d_op = &cgroup_dops;
	return 0;
}

static struct dentry *cgroup_mount(struct file_system_type *fs_type,
			 int flags, const char *unused_dev_name,
			 void *data)
{
	struct cgroup_sb_opts opts;
	struct cgroupfs_root *root;
	int ret = 0;
	struct super_block *sb;
	struct cgroupfs_root *new_root;

	/* First find the desired set of subsystems */
	mutex_lock(&cgroup_mutex);
	ret = parse_cgroupfs_options(data, &opts);
	mutex_unlock(&cgroup_mutex);
	if (ret)
		goto out_err;

	/*
	 * Allocate a new cgroup root. We may not need it if we're
	 * reusing an existing hierarchy.
	 */
	new_root = cgroup_root_from_opts(&opts);
	if (IS_ERR(new_root)) {
		ret = PTR_ERR(new_root);
		goto drop_modules;
	}
	opts.new_root = new_root;

	/* Locate an existing or new sb for this hierarchy */
	sb = sget(fs_type, cgroup_test_super, cgroup_set_super, &opts);
	if (IS_ERR(sb)) {
		ret = PTR_ERR(sb);
		cgroup_drop_root(opts.new_root);
		goto drop_modules;
	}

	root = sb->s_fs_info;
	BUG_ON(!root);
	if (root == opts.new_root) {
		/* We used the new root structure, so this is a new hierarchy */
		struct list_head tmp_cg_links;
		struct cgroup *root_cgrp = &root->top_cgroup;
		struct inode *inode;
		struct cgroupfs_root *existing_root;
		int i;

		BUG_ON(sb->s_root != NULL);

		ret = cgroup_get_rootdir(sb);
		if (ret)
			goto drop_new_super;
		inode = sb->s_root->d_inode;

		mutex_lock(&inode->i_mutex);
		mutex_lock(&cgroup_mutex);

		if (strlen(root->name)) {
			/* Check for name clashes with existing mounts */
			for_each_active_root(existing_root) {
				if (!strcmp(existing_root->name, root->name)) {
					ret = -EBUSY;
					mutex_unlock(&cgroup_mutex);
					mutex_unlock(&inode->i_mutex);
					goto drop_new_super;
				}
			}
		}

		/*
		 * We're accessing css_set_count without locking
		 * css_set_lock here, but that's OK - it can only be
		 * increased by someone holding cgroup_lock, and
		 * that's us. The worst that can happen is that we
		 * have some link structures left over.
		 */
		ret = allocate_cg_links(css_set_count, &tmp_cg_links);
		if (ret) {
			mutex_unlock(&cgroup_mutex);
			mutex_unlock(&inode->i_mutex);
			goto drop_new_super;
		}

		ret = rebind_subsystems(root, root->subsys_bits);
		if (ret == -EBUSY) {
			mutex_unlock(&cgroup_mutex);
			mutex_unlock(&inode->i_mutex);
			free_cg_links(&tmp_cg_links);
			goto drop_new_super;
		}
		/*
		 * There must be no failure case after here, since rebinding
		 * takes care of subsystems' refcounts, which are explicitly
		 * dropped in the failure exit path.
		 */

		/* EBUSY should be the only error here */
		BUG_ON(ret);

		list_add(&root->root_list, &roots);
		root_count++;

		sb->s_root->d_fsdata = root_cgrp;
		root->top_cgroup.dentry = sb->s_root;

		/* Link the top cgroup in this hierarchy into all
		 * the css_set objects */
		write_lock(&css_set_lock);
		for (i = 0; i < CSS_SET_TABLE_SIZE; i++) {
			struct hlist_head *hhead = &css_set_table[i];
			struct hlist_node *node;
			struct css_set *cg;

			hlist_for_each_entry(cg, node, hhead, hlist)
				link_css_set(&tmp_cg_links, cg, root_cgrp);
		}
		write_unlock(&css_set_lock);

		free_cg_links(&tmp_cg_links);

		BUG_ON(!list_empty(&root_cgrp->sibling));
		BUG_ON(!list_empty(&root_cgrp->children));
		BUG_ON(root->number_of_cgroups != 1);

		cgroup_populate_dir(root_cgrp);
		mutex_unlock(&cgroup_mutex);
		mutex_unlock(&inode->i_mutex);
	} else {
		/*
		 * We re-used an existing hierarchy - the new root (if
		 * any) is not needed
		 */
		cgroup_drop_root(opts.new_root);
		/* no subsys rebinding, so refcounts don't change */
		drop_parsed_module_refcounts(opts.subsys_bits);
	}

	kfree(opts.release_agent);
	kfree(opts.name);
	return dget(sb->s_root);

 drop_new_super:
	deactivate_locked_super(sb);
 drop_modules:
	drop_parsed_module_refcounts(opts.subsys_bits);
 out_err:
	kfree(opts.release_agent);
	kfree(opts.name);
	return ERR_PTR(ret);
}

static void cgroup_kill_sb(struct super_block *sb)
{
	struct cgroupfs_root *root = sb->s_fs_info;
	struct cgroup *cgrp = &root->top_cgroup;
	int ret;
	struct cg_cgroup_link *link;
	struct cg_cgroup_link *saved_link;

	BUG_ON(!root);

	BUG_ON(root->number_of_cgroups != 1);
	BUG_ON(!list_empty(&cgrp->children));
	BUG_ON(!list_empty(&cgrp->sibling));

	mutex_lock(&cgroup_mutex);

	/* Rebind all subsystems back to the default hierarchy */
	ret = rebind_subsystems(root, 0);
	/* Shouldn't be able to fail ... */
	BUG_ON(ret);

	/*
	 * Release all the links from css_sets to this hierarchy's
	 * root cgroup
	 */
	write_lock(&css_set_lock);

	list_for_each_entry_safe(link, saved_link, &cgrp->css_sets,
				 cgrp_link_list) {
		list_del(&link->cg_link_list);
		list_del(&link->cgrp_link_list);
		kfree(link);
	}
	write_unlock(&css_set_lock);

	if (!list_empty(&root->root_list)) {
		list_del(&root->root_list);
		root_count--;
	}

	mutex_unlock(&cgroup_mutex);

	kill_litter_super(sb);
	cgroup_drop_root(root);
}

static struct file_system_type cgroup_fs_type = {
	.name = "cgroup",
	.mount = cgroup_mount,
	.kill_sb = cgroup_kill_sb,
};

static struct kobject *cgroup_kobj;

static inline struct cgroup *__d_cgrp(struct dentry *dentry)
{
	return dentry->d_fsdata;
}

static inline struct cftype *__d_cft(struct dentry *dentry)
{
	return dentry->d_fsdata;
}

/**
 * cgroup_path - generate the path of a cgroup
 * @cgrp: the cgroup in question
 * @buf: the buffer to write the path into
 * @buflen: the length of the buffer
 *
 * Called with cgroup_mutex held or else with an RCU-protected cgroup
 * reference.  Writes path of cgroup into buf.  Returns 0 on success,
 * -errno on error.
 */
int cgroup_path(const struct cgroup *cgrp, char *buf, int buflen)
{
	char *start;
	struct dentry *dentry = rcu_dereference_check(cgrp->dentry,
						      rcu_read_lock_held() ||
						      cgroup_lock_is_held());

	if (!dentry || cgrp == dummytop) {
		/*
		 * Inactive subsystems have no dentry for their root
		 * cgroup
		 */
		strcpy(buf, "/");
		return 0;
	}

	start = buf + buflen;

	*--start = '\0';
	for (;;) {
		int len = dentry->d_name.len;

		if ((start -= len) < buf)
			return -ENAMETOOLONG;
		memcpy(start, dentry->d_name.name, len);
		cgrp = cgrp->parent;
		if (!cgrp)
			break;

		dentry = rcu_dereference_check(cgrp->dentry,
					       rcu_read_lock_held() ||
					       cgroup_lock_is_held());
		if (!cgrp->parent)
			continue;
		if (--start < buf)
			return -ENAMETOOLONG;
		*start = '/';
	}
	memmove(buf, start, buf + buflen - start);
	return 0;
}
EXPORT_SYMBOL_GPL(cgroup_path);
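
/*
 * Example caller of cgroup_path() (a sketch; the buffer name and printk
 * are illustrative): format a cgroup's path while holding cgroup_mutex:
 *
 *	char *buf = kmalloc(PATH_MAX, GFP_KERNEL);
 *
 *	if (buf && !cgroup_path(cgrp, buf, PATH_MAX))
 *		printk(KERN_INFO "cgroup path: %s\n", buf);
 *	kfree(buf);
 */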
1750 
1751 /**
1752  * cgroup_attach_task - attach task 'tsk' to cgroup 'cgrp'
1753  * @cgrp: the cgroup the task is attaching to
1754  * @tsk: the task to be attached
1755  *
1756  * Call holding cgroup_mutex. May take task_lock of
1757  * the task 'tsk' during call.
1758  */
1759 int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
1760 {
1761 	int retval = 0;
1762 	struct cgroup_subsys *ss, *failed_ss = NULL;
1763 	struct cgroup *oldcgrp;
1764 	struct css_set *cg;
1765 	struct css_set *newcg;
1766 	struct cgroupfs_root *root = cgrp->root;
1767 
1768 	/* Nothing to do if the task is already in that cgroup */
1769 	oldcgrp = task_cgroup_from_root(tsk, root);
1770 	if (cgrp == oldcgrp)
1771 		return 0;
1772 
1773 	for_each_subsys(root, ss) {
1774 		if (ss->can_attach) {
1775 			retval = ss->can_attach(ss, cgrp, tsk, false);
1776 			if (retval) {
1777 				/*
1778 				 * Remember on which subsystem the can_attach()
1779 				 * failed, so that we only call cancel_attach()
1780 				 * against the subsystems whose can_attach()
1781 				 * succeeded. (See below)
1782 				 */
1783 				failed_ss = ss;
1784 				goto out;
1785 			}
1786 		}
1787 	}
1788 
1789 	task_lock(tsk);
1790 	cg = tsk->cgroups;
1791 	get_css_set(cg);
1792 	task_unlock(tsk);
1793 	/*
1794 	 * Locate or allocate a new css_set for this task,
1795 	 * based on its final set of cgroups
1796 	 */
1797 	newcg = find_css_set(cg, cgrp);
1798 	put_css_set(cg);
1799 	if (!newcg) {
1800 		retval = -ENOMEM;
1801 		goto out;
1802 	}
1803 
1804 	task_lock(tsk);
1805 	if (tsk->flags & PF_EXITING) {
1806 		task_unlock(tsk);
1807 		put_css_set(newcg);
1808 		retval = -ESRCH;
1809 		goto out;
1810 	}
1811 	rcu_assign_pointer(tsk->cgroups, newcg);
1812 	task_unlock(tsk);
1813 
1814 	/* Update the css_set linked lists if we're using them */
1815 	write_lock(&css_set_lock);
1816 	if (!list_empty(&tsk->cg_list))
1817 		list_move(&tsk->cg_list, &newcg->tasks);
1818 	write_unlock(&css_set_lock);
1819 
1820 	for_each_subsys(root, ss) {
1821 		if (ss->attach)
1822 			ss->attach(ss, cgrp, oldcgrp, tsk, false);
1823 	}
1824 	set_bit(CGRP_RELEASABLE, &oldcgrp->flags);
1825 	synchronize_rcu();
1826 	put_css_set(cg);
1827 
1828 	/*
1829 	 * wake up rmdir() waiter. the rmdir should fail since the cgroup
1830 	 * is no longer empty.
1831 	 */
1832 	cgroup_wakeup_rmdir_waiter(cgrp);
1833 out:
1834 	if (retval) {
1835 		for_each_subsys(root, ss) {
1836 			if (ss == failed_ss)
1837 				/*
1838 				 * This subsystem was the one that failed the
1839 				 * can_attach() check earlier, so we don't need
1840 				 * to call cancel_attach() against it or any
1841 				 * remaining subsystems.
1842 				 */
1843 				break;
1844 			if (ss->cancel_attach)
1845 				ss->cancel_attach(ss, cgrp, tsk, false);
1846 		}
1847 	}
1848 	return retval;
1849 }
1850 
1851 /**
1852  * cgroup_attach_task_all - attach task 'tsk' to all cgroups of task 'from'
1853  * @from: attach to all cgroups of a given task
1854  * @tsk: the task to be attached
1855  */
1856 int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk)
1857 {
1858 	struct cgroupfs_root *root;
1859 	int retval = 0;
1860 
1861 	cgroup_lock();
1862 	for_each_active_root(root) {
1863 		struct cgroup *from_cg = task_cgroup_from_root(from, root);
1864 
1865 		retval = cgroup_attach_task(from_cg, tsk);
1866 		if (retval)
1867 			break;
1868 	}
1869 	cgroup_unlock();
1870 
1871 	return retval;
1872 }
1873 EXPORT_SYMBOL_GPL(cgroup_attach_task_all);
1874 
1875 /*
1876  * Attach task with pid 'pid' to cgroup 'cgrp'. Call with cgroup_mutex
1877  * held. May take task_lock of task
1878  */
1879 static int attach_task_by_pid(struct cgroup *cgrp, u64 pid)
1880 {
1881 	struct task_struct *tsk;
1882 	const struct cred *cred = current_cred(), *tcred;
1883 	int ret;
1884 
1885 	if (pid) {
1886 		rcu_read_lock();
1887 		tsk = find_task_by_vpid(pid);
1888 		if (!tsk || tsk->flags & PF_EXITING) {
1889 			rcu_read_unlock();
1890 			return -ESRCH;
1891 		}
1892 
1893 		tcred = __task_cred(tsk);
1894 		if (cred->euid &&
1895 		    cred->euid != tcred->uid &&
1896 		    cred->euid != tcred->suid) {
1897 			rcu_read_unlock();
1898 			return -EACCES;
1899 		}
1900 		get_task_struct(tsk);
1901 		rcu_read_unlock();
1902 	} else {
1903 		tsk = current;
1904 		get_task_struct(tsk);
1905 	}
1906 
1907 	ret = cgroup_attach_task(cgrp, tsk);
1908 	put_task_struct(tsk);
1909 	return ret;
1910 }
1911 
1912 static int cgroup_tasks_write(struct cgroup *cgrp, struct cftype *cft, u64 pid)
1913 {
1914 	int ret;
1915 	if (!cgroup_lock_live_group(cgrp))
1916 		return -ENODEV;
1917 	ret = attach_task_by_pid(cgrp, pid);
1918 	cgroup_unlock();
1919 	return ret;
1920 }
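/*
 * From userspace this is the classic "tasks" interface; e.g. (shell,
 * mount point hypothetical):
 *
 *	echo $PID > /dev/cgroup/foo/tasks
 *
 * lands in cgroup_tasks_write() -> attach_task_by_pid(), and writing
 * 0 attaches the writing task itself (see the !pid branch above).
 */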
1921 
1922 /**
1923  * cgroup_lock_live_group - take cgroup_mutex and check that cgrp is alive.
1924  * @cgrp: the cgroup to be checked for liveness
1925  *
1926  * On success, returns true; the lock should be later released with
1927  * cgroup_unlock(). On failure returns false with no lock held.
1928  */
1929 bool cgroup_lock_live_group(struct cgroup *cgrp)
1930 {
1931 	mutex_lock(&cgroup_mutex);
1932 	if (cgroup_is_removed(cgrp)) {
1933 		mutex_unlock(&cgroup_mutex);
1934 		return false;
1935 	}
1936 	return true;
1937 }
1938 EXPORT_SYMBOL_GPL(cgroup_lock_live_group);
1939 
1940 static int cgroup_release_agent_write(struct cgroup *cgrp, struct cftype *cft,
1941 				      const char *buffer)
1942 {
1943 	BUILD_BUG_ON(sizeof(cgrp->root->release_agent_path) < PATH_MAX);
1944 	if (strlen(buffer) >= PATH_MAX)
1945 		return -EINVAL;
1946 	if (!cgroup_lock_live_group(cgrp))
1947 		return -ENODEV;
1948 	strcpy(cgrp->root->release_agent_path, buffer);
1949 	cgroup_unlock();
1950 	return 0;
1951 }
1952 
1953 static int cgroup_release_agent_show(struct cgroup *cgrp, struct cftype *cft,
1954 				     struct seq_file *seq)
1955 {
1956 	if (!cgroup_lock_live_group(cgrp))
1957 		return -ENODEV;
1958 	seq_puts(seq, cgrp->root->release_agent_path);
1959 	seq_putc(seq, '\n');
1960 	cgroup_unlock();
1961 	return 0;
1962 }
1963 
1964 /* A buffer size big enough for numbers or short strings */
1965 #define CGROUP_LOCAL_BUFFER_SIZE 64
1966 
1967 static ssize_t cgroup_write_X64(struct cgroup *cgrp, struct cftype *cft,
1968 				struct file *file,
1969 				const char __user *userbuf,
1970 				size_t nbytes, loff_t *unused_ppos)
1971 {
1972 	char buffer[CGROUP_LOCAL_BUFFER_SIZE];
1973 	int retval = 0;
1974 	char *end;
1975 
1976 	if (!nbytes)
1977 		return -EINVAL;
1978 	if (nbytes >= sizeof(buffer))
1979 		return -E2BIG;
1980 	if (copy_from_user(buffer, userbuf, nbytes))
1981 		return -EFAULT;
1982 
1983 	buffer[nbytes] = 0;     /* nul-terminate */
1984 	if (cft->write_u64) {
1985 		u64 val = simple_strtoull(strstrip(buffer), &end, 0);
1986 		if (*end)
1987 			return -EINVAL;
1988 		retval = cft->write_u64(cgrp, cft, val);
1989 	} else {
1990 		s64 val = simple_strtoll(strstrip(buffer), &end, 0);
1991 		if (*end)
1992 			return -EINVAL;
1993 		retval = cft->write_s64(cgrp, cft, val);
1994 	}
1995 	if (!retval)
1996 		retval = nbytes;
1997 	return retval;
1998 }
1999 
2000 static ssize_t cgroup_write_string(struct cgroup *cgrp, struct cftype *cft,
2001 				   struct file *file,
2002 				   const char __user *userbuf,
2003 				   size_t nbytes, loff_t *unused_ppos)
2004 {
2005 	char local_buffer[CGROUP_LOCAL_BUFFER_SIZE];
2006 	int retval = 0;
2007 	size_t max_bytes = cft->max_write_len;
2008 	char *buffer = local_buffer;
2009 
2010 	if (!max_bytes)
2011 		max_bytes = sizeof(local_buffer) - 1;
2012 	if (nbytes >= max_bytes)
2013 		return -E2BIG;
2014 	/* Allocate a dynamic buffer if we need one */
2015 	if (nbytes >= sizeof(local_buffer)) {
2016 		buffer = kmalloc(nbytes + 1, GFP_KERNEL);
2017 		if (buffer == NULL)
2018 			return -ENOMEM;
2019 	}
2020 	if (nbytes && copy_from_user(buffer, userbuf, nbytes)) {
2021 		retval = -EFAULT;
2022 		goto out;
2023 	}
2024 
2025 	buffer[nbytes] = 0;     /* nul-terminate */
2026 	retval = cft->write_string(cgrp, cft, strstrip(buffer));
2027 	if (!retval)
2028 		retval = nbytes;
2029 out:
2030 	if (buffer != local_buffer)
2031 		kfree(buffer);
2032 	return retval;
2033 }
2034 
2035 static ssize_t cgroup_file_write(struct file *file, const char __user *buf,
2036 						size_t nbytes, loff_t *ppos)
2037 {
2038 	struct cftype *cft = __d_cft(file->f_dentry);
2039 	struct cgroup *cgrp = __d_cgrp(file->f_dentry->d_parent);
2040 
2041 	if (cgroup_is_removed(cgrp))
2042 		return -ENODEV;
2043 	if (cft->write)
2044 		return cft->write(cgrp, cft, file, buf, nbytes, ppos);
2045 	if (cft->write_u64 || cft->write_s64)
2046 		return cgroup_write_X64(cgrp, cft, file, buf, nbytes, ppos);
2047 	if (cft->write_string)
2048 		return cgroup_write_string(cgrp, cft, file, buf, nbytes, ppos);
2049 	if (cft->trigger) {
2050 		int ret = cft->trigger(cgrp, (unsigned int)cft->private);
2051 		return ret ? ret : nbytes;
2052 	}
2053 	return -EINVAL;
2054 }
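/*
 * Note the dispatch order above: a raw ->write handler takes precedence
 * over the typed helpers. For a hypothetical cftype that only sets
 * .write_u64, "echo 42 > <file>" is routed through cgroup_write_X64(),
 * which parses the buffer and ends up calling write_u64(cgrp, cft, 42).
 */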
2055 
2056 static ssize_t cgroup_read_u64(struct cgroup *cgrp, struct cftype *cft,
2057 			       struct file *file,
2058 			       char __user *buf, size_t nbytes,
2059 			       loff_t *ppos)
2060 {
2061 	char tmp[CGROUP_LOCAL_BUFFER_SIZE];
2062 	u64 val = cft->read_u64(cgrp, cft);
2063 	int len = sprintf(tmp, "%llu\n", (unsigned long long) val);
2064 
2065 	return simple_read_from_buffer(buf, nbytes, ppos, tmp, len);
2066 }
2067 
2068 static ssize_t cgroup_read_s64(struct cgroup *cgrp, struct cftype *cft,
2069 			       struct file *file,
2070 			       char __user *buf, size_t nbytes,
2071 			       loff_t *ppos)
2072 {
2073 	char tmp[CGROUP_LOCAL_BUFFER_SIZE];
2074 	s64 val = cft->read_s64(cgrp, cft);
2075 	int len = sprintf(tmp, "%lld\n", (long long) val);
2076 
2077 	return simple_read_from_buffer(buf, nbytes, ppos, tmp, len);
2078 }
2079 
2080 static ssize_t cgroup_file_read(struct file *file, char __user *buf,
2081 				   size_t nbytes, loff_t *ppos)
2082 {
2083 	struct cftype *cft = __d_cft(file->f_dentry);
2084 	struct cgroup *cgrp = __d_cgrp(file->f_dentry->d_parent);
2085 
2086 	if (cgroup_is_removed(cgrp))
2087 		return -ENODEV;
2088 
2089 	if (cft->read)
2090 		return cft->read(cgrp, cft, file, buf, nbytes, ppos);
2091 	if (cft->read_u64)
2092 		return cgroup_read_u64(cgrp, cft, file, buf, nbytes, ppos);
2093 	if (cft->read_s64)
2094 		return cgroup_read_s64(cgrp, cft, file, buf, nbytes, ppos);
2095 	return -EINVAL;
2096 }
2097 
2098 /*
2099  * seqfile ops/methods for returning structured data. Currently just
2100  * supports string->u64 maps, but can be extended in future.
2101  */
2102 
2103 struct cgroup_seqfile_state {
2104 	struct cftype *cft;
2105 	struct cgroup *cgroup;
2106 };
2107 
2108 static int cgroup_map_add(struct cgroup_map_cb *cb, const char *key, u64 value)
2109 {
2110 	struct seq_file *sf = cb->state;
2111 	return seq_printf(sf, "%s %llu\n", key, (unsigned long long)value);
2112 }
2113 
2114 static int cgroup_seqfile_show(struct seq_file *m, void *arg)
2115 {
2116 	struct cgroup_seqfile_state *state = m->private;
2117 	struct cftype *cft = state->cft;
2118 	if (cft->read_map) {
2119 		struct cgroup_map_cb cb = {
2120 			.fill = cgroup_map_add,
2121 			.state = m,
2122 		};
2123 		return cft->read_map(state->cgroup, cft, &cb);
2124 	}
2125 	return cft->read_seq_string(state->cgroup, cft, m);
2126 }
2127 
2128 static int cgroup_seqfile_release(struct inode *inode, struct file *file)
2129 {
2130 	struct seq_file *seq = file->private_data;
2131 	kfree(seq->private);
2132 	return single_release(inode, file);
2133 }
2134 
2135 static const struct file_operations cgroup_seqfile_operations = {
2136 	.read = seq_read,
2137 	.write = cgroup_file_write,
2138 	.llseek = seq_lseek,
2139 	.release = cgroup_seqfile_release,
2140 };
2141 
2142 static int cgroup_file_open(struct inode *inode, struct file *file)
2143 {
2144 	int err;
2145 	struct cftype *cft;
2146 
2147 	err = generic_file_open(inode, file);
2148 	if (err)
2149 		return err;
2150 	cft = __d_cft(file->f_dentry);
2151 
2152 	if (cft->read_map || cft->read_seq_string) {
2153 		struct cgroup_seqfile_state *state =
2154 			kzalloc(sizeof(*state), GFP_USER);
2155 		if (!state)
2156 			return -ENOMEM;
2157 		state->cft = cft;
2158 		state->cgroup = __d_cgrp(file->f_dentry->d_parent);
2159 		file->f_op = &cgroup_seqfile_operations;
2160 		err = single_open(file, cgroup_seqfile_show, state);
2161 		if (err < 0)
2162 			kfree(state);
2163 	} else if (cft->open)
2164 		err = cft->open(inode, file);
2165 	else
2166 		err = 0;
2167 
2168 	return err;
2169 }
2170 
2171 static int cgroup_file_release(struct inode *inode, struct file *file)
2172 {
2173 	struct cftype *cft = __d_cft(file->f_dentry);
2174 	if (cft->release)
2175 		return cft->release(inode, file);
2176 	return 0;
2177 }
2178 
2179 /*
2180  * cgroup_rename - Only allow simple rename of directories in place.
2181  */
2182 static int cgroup_rename(struct inode *old_dir, struct dentry *old_dentry,
2183 			    struct inode *new_dir, struct dentry *new_dentry)
2184 {
2185 	if (!S_ISDIR(old_dentry->d_inode->i_mode))
2186 		return -ENOTDIR;
2187 	if (new_dentry->d_inode)
2188 		return -EEXIST;
2189 	if (old_dir != new_dir)
2190 		return -EIO;
2191 	return simple_rename(old_dir, old_dentry, new_dir, new_dentry);
2192 }
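/*
 * Illustrative consequence: "mv /dev/cgroup/a /dev/cgroup/b" (a simple
 * in-place rename) succeeds, while moving a cgroup under a different
 * parent, "mv /dev/cgroup/a /dev/cgroup/sub/a", fails with -EIO.
 */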
2193 
2194 static const struct file_operations cgroup_file_operations = {
2195 	.read = cgroup_file_read,
2196 	.write = cgroup_file_write,
2197 	.llseek = generic_file_llseek,
2198 	.open = cgroup_file_open,
2199 	.release = cgroup_file_release,
2200 };
2201 
2202 static const struct inode_operations cgroup_dir_inode_operations = {
2203 	.lookup = cgroup_lookup,
2204 	.mkdir = cgroup_mkdir,
2205 	.rmdir = cgroup_rmdir,
2206 	.rename = cgroup_rename,
2207 };
2208 
2209 static struct dentry *cgroup_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
2210 {
2211 	if (dentry->d_name.len > NAME_MAX)
2212 		return ERR_PTR(-ENAMETOOLONG);
2213 	d_add(dentry, NULL);
2214 	return NULL;
2215 }
2216 
2217 /*
2218  * Check if a file is a control file
2219  */
2220 static inline struct cftype *__file_cft(struct file *file)
2221 {
2222 	if (file->f_dentry->d_inode->i_fop != &cgroup_file_operations)
2223 		return ERR_PTR(-EINVAL);
2224 	return __d_cft(file->f_dentry);
2225 }
2226 
2227 static int cgroup_create_file(struct dentry *dentry, mode_t mode,
2228 				struct super_block *sb)
2229 {
2230 	struct inode *inode;
2231 
2232 	if (!dentry)
2233 		return -ENOENT;
2234 	if (dentry->d_inode)
2235 		return -EEXIST;
2236 
2237 	inode = cgroup_new_inode(mode, sb);
2238 	if (!inode)
2239 		return -ENOMEM;
2240 
2241 	if (S_ISDIR(mode)) {
2242 		inode->i_op = &cgroup_dir_inode_operations;
2243 		inode->i_fop = &simple_dir_operations;
2244 
2245 		/* start off with i_nlink == 2 (for "." entry) */
2246 		inc_nlink(inode);
2247 
2248 		/* start with the directory inode held, so that we can
2249 		 * populate it without racing with another mkdir */
2250 		mutex_lock_nested(&inode->i_mutex, I_MUTEX_CHILD);
2251 	} else if (S_ISREG(mode)) {
2252 		inode->i_size = 0;
2253 		inode->i_fop = &cgroup_file_operations;
2254 	}
2255 	d_instantiate(dentry, inode);
2256 	dget(dentry);	/* Extra count - pin the dentry in core */
2257 	return 0;
2258 }
2259 
2260 /*
2261  * cgroup_create_dir - create a directory for an object.
2262  * @cgrp: the cgroup we create the directory for; it must have a valid
2263  *        ->parent field, and its ->dentry field will be filled in here.
2264  * @dentry: dentry of the new cgroup
2265  * @mode: mode to set on new directory.
2266  */
2267 static int cgroup_create_dir(struct cgroup *cgrp, struct dentry *dentry,
2268 				mode_t mode)
2269 {
2270 	struct dentry *parent;
2271 	int error = 0;
2272 
2273 	parent = cgrp->parent->dentry;
2274 	error = cgroup_create_file(dentry, S_IFDIR | mode, cgrp->root->sb);
2275 	if (!error) {
2276 		dentry->d_fsdata = cgrp;
2277 		inc_nlink(parent->d_inode);
2278 		rcu_assign_pointer(cgrp->dentry, dentry);
2279 		dget(dentry);
2280 	}
2281 	dput(dentry);
2282 
2283 	return error;
2284 }
2285 
2286 /**
2287  * cgroup_file_mode - deduce file mode of a control file
2288  * @cft: the control file in question
2289  *
2290  * returns cft->mode if ->mode is not 0
2291  * returns S_IRUGO|S_IWUSR if it has both a read and a write handler
2292  * returns S_IRUGO if it has only a read handler
2293  * returns S_IWUSR if it has only a write handler
2294  */
2295 static mode_t cgroup_file_mode(const struct cftype *cft)
2296 {
2297 	mode_t mode = 0;
2298 
2299 	if (cft->mode)
2300 		return cft->mode;
2301 
2302 	if (cft->read || cft->read_u64 || cft->read_s64 ||
2303 	    cft->read_map || cft->read_seq_string)
2304 		mode |= S_IRUGO;
2305 
2306 	if (cft->write || cft->write_u64 || cft->write_s64 ||
2307 	    cft->write_string || cft->trigger)
2308 		mode |= S_IWUSR;
2309 
2310 	return mode;
2311 }
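/*
 * For example, a cftype that sets .read_u64 and .write_u64 but leaves
 * .mode at 0 is created S_IRUGO | S_IWUSR (0644); one with only
 * .read_seq_string becomes S_IRUGO (0444).
 */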
2312 
2313 int cgroup_add_file(struct cgroup *cgrp,
2314 		       struct cgroup_subsys *subsys,
2315 		       const struct cftype *cft)
2316 {
2317 	struct dentry *dir = cgrp->dentry;
2318 	struct dentry *dentry;
2319 	int error;
2320 	mode_t mode;
2321 
2322 	char name[MAX_CGROUP_TYPE_NAMELEN + MAX_CFTYPE_NAME + 2] = { 0 };
2323 	if (subsys && !test_bit(ROOT_NOPREFIX, &cgrp->root->flags)) {
2324 		strcpy(name, subsys->name);
2325 		strcat(name, ".");
2326 	}
2327 	strcat(name, cft->name);
2328 	BUG_ON(!mutex_is_locked(&dir->d_inode->i_mutex));
2329 	dentry = lookup_one_len(name, dir, strlen(name));
2330 	if (!IS_ERR(dentry)) {
2331 		mode = cgroup_file_mode(cft);
2332 		error = cgroup_create_file(dentry, mode | S_IFREG,
2333 						cgrp->root->sb);
2334 		if (!error)
2335 			dentry->d_fsdata = (void *)cft;
2336 		dput(dentry);
2337 	} else
2338 		error = PTR_ERR(dentry);
2339 	return error;
2340 }
2341 EXPORT_SYMBOL_GPL(cgroup_add_file);
2342 
2343 int cgroup_add_files(struct cgroup *cgrp,
2344 			struct cgroup_subsys *subsys,
2345 			const struct cftype cft[],
2346 			int count)
2347 {
2348 	int i, err;
2349 	for (i = 0; i < count; i++) {
2350 		err = cgroup_add_file(cgrp, subsys, &cft[i]);
2351 		if (err)
2352 			return err;
2353 	}
2354 	return 0;
2355 }
2356 EXPORT_SYMBOL_GPL(cgroup_add_files);
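/*
 * A sketch of typical subsystem usage (the "foo" names are hypothetical):
 *
 *	static u64 foo_count_read(struct cgroup *cgrp, struct cftype *cft)
 *	{
 *		return 0;
 *	}
 *
 *	static struct cftype foo_files[] = {
 *		{ .name = "count", .read_u64 = foo_count_read, },
 *	};
 *
 *	static int foo_populate(struct cgroup_subsys *ss, struct cgroup *cgrp)
 *	{
 *		return cgroup_add_files(cgrp, ss, foo_files,
 *					ARRAY_SIZE(foo_files));
 *	}
 *
 * Each file then shows up as "foo.count" in the cgroup directory,
 * unless the hierarchy was mounted with the noprefix option.
 */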
2357 
2358 /**
2359  * cgroup_task_count - count the number of tasks in a cgroup.
2360  * @cgrp: the cgroup in question
2361  *
2362  * Return the number of tasks in the cgroup.
2363  */
2364 int cgroup_task_count(const struct cgroup *cgrp)
2365 {
2366 	int count = 0;
2367 	struct cg_cgroup_link *link;
2368 
2369 	read_lock(&css_set_lock);
2370 	list_for_each_entry(link, &cgrp->css_sets, cgrp_link_list) {
2371 		count += atomic_read(&link->cg->refcount);
2372 	}
2373 	read_unlock(&css_set_lock);
2374 	return count;
2375 }
2376 
2377 /*
2378  * Advance a list_head iterator.  The iterator should be positioned at
2379  * the start of a css_set
2380  */
2381 static void cgroup_advance_iter(struct cgroup *cgrp,
2382 				struct cgroup_iter *it)
2383 {
2384 	struct list_head *l = it->cg_link;
2385 	struct cg_cgroup_link *link;
2386 	struct css_set *cg;
2387 
2388 	/* Advance to the next non-empty css_set */
2389 	do {
2390 		l = l->next;
2391 		if (l == &cgrp->css_sets) {
2392 			it->cg_link = NULL;
2393 			return;
2394 		}
2395 		link = list_entry(l, struct cg_cgroup_link, cgrp_link_list);
2396 		cg = link->cg;
2397 	} while (list_empty(&cg->tasks));
2398 	it->cg_link = l;
2399 	it->task = cg->tasks.next;
2400 }
2401 
2402 /*
2403  * To reduce the fork() overhead for systems that are not actually
2404  * using their cgroups capability, we don't maintain the lists running
2405  * through each css_set to its tasks until we see the list actually
2406  * used - in other words after the first call to cgroup_iter_start().
2407  *
2408  * The tasklist_lock is not held here, as do_each_thread() and
2409  * while_each_thread() are protected by RCU.
2410  */
2411 static void cgroup_enable_task_cg_lists(void)
2412 {
2413 	struct task_struct *p, *g;
2414 	write_lock(&css_set_lock);
2415 	use_task_css_set_links = 1;
2416 	do_each_thread(g, p) {
2417 		task_lock(p);
2418 		/*
2419 		 * We must check whether the process is exiting, otherwise
2420 		 * this races with cgroup_exit(): the list entry would
2421 		 * never be deleted even though the process has exited.
2422 		 */
2423 		if (!(p->flags & PF_EXITING) && list_empty(&p->cg_list))
2424 			list_add(&p->cg_list, &p->cgroups->tasks);
2425 		task_unlock(p);
2426 	} while_each_thread(g, p);
2427 	write_unlock(&css_set_lock);
2428 }
2429 
2430 void cgroup_iter_start(struct cgroup *cgrp, struct cgroup_iter *it)
2431 {
2432 	/*
2433 	 * The first time anyone tries to iterate across a cgroup,
2434 	 * we need to enable the list linking each css_set to its
2435 	 * tasks, and fix up all existing tasks.
2436 	 */
2437 	if (!use_task_css_set_links)
2438 		cgroup_enable_task_cg_lists();
2439 
2440 	read_lock(&css_set_lock);
2441 	it->cg_link = &cgrp->css_sets;
2442 	cgroup_advance_iter(cgrp, it);
2443 }
2444 
2445 struct task_struct *cgroup_iter_next(struct cgroup *cgrp,
2446 					struct cgroup_iter *it)
2447 {
2448 	struct task_struct *res;
2449 	struct list_head *l = it->task;
2450 	struct cg_cgroup_link *link;
2451 
2452 	/* If the iterator's cg_link is NULL, we have no tasks */
2453 	if (!it->cg_link)
2454 		return NULL;
2455 	res = list_entry(l, struct task_struct, cg_list);
2456 	/* Advance iterator to find next entry */
2457 	l = l->next;
2458 	link = list_entry(it->cg_link, struct cg_cgroup_link, cgrp_link_list);
2459 	if (l == &link->cg->tasks) {
2460 		/* We reached the end of this task list - move on to
2461 		 * the next cg_cgroup_link */
2462 		cgroup_advance_iter(cgrp, it);
2463 	} else {
2464 		it->task = l;
2465 	}
2466 	return res;
2467 }
2468 
2469 void cgroup_iter_end(struct cgroup *cgrp, struct cgroup_iter *it)
2470 {
2471 	read_unlock(&css_set_lock);
2472 }
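/*
 * Canonical usage of the iterator API above (sketch; css_set_lock is
 * read-held for the whole walk, so the body must not sleep, and "p"
 * must not be used after cgroup_iter_end() without a reference):
 *
 *	struct cgroup_iter it;
 *	struct task_struct *p;
 *
 *	cgroup_iter_start(cgrp, &it);
 *	while ((p = cgroup_iter_next(cgrp, &it)))
 *		count++;
 *	cgroup_iter_end(cgrp, &it);
 */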
2473 
2474 static inline int started_after_time(struct task_struct *t1,
2475 				     struct timespec *time,
2476 				     struct task_struct *t2)
2477 {
2478 	int start_diff = timespec_compare(&t1->start_time, time);
2479 	if (start_diff > 0) {
2480 		return 1;
2481 	} else if (start_diff < 0) {
2482 		return 0;
2483 	} else {
2484 		/*
2485 		 * Arbitrarily, if two processes started at the same
2486 		 * time, we'll say that the lower pointer value
2487 		 * started first. Note that t2 may have exited by now
2488 		 * so this may not be a valid pointer any longer, but
2489 		 * that's fine - it still serves to distinguish
2490 		 * between two tasks started (effectively) simultaneously.
2491 		 */
2492 		return t1 > t2;
2493 	}
2494 }
2495 
2496 /*
2497  * This function is a callback from heap_insert() and is used to order
2498  * the heap.
2499  * In this case we order the heap in descending task start time.
2500  */
2501 static inline int started_after(void *p1, void *p2)
2502 {
2503 	struct task_struct *t1 = p1;
2504 	struct task_struct *t2 = p2;
2505 	return started_after_time(t1, &t2->start_time, t2);
2506 }
2507 
2508 /**
2509  * cgroup_scan_tasks - iterate through all the tasks in a cgroup
2510  * @scan: struct cgroup_scanner containing arguments for the scan
2511  *
2512  * Arguments include pointers to callback functions test_task() and
2513  * process_task().
2514  * Iterate through all the tasks in a cgroup, calling test_task() for each,
2515  * and if it returns true, call process_task() for it also.
2516  * The test_task pointer may be NULL, meaning always true (select all tasks).
2517  * Effectively duplicates cgroup_iter_{start,next,end}()
2518  * but does not lock css_set_lock for the call to process_task().
2519  * The struct cgroup_scanner may be embedded in any structure of the caller's
2520  * creation.
2521  * It is guaranteed that process_task() will act on every task that
2522  * is a member of the cgroup for the duration of this call. This
2523  * function may or may not call process_task() for tasks that exit
2524  * or move to a different cgroup during the call, or are forked or
2525  * move into the cgroup during the call.
2526  *
2527  * Note that test_task() may be called with locks held, and may in some
2528  * situations be called multiple times for the same task, so it should
2529  * be cheap.
2530  * If the heap pointer in the struct cgroup_scanner is non-NULL, a heap has been
2531  * pre-allocated and will be used for heap operations (and its "gt" member will
2532  * be overwritten), else a temporary heap will be used (allocation of which
2533  * may cause this function to fail).
2534  */
2535 int cgroup_scan_tasks(struct cgroup_scanner *scan)
2536 {
2537 	int retval, i;
2538 	struct cgroup_iter it;
2539 	struct task_struct *p, *dropped;
2540 	/* Never dereference latest_task, since it's not refcounted */
2541 	struct task_struct *latest_task = NULL;
2542 	struct ptr_heap tmp_heap;
2543 	struct ptr_heap *heap;
2544 	struct timespec latest_time = { 0, 0 };
2545 
2546 	if (scan->heap) {
2547 		/* The caller supplied our heap and pre-allocated its memory */
2548 		heap = scan->heap;
2549 		heap->gt = &started_after;
2550 	} else {
2551 		/* We need to allocate our own heap memory */
2552 		heap = &tmp_heap;
2553 		retval = heap_init(heap, PAGE_SIZE, GFP_KERNEL, &started_after);
2554 		if (retval)
2555 			/* cannot allocate the heap */
2556 			return retval;
2557 	}
2558 
2559  again:
2560 	/*
2561 	 * Scan tasks in the cgroup, using the scanner's "test_task" callback
2562 	 * to determine which are of interest, and using the scanner's
2563 	 * "process_task" callback to process any of them that need an update.
2564 	 * Since we don't want to hold any locks during the task updates,
2565 	 * gather tasks to be processed in a heap structure.
2566 	 * The heap is sorted by descending task start time.
2567 	 * If the statically-sized heap fills up, we overflow tasks that
2568 	 * started later, and in future iterations only consider tasks that
2569 	 * started after the latest task in the previous pass. This
2570 	 * guarantees forward progress and that we don't miss any tasks.
2571 	 */
2572 	heap->size = 0;
2573 	cgroup_iter_start(scan->cg, &it);
2574 	while ((p = cgroup_iter_next(scan->cg, &it))) {
2575 		/*
2576 		 * Only affect tasks that qualify per the caller's callback,
2577 		 * if one was provided
2578 		 */
2579 		if (scan->test_task && !scan->test_task(p, scan))
2580 			continue;
2581 		/*
2582 		 * Only process tasks that started after the last task
2583 		 * we processed
2584 		 */
2585 		if (!started_after_time(p, &latest_time, latest_task))
2586 			continue;
2587 		dropped = heap_insert(heap, p);
2588 		if (dropped == NULL) {
2589 			/*
2590 			 * The new task was inserted; the heap wasn't
2591 			 * previously full
2592 			 */
2593 			get_task_struct(p);
2594 		} else if (dropped != p) {
2595 			/*
2596 			 * The new task was inserted, and pushed out a
2597 			 * different task
2598 			 */
2599 			get_task_struct(p);
2600 			put_task_struct(dropped);
2601 		}
2602 		/*
2603 		 * Else the new task was newer than anything already in
2604 		 * the heap and wasn't inserted
2605 		 */
2606 	}
2607 	cgroup_iter_end(scan->cg, &it);
2608 
2609 	if (heap->size) {
2610 		for (i = 0; i < heap->size; i++) {
2611 			struct task_struct *q = heap->ptrs[i];
2612 			if (i == 0) {
2613 				latest_time = q->start_time;
2614 				latest_task = q;
2615 			}
2616 			/* Process the task per the caller's callback */
2617 			scan->process_task(q, scan);
2618 			put_task_struct(q);
2619 		}
2620 		/*
2621 		 * If we had to process any tasks at all, scan again
2622 		 * in case some of them were in the middle of forking
2623 		 * children that didn't get processed.
2624 		 * Not the most efficient way to do it, but it avoids
2625 		 * having to take callback_mutex in the fork path
2626 		 */
2627 		goto again;
2628 	}
2629 	if (heap == &tmp_heap)
2630 		heap_free(&tmp_heap);
2631 	return 0;
2632 }
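/*
 * A hypothetical caller sketch (callback names invented for
 * illustration; .heap left NULL so a temporary heap is allocated):
 *
 *	static int foo_test(struct task_struct *p, struct cgroup_scanner *s)
 *	{
 *		return !(p->flags & PF_KTHREAD);
 *	}
 *
 *	static void foo_process(struct task_struct *p,
 *				struct cgroup_scanner *s)
 *	{
 *		set_tsk_need_resched(p);
 *	}
 *
 *	struct cgroup_scanner scan = {
 *		.cg = cgrp,
 *		.test_task = foo_test,
 *		.process_task = foo_process,
 *	};
 *
 *	cgroup_scan_tasks(&scan);
 */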
2633 
2634 /*
2635  * Stuff for reading the 'tasks'/'procs' files.
2636  *
2637  * Reading this file can return large amounts of data if a cgroup has
2638  * *lots* of attached tasks. So it may need several calls to read(),
2639  * but we cannot guarantee that the information we produce is correct
2640  * unless we produce it entirely atomically.
2641  *
2642  */
2643 
2644 /*
2645  * The following two functions "fix" the issue where there are more pids
2646  * than kmalloc will give memory for; in such cases, we use vmalloc/vfree.
2647  * TODO: replace with a kernel-wide solution to this problem
2648  */
2649 #define PIDLIST_TOO_LARGE(c) ((c) * sizeof(pid_t) > (PAGE_SIZE * 2))
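/*
 * e.g. with 4KB pages and a 4-byte pid_t, PIDLIST_TOO_LARGE(c) is true
 * once c exceeds 2048 entries (2048 * 4 == 8KB == PAGE_SIZE * 2), so
 * anything larger is served by vmalloc instead of kmalloc.
 */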
2650 static void *pidlist_allocate(int count)
2651 {
2652 	if (PIDLIST_TOO_LARGE(count))
2653 		return vmalloc(count * sizeof(pid_t));
2654 	else
2655 		return kmalloc(count * sizeof(pid_t), GFP_KERNEL);
2656 }
2657 static void pidlist_free(void *p)
2658 {
2659 	if (is_vmalloc_addr(p))
2660 		vfree(p);
2661 	else
2662 		kfree(p);
2663 }
2664 static void *pidlist_resize(void *p, int newcount)
2665 {
2666 	void *newlist;
2667 	/* note: if new alloc fails, old p will still be valid either way */
2668 	if (is_vmalloc_addr(p)) {
2669 		newlist = vmalloc(newcount * sizeof(pid_t));
2670 		if (!newlist)
2671 			return NULL;
2672 		memcpy(newlist, p, newcount * sizeof(pid_t));
2673 		vfree(p);
2674 	} else {
2675 		newlist = krealloc(p, newcount * sizeof(pid_t), GFP_KERNEL);
2676 	}
2677 	return newlist;
2678 }
2679 
2680 /*
2681  * pidlist_uniq - given a kmalloc()ed list, strip out all duplicate entries
2682  * If the new stripped list is sufficiently smaller and there's enough memory
2683  * to allocate a new buffer, will let go of the unneeded memory. Returns the
2684  * number of unique elements.
2685  */
2686 /* is the size difference enough that we should re-allocate the array? */
2687 #define PIDLIST_REALLOC_DIFFERENCE(old, new) ((old) - PAGE_SIZE >= (new))
2688 static int pidlist_uniq(pid_t **p, int length)
2689 {
2690 	int src, dest = 1;
2691 	pid_t *list = *p;
2692 	pid_t *newlist;
2693 
2694 	/*
2695 	 * we presume the 0th element is unique, so src starts at 1. trivial
2696 	 * edge cases first; no work needs to be done for either
2697 	 */
2698 	if (length == 0 || length == 1)
2699 		return length;
2700 	/* src and dest walk down the list; dest counts unique elements */
2701 	for (src = 1; src < length; src++) {
2702 		/* find next unique element */
2703 		while (list[src] == list[src-1]) {
2704 			src++;
2705 			if (src == length)
2706 				goto after;
2707 		}
2708 		/* dest always points to where the next unique element goes */
2709 		list[dest] = list[src];
2710 		dest++;
2711 	}
2712 after:
2713 	/*
2714 	 * if the length difference is large enough, we want to allocate a
2715 	 * smaller buffer to save memory. if this fails due to out of memory,
2716 	 * we'll just stay with what we've got.
2717 	 */
2718 	if (PIDLIST_REALLOC_DIFFERENCE(length, dest)) {
2719 		newlist = pidlist_resize(list, dest);
2720 		if (newlist)
2721 			*p = newlist;
2722 	}
2723 	return dest;
2724 }
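/*
 * e.g. a sorted input {1, 1, 4, 4, 4, 9} is compacted in place to
 * {1, 4, 9, ...} and pidlist_uniq() returns dest == 3; the stale tail
 * is left behind (and possibly trimmed by pidlist_resize()).
 */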
2725 
2726 static int cmppid(const void *a, const void *b)
2727 {
2728 	return *(pid_t *)a - *(pid_t *)b;
2729 }
2730 
2731 /*
2732  * find the appropriate pidlist for our purpose (given procs vs tasks)
2733  * returns with the lock on that pidlist already held, and takes care
2734  * of the use count, or returns NULL with no locks held if we're out of
2735  * memory.
2736  */
2737 static struct cgroup_pidlist *cgroup_pidlist_find(struct cgroup *cgrp,
2738 						  enum cgroup_filetype type)
2739 {
2740 	struct cgroup_pidlist *l;
2741 	/* don't need task_nsproxy() if we're looking at ourselves */
2742 	struct pid_namespace *ns = current->nsproxy->pid_ns;
2743 
2744 	/*
2745 	 * We can't drop the pidlist_mutex before taking the l->mutex in case
2746 	 * the last ref-holder is trying to remove l from the list at the same
2747 	 * time. Holding the pidlist_mutex precludes somebody taking whichever
2748 	 * list we find out from under us - compare cgroup_release_pid_array().
2749 	 */
2750 	mutex_lock(&cgrp->pidlist_mutex);
2751 	list_for_each_entry(l, &cgrp->pidlists, links) {
2752 		if (l->key.type == type && l->key.ns == ns) {
2753 			/* make sure l doesn't vanish out from under us */
2754 			down_write(&l->mutex);
2755 			mutex_unlock(&cgrp->pidlist_mutex);
2756 			return l;
2757 		}
2758 	}
2759 	/* entry not found; create a new one */
2760 	l = kmalloc(sizeof(struct cgroup_pidlist), GFP_KERNEL);
2761 	if (!l) {
2762 		mutex_unlock(&cgrp->pidlist_mutex);
2763 		return l;
2764 	}
2765 	init_rwsem(&l->mutex);
2766 	down_write(&l->mutex);
2767 	l->key.type = type;
2768 	l->key.ns = get_pid_ns(ns);
2769 	l->use_count = 0; /* don't increment here */
2770 	l->list = NULL;
2771 	l->owner = cgrp;
2772 	list_add(&l->links, &cgrp->pidlists);
2773 	mutex_unlock(&cgrp->pidlist_mutex);
2774 	return l;
2775 }
2776 
2777 /*
2778  * Load a cgroup's pidarray with either procs' tgids or tasks' pids
2779  */
2780 static int pidlist_array_load(struct cgroup *cgrp, enum cgroup_filetype type,
2781 			      struct cgroup_pidlist **lp)
2782 {
2783 	pid_t *array;
2784 	int length;
2785 	int pid, n = 0; /* used for populating the array */
2786 	struct cgroup_iter it;
2787 	struct task_struct *tsk;
2788 	struct cgroup_pidlist *l;
2789 
2790 	/*
2791 	 * If cgroup gets more users after we read count, we won't have
2792 	 * enough space - tough.  This race is indistinguishable to the
2793 	 * caller from the case that the additional cgroup users didn't
2794 	 * show up until sometime later on.
2795 	 */
2796 	length = cgroup_task_count(cgrp);
2797 	array = pidlist_allocate(length);
2798 	if (!array)
2799 		return -ENOMEM;
2800 	/* now, populate the array */
2801 	cgroup_iter_start(cgrp, &it);
2802 	while ((tsk = cgroup_iter_next(cgrp, &it))) {
2803 		if (unlikely(n == length))
2804 			break;
2805 		/* get tgid or pid for procs or tasks file respectively */
2806 		if (type == CGROUP_FILE_PROCS)
2807 			pid = task_tgid_vnr(tsk);
2808 		else
2809 			pid = task_pid_vnr(tsk);
2810 		if (pid > 0) /* make sure to only use valid results */
2811 			array[n++] = pid;
2812 	}
2813 	cgroup_iter_end(cgrp, &it);
2814 	length = n;
2815 	/* now sort & (if procs) strip out duplicates */
2816 	sort(array, length, sizeof(pid_t), cmppid, NULL);
2817 	if (type == CGROUP_FILE_PROCS)
2818 		length = pidlist_uniq(&array, length);
2819 	l = cgroup_pidlist_find(cgrp, type);
2820 	if (!l) {
2821 		pidlist_free(array);
2822 		return -ENOMEM;
2823 	}
2824 	/* store array, freeing old if necessary - lock already held */
2825 	pidlist_free(l->list);
2826 	l->list = array;
2827 	l->length = length;
2828 	l->use_count++;
2829 	up_write(&l->mutex);
2830 	*lp = l;
2831 	return 0;
2832 }
2833 
2834 /**
2835  * cgroupstats_build - build and fill cgroupstats
2836  * @stats: cgroupstats to fill information into
2837  * @dentry: A dentry entry belonging to the cgroup for which stats have
2838  * been requested.
2839  *
2840  * Build and fill cgroupstats so that taskstats can export it to user
2841  * space.
2842  */
2843 int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry)
2844 {
2845 	int ret = -EINVAL;
2846 	struct cgroup *cgrp;
2847 	struct cgroup_iter it;
2848 	struct task_struct *tsk;
2849 
2850 	/*
2851 	 * Validate dentry by checking the superblock operations,
2852 	 * and make sure it's a directory.
2853 	 */
2854 	if (dentry->d_sb->s_op != &cgroup_ops ||
2855 	    !S_ISDIR(dentry->d_inode->i_mode))
2856 		 goto err;
2857 
2858 	ret = 0;
2859 	cgrp = dentry->d_fsdata;
2860 
2861 	cgroup_iter_start(cgrp, &it);
2862 	while ((tsk = cgroup_iter_next(cgrp, &it))) {
2863 		switch (tsk->state) {
2864 		case TASK_RUNNING:
2865 			stats->nr_running++;
2866 			break;
2867 		case TASK_INTERRUPTIBLE:
2868 			stats->nr_sleeping++;
2869 			break;
2870 		case TASK_UNINTERRUPTIBLE:
2871 			stats->nr_uninterruptible++;
2872 			break;
2873 		case TASK_STOPPED:
2874 			stats->nr_stopped++;
2875 			break;
2876 		default:
2877 			if (delayacct_is_task_waiting_on_io(tsk))
2878 				stats->nr_io_wait++;
2879 			break;
2880 		}
2881 	}
2882 	cgroup_iter_end(cgrp, &it);
2883 
2884 err:
2885 	return ret;
2886 }
2887 
2888 
2889 /*
2890  * seq_file methods for the tasks/procs files. The seq_file position is the
2891  * next pid to display; the seq_file iterator is a pointer to the pid
2892  * in the cgroup->l->list array.
2893  */
2894 
2895 static void *cgroup_pidlist_start(struct seq_file *s, loff_t *pos)
2896 {
2897 	/*
2898 	 * Initially we receive a position value that corresponds to
2899 	 * one more than the last pid shown (or 0 on the first call or
2900 	 * after a seek to the start). Use a binary-search to find the
2901 	 * next pid to display, if any
2902 	 */
2903 	struct cgroup_pidlist *l = s->private;
2904 	int index = 0, pid = *pos;
2905 	int *iter;
2906 
2907 	down_read(&l->mutex);
2908 	if (pid) {
2909 		int end = l->length;
2910 
2911 		while (index < end) {
2912 			int mid = (index + end) / 2;
2913 			if (l->list[mid] == pid) {
2914 				index = mid;
2915 				break;
2916 			} else if (l->list[mid] <= pid)
2917 				index = mid + 1;
2918 			else
2919 				end = mid;
2920 		}
2921 	}
2922 	/* If we're off the end of the array, we're done */
2923 	if (index >= l->length)
2924 		return NULL;
2925 	/* Update the abstract position to be the actual pid that we found */
2926 	iter = l->list + index;
2927 	*pos = *iter;
2928 	return iter;
2929 }
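/*
 * Example of the position convention (hypothetical list {3, 7, 9}):
 * resuming at *pos == 4 binary-searches to index 1, returns a pointer
 * to pid 7 and rewrites *pos to 7, so pid values themselves serve as
 * stable seq_file positions across reads.
 */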
2930 
2931 static void cgroup_pidlist_stop(struct seq_file *s, void *v)
2932 {
2933 	struct cgroup_pidlist *l = s->private;
2934 	up_read(&l->mutex);
2935 }
2936 
2937 static void *cgroup_pidlist_next(struct seq_file *s, void *v, loff_t *pos)
2938 {
2939 	struct cgroup_pidlist *l = s->private;
2940 	pid_t *p = v;
2941 	pid_t *end = l->list + l->length;
2942 	/*
2943 	 * Advance to the next pid in the array. If this goes off the
2944 	 * end, we're done
2945 	 */
2946 	p++;
2947 	if (p >= end) {
2948 		return NULL;
2949 	} else {
2950 		*pos = *p;
2951 		return p;
2952 	}
2953 }
2954 
2955 static int cgroup_pidlist_show(struct seq_file *s, void *v)
2956 {
2957 	return seq_printf(s, "%d\n", *(int *)v);
2958 }
2959 
2960 /*
2961  * seq_operations functions for iterating on pidlists through seq_file -
2962  * independent of whether it's tasks or procs
2963  */
2964 static const struct seq_operations cgroup_pidlist_seq_operations = {
2965 	.start = cgroup_pidlist_start,
2966 	.stop = cgroup_pidlist_stop,
2967 	.next = cgroup_pidlist_next,
2968 	.show = cgroup_pidlist_show,
2969 };
2970 
2971 static void cgroup_release_pid_array(struct cgroup_pidlist *l)
2972 {
2973 	/*
2974 	 * the case where we're the last user of this particular pidlist will
2975 	 * have us remove it from the cgroup's list, which entails taking the
2976 	 * mutex. since in cgroup_pidlist_find() the pidlist's mutex is taken
2977 	 * under cgroup->pidlist_mutex, we have to take pidlist_mutex first.
2978 	 */
2979 	mutex_lock(&l->owner->pidlist_mutex);
2980 	down_write(&l->mutex);
2981 	BUG_ON(!l->use_count);
2982 	if (!--l->use_count) {
2983 		/* we're the last user if refcount is 0; remove and free */
2984 		list_del(&l->links);
2985 		mutex_unlock(&l->owner->pidlist_mutex);
2986 		pidlist_free(l->list);
2987 		put_pid_ns(l->key.ns);
2988 		up_write(&l->mutex);
2989 		kfree(l);
2990 		return;
2991 	}
2992 	mutex_unlock(&l->owner->pidlist_mutex);
2993 	up_write(&l->mutex);
2994 }
2995 
2996 static int cgroup_pidlist_release(struct inode *inode, struct file *file)
2997 {
2998 	struct cgroup_pidlist *l;
2999 	if (!(file->f_mode & FMODE_READ))
3000 		return 0;
3001 	/*
3002 	 * the seq_file is only initialized if the file was opened for
3003 	 * reading, which the FMODE_READ check above guarantees.
3004 	 */
3005 	l = ((struct seq_file *)file->private_data)->private;
3006 	cgroup_release_pid_array(l);
3007 	return seq_release(inode, file);
3008 }
3009 
3010 static const struct file_operations cgroup_pidlist_operations = {
3011 	.read = seq_read,
3012 	.llseek = seq_lseek,
3013 	.write = cgroup_file_write,
3014 	.release = cgroup_pidlist_release,
3015 };
3016 
3017 /*
3018  * The following functions handle opens on a file that displays a pidlist
3019  * (tasks or procs). Prepare an array of the process/thread IDs of whoever's
3020  * in the cgroup.
3021  */
3022 /* helper function for the two below it */
3023 static int cgroup_pidlist_open(struct file *file, enum cgroup_filetype type)
3024 {
3025 	struct cgroup *cgrp = __d_cgrp(file->f_dentry->d_parent);
3026 	struct cgroup_pidlist *l;
3027 	int retval;
3028 
3029 	/* Nothing to do for write-only files */
3030 	if (!(file->f_mode & FMODE_READ))
3031 		return 0;
3032 
3033 	/* have the array populated */
3034 	retval = pidlist_array_load(cgrp, type, &l);
3035 	if (retval)
3036 		return retval;
3037 	/* configure file information */
3038 	file->f_op = &cgroup_pidlist_operations;
3039 
3040 	retval = seq_open(file, &cgroup_pidlist_seq_operations);
3041 	if (retval) {
3042 		cgroup_release_pid_array(l);
3043 		return retval;
3044 	}
3045 	((struct seq_file *)file->private_data)->private = l;
3046 	return 0;
3047 }
3048 static int cgroup_tasks_open(struct inode *unused, struct file *file)
3049 {
3050 	return cgroup_pidlist_open(file, CGROUP_FILE_TASKS);
3051 }
3052 static int cgroup_procs_open(struct inode *unused, struct file *file)
3053 {
3054 	return cgroup_pidlist_open(file, CGROUP_FILE_PROCS);
3055 }
3056 
3057 static u64 cgroup_read_notify_on_release(struct cgroup *cgrp,
3058 					    struct cftype *cft)
3059 {
3060 	return notify_on_release(cgrp);
3061 }
3062 
3063 static int cgroup_write_notify_on_release(struct cgroup *cgrp,
3064 					  struct cftype *cft,
3065 					  u64 val)
3066 {
3067 	clear_bit(CGRP_RELEASABLE, &cgrp->flags);
3068 	if (val)
3069 		set_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags);
3070 	else
3071 		clear_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags);
3072 	return 0;
3073 }
3074 
3075 /*
3076  * Unregister event and free resources.
3077  *
3078  * Gets called from workqueue.
3079  */
3080 static void cgroup_event_remove(struct work_struct *work)
3081 {
3082 	struct cgroup_event *event = container_of(work, struct cgroup_event,
3083 			remove);
3084 	struct cgroup *cgrp = event->cgrp;
3085 
3086 	event->cft->unregister_event(cgrp, event->cft, event->eventfd);
3087 
3088 	eventfd_ctx_put(event->eventfd);
3089 	kfree(event);
3090 	dput(cgrp->dentry);
3091 }
3092 
3093 /*
3094  * Gets called on POLLHUP on eventfd when user closes it.
3095  *
3096  * Called with wqh->lock held and interrupts disabled.
3097  */
3098 static int cgroup_event_wake(wait_queue_t *wait, unsigned mode,
3099 		int sync, void *key)
3100 {
3101 	struct cgroup_event *event = container_of(wait,
3102 			struct cgroup_event, wait);
3103 	struct cgroup *cgrp = event->cgrp;
3104 	unsigned long flags = (unsigned long)key;
3105 
3106 	if (flags & POLLHUP) {
3107 		__remove_wait_queue(event->wqh, &event->wait);
3108 		spin_lock(&cgrp->event_list_lock);
3109 		list_del(&event->list);
3110 		spin_unlock(&cgrp->event_list_lock);
3111 		/*
3112 		 * We are in atomic context, but cgroup_event_remove() may
3113 		 * sleep, so we have to call it from a workqueue.
3114 		 */
3115 		schedule_work(&event->remove);
3116 	}
3117 
3118 	return 0;
3119 }
3120 
3121 static void cgroup_event_ptable_queue_proc(struct file *file,
3122 		wait_queue_head_t *wqh, poll_table *pt)
3123 {
3124 	struct cgroup_event *event = container_of(pt,
3125 			struct cgroup_event, pt);
3126 
3127 	event->wqh = wqh;
3128 	add_wait_queue(wqh, &event->wait);
3129 }
3130 
3131 /*
3132  * Parse input and register new cgroup event handler.
3133  *
3134  * Input must be in format '<event_fd> <control_fd> <args>'.
3135  * Interpretation of args is defined by control file implementation.
3136  */
3137 static int cgroup_write_event_control(struct cgroup *cgrp, struct cftype *cft,
3138 				      const char *buffer)
3139 {
3140 	struct cgroup_event *event = NULL;
3141 	unsigned int efd, cfd;
3142 	struct file *efile = NULL;
3143 	struct file *cfile = NULL;
3144 	char *endp;
3145 	int ret;
3146 
3147 	efd = simple_strtoul(buffer, &endp, 10);
3148 	if (*endp != ' ')
3149 		return -EINVAL;
3150 	buffer = endp + 1;
3151 
3152 	cfd = simple_strtoul(buffer, &endp, 10);
3153 	if ((*endp != ' ') && (*endp != '\0'))
3154 		return -EINVAL;
3155 	buffer = endp + 1;
3156 
3157 	event = kzalloc(sizeof(*event), GFP_KERNEL);
3158 	if (!event)
3159 		return -ENOMEM;
3160 	event->cgrp = cgrp;
3161 	INIT_LIST_HEAD(&event->list);
3162 	init_poll_funcptr(&event->pt, cgroup_event_ptable_queue_proc);
3163 	init_waitqueue_func_entry(&event->wait, cgroup_event_wake);
3164 	INIT_WORK(&event->remove, cgroup_event_remove);
3165 
3166 	efile = eventfd_fget(efd);
3167 	if (IS_ERR(efile)) {
3168 		ret = PTR_ERR(efile);
3169 		goto fail;
3170 	}
3171 
3172 	event->eventfd = eventfd_ctx_fileget(efile);
3173 	if (IS_ERR(event->eventfd)) {
3174 		ret = PTR_ERR(event->eventfd);
3175 		goto fail;
3176 	}
3177 
3178 	cfile = fget(cfd);
3179 	if (!cfile) {
3180 		ret = -EBADF;
3181 		goto fail;
3182 	}
3183 
3184 	/* the process needs read permission on the control file */
3185 	ret = file_permission(cfile, MAY_READ);
3186 	if (ret < 0)
3187 		goto fail;
3188 
3189 	event->cft = __file_cft(cfile);
3190 	if (IS_ERR(event->cft)) {
3191 		ret = PTR_ERR(event->cft);
3192 		goto fail;
3193 	}
3194 
3195 	if (!event->cft->register_event || !event->cft->unregister_event) {
3196 		ret = -EINVAL;
3197 		goto fail;
3198 	}
3199 
3200 	ret = event->cft->register_event(cgrp, event->cft,
3201 			event->eventfd, buffer);
3202 	if (ret)
3203 		goto fail;
3204 
3205 	if (efile->f_op->poll(efile, &event->pt) & POLLHUP) {
3206 		event->cft->unregister_event(cgrp, event->cft, event->eventfd);
3207 		ret = 0;
3208 		goto fail;
3209 	}
3210 
3211 	/*
3212 	 * Events should be removed after rmdir of cgroup directory, but before
3213 	 * destroying subsystem state objects. Take a reference to the cgroup
3214 	 * directory dentry to ensure that.
3215 	 */
3216 	dget(cgrp->dentry);
3217 
3218 	spin_lock(&cgrp->event_list_lock);
3219 	list_add(&event->list, &cgrp->event_list);
3220 	spin_unlock(&cgrp->event_list_lock);
3221 
3222 	fput(cfile);
3223 	fput(efile);
3224 
3225 	return 0;
3226 
3227 fail:
3228 	if (cfile)
3229 		fput(cfile);
3230 
3231 	if (event && event->eventfd && !IS_ERR(event->eventfd))
3232 		eventfd_ctx_put(event->eventfd);
3233 
3234 	if (!IS_ERR_OR_NULL(efile))
3235 		fput(efile);
3236 
3237 	kfree(event);
3238 
3239 	return ret;
3240 }
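/*
 * Userspace sketch of the protocol parsed above (paths and the
 * threshold argument are hypothetical; the target control file must
 * implement register_event/unregister_event, as the memory controller's
 * threshold files do):
 *
 *	int efd = eventfd(0, 0);
 *	int cfd = open("/dev/cgroup/foo/memory.usage_in_bytes", O_RDONLY);
 *	int wfd = open("/dev/cgroup/foo/cgroup.event_control", O_WRONLY);
 *	char buf[64];
 *	u64 counter;
 *
 *	snprintf(buf, sizeof(buf), "%d %d %llu", efd, cfd, threshold);
 *	write(wfd, buf, strlen(buf));
 *	read(efd, &counter, sizeof(counter));
 *
 * The final read() blocks until the registered event fires.
 */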
3241 
3242 static u64 cgroup_clone_children_read(struct cgroup *cgrp,
3243 				    struct cftype *cft)
3244 {
3245 	return clone_children(cgrp);
3246 }
3247 
3248 static int cgroup_clone_children_write(struct cgroup *cgrp,
3249 				     struct cftype *cft,
3250 				     u64 val)
3251 {
3252 	if (val)
3253 		set_bit(CGRP_CLONE_CHILDREN, &cgrp->flags);
3254 	else
3255 		clear_bit(CGRP_CLONE_CHILDREN, &cgrp->flags);
3256 	return 0;
3257 }
3258 
3259 /*
3260  * for the common functions, 'private' gives the type of file
3261  */
3262 /* for hysterical raisins, we can't put this on the older files */
3263 #define CGROUP_FILE_GENERIC_PREFIX "cgroup."
3264 static struct cftype files[] = {
3265 	{
3266 		.name = "tasks",
3267 		.open = cgroup_tasks_open,
3268 		.write_u64 = cgroup_tasks_write,
3269 		.release = cgroup_pidlist_release,
3270 		.mode = S_IRUGO | S_IWUSR,
3271 	},
3272 	{
3273 		.name = CGROUP_FILE_GENERIC_PREFIX "procs",
3274 		.open = cgroup_procs_open,
3275 		/* .write_u64 = cgroup_procs_write, TODO */
3276 		.release = cgroup_pidlist_release,
3277 		.mode = S_IRUGO,
3278 	},
3279 	{
3280 		.name = "notify_on_release",
3281 		.read_u64 = cgroup_read_notify_on_release,
3282 		.write_u64 = cgroup_write_notify_on_release,
3283 	},
3284 	{
3285 		.name = CGROUP_FILE_GENERIC_PREFIX "event_control",
3286 		.write_string = cgroup_write_event_control,
3287 		.mode = S_IWUGO,
3288 	},
3289 	{
3290 		.name = "cgroup.clone_children",
3291 		.read_u64 = cgroup_clone_children_read,
3292 		.write_u64 = cgroup_clone_children_write,
3293 	},
3294 };
3295 
3296 static struct cftype cft_release_agent = {
3297 	.name = "release_agent",
3298 	.read_seq_string = cgroup_release_agent_show,
3299 	.write_string = cgroup_release_agent_write,
3300 	.max_write_len = PATH_MAX,
3301 };
3302 
3303 static int cgroup_populate_dir(struct cgroup *cgrp)
3304 {
3305 	int err;
3306 	struct cgroup_subsys *ss;
3307 
3308 	/* First clear out any existing files */
3309 	cgroup_clear_directory(cgrp->dentry);
3310 
3311 	err = cgroup_add_files(cgrp, NULL, files, ARRAY_SIZE(files));
3312 	if (err < 0)
3313 		return err;
3314 
3315 	if (cgrp == cgrp->top_cgroup) {
3316 		if ((err = cgroup_add_file(cgrp, NULL, &cft_release_agent)) < 0)
3317 			return err;
3318 	}
3319 
3320 	for_each_subsys(cgrp->root, ss) {
3321 		if (ss->populate && (err = ss->populate(ss, cgrp)) < 0)
3322 			return err;
3323 	}
3324 	/* This cgroup is ready now */
3325 	for_each_subsys(cgrp->root, ss) {
3326 		struct cgroup_subsys_state *css = cgrp->subsys[ss->subsys_id];
3327 		/*
3328 		 * Update id->css pointer and make this css visible from
3329 		 * CSS ID functions. This pointer will be dereferenced
3330 		 * from the RCU read side without locks.
3331 		 */
3332 		if (css->id)
3333 			rcu_assign_pointer(css->id->css, css);
3334 	}
3335 
3336 	return 0;
3337 }
3338 
3339 static void init_cgroup_css(struct cgroup_subsys_state *css,
3340 			       struct cgroup_subsys *ss,
3341 			       struct cgroup *cgrp)
3342 {
3343 	css->cgroup = cgrp;
3344 	atomic_set(&css->refcnt, 1);
3345 	css->flags = 0;
3346 	css->id = NULL;
3347 	if (cgrp == dummytop)
3348 		set_bit(CSS_ROOT, &css->flags);
3349 	BUG_ON(cgrp->subsys[ss->subsys_id]);
3350 	cgrp->subsys[ss->subsys_id] = css;
3351 }
3352 
3353 static void cgroup_lock_hierarchy(struct cgroupfs_root *root)
3354 {
3355 	/* We need to take each hierarchy_mutex in a consistent order */
3356 	int i;
3357 
3358 	/*
3359 	 * No worry about a race with rebind_subsystems that might mess up the
3360 	 * locking order, since both parties are under cgroup_mutex.
3361 	 */
3362 	for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
3363 		struct cgroup_subsys *ss = subsys[i];
3364 		if (ss == NULL)
3365 			continue;
3366 		if (ss->root == root)
3367 			mutex_lock(&ss->hierarchy_mutex);
3368 	}
3369 }
3370 
3371 static void cgroup_unlock_hierarchy(struct cgroupfs_root *root)
3372 {
3373 	int i;
3374 
3375 	for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
3376 		struct cgroup_subsys *ss = subsys[i];
3377 		if (ss == NULL)
3378 			continue;
3379 		if (ss->root == root)
3380 			mutex_unlock(&ss->hierarchy_mutex);
3381 	}
3382 }
3383 
3384 /*
3385  * cgroup_create - create a cgroup
3386  * @parent: cgroup that will be parent of the new cgroup
3387  * @dentry: dentry of the new cgroup
3388  * @mode: mode to set on new inode
3389  *
3390  * Must be called with the mutex on the parent inode held
3391  */
3392 static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
3393 			     mode_t mode)
3394 {
3395 	struct cgroup *cgrp;
3396 	struct cgroupfs_root *root = parent->root;
3397 	int err = 0;
3398 	struct cgroup_subsys *ss;
3399 	struct super_block *sb = root->sb;
3400 
3401 	cgrp = kzalloc(sizeof(*cgrp), GFP_KERNEL);
3402 	if (!cgrp)
3403 		return -ENOMEM;
3404 
3405 	/* Grab a reference on the superblock so the hierarchy doesn't
3406 	 * get deleted on unmount if there are child cgroups.  This
3407 	 * can be done outside cgroup_mutex, since the sb can't
3408 	 * disappear while someone has an open control file on the
3409 	 * fs */
3410 	atomic_inc(&sb->s_active);
3411 
3412 	mutex_lock(&cgroup_mutex);
3413 
3414 	init_cgroup_housekeeping(cgrp);
3415 
3416 	cgrp->parent = parent;
3417 	cgrp->root = parent->root;
3418 	cgrp->top_cgroup = parent->top_cgroup;
3419 
3420 	if (notify_on_release(parent))
3421 		set_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags);
3422 
3423 	if (clone_children(parent))
3424 		set_bit(CGRP_CLONE_CHILDREN, &cgrp->flags);
3425 
3426 	for_each_subsys(root, ss) {
3427 		struct cgroup_subsys_state *css = ss->create(ss, cgrp);
3428 
3429 		if (IS_ERR(css)) {
3430 			err = PTR_ERR(css);
3431 			goto err_destroy;
3432 		}
3433 		init_cgroup_css(css, ss, cgrp);
3434 		if (ss->use_id) {
3435 			err = alloc_css_id(ss, parent, cgrp);
3436 			if (err)
3437 				goto err_destroy;
3438 		}
3439 		/* At error, ->destroy() callback has to free assigned ID. */
3440 		if (clone_children(parent) && ss->post_clone)
3441 			ss->post_clone(ss, cgrp);
3442 	}
3443 
3444 	cgroup_lock_hierarchy(root);
3445 	list_add(&cgrp->sibling, &cgrp->parent->children);
3446 	cgroup_unlock_hierarchy(root);
3447 	root->number_of_cgroups++;
3448 
3449 	err = cgroup_create_dir(cgrp, dentry, mode);
3450 	if (err < 0)
3451 		goto err_remove;
3452 
3453 	/* The cgroup directory was pre-locked for us */
3454 	BUG_ON(!mutex_is_locked(&cgrp->dentry->d_inode->i_mutex));
3455 
3456 	err = cgroup_populate_dir(cgrp);
3457 	/* If err < 0, we have a half-filled directory - oh well ;) */
3458 
3459 	mutex_unlock(&cgroup_mutex);
3460 	mutex_unlock(&cgrp->dentry->d_inode->i_mutex);
3461 
3462 	return 0;
3463 
3464  err_remove:
3465 
3466 	cgroup_lock_hierarchy(root);
3467 	list_del(&cgrp->sibling);
3468 	cgroup_unlock_hierarchy(root);
3469 	root->number_of_cgroups--;
3470 
3471  err_destroy:
3472 
3473 	for_each_subsys(root, ss) {
3474 		if (cgrp->subsys[ss->subsys_id])
3475 			ss->destroy(ss, cgrp);
3476 	}
3477 
3478 	mutex_unlock(&cgroup_mutex);
3479 
3480 	/* Release the reference count that we took on the superblock */
3481 	deactivate_super(sb);
3482 
3483 	kfree(cgrp);
3484 	return err;
3485 }
3486 
3487 static int cgroup_mkdir(struct inode *dir, struct dentry *dentry, int mode)
3488 {
3489 	struct cgroup *c_parent = dentry->d_parent->d_fsdata;
3490 
3491 	/* the vfs holds inode->i_mutex already */
3492 	return cgroup_create(c_parent, dentry, mode | S_IFDIR);
3493 }
3494 
3495 static int cgroup_has_css_refs(struct cgroup *cgrp)
3496 {
3497 	/* Check the reference count on each subsystem. Since we
3498 	 * already established that there are no tasks in the
3499 	 * cgroup, if the css refcount is also 1, then there should
3500 	 * be no outstanding references, so the subsystem is safe to
3501 	 * destroy. We scan across all subsystems rather than using
3502 	 * the per-hierarchy linked list of mounted subsystems since
3503 	 * we can be called via check_for_release() with no
3504 	 * synchronization other than RCU, and the subsystem linked
3505 	 * list isn't RCU-safe */
3506 	int i;
3507 	/*
3508 	 * We won't need to lock the subsys array, because the subsystems
3509 	 * we're concerned about aren't going anywhere since our cgroup root
3510 	 * has a reference on them.
3511 	 */
3512 	for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
3513 		struct cgroup_subsys *ss = subsys[i];
3514 		struct cgroup_subsys_state *css;
3515 		/* Skip subsystems not present or not in this hierarchy */
3516 		if (ss == NULL || ss->root != cgrp->root)
3517 			continue;
3518 		css = cgrp->subsys[ss->subsys_id];
3519 		/* When called from check_for_release() it's possible
3520 		 * that by this point the cgroup has been removed
3521 		 * and the css deleted. But a false-positive doesn't
3522 		 * matter, since it can only happen if the cgroup
3523 		 * has been deleted and hence no longer needs the
3524 		 * release agent to be called anyway. */
3525 		if (css && (atomic_read(&css->refcnt) > 1))
3526 			return 1;
3527 	}
3528 	return 0;
3529 }
3530 
3531 /*
3532  * Atomically mark all (or else none) of the cgroup's CSS objects as
3533  * CSS_REMOVED. Return true on success, or false if the cgroup has
3534  * busy subsystems. Call with cgroup_mutex held
3535  */
3536 
3537 static int cgroup_clear_css_refs(struct cgroup *cgrp)
3538 {
3539 	struct cgroup_subsys *ss;
3540 	unsigned long flags;
3541 	bool failed = false;
3542 	local_irq_save(flags);
3543 	for_each_subsys(cgrp->root, ss) {
3544 		struct cgroup_subsys_state *css = cgrp->subsys[ss->subsys_id];
3545 		int refcnt;
3546 		while (1) {
3547 			/* We can only remove a CSS with a refcnt==1 */
3548 			refcnt = atomic_read(&css->refcnt);
3549 			if (refcnt > 1) {
3550 				failed = true;
3551 				goto done;
3552 			}
3553 			BUG_ON(!refcnt);
3554 			/*
3555 			 * Drop the refcnt to 0 while we check other
3556 			 * subsystems. This will cause any racing
3557 			 * css_tryget() to spin until we set the
3558 			 * CSS_REMOVED bits or abort
3559 			 */
3560 			if (atomic_cmpxchg(&css->refcnt, refcnt, 0) == refcnt)
3561 				break;
3562 			cpu_relax();
3563 		}
3564 	}
3565  done:
3566 	for_each_subsys(cgrp->root, ss) {
3567 		struct cgroup_subsys_state *css = cgrp->subsys[ss->subsys_id];
3568 		if (failed) {
3569 			/*
3570 			 * Restore old refcnt if we previously managed
3571 			 * to clear it from 1 to 0
3572 			 */
3573 			if (!atomic_read(&css->refcnt))
3574 				atomic_set(&css->refcnt, 1);
3575 		} else {
3576 			/* Commit the fact that the CSS is removed */
3577 			set_bit(CSS_REMOVED, &css->flags);
3578 		}
3579 	}
3580 	local_irq_restore(flags);
3581 	return !failed;
3582 }
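/*
 * For context: the counterpart css_tryget() (in include/linux/cgroup.h)
 * spins while the refcnt is parked at 0 by the loop above, and gives up
 * only once CSS_REMOVED has been committed. A rough sketch of that logic:
 *
 *	while (!atomic_inc_not_zero(&css->refcnt)) {
 *		if (test_bit(CSS_REMOVED, &css->flags))
 *			return false;	- removal won the race
 *		cpu_relax();		- removal still in flight, retry
 *	}
 *	return true;
 */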
3583 
3584 static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry)
3585 {
3586 	struct cgroup *cgrp = dentry->d_fsdata;
3587 	struct dentry *d;
3588 	struct cgroup *parent;
3589 	DEFINE_WAIT(wait);
3590 	struct cgroup_event *event, *tmp;
3591 	int ret;
3592 
3593 	/* the VFS already holds both the parent's and the target's inode->i_mutex */
3594 again:
3595 	mutex_lock(&cgroup_mutex);
3596 	if (atomic_read(&cgrp->count) != 0) {
3597 		mutex_unlock(&cgroup_mutex);
3598 		return -EBUSY;
3599 	}
3600 	if (!list_empty(&cgrp->children)) {
3601 		mutex_unlock(&cgroup_mutex);
3602 		return -EBUSY;
3603 	}
3604 	mutex_unlock(&cgroup_mutex);
3605 
3606 	/*
3607 	 * In general, a subsystem holds no css->refcnt after pre_destroy().
3608 	 * But in racy cases a subsystem may take a css->refcnt after
3609 	 * pre_destroy(), which makes this rmdir return -EBUSY; that can
3610 	 * happen far too often. To avoid it, we wait on a waitqueue for the
3611 	 * cgroup's rmdir. CGRP_WAIT_ON_RMDIR synchronizes rmdir against the
3612 	 * subsystem's reference-count handling; see the css_get/css_put,
3613 	 * css_tryget() and cgroup_wakeup_rmdir_waiter() implementations.
3614 	 */
3615 	set_bit(CGRP_WAIT_ON_RMDIR, &cgrp->flags);
3616 
3617 	/*
3618 	 * Call pre_destroy handlers of subsys. Notify subsystems
3619 	 * that rmdir() request comes.
3620 	 */
3621 	ret = cgroup_call_pre_destroy(cgrp);
3622 	if (ret) {
3623 		clear_bit(CGRP_WAIT_ON_RMDIR, &cgrp->flags);
3624 		return ret;
3625 	}
3626 
3627 	mutex_lock(&cgroup_mutex);
3628 	parent = cgrp->parent;
3629 	if (atomic_read(&cgrp->count) || !list_empty(&cgrp->children)) {
3630 		clear_bit(CGRP_WAIT_ON_RMDIR, &cgrp->flags);
3631 		mutex_unlock(&cgroup_mutex);
3632 		return -EBUSY;
3633 	}
3634 	prepare_to_wait(&cgroup_rmdir_waitq, &wait, TASK_INTERRUPTIBLE);
3635 	if (!cgroup_clear_css_refs(cgrp)) {
3636 		mutex_unlock(&cgroup_mutex);
3637 		/*
3638 		 * Because someone may call cgroup_wakeup_rmdir_waiter() before
3639 		 * prepare_to_wait(), we need to check this flag.
3640 		 */
3641 		if (test_bit(CGRP_WAIT_ON_RMDIR, &cgrp->flags))
3642 			schedule();
3643 		finish_wait(&cgroup_rmdir_waitq, &wait);
3644 		clear_bit(CGRP_WAIT_ON_RMDIR, &cgrp->flags);
3645 		if (signal_pending(current))
3646 			return -EINTR;
3647 		goto again;
3648 	}
3649 	/* No css_tryget() can succeed after this point. */
3650 	finish_wait(&cgroup_rmdir_waitq, &wait);
3651 	clear_bit(CGRP_WAIT_ON_RMDIR, &cgrp->flags);
3652 
3653 	spin_lock(&release_list_lock);
3654 	set_bit(CGRP_REMOVED, &cgrp->flags);
3655 	if (!list_empty(&cgrp->release_list))
3656 		list_del_init(&cgrp->release_list);
3657 	spin_unlock(&release_list_lock);
3658 
3659 	cgroup_lock_hierarchy(cgrp->root);
3660 	/* delete this cgroup from parent->children */
3661 	list_del_init(&cgrp->sibling);
3662 	cgroup_unlock_hierarchy(cgrp->root);
3663 
3664 	d = dget(cgrp->dentry);
3665 
3666 	cgroup_d_remove_dir(d);
3667 	dput(d);
3668 
3669 	set_bit(CGRP_RELEASABLE, &parent->flags);
3670 	check_for_release(parent);
3671 
3672 	/*
3673 	 * Unregister events and notify userspace.
3674 	 * Notify userspace about cgroup removing only after rmdir of cgroup
3675 	 * directory to avoid race between userspace and kernelspace
3676 	 */
3677 	spin_lock(&cgrp->event_list_lock);
3678 	list_for_each_entry_safe(event, tmp, &cgrp->event_list, list) {
3679 		list_del(&event->list);
3680 		remove_wait_queue(event->wqh, &event->wait);
3681 		eventfd_signal(event->eventfd, 1);
3682 		schedule_work(&event->remove);
3683 	}
3684 	spin_unlock(&cgrp->event_list_lock);
3685 
3686 	mutex_unlock(&cgroup_mutex);
3687 	return 0;
3688 }
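/*
 * The wakeup side, cgroup_wakeup_rmdir_waiter() (defined earlier in this
 * file and called from __css_put() below), pairs with the
 * prepare_to_wait()/schedule() sequence above; roughly:
 *
 *	if (unlikely(test_and_clear_bit(CGRP_WAIT_ON_RMDIR, &cgrp->flags)))
 *		wake_up_all(&cgroup_rmdir_waitq);
 */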
3689 
3690 static void __init cgroup_init_subsys(struct cgroup_subsys *ss)
3691 {
3692 	struct cgroup_subsys_state *css;
3693 
3694 	printk(KERN_INFO "Initializing cgroup subsys %s\n", ss->name);
3695 
3696 	/* Create the top cgroup state for this subsystem */
3697 	list_add(&ss->sibling, &rootnode.subsys_list);
3698 	ss->root = &rootnode;
3699 	css = ss->create(ss, dummytop);
3700 	/* We don't handle early failures gracefully */
3701 	BUG_ON(IS_ERR(css));
3702 	init_cgroup_css(css, ss, dummytop);
3703 
3704 	/* Update the init_css_set to contain a subsys
3705 	 * pointer to this state - since the subsystem is
3706 	 * newly registered, all tasks and hence the
3707 	 * init_css_set is in the subsystem's top cgroup. */
3708 	init_css_set.subsys[ss->subsys_id] = dummytop->subsys[ss->subsys_id];
3709 
3710 	need_forkexit_callback |= ss->fork || ss->exit;
3711 
3712 	/* At system boot, before all subsystems have been
3713 	 * registered, no tasks have been forked, so we don't
3714 	 * need to invoke fork callbacks here. */
3715 	BUG_ON(!list_empty(&init_task.tasks));
3716 
3717 	mutex_init(&ss->hierarchy_mutex);
3718 	lockdep_set_class(&ss->hierarchy_mutex, &ss->subsys_key);
3719 	ss->active = 1;
3720 
3721 	/* this function shouldn't be used with modular subsystems, since they
3722 	 * need to register a subsys_id, among other things */
3723 	BUG_ON(ss->module);
3724 }
3725 
3726 /**
3727  * cgroup_load_subsys: load and register a modular subsystem at runtime
3728  * @ss: the subsystem to load
3729  *
3730  * This function should be called in a modular subsystem's initcall. If the
3731  * subsystem is built as a module, it will be assigned a new subsys_id and set
3732  * up for use. If the subsystem is built-in anyway, work is delegated to the
3733  * simpler cgroup_init_subsys.
3734  */
3735 int __init_or_module cgroup_load_subsys(struct cgroup_subsys *ss)
3736 {
3737 	int i;
3738 	struct cgroup_subsys_state *css;
3739 
3740 	/* check name and function validity */
3741 	if (ss->name == NULL || strlen(ss->name) > MAX_CGROUP_TYPE_NAMELEN ||
3742 	    ss->create == NULL || ss->destroy == NULL)
3743 		return -EINVAL;
3744 
3745 	/*
3746 	 * we don't support callbacks in modular subsystems. this check is
3747 	 * before the ss->module check for consistency; a subsystem that could
3748 	 * be a module should still have no callbacks even if the user isn't
3749 	 * compiling it as one.
3750 	 */
3751 	if (ss->fork || ss->exit)
3752 		return -EINVAL;
3753 
3754 	/*
3755 	 * an optionally modular subsystem is built-in: we want to do nothing,
3756 	 * since cgroup_init_subsys will have already taken care of it.
3757 	 */
3758 	if (ss->module == NULL) {
3759 		/* a few sanity checks */
3760 		BUG_ON(ss->subsys_id >= CGROUP_BUILTIN_SUBSYS_COUNT);
3761 		BUG_ON(subsys[ss->subsys_id] != ss);
3762 		return 0;
3763 	}
3764 
3765 	/*
3766 	 * need to register a subsys id before anything else - for example,
3767 	 * init_cgroup_css needs it.
3768 	 */
3769 	mutex_lock(&cgroup_mutex);
3770 	/* find the first empty slot in the array */
3771 	for (i = CGROUP_BUILTIN_SUBSYS_COUNT; i < CGROUP_SUBSYS_COUNT; i++) {
3772 		if (subsys[i] == NULL)
3773 			break;
3774 	}
3775 	if (i == CGROUP_SUBSYS_COUNT) {
3776 		/* maximum number of subsystems already registered! */
3777 		mutex_unlock(&cgroup_mutex);
3778 		return -EBUSY;
3779 	}
3780 	/* assign ourselves the subsys_id */
3781 	ss->subsys_id = i;
3782 	subsys[i] = ss;
3783 
3784 	/*
3785 	 * no ss->create seems to need anything important in the ss struct, so
3786 	 * this can happen first (i.e. before the rootnode attachment).
3787 	 */
3788 	css = ss->create(ss, dummytop);
3789 	if (IS_ERR(css)) {
3790 		/* failure case - need to deassign the subsys[] slot. */
3791 		subsys[i] = NULL;
3792 		mutex_unlock(&cgroup_mutex);
3793 		return PTR_ERR(css);
3794 	}
3795 
3796 	list_add(&ss->sibling, &rootnode.subsys_list);
3797 	ss->root = &rootnode;
3798 
3799 	/* our new subsystem will be attached to the dummy hierarchy. */
3800 	init_cgroup_css(css, ss, dummytop);
3801 	/* init_idr must be after init_cgroup_css because it sets css->id. */
3802 	if (ss->use_id) {
3803 		int ret = cgroup_init_idr(ss, css);
3804 		if (ret) {
3805 			dummytop->subsys[ss->subsys_id] = NULL;
3806 			ss->destroy(ss, dummytop);
3807 			subsys[i] = NULL;
3808 			mutex_unlock(&cgroup_mutex);
3809 			return ret;
3810 		}
3811 	}
3812 
3813 	/*
3814 	 * Now we need to entangle the css into the existing css_sets. unlike
3815 	 * in cgroup_init_subsys, there are now multiple css_sets, so each one
3816 	 * will need a new pointer to it; done by iterating the css_set_table.
3817 	 * furthermore, modifying the existing css_sets will corrupt the hash
3818 	 * table state, so each changed css_set will need its hash recomputed.
3819 	 * this is all done under the css_set_lock.
3820 	 */
3821 	write_lock(&css_set_lock);
3822 	for (i = 0; i < CSS_SET_TABLE_SIZE; i++) {
3823 		struct css_set *cg;
3824 		struct hlist_node *node, *tmp;
3825 		struct hlist_head *bucket = &css_set_table[i], *new_bucket;
3826 
3827 		hlist_for_each_entry_safe(cg, node, tmp, bucket, hlist) {
3828 			/* skip entries that we already rehashed */
3829 			if (cg->subsys[ss->subsys_id])
3830 				continue;
3831 			/* remove existing entry */
3832 			hlist_del(&cg->hlist);
3833 			/* set new value */
3834 			cg->subsys[ss->subsys_id] = css;
3835 			/* recompute hash and restore entry */
3836 			new_bucket = css_set_hash(cg->subsys);
3837 			hlist_add_head(&cg->hlist, new_bucket);
3838 		}
3839 	}
3840 	write_unlock(&css_set_lock);
3841 
3842 	mutex_init(&ss->hierarchy_mutex);
3843 	lockdep_set_class(&ss->hierarchy_mutex, &ss->subsys_key);
3844 	ss->active = 1;
3845 
3846 	/* success! */
3847 	mutex_unlock(&cgroup_mutex);
3848 	return 0;
3849 }
3850 EXPORT_SYMBOL_GPL(cgroup_load_subsys);
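/*
 * A minimal sketch of the intended use, for a hypothetical modular
 * subsystem (the names are illustrative, not a real in-tree controller):
 *
 *	struct cgroup_subsys example_subsys = {
 *		.name = "example",
 *		.create = example_create,
 *		.destroy = example_destroy,
 *		.module = THIS_MODULE,
 *	};
 *
 *	static int __init example_init(void)
 *	{
 *		return cgroup_load_subsys(&example_subsys);
 *	}
 *	module_init(example_init);
 */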
3851 
3852 /**
3853  * cgroup_unload_subsys: unload a modular subsystem
3854  * @ss: the subsystem to unload
3855  *
3856  * This function should be called in a modular subsystem's exitcall. When this
3857  * function is invoked, the refcount on the subsystem's module will be 0, so
3858  * the subsystem will not be attached to any hierarchy.
3859  */
3860 void cgroup_unload_subsys(struct cgroup_subsys *ss)
3861 {
3862 	struct cg_cgroup_link *link;
3863 	struct hlist_head *hhead;
3864 
3865 	BUG_ON(ss->module == NULL);
3866 
3867 	/*
3868 	 * we shouldn't be called if the subsystem is in use, and the use of
3869 	 * try_module_get in parse_cgroupfs_options should ensure that it
3870 	 * doesn't start being used while we're killing it off.
3871 	 */
3872 	BUG_ON(ss->root != &rootnode);
3873 
3874 	mutex_lock(&cgroup_mutex);
3875 	/* deassign the subsys_id */
3876 	BUG_ON(ss->subsys_id < CGROUP_BUILTIN_SUBSYS_COUNT);
3877 	subsys[ss->subsys_id] = NULL;
3878 
3879 	/* remove subsystem from rootnode's list of subsystems */
3880 	list_del_init(&ss->sibling);
3881 
3882 	/*
3883 	 * disentangle the css from all css_sets attached to the dummytop. as
3884 	 * in loading, we need to pay our respects to the hashtable gods.
3885 	 */
3886 	write_lock(&css_set_lock);
3887 	list_for_each_entry(link, &dummytop->css_sets, cgrp_link_list) {
3888 		struct css_set *cg = link->cg;
3889 
3890 		hlist_del(&cg->hlist);
3891 		BUG_ON(!cg->subsys[ss->subsys_id]);
3892 		cg->subsys[ss->subsys_id] = NULL;
3893 		hhead = css_set_hash(cg->subsys);
3894 		hlist_add_head(&cg->hlist, hhead);
3895 	}
3896 	write_unlock(&css_set_lock);
3897 
3898 	/*
3899 	 * remove the subsystem's css from the dummytop and free it - we need
3900 	 * to free it before clearing the pointer because ss->destroy uses the
3901 	 * cgrp->subsys pointer to find its state. Note that this also takes
3902 	 * care of freeing the css_id.
3903 	 */
3904 	ss->destroy(ss, dummytop);
3905 	dummytop->subsys[ss->subsys_id] = NULL;
3906 
3907 	mutex_unlock(&cgroup_mutex);
3908 }
3909 EXPORT_SYMBOL_GPL(cgroup_unload_subsys);
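/*
 * Continuing the hypothetical sketch above cgroup_load_subsys(): the
 * matching exitcall simply unregisters the subsystem, which by this point
 * is guaranteed to be unmounted:
 *
 *	static void __exit example_exit(void)
 *	{
 *		cgroup_unload_subsys(&example_subsys);
 *	}
 *	module_exit(example_exit);
 */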
3910 
3911 /**
3912  * cgroup_init_early - cgroup initialization at system boot
3913  *
3914  * Initialize cgroups at system boot, and initialize any
3915  * subsystems that request early init.
3916  */
3917 int __init cgroup_init_early(void)
3918 {
3919 	int i;
3920 	atomic_set(&init_css_set.refcount, 1);
3921 	INIT_LIST_HEAD(&init_css_set.cg_links);
3922 	INIT_LIST_HEAD(&init_css_set.tasks);
3923 	INIT_HLIST_NODE(&init_css_set.hlist);
3924 	css_set_count = 1;
3925 	init_cgroup_root(&rootnode);
3926 	root_count = 1;
3927 	init_task.cgroups = &init_css_set;
3928 
3929 	init_css_set_link.cg = &init_css_set;
3930 	init_css_set_link.cgrp = dummytop;
3931 	list_add(&init_css_set_link.cgrp_link_list,
3932 		 &rootnode.top_cgroup.css_sets);
3933 	list_add(&init_css_set_link.cg_link_list,
3934 		 &init_css_set.cg_links);
3935 
3936 	for (i = 0; i < CSS_SET_TABLE_SIZE; i++)
3937 		INIT_HLIST_HEAD(&css_set_table[i]);
3938 
3939 	/* at bootup time, we don't worry about modular subsystems */
3940 	for (i = 0; i < CGROUP_BUILTIN_SUBSYS_COUNT; i++) {
3941 		struct cgroup_subsys *ss = subsys[i];
3942 
3943 		BUG_ON(!ss->name);
3944 		BUG_ON(strlen(ss->name) > MAX_CGROUP_TYPE_NAMELEN);
3945 		BUG_ON(!ss->create);
3946 		BUG_ON(!ss->destroy);
3947 		if (ss->subsys_id != i) {
3948 			printk(KERN_ERR "cgroup: Subsys %s id == %d\n",
3949 			       ss->name, ss->subsys_id);
3950 			BUG();
3951 		}
3952 
3953 		if (ss->early_init)
3954 			cgroup_init_subsys(ss);
3955 	}
3956 	return 0;
3957 }
3958 
3959 /**
3960  * cgroup_init - cgroup initialization
3961  *
3962  * Register cgroup filesystem and /proc file, and initialize
3963  * any subsystems that didn't request early init.
3964  */
3965 int __init cgroup_init(void)
3966 {
3967 	int err;
3968 	int i;
3969 	struct hlist_head *hhead;
3970 
3971 	err = bdi_init(&cgroup_backing_dev_info);
3972 	if (err)
3973 		return err;
3974 
3975 	/* at bootup time, we don't worry about modular subsystems */
3976 	for (i = 0; i < CGROUP_BUILTIN_SUBSYS_COUNT; i++) {
3977 		struct cgroup_subsys *ss = subsys[i];
3978 		if (!ss->early_init)
3979 			cgroup_init_subsys(ss);
3980 		if (ss->use_id)
3981 			cgroup_init_idr(ss, init_css_set.subsys[ss->subsys_id]);
3982 	}
3983 
3984 	/* Add init_css_set to the hash table */
3985 	hhead = css_set_hash(init_css_set.subsys);
3986 	hlist_add_head(&init_css_set.hlist, hhead);
3987 	BUG_ON(!init_root_id(&rootnode));
3988 
3989 	cgroup_kobj = kobject_create_and_add("cgroup", fs_kobj);
3990 	if (!cgroup_kobj) {
3991 		err = -ENOMEM;
3992 		goto out;
3993 	}
3994 
3995 	err = register_filesystem(&cgroup_fs_type);
3996 	if (err < 0) {
3997 		kobject_put(cgroup_kobj);
3998 		goto out;
3999 	}
4000 
4001 	proc_create("cgroups", 0, NULL, &proc_cgroupstats_operations);
4002 
4003 out:
4004 	if (err)
4005 		bdi_destroy(&cgroup_backing_dev_info);
4006 
4007 	return err;
4008 }
4009 
4010 /*
4011  * proc_cgroup_show()
4012  *  - Print task's cgroup paths into seq_file, one line for each hierarchy
4013  *  - Used for /proc/<pid>/cgroup.
4014  *  - No need to task_lock(tsk) on this tsk->cgroup reference, as it
4015  *    doesn't really matter if tsk->cgroup changes after we read it,
4016  *    and we take cgroup_mutex, keeping cgroup_attach_task() from changing it
4017  *    anyway.  No need to check that tsk->cgroup != NULL, thanks to
4018  *    the_top_cgroup_hack in cgroup_exit(), which sets an exiting task's
4019  *    cgroup to top_cgroup.
4020  */
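/*
 * Each line has the form <hierarchy-id>:<subsys-list>[,name=<name>]:<path>,
 * for example (illustrative values only):
 *
 *	2:cpu,cpuacct:/daemons
 */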
4021 
4022 /* TODO: Use a proper seq_file iterator */
4023 static int proc_cgroup_show(struct seq_file *m, void *v)
4024 {
4025 	struct pid *pid;
4026 	struct task_struct *tsk;
4027 	char *buf;
4028 	int retval;
4029 	struct cgroupfs_root *root;
4030 
4031 	retval = -ENOMEM;
4032 	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
4033 	if (!buf)
4034 		goto out;
4035 
4036 	retval = -ESRCH;
4037 	pid = m->private;
4038 	tsk = get_pid_task(pid, PIDTYPE_PID);
4039 	if (!tsk)
4040 		goto out_free;
4041 
4042 	retval = 0;
4043 
4044 	mutex_lock(&cgroup_mutex);
4045 
4046 	for_each_active_root(root) {
4047 		struct cgroup_subsys *ss;
4048 		struct cgroup *cgrp;
4049 		int count = 0;
4050 
4051 		seq_printf(m, "%d:", root->hierarchy_id);
4052 		for_each_subsys(root, ss)
4053 			seq_printf(m, "%s%s", count++ ? "," : "", ss->name);
4054 		if (strlen(root->name))
4055 			seq_printf(m, "%sname=%s", count ? "," : "",
4056 				   root->name);
4057 		seq_putc(m, ':');
4058 		cgrp = task_cgroup_from_root(tsk, root);
4059 		retval = cgroup_path(cgrp, buf, PAGE_SIZE);
4060 		if (retval < 0)
4061 			goto out_unlock;
4062 		seq_puts(m, buf);
4063 		seq_putc(m, '\n');
4064 	}
4065 
4066 out_unlock:
4067 	mutex_unlock(&cgroup_mutex);
4068 	put_task_struct(tsk);
4069 out_free:
4070 	kfree(buf);
4071 out:
4072 	return retval;
4073 }
4074 
4075 static int cgroup_open(struct inode *inode, struct file *file)
4076 {
4077 	struct pid *pid = PROC_I(inode)->pid;
4078 	return single_open(file, proc_cgroup_show, pid);
4079 }
4080 
4081 const struct file_operations proc_cgroup_operations = {
4082 	.open		= cgroup_open,
4083 	.read		= seq_read,
4084 	.llseek		= seq_lseek,
4085 	.release	= single_release,
4086 };
4087 
4088 /* Display information about each subsystem and each hierarchy */
4089 static int proc_cgroupstats_show(struct seq_file *m, void *v)
4090 {
4091 	int i;
4092 
4093 	seq_puts(m, "#subsys_name\thierarchy\tnum_cgroups\tenabled\n");
4094 	/*
4095 	 * ideally we don't want subsystems moving around while we do this.
4096 	 * cgroup_mutex is also necessary to guarantee an atomic snapshot of
4097 	 * subsys/hierarchy state.
4098 	 */
4099 	mutex_lock(&cgroup_mutex);
4100 	for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
4101 		struct cgroup_subsys *ss = subsys[i];
4102 		if (ss == NULL)
4103 			continue;
4104 		seq_printf(m, "%s\t%d\t%d\t%d\n",
4105 			   ss->name, ss->root->hierarchy_id,
4106 			   ss->root->number_of_cgroups, !ss->disabled);
4107 	}
4108 	mutex_unlock(&cgroup_mutex);
4109 	return 0;
4110 }
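/*
 * Illustrative /proc/cgroups output (the values depend entirely on the
 * running system):
 *
 *	#subsys_name	hierarchy	num_cgroups	enabled
 *	cpuset		1		4		1
 *	debug		0		1		1
 */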
4111 
4112 static int cgroupstats_open(struct inode *inode, struct file *file)
4113 {
4114 	return single_open(file, proc_cgroupstats_show, NULL);
4115 }
4116 
4117 static const struct file_operations proc_cgroupstats_operations = {
4118 	.open = cgroupstats_open,
4119 	.read = seq_read,
4120 	.llseek = seq_lseek,
4121 	.release = single_release,
4122 };
4123 
4124 /**
4125  * cgroup_fork - attach newly forked task to its parent's cgroup.
4126  * @child: pointer to task_struct of the child (forked) process.
4127  *
4128  * Description: A task inherits its parent's cgroup at fork().
4129  *
4130  * A pointer to the shared css_set was automatically copied in
4131  * fork.c by dup_task_struct().  However, we ignore that copy, since
4132  * it was not made under the protection of RCU or cgroup_mutex, so
4133  * might no longer be a valid cgroup pointer.  cgroup_attach_task() might
4134  * have already changed current->cgroups, allowing the previously
4135  * referenced css_set to be removed and freed.
4136  *
4137  * At the point that cgroup_fork() is called, 'current' is the parent
4138  * task, and the passed argument 'child' points to the child task.
4139  */
4140 void cgroup_fork(struct task_struct *child)
4141 {
4142 	task_lock(current);
4143 	child->cgroups = current->cgroups;
4144 	get_css_set(child->cgroups);
4145 	task_unlock(current);
4146 	INIT_LIST_HEAD(&child->cg_list);
4147 }
4148 
4149 /**
4150  * cgroup_fork_callbacks - run fork callbacks
4151  * @child: the new task
4152  *
4153  * Called on a new task very soon before adding it to the
4154  * tasklist. No need to take any locks since no-one can
4155  * be operating on this task.
4156  */
4157 void cgroup_fork_callbacks(struct task_struct *child)
4158 {
4159 	if (need_forkexit_callback) {
4160 		int i;
4161 		/*
4162 		 * forkexit callbacks are only supported for builtin
4163 		 * subsystems, and the builtin section of the subsys array is
4164 		 * immutable, so we don't need to lock the subsys array here.
4165 		 */
4166 		for (i = 0; i < CGROUP_BUILTIN_SUBSYS_COUNT; i++) {
4167 			struct cgroup_subsys *ss = subsys[i];
4168 			if (ss->fork)
4169 				ss->fork(ss, child);
4170 		}
4171 	}
4172 }
4173 
4174 /**
4175  * cgroup_post_fork - called on a new task after adding it to the task list
4176  * @child: the task in question
4177  *
4178  * Adds the task to the list running through its css_set if necessary.
4179  * Has to be after the task is visible on the task list in case we race
4180  * with the first call to cgroup_iter_start() - to guarantee that the
4181  * new task ends up on its list.
4182  */
4183 void cgroup_post_fork(struct task_struct *child)
4184 {
4185 	if (use_task_css_set_links) {
4186 		write_lock(&css_set_lock);
4187 		task_lock(child);
4188 		if (list_empty(&child->cg_list))
4189 			list_add(&child->cg_list, &child->cgroups->tasks);
4190 		task_unlock(child);
4191 		write_unlock(&css_set_lock);
4192 	}
4193 }
4194 /**
4195  * cgroup_exit - detach cgroup from exiting task
4196  * @tsk: pointer to task_struct of exiting process
4197  * @run_callback: run exit callbacks?
4198  *
4199  * Description: Detach cgroup from @tsk and release it.
4200  *
4201  * Note that cgroups marked notify_on_release force every task in
4202  * them to take the global cgroup_mutex mutex when exiting.
4203  * This could impact scaling on very large systems.  Be reluctant to
4204  * use notify_on_release cgroups where very high task exit scaling
4205  * is required on large systems.
4206  *
4207  * the_top_cgroup_hack:
4208  *
4209  *    Set the exiting task's cgroup to the root cgroup (top_cgroup).
4210  *
4211  *    We call cgroup_exit() while the task is still competent to
4212  *    handle notify_on_release(), then leave the task attached to the
4213  *    root cgroup in each hierarchy for the remainder of its exit.
4214  *
4215  *    To do this properly, we would increment the reference count on
4216  *    top_cgroup, and near the very end of the kernel/exit.c do_exit()
4217  *    code we would add a second cgroup function call, to drop that
4218  *    reference.  This would just create an unnecessary hot spot on
4219  *    the top_cgroup reference count, to no avail.
4220  *
4221  *    Normally, holding a reference to a cgroup without bumping its
4222  *    count is unsafe.   The cgroup could go away, or someone could
4223  *    attach us to a different cgroup, decrementing the count on
4224  *    the first cgroup that we never incremented.  But in this case,
4225  *    top_cgroup isn't going away, and either task has PF_EXITING set,
4226  *    which wards off any cgroup_attach_task() attempts, or task is a failed
4227  *    fork, never visible to cgroup_attach_task.
4228  */
4229 void cgroup_exit(struct task_struct *tsk, int run_callbacks)
4230 {
4231 	struct css_set *cg;
4232 	int i;
4233 
4234 	/*
4235 	 * Unlink from the css_set task list if necessary.
4236 	 * Optimistically check cg_list before taking
4237 	 * css_set_lock
4238 	 */
4239 	if (!list_empty(&tsk->cg_list)) {
4240 		write_lock(&css_set_lock);
4241 		if (!list_empty(&tsk->cg_list))
4242 			list_del_init(&tsk->cg_list);
4243 		write_unlock(&css_set_lock);
4244 	}
4245 
4246 	/* Reassign the task to the init_css_set. */
4247 	task_lock(tsk);
4248 	cg = tsk->cgroups;
4249 	tsk->cgroups = &init_css_set;
4250 
4251 	if (run_callbacks && need_forkexit_callback) {
4252 		/*
4253 		 * modular subsystems can't use callbacks, so no need to lock
4254 		 * the subsys array
4255 		 */
4256 		for (i = 0; i < CGROUP_BUILTIN_SUBSYS_COUNT; i++) {
4257 			struct cgroup_subsys *ss = subsys[i];
4258 			if (ss->exit) {
4259 				struct cgroup *old_cgrp =
4260 					rcu_dereference_raw(cg->subsys[i])->cgroup;
4261 				struct cgroup *cgrp = task_cgroup(tsk, i);
4262 				ss->exit(ss, cgrp, old_cgrp, tsk);
4263 			}
4264 		}
4265 	}
4266 	task_unlock(tsk);
4267 
4268 	if (cg)
4269 		put_css_set_taskexit(cg);
4270 }
4271 
4272 /**
4273  * cgroup_clone - clone the cgroup the given subsystem is attached to
4274  * @tsk: the task to be moved
4275  * @subsys: the given subsystem
4276  * @nodename: the name for the new cgroup
4277  *
4278  * Duplicate the current cgroup in the hierarchy that the given
4279  * subsystem is attached to, and move this task into the new
4280  * child.
4281  */
4282 int cgroup_clone(struct task_struct *tsk, struct cgroup_subsys *subsys,
4283 							char *nodename)
4284 {
4285 	struct dentry *dentry;
4286 	int ret = 0;
4287 	struct cgroup *parent, *child;
4288 	struct inode *inode;
4289 	struct css_set *cg;
4290 	struct cgroupfs_root *root;
4291 	struct cgroup_subsys *ss;
4292 
4293 	/* We shouldn't be called by an unregistered subsystem */
4294 	BUG_ON(!subsys->active);
4295 
4296 	/* First figure out what hierarchy and cgroup we're dealing
4297 	 * with, and pin them so we can drop cgroup_mutex */
4298 	mutex_lock(&cgroup_mutex);
4299  again:
4300 	root = subsys->root;
4301 	if (root == &rootnode) {
4302 		mutex_unlock(&cgroup_mutex);
4303 		return 0;
4304 	}
4305 
4306 	/* Pin the hierarchy */
4307 	if (!atomic_inc_not_zero(&root->sb->s_active)) {
4308 		/* We race with the final deactivate_super() */
4309 		mutex_unlock(&cgroup_mutex);
4310 		return 0;
4311 	}
4312 
4313 	/* Keep the cgroup alive */
4314 	task_lock(tsk);
4315 	parent = task_cgroup(tsk, subsys->subsys_id);
4316 	cg = tsk->cgroups;
4317 	get_css_set(cg);
4318 	task_unlock(tsk);
4319 
4320 	mutex_unlock(&cgroup_mutex);
4321 
4322 	/* Now do the VFS work to create a cgroup */
4323 	inode = parent->dentry->d_inode;
4324 
4325 	/* Hold the parent directory mutex across this operation to
4326 	 * stop anyone else deleting the new cgroup */
4327 	mutex_lock(&inode->i_mutex);
4328 	dentry = lookup_one_len(nodename, parent->dentry, strlen(nodename));
4329 	if (IS_ERR(dentry)) {
4330 		printk(KERN_INFO
4331 		       "cgroup: Couldn't allocate dentry for %s: %ld\n", nodename,
4332 		       PTR_ERR(dentry));
4333 		ret = PTR_ERR(dentry);
4334 		goto out_release;
4335 	}
4336 
4337 	/* Create the cgroup directory, which also creates the cgroup */
4338 	ret = vfs_mkdir(inode, dentry, 0755);
4339 	child = __d_cgrp(dentry);
4340 	dput(dentry);
4341 	if (ret) {
4342 		printk(KERN_INFO
4343 		       "Failed to create cgroup %s: %d\n", nodename,
4344 		       ret);
4345 		goto out_release;
4346 	}
4347 
4348 	/* The cgroup now exists. Retake cgroup_mutex and check
4349 	 * that we're still in the same state that we thought we
4350 	 * were. */
4351 	mutex_lock(&cgroup_mutex);
4352 	if ((root != subsys->root) ||
4353 	    (parent != task_cgroup(tsk, subsys->subsys_id))) {
4354 		/* Aargh, we raced ... */
4355 		mutex_unlock(&inode->i_mutex);
4356 		put_css_set(cg);
4357 
4358 		deactivate_super(root->sb);
4359 		/* The cgroup is still accessible in the VFS, but
4360 		 * we're not going to try to rmdir() it at this
4361 		 * point. */
4362 		printk(KERN_INFO
4363 		       "Race in cgroup_clone() - leaking cgroup %s\n",
4364 		       nodename);
4365 		goto again;
4366 	}
4367 
4368 	/* do any required auto-setup */
4369 	for_each_subsys(root, ss) {
4370 		if (ss->post_clone)
4371 			ss->post_clone(ss, child);
4372 	}
4373 
4374 	/* All seems fine. Finish by moving the task into the new cgroup */
4375 	ret = cgroup_attach_task(child, tsk);
4376 	mutex_unlock(&cgroup_mutex);
4377 
4378  out_release:
4379 	mutex_unlock(&inode->i_mutex);
4380 
4381 	mutex_lock(&cgroup_mutex);
4382 	put_css_set(cg);
4383 	mutex_unlock(&cgroup_mutex);
4384 	deactivate_super(root->sb);
4385 	return ret;
4386 }
4387 
4388 /**
4389  * cgroup_is_descendant - see if @cgrp is a descendant of @task's cgrp
4390  * @cgrp: the cgroup in question
4391  * @task: the task in question
4392  *
4393  * See if @cgrp is a descendant of @task's cgroup in the appropriate
4394  * hierarchy.
4395  *
4396  * If we are sending in dummytop, then presumably we are creating
4397  * the top cgroup in the subsystem.
4398  *
4399  * Called only by the ns (nsproxy) cgroup.
4400  */
4401 int cgroup_is_descendant(const struct cgroup *cgrp, struct task_struct *task)
4402 {
4403 	int ret;
4404 	struct cgroup *target;
4405 
4406 	if (cgrp == dummytop)
4407 		return 1;
4408 
4409 	target = task_cgroup_from_root(task, cgrp->root);
4410 	while (cgrp != target && cgrp != cgrp->top_cgroup)
4411 		cgrp = cgrp->parent;
4412 	ret = (cgrp == target);
4413 	return ret;
4414 }
4415 
4416 static void check_for_release(struct cgroup *cgrp)
4417 {
4418 	/* All of these checks rely on RCU to keep the cgroup
4419 	 * structure alive */
4420 	if (cgroup_is_releasable(cgrp) && !atomic_read(&cgrp->count)
4421 	    && list_empty(&cgrp->children) && !cgroup_has_css_refs(cgrp)) {
4422 		/* Control Group is currently removable. If it's not
4423 		 * already queued for a userspace notification, queue
4424 		 * it now */
4425 		int need_schedule_work = 0;
4426 		spin_lock(&release_list_lock);
4427 		if (!cgroup_is_removed(cgrp) &&
4428 		    list_empty(&cgrp->release_list)) {
4429 			list_add(&cgrp->release_list, &release_list);
4430 			need_schedule_work = 1;
4431 		}
4432 		spin_unlock(&release_list_lock);
4433 		if (need_schedule_work)
4434 			schedule_work(&release_agent_work);
4435 	}
4436 }
4437 
4438 /* Caller must verify that the css is not for root cgroup */
4439 void __css_put(struct cgroup_subsys_state *css, int count)
4440 {
4441 	struct cgroup *cgrp = css->cgroup;
4442 	int val;
4443 	rcu_read_lock();
4444 	val = atomic_sub_return(count, &css->refcnt);
4445 	if (val == 1) {
4446 		if (notify_on_release(cgrp)) {
4447 			set_bit(CGRP_RELEASABLE, &cgrp->flags);
4448 			check_for_release(cgrp);
4449 		}
4450 		cgroup_wakeup_rmdir_waiter(cgrp);
4451 	}
4452 	rcu_read_unlock();
4453 	WARN_ON_ONCE(val < 1);
4454 }
4455 EXPORT_SYMBOL_GPL(__css_put);
4456 
4457 /*
4458  * Notify userspace when a cgroup is released, by running the
4459  * configured release agent with the name of the cgroup (path
4460  * relative to the root of cgroup file system) as the argument.
4461  *
4462  * Most likely, this user command will try to rmdir this cgroup.
4463  *
4464  * This races with the possibility that some other task will be
4465  * attached to this cgroup before it is removed, or that some other
4466  * user task will 'mkdir' a child cgroup of this cgroup.  That's ok.
4467  * The presumed 'rmdir' will fail quietly if this cgroup is no longer
4468  * unused, and this cgroup will be reprieved from its death sentence,
4469  * to continue to serve a useful existence.  Next time it's released,
4470  * we will get notified again, if it still has 'notify_on_release' set.
4471  *
4472  * The final arg to call_usermodehelper() is UMH_WAIT_EXEC, which
4473  * means only wait until the task is successfully execve()'d.  The
4474  * separate release agent task is forked by call_usermodehelper(),
4475  * then control in this thread returns here, without waiting for the
4476  * release agent task.  We don't bother to wait because the caller of
4477  * this routine has no use for the exit status of the release agent
4478  * task, so no sense holding our caller up for that.
4479  */
4480 static void cgroup_release_agent(struct work_struct *work)
4481 {
4482 	BUG_ON(work != &release_agent_work);
4483 	mutex_lock(&cgroup_mutex);
4484 	spin_lock(&release_list_lock);
4485 	while (!list_empty(&release_list)) {
4486 		char *argv[3], *envp[3];
4487 		int i;
4488 		char *pathbuf = NULL, *agentbuf = NULL;
4489 		struct cgroup *cgrp = list_entry(release_list.next,
4490 						    struct cgroup,
4491 						    release_list);
4492 		list_del_init(&cgrp->release_list);
4493 		spin_unlock(&release_list_lock);
4494 		pathbuf = kmalloc(PAGE_SIZE, GFP_KERNEL);
4495 		if (!pathbuf)
4496 			goto continue_free;
4497 		if (cgroup_path(cgrp, pathbuf, PAGE_SIZE) < 0)
4498 			goto continue_free;
4499 		agentbuf = kstrdup(cgrp->root->release_agent_path, GFP_KERNEL);
4500 		if (!agentbuf)
4501 			goto continue_free;
4502 
4503 		i = 0;
4504 		argv[i++] = agentbuf;
4505 		argv[i++] = pathbuf;
4506 		argv[i] = NULL;
4507 
4508 		i = 0;
4509 		/* minimal command environment */
4510 		envp[i++] = "HOME=/";
4511 		envp[i++] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";
4512 		envp[i] = NULL;
4513 
4514 		/* Drop the lock while we invoke the usermode helper,
4515 		 * since the exec could involve hitting disk and hence
4516 		 * be a slow process */
4517 		mutex_unlock(&cgroup_mutex);
4518 		call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC);
4519 		mutex_lock(&cgroup_mutex);
4520  continue_free:
4521 		kfree(pathbuf);
4522 		kfree(agentbuf);
4523 		spin_lock(&release_list_lock);
4524 	}
4525 	spin_unlock(&release_list_lock);
4526 	mutex_unlock(&cgroup_mutex);
4527 }
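/*
 * Illustrative userspace setup (the agent path is an example only):
 * writing "/sbin/my_release_agent" to the hierarchy's "release_agent"
 * file and "1" to a cgroup's "notify_on_release" file results, when the
 * cgroup is released, in roughly:
 *
 *	/sbin/my_release_agent /path/of/released/cgroup
 *
 * run with the minimal HOME/PATH environment built above.
 */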
4528 
4529 static int __init cgroup_disable(char *str)
4530 {
4531 	int i;
4532 	char *token;
4533 
4534 	while ((token = strsep(&str, ",")) != NULL) {
4535 		if (!*token)
4536 			continue;
4537 		/*
4538 		 * cgroup_disable, being at boot time, can't know about module
4539 		 * subsystems, so we don't worry about them.
4540 		 */
4541 		for (i = 0; i < CGROUP_BUILTIN_SUBSYS_COUNT; i++) {
4542 			struct cgroup_subsys *ss = subsys[i];
4543 
4544 			if (!strcmp(token, ss->name)) {
4545 				ss->disabled = 1;
4546 				printk(KERN_INFO "Disabling %s control group"
4547 					" subsystem\n", ss->name);
4548 				break;
4549 			}
4550 		}
4551 	}
4552 	return 1;
4553 }
4554 __setup("cgroup_disable=", cgroup_disable);
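/*
 * Example: booting with "cgroup_disable=memory" keeps the memory
 * controller compiled in but marks it ->disabled, so it can no longer be
 * bound to a hierarchy at mount time.
 */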
4555 
4556 /*
4557  * Functions for CSS ID.
4558  */
4559 
4560 /*
4561  * To get an ID other than 0, this should be called when !cgroup_is_removed().
4562  */
4563 unsigned short css_id(struct cgroup_subsys_state *css)
4564 {
4565 	struct css_id *cssid;
4566 
4567 	/*
4568 	 * This css_id() returns a correct value when someone holds a refcnt
4569 	 * on the css or when it is called under rcu_read_lock(). Once css->id
4570 	 * is allocated, it is unchanged until freed.
4571 	 */
4572 	cssid = rcu_dereference_check(css->id,
4573 			rcu_read_lock_held() || atomic_read(&css->refcnt));
4574 
4575 	if (cssid)
4576 		return cssid->id;
4577 	return 0;
4578 }
4579 EXPORT_SYMBOL_GPL(css_id);
4580 
4581 unsigned short css_depth(struct cgroup_subsys_state *css)
4582 {
4583 	struct css_id *cssid;
4584 
4585 	cssid = rcu_dereference_check(css->id,
4586 			rcu_read_lock_held() || atomic_read(&css->refcnt));
4587 
4588 	if (cssid)
4589 		return cssid->depth;
4590 	return 0;
4591 }
4592 EXPORT_SYMBOL_GPL(css_depth);
4593 
4594 /**
4595  * css_is_ancestor - test whether "root" css is an ancestor of "child"
4596  * @child: the css to be tested.
4597  * @root: the css supposed to be an ancestor of the child.
4598  *
4599  * Returns true if "root" is an ancestor of "child" in its hierarchy. Because
4600  * this function reads css->id, it uses rcu_dereference() and rcu_read_lock().
4601  * But, considering usual usage, the csses should remain valid objects after
4602  * the test. Assuming that the caller will do some action to the child if
4603  * this returns true, the caller must take "child"'s reference count.
4604  * If "child" is a valid object and this returns true, "root" is valid, too.
4605  */
4606 
4607 bool css_is_ancestor(struct cgroup_subsys_state *child,
4608 		    const struct cgroup_subsys_state *root)
4609 {
4610 	struct css_id *child_id;
4611 	struct css_id *root_id;
4612 	bool ret = true;
4613 
4614 	rcu_read_lock();
4615 	child_id  = rcu_dereference(child->id);
4616 	root_id = rcu_dereference(root->id);
4617 	if (!child_id
4618 	    || !root_id
4619 	    || (child_id->depth < root_id->depth)
4620 	    || (child_id->stack[root_id->depth] != root_id->id))
4621 		ret = false;
4622 	rcu_read_unlock();
4623 	return ret;
4624 }
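/*
 * Worked example of the stack check above (the ids are illustrative):
 * suppose "root" has id 3 at depth 1 and "child" sits at depth 3 with
 * stack = {1, 3, 7, 9}. Then child_id->depth (3) >= root_id->depth (1)
 * and child_id->stack[1] == 3 == root_id->id, so "root" is an ancestor.
 */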
4625 
4626 static void __free_css_id_cb(struct rcu_head *head)
4627 {
4628 	struct css_id *id;
4629 
4630 	id = container_of(head, struct css_id, rcu_head);
4631 	kfree(id);
4632 }
4633 
4634 void free_css_id(struct cgroup_subsys *ss, struct cgroup_subsys_state *css)
4635 {
4636 	struct css_id *id = css->id;
4637 	/* When this is called before css_id initialization, id can be NULL */
4638 	if (!id)
4639 		return;
4640 
4641 	BUG_ON(!ss->use_id);
4642 
4643 	rcu_assign_pointer(id->css, NULL);
4644 	rcu_assign_pointer(css->id, NULL);
4645 	spin_lock(&ss->id_lock);
4646 	idr_remove(&ss->idr, id->id);
4647 	spin_unlock(&ss->id_lock);
4648 	call_rcu(&id->rcu_head, __free_css_id_cb);
4649 }
4650 EXPORT_SYMBOL_GPL(free_css_id);
4651 
4652 /*
4653  * This is called by init or create(). Calls to this function are
4654  * always serialized (by cgroup_mutex at create()).
4655  */
4656 
4657 static struct css_id *get_new_cssid(struct cgroup_subsys *ss, int depth)
4658 {
4659 	struct css_id *newid;
4660 	int myid, error, size;
4661 
4662 	BUG_ON(!ss->use_id);
4663 
4664 	size = sizeof(*newid) + sizeof(unsigned short) * (depth + 1);
4665 	newid = kzalloc(size, GFP_KERNEL);
4666 	if (!newid)
4667 		return ERR_PTR(-ENOMEM);
4668 	/* get id */
4669 	if (unlikely(!idr_pre_get(&ss->idr, GFP_KERNEL))) {
4670 		error = -ENOMEM;
4671 		goto err_out;
4672 	}
4673 	spin_lock(&ss->id_lock);
4674 	/* Don't use 0; allocate an ID in the range 1-65535 */
4675 	error = idr_get_new_above(&ss->idr, newid, 1, &myid);
4676 	spin_unlock(&ss->id_lock);
4677 
4678 	/* Returns an error when there is no free space for a new ID. */
4679 	if (error) {
4680 		error = -ENOSPC;
4681 		goto err_out;
4682 	}
4683 	if (myid > CSS_ID_MAX)
4684 		goto remove_idr;
4685 
4686 	newid->id = myid;
4687 	newid->depth = depth;
4688 	return newid;
4689 remove_idr:
4690 	error = -ENOSPC;
4691 	spin_lock(&ss->id_lock);
4692 	idr_remove(&ss->idr, myid);
4693 	spin_unlock(&ss->id_lock);
4694 err_out:
4695 	kfree(newid);
4696 	return ERR_PTR(error);
4697 
4698 }
4699 
4700 static int __init_or_module cgroup_init_idr(struct cgroup_subsys *ss,
4701 					    struct cgroup_subsys_state *rootcss)
4702 {
4703 	struct css_id *newid;
4704 
4705 	spin_lock_init(&ss->id_lock);
4706 	idr_init(&ss->idr);
4707 
4708 	newid = get_new_cssid(ss, 0);
4709 	if (IS_ERR(newid))
4710 		return PTR_ERR(newid);
4711 
4712 	newid->stack[0] = newid->id;
4713 	newid->css = rootcss;
4714 	rootcss->id = newid;
4715 	return 0;
4716 }
4717 
4718 static int alloc_css_id(struct cgroup_subsys *ss, struct cgroup *parent,
4719 			struct cgroup *child)
4720 {
4721 	int subsys_id, i, depth = 0;
4722 	struct cgroup_subsys_state *parent_css, *child_css;
4723 	struct css_id *child_id, *parent_id;
4724 
4725 	subsys_id = ss->subsys_id;
4726 	parent_css = parent->subsys[subsys_id];
4727 	child_css = child->subsys[subsys_id];
4728 	parent_id = parent_css->id;
4729 	depth = parent_id->depth + 1;
4730 
4731 	child_id = get_new_cssid(ss, depth);
4732 	if (IS_ERR(child_id))
4733 		return PTR_ERR(child_id);
4734 
4735 	for (i = 0; i < depth; i++)
4736 		child_id->stack[i] = parent_id->stack[i];
4737 	child_id->stack[depth] = child_id->id;
4738 	/*
4739 	 * child_id->css pointer will be set after this cgroup is available
4740 	 * see cgroup_populate_dir()
4741 	 */
4742 	rcu_assign_pointer(child_css->id, child_id);
4743 
4744 	return 0;
4745 }
4746 
4747 /**
4748  * css_lookup - lookup css by id
4749  * @ss: cgroup subsys to be looked into.
4750  * @id: the id
4751  *
4752  * Returns a pointer to the cgroup_subsys_state if there is a valid one
4753  * with the given id, NULL if not. Should be called under rcu_read_lock().
4754  */
4755 struct cgroup_subsys_state *css_lookup(struct cgroup_subsys *ss, int id)
4756 {
4757 	struct css_id *cssid = NULL;
4758 
4759 	BUG_ON(!ss->use_id);
4760 	cssid = idr_find(&ss->idr, id);
4761 
4762 	if (unlikely(!cssid))
4763 		return NULL;
4764 
4765 	return rcu_dereference(cssid->css);
4766 }
4767 EXPORT_SYMBOL_GPL(css_lookup);
4768 
4769 /**
4770  * css_get_next - lookup next cgroup under specified hierarchy.
4771  * @ss: pointer to subsystem
4772  * @id: current position of iteration.
4773  * @root: pointer to css. search tree under this.
4774  * @foundid: position of found object.
4775  *
4776  * Search for the next css under the specified hierarchy of @root. Must be
4777  * called under rcu_read_lock(). Returns NULL when the end is reached.
4778  */
4779 struct cgroup_subsys_state *
4780 css_get_next(struct cgroup_subsys *ss, int id,
4781 	     struct cgroup_subsys_state *root, int *foundid)
4782 {
4783 	struct cgroup_subsys_state *ret = NULL;
4784 	struct css_id *tmp;
4785 	int tmpid;
4786 	int rootid = css_id(root);
4787 	int depth = css_depth(root);
4788 
4789 	if (!rootid)
4790 		return NULL;
4791 
4792 	BUG_ON(!ss->use_id);
4793 	/* fill start point for scan */
4794 	tmpid = id;
4795 	while (1) {
4796 		/*
4797 		 * scan the next entry from the idr tree; tmpid is updated after
4798 		 * idr_get_next().
4799 		 */
4800 		spin_lock(&ss->id_lock);
4801 		tmp = idr_get_next(&ss->idr, &tmpid);
4802 		spin_unlock(&ss->id_lock);
4803 
4804 		if (!tmp)
4805 			break;
4806 		if (tmp->depth >= depth && tmp->stack[depth] == rootid) {
4807 			ret = rcu_dereference(tmp->css);
4808 			if (ret) {
4809 				*foundid = tmpid;
4810 				break;
4811 			}
4812 		}
4813 		/* continue to scan from next id */
4814 		tmpid = tmpid + 1;
4815 	}
4816 	return ret;
4817 }
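/*
 * Sketch of a typical iteration over every css below @root, assuming the
 * caller holds rcu_read_lock() for the duration:
 *
 *	int found, id = 1;
 *	struct cgroup_subsys_state *css;
 *
 *	while ((css = css_get_next(ss, id, root, &found)) != NULL) {
 *		... use css under RCU ...
 *		id = found + 1;
 *	}
 */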
4818 
4819 /*
4820  * get corresponding css from file open on cgroupfs directory
4821  */
4822 struct cgroup_subsys_state *cgroup_css_from_dir(struct file *f, int id)
4823 {
4824 	struct cgroup *cgrp;
4825 	struct inode *inode;
4826 	struct cgroup_subsys_state *css;
4827 
4828 	inode = f->f_dentry->d_inode;
4829 	/* check in cgroup filesystem dir */
4830 	if (inode->i_op != &cgroup_dir_inode_operations)
4831 		return ERR_PTR(-EBADF);
4832 
4833 	if (id < 0 || id >= CGROUP_SUBSYS_COUNT)
4834 		return ERR_PTR(-EINVAL);
4835 
4836 	/* get cgroup */
4837 	cgrp = __d_cgrp(f->f_dentry);
4838 	css = cgrp->subsys[id];
4839 	return css ? css : ERR_PTR(-ENOENT);
4840 }
4841 
4842 #ifdef CONFIG_CGROUP_DEBUG
4843 static struct cgroup_subsys_state *debug_create(struct cgroup_subsys *ss,
4844 						   struct cgroup *cont)
4845 {
4846 	struct cgroup_subsys_state *css = kzalloc(sizeof(*css), GFP_KERNEL);
4847 
4848 	if (!css)
4849 		return ERR_PTR(-ENOMEM);
4850 
4851 	return css;
4852 }
4853 
4854 static void debug_destroy(struct cgroup_subsys *ss, struct cgroup *cont)
4855 {
4856 	kfree(cont->subsys[debug_subsys_id]);
4857 }
4858 
4859 static u64 cgroup_refcount_read(struct cgroup *cont, struct cftype *cft)
4860 {
4861 	return atomic_read(&cont->count);
4862 }
4863 
4864 static u64 debug_taskcount_read(struct cgroup *cont, struct cftype *cft)
4865 {
4866 	return cgroup_task_count(cont);
4867 }
4868 
4869 static u64 current_css_set_read(struct cgroup *cont, struct cftype *cft)
4870 {
4871 	return (u64)(unsigned long)current->cgroups;
4872 }
4873 
4874 static u64 current_css_set_refcount_read(struct cgroup *cont,
4875 					   struct cftype *cft)
4876 {
4877 	u64 count;
4878 
4879 	rcu_read_lock();
4880 	count = atomic_read(&current->cgroups->refcount);
4881 	rcu_read_unlock();
4882 	return count;
4883 }
4884 
4885 static int current_css_set_cg_links_read(struct cgroup *cont,
4886 					 struct cftype *cft,
4887 					 struct seq_file *seq)
4888 {
4889 	struct cg_cgroup_link *link;
4890 	struct css_set *cg;
4891 
4892 	read_lock(&css_set_lock);
4893 	rcu_read_lock();
4894 	cg = rcu_dereference(current->cgroups);
4895 	list_for_each_entry(link, &cg->cg_links, cg_link_list) {
4896 		struct cgroup *c = link->cgrp;
4897 		const char *name;
4898 
4899 		if (c->dentry)
4900 			name = c->dentry->d_name.name;
4901 		else
4902 			name = "?";
4903 		seq_printf(seq, "Root %d group %s\n",
4904 			   c->root->hierarchy_id, name);
4905 	}
4906 	rcu_read_unlock();
4907 	read_unlock(&css_set_lock);
4908 	return 0;
4909 }
4910 
4911 #define MAX_TASKS_SHOWN_PER_CSS 25
4912 static int cgroup_css_links_read(struct cgroup *cont,
4913 				 struct cftype *cft,
4914 				 struct seq_file *seq)
4915 {
4916 	struct cg_cgroup_link *link;
4917 
4918 	read_lock(&css_set_lock);
4919 	list_for_each_entry(link, &cont->css_sets, cgrp_link_list) {
4920 		struct css_set *cg = link->cg;
4921 		struct task_struct *task;
4922 		int count = 0;
4923 		seq_printf(seq, "css_set %p\n", cg);
4924 		list_for_each_entry(task, &cg->tasks, cg_list) {
4925 			if (count++ > MAX_TASKS_SHOWN_PER_CSS) {
4926 				seq_puts(seq, "  ...\n");
4927 				break;
4928 			} else {
4929 				seq_printf(seq, "  task %d\n",
4930 					   task_pid_vnr(task));
4931 			}
4932 		}
4933 	}
4934 	read_unlock(&css_set_lock);
4935 	return 0;
4936 }
4937 
4938 static u64 releasable_read(struct cgroup *cgrp, struct cftype *cft)
4939 {
4940 	return test_bit(CGRP_RELEASABLE, &cgrp->flags);
4941 }
4942 
4943 static struct cftype debug_files[] =  {
4944 	{
4945 		.name = "cgroup_refcount",
4946 		.read_u64 = cgroup_refcount_read,
4947 	},
4948 	{
4949 		.name = "taskcount",
4950 		.read_u64 = debug_taskcount_read,
4951 	},
4952 
4953 	{
4954 		.name = "current_css_set",
4955 		.read_u64 = current_css_set_read,
4956 	},
4957 
4958 	{
4959 		.name = "current_css_set_refcount",
4960 		.read_u64 = current_css_set_refcount_read,
4961 	},
4962 
4963 	{
4964 		.name = "current_css_set_cg_links",
4965 		.read_seq_string = current_css_set_cg_links_read,
4966 	},
4967 
4968 	{
4969 		.name = "cgroup_css_links",
4970 		.read_seq_string = cgroup_css_links_read,
4971 	},
4972 
4973 	{
4974 		.name = "releasable",
4975 		.read_u64 = releasable_read,
4976 	},
4977 };
4978 
4979 static int debug_populate(struct cgroup_subsys *ss, struct cgroup *cont)
4980 {
4981 	return cgroup_add_files(cont, ss, debug_files,
4982 				ARRAY_SIZE(debug_files));
4983 }
4984 
4985 struct cgroup_subsys debug_subsys = {
4986 	.name = "debug",
4987 	.create = debug_create,
4988 	.destroy = debug_destroy,
4989 	.populate = debug_populate,
4990 	.subsys_id = debug_subsys_id,
4991 };
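/*
 * With CONFIG_CGROUP_DEBUG enabled, the files above appear (with a
 * "debug." prefix) in each directory of a hierarchy mounted with this
 * subsystem, e.g. (illustrative):
 *
 *	mount -t cgroup -o debug none /mnt
 *	cat /mnt/debug.taskcount
 */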
4992 #endif /* CONFIG_CGROUP_DEBUG */
4993