1 /* memcontrol.c - Memory Controller
2  *
3  * Copyright IBM Corporation, 2007
4  * Author Balbir Singh <balbir@linux.vnet.ibm.com>
5  *
6  * Copyright 2007 OpenVZ SWsoft Inc
7  * Author: Pavel Emelianov <xemul@openvz.org>
8  *
9  * Memory thresholds
10  * Copyright (C) 2009 Nokia Corporation
11  * Author: Kirill A. Shutemov
12  *
13  * This program is free software; you can redistribute it and/or modify
14  * it under the terms of the GNU General Public License as published by
15  * the Free Software Foundation; either version 2 of the License, or
16  * (at your option) any later version.
17  *
18  * This program is distributed in the hope that it will be useful,
19  * but WITHOUT ANY WARRANTY; without even the implied warranty of
20  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
21  * GNU General Public License for more details.
22  */
23 
24 #include <linux/res_counter.h>
25 #include <linux/memcontrol.h>
26 #include <linux/cgroup.h>
27 #include <linux/mm.h>
28 #include <linux/hugetlb.h>
29 #include <linux/pagemap.h>
30 #include <linux/smp.h>
31 #include <linux/page-flags.h>
32 #include <linux/backing-dev.h>
33 #include <linux/bit_spinlock.h>
34 #include <linux/rcupdate.h>
35 #include <linux/limits.h>
36 #include <linux/mutex.h>
37 #include <linux/rbtree.h>
38 #include <linux/slab.h>
39 #include <linux/swap.h>
40 #include <linux/swapops.h>
41 #include <linux/spinlock.h>
42 #include <linux/eventfd.h>
43 #include <linux/sort.h>
44 #include <linux/fs.h>
45 #include <linux/seq_file.h>
46 #include <linux/vmalloc.h>
47 #include <linux/mm_inline.h>
48 #include <linux/page_cgroup.h>
49 #include <linux/cpu.h>
50 #include <linux/oom.h>
51 #include "internal.h"
52 
53 #include <asm/uaccess.h>
54 
55 #include <trace/events/vmscan.h>
56 
57 struct cgroup_subsys mem_cgroup_subsys __read_mostly;
58 #define MEM_CGROUP_RECLAIM_RETRIES	5
59 struct mem_cgroup *root_mem_cgroup __read_mostly;
60 
61 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
62 /* Turned on only when memory cgroup is enabled && really_do_swap_account = 1 */
63 int do_swap_account __read_mostly;
64 
65 /* for remembering the boot option */
66 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP_ENABLED
67 static int really_do_swap_account __initdata = 1;
68 #else
69 static int really_do_swap_account __initdata = 0;
70 #endif
71 
72 #else
73 #define do_swap_account		(0)
74 #endif
75 
76 
77 /*
78  * Statistics for memory cgroup.
79  */
80 enum mem_cgroup_stat_index {
81 	/*
82 	 * For MEM_CONTAINER_TYPE_ALL, usage = pagecache + rss.
83 	 */
84 	MEM_CGROUP_STAT_CACHE, 	   /* # of pages charged as cache */
85 	MEM_CGROUP_STAT_RSS,	   /* # of pages charged as anon rss */
86 	MEM_CGROUP_STAT_FILE_MAPPED,  /* # of pages charged as file rss */
87 	MEM_CGROUP_STAT_SWAPOUT, /* # of pages, swapped out */
88 	MEM_CGROUP_STAT_DATA, /* end of data requires synchronization */
89 	MEM_CGROUP_ON_MOVE,	/* someone is moving account between groups */
90 	MEM_CGROUP_STAT_NSTATS,
91 };
92 
93 enum mem_cgroup_events_index {
94 	MEM_CGROUP_EVENTS_PGPGIN,	/* # of pages paged in */
95 	MEM_CGROUP_EVENTS_PGPGOUT,	/* # of pages paged out */
96 	MEM_CGROUP_EVENTS_COUNT,	/* # of pages paged in/out */
97 	MEM_CGROUP_EVENTS_NSTATS,
98 };
99 /*
100  * Per-memcg event counter is incremented at every pagein/pageout. With THP,
101  * it will be incremented by the number of pages. This counter is used to
102  * trigger some periodic events. This is straightforward and better
103  * than using jiffies etc. to handle periodic memcg events.
104  */
105 enum mem_cgroup_events_target {
106 	MEM_CGROUP_TARGET_THRESH,
107 	MEM_CGROUP_TARGET_SOFTLIMIT,
108 	MEM_CGROUP_NTARGETS,
109 };
110 #define THRESHOLDS_EVENTS_TARGET (128)
111 #define SOFTLIMIT_EVENTS_TARGET (1024)
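/*
 * Illustrative note: EVENTS_COUNT advances by one per ordinary pagein or
 * pageout (and by the page count for THP; see
 * mem_cgroup_charge_statistics()).  So with THRESHOLDS_EVENTS_TARGET == 128,
 * memcg_check_events() runs the threshold notifier roughly once per 128
 * page events on a cpu, and with SOFTLIMIT_EVENTS_TARGET == 1024 it updates
 * the soft limit tree roughly once per 1024 page events.
 */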
112 
113 struct mem_cgroup_stat_cpu {
114 	long count[MEM_CGROUP_STAT_NSTATS];
115 	unsigned long events[MEM_CGROUP_EVENTS_NSTATS];
116 	unsigned long targets[MEM_CGROUP_NTARGETS];
117 };
118 
119 /*
120  * per-zone information in memory controller.
121  */
122 struct mem_cgroup_per_zone {
123 	/*
124 	 * spin_lock to protect the per cgroup LRU
125 	 */
126 	struct list_head	lists[NR_LRU_LISTS];
127 	unsigned long		count[NR_LRU_LISTS];
128 
129 	struct zone_reclaim_stat reclaim_stat;
130 	struct rb_node		tree_node;	/* RB tree node */
131 	unsigned long long	usage_in_excess;/* Set to the value by which */
132 						/* the soft limit is exceeded*/
133 	bool			on_tree;
134 	struct mem_cgroup	*mem;		/* Back pointer, we cannot */
135 						/* use container_of	   */
136 };
137 /* Macro for accessing counter */
138 #define MEM_CGROUP_ZSTAT(mz, idx)	((mz)->count[(idx)])
139 
140 struct mem_cgroup_per_node {
141 	struct mem_cgroup_per_zone zoneinfo[MAX_NR_ZONES];
142 };
143 
144 struct mem_cgroup_lru_info {
145 	struct mem_cgroup_per_node *nodeinfo[MAX_NUMNODES];
146 };
147 
148 /*
149  * Cgroups above their limits are maintained in a RB-Tree, independent of
150  * their hierarchy representation
151  */
152 
153 struct mem_cgroup_tree_per_zone {
154 	struct rb_root rb_root;
155 	spinlock_t lock;
156 };
157 
158 struct mem_cgroup_tree_per_node {
159 	struct mem_cgroup_tree_per_zone rb_tree_per_zone[MAX_NR_ZONES];
160 };
161 
162 struct mem_cgroup_tree {
163 	struct mem_cgroup_tree_per_node *rb_tree_per_node[MAX_NUMNODES];
164 };
165 
166 static struct mem_cgroup_tree soft_limit_tree __read_mostly;
167 
168 struct mem_cgroup_threshold {
169 	struct eventfd_ctx *eventfd;
170 	u64 threshold;
171 };
172 
173 /* For threshold */
174 struct mem_cgroup_threshold_ary {
175 	/* An array index points to threshold just below usage. */
176 	int current_threshold;
177 	/* Size of entries[] */
178 	unsigned int size;
179 	/* Array of thresholds */
180 	struct mem_cgroup_threshold entries[0];
181 };
182 
183 struct mem_cgroup_thresholds {
184 	/* Primary thresholds array */
185 	struct mem_cgroup_threshold_ary *primary;
186 	/*
187 	 * Spare threshold array.
188 	 * This is needed to make mem_cgroup_unregister_event() "never fail".
189 	 * It must be able to store at least primary->size - 1 entries.
190 	 */
191 	struct mem_cgroup_threshold_ary *spare;
192 };
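/*
 * Illustrative note (the (un)register code lives later in this file): a
 * writer builds the updated array in "spare" and then swaps the
 * primary/spare pointers under thresholds_lock, while readers dereference
 * "primary" under RCU.  This is why unregistering never needs to allocate.
 */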
193 
194 /* for OOM */
195 struct mem_cgroup_eventfd_list {
196 	struct list_head list;
197 	struct eventfd_ctx *eventfd;
198 };
199 
200 static void mem_cgroup_threshold(struct mem_cgroup *mem);
201 static void mem_cgroup_oom_notify(struct mem_cgroup *mem);
202 
203 /*
204  * The memory controller data structure. The memory controller controls both
205  * page cache and RSS per cgroup. We would eventually like to provide
206  * statistics based on the statistics developed by Rik Van Riel for clock-pro,
207  * to help the administrator determine what knobs to tune.
208  *
209  * TODO: Add a water mark for the memory controller. Reclaim will begin when
210  * we hit the water mark. Maybe even add a low water mark, such that
211  * no reclaim occurs from a cgroup at its low water mark; this is
212  * a feature that will be implemented much later in the future.
213  */
214 struct mem_cgroup {
215 	struct cgroup_subsys_state css;
216 	/*
217 	 * the counter to account for memory usage
218 	 */
219 	struct res_counter res;
220 	/*
221 	 * the counter to account for mem+swap usage.
222 	 */
223 	struct res_counter memsw;
224 	/*
225 	 * Per cgroup active and inactive list, similar to the
226 	 * per zone LRU lists.
227 	 */
228 	struct mem_cgroup_lru_info info;
229 	/*
230 	 * While reclaiming in a hierarchy, we cache the last child we
231 	 * reclaimed from.
232 	 */
233 	int last_scanned_child;
234 	/*
235 	 * Should the accounting and control be hierarchical, per subtree?
236 	 */
237 	bool use_hierarchy;
238 	atomic_t	oom_lock;
239 	atomic_t	refcnt;
240 
241 	unsigned int	swappiness;
242 	/* OOM-Killer disable */
243 	int		oom_kill_disable;
244 
245 	/* set when res.limit == memsw.limit */
246 	bool		memsw_is_minimum;
247 
248 	/* protect arrays of thresholds */
249 	struct mutex thresholds_lock;
250 
251 	/* thresholds for memory usage. RCU-protected */
252 	struct mem_cgroup_thresholds thresholds;
253 
254 	/* thresholds for mem+swap usage. RCU-protected */
255 	struct mem_cgroup_thresholds memsw_thresholds;
256 
257 	/* For oom notifier event fd */
258 	struct list_head oom_notify;
259 
260 	/*
261 	 * Should we move charges of a task when a task is moved into this
262 	 * mem_cgroup ? And what type of charges should we move ?
263 	 */
264 	unsigned long 	move_charge_at_immigrate;
265 	/*
266 	 * percpu counter.
267 	 */
268 	struct mem_cgroup_stat_cpu *stat;
269 	/*
270 	 * used when a cpu is offlined or other synchronizations
271 	 * See mem_cgroup_read_stat().
272 	 */
273 	struct mem_cgroup_stat_cpu nocpu_base;
274 	spinlock_t pcp_counter_lock;
275 };
276 
277 /* Stuffs for move charges at task migration. */
278 /*
279  * Types of charges to be moved. "move_charge_at_immigrate" is treated as a
280  * left-shifted bitmap of these types.
281  */
282 enum move_type {
283 	MOVE_CHARGE_TYPE_ANON,	/* private anonymous page and swap of it */
284 	MOVE_CHARGE_TYPE_FILE,	/* file page(including tmpfs) and swap of it */
285 	NR_MOVE_TYPE,
286 };
287 
288 /* "mc" and its members are protected by cgroup_mutex */
289 static struct move_charge_struct {
290 	spinlock_t	  lock; /* for from, to */
291 	struct mem_cgroup *from;
292 	struct mem_cgroup *to;
293 	unsigned long precharge;
294 	unsigned long moved_charge;
295 	unsigned long moved_swap;
296 	struct task_struct *moving_task;	/* a task moving charges */
297 	wait_queue_head_t waitq;		/* a waitq for other context */
298 } mc = {
299 	.lock = __SPIN_LOCK_UNLOCKED(mc.lock),
300 	.waitq = __WAIT_QUEUE_HEAD_INITIALIZER(mc.waitq),
301 };
302 
303 static bool move_anon(void)
304 {
305 	return test_bit(MOVE_CHARGE_TYPE_ANON,
306 					&mc.to->move_charge_at_immigrate);
307 }
308 
309 static bool move_file(void)
310 {
311 	return test_bit(MOVE_CHARGE_TYPE_FILE,
312 					&mc.to->move_charge_at_immigrate);
313 }
314 
315 /*
316  * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft
317  * limit reclaim to prevent infinite loops, if they ever occur.
318  */
319 #define	MEM_CGROUP_MAX_RECLAIM_LOOPS		(100)
320 #define	MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS	(2)
321 
322 enum charge_type {
323 	MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
324 	MEM_CGROUP_CHARGE_TYPE_MAPPED,
325 	MEM_CGROUP_CHARGE_TYPE_SHMEM,	/* used by page migration of shmem */
326 	MEM_CGROUP_CHARGE_TYPE_FORCE,	/* used by force_empty */
327 	MEM_CGROUP_CHARGE_TYPE_SWAPOUT,	/* for accounting swapcache */
328 	MEM_CGROUP_CHARGE_TYPE_DROP,	/* a page was unused swap cache */
329 	NR_CHARGE_TYPE,
330 };
331 
332 /* for encoding cft->private value on file */
333 #define _MEM			(0)
334 #define _MEMSWAP		(1)
335 #define _OOM_TYPE		(2)
336 #define MEMFILE_PRIVATE(x, val)	(((x) << 16) | (val))
337 #define MEMFILE_TYPE(val)	(((val) >> 16) & 0xffff)
338 #define MEMFILE_ATTR(val)	((val) & 0xffff)
339 /* Used for OOM notifier */
340 #define OOM_CONTROL		(0)
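/*
 * Worked example (illustrative): a memsw limit file can set
 * cft->private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT), storing the counter
 * type in the upper 16 bits and the res_counter member in the lower 16.
 * The handler then recovers MEMFILE_TYPE(cft->private) == _MEMSWAP and
 * MEMFILE_ATTR(cft->private) == RES_LIMIT.
 */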
341 
342 /*
343  * Reclaim flags for mem_cgroup_hierarchical_reclaim
344  */
345 #define MEM_CGROUP_RECLAIM_NOSWAP_BIT	0x0
346 #define MEM_CGROUP_RECLAIM_NOSWAP	(1 << MEM_CGROUP_RECLAIM_NOSWAP_BIT)
347 #define MEM_CGROUP_RECLAIM_SHRINK_BIT	0x1
348 #define MEM_CGROUP_RECLAIM_SHRINK	(1 << MEM_CGROUP_RECLAIM_SHRINK_BIT)
349 #define MEM_CGROUP_RECLAIM_SOFT_BIT	0x2
350 #define MEM_CGROUP_RECLAIM_SOFT		(1 << MEM_CGROUP_RECLAIM_SOFT_BIT)
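/*
 * Illustrative use: passing MEM_CGROUP_RECLAIM_NOSWAP |
 * MEM_CGROUP_RECLAIM_SHRINK to mem_cgroup_hierarchical_reclaim() requests a
 * swapless pass that returns after the first reclaim attempt (see the
 * "noswap"/"shrink" decoding in that function).
 */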
351 
352 static void mem_cgroup_get(struct mem_cgroup *mem);
353 static void mem_cgroup_put(struct mem_cgroup *mem);
354 static struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *mem);
355 static void drain_all_stock_async(void);
356 
357 static struct mem_cgroup_per_zone *
358 mem_cgroup_zoneinfo(struct mem_cgroup *mem, int nid, int zid)
359 {
360 	return &mem->info.nodeinfo[nid]->zoneinfo[zid];
361 }
362 
363 struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *mem)
364 {
365 	return &mem->css;
366 }
367 
368 static struct mem_cgroup_per_zone *
369 page_cgroup_zoneinfo(struct mem_cgroup *mem, struct page *page)
370 {
371 	int nid = page_to_nid(page);
372 	int zid = page_zonenum(page);
373 
374 	return mem_cgroup_zoneinfo(mem, nid, zid);
375 }
376 
377 static struct mem_cgroup_tree_per_zone *
378 soft_limit_tree_node_zone(int nid, int zid)
379 {
380 	return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
381 }
382 
383 static struct mem_cgroup_tree_per_zone *
384 soft_limit_tree_from_page(struct page *page)
385 {
386 	int nid = page_to_nid(page);
387 	int zid = page_zonenum(page);
388 
389 	return &soft_limit_tree.rb_tree_per_node[nid]->rb_tree_per_zone[zid];
390 }
391 
392 static void
393 __mem_cgroup_insert_exceeded(struct mem_cgroup *mem,
394 				struct mem_cgroup_per_zone *mz,
395 				struct mem_cgroup_tree_per_zone *mctz,
396 				unsigned long long new_usage_in_excess)
397 {
398 	struct rb_node **p = &mctz->rb_root.rb_node;
399 	struct rb_node *parent = NULL;
400 	struct mem_cgroup_per_zone *mz_node;
401 
402 	if (mz->on_tree)
403 		return;
404 
405 	mz->usage_in_excess = new_usage_in_excess;
406 	if (!mz->usage_in_excess)
407 		return;
408 	while (*p) {
409 		parent = *p;
410 		mz_node = rb_entry(parent, struct mem_cgroup_per_zone,
411 					tree_node);
412 		if (mz->usage_in_excess < mz_node->usage_in_excess)
413 			p = &(*p)->rb_left;
414 		/*
415 		 * We can't avoid mem cgroups that are over their soft
416 		 * limit by the same amount
417 		 */
418 		else if (mz->usage_in_excess >= mz_node->usage_in_excess)
419 			p = &(*p)->rb_right;
420 	}
421 	rb_link_node(&mz->tree_node, parent, p);
422 	rb_insert_color(&mz->tree_node, &mctz->rb_root);
423 	mz->on_tree = true;
424 }
425 
426 static void
427 __mem_cgroup_remove_exceeded(struct mem_cgroup *mem,
428 				struct mem_cgroup_per_zone *mz,
429 				struct mem_cgroup_tree_per_zone *mctz)
430 {
431 	if (!mz->on_tree)
432 		return;
433 	rb_erase(&mz->tree_node, &mctz->rb_root);
434 	mz->on_tree = false;
435 }
436 
437 static void
438 mem_cgroup_remove_exceeded(struct mem_cgroup *mem,
439 				struct mem_cgroup_per_zone *mz,
440 				struct mem_cgroup_tree_per_zone *mctz)
441 {
442 	spin_lock(&mctz->lock);
443 	__mem_cgroup_remove_exceeded(mem, mz, mctz);
444 	spin_unlock(&mctz->lock);
445 }
446 
447 
448 static void mem_cgroup_update_tree(struct mem_cgroup *mem, struct page *page)
449 {
450 	unsigned long long excess;
451 	struct mem_cgroup_per_zone *mz;
452 	struct mem_cgroup_tree_per_zone *mctz;
453 	int nid = page_to_nid(page);
454 	int zid = page_zonenum(page);
455 	mctz = soft_limit_tree_from_page(page);
456 
457 	/*
458 	 * Necessary to update all ancestors when hierarchy is used,
459 	 * because their event counter is not touched.
460 	 */
461 	for (; mem; mem = parent_mem_cgroup(mem)) {
462 		mz = mem_cgroup_zoneinfo(mem, nid, zid);
463 		excess = res_counter_soft_limit_excess(&mem->res);
464 		/*
465 		 * We have to update the tree if mz is on RB-tree or
466 		 * mem is over its softlimit.
467 		 */
468 		if (excess || mz->on_tree) {
469 			spin_lock(&mctz->lock);
470 			/* if on-tree, remove it */
471 			if (mz->on_tree)
472 				__mem_cgroup_remove_exceeded(mem, mz, mctz);
473 			/*
474 			 * Insert again. mz->usage_in_excess will be updated.
475 			 * If excess is 0, no tree ops.
476 			 */
477 			__mem_cgroup_insert_exceeded(mem, mz, mctz, excess);
478 			spin_unlock(&mctz->lock);
479 		}
480 	}
481 }
482 
483 static void mem_cgroup_remove_from_trees(struct mem_cgroup *mem)
484 {
485 	int node, zone;
486 	struct mem_cgroup_per_zone *mz;
487 	struct mem_cgroup_tree_per_zone *mctz;
488 
489 	for_each_node_state(node, N_POSSIBLE) {
490 		for (zone = 0; zone < MAX_NR_ZONES; zone++) {
491 			mz = mem_cgroup_zoneinfo(mem, node, zone);
492 			mctz = soft_limit_tree_node_zone(node, zone);
493 			mem_cgroup_remove_exceeded(mem, mz, mctz);
494 		}
495 	}
496 }
497 
498 static struct mem_cgroup_per_zone *
499 __mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
500 {
501 	struct rb_node *rightmost = NULL;
502 	struct mem_cgroup_per_zone *mz;
503 
504 retry:
505 	mz = NULL;
506 	rightmost = rb_last(&mctz->rb_root);
507 	if (!rightmost)
508 		goto done;		/* Nothing to reclaim from */
509 
510 	mz = rb_entry(rightmost, struct mem_cgroup_per_zone, tree_node);
511 	/*
512 	 * Remove the node now but someone else can add it back,
513 	 * we will add it back at the end of reclaim to its correct
514 	 * position in the tree.
515 	 */
516 	__mem_cgroup_remove_exceeded(mz->mem, mz, mctz);
517 	if (!res_counter_soft_limit_excess(&mz->mem->res) ||
518 		!css_tryget(&mz->mem->css))
519 		goto retry;
520 done:
521 	return mz;
522 }
523 
524 static struct mem_cgroup_per_zone *
525 mem_cgroup_largest_soft_limit_node(struct mem_cgroup_tree_per_zone *mctz)
526 {
527 	struct mem_cgroup_per_zone *mz;
528 
529 	spin_lock(&mctz->lock);
530 	mz = __mem_cgroup_largest_soft_limit_node(mctz);
531 	spin_unlock(&mctz->lock);
532 	return mz;
533 }
534 
535 /*
536  * Implementation Note: reading percpu statistics for memcg.
537  *
538  * Both vmstat[] and percpu_counter have thresholds and do periodic
539  * synchronization to implement a "quick" read. There is a trade-off between
540  * reading cost and precision of the value, so we may have a chance to
541  * implement a periodic synchronization of the counters in memcg.
542  *
543  * But this _read() function is used for the user interface now. The user
544  * accounts memory usage per memory cgroup and _always_ requires an exact
545  * value. Even if we provided a quick-and-fuzzy read, we would still have
546  * to visit all online cpus and compute the sum. So, for now, unnecessary
547  * synchronization is not implemented. (It is implemented only for cpu hotplug.)
548  *
549  * If there are kernel-internal users which can make use of a not-exact
550  * value, and reading all cpu values becomes a performance bottleneck in some
551  * common workload, thresholds and synchronization as in vmstat[] should be
552  * implemented.
553  */
554 static long mem_cgroup_read_stat(struct mem_cgroup *mem,
555 				 enum mem_cgroup_stat_index idx)
556 {
557 	long val = 0;
558 	int cpu;
559 
560 	get_online_cpus();
561 	for_each_online_cpu(cpu)
562 		val += per_cpu(mem->stat->count[idx], cpu);
563 #ifdef CONFIG_HOTPLUG_CPU
564 	spin_lock(&mem->pcp_counter_lock);
565 	val += mem->nocpu_base.count[idx];
566 	spin_unlock(&mem->pcp_counter_lock);
567 #endif
568 	put_online_cpus();
569 	return val;
570 }
571 
572 static long mem_cgroup_local_usage(struct mem_cgroup *mem)
573 {
574 	long ret;
575 
576 	ret = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_RSS);
577 	ret += mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_CACHE);
578 	return ret;
579 }
580 
581 static void mem_cgroup_swap_statistics(struct mem_cgroup *mem,
582 					 bool charge)
583 {
584 	int val = (charge) ? 1 : -1;
585 	this_cpu_add(mem->stat->count[MEM_CGROUP_STAT_SWAPOUT], val);
586 }
587 
588 static unsigned long mem_cgroup_read_events(struct mem_cgroup *mem,
589 					    enum mem_cgroup_events_index idx)
590 {
591 	unsigned long val = 0;
592 	int cpu;
593 
594 	for_each_online_cpu(cpu)
595 		val += per_cpu(mem->stat->events[idx], cpu);
596 #ifdef CONFIG_HOTPLUG_CPU
597 	spin_lock(&mem->pcp_counter_lock);
598 	val += mem->nocpu_base.events[idx];
599 	spin_unlock(&mem->pcp_counter_lock);
600 #endif
601 	return val;
602 }
603 
604 static void mem_cgroup_charge_statistics(struct mem_cgroup *mem,
605 					 bool file, int nr_pages)
606 {
607 	preempt_disable();
608 
609 	if (file)
610 		__this_cpu_add(mem->stat->count[MEM_CGROUP_STAT_CACHE], nr_pages);
611 	else
612 		__this_cpu_add(mem->stat->count[MEM_CGROUP_STAT_RSS], nr_pages);
613 
614 	/* pagein of a big page is an event. So, ignore page size */
615 	if (nr_pages > 0)
616 		__this_cpu_inc(mem->stat->events[MEM_CGROUP_EVENTS_PGPGIN]);
617 	else {
618 		__this_cpu_inc(mem->stat->events[MEM_CGROUP_EVENTS_PGPGOUT]);
619 		nr_pages = -nr_pages; /* for event */
620 	}
621 
622 	__this_cpu_add(mem->stat->events[MEM_CGROUP_EVENTS_COUNT], nr_pages);
623 
624 	preempt_enable();
625 }
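/*
 * Worked example (illustrative): uncharging a 512-page transparent huge
 * page calls this with nr_pages == -512.  PGPGOUT is bumped once (a big
 * page is one event, page size ignored), while EVENTS_COUNT grows by 512
 * so that memcg_check_events() fires in proportion to pages.
 */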
626 
627 static unsigned long mem_cgroup_get_local_zonestat(struct mem_cgroup *mem,
628 					enum lru_list idx)
629 {
630 	int nid, zid;
631 	struct mem_cgroup_per_zone *mz;
632 	u64 total = 0;
633 
634 	for_each_online_node(nid)
635 		for (zid = 0; zid < MAX_NR_ZONES; zid++) {
636 			mz = mem_cgroup_zoneinfo(mem, nid, zid);
637 			total += MEM_CGROUP_ZSTAT(mz, idx);
638 		}
639 	return total;
640 }
641 
642 static bool __memcg_event_check(struct mem_cgroup *mem, int target)
643 {
644 	unsigned long val, next;
645 
646 	val = this_cpu_read(mem->stat->events[MEM_CGROUP_EVENTS_COUNT]);
647 	next = this_cpu_read(mem->stat->targets[target]);
648 	/* from time_after() in jiffies.h */
649 	return ((long)next - (long)val < 0);
650 }
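/*
 * Worked example (illustrative): if the target was armed at next == 128 and
 * EVENTS_COUNT has advanced to val == 130, then (long)next - (long)val ==
 * -2 < 0 and the check fires.  The signed subtraction keeps the comparison
 * correct across counter wraparound, exactly as time_after() does.
 */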
651 
652 static void __mem_cgroup_target_update(struct mem_cgroup *mem, int target)
653 {
654 	unsigned long val, next;
655 
656 	val = this_cpu_read(mem->stat->events[MEM_CGROUP_EVENTS_COUNT]);
657 
658 	switch (target) {
659 	case MEM_CGROUP_TARGET_THRESH:
660 		next = val + THRESHOLDS_EVENTS_TARGET;
661 		break;
662 	case MEM_CGROUP_TARGET_SOFTLIMIT:
663 		next = val + SOFTLIMIT_EVENTS_TARGET;
664 		break;
665 	default:
666 		return;
667 	}
668 
669 	this_cpu_write(mem->stat->targets[target], next);
670 }
671 
672 /*
673  * Check events in order.
674  *
675  */
676 static void memcg_check_events(struct mem_cgroup *mem, struct page *page)
677 {
678 	/* threshold event is triggered at a finer grain than the soft limit */
679 	if (unlikely(__memcg_event_check(mem, MEM_CGROUP_TARGET_THRESH))) {
680 		mem_cgroup_threshold(mem);
681 		__mem_cgroup_target_update(mem, MEM_CGROUP_TARGET_THRESH);
682 		if (unlikely(__memcg_event_check(mem,
683 			MEM_CGROUP_TARGET_SOFTLIMIT))){
684 			mem_cgroup_update_tree(mem, page);
685 			__mem_cgroup_target_update(mem,
686 				MEM_CGROUP_TARGET_SOFTLIMIT);
687 		}
688 	}
689 }
690 
691 static struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont)
692 {
693 	return container_of(cgroup_subsys_state(cont,
694 				mem_cgroup_subsys_id), struct mem_cgroup,
695 				css);
696 }
697 
698 struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
699 {
700 	/*
701 	 * mm_update_next_owner() may clear mm->owner to NULL
702 	 * if it races with swapoff, page migration, etc.
703 	 * So this can be called with p == NULL.
704 	 */
705 	if (unlikely(!p))
706 		return NULL;
707 
708 	return container_of(task_subsys_state(p, mem_cgroup_subsys_id),
709 				struct mem_cgroup, css);
710 }
711 
712 static struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
713 {
714 	struct mem_cgroup *mem = NULL;
715 
716 	if (!mm)
717 		return NULL;
718 	/*
719 	 * Because we have no locks, mm->owner may be moved to another
720 	 * cgroup. We use css_tryget() here even if this looks
721 	 * pessimistic (rather than adding locks here).
722 	 */
723 	rcu_read_lock();
724 	do {
725 		mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
726 		if (unlikely(!mem))
727 			break;
728 	} while (!css_tryget(&mem->css));
729 	rcu_read_unlock();
730 	return mem;
731 }
732 
733 /* The caller has to guarantee "mem" exists before calling this */
734 static struct mem_cgroup *mem_cgroup_start_loop(struct mem_cgroup *mem)
735 {
736 	struct cgroup_subsys_state *css;
737 	int found;
738 
739 	if (!mem) /* ROOT cgroup has the smallest ID */
740 		return root_mem_cgroup; /*css_put/get against root is ignored*/
741 	if (!mem->use_hierarchy) {
742 		if (css_tryget(&mem->css))
743 			return mem;
744 		return NULL;
745 	}
746 	rcu_read_lock();
747 	/*
748 	 * search for the memory cgroup which has the smallest ID under the
749 	 * given ROOT cgroup. (ID >= 1)
750 	 */
751 	css = css_get_next(&mem_cgroup_subsys, 1, &mem->css, &found);
752 	if (css && css_tryget(css))
753 		mem = container_of(css, struct mem_cgroup, css);
754 	else
755 		mem = NULL;
756 	rcu_read_unlock();
757 	return mem;
758 }
759 
760 static struct mem_cgroup *mem_cgroup_get_next(struct mem_cgroup *iter,
761 					struct mem_cgroup *root,
762 					bool cond)
763 {
764 	int nextid = css_id(&iter->css) + 1;
765 	int found;
766 	int hierarchy_used;
767 	struct cgroup_subsys_state *css;
768 
769 	hierarchy_used = iter->use_hierarchy;
770 
771 	css_put(&iter->css);
772 	/* If no ROOT, walk all, ignore hierarchy */
773 	if (!cond || (root && !hierarchy_used))
774 		return NULL;
775 
776 	if (!root)
777 		root = root_mem_cgroup;
778 
779 	do {
780 		iter = NULL;
781 		rcu_read_lock();
782 
783 		css = css_get_next(&mem_cgroup_subsys, nextid,
784 				&root->css, &found);
785 		if (css && css_tryget(css))
786 			iter = container_of(css, struct mem_cgroup, css);
787 		rcu_read_unlock();
788 		/* If css is NULL, no more cgroups will be found */
789 		nextid = found + 1;
790 	} while (css && !iter);
791 
792 	return iter;
793 }
794 /*
795  * for_each_mem_cgroup_tree() visits all cgroups under the tree. Be careful:
796  * breaking out with "break" is not allowed, since we hold a reference count.
797  * Instead, set "cond" to false and "continue" to exit the loop (see below).
798  */
799 #define for_each_mem_cgroup_tree_cond(iter, root, cond)	\
800 	for (iter = mem_cgroup_start_loop(root);\
801 	     iter != NULL;\
802 	     iter = mem_cgroup_get_next(iter, root, cond))
803 
804 #define for_each_mem_cgroup_tree(iter, root) \
805 	for_each_mem_cgroup_tree_cond(iter, root, true)
806 
807 #define for_each_mem_cgroup_all(iter) \
808 	for_each_mem_cgroup_tree_cond(iter, NULL, true)
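/*
 * Usage sketch (illustrative; "should_stop" is a hypothetical predicate).
 * An early exit must go through "cond" so that the final call to
 * mem_cgroup_get_next() can still drop the css reference:
 *
 *	bool cond = true;
 *	struct mem_cgroup *iter;
 *
 *	for_each_mem_cgroup_tree_cond(iter, root, cond)
 *		if (should_stop(iter))
 *			cond = false;	(do not "break")
 */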
809 
810 
811 static inline bool mem_cgroup_is_root(struct mem_cgroup *mem)
812 {
813 	return (mem == root_mem_cgroup);
814 }
815 
816 /*
817  * Following LRU functions are allowed to be used without PCG_LOCK.
818  * Operations are called by routine of global LRU independently from memcg.
819  * What we have to take care of here is the validity of pc->mem_cgroup.
820  *
821  * Changes to pc->mem_cgroup happens when
822  * 1. charge
823  * 2. moving account
824  * In the typical case, "charge" is done before add-to-lru. The exception is SwapCache.
825  * It is added to LRU before charge.
826  * If PCG_USED bit is not set, page_cgroup is not added to this private LRU.
827  * When moving account, the page is not on LRU. It's isolated.
828  */
829 
830 void mem_cgroup_del_lru_list(struct page *page, enum lru_list lru)
831 {
832 	struct page_cgroup *pc;
833 	struct mem_cgroup_per_zone *mz;
834 
835 	if (mem_cgroup_disabled())
836 		return;
837 	pc = lookup_page_cgroup(page);
838 	/* can happen while we handle swapcache. */
839 	if (!TestClearPageCgroupAcctLRU(pc))
840 		return;
841 	VM_BUG_ON(!pc->mem_cgroup);
842 	/*
843 	 * We don't check PCG_USED bit. It's cleared when the "page" is finally
844 	 * removed from global LRU.
845 	 */
846 	mz = page_cgroup_zoneinfo(pc->mem_cgroup, page);
847 	/* huge page split is done under lru_lock. so, we have no races. */
848 	MEM_CGROUP_ZSTAT(mz, lru) -= 1 << compound_order(page);
849 	if (mem_cgroup_is_root(pc->mem_cgroup))
850 		return;
851 	VM_BUG_ON(list_empty(&pc->lru));
852 	list_del_init(&pc->lru);
853 }
854 
855 void mem_cgroup_del_lru(struct page *page)
856 {
857 	mem_cgroup_del_lru_list(page, page_lru(page));
858 }
859 
860 /*
861  * Writeback is about to end against a page which has been marked for immediate
862  * reclaim.  If it still appears to be reclaimable, move it to the tail of the
863  * inactive list.
864  */
865 void mem_cgroup_rotate_reclaimable_page(struct page *page)
866 {
867 	struct mem_cgroup_per_zone *mz;
868 	struct page_cgroup *pc;
869 	enum lru_list lru = page_lru(page);
870 
871 	if (mem_cgroup_disabled())
872 		return;
873 
874 	pc = lookup_page_cgroup(page);
875 	/* unused or root page is not rotated. */
876 	if (!PageCgroupUsed(pc))
877 		return;
878 	/* Ensure pc->mem_cgroup is visible after reading PCG_USED. */
879 	smp_rmb();
880 	if (mem_cgroup_is_root(pc->mem_cgroup))
881 		return;
882 	mz = page_cgroup_zoneinfo(pc->mem_cgroup, page);
883 	list_move_tail(&pc->lru, &mz->lists[lru]);
884 }
885 
886 void mem_cgroup_rotate_lru_list(struct page *page, enum lru_list lru)
887 {
888 	struct mem_cgroup_per_zone *mz;
889 	struct page_cgroup *pc;
890 
891 	if (mem_cgroup_disabled())
892 		return;
893 
894 	pc = lookup_page_cgroup(page);
895 	/* unused or root page is not rotated. */
896 	if (!PageCgroupUsed(pc))
897 		return;
898 	/* Ensure pc->mem_cgroup is visible after reading PCG_USED. */
899 	smp_rmb();
900 	if (mem_cgroup_is_root(pc->mem_cgroup))
901 		return;
902 	mz = page_cgroup_zoneinfo(pc->mem_cgroup, page);
903 	list_move(&pc->lru, &mz->lists[lru]);
904 }
905 
906 void mem_cgroup_add_lru_list(struct page *page, enum lru_list lru)
907 {
908 	struct page_cgroup *pc;
909 	struct mem_cgroup_per_zone *mz;
910 
911 	if (mem_cgroup_disabled())
912 		return;
913 	pc = lookup_page_cgroup(page);
914 	VM_BUG_ON(PageCgroupAcctLRU(pc));
915 	if (!PageCgroupUsed(pc))
916 		return;
917 	/* Ensure pc->mem_cgroup is visible after reading PCG_USED. */
918 	smp_rmb();
919 	mz = page_cgroup_zoneinfo(pc->mem_cgroup, page);
920 	/* huge page split is done under lru_lock. so, we have no races. */
921 	MEM_CGROUP_ZSTAT(mz, lru) += 1 << compound_order(page);
922 	SetPageCgroupAcctLRU(pc);
923 	if (mem_cgroup_is_root(pc->mem_cgroup))
924 		return;
925 	list_add(&pc->lru, &mz->lists[lru]);
926 }
927 
928 /*
929  * When handling SwapCache and other FUSE stuff, pc->mem_cgroup may be changed
930  * while it's linked to the LRU, because the page may be reused after it's
931  * fully uncharged. To handle that, unlink page_cgroup from the LRU when
932  * charging it again. This is done under lock_page(); zone->lru_lock is never held.
933  */
934 static void mem_cgroup_lru_del_before_commit(struct page *page)
935 {
936 	unsigned long flags;
937 	struct zone *zone = page_zone(page);
938 	struct page_cgroup *pc = lookup_page_cgroup(page);
939 
940 	/*
941 	 * Doing this check without taking ->lru_lock seems wrong but it
942 	 * is safe, because if page_cgroup's USED bit is unset, the page
943 	 * will not be added to any memcg's LRU. If page_cgroup's USED bit is
944 	 * set, the commit after this will fail anyway.
945 	 * All of this charge/uncharge is done under some mutual exclusion,
946 	 * so we don't need to take care of changes in the USED bit.
947 	 */
948 	if (likely(!PageLRU(page)))
949 		return;
950 
951 	spin_lock_irqsave(&zone->lru_lock, flags);
952 	/*
953 	 * Forget old LRU when this page_cgroup is *not* used. This Used bit
954 	 * is guarded by lock_page() because the page is SwapCache.
955 	 */
956 	if (!PageCgroupUsed(pc))
957 		mem_cgroup_del_lru_list(page, page_lru(page));
958 	spin_unlock_irqrestore(&zone->lru_lock, flags);
959 }
960 
961 static void mem_cgroup_lru_add_after_commit(struct page *page)
962 {
963 	unsigned long flags;
964 	struct zone *zone = page_zone(page);
965 	struct page_cgroup *pc = lookup_page_cgroup(page);
966 
967 	/* take care of the case where the page is added to the LRU while we commit it */
968 	if (likely(!PageLRU(page)))
969 		return;
970 	spin_lock_irqsave(&zone->lru_lock, flags);
971 	/* link when the page is linked to LRU but page_cgroup isn't */
972 	if (PageLRU(page) && !PageCgroupAcctLRU(pc))
973 		mem_cgroup_add_lru_list(page, page_lru(page));
974 	spin_unlock_irqrestore(&zone->lru_lock, flags);
975 }
976 
977 
978 void mem_cgroup_move_lists(struct page *page,
979 			   enum lru_list from, enum lru_list to)
980 {
981 	if (mem_cgroup_disabled())
982 		return;
983 	mem_cgroup_del_lru_list(page, from);
984 	mem_cgroup_add_lru_list(page, to);
985 }
986 
987 int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem)
988 {
989 	int ret;
990 	struct mem_cgroup *curr = NULL;
991 	struct task_struct *p;
992 
993 	p = find_lock_task_mm(task);
994 	if (!p)
995 		return 0;
996 	curr = try_get_mem_cgroup_from_mm(p->mm);
997 	task_unlock(p);
998 	if (!curr)
999 		return 0;
1000 	/*
1001 	 * We should check use_hierarchy of "mem", not "curr". Checking
1002 	 * use_hierarchy of "curr" here would make this function return true if
1003 	 * hierarchy is enabled in "curr" and "curr" is a child of "mem" in the
1004 	 * *cgroup* hierarchy (even if use_hierarchy is disabled in "mem").
1005 	 */
1006 	if (mem->use_hierarchy)
1007 		ret = css_is_ancestor(&curr->css, &mem->css);
1008 	else
1009 		ret = (curr == mem);
1010 	css_put(&curr->css);
1011 	return ret;
1012 }
1013 
1014 static int calc_inactive_ratio(struct mem_cgroup *memcg, unsigned long *present_pages)
1015 {
1016 	unsigned long active;
1017 	unsigned long inactive;
1018 	unsigned long gb;
1019 	unsigned long inactive_ratio;
1020 
1021 	inactive = mem_cgroup_get_local_zonestat(memcg, LRU_INACTIVE_ANON);
1022 	active = mem_cgroup_get_local_zonestat(memcg, LRU_ACTIVE_ANON);
1023 
1024 	gb = (inactive + active) >> (30 - PAGE_SHIFT);
1025 	if (gb)
1026 		inactive_ratio = int_sqrt(10 * gb);
1027 	else
1028 		inactive_ratio = 1;
1029 
1030 	if (present_pages) {
1031 		present_pages[0] = inactive;
1032 		present_pages[1] = active;
1033 	}
1034 
1035 	return inactive_ratio;
1036 }
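/*
 * Worked example (illustrative): with 3 GB of inactive and 1 GB of active
 * anon, gb == 4 and inactive_ratio == int_sqrt(40) == 6.  The inactive
 * anon list is then reported "low" only while inactive * 6 < active,
 * mirroring the global inactive_anon heuristic.
 */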
1037 
1038 int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg)
1039 {
1040 	unsigned long active;
1041 	unsigned long inactive;
1042 	unsigned long present_pages[2];
1043 	unsigned long inactive_ratio;
1044 
1045 	inactive_ratio = calc_inactive_ratio(memcg, present_pages);
1046 
1047 	inactive = present_pages[0];
1048 	active = present_pages[1];
1049 
1050 	if (inactive * inactive_ratio < active)
1051 		return 1;
1052 
1053 	return 0;
1054 }
1055 
1056 int mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg)
1057 {
1058 	unsigned long active;
1059 	unsigned long inactive;
1060 
1061 	inactive = mem_cgroup_get_local_zonestat(memcg, LRU_INACTIVE_FILE);
1062 	active = mem_cgroup_get_local_zonestat(memcg, LRU_ACTIVE_FILE);
1063 
1064 	return (active > inactive);
1065 }
1066 
1067 unsigned long mem_cgroup_zone_nr_pages(struct mem_cgroup *memcg,
1068 				       struct zone *zone,
1069 				       enum lru_list lru)
1070 {
1071 	int nid = zone_to_nid(zone);
1072 	int zid = zone_idx(zone);
1073 	struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(memcg, nid, zid);
1074 
1075 	return MEM_CGROUP_ZSTAT(mz, lru);
1076 }
1077 
1078 struct zone_reclaim_stat *mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg,
1079 						      struct zone *zone)
1080 {
1081 	int nid = zone_to_nid(zone);
1082 	int zid = zone_idx(zone);
1083 	struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(memcg, nid, zid);
1084 
1085 	return &mz->reclaim_stat;
1086 }
1087 
1088 struct zone_reclaim_stat *
1089 mem_cgroup_get_reclaim_stat_from_page(struct page *page)
1090 {
1091 	struct page_cgroup *pc;
1092 	struct mem_cgroup_per_zone *mz;
1093 
1094 	if (mem_cgroup_disabled())
1095 		return NULL;
1096 
1097 	pc = lookup_page_cgroup(page);
1098 	if (!PageCgroupUsed(pc))
1099 		return NULL;
1100 	/* Ensure pc->mem_cgroup is visible after reading PCG_USED. */
1101 	smp_rmb();
1102 	mz = page_cgroup_zoneinfo(pc->mem_cgroup, page);
1103 	return &mz->reclaim_stat;
1104 }
1105 
1106 unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
1107 					struct list_head *dst,
1108 					unsigned long *scanned, int order,
1109 					int mode, struct zone *z,
1110 					struct mem_cgroup *mem_cont,
1111 					int active, int file)
1112 {
1113 	unsigned long nr_taken = 0;
1114 	struct page *page;
1115 	unsigned long scan;
1116 	LIST_HEAD(pc_list);
1117 	struct list_head *src;
1118 	struct page_cgroup *pc, *tmp;
1119 	int nid = zone_to_nid(z);
1120 	int zid = zone_idx(z);
1121 	struct mem_cgroup_per_zone *mz;
1122 	int lru = LRU_FILE * file + active;
1123 	int ret;
1124 
1125 	BUG_ON(!mem_cont);
1126 	mz = mem_cgroup_zoneinfo(mem_cont, nid, zid);
1127 	src = &mz->lists[lru];
1128 
1129 	scan = 0;
1130 	list_for_each_entry_safe_reverse(pc, tmp, src, lru) {
1131 		if (scan >= nr_to_scan)
1132 			break;
1133 
1134 		if (unlikely(!PageCgroupUsed(pc)))
1135 			continue;
1136 
1137 		page = lookup_cgroup_page(pc);
1138 
1139 		if (unlikely(!PageLRU(page)))
1140 			continue;
1141 
1142 		scan++;
1143 		ret = __isolate_lru_page(page, mode, file);
1144 		switch (ret) {
1145 		case 0:
1146 			list_move(&page->lru, dst);
1147 			mem_cgroup_del_lru(page);
1148 			nr_taken += hpage_nr_pages(page);
1149 			break;
1150 		case -EBUSY:
1151 			/* we don't affect global LRU but rotate in our LRU */
1152 			mem_cgroup_rotate_lru_list(page, page_lru(page));
1153 			break;
1154 		default:
1155 			break;
1156 		}
1157 	}
1158 
1159 	*scanned = scan;
1160 
1161 	trace_mm_vmscan_memcg_isolate(0, nr_to_scan, scan, nr_taken,
1162 				      0, 0, 0, mode);
1163 
1164 	return nr_taken;
1165 }
1166 
1167 #define mem_cgroup_from_res_counter(counter, member)	\
1168 	container_of(counter, struct mem_cgroup, member)
1169 
1170 /**
1171  * mem_cgroup_margin - calculate chargeable space of a memory cgroup
1172  * @mem: the memory cgroup
1173  *
1174  * Returns the maximum amount of memory @mem can be charged with, in
1175  * pages.
1176  */
1177 static unsigned long mem_cgroup_margin(struct mem_cgroup *mem)
1178 {
1179 	unsigned long long margin;
1180 
1181 	margin = res_counter_margin(&mem->res);
1182 	if (do_swap_account)
1183 		margin = min(margin, res_counter_margin(&mem->memsw));
1184 	return margin >> PAGE_SHIFT;
1185 }
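/*
 * Worked example (illustrative): with res.limit == 100 MB and res.usage ==
 * 80 MB, res_counter_margin() yields 20 MB.  If swap accounting is on and
 * the memsw margin is only 8 MB, the smaller value wins and, assuming 4 KB
 * pages, the function returns 8 MB >> PAGE_SHIFT == 2048 pages.
 */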
1186 
1187 static unsigned int get_swappiness(struct mem_cgroup *memcg)
1188 {
1189 	struct cgroup *cgrp = memcg->css.cgroup;
1190 
1191 	/* root ? */
1192 	if (cgrp->parent == NULL)
1193 		return vm_swappiness;
1194 
1195 	return memcg->swappiness;
1196 }
1197 
1198 static void mem_cgroup_start_move(struct mem_cgroup *mem)
1199 {
1200 	int cpu;
1201 
1202 	get_online_cpus();
1203 	spin_lock(&mem->pcp_counter_lock);
1204 	for_each_online_cpu(cpu)
1205 		per_cpu(mem->stat->count[MEM_CGROUP_ON_MOVE], cpu) += 1;
1206 	mem->nocpu_base.count[MEM_CGROUP_ON_MOVE] += 1;
1207 	spin_unlock(&mem->pcp_counter_lock);
1208 	put_online_cpus();
1209 
1210 	synchronize_rcu();
1211 }
1212 
1213 static void mem_cgroup_end_move(struct mem_cgroup *mem)
1214 {
1215 	int cpu;
1216 
1217 	if (!mem)
1218 		return;
1219 	get_online_cpus();
1220 	spin_lock(&mem->pcp_counter_lock);
1221 	for_each_online_cpu(cpu)
1222 		per_cpu(mem->stat->count[MEM_CGROUP_ON_MOVE], cpu) -= 1;
1223 	mem->nocpu_base.count[MEM_CGROUP_ON_MOVE] -= 1;
1224 	spin_unlock(&mem->pcp_counter_lock);
1225 	put_online_cpus();
1226 }
1227 /*
1228  * Two routines for checking whether "mem" is under move_account() or not.
1229  *
1230  * mem_cgroup_stealed() - checks whether a cgroup is mc.from or not. This is
1231  *			  used for avoiding races in accounting. If true,
1232  *			  pc->mem_cgroup may be overwritten.
1233  *
1234  * mem_cgroup_under_move() - checks whether a cgroup is mc.from or mc.to or
1235  *			  under the hierarchy of moving cgroups. This is for
1236  *			  waiting at high memory pressure caused by "move".
1237  */
1238 
1239 static bool mem_cgroup_stealed(struct mem_cgroup *mem)
1240 {
1241 	VM_BUG_ON(!rcu_read_lock_held());
1242 	return this_cpu_read(mem->stat->count[MEM_CGROUP_ON_MOVE]) > 0;
1243 }
1244 
1245 static bool mem_cgroup_under_move(struct mem_cgroup *mem)
1246 {
1247 	struct mem_cgroup *from;
1248 	struct mem_cgroup *to;
1249 	bool ret = false;
1250 	/*
1251 	 * Unlike task_move routines, we access mc.to, mc.from not under
1252 	 * mutual exclusion by cgroup_mutex. Here, we take spinlock instead.
1253 	 */
1254 	spin_lock(&mc.lock);
1255 	from = mc.from;
1256 	to = mc.to;
1257 	if (!from)
1258 		goto unlock;
1259 	if (from == mem || to == mem
1260 	    || (mem->use_hierarchy && css_is_ancestor(&from->css, &mem->css))
1261 	    || (mem->use_hierarchy && css_is_ancestor(&to->css,	&mem->css)))
1262 		ret = true;
1263 unlock:
1264 	spin_unlock(&mc.lock);
1265 	return ret;
1266 }
1267 
1268 static bool mem_cgroup_wait_acct_move(struct mem_cgroup *mem)
1269 {
1270 	if (mc.moving_task && current != mc.moving_task) {
1271 		if (mem_cgroup_under_move(mem)) {
1272 			DEFINE_WAIT(wait);
1273 			prepare_to_wait(&mc.waitq, &wait, TASK_INTERRUPTIBLE);
1274 			/* moving charge context might have finished. */
1275 			if (mc.moving_task)
1276 				schedule();
1277 			finish_wait(&mc.waitq, &wait);
1278 			return true;
1279 		}
1280 	}
1281 	return false;
1282 }
1283 
1284 /**
1285  * mem_cgroup_print_oom_info: Called from OOM with tasklist_lock held in read mode.
1286  * @memcg: The memory cgroup that went over limit
1287  * @p: Task that is going to be killed
1288  *
1289  * NOTE: @memcg and @p's mem_cgroup can be different when hierarchy is
1290  * enabled
1291  */
1292 void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
1293 {
1294 	struct cgroup *task_cgrp;
1295 	struct cgroup *mem_cgrp;
1296 	/*
1297 	 * Need a buffer in BSS, can't rely on allocations. The code relies
1298 	 * on the assumption that OOM is serialized for memory controller.
1299 	 * If this assumption is broken, revisit this code.
1300 	 */
1301 	static char memcg_name[PATH_MAX];
1302 	int ret;
1303 
1304 	if (!memcg || !p)
1305 		return;
1306 
1307 
1308 	rcu_read_lock();
1309 
1310 	mem_cgrp = memcg->css.cgroup;
1311 	task_cgrp = task_cgroup(p, mem_cgroup_subsys_id);
1312 
1313 	ret = cgroup_path(task_cgrp, memcg_name, PATH_MAX);
1314 	if (ret < 0) {
1315 		/*
1316 		 * Unfortunately, we are unable to convert to a useful name,
1317 		 * but we'll still print out the usage information.
1318 		 */
1319 		rcu_read_unlock();
1320 		goto done;
1321 	}
1322 	rcu_read_unlock();
1323 
1324 	printk(KERN_INFO "Task in %s killed", memcg_name);
1325 
1326 	rcu_read_lock();
1327 	ret = cgroup_path(mem_cgrp, memcg_name, PATH_MAX);
1328 	if (ret < 0) {
1329 		rcu_read_unlock();
1330 		goto done;
1331 	}
1332 	rcu_read_unlock();
1333 
1334 	/*
1335 	 * Continues from above, so we don't need a KERN_ level
1336 	 */
1337 	printk(KERN_CONT " as a result of limit of %s\n", memcg_name);
1338 done:
1339 
1340 	printk(KERN_INFO "memory: usage %llukB, limit %llukB, failcnt %llu\n",
1341 		res_counter_read_u64(&memcg->res, RES_USAGE) >> 10,
1342 		res_counter_read_u64(&memcg->res, RES_LIMIT) >> 10,
1343 		res_counter_read_u64(&memcg->res, RES_FAILCNT));
1344 	printk(KERN_INFO "memory+swap: usage %llukB, limit %llukB, "
1345 		"failcnt %llu\n",
1346 		res_counter_read_u64(&memcg->memsw, RES_USAGE) >> 10,
1347 		res_counter_read_u64(&memcg->memsw, RES_LIMIT) >> 10,
1348 		res_counter_read_u64(&memcg->memsw, RES_FAILCNT));
1349 }
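/*
 * Example output (hypothetical numbers; ">> 10" converts bytes to kB):
 *
 *	Task in /parent/child killed as a result of limit of /parent
 *	memory: usage 524288kB, limit 524288kB, failcnt 12
 *	memory+swap: usage 1048576kB, limit 2097152kB, failcnt 0
 */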
1350 
1351 /*
1352  * This function returns the number of memcgs under the hierarchy tree.
1353  * Returns 1 (self count) if there are no children.
1354  */
1355 static int mem_cgroup_count_children(struct mem_cgroup *mem)
1356 {
1357 	int num = 0;
1358 	struct mem_cgroup *iter;
1359 
1360 	for_each_mem_cgroup_tree(iter, mem)
1361 		num++;
1362 	return num;
1363 }
1364 
1365 /*
1366  * Return the memory (and swap, if configured) limit for a memcg.
1367  */
1368 u64 mem_cgroup_get_limit(struct mem_cgroup *memcg)
1369 {
1370 	u64 limit;
1371 	u64 memsw;
1372 
1373 	limit = res_counter_read_u64(&memcg->res, RES_LIMIT);
1374 	limit += total_swap_pages << PAGE_SHIFT;
1375 
1376 	memsw = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
1377 	/*
1378 	 * If memsw is finite and limits the amount of swap space available
1379 	 * to this memcg, return that limit.
1380 	 */
1381 	return min(limit, memsw);
1382 }
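/*
 * Worked example (illustrative): with a 1 GB memory limit, 2 GB of total
 * swap and memsw left at its (effectively unlimited) default, "limit"
 * becomes 3 GB and min(limit, memsw) returns 3 GB.  If memsw were capped
 * at 1.5 GB instead, that smaller value would be returned.
 */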
1383 
1384 /*
1385  * Visit the first child (need not be the first child as per the ordering
1386  * of the cgroup list, since we track last_scanned_child) of @mem and use
1387  * that to reclaim free pages from.
1388  */
1389 static struct mem_cgroup *
1390 mem_cgroup_select_victim(struct mem_cgroup *root_mem)
1391 {
1392 	struct mem_cgroup *ret = NULL;
1393 	struct cgroup_subsys_state *css;
1394 	int nextid, found;
1395 
1396 	if (!root_mem->use_hierarchy) {
1397 		css_get(&root_mem->css);
1398 		ret = root_mem;
1399 	}
1400 
1401 	while (!ret) {
1402 		rcu_read_lock();
1403 		nextid = root_mem->last_scanned_child + 1;
1404 		css = css_get_next(&mem_cgroup_subsys, nextid, &root_mem->css,
1405 				   &found);
1406 		if (css && css_tryget(css))
1407 			ret = container_of(css, struct mem_cgroup, css);
1408 
1409 		rcu_read_unlock();
1410 		/* Updates scanning parameter */
1411 		if (!css) {
1412 			/* this means start scan from ID:1 */
1413 			root_mem->last_scanned_child = 0;
1414 		} else
1415 			root_mem->last_scanned_child = found;
1416 	}
1417 
1418 	return ret;
1419 }
1420 
1421 /*
1422  * Scan the hierarchy if needed to reclaim memory. We remember the last child
1423  * we reclaimed from, so that we don't end up penalizing one child extensively
1424  * based on its position in the children list.
1425  *
1426  * root_mem is the original ancestor that we've been reclaiming from.
1427  *
1428  * We give up and return to the caller when we visit root_mem twice.
1429  * (other groups can be removed while we're walking....)
1430  *
1431  * If shrink==true, this returns immediately to avoid freeing too much.
1432  */
1433 static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem,
1434 						struct zone *zone,
1435 						gfp_t gfp_mask,
1436 						unsigned long reclaim_options)
1437 {
1438 	struct mem_cgroup *victim;
1439 	int ret, total = 0;
1440 	int loop = 0;
1441 	bool noswap = reclaim_options & MEM_CGROUP_RECLAIM_NOSWAP;
1442 	bool shrink = reclaim_options & MEM_CGROUP_RECLAIM_SHRINK;
1443 	bool check_soft = reclaim_options & MEM_CGROUP_RECLAIM_SOFT;
1444 	unsigned long excess;
1445 
1446 	excess = res_counter_soft_limit_excess(&root_mem->res) >> PAGE_SHIFT;
1447 
1448 	/* If memsw_is_minimum==1, swap-out is of no use. */
1449 	if (root_mem->memsw_is_minimum)
1450 		noswap = true;
1451 
1452 	while (1) {
1453 		victim = mem_cgroup_select_victim(root_mem);
1454 		if (victim == root_mem) {
1455 			loop++;
1456 			if (loop >= 1)
1457 				drain_all_stock_async();
1458 			if (loop >= 2) {
1459 				/*
1460 				 * If we have not been able to reclaim
1461 				 * anything, it might be because there are
1462 				 * no reclaimable pages under this hierarchy.
1463 				 */
1464 				if (!check_soft || !total) {
1465 					css_put(&victim->css);
1466 					break;
1467 				}
1468 				/*
1469 				 * We want to do more targeted reclaim.
1470 				 * excess >> 2 is not too excessive, so we don't
1471 				 * reclaim too much, nor so little that we keep
1472 				 * coming back to reclaim from this cgroup.
1473 				 */
1474 				if (total >= (excess >> 2) ||
1475 					(loop > MEM_CGROUP_MAX_RECLAIM_LOOPS)) {
1476 					css_put(&victim->css);
1477 					break;
1478 				}
1479 			}
1480 		}
1481 		if (!mem_cgroup_local_usage(victim)) {
1482 			/* this cgroup's local usage == 0 */
1483 			css_put(&victim->css);
1484 			continue;
1485 		}
1486 		/* we use swappiness of local cgroup */
1487 		if (check_soft)
1488 			ret = mem_cgroup_shrink_node_zone(victim, gfp_mask,
1489 				noswap, get_swappiness(victim), zone);
1490 		else
1491 			ret = try_to_free_mem_cgroup_pages(victim, gfp_mask,
1492 						noswap, get_swappiness(victim));
1493 		css_put(&victim->css);
1494 		/*
1495 		 * When shrinking usage, we can't check whether we should stop here
1496 		 * or reclaim more; that depends on callers. last_scanned_child
1497 		 * works well enough to keep fairness under the tree.
1498 		 */
1499 		if (shrink)
1500 			return ret;
1501 		total += ret;
1502 		if (check_soft) {
1503 			if (!res_counter_soft_limit_excess(&root_mem->res))
1504 				return total;
1505 		} else if (mem_cgroup_margin(root_mem))
1506 			return 1 + total;
1507 	}
1508 	return total;
1509 }
1510 
1511 /*
1512  * Check OOM-Killer is already running under our hierarchy.
1513  * If someone is running, return false.
1514  */
1515 static bool mem_cgroup_oom_lock(struct mem_cgroup *mem)
1516 {
1517 	int x, lock_count = 0;
1518 	struct mem_cgroup *iter;
1519 
1520 	for_each_mem_cgroup_tree(iter, mem) {
1521 		x = atomic_inc_return(&iter->oom_lock);
1522 		lock_count = max(x, lock_count);
1523 	}
1524 
1525 	if (lock_count == 1)
1526 		return true;
1527 	return false;
1528 }
1529 
1530 static int mem_cgroup_oom_unlock(struct mem_cgroup *mem)
1531 {
1532 	struct mem_cgroup *iter;
1533 
1534 	/*
1535 	 * When a new child is created while the hierarchy is under oom,
1536 	 * mem_cgroup_oom_lock() may not be called. We have to use
1537 	 * atomic_add_unless() here.
1538 	 */
1539 	for_each_mem_cgroup_tree(iter, mem)
1540 		atomic_add_unless(&iter->oom_lock, -1, 0);
1541 	return 0;
1542 }
1543 
1544 
1545 static DEFINE_MUTEX(memcg_oom_mutex);
1546 static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq);
1547 
1548 struct oom_wait_info {
1549 	struct mem_cgroup *mem;
1550 	wait_queue_t	wait;
1551 };
1552 
1553 static int memcg_oom_wake_function(wait_queue_t *wait,
1554 	unsigned mode, int sync, void *arg)
1555 {
1556 	struct mem_cgroup *wake_mem = (struct mem_cgroup *)arg;
1557 	struct oom_wait_info *oom_wait_info;
1558 
1559 	oom_wait_info = container_of(wait, struct oom_wait_info, wait);
1560 
1561 	if (oom_wait_info->mem == wake_mem)
1562 		goto wakeup;
1563 	/* if no hierarchy, no match */
1564 	if (!oom_wait_info->mem->use_hierarchy || !wake_mem->use_hierarchy)
1565 		return 0;
1566 	/*
1567 	 * Both oom_wait_info->mem and wake_mem are stable under us,
1568 	 * so we can use css_is_ancestor() without taking care of RCU.
1569 	 */
1570 	if (!css_is_ancestor(&oom_wait_info->mem->css, &wake_mem->css) &&
1571 	    !css_is_ancestor(&wake_mem->css, &oom_wait_info->mem->css))
1572 		return 0;
1573 
1574 wakeup:
1575 	return autoremove_wake_function(wait, mode, sync, arg);
1576 }
1577 
1578 static void memcg_wakeup_oom(struct mem_cgroup *mem)
1579 {
1580 	/* for filtering, pass "mem" as argument. */
1581 	__wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, mem);
1582 }
1583 
1584 static void memcg_oom_recover(struct mem_cgroup *mem)
1585 {
1586 	if (mem && atomic_read(&mem->oom_lock))
1587 		memcg_wakeup_oom(mem);
1588 }
1589 
1590 /*
1591  * Try to call the OOM killer. Returns false if we should exit the memory-reclaim loop.
1592  */
1593 bool mem_cgroup_handle_oom(struct mem_cgroup *mem, gfp_t mask)
1594 {
1595 	struct oom_wait_info owait;
1596 	bool locked, need_to_kill;
1597 
1598 	owait.mem = mem;
1599 	owait.wait.flags = 0;
1600 	owait.wait.func = memcg_oom_wake_function;
1601 	owait.wait.private = current;
1602 	INIT_LIST_HEAD(&owait.wait.task_list);
1603 	need_to_kill = true;
1604 	/* At first, try to OOM lock hierarchy under mem.*/
1605 	mutex_lock(&memcg_oom_mutex);
1606 	locked = mem_cgroup_oom_lock(mem);
1607 	/*
1608 	 * Even if signal_pending(), we can't quit charge() loop without
1609 	 * accounting. So, UNINTERRUPTIBLE is appropriate. But SIGKILL
1610 	 * under OOM is always welcomed, use TASK_KILLABLE here.
1611 	 */
1612 	prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE);
1613 	if (!locked || mem->oom_kill_disable)
1614 		need_to_kill = false;
1615 	if (locked)
1616 		mem_cgroup_oom_notify(mem);
1617 	mutex_unlock(&memcg_oom_mutex);
1618 
1619 	if (need_to_kill) {
1620 		finish_wait(&memcg_oom_waitq, &owait.wait);
1621 		mem_cgroup_out_of_memory(mem, mask);
1622 	} else {
1623 		schedule();
1624 		finish_wait(&memcg_oom_waitq, &owait.wait);
1625 	}
1626 	mutex_lock(&memcg_oom_mutex);
1627 	mem_cgroup_oom_unlock(mem);
1628 	memcg_wakeup_oom(mem);
1629 	mutex_unlock(&memcg_oom_mutex);
1630 
1631 	if (test_thread_flag(TIF_MEMDIE) || fatal_signal_pending(current))
1632 		return false;
1633 	/* Give chance to dying process */
1634 	schedule_timeout(1);
1635 	return true;
1636 }
1637 
1638 /*
1639  * Currently used to update mapped file statistics, but the routine can be
1640  * generalized to update other statistics as well.
1641  *
1642  * Notes: Race condition
1643  *
1644  * We usually use page_cgroup_lock() for accessing page_cgroup members, but
1645  * it tends to be costly. Considering some conditions, we don't need
1646  * to do so _always_.
1647  *
1648  * Considering "charge", lock_page_cgroup() is not required because all
1649  * file-stat operations happen after a page is attached to the radix tree.
1650  * There is no race with "charge".
1651  *
1652  * Considering "uncharge", we know that memcg intentionally doesn't clear
1653  * pc->mem_cgroup at "uncharge". So, we always see a valid pc->mem_cgroup even
1654  * if there is a race with "uncharge". The statistics themselves are properly
1655  * handled by flags.
1656  *
1657  * Considering "move", this is the only case where we see a race. To make the
1658  * race window small, we check the MEM_CGROUP_ON_MOVE percpu value to detect
1659  * the possibility of a race condition. If there is one, we take a lock.
1660  */
1661 
1662 void mem_cgroup_update_page_stat(struct page *page,
1663 				 enum mem_cgroup_page_stat_item idx, int val)
1664 {
1665 	struct mem_cgroup *mem;
1666 	struct page_cgroup *pc = lookup_page_cgroup(page);
1667 	bool need_unlock = false;
1668 	unsigned long uninitialized_var(flags);
1669 
1670 	if (unlikely(!pc))
1671 		return;
1672 
1673 	rcu_read_lock();
1674 	mem = pc->mem_cgroup;
1675 	if (unlikely(!mem || !PageCgroupUsed(pc)))
1676 		goto out;
1677 	/* pc->mem_cgroup is unstable ? */
1678 	if (unlikely(mem_cgroup_stealed(mem)) || PageTransHuge(page)) {
1679 		/* take a lock against to access pc->mem_cgroup */
1680 		move_lock_page_cgroup(pc, &flags);
1681 		need_unlock = true;
1682 		mem = pc->mem_cgroup;
1683 		if (!mem || !PageCgroupUsed(pc))
1684 			goto out;
1685 	}
1686 
1687 	switch (idx) {
1688 	case MEMCG_NR_FILE_MAPPED:
1689 		if (val > 0)
1690 			SetPageCgroupFileMapped(pc);
1691 		else if (!page_mapped(page))
1692 			ClearPageCgroupFileMapped(pc);
1693 		idx = MEM_CGROUP_STAT_FILE_MAPPED;
1694 		break;
1695 	default:
1696 		BUG();
1697 	}
1698 
1699 	this_cpu_add(mem->stat->count[idx], val);
1700 
1701 out:
1702 	if (unlikely(need_unlock))
1703 		move_unlock_page_cgroup(pc, &flags);
1704 	rcu_read_unlock();
1705 	return;
1706 }
1707 EXPORT_SYMBOL(mem_cgroup_update_page_stat);
1708 
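/*
 * A minimal usage sketch (illustrative only, not an existing call site):
 * rmap-style code that maps or unmaps a file page would bump the
 * file-mapped counter like this, with all the locking subtleties above
 * hidden from it:
 *
 *	mem_cgroup_update_page_stat(page, MEMCG_NR_FILE_MAPPED, 1);
 *	...
 *	mem_cgroup_update_page_stat(page, MEMCG_NR_FILE_MAPPED, -1);
 */
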
/*
 * Size of the first charge trial. "32" comes from vmscan.c's magic value.
 * TODO: bigger numbers may be necessary on big-iron machines.
 */
#define CHARGE_BATCH	32U
struct memcg_stock_pcp {
	struct mem_cgroup *cached; /* this is never the root cgroup */
	unsigned int nr_pages;
	struct work_struct work;
};
static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock);
static atomic_t memcg_drain_count;

/*
 * Try to consume stocked charge on this cpu. If successful, one page is
 * consumed from the local stock and true is returned. If the stock is
 * empty or holds charges from a cgroup other than the current target,
 * false is returned and the stock will be refilled.
 */
static bool consume_stock(struct mem_cgroup *mem)
{
	struct memcg_stock_pcp *stock;
	bool ret = true;

	stock = &get_cpu_var(memcg_stock);
	if (mem == stock->cached && stock->nr_pages)
		stock->nr_pages--;
	else /* need to call res_counter_charge */
		ret = false;
	put_cpu_var(memcg_stock);
	return ret;
}

/*
 * Return stock cached in the percpu area to the res_counter and reset the
 * cached information.
 */
static void drain_stock(struct memcg_stock_pcp *stock)
{
	struct mem_cgroup *old = stock->cached;

	if (stock->nr_pages) {
		unsigned long bytes = stock->nr_pages * PAGE_SIZE;

		res_counter_uncharge(&old->res, bytes);
		if (do_swap_account)
			res_counter_uncharge(&old->memsw, bytes);
		stock->nr_pages = 0;
	}
	stock->cached = NULL;
}

/*
 * This must be called with preemption disabled, or by a thread pinned to
 * the local cpu.
 */
static void drain_local_stock(struct work_struct *dummy)
{
	struct memcg_stock_pcp *stock = &__get_cpu_var(memcg_stock);
	drain_stock(stock);
}

/*
 * Cache charges (in pages) taken from the res_counter in the local percpu
 * area, to be consumed by consume_stock() later.
 */
static void refill_stock(struct mem_cgroup *mem, unsigned int nr_pages)
{
	struct memcg_stock_pcp *stock = &get_cpu_var(memcg_stock);

	if (stock->cached != mem) { /* reset if necessary */
		drain_stock(stock);
		stock->cached = mem;
	}
	stock->nr_pages += nr_pages;
	put_cpu_var(memcg_stock);
}
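
/*
 * How the two halves fit together, in sketch form (this mirrors the logic
 * of __mem_cgroup_try_charge() below; it is not additional kernel code):
 *
 *	if (nr_pages == 1 && consume_stock(mem))
 *		return 0;			// percpu hit, no res_counter op
 *	// else charge CHARGE_BATCH pages from the res_counter, then:
 *	refill_stock(mem, CHARGE_BATCH - nr_pages);	// cache the surplus
 */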

/*
 * Try to drain stocked charges on other cpus. This function is asynchronous:
 * it just queues a work item per cpu to do the draining locally on each cpu.
 * Callers can expect some charges to be returned to the res_counter later,
 * but cannot wait for that to happen.
 */
static void drain_all_stock_async(void)
{
	int cpu;
	/* This function is for scheduling "drain" in an asynchronous way.
	 * The result of "drain" is not directly handled by callers. So, if
	 * someone is already draining, we don't have to schedule it again.
	 * Anyway, the WORK_STRUCT_PENDING check in queue_work_on() will catch
	 * any race; we just do a loose check here.
	 */
	if (atomic_read(&memcg_drain_count))
		return;
	/* Notify other cpus that a system-wide "drain" is running */
	atomic_inc(&memcg_drain_count);
	get_online_cpus();
	for_each_online_cpu(cpu) {
		struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
		schedule_work_on(cpu, &stock->work);
	}
	put_online_cpus();
	atomic_dec(&memcg_drain_count);
	/* We don't wait for flush_work */
}

/* This is a synchronous drain interface. */
static void drain_all_stock_sync(void)
{
	/* called when force_empty is called */
	atomic_inc(&memcg_drain_count);
	schedule_on_each_cpu(drain_local_stock);
	atomic_dec(&memcg_drain_count);
}

/*
 * This function drains the percpu counter values of a DEAD cpu and moves
 * them to the local cpu. Note that this function can be preempted.
 */
static void mem_cgroup_drain_pcp_counter(struct mem_cgroup *mem, int cpu)
{
	int i;

	spin_lock(&mem->pcp_counter_lock);
	for (i = 0; i < MEM_CGROUP_STAT_DATA; i++) {
		long x = per_cpu(mem->stat->count[i], cpu);

		per_cpu(mem->stat->count[i], cpu) = 0;
		mem->nocpu_base.count[i] += x;
	}
	for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++) {
		unsigned long x = per_cpu(mem->stat->events[i], cpu);

		per_cpu(mem->stat->events[i], cpu) = 0;
		mem->nocpu_base.events[i] += x;
	}
	/* The ON_MOVE value needs clearing; it works as a kind of lock. */
	per_cpu(mem->stat->count[MEM_CGROUP_ON_MOVE], cpu) = 0;
	spin_unlock(&mem->pcp_counter_lock);
}

static void synchronize_mem_cgroup_on_move(struct mem_cgroup *mem, int cpu)
{
	int idx = MEM_CGROUP_ON_MOVE;

	spin_lock(&mem->pcp_counter_lock);
	per_cpu(mem->stat->count[idx], cpu) = mem->nocpu_base.count[idx];
	spin_unlock(&mem->pcp_counter_lock);
}

static int __cpuinit memcg_cpu_hotplug_callback(struct notifier_block *nb,
					unsigned long action,
					void *hcpu)
{
	int cpu = (unsigned long)hcpu;
	struct memcg_stock_pcp *stock;
	struct mem_cgroup *iter;

	if (action == CPU_ONLINE) {
		for_each_mem_cgroup_all(iter)
			synchronize_mem_cgroup_on_move(iter, cpu);
		return NOTIFY_OK;
	}

	if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
		return NOTIFY_OK;

	for_each_mem_cgroup_all(iter)
		mem_cgroup_drain_pcp_counter(iter, cpu);

	stock = &per_cpu(memcg_stock, cpu);
	drain_stock(stock);
	return NOTIFY_OK;
}


/* See __mem_cgroup_try_charge() for details */
enum {
	CHARGE_OK,		/* success */
	CHARGE_RETRY,		/* need to retry, but retrying is not bad */
	CHARGE_NOMEM,		/* we can't do more. return -ENOMEM */
	CHARGE_WOULDBLOCK,	/* __GFP_WAIT wasn't set and not enough resources */
	CHARGE_OOM_DIE,		/* current was killed because of OOM */
};

static int mem_cgroup_do_charge(struct mem_cgroup *mem, gfp_t gfp_mask,
				unsigned int nr_pages, bool oom_check)
{
	unsigned long csize = nr_pages * PAGE_SIZE;
	struct mem_cgroup *mem_over_limit;
	struct res_counter *fail_res;
	unsigned long flags = 0;
	int ret;

	ret = res_counter_charge(&mem->res, csize, &fail_res);

	if (likely(!ret)) {
		if (!do_swap_account)
			return CHARGE_OK;
		ret = res_counter_charge(&mem->memsw, csize, &fail_res);
		if (likely(!ret))
			return CHARGE_OK;

		res_counter_uncharge(&mem->res, csize);
		mem_over_limit = mem_cgroup_from_res_counter(fail_res, memsw);
		flags |= MEM_CGROUP_RECLAIM_NOSWAP;
	} else
		mem_over_limit = mem_cgroup_from_res_counter(fail_res, res);
	/*
	 * nr_pages can be either a huge page (HPAGE_PMD_NR), a batch
	 * of regular pages (CHARGE_BATCH), or a single regular page (1).
	 *
	 * Never reclaim on behalf of optional batching, retry with a
	 * single page instead.
	 */
	if (nr_pages == CHARGE_BATCH)
		return CHARGE_RETRY;

	if (!(gfp_mask & __GFP_WAIT))
		return CHARGE_WOULDBLOCK;

	ret = mem_cgroup_hierarchical_reclaim(mem_over_limit, NULL,
					      gfp_mask, flags);
	if (mem_cgroup_margin(mem_over_limit) >= nr_pages)
		return CHARGE_RETRY;
	/*
	 * Even though the limit is exceeded at this point, reclaim
	 * may have been able to free some pages.  Retry the charge
	 * before killing the task.
	 *
	 * Only for regular pages, though: huge pages are rather
	 * unlikely to succeed so close to the limit, and we fall back
	 * to regular pages anyway in case of failure.
	 */
	if (nr_pages == 1 && ret)
		return CHARGE_RETRY;

	/*
	 * At task move, charge accounts can be doubly counted. So, it's
	 * better to wait until the end of task_move if something is going on.
	 */
	if (mem_cgroup_wait_acct_move(mem_over_limit))
		return CHARGE_RETRY;

	/* If we don't need to call the oom-killer at all, return immediately */
	if (!oom_check)
		return CHARGE_NOMEM;
	/* check OOM */
	if (!mem_cgroup_handle_oom(mem_over_limit, gfp_mask))
		return CHARGE_OOM_DIE;

	return CHARGE_RETRY;
}

/*
 * Unlike the exported interface, an "oom" parameter is added. If oom==true,
 * the oom-killer can be invoked.
 */
static int __mem_cgroup_try_charge(struct mm_struct *mm,
				   gfp_t gfp_mask,
				   unsigned int nr_pages,
				   struct mem_cgroup **memcg,
				   bool oom)
{
	unsigned int batch = max(CHARGE_BATCH, nr_pages);
	int nr_oom_retries = MEM_CGROUP_RECLAIM_RETRIES;
	struct mem_cgroup *mem = NULL;
	int ret;

	/*
	 * Unlike the global VM's OOM kill, we're not under a system-level
	 * memory shortage here. So, let dying processes proceed, in addition
	 * to MEMDIE processes.
	 */
	if (unlikely(test_thread_flag(TIF_MEMDIE)
		     || fatal_signal_pending(current)))
		goto bypass;

	/*
	 * We always charge the cgroup the mm_struct belongs to.
	 * The mm_struct's mem_cgroup changes on task migration if the
	 * thread group leader migrates. It's possible that mm is not
	 * set, if so charge the init_mm (happens for pagecache usage).
	 */
	if (!*memcg && !mm)
		goto bypass;
again:
	if (*memcg) { /* css should be a valid one */
		mem = *memcg;
		VM_BUG_ON(css_is_removed(&mem->css));
		if (mem_cgroup_is_root(mem))
			goto done;
		if (nr_pages == 1 && consume_stock(mem))
			goto done;
		css_get(&mem->css);
	} else {
		struct task_struct *p;

		rcu_read_lock();
		p = rcu_dereference(mm->owner);
		/*
		 * Because we don't have task_lock(), "p" can exit.
		 * In that case, "mem" can point to root, or p can be NULL due
		 * to a race with swapoff. So, we have a small risk of
		 * mis-accounting. But such mis-accounting through races always
		 * happens because we don't hold cgroup_mutex(). Holding it
		 * would be overkill, so we allow that small race here.
		 * (*) swapoff et al. will charge against the mm_struct, not
		 * the task_struct. So, mm->owner can be NULL.
		 */
		mem = mem_cgroup_from_task(p);
		if (!mem || mem_cgroup_is_root(mem)) {
			rcu_read_unlock();
			goto done;
		}
		if (nr_pages == 1 && consume_stock(mem)) {
			/*
			 * It seems dangerous to access memcg without
			 * css_get(). But considering how consume_stock works,
			 * it's not necessary. If consume_stock succeeds, some
			 * charges from this memcg are cached on this cpu. So,
			 * we don't need to call css_get()/css_tryget() before
			 * calling consume_stock().
			 */
			rcu_read_unlock();
			goto done;
		}
		/* after here, we may block. we need to get a refcnt */
		if (!css_tryget(&mem->css)) {
			rcu_read_unlock();
			goto again;
		}
		rcu_read_unlock();
	}

	do {
		bool oom_check;

		/* If killed, bypass charge */
		if (fatal_signal_pending(current)) {
			css_put(&mem->css);
			goto bypass;
		}

		oom_check = false;
		if (oom && !nr_oom_retries) {
			oom_check = true;
			nr_oom_retries = MEM_CGROUP_RECLAIM_RETRIES;
		}

		ret = mem_cgroup_do_charge(mem, gfp_mask, batch, oom_check);
		switch (ret) {
		case CHARGE_OK:
			break;
		case CHARGE_RETRY: /* not in OOM situation but retry */
			batch = nr_pages;
			css_put(&mem->css);
			mem = NULL;
			goto again;
		case CHARGE_WOULDBLOCK: /* !__GFP_WAIT */
			css_put(&mem->css);
			goto nomem;
		case CHARGE_NOMEM: /* OOM routine works */
			if (!oom) {
				css_put(&mem->css);
				goto nomem;
			}
			/* If oom, we never return -ENOMEM */
			nr_oom_retries--;
			break;
		case CHARGE_OOM_DIE: /* Killed by OOM Killer */
			css_put(&mem->css);
			goto bypass;
		}
	} while (ret != CHARGE_OK);

	if (batch > nr_pages)
		refill_stock(mem, batch - nr_pages);
	css_put(&mem->css);
done:
	*memcg = mem;
	return 0;
nomem:
	*memcg = NULL;
	return -ENOMEM;
bypass:
	*memcg = NULL;
	return 0;
}
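
/*
 * The charge protocol from a caller's point of view, sketched under the
 * simplifying assumption of a single regular page (see
 * mem_cgroup_charge_common() below for the real thing):
 *
 *	struct mem_cgroup *mem = NULL;
 *
 *	if (__mem_cgroup_try_charge(mm, gfp_mask, 1, &mem, true))
 *		return -ENOMEM;		// charge failed
 *	if (!mem)
 *		return 0;		// root cgroup or bypass: nothing to commit
 *	__mem_cgroup_commit_charge(mem, page, 1, pc, ctype);
 *
 * A reservation that cannot be committed must be undone with
 * __mem_cgroup_cancel_charge() (defined next).
 */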

/*
 * Sometimes we have to undo a charge we got via try_charge(). This function
 * does the uncharge and drops the css refcount taken by try_charge().
 */
static void __mem_cgroup_cancel_charge(struct mem_cgroup *mem,
				       unsigned int nr_pages)
{
	if (!mem_cgroup_is_root(mem)) {
		unsigned long bytes = nr_pages * PAGE_SIZE;

		res_counter_uncharge(&mem->res, bytes);
		if (do_swap_account)
			res_counter_uncharge(&mem->memsw, bytes);
	}
}

/*
 * A helper function to get a mem_cgroup from an ID. Must be called under
 * rcu_read_lock(). The caller must check css_is_removed() or similar if
 * that is a concern. (Dropping a refcnt from swap can be called against a
 * removed memcg.)
 */
static struct mem_cgroup *mem_cgroup_lookup(unsigned short id)
{
	struct cgroup_subsys_state *css;

	/* ID 0 is unused */
	if (!id)
		return NULL;
	css = css_lookup(&mem_cgroup_subsys, id);
	if (!css)
		return NULL;
	return container_of(css, struct mem_cgroup, css);
}
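
/*
 * Typical usage, sketched (cf. try_get_mem_cgroup_from_page() below): the
 * RCU read lock keeps the css alive during the lookup, and css_tryget()
 * is needed before using the memcg beyond the critical section:
 *
 *	rcu_read_lock();
 *	mem = mem_cgroup_lookup(id);
 *	if (mem && !css_tryget(&mem->css))
 *		mem = NULL;
 *	rcu_read_unlock();
 */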

struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
{
	struct mem_cgroup *mem = NULL;
	struct page_cgroup *pc;
	unsigned short id;
	swp_entry_t ent;

	VM_BUG_ON(!PageLocked(page));

	pc = lookup_page_cgroup(page);
	lock_page_cgroup(pc);
	if (PageCgroupUsed(pc)) {
		mem = pc->mem_cgroup;
		if (mem && !css_tryget(&mem->css))
			mem = NULL;
	} else if (PageSwapCache(page)) {
		ent.val = page_private(page);
		id = lookup_swap_cgroup(ent);
		rcu_read_lock();
		mem = mem_cgroup_lookup(id);
		if (mem && !css_tryget(&mem->css))
			mem = NULL;
		rcu_read_unlock();
	}
	unlock_page_cgroup(pc);
	return mem;
}

static void __mem_cgroup_commit_charge(struct mem_cgroup *mem,
				       struct page *page,
				       unsigned int nr_pages,
				       struct page_cgroup *pc,
				       enum charge_type ctype)
{
	lock_page_cgroup(pc);
	if (unlikely(PageCgroupUsed(pc))) {
		unlock_page_cgroup(pc);
		__mem_cgroup_cancel_charge(mem, nr_pages);
		return;
	}
	/*
	 * We don't need page_cgroup_lock for tail pages, because they are not
	 * accessed by any other context at this point.
	 */
	pc->mem_cgroup = mem;
	/*
	 * We access a page_cgroup asynchronously without lock_page_cgroup().
	 * Especially when a page_cgroup is taken from a page, pc->mem_cgroup
	 * is accessed after testing the USED bit. To make pc->mem_cgroup
	 * visible before the USED bit, we need a memory barrier here.
	 * See mem_cgroup_add_lru_list(), etc.
	 */
	smp_wmb();
	switch (ctype) {
	case MEM_CGROUP_CHARGE_TYPE_CACHE:
	case MEM_CGROUP_CHARGE_TYPE_SHMEM:
		SetPageCgroupCache(pc);
		SetPageCgroupUsed(pc);
		break;
	case MEM_CGROUP_CHARGE_TYPE_MAPPED:
		ClearPageCgroupCache(pc);
		SetPageCgroupUsed(pc);
		break;
	default:
		break;
	}

	mem_cgroup_charge_statistics(mem, PageCgroupCache(pc), nr_pages);
	unlock_page_cgroup(pc);
	/*
	 * "charge_statistics" updated the event counter, so check it.
	 * Insert the ancestors (and their ancestors) into the softlimit
	 * RB-tree if they exceed their soft limit.
	 */
	memcg_check_events(mem, page);
}
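
/*
 * The read side that the smp_wmb() above pairs with looks, sketched, like
 * this (cf. the lockless readers of pc->mem_cgroup such as
 * mem_cgroup_add_lru_list()):
 *
 *	if (PageCgroupUsed(pc)) {
 *		smp_rmb();		// pairs with smp_wmb() in commit
 *		mem = pc->mem_cgroup;	// guaranteed to be the new value
 *	}
 */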

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

#define PCGF_NOCOPY_AT_SPLIT ((1 << PCG_LOCK) | (1 << PCG_MOVE_LOCK) |\
			(1 << PCG_ACCT_LRU) | (1 << PCG_MIGRATION))
/*
 * Because tail pages are not marked as "used", set the bit here. We're
 * under zone->lru_lock, 'splitting on pmd' and the compound_lock.
 */
void mem_cgroup_split_huge_fixup(struct page *head, struct page *tail)
{
	struct page_cgroup *head_pc = lookup_page_cgroup(head);
	struct page_cgroup *tail_pc = lookup_page_cgroup(tail);
	unsigned long flags;

	if (mem_cgroup_disabled())
		return;
	/*
	 * We have no races with charge/uncharge, but we will have races with
	 * page state accounting.
	 */
	move_lock_page_cgroup(head_pc, &flags);

	tail_pc->mem_cgroup = head_pc->mem_cgroup;
	smp_wmb(); /* see __commit_charge() */
	if (PageCgroupAcctLRU(head_pc)) {
		enum lru_list lru;
		struct mem_cgroup_per_zone *mz;

		/*
		 * The LRU flags cannot be copied because we need to add the
		 * tail page to the LRU by a generic call, and our hook will
		 * be called. We hold lru_lock, so reduce the counter directly.
		 */
		lru = page_lru(head);
		mz = page_cgroup_zoneinfo(head_pc->mem_cgroup, head);
		MEM_CGROUP_ZSTAT(mz, lru) -= 1;
	}
	tail_pc->flags = head_pc->flags & ~PCGF_NOCOPY_AT_SPLIT;
	move_unlock_page_cgroup(head_pc, &flags);
}
#endif

/**
 * mem_cgroup_move_account - move account of the page
 * @page: the page
 * @nr_pages: number of regular pages (>1 for huge pages)
 * @pc:	page_cgroup of the page.
 * @from: mem_cgroup which the page is moved from.
 * @to:	mem_cgroup which the page is moved to. @from != @to.
 * @uncharge: whether we should call uncharge and css_put against @from.
 *
 * The caller must confirm the following:
 * - the page is not on the LRU (isolate_page() is useful.)
 * - compound_lock is held when nr_pages > 1
 *
 * This function doesn't do "charge" or css_get to the new cgroup. That
 * should be done by the caller (__mem_cgroup_try_charge would be useful).
 * If @uncharge is true, this function does "uncharge" from the old cgroup;
 * if it is false, the caller should do the "uncharge".
 */
static int mem_cgroup_move_account(struct page *page,
				   unsigned int nr_pages,
				   struct page_cgroup *pc,
				   struct mem_cgroup *from,
				   struct mem_cgroup *to,
				   bool uncharge)
{
	unsigned long flags;
	int ret;

	VM_BUG_ON(from == to);
	VM_BUG_ON(PageLRU(page));
	/*
	 * The page is isolated from the LRU. So, the collapse function
	 * will not handle this page. But page splitting can happen.
	 * Do this check under compound_page_lock(); the caller should
	 * hold it.
	 */
	ret = -EBUSY;
	if (nr_pages > 1 && !PageTransHuge(page))
		goto out;

	lock_page_cgroup(pc);

	ret = -EINVAL;
	if (!PageCgroupUsed(pc) || pc->mem_cgroup != from)
		goto unlock;

	move_lock_page_cgroup(pc, &flags);

	if (PageCgroupFileMapped(pc)) {
		/* Update mapped_file data for mem_cgroup */
		preempt_disable();
		__this_cpu_dec(from->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]);
		__this_cpu_inc(to->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]);
		preempt_enable();
	}
	mem_cgroup_charge_statistics(from, PageCgroupCache(pc), -nr_pages);
	if (uncharge)
		/* This is not "cancel", but cancel_charge does all we need. */
		__mem_cgroup_cancel_charge(from, nr_pages);

	/* caller should have done css_get */
	pc->mem_cgroup = to;
	mem_cgroup_charge_statistics(to, PageCgroupCache(pc), nr_pages);
	/*
	 * We charge against "to", which may not have any tasks. Then, "to"
	 * can be under rmdir(). But in the current implementation, the
	 * callers of this function are just force_empty() and move charge,
	 * so it's guaranteed that "to" is never removed. Therefore, we don't
	 * check the rmdir status here.
	 */
	move_unlock_page_cgroup(pc, &flags);
	ret = 0;
unlock:
	unlock_page_cgroup(pc);
	/*
	 * check events
	 */
	memcg_check_events(to, page);
	memcg_check_events(from, page);
out:
	return ret;
}

/*
 * move charges to its parent.
 */

static int mem_cgroup_move_parent(struct page *page,
				  struct page_cgroup *pc,
				  struct mem_cgroup *child,
				  gfp_t gfp_mask)
{
	struct cgroup *cg = child->css.cgroup;
	struct cgroup *pcg = cg->parent;
	struct mem_cgroup *parent;
	unsigned int nr_pages;
	unsigned long uninitialized_var(flags);
	int ret;

	/* Is ROOT ? */
	if (!pcg)
		return -EINVAL;

	ret = -EBUSY;
	if (!get_page_unless_zero(page))
		goto out;
	if (isolate_lru_page(page))
		goto put;

	nr_pages = hpage_nr_pages(page);

	parent = mem_cgroup_from_cont(pcg);
	ret = __mem_cgroup_try_charge(NULL, gfp_mask, nr_pages, &parent, false);
	if (ret || !parent)
		goto put_back;

	if (nr_pages > 1)
		flags = compound_lock_irqsave(page);

	ret = mem_cgroup_move_account(page, nr_pages, pc, child, parent, true);
	if (ret)
		__mem_cgroup_cancel_charge(parent, nr_pages);

	if (nr_pages > 1)
		compound_unlock_irqrestore(page, flags);
put_back:
	putback_lru_page(page);
put:
	put_page(page);
out:
	return ret;
}

/*
 * Charge the memory controller for page usage.
 * Return
 * 0 if the charge was successful
 * < 0 if the cgroup is over its limit
 */
static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
				gfp_t gfp_mask, enum charge_type ctype)
{
	struct mem_cgroup *mem = NULL;
	unsigned int nr_pages = 1;
	struct page_cgroup *pc;
	bool oom = true;
	int ret;

	if (PageTransHuge(page)) {
		nr_pages <<= compound_order(page);
		VM_BUG_ON(!PageTransHuge(page));
		/*
		 * Never OOM-kill a process for a huge page.  The
		 * fault handler will fall back to regular pages.
		 */
		oom = false;
	}

	pc = lookup_page_cgroup(page);
	BUG_ON(!pc); /* XXX: remove this and move pc lookup into commit */

	ret = __mem_cgroup_try_charge(mm, gfp_mask, nr_pages, &mem, oom);
	if (ret || !mem)
		return ret;

	__mem_cgroup_commit_charge(mem, page, nr_pages, pc, ctype);
	return 0;
}

int mem_cgroup_newpage_charge(struct page *page,
			      struct mm_struct *mm, gfp_t gfp_mask)
{
	if (mem_cgroup_disabled())
		return 0;
	/*
	 * If already mapped, we don't have to account.
	 * If it is page cache, page->mapping has an address_space.
	 * But page->mapping may hold an out-of-use anon_vma pointer;
	 * detect that with the PageAnon() check. A newly-mapped-anon
	 * page's page->mapping is NULL.
	 */
	if (page_mapped(page) || (page->mapping && !PageAnon(page)))
		return 0;
	if (unlikely(!mm))
		mm = &init_mm;
	return mem_cgroup_charge_common(page, mm, gfp_mask,
				MEM_CGROUP_CHARGE_TYPE_MAPPED);
}

static void
__mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr,
					enum charge_type ctype);

static void
__mem_cgroup_commit_charge_lrucare(struct page *page, struct mem_cgroup *mem,
					enum charge_type ctype)
{
	struct page_cgroup *pc = lookup_page_cgroup(page);
	/*
	 * In some cases (SwapCache, FUSE's splice_buf->radixtree), the page
	 * is already on the LRU. That means the page may be on some other
	 * page_cgroup's LRU. Take care of it.
	 */
	mem_cgroup_lru_del_before_commit(page);
	__mem_cgroup_commit_charge(mem, page, 1, pc, ctype);
	mem_cgroup_lru_add_after_commit(page);
	return;
}

int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
				gfp_t gfp_mask)
{
	struct mem_cgroup *mem = NULL;
	int ret;

	if (mem_cgroup_disabled())
		return 0;
	if (PageCompound(page))
		return 0;
	/*
	 * Corner case handling. This is usually called from
	 * add_to_page_cache(). But some FS (shmem) precharges this page
	 * before calling it and calls add_to_page_cache() with GFP_NOWAIT.
	 *
	 * For the GFP_NOWAIT case, the page may be pre-charged before calling
	 * add_to_page_cache(). (See shmem.c) Check it here and avoid charging
	 * twice. (It works, but has to pay a somewhat larger cost.)
	 * And when the page is SwapCache, it should take swap information
	 * into account. This is under lock_page() now.
	 */
	if (!(gfp_mask & __GFP_WAIT)) {
		struct page_cgroup *pc;

		pc = lookup_page_cgroup(page);
		if (!pc)
			return 0;
		lock_page_cgroup(pc);
		if (PageCgroupUsed(pc)) {
			unlock_page_cgroup(pc);
			return 0;
		}
		unlock_page_cgroup(pc);
	}

	if (unlikely(!mm))
		mm = &init_mm;

	if (page_is_file_cache(page)) {
		ret = __mem_cgroup_try_charge(mm, gfp_mask, 1, &mem, true);
		if (ret || !mem)
			return ret;

		/*
		 * FUSE reuses pages without going through the final
		 * put that would remove them from the LRU list, make
		 * sure that they get relinked properly.
		 */
		__mem_cgroup_commit_charge_lrucare(page, mem,
					MEM_CGROUP_CHARGE_TYPE_CACHE);
		return ret;
	}
	/* shmem */
	if (PageSwapCache(page)) {
		ret = mem_cgroup_try_charge_swapin(mm, page, gfp_mask, &mem);
		if (!ret)
			__mem_cgroup_commit_charge_swapin(page, mem,
					MEM_CGROUP_CHARGE_TYPE_SHMEM);
	} else
		ret = mem_cgroup_charge_common(page, mm, gfp_mask,
					MEM_CGROUP_CHARGE_TYPE_SHMEM);

	return ret;
}

/*
 * While swapping in: try_charge -> commit or cancel; the page is locked.
 * And when try_charge() successfully returns, one refcnt to the memcg
 * without a struct page_cgroup is acquired. This refcnt will be consumed
 * by "commit()" or released by "cancel()". See the usage sketch after
 * mem_cgroup_try_charge_swapin() below.
 */
int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
				 struct page *page,
				 gfp_t mask, struct mem_cgroup **ptr)
{
	struct mem_cgroup *mem;
	int ret;

	*ptr = NULL;

	if (mem_cgroup_disabled())
		return 0;

	if (!do_swap_account)
		goto charge_cur_mm;
	/*
	 * A racing thread's fault, or swapoff, may have already updated
	 * the pte, and even removed page from swap cache: in those cases
	 * do_swap_page()'s pte_same() test will fail; but there's also a
	 * KSM case which does need to charge the page.
	 */
	if (!PageSwapCache(page))
		goto charge_cur_mm;
	mem = try_get_mem_cgroup_from_page(page);
	if (!mem)
		goto charge_cur_mm;
	*ptr = mem;
	ret = __mem_cgroup_try_charge(NULL, mask, 1, ptr, true);
	css_put(&mem->css);
	return ret;
charge_cur_mm:
	if (unlikely(!mm))
		mm = &init_mm;
	return __mem_cgroup_try_charge(mm, mask, 1, ptr, true);
}
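
/*
 * The swap-in protocol from the caller's side, sketched (this is roughly
 * what do_swap_page()-style code does; error paths elided):
 *
 *	struct mem_cgroup *ptr;
 *
 *	if (mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &ptr))
 *		goto out_of_memory;
 *	// ... map the page; if mapping fails:
 *	//	mem_cgroup_cancel_charge_swapin(ptr);
 *	// on success:
 *	mem_cgroup_commit_charge_swapin(page, ptr);
 */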

static void
__mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr,
					enum charge_type ctype)
{
	if (mem_cgroup_disabled())
		return;
	if (!ptr)
		return;
	cgroup_exclude_rmdir(&ptr->css);

	__mem_cgroup_commit_charge_lrucare(page, ptr, ctype);
	/*
	 * Now the swap is in memory. This means this page may be counted
	 * both as mem and swap: a double count. Fix it by uncharging from
	 * memsw. Basically, this SwapCache is stable under lock_page(), but
	 * in do_swap_page()::memory.c, reuse_swap_page() may call
	 * delete_from_swap_cache() before we reach here.
	 */
	if (do_swap_account && PageSwapCache(page)) {
		swp_entry_t ent = {.val = page_private(page)};
		unsigned short id;
		struct mem_cgroup *memcg;

		id = swap_cgroup_record(ent, 0);
		rcu_read_lock();
		memcg = mem_cgroup_lookup(id);
		if (memcg) {
			/*
			 * The recorded memcg can be an obsolete one, so
			 * avoid calling css_tryget().
			 */
			if (!mem_cgroup_is_root(memcg))
				res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
			mem_cgroup_swap_statistics(memcg, false);
			mem_cgroup_put(memcg);
		}
		rcu_read_unlock();
	}
	/*
	 * At swapin, we may charge against a cgroup which has no tasks, so
	 * rmdir()->pre_destroy() can be called while we do this charge.
	 * In that case, we need to call pre_destroy() again. Check it here.
	 */
	cgroup_release_and_wakeup_rmdir(&ptr->css);
}

void mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr)
{
	__mem_cgroup_commit_charge_swapin(page, ptr,
					MEM_CGROUP_CHARGE_TYPE_MAPPED);
}

void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *mem)
{
	if (mem_cgroup_disabled())
		return;
	if (!mem)
		return;
	__mem_cgroup_cancel_charge(mem, 1);
}

static void mem_cgroup_do_uncharge(struct mem_cgroup *mem,
				   unsigned int nr_pages,
				   const enum charge_type ctype)
{
	struct memcg_batch_info *batch = NULL;
	bool uncharge_memsw = true;

	/* If swapout, usage of swap doesn't decrease */
	if (!do_swap_account || ctype == MEM_CGROUP_CHARGE_TYPE_SWAPOUT)
		uncharge_memsw = false;

	batch = &current->memcg_batch;
	/*
	 * Usually, we do css_get() when we remember a memcg pointer. But in
	 * this case we keep res->usage until the end of a series of
	 * uncharges, so it's ok to ignore memcg's refcnt.
	 */
	if (!batch->memcg)
		batch->memcg = mem;
	/*
	 * do_batch > 0 when unmapping pages or during inode
	 * invalidate/truncate. In those cases, all pages freed continuously
	 * can be expected to be in the same cgroup, and we have a chance to
	 * coalesce uncharges. But we uncharge one by one if the task was
	 * killed by OOM (TIF_MEMDIE), because we want to do the uncharge as
	 * soon as possible.
	 */

	if (!batch->do_batch || test_thread_flag(TIF_MEMDIE))
		goto direct_uncharge;

	if (nr_pages > 1)
		goto direct_uncharge;

	/*
	 * In the typical case, batch->memcg == mem. This means we can
	 * merge a series of uncharges into one uncharge of the res_counter.
	 * If not, we uncharge the res_counter one by one.
	 */
	if (batch->memcg != mem)
		goto direct_uncharge;
	/* remember the freed charge and uncharge it later */
	batch->nr_pages++;
	if (uncharge_memsw)
		batch->memsw_nr_pages++;
	return;
direct_uncharge:
	res_counter_uncharge(&mem->res, nr_pages * PAGE_SIZE);
	if (uncharge_memsw)
		res_counter_uncharge(&mem->memsw, nr_pages * PAGE_SIZE);
	if (unlikely(batch->memcg != mem))
		memcg_oom_recover(mem);
	return;
}

/*
 * uncharge if !page_mapped(page)
 */
static struct mem_cgroup *
__mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
{
	struct mem_cgroup *mem = NULL;
	unsigned int nr_pages = 1;
	struct page_cgroup *pc;

	if (mem_cgroup_disabled())
		return NULL;

	if (PageSwapCache(page))
		return NULL;

	if (PageTransHuge(page)) {
		nr_pages <<= compound_order(page);
		VM_BUG_ON(!PageTransHuge(page));
	}
	/*
	 * Check if our page_cgroup is valid
	 */
	pc = lookup_page_cgroup(page);
	if (unlikely(!pc || !PageCgroupUsed(pc)))
		return NULL;

	lock_page_cgroup(pc);

	mem = pc->mem_cgroup;

	if (!PageCgroupUsed(pc))
		goto unlock_out;

	switch (ctype) {
	case MEM_CGROUP_CHARGE_TYPE_MAPPED:
	case MEM_CGROUP_CHARGE_TYPE_DROP:
		/* See mem_cgroup_prepare_migration() */
		if (page_mapped(page) || PageCgroupMigration(pc))
			goto unlock_out;
		break;
	case MEM_CGROUP_CHARGE_TYPE_SWAPOUT:
		if (!PageAnon(page)) {	/* Shared memory */
			if (page->mapping && !page_is_file_cache(page))
				goto unlock_out;
		} else if (page_mapped(page)) /* Anon */
				goto unlock_out;
		break;
	default:
		break;
	}

	mem_cgroup_charge_statistics(mem, PageCgroupCache(pc), -nr_pages);

	ClearPageCgroupUsed(pc);
	/*
	 * pc->mem_cgroup is not cleared here. It will be accessed when the
	 * page is freed from the LRU. This is safe because an uncharged page
	 * is expected not to be reused (it will be freed soon). The exception
	 * is SwapCache, which is handled by special functions.
	 */

	unlock_page_cgroup(pc);
	/*
	 * Even after unlock, we have mem->res.usage here, so this memcg
	 * will never be freed.
	 */
	memcg_check_events(mem, page);
	if (do_swap_account && ctype == MEM_CGROUP_CHARGE_TYPE_SWAPOUT) {
		mem_cgroup_swap_statistics(mem, true);
		mem_cgroup_get(mem);
	}
	if (!mem_cgroup_is_root(mem))
		mem_cgroup_do_uncharge(mem, nr_pages, ctype);

	return mem;

unlock_out:
	unlock_page_cgroup(pc);
	return NULL;
}

void mem_cgroup_uncharge_page(struct page *page)
{
	/* early check. */
	if (page_mapped(page))
		return;
	if (page->mapping && !PageAnon(page))
		return;
	__mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_MAPPED);
}

void mem_cgroup_uncharge_cache_page(struct page *page)
{
	VM_BUG_ON(page_mapped(page));
	VM_BUG_ON(page->mapping);
	__mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_CACHE);
}

/*
 * Batch_start/batch_end is called in unmap_page_range/invalidate/truncate.
 * In those cases, pages are freed continuously and we can expect them to
 * be in the same memcg. Each of those calls itself limits the number of
 * pages freed at once, so uncharge_start/end() is called properly. This
 * may be called multiple (nested) times in a context; see the usage sketch
 * after mem_cgroup_uncharge_end() below.
 */

void mem_cgroup_uncharge_start(void)
{
	current->memcg_batch.do_batch++;
	/* We can nest. */
	if (current->memcg_batch.do_batch == 1) {
		current->memcg_batch.memcg = NULL;
		current->memcg_batch.nr_pages = 0;
		current->memcg_batch.memsw_nr_pages = 0;
	}
}

void mem_cgroup_uncharge_end(void)
{
	struct memcg_batch_info *batch = &current->memcg_batch;

	if (!batch->do_batch)
		return;

	batch->do_batch--;
	if (batch->do_batch) /* If stacked, do nothing. */
		return;

	if (!batch->memcg)
		return;
	/*
	 * This "batch->memcg" is valid without any css_get/put etc...
	 * because we hide the charges behind us.
	 */
	if (batch->nr_pages)
		res_counter_uncharge(&batch->memcg->res,
				     batch->nr_pages * PAGE_SIZE);
	if (batch->memsw_nr_pages)
		res_counter_uncharge(&batch->memcg->memsw,
				     batch->memsw_nr_pages * PAGE_SIZE);
	memcg_oom_recover(batch->memcg);
	/* forget this pointer (for sanity check) */
	batch->memcg = NULL;
}
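
/*
 * Usage sketch for the batch API (this mirrors what the truncate and unmap
 * paths do; it is not additional kernel code):
 *
 *	mem_cgroup_uncharge_start();
 *	for (each page being freed)
 *		mem_cgroup_uncharge_page(page);	// accumulates in memcg_batch
 *	mem_cgroup_uncharge_end();		// one res_counter op for all
 */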

#ifdef CONFIG_SWAP
/*
 * Called after __delete_from_swap_cache(); drops the "page" account.
 * The memcg information is recorded in the swap_cgroup of "ent".
 */
void
mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout)
{
	struct mem_cgroup *memcg;
	int ctype = MEM_CGROUP_CHARGE_TYPE_SWAPOUT;

	if (!swapout) /* this was a swap cache but the swap is unused! */
		ctype = MEM_CGROUP_CHARGE_TYPE_DROP;

	memcg = __mem_cgroup_uncharge_common(page, ctype);

	/*
	 * Record the memcg information; if swapout && memcg != NULL,
	 * mem_cgroup_get() was called in uncharge().
	 */
	if (do_swap_account && swapout && memcg)
		swap_cgroup_record(ent, css_id(&memcg->css));
}
#endif

#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
/*
 * Called from swap_entry_free(). Removes the record in swap_cgroup and
 * uncharges the "memsw" account.
 */
void mem_cgroup_uncharge_swap(swp_entry_t ent)
{
	struct mem_cgroup *memcg;
	unsigned short id;

	if (!do_swap_account)
		return;

	id = swap_cgroup_record(ent, 0);
	rcu_read_lock();
	memcg = mem_cgroup_lookup(id);
	if (memcg) {
		/*
		 * We uncharge this because the swap is freed. This memcg
		 * can be an obsolete one, so we avoid calling css_tryget().
		 */
		if (!mem_cgroup_is_root(memcg))
			res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
		mem_cgroup_swap_statistics(memcg, false);
		mem_cgroup_put(memcg);
	}
	rcu_read_unlock();
}

/**
 * mem_cgroup_move_swap_account - move swap charge and swap_cgroup's record.
 * @entry: swap entry to be moved
 * @from:  mem_cgroup which the entry is moved from
 * @to:  mem_cgroup which the entry is moved to
 * @need_fixup: whether we should fix up res_counters and refcounts.
 *
 * It succeeds only when the swap_cgroup's record for this entry is the same
 * as the mem_cgroup's id of @from.
 *
 * Returns 0 on success, -EINVAL on failure.
 *
 * The caller must have charged to @to, IOW, called res_counter_charge() for
 * both res and memsw, and called css_get().
 */
static int mem_cgroup_move_swap_account(swp_entry_t entry,
		struct mem_cgroup *from, struct mem_cgroup *to, bool need_fixup)
{
	unsigned short old_id, new_id;

	old_id = css_id(&from->css);
	new_id = css_id(&to->css);

	if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) {
		mem_cgroup_swap_statistics(from, false);
		mem_cgroup_swap_statistics(to, true);
		/*
		 * This function is only called from the task migration
		 * context now. It postpones res_counter and refcount handling
		 * until the end of task migration (mem_cgroup_clear_mc()) for
		 * performance reasons. But we cannot postpone
		 * mem_cgroup_get(to), because if the process that has been
		 * moved to @to does a swap-in, the refcount of @to might be
		 * decreased to 0.
		 */
		mem_cgroup_get(to);
		if (need_fixup) {
			if (!mem_cgroup_is_root(from))
				res_counter_uncharge(&from->memsw, PAGE_SIZE);
			mem_cgroup_put(from);
			/*
			 * We charged both to->res and to->memsw, so we should
			 * uncharge to->res.
			 */
			if (!mem_cgroup_is_root(to))
				res_counter_uncharge(&to->res, PAGE_SIZE);
		}
		return 0;
	}
	return -EINVAL;
}
#else
static inline int mem_cgroup_move_swap_account(swp_entry_t entry,
		struct mem_cgroup *from, struct mem_cgroup *to, bool need_fixup)
{
	return -EINVAL;
}
#endif

/*
 * Before starting migration, account PAGE_SIZE to the mem_cgroup that the
 * old page belongs to.
 */
int mem_cgroup_prepare_migration(struct page *page,
	struct page *newpage, struct mem_cgroup **ptr, gfp_t gfp_mask)
{
	struct mem_cgroup *mem = NULL;
	struct page_cgroup *pc;
	enum charge_type ctype;
	int ret = 0;

	*ptr = NULL;

	VM_BUG_ON(PageTransHuge(page));
	if (mem_cgroup_disabled())
		return 0;

	pc = lookup_page_cgroup(page);
	lock_page_cgroup(pc);
	if (PageCgroupUsed(pc)) {
		mem = pc->mem_cgroup;
		css_get(&mem->css);
		/*
		 * When migrating an anonymous page, its mapcount goes down
		 * to 0 and uncharge() will be called. But, even if it's fully
		 * unmapped, migration may fail and the page then has to be
		 * charged again. We set the MIGRATION flag here and delay
		 * the uncharge until end_migration() is called.
		 *
		 * Corner Case Thinking
		 * A)
		 * The old page is mapped as Anon and is unmapped-and-freed
		 * while migration is ongoing.
		 * If unmap finds the old page, uncharge() of it will be
		 * delayed until end_migration(). If unmap finds the new page,
		 * it's uncharged when its mapcount goes from 1 to 0. If unmap
		 * finds a swap migration entry, the new page will not be
		 * mapped and end_migration() will find it (mapcount == 0).
		 *
		 * B)
		 * The old page is mapped but migration fails, so the kernel
		 * remaps it. A charge for it is kept by the MIGRATION flag
		 * even if its mapcount goes down to 0. We can remap
		 * successfully without charging it again.
		 *
		 * C)
		 * The "old" page is under lock_page() until the end of
		 * migration, so the old page itself will not be swapped out.
		 * If the new page is swapped out before end_migration, our
		 * hook into the usual swap-out path will catch the event.
		 */
		if (PageAnon(page))
			SetPageCgroupMigration(pc);
	}
	unlock_page_cgroup(pc);
	/*
	 * If the page is not charged at this point,
	 * we return here.
	 */
	if (!mem)
		return 0;

	*ptr = mem;
	ret = __mem_cgroup_try_charge(NULL, gfp_mask, 1, ptr, false);
	css_put(&mem->css);	/* drop the extra refcnt */
	if (ret || *ptr == NULL) {
		if (PageAnon(page)) {
			lock_page_cgroup(pc);
			ClearPageCgroupMigration(pc);
			unlock_page_cgroup(pc);
			/*
			 * The old page may be fully unmapped while we kept it.
			 */
			mem_cgroup_uncharge_page(page);
		}
		return -ENOMEM;
	}
	/*
	 * We charge the new page before it's used/mapped. So, even if
	 * unlock_page() is called before end_migration, we can catch all
	 * events on this new page. In the case the new page is migrated but
	 * not remapped, the new page's mapcount will finally be 0 and we
	 * call uncharge in end_migration().
	 */
	pc = lookup_page_cgroup(newpage);
	if (PageAnon(page))
		ctype = MEM_CGROUP_CHARGE_TYPE_MAPPED;
	else if (page_is_file_cache(page))
		ctype = MEM_CGROUP_CHARGE_TYPE_CACHE;
	else
		ctype = MEM_CGROUP_CHARGE_TYPE_SHMEM;
	__mem_cgroup_commit_charge(mem, page, 1, pc, ctype);
	return ret;
}

/* remove the redundant charge if migration failed */
void mem_cgroup_end_migration(struct mem_cgroup *mem,
	struct page *oldpage, struct page *newpage, bool migration_ok)
{
	struct page *used, *unused;
	struct page_cgroup *pc;

	if (!mem)
		return;
	/* blocks rmdir() */
	cgroup_exclude_rmdir(&mem->css);
	if (!migration_ok) {
		used = oldpage;
		unused = newpage;
	} else {
		used = newpage;
		unused = oldpage;
	}
	/*
	 * We disallowed uncharge of pages under migration because the
	 * mapcount of the page goes down to zero, temporarily.
	 * Clear the flag and check whether the page should be charged.
	 */
	pc = lookup_page_cgroup(oldpage);
	lock_page_cgroup(pc);
	ClearPageCgroupMigration(pc);
	unlock_page_cgroup(pc);

	__mem_cgroup_uncharge_common(unused, MEM_CGROUP_CHARGE_TYPE_FORCE);

	/*
	 * If the page is file cache, the radix-tree replacement is atomic
	 * and we can skip this check. When it was an Anon page, its mapcount
	 * goes down to 0, but because we added the MIGRATION flag, it's not
	 * uncharged yet. There are several cases, but the page->mapcount
	 * check and the USED bit check in mem_cgroup_uncharge_page() do
	 * enough checking. (See mem_cgroup_prepare_migration() as well.)
	 */
	if (PageAnon(used))
		mem_cgroup_uncharge_page(used);
	/*
	 * At migration, we may charge against a cgroup which has no
	 * tasks, so rmdir()->pre_destroy() can be called while we do this
	 * charge. In that case, we need to call pre_destroy() again.
	 * Check it here.
	 */
	cgroup_release_and_wakeup_rmdir(&mem->css);
}

/*
 * A call to try to shrink memory usage on charge failure at shmem's swapin.
 * Calling hierarchical_reclaim is not enough, because we should also update
 * last_oom_jiffies to prevent pagefault_out_of_memory from invoking the
 * global OOM killer. Moreover, considering hierarchy, we should reclaim
 * from the mem_over_limit, not from the memcg which this page would be
 * charged to. try_charge_swapin does all of this work properly.
 */
int mem_cgroup_shmem_charge_fallback(struct page *page,
			    struct mm_struct *mm,
			    gfp_t gfp_mask)
{
	struct mem_cgroup *mem;
	int ret;

	if (mem_cgroup_disabled())
		return 0;

	ret = mem_cgroup_try_charge_swapin(mm, page, gfp_mask, &mem);
	if (!ret)
		mem_cgroup_cancel_charge_swapin(mem); /* it does the !mem check */

	return ret;
}

#ifdef CONFIG_DEBUG_VM
static struct page_cgroup *lookup_page_cgroup_used(struct page *page)
{
	struct page_cgroup *pc;

	pc = lookup_page_cgroup(page);
	if (likely(pc) && PageCgroupUsed(pc))
		return pc;
	return NULL;
}

bool mem_cgroup_bad_page_check(struct page *page)
{
	if (mem_cgroup_disabled())
		return false;

	return lookup_page_cgroup_used(page) != NULL;
}

void mem_cgroup_print_bad_page(struct page *page)
{
	struct page_cgroup *pc;

	pc = lookup_page_cgroup_used(page);
	if (pc) {
		int ret = -1;
		char *path;

		printk(KERN_ALERT "pc:%p pc->flags:%lx pc->mem_cgroup:%p",
		       pc, pc->flags, pc->mem_cgroup);

		path = kmalloc(PATH_MAX, GFP_KERNEL);
		if (path) {
			rcu_read_lock();
			ret = cgroup_path(pc->mem_cgroup->css.cgroup,
							path, PATH_MAX);
			rcu_read_unlock();
		}

		printk(KERN_CONT "(%s)\n",
				(ret < 0) ? "cannot get the path" : path);
		kfree(path);
	}
}
#endif

static DEFINE_MUTEX(set_limit_mutex);

static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
				unsigned long long val)
{
	int retry_count;
	u64 memswlimit, memlimit;
	int ret = 0;
	int children = mem_cgroup_count_children(memcg);
	u64 curusage, oldusage;
	int enlarge;

	/*
	 * To keep hierarchical_reclaim simple, how long we should retry
	 * depends on the caller. We set our retry count to be a function
	 * of the number of children we should visit in this loop.
	 */
	retry_count = MEM_CGROUP_RECLAIM_RETRIES * children;

	oldusage = res_counter_read_u64(&memcg->res, RES_USAGE);

	enlarge = 0;
	while (retry_count) {
		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}
		/*
		 * Rather than hiding all of this in some function, do it in
		 * an open-coded manner so it is clear what really happens
		 * here. We have to guarantee mem->res.limit <=
		 * mem->memsw.limit.
		 */
		mutex_lock(&set_limit_mutex);
		memswlimit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
		if (memswlimit < val) {
			ret = -EINVAL;
			mutex_unlock(&set_limit_mutex);
			break;
		}

		memlimit = res_counter_read_u64(&memcg->res, RES_LIMIT);
		if (memlimit < val)
			enlarge = 1;

		ret = res_counter_set_limit(&memcg->res, val);
		if (!ret) {
			if (memswlimit == val)
				memcg->memsw_is_minimum = true;
			else
				memcg->memsw_is_minimum = false;
		}
		mutex_unlock(&set_limit_mutex);

		if (!ret)
			break;

		mem_cgroup_hierarchical_reclaim(memcg, NULL, GFP_KERNEL,
						MEM_CGROUP_RECLAIM_SHRINK);
		curusage = res_counter_read_u64(&memcg->res, RES_USAGE);
		/* Was the usage reduced? */
		if (curusage >= oldusage)
			retry_count--;
		else
			oldusage = curusage;
	}
	if (!ret && enlarge)
		memcg_oom_recover(memcg);

	return ret;
}

static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
					unsigned long long val)
{
	int retry_count;
	u64 memlimit, memswlimit, oldusage, curusage;
	int children = mem_cgroup_count_children(memcg);
	int ret = -EBUSY;
	int enlarge = 0;

	/* see mem_cgroup_resize_limit */
	retry_count = children * MEM_CGROUP_RECLAIM_RETRIES;
	oldusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
	while (retry_count) {
		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}
		/*
		 * Rather than hiding all of this in some function, do it in
		 * an open-coded manner so it is clear what really happens
		 * here. We have to guarantee mem->res.limit <=
		 * mem->memsw.limit.
		 */
		mutex_lock(&set_limit_mutex);
		memlimit = res_counter_read_u64(&memcg->res, RES_LIMIT);
		if (memlimit > val) {
			ret = -EINVAL;
			mutex_unlock(&set_limit_mutex);
			break;
		}
		memswlimit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
		if (memswlimit < val)
			enlarge = 1;
		ret = res_counter_set_limit(&memcg->memsw, val);
		if (!ret) {
			if (memlimit == val)
				memcg->memsw_is_minimum = true;
			else
				memcg->memsw_is_minimum = false;
		}
		mutex_unlock(&set_limit_mutex);

		if (!ret)
			break;

		mem_cgroup_hierarchical_reclaim(memcg, NULL, GFP_KERNEL,
						MEM_CGROUP_RECLAIM_NOSWAP |
						MEM_CGROUP_RECLAIM_SHRINK);
		curusage = res_counter_read_u64(&memcg->memsw, RES_USAGE);
		/* Was the usage reduced? */
		if (curusage >= oldusage)
			retry_count--;
		else
			oldusage = curusage;
	}
	if (!ret && enlarge)
		memcg_oom_recover(memcg);
	return ret;
}
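
/*
 * The user-visible contract both resize paths above enforce, sketched with
 * the memory controller's control files: res.limit <= memsw.limit must hold
 * for every memcg, so
 *
 *	echo $VAL > memory.limit_in_bytes	fails with -EINVAL if
 *						VAL > memory.memsw.limit_in_bytes
 *	echo $VAL > memory.memsw.limit_in_bytes	fails with -EINVAL if
 *						VAL < memory.limit_in_bytes
 */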

unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
					    gfp_t gfp_mask)
{
	unsigned long nr_reclaimed = 0;
	struct mem_cgroup_per_zone *mz, *next_mz = NULL;
	unsigned long reclaimed;
	int loop = 0;
	struct mem_cgroup_tree_per_zone *mctz;
	unsigned long long excess;

	if (order > 0)
		return 0;

	mctz = soft_limit_tree_node_zone(zone_to_nid(zone), zone_idx(zone));
	/*
	 * This loop can run for a while, especially if mem_cgroups
	 * continuously keep exceeding their soft limit and putting the
	 * system under pressure.
	 */
	do {
		if (next_mz)
			mz = next_mz;
		else
			mz = mem_cgroup_largest_soft_limit_node(mctz);
		if (!mz)
			break;

		reclaimed = mem_cgroup_hierarchical_reclaim(mz->mem, zone,
						gfp_mask,
						MEM_CGROUP_RECLAIM_SOFT);
		nr_reclaimed += reclaimed;
		spin_lock(&mctz->lock);

		/*
		 * If we failed to reclaim anything from this memory cgroup
		 * it is time to move on to the next cgroup.
		 */
		next_mz = NULL;
		if (!reclaimed) {
			do {
				/*
				 * Loop until we find yet another one.
				 *
				 * By the time we get the soft_limit lock
				 * again, someone might have added the
				 * group back on the RB tree. Iterate to
				 * make sure we get a different mem.
				 * mem_cgroup_largest_soft_limit_node returns
				 * NULL if no other cgroup is present on
				 * the tree.
				 */
				next_mz =
				__mem_cgroup_largest_soft_limit_node(mctz);
				if (next_mz == mz) {
					css_put(&next_mz->mem->css);
					next_mz = NULL;
				} else /* next_mz == NULL or other memcg */
					break;
			} while (1);
		}
		__mem_cgroup_remove_exceeded(mz->mem, mz, mctz);
		excess = res_counter_soft_limit_excess(&mz->mem->res);
		/*
		 * One school of thought says that we should not add
		 * back the node to the tree if reclaim returns 0.
		 * But our reclaim could return 0 simply because, due
		 * to priority, we are exposing a smaller subset of
		 * memory to reclaim from. Consider this as a longer
		 * term TODO.
		 */
		/* If excess == 0, no tree ops */
		__mem_cgroup_insert_exceeded(mz->mem, mz, mctz, excess);
		spin_unlock(&mctz->lock);
		css_put(&mz->mem->css);
		loop++;
		/*
		 * Could not reclaim anything and there are no more
		 * mem cgroups to try or we seem to be looping without
		 * reclaiming anything.
		 */
		if (!nr_reclaimed &&
			(next_mz == NULL ||
			loop > MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS))
			break;
	} while (!nr_reclaimed);
	if (next_mz)
		css_put(&next_mz->mem->css);
	return nr_reclaimed;
}
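
/*
 * Usage note (a sketch; the mount point is an assumption): soft limits
 * are set through cgroupfs and only take effect when the reclaim path
 * above runs under global memory pressure, e.g.:
 *
 *	# echo 256M > /cgroup/grp/memory.soft_limit_in_bytes
 *
 * The groups exceeding their soft limit by the largest amount are
 * reclaimed from first, via the per-zone RB tree walked here.
 */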

/*
 * This routine traverses the page_cgroups on the given list and drops
 * them all. *And* it doesn't reclaim the pages themselves, it just
 * removes the page_cgroups.
 */
static int mem_cgroup_force_empty_list(struct mem_cgroup *mem,
				int node, int zid, enum lru_list lru)
{
	struct zone *zone;
	struct mem_cgroup_per_zone *mz;
	struct page_cgroup *pc, *busy;
	unsigned long flags, loop;
	struct list_head *list;
	int ret = 0;

	zone = &NODE_DATA(node)->node_zones[zid];
	mz = mem_cgroup_zoneinfo(mem, node, zid);
	list = &mz->lists[lru];

	loop = MEM_CGROUP_ZSTAT(mz, lru);
	/* give some margin against EBUSY etc... */
	loop += 256;
	busy = NULL;
	while (loop--) {
		struct page *page;

		ret = 0;
		spin_lock_irqsave(&zone->lru_lock, flags);
		if (list_empty(list)) {
			spin_unlock_irqrestore(&zone->lru_lock, flags);
			break;
		}
		pc = list_entry(list->prev, struct page_cgroup, lru);
		if (busy == pc) {
			list_move(&pc->lru, list);
			busy = NULL;
			spin_unlock_irqrestore(&zone->lru_lock, flags);
			continue;
		}
		spin_unlock_irqrestore(&zone->lru_lock, flags);

		page = lookup_cgroup_page(pc);

		ret = mem_cgroup_move_parent(page, pc, mem, GFP_KERNEL);
		if (ret == -ENOMEM)
			break;

		if (ret == -EBUSY || ret == -EINVAL) {
			/* found lock contention or "pc" is obsolete. */
			busy = pc;
			cond_resched();
		} else
			busy = NULL;
	}

	if (!ret && !list_empty(list))
		return -EBUSY;
	return ret;
}

/*
 * Make a mem_cgroup's charge 0 if there are no tasks in it.
 * This enables deleting the mem_cgroup.
 */
static int mem_cgroup_force_empty(struct mem_cgroup *mem, bool free_all)
{
	int ret;
	int node, zid, shrink;
	int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
	struct cgroup *cgrp = mem->css.cgroup;

	css_get(&mem->css);

	shrink = 0;
	/* should free all ? */
	if (free_all)
		goto try_to_free;
move_account:
	do {
		ret = -EBUSY;
		if (cgroup_task_count(cgrp) || !list_empty(&cgrp->children))
			goto out;
		ret = -EINTR;
		if (signal_pending(current))
			goto out;
		/* This is for making all *used* pages to be on LRU. */
		lru_add_drain_all();
		drain_all_stock_sync();
		ret = 0;
		mem_cgroup_start_move(mem);
		for_each_node_state(node, N_HIGH_MEMORY) {
			for (zid = 0; !ret && zid < MAX_NR_ZONES; zid++) {
				enum lru_list l;
				for_each_lru(l) {
					ret = mem_cgroup_force_empty_list(mem,
							node, zid, l);
					if (ret)
						break;
				}
			}
			if (ret)
				break;
		}
		mem_cgroup_end_move(mem);
		memcg_oom_recover(mem);
		/* it seems the parent cgroup doesn't have enough mem */
		if (ret == -ENOMEM)
			goto try_to_free;
		cond_resched();
	/* "ret" should also be checked to ensure all lists are empty. */
	} while (mem->res.usage > 0 || ret);
out:
	css_put(&mem->css);
	return ret;

try_to_free:
	/* returns -EBUSY if there is a task or if we come here twice. */
	if (cgroup_task_count(cgrp) || !list_empty(&cgrp->children) || shrink) {
		ret = -EBUSY;
		goto out;
	}
	/* call try-to-free pages to make this cgroup empty */
	lru_add_drain_all();
	/* try to free all pages in this cgroup */
	shrink = 1;
	while (nr_retries && mem->res.usage > 0) {
		int progress;

		if (signal_pending(current)) {
			ret = -EINTR;
			goto out;
		}
		progress = try_to_free_mem_cgroup_pages(mem, GFP_KERNEL,
						false, get_swappiness(mem));
		if (!progress) {
			nr_retries--;
			/* maybe some writeback is necessary */
			congestion_wait(BLK_RW_ASYNC, HZ/10);
		}

	}
	lru_add_drain();
	/* try move_account...there may be some *locked* pages. */
	goto move_account;
}

int mem_cgroup_force_empty_write(struct cgroup *cont, unsigned int event)
{
	return mem_cgroup_force_empty(mem_cgroup_from_cont(cont), true);
}
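
/*
 * Usage note (a sketch; the path is an assumption): with no tasks left
 * in the group, writing anything to memory.force_empty invokes the
 * handler above to drop or reparent all remaining charges, e.g.:
 *
 *	# echo 0 > /cgroup/grp/memory.force_empty
 *
 * It returns -EBUSY if tasks remain or the group still has children.
 */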


static u64 mem_cgroup_hierarchy_read(struct cgroup *cont, struct cftype *cft)
{
	return mem_cgroup_from_cont(cont)->use_hierarchy;
}

static int mem_cgroup_hierarchy_write(struct cgroup *cont, struct cftype *cft,
					u64 val)
{
	int retval = 0;
	struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
	struct cgroup *parent = cont->parent;
	struct mem_cgroup *parent_mem = NULL;

	if (parent)
		parent_mem = mem_cgroup_from_cont(parent);

	cgroup_lock();
	/*
	 * If the parent's use_hierarchy is set, we can't make any
	 * modifications in the child subtrees. If it is unset, then
	 * the change can occur, provided the current cgroup has no
	 * children.
	 *
	 * For the root cgroup, parent_mem is NULL; we allow the value
	 * to be set if there are no children.
	 */
	if ((!parent_mem || !parent_mem->use_hierarchy) &&
				(val == 1 || val == 0)) {
		if (list_empty(&cont->children))
			mem->use_hierarchy = val;
		else
			retval = -EBUSY;
	} else
		retval = -EINVAL;
	cgroup_unlock();

	return retval;
}
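
/*
 * Usage note (a sketch; the path is an assumption): use_hierarchy must
 * be flipped before the cgroup has any children (or the write above
 * fails with -EBUSY), and cannot be changed underneath a parent that
 * already has it set (-EINVAL):
 *
 *	# echo 1 > /cgroup/grp/memory.use_hierarchy
 *	# mkdir /cgroup/grp/child	(the child now inherits it)
 */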


static unsigned long mem_cgroup_recursive_stat(struct mem_cgroup *mem,
					       enum mem_cgroup_stat_index idx)
{
	struct mem_cgroup *iter;
	long val = 0;

	/* Per-cpu values can be negative, use a signed accumulator */
	for_each_mem_cgroup_tree(iter, mem)
		val += mem_cgroup_read_stat(iter, idx);

	if (val < 0) /* race ? */
		val = 0;
	return val;
}

static inline u64 mem_cgroup_usage(struct mem_cgroup *mem, bool swap)
{
	u64 val;

	if (!mem_cgroup_is_root(mem)) {
		if (!swap)
			return res_counter_read_u64(&mem->res, RES_USAGE);
		else
			return res_counter_read_u64(&mem->memsw, RES_USAGE);
	}

	val = mem_cgroup_recursive_stat(mem, MEM_CGROUP_STAT_CACHE);
	val += mem_cgroup_recursive_stat(mem, MEM_CGROUP_STAT_RSS);

	if (swap)
		val += mem_cgroup_recursive_stat(mem, MEM_CGROUP_STAT_SWAPOUT);

	return val << PAGE_SHIFT;
}

static u64 mem_cgroup_read(struct cgroup *cont, struct cftype *cft)
{
	struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
	u64 val;
	int type, name;

	type = MEMFILE_TYPE(cft->private);
	name = MEMFILE_ATTR(cft->private);
	switch (type) {
	case _MEM:
		if (name == RES_USAGE)
			val = mem_cgroup_usage(mem, false);
		else
			val = res_counter_read_u64(&mem->res, name);
		break;
	case _MEMSWAP:
		if (name == RES_USAGE)
			val = mem_cgroup_usage(mem, true);
		else
			val = res_counter_read_u64(&mem->memsw, name);
		break;
	default:
		BUG();
		break;
	}
	return val;
}
/*
 * The users of this function are the write handlers for
 * RES_LIMIT and RES_SOFT_LIMIT.
 */
static int mem_cgroup_write(struct cgroup *cont, struct cftype *cft,
			    const char *buffer)
{
	struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
	int type, name;
	unsigned long long val;
	int ret;

	type = MEMFILE_TYPE(cft->private);
	name = MEMFILE_ATTR(cft->private);
	switch (name) {
	case RES_LIMIT:
		if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */
			ret = -EINVAL;
			break;
		}
		/* This function does all the necessary parsing...reuse it */
		ret = res_counter_memparse_write_strategy(buffer, &val);
		if (ret)
			break;
		if (type == _MEM)
			ret = mem_cgroup_resize_limit(memcg, val);
		else
			ret = mem_cgroup_resize_memsw_limit(memcg, val);
		break;
	case RES_SOFT_LIMIT:
		ret = res_counter_memparse_write_strategy(buffer, &val);
		if (ret)
			break;
		/*
		 * For memsw, soft limits are hard to implement in terms
		 * of semantics; for now, we only support soft limits for
		 * control without swap.
		 */
		if (type == _MEM)
			ret = res_counter_set_soft_limit(&memcg->res, val);
		else
			ret = -EINVAL;
		break;
	default:
		ret = -EINVAL; /* should be BUG() ? */
		break;
	}
	return ret;
}
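
/*
 * Illustrative userspace sketch (compiled out): the write handler above
 * parses values with res_counter_memparse_write_strategy(), so
 * memparse()-style K/M/G suffixes are accepted, and "-1" maps to "no
 * limit" (RESOURCE_MAX). The file path is an assumption.
 */
#if 0
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/cgroup/grp/memory.limit_in_bytes", "w");

	if (!f)
		return 1;
	fputs("512M\n", f);	/* same as writing 536870912 */
	return fclose(f);
}
#endif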

static void memcg_get_hierarchical_limit(struct mem_cgroup *memcg,
		unsigned long long *mem_limit, unsigned long long *memsw_limit)
{
	struct cgroup *cgroup;
	unsigned long long min_limit, min_memsw_limit, tmp;

	min_limit = res_counter_read_u64(&memcg->res, RES_LIMIT);
	min_memsw_limit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
	cgroup = memcg->css.cgroup;
	if (!memcg->use_hierarchy)
		goto out;

	while (cgroup->parent) {
		cgroup = cgroup->parent;
		memcg = mem_cgroup_from_cont(cgroup);
		if (!memcg->use_hierarchy)
			break;
		tmp = res_counter_read_u64(&memcg->res, RES_LIMIT);
		min_limit = min(min_limit, tmp);
		tmp = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
		min_memsw_limit = min(min_memsw_limit, tmp);
	}
out:
	*mem_limit = min_limit;
	*memsw_limit = min_memsw_limit;
	return;
}

static int mem_cgroup_reset(struct cgroup *cont, unsigned int event)
{
	struct mem_cgroup *mem;
	int type, name;

	mem = mem_cgroup_from_cont(cont);
	type = MEMFILE_TYPE(event);
	name = MEMFILE_ATTR(event);
	switch (name) {
	case RES_MAX_USAGE:
		if (type == _MEM)
			res_counter_reset_max(&mem->res);
		else
			res_counter_reset_max(&mem->memsw);
		break;
	case RES_FAILCNT:
		if (type == _MEM)
			res_counter_reset_failcnt(&mem->res);
		else
			res_counter_reset_failcnt(&mem->memsw);
		break;
	}

	return 0;
}
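
/*
 * Usage note (a sketch; paths are assumptions): the trigger above
 * resets the high-water mark or failure counter; the value written is
 * ignored:
 *
 *	# echo 0 > /cgroup/grp/memory.max_usage_in_bytes
 *	# echo 0 > /cgroup/grp/memory.failcnt
 */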

static u64 mem_cgroup_move_charge_read(struct cgroup *cgrp,
					struct cftype *cft)
{
	return mem_cgroup_from_cont(cgrp)->move_charge_at_immigrate;
}

#ifdef CONFIG_MMU
static int mem_cgroup_move_charge_write(struct cgroup *cgrp,
					struct cftype *cft, u64 val)
{
	struct mem_cgroup *mem = mem_cgroup_from_cont(cgrp);

	if (val >= (1 << NR_MOVE_TYPE))
		return -EINVAL;
	/*
	 * We check this value several times in both can_attach() and
	 * attach(), so we need the cgroup lock to prevent it from
	 * becoming inconsistent.
	 */
	cgroup_lock();
	mem->move_charge_at_immigrate = val;
	cgroup_unlock();

	return 0;
}
#else
static int mem_cgroup_move_charge_write(struct cgroup *cgrp,
					struct cftype *cft, u64 val)
{
	return -ENOSYS;
}
#endif
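
/*
 * Usage note (a sketch; the path is an assumption):
 * move_charge_at_immigrate is a bitmask; bit 0 enables moving charges
 * of anonymous pages and bit 1 enables moving charges of file pages
 * (see move_anon()/move_file() below). To move both kinds when a task
 * migrates into the group:
 *
 *	# echo 3 > /cgroup/grp/memory.move_charge_at_immigrate
 */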


/* For reading statistics */
enum {
	MCS_CACHE,
	MCS_RSS,
	MCS_FILE_MAPPED,
	MCS_PGPGIN,
	MCS_PGPGOUT,
	MCS_SWAP,
	MCS_INACTIVE_ANON,
	MCS_ACTIVE_ANON,
	MCS_INACTIVE_FILE,
	MCS_ACTIVE_FILE,
	MCS_UNEVICTABLE,
	NR_MCS_STAT,
};

struct mcs_total_stat {
	s64 stat[NR_MCS_STAT];
};

struct {
	char *local_name;
	char *total_name;
} memcg_stat_strings[NR_MCS_STAT] = {
	{"cache", "total_cache"},
	{"rss", "total_rss"},
	{"mapped_file", "total_mapped_file"},
	{"pgpgin", "total_pgpgin"},
	{"pgpgout", "total_pgpgout"},
	{"swap", "total_swap"},
	{"inactive_anon", "total_inactive_anon"},
	{"active_anon", "total_active_anon"},
	{"inactive_file", "total_inactive_file"},
	{"active_file", "total_active_file"},
	{"unevictable", "total_unevictable"}
};


static void
mem_cgroup_get_local_stat(struct mem_cgroup *mem, struct mcs_total_stat *s)
{
	s64 val;

	/* per cpu stat */
	val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_CACHE);
	s->stat[MCS_CACHE] += val * PAGE_SIZE;
	val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_RSS);
	s->stat[MCS_RSS] += val * PAGE_SIZE;
	val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_FILE_MAPPED);
	s->stat[MCS_FILE_MAPPED] += val * PAGE_SIZE;
	val = mem_cgroup_read_events(mem, MEM_CGROUP_EVENTS_PGPGIN);
	s->stat[MCS_PGPGIN] += val;
	val = mem_cgroup_read_events(mem, MEM_CGROUP_EVENTS_PGPGOUT);
	s->stat[MCS_PGPGOUT] += val;
	if (do_swap_account) {
		val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_SWAPOUT);
		s->stat[MCS_SWAP] += val * PAGE_SIZE;
	}

	/* per zone stat */
	val = mem_cgroup_get_local_zonestat(mem, LRU_INACTIVE_ANON);
	s->stat[MCS_INACTIVE_ANON] += val * PAGE_SIZE;
	val = mem_cgroup_get_local_zonestat(mem, LRU_ACTIVE_ANON);
	s->stat[MCS_ACTIVE_ANON] += val * PAGE_SIZE;
	val = mem_cgroup_get_local_zonestat(mem, LRU_INACTIVE_FILE);
	s->stat[MCS_INACTIVE_FILE] += val * PAGE_SIZE;
	val = mem_cgroup_get_local_zonestat(mem, LRU_ACTIVE_FILE);
	s->stat[MCS_ACTIVE_FILE] += val * PAGE_SIZE;
	val = mem_cgroup_get_local_zonestat(mem, LRU_UNEVICTABLE);
	s->stat[MCS_UNEVICTABLE] += val * PAGE_SIZE;
}

static void
mem_cgroup_get_total_stat(struct mem_cgroup *mem, struct mcs_total_stat *s)
{
	struct mem_cgroup *iter;

	for_each_mem_cgroup_tree(iter, mem)
		mem_cgroup_get_local_stat(iter, s);
}

static int mem_control_stat_show(struct cgroup *cont, struct cftype *cft,
				 struct cgroup_map_cb *cb)
{
	struct mem_cgroup *mem_cont = mem_cgroup_from_cont(cont);
	struct mcs_total_stat mystat;
	int i;

	memset(&mystat, 0, sizeof(mystat));
	mem_cgroup_get_local_stat(mem_cont, &mystat);

	for (i = 0; i < NR_MCS_STAT; i++) {
		if (i == MCS_SWAP && !do_swap_account)
			continue;
		cb->fill(cb, memcg_stat_strings[i].local_name, mystat.stat[i]);
	}

	/* Hierarchical information */
	{
		unsigned long long limit, memsw_limit;
		memcg_get_hierarchical_limit(mem_cont, &limit, &memsw_limit);
		cb->fill(cb, "hierarchical_memory_limit", limit);
		if (do_swap_account)
			cb->fill(cb, "hierarchical_memsw_limit", memsw_limit);
	}

	memset(&mystat, 0, sizeof(mystat));
	mem_cgroup_get_total_stat(mem_cont, &mystat);
	for (i = 0; i < NR_MCS_STAT; i++) {
		if (i == MCS_SWAP && !do_swap_account)
			continue;
		cb->fill(cb, memcg_stat_strings[i].total_name, mystat.stat[i]);
	}

#ifdef CONFIG_DEBUG_VM
	cb->fill(cb, "inactive_ratio", calc_inactive_ratio(mem_cont, NULL));

	{
		int nid, zid;
		struct mem_cgroup_per_zone *mz;
		unsigned long recent_rotated[2] = {0, 0};
		unsigned long recent_scanned[2] = {0, 0};

		for_each_online_node(nid)
			for (zid = 0; zid < MAX_NR_ZONES; zid++) {
				mz = mem_cgroup_zoneinfo(mem_cont, nid, zid);

				recent_rotated[0] +=
					mz->reclaim_stat.recent_rotated[0];
				recent_rotated[1] +=
					mz->reclaim_stat.recent_rotated[1];
				recent_scanned[0] +=
					mz->reclaim_stat.recent_scanned[0];
				recent_scanned[1] +=
					mz->reclaim_stat.recent_scanned[1];
			}
		cb->fill(cb, "recent_rotated_anon", recent_rotated[0]);
		cb->fill(cb, "recent_rotated_file", recent_rotated[1]);
		cb->fill(cb, "recent_scanned_anon", recent_scanned[0]);
		cb->fill(cb, "recent_scanned_file", recent_scanned[1]);
	}
#endif

	return 0;
}
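
/*
 * Illustrative userspace sketch (compiled out): memory.stat, produced
 * by the handler above, is a list of "name value" lines; the total_*
 * keys are hierarchical sums. The path is an assumption.
 */
#if 0
#include <stdio.h>
#include <string.h>

int main(void)
{
	char name[64];
	unsigned long long value;
	FILE *f = fopen("/cgroup/grp/memory.stat", "r");

	if (!f)
		return 1;
	while (fscanf(f, "%63s %llu", name, &value) == 2)
		if (!strcmp(name, "rss") || !strcmp(name, "total_rss"))
			printf("%s = %llu bytes\n", name, value);
	fclose(f);
	return 0;
}
#endif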

static u64 mem_cgroup_swappiness_read(struct cgroup *cgrp, struct cftype *cft)
{
	struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);

	return get_swappiness(memcg);
}

static int mem_cgroup_swappiness_write(struct cgroup *cgrp, struct cftype *cft,
				       u64 val)
{
	struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
	struct mem_cgroup *parent;

	if (val > 100)
		return -EINVAL;

	if (cgrp->parent == NULL)
		return -EINVAL;

	parent = mem_cgroup_from_cont(cgrp->parent);

	cgroup_lock();

	/* If under hierarchy, only empty-root can set this value */
	if ((parent->use_hierarchy) ||
	    (memcg->use_hierarchy && !list_empty(&cgrp->children))) {
		cgroup_unlock();
		return -EINVAL;
	}

	memcg->swappiness = val;

	cgroup_unlock();

	return 0;
}
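
/*
 * Usage note (a sketch; the path is an assumption): per-group
 * swappiness takes values 0-100 like the global vm.swappiness. The
 * write above is rejected for the root cgroup and for groups already
 * inside a populated hierarchy:
 *
 *	# echo 20 > /cgroup/grp/memory.swappiness
 */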

static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
{
	struct mem_cgroup_threshold_ary *t;
	u64 usage;
	int i;

	rcu_read_lock();
	if (!swap)
		t = rcu_dereference(memcg->thresholds.primary);
	else
		t = rcu_dereference(memcg->memsw_thresholds.primary);

	if (!t)
		goto unlock;

	usage = mem_cgroup_usage(memcg, swap);

	/*
	 * current_threshold points to the threshold just below usage.
	 * If that is no longer true, a threshold was crossed after the
	 * last call of __mem_cgroup_threshold().
	 */
	i = t->current_threshold;

	/*
	 * Iterate backward over the array of thresholds starting from
	 * current_threshold and check if a threshold is crossed.
	 * If none of the thresholds below usage is crossed, we read
	 * only one element of the array here.
	 */
	for (; i >= 0 && unlikely(t->entries[i].threshold > usage); i--)
		eventfd_signal(t->entries[i].eventfd, 1);

	/* i = current_threshold + 1 */
	i++;

	/*
	 * Iterate forward over the array of thresholds starting from
	 * current_threshold+1 and check if a threshold is crossed.
	 * If none of the thresholds above usage is crossed, we read
	 * only one element of the array here.
	 */
	for (; i < t->size && unlikely(t->entries[i].threshold <= usage); i++)
		eventfd_signal(t->entries[i].eventfd, 1);

	/* Update current_threshold */
	t->current_threshold = i - 1;
unlock:
	rcu_read_unlock();
}

static void mem_cgroup_threshold(struct mem_cgroup *memcg)
{
	while (memcg) {
		__mem_cgroup_threshold(memcg, false);
		if (do_swap_account)
			__mem_cgroup_threshold(memcg, true);

		memcg = parent_mem_cgroup(memcg);
	}
}

static int compare_thresholds(const void *a, const void *b)
{
	const struct mem_cgroup_threshold *_a = a;
	const struct mem_cgroup_threshold *_b = b;

	/*
	 * Compare explicitly: subtracting the u64 thresholds and
	 * truncating the result to int can return the wrong sign.
	 */
	if (_a->threshold > _b->threshold)
		return 1;
	if (_a->threshold < _b->threshold)
		return -1;
	return 0;
}

static int mem_cgroup_oom_notify_cb(struct mem_cgroup *mem)
{
	struct mem_cgroup_eventfd_list *ev;

	list_for_each_entry(ev, &mem->oom_notify, list)
		eventfd_signal(ev->eventfd, 1);
	return 0;
}

static void mem_cgroup_oom_notify(struct mem_cgroup *mem)
{
	struct mem_cgroup *iter;

	for_each_mem_cgroup_tree(iter, mem)
		mem_cgroup_oom_notify_cb(iter);
}

static int mem_cgroup_usage_register_event(struct cgroup *cgrp,
	struct cftype *cft, struct eventfd_ctx *eventfd, const char *args)
{
	struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
	struct mem_cgroup_thresholds *thresholds;
	struct mem_cgroup_threshold_ary *new;
	int type = MEMFILE_TYPE(cft->private);
	u64 threshold, usage;
	int i, size, ret;

	ret = res_counter_memparse_write_strategy(args, &threshold);
	if (ret)
		return ret;

	mutex_lock(&memcg->thresholds_lock);

	if (type == _MEM)
		thresholds = &memcg->thresholds;
	else if (type == _MEMSWAP)
		thresholds = &memcg->memsw_thresholds;
	else
		BUG();

	usage = mem_cgroup_usage(memcg, type == _MEMSWAP);

	/* Check if a threshold was crossed before adding a new one */
	if (thresholds->primary)
		__mem_cgroup_threshold(memcg, type == _MEMSWAP);

	size = thresholds->primary ? thresholds->primary->size + 1 : 1;

	/* Allocate memory for the new array of thresholds */
	new = kmalloc(sizeof(*new) + size * sizeof(struct mem_cgroup_threshold),
			GFP_KERNEL);
	if (!new) {
		ret = -ENOMEM;
		goto unlock;
	}
	new->size = size;

	/* Copy thresholds (if any) to the new array */
	if (thresholds->primary) {
		memcpy(new->entries, thresholds->primary->entries, (size - 1) *
				sizeof(struct mem_cgroup_threshold));
	}

	/* Add the new threshold */
	new->entries[size - 1].eventfd = eventfd;
	new->entries[size - 1].threshold = threshold;

	/* Sort thresholds. Registering of new threshold isn't time-critical */
	sort(new->entries, size, sizeof(struct mem_cgroup_threshold),
			compare_thresholds, NULL);

	/* Find the current threshold */
	new->current_threshold = -1;
	for (i = 0; i < size; i++) {
		if (new->entries[i].threshold < usage) {
			/*
			 * new->current_threshold will not be used until
			 * rcu_assign_pointer(), so it's safe to increment
			 * it here.
			 */
			++new->current_threshold;
		}
	}

	/* Free the old spare buffer and save the old primary buffer as spare */
	kfree(thresholds->spare);
	thresholds->spare = thresholds->primary;

	rcu_assign_pointer(thresholds->primary, new);

	/* To be sure that nobody uses thresholds */
	synchronize_rcu();

unlock:
	mutex_unlock(&memcg->thresholds_lock);

	return ret;
}
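
/*
 * Illustrative userspace sketch (compiled out): registering a usage
 * threshold with the handler above goes through eventfd(2) plus the
 * cgroup event_control interface; the line written has the form
 * "<event_fd> <target_fd> <args>". Paths are assumptions.
 */
#if 0
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/eventfd.h>

int main(void)
{
	char buf[64];
	uint64_t ticks;
	int efd = eventfd(0, 0);
	int ufd = open("/cgroup/grp/memory.usage_in_bytes", O_RDONLY);
	int cfd = open("/cgroup/grp/cgroup.event_control", O_WRONLY);

	if (efd < 0 || ufd < 0 || cfd < 0)
		return 1;
	/* notify when usage crosses 64M in either direction */
	snprintf(buf, sizeof(buf), "%d %d 64M", efd, ufd);
	write(cfd, buf, strlen(buf));
	read(efd, &ticks, sizeof(ticks));	/* blocks until crossed */
	printf("threshold crossed %llu time(s)\n",
	       (unsigned long long)ticks);
	return 0;
}
#endif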

static void mem_cgroup_usage_unregister_event(struct cgroup *cgrp,
	struct cftype *cft, struct eventfd_ctx *eventfd)
{
	struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
	struct mem_cgroup_thresholds *thresholds;
	struct mem_cgroup_threshold_ary *new;
	int type = MEMFILE_TYPE(cft->private);
	u64 usage;
	int i, j, size;

	mutex_lock(&memcg->thresholds_lock);
	if (type == _MEM)
		thresholds = &memcg->thresholds;
	else if (type == _MEMSWAP)
		thresholds = &memcg->memsw_thresholds;
	else
		BUG();

	/*
	 * Something went wrong if we are trying to unregister a
	 * threshold when we don't have any thresholds.
	 */
	BUG_ON(!thresholds);

	usage = mem_cgroup_usage(memcg, type == _MEMSWAP);

	/* Check if a threshold was crossed before removing */
	__mem_cgroup_threshold(memcg, type == _MEMSWAP);

	/* Calculate the new number of thresholds */
	size = 0;
	for (i = 0; i < thresholds->primary->size; i++) {
		if (thresholds->primary->entries[i].eventfd != eventfd)
			size++;
	}

	new = thresholds->spare;

	/* Set thresholds array to NULL if we don't have thresholds */
	if (!size) {
		kfree(new);
		new = NULL;
		goto swap_buffers;
	}

	new->size = size;

	/* Copy thresholds and find the current threshold */
	new->current_threshold = -1;
	for (i = 0, j = 0; i < thresholds->primary->size; i++) {
		if (thresholds->primary->entries[i].eventfd == eventfd)
			continue;

		new->entries[j] = thresholds->primary->entries[i];
		if (new->entries[j].threshold < usage) {
			/*
			 * new->current_threshold will not be used
			 * until rcu_assign_pointer(), so it's safe to increment
			 * it here.
			 */
			++new->current_threshold;
		}
		j++;
	}

swap_buffers:
	/* Swap primary and spare array */
	thresholds->spare = thresholds->primary;
	rcu_assign_pointer(thresholds->primary, new);

	/* To be sure that nobody uses thresholds */
	synchronize_rcu();

	mutex_unlock(&memcg->thresholds_lock);
}

static int mem_cgroup_oom_register_event(struct cgroup *cgrp,
	struct cftype *cft, struct eventfd_ctx *eventfd, const char *args)
{
	struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
	struct mem_cgroup_eventfd_list *event;
	int type = MEMFILE_TYPE(cft->private);

	BUG_ON(type != _OOM_TYPE);
	event = kmalloc(sizeof(*event), GFP_KERNEL);
	if (!event)
		return -ENOMEM;

	mutex_lock(&memcg_oom_mutex);

	event->eventfd = eventfd;
	list_add(&event->list, &memcg->oom_notify);

	/* already in OOM ? */
	if (atomic_read(&memcg->oom_lock))
		eventfd_signal(eventfd, 1);
	mutex_unlock(&memcg_oom_mutex);

	return 0;
}

static void mem_cgroup_oom_unregister_event(struct cgroup *cgrp,
	struct cftype *cft, struct eventfd_ctx *eventfd)
{
	struct mem_cgroup *mem = mem_cgroup_from_cont(cgrp);
	struct mem_cgroup_eventfd_list *ev, *tmp;
	int type = MEMFILE_TYPE(cft->private);

	BUG_ON(type != _OOM_TYPE);

	mutex_lock(&memcg_oom_mutex);

	list_for_each_entry_safe(ev, tmp, &mem->oom_notify, list) {
		if (ev->eventfd == eventfd) {
			list_del(&ev->list);
			kfree(ev);
		}
	}

	mutex_unlock(&memcg_oom_mutex);
}

static int mem_cgroup_oom_control_read(struct cgroup *cgrp,
	struct cftype *cft, struct cgroup_map_cb *cb)
{
	struct mem_cgroup *mem = mem_cgroup_from_cont(cgrp);

	cb->fill(cb, "oom_kill_disable", mem->oom_kill_disable);

	if (atomic_read(&mem->oom_lock))
		cb->fill(cb, "under_oom", 1);
	else
		cb->fill(cb, "under_oom", 0);
	return 0;
}

static int mem_cgroup_oom_control_write(struct cgroup *cgrp,
	struct cftype *cft, u64 val)
{
	struct mem_cgroup *mem = mem_cgroup_from_cont(cgrp);
	struct mem_cgroup *parent;

	/* cannot be set on the root cgroup, and only 0 and 1 are allowed */
	if (!cgrp->parent || !((val == 0) || (val == 1)))
		return -EINVAL;

	parent = mem_cgroup_from_cont(cgrp->parent);

	cgroup_lock();
	/* oom-kill-disable is a flag for the subhierarchy. */
	if ((parent->use_hierarchy) ||
	    (mem->use_hierarchy && !list_empty(&cgrp->children))) {
		cgroup_unlock();
		return -EINVAL;
	}
	mem->oom_kill_disable = val;
	if (!val)
		memcg_oom_recover(mem);
	cgroup_unlock();
	return 0;
}
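
/*
 * Illustrative userspace sketch (compiled out): oom_control supports
 * the two things handled above, disabling the OOM killer for the group
 * and eventfd notification when the group is under OOM. Registration
 * uses the same event_control line format, with no extra argument;
 * paths are assumptions.
 */
#if 0
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/eventfd.h>

int main(void)
{
	char buf[32];
	uint64_t ticks;
	int efd = eventfd(0, 0);
	int ofd = open("/cgroup/grp/memory.oom_control", O_RDWR);
	int cfd = open("/cgroup/grp/cgroup.event_control", O_WRONLY);

	if (efd < 0 || ofd < 0 || cfd < 0)
		return 1;
	write(ofd, "1\n", 2);	/* tasks hang in OOM instead of being killed */
	snprintf(buf, sizeof(buf), "%d %d", efd, ofd);
	write(cfd, buf, strlen(buf));
	read(efd, &ticks, sizeof(ticks));	/* wait for the group to hit OOM */
	puts("group is under OOM");
	return 0;
}
#endif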

static struct cftype mem_cgroup_files[] = {
	{
		.name = "usage_in_bytes",
		.private = MEMFILE_PRIVATE(_MEM, RES_USAGE),
		.read_u64 = mem_cgroup_read,
		.register_event = mem_cgroup_usage_register_event,
		.unregister_event = mem_cgroup_usage_unregister_event,
	},
	{
		.name = "max_usage_in_bytes",
		.private = MEMFILE_PRIVATE(_MEM, RES_MAX_USAGE),
		.trigger = mem_cgroup_reset,
		.read_u64 = mem_cgroup_read,
	},
	{
		.name = "limit_in_bytes",
		.private = MEMFILE_PRIVATE(_MEM, RES_LIMIT),
		.write_string = mem_cgroup_write,
		.read_u64 = mem_cgroup_read,
	},
	{
		.name = "soft_limit_in_bytes",
		.private = MEMFILE_PRIVATE(_MEM, RES_SOFT_LIMIT),
		.write_string = mem_cgroup_write,
		.read_u64 = mem_cgroup_read,
	},
	{
		.name = "failcnt",
		.private = MEMFILE_PRIVATE(_MEM, RES_FAILCNT),
		.trigger = mem_cgroup_reset,
		.read_u64 = mem_cgroup_read,
	},
	{
		.name = "stat",
		.read_map = mem_control_stat_show,
	},
	{
		.name = "force_empty",
		.trigger = mem_cgroup_force_empty_write,
	},
	{
		.name = "use_hierarchy",
		.write_u64 = mem_cgroup_hierarchy_write,
		.read_u64 = mem_cgroup_hierarchy_read,
	},
	{
		.name = "swappiness",
		.read_u64 = mem_cgroup_swappiness_read,
		.write_u64 = mem_cgroup_swappiness_write,
	},
	{
		.name = "move_charge_at_immigrate",
		.read_u64 = mem_cgroup_move_charge_read,
		.write_u64 = mem_cgroup_move_charge_write,
	},
	{
		.name = "oom_control",
		.read_map = mem_cgroup_oom_control_read,
		.write_u64 = mem_cgroup_oom_control_write,
		.register_event = mem_cgroup_oom_register_event,
		.unregister_event = mem_cgroup_oom_unregister_event,
		.private = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL),
	},
};

#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
static struct cftype memsw_cgroup_files[] = {
	{
		.name = "memsw.usage_in_bytes",
		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_USAGE),
		.read_u64 = mem_cgroup_read,
		.register_event = mem_cgroup_usage_register_event,
		.unregister_event = mem_cgroup_usage_unregister_event,
	},
	{
		.name = "memsw.max_usage_in_bytes",
		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_MAX_USAGE),
		.trigger = mem_cgroup_reset,
		.read_u64 = mem_cgroup_read,
	},
	{
		.name = "memsw.limit_in_bytes",
		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_LIMIT),
		.write_string = mem_cgroup_write,
		.read_u64 = mem_cgroup_read,
	},
	{
		.name = "memsw.failcnt",
		.private = MEMFILE_PRIVATE(_MEMSWAP, RES_FAILCNT),
		.trigger = mem_cgroup_reset,
		.read_u64 = mem_cgroup_read,
	},
};

static int register_memsw_files(struct cgroup *cont, struct cgroup_subsys *ss)
{
	if (!do_swap_account)
		return 0;
	return cgroup_add_files(cont, ss, memsw_cgroup_files,
				ARRAY_SIZE(memsw_cgroup_files));
}
#else
static int register_memsw_files(struct cgroup *cont, struct cgroup_subsys *ss)
{
	return 0;
}
#endif

static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
{
	struct mem_cgroup_per_node *pn;
	struct mem_cgroup_per_zone *mz;
	enum lru_list l;
	int zone, tmp = node;
	/*
	 * This routine is called against possible nodes, but it is a
	 * BUG to call kmalloc() against an offline node.
	 *
	 * TODO: this routine can waste much memory for nodes which will
	 *       never be onlined. It's better to use a memory hotplug
	 *       callback function.
	 */
	if (!node_state(node, N_NORMAL_MEMORY))
		tmp = -1;
	pn = kzalloc_node(sizeof(*pn), GFP_KERNEL, tmp);
	if (!pn)
		return 1;

	mem->info.nodeinfo[node] = pn;
	for (zone = 0; zone < MAX_NR_ZONES; zone++) {
		mz = &pn->zoneinfo[zone];
		for_each_lru(l)
			INIT_LIST_HEAD(&mz->lists[l]);
		mz->usage_in_excess = 0;
		mz->on_tree = false;
		mz->mem = mem;
	}
	return 0;
}

static void free_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
{
	kfree(mem->info.nodeinfo[node]);
}

static struct mem_cgroup *mem_cgroup_alloc(void)
{
	struct mem_cgroup *mem;
	int size = sizeof(struct mem_cgroup);

	/* Can be very big if MAX_NUMNODES is very big */
	if (size < PAGE_SIZE)
		mem = kzalloc(size, GFP_KERNEL);
	else
		mem = vzalloc(size);

	if (!mem)
		return NULL;

	mem->stat = alloc_percpu(struct mem_cgroup_stat_cpu);
	if (!mem->stat)
		goto out_free;
	spin_lock_init(&mem->pcp_counter_lock);
	return mem;

out_free:
	if (size < PAGE_SIZE)
		kfree(mem);
	else
		vfree(mem);
	return NULL;
}

/*
 * When destroying a mem_cgroup, references from swap_cgroup can remain.
 * (Scanning them all at force_empty is too costly...)
 *
 * Instead of clearing all references at force_empty, we remember
 * the number of references from swap_cgroup and free the mem_cgroup
 * when it goes down to 0.
 *
 * Removal of the cgroup itself succeeds regardless of refs from swap.
 */

static void __mem_cgroup_free(struct mem_cgroup *mem)
{
	int node;

	mem_cgroup_remove_from_trees(mem);
	free_css_id(&mem_cgroup_subsys, &mem->css);

	for_each_node_state(node, N_POSSIBLE)
		free_mem_cgroup_per_zone_info(mem, node);

	free_percpu(mem->stat);
	if (sizeof(struct mem_cgroup) < PAGE_SIZE)
		kfree(mem);
	else
		vfree(mem);
}

static void mem_cgroup_get(struct mem_cgroup *mem)
{
	atomic_inc(&mem->refcnt);
}

static void __mem_cgroup_put(struct mem_cgroup *mem, int count)
{
	if (atomic_sub_and_test(count, &mem->refcnt)) {
		struct mem_cgroup *parent = parent_mem_cgroup(mem);
		__mem_cgroup_free(mem);
		if (parent)
			mem_cgroup_put(parent);
	}
}

static void mem_cgroup_put(struct mem_cgroup *mem)
{
	__mem_cgroup_put(mem, 1);
}

/*
 * Returns the parent mem_cgroup in the memcg hierarchy, with hierarchy
 * enabled.
 */
static struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *mem)
{
	if (!mem->res.parent)
		return NULL;
	return mem_cgroup_from_res_counter(mem->res.parent, res);
}

#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
static void __init enable_swap_cgroup(void)
{
	if (!mem_cgroup_disabled() && really_do_swap_account)
		do_swap_account = 1;
}
#else
static void __init enable_swap_cgroup(void)
{
}
#endif

static int mem_cgroup_soft_limit_tree_init(void)
{
	struct mem_cgroup_tree_per_node *rtpn;
	struct mem_cgroup_tree_per_zone *rtpz;
	int tmp, node, zone;

	for_each_node_state(node, N_POSSIBLE) {
		tmp = node;
		if (!node_state(node, N_NORMAL_MEMORY))
			tmp = -1;
		rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL, tmp);
		if (!rtpn)
			return 1;

		soft_limit_tree.rb_tree_per_node[node] = rtpn;

		for (zone = 0; zone < MAX_NR_ZONES; zone++) {
			rtpz = &rtpn->rb_tree_per_zone[zone];
			rtpz->rb_root = RB_ROOT;
			spin_lock_init(&rtpz->lock);
		}
	}
	return 0;
}

static struct cgroup_subsys_state * __ref
mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
{
	struct mem_cgroup *mem, *parent;
	long error = -ENOMEM;
	int node;

	mem = mem_cgroup_alloc();
	if (!mem)
		return ERR_PTR(error);

	for_each_node_state(node, N_POSSIBLE)
		if (alloc_mem_cgroup_per_zone_info(mem, node))
			goto free_out;

	/* root ? */
	if (cont->parent == NULL) {
		int cpu;
		enable_swap_cgroup();
		parent = NULL;
		root_mem_cgroup = mem;
		if (mem_cgroup_soft_limit_tree_init())
			goto free_out;
		for_each_possible_cpu(cpu) {
			struct memcg_stock_pcp *stock =
						&per_cpu(memcg_stock, cpu);
			INIT_WORK(&stock->work, drain_local_stock);
		}
		hotcpu_notifier(memcg_cpu_hotplug_callback, 0);
	} else {
		parent = mem_cgroup_from_cont(cont->parent);
		mem->use_hierarchy = parent->use_hierarchy;
		mem->oom_kill_disable = parent->oom_kill_disable;
	}

	if (parent && parent->use_hierarchy) {
		res_counter_init(&mem->res, &parent->res);
		res_counter_init(&mem->memsw, &parent->memsw);
		/*
		 * We increment the refcnt of the parent to ensure that we
		 * can safely access it on res_counter_charge/uncharge.
		 * This refcnt will be decremented when freeing this
		 * mem_cgroup (see mem_cgroup_put).
		 */
		mem_cgroup_get(parent);
	} else {
		res_counter_init(&mem->res, NULL);
		res_counter_init(&mem->memsw, NULL);
	}
	mem->last_scanned_child = 0;
	INIT_LIST_HEAD(&mem->oom_notify);

	if (parent)
		mem->swappiness = get_swappiness(parent);
	atomic_set(&mem->refcnt, 1);
	mem->move_charge_at_immigrate = 0;
	mutex_init(&mem->thresholds_lock);
	return &mem->css;
free_out:
	__mem_cgroup_free(mem);
	root_mem_cgroup = NULL;
	return ERR_PTR(error);
}

static int mem_cgroup_pre_destroy(struct cgroup_subsys *ss,
					struct cgroup *cont)
{
	struct mem_cgroup *mem = mem_cgroup_from_cont(cont);

	return mem_cgroup_force_empty(mem, false);
}

static void mem_cgroup_destroy(struct cgroup_subsys *ss,
				struct cgroup *cont)
{
	struct mem_cgroup *mem = mem_cgroup_from_cont(cont);

	mem_cgroup_put(mem);
}

static int mem_cgroup_populate(struct cgroup_subsys *ss,
				struct cgroup *cont)
{
	int ret;

	ret = cgroup_add_files(cont, ss, mem_cgroup_files,
				ARRAY_SIZE(mem_cgroup_files));

	if (!ret)
		ret = register_memsw_files(cont, ss);
	return ret;
}

#ifdef CONFIG_MMU
/* Handlers for move charge at task migration. */
#define PRECHARGE_COUNT_AT_ONCE	256
static int mem_cgroup_do_precharge(unsigned long count)
{
	int ret = 0;
	int batch_count = PRECHARGE_COUNT_AT_ONCE;
	struct mem_cgroup *mem = mc.to;

	if (mem_cgroup_is_root(mem)) {
		mc.precharge += count;
		/* we don't need css_get for root */
		return ret;
	}
	/* try to charge at once */
	if (count > 1) {
		struct res_counter *dummy;
		/*
		 * "mem" cannot be under rmdir() because we've already checked
		 * by cgroup_lock_live_cgroup() that it is not removed and we
		 * are still under the same cgroup_mutex. So we can postpone
		 * css_get().
		 */
		if (res_counter_charge(&mem->res, PAGE_SIZE * count, &dummy))
			goto one_by_one;
		if (do_swap_account && res_counter_charge(&mem->memsw,
						PAGE_SIZE * count, &dummy)) {
			res_counter_uncharge(&mem->res, PAGE_SIZE * count);
			goto one_by_one;
		}
		mc.precharge += count;
		return ret;
	}
one_by_one:
	/* fall back to one by one charge */
	while (count--) {
		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}
		if (!batch_count--) {
			batch_count = PRECHARGE_COUNT_AT_ONCE;
			cond_resched();
		}
		ret = __mem_cgroup_try_charge(NULL, GFP_KERNEL, 1, &mem, false);
		if (ret || !mem)
			/* mem_cgroup_clear_mc() will do uncharge later */
			return -ENOMEM;
		mc.precharge++;
	}
	return ret;
}

/**
 * is_target_pte_for_mc - check a pte whether it is valid for move charge
 * @vma: the vma the pte to be checked belongs to
 * @addr: the address corresponding to the pte to be checked
 * @ptent: the pte to be checked
 * @target: pointer where the target page or swap entry will be stored
 *	(can be NULL)
 *
 * Returns
 *   0(MC_TARGET_NONE): if the pte is not a target for move charge.
 *   1(MC_TARGET_PAGE): if the page corresponding to this pte is a target for
 *     move charge. If @target is not NULL, the page is stored in target->page
 *     with an extra refcount taken (callers should handle it).
 *   2(MC_TARGET_SWAP): if the swap entry corresponding to this pte is a
 *     target for charge migration. If @target is not NULL, the entry is
 *     stored in target->ent.
 *
 * Called with the pte lock held.
 */
union mc_target {
	struct page	*page;
	swp_entry_t	ent;
};

enum mc_target_type {
	MC_TARGET_NONE,	/* not used */
	MC_TARGET_PAGE,
	MC_TARGET_SWAP,
};

static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
						unsigned long addr, pte_t ptent)
{
	struct page *page = vm_normal_page(vma, addr, ptent);

	if (!page || !page_mapped(page))
		return NULL;
	if (PageAnon(page)) {
		/* we don't move shared anon */
		if (!move_anon() || page_mapcount(page) > 2)
			return NULL;
	} else if (!move_file())
		/* we ignore mapcount for file pages */
		return NULL;
	if (!get_page_unless_zero(page))
		return NULL;

	return page;
}

static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
			unsigned long addr, pte_t ptent, swp_entry_t *entry)
{
	int usage_count;
	struct page *page = NULL;
	swp_entry_t ent = pte_to_swp_entry(ptent);

	if (!move_anon() || non_swap_entry(ent))
		return NULL;
	usage_count = mem_cgroup_count_swap_user(ent, &page);
	if (usage_count > 1) { /* we don't move shared anon */
		if (page)
			put_page(page);
		return NULL;
	}
	if (do_swap_account)
		entry->val = ent.val;

	return page;
}

static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
			unsigned long addr, pte_t ptent, swp_entry_t *entry)
{
	struct page *page = NULL;
	struct inode *inode;
	struct address_space *mapping;
	pgoff_t pgoff;

	if (!vma->vm_file) /* anonymous vma */
		return NULL;
	if (!move_file())
		return NULL;

	inode = vma->vm_file->f_path.dentry->d_inode;
	mapping = vma->vm_file->f_mapping;
	if (pte_none(ptent))
		pgoff = linear_page_index(vma, addr);
	else /* pte_file(ptent) is true */
		pgoff = pte_to_pgoff(ptent);

	/* the page is moved even if it's not RSS of this task (page-faulted) */
	if (!mapping_cap_swap_backed(mapping)) { /* normal file */
		page = find_get_page(mapping, pgoff);
	} else { /* shmem/tmpfs file. we should take account of swap too. */
		swp_entry_t ent;
		mem_cgroup_get_shmem_target(inode, pgoff, &page, &ent);
		if (do_swap_account)
			entry->val = ent.val;
	}

	return page;
}

static int is_target_pte_for_mc(struct vm_area_struct *vma,
		unsigned long addr, pte_t ptent, union mc_target *target)
{
	struct page *page = NULL;
	struct page_cgroup *pc;
	int ret = 0;
	swp_entry_t ent = { .val = 0 };

	if (pte_present(ptent))
		page = mc_handle_present_pte(vma, addr, ptent);
	else if (is_swap_pte(ptent))
		page = mc_handle_swap_pte(vma, addr, ptent, &ent);
	else if (pte_none(ptent) || pte_file(ptent))
		page = mc_handle_file_pte(vma, addr, ptent, &ent);

	if (!page && !ent.val)
		return 0;
	if (page) {
		pc = lookup_page_cgroup(page);
		/*
		 * Do only a loose check, without the page_cgroup lock.
		 * mem_cgroup_move_account() checks whether the pc is valid
		 * under the lock.
		 */
		if (PageCgroupUsed(pc) && pc->mem_cgroup == mc.from) {
			ret = MC_TARGET_PAGE;
			if (target)
				target->page = page;
		}
		if (!ret || !target)
			put_page(page);
	}
	/* There is a swap entry and a page doesn't exist or isn't charged */
	if (ent.val && !ret &&
			css_id(&mc.from->css) == lookup_swap_cgroup(ent)) {
		ret = MC_TARGET_SWAP;
		if (target)
			target->ent = ent;
	}
	return ret;
}

static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
					unsigned long addr, unsigned long end,
					struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->private;
	pte_t *pte;
	spinlock_t *ptl;

	split_huge_page_pmd(walk->mm, pmd);

	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE)
		if (is_target_pte_for_mc(vma, addr, *pte, NULL))
			mc.precharge++;	/* increment precharge temporarily */
	pte_unmap_unlock(pte - 1, ptl);
	cond_resched();

	return 0;
}

static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
{
	unsigned long precharge;
	struct vm_area_struct *vma;

	down_read(&mm->mmap_sem);
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		struct mm_walk mem_cgroup_count_precharge_walk = {
			.pmd_entry = mem_cgroup_count_precharge_pte_range,
			.mm = mm,
			.private = vma,
		};
		if (is_vm_hugetlb_page(vma))
			continue;
		walk_page_range(vma->vm_start, vma->vm_end,
					&mem_cgroup_count_precharge_walk);
	}
	up_read(&mm->mmap_sem);

	precharge = mc.precharge;
	mc.precharge = 0;

	return precharge;
}

static int mem_cgroup_precharge_mc(struct mm_struct *mm)
{
	unsigned long precharge = mem_cgroup_count_precharge(mm);

	VM_BUG_ON(mc.moving_task);
	mc.moving_task = current;
	return mem_cgroup_do_precharge(precharge);
}

/* cancels all extra charges on mc.from and mc.to, and wakes up all waiters. */
static void __mem_cgroup_clear_mc(void)
{
	struct mem_cgroup *from = mc.from;
	struct mem_cgroup *to = mc.to;

	/* we must uncharge all the leftover precharges from mc.to */
	if (mc.precharge) {
		__mem_cgroup_cancel_charge(mc.to, mc.precharge);
		mc.precharge = 0;
	}
	/*
	 * we didn't uncharge from mc.from at mem_cgroup_move_account(), so
	 * we must uncharge here.
	 */
	if (mc.moved_charge) {
		__mem_cgroup_cancel_charge(mc.from, mc.moved_charge);
		mc.moved_charge = 0;
	}
	/* we must fixup refcnts and charges */
	if (mc.moved_swap) {
		/* uncharge swap account from the old cgroup */
		if (!mem_cgroup_is_root(mc.from))
			res_counter_uncharge(&mc.from->memsw,
						PAGE_SIZE * mc.moved_swap);
		__mem_cgroup_put(mc.from, mc.moved_swap);

		if (!mem_cgroup_is_root(mc.to)) {
			/*
			 * we charged both to->res and to->memsw, so we should
			 * uncharge to->res.
			 */
			res_counter_uncharge(&mc.to->res,
						PAGE_SIZE * mc.moved_swap);
		}
		/* we've already done mem_cgroup_get(mc.to) */
		mc.moved_swap = 0;
	}
	memcg_oom_recover(from);
	memcg_oom_recover(to);
	wake_up_all(&mc.waitq);
}

static void mem_cgroup_clear_mc(void)
{
	struct mem_cgroup *from = mc.from;

	/*
	 * We must clear moving_task before waking up waiters at the end
	 * of task migration.
	 */
	mc.moving_task = NULL;
	__mem_cgroup_clear_mc();
	spin_lock(&mc.lock);
	mc.from = NULL;
	mc.to = NULL;
	spin_unlock(&mc.lock);
	mem_cgroup_end_move(from);
}

static int mem_cgroup_can_attach(struct cgroup_subsys *ss,
				struct cgroup *cgroup,
				struct task_struct *p,
				bool threadgroup)
{
	int ret = 0;
	struct mem_cgroup *mem = mem_cgroup_from_cont(cgroup);

	if (mem->move_charge_at_immigrate) {
		struct mm_struct *mm;
		struct mem_cgroup *from = mem_cgroup_from_task(p);

		VM_BUG_ON(from == mem);

		mm = get_task_mm(p);
		if (!mm)
			return 0;
		/* We move charges only when we move the owner of the mm */
		if (mm->owner == p) {
			VM_BUG_ON(mc.from);
			VM_BUG_ON(mc.to);
			VM_BUG_ON(mc.precharge);
			VM_BUG_ON(mc.moved_charge);
			VM_BUG_ON(mc.moved_swap);
			mem_cgroup_start_move(from);
			spin_lock(&mc.lock);
			mc.from = from;
			mc.to = mem;
			spin_unlock(&mc.lock);
			/* We set mc.moving_task later */

			ret = mem_cgroup_precharge_mc(mm);
			if (ret)
				mem_cgroup_clear_mc();
		}
		mmput(mm);
	}
	return ret;
}

static void mem_cgroup_cancel_attach(struct cgroup_subsys *ss,
				struct cgroup *cgroup,
				struct task_struct *p,
				bool threadgroup)
{
	mem_cgroup_clear_mc();
}
mem_cgroup_move_charge_pte_range(pmd_t * pmd,unsigned long addr,unsigned long end,struct mm_walk * walk)5002 static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
5003 				unsigned long addr, unsigned long end,
5004 				struct mm_walk *walk)
5005 {
5006 	int ret = 0;
5007 	struct vm_area_struct *vma = walk->private;
5008 	pte_t *pte;
5009 	spinlock_t *ptl;
5010 
5011 	split_huge_page_pmd(walk->mm, pmd);
5012 retry:
5013 	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
5014 	for (; addr != end; addr += PAGE_SIZE) {
5015 		pte_t ptent = *(pte++);
5016 		union mc_target target;
5017 		int type;
5018 		struct page *page;
5019 		struct page_cgroup *pc;
5020 		swp_entry_t ent;
5021 
5022 		if (!mc.precharge)
5023 			break;
5024 
5025 		type = is_target_pte_for_mc(vma, addr, ptent, &target);
5026 		switch (type) {
5027 		case MC_TARGET_PAGE:
5028 			page = target.page;
5029 			if (isolate_lru_page(page))
5030 				goto put;
5031 			pc = lookup_page_cgroup(page);
5032 			if (!mem_cgroup_move_account(page, 1, pc,
5033 						     mc.from, mc.to, false)) {
5034 				mc.precharge--;
5035 				/* we uncharge from mc.from later. */
5036 				mc.moved_charge++;
5037 			}
5038 			putback_lru_page(page);
5039 put:			/* is_target_pte_for_mc() gets the page */
5040 			put_page(page);
5041 			break;
5042 		case MC_TARGET_SWAP:
5043 			ent = target.ent;
5044 			if (!mem_cgroup_move_swap_account(ent,
5045 						mc.from, mc.to, false)) {
5046 				mc.precharge--;
5047 				/* we fixup refcnts and charges later. */
5048 				mc.moved_swap++;
5049 			}
5050 			break;
5051 		default:
5052 			break;
5053 		}
5054 	}
5055 	pte_unmap_unlock(pte - 1, ptl);
5056 	cond_resched();
5057 
5058 	if (addr != end) {
5059 		/*
5060 		 * We have consumed all precharges we got in can_attach().
5061 		 * We try charge one by one, but don't do any additional
5062 		 * charges to mc.to if we have failed in charge once in attach()
5063 		 * phase.
5064 		 */
5065 		ret = mem_cgroup_do_precharge(1);
5066 		if (!ret)
5067 			goto retry;
5068 	}
5069 
5070 	return ret;
5071 }

static void mem_cgroup_move_charge(struct mm_struct *mm)
{
	struct vm_area_struct *vma;

	lru_add_drain_all();
retry:
	if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
		/*
		 * Someone holding the mmap_sem might be waiting on the
		 * waitq. So we cancel all extra charges, wake up all
		 * waiters, and retry. Because we cancel precharges, we
		 * might not be able to move enough charges, but moving
		 * charge is a best-effort feature anyway, so it wouldn't
		 * be a big problem.
		 */
		__mem_cgroup_clear_mc();
		cond_resched();
		goto retry;
	}
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		int ret;
		struct mm_walk mem_cgroup_move_charge_walk = {
			.pmd_entry = mem_cgroup_move_charge_pte_range,
			.mm = mm,
			.private = vma,
		};
		if (is_vm_hugetlb_page(vma))
			continue;
		ret = walk_page_range(vma->vm_start, vma->vm_end,
						&mem_cgroup_move_charge_walk);
		if (ret)
			/*
			 * This means we have consumed all precharges and
			 * failed to do additional charging. Just abandon
			 * here.
			 */
			break;
	}
	up_read(&mm->mmap_sem);
}

static void mem_cgroup_move_task(struct cgroup_subsys *ss,
				struct cgroup *cont,
				struct cgroup *old_cont,
				struct task_struct *p,
				bool threadgroup)
{
	struct mm_struct *mm;

	if (!mc.to)
		/* no need to move charge */
		return;

	mm = get_task_mm(p);
	if (mm) {
		mem_cgroup_move_charge(mm);
		mmput(mm);
	}
	mem_cgroup_clear_mc();
}
#else	/* !CONFIG_MMU */
static int mem_cgroup_can_attach(struct cgroup_subsys *ss,
				struct cgroup *cgroup,
				struct task_struct *p,
				bool threadgroup)
{
	return 0;
}
static void mem_cgroup_cancel_attach(struct cgroup_subsys *ss,
				struct cgroup *cgroup,
				struct task_struct *p,
				bool threadgroup)
{
}
static void mem_cgroup_move_task(struct cgroup_subsys *ss,
				struct cgroup *cont,
				struct cgroup *old_cont,
				struct task_struct *p,
				bool threadgroup)
{
}
#endif

struct cgroup_subsys mem_cgroup_subsys = {
	.name = "memory",
	.subsys_id = mem_cgroup_subsys_id,
	.create = mem_cgroup_create,
	.pre_destroy = mem_cgroup_pre_destroy,
	.destroy = mem_cgroup_destroy,
	.populate = mem_cgroup_populate,
	.can_attach = mem_cgroup_can_attach,
	.cancel_attach = mem_cgroup_cancel_attach,
	.attach = mem_cgroup_move_task,
	.early_init = 0,
	.use_id = 1,
};

#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
static int __init enable_swap_account(char *s)
{
	/* consider it enabled if no parameter or "=1" is given */
	if (!(*s) || !strcmp(s, "=1"))
		really_do_swap_account = 1;
	else if (!strcmp(s, "=0"))
		really_do_swap_account = 0;
	return 1;
}
__setup("swapaccount", enable_swap_account);

static int __init disable_swap_account(char *s)
{
	printk_once("noswapaccount is deprecated and will be removed in 2.6.40. Use swapaccount=0 instead\n");
	enable_swap_account("=0");
	return 1;
}
__setup("noswapaccount", disable_swap_account);
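
/*
 * Usage note (a sketch): with this config, the boot command line
 * controls swap accounting, e.g. "swapaccount=0" to disable it, or a
 * bare "swapaccount" / "swapaccount=1" to enable it; "noswapaccount"
 * still works but is deprecated, as warned above.
 */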
#endif