/* memcontrol.h - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#ifndef _LINUX_MEMCONTROL_H
#define _LINUX_MEMCONTROL_H
#include <linux/cgroup.h>
#include <linux/vm_event_item.h>

struct mem_cgroup;
struct page_cgroup;
struct page;
struct mm_struct;

/* Stats that can be updated by kernel. */
enum mem_cgroup_page_stat_item {
	MEMCG_NR_FILE_MAPPED, /* # of pages charged as file rss */
};

struct mem_cgroup_reclaim_cookie {
	struct zone *zone;
	int priority;
	unsigned int generation;
};

#ifdef CONFIG_CGROUP_MEM_RES_CTLR
/*
 * All "charge" functions that take a gfp_mask should be passed GFP_KERNEL
 * or (gfp_mask & GFP_RECLAIM_MASK). In the current implementation, memcg
 * does not allocate memory itself but reclaims memory from all available
 * zones, so the "where I want memory from" bits of gfp_mask have no
 * meaning. Any bits of that field would therefore work, but having a rule
 * avoids ambiguity: a charge function's gfp_mask should be set to either
 * GFP_KERNEL or gfp_mask & GFP_RECLAIM_MASK.
 * (Of course, if memcg ever allocates memory itself, GFP_KERNEL is sane.)
 */
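/*
 * Illustrative sketch of the rule above (not a declaration in this
 * header): a caller holding an arbitrary gfp_mask strips the placement
 * bits before charging. charge_for_write() is a made-up name used only
 * for illustration.
 *
 *	static int charge_for_write(struct page *page, struct mm_struct *mm,
 *				    gfp_t gfp_mask)
 *	{
 *		// keep only the reclaim-behaviour bits of the mask
 *		return mem_cgroup_cache_charge(page, mm,
 *					       gfp_mask & GFP_RECLAIM_MASK);
 *	}
 */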

extern int mem_cgroup_newpage_charge(struct page *page, struct mm_struct *mm,
				gfp_t gfp_mask);
/* for swap handling */
extern int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
		struct page *page, gfp_t mask, struct mem_cgroup **memcgp);
extern void mem_cgroup_commit_charge_swapin(struct page *page,
					struct mem_cgroup *memcg);
extern void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *memcg);
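/*
 * The swapin charge is two-phase: try_charge reserves the charge, and the
 * caller later either commits it to the page or cancels it. A rough
 * sketch of the calling pattern (details vary by call site; compare
 * do_swap_page() in mm/memory.c; map_the_page() is a hypothetical helper):
 *
 *	struct mem_cgroup *memcg;
 *
 *	if (mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &memcg))
 *		return -ENOMEM;
 *	if (map_the_page(mm, page) < 0) {	// mapping failed: drop charge
 *		mem_cgroup_cancel_charge_swapin(memcg);
 *		return -EFAULT;
 *	}
 *	mem_cgroup_commit_charge_swapin(page, memcg);
 */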

extern int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
					gfp_t gfp_mask);

struct lruvec *mem_cgroup_zone_lruvec(struct zone *, struct mem_cgroup *);
struct lruvec *mem_cgroup_lru_add_list(struct zone *, struct page *,
				       enum lru_list);
void mem_cgroup_lru_del_list(struct page *, enum lru_list);
void mem_cgroup_lru_del(struct page *);
struct lruvec *mem_cgroup_lru_move_lists(struct zone *, struct page *,
					 enum lru_list, enum lru_list);

/* For coalescing uncharges, to reduce memcg overhead */
extern void mem_cgroup_uncharge_start(void);
extern void mem_cgroup_uncharge_end(void);
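/*
 * Batching sketch: a caller that uncharges many pages in a row brackets
 * the loop with start/end so the per-page res_counter updates can be
 * coalesced. Roughly (the truncate/invalidate paths in mm/truncate.c are
 * real call sites; here the loop body is only illustrative and the
 * uncharge may happen indirectly via page release):
 *
 *	mem_cgroup_uncharge_start();
 *	for (i = 0; i < nr_pages; i++)
 *		mem_cgroup_uncharge_page(pages[i]);
 *	mem_cgroup_uncharge_end();
 */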

extern void mem_cgroup_uncharge_page(struct page *page);
extern void mem_cgroup_uncharge_cache_page(struct page *page);

extern void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
				     int order);
int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *memcg);

extern struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page);
extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);
extern struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm);

extern struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg);
extern struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont);

static inline
int mm_match_cgroup(const struct mm_struct *mm, const struct mem_cgroup *cgroup)
{
	struct mem_cgroup *memcg;
	rcu_read_lock();
	memcg = mem_cgroup_from_task(rcu_dereference((mm)->owner));
	rcu_read_unlock();
	return cgroup == memcg;
}

extern struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *memcg);

extern int
mem_cgroup_prepare_migration(struct page *page,
	struct page *newpage, struct mem_cgroup **memcgp, gfp_t gfp_mask);
extern void mem_cgroup_end_migration(struct mem_cgroup *memcg,
	struct page *oldpage, struct page *newpage, bool migration_ok);
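/*
 * Migration charging is also two-phase: prepare_migration pins the memcg
 * charge against the old page before the copy, and end_migration settles
 * it once the outcome is known. A rough sketch, loosely shaped after the
 * unmap_and_move() path in mm/migrate.c (do_the_copy() is a hypothetical
 * stand-in for the actual move):
 *
 *	struct mem_cgroup *memcg;
 *	int rc;
 *
 *	rc = mem_cgroup_prepare_migration(page, newpage, &memcg, GFP_KERNEL);
 *	if (rc)
 *		return rc;
 *	rc = do_the_copy(page, newpage);
 *	mem_cgroup_end_migration(memcg, page, newpage, rc == 0);
 */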

struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
				   struct mem_cgroup *,
				   struct mem_cgroup_reclaim_cookie *);
void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);
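/*
 * mem_cgroup_iter() walks the hierarchy below a root memcg; feeding the
 * previous return value back in continues the walk, and
 * mem_cgroup_iter_break() drops the references held by a walk abandoned
 * early. A sketch of the usual loop (mm/memcontrol.c documents the same
 * pattern; done_early() is a hypothetical predicate):
 *
 *	struct mem_cgroup *iter;
 *
 *	for (iter = mem_cgroup_iter(root, NULL, NULL);
 *	     iter;
 *	     iter = mem_cgroup_iter(root, iter, NULL)) {
 *		if (done_early(iter)) {
 *			mem_cgroup_iter_break(root, iter);
 *			break;
 *		}
 *	}
 */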

/*
 * For memory reclaim.
 */
int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg,
				    struct zone *zone);
int mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg,
				    struct zone *zone);
int mem_cgroup_select_victim_node(struct mem_cgroup *memcg);
unsigned long mem_cgroup_zone_nr_lru_pages(struct mem_cgroup *memcg,
					int nid, int zid, unsigned int lrumask);
struct zone_reclaim_stat *mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg,
						      struct zone *zone);
struct zone_reclaim_stat*
mem_cgroup_get_reclaim_stat_from_page(struct page *page);
extern void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
					struct task_struct *p);
extern void mem_cgroup_replace_page_cache(struct page *oldpage,
					struct page *newpage);

#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
extern int do_swap_account;
#endif

static inline bool mem_cgroup_disabled(void)
{
	if (mem_cgroup_subsys.disabled)
		return true;
	return false;
}

void __mem_cgroup_begin_update_page_stat(struct page *page, bool *locked,
					 unsigned long *flags);

extern atomic_t memcg_moving;

static inline void mem_cgroup_begin_update_page_stat(struct page *page,
					bool *locked, unsigned long *flags)
{
	if (mem_cgroup_disabled())
		return;
	rcu_read_lock();
	*locked = false;
	if (atomic_read(&memcg_moving))
		__mem_cgroup_begin_update_page_stat(page, locked, flags);
}

void __mem_cgroup_end_update_page_stat(struct page *page,
				unsigned long *flags);
static inline void mem_cgroup_end_update_page_stat(struct page *page,
					bool *locked, unsigned long *flags)
{
	if (mem_cgroup_disabled())
		return;
	if (*locked)
		__mem_cgroup_end_update_page_stat(page, flags);
	rcu_read_unlock();
}
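/*
 * Page-stat updates must be bracketed by begin/end so they cannot race
 * with a task being moved between memcgs. A sketch of the pattern used
 * when MEMCG_NR_FILE_MAPPED changes (cf. page_add_file_rmap() in
 * mm/rmap.c):
 *
 *	bool locked;
 *	unsigned long flags;
 *
 *	mem_cgroup_begin_update_page_stat(page, &locked, &flags);
 *	if (atomic_inc_and_test(&page->_mapcount))	// first mapping
 *		mem_cgroup_inc_page_stat(page, MEMCG_NR_FILE_MAPPED);
 *	mem_cgroup_end_update_page_stat(page, &locked, &flags);
 */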

void mem_cgroup_update_page_stat(struct page *page,
				 enum mem_cgroup_page_stat_item idx,
				 int val);

static inline void mem_cgroup_inc_page_stat(struct page *page,
					    enum mem_cgroup_page_stat_item idx)
{
	mem_cgroup_update_page_stat(page, idx, 1);
}

static inline void mem_cgroup_dec_page_stat(struct page *page,
					    enum mem_cgroup_page_stat_item idx)
{
	mem_cgroup_update_page_stat(page, idx, -1);
}

unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
						gfp_t gfp_mask,
						unsigned long *total_scanned);
u64 mem_cgroup_get_limit(struct mem_cgroup *memcg);

void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void mem_cgroup_split_huge_fixup(struct page *head);
#endif

#ifdef CONFIG_DEBUG_VM
bool mem_cgroup_bad_page_check(struct page *page);
void mem_cgroup_print_bad_page(struct page *page);
#endif
#else /* CONFIG_CGROUP_MEM_RES_CTLR */
struct mem_cgroup;

static inline int mem_cgroup_newpage_charge(struct page *page,
					struct mm_struct *mm, gfp_t gfp_mask)
{
	return 0;
}

static inline int mem_cgroup_cache_charge(struct page *page,
					struct mm_struct *mm, gfp_t gfp_mask)
{
	return 0;
}

static inline int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
		struct page *page, gfp_t gfp_mask, struct mem_cgroup **memcgp)
{
	return 0;
}

static inline void mem_cgroup_commit_charge_swapin(struct page *page,
					  struct mem_cgroup *memcg)
{
}

static inline void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *memcg)
{
}

static inline void mem_cgroup_uncharge_start(void)
{
}

static inline void mem_cgroup_uncharge_end(void)
{
}

static inline void mem_cgroup_uncharge_page(struct page *page)
{
}

static inline void mem_cgroup_uncharge_cache_page(struct page *page)
{
}

static inline struct lruvec *mem_cgroup_zone_lruvec(struct zone *zone,
						    struct mem_cgroup *memcg)
{
	return &zone->lruvec;
}

static inline struct lruvec *mem_cgroup_lru_add_list(struct zone *zone,
						     struct page *page,
						     enum lru_list lru)
{
	return &zone->lruvec;
}

static inline void mem_cgroup_lru_del_list(struct page *page, enum lru_list lru)
{
}

static inline void mem_cgroup_lru_del(struct page *page)
{
}

static inline struct lruvec *mem_cgroup_lru_move_lists(struct zone *zone,
						       struct page *page,
						       enum lru_list from,
						       enum lru_list to)
{
	return &zone->lruvec;
}

static inline struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
{
	return NULL;
}

static inline struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
{
	return NULL;
}

static inline int mm_match_cgroup(struct mm_struct *mm,
		struct mem_cgroup *memcg)
{
	return 1;
}

static inline int task_in_mem_cgroup(struct task_struct *task,
				     const struct mem_cgroup *memcg)
{
	return 1;
}

static inline struct cgroup_subsys_state
		*mem_cgroup_css(struct mem_cgroup *memcg)
{
	return NULL;
}

static inline int
mem_cgroup_prepare_migration(struct page *page, struct page *newpage,
	struct mem_cgroup **memcgp, gfp_t gfp_mask)
{
	return 0;
}

static inline void mem_cgroup_end_migration(struct mem_cgroup *memcg,
		struct page *oldpage, struct page *newpage, bool migration_ok)
{
}

static inline struct mem_cgroup *
mem_cgroup_iter(struct mem_cgroup *root,
		struct mem_cgroup *prev,
		struct mem_cgroup_reclaim_cookie *reclaim)
{
	return NULL;
}

static inline void mem_cgroup_iter_break(struct mem_cgroup *root,
					 struct mem_cgroup *prev)
{
}

static inline bool mem_cgroup_disabled(void)
{
	return true;
}

static inline int
mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg, struct zone *zone)
{
	return 1;
}

static inline int
mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg, struct zone *zone)
{
	return 1;
}

static inline unsigned long
mem_cgroup_zone_nr_lru_pages(struct mem_cgroup *memcg, int nid, int zid,
				unsigned int lru_mask)
{
	return 0;
}

static inline struct zone_reclaim_stat*
mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg, struct zone *zone)
{
	return NULL;
}

static inline struct zone_reclaim_stat*
mem_cgroup_get_reclaim_stat_from_page(struct page *page)
{
	return NULL;
}

static inline void
mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
{
}

static inline void mem_cgroup_begin_update_page_stat(struct page *page,
					bool *locked, unsigned long *flags)
{
}

static inline void mem_cgroup_end_update_page_stat(struct page *page,
					bool *locked, unsigned long *flags)
{
}

static inline void mem_cgroup_inc_page_stat(struct page *page,
					    enum mem_cgroup_page_stat_item idx)
{
}

static inline void mem_cgroup_dec_page_stat(struct page *page,
					    enum mem_cgroup_page_stat_item idx)
{
}

static inline
unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
					    gfp_t gfp_mask,
					    unsigned long *total_scanned)
{
	return 0;
}

static inline
u64 mem_cgroup_get_limit(struct mem_cgroup *memcg)
{
	return 0;
}

static inline void mem_cgroup_split_huge_fixup(struct page *head)
{
}

static inline
void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx)
{
}
static inline void mem_cgroup_replace_page_cache(struct page *oldpage,
				struct page *newpage)
{
}
#endif /* CONFIG_CGROUP_MEM_RES_CTLR */

#if !defined(CONFIG_CGROUP_MEM_RES_CTLR) || !defined(CONFIG_DEBUG_VM)
static inline bool
mem_cgroup_bad_page_check(struct page *page)
{
	return false;
}

static inline void
mem_cgroup_print_bad_page(struct page *page)
{
}
#endif

enum {
	UNDER_LIMIT,
	SOFT_LIMIT,
	OVER_LIMIT,
};

struct sock;
#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
void sock_update_memcg(struct sock *sk);
void sock_release_memcg(struct sock *sk);
#else
static inline void sock_update_memcg(struct sock *sk)
{
}
static inline void sock_release_memcg(struct sock *sk)
{
}
#endif /* CONFIG_CGROUP_MEM_RES_CTLR_KMEM */
#endif /* _LINUX_MEMCONTROL_H */