1 #ifndef __LINUX_PAGE_CGROUP_H
2 #define __LINUX_PAGE_CGROUP_H
3 
/*
 * Bit numbers within page_cgroup->flags.  __NR_PCG_FLAGS must remain
 * last: it counts the flag bits and is used below to verify that the
 * array ID still fits in the remaining high bits of pc->flags.
 */
enum {
	/* flags for mem_cgroup */
	PCG_LOCK,  /* Lock for pc->mem_cgroup and following bits. */
	PCG_CACHE, /* charged as cache */
	PCG_USED, /* this object is in use. */
	PCG_MIGRATION, /* under page migration */
	/* flags for mem_cgroup and file and I/O status */
	PCG_MOVE_LOCK, /* For race between move_account v.s. following bits */
	PCG_FILE_MAPPED, /* page is accounted as "mapped" */
	/* No lock in page_cgroup */
	PCG_ACCT_LRU, /* page has been accounted for (under lru_lock) */
	__NR_PCG_FLAGS,
};
17 
18 #ifndef __GENERATING_BOUNDS_H
19 #include <generated/bounds.h>
20 
21 #ifdef CONFIG_CGROUP_MEM_RES_CTLR
22 #include <linux/bit_spinlock.h>
23 
24 /*
25  * Page Cgroup can be considered as an extended mem_map.
26  * A page_cgroup page is associated with every page descriptor. The
27  * page_cgroup helps us identify information about the cgroup
28  * All page cgroups are allocated at boot or memory hotplug event,
29  * then the page cgroup for pfn always exists.
30  */
struct page_cgroup {
	unsigned long flags;		/* PCG_* bits; high bits hold the array ID */
	struct mem_cgroup *mem_cgroup;	/* owner; guarded by PCG_LOCK */
	struct list_head lru;		/* per cgroup LRU list */
};
36 
37 void __meminit pgdat_page_cgroup_init(struct pglist_data *pgdat);
38 
39 #ifdef CONFIG_SPARSEMEM
/* SPARSEMEM: the flatmem setup path is unused, so this is a no-op. */
static inline void __init page_cgroup_init_flatmem(void)
{
}
43 extern void __init page_cgroup_init(void);
44 #else
45 void __init page_cgroup_init_flatmem(void);
/* FLATMEM: the sparsemem setup path is unused, so this is a no-op. */
static inline void __init page_cgroup_init(void)
{
}
49 #endif
50 
51 struct page_cgroup *lookup_page_cgroup(struct page *page);
52 struct page *lookup_cgroup_page(struct page_cgroup *pc);
53 
/*
 * Generators for test/set/clear/test-and-clear accessors operating on
 * single PCG_* bits of pc->flags, e.g. TESTPCGFLAG(Used, USED) emits
 * PageCgroupUsed().
 */
#define TESTPCGFLAG(uname, lname)			\
static inline int PageCgroup##uname(struct page_cgroup *pc)	\
	{ return test_bit(PCG_##lname, &pc->flags); }

#define SETPCGFLAG(uname, lname)			\
static inline void SetPageCgroup##uname(struct page_cgroup *pc)\
	{ set_bit(PCG_##lname, &pc->flags);  }

#define CLEARPCGFLAG(uname, lname)			\
static inline void ClearPageCgroup##uname(struct page_cgroup *pc)	\
	{ clear_bit(PCG_##lname, &pc->flags);  }

#define TESTCLEARPCGFLAG(uname, lname)			\
static inline int TestClearPageCgroup##uname(struct page_cgroup *pc)	\
	{ return test_and_clear_bit(PCG_##lname, &pc->flags);  }
69 
/* Cache flag is set only once (at allocation) */
TESTPCGFLAG(Cache, CACHE)
CLEARPCGFLAG(Cache, CACHE)
SETPCGFLAG(Cache, CACHE)

TESTPCGFLAG(Used, USED)
CLEARPCGFLAG(Used, USED)
SETPCGFLAG(Used, USED)

/* LRU accounting; updated under lru_lock (see PCG_ACCT_LRU above) */
SETPCGFLAG(AcctLRU, ACCT_LRU)
CLEARPCGFLAG(AcctLRU, ACCT_LRU)
TESTPCGFLAG(AcctLRU, ACCT_LRU)
TESTCLEARPCGFLAG(AcctLRU, ACCT_LRU)

/* FILE_MAPPED races with move_account; see PCG_MOVE_LOCK above */
SETPCGFLAG(FileMapped, FILE_MAPPED)
CLEARPCGFLAG(FileMapped, FILE_MAPPED)
TESTPCGFLAG(FileMapped, FILE_MAPPED)

SETPCGFLAG(Migration, MIGRATION)
CLEARPCGFLAG(Migration, MIGRATION)
TESTPCGFLAG(Migration, MIGRATION)
92 
/* Acquire the per-page_cgroup bit spinlock (bit PCG_LOCK of pc->flags). */
static inline void lock_page_cgroup(struct page_cgroup *pc)
{
	/*
	 * Don't take this lock in IRQ context.
	 * This lock is for pc->mem_cgroup, USED, CACHE, MIGRATION
	 */
	bit_spin_lock(PCG_LOCK, &pc->flags);
}
101 
/* Release the bit spinlock taken by lock_page_cgroup(). */
static inline void unlock_page_cgroup(struct page_cgroup *pc)
{
	bit_spin_unlock(PCG_LOCK, &pc->flags);
}
106 
/*
 * Take PCG_MOVE_LOCK with IRQs disabled.  The saved IRQ state is
 * returned through *flags and must be passed unchanged to
 * move_unlock_page_cgroup().
 */
static inline void move_lock_page_cgroup(struct page_cgroup *pc,
	unsigned long *flags)
{
	/*
	 * We know updates to pc->flags of page cache's stats are from both of
	 * usual context or IRQ context. Disable IRQ to avoid deadlock.
	 */
	local_irq_save(*flags);
	bit_spin_lock(PCG_MOVE_LOCK, &pc->flags);
}
117 
/*
 * Counterpart of move_lock_page_cgroup(): drop PCG_MOVE_LOCK, then
 * restore the IRQ state saved in *flags (strict reverse of lock order).
 */
static inline void move_unlock_page_cgroup(struct page_cgroup *pc,
	unsigned long *flags)
{
	bit_spin_unlock(PCG_MOVE_LOCK, &pc->flags);
	local_irq_restore(*flags);
}
124 
/*
 * The high bits of pc->flags store an "array ID" identifying the
 * page_cgroup array this entry belongs to: section bits under
 * SPARSEMEM, node bits otherwise.
 */
#ifdef CONFIG_SPARSEMEM
#define PCG_ARRAYID_WIDTH	SECTIONS_SHIFT
#else
#define PCG_ARRAYID_WIDTH	NODES_SHIFT
#endif

#if (PCG_ARRAYID_WIDTH > BITS_PER_LONG - NR_PCG_FLAGS)
#error Not enough space left in pc->flags to store page_cgroup array IDs
#endif

/* pc->flags: ARRAY-ID | FLAGS */

#define PCG_ARRAYID_MASK	((1UL << PCG_ARRAYID_WIDTH) - 1)

#define PCG_ARRAYID_OFFSET	(BITS_PER_LONG - PCG_ARRAYID_WIDTH)
/*
 * Zero the shift count for non-existent fields, to prevent compiler
 * warnings and ensure references are optimized away.
 */
#define PCG_ARRAYID_SHIFT	(PCG_ARRAYID_OFFSET * (PCG_ARRAYID_WIDTH != 0))
145 
set_page_cgroup_array_id(struct page_cgroup * pc,unsigned long id)146 static inline void set_page_cgroup_array_id(struct page_cgroup *pc,
147 					    unsigned long id)
148 {
149 	pc->flags &= ~(PCG_ARRAYID_MASK << PCG_ARRAYID_SHIFT);
150 	pc->flags |= (id & PCG_ARRAYID_MASK) << PCG_ARRAYID_SHIFT;
151 }
152 
page_cgroup_array_id(struct page_cgroup * pc)153 static inline unsigned long page_cgroup_array_id(struct page_cgroup *pc)
154 {
155 	return (pc->flags >> PCG_ARRAYID_SHIFT) & PCG_ARRAYID_MASK;
156 }
157 
158 #else /* CONFIG_CGROUP_MEM_RES_CTLR */
159 struct page_cgroup;
160 
/* memcg disabled: no per-node page_cgroup state to initialize. */
static inline void __meminit pgdat_page_cgroup_init(struct pglist_data *pgdat)
{
}
164 
/* memcg disabled: no page has an associated page_cgroup. */
static inline struct page_cgroup *lookup_page_cgroup(struct page *page)
{
	return NULL;
}
169 
/* memcg disabled: boot-time setup is a no-op. */
static inline void page_cgroup_init(void)
{
}
173 
/* memcg disabled: flatmem boot-time setup is a no-op. */
static inline void __init page_cgroup_init_flatmem(void)
{
}
177 
178 #endif /* CONFIG_CGROUP_MEM_RES_CTLR */
179 
180 #include <linux/swap.h>
181 
182 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
183 extern unsigned short swap_cgroup_cmpxchg(swp_entry_t ent,
184 					unsigned short old, unsigned short new);
185 extern unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id);
186 extern unsigned short lookup_swap_cgroup(swp_entry_t ent);
187 extern int swap_cgroup_swapon(int type, unsigned long max_pages);
188 extern void swap_cgroup_swapoff(int type);
189 #else
190 
/* Swap accounting disabled: recording is a no-op; always returns 0. */
static inline
unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id)
{
	return 0;
}
196 
/* Swap accounting disabled: no id is ever recorded; always returns 0. */
static inline
unsigned short lookup_swap_cgroup(swp_entry_t ent)
{
	return 0;
}
202 
/* Swap accounting disabled: nothing to allocate; always succeeds. */
static inline int swap_cgroup_swapon(int type, unsigned long max_pages)
{
	return 0;
}
208 
/*
 * Swap accounting disabled: nothing to tear down.
 * (Dropped the redundant bare "return;" at the end of this void
 * function.)
 */
static inline void swap_cgroup_swapoff(int type)
{
}
213 
214 #endif /* CONFIG_CGROUP_MEM_RES_CTLR_SWAP */
215 
216 #endif /* !__GENERATING_BOUNDS_H */
217 
218 #endif /* __LINUX_PAGE_CGROUP_H */
219