#ifndef _LINUX_VMSTAT_H
#define _LINUX_VMSTAT_H

#include <linux/types.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <asm/atomic.h>

#ifdef CONFIG_ZONE_DMA
#define DMA_ZONE(xx) xx##_DMA,
#else
#define DMA_ZONE(xx)
#endif

#ifdef CONFIG_ZONE_DMA32
#define DMA32_ZONE(xx) xx##_DMA32,
#else
#define DMA32_ZONE(xx)
#endif

#ifdef CONFIG_HIGHMEM
#define HIGHMEM_ZONE(xx) , xx##_HIGH
#else
#define HIGHMEM_ZONE(xx)
#endif


#define FOR_ALL_ZONES(xx) DMA_ZONE(xx) DMA32_ZONE(xx) xx##_NORMAL HIGHMEM_ZONE(xx) , xx##_MOVABLE
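/*
 * FOR_ALL_ZONES(xx) expands to one item per configured zone, in zone
 * order. For instance, on a typical x86_64 config (CONFIG_ZONE_DMA and
 * CONFIG_ZONE_DMA32 set, no CONFIG_HIGHMEM), FOR_ALL_ZONES(PGALLOC)
 * becomes:
 *
 *	PGALLOC_DMA, PGALLOC_DMA32, PGALLOC_NORMAL , PGALLOC_MOVABLE
 */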

enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
		FOR_ALL_ZONES(PGALLOC),
		PGFREE, PGACTIVATE, PGDEACTIVATE,
		PGFAULT, PGMAJFAULT,
		FOR_ALL_ZONES(PGREFILL),
		FOR_ALL_ZONES(PGSTEAL),
		FOR_ALL_ZONES(PGSCAN_KSWAPD),
		FOR_ALL_ZONES(PGSCAN_DIRECT),
#ifdef CONFIG_NUMA
		PGSCAN_ZONE_RECLAIM_FAILED,
#endif
		PGINODESTEAL, SLABS_SCANNED, KSWAPD_STEAL, KSWAPD_INODESTEAL,
		KSWAPD_LOW_WMARK_HIT_QUICKLY, KSWAPD_HIGH_WMARK_HIT_QUICKLY,
		KSWAPD_SKIP_CONGESTION_WAIT,
		PAGEOUTRUN, ALLOCSTALL, PGROTATED,
#ifdef CONFIG_COMPACTION
		COMPACTBLOCKS, COMPACTPAGES, COMPACTPAGEFAILED,
		COMPACTSTALL, COMPACTFAIL, COMPACTSUCCESS,
#endif
#ifdef CONFIG_HUGETLB_PAGE
		HTLB_BUDDY_PGALLOC, HTLB_BUDDY_PGALLOC_FAIL,
#endif
		UNEVICTABLE_PGCULLED,	/* culled to noreclaim list */
		UNEVICTABLE_PGSCANNED,	/* scanned for reclaimability */
		UNEVICTABLE_PGRESCUED,	/* rescued from noreclaim list */
		UNEVICTABLE_PGMLOCKED,
		UNEVICTABLE_PGMUNLOCKED,
		UNEVICTABLE_PGCLEARED,	/* on COW, page truncate */
		UNEVICTABLE_PGSTRANDED,	/* unable to isolate on unlock */
		UNEVICTABLE_MLOCKFREED,
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
		THP_FAULT_ALLOC,
		THP_FAULT_FALLBACK,
		THP_COLLAPSE_ALLOC,
		THP_COLLAPSE_ALLOC_FAILED,
		THP_SPLIT,
#endif
		NR_VM_EVENT_ITEMS
};

extern int sysctl_stat_interval;

#ifdef CONFIG_VM_EVENT_COUNTERS
/*
 * Lightweight per-CPU counter implementation.
 *
 * Counters should only be incremented; no critical kernel component
 * should rely on the counter values.
 *
 * Counters are handled completely inline. On many platforms the code
 * generated will simply be the increment of a global address.
 */

struct vm_event_state {
	unsigned long event[NR_VM_EVENT_ITEMS];
};

DECLARE_PER_CPU(struct vm_event_state, vm_event_states);

static inline void __count_vm_event(enum vm_event_item item)
{
	__this_cpu_inc(vm_event_states.event[item]);
}

static inline void count_vm_event(enum vm_event_item item)
{
	this_cpu_inc(vm_event_states.event[item]);
}

static inline void __count_vm_events(enum vm_event_item item, long delta)
{
	__this_cpu_add(vm_event_states.event[item], delta);
}

static inline void count_vm_events(enum vm_event_item item, long delta)
{
	this_cpu_add(vm_event_states.event[item], delta);
}
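
/*
 * Illustrative usage (an example, not part of this header): a page
 * fault path would bump the fault counter with
 *
 *	count_vm_event(PGFAULT);
 *
 * The __count_vm_event() variant uses the non-preempt-safe
 * __this_cpu_inc() and may only be used when the caller already keeps
 * the task on one CPU, e.g. with preemption or interrupts disabled.
 */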

extern void all_vm_events(unsigned long *);
#ifdef CONFIG_HOTPLUG
extern void vm_events_fold_cpu(int cpu);
#else
static inline void vm_events_fold_cpu(int cpu)
{
}
#endif

#else

/* Disable counters */
static inline void count_vm_event(enum vm_event_item item)
{
}
static inline void count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void __count_vm_event(enum vm_event_item item)
{
}
static inline void __count_vm_events(enum vm_event_item item, long delta)
{
}
static inline void all_vm_events(unsigned long *ret)
{
}
static inline void vm_events_fold_cpu(int cpu)
{
}

#endif /* CONFIG_VM_EVENT_COUNTERS */

#define __count_zone_vm_events(item, zone, delta) \
		__count_vm_events(item##_NORMAL - ZONE_NORMAL + \
		zone_idx(zone), delta)
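/*
 * Sketch of the arithmetic (assuming the usual zone layout): the
 * FOR_ALL_ZONES() event items are declared in the same order as the
 * zone indexes, so item##_NORMAL - ZONE_NORMAL + zone_idx(zone) picks
 * the event belonging to the given zone. For a highmem zone,
 *
 *	__count_zone_vm_events(PGREFILL, zone, 1);
 *
 * therefore counts PGREFILL_HIGH.
 */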
146 
147 /*
148  * Zone based page accounting with per cpu differentials.
149  */
150 extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
151 
zone_page_state_add(long x,struct zone * zone,enum zone_stat_item item)152 static inline void zone_page_state_add(long x, struct zone *zone,
153 				 enum zone_stat_item item)
154 {
155 	atomic_long_add(x, &zone->vm_stat[item]);
156 	atomic_long_add(x, &vm_stat[item]);
157 }
158 
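/*
 * Under SMP the summed value can be transiently negative, because
 * per-CPU deltas may not have been folded back into the atomic
 * counters yet; the readers below clamp to zero rather than report an
 * underflow.
 */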
static inline unsigned long global_page_state(enum zone_stat_item item)
{
	long x = atomic_long_read(&vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

static inline unsigned long zone_page_state(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);
#ifdef CONFIG_SMP
	if (x < 0)
		x = 0;
#endif
	return x;
}

/*
 * More accurate version that also takes the currently pending per-CPU
 * deltas into account. For that we need to loop over all CPUs to read
 * the current deltas. There is no synchronization, so the result is
 * still only approximate.
 */
static inline unsigned long zone_page_state_snapshot(struct zone *zone,
					enum zone_stat_item item)
{
	long x = atomic_long_read(&zone->vm_stat[item]);

#ifdef CONFIG_SMP
	int cpu;
	for_each_online_cpu(cpu)
		x += per_cpu_ptr(zone->pageset, cpu)->vm_stat_diff[item];

	if (x < 0)
		x = 0;
#endif
	return x;
}
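/*
 * Illustrative trade-off: a caller that must not act on a stale value,
 * e.g. a watermark check under memory pressure, can pay the O(nr_cpus)
 * cost of
 *
 *	zone_page_state_snapshot(zone, NR_FREE_PAGES)
 *
 * instead of reading the cheap but possibly outdated
 * zone_page_state(zone, NR_FREE_PAGES).
 */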

extern unsigned long global_reclaimable_pages(void);
extern unsigned long zone_reclaimable_pages(struct zone *zone);

#ifdef CONFIG_NUMA
/*
 * Determine the per-node value of a stat item. This function is
 * called frequently on NUMA machines, so try to be as frugal as
 * possible.
 */
static inline unsigned long node_page_state(int node,
				 enum zone_stat_item item)
{
	struct zone *zones = NODE_DATA(node)->node_zones;

	return
#ifdef CONFIG_ZONE_DMA
		zone_page_state(&zones[ZONE_DMA], item) +
#endif
#ifdef CONFIG_ZONE_DMA32
		zone_page_state(&zones[ZONE_DMA32], item) +
#endif
#ifdef CONFIG_HIGHMEM
		zone_page_state(&zones[ZONE_HIGHMEM], item) +
#endif
		zone_page_state(&zones[ZONE_NORMAL], item) +
		zone_page_state(&zones[ZONE_MOVABLE], item);
}
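/*
 * Example: node_page_state(node, NR_FREE_PAGES) sums the free-page
 * counts of every zone type configured on that node, e.g. ZONE_DMA +
 * ZONE_DMA32 + ZONE_NORMAL + ZONE_MOVABLE on a common x86_64 build.
 */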

extern void zone_statistics(struct zone *, struct zone *, gfp_t gfp);

#else

#define node_page_state(node, item) global_page_state(item)
#define zone_statistics(_zl, _z, gfp) do { } while (0)

#endif /* CONFIG_NUMA */

#define add_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, __d)
#define sub_zone_page_state(__z, __i, __d) mod_zone_page_state(__z, __i, -(__d))

static inline void zap_zone_vm_stats(struct zone *zone)
{
	memset(zone->vm_stat, 0, sizeof(zone->vm_stat));
}

extern void inc_zone_state(struct zone *, enum zone_stat_item);

#ifdef CONFIG_SMP
void __mod_zone_page_state(struct zone *, enum zone_stat_item item, int);
void __inc_zone_page_state(struct page *, enum zone_stat_item);
void __dec_zone_page_state(struct page *, enum zone_stat_item);

void mod_zone_page_state(struct zone *, enum zone_stat_item, int);
void inc_zone_page_state(struct page *, enum zone_stat_item);
void dec_zone_page_state(struct page *, enum zone_stat_item);

extern void inc_zone_state(struct zone *, enum zone_stat_item);
extern void __inc_zone_state(struct zone *, enum zone_stat_item);
extern void dec_zone_state(struct zone *, enum zone_stat_item);
extern void __dec_zone_state(struct zone *, enum zone_stat_item);

void refresh_cpu_vm_stats(int);

int calculate_pressure_threshold(struct zone *zone);
int calculate_normal_threshold(struct zone *zone);
void set_pgdat_percpu_threshold(pg_data_t *pgdat,
				int (*calculate_pressure)(struct zone *));
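
/*
 * Sketch of intent (the actual policy lives in mm/vmstat.c):
 * calculate_normal_threshold() returns a large per-CPU delta bound so
 * counter updates stay cheap, while calculate_pressure_threshold()
 * returns a smaller bound that reclaim is expected to install via
 * set_pgdat_percpu_threshold() so watermark checks see fresher counts.
 */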
#else /* CONFIG_SMP */

/*
 * We do not maintain differentials in a single processor configuration.
 * The functions directly modify the zone and global counters.
 */
static inline void __mod_zone_page_state(struct zone *zone,
			enum zone_stat_item item, int delta)
{
	zone_page_state_add(delta, zone, item);
}

static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_inc(&zone->vm_stat[item]);
	atomic_long_inc(&vm_stat[item]);
}

static inline void __inc_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__inc_zone_state(page_zone(page), item);
}

static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
{
	atomic_long_dec(&zone->vm_stat[item]);
	atomic_long_dec(&vm_stat[item]);
}

static inline void __dec_zone_page_state(struct page *page,
			enum zone_stat_item item)
{
	__dec_zone_state(page_zone(page), item);
}

/*
 * We only use atomic operations to update counters, so there is no
 * need to disable interrupts.
 */
#define inc_zone_page_state __inc_zone_page_state
#define dec_zone_page_state __dec_zone_page_state
#define mod_zone_page_state __mod_zone_page_state

#define set_pgdat_percpu_threshold(pgdat, callback) do { } while (0)

static inline void refresh_cpu_vm_stats(int cpu) { }
#endif

#endif /* _LINUX_VMSTAT_H */