#ifndef _LINUX_COMPACTION_H
#define _LINUX_COMPACTION_H

/* Return values for compact_zone() and try_to_compact_pages() */
/* compaction didn't start as it was not possible or direct reclaim was more suitable */
#define COMPACT_SKIPPED		0
/* compaction should continue to another pageblock */
#define COMPACT_CONTINUE	1
/* direct compaction partially compacted a zone and there are suitable pages */
#define COMPACT_PARTIAL		2
/* The full zone was compacted */
#define COMPACT_COMPLETE	3
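
/*
 * Illustrative sketch only (not a verbatim excerpt of the page allocator):
 * a caller of try_to_compact_pages() is expected to treat COMPACT_SKIPPED
 * as "compaction was not attempted, fall back to direct reclaim" and the
 * other values as "retry the allocation, free pages may now be contiguous":
 *
 *	rc = try_to_compact_pages(zonelist, order, gfp_mask, nodemask, sync);
 *	if (rc == COMPACT_SKIPPED)
 *		return NULL;			(defer to direct reclaim)
 *	page = retry_the_allocation();		(hypothetical helper)
 */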

#ifdef CONFIG_COMPACTION
extern int sysctl_compact_memory;
extern int sysctl_compaction_handler(struct ctl_table *table, int write,
			void __user *buffer, size_t *length, loff_t *ppos);
extern int sysctl_extfrag_threshold;
extern int sysctl_extfrag_handler(struct ctl_table *table, int write,
			void __user *buffer, size_t *length, loff_t *ppos);
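
/*
 * The sysctls above back /proc/sys/vm/compact_memory and
 * /proc/sys/vm/extfrag_threshold. Writing to compact_memory triggers an
 * immediate compaction of all zones, e.g. from userspace:
 *
 *	echo 1 > /proc/sys/vm/compact_memory
 *
 * extfrag_threshold tunes, via fragmentation_index(), how readily
 * compaction is preferred over direct reclaim for a failing high-order
 * allocation. Descriptive note only; see Documentation/sysctl/vm.txt.
 */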

extern int fragmentation_index(struct zone *zone, unsigned int order);
extern unsigned long try_to_compact_pages(struct zonelist *zonelist,
			int order, gfp_t gfp_mask, nodemask_t *mask,
			bool sync);
extern unsigned long compaction_suitable(struct zone *zone, int order);
extern unsigned long compact_zone_order(struct zone *zone, int order,
					gfp_t gfp_mask, bool sync);
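
/*
 * Descriptive note on the entry points above: try_to_compact_pages() walks
 * the zones of a zonelist and is intended for direct compaction from the
 * page allocator when a high-order allocation fails; compact_zone_order()
 * compacts a single zone; compaction_suitable() reports, as one of the
 * COMPACT_* values, whether compacting @zone at @order is worth attempting.
 */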

/* Do not skip compaction more than 64 times */
#define COMPACT_MAX_DEFER_SHIFT 6

/*
 * Compaction is deferred when compaction fails to result in a page
 * allocation success. 1 << compact_defer_shift compactions are skipped up
 * to a limit of 1 << COMPACT_MAX_DEFER_SHIFT.
 */
static inline void defer_compaction(struct zone *zone)
{
	zone->compact_considered = 0;
	zone->compact_defer_shift++;

	if (zone->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT)
		zone->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT;
}

/* Returns true if compaction should be skipped this time */
static inline bool compaction_deferred(struct zone *zone)
{
	unsigned long defer_limit = 1UL << zone->compact_defer_shift;

	/* Avoid possible overflow */
	if (++zone->compact_considered > defer_limit)
		zone->compact_considered = defer_limit;

	return zone->compact_considered < defer_limit;
}
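
/*
 * Sketch of the intended deferral protocol (illustrative only; the real
 * call sites live in the page allocator, not in this header):
 *
 *	if (!compaction_deferred(zone)) {
 *		compact_zone_order(zone, order, gfp_mask, sync);
 *		page = retry_the_allocation();	(hypothetical helper)
 *		if (!page)
 *			defer_compaction(zone);
 *	}
 *
 * Each call to defer_compaction() doubles the number of subsequent
 * attempts that compaction_deferred() will skip, capped at
 * 1 << COMPACT_MAX_DEFER_SHIFT (64).
 */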

#else
static inline unsigned long try_to_compact_pages(struct zonelist *zonelist,
			int order, gfp_t gfp_mask, nodemask_t *nodemask,
			bool sync)
{
	return COMPACT_CONTINUE;
}

static inline unsigned long compaction_suitable(struct zone *zone, int order)
{
	return COMPACT_SKIPPED;
}

static inline unsigned long compact_zone_order(struct zone *zone, int order,
					       gfp_t gfp_mask, bool sync)
{
	return COMPACT_CONTINUE;
}

static inline void defer_compaction(struct zone *zone)
{
}

static inline bool compaction_deferred(struct zone *zone)
{
	return true;
}

#endif /* CONFIG_COMPACTION */

#if defined(CONFIG_COMPACTION) && defined(CONFIG_SYSFS) && defined(CONFIG_NUMA)
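/*
 * Hooks for the node sysfs code (drivers/base/node.c): with SYSFS and NUMA
 * enabled, each memory node is expected to gain a per-node "compact"
 * trigger under /sys/devices/system/node/. Descriptive note only; the
 * attribute itself is implemented in mm/compaction.c.
 */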
extern int compaction_register_node(struct node *node);
extern void compaction_unregister_node(struct node *node);

#else

static inline int compaction_register_node(struct node *node)
{
	return 0;
}

static inline void compaction_unregister_node(struct node *node)
{
}
#endif /* CONFIG_COMPACTION && CONFIG_SYSFS && CONFIG_NUMA */

#endif /* _LINUX_COMPACTION_H */