#ifndef _LINUX_COMPACTION_H
#define _LINUX_COMPACTION_H

/* Return values for compact_zone() and try_to_compact_pages() */
/* compaction didn't start as it was not possible or direct reclaim was more suitable */
#define COMPACT_SKIPPED		0
/* compaction should continue to another pageblock */
#define COMPACT_CONTINUE	1
/* direct compaction partially compacted a zone and there are suitable pages */
#define COMPACT_PARTIAL		2
/* The full zone was compacted */
#define COMPACT_COMPLETE	3

#ifdef CONFIG_COMPACTION
extern int sysctl_compact_memory;
extern int sysctl_compaction_handler(struct ctl_table *table, int write,
			void __user *buffer, size_t *length, loff_t *ppos);
extern int sysctl_extfrag_threshold;
extern int sysctl_extfrag_handler(struct ctl_table *table, int write,
			void __user *buffer, size_t *length, loff_t *ppos);

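/*
 * Rough guide (see mm/vmstat.c and mm/compaction.c for the authoritative
 * behaviour): fragmentation_index() reports, scaled to 0-1000, whether an
 * allocation failure at the given order is more likely caused by external
 * fragmentation (towards 1000) than by a plain shortage of free memory
 * (towards 0); a negative value means a suitable free block already exists.
 * Compaction is expected to be skipped in favour of reclaim when the index
 * falls at or below sysctl_extfrag_threshold.
 */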
extern int fragmentation_index(struct zone *zone, unsigned int order);
extern unsigned long try_to_compact_pages(struct zonelist *zonelist,
			int order, gfp_t gfp_mask, nodemask_t *mask,
			bool sync);
extern int compact_pgdat(pg_data_t *pgdat, int order);
extern unsigned long compaction_suitable(struct zone *zone, int order);

/* Do not skip compaction more than 64 times */
#define COMPACT_MAX_DEFER_SHIFT 6

/*
 * Compaction is deferred when compaction fails to result in a page
 * allocation success. 1 << compact_defer_shift compactions are skipped up
 * to a limit of 1 << COMPACT_MAX_DEFER_SHIFT
 */
static inline void defer_compaction(struct zone *zone, int order)
{
	zone->compact_considered = 0;
	zone->compact_defer_shift++;

	if (order < zone->compact_order_failed)
		zone->compact_order_failed = order;

	if (zone->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT)
		zone->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT;
}
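
/*
 * Worked example of the backoff above: compact_defer_shift starts at zero,
 * so the defer window (1 << compact_defer_shift considered calls) doubles
 * after each consecutive failure: 2, 4, 8, 16, 32, then holds at 64 once
 * compact_defer_shift reaches the COMPACT_MAX_DEFER_SHIFT cap of 6.
 */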

/* Returns true if compaction should be skipped this time */
static inline bool compaction_deferred(struct zone *zone, int order)
{
	unsigned long defer_limit = 1UL << zone->compact_defer_shift;

	if (order < zone->compact_order_failed)
		return false;

	/* Avoid possible overflow */
	if (++zone->compact_considered > defer_limit)
		zone->compact_considered = defer_limit;

	return zone->compact_considered < defer_limit;
}
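
/*
 * A minimal usage sketch, assuming a caller that drives direct compaction;
 * everything other than the two helpers above is a placeholder:
 *
 *	if (!compaction_deferred(zone, order)) {
 *		status = run_compaction(zone, order);	// e.g. via try_to_compact_pages()
 *		if (!allocation_succeeded)
 *			defer_compaction(zone, order);	// back off further next time
 *	}
 */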

#else
static inline unsigned long try_to_compact_pages(struct zonelist *zonelist,
			int order, gfp_t gfp_mask, nodemask_t *nodemask,
			bool sync)
{
	return COMPACT_CONTINUE;
}

static inline int compact_pgdat(pg_data_t *pgdat, int order)
{
	return COMPACT_CONTINUE;
}

static inline unsigned long compaction_suitable(struct zone *zone, int order)
{
	return COMPACT_SKIPPED;
}

static inline void defer_compaction(struct zone *zone, int order)
{
}

static inline bool compaction_deferred(struct zone *zone, int order)
{
	return true;
}

#endif /* CONFIG_COMPACTION */

#if defined(CONFIG_COMPACTION) && defined(CONFIG_SYSFS) && defined(CONFIG_NUMA)
extern int compaction_register_node(struct node *node);
extern void compaction_unregister_node(struct node *node);

#else

static inline int compaction_register_node(struct node *node)
{
	return 0;
}

static inline void compaction_unregister_node(struct node *node)
{
}
#endif /* CONFIG_COMPACTION && CONFIG_SYSFS && CONFIG_NUMA */

#endif /* _LINUX_COMPACTION_H */