#ifndef _LINUX_CPUSET_H
#define _LINUX_CPUSET_H
/*
 *  cpuset interface
 *
 *  Copyright (C) 2003 BULL SA
 *  Copyright (C) 2004-2006 Silicon Graphics, Inc.
 *
 */

#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/nodemask.h>
#include <linux/cgroup.h>
#include <linux/mm.h>

#ifdef CONFIG_CPUSETS

extern int number_of_cpusets;	/* How many cpusets are defined in system? */

extern int cpuset_init(void);
extern void cpuset_init_smp(void);
extern void cpuset_update_active_cpus(void);
extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
extern void cpuset_cpus_allowed_fallback(struct task_struct *p);
extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
#define cpuset_current_mems_allowed (current->mems_allowed)
void cpuset_init_current_mems_allowed(void);
int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask);

extern int __cpuset_node_allowed_softwall(int node, gfp_t gfp_mask);
extern int __cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask);

/*
 * The "softwall" variant may also permit nodes belonging to the nearest
 * mem_exclusive/hardwall ancestor cpuset for kernel-internal
 * (!__GFP_HARDWALL) allocations; the "hardwall" variant enforces the
 * task's own mems_allowed.  With at most one cpuset in the system,
 * every node is allowed, so both fast paths short-circuit before
 * calling into the slow-path checks.
 */
static inline int cpuset_node_allowed_softwall(int node, gfp_t gfp_mask)
{
	return number_of_cpusets <= 1 ||
		__cpuset_node_allowed_softwall(node, gfp_mask);
}

static inline int cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask)
{
	return number_of_cpusets <= 1 ||
		__cpuset_node_allowed_hardwall(node, gfp_mask);
}

static inline int cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask)
{
	return cpuset_node_allowed_softwall(zone_to_nid(z), gfp_mask);
}

static inline int cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask)
{
	return cpuset_node_allowed_hardwall(zone_to_nid(z), gfp_mask);
}
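
/*
 * Example (illustrative sketch, not the actual mm/page_alloc.c code):
 * an allocator walking a zonelist can use the softwall check to skip
 * zones on nodes the current cpuset forbids.  try_alloc_from() below
 * is a hypothetical helper, named here only for the example:
 *
 *	for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
 *		if (!cpuset_zone_allowed_softwall(zone, gfp_mask))
 *			continue;
 *		page = try_alloc_from(zone, order, gfp_mask);
 *		if (page)
 *			break;
 *	}
 */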

extern int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
					  const struct task_struct *tsk2);

#define cpuset_memory_pressure_bump() 				\
	do {							\
		if (cpuset_memory_pressure_enabled)		\
			__cpuset_memory_pressure_bump();	\
	} while (0)
extern int cpuset_memory_pressure_enabled;
extern void __cpuset_memory_pressure_bump(void);

extern const struct file_operations proc_cpuset_operations;
struct seq_file;
extern void cpuset_task_status_allowed(struct seq_file *m,
					struct task_struct *task);

extern int cpuset_mem_spread_node(void);
extern int cpuset_slab_spread_node(void);

static inline int cpuset_do_page_mem_spread(void)
{
	return current->flags & PF_SPREAD_PAGE;
}

static inline int cpuset_do_slab_mem_spread(void)
{
	return current->flags & PF_SPREAD_SLAB;
}
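
/*
 * Example (illustrative sketch): the page cache can use the spread
 * hooks to round-robin its allocations over the cpuset's allowed
 * nodes, along the lines of __page_cache_alloc() in mm/filemap.c:
 *
 *	if (cpuset_do_page_mem_spread()) {
 *		int n = cpuset_mem_spread_node();
 *		return alloc_pages_exact_node(n, gfp, 0);
 *	}
 *	return alloc_pages(gfp, 0);
 */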

extern int current_cpuset_is_being_rebound(void);

extern void rebuild_sched_domains(void);

extern void cpuset_print_task_mems_allowed(struct task_struct *p);

/*
 * get_mems_allowed is required when making decisions involving
 * mems_allowed, such as during page allocation.  mems_allowed can be
 * updated in parallel, and depending on the new value an operation can
 * fail, potentially causing process failure.  A retry loop with
 * get_mems_allowed and put_mems_allowed prevents these artificial
 * failures (see the example sketch below put_mems_allowed()).
 */
static inline unsigned int get_mems_allowed(void)
{
	return read_seqcount_begin(&current->mems_allowed_seq);
}

/*
 * If this returns false, the operation that took place after get_mems_allowed
 * may have failed. It is up to the caller to retry the operation if
 * appropriate.
 */
static inline bool put_mems_allowed(unsigned int seq)
{
	return !read_seqcount_retry(&current->mems_allowed_seq, seq);
}
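
/*
 * Example (illustrative sketch) of the retry pattern described above;
 * do_allocation() stands in for any mems_allowed-dependent operation.
 * Retry only when the operation failed and put_mems_allowed() reports
 * that mems_allowed changed underneath it:
 *
 *	do {
 *		seq = get_mems_allowed();
 *		page = do_allocation(...);
 *	} while (!page && !put_mems_allowed(seq));
 */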

static inline void set_mems_allowed(nodemask_t nodemask)
{
	task_lock(current);
	write_seqcount_begin(&current->mems_allowed_seq);
	current->mems_allowed = nodemask;
	write_seqcount_end(&current->mems_allowed_seq);
	task_unlock(current);
}

#else /* !CONFIG_CPUSETS */

static inline int cpuset_init(void) { return 0; }
static inline void cpuset_init_smp(void) {}

static inline void cpuset_update_active_cpus(void)
{
	partition_sched_domains(1, NULL, NULL);
}

static inline void cpuset_cpus_allowed(struct task_struct *p,
				       struct cpumask *mask)
{
	cpumask_copy(mask, cpu_possible_mask);
}

static inline void cpuset_cpus_allowed_fallback(struct task_struct *p)
{
}

static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
{
	return node_possible_map;
}

#define cpuset_current_mems_allowed (node_states[N_HIGH_MEMORY])
static inline void cpuset_init_current_mems_allowed(void) {}

static inline int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
{
	return 1;
}

static inline int cpuset_node_allowed_softwall(int node, gfp_t gfp_mask)
{
	return 1;
}

static inline int cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask)
{
	return 1;
}

static inline int cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask)
{
	return 1;
}

static inline int cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask)
{
	return 1;
}

static inline int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
						 const struct task_struct *tsk2)
{
	return 1;
}

static inline void cpuset_memory_pressure_bump(void) {}

static inline void cpuset_task_status_allowed(struct seq_file *m,
						struct task_struct *task)
{
}

static inline int cpuset_mem_spread_node(void)
{
	return 0;
}

static inline int cpuset_slab_spread_node(void)
{
	return 0;
}

static inline int cpuset_do_page_mem_spread(void)
{
	return 0;
}

static inline int cpuset_do_slab_mem_spread(void)
{
	return 0;
}

static inline int current_cpuset_is_being_rebound(void)
{
	return 0;
}

static inline void rebuild_sched_domains(void)
{
	partition_sched_domains(1, NULL, NULL);
}

static inline void cpuset_print_task_mems_allowed(struct task_struct *p)
{
}

static inline void set_mems_allowed(nodemask_t nodemask)
{
}

static inline unsigned int get_mems_allowed(void)
{
	return 0;
}

static inline bool put_mems_allowed(unsigned int seq)
{
	return true;
}

#endif /* !CONFIG_CPUSETS */

#endif /* _LINUX_CPUSET_H */