/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_SMP_H
#define __LINUX_SMP_H

/*
 *	Generic SMP support
 *		Alan Cox <alan@redhat.com>
 */

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/smp_types.h>

typedef void (*smp_call_func_t)(void *info);
typedef bool (*smp_cond_func_t)(int cpu, void *info);

/*
 * structure shares (partial) layout with struct irq_work
 */
struct __call_single_data {
	struct __call_single_node node;
	smp_call_func_t func;
	void *info;
};

#define CSD_INIT(_func, _info) \
	(struct __call_single_data){ .func = (_func), .info = (_info), }

/* Use __aligned() to avoid using 2 cache lines for 1 csd */
typedef struct __call_single_data call_single_data_t
	__aligned(sizeof(struct __call_single_data));

#define INIT_CSD(_csd, _func, _info)		\
do {						\
	*(_csd) = CSD_INIT((_func), (_info));	\
} while (0)
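
/*
 * Usage sketch (hypothetical names, not part of this header): a csd can
 * be initialized statically with CSD_INIT() or at runtime with INIT_CSD():
 *
 *	static void my_csd_func(void *info)
 *	{
 *		pr_info("csd ran on CPU%d\n", smp_processor_id());
 *	}
 *
 *	static call_single_data_t my_csd = CSD_INIT(my_csd_func, NULL);
 *
 *	INIT_CSD(&my_csd, my_csd_func, NULL);
 */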

/*
 * Enqueue a llist_node on the call_single_queue; be very careful, read
 * flush_smp_call_function_queue() in detail.
 */
extern void __smp_call_single_queue(int cpu, struct llist_node *node);

/* total number of cpus in this system (may exceed NR_CPUS) */
extern unsigned int total_cpus;

int smp_call_function_single(int cpuid, smp_call_func_t func, void *info,
			     int wait);
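
/*
 * Usage sketch (hypothetical function and CPU number): run a fast,
 * non-blocking function on CPU 2 and wait for it to complete; returns
 * 0 on success or a negative errno:
 *
 *	static void read_cycles(void *info)
 *	{
 *		*(u64 *)info = get_cycles();
 *	}
 *
 *	u64 cycles;
 *	int err = smp_call_function_single(2, read_cycles, &cycles, 1);
 */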

void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
			   void *info, bool wait, const struct cpumask *mask);
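
/*
 * Usage sketch (hypothetical names): this is the general form; the
 * wrappers further down pass cpu_online_mask and/or a NULL predicate.
 * Here only the CPUs in a driver-private mask that pass the predicate
 * are sent an IPI:
 *
 *	on_each_cpu_cond_mask(needs_flush, do_flush, drv, true, &drv->cpus);
 */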

int smp_call_function_single_async(int cpu, struct __call_single_data *csd);
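
/*
 * Usage sketch (hypothetical names): fire-and-forget invocation on
 * another CPU. The csd must stay valid until the callback has run, and
 * the caller must serialize reuse of the same csd:
 *
 *	static call_single_data_t poke_csd = CSD_INIT(poke_fn, NULL);
 *
 *	smp_call_function_single_async(cpu, &poke_csd);
 */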

/*
 * Functions to stop other CPUs during panic. All have default weak
 * definitions. Architecture-dependent code may override them.
 */
void panic_smp_self_stop(void);
void nmi_panic_self_stop(struct pt_regs *regs);
void crash_smp_send_stop(void);

/*
 * Call a function on all processors
 */
static inline void on_each_cpu(smp_call_func_t func, void *info, int wait)
{
	on_each_cpu_cond_mask(NULL, func, info, wait, cpu_online_mask);
}
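
/*
 * Usage sketch (hypothetical counter): increment a shared counter on
 * every online CPU, including the local one, and wait for completion:
 *
 *	static void do_count(void *info)
 *	{
 *		atomic_inc(info);
 *	}
 *
 *	static atomic_t hits = ATOMIC_INIT(0);
 *
 *	on_each_cpu(do_count, &hits, 1);
 */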

/**
 * on_each_cpu_mask(): Run a function on processors specified by
 * cpumask, which may include the local processor.
 * @mask: The set of cpus to run on (only runs on online subset).
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * If @wait is true, then returns once @func has returned.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.  The
 * exception is that it may be used during early boot while
 * early_boot_irqs_disabled is set.
 */
static inline void on_each_cpu_mask(const struct cpumask *mask,
				    smp_call_func_t func, void *info, bool wait)
{
	on_each_cpu_cond_mask(NULL, func, info, wait, mask);
}
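
/*
 * Usage sketch (hypothetical names): flush a device context only on the
 * CPUs recorded in a driver-private mask:
 *
 *	on_each_cpu_mask(&drv->cpus_used, drv_flush_context, drv, true);
 */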

/*
 * Call a function on each processor for which the supplied function
 * cond_func returns true. This may include the local processor.  May be
 * used during early boot while early_boot_irqs_disabled is set. Use
 * local_irq_save/restore() instead of local_irq_disable/enable().
 */
static inline void on_each_cpu_cond(smp_cond_func_t cond_func,
				    smp_call_func_t func, void *info, bool wait)
{
	on_each_cpu_cond_mask(cond_func, func, info, wait, cpu_online_mask);
}
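
/*
 * Usage sketch (hypothetical names; drain_queue is a hypothetical
 * smp_call_func_t): only IPI the CPUs flagged in a pending mask. The
 * predicate runs on the calling CPU and must not sleep:
 *
 *	static bool cpu_has_work(int cpu, void *info)
 *	{
 *		return cpumask_test_cpu(cpu, info);
 *	}
 *
 *	on_each_cpu_cond(cpu_has_work, drain_queue, &pending_mask, true);
 */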

#ifdef CONFIG_SMP

#include <linux/preempt.h>
#include <linux/compiler.h>
#include <linux/thread_info.h>
#include <asm/smp.h>

/*
 * main cross-CPU interfaces, handling INIT, TLB flush, STOP, etc.
 * (defined in asm header):
 */

/*
 * stops all CPUs but the current one:
 */
extern void smp_send_stop(void);

/*
 * sends a 'reschedule' event to another CPU:
 */
extern void smp_send_reschedule(int cpu);


/*
 * Prepare machine for booting other CPUs.
 */
extern void smp_prepare_cpus(unsigned int max_cpus);

/*
 * Bring a CPU up
 */
extern int __cpu_up(unsigned int cpunum, struct task_struct *tidle);

/*
 * Final polishing of CPUs
 */
extern void smp_cpus_done(unsigned int max_cpus);

/*
 * Call a function on all other processors
 */
void smp_call_function(smp_call_func_t func, void *info, int wait);
void smp_call_function_many(const struct cpumask *mask,
			    smp_call_func_t func, void *info, bool wait);
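
/*
 * Usage sketch (do_sync is hypothetical): both calls run @func on
 * *other* CPUs only, never on the calling one; pair with a local call
 * if the current CPU must be covered too:
 *
 *	preempt_disable();
 *	smp_call_function_many(cpu_online_mask, do_sync, NULL, true);
 *	do_sync(NULL);
 *	preempt_enable();
 */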

int smp_call_function_any(const struct cpumask *mask,
			  smp_call_func_t func, void *info, int wait);
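
/*
 * Usage sketch (do_probe and res are hypothetical): run @func on any
 * one CPU in @mask, preferring the current CPU to avoid an IPI;
 * returns 0 on success or a negative errno:
 *
 *	int err = smp_call_function_any(cpu_online_mask, do_probe, &res, 1);
 */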

void kick_all_cpus_sync(void);
void wake_up_all_idle_cpus(void);

/*
 * Generic and arch helpers
 */
void __init call_function_init(void);
void generic_smp_call_function_single_interrupt(void);
#define generic_smp_call_function_interrupt \
	generic_smp_call_function_single_interrupt
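
/*
 * Sketch of how architecture code typically wires this up (names vary
 * per arch; ipi_call_function is hypothetical): the handler for the
 * call-function IPI vector just invokes the generic handler:
 *
 *	static void ipi_call_function(void)
 *	{
 *		irq_enter();
 *		generic_smp_call_function_interrupt();
 *		irq_exit();
 *	}
 */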

/*
 * Mark the boot cpu "online" so that it can call console drivers in
 * printk() and can access its per-cpu storage.
 */
void smp_prepare_boot_cpu(void);

extern unsigned int setup_max_cpus;
extern void __init setup_nr_cpu_ids(void);
extern void __init smp_init(void);

extern int __boot_cpu_id;

static inline int get_boot_cpu_id(void)
{
	return __boot_cpu_id;
}

#else /* !SMP */

static inline void smp_send_stop(void) { }

/*
 *	These macros fold the SMP functionality into a single CPU system
 */
#define raw_smp_processor_id()			0
static inline void up_smp_call_function(smp_call_func_t func, void *info)
{
}
#define smp_call_function(func, info, wait) \
			(up_smp_call_function(func, info))

static inline void smp_send_reschedule(int cpu) { }
#define smp_prepare_boot_cpu()			do {} while (0)
#define smp_call_function_many(mask, func, info, wait) \
			(up_smp_call_function(func, info))
static inline void call_function_init(void) { }

static inline int
smp_call_function_any(const struct cpumask *mask, smp_call_func_t func,
		      void *info, int wait)
{
	return smp_call_function_single(0, func, info, wait);
}

static inline void kick_all_cpus_sync(void) { }
static inline void wake_up_all_idle_cpus(void) { }

#ifdef CONFIG_UP_LATE_INIT
extern void __init up_late_init(void);
static inline void smp_init(void) { up_late_init(); }
#else
static inline void smp_init(void) { }
#endif

static inline int get_boot_cpu_id(void)
{
	return 0;
}

#endif /* !SMP */

/**
 * raw_smp_processor_id() - get the current (unstable) CPU id
 *
 * For when you know what you are doing and need an unstable
 * CPU id.
 */

/**
 * smp_processor_id() - get the current (stable) CPU id
 *
 * This is the normal accessor to the CPU id and should be used
 * whenever possible.
 *
 * The CPU id is stable when:
 *
 *  - IRQs are disabled;
 *  - preemption is disabled;
 *  - the task is CPU affine.
 *
 * When CONFIG_DEBUG_PREEMPT is enabled, we verify these assumptions
 * and WARN when smp_processor_id() is used while the CPU id is not
 * stable.
 */
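
/*
 * Usage sketch: make the id stable by disabling preemption around the
 * access (see also get_cpu()/put_cpu() below):
 *
 *	preempt_disable();
 *	pr_info("running on CPU%d\n", smp_processor_id());
 *	preempt_enable();
 */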

/*
 * Allow the architecture to differentiate between a stable and unstable read.
 * For example, x86 uses an IRQ-safe asm-volatile read for the unstable case
 * but a regular asm read for the stable one.
 */
#ifndef __smp_processor_id
#define __smp_processor_id(x) raw_smp_processor_id(x)
#endif

#ifdef CONFIG_DEBUG_PREEMPT
  extern unsigned int debug_smp_processor_id(void);
# define smp_processor_id() debug_smp_processor_id()
#else
# define smp_processor_id() __smp_processor_id()
#endif

#define get_cpu()		({ preempt_disable(); __smp_processor_id(); })
#define put_cpu()		preempt_enable()
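
/*
 * Usage sketch: get_cpu() returns the current CPU id with preemption
 * disabled; put_cpu() re-enables it (my_stat is a hypothetical per-cpu
 * variable):
 *
 *	int cpu = get_cpu();
 *	per_cpu(my_stat, cpu)++;
 *	put_cpu();
 */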

/*
 * Callback to arch code if there's nosmp or maxcpus=0 on the
 * boot command line:
 */
extern void arch_disable_smp_support(void);

extern void arch_thaw_secondary_cpus_begin(void);
extern void arch_thaw_secondary_cpus_end(void);

void smp_setup_processor_id(void);

int smp_call_on_cpu(unsigned int cpu, int (*func)(void *), void *par,
		    bool phys);
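
/*
 * Usage sketch (hypothetical callback and helper): unlike the IPI-based
 * calls above, smp_call_on_cpu() runs @func in process context on @cpu,
 * so @func may sleep; with @phys set it runs with interrupts disabled:
 *
 *	static int set_freq(void *par)
 *	{
 *		return apply_freq(*(unsigned int *)par);
 *	}
 *
 *	err = smp_call_on_cpu(3, set_freq, &freq, false);
 */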

/* SMP core functions */
int smpcfd_prepare_cpu(unsigned int cpu);
int smpcfd_dead_cpu(unsigned int cpu);
int smpcfd_dying_cpu(unsigned int cpu);

#endif /* __LINUX_SMP_H */