/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_SMP_H
#define __LINUX_SMP_H

/*
 *	Generic SMP support
 *		Alan Cox. <alan@redhat.com>
 */

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/smp_types.h>

typedef void (*smp_call_func_t)(void *info);
typedef bool (*smp_cond_func_t)(int cpu, void *info);

/*
 * structure shares (partial) layout with struct irq_work
 */
struct __call_single_data {
	struct __call_single_node node;
	smp_call_func_t func;
	void *info;
};

#define CSD_INIT(_func, _info) \
	(struct __call_single_data){ .func = (_func), .info = (_info), }

/* Use __aligned() to avoid using 2 cache lines for 1 csd */
typedef struct __call_single_data call_single_data_t
	__aligned(sizeof(struct __call_single_data));

#define INIT_CSD(_csd, _func, _info)		\
do {						\
	*(_csd) = CSD_INIT((_func), (_info));	\
} while (0)

/*
 * Enqueue a llist_node on the call_single_queue; be very careful, read
 * flush_smp_call_function_queue() in detail.
 */
extern void __smp_call_single_queue(int cpu, struct llist_node *node);

/* total number of cpus in this system (may exceed NR_CPUS) */
extern unsigned int total_cpus;

int smp_call_function_single(int cpuid, smp_call_func_t func, void *info,
			     int wait);
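
/*
 * Illustrative sketch only (not part of this header): run a fast,
 * non-blocking callback on CPU 1 and wait for it to finish. The
 * callback runs from IPI context with interrupts disabled, so it
 * must not sleep. All example_* names below are hypothetical.
 */
#if 0
static void example_record_cpu(void *info)
{
	*(int *)info = smp_processor_id();	/* stable: IPI context */
}

static int example_query_cpu1(void)
{
	int cpu = -1;

	/* Returns 0 on success, -ENXIO if CPU 1 is offline. */
	if (smp_call_function_single(1, example_record_cpu, &cpu, 1))
		return -ENXIO;
	return cpu;
}
#endif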

void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
			   void *info, bool wait, const struct cpumask *mask);

int smp_call_function_single_async(int cpu, struct __call_single_data *csd);
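
/*
 * Illustrative sketch only: fire-and-forget a callback on another CPU
 * with a caller-owned csd, set up via INIT_CSD(). The csd must not be
 * reused while a previous call is still in flight; -EBUSY is returned
 * in that case. All example_* names are hypothetical.
 */
#if 0
static void example_poke(void *info)
{
	/* runs on the target CPU, from IPI context */
}

static call_single_data_t example_csd;

static void example_setup(void)
{
	INIT_CSD(&example_csd, example_poke, NULL);
}

static int example_kick(int cpu)
{
	return smp_call_function_single_async(cpu, &example_csd);
}
#endif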

/*
 * CPU-stopping functions used during a panic. All have default weak
 * definitions; architecture-dependent code may override them.
 */
void __noreturn panic_smp_self_stop(void);
void __noreturn nmi_panic_self_stop(struct pt_regs *regs);
void crash_smp_send_stop(void);

/*
 * Call a function on all processors
 */
static inline void on_each_cpu(smp_call_func_t func, void *info, int wait)
{
	on_each_cpu_cond_mask(NULL, func, info, wait, cpu_online_mask);
}
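
/*
 * Illustrative sketch only: invalidate a hypothetical per-CPU cache on
 * every online CPU and wait for completion. The callback runs with
 * interrupts disabled on each CPU, so it must be fast and non-blocking.
 */
#if 0
static void example_invalidate(void *unused)
{
	/* touch only this CPU's state here */
}

static void example_invalidate_all(void)
{
	on_each_cpu(example_invalidate, NULL, 1);
}
#endif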

/**
 * on_each_cpu_mask(): Run a function on processors specified by
 * cpumask, which may include the local processor.
 * @mask: The set of cpus to run on (only runs on online subset).
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * If @wait is true, then returns once @func has returned.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.  The
 * exception is that it may be used during early boot while
 * early_boot_irqs_disabled is set.
 */
static inline void on_each_cpu_mask(const struct cpumask *mask,
				    smp_call_func_t func, void *info, bool wait)
{
	on_each_cpu_cond_mask(NULL, func, info, wait, mask);
}
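
/*
 * Illustrative sketch only: run a callback on the CPUs of a given NUMA
 * node and wait for completion. Offline CPUs in the mask are silently
 * skipped. cpumask_of_node() comes from <linux/topology.h>; the
 * example_* names are hypothetical.
 */
#if 0
static void example_sync(void *info)
{
	/* per-CPU work; fast and non-blocking */
}

static void example_sync_node(int node)
{
	on_each_cpu_mask(cpumask_of_node(node), example_sync, NULL, true);
}
#endif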

/*
 * Call a function on each processor for which the supplied function
 * cond_func returns true. This may include the local processor.  May be
 * used during early boot while early_boot_irqs_disabled is set.
 * Use local_irq_save/restore() instead of local_irq_disable/enable().
 */
static inline void on_each_cpu_cond(smp_cond_func_t cond_func,
				    smp_call_func_t func, void *info, bool wait)
{
	on_each_cpu_cond_mask(cond_func, func, info, wait, cpu_online_mask);
}
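
/*
 * Illustrative sketch only: run a callback only on CPUs whose
 * hypothetical per-CPU counter is non-zero. The predicate is evaluated
 * on the calling CPU for each candidate CPU. Assumes the per-cpu
 * helpers from <linux/percpu-defs.h>; example_* names are hypothetical.
 */
#if 0
static DEFINE_PER_CPU(unsigned int, example_pending);

static bool example_has_work(int cpu, void *info)
{
	return per_cpu(example_pending, cpu) != 0;
}

static void example_drain(void *info)
{
	this_cpu_write(example_pending, 0);
}

static void example_drain_pending(void)
{
	on_each_cpu_cond(example_has_work, example_drain, NULL, true);
}
#endif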

#ifdef CONFIG_SMP

#include <linux/preempt.h>
#include <linux/compiler.h>
#include <linux/thread_info.h>
#include <asm/smp.h>

/*
 * main cross-CPU interfaces, handles INIT, TLB flush, STOP, etc.
 * (defined in asm header):
 */

/*
 * stops all CPUs but the current one:
 */
extern void smp_send_stop(void);

/*
 * sends a 'reschedule' event to another CPU:
 */
extern void arch_smp_send_reschedule(int cpu);
/*
 * scheduler_ipi() is inline so can't be passed as callback reason, but the
 * callsite IP should be sufficient for root-causing IPIs sent from here.
 */
#define smp_send_reschedule(cpu) ({		  \
	trace_ipi_send_cpu(cpu, _RET_IP_, NULL);  \
	arch_smp_send_reschedule(cpu);		  \
})

/*
 * Prepare machine for booting other CPUs.
 */
extern void smp_prepare_cpus(unsigned int max_cpus);

/*
 * Bring a CPU up
 */
extern int __cpu_up(unsigned int cpunum, struct task_struct *tidle);

/*
 * Final polishing of CPUs
 */
extern void smp_cpus_done(unsigned int max_cpus);

/*
 * Call a function on all other processors
 */
void smp_call_function(smp_call_func_t func, void *info, int wait);
void smp_call_function_many(const struct cpumask *mask,
			    smp_call_func_t func, void *info, bool wait);
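
/*
 * Illustrative sketch only: ask every other online CPU to flush a
 * hypothetical local buffer, then flush our own. smp_call_function()
 * skips the calling CPU, which must handle itself if needed.
 */
#if 0
static void example_flush_local(void *info)
{
	/* flush this CPU's buffer; fast and non-blocking */
}

static void example_flush_everywhere(void)
{
	preempt_disable();			/* pin ourselves */
	smp_call_function(example_flush_local, NULL, 1);
	example_flush_local(NULL);		/* the IPI skipped us */
	preempt_enable();
}
#endif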

int smp_call_function_any(const struct cpumask *mask,
			  smp_call_func_t func, void *info, int wait);
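
/*
 * Illustrative sketch only: run a callback on any online CPU of a
 * given NUMA node, preferring the current CPU when it qualifies; an
 * error (e.g. -ENXIO) may be returned if no CPU in the mask is online.
 */
#if 0
static int example_on_node(int node, smp_call_func_t func, void *info)
{
	return smp_call_function_any(cpumask_of_node(node), func, info, 1);
}
#endif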

void kick_all_cpus_sync(void);
void wake_up_all_idle_cpus(void);

/*
 * Generic and arch helpers
 */
void __init call_function_init(void);
void generic_smp_call_function_single_interrupt(void);
#define generic_smp_call_function_interrupt \
	generic_smp_call_function_single_interrupt

/*
 * Mark the boot cpu "online" so that it can call console drivers in
 * printk() and can access its per-cpu storage.
 */
void smp_prepare_boot_cpu(void);

extern unsigned int setup_max_cpus;
extern void __init setup_nr_cpu_ids(void);
extern void __init smp_init(void);

extern int __boot_cpu_id;

static inline int get_boot_cpu_id(void)
{
	return __boot_cpu_id;
}

#else /* !SMP */

static inline void smp_send_stop(void) { }

/*
 *	These macros fold the SMP functionality into a single CPU system
 */
#define raw_smp_processor_id()			0
static inline void up_smp_call_function(smp_call_func_t func, void *info)
{
}
#define smp_call_function(func, info, wait) \
			(up_smp_call_function(func, info))

static inline void smp_send_reschedule(int cpu) { }
#define smp_prepare_boot_cpu()			do {} while (0)
#define smp_call_function_many(mask, func, info, wait) \
			(up_smp_call_function(func, info))
static inline void call_function_init(void) { }

static inline int
smp_call_function_any(const struct cpumask *mask, smp_call_func_t func,
		      void *info, int wait)
{
	return smp_call_function_single(0, func, info, wait);
}

static inline void kick_all_cpus_sync(void) {  }
static inline void wake_up_all_idle_cpus(void) {  }

#ifdef CONFIG_UP_LATE_INIT
extern void __init up_late_init(void);
static inline void smp_init(void) { up_late_init(); }
#else
static inline void smp_init(void) { }
#endif

static inline int get_boot_cpu_id(void)
{
	return 0;
}

#endif /* !SMP */

/**
 * raw_smp_processor_id() - get the current (unstable) CPU id
 *
 * For when you know what you are doing and need an unstable
 * CPU id.
 */

/**
 * smp_processor_id() - get the current (stable) CPU id
 *
 * This is the normal accessor to the CPU id and should be used
 * whenever possible.
 *
 * The CPU id is stable when:
 *
 *  - IRQs are disabled;
 *  - preemption is disabled;
 *  - the task is CPU affine.
 *
 * When CONFIG_DEBUG_PREEMPT is enabled, we verify these assumptions
 * and WARN when smp_processor_id() is used while the CPU id is not
 * stable.
 */

/*
 * Allow the architecture to differentiate between a stable and unstable read.
 * For example, x86 uses an IRQ-safe asm-volatile read for the unstable but a
 * regular asm read for the stable.
 */
#ifndef __smp_processor_id
#define __smp_processor_id(x) raw_smp_processor_id(x)
#endif

#ifdef CONFIG_DEBUG_PREEMPT
  extern unsigned int debug_smp_processor_id(void);
# define smp_processor_id() debug_smp_processor_id()
#else
# define smp_processor_id() __smp_processor_id()
#endif

#define get_cpu()		({ preempt_disable(); __smp_processor_id(); })
#define put_cpu()		preempt_enable()
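
/*
 * Illustrative sketch only: get_cpu()/put_cpu() disable preemption,
 * making the returned CPU id stable for the duration of the critical
 * section. The example_* name is hypothetical.
 */
#if 0
static void example_touch_local(void)
{
	int cpu = get_cpu();		/* preemption now disabled */

	pr_info("running on CPU %d\n", cpu);
	put_cpu();			/* preemption re-enabled */
}
#endif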

/*
 * Callback to arch code if there's nosmp or maxcpus=0 on the
 * boot command line:
 */
extern void arch_disable_smp_support(void);

extern void arch_thaw_secondary_cpus_begin(void);
extern void arch_thaw_secondary_cpus_end(void);

void smp_setup_processor_id(void);

int smp_call_on_cpu(unsigned int cpu, int (*func)(void *), void *par,
		    bool phys);
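
/*
 * Illustrative sketch only: unlike the IPI-based calls above,
 * smp_call_on_cpu() runs @func from a work item on the target CPU, so
 * the callback may sleep; @phys requests that the call also be done on
 * the matching physical CPU in virtualized environments. The example_*
 * names are hypothetical.
 */
#if 0
static int example_slow_op(void *arg)
{
	/* may sleep: runs in process context on the chosen CPU */
	return 0;
}

static int example_run_on(unsigned int cpu)
{
	/* Returns example_slow_op()'s result, or -ENXIO if cpu is offline. */
	return smp_call_on_cpu(cpu, example_slow_op, NULL, false);
}
#endif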

/* SMP core functions */
int smpcfd_prepare_cpu(unsigned int cpu);
int smpcfd_dead_cpu(unsigned int cpu);
int smpcfd_dying_cpu(unsigned int cpu);

#endif /* __LINUX_SMP_H */