/*
 *  arch/s390/kernel/smp.c
 *
 *  S390 version
 *    Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  based on other smp stuff by
 *    (c) 1995 Alan Cox, CymruNET Ltd  <alan@cymru.net>
 *    (c) 1998 Ingo Molnar
 *
 * We work with logical cpu numbering everywhere we can. The only
 * functions using the real cpu address (got from STAP) are the sigp
 * functions. For all other functions we use the identity mapping.
 * That means that cpu_number_map[i] == i for every cpu. cpu_number_map is
 * used e.g. to find the idle task belonging to a logical cpu. Every array
 * in the kernel is sorted by the logical cpu number and not by the physical
 * one which is causing all the confusion with __cpu_logical_map and
 * cpu_number_map in other architectures.
 */

#include <linux/module.h>
#include <linux/init.h>

#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/kernel_stat.h>
#include <linux/smp_lock.h>

#include <linux/delay.h>
#include <linux/cache.h>

#include <asm/sigp.h>
#include <asm/pgalloc.h>
#include <asm/irq.h>
#include <asm/s390_ext.h>
#include <asm/cpcmd.h>

/* prototypes */
extern int cpu_idle(void * unused);

extern __u16 boot_cpu_addr;
extern volatile int __cpu_logical_map[];

/*
 * An array with a pointer to the lowcore of every CPU.
 */
static int       max_cpus = NR_CPUS;	  /* Setup configured maximum number of CPUs to activate */
int              smp_num_cpus;
struct _lowcore *lowcore_ptr[NR_CPUS];
cycles_t         cacheflush_time=0;
int              smp_threads_ready=0;      /* Set when the idlers are all forked. */
static atomic_t  smp_commenced = ATOMIC_INIT(0);

spinlock_t       kernel_flag __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;

unsigned long	 cpu_online_map;

/*
 *      Setup routine for controlling SMP activation
 *
 *      Command-line option of "nosmp" or "maxcpus=0" will disable SMP
 *      activation entirely (the MPS table probe still happens, though).
 *
 *      Command-line option of "maxcpus=<NUM>", where <NUM> is an integer
 *      greater than 0, limits the maximum number of CPUs activated in
 *      SMP mode to <NUM>.
 */

static int __init nosmp(char *str)
{
	max_cpus = 0;
	return 1;
}

__setup("nosmp", nosmp);

static int __init maxcpus(char *str)
{
	get_option(&str, &max_cpus);
	return 1;
}

__setup("maxcpus=", maxcpus);
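
/*
 * For example, booting with "maxcpus=2" on the kernel command line limits
 * SMP activation to two CPUs, while "nosmp" or "maxcpus=0" leaves only the
 * boot CPU running (with max_cpus == 0 no additional CPUs are sensed or
 * started in smp_count_cpus()).
 */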

/*
 * Reboot, halt and power_off routines for SMP.
 */
extern char vmhalt_cmd[];
extern char vmpoff_cmd[];

extern void reipl(unsigned long devno);

static void smp_ext_bitcall(int, ec_bit_sig);
static void smp_ext_bitcall_others(ec_bit_sig);

/*
 * Structure and data for smp_call_function(). This is designed to minimise
 * static memory requirements. It also looks cleaner.
 */
static spinlock_t call_lock = SPIN_LOCK_UNLOCKED;

struct call_data_struct {
	void (*func) (void *info);
	void *info;
	atomic_t started;
	atomic_t finished;
	int wait;
};

static struct call_data_struct * call_data;

/*
 * 'Call function' interrupt callback
 */
static void do_call_function(void)
{
	void (*func) (void *info) = call_data->func;
	void *info = call_data->info;
	int wait = call_data->wait;

	atomic_inc(&call_data->started);
	(*func)(info);
	if (wait)
		atomic_inc(&call_data->finished);
}

/*
 * this function sends a 'generic call function' IPI to all other CPUs
 * in the system.
 */

int smp_call_function(void (*func) (void *info), void *info, int nonatomic,
			int wait)
/*
 * [SUMMARY] Run a function on all other CPUs.
 * <func> The function to run. This must be fast and non-blocking.
 * <info> An arbitrary pointer to pass to the function.
 * <nonatomic> currently unused.
 * <wait> If true, wait (atomically) until function has completed on other CPUs.
 * [RETURNS] 0 on success, else a negative status code. Does not return until
 * remote CPUs have begun (or have already completed) executing <<func>>.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler, you may call it from a bottom half handler.
 */
{
	struct call_data_struct data;
	int cpus = smp_num_cpus-1;

	if (!cpus || !atomic_read(&smp_commenced))
		return 0;

	data.func = func;
	data.info = info;
	atomic_set(&data.started, 0);
	data.wait = wait;
	if (wait)
		atomic_set(&data.finished, 0);

	spin_lock_bh(&call_lock);
	call_data = &data;
	/* Send a message to all other CPUs and wait for them to respond */
	smp_ext_bitcall_others(ec_call_function);

	/* Wait for response */
	while (atomic_read(&data.started) != cpus)
		barrier();

	if (wait)
		while (atomic_read(&data.finished) != cpus)
			barrier();
	spin_unlock_bh(&call_lock);

	return 0;
}
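
/*
 * Example (hypothetical caller): to run a fast, non-blocking helper such as
 *
 *	static void drain_local_counters(void *unused) { ... }
 *
 * on every other CPU and wait for it to finish everywhere, one would call
 *
 *	smp_call_function(drain_local_counters, NULL, 0, 1);
 *
 * from process context or a bottom half, never from a hardware interrupt
 * handler or with interrupts disabled (see the comment above).
 */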

/*
 * Call a function on one CPU
 * cpu : the CPU the function should be executed on
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler, you may call it from a bottom half handler.
 */
int smp_call_function_on(void (*func) (void *info), void *info,
                         int nonatomic, int wait, int cpu)
{
	struct call_data_struct data;

	if (!atomic_read(&smp_commenced))
		return 0;

	if (smp_processor_id() == cpu) {
		/* direct call to function */
		func(info);
		return 0;
	}

	data.func = func;
	data.info = info;

	atomic_set(&data.started, 0);
	data.wait = wait;
	if (wait)
		atomic_set(&data.finished, 0);

	spin_lock_bh(&call_lock);
	call_data = &data;
	smp_ext_bitcall(cpu, ec_call_function);

	/* Wait for response */
	while (atomic_read(&data.started) != 1)
		barrier();

	if (wait)
		while (atomic_read(&data.finished) != 1)
			barrier();

	spin_unlock_bh(&call_lock);
	return 0;
}
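
/*
 * Unlike smp_call_function(), smp_call_function_on() targets a single
 * logical CPU; if that CPU happens to be the current one, <func> is simply
 * called directly instead of going through the external call interrupt.
 */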

static inline void do_send_stop(void)
{
        unsigned long dummy;
        int i;

        /* stop all processors */
        for (i =  0; i < smp_num_cpus; i++) {
                if (smp_processor_id() != i) {
                        int ccode;
                        do {
                                ccode = signal_processor_ps(
                                   &dummy,
                                   0,
                                   i,
                                   sigp_stop);
                        } while(ccode == sigp_busy);
                }
        }
}

static inline void do_store_status(void)
{
        unsigned long low_core_addr;
        unsigned long dummy;
        int i;

        /* store status of all processors in their lowcores (real 0) */
        for (i =  0; i < smp_num_cpus; i++) {
                if (smp_processor_id() != i) {
                        int ccode;
                        low_core_addr = (unsigned long)get_cpu_lowcore(i);
                        do {
                                ccode = signal_processor_ps(
                                   &dummy,
                                   low_core_addr,
                                   i,
                                   sigp_store_status_at_address);
                        } while(ccode == sigp_busy);
                }
        }
}

/*
 * this function sends a 'stop' sigp to all other CPUs in the system.
 * it goes straight through.
 */
void smp_send_stop(void)
{
        /* write magic number to zero page (absolute 0) */
        get_cpu_lowcore(smp_processor_id())->panic_magic = __PANIC_MAGIC;

	/* stop other processors. */
	do_send_stop();

	/* store status of other processors. */
	do_store_status();
}

/*
 * Reboot, halt and power_off routines for SMP.
 */
static volatile unsigned long cpu_restart_map;

static void do_machine_restart(void * __unused)
{
	clear_bit(smp_processor_id(), &cpu_restart_map);
	if (smp_processor_id() == 0) {
		/* Wait for all other cpus to enter do_machine_restart. */
		while (cpu_restart_map != 0);
		/* Store status of other cpus. */
		do_store_status();
		/*
		 * Finally call reipl. Because we waited for all other
		 * cpus to enter this function we know that they do
		 * not hold any s390irq-locks (the cpus have been
		 * interrupted by an external interrupt and s390irq
		 * locks are always held disabled).
		 */
		reipl(S390_lowcore.ipl_device);
	}
	signal_processor(smp_processor_id(), sigp_stop);
}

void machine_restart_smp(char * __unused)
{
	cpu_restart_map = cpu_online_map;
        smp_call_function(do_machine_restart, NULL, 0, 0);
	do_machine_restart(NULL);
}

static void do_machine_halt(void * __unused)
{
	if (smp_processor_id() == 0) {
		smp_send_stop();
		if (MACHINE_IS_VM && strlen(vmhalt_cmd) > 0)
			cpcmd(vmhalt_cmd, NULL, 0);
		signal_processor(smp_processor_id(),
				 sigp_stop_and_store_status);
	}
	for (;;)
		enabled_wait();
}

void machine_halt_smp(void)
{
        smp_call_function(do_machine_halt, NULL, 0, 0);
	do_machine_halt(NULL);
}

static void do_machine_power_off(void * __unused)
{
	if (smp_processor_id() == 0) {
		smp_send_stop();
		if (MACHINE_IS_VM && strlen(vmpoff_cmd) > 0)
			cpcmd(vmpoff_cmd, NULL, 0);
		signal_processor(smp_processor_id(),
				 sigp_stop_and_store_status);
	}
	for (;;)
		enabled_wait();
}

void machine_power_off_smp(void)
{
        smp_call_function(do_machine_power_off, NULL, 0, 0);
	do_machine_power_off(NULL);
}

/*
 * This is the main routine where commands issued by other
 * cpus are handled.
 */

void do_ext_call_interrupt(struct pt_regs *regs, __u16 code)
{
        int bits;

        /*
         * handle bit signal external calls
         *
         * For the ec_schedule signal we have to do nothing. All the work
         * is done automatically when we return from the interrupt.
         */
        do {
                bits = atomic_read(&S390_lowcore.ext_call_fast);
        } while (atomic_compare_and_swap(bits,0,&S390_lowcore.ext_call_fast));

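	/*
	 * 'bits' now holds every signal that was pending; ext_call_fast has
	 * been cleared atomically, so a signal raised from here on will
	 * trigger another external call interrupt.
	 */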
	if (test_bit(ec_call_function, &bits))
		do_call_function();
}

/*
 * Send an external call sigp to another cpu and wait
 * for its completion.
 */
static void smp_ext_bitcall(int cpu, ec_bit_sig sig)
{
	struct _lowcore *lowcore = get_cpu_lowcore(cpu);

	/*
	 * Set signaling bit in lowcore of target cpu and kick it
	 */
	atomic_set_mask(1<<sig, &lowcore->ext_call_fast);
	while(signal_processor(cpu, sigp_external_call) == sigp_busy)
		udelay(10);
}

/*
 * Send an external call sigp to every other cpu in the system and
 * wait for its completion.
 */
static void smp_ext_bitcall_others(ec_bit_sig sig)
{
	struct _lowcore *lowcore;
	int i;

	for (i = 0; i < smp_num_cpus; i++) {
		if (smp_processor_id() == i)
			continue;
		lowcore = get_cpu_lowcore(i);
		/*
		 * Set signaling bit in lowcore of target cpu and kick it
		 */
		atomic_set_mask(1<<sig, &lowcore->ext_call_fast);
		while (signal_processor(i, sigp_external_call) == sigp_busy)
			udelay(10);
	}
}

/*
 * this function sends a 'purge tlb' signal to another CPU.
 */
void smp_ptlb_callback(void *info)
{
	local_flush_tlb();
}

void smp_ptlb_all(void)
{
        smp_call_function(smp_ptlb_callback, NULL, 0, 1);
	local_flush_tlb();
}

/*
 * this function sends a 'reschedule' IPI to another CPU.
 * it goes straight through and wastes no time serializing
 * anything. Worst case is that we lose a reschedule ...
 */

void smp_send_reschedule(int cpu)
{
        smp_ext_bitcall(cpu, ec_schedule);
}

/*
 * parameter area for the set/clear control bit callbacks
 */
typedef struct
{
	__u16 start_ctl;
	__u16 end_ctl;
	__u32 orvals[16];
	__u32 andvals[16];
} ec_creg_mask_parms;
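
/*
 * The callback below applies, for every control register i in the range
 * start_ctl..end_ctl:
 *
 *	cr[i] = (cr[i] & andvals[i]) | orvals[i]
 *
 * so a caller sets orvals[i] to the bits it wants switched on and andvals[i]
 * to the mask of bits it wants to keep. The inline assembly uses EX of a
 * STCTL/LCTL instruction so that only the registers start_ctl..end_ctl are
 * stored and reloaded.
 */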

/*
 * callback for setting/clearing control bits
 */
void smp_ctl_bit_callback(void *info) {
	ec_creg_mask_parms *pp;
	u32 cregs[16];
	int i;

	pp = (ec_creg_mask_parms *) info;
	asm volatile ("   bras  1,0f\n"
		      "   stctl 0,0,0(%0)\n"
		      "0: ex    %1,0(1)\n"
		      : : "a" (cregs+pp->start_ctl),
		          "a" ((pp->start_ctl<<4) + pp->end_ctl)
		      : "memory", "1" );
	for (i = pp->start_ctl; i <= pp->end_ctl; i++)
		cregs[i] = (cregs[i] & pp->andvals[i]) | pp->orvals[i];
	asm volatile ("   bras  1,0f\n"
		      "   lctl 0,0,0(%0)\n"
		      "0: ex    %1,0(1)\n"
		      : : "a" (cregs+pp->start_ctl),
		          "a" ((pp->start_ctl<<4) + pp->end_ctl)
		      : "memory", "1" );
	return;
}

/*
 * Set a bit in a control register of all cpus
 */
void smp_ctl_set_bit(int cr, int bit) {
        ec_creg_mask_parms parms;

        if (atomic_read(&smp_commenced) != 0) {
                parms.start_ctl = cr;
                parms.end_ctl = cr;
                parms.orvals[cr] = 1 << bit;
                parms.andvals[cr] = 0xFFFFFFFF;
                smp_call_function(smp_ctl_bit_callback, &parms, 0, 1);
        }
        __ctl_set_bit(cr, bit);
}

/*
 * Clear a bit in a control register of all cpus
 */
void smp_ctl_clear_bit(int cr, int bit) {
        ec_creg_mask_parms parms;

        if (atomic_read(&smp_commenced) != 0) {
                parms.start_ctl = cr;
                parms.end_ctl = cr;
                parms.orvals[cr] = 0x00000000;
                parms.andvals[cr] = ~(1 << bit);
                smp_call_function(smp_ctl_bit_callback, &parms, 0, 1);
        }
        __ctl_clear_bit(cr, bit);
}
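
/*
 * Note that both helpers also update the calling CPU with __ctl_set_bit() /
 * __ctl_clear_bit() even before SMP has commenced; the broadcast to the
 * other CPUs only happens once smp_commenced is set. A (hypothetical)
 * caller that wants bit <b> of control register <c> set everywhere simply
 * calls smp_ctl_set_bit(c, b).
 */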

/*
 * Let's check how many CPUs we have.
 */

void smp_count_cpus(void)
{
        int curr_cpu;

        current->processor = 0;
        smp_num_cpus = 1;
	cpu_online_map = 1;
        for (curr_cpu = 0;
             curr_cpu <= 65535 && smp_num_cpus < max_cpus; curr_cpu++) {
                if ((__u16) curr_cpu == boot_cpu_addr)
                        continue;
                __cpu_logical_map[smp_num_cpus] = (__u16) curr_cpu;
                if (signal_processor(smp_num_cpus, sigp_sense) ==
                    sigp_not_operational)
                        continue;
                smp_num_cpus++;
        }
        printk("Detected %d CPUs\n",(int) smp_num_cpus);
        printk("Boot cpu address %2X\n", boot_cpu_addr);
}


/*
 *      Activate a secondary processor.
 */
extern void init_cpu_timer(void);
extern int pfault_init(void);
extern int pfault_token(void);

int __init start_secondary(void *cpuvoid)
{
        /* Setup the cpu */
        cpu_init();
        /* Print info about this processor */
        print_cpu_info(&safe_get_cpu_lowcore(smp_processor_id())->cpu_data);
        /* Wait for completion of smp startup */
        while (!atomic_read(&smp_commenced))
                /* nothing */ ;
        /* init per CPU timer */
        init_cpu_timer();
#ifdef CONFIG_PFAULT
	/* Enable pfault pseudo page faults on this cpu. */
	pfault_init();
#endif
        /* cpu_idle will call schedule for us */
        return cpu_idle(NULL);
}
/*
 * The restart interrupt handler jumps to start_secondary directly
 * without the detour over initialize_secondary. We define it here
 * so that the linker doesn't complain.
 */
void __init initialize_secondary(void)
{
}

static int __init fork_by_hand(void)
{
       struct pt_regs regs;
       /* don't care about the psw and regs settings since we'll never
          reschedule the forked task. */
       memset(&regs,0,sizeof(struct pt_regs));
       return do_fork(CLONE_VM|CLONE_PID, 0, &regs, 0);
}

static void __init do_boot_cpu(int cpu)
{
        struct task_struct *idle;
        struct _lowcore    *cpu_lowcore;

        /* We can't use kernel_thread since we must _avoid_ rescheduling
           the child. */
        if (fork_by_hand() < 0)
                panic("failed fork for CPU %d", cpu);

        /*
         * We remove it from the pidhash and the runqueue
         * once we got the process:
         */
        idle = init_task.prev_task;
        if (!idle)
                panic("No idle process for CPU %d",cpu);
        idle->processor = cpu;
	idle->cpus_runnable = 1 << cpu; /* we schedule the first task manually */

        del_from_runqueue(idle);
        unhash_process(idle);
        init_tasks[cpu] = idle;

        cpu_lowcore = get_cpu_lowcore(cpu);
	cpu_lowcore->save_area[15] = idle->thread.ksp;
	cpu_lowcore->kernel_stack = (__u32) idle + 8192;
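        /*
         * Store the boot cpu's control registers and access registers in
         * the new cpu's lowcore save areas so the secondary can inherit
         * them when it is restarted.
         */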
        __asm__ __volatile__("la    1,%0\n\t"
			     "stctl 0,15,0(1)\n\t"
			     "la    1,%1\n\t"
                             "stam  0,15,0(1)"
                             : "=m" (cpu_lowcore->cregs_save_area[0]),
                               "=m" (cpu_lowcore->access_regs_save_area[0])
                             : : "1", "memory");

        eieio();
        signal_processor(cpu,sigp_restart);
	/* Mark this cpu as online */
	set_bit(cpu, &cpu_online_map);
}

/*
 *      Architecture specific routine called by the kernel just before init is
 *      fired off. This allows the BP to have everything in order [we hope].
 *      At the end of this all the APs will hit the system scheduling and off
 *      we go. Each AP will load the system gdt's and jump through the kernel
 *      init into idle(). At this point the scheduler will one day take over
 *      and give them jobs to do. smp_callin is a standard routine
 *      we use to track CPUs as they power up.
 */

void __init smp_commence(void)
{
        /*
         *      Lets the callins below out of their loop.
         */
        atomic_set(&smp_commenced,1);
}

/*
 *	Cycle through the processors sending sigp_restart to boot each.
 */

void __init smp_boot_cpus(void)
{
	unsigned long async_stack;
        sigp_ccode   ccode;
        int i;

        /* request the 0x1202 external interrupt */
        if (register_external_interrupt(0x1202, do_ext_call_interrupt) != 0)
                panic("Couldn't request external interrupt 0x1202");
        smp_count_cpus();
        memset(lowcore_ptr,0,sizeof(lowcore_ptr));

        /*
         *      Initialize the logical to physical CPU number mapping
         */
        print_cpu_info(&safe_get_cpu_lowcore(0)->cpu_data);

        for(i = 0; i < smp_num_cpus; i++)
        {
		lowcore_ptr[i] = (struct _lowcore *)
			__get_free_page(GFP_KERNEL|GFP_DMA);
                if (lowcore_ptr[i] == NULL)
                        panic("smp_boot_cpus failed to "
			      "allocate prefix memory\n");
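		/*
		 * Allocate an order-1 (two page) area for the asynchronous
		 * interrupt stack; async_stack is later set to its upper end
		 * because the stack grows downward.
		 */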
		async_stack = __get_free_pages(GFP_KERNEL,1);
		if (async_stack == 0)
			panic("smp_boot_cpus failed to allocate "
			      "asynchronous interrupt stack\n");

                memcpy(lowcore_ptr[i], &S390_lowcore, sizeof(struct _lowcore));
		lowcore_ptr[i]->async_stack = async_stack + (2 * PAGE_SIZE);
                /*
                 * Most of the parameters are set up when the cpu is
                 * started up.
                 */
		if (smp_processor_id() == i)
			set_prefix((u32) lowcore_ptr[i]);
		else {
			ccode = signal_processor_p((u32)(lowcore_ptr[i]),
						   i, sigp_set_prefix);
			if (ccode)
				/* if this gets troublesome I'll have to do
				 * something about it. */
				printk("ccode %d returned when setting prefix "
				       "for cpu %d in smp_boot_cpus; not good.\n",
				       (int) ccode, (int) i);
			else
				do_boot_cpu(i);
		}
	}
}

/*
 * the frequency of the profiling timer can be changed
 * by writing a multiplier value into /proc/profile.
 *
 * usually you want to run this on all CPUs ;)
 */
int setup_profiling_timer(unsigned int multiplier)
{
        return 0;
}

EXPORT_SYMBOL(lowcore_ptr);
EXPORT_SYMBOL(kernel_flag);
EXPORT_SYMBOL(smp_ctl_set_bit);
EXPORT_SYMBOL(smp_ctl_clear_bit);
EXPORT_SYMBOL(smp_num_cpus);
EXPORT_SYMBOL(smp_call_function);