/*
** SMP Support
**
** Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
** Copyright (C) 1999 David Mosberger-Tang <davidm@hpl.hp.com>
** Copyright (C) 2001 Grant Grundler <grundler@parisc-linux.org>
**
** Lots of stuff stolen from arch/alpha/kernel/smp.c
** ...and then parisc stole from arch/ia64/kernel/smp.c. Thanks David! :^)
**
** Thanks to John Curry and Ullas Ponnadi. I learned a lot from their work.
** -grant (1/12/2001)
**
** This program is free software; you can redistribute it and/or modify
** it under the terms of the GNU General Public License as published by
** the Free Software Foundation; either version 2 of the License, or
** (at your option) any later version.
*/
#define __KERNEL_SYSCALLS__
#undef ENTRY_SYS_CPUS           /* syscall support for iCOD-like functionality */

#include <linux/autoconf.h>

#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/slab.h>

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/kernel_stat.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/reboot.h>

#include <asm/system.h>
#include <asm/atomic.h>
#include <asm/bitops.h>
#include <asm/current.h>
#include <asm/delay.h>
#include <asm/pgalloc.h>        /* for flush_tlb_all() proto/macro */

#include <asm/io.h>
#include <asm/irq.h>            /* for CPU_IRQ_REGION and friends */
#include <asm/mmu_context.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/unistd.h>

#define kDEBUG 0

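/*
** kernel_flag below is this architecture's instance of the 2.4 "big kernel
** lock".  pa_dbit_lock and smp_lock are only defined here, not used in this
** file; pa_dbit_lock is believed to serialize PTE dirty-bit updates in the
** PA-RISC mm code.
*/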
spinlock_t pa_dbit_lock = SPIN_LOCK_UNLOCKED;

spinlock_t smp_lock = SPIN_LOCK_UNLOCKED;

volatile struct task_struct *smp_init_current_idle_task;
spinlock_t kernel_flag = SPIN_LOCK_UNLOCKED;

static volatile int smp_commenced = 0;     /* Set when the idlers are all forked */
static volatile int cpu_now_booting = 0;   /* track which CPU is booting */
volatile unsigned long cpu_online_map = 0; /* Bitmap of online CPUs */
#define IS_LOGGED_IN(cpunum) (test_bit(cpunum, (atomic_t *)&cpu_online_map))

int smp_num_cpus = 1;
int smp_threads_ready = 0;
static int max_cpus = -1;                  /* Command line */
struct smp_call_struct {
        void (*func) (void *info);
        void *info;
        long wait;
        atomic_t unstarted_count;
        atomic_t unfinished_count;
};
static volatile struct smp_call_struct *smp_call_function_data;

enum ipi_message_type {
        IPI_NOP = 0,
        IPI_RESCHEDULE = 1,
        IPI_CALL_FUNC,
        IPI_CPU_START,
        IPI_CPU_STOP,
        IPI_CPU_TEST
};
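/*
** Each pending IPI is recorded as one bit in cpu_data[cpu].pending_ipi,
** indexed by the enum values above: ipi_send() sets the bit on the target
** CPU and ipi_interrupt() clears and dispatches it there.
*/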

/********** SMP inter processor interrupt and communication routines */

#undef PER_CPU_IRQ_REGION
#ifdef PER_CPU_IRQ_REGION
/* XXX REVISIT Ignore for now.
**    *May* need this "hook" to register IPI handler
**    once we have perCPU ExtIntr switch tables.
*/
static void
ipi_init(int cpuid)
{

        /* If CPU is present ... */
#ifdef ENTRY_SYS_CPUS
        /* *and* running (not stopped) ... */
#error iCOD support wants state checked here.
#endif

#error verify IRQ_OFFSET(IPI_IRQ) is ipi_interrupt() in new IRQ region

        if (IS_LOGGED_IN(cpuid)) {
                switch_to_idle_task(current);
        }

        return;
}
#endif


/*
** Yoink this CPU from the runnable list...
**
*/
static void
halt_processor(void)
{
#ifdef ENTRY_SYS_CPUS
#error halt_processor() needs rework
/*
** o migrate I/O interrupts off this CPU.
** o leave IPI enabled - __cli() will disable IPI.
** o leave CPU in online map - just change the state
*/
        cpu_data[this_cpu].state = STATE_STOPPED;
        mark_bh(IPI_BH);
#else
        /* REVISIT : redirect I/O Interrupts to another CPU? */
        /* REVISIT : does PM *know* this CPU isn't available? */
        clear_bit(smp_processor_id(), (void *)&cpu_online_map);
        __cli();
        for (;;)
                ;
#endif
}


void
ipi_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
        int this_cpu = smp_processor_id();
        struct cpuinfo_parisc *p = &cpu_data[this_cpu];
        unsigned long ops;
        unsigned long flags;

        /* Count this now; we may make a call that never returns. */
        p->ipi_count++;

        mb();   /* Order interrupt and bit testing. */

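        /*
        ** Drain-and-dispatch loop: atomically snapshot and clear this CPU's
        ** pending_ipi word under its lock, then handle every bit that was
        ** set.  Loop again in case new IPIs arrived while we were busy.
        */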
        for (;;) {
                spin_lock_irqsave(&(p->lock), flags);
                ops = p->pending_ipi;
                p->pending_ipi = 0;
                spin_unlock_irqrestore(&(p->lock), flags);

                mb();   /* Order bit clearing and data access. */

                if (!ops)
                        break;

                while (ops) {
                        unsigned long which = ffz(~ops);

                        switch (which) {
                        case IPI_RESCHEDULE:
#if (kDEBUG>=100)
                                printk(KERN_DEBUG "CPU%d IPI_RESCHEDULE\n", this_cpu);
#endif /* kDEBUG */
                                ops &= ~(1 << IPI_RESCHEDULE);
                                /*
                                 * Reschedule callback.  Everything to be
                                 * done is done by the interrupt return path.
                                 */
                                break;

                        case IPI_CALL_FUNC:
#if (kDEBUG>=100)
                                printk(KERN_DEBUG "CPU%d IPI_CALL_FUNC\n", this_cpu);
#endif /* kDEBUG */
                                ops &= ~(1 << IPI_CALL_FUNC);
                                {
                                        volatile struct smp_call_struct *data;
                                        void (*func)(void *info);
                                        void *info;
                                        int wait;

                                        data = smp_call_function_data;
                                        func = data->func;
                                        info = data->info;
                                        wait = data->wait;

                                        mb();
                                        atomic_dec((atomic_t *)&data->unstarted_count);

                                        /* At this point, *data can't
                                         * be relied upon.
                                         */

                                        (*func)(info);

                                        /* Notify the sending CPU that the
                                         * task is done.
                                         */
                                        mb();
                                        if (wait)
                                                atomic_dec((atomic_t *)&data->unfinished_count);
                                }
                                break;

                        case IPI_CPU_START:
#if (kDEBUG>=100)
                                printk(KERN_DEBUG "CPU%d IPI_CPU_START\n", this_cpu);
#endif /* kDEBUG */
                                ops &= ~(1 << IPI_CPU_START);
#ifdef ENTRY_SYS_CPUS
                                p->state = STATE_RUNNING;
#endif
                                break;

                        case IPI_CPU_STOP:
#if (kDEBUG>=100)
                                printk(KERN_DEBUG "CPU%d IPI_CPU_STOP\n", this_cpu);
#endif /* kDEBUG */
                                ops &= ~(1 << IPI_CPU_STOP);
#ifdef ENTRY_SYS_CPUS
#else
                                halt_processor();
#endif
                                break;

                        case IPI_CPU_TEST:
#if (kDEBUG>=100)
                                printk(KERN_DEBUG "CPU%d is alive!\n", this_cpu);
#endif /* kDEBUG */
                                ops &= ~(1 << IPI_CPU_TEST);
                                break;

                        default:
                                printk(KERN_CRIT "Unknown IPI num on CPU%d: %lu\n",
                                        this_cpu, which);
                                ops &= ~(1 << which);
                                return;
                        } /* Switch */
                } /* while (ops) */
        }
        return;
}


static inline void
ipi_send(int cpu, enum ipi_message_type op)
{
        struct cpuinfo_parisc *p = &cpu_data[cpu];
        unsigned long flags;

        spin_lock_irqsave(&(p->lock), flags);
        p->pending_ipi |= 1 << op;
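        /*
        ** The write below pokes the target CPU's HPA; on PA-RISC this sets
        ** the corresponding bit in its External Interrupt Request register
        ** and so delivers the IPI (compare the matching write that releases
        ** a slave CPU in smp_boot_one_cpu()).
        */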
        __raw_writel(IRQ_OFFSET(IPI_IRQ), cpu_data[cpu].hpa);
        spin_unlock_irqrestore(&(p->lock), flags);
}


static inline void
send_IPI_single(int dest_cpu, enum ipi_message_type op)
{
        if (dest_cpu == NO_PROC_ID) {
                BUG();
                return;
        }

        ipi_send(dest_cpu, op);
}

static inline void
send_IPI_allbutself(enum ipi_message_type op)
{
        int i;

        for (i = 0; i < smp_num_cpus; i++) {
                if (i != smp_processor_id())
                        send_IPI_single(i, op);
        }
}

inline void
smp_send_stop(void)          { send_IPI_allbutself(IPI_CPU_STOP); }

static inline void
smp_send_start(void)         { send_IPI_allbutself(IPI_CPU_START); }

void
smp_send_reschedule(int cpu) { send_IPI_single(cpu, IPI_RESCHEDULE); }


/**
 * Run a function on all other CPUs.
 *  <func>    The function to run. This must be fast and non-blocking.
 *  <info>    An arbitrary pointer to pass to the function.
 *  <retry>   If true, keep retrying until ready.
 *  <wait>    If true, wait until function has completed on other CPUs.
 *  [RETURNS] 0 on success, else a negative status code.
 *
 * Does not return until remote CPUs are nearly ready to execute <func>
 * or have executed.
 */
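/*
 * For an in-tree usage example, see smp_flush_tlb_all() below: it runs
 * flush_tlb_all_local() on every other CPU via this call and then flushes
 * its own TLB.
 */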

int
smp_call_function (void (*func) (void *info), void *info, int retry, int wait)
{
        struct smp_call_struct data;
        long timeout;
        static spinlock_t lock = SPIN_LOCK_UNLOCKED;

        data.func = func;
        data.info = info;
        data.wait = wait;
        atomic_set(&data.unstarted_count, smp_num_cpus - 1);
        atomic_set(&data.unfinished_count, smp_num_cpus - 1);
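        /*
        ** Each of the (smp_num_cpus - 1) target CPUs decrements
        ** unstarted_count once it has copied func/info out of *data, and
        ** unfinished_count after func has returned (only if wait is set);
        ** see the IPI_CALL_FUNC case in ipi_interrupt().
        */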

        if (retry) {
                spin_lock(&lock);
                while (smp_call_function_data != 0)
                        barrier();
        }
        else {
                spin_lock(&lock);
                if (smp_call_function_data) {
                        spin_unlock(&lock);
                        return -EBUSY;
                }
        }

        smp_call_function_data = &data;
        spin_unlock(&lock);

        /* Send a message to all other CPUs and wait for them to respond */
        send_IPI_allbutself(IPI_CALL_FUNC);

        /* Wait for response */
        timeout = jiffies + HZ;
        while ((atomic_read(&data.unstarted_count) > 0) &&
               time_before(jiffies, timeout))
                barrier();

        /* We either got one or timed out. Release the lock */

        mb();
        smp_call_function_data = NULL;
        if (atomic_read(&data.unstarted_count) > 0) {
                printk(KERN_CRIT "SMP CALL FUNCTION TIMED OUT! (cpu=%d)\n",
                        smp_processor_id());
                return -ETIMEDOUT;
        }

        while (wait && atomic_read(&data.unfinished_count) > 0)
                barrier();

        return 0;
}



/*
 * Setup routine for controlling SMP activation
 *
 * Command-line option of "nosmp" or "maxcpus=0" will disable SMP
 * activation entirely (the MPS table probe still happens, though).
 *
 * Command-line option of "maxcpus=<NUM>", where <NUM> is an integer
 * greater than 0, limits the maximum number of CPUs activated in
 * SMP mode to <NUM>.
 */

static int __init nosmp(char *str)
{
        max_cpus = 0;
        return 1;
}

__setup("nosmp", nosmp);

static int __init maxcpus(char *str)
{
        get_option(&str, &max_cpus);
        return 1;
}

__setup("maxcpus=", maxcpus);

/*
 * Flush all other CPUs' TLBs and then our own.  Do this with
 * smp_call_function() because we want every TLB flushed before
 * proceeding.
 */

extern void flush_tlb_all_local(void);

void
smp_flush_tlb_all(void)
{
        smp_call_function((void (*)(void *))flush_tlb_all_local, NULL, 1, 1);
        flush_tlb_all_local();
}

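/*
 * Per-CPU tick helper: decrement this CPU's prof_counter and, when it
 * reaches zero, reload it from prof_multiplier and charge one tick to the
 * current task via update_process_times().
 */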
void
smp_do_timer(struct pt_regs *regs)
{
        int cpu = smp_processor_id();
        struct cpuinfo_parisc *data = &cpu_data[cpu];

        if (!--data->prof_counter) {
                data->prof_counter = data->prof_multiplier;
                update_process_times(user_mode(regs));
        }
}

/*
 * Called by secondaries to update state and initialize CPU registers.
 */
static void __init
smp_cpu_init(int cpunum)
{
        extern int init_per_cpu(int);   /* arch/parisc/kernel/setup.c */
        extern void init_IRQ(void);     /* arch/parisc/kernel/irq.c */

        /* Set modes and Enable floating point coprocessor */
        init_per_cpu(cpunum);

        disable_sr_hashing();
        mb();

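        /*
        ** Setting our bit in cpu_online_map is also the boot handshake:
        ** the monarch polls IS_LOGGED_IN(cpunum) in smp_boot_one_cpu() to
        ** learn that this slave has reached C code.
        */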
        /* Well, support 2.4 linux scheme as well. */
        if (test_and_set_bit(cpunum, (unsigned long *)(&cpu_online_map))) {
                printk(KERN_CRIT "CPU#%d already initialized!\n", cpunum);
                machine_halt();
        }

        /* Initialise the idle task for this CPU */
        atomic_inc(&init_mm.mm_count);
        current->active_mm = &init_mm;
        if (current->mm)
                BUG();
        enter_lazy_tlb(&init_mm, current, cpunum);

        init_IRQ();     /* make sure no IRQ's are enabled or pending */
}


/*
 * Slaves start using C here. Indirectly called from smp_slave_stext.
 * Do what start_kernel() and main() do for boot strap processor (aka monarch)
 */
void __init smp_callin(void)
{
        extern void cpu_idle(void);     /* arch/parisc/kernel/process.c */
        int slave_id = cpu_now_booting;
#if 0
        void *istack;
#endif

        smp_cpu_init(slave_id);

#if 0   /* NOT WORKING YET - see entry.S */
        istack = (void *)__get_free_pages(GFP_KERNEL, ISTACK_ORDER);
        if (istack == NULL) {
                printk(KERN_CRIT "Failed to allocate interrupt stack for cpu %d\n", slave_id);
                BUG();
        }
        mtctl(istack, 31);
#endif

        flush_cache_all_local();        /* start with known state */
        flush_tlb_all_local();

        local_irq_enable();             /* Interrupts have been off until now */

        /* Slaves wait here until Big Poppa daddy say "jump" */
        mb();   /* PARANOID */
        while (!smp_commenced)
                ;
        mb();   /* PARANOID */

        cpu_idle();     /* Wait for timer to schedule some work */

        /* NOTREACHED */
        panic("smp_callin() AAAAaaaaahhhh....\n");
}

/*
 * Create the idle task for a new Slave CPU.  DO NOT use kernel_thread()
 * because that could end up calling schedule(). If it did, the new idle
 * task could get scheduled before we had a chance to remove it from the
 * run-queue...
 */
static int fork_by_hand(void)
{
        struct pt_regs regs;

        /*
         * don't care about the regs settings since
         * we'll never reschedule the forked task.
         */
        return do_fork(CLONE_VM|CLONE_PID, 0, &regs, 0);
}


/*
 * Bring one cpu online.
 */
static int smp_boot_one_cpu(int cpuid, int cpunum)
{
        struct task_struct *idle;
        long timeout;

        /*
         * Create an idle task for this CPU.  Note the address we give
         * to kernel_thread is irrelevant -- it's going to start
         * where OS_BOOT_RENDEVZ vector in SAL says to start.  But
         * this gets all the other task-y sort of data structures set
         * up like we wish.  We need to pull the just created idle task
         * off the run queue and stuff it into the init_tasks[] array.
         * Sheesh . . .
         */

        if (fork_by_hand() < 0)
                panic("SMP: fork failed for CPU:%d", cpuid);

        idle = init_task.prev_task;
        if (!idle)
                panic("SMP: No idle process for CPU:%d", cpuid);

        task_set_cpu(idle, cpunum);     /* manually schedule idle task */
        del_from_runqueue(idle);
        unhash_process(idle);
        init_tasks[cpunum] = idle;

        /* Let _start know what logical CPU we're booting
        ** (offset into init_tasks[], cpu_data[])
        */
        cpu_now_booting = cpunum;

        /*
        ** boot strap code needs to know the task address since
        ** it also contains the process stack.
        */
        smp_init_current_idle_task = idle;
        mb();

        /*
        ** This gets PDC to release the CPU from a very tight loop.
        ** See MEM_RENDEZ comments in head.S.
        */
        __raw_writel(IRQ_OFFSET(TIMER_IRQ), cpu_data[cpunum].hpa);
        mb();

        /*
         * OK, wait a bit for that CPU to finish staggering about.
         * Slave will set a bit when it reaches smp_cpu_init() and then
         * wait for smp_commenced to be 1.
         * Once we see the bit change, we can move on.
         */
        for (timeout = 0; timeout < 10000; timeout++) {
                if (IS_LOGGED_IN(cpunum)) {
                        /* Which implies Slave has started up */
                        cpu_now_booting = 0;
                        smp_init_current_idle_task = NULL;
                        goto alive;
                }
                udelay(100);
                barrier();
        }

        init_tasks[cpunum] = NULL;
        free_task_struct(idle);

        printk(KERN_CRIT "SMP: CPU:%d is stuck.\n", cpuid);
        return -1;

alive:
        /* Remember the Slave data */
#if (kDEBUG>=100)
        printk(KERN_DEBUG "SMP: CPU:%d (num %d) came alive after %ld _us\n",
                cpuid, cpunum, timeout * 100);
#endif /* kDEBUG */
#ifdef ENTRY_SYS_CPUS
        cpu_data[cpunum].state = STATE_RUNNING;
#endif
        return 0;
}




/*
** inventory.c:do_inventory() has already 'discovered' the additional CPUs.
** We are ready to wrest them from PDC's control now.
** Called by smp_init to bring all the secondaries online and hold them.
**
** o Setup of the IPI irq handler is done in irq.c.
** o MEM_RENDEZ is initialized in head.S:stext()
**
*/
void __init smp_boot_cpus(void)
{
        int i, cpu_count = 1;
        unsigned long bogosum = loops_per_jiffy;        /* Count Monarch */

        /* REVISIT - assumes first CPU reported by PAT PDC is BSP */
        int bootstrap_processor = cpu_data[0].cpuid;    /* CPU ID of BSP */

        /* Setup BSP mappings */
        printk(KERN_DEBUG "SMP: bootstrap CPU ID is %d\n", bootstrap_processor);
        init_task.processor = bootstrap_processor;
        current->processor = bootstrap_processor;
        cpu_online_map = 1 << bootstrap_processor;      /* Mark Bootstrap processor as present */
        current->active_mm = &init_mm;

#ifdef ENTRY_SYS_CPUS
        cpu_data[0].state = STATE_RUNNING;
#endif

        /* Nothing to do when told not to. */
        if (max_cpus == 0) {
                printk(KERN_INFO "SMP mode deactivated.\n");
                return;
        }

        if (max_cpus != -1)
                printk(KERN_INFO "Limiting CPUs to %d\n", max_cpus);

        /* We found more than one CPU.... */
        if (boot_cpu_data.cpu_count > 1) {

                for (i = 0; i < NR_CPUS; i++) {
                        if (cpu_data[i].cpuid == NO_PROC_ID ||
                            cpu_data[i].cpuid == bootstrap_processor)
                                continue;

                        if (smp_boot_one_cpu(cpu_data[i].cpuid, cpu_count) < 0)
                                continue;

                        bogosum += loops_per_jiffy;
                        cpu_count++;    /* Count good CPUs only... */

                        /* Bail when we've started as many CPUS as told to */
                        if (cpu_count == max_cpus)
                                break;
                }
        }
        if (cpu_count == 1) {
                printk(KERN_INFO "SMP: Bootstrap processor only.\n");
        }

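        /*
        ** bogosum is the sum of loops_per_jiffy over the started CPUs.
        ** With HZ=100, BogoMIPS = loops_per_jiffy / 5000 per CPU, so the
        ** printk below reports (bogosum + 25) / 5000 as the integer part
        ** and ((bogosum + 25) / 50) % 100 as two decimal places.
        */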
        printk(KERN_INFO "SMP: Total %d of %d processors activated "
               "(%lu.%02lu BogoMIPS noticed).\n",
               cpu_count, boot_cpu_data.cpu_count, (bogosum + 25) / 5000,
               ((bogosum + 25) / 50) % 100);

        smp_num_cpus = cpu_count;
#ifdef PER_CPU_IRQ_REGION
        ipi_init();
#endif
        return;
}

/*
 * Called from main.c by Monarch Processor.
 * After this, any CPU can schedule any task.
 */
void smp_commence(void)
{
        smp_commenced = 1;
        mb();
        return;
}

#ifdef ENTRY_SYS_CPUS
/* Code goes along with:
** entry.s: ENTRY_NAME(sys_cpus)   / * 215, for cpu stat * /
*/
int sys_cpus(int argc, char **argv)
{
        int i, j = 0;
        extern int current_pid(int cpu);

        if (argc > 2) {
                printk("sys_cpus: Only one argument supported\n");
                return (-1);
        }
        if (argc == 1) {

#ifdef DUMP_MORE_STATE
                for (i = 0; i < NR_CPUS; i++) {
                        int cpus_per_line = 4;
                        if (IS_LOGGED_IN(i)) {
                                if (j++ % cpus_per_line)
                                        printk(" %3d", i);
                                else
                                        printk("\n %3d", i);
                        }
                }
                printk("\n");
#else
                printk("\n 0\n");
#endif
        } else if ((argc == 2) && !(strcmp(argv[1], "-l"))) {
                printk("\nCPUSTATE   TASK CPUNUM CPUID HARDCPU(HPA)\n");
#ifdef DUMP_MORE_STATE
                for (i = 0; i < NR_CPUS; i++) {
                        if (!IS_LOGGED_IN(i))
                                continue;
                        if (cpu_data[i].cpuid != NO_PROC_ID) {
                                switch (cpu_data[i].state) {
                                case STATE_RENDEZVOUS:
                                        printk("RENDEZVS ");
                                        break;
                                case STATE_RUNNING:
                                        printk((current_pid(i) != 0) ? "RUNNING " : "IDLING  ");
                                        break;
                                case STATE_STOPPED:
                                        printk("STOPPED ");
                                        break;
                                case STATE_HALTED:
                                        printk("HALTED  ");
                                        break;
                                default:
                                        printk("%08x?", cpu_data[i].state);
                                        break;
                                }
                                if (IS_LOGGED_IN(i)) {
                                        printk(" %4d", current_pid(i));
                                }
                                printk(" %6d", cpu_number_map(i));
                                printk(" %5d", i);
                                printk(" 0x%lx\n", cpu_data[i].hpa);
                        }
                }
#else
                printk("\n%s  %4d      0     0 --------",
                        (current->pid) ? "RUNNING " : "IDLING  ", current->pid);
#endif
        } else if ((argc == 2) && !(strcmp(argv[1], "-s"))) {
#ifdef DUMP_MORE_STATE
                printk("\nCPUSTATE   CPUID\n");
                for (i = 0; i < NR_CPUS; i++) {
                        if (!IS_LOGGED_IN(i))
                                continue;
                        if (cpu_data[i].cpuid != NO_PROC_ID) {
                                switch (cpu_data[i].state) {
                                case STATE_RENDEZVOUS:
                                        printk("RENDEZVS");
                                        break;
                                case STATE_RUNNING:
                                        printk((current_pid(i) != 0) ? "RUNNING " : "IDLING  ");
                                        break;
                                case STATE_STOPPED:
                                        printk("STOPPED ");
                                        break;
                                case STATE_HALTED:
                                        printk("HALTED  ");
                                        break;
                                default:
                                        break;
                                }
                                printk(" %5d\n", i);
                        }
                }
#else
                printk("\n%s CPU0", (current->pid) ? "RUNNING " : "IDLING  ");
#endif
        } else {
                printk("sys_cpus: Unknown request\n");
                return (-1);
        }
        return 0;
}
#endif /* ENTRY_SYS_CPUS */

#ifdef CONFIG_PROC_FS
int __init
setup_profiling_timer(unsigned int multiplier)
{
        return -EINVAL;
}
#endif