/*
 * Copyright (C) 2001 Broadcom Corporation
 * Copyright (C) 2004 Maciej W. Rozycki
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */
19
20 #include <linux/init.h>
21 #include <linux/delay.h>
22 #include <linux/smp.h>
23 #include <linux/kernel_stat.h>
24
25 #include <asm/mmu_context.h>
26 #include <asm/sibyte/64bit.h>
27 #include <asm/sibyte/sb1250.h>
28 #include <asm/sibyte/sb1250_regs.h>
29 #include <asm/sibyte/sb1250_int.h>
30
31 extern void smp_call_function_interrupt(void);
32
/*
 * These are routines for dealing with the sb1250 SMP capabilities
 * independent of board/firmware.
 */
37
/*
 * Per-CPU mailbox "set" register addresses (KSEG1, i.e. uncached),
 * indexed by CPU number.  Written by core_send_ipi() to raise an IPI.
 */
static u64 mailbox_set_regs[] = {
	KSEG1 + A_IMR_CPU0_BASE + R_IMR_MAILBOX_SET_CPU,
	KSEG1 + A_IMR_CPU1_BASE + R_IMR_MAILBOX_SET_CPU
};
42
/*
 * Per-CPU mailbox "clear" register addresses, indexed by CPU number.
 * Written by sb1250_mailbox_interrupt() to acknowledge an IPI.
 */
static u64 mailbox_clear_regs[] = {
	KSEG1 + A_IMR_CPU0_BASE + R_IMR_MAILBOX_CLR_CPU,
	KSEG1 + A_IMR_CPU1_BASE + R_IMR_MAILBOX_CLR_CPU
};
47
/*
 * Per-CPU mailbox read register addresses, indexed by CPU number.
 * Read by sb1250_mailbox_interrupt() to fetch the pending action bits.
 */
static u64 mailbox_regs[] = {
	KSEG1 + A_IMR_CPU0_BASE + R_IMR_MAILBOX_CPU,
	KSEG1 + A_IMR_CPU1_BASE + R_IMR_MAILBOX_CPU
};
52
53
54 /*
55 * Simple enough; everything is set up, so just poke the appropriate mailbox
56 * register, and we should be set
57 */
core_send_ipi(int cpu,unsigned int action)58 void core_send_ipi(int cpu, unsigned int action)
59 {
60 out64((((u64)action)<< 48), mailbox_set_regs[cpu]);
61 }
62
63
/*
 * Final board-specific SMP setup hook; simply delegates to
 * sb1250_time_init() (presumably per-CPU timer/interrupt setup —
 * defined in the sibyte time code, not visible here).
 */
void sb1250_smp_finish(void)
{
	sb1250_time_init();
}
68
sb1250_mailbox_interrupt(struct pt_regs * regs)69 void sb1250_mailbox_interrupt(struct pt_regs *regs)
70 {
71 int cpu = smp_processor_id();
72 unsigned int action;
73
74 kstat.irqs[cpu][K_INT_MBOX_0]++;
75 /* Load the mailbox register to figure out what we're supposed to do */
76 action = (in64(mailbox_regs[cpu]) >> 48) & 0xffff;
77
78 /* Clear the mailbox to clear the interrupt */
79 out64(((u64)action)<<48, mailbox_clear_regs[cpu]);
80
81 /*
82 * Nothing to do for SMP_RESCHEDULE_YOURSELF; returning from the
83 * interrupt will do the reschedule for us
84 */
85
86 if (action & SMP_CALL_FUNCTION) {
87 smp_call_function_interrupt();
88 }
89 }
90
91 extern atomic_t cpus_booted;
92 extern atomic_t smp_commenced;
93
/*
 * C entry point for a secondary CPU, reached after the firmware/low-level
 * boot code starts it (see prom_boot_secondary() in smp_boot_cpus()).
 * Performs per-CPU initialization, marks the CPU online, then spins until
 * the master releases it via smp_commenced before entering the idle loop.
 */
asmlinkage void start_secondary(void)
{
	unsigned int cpu = smp_processor_id();

	cpu_probe();
	prom_init_secondary();
	per_cpu_trap_init();

	/*
	 * XXX parity protection should be folded in here when it's converted
	 * to an option instead of something based on .cputype
	 */
	pgd_current[cpu] = init_mm.pgd;	/* secondary starts on the kernel page tables */
	cpu_data[cpu].udelay_val = loops_per_jiffy;	/* inherit master's delay calibration */
	prom_smp_finish();
	printk("Slave cpu booted successfully\n");
	CPUMASK_SETB(cpu_online_map, cpu);
	atomic_inc(&cpus_booted);	/* tells smp_boot_cpus() we're up */
	/* Busy-wait until the master sets smp_commenced */
	while (!atomic_read(&smp_commenced));
	cpu_idle();
}
119
/*
 * Boot-time SMP bring-up, run on the master CPU: initialize the
 * master's per-CPU bookkeeping, then for each additional CPU fork an
 * idle task and ask the firmware (prom_boot_secondary) to start the
 * CPU on that task's stack.  Finally wait until every CPU has checked
 * in via cpus_booted (incremented in start_secondary()).
 */
void __init smp_boot_cpus(void)
{
	int i;
	int cur_cpu = 0;	/* physical CPU number being probed */

	smp_num_cpus = prom_setup_smp();	/* firmware reports CPU count */
	init_new_context(current, &init_mm);
	current->processor = 0;
	cpu_data[0].udelay_val = loops_per_jiffy;
	cpu_data[0].asid_cache = ASID_FIRST_VERSION;
	CPUMASK_CLRALL(cpu_online_map);
	CPUMASK_SETB(cpu_online_map, 0);	/* master is online */
	atomic_set(&cpus_booted, 1);	/* Master CPU is already booted... */
	init_idle();
	__cpu_number_map[0] = 0;
	__cpu_logical_map[0] = 0;
	/* smp_tune_scheduling(); XXX */

	/*
	 * This loop attempts to compensate for "holes" in the CPU
	 * numbering.  It's overkill, but general.
	 */
	for (i = 1; i < smp_num_cpus && cur_cpu < NR_CPUS; i++) {
		struct task_struct *p;
		struct pt_regs regs;
		printk("Starting CPU %d...\n", i);

		/* Spawn a new process normally.  Grab a pointer to
		   its task struct so we can mess with it */
		do_fork(CLONE_VM|CLONE_PID, 0, &regs, 0);
		/* 2.4 idiom: the just-forked task is the last one on the list */
		p = init_task.prev_task;

		/* Schedule the first task manually */
		p->processor = i;
		p->cpus_runnable = 1 << i; /* we schedule the first task manually */

		init_tasks[i] = p;

		/* Take it off the runqueue and out of the pid hash: it
		   becomes the secondary CPU's idle task, never scheduled
		   normally */
		del_from_runqueue(p);
		unhash_process(p);

		do {
			int status;

			/* Iterate until we find a CPU that comes up */
			cur_cpu++;
			/* Stack grows down from the top of the task union;
			   the 32-byte gap is the MIPS argument save area */
			status = prom_boot_secondary(cur_cpu,
						     (unsigned long)p +
						     KERNEL_STACK_SIZE - 32,
						     (unsigned long)p);
			if (status == 0) {
				/* Record logical <-> physical CPU mapping */
				__cpu_number_map[cur_cpu] = i;
				__cpu_logical_map[i] = cur_cpu;
				break;
			}
		} while (cur_cpu < NR_CPUS);
	}

	/* Wait for everyone to come up */
	while (atomic_read(&cpus_booted) != smp_num_cpus);
	smp_threads_ready = 1;
}
182