/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2004-2008, 2009, 2010, 2011 Cavium Networks
 */

#include <linux/interrupt.h>
#include <linux/bitops.h>
#include <linux/percpu.h>
#include <linux/irq.h>
#include <linux/smp.h>

#include <asm/octeon/octeon.h>

static DEFINE_RAW_SPINLOCK(octeon_irq_ciu0_lock);
static DEFINE_RAW_SPINLOCK(octeon_irq_ciu1_lock);

static DEFINE_PER_CPU(unsigned long, octeon_irq_ciu0_en_mirror);
static DEFINE_PER_CPU(unsigned long, octeon_irq_ciu1_en_mirror);

static __read_mostly u8 octeon_irq_ciu_to_irq[8][64];

union octeon_ciu_chip_data {
	void *p;
	unsigned long l;
	struct {
		unsigned int line:6;
		unsigned int bit:6;
	} s;
};

struct octeon_core_chip_data {
	struct mutex core_irq_mutex;
	bool current_en;
	bool desired_en;
	u8 bit;
};

#define MIPS_CORE_IRQ_LINES 8

static struct octeon_core_chip_data octeon_irq_core_chip_data[MIPS_CORE_IRQ_LINES];

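/*
 * Bind an irq number to a CIU line/bit pair: record the mapping in
 * octeon_irq_ciu_to_irq, attach the chip and flow handler, and stash
 * the line/bit pair in the irq's chip data.
 */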
static void __init octeon_irq_set_ciu_mapping(int irq, int line, int bit,
					      struct irq_chip *chip,
					      irq_flow_handler_t handler)
{
	union octeon_ciu_chip_data cd;

	irq_set_chip_and_handler(irq, chip, handler);

	cd.l = 0;
	cd.s.line = line;
	cd.s.bit = bit;

	irq_set_chip_data(irq, cd.p);
	octeon_irq_ciu_to_irq[line][bit] = irq;
}

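/*
 * Map between Linux logical CPU numbers and Octeon hardware core
 * ids.  With SMP disabled only the boot core is running.
 */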
static int octeon_coreid_for_cpu(int cpu)
{
#ifdef CONFIG_SMP
	return cpu_logical_map(cpu);
#else
	return cvmx_get_core_num();
#endif
}

static int octeon_cpu_for_coreid(int coreid)
{
#ifdef CONFIG_SMP
	return cpu_number_map(coreid);
#else
	return smp_processor_id();
#endif
}

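/*
 * The eight MIPS core interrupt lines are controlled through the
 * local core's CP0 Status and Cause registers.
 */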
static void octeon_irq_core_ack(struct irq_data *data)
{
	struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);
	unsigned int bit = cd->bit;

	/*
	 * We don't need to disable IRQs to make these atomic since
	 * they are already disabled earlier in the low level
	 * interrupt code.
	 */
	clear_c0_status(0x100 << bit);
	/* The two user interrupts must be cleared manually. */
	if (bit < 2)
		clear_c0_cause(0x100 << bit);
}

static void octeon_irq_core_eoi(struct irq_data *data)
{
	struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);

	/*
	 * We don't need to disable IRQs to make these atomic since
	 * they are already disabled earlier in the low level
	 * interrupt code.
	 */
	set_c0_status(0x100 << cd->bit);
}

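/* Called on each CPU via on_each_cpu() to apply the desired enable state. */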
static void octeon_irq_core_set_enable_local(void *arg)
{
	struct irq_data *data = arg;
	struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);
	unsigned int mask = 0x100 << cd->bit;

	/*
	 * Interrupts are already disabled, so these are atomic.
	 */
	if (cd->desired_en)
		set_c0_status(mask);
	else
		clear_c0_status(mask);
}

static void octeon_irq_core_disable(struct irq_data *data)
{
	struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);

	cd->desired_en = false;
}

static void octeon_irq_core_enable(struct irq_data *data)
{
	struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);

	cd->desired_en = true;
}

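/*
 * The enable/disable callbacks only record the desired state.  The
 * cross-CPU Status register updates happen in the slow
 * bus_lock/bus_sync_unlock path, where it is safe to sleep.
 */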
static void octeon_irq_core_bus_lock(struct irq_data *data)
{
	struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);

	mutex_lock(&cd->core_irq_mutex);
}

static void octeon_irq_core_bus_sync_unlock(struct irq_data *data)
{
	struct octeon_core_chip_data *cd = irq_data_get_irq_chip_data(data);

	if (cd->desired_en != cd->current_en) {
		on_each_cpu(octeon_irq_core_set_enable_local, data, 1);

		cd->current_en = cd->desired_en;
	}

	mutex_unlock(&cd->core_irq_mutex);
}

static struct irq_chip octeon_irq_chip_core = {
	.name = "Core",
	.irq_enable = octeon_irq_core_enable,
	.irq_disable = octeon_irq_core_disable,
	.irq_ack = octeon_irq_core_ack,
	.irq_eoi = octeon_irq_core_eoi,
	.irq_bus_lock = octeon_irq_core_bus_lock,
	.irq_bus_sync_unlock = octeon_irq_core_bus_sync_unlock,

	.irq_cpu_online = octeon_irq_core_eoi,
	.irq_cpu_offline = octeon_irq_core_ack,
	.flags = IRQCHIP_ONOFFLINE_ENABLED,
};

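/* Register the "Core" irq_chip for the MIPS core interrupt lines. */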
static void __init octeon_irq_init_core(void)
{
	int i;
	int irq;
	struct octeon_core_chip_data *cd;

	for (i = 0; i < MIPS_CORE_IRQ_LINES; i++) {
		cd = &octeon_irq_core_chip_data[i];
		cd->current_en = false;
		cd->desired_en = false;
		cd->bit = i;
		mutex_init(&cd->core_irq_mutex);

		irq = OCTEON_IRQ_SW0 + i;
		switch (irq) {
		case OCTEON_IRQ_TIMER:
		case OCTEON_IRQ_SW0:
		case OCTEON_IRQ_SW1:
		case OCTEON_IRQ_5:
		case OCTEON_IRQ_PERF:
			irq_set_chip_data(irq, cd);
			irq_set_chip_and_handler(irq, &octeon_irq_chip_core,
						 handle_percpu_irq);
			break;
		default:
			break;
		}
	}
}

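/*
 * Choose the CPU that should receive the interrupt next: round-robin
 * through the online CPUs in the irq's affinity mask.
 */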
static int next_cpu_for_irq(struct irq_data *data)
{
#ifdef CONFIG_SMP
	int cpu;
	int weight = cpumask_weight(data->affinity);

	if (weight > 1) {
		cpu = smp_processor_id();
		for (;;) {
			cpu = cpumask_next(cpu, data->affinity);
			if (cpu >= nr_cpu_ids) {
				cpu = -1;
				continue;
			} else if (cpumask_test_cpu(cpu, cpu_online_mask)) {
				break;
			}
		}
	} else if (weight == 1) {
		cpu = cpumask_first(data->affinity);
	} else {
		cpu = smp_processor_id();
	}
	return cpu;
#else
	return smp_processor_id();
#endif
}

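/*
 * Enable the irq in the CIU enable register of the next CPU in the
 * affinity set, keeping the per-cpu enable mirror in sync under the
 * corresponding CIU lock.
 */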
static void octeon_irq_ciu_enable(struct irq_data *data)
{
	int cpu = next_cpu_for_irq(data);
	int coreid = octeon_coreid_for_cpu(cpu);
	unsigned long *pen;
	unsigned long flags;
	union octeon_ciu_chip_data cd;

	cd.p = irq_data_get_irq_chip_data(data);

	if (cd.s.line == 0) {
		raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags);
		pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
		set_bit(cd.s.bit, pen);
		cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen);
		raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags);
	} else {
		raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags);
		pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
		set_bit(cd.s.bit, pen);
		cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);
		raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags);
	}
}

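/* Enable or disable the irq in the CIU enable register of the local core. */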
static void octeon_irq_ciu_enable_local(struct irq_data *data)
{
	unsigned long *pen;
	unsigned long flags;
	union octeon_ciu_chip_data cd;

	cd.p = irq_data_get_irq_chip_data(data);

	if (cd.s.line == 0) {
		raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags);
		pen = &__get_cpu_var(octeon_irq_ciu0_en_mirror);
		set_bit(cd.s.bit, pen);
		cvmx_write_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2), *pen);
		raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags);
	} else {
		raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags);
		pen = &__get_cpu_var(octeon_irq_ciu1_en_mirror);
		set_bit(cd.s.bit, pen);
		cvmx_write_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1), *pen);
		raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags);
	}
}

static void octeon_irq_ciu_disable_local(struct irq_data *data)
{
	unsigned long *pen;
	unsigned long flags;
	union octeon_ciu_chip_data cd;

	cd.p = irq_data_get_irq_chip_data(data);

	if (cd.s.line == 0) {
		raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags);
		pen = &__get_cpu_var(octeon_irq_ciu0_en_mirror);
		clear_bit(cd.s.bit, pen);
		cvmx_write_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2), *pen);
		raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags);
	} else {
		raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags);
		pen = &__get_cpu_var(octeon_irq_ciu1_en_mirror);
		clear_bit(cd.s.bit, pen);
		cvmx_write_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1), *pen);
		raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags);
	}
}

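/* Set or clear the irq's enable bit on every online CPU. */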
static void octeon_irq_ciu_disable_all(struct irq_data *data)
{
	unsigned long flags;
	unsigned long *pen;
	int cpu;
	union octeon_ciu_chip_data cd;

	wmb(); /* Make sure flag changes arrive before register updates. */

	cd.p = irq_data_get_irq_chip_data(data);

	if (cd.s.line == 0) {
		raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags);
		for_each_online_cpu(cpu) {
			int coreid = octeon_coreid_for_cpu(cpu);
			pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
			clear_bit(cd.s.bit, pen);
			cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen);
		}
		raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags);
	} else {
		raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags);
		for_each_online_cpu(cpu) {
			int coreid = octeon_coreid_for_cpu(cpu);
			pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
			clear_bit(cd.s.bit, pen);
			cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);
		}
		raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags);
	}
}

static void octeon_irq_ciu_enable_all(struct irq_data *data)
{
	unsigned long flags;
	unsigned long *pen;
	int cpu;
	union octeon_ciu_chip_data cd;

	cd.p = irq_data_get_irq_chip_data(data);

	if (cd.s.line == 0) {
		raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags);
		for_each_online_cpu(cpu) {
			int coreid = octeon_coreid_for_cpu(cpu);
			pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
			set_bit(cd.s.bit, pen);
			cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen);
		}
		raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags);
	} else {
		raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags);
		for_each_online_cpu(cpu) {
			int coreid = octeon_coreid_for_cpu(cpu);
			pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
			set_bit(cd.s.bit, pen);
			cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);
		}
		raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags);
	}
}

/*
 * Enable the irq on the next core in the affinity set for chips that
 * have the EN*_W1{S,C} registers.
 */
static void octeon_irq_ciu_enable_v2(struct irq_data *data)
{
	u64 mask;
	int cpu = next_cpu_for_irq(data);
	union octeon_ciu_chip_data cd;

	cd.p = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd.s.bit);

	/*
	 * Called under the desc lock, so these should never get out
	 * of sync.
	 */
	if (cd.s.line == 0) {
		int index = octeon_coreid_for_cpu(cpu) * 2;
		set_bit(cd.s.bit, &per_cpu(octeon_irq_ciu0_en_mirror, cpu));
		cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
	} else {
		int index = octeon_coreid_for_cpu(cpu) * 2 + 1;
		set_bit(cd.s.bit, &per_cpu(octeon_irq_ciu1_en_mirror, cpu));
		cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
	}
}

/*
 * Enable the irq on the current CPU for chips that
 * have the EN*_W1{S,C} registers.
 */
static void octeon_irq_ciu_enable_local_v2(struct irq_data *data)
{
	u64 mask;
	union octeon_ciu_chip_data cd;

	cd.p = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd.s.bit);

	if (cd.s.line == 0) {
		int index = cvmx_get_core_num() * 2;
		set_bit(cd.s.bit, &__get_cpu_var(octeon_irq_ciu0_en_mirror));
		cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
	} else {
		int index = cvmx_get_core_num() * 2 + 1;
		set_bit(cd.s.bit, &__get_cpu_var(octeon_irq_ciu1_en_mirror));
		cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
	}
}

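/*
 * Disable the irq on the current CPU for chips that
 * have the EN*_W1{S,C} registers.
 */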
static void octeon_irq_ciu_disable_local_v2(struct irq_data *data)
{
	u64 mask;
	union octeon_ciu_chip_data cd;

	cd.p = irq_data_get_irq_chip_data(data);
	mask = 1ull << (cd.s.bit);

	if (cd.s.line == 0) {
		int index = cvmx_get_core_num() * 2;
		clear_bit(cd.s.bit, &__get_cpu_var(octeon_irq_ciu0_en_mirror));
		cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
	} else {
		int index = cvmx_get_core_num() * 2 + 1;
		clear_bit(cd.s.bit, &__get_cpu_var(octeon_irq_ciu1_en_mirror));
		cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
	}
}

/*
 * Write to the W1C bit in CVMX_CIU_INTX_SUM0 to clear the irq.
 */
static void octeon_irq_ciu_ack(struct irq_data *data)
{
	u64 mask;
	union octeon_ciu_chip_data cd;

	cd.p = data->chip_data;
	mask = 1ull << (cd.s.bit);

	if (cd.s.line == 0) {
		int index = cvmx_get_core_num() * 2;
		cvmx_write_csr(CVMX_CIU_INTX_SUM0(index), mask);
	} else {
		cvmx_write_csr(CVMX_CIU_INT_SUM1, mask);
	}
}

/*
 * Disable the irq on all cores for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu_disable_all_v2(struct irq_data *data)
{
	int cpu;
	u64 mask;
	union octeon_ciu_chip_data cd;

	wmb(); /* Make sure flag changes arrive before register updates. */

	cd.p = data->chip_data;
	mask = 1ull << (cd.s.bit);

	if (cd.s.line == 0) {
		for_each_online_cpu(cpu) {
			int index = octeon_coreid_for_cpu(cpu) * 2;
			clear_bit(cd.s.bit, &per_cpu(octeon_irq_ciu0_en_mirror, cpu));
			cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
		}
	} else {
		for_each_online_cpu(cpu) {
			int index = octeon_coreid_for_cpu(cpu) * 2 + 1;
			clear_bit(cd.s.bit, &per_cpu(octeon_irq_ciu1_en_mirror, cpu));
			cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
		}
	}
}

/*
 * Enable the irq on all cores for chips that have the EN*_W1{S,C}
 * registers.
 */
static void octeon_irq_ciu_enable_all_v2(struct irq_data *data)
{
	int cpu;
	u64 mask;
	union octeon_ciu_chip_data cd;

	cd.p = data->chip_data;
	mask = 1ull << (cd.s.bit);

	if (cd.s.line == 0) {
		for_each_online_cpu(cpu) {
			int index = octeon_coreid_for_cpu(cpu) * 2;
			set_bit(cd.s.bit, &per_cpu(octeon_irq_ciu0_en_mirror, cpu));
			cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
		}
	} else {
		for_each_online_cpu(cpu) {
			int index = octeon_coreid_for_cpu(cpu) * 2 + 1;
			set_bit(cd.s.bit, &per_cpu(octeon_irq_ciu1_en_mirror, cpu));
			cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
		}
	}
}

#ifdef CONFIG_SMP

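/*
 * The CPU is going offline: re-target any irq pointing at it to a CPU
 * that stays online.
 */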
static void octeon_irq_cpu_offline_ciu(struct irq_data *data)
{
	int cpu = smp_processor_id();
	cpumask_t new_affinity;

	if (!cpumask_test_cpu(cpu, data->affinity))
		return;

	if (cpumask_weight(data->affinity) > 1) {
		/*
		 * The irq has multi-CPU affinity, so just remove this
		 * CPU from the affinity set.
		 */
		cpumask_copy(&new_affinity, data->affinity);
		cpumask_clear_cpu(cpu, &new_affinity);
	} else {
		/* Otherwise, put it on the lowest numbered online CPU. */
		cpumask_clear(&new_affinity);
		cpumask_set_cpu(cpumask_first(cpu_online_mask), &new_affinity);
	}
	__irq_set_affinity_locked(data, &new_affinity);
}

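/*
 * Set affinity for the irq for chips that lack the EN*_W1{S,C}
 * registers.
 */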
static int octeon_irq_ciu_set_affinity(struct irq_data *data,
				       const struct cpumask *dest, bool force)
{
	int cpu;
	bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data);
	unsigned long flags;
	union octeon_ciu_chip_data cd;

	cd.p = data->chip_data;

	/*
	 * For non-v2 CIU, we will allow only single CPU affinity.
	 * This removes the need to do locking in the .ack/.eoi
	 * functions.
	 */
	if (cpumask_weight(dest) != 1)
		return -EINVAL;

	if (!enable_one)
		return 0;

	if (cd.s.line == 0) {
		raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags);
		for_each_online_cpu(cpu) {
			int coreid = octeon_coreid_for_cpu(cpu);
			unsigned long *pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);

			if (cpumask_test_cpu(cpu, dest) && enable_one) {
				enable_one = false;
				set_bit(cd.s.bit, pen);
			} else {
				clear_bit(cd.s.bit, pen);
			}
			cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen);
		}
		raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags);
	} else {
		raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags);
		for_each_online_cpu(cpu) {
			int coreid = octeon_coreid_for_cpu(cpu);
			unsigned long *pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);

			if (cpumask_test_cpu(cpu, dest) && enable_one) {
				enable_one = false;
				set_bit(cd.s.bit, pen);
			} else {
				clear_bit(cd.s.bit, pen);
			}
			cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);
		}
		raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags);
	}
	return 0;
}

/*
 * Set affinity for the irq for chips that have the EN*_W1{S,C}
 * registers.
 */
static int octeon_irq_ciu_set_affinity_v2(struct irq_data *data,
					  const struct cpumask *dest,
					  bool force)
{
	int cpu;
	bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data);
	u64 mask;
	union octeon_ciu_chip_data cd;

	if (!enable_one)
		return 0;

	cd.p = data->chip_data;
	mask = 1ull << cd.s.bit;

	if (cd.s.line == 0) {
		for_each_online_cpu(cpu) {
			unsigned long *pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
			int index = octeon_coreid_for_cpu(cpu) * 2;
			if (cpumask_test_cpu(cpu, dest) && enable_one) {
				enable_one = false;
				set_bit(cd.s.bit, pen);
				cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
			} else {
				clear_bit(cd.s.bit, pen);
				cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
			}
		}
	} else {
		for_each_online_cpu(cpu) {
			unsigned long *pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
			int index = octeon_coreid_for_cpu(cpu) * 2 + 1;
			if (cpumask_test_cpu(cpu, dest) && enable_one) {
				enable_one = false;
				set_bit(cd.s.bit, pen);
				cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
			} else {
				clear_bit(cd.s.bit, pen);
				cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
			}
		}
	}
	return 0;
}
#endif

/*
 * The v1 CIU code already masks things, so supply a dummy version to
 * the core chip code.
 */
static void octeon_irq_dummy_mask(struct irq_data *data)
{
}

/*
 * Newer octeon chips have support for lockless CIU operation.
 */
static struct irq_chip octeon_irq_chip_ciu_v2 = {
	.name = "CIU",
	.irq_enable = octeon_irq_ciu_enable_v2,
	.irq_disable = octeon_irq_ciu_disable_all_v2,
	.irq_mask = octeon_irq_ciu_disable_local_v2,
	.irq_unmask = octeon_irq_ciu_enable_v2,
#ifdef CONFIG_SMP
	.irq_set_affinity = octeon_irq_ciu_set_affinity_v2,
	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
};

static struct irq_chip octeon_irq_chip_ciu_edge_v2 = {
	.name = "CIU-E",
	.irq_enable = octeon_irq_ciu_enable_v2,
	.irq_disable = octeon_irq_ciu_disable_all_v2,
	.irq_ack = octeon_irq_ciu_ack,
	.irq_mask = octeon_irq_ciu_disable_local_v2,
	.irq_unmask = octeon_irq_ciu_enable_v2,
#ifdef CONFIG_SMP
	.irq_set_affinity = octeon_irq_ciu_set_affinity_v2,
	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
};

static struct irq_chip octeon_irq_chip_ciu = {
	.name = "CIU",
	.irq_enable = octeon_irq_ciu_enable,
	.irq_disable = octeon_irq_ciu_disable_all,
	.irq_mask = octeon_irq_dummy_mask,
#ifdef CONFIG_SMP
	.irq_set_affinity = octeon_irq_ciu_set_affinity,
	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
};

static struct irq_chip octeon_irq_chip_ciu_edge = {
	.name = "CIU-E",
	.irq_enable = octeon_irq_ciu_enable,
	.irq_disable = octeon_irq_ciu_disable_all,
	.irq_mask = octeon_irq_dummy_mask,
	.irq_ack = octeon_irq_ciu_ack,
#ifdef CONFIG_SMP
	.irq_set_affinity = octeon_irq_ciu_set_affinity,
	.irq_cpu_offline = octeon_irq_cpu_offline_ciu,
#endif
};

/* The mbox versions don't do any affinity or round-robin. */
static struct irq_chip octeon_irq_chip_ciu_mbox_v2 = {
	.name = "CIU-M",
	.irq_enable = octeon_irq_ciu_enable_all_v2,
	.irq_disable = octeon_irq_ciu_disable_all_v2,
	.irq_ack = octeon_irq_ciu_disable_local_v2,
	.irq_eoi = octeon_irq_ciu_enable_local_v2,

	.irq_cpu_online = octeon_irq_ciu_enable_local_v2,
	.irq_cpu_offline = octeon_irq_ciu_disable_local_v2,
	.flags = IRQCHIP_ONOFFLINE_ENABLED,
};

static struct irq_chip octeon_irq_chip_ciu_mbox = {
	.name = "CIU-M",
	.irq_enable = octeon_irq_ciu_enable_all,
	.irq_disable = octeon_irq_ciu_disable_all,

	.irq_cpu_online = octeon_irq_ciu_enable_local,
	.irq_cpu_offline = octeon_irq_ciu_disable_local,
	.flags = IRQCHIP_ONOFFLINE_ENABLED,
};

/*
 * Watchdog interrupts are special.  They are associated with a single
 * core, so we hardwire the affinity to that core.
 */
static void octeon_irq_ciu_wd_enable(struct irq_data *data)
{
	unsigned long flags;
	unsigned long *pen;
	int coreid = data->irq - OCTEON_IRQ_WDOG0;	/* Bit 0-63 of EN1 */
	int cpu = octeon_cpu_for_coreid(coreid);

	raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags);
	pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
	set_bit(coreid, pen);
	cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);
	raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags);
}

/*
 * Watchdog interrupts are special.  They are associated with a single
 * core, so we hardwire the affinity to that core.
 */
static void octeon_irq_ciu1_wd_enable_v2(struct irq_data *data)
{
	int coreid = data->irq - OCTEON_IRQ_WDOG0;
	int cpu = octeon_cpu_for_coreid(coreid);

	set_bit(coreid, &per_cpu(octeon_irq_ciu1_en_mirror, cpu));
	cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(coreid * 2 + 1), 1ull << coreid);
}

static struct irq_chip octeon_irq_chip_ciu_wd_v2 = {
	.name = "CIU-W",
	.irq_enable = octeon_irq_ciu1_wd_enable_v2,
	.irq_disable = octeon_irq_ciu_disable_all_v2,
	.irq_mask = octeon_irq_ciu_disable_local_v2,
	.irq_unmask = octeon_irq_ciu_enable_local_v2,
};

static struct irq_chip octeon_irq_chip_ciu_wd = {
	.name = "CIU-W",
	.irq_enable = octeon_irq_ciu_wd_enable,
	.irq_disable = octeon_irq_ciu_disable_all,
	.irq_mask = octeon_irq_dummy_mask,
};

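/*
 * Chained IP2/IP3 dispatch: read the CIU summary register, mask it
 * with this core's enable mirror and dispatch the highest numbered
 * pending bit.  The v1 variants also mask and unmask the core
 * interrupt line around the dispatch.
 */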
static void octeon_irq_ip2_v1(void)
{
	const unsigned long core_id = cvmx_get_core_num();
	u64 ciu_sum = cvmx_read_csr(CVMX_CIU_INTX_SUM0(core_id * 2));

	ciu_sum &= __get_cpu_var(octeon_irq_ciu0_en_mirror);
	clear_c0_status(STATUSF_IP2);
	if (likely(ciu_sum)) {
		int bit = fls64(ciu_sum) - 1;
		int irq = octeon_irq_ciu_to_irq[0][bit];
		if (likely(irq))
			do_IRQ(irq);
		else
			spurious_interrupt();
	} else {
		spurious_interrupt();
	}
	set_c0_status(STATUSF_IP2);
}

static void octeon_irq_ip2_v2(void)
{
	const unsigned long core_id = cvmx_get_core_num();
	u64 ciu_sum = cvmx_read_csr(CVMX_CIU_INTX_SUM0(core_id * 2));

	ciu_sum &= __get_cpu_var(octeon_irq_ciu0_en_mirror);
	if (likely(ciu_sum)) {
		int bit = fls64(ciu_sum) - 1;
		int irq = octeon_irq_ciu_to_irq[0][bit];
		if (likely(irq))
			do_IRQ(irq);
		else
			spurious_interrupt();
	} else {
		spurious_interrupt();
	}
}

static void octeon_irq_ip3_v1(void)
{
	u64 ciu_sum = cvmx_read_csr(CVMX_CIU_INT_SUM1);

	ciu_sum &= __get_cpu_var(octeon_irq_ciu1_en_mirror);
	clear_c0_status(STATUSF_IP3);
	if (likely(ciu_sum)) {
		int bit = fls64(ciu_sum) - 1;
		int irq = octeon_irq_ciu_to_irq[1][bit];
		if (likely(irq))
			do_IRQ(irq);
		else
			spurious_interrupt();
	} else {
		spurious_interrupt();
	}
	set_c0_status(STATUSF_IP3);
}

static void octeon_irq_ip3_v2(void)
{
	u64 ciu_sum = cvmx_read_csr(CVMX_CIU_INT_SUM1);

	ciu_sum &= __get_cpu_var(octeon_irq_ciu1_en_mirror);
	if (likely(ciu_sum)) {
		int bit = fls64(ciu_sum) - 1;
		int irq = octeon_irq_ciu_to_irq[1][bit];
		if (likely(irq))
			do_IRQ(irq);
		else
			spurious_interrupt();
	} else {
		spurious_interrupt();
	}
}

static void octeon_irq_ip4_mask(void)
{
	clear_c0_status(STATUSF_IP4);
	spurious_interrupt();
}

static void (*octeon_irq_ip2)(void);
static void (*octeon_irq_ip3)(void);
static void (*octeon_irq_ip4)(void);

void __cpuinitdata (*octeon_irq_setup_secondary)(void);

static void __cpuinit octeon_irq_percpu_enable(void)
{
	irq_cpu_online();
}

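/* Per-CPU CIU setup, run on the boot CPU and on each secondary CPU. */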
static void __cpuinit octeon_irq_init_ciu_percpu(void)
{
	int coreid = cvmx_get_core_num();
	/*
	 * Disable All CIU Interrupts. The ones we need will be
	 * enabled later.  Read the SUM register so we know the write
	 * completed.
	 */
	cvmx_write_csr(CVMX_CIU_INTX_EN0((coreid * 2)), 0);
	cvmx_write_csr(CVMX_CIU_INTX_EN0((coreid * 2 + 1)), 0);
	cvmx_write_csr(CVMX_CIU_INTX_EN1((coreid * 2)), 0);
	cvmx_write_csr(CVMX_CIU_INTX_EN1((coreid * 2 + 1)), 0);
	cvmx_read_csr(CVMX_CIU_INTX_SUM0((coreid * 2)));
}

static void __cpuinit octeon_irq_setup_secondary_ciu(void)
{
	__get_cpu_var(octeon_irq_ciu0_en_mirror) = 0;
	__get_cpu_var(octeon_irq_ciu1_en_mirror) = 0;

	octeon_irq_init_ciu_percpu();
	octeon_irq_percpu_enable();

	/* Enable the CIU lines */
	set_c0_status(STATUSF_IP3 | STATUSF_IP2);
	clear_c0_status(STATUSF_IP4);
}

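/*
 * Select the v1 or v2 chip variants based on the Octeon model, then
 * establish the static irq to CIU line/bit mappings.
 */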
static void __init octeon_irq_init_ciu(void)
{
	unsigned int i;
	struct irq_chip *chip;
	struct irq_chip *chip_edge;
	struct irq_chip *chip_mbox;
	struct irq_chip *chip_wd;

	octeon_irq_init_ciu_percpu();
	octeon_irq_setup_secondary = octeon_irq_setup_secondary_ciu;

	if (OCTEON_IS_MODEL(OCTEON_CN58XX_PASS2_X) ||
	    OCTEON_IS_MODEL(OCTEON_CN56XX_PASS2_X) ||
	    OCTEON_IS_MODEL(OCTEON_CN52XX_PASS2_X) ||
	    OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
		octeon_irq_ip2 = octeon_irq_ip2_v2;
		octeon_irq_ip3 = octeon_irq_ip3_v2;
		chip = &octeon_irq_chip_ciu_v2;
		chip_edge = &octeon_irq_chip_ciu_edge_v2;
		chip_mbox = &octeon_irq_chip_ciu_mbox_v2;
		chip_wd = &octeon_irq_chip_ciu_wd_v2;
	} else {
		octeon_irq_ip2 = octeon_irq_ip2_v1;
		octeon_irq_ip3 = octeon_irq_ip3_v1;
		chip = &octeon_irq_chip_ciu;
		chip_edge = &octeon_irq_chip_ciu_edge;
		chip_mbox = &octeon_irq_chip_ciu_mbox;
		chip_wd = &octeon_irq_chip_ciu_wd;
	}
	octeon_irq_ip4 = octeon_irq_ip4_mask;

	/* Mips internal */
	octeon_irq_init_core();

	/* CIU_0 */
	for (i = 0; i < 16; i++)
		octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_WORKQ0, 0, i + 0, chip, handle_level_irq);
	for (i = 0; i < 16; i++)
		octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_GPIO0, 0, i + 16, chip, handle_level_irq);

	octeon_irq_set_ciu_mapping(OCTEON_IRQ_MBOX0, 0, 32, chip_mbox, handle_percpu_irq);
	octeon_irq_set_ciu_mapping(OCTEON_IRQ_MBOX1, 0, 33, chip_mbox, handle_percpu_irq);

	octeon_irq_set_ciu_mapping(OCTEON_IRQ_UART0, 0, 34, chip, handle_level_irq);
	octeon_irq_set_ciu_mapping(OCTEON_IRQ_UART1, 0, 35, chip, handle_level_irq);

	for (i = 0; i < 4; i++)
		octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_PCI_INT0, 0, i + 36, chip, handle_level_irq);
	for (i = 0; i < 4; i++)
		octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_PCI_MSI0, 0, i + 40, chip, handle_level_irq);

	octeon_irq_set_ciu_mapping(OCTEON_IRQ_TWSI, 0, 45, chip, handle_level_irq);
	octeon_irq_set_ciu_mapping(OCTEON_IRQ_RML, 0, 46, chip, handle_level_irq);
	octeon_irq_set_ciu_mapping(OCTEON_IRQ_TRACE0, 0, 47, chip, handle_level_irq);

	for (i = 0; i < 2; i++)
		octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_GMX_DRP0, 0, i + 48, chip_edge, handle_edge_irq);

	octeon_irq_set_ciu_mapping(OCTEON_IRQ_IPD_DRP, 0, 50, chip_edge, handle_edge_irq);
	octeon_irq_set_ciu_mapping(OCTEON_IRQ_KEY_ZERO, 0, 51, chip_edge, handle_edge_irq);

	for (i = 0; i < 4; i++)
		octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_TIMER0, 0, i + 52, chip_edge, handle_edge_irq);

	octeon_irq_set_ciu_mapping(OCTEON_IRQ_USB0, 0, 56, chip, handle_level_irq);
	octeon_irq_set_ciu_mapping(OCTEON_IRQ_PCM, 0, 57, chip, handle_level_irq);
	octeon_irq_set_ciu_mapping(OCTEON_IRQ_MPI, 0, 58, chip, handle_level_irq);
	octeon_irq_set_ciu_mapping(OCTEON_IRQ_TWSI2, 0, 59, chip, handle_level_irq);
	octeon_irq_set_ciu_mapping(OCTEON_IRQ_POWIQ, 0, 60, chip, handle_level_irq);
	octeon_irq_set_ciu_mapping(OCTEON_IRQ_IPDPPTHR, 0, 61, chip, handle_level_irq);
	octeon_irq_set_ciu_mapping(OCTEON_IRQ_MII0, 0, 62, chip, handle_level_irq);
	octeon_irq_set_ciu_mapping(OCTEON_IRQ_BOOTDMA, 0, 63, chip, handle_level_irq);

	/* CIU_1 */
	for (i = 0; i < 16; i++)
		octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_WDOG0, 1, i + 0, chip_wd, handle_level_irq);

	octeon_irq_set_ciu_mapping(OCTEON_IRQ_UART2, 1, 16, chip, handle_level_irq);
	octeon_irq_set_ciu_mapping(OCTEON_IRQ_USB1, 1, 17, chip, handle_level_irq);
	octeon_irq_set_ciu_mapping(OCTEON_IRQ_MII1, 1, 18, chip, handle_level_irq);
	octeon_irq_set_ciu_mapping(OCTEON_IRQ_NAND, 1, 19, chip, handle_level_irq);
	octeon_irq_set_ciu_mapping(OCTEON_IRQ_MIO, 1, 20, chip, handle_level_irq);
	octeon_irq_set_ciu_mapping(OCTEON_IRQ_IOB, 1, 21, chip, handle_level_irq);
	octeon_irq_set_ciu_mapping(OCTEON_IRQ_FPA, 1, 22, chip, handle_level_irq);
	octeon_irq_set_ciu_mapping(OCTEON_IRQ_POW, 1, 23, chip, handle_level_irq);
	octeon_irq_set_ciu_mapping(OCTEON_IRQ_L2C, 1, 24, chip, handle_level_irq);
	octeon_irq_set_ciu_mapping(OCTEON_IRQ_IPD, 1, 25, chip, handle_level_irq);
	octeon_irq_set_ciu_mapping(OCTEON_IRQ_PIP, 1, 26, chip, handle_level_irq);
	octeon_irq_set_ciu_mapping(OCTEON_IRQ_PKO, 1, 27, chip, handle_level_irq);
	octeon_irq_set_ciu_mapping(OCTEON_IRQ_ZIP, 1, 28, chip, handle_level_irq);
	octeon_irq_set_ciu_mapping(OCTEON_IRQ_TIM, 1, 29, chip, handle_level_irq);
	octeon_irq_set_ciu_mapping(OCTEON_IRQ_RAD, 1, 30, chip, handle_level_irq);
	octeon_irq_set_ciu_mapping(OCTEON_IRQ_KEY, 1, 31, chip, handle_level_irq);
	octeon_irq_set_ciu_mapping(OCTEON_IRQ_DFA, 1, 32, chip, handle_level_irq);
	octeon_irq_set_ciu_mapping(OCTEON_IRQ_USBCTL, 1, 33, chip, handle_level_irq);
	octeon_irq_set_ciu_mapping(OCTEON_IRQ_SLI, 1, 34, chip, handle_level_irq);
	octeon_irq_set_ciu_mapping(OCTEON_IRQ_DPI, 1, 35, chip, handle_level_irq);

	octeon_irq_set_ciu_mapping(OCTEON_IRQ_AGX0, 1, 36, chip, handle_level_irq);

	octeon_irq_set_ciu_mapping(OCTEON_IRQ_AGL, 1, 46, chip, handle_level_irq);

	octeon_irq_set_ciu_mapping(OCTEON_IRQ_PTP, 1, 47, chip_edge, handle_edge_irq);

	octeon_irq_set_ciu_mapping(OCTEON_IRQ_PEM0, 1, 48, chip, handle_level_irq);
	octeon_irq_set_ciu_mapping(OCTEON_IRQ_PEM1, 1, 49, chip, handle_level_irq);
	octeon_irq_set_ciu_mapping(OCTEON_IRQ_SRIO0, 1, 50, chip, handle_level_irq);
	octeon_irq_set_ciu_mapping(OCTEON_IRQ_SRIO1, 1, 51, chip, handle_level_irq);
	octeon_irq_set_ciu_mapping(OCTEON_IRQ_LMC0, 1, 52, chip, handle_level_irq);
	octeon_irq_set_ciu_mapping(OCTEON_IRQ_DFM, 1, 56, chip, handle_level_irq);
	octeon_irq_set_ciu_mapping(OCTEON_IRQ_RST, 1, 63, chip, handle_level_irq);

	/* Enable the CIU lines */
	set_c0_status(STATUSF_IP3 | STATUSF_IP2);
	clear_c0_status(STATUSF_IP4);
}

void __init arch_init_irq(void)
{
#ifdef CONFIG_SMP
	/* Set the default affinity to the boot cpu. */
	cpumask_clear(irq_default_affinity);
	cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
#endif
	octeon_irq_init_ciu();
}
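
/*
 * Main interrupt dispatch: keep handling pending interrupt lines
 * until none remain.
 */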
asmlinkage void plat_irq_dispatch(void)
{
	unsigned long cop0_cause;
	unsigned long cop0_status;

	while (1) {
		cop0_cause = read_c0_cause();
		cop0_status = read_c0_status();
		cop0_cause &= cop0_status;
		cop0_cause &= ST0_IM;

		if (unlikely(cop0_cause & STATUSF_IP2))
			octeon_irq_ip2();
		else if (unlikely(cop0_cause & STATUSF_IP3))
			octeon_irq_ip3();
		else if (unlikely(cop0_cause & STATUSF_IP4))
			octeon_irq_ip4();
		else if (likely(cop0_cause))
			do_IRQ(fls(cop0_cause) - 9 + MIPS_CPU_IRQ_BASE);
		else
			break;
	}
}

#ifdef CONFIG_HOTPLUG_CPU

void fixup_irqs(void)
{
	irq_cpu_offline();
}

#endif /* CONFIG_HOTPLUG_CPU */