/*
 * arch/ppc/kernel/xics.c
 *
 * Copyright 2000 IBM Corporation.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */
#include <linux/config.h>
#include <linux/types.h>
#include <linux/threads.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/smp.h>
#include <asm/naca.h>
#include <asm/rtas.h>
#include "i8259.h"
#include "xics.h"
#include <asm/ppcdebug.h>

void xics_enable_irq(u_int irq);
void xics_disable_irq(u_int irq);
void xics_mask_and_ack_irq(u_int irq);
void xics_end_irq(u_int irq);
void xics_set_affinity(unsigned int irq_nr, unsigned long cpumask);

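/*
 * hw_interrupt_type handlers for XICS-native sources and for sources
 * cascaded through the ISA 8259.  Fields are initialized positionally:
 * typename, startup, shutdown, enable, disable, ack, end, set_affinity.
 */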
struct hw_interrupt_type xics_pic = {
	" XICS     ",
	NULL,
	NULL,
	xics_enable_irq,
	xics_disable_irq,
	xics_mask_and_ack_irq,
	xics_end_irq,
	xics_set_affinity
};

struct hw_interrupt_type xics_8259_pic = {
	" XICS/8259",
	NULL,
	NULL,
	NULL,
	NULL,
	xics_mask_and_ack_irq,
	NULL
};

#define XICS_IPI		2
#define XICS_IRQ_SPURIOUS	0

/* Want a priority other than 0.  Various HW issues require this. */
#define	DEFAULT_PRIORITY	5

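/*
 * Per-cpu interrupt presentation layer registers, accessed as a
 * memory-mapped structure: XIRR poll, XIRR (whose top byte is the
 * CPPR), a reserved word, and the QIRR used to trigger IPIs.
 */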
struct xics_ipl {
	union {
		u32	word;
		u8	bytes[4];
	} xirr_poll;
	union {
		u32	word;
		u8	bytes[4];
	} xirr;
	u32	dummy;
	union {
		u32	word;
		u8	bytes[4];
	} qirr;
};

struct xics_info {
	volatile struct xics_ipl *	per_cpu[NR_CPUS];
};

struct xics_info	xics_info;

unsigned long long intr_base = 0;
int xics_irq_8259_cascade = 0;
int xics_irq_8259_cascade_real = 0;
unsigned int default_server = 0xFF;
unsigned int default_distrib_server = 0;

/* RTAS service tokens */
int ibm_get_xive;
int ibm_set_xive;
int ibm_int_on;
int ibm_int_off;

struct xics_interrupt_node {
	unsigned long long addr;
	unsigned long long size;
} inodes[NR_CPUS*2];

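/*
 * Low-level accessors for the presentation registers.  The table is
 * selected at init time: pSeries_ops below uses direct MMIO, while
 * pSeriesLP_ops (defined elsewhere) is used on LPAR systems.
 */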
typedef struct {
	int (*xirr_info_get)(int cpu);
	void (*xirr_info_set)(int cpu, int val);
	void (*cppr_info)(int cpu, u8 val);
	void (*qirr_info)(int cpu, u8 val);
} xics_ops;


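/* Direct MMIO access to the per-cpu presentation registers. */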
static int pSeries_xirr_info_get(int n_cpu)
{
	return (xics_info.per_cpu[n_cpu]->xirr.word);
}

static void pSeries_xirr_info_set(int n_cpu, int value)
{
	xics_info.per_cpu[n_cpu]->xirr.word = value;
}

static void pSeries_cppr_info(int n_cpu, u8 value)
{
	xics_info.per_cpu[n_cpu]->xirr.bytes[0] = value;
}

static void pSeries_qirr_info(int n_cpu, u8 value)
{
	xics_info.per_cpu[n_cpu]->qirr.bytes[0] = value;
}

static xics_ops pSeries_ops = {
	pSeries_xirr_info_get,
	pSeries_xirr_info_set,
	pSeries_cppr_info,
	pSeries_qirr_info
};

static xics_ops *ops = &pSeries_ops;
extern xics_ops pSeriesLP_ops;


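/*
 * Route an interrupt source to a server with ibm,set-xive at the
 * default priority, then unmask it with ibm,int-on.  With
 * CONFIG_IRQ_ALL_CPUS the global distribution server is used once
 * all cpus are up.
 */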
void
xics_enable_irq(u_int virq)
{
	u_int		irq;
	unsigned long	status;
	long		call_status;
	unsigned int	interrupt_server = default_server;

	irq = irq_offset_down(virq);
	if (irq == XICS_IPI)
		return;

#ifdef CONFIG_IRQ_ALL_CPUS
	if((smp_num_cpus == systemcfg->processorCount) &&
	   (smp_threads_ready)) {
		interrupt_server = default_distrib_server;
	}
#endif
	call_status = rtas_call(ibm_set_xive, 3, 1, (unsigned long*)&status,
				irq, interrupt_server, DEFAULT_PRIORITY);

	if( call_status != 0 ) {
		printk("xics_enable_irq: irq=%x: rtas_call failed; retn=%lx, status=%lx\n",
		       irq, call_status, status);
		return;
	}
	/* Now unmask the interrupt (often a no-op) */
	call_status = rtas_call(ibm_int_on, 1, 1, (unsigned long*)&status,
				irq);
	if( call_status != 0 ) {
		printk("xics_enable_irq: irq=%x: ibm,int-on rtas_call failed, retn=%lx\n",
		       irq, call_status);
		return;
	}
}

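/* Mask an interrupt source at the controller with ibm,int-off. */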
void
xics_disable_irq(u_int virq)
{
	u_int		irq;
	unsigned long	status;
	long		call_status;

	irq = irq_offset_down(virq);
	call_status = rtas_call(ibm_int_off, 1, 1, NULL, irq);
	if( call_status != 0 ) {
		printk("xics_disable_irq: irq=%x: rtas_call failed, retn=%lx\n",
		       irq, call_status);
		return;
	}
}

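/*
 * EOI: writing the source number back to the XIRR with 0xff in the
 * top byte signals end-of-interrupt and restores the CPPR so all
 * priorities are accepted again.
 */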
void
xics_end_irq(u_int irq)
{
	int cpu = smp_processor_id();

	iosync();
	ops->xirr_info_set(cpu, (0xff<<24) | irq_offset_down(irq));
}

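/*
 * For interrupts cascaded through the 8259, ack the 8259 and EOI the
 * cascade source on the XICS; native XICS sources only have the CPPR
 * rewritten here (the XIRR read in xics_get_irq already accepted them).
 */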
void
xics_mask_and_ack_irq(u_int irq)
{
	int cpu = smp_processor_id();

	if (irq < irq_offset_value()) {
		i8259_pic.ack(irq);
		iosync();
		ops->xirr_info_set(cpu, ((0xff<<24) | xics_irq_8259_cascade_real));
		iosync();
	}
	else {
		ops->cppr_info(cpu, 0xff);
		iosync();
	}
}

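/*
 * Read the XIRR to accept the highest-priority pending interrupt.
 * The low 24 bits are the source number; the 8259 cascade source is
 * redirected to the ISA controller, and source 0 means spurious.
 */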
int
xics_get_irq(struct pt_regs *regs)
{
	u_int	cpu = smp_processor_id();
	u_int	vec;
	int irq;

	vec = ops->xirr_info_get(cpu);
	/*  (vec >> 24) == old priority */
	vec &= 0x00ffffff;
	/* for sanity, this had better be < NR_IRQS - 16 */
	if( vec == xics_irq_8259_cascade_real ) {
		irq = i8259_irq(cpu);
		if(irq == -1) {
			/* Spurious cascaded interrupt.  Still must ack xics */
			xics_end_irq(irq_offset_up(xics_irq_8259_cascade));
			irq = -1;
		}
	} else if( vec == XICS_IRQ_SPURIOUS ) {
		irq = -1;
	} else {
		irq = irq_offset_up(vec);
	}
	return irq;
}


#ifdef CONFIG_SMP
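/*
 * IPI handler: writing 0xff to the QIRR clears the pending request,
 * then each message bit posted by the sender is consumed in turn.
 */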
void xics_ipi_action(int irq, void *dev_id, struct pt_regs *regs)
{
	extern volatile unsigned long xics_ipi_message[];
	int cpu = smp_processor_id();

	ops->qirr_info(cpu, 0xff);
	while (xics_ipi_message[cpu]) {
		if (test_and_clear_bit(PPC_MSG_CALL_FUNCTION, &xics_ipi_message[cpu])) {
			mb();
			smp_message_recv(PPC_MSG_CALL_FUNCTION, regs);
		}
		if (test_and_clear_bit(PPC_MSG_RESCHEDULE, &xics_ipi_message[cpu])) {
			mb();
			smp_message_recv(PPC_MSG_RESCHEDULE, regs);
		}
#ifdef CONFIG_XMON
		if (test_and_clear_bit(PPC_MSG_XMON_BREAK, &xics_ipi_message[cpu])) {
			mb();
			smp_message_recv(PPC_MSG_XMON_BREAK, regs);
		}
#endif
	}
}

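/* Raise an IPI by writing priority 0 (most favored) to the target cpu's QIRR. */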
void xics_cause_IPI(int cpu)
{
	ops->qirr_info(cpu, 0);
}

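/* Set the CPPR to 0xff so this cpu accepts interrupts at any priority. */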
void xics_setup_cpu(void)
{
	int cpu = smp_processor_id();

	ops->cppr_info(cpu, 0xff);
	iosync();
}
#endif /* CONFIG_SMP */

void xics_init_irq_desc(irq_desc_t *desc)
{
	/* Don't mess with the handler if already set.
	 * This leaves the setup of isa handlers undisturbed.
	 */
	if (!desc->handler)
		desc->handler = &xics_pic;
}

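/*
 * Probe the device tree for the interrupt presentation nodes, look up
 * the RTAS tokens, record the boot cpu's interrupt server numbers, map
 * the per-cpu presentation registers, and wire up the 8259 cascade and
 * the IPI.
 */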
void
xics_init_IRQ(void)
{
	int i;
	unsigned long intr_size = 0;
	struct device_node *np;
	uint *ireg, ilen, indx=0;

	ppc64_boot_msg(0x20, "XICS Init");

	ibm_get_xive = rtas_token("ibm,get-xive");
	ibm_set_xive = rtas_token("ibm,set-xive");
	ibm_int_on  = rtas_token("ibm,int-on");
	ibm_int_off = rtas_token("ibm,int-off");

	np = find_type_devices("PowerPC-External-Interrupt-Presentation");
	if (!np) {
		printk(KERN_WARNING "Can't find Interrupt Presentation\n");
		udbg_printf("Can't find Interrupt Presentation\n");
		while (1);
	}
nextnode:
	ireg = (uint *)get_property(np, "ibm,interrupt-server-ranges", 0);
	if (ireg) {
		/*
		 * set node starting index for this node
		 */
		indx = *ireg;
	}

	ireg = (uint *)get_property(np, "reg", &ilen);
	if (!ireg) {
		printk(KERN_WARNING "Can't find Interrupt Reg Property\n");
		udbg_printf("Can't find Interrupt Reg Property\n");
		while (1);
	}

	while (ilen) {
		inodes[indx].addr = (unsigned long long)*ireg++ << 32;
		ilen -= sizeof(uint);
		inodes[indx].addr |= *ireg++;
		ilen -= sizeof(uint);
		inodes[indx].size = (unsigned long long)*ireg++ << 32;
		ilen -= sizeof(uint);
		inodes[indx].size |= *ireg++;
		ilen -= sizeof(uint);
		indx++;
		if (indx >= NR_CPUS) break;
	}

	np = np->next;
	if ((indx < NR_CPUS) && np) goto nextnode;

	/* Find the server numbers for the boot cpu. */
	for (np = find_type_devices("cpu"); np; np = np->next) {
		ireg = (uint *)get_property(np, "reg", &ilen);
		if (ireg && ireg[0] == hard_smp_processor_id()) {
			ireg = (uint *)get_property(np, "ibm,ppc-interrupt-gserver#s", &ilen);
			i = ilen / sizeof(int);
			if (ireg && i > 0) {
				default_server = ireg[0];
				default_distrib_server = ireg[i-1]; /* take last element */
			}
			break;
		}
	}

	intr_base = inodes[0].addr;
	intr_size = (ulong)inodes[0].size;

	np = find_type_devices("interrupt-controller");
	if (!np) {
		printk(KERN_WARNING "xics:  no ISA Interrupt Controller\n");
		xics_irq_8259_cascade_real = -1;
		xics_irq_8259_cascade = -1;
	} else {
		ireg = (uint *) get_property(np, "interrupts", 0);
		if (!ireg) {
			printk(KERN_WARNING "Can't find ISA Interrupts Property\n");
			udbg_printf("Can't find ISA Interrupts Property\n");
			while (1);
		}
		xics_irq_8259_cascade_real = *ireg;
		xics_irq_8259_cascade = xics_irq_8259_cascade_real;
	}

	if (systemcfg->platform == PLATFORM_PSERIES) {
#ifdef CONFIG_SMP
		for (i = 0; i < systemcfg->processorCount; ++i) {
			xics_info.per_cpu[i] =
			  __ioremap((ulong)inodes[get_hard_smp_processor_id(i)].addr,
				  (ulong)inodes[get_hard_smp_processor_id(i)].size, _PAGE_NO_CACHE);
		}
#else
		xics_info.per_cpu[0] = __ioremap((ulong)intr_base, intr_size, _PAGE_NO_CACHE);
#endif /* CONFIG_SMP */
#ifdef CONFIG_PPC_PSERIES
	/* iSeries does not actually use xics, but for now it still has link
	 * dependencies on this file; the pSeriesLP_ops reference is the one
	 * exception, hence the #ifdef.
	 */
	} else if (systemcfg->platform == PLATFORM_PSERIES_LPAR) {
		ops = &pSeriesLP_ops;
#endif
	}

	xics_8259_pic.enable = i8259_pic.enable;
	xics_8259_pic.disable = i8259_pic.disable;
	for (i = 0; i < 16; ++i)
		real_irqdesc(i)->handler = &xics_8259_pic;

	ops->cppr_info(0, 0xff);
	iosync();
	if (xics_irq_8259_cascade != -1) {
		if (request_irq(irq_offset_up(xics_irq_8259_cascade),
				no_action, 0, "8259 cascade", 0))
			printk(KERN_ERR "xics_init_IRQ: couldn't get 8259 cascade\n");
		i8259_init();
	}

#ifdef CONFIG_SMP
	request_irq(irq_offset_up(XICS_IPI), xics_ipi_action, 0, "IPI", 0);
	real_irqdesc(irq_offset_up(XICS_IPI))->status |= IRQ_PER_CPU;
#endif
	ppc64_boot_msg(0x21, "XICS Done");
}

/*
 * Find first logical cpu and return its physical cpu number
 */
static inline u32 physmask(u32 cpumask)
{
	int i;

	for (i = 0; i < smp_num_cpus; ++i, cpumask >>= 1) {
		if (cpumask & 1)
			return get_hard_smp_processor_id(i);
	}

	printk(KERN_ERR "xics_set_affinity: invalid irq mask\n");

	return default_distrib_server;
}

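/*
 * Rebind an interrupt source to a new server: read the current
 * (server, priority) pair with ibm,get-xive, then reissue ibm,set-xive
 * with the new server and the existing priority.  Only "one cpu" and
 * "all cpus" masks are handled.
 */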
void xics_set_affinity(unsigned int irq, unsigned long cpumask)
{
	irq_desc_t *desc = irqdesc(irq);
	unsigned long flags;
	long status;
	unsigned long xics_status[2];
	u32 newmask;

	irq = irq_offset_down(irq);
	if (irq == XICS_IPI)
		return;

	spin_lock_irqsave(&desc->lock, flags);

	status = rtas_call(ibm_get_xive, 1, 3, (void *)&xics_status, irq);

	if (status) {
		printk("xics_set_affinity: irq=%d ibm,get-xive returns %ld\n",
			irq, status);
		goto out;
	}

	/* For the moment only implement delivery to all cpus or one cpu */
	if (cpumask == 0xffffffff)
		newmask = default_distrib_server;
	else
		newmask = physmask(cpumask);

	status = rtas_call(ibm_set_xive, 3, 1, NULL,
				irq, newmask, xics_status[1]);

	if (status) {
		printk("xics_set_affinity irq=%d ibm,set-xive returns %ld\n",
			irq, status);
		goto out;
	}

out:
	spin_unlock_irqrestore(&desc->lock, flags);
}