/*
 * Cell Internal Interrupt Controller
 *
 * Copyright (C) 2006 Benjamin Herrenschmidt (benh@kernel.crashing.org)
 *                    IBM, Corp.
 *
 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
 *
 * Author: Arnd Bergmann <arndb@de.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * TODO:
 * - Fix various assumptions related to HW CPU numbers vs. linux CPU numbers
 *   vs node numbers in the setup code
 * - Implement proper handling of maxcpus=1/2 (that is, routing of irqs from
 *   a non-active node to the active node)
 */
31
32 #include <linux/interrupt.h>
33 #include <linux/irq.h>
34 #include <linux/module.h>
35 #include <linux/percpu.h>
36 #include <linux/types.h>
37 #include <linux/ioport.h>
38 #include <linux/kernel_stat.h>
39
40 #include <asm/io.h>
41 #include <asm/pgtable.h>
42 #include <asm/prom.h>
43 #include <asm/ptrace.h>
44 #include <asm/machdep.h>
45 #include <asm/cell-regs.h>
46
47 #include "interrupt.h"
48
/* Per-HW-thread state of one internal interrupt controller */
struct iic {
	struct cbe_iic_thread_regs __iomem *regs;	/* mapped per-thread register block */
	u8 target_id;		/* destination id used to route irqs to this thread */
	u8 eoi_stack[16];	/* saved priorities, pushed in iic_get_irq(), popped in iic_eoi() */
	int eoi_ptr;		/* index of top of eoi_stack */
	struct device_node *node;	/* device tree node of the owning controller */
};
56
/* One IIC instance per CPU (HW thread) */
static DEFINE_PER_CPU(struct iic, cpu_iic);
/* A Cell machine has at most two BE nodes */
#define IIC_NODE_COUNT	2
/* Single irq host covering all IIC sources; set up in iic_init_IRQ() */
static struct irq_host *iic_host;
60
61 /* Convert between "pending" bits and hw irq number */
iic_pending_to_hwnum(struct cbe_iic_pending_bits bits)62 static irq_hw_number_t iic_pending_to_hwnum(struct cbe_iic_pending_bits bits)
63 {
64 unsigned char unit = bits.source & 0xf;
65 unsigned char node = bits.source >> 4;
66 unsigned char class = bits.class & 3;
67
68 /* Decode IPIs */
69 if (bits.flags & CBE_IIC_IRQ_IPI)
70 return IIC_IRQ_TYPE_IPI | (bits.prio >> 4);
71 else
72 return (node << IIC_IRQ_NODE_SHIFT) | (class << 4) | unit;
73 }
74
/*
 * Intentionally empty: no per-source mask is touched here.
 * NOTE(review): presumably the IIC masks via the priority register
 * written elsewhere in this file -- confirm against the CBEA spec.
 */
static void iic_mask(struct irq_data *d)
{
}
78
/* Intentionally empty, see the note on iic_mask() */
static void iic_unmask(struct irq_data *d)
{
}
82
iic_eoi(struct irq_data * d)83 static void iic_eoi(struct irq_data *d)
84 {
85 struct iic *iic = &__get_cpu_var(cpu_iic);
86 out_be64(&iic->regs->prio, iic->eoi_stack[--iic->eoi_ptr]);
87 BUG_ON(iic->eoi_ptr < 0);
88 }
89
/* irq_chip for normal IIC sources; mask/unmask are no-ops, EOI pops
 * the saved priority */
static struct irq_chip iic_chip = {
	.name = "CELL-IIC",
	.irq_mask = iic_mask,
	.irq_unmask = iic_unmask,
	.irq_eoi = iic_eoi,
};
96
97
/*
 * Intentionally empty: IO exception sources are acknowledged directly
 * in iic_ioexc_cascade() by writing iic_is, so there is nothing left
 * to do at EOI time.
 */
static void iic_ioexc_eoi(struct irq_data *d)
{
}
101
/*
 * Chained handler for the per-node "IO exception" cascade interrupt.
 * Reads the node's interrupt status register (iic_is) and dispatches
 * every pending bit as its own linux interrupt, looping until the
 * status register reads back empty.
 */
static void iic_ioexc_cascade(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct cbe_iic_regs __iomem *node_iic =
		(void __iomem *)irq_desc_get_handler_data(desc);
	/* keep the node bits of the cascade hw number, switch the type
	 * to IOEXC; the status bit index is OR'ed in below */
	unsigned int base = (irq & 0xffffff00) | IIC_IRQ_TYPE_IOEXC;
	unsigned long bits, ack;
	int cascade;

	for (;;) {
		bits = in_be64(&node_iic->iic_is);
		if (bits == 0)
			break;
		/* pre-ack edge interrupts so a new edge arriving while we
		 * handle this batch is not lost */
		ack = bits & IIC_ISR_EDGE_MASK;
		if (ack)
			out_be64(&node_iic->iic_is, ack);
		/* handle them, most-significant bit first */
		for (cascade = 63; cascade >= 0; cascade--)
			if (bits & (0x8000000000000000UL >> cascade)) {
				unsigned int cirq =
					irq_linear_revmap(iic_host,
							  base | cascade);
				if (cirq != NO_IRQ)
					generic_handle_irq(cirq);
			}
		/* post-ack level interrupts, after their handlers ran */
		ack = bits & ~IIC_ISR_EDGE_MASK;
		if (ack)
			out_be64(&node_iic->iic_is, ack);
	}
	/* EOI the cascade interrupt itself on the parent chip */
	chip->irq_eoi(&desc->irq_data);
}
135
136
/* irq_chip for IO exception sources cascaded through iic_ioexc_cascade() */
static struct irq_chip iic_ioexc_chip = {
	.name = "CELL-IOEX",
	.irq_mask = iic_mask,
	.irq_unmask = iic_unmask,
	.irq_eoi = iic_ioexc_eoi,
};
143
144 /* Get an IRQ number from the pending state register of the IIC */
iic_get_irq(void)145 static unsigned int iic_get_irq(void)
146 {
147 struct cbe_iic_pending_bits pending;
148 struct iic *iic;
149 unsigned int virq;
150
151 iic = &__get_cpu_var(cpu_iic);
152 *(unsigned long *) &pending =
153 in_be64((u64 __iomem *) &iic->regs->pending_destr);
154 if (!(pending.flags & CBE_IIC_IRQ_VALID))
155 return NO_IRQ;
156 virq = irq_linear_revmap(iic_host, iic_pending_to_hwnum(pending));
157 if (virq == NO_IRQ)
158 return NO_IRQ;
159 iic->eoi_stack[++iic->eoi_ptr] = pending.prio;
160 BUG_ON(iic->eoi_ptr > 15);
161 return virq;
162 }
163
iic_setup_cpu(void)164 void iic_setup_cpu(void)
165 {
166 out_be64(&__get_cpu_var(cpu_iic).regs->prio, 0xff);
167 }
168
iic_get_target_id(int cpu)169 u8 iic_get_target_id(int cpu)
170 {
171 return per_cpu(cpu_iic, cpu).target_id;
172 }
173
174 EXPORT_SYMBOL_GPL(iic_get_target_id);
175
176 #ifdef CONFIG_SMP
177
178 /* Use the highest interrupt priorities for IPI */
iic_ipi_to_irq(int ipi)179 static inline int iic_ipi_to_irq(int ipi)
180 {
181 return IIC_IRQ_TYPE_IPI + 0xf - ipi;
182 }
183
iic_cause_IPI(int cpu,int mesg)184 void iic_cause_IPI(int cpu, int mesg)
185 {
186 out_be64(&per_cpu(cpu_iic, cpu).regs->generate, (0xf - mesg) << 4);
187 }
188
iic_get_irq_host(int node)189 struct irq_host *iic_get_irq_host(int node)
190 {
191 return iic_host;
192 }
193 EXPORT_SYMBOL_GPL(iic_get_irq_host);
194
iic_ipi_action(int irq,void * dev_id)195 static irqreturn_t iic_ipi_action(int irq, void *dev_id)
196 {
197 int ipi = (int)(long)dev_id;
198
199 smp_message_recv(ipi);
200
201 return IRQ_HANDLED;
202 }
iic_request_ipi(int ipi,const char * name)203 static void iic_request_ipi(int ipi, const char *name)
204 {
205 int virq;
206
207 virq = irq_create_mapping(iic_host, iic_ipi_to_irq(ipi));
208 if (virq == NO_IRQ) {
209 printk(KERN_ERR
210 "iic: failed to map IPI %s\n", name);
211 return;
212 }
213 if (request_irq(virq, iic_ipi_action, IRQF_DISABLED, name,
214 (void *)(long)ipi))
215 printk(KERN_ERR
216 "iic: failed to request IPI %s\n", name);
217 }
218
/* Register handlers for all standard SMP IPI messages */
void iic_request_IPIs(void)
{
	iic_request_ipi(PPC_MSG_CALL_FUNCTION, "IPI-call");
	iic_request_ipi(PPC_MSG_RESCHEDULE, "IPI-resched");
	iic_request_ipi(PPC_MSG_CALL_FUNC_SINGLE, "IPI-call-single");
#ifdef CONFIG_DEBUGGER
	iic_request_ipi(PPC_MSG_DEBUGGER_BREAK, "IPI-debug");
#endif /* CONFIG_DEBUGGER */
}
228
229 #endif /* CONFIG_SMP */
230
231
/* The host matches any device tree node compatible with the CBEA
 * internal interrupt controller */
static int iic_host_match(struct irq_host *h, struct device_node *node)
{
	static const char compat[] =
		"IBM,CBEA-Internal-Interrupt-Controller";

	return of_device_is_compatible(node, compat);
}
237
/* Attach the right chip and flow handler to a freshly mapped irq,
 * based on the type bits of the hw number */
static int iic_host_map(struct irq_host *h, unsigned int virq,
			irq_hw_number_t hw)
{
	struct irq_chip *chip = &iic_chip;
	irq_flow_handler_t handler = handle_edge_eoi_irq;

	switch (hw & IIC_IRQ_TYPE_MASK) {
	case IIC_IRQ_TYPE_IPI:
		/* IPIs are per-cpu and never migrate */
		handler = handle_percpu_irq;
		break;
	case IIC_IRQ_TYPE_IOEXC:
		chip = &iic_ioexc_chip;
		break;
	}
	irq_set_chip_and_handler(virq, chip, handler);
	return 0;
}
254
/*
 * Translate a device tree interrupt specifier into a hw irq number.
 * The single specifier cell packs node, extension, class and unit as
 * one byte each, most significant first.
 */
static int iic_host_xlate(struct irq_host *h, struct device_node *ct,
			  const u32 *intspec, unsigned int intsize,
			  irq_hw_number_t *out_hwirq, unsigned int *out_flags)

{
	unsigned int node, ext, unit, class;
	const u32 *val;

	/* Only accept our own controller nodes with 1-cell specifiers */
	if (!of_device_is_compatible(ct,
				     "IBM,CBEA-Internal-Interrupt-Controller"))
		return -ENODEV;
	if (intsize != 1)
		return -ENODEV;
	val = of_get_property(ct, "#interrupt-cells", NULL);
	if (val == NULL || *val != 1)
		return -ENODEV;

	/* Unpack the cell: [node:8][ext:8][class:8][unit:8] */
	node = intspec[0] >> 24;
	ext = (intspec[0] >> 16) & 0xff;
	class = (intspec[0] >> 8) & 0xff;
	unit = intspec[0] & 0xff;

	/* Check if node is in supported range */
	if (node > 1)
		return -EINVAL;

	/* Build up interrupt number, special case for IO exceptions */
	*out_hwirq = (node << IIC_IRQ_NODE_SHIFT);
	if (unit == IIC_UNIT_IIC && class == 1)
		*out_hwirq |= IIC_IRQ_TYPE_IOEXC | ext;
	else
		*out_hwirq |= IIC_IRQ_TYPE_NORMAL |
			(class << IIC_IRQ_CLASS_SHIFT) | unit;

	/* Dummy flags, ignored by iic code */
	*out_flags = IRQ_TYPE_EDGE_RISING;

	return 0;
}
294
/* irq_host callbacks for the IIC */
static struct irq_host_ops iic_host_ops = {
	.match = iic_host_match,
	.map = iic_host_map,
	.xlate = iic_host_xlate,
};
300
/*
 * Map and initialize the per-thread IIC for HW cpu @hw_cpu whose
 * register block lives at physical address @addr; @node is the
 * owning controller's device tree node.
 */
static void __init init_one_iic(unsigned int hw_cpu, unsigned long addr,
				struct device_node *node)
{
	/* XXX FIXME: should locate the linux CPU number from the HW cpu
	 * number properly. We are lucky for now
	 */
	struct iic *iic = &per_cpu(cpu_iic, hw_cpu);

	iic->regs = ioremap(addr, sizeof(struct cbe_iic_thread_regs));
	BUG_ON(iic->regs == NULL);

	/* NOTE(review): destination id encoding inferred from the bit
	 * manipulation here (node in bit 4, 0xe/0xf selecting the thread)
	 * -- confirm against the CBEA documentation */
	iic->target_id = ((hw_cpu & 2) << 3) | ((hw_cpu & 1) ? 0xf : 0xe);
	iic->eoi_stack[0] = 0xff;	/* stack bottom: lowest priority */
	iic->node = of_node_get(node);
	/* Block all interrupts until iic_setup_cpu() opens the gate */
	out_be64(&iic->regs->prio, 0);

	printk(KERN_INFO "IIC for CPU %d target id 0x%x : %s\n",
	       hw_cpu, iic->target_id, node->full_name);
}
320
/*
 * Probe the device tree for IIC nodes, map the per-thread register
 * blocks and wire up the per-node IO exception cascade.
 * Returns 0 if at least one IIC was found, -ENODEV otherwise.
 */
static int __init setup_iic(void)
{
	struct device_node *dn;
	struct resource r0, r1;
	unsigned int node, cascade, found = 0;
	struct cbe_iic_regs __iomem *node_iic;
	const u32 *np;

	for (dn = NULL;
	     (dn = of_find_node_by_name(dn,"interrupt-controller")) != NULL;) {
		if (!of_device_is_compatible(dn,
				"IBM,CBEA-Internal-Interrupt-Controller"))
			continue;
		/* The property lists the two HW thread numbers this
		 * controller node serves */
		np = of_get_property(dn, "ibm,interrupt-server-ranges", NULL);
		if (np == NULL) {
			printk(KERN_WARNING "IIC: CPU association not found\n");
			of_node_put(dn);
			return -ENODEV;
		}
		/* Two register resources: one per HW thread */
		if (of_address_to_resource(dn, 0, &r0) ||
		    of_address_to_resource(dn, 1, &r1)) {
			printk(KERN_WARNING "IIC: Can't resolve addresses\n");
			of_node_put(dn);
			return -ENODEV;
		}
		found++;
		init_one_iic(np[0], r0.start, dn);
		init_one_iic(np[1], r1.start, dn);

		/* Setup cascade for IO exceptions. XXX cleanup tricks to get
		 * node vs CPU etc...
		 * Note that we configure the IIC_IRR here with a hard coded
		 * priority of 1. We might want to improve that later.
		 */
		node = np[0] >> 1;	/* two HW threads per node */
		node_iic = cbe_get_cpu_iic_regs(np[0]);
		/* hw number of the cascade source: this node, class 1,
		 * IIC unit */
		cascade = node << IIC_IRQ_NODE_SHIFT;
		cascade |= 1 << IIC_IRQ_CLASS_SHIFT;
		cascade |= IIC_UNIT_IIC;
		cascade = irq_create_mapping(iic_host, cascade);
		if (cascade == NO_IRQ)
			continue;
		/*
		 * irq_data is a generic pointer that gets passed back
		 * to us later, so the forced cast is fine.
		 */
		irq_set_handler_data(cascade, (void __force *)node_iic);
		irq_set_chained_handler(cascade, iic_ioexc_cascade);
		out_be64(&node_iic->iic_ir,
			 (1 << 12)		/* priority */ |
			 (node << 4)		/* dest node */ |
			 IIC_UNIT_THREAD_0	/* route them to thread 0 */);
		/* Flush pending (make sure it triggers if there is
		 * anything pending
		 */
		out_be64(&node_iic->iic_is, 0xfffffffffffffffful);
	}

	if (found)
		return 0;
	else
		return -ENODEV;
}
384
/*
 * Top-level IIC initialization: create the irq host, probe the
 * controllers, install the platform get_irq hook and enable
 * delivery on the boot CPU. Panics on failure since the machine
 * cannot take interrupts without its IIC.
 */
void __init iic_init_IRQ(void)
{
	/* Setup an irq host data structure */
	iic_host = irq_alloc_host(NULL, IRQ_HOST_MAP_LINEAR, IIC_SOURCE_COUNT,
				  &iic_host_ops, IIC_IRQ_INVALID);
	BUG_ON(iic_host == NULL);
	irq_set_default_host(iic_host);

	/* Discover and initialize iics */
	if (setup_iic() < 0)
		panic("IIC: Failed to initialize !\n");

	/* Set master interrupt handling function */
	ppc_md.get_irq = iic_get_irq;

	/* Enable on current CPU */
	iic_setup_cpu();
}
403
iic_set_interrupt_routing(int cpu,int thread,int priority)404 void iic_set_interrupt_routing(int cpu, int thread, int priority)
405 {
406 struct cbe_iic_regs __iomem *iic_regs = cbe_get_cpu_iic_regs(cpu);
407 u64 iic_ir = 0;
408 int node = cpu >> 1;
409
410 /* Set which node and thread will handle the next interrupt */
411 iic_ir |= CBE_IIC_IR_PRIO(priority) |
412 CBE_IIC_IR_DEST_NODE(node);
413 if (thread == 0)
414 iic_ir |= CBE_IIC_IR_DEST_UNIT(CBE_IIC_IR_PT_0);
415 else
416 iic_ir |= CBE_IIC_IR_DEST_UNIT(CBE_IIC_IR_PT_1);
417 out_be64(&iic_regs->iic_ir, iic_ir);
418 }
419