1 /*
2  * Set up the interrupt priorities
3  *
4  * Copyright  2004-2009 Analog Devices Inc.
5  *                 2003 Bas Vermeulen <bas@buyways.nl>
6  *                 2002 Arcturus Networks Inc. MaTed <mated@sympatico.ca>
7  *            2000-2001 Lineo, Inc. D. Jefff Dionne <jeff@lineo.ca>
8  *                 1999 D. Jeff Dionne <jeff@uclinux.org>
9  *                 1996 Roman Zippel
10  *
11  * Licensed under the GPL-2
12  */
13 
14 #include <linux/module.h>
15 #include <linux/kernel_stat.h>
16 #include <linux/seq_file.h>
17 #include <linux/irq.h>
18 #include <linux/sched.h>
19 #ifdef CONFIG_IPIPE
20 #include <linux/ipipe.h>
21 #endif
22 #ifdef CONFIG_KGDB
23 #include <linux/kgdb.h>
24 #endif
25 #include <asm/traps.h>
26 #include <asm/blackfin.h>
27 #include <asm/gpio.h>
28 #include <asm/irq_handler.h>
29 #include <asm/dpmc.h>
30 #include <asm/bfin5xx_spi.h>
31 #include <asm/bfin_sport.h>
32 #include <asm/bfin_can.h>
33 
34 #define SIC_SYSIRQ(irq)	(irq - (IRQ_CORETMR + 1))
35 
36 #ifdef BF537_FAMILY
37 # define BF537_GENERIC_ERROR_INT_DEMUX
38 # define SPI_ERR_MASK   (BIT_STAT_TXCOL | BIT_STAT_RBSY | BIT_STAT_MODF | BIT_STAT_TXE)	/* SPI_STAT */
39 # define SPORT_ERR_MASK (ROVF | RUVF | TOVF | TUVF)	/* SPORT_STAT */
40 # define PPI_ERR_MASK   (0xFFFF & ~FLD)	/* PPI_STATUS */
41 # define EMAC_ERR_MASK  (PHYINT | MMCINT | RXFSINT | TXFSINT | WAKEDET | RXDMAERR | TXDMAERR | STMDONE)	/* EMAC_SYSTAT */
42 # define UART_ERR_MASK  (0x6)	/* UART_IIR */
43 # define CAN_ERR_MASK   (EWTIF | EWRIF | EPIF | BOIF | WUIF | UIAIF | AAIF | RMLIF | UCEIF | EXTIF | ADIF)	/* CAN_GIF */
44 #else
45 # undef BF537_GENERIC_ERROR_INT_DEMUX
46 #endif
47 
48 /*
49  * NOTES:
50  * - we have separated the physical Hardware interrupt from the
51  * levels that the LINUX kernel sees (see the description in irq.h)
52  * -
53  */
54 
55 #ifndef CONFIG_SMP
56 /* Initialize this to an actual value to force it into the .data
57  * section so that we know it is properly initialized at entry into
58  * the kernel but before bss is initialized to zero (which is where
59  * it would live otherwise).  The 0x1f magic represents the IRQs we
60  * cannot actually mask out in hardware.
61  */
62 unsigned long bfin_irq_flags = 0x1f;
63 EXPORT_SYMBOL(bfin_irq_flags);
64 #endif
65 
66 /* The number of spurious interrupts */
67 atomic_t num_spurious;
68 
69 #ifdef CONFIG_PM
70 unsigned long bfin_sic_iwr[3];	/* Up to 3 SIC_IWRx registers */
71 unsigned vr_wakeup;
72 #endif
73 
/* One entry per peripheral interrupt: maps it to its Linux irq number
 * and to its status bit in SIC_ISR.  Filled in by search_IAR() at boot.
 */
struct ivgx {
	/* irq number for request_irq, available in mach-bf5xx/irq.h */
	unsigned int irqno;
	/* corresponding bit in the SIC_ISR register */
	unsigned int isrflag;
} ivg_table[NR_PERI_INTS];

/* Contiguous slice of ivg_table belonging to one core priority level
 * (IVG7..IVG13); istop points one past the last entry of the slice.
 */
struct ivg_slice {
	/* position of first irq in ivg_table for given ivg */
	struct ivgx *ifirst;
	struct ivgx *istop;
} ivg7_13[IVG13 - IVG7 + 1];
86 
87 
88 /*
89  * Search SIC_IAR and fill tables with the irqvalues
90  * and their positions in the SIC_ISR register.
91  */
static void __init search_IAR(void)
{
	unsigned ivg, irq_pos = 0;
	/* For each core priority level, collect (in order) the peripheral
	 * interrupts that the SIC_IAR registers assign to that level.
	 */
	for (ivg = 0; ivg <= IVG13 - IVG7; ivg++) {
		int irqN;

		ivg7_13[ivg].istop = ivg7_13[ivg].ifirst = &ivg_table[irq_pos];

		/* Each 32-bit SIC_IAR register holds eight 4-bit IVG
		 * assignments; read one register per 8 irqs, inspect 4
		 * nibbles per inner pass.
		 */
		for (irqN = 0; irqN < NR_PERI_INTS; irqN += 4) {
			int irqn;
			u32 iar = bfin_read32((unsigned long *)SIC_IAR0 +
#if defined(CONFIG_BF51x) || defined(CONFIG_BF52x) || \
	defined(CONFIG_BF538) || defined(CONFIG_BF539)
				/* these parts have a register-address gap between
				 * SIC_IAR3 and SIC_IAR4 -- index each 32-irq group
				 * separately
				 */
				((irqN % 32) >> 3) + ((irqN / 32) * ((SIC_IAR4 - SIC_IAR0) / 4))
#else
				(irqN >> 3)
#endif
				);

			for (irqn = irqN; irqn < irqN + 4; ++irqn) {
				int iar_shift = (irqn & 7) * 4;
				if (ivg == (0xf & (iar >> iar_shift))) {
					ivg_table[irq_pos].irqno = IVG7 + irqn;
					ivg_table[irq_pos].isrflag = 1 << (irqn % 32);
					ivg7_13[ivg].istop++;
					irq_pos++;
				}
			}
		}
	}
}
123 
124 /*
125  * This is for core internal IRQs
126  */
127 
/* No-op .irq_ack for chips whose interrupts need no explicit ack. */
static void bfin_ack_noop(struct irq_data *d)
{
	/* Dummy function.  */
}
132 
/* Mask a core event: clear its bit in the software IMASK shadow and,
 * if interrupts are live, push the shadow into hardware via STI.
 */
static void bfin_core_mask_irq(struct irq_data *d)
{
	bfin_irq_flags &= ~(1 << d->irq);
	if (!hard_irqs_disabled())
		hard_local_irq_enable();
}
139 
bfin_core_unmask_irq(struct irq_data * d)140 static void bfin_core_unmask_irq(struct irq_data *d)
141 {
142 	bfin_irq_flags |= 1 << d->irq;
143 	/*
144 	 * If interrupts are enabled, IMASK must contain the same value
145 	 * as bfin_irq_flags.  Make sure that invariant holds.  If interrupts
146 	 * are currently disabled we need not do anything; one of the
147 	 * callers will take care of setting IMASK to the proper value
148 	 * when reenabling interrupts.
149 	 * local_irq_enable just does "STI bfin_irq_flags", so it's exactly
150 	 * what we need.
151 	 */
152 	if (!hard_irqs_disabled())
153 		hard_local_irq_enable();
154 	return;
155 }
156 
/* Mask a system (SIC) interrupt by clearing its SIC_IMASK bit.
 * On multi-bank parts the bank/bit are derived from the SIC irq number;
 * on SMP the second core's SICB_IMASK is cleared as well.
 */
static void bfin_internal_mask_irq(unsigned int irq)
{
	unsigned long flags;

#ifdef CONFIG_BF53x
	flags = hard_local_irq_save();
	bfin_write_SIC_IMASK(bfin_read_SIC_IMASK() &
			     ~(1 << SIC_SYSIRQ(irq)));
#else
	unsigned mask_bank, mask_bit;
	flags = hard_local_irq_save();
	mask_bank = SIC_SYSIRQ(irq) / 32;
	mask_bit = SIC_SYSIRQ(irq) % 32;
	bfin_write_SIC_IMASK(mask_bank, bfin_read_SIC_IMASK(mask_bank) &
			     ~(1 << mask_bit));
#ifdef CONFIG_SMP
	bfin_write_SICB_IMASK(mask_bank, bfin_read_SICB_IMASK(mask_bank) &
			     ~(1 << mask_bit));
#endif
#endif
	hard_local_irq_restore(flags);
}
179 
/* irq_chip adapter: unwrap irq_data for bfin_internal_mask_irq(). */
static void bfin_internal_mask_irq_chip(struct irq_data *d)
{
	bfin_internal_mask_irq(d->irq);
}
184 
#ifdef CONFIG_SMP
/* Unmask a system (SIC) interrupt.  On SMP the affinity mask selects
 * which core's SIC/SICB IMASK bank(s) get the bit set; on UP there is
 * only the one SIC_IMASK.
 */
static void bfin_internal_unmask_irq_affinity(unsigned int irq,
		const struct cpumask *affinity)
#else
static void bfin_internal_unmask_irq(unsigned int irq)
#endif
{
	unsigned long flags;

#ifdef CONFIG_BF53x
	flags = hard_local_irq_save();
	bfin_write_SIC_IMASK(bfin_read_SIC_IMASK() |
			     (1 << SIC_SYSIRQ(irq)));
#else
	unsigned mask_bank, mask_bit;
	flags = hard_local_irq_save();
	mask_bank = SIC_SYSIRQ(irq) / 32;
	mask_bit = SIC_SYSIRQ(irq) % 32;
#ifdef CONFIG_SMP
	if (cpumask_test_cpu(0, affinity))
#endif
		bfin_write_SIC_IMASK(mask_bank,
			bfin_read_SIC_IMASK(mask_bank) |
			(1 << mask_bit));
#ifdef CONFIG_SMP
	if (cpumask_test_cpu(1, affinity))
		bfin_write_SICB_IMASK(mask_bank,
			bfin_read_SICB_IMASK(mask_bank) |
			(1 << mask_bit));
#endif
#endif
	hard_local_irq_restore(flags);
}
218 
#ifdef CONFIG_SMP
/* irq_chip adapter: unmask using the irq's current affinity mask. */
static void bfin_internal_unmask_irq_chip(struct irq_data *d)
{
	bfin_internal_unmask_irq_affinity(d->irq, d->affinity);
}

/* Move an irq to a new CPU set: mask everywhere, then unmask only on
 * the cores selected by @mask.
 */
static int bfin_internal_set_affinity(struct irq_data *d,
				      const struct cpumask *mask, bool force)
{
	bfin_internal_mask_irq(d->irq);
	bfin_internal_unmask_irq_affinity(d->irq, mask);

	return 0;
}
#else
/* irq_chip adapter: unwrap irq_data for bfin_internal_unmask_irq(). */
static void bfin_internal_unmask_irq_chip(struct irq_data *d)
{
	bfin_internal_unmask_irq(d->irq);
}
#endif
239 
240 #ifdef CONFIG_PM
/* Configure a SIC irq as a wakeup source for suspend.
 * Records the bit in the bfin_sic_iwr[] shadow (applied to SIC_IWRx at
 * suspend time) and accumulates any voltage-regulator wakeup flag the
 * irq needs (RTC/CAN/USB/rotary) in vr_wakeup.  Always returns 0.
 */
int bfin_internal_set_wake(unsigned int irq, unsigned int state)
{
	u32 bank, bit, wakeup = 0;
	unsigned long flags;
	bank = SIC_SYSIRQ(irq) / 32;
	bit = SIC_SYSIRQ(irq) % 32;

	/* Some wakeup sources also need a VR_CTL wakeup enable bit. */
	switch (irq) {
#ifdef IRQ_RTC
	case IRQ_RTC:
	wakeup |= WAKE;
	break;
#endif
#ifdef IRQ_CAN0_RX
	case IRQ_CAN0_RX:
	wakeup |= CANWE;
	break;
#endif
#ifdef IRQ_CAN1_RX
	case IRQ_CAN1_RX:
	wakeup |= CANWE;
	break;
#endif
#ifdef IRQ_USB_INT0
	case IRQ_USB_INT0:
	wakeup |= USBWE;
	break;
#endif
#ifdef CONFIG_BF54x
	case IRQ_CNT:
	wakeup |= ROTWE;
	break;
#endif
	default:
	break;
	}

	flags = hard_local_irq_save();

	if (state) {
		bfin_sic_iwr[bank] |= (1 << bit);
		vr_wakeup  |= wakeup;

	} else {
		bfin_sic_iwr[bank] &= ~(1 << bit);
		vr_wakeup  &= ~wakeup;
	}

	hard_local_irq_restore(flags);

	return 0;
}
293 
/* irq_chip adapter: unwrap irq_data for bfin_internal_set_wake(). */
static int bfin_internal_set_wake_chip(struct irq_data *d, unsigned int state)
{
	return bfin_internal_set_wake(d->irq, state);
}
298 #endif
299 
/* Chip for core events (IVG0..IVG13) masked via the core IMASK. */
static struct irq_chip bfin_core_irqchip = {
	.name = "CORE",
	.irq_ack = bfin_ack_noop,
	.irq_mask = bfin_core_mask_irq,
	.irq_unmask = bfin_core_unmask_irq,
};

/* Chip for peripheral interrupts masked via the SIC_IMASK bank(s). */
static struct irq_chip bfin_internal_irqchip = {
	.name = "INTN",
	.irq_ack = bfin_ack_noop,
	.irq_mask = bfin_internal_mask_irq_chip,
	.irq_unmask = bfin_internal_unmask_irq_chip,
	.irq_mask_ack = bfin_internal_mask_irq_chip,
	.irq_disable = bfin_internal_mask_irq_chip,
	.irq_enable = bfin_internal_unmask_irq_chip,
#ifdef CONFIG_SMP
	.irq_set_affinity = bfin_internal_set_affinity,
#endif
#ifdef CONFIG_PM
	.irq_set_wake = bfin_internal_set_wake_chip,
#endif
};
322 
/* Deliver a demuxed irq: route it through the I-pipe when enabled,
 * otherwise hand it straight to the generic irq layer.
 */
static void bfin_handle_irq(unsigned irq)
{
#ifdef CONFIG_IPIPE
	struct pt_regs regs;    /* Contents not used. */
	ipipe_trace_irq_entry(irq);
	__ipipe_handle_irq(irq, &regs);
	ipipe_trace_irq_exit(irq);
#else /* !CONFIG_IPIPE */
	generic_handle_irq(irq);
#endif  /* !CONFIG_IPIPE */
}
334 
335 #ifdef BF537_GENERIC_ERROR_INT_DEMUX
336 static int error_int_mask;
337 
/* Mask one demuxed error irq; when no error irqs remain enabled,
 * mask the shared parent IRQ_GENERIC_ERROR line in the SIC.
 */
static void bfin_generic_error_mask_irq(struct irq_data *d)
{
	error_int_mask &= ~(1L << (d->irq - IRQ_PPI_ERROR));
	if (!error_int_mask)
		bfin_internal_mask_irq(IRQ_GENERIC_ERROR);
}
344 
/* Unmask one demuxed error irq and ensure the shared parent line
 * IRQ_GENERIC_ERROR is enabled in the SIC.
 */
static void bfin_generic_error_unmask_irq(struct irq_data *d)
{
	bfin_internal_unmask_irq(IRQ_GENERIC_ERROR);
	error_int_mask |= 1L << (d->irq - IRQ_PPI_ERROR);
}
350 
/* Chip for the software-demuxed BF537-family peripheral error irqs. */
static struct irq_chip bfin_generic_error_irqchip = {
	.name = "ERROR",
	.irq_ack = bfin_ack_noop,
	.irq_mask_ack = bfin_generic_error_mask_irq,
	.irq_mask = bfin_generic_error_mask_irq,
	.irq_unmask = bfin_generic_error_unmask_irq,
};
358 
/* Demux the shared BF537-family "generic error" interrupt: poll each
 * peripheral's status register in fixed priority order, dispatch the
 * first asserted source, and write-to-clear the status of any source
 * whose demuxed irq is currently masked (so the line can deassert).
 */
static void bfin_demux_error_irq(unsigned int int_err_irq,
				 struct irq_desc *inta_desc)
{
	int irq = 0;

#if (defined(CONFIG_BF537) || defined(CONFIG_BF536))
	if (bfin_read_EMAC_SYSTAT() & EMAC_ERR_MASK)
		irq = IRQ_MAC_ERROR;
	else
#endif
	if (bfin_read_SPORT0_STAT() & SPORT_ERR_MASK)
		irq = IRQ_SPORT0_ERROR;
	else if (bfin_read_SPORT1_STAT() & SPORT_ERR_MASK)
		irq = IRQ_SPORT1_ERROR;
	else if (bfin_read_PPI_STATUS() & PPI_ERR_MASK)
		irq = IRQ_PPI_ERROR;
	else if (bfin_read_CAN_GIF() & CAN_ERR_MASK)
		irq = IRQ_CAN_ERROR;
	else if (bfin_read_SPI_STAT() & SPI_ERR_MASK)
		irq = IRQ_SPI_ERROR;
	else if ((bfin_read_UART0_IIR() & UART_ERR_MASK) == UART_ERR_MASK)
		irq = IRQ_UART0_ERROR;
	else if ((bfin_read_UART1_IIR() & UART_ERR_MASK) == UART_ERR_MASK)
		irq = IRQ_UART1_ERROR;

	if (irq) {
		if (error_int_mask & (1L << (irq - IRQ_PPI_ERROR)))
			bfin_handle_irq(irq);
		else {
			/* Demuxed irq is masked: ack the status bits at the
			 * peripheral directly so the shared line releases.
			 */
			switch (irq) {
			case IRQ_PPI_ERROR:
				bfin_write_PPI_STATUS(PPI_ERR_MASK);
				break;
#if (defined(CONFIG_BF537) || defined(CONFIG_BF536))
			case IRQ_MAC_ERROR:
				bfin_write_EMAC_SYSTAT(EMAC_ERR_MASK);
				break;
#endif
			case IRQ_SPORT0_ERROR:
				bfin_write_SPORT0_STAT(SPORT_ERR_MASK);
				break;

			case IRQ_SPORT1_ERROR:
				bfin_write_SPORT1_STAT(SPORT_ERR_MASK);
				break;

			case IRQ_CAN_ERROR:
				bfin_write_CAN_GIS(CAN_ERR_MASK);
				break;

			case IRQ_SPI_ERROR:
				bfin_write_SPI_STAT(SPI_ERR_MASK);
				break;

			default:
				break;
			}

			pr_debug("IRQ %d:"
				 " MASKED PERIPHERAL ERROR INTERRUPT ASSERTED\n",
				 irq);
		}
	} else
		printk(KERN_ERR
		       "%s : %s : LINE %d :\nIRQ ?: PERIPHERAL ERROR"
		       " INTERRUPT ASSERTED BUT NO SOURCE FOUND\n",
		       __func__, __FILE__, __LINE__);

}
429 #endif				/* BF537_GENERIC_ERROR_INT_DEMUX */
430 
431 #if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
432 static int mac_stat_int_mask;
433 
/* Acknowledge a demuxed EMAC status irq by clearing its sticky/status
 * bits in the relevant EMAC register (most are write-1-to-clear).
 */
static void bfin_mac_status_ack_irq(unsigned int irq)
{
	switch (irq) {
	case IRQ_MAC_MMCINT:
		/* clear only the MMC counter irqs that are enabled */
		bfin_write_EMAC_MMC_TIRQS(
			bfin_read_EMAC_MMC_TIRQE() &
			bfin_read_EMAC_MMC_TIRQS());
		bfin_write_EMAC_MMC_RIRQS(
			bfin_read_EMAC_MMC_RIRQE() &
			bfin_read_EMAC_MMC_RIRQS());
		break;
	case IRQ_MAC_RXFSINT:
		bfin_write_EMAC_RX_STKY(
			bfin_read_EMAC_RX_IRQE() &
			bfin_read_EMAC_RX_STKY());
		break;
	case IRQ_MAC_TXFSINT:
		bfin_write_EMAC_TX_STKY(
			bfin_read_EMAC_TX_IRQE() &
			bfin_read_EMAC_TX_STKY());
		break;
	case IRQ_MAC_WAKEDET:
		 bfin_write_EMAC_WKUP_CTL(
			bfin_read_EMAC_WKUP_CTL() | MPKS | RWKS);
		break;
	default:
		/* These bits are W1C */
		bfin_write_EMAC_SYSTAT(1L << (irq - IRQ_MAC_PHYINT));
		break;
	}
}
465 
/* Mask a demuxed EMAC status irq.  With the BF537 generic-error demux
 * the PHY interrupt is gated at EMAC_SYSCTL; otherwise the shared
 * parent IRQ_MAC_ERROR is masked once no status irqs remain enabled.
 * The source is acked afterwards so a pending event cannot re-fire.
 */
static void bfin_mac_status_mask_irq(struct irq_data *d)
{
	unsigned int irq = d->irq;

	mac_stat_int_mask &= ~(1L << (irq - IRQ_MAC_PHYINT));
#ifdef BF537_GENERIC_ERROR_INT_DEMUX
	switch (irq) {
	case IRQ_MAC_PHYINT:
		bfin_write_EMAC_SYSCTL(bfin_read_EMAC_SYSCTL() & ~PHYIE);
		break;
	default:
		break;
	}
#else
	if (!mac_stat_int_mask)
		bfin_internal_mask_irq(IRQ_MAC_ERROR);
#endif
	bfin_mac_status_ack_irq(irq);
}
485 
/* Unmask a demuxed EMAC status irq (mirror of the mask path: enable
 * PHYIE for the PHY irq under the BF537 demux, or re-enable the shared
 * parent IRQ_MAC_ERROR if this is the first status irq enabled).
 */
static void bfin_mac_status_unmask_irq(struct irq_data *d)
{
	unsigned int irq = d->irq;

#ifdef BF537_GENERIC_ERROR_INT_DEMUX
	switch (irq) {
	case IRQ_MAC_PHYINT:
		bfin_write_EMAC_SYSCTL(bfin_read_EMAC_SYSCTL() | PHYIE);
		break;
	default:
		break;
	}
#else
	if (!mac_stat_int_mask)
		bfin_internal_unmask_irq(IRQ_MAC_ERROR);
#endif
	mac_stat_int_mask |= 1L << (irq - IRQ_MAC_PHYINT);
}
504 
505 #ifdef CONFIG_PM
/* Route wakeup configuration to whichever shared SIC line actually
 * carries the MAC status irqs on this part.
 */
int bfin_mac_status_set_wake(struct irq_data *d, unsigned int state)
{
#ifdef BF537_GENERIC_ERROR_INT_DEMUX
	return bfin_internal_set_wake(IRQ_GENERIC_ERROR, state);
#else
	return bfin_internal_set_wake(IRQ_MAC_ERROR, state);
#endif
}
514 #endif
515 
/* Chip for the software-demuxed EMAC status interrupts. */
static struct irq_chip bfin_mac_status_irqchip = {
	.name = "MACST",
	.irq_ack = bfin_ack_noop,
	.irq_mask_ack = bfin_mac_status_mask_irq,
	.irq_mask = bfin_mac_status_mask_irq,
	.irq_unmask = bfin_mac_status_unmask_irq,
#ifdef CONFIG_PM
	.irq_set_wake = bfin_mac_status_set_wake,
#endif
};
526 
/* Demux the shared EMAC status interrupt: find the lowest asserted
 * bit in EMAC_SYSTAT, dispatch it if its demuxed irq is enabled, or
 * just ack it at the hardware if it is masked.
 */
static void bfin_demux_mac_status_irq(unsigned int int_err_irq,
				 struct irq_desc *inta_desc)
{
	int i, irq = 0;
	u32 status = bfin_read_EMAC_SYSTAT();

	for (i = 0; i <= (IRQ_MAC_STMDONE - IRQ_MAC_PHYINT); i++)
		if (status & (1L << i)) {
			irq = IRQ_MAC_PHYINT + i;
			break;
		}

	if (irq) {
		if (mac_stat_int_mask & (1L << (irq - IRQ_MAC_PHYINT))) {
			bfin_handle_irq(irq);
		} else {
			bfin_mac_status_ack_irq(irq);
			pr_debug("IRQ %d:"
				 " MASKED MAC ERROR INTERRUPT ASSERTED\n",
				 irq);
		}
	} else
		printk(KERN_ERR
		       "%s : %s : LINE %d :\nIRQ ?: MAC ERROR"
		       " INTERRUPT ASSERTED BUT NO SOURCE FOUND"
		       "(EMAC_SYSTAT=0x%X)\n",
		       __func__, __FILE__, __LINE__, status);
}
555 #endif
556 
/* Install a flow handler for @irq.  Under the I-pipe everything is
 * forced to handle_level_irq, overriding the caller's choice.
 */
static inline void bfin_set_irq_handler(unsigned irq, irq_flow_handler_t handle)
{
#ifdef CONFIG_IPIPE
	handle = handle_level_irq;
#endif
	__irq_set_handler_locked(irq, handle);
}
564 
565 static DECLARE_BITMAP(gpio_enabled, MAX_BLACKFIN_GPIOS);
566 extern void bfin_gpio_irq_prepare(unsigned gpio);
567 
568 #if !defined(CONFIG_BF54x)
569 
/* Ack an edge GPIO irq by clearing the latched GPIO data bit. */
static void bfin_gpio_ack_irq(struct irq_data *d)
{
	/* AFAIK ack_irq in case mask_ack is provided
	 * get's only called for edge sense irqs
	 */
	set_gpio_data(irq_to_gpio(d->irq), 0);
}

/* Ack (edge irqs only) and mask a GPIO irq in one step. */
static void bfin_gpio_mask_ack_irq(struct irq_data *d)
{
	unsigned int irq = d->irq;
	u32 gpionr = irq_to_gpio(irq);

	if (!irqd_is_level_type(d))
		set_gpio_data(gpionr, 0);

	set_gpio_maska(gpionr, 0);
}
588 
/* Mask a GPIO irq via its MASKA enable bit. */
static void bfin_gpio_mask_irq(struct irq_data *d)
{
	set_gpio_maska(irq_to_gpio(d->irq), 0);
}

/* Unmask a GPIO irq via its MASKA enable bit. */
static void bfin_gpio_unmask_irq(struct irq_data *d)
{
	set_gpio_maska(irq_to_gpio(d->irq), 1);
}
598 
/* First enable of a GPIO irq: claim/configure the pin for irq use if
 * this is the first time it is enabled, then unmask it.
 */
static unsigned int bfin_gpio_irq_startup(struct irq_data *d)
{
	u32 gpionr = irq_to_gpio(d->irq);

	if (__test_and_set_bit(gpionr, gpio_enabled))
		bfin_gpio_irq_prepare(gpionr);

	bfin_gpio_unmask_irq(d);

	return 0;
}
610 
/* Final disable of a GPIO irq: mask it and release the pin. */
static void bfin_gpio_irq_shutdown(struct irq_data *d)
{
	u32 gpionr = irq_to_gpio(d->irq);

	bfin_gpio_mask_irq(d);
	__clear_bit(gpionr, gpio_enabled);
	bfin_gpio_irq_free(gpionr);
}
619 
/* Configure the trigger type (edge/level, polarity) of a GPIO irq and
 * install the matching flow handler.  Returns 0 on success or the
 * error from claiming the pin.
 */
static int bfin_gpio_irq_type(struct irq_data *d, unsigned int type)
{
	unsigned int irq = d->irq;
	int ret;
	char buf[16];
	u32 gpionr = irq_to_gpio(irq);

	if (type == IRQ_TYPE_PROBE) {
		/* only probe unenabled GPIO interrupt lines */
		if (test_bit(gpionr, gpio_enabled))
			return 0;
		type = IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING;
	}

	if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING |
		    IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW)) {

		snprintf(buf, 16, "gpio-irq%d", irq);
		ret = bfin_gpio_irq_request(gpionr, buf);
		if (ret)
			return ret;

		if (__test_and_set_bit(gpionr, gpio_enabled))
			bfin_gpio_irq_prepare(gpionr);

	} else {
		/* no trigger requested: release our claim and bail */
		__clear_bit(gpionr, gpio_enabled);
		return 0;
	}

	/* quiesce the pin while reprogramming: input disable, dir=input */
	set_gpio_inen(gpionr, 0);
	set_gpio_dir(gpionr, 0);

	if ((type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING))
	    == (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING))
		set_gpio_both(gpionr, 1);
	else
		set_gpio_both(gpionr, 0);

	if ((type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_LEVEL_LOW)))
		set_gpio_polar(gpionr, 1);	/* low or falling edge denoted by one */
	else
		set_gpio_polar(gpionr, 0);	/* high or rising edge denoted by zero */

	if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING)) {
		set_gpio_edge(gpionr, 1);
		set_gpio_inen(gpionr, 1);
		/* clear any latched edge before enabling */
		set_gpio_data(gpionr, 0);

	} else {
		set_gpio_edge(gpionr, 0);
		set_gpio_inen(gpionr, 1);
	}

	if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING))
		bfin_set_irq_handler(irq, handle_edge_irq);
	else
		bfin_set_irq_handler(irq, handle_level_irq);

	return 0;
}
681 
682 #ifdef CONFIG_PM
/* Forward wakeup enable/disable to the GPIO PM wakeup control. */
int bfin_gpio_set_wake(struct irq_data *d, unsigned int state)
{
	return gpio_pm_wakeup_ctrl(irq_to_gpio(d->irq), state);
}
687 #endif
688 
/* Demux a bank GPIO interrupt: map the parent line to a base GPIO irq
 * (part-specific), then walk the pending-and-enabled bitmask of the
 * bank(s) and dispatch each set bit.  Only IRQ_PROG_INTA on BF53x
 * scans every bank ("search"); all other lines cover a single bank.
 */
static void bfin_demux_gpio_irq(unsigned int inta_irq,
				struct irq_desc *desc)
{
	unsigned int i, gpio, mask, irq, search = 0;

	switch (inta_irq) {
#if defined(CONFIG_BF53x)
	case IRQ_PROG_INTA:
		irq = IRQ_PF0;
		search = 1;
		break;
# if defined(BF537_FAMILY) && !(defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE))
	case IRQ_MAC_RX:
		irq = IRQ_PH0;
		break;
# endif
#elif defined(CONFIG_BF538) || defined(CONFIG_BF539)
	case IRQ_PORTF_INTA:
		irq = IRQ_PF0;
		break;
#elif defined(CONFIG_BF52x) || defined(CONFIG_BF51x)
	case IRQ_PORTF_INTA:
		irq = IRQ_PF0;
		break;
	case IRQ_PORTG_INTA:
		irq = IRQ_PG0;
		break;
	case IRQ_PORTH_INTA:
		irq = IRQ_PH0;
		break;
#elif defined(CONFIG_BF561)
	case IRQ_PROG0_INTA:
		irq = IRQ_PF0;
		break;
	case IRQ_PROG1_INTA:
		irq = IRQ_PF16;
		break;
	case IRQ_PROG2_INTA:
		irq = IRQ_PF32;
		break;
#endif
	default:
		BUG();
		return;
	}

	if (search) {
		/* NOTE(review): irq is both advanced by the inner loop and
		 * bumped by "irq += i" per bank -- the resulting irq numbers
		 * depend on that interplay; verify against the irq map
		 * before touching this loop.
		 */
		for (i = 0; i < MAX_BLACKFIN_GPIOS; i += GPIO_BANKSIZE) {
			irq += i;

			mask = get_gpiop_data(i) & get_gpiop_maska(i);

			while (mask) {
				if (mask & 1)
					bfin_handle_irq(irq);
				irq++;
				mask >>= 1;
			}
		}
	} else {
			gpio = irq_to_gpio(irq);
			mask = get_gpiop_data(gpio) & get_gpiop_maska(gpio);

			do {
				if (mask & 1)
					bfin_handle_irq(irq);
				irq++;
				mask >>= 1;
			} while (mask);
	}

}
761 
762 #else				/* CONFIG_BF54x */
763 
764 #define NR_PINT_SYS_IRQS	4
765 #define NR_PINT_BITS		32
766 #define NR_PINTS		160
767 #define IRQ_NOT_AVAIL		0xFF
768 
769 #define PINT_2_BANK(x)		((x) >> 5)
770 #define PINT_2_BIT(x)		((x) & 0x1F)
771 #define PINT_BIT(x)		(1 << (PINT_2_BIT(x)))
772 
/* Lookup tables built by init_pint_lut(): GPIO irq <-> PINT bank/bit. */
static unsigned char irq2pint_lut[NR_PINTS];
static unsigned char pint2irq_lut[NR_PINT_SYS_IRQS * NR_PINT_BITS];

/* Memory-mapped register layout of one BF54x PINTx block, starting at
 * its MASK_SET register.
 */
struct pin_int_t {
	unsigned int mask_set;
	unsigned int mask_clear;
	unsigned int request;
	unsigned int assign;
	unsigned int edge_set;
	unsigned int edge_clear;
	unsigned int invert_set;
	unsigned int invert_clear;
	unsigned int pinstate;
	unsigned int latch;
};

/* The four PINT blocks, addressed by bank number. */
static struct pin_int_t *pint[NR_PINT_SYS_IRQS] = {
	(struct pin_int_t *)PINT0_MASK_SET,
	(struct pin_int_t *)PINT1_MASK_SET,
	(struct pin_int_t *)PINT2_MASK_SET,
	(struct pin_int_t *)PINT3_MASK_SET,
};
795 
/* Translate a PINT bank and its byte-assignment value into the base
 * GPIO irq of the mapped half-port: banks 0-1 map into ports A/B,
 * banks 2-3 into ports C..J, at 16 irqs per assignment step.
 */
inline unsigned int get_irq_base(u32 bank, u8 bmap)
{
	unsigned int base = (bank < 2) ? IRQ_PA0 : IRQ_PC0;

	return base + bmap * 16;
}
808 
809 	/* Whenever PINTx_ASSIGN is altered init_pint_lut() must be executed! */
/* Build the irq<->PINT lookup tables from the current PINTx_ASSIGN
 * register values.  Must be re-run whenever any PINTx_ASSIGN changes.
 */
void init_pint_lut(void)
{
	u16 bank, bit, irq_base, bit_pos;
	u32 pint_assign;
	u8 bmap;

	memset(irq2pint_lut, IRQ_NOT_AVAIL, sizeof(irq2pint_lut));

	for (bank = 0; bank < NR_PINT_SYS_IRQS; bank++) {

		pint_assign = pint[bank]->assign;

		for (bit = 0; bit < NR_PINT_BITS; bit++) {

			/* each byte of ASSIGN maps 8 consecutive PINT bits */
			bmap = (pint_assign >> ((bit / 8) * 8)) & 0xFF;

			irq_base = get_irq_base(bank, bmap);

			/* odd byte-groups select the upper half-port */
			irq_base += (bit % 8) + ((bit / 8) & 1 ? 8 : 0);
			bit_pos = bit + bank * NR_PINT_BITS;

			pint2irq_lut[bit_pos] = irq_base - SYS_IRQS;
			irq2pint_lut[irq_base - SYS_IRQS] = bit_pos;
		}
	}
}
836 
/* Ack a BF54x PINT irq: for both-edge irqs flip the invert bit so the
 * opposite edge is caught next, then clear the latched request bit
 * (REQUEST is write-1-to-clear).
 */
static void bfin_gpio_ack_irq(struct irq_data *d)
{
	u32 pint_val = irq2pint_lut[d->irq - SYS_IRQS];
	u32 pintbit = PINT_BIT(pint_val);
	u32 bank = PINT_2_BANK(pint_val);

	if (irqd_get_trigger_type(d) == IRQ_TYPE_EDGE_BOTH) {
		if (pint[bank]->invert_set & pintbit)
			pint[bank]->invert_clear = pintbit;
		else
			pint[bank]->invert_set = pintbit;
	}
	pint[bank]->request = pintbit;

}
852 
/* Ack (with the same both-edge invert toggle as the plain ack) and
 * mask a BF54x PINT irq in one step.
 */
static void bfin_gpio_mask_ack_irq(struct irq_data *d)
{
	u32 pint_val = irq2pint_lut[d->irq - SYS_IRQS];
	u32 pintbit = PINT_BIT(pint_val);
	u32 bank = PINT_2_BANK(pint_val);

	if (irqd_get_trigger_type(d) == IRQ_TYPE_EDGE_BOTH) {
		if (pint[bank]->invert_set & pintbit)
			pint[bank]->invert_clear = pintbit;
		else
			pint[bank]->invert_set = pintbit;
	}

	pint[bank]->request = pintbit;
	pint[bank]->mask_clear = pintbit;
}
869 
/* Mask a BF54x PINT irq via the write-1-to-clear MASK register. */
static void bfin_gpio_mask_irq(struct irq_data *d)
{
	u32 pint_val = irq2pint_lut[d->irq - SYS_IRQS];

	pint[PINT_2_BANK(pint_val)]->mask_clear = PINT_BIT(pint_val);
}

/* Unmask a BF54x PINT irq via the write-1-to-set MASK register. */
static void bfin_gpio_unmask_irq(struct irq_data *d)
{
	u32 pint_val = irq2pint_lut[d->irq - SYS_IRQS];
	u32 pintbit = PINT_BIT(pint_val);
	u32 bank = PINT_2_BANK(pint_val);

	pint[bank]->mask_set = pintbit;
}
885 
/* First enable of a BF54x GPIO irq: verify the pin is mapped in the
 * PINT assign tables, claim/configure it on first use, and unmask it.
 * NOTE(review): returns -ENODEV through an unsigned int return type,
 * matching the irq_chip .irq_startup signature.
 */
static unsigned int bfin_gpio_irq_startup(struct irq_data *d)
{
	unsigned int irq = d->irq;
	u32 gpionr = irq_to_gpio(irq);
	u32 pint_val = irq2pint_lut[irq - SYS_IRQS];

	if (pint_val == IRQ_NOT_AVAIL) {
		printk(KERN_ERR
		"GPIO IRQ %d :Not in PINT Assign table "
		"Reconfigure Interrupt to Port Assignemt\n", irq);
		return -ENODEV;
	}

	if (__test_and_set_bit(gpionr, gpio_enabled))
		bfin_gpio_irq_prepare(gpionr);

	bfin_gpio_unmask_irq(d);

	return 0;
}
906 
/* Final disable of a BF54x GPIO irq: mask it and release the pin. */
static void bfin_gpio_irq_shutdown(struct irq_data *d)
{
	u32 gpionr = irq_to_gpio(d->irq);

	bfin_gpio_mask_irq(d);
	__clear_bit(gpionr, gpio_enabled);
	bfin_gpio_irq_free(gpionr);
}
915 
/* Configure the trigger type of a BF54x PINT-routed GPIO irq (edge or
 * level, polarity via the INVERT registers) and install the matching
 * flow handler.  Returns 0 on success, -ENODEV if the pin is not in
 * the PINT assign tables, or the error from claiming the pin.
 */
static int bfin_gpio_irq_type(struct irq_data *d, unsigned int type)
{
	unsigned int irq = d->irq;
	int ret;
	char buf[16];
	u32 gpionr = irq_to_gpio(irq);
	u32 pint_val = irq2pint_lut[irq - SYS_IRQS];
	u32 pintbit = PINT_BIT(pint_val);
	u32 bank = PINT_2_BANK(pint_val);

	if (pint_val == IRQ_NOT_AVAIL)
		return -ENODEV;

	if (type == IRQ_TYPE_PROBE) {
		/* only probe unenabled GPIO interrupt lines */
		if (test_bit(gpionr, gpio_enabled))
			return 0;
		type = IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING;
	}

	if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING |
		    IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW)) {

		snprintf(buf, 16, "gpio-irq%d", irq);
		ret = bfin_gpio_irq_request(gpionr, buf);
		if (ret)
			return ret;

		if (__test_and_set_bit(gpionr, gpio_enabled))
			bfin_gpio_irq_prepare(gpionr);

	} else {
		/* no trigger requested: release our claim and bail */
		__clear_bit(gpionr, gpio_enabled);
		return 0;
	}

	if ((type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_LEVEL_LOW)))
		pint[bank]->invert_set = pintbit;	/* low or falling edge denoted by one */
	else
		pint[bank]->invert_clear = pintbit;	/* high or rising edge denoted by zero */

	if ((type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING))
	    == (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING)) {
		/* both-edge emulation: start inverted relative to the
		 * current pin level; the ack toggles INVERT each edge
		 */
		if (gpio_get_value(gpionr))
			pint[bank]->invert_set = pintbit;
		else
			pint[bank]->invert_clear = pintbit;
	}

	if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING)) {
		pint[bank]->edge_set = pintbit;
		bfin_set_irq_handler(irq, handle_edge_irq);
	} else {
		pint[bank]->edge_clear = pintbit;
		bfin_set_irq_handler(irq, handle_level_irq);
	}

	return 0;
}
975 
976 #ifdef CONFIG_PM
977 u32 pint_saved_masks[NR_PINT_SYS_IRQS];
978 u32 pint_wakeup_masks[NR_PINT_SYS_IRQS];
979 
bfin_gpio_set_wake(struct irq_data * d,unsigned int state)980 int bfin_gpio_set_wake(struct irq_data *d, unsigned int state)
981 {
982 	u32 pint_irq;
983 	u32 pint_val = irq2pint_lut[d->irq - SYS_IRQS];
984 	u32 bank = PINT_2_BANK(pint_val);
985 	u32 pintbit = PINT_BIT(pint_val);
986 
987 	switch (bank) {
988 	case 0:
989 		pint_irq = IRQ_PINT0;
990 		break;
991 	case 2:
992 		pint_irq = IRQ_PINT2;
993 		break;
994 	case 3:
995 		pint_irq = IRQ_PINT3;
996 		break;
997 	case 1:
998 		pint_irq = IRQ_PINT1;
999 		break;
1000 	default:
1001 		return -EINVAL;
1002 	}
1003 
1004 	bfin_internal_set_wake(pint_irq, state);
1005 
1006 	if (state)
1007 		pint_wakeup_masks[bank] |= pintbit;
1008 	else
1009 		pint_wakeup_masks[bank] &= ~pintbit;
1010 
1011 	return 0;
1012 }
1013 
/* Suspend hook: save each PINT bank's current mask (a read of the
 * MASK_CLEAR address returns the mask) and, where it differs from the
 * wakeup mask, clear the current bits and enable only the wakeup bits.
 * Always returns 0.
 */
u32 bfin_pm_setup(void)
{
	u32 val, i;

	for (i = 0; i < NR_PINT_SYS_IRQS; i++) {
		val = pint[i]->mask_clear;
		pint_saved_masks[i] = val;
		if (val ^ pint_wakeup_masks[i]) {
			/* W1C: writing the current mask back clears it */
			pint[i]->mask_clear = val;
			pint[i]->mask_set = pint_wakeup_masks[i];
		}
	}

	return 0;
}
1029 
/* Resume hook: undo bfin_pm_setup() by restoring each bank's saved
 * mask where it differs from the wakeup mask.
 */
void bfin_pm_restore(void)
{
	u32 i, val;

	for (i = 0; i < NR_PINT_SYS_IRQS; i++) {
		val = pint_saved_masks[i];
		if (val ^ pint_wakeup_masks[i]) {
			/* NOTE(review): looks like a self-assignment, but
			 * MASK_CLEAR is presumably W1C -- reading it yields the
			 * active mask and writing that value back clears every
			 * active bit before mask_set reinstates the saved mask.
			 * Confirm against the BF54x HRM before changing.
			 */
			pint[i]->mask_clear = pint[i]->mask_clear;
			pint[i]->mask_set = val;
		}
	}
}
1042 #endif
1043 
/* Demux a BF54x PINT bank interrupt: walk the bank's REQUEST register
 * and dispatch the GPIO irq for every pending bit via pint2irq_lut.
 */
static void bfin_demux_gpio_irq(unsigned int inta_irq,
				struct irq_desc *desc)
{
	u32 bank, pint_val;
	u32 request, irq;

	switch (inta_irq) {
	case IRQ_PINT0:
		bank = 0;
		break;
	case IRQ_PINT2:
		bank = 2;
		break;
	case IRQ_PINT3:
		bank = 3;
		break;
	case IRQ_PINT1:
		bank = 1;
		break;
	default:
		return;
	}

	pint_val = bank * NR_PINT_BITS;

	request = pint[bank]->request;

	while (request) {
		if (request & 1) {
			irq = pint2irq_lut[pint_val] + SYS_IRQS;
			bfin_handle_irq(irq);
		}
		pint_val++;
		request >>= 1;
	}

}
1081 #endif
1082 
/* Chip for GPIO interrupts (both the non-BF54x MASKA-based and the
 * BF54x PINT-based handler variants bind here via the #ifdef above).
 */
static struct irq_chip bfin_gpio_irqchip = {
	.name = "GPIO",
	.irq_ack = bfin_gpio_ack_irq,
	.irq_mask = bfin_gpio_mask_irq,
	.irq_mask_ack = bfin_gpio_mask_ack_irq,
	.irq_unmask = bfin_gpio_unmask_irq,
	.irq_disable = bfin_gpio_mask_irq,
	.irq_enable = bfin_gpio_unmask_irq,
	.irq_set_type = bfin_gpio_irq_type,
	.irq_startup = bfin_gpio_irq_startup,
	.irq_shutdown = bfin_gpio_irq_shutdown,
#ifdef CONFIG_PM
	.irq_set_wake = bfin_gpio_set_wake,
#endif
};
1098 
/* Program the core event vector table (EVT2..EVT15) with this port's
 * handlers, then CSYNC to make sure the writes have landed before any
 * event can be taken.
 */
void __cpuinit init_exception_vectors(void)
{
	/* cannot program in software:
	 * evt0 - emulation (jtag)
	 * evt1 - reset
	 */
	bfin_write_EVT2(evt_nmi);
	bfin_write_EVT3(trap);
	bfin_write_EVT5(evt_ivhw);
	bfin_write_EVT6(evt_timer);
	bfin_write_EVT7(evt_evt7);
	bfin_write_EVT8(evt_evt8);
	bfin_write_EVT9(evt_evt9);
	bfin_write_EVT10(evt_evt10);
	bfin_write_EVT11(evt_evt11);
	bfin_write_EVT12(evt_evt12);
	bfin_write_EVT13(evt_evt13);
	bfin_write_EVT14(evt_evt14);
	bfin_write_EVT15(evt_system_call);
	CSYNC();
}
1120 
1121 /*
1122  * This function should be called during kernel startup to initialize
1123  * the BFin IRQ handling routines.
1124  */
1125 
/*
 * Arch-level IRQ bring-up, called once at boot: mask all peripheral
 * interrupts, set up the BF54x PINT lookup tables, attach irq_chips
 * and flow handlers for every system IRQ (with chained demuxers for
 * GPIO / error / MAC-status parents), clear latched core interrupts,
 * program the interrupt assignment registers, and finally configure
 * the system wakeup (IWR) registers.
 *
 * Returns 0 (callers ignore failure; there is no error path here).
 */
int __init init_arch_irq(void)
{
	int irq;
	unsigned long ilat = 0;
	/*  Disable all the peripheral intrs  - page 4-29 HW Ref manual */
#if defined(CONFIG_BF54x) || defined(CONFIG_BF52x) || defined(CONFIG_BF561) \
	|| defined(BF538_FAMILY) || defined(CONFIG_BF51x)
	bfin_write_SIC_IMASK0(SIC_UNMASK_ALL);
	bfin_write_SIC_IMASK1(SIC_UNMASK_ALL);
# ifdef CONFIG_BF54x
	bfin_write_SIC_IMASK2(SIC_UNMASK_ALL);
# endif
# ifdef CONFIG_SMP
	bfin_write_SICB_IMASK0(SIC_UNMASK_ALL);
	bfin_write_SICB_IMASK1(SIC_UNMASK_ALL);
# endif
#else
	bfin_write_SIC_IMASK(SIC_UNMASK_ALL);
#endif

	local_irq_disable();

#if (defined(CONFIG_BF537) || defined(CONFIG_BF536))
	/* Clear EMAC Interrupt Status bits so we can demux it later */
	bfin_write_EMAC_SYSTAT(-1);
#endif

#ifdef CONFIG_BF54x
# ifdef CONFIG_PINTx_REASSIGN
	/* Board config may re-route the port-to-PINT assignments. */
	pint[0]->assign = CONFIG_PINT0_ASSIGN;
	pint[1]->assign = CONFIG_PINT1_ASSIGN;
	pint[2]->assign = CONFIG_PINT2_ASSIGN;
	pint[3]->assign = CONFIG_PINT3_ASSIGN;
# endif
	/* Whenever PINTx_ASSIGN is altered init_pint_lut() must be executed! */
	init_pint_lut();
#endif

	/* Attach a chip and flow handler to every system IRQ.  The
	 * switch routes the part-specific demux parents to their
	 * chained handlers; everything else gets a plain handler.
	 */
	for (irq = 0; irq <= SYS_IRQS; irq++) {
		if (irq <= IRQ_CORETMR)
			irq_set_chip(irq, &bfin_core_irqchip);
		else
			irq_set_chip(irq, &bfin_internal_irqchip);

		switch (irq) {
#if defined(CONFIG_BF53x)
		case IRQ_PROG_INTA:
# if defined(BF537_FAMILY) && !(defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE))
		case IRQ_MAC_RX:
# endif
#elif defined(CONFIG_BF54x)
		case IRQ_PINT0:
		case IRQ_PINT1:
		case IRQ_PINT2:
		case IRQ_PINT3:
#elif defined(CONFIG_BF52x) || defined(CONFIG_BF51x)
		case IRQ_PORTF_INTA:
		case IRQ_PORTG_INTA:
		case IRQ_PORTH_INTA:
#elif defined(CONFIG_BF561)
		case IRQ_PROG0_INTA:
		case IRQ_PROG1_INTA:
		case IRQ_PROG2_INTA:
#elif defined(CONFIG_BF538) || defined(CONFIG_BF539)
		case IRQ_PORTF_INTA:
#endif
			irq_set_chained_handler(irq, bfin_demux_gpio_irq);
			break;
#ifdef BF537_GENERIC_ERROR_INT_DEMUX
		case IRQ_GENERIC_ERROR:
			irq_set_chained_handler(irq, bfin_demux_error_irq);
			break;
#endif
#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
		case IRQ_MAC_ERROR:
			irq_set_chained_handler(irq,
						bfin_demux_mac_status_irq);
			break;
#endif
#ifdef CONFIG_SMP
		case IRQ_SUPPLE_0:
		case IRQ_SUPPLE_1:
			irq_set_handler(irq, handle_percpu_irq);
			break;
#endif

#ifdef CONFIG_TICKSOURCE_CORETMR
		case IRQ_CORETMR:
# ifdef CONFIG_SMP
			irq_set_handler(irq, handle_percpu_irq);
			break;
# else
			irq_set_handler(irq, handle_simple_irq);
			break;
# endif
#endif

#ifdef CONFIG_TICKSOURCE_GPTMR0
		case IRQ_TIMER0:
			irq_set_handler(irq, handle_simple_irq);
			break;
#endif

#ifdef CONFIG_IPIPE
		default:
			irq_set_handler(irq, handle_level_irq);
			break;
#else /* !CONFIG_IPIPE */
		default:
			irq_set_handler(irq, handle_simple_irq);
			break;
#endif /* !CONFIG_IPIPE */
		}
	}

#ifdef BF537_GENERIC_ERROR_INT_DEMUX
	/* Sub-IRQs behind the shared BF537 "generic error" parent. */
	for (irq = IRQ_PPI_ERROR; irq <= IRQ_UART1_ERROR; irq++)
		irq_set_chip_and_handler(irq, &bfin_generic_error_irqchip,
					 handle_level_irq);
#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
	irq_set_chained_handler(IRQ_MAC_ERROR, bfin_demux_mac_status_irq);
#endif
#endif

#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
	/* Sub-IRQs behind the demuxed EMAC status parent. */
	for (irq = IRQ_MAC_PHYINT; irq <= IRQ_MAC_STMDONE; irq++)
		irq_set_chip_and_handler(irq, &bfin_mac_status_irqchip,
					 handle_level_irq);
#endif
	/* if configured as edge, then will be changed to do_edge_IRQ */
	for (irq = GPIO_IRQ_BASE;
		irq < (GPIO_IRQ_BASE + MAX_BLACKFIN_GPIOS); irq++)
		irq_set_chip_and_handler(irq, &bfin_gpio_irqchip,
					 handle_level_irq);

	/* Mask everything at the core, then clear any already-latched
	 * interrupts by writing ILAT back to itself (write-1-to-clear).
	 */
	bfin_write_IMASK(0);
	CSYNC();
	ilat = bfin_read_ILAT();
	CSYNC();
	bfin_write_ILAT(ilat);
	CSYNC();

	printk(KERN_INFO "Configuring Blackfin Priority Driven Interrupts\n");
	/* IMASK=xxx is equivalent to STI xx or bfin_irq_flags=xx,
	 * local_irq_enable()
	 */
	program_IAR();
	/* Therefore it's better to setup IARs before interrupts enabled */
	search_IAR();

	/* Enable interrupts IVG7-15 */
	bfin_irq_flags |= IMASK_IVG15 |
	    IMASK_IVG14 | IMASK_IVG13 | IMASK_IVG12 | IMASK_IVG11 |
	    IMASK_IVG10 | IMASK_IVG9 | IMASK_IVG8 | IMASK_IVG7 | IMASK_IVGHW;

	/* This implicitly covers ANOMALY_05000171
	 * Boot-ROM code modifies SICA_IWRx wakeup registers
	 */
#ifdef SIC_IWR0
	bfin_write_SIC_IWR0(IWR_DISABLE_ALL);
# ifdef SIC_IWR1
	/* BF52x/BF51x system reset does not properly reset SIC_IWR1 which
	 * will screw up the bootrom as it relies on MDMA0/1 waking it
	 * up from IDLE instructions.  See this report for more info:
	 * http://blackfin.uclinux.org/gf/tracker/4323
	 */
	if (ANOMALY_05000435)
		bfin_write_SIC_IWR1(IWR_ENABLE(10) | IWR_ENABLE(11));
	else
		bfin_write_SIC_IWR1(IWR_DISABLE_ALL);
# endif
# ifdef SIC_IWR2
	bfin_write_SIC_IWR2(IWR_DISABLE_ALL);
# endif
#else
	bfin_write_SIC_IWR(IWR_DISABLE_ALL);
#endif

	return 0;
}
1306 
#ifdef CONFIG_DO_IRQ_L1
__attribute__((l1_text))
#endif
/*
 * Low-level interrupt demux: translate the core event vector @vec
 * into a system IRQ number by scanning that priority level's table
 * entries against the pending-and-unmasked SIC status bits, then
 * hand the result to asm_do_IRQ().  Runs with interrupts disabled.
 */
void do_irq(int vec, struct pt_regs *fp)
{
	if (vec == EVT_IVTMR_P) {
		/* Core timer has a dedicated vector -- no demux needed. */
		vec = IRQ_CORETMR;
	} else {
		struct ivgx *ivg = ivg7_13[vec - IVG7].ifirst;
		struct ivgx *ivg_stop = ivg7_13[vec - IVG7].istop;
#if defined(SIC_ISR0)
		unsigned long sic_status[3];

		/* Core B reads its own SICB_* view of the controller. */
		if (smp_processor_id()) {
# ifdef SICB_ISR0
			/* This will be optimized out in UP mode. */
			sic_status[0] = bfin_read_SICB_ISR0() & bfin_read_SICB_IMASK0();
			sic_status[1] = bfin_read_SICB_ISR1() & bfin_read_SICB_IMASK1();
# endif
		} else {
			sic_status[0] = bfin_read_SIC_ISR0() & bfin_read_SIC_IMASK0();
			sic_status[1] = bfin_read_SIC_ISR1() & bfin_read_SIC_IMASK1();
		}
# ifdef SIC_ISR2
		sic_status[2] = bfin_read_SIC_ISR2() & bfin_read_SIC_IMASK2();
# endif
		/* Find the first entry at this level that is actually
		 * pending and unmasked; give up as spurious otherwise.
		 */
		for (;; ivg++) {
			if (ivg >= ivg_stop) {
				atomic_inc(&num_spurious);
				return;
			}
			if (sic_status[(ivg->irqno - IVG7) / 32] & ivg->isrflag)
				break;
		}
#else
		/* Single-register SIC variant of the same scan. */
		unsigned long sic_status;

		sic_status = bfin_read_SIC_IMASK() & bfin_read_SIC_ISR();

		for (;; ivg++) {
			if (ivg >= ivg_stop) {
				atomic_inc(&num_spurious);
				return;
			} else if (sic_status & ivg->isrflag)
				break;
		}
#endif
		vec = ivg->irqno;
	}
	asm_do_IRQ(vec, fp);
}
1358 
1359 #ifdef CONFIG_IPIPE
1360 
__ipipe_get_irq_priority(unsigned irq)1361 int __ipipe_get_irq_priority(unsigned irq)
1362 {
1363 	int ient, prio;
1364 
1365 	if (irq <= IRQ_CORETMR)
1366 		return irq;
1367 
1368 	for (ient = 0; ient < NR_PERI_INTS; ient++) {
1369 		struct ivgx *ivg = ivg_table + ient;
1370 		if (ivg->irqno == irq) {
1371 			for (prio = 0; prio <= IVG13-IVG7; prio++) {
1372 				if (ivg7_13[prio].ifirst <= ivg &&
1373 				    ivg7_13[prio].istop > ivg)
1374 					return IVG7 + prio;
1375 			}
1376 		}
1377 	}
1378 
1379 	return IVG15;
1380 }
1381 
/* Hw interrupts are disabled on entry (check SAVE_CONTEXT). */
#ifdef CONFIG_DO_IRQ_L1
__attribute__((l1_text))
#endif
/*
 * I-pipe entry point for hardware interrupts: demux @vec to an IRQ
 * (same SIC scan as do_irq), capture tick state for the root domain's
 * timer emulation, and feed the IRQ into the interrupt pipeline.
 * Returns non-zero when the root stage should sync its IRQ log on the
 * way out, 0 otherwise.
 */
asmlinkage int __ipipe_grab_irq(int vec, struct pt_regs *regs)
{
	struct ipipe_percpu_domain_data *p = ipipe_root_cpudom_ptr();
	struct ipipe_domain *this_domain = __ipipe_current_domain;
	struct ivgx *ivg_stop = ivg7_13[vec-IVG7].istop;
	struct ivgx *ivg = ivg7_13[vec-IVG7].ifirst;
	int irq, s = 0;

	if (likely(vec == EVT_IVTMR_P))
		irq = IRQ_CORETMR;
	else {
#if defined(SIC_ISR0)
		unsigned long sic_status[3];

		sic_status[0] = bfin_read_SIC_ISR0() & bfin_read_SIC_IMASK0();
		sic_status[1] = bfin_read_SIC_ISR1() & bfin_read_SIC_IMASK1();
# ifdef SIC_ISR2
		sic_status[2] = bfin_read_SIC_ISR2() & bfin_read_SIC_IMASK2();
# endif
		/* Scan this level for the first pending, unmasked entry. */
		for (;; ivg++) {
			if (ivg >= ivg_stop) {
				atomic_inc(&num_spurious);
				return 0;
			}
			if (sic_status[(ivg->irqno - IVG7) / 32] & ivg->isrflag)
				break;
		}
#else
		unsigned long sic_status;

		sic_status = bfin_read_SIC_IMASK() & bfin_read_SIC_ISR();

		for (;; ivg++) {
			if (ivg >= ivg_stop) {
				atomic_inc(&num_spurious);
				return 0;
			} else if (sic_status & ivg->isrflag)
				break;
		}
#endif
		irq = ivg->irqno;
	}

	if (irq == IRQ_SYSTMR) {
#if !defined(CONFIG_GENERIC_CLOCKEVENTS) || defined(CONFIG_TICKSOURCE_GPTMR0)
		bfin_write_TIMER_STATUS(1); /* Latch TIMIL0 */
#endif
		/* This is basically what we need from the register frame. */
		__raw_get_cpu_var(__ipipe_tick_regs).ipend = regs->ipend;
		__raw_get_cpu_var(__ipipe_tick_regs).pc = regs->pc;
		if (this_domain != ipipe_root_domain)
			__raw_get_cpu_var(__ipipe_tick_regs).ipend &= ~0x10;
		else
			__raw_get_cpu_var(__ipipe_tick_regs).ipend |= 0x10;
	}

	/*
	 * We don't want Linux interrupt handlers to run at the
	 * current core priority level (i.e. < EVT15), since this
	 * might delay other interrupts handled by a high priority
	 * domain. Here is what we do instead:
	 *
	 * - we raise the SYNCDEFER bit to prevent
	 * __ipipe_handle_irq() to sync the pipeline for the root
	 * stage for the incoming interrupt. Upon return, that IRQ is
	 * pending in the interrupt log.
	 *
	 * - we raise the TIF_IRQ_SYNC bit for the current thread, so
	 * that _schedule_and_signal_from_int will eventually sync the
	 * pipeline from EVT15.
	 */
	if (this_domain == ipipe_root_domain) {
		/* Remember whether SYNCDEFER was already set, so we only
		 * clear it below if we were the ones who raised it. */
		s = __test_and_set_bit(IPIPE_SYNCDEFER_FLAG, &p->status);
		barrier();
	}

	ipipe_trace_irq_entry(irq);
	__ipipe_handle_irq(irq, regs);
	ipipe_trace_irq_exit(irq);

	if (user_mode(regs) &&
	    !ipipe_test_foreign_stack() &&
	    (current->ipipe_flags & PF_EVTRET) != 0) {
		/*
		 * Testing for user_regs() does NOT fully eliminate
		 * foreign stack contexts, because of the forged
		 * interrupt returns we do through
		 * __ipipe_call_irqtail. In that case, we might have
		 * preempted a foreign stack context in a high
		 * priority domain, with a single interrupt level now
		 * pending after the irqtail unwinding is done. In
		 * which case user_mode() is now true, and the event
		 * gets dispatched spuriously.
		 */
		current->ipipe_flags &= ~PF_EVTRET;
		__ipipe_dispatch_event(IPIPE_EVENT_RETURN, regs);
	}

	if (this_domain == ipipe_root_domain) {
		set_thread_flag(TIF_IRQ_SYNC);
		if (!s) {
			__clear_bit(IPIPE_SYNCDEFER_FLAG, &p->status);
			return !test_bit(IPIPE_STALL_FLAG, &p->status);
		}
	}

	return 0;
}
1494 
1495 #endif /* CONFIG_IPIPE */
1496