/*
 * Platform dependent support for SGI SN
 *
 * Copyright (c) 2000-2003 Silicon Graphics, Inc. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * Further, this software is distributed without any warranty that it is
 * free of the rightful claim of any third person regarding infringement
 * or the like. Any license provided herein, whether implied or
 * otherwise, applies only to this software file. Patent licenses, if
 * any, provided herein do not apply to combinations of this program with
 * other software, or any other product whatsoever.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
 *
 * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
 * Mountain View, CA 94043, or:
 *
 * http://www.sgi.com
 *
 * For further information regarding this notice, see:
 *
 * http://oss.sgi.com/projects/GenInfo/NoticeExplan
 */

#include <linux/init.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <asm/current.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/sn/sgi.h>
#include <asm/sn/iograph.h>
#include <asm/sn/invent.h>
#include <linux/devfs_fs_kernel.h>
#include <asm/sn/hcl.h>
#include <asm/sn/types.h>
#include <asm/sn/pci/bridge.h>
#include <asm/sn/pci/pciio.h>
#include <asm/sn/pci/pciio_private.h>
#include <asm/sn/pci/pcibr.h>
#include <asm/sn/pci/pcibr_private.h>
#include <asm/sn/sn_cpuid.h>
#include <asm/sn/io.h>
#include <asm/sn/intr.h>
#include <asm/sn/addrs.h>
#include <asm/sn/driver.h>
#include <asm/sn/arch.h>
#include <asm/sn/pda.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/bitops.h>

int irq_to_bit_pos(int irq);
static void force_interrupt(int irq);
extern void pcibr_force_interrupt(pcibr_intr_t intr);
extern int sn_force_interrupt_flag;

struct pcibr_intr_list_t {
	struct pcibr_intr_list_t *next;
	pcibr_intr_t intr;
};

static struct pcibr_intr_list_t **pcibr_intr_list;

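/*
 * The hw_interrupt_type callbacks below are stubs: sn_startup_irq()
 * reports no pending interrupt and the shutdown/enable/disable hooks
 * do nothing.  Only ack, end and set_affinity do real work on SN.
 */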
static unsigned int
sn_startup_irq(unsigned int irq)
{
	return 0;
}

static void
sn_shutdown_irq(unsigned int irq)
{
}

static void
sn_disable_irq(unsigned int irq)
{
}

static void
sn_enable_irq(unsigned int irq)
{
}

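/*
 * Acknowledge an interrupt: gather the pending UART/IPI/II bits from
 * the SHub EVENT_OCCURRED register, clear them by writing the mask to
 * the EVENT_OCCURRED alias register, and mark the vector as in service
 * in this cpu's pda.
 */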
static void
sn_ack_irq(unsigned int irq)
{
	unsigned long event_occurred, mask = 0;
	int nasid;

	irq = irq & 0xff;
	nasid = smp_physical_node_id();
	event_occurred = HUB_L((unsigned long *)GLOBAL_MMR_ADDR(nasid, SH_EVENT_OCCURRED));
	if (event_occurred & SH_EVENT_OCCURRED_UART_INT_MASK) {
		mask |= (1 << SH_EVENT_OCCURRED_UART_INT_SHFT);
	}
	if (event_occurred & SH_EVENT_OCCURRED_IPI_INT_MASK) {
		mask |= (1 << SH_EVENT_OCCURRED_IPI_INT_SHFT);
	}
	if (event_occurred & SH_EVENT_OCCURRED_II_INT0_MASK) {
		mask |= (1 << SH_EVENT_OCCURRED_II_INT0_SHFT);
	}
	if (event_occurred & SH_EVENT_OCCURRED_II_INT1_MASK) {
		mask |= (1 << SH_EVENT_OCCURRED_II_INT1_SHFT);
	}
	HUB_S((unsigned long *)GLOBAL_MMR_ADDR(nasid, SH_EVENT_OCCURRED_ALIAS), mask);
	__set_bit(irq, (volatile void *)pda.sn_in_service_ivecs);
}

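/*
 * End-of-interrupt processing: if this was the UART vector and the UART
 * event bit is still set, IPI ourselves so the event is not lost, then
 * take the vector out of service.  When sn_force_interrupt_flag is set,
 * also re-trigger any bridge interrupts registered for this irq.
 */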
static void
sn_end_irq(unsigned int irq)
{
	int nasid;
	int ivec;
	unsigned long event_occurred;

	ivec = irq & 0xff;
	if (ivec == SGI_UART_VECTOR) {
		nasid = smp_physical_node_id();
		event_occurred = HUB_L((unsigned long *)GLOBAL_MMR_ADDR(nasid, SH_EVENT_OCCURRED));
		/*
		 * If the UART bit is set here, we may have received an
		 * interrupt from the UART that the driver missed.  To make
		 * sure, we IPI ourselves to force us to look again.
		 */
		if (event_occurred & SH_EVENT_OCCURRED_UART_INT_MASK) {
			platform_send_ipi(smp_processor_id(), SGI_UART_VECTOR, IA64_IPI_DM_INT, 0);
		}
	}
	clear_bit(ivec, (volatile void *)pda.sn_in_service_ivecs);
	if (sn_force_interrupt_flag)
		force_interrupt(irq);
}

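/*
 * Redirect an irq to a new cpu.  Looks up the first bridge interrupt
 * registered for this irq and, for PIC-based bridges, asks the SHub
 * code to retarget it; TIO redirection is not yet supported.
 */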
static void
sn_set_affinity_irq(unsigned int irq, unsigned long cpu)
{
	int redir = 0;
	struct pcibr_intr_list_t *p = pcibr_intr_list[irq];
	pcibr_intr_t intr;
	extern void sn_shub_redirect_intr(pcibr_intr_t intr, unsigned long cpu);
	extern void sn_tio_redirect_intr(pcibr_intr_t intr, unsigned long cpu);

	if (p == NULL)
		return;

	intr = p->intr;

	if (intr == NULL)
		return;

	if (IS_PIC_SOFT(intr->bi_soft)) {
		sn_shub_redirect_intr(intr, cpu);
	/*
	 * Defer TIO for now.
	 * } else if (IS_TIO_SOFT(intr->bi_soft)) {
	 *	sn_tio_redirect_intr(intr, cpu);
	 */
	} else {
		return;
	}
	(void) set_irq_affinity_info(irq, cpu_physical_id(cpu), redir);
}

struct hw_interrupt_type irq_type_sn = {
	"SN hub",
	sn_startup_irq,
	sn_shutdown_irq,
	sn_enable_irq,
	sn_disable_irq,
	sn_ack_irq,
	sn_end_irq,
	sn_set_affinity_irq
};

struct irq_desc *
sn_irq_desc(unsigned int irq)
{
	irq = SN_IVEC_FROM_IRQ(irq);

	return _irq_desc + irq;
}

u8
sn_irq_to_vector(u8 irq)
{
	return irq;
}

unsigned int
sn_local_vector_to_irq(u8 vector)
{
	return CPU_VECTOR_TO_IRQ(smp_processor_id(), vector);
}

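/*
 * Claim every irq that is still unowned (i.e. still points at
 * no_irq_type) for the SN hub interrupt type.
 */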
void
sn_irq_init(void)
{
	int i;
	irq_desc_t *base_desc = _irq_desc;

	for (i = 0; i < NR_IRQS; i++) {
		if (base_desc[i].handler == &no_irq_type) {
			base_desc[i].handler = &irq_type_sn;
		}
	}
}

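/*
 * Convert between SHub event bit positions and IA64 irq numbers.  The
 * two ranges are offset by 64; bit positions above 118 are clamped to
 * 118.
 */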
int
bit_pos_to_irq(int bit)
{
#define BIT_TO_IRQ 64
	if (bit > 118)
		bit = 118;

	return bit + BIT_TO_IRQ;
}

int
irq_to_bit_pos(int irq)
{
#define IRQ_TO_BIT 64
	int bit = irq - IRQ_TO_BIT;

	return bit;
}

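/*
 * Remember a bridge interrupt so it can be found again by irq number.
 * The pcibr_intr_list array is allocated on first use (falling back to
 * vmalloc if kmalloc fails), and the per-cpu first/last irq bounds used
 * by sn_lb_int_war_check() are updated for the target cpu.
 */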
void
register_pcibr_intr(int irq, pcibr_intr_t intr)
{
	struct pcibr_intr_list_t *p = kmalloc(sizeof(struct pcibr_intr_list_t), GFP_KERNEL);
	struct pcibr_intr_list_t *list;
	int cpu = intr->bi_cpu;

	if (pcibr_intr_list == NULL) {
		pcibr_intr_list = kmalloc(sizeof(struct pcibr_intr_list_t *) * NR_IRQS, GFP_KERNEL);
		if (pcibr_intr_list == NULL)
			pcibr_intr_list = vmalloc(sizeof(struct pcibr_intr_list_t *) * NR_IRQS);
		if (pcibr_intr_list == NULL)
			panic("Could not allocate memory for pcibr_intr_list\n");
		memset((void *)pcibr_intr_list, 0, sizeof(struct pcibr_intr_list_t *) * NR_IRQS);
	}
	if (pdacpu(cpu).sn_last_irq < irq) {
		pdacpu(cpu).sn_last_irq = irq;
	}
	if (pdacpu(cpu).sn_first_irq == 0 || pdacpu(cpu).sn_first_irq > irq)
		pdacpu(cpu).sn_first_irq = irq;
	if (!p)
		panic("Could not allocate memory for pcibr_intr_list_t\n");
	if ((list = pcibr_intr_list[irq])) {
		while (list->next)
			list = list->next;
		list->next = p;
		p->next = NULL;
		p->intr = intr;
	} else {
		pcibr_intr_list[irq] = p;
		p->next = NULL;
		p->intr = intr;
	}
}

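/*
 * Force (re-send) every registered bridge interrupt, across all irqs.
 */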
void
force_polled_int(void)
{
	int i;
	struct pcibr_intr_list_t *p;

	for (i = 0; i < NR_IRQS; i++) {
		p = pcibr_intr_list[i];
		while (p) {
			if (p->intr) {
				pcibr_force_interrupt(p->intr);
			}
			p = p->next;
		}
	}
}

static void
force_interrupt(int irq)
{
	struct pcibr_intr_list_t *p = pcibr_intr_list[irq];

	while (p) {
		if (p->intr) {
			pcibr_force_interrupt(p->intr);
		}
		p = p->next;
	}
}

/*
 * Check for lost interrupts.  If the PIC int_status reg. says that
 * an interrupt has been sent, but not handled, and the interrupt
 * is not pending in either the cpu irr regs or in the soft irr regs,
 * and the interrupt is not in service, then the interrupt may have
 * been lost.  Force an interrupt on that pin.  It is possible that
 * the interrupt is in flight, so we may generate a spurious interrupt,
 * but we should never miss a real lost interrupt.
 */

static void
sn_check_intr(int irq, pcibr_intr_t intr)
{
	unsigned long regval;
	int irr_reg_num;
	int irr_bit;
	unsigned long irr_reg;

	regval = intr->bi_soft->bs_base->p_int_status_64;
	irr_reg_num = irq_to_vector(irq) / 64;
	irr_bit = irq_to_vector(irq) % 64;
	switch (irr_reg_num) {
	case 0:
		irr_reg = ia64_get_irr0();
		break;
	case 1:
		irr_reg = ia64_get_irr1();
		break;
	case 2:
		irr_reg = ia64_get_irr2();
		break;
	case 3:
		irr_reg = ia64_get_irr3();
		break;
	}
	if (!test_bit(irr_bit, &irr_reg)) {
		if (!test_bit(irq, pda.sn_soft_irr)) {
			if (!test_bit(irq, pda.sn_in_service_ivecs)) {
				regval &= 0xff;
				if (intr->bi_ibits & regval & intr->bi_last_intr) {
					regval &= ~(intr->bi_ibits & regval);
					pcibr_force_interrupt(intr);
				}
			}
		}
	}
	intr->bi_last_intr = regval;
}

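/*
 * Lost-interrupt workaround: walk every bridge interrupt registered for
 * an irq in this cpu's [sn_first_irq, sn_last_irq] range and re-trigger
 * any that sn_check_intr() decides may have been lost.
 */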
void
sn_lb_int_war_check(void)
{
	int i;

	if (pda.sn_first_irq == 0)
		return;
	for (i = pda.sn_first_irq; i <= pda.sn_last_irq; i++) {
		struct pcibr_intr_list_t *p = pcibr_intr_list[i];
		if (p == NULL) {
			continue;
		}
		while (p) {
			sn_check_intr(i, p->intr);
			p = p->next;
		}
	}
}

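/*
 * Pull a pending vector out of this cpu's soft IRR: the highest
 * nonempty 64-bit word is scanned first and the lowest set bit within
 * it is cleared and returned.  Returns IA64_SPURIOUS_INT_VECTOR when
 * nothing is pending.
 */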
static inline int
sn_get_next_bit(void)
{
	int i;
	int bit;

	for (i = 3; i >= 0; i--) {
		if (pda.sn_soft_irr[i] != 0) {
			bit = (i * 64) + __ffs(pda.sn_soft_irr[i]);
			__change_bit(bit, (volatile void *)pda.sn_soft_irr);
			return bit;
		}
	}
	return IA64_SPURIOUS_INT_VECTOR;
}

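/*
 * Program the task priority register.  Vectors outside the device-vector
 * range are written as given; anything inside the range is replaced with
 * IA64_LAST_DEVICE_VECTOR, presumably so device interrupts stay masked
 * while they are serviced from the soft IRR.
 */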
void
sn_set_tpr(int vector)
{
	if (vector > IA64_LAST_DEVICE_VECTOR || vector < IA64_FIRST_DEVICE_VECTOR) {
		ia64_set_tpr(vector);
	} else {
		ia64_set_tpr(IA64_LAST_DEVICE_VECTOR);
	}
}

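/*
 * Drain the hardware IVR: record each pending vector in this cpu's soft
 * IRR and EOI it, stopping at the spurious vector or after a vector
 * above the device range.
 */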
static inline void
sn_get_all_ivr(void)
{
	int vector;

	vector = ia64_get_ivr();
	while (vector != IA64_SPURIOUS_INT_VECTOR) {
		__set_bit(vector, (volatile void *)pda.sn_soft_irr);
		ia64_eoi();
		if (vector > IA64_LAST_DEVICE_VECTOR)
			return;
		vector = ia64_get_ivr();
	}
}

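/*
 * Return the next vector to service.  Vectors are taken from the soft
 * IRR; when it is empty, the hardware IVR is drained into it first.
 */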
int
sn_get_ivr(void)
{
	int vector;

	vector = sn_get_next_bit();
	if (vector == IA64_SPURIOUS_INT_VECTOR) {
		sn_get_all_ivr();
		vector = sn_get_next_bit();
	}
	return vector;
}