/*
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001-2003 Silicon Graphics, Inc. All rights reserved.
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <asm/sn/sgi.h>
#include <asm/sn/sn_cpuid.h>
#include <asm/sn/addrs.h>
#include <asm/sn/arch.h>
#include <asm/sn/iograph.h>
#include <asm/sn/invent.h>
#include <asm/sn/hcl.h>
#include <asm/sn/labelcl.h>
#include <asm/sn/xtalk/xwidget.h>
#include <asm/sn/pci/bridge.h>
#include <asm/sn/pci/pciio.h>
#include <asm/sn/pci/pcibr.h>
#include <asm/sn/pci/pcibr_private.h>
#include <asm/sn/pci/pci_defs.h>
#include <asm/sn/prio.h>
#include <asm/sn/xtalk/xbow.h>
#include <asm/sn/ioc3.h>
#include <asm/sn/io.h>
#include <asm/sn/sn_private.h>

#ifdef __ia64
inline int
compare_and_swap_ptr(void **location, void *old_ptr, void *new_ptr)
{
    /* FIXME - compare_and_swap_ptr NOT ATOMIC */
    if (*location == old_ptr) {
	*location = new_ptr;
	return 1;
    }
    return 0;
}
#endif
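
/*
 * A truly atomic variant could be built on the kernel's generic
 * cmpxchg() macro, which returns the previous value of *location
 * (sketch only; this file currently ships the non-atomic fallback
 * flagged by the FIXME above):
 *
 *	return cmpxchg(location, old_ptr, new_ptr) == old_ptr;
 */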

unsigned		pcibr_intr_bits(pciio_info_t info, pciio_intr_line_t lines, int nslots);
pcibr_intr_t		pcibr_intr_alloc(vertex_hdl_t, device_desc_t, pciio_intr_line_t, vertex_hdl_t);
void			pcibr_intr_free(pcibr_intr_t);
void			pcibr_setpciint(xtalk_intr_t);
int			pcibr_intr_connect(pcibr_intr_t, intr_func_t, intr_arg_t);
void			pcibr_intr_disconnect(pcibr_intr_t);

vertex_hdl_t		pcibr_intr_cpu_get(pcibr_intr_t);
void			pcibr_xintr_preset(void *, int, xwidgetnum_t, iopaddr_t, xtalk_intr_vector_t);
void			pcibr_intr_func(intr_arg_t);

extern pcibr_info_t	pcibr_info_get(vertex_hdl_t);

/* =====================================================================
 *    INTERRUPT MANAGEMENT
 */

unsigned
pcibr_intr_bits(pciio_info_t info,
		pciio_intr_line_t lines, int nslots)
{
    pciio_slot_t	slot = PCIBR_INFO_SLOT_GET_INT(info);
    unsigned		bbits = 0;

    /*
     * Currently favored mapping from PCI
     * slot number and INTA/B/C/D to Bridge
     * PCI Interrupt Bit Number:
     *
     *     SLOT     A B C D
     *      0       0 4 0 4
     *      1       1 5 1 5
     *      2       2 6 2 6
     *      3       3 7 3 7
     *      4       4 0 4 0
     *      5       5 1 5 1
     *      6       6 2 6 2
     *      7       7 3 7 3
     */

    if (slot < nslots) {
	if (lines & (PCIIO_INTR_LINE_A | PCIIO_INTR_LINE_C))
	    bbits |= 1 << slot;
	if (lines & (PCIIO_INTR_LINE_B | PCIIO_INTR_LINE_D))
	    bbits |= 1 << (slot ^ 4);
    }
    return bbits;
}
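
/*
 * Worked example (hypothetical device): a card in slot 2 requesting
 * INTA and INTB on an 8-slot bus maps to
 *
 *	(1 << 2) | (1 << (2 ^ 4)) == 0x04 | 0x40 == 0x44,
 *
 * i.e. Bridge interrupt bits 2 and 6, matching the table above.
 */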

/*
 * Get the next wrapper pointer queued in the interrupt circular buffer.
 */
pcibr_intr_wrap_t
pcibr_wrap_get(pcibr_intr_cbuf_t cbuf)
{
    pcibr_intr_wrap_t	wrap;

    if (cbuf->ib_in == cbuf->ib_out)
	panic("pcibr intr circular buffer empty, cbuf=0x%p, ib_in=ib_out=%d\n",
	      (void *)cbuf, cbuf->ib_out);

    wrap = cbuf->ib_cbuf[cbuf->ib_out++];
    cbuf->ib_out = cbuf->ib_out % IBUFSIZE;
    return wrap;
}

/*
 * Queue a wrapper pointer in the interrupt circular buffer.
 */
void
pcibr_wrap_put(pcibr_intr_wrap_t wrap, pcibr_intr_cbuf_t cbuf)
{
    int	in;

    /*
     * Multiple CPUs could be executing this code simultaneously
     * if a handler has registered multiple interrupt lines and
     * the interrupts are directed to different CPUs.
     */
    spin_lock(&cbuf->ib_lock);
    in = (cbuf->ib_in + 1) % IBUFSIZE;
    if (in == cbuf->ib_out)
	panic("pcibr intr circular buffer full, cbuf=0x%p, ib_in=%d\n",
	      (void *)cbuf, cbuf->ib_in);

    cbuf->ib_cbuf[cbuf->ib_in] = wrap;
    cbuf->ib_in = in;
    spin_unlock(&cbuf->ib_lock);
}
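
/*
 * Capacity note (follows from the full/empty tests above): one slot is
 * always left open to distinguish "full" from "empty", so the buffer
 * holds at most IBUFSIZE - 1 wrappers.  For a hypothetical IBUFSIZE of
 * 8, the eighth pcibr_wrap_put() without an intervening
 * pcibr_wrap_get() panics.
 */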

/*
 * On SN systems there is a race condition between a PIO read response
 * and DMAs.  In rare cases, the read response may beat the DMA, causing
 * the driver to think that data in memory is complete and meaningful.
 * This code eliminates that race.
 * This routine is called by the PIO read routines after doing the read.
 * It forces a fake interrupt on another line, which is logically
 * associated with the slot that the PIO is addressed to
 * (see sn_dma_flush_init()).
 * It then spins while watching the memory location that the interrupt
 * is targeted to.  When the interrupt response arrives, we are sure
 * that the DMA has landed in memory and it is safe for the driver
 * to proceed.
 */

extern struct sn_flush_nasid_entry flush_nasid_list[MAX_NASIDS];

void
sn_dma_flush(unsigned long addr)
{
    nasid_t nasid;
    int wid_num;
    volatile struct sn_flush_device_list *p;
    int i, j;
    int bwin;
    unsigned long flags;

    nasid = NASID_GET(addr);
    wid_num = SWIN_WIDGETNUM(addr);
    bwin = BWIN_WINDOWNUM(addr);

    if (flush_nasid_list[nasid].widget_p == NULL)
	return;
    if (bwin > 0) {
	bwin--;
	switch (bwin) {
	case 0:
	    wid_num = ((flush_nasid_list[nasid].iio_itte1) >> 8) & 0xf;
	    break;
	case 1:
	    wid_num = ((flush_nasid_list[nasid].iio_itte2) >> 8) & 0xf;
	    break;
	case 2:
	    wid_num = ((flush_nasid_list[nasid].iio_itte3) >> 8) & 0xf;
	    break;
	case 3:
	    wid_num = ((flush_nasid_list[nasid].iio_itte4) >> 8) & 0xf;
	    break;
	case 4:
	    wid_num = ((flush_nasid_list[nasid].iio_itte5) >> 8) & 0xf;
	    break;
	case 5:
	    wid_num = ((flush_nasid_list[nasid].iio_itte6) >> 8) & 0xf;
	    break;
	case 6:
	    wid_num = ((flush_nasid_list[nasid].iio_itte7) >> 8) & 0xf;
	    break;
	}
    }
    if (flush_nasid_list[nasid].widget_p[wid_num] == NULL)
	return;
    p = &flush_nasid_list[nasid].widget_p[wid_num][0];

    /* find a matching BAR */

    for (i = 0; i < DEV_PER_WIDGET; i++) {
	for (j = 0; j < PCI_ROM_RESOURCE; j++) {
	    if (p->bar_list[j].start == 0)
		break;
	    if (addr >= p->bar_list[j].start && addr <= p->bar_list[j].end)
		break;
	}
	if (j < PCI_ROM_RESOURCE && p->bar_list[j].start != 0)
	    break;
	p++;
    }

    /* if no matching BAR, return without doing anything. */

    if (i == DEV_PER_WIDGET)
	return;

    spin_lock_irqsave(&p->flush_lock, flags);

    p->flush_addr = 0;

    /* force an interrupt. */

    *(bridgereg_t *)(p->force_int_addr) = 1;

    /* wait for the interrupt to come back. */

    while (p->flush_addr != 0x10f)
	;

    /* okay, everything is synched up. */
    spin_unlock_irqrestore(&p->flush_lock, flags);
}

EXPORT_SYMBOL(sn_dma_flush);
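
/*
 * Usage sketch (hypothetical caller, not part of this file): a PIO
 * read routine flushes after the read and before trusting DMA'd data,
 * along the lines of
 *
 *	status = *(volatile uint32_t *)pio_addr;	(the PIO read)
 *	sn_dma_flush((unsigned long)pio_addr);		(spin until DMA lands)
 *
 * after which buffers written by the device are safe to examine.
 */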

/*
 * There are end cases where a deadlock can occur if interrupt
 * processing completes and the Bridge b_int_status bit is still set.
 *
 * One scenario is if a second PCI interrupt occurs within 60ns of
 * the previous interrupt being cleared.  In this case the Bridge
 * does not detect the transition, the Bridge b_int_status bit
 * remains set, and because no transition was detected no interrupt
 * packet is sent to the Hub/Heart.
 *
 * A second scenario is possible when a b_int_status bit is being
 * shared by multiple devices:
 *
 *					Device #1 generates interrupt
 *					Bridge b_int_status bit set
 *					Device #2 generates interrupt
 *	interrupt processing begins
 *	ISR for device #1 runs and
 *		clears interrupt
 *					Device #1 generates interrupt
 *	ISR for device #2 runs and
 *		clears interrupt
 *					(b_int_status bit still set)
 *	interrupt processing completes
 *
 * Interrupt processing is now complete, but an interrupt is still
 * outstanding for Device #1.  But because there was no transition of
 * the b_int_status bit, no interrupt packet will be generated and
 * a deadlock will occur.
 *
 * To avoid these deadlock situations, this function is used
 * to check if a specific Bridge b_int_status bit is set, and if so,
 * cause the setting of the corresponding interrupt bit.
 *
 * On an XBridge (SN1) and PIC (SN2), we do this by writing the
 * appropriate Bridge Force Interrupt register.
 */
void
pcibr_force_interrupt(pcibr_intr_t intr)
{
    unsigned	bit;
    unsigned	bits;
    pcibr_soft_t pcibr_soft = intr->bi_soft;
    bridge_t	*bridge = pcibr_soft->bs_base;

    bits = intr->bi_ibits;
    for (bit = 0; bit < 8; bit++) {
	if (bits & (1 << bit)) {

	    PCIBR_DEBUG((PCIBR_DEBUG_INTR, pcibr_soft->bs_vhdl,
			 "pcibr_force_interrupt: bit=0x%x\n", bit));

	    if (IS_XBRIDGE_OR_PIC_SOFT(pcibr_soft)) {
		bridge->b_force_pin[bit].intr = 1;
	    }
	}
    }
}

/*ARGSUSED */
pcibr_intr_t
pcibr_intr_alloc(vertex_hdl_t pconn_vhdl,
		 device_desc_t dev_desc,
		 pciio_intr_line_t lines,
		 vertex_hdl_t owner_dev)
{
    pcibr_info_t	pcibr_info = pcibr_info_get(pconn_vhdl);
    pciio_slot_t	pciio_slot = PCIBR_INFO_SLOT_GET_INT(pcibr_info);
    pcibr_soft_t	pcibr_soft = (pcibr_soft_t) pcibr_info->f_mfast;
    vertex_hdl_t	xconn_vhdl = pcibr_soft->bs_conn;
    bridge_t		*bridge = pcibr_soft->bs_base;
    int			is_threaded = 0;

    xtalk_intr_t	*xtalk_intr_p;
    pcibr_intr_t	*pcibr_intr_p;
    pcibr_intr_list_t	*intr_list_p;

    unsigned		pcibr_int_bits;
    unsigned		pcibr_int_bit;
    xtalk_intr_t	xtalk_intr = (xtalk_intr_t)0;
    hub_intr_t		hub_intr;
    pcibr_intr_t	pcibr_intr;
    pcibr_intr_list_t	intr_entry;
    pcibr_intr_list_t	intr_list;
    bridgereg_t		int_dev;

    PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_INTR_ALLOC, pconn_vhdl,
		"pcibr_intr_alloc: %s%s%s%s%s\n",
		!(lines & 15) ? " No INTs?" : "",
		lines & 1 ? " INTA" : "",
		lines & 2 ? " INTB" : "",
		lines & 4 ? " INTC" : "",
		lines & 8 ? " INTD" : ""));

    NEW(pcibr_intr);
    if (!pcibr_intr)
	return NULL;

    pcibr_intr->bi_dev = pconn_vhdl;
    pcibr_intr->bi_lines = lines;
    pcibr_intr->bi_soft = pcibr_soft;
    pcibr_intr->bi_ibits = 0;		/* bits will be added below */
    pcibr_intr->bi_func = 0;		/* unset until connect */
    pcibr_intr->bi_arg = 0;		/* unset until connect */
    pcibr_intr->bi_flags = is_threaded ? 0 : PCIIO_INTR_NOTHREAD;
    pcibr_intr->bi_mustruncpu = CPU_NONE;
    pcibr_intr->bi_ibuf.ib_in = 0;
    pcibr_intr->bi_ibuf.ib_out = 0;
    spin_lock_init(&pcibr_intr->bi_ibuf.ib_lock);
    pcibr_int_bits = pcibr_soft->bs_intr_bits((pciio_info_t)pcibr_info, lines,
					      PCIBR_NUM_SLOTS(pcibr_soft));

    /*
     * For each PCI interrupt line requested, figure
     * out which Bridge PCI Interrupt Line it maps
     * to, and make sure there are xtalk resources
     * allocated for it.
     */
    PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_INTR_ALLOC, pconn_vhdl,
		"pcibr_intr_alloc: pcibr_int_bits: 0x%x\n", pcibr_int_bits));
    for (pcibr_int_bit = 0; pcibr_int_bit < 8; pcibr_int_bit++) {
	if (pcibr_int_bits & (1 << pcibr_int_bit)) {
	    xtalk_intr_p = &pcibr_soft->bs_intr[pcibr_int_bit].bsi_xtalk_intr;

	    xtalk_intr = *xtalk_intr_p;

	    if (xtalk_intr == NULL) {
		/*
		 * This xtalk_intr_alloc is constrained for two reasons:
		 * 1) Normal interrupts and error interrupts need to be
		 *    delivered through a single xtalk target widget so
		 *    that there aren't any ordering problems with DMA,
		 *    completion interrupts, and error interrupts.
		 *    (Use of xconn_vhdl forces this.)
		 *
		 * 2) On SN1, addressing constraints on SN1 and Bridge
		 *    force us to use a single PI number for all interrupts
		 *    from a single Bridge.  (SN1-specific code forces this.)
		 */

		/*
		 * All code dealing with threaded PCI interrupt handlers
		 * is located at the pcibr level.  Because of this,
		 * we always want the lower layers (hub/heart_intr_alloc,
		 * intr_level_connect) to treat us as non-threaded so we
		 * don't set up a duplicate threaded environment.  We make
		 * this happen by calling a special xtalk interface.
		 */
		xtalk_intr = xtalk_intr_alloc_nothd(xconn_vhdl, dev_desc,
						    owner_dev);

		PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_INTR_ALLOC, pconn_vhdl,
			    "pcibr_intr_alloc: xtalk_intr=0x%x\n", xtalk_intr));

		/* both an assert and a runtime check on this:
		 * we need to check in non-DEBUG kernels, and
		 * the ASSERT gets us more information when
		 * we use DEBUG kernels.
		 */
		ASSERT(xtalk_intr != NULL);
		if (xtalk_intr == NULL) {
		    /* it is quite possible that our
		     * xtalk_intr_alloc failed because
		     * someone else got there first,
		     * and we can find their results
		     * in xtalk_intr_p.
		     */
		    if (!*xtalk_intr_p) {
#ifdef SUPPORT_PRINTING_V_FORMAT
			printk(KERN_ALERT
				"pcibr_intr_alloc %v: unable to get xtalk interrupt resources",
				xconn_vhdl);
#else
			printk(KERN_ALERT
				"pcibr_intr_alloc 0x%p: unable to get xtalk interrupt resources",
				(void *)xconn_vhdl);
#endif
			/* yes, we leak resources here. */
			return 0;
		    }
		} else if (compare_and_swap_ptr((void **) xtalk_intr_p, NULL, xtalk_intr)) {
		    /*
		     * now tell the bridge which slot is
		     * using this interrupt line.
		     */
		    int_dev = bridge->b_int_device;
		    int_dev &= ~BRIDGE_INT_DEV_MASK(pcibr_int_bit);
		    int_dev |= pciio_slot << BRIDGE_INT_DEV_SHFT(pcibr_int_bit);
		    bridge->b_int_device = int_dev;	/* XXXMP */

		    PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_INTR_ALLOC, pconn_vhdl,
				"bridge intr bit %d clears my wrb\n",
				pcibr_int_bit));
		} else {
		    /* someone else got one allocated first;
		     * free the one we just created, and
		     * retrieve the one they allocated.
		     */
		    xtalk_intr_free(xtalk_intr);
		    xtalk_intr = *xtalk_intr_p;
#if PARANOID
		    /* once xtalk_intr is set, we never clear it,
		     * so if the CAS fails above, this condition
		     * can "never happen" ...
		     */
		    if (!xtalk_intr) {
			printk(KERN_ALERT
				"pcibr_intr_alloc %v: unable to set xtalk interrupt resources",
				xconn_vhdl);
			/* yes, we leak resources here. */
			return 0;
		    }
#endif
		}
	    }

	    pcibr_intr->bi_ibits |= 1 << pcibr_int_bit;

	    NEW(intr_entry);
	    intr_entry->il_next = NULL;
	    intr_entry->il_intr = pcibr_intr;
	    intr_entry->il_wrbf = &(bridge->b_wr_req_buf[pciio_slot].reg);
	    intr_list_p =
		&pcibr_soft->bs_intr[pcibr_int_bit].bsi_pcibr_intr_wrap.iw_list;

	    PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_INTR_ALLOC, pconn_vhdl,
			"Bridge bit 0x%x wrap=0x%x\n", pcibr_int_bit,
			pcibr_soft->bs_intr[pcibr_int_bit].bsi_pcibr_intr_wrap));

	    if (compare_and_swap_ptr((void **) intr_list_p, NULL, intr_entry)) {
		/* we are the first interrupt on this bridge bit.
		 */
		PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_INTR_ALLOC, pconn_vhdl,
			    "INT 0x%x (bridge bit %d) allocated [FIRST]\n",
			    pcibr_int_bits, pcibr_int_bit));
		continue;
	    }
	    intr_list = *intr_list_p;
	    pcibr_intr_p = &intr_list->il_intr;
	    if (compare_and_swap_ptr((void **) pcibr_intr_p, NULL, pcibr_intr)) {
		/* first entry on list was erased,
		 * and we replaced it, so we
		 * don't need our intr_entry.
		 */
		DEL(intr_entry);
		PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_INTR_ALLOC, pconn_vhdl,
			    "INT 0x%x (bridge bit %d) replaces erased first\n",
			    pcibr_int_bits, pcibr_int_bit));
		continue;
	    }
	    intr_list_p = &intr_list->il_next;
	    if (compare_and_swap_ptr((void **) intr_list_p, NULL, intr_entry)) {
		/* we are the new second interrupt on this bit.
		 */
		pcibr_soft->bs_intr[pcibr_int_bit].bsi_pcibr_intr_wrap.iw_shared = 1;
		PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_INTR_ALLOC, pconn_vhdl,
			    "INT 0x%x (bridge bit %d) is new SECOND\n",
			    pcibr_int_bits, pcibr_int_bit));
		continue;
	    }
	    while (1) {
		pcibr_intr_p = &intr_list->il_intr;
		if (compare_and_swap_ptr((void **) pcibr_intr_p, NULL, pcibr_intr)) {
		    /* an entry on list was erased,
		     * and we replaced it, so we
		     * don't need our intr_entry.
		     */
		    DEL(intr_entry);

		    PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_INTR_ALLOC, pconn_vhdl,
				"INT 0x%x (bridge bit %d) replaces erased Nth\n",
				pcibr_int_bits, pcibr_int_bit));
		    break;
		}
		intr_list_p = &intr_list->il_next;
		if (compare_and_swap_ptr((void **) intr_list_p, NULL, intr_entry)) {
		    /* entry appended to share list
		     */
		    PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_INTR_ALLOC, pconn_vhdl,
				"INT 0x%x (bridge bit %d) is new Nth\n",
				pcibr_int_bits, pcibr_int_bit));
		    break;
		}
		/* step to next record in chain
		 */
		intr_list = *intr_list_p;
	    }
	}
    }

#if DEBUG && INTR_DEBUG
    printk("%v pcibr_intr_alloc complete\n", pconn_vhdl);
#endif
    hub_intr = (hub_intr_t)xtalk_intr;
    pcibr_intr->bi_irq = hub_intr->i_bit;
    pcibr_intr->bi_cpu = hub_intr->i_cpuid;
    return pcibr_intr;
}
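
/*
 * Typical client flow (sketch only; "vhdl", "my_isr", and "my_arg" are
 * hypothetical, and a real caller may pass a non-NULL device
 * descriptor): allocate, then connect a handler:
 *
 *	pcibr_intr_t intr;
 *
 *	intr = pcibr_intr_alloc(vhdl, NULL, PCIIO_INTR_LINE_A, vhdl);
 *	if (intr)
 *		pcibr_intr_connect(intr, my_isr, my_arg);
 *
 * pcibr_intr_free() below undoes the allocation once the handler has
 * been disconnected.
 */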

/*ARGSUSED */
void
pcibr_intr_free(pcibr_intr_t pcibr_intr)
{
    unsigned		pcibr_int_bits = pcibr_intr->bi_ibits;
    pcibr_soft_t	pcibr_soft = pcibr_intr->bi_soft;
    unsigned		pcibr_int_bit;
    pcibr_intr_list_t	intr_list;
    int			intr_shared;
    xtalk_intr_t	*xtalk_intrp;

    for (pcibr_int_bit = 0; pcibr_int_bit < 8; pcibr_int_bit++) {
	if (pcibr_int_bits & (1 << pcibr_int_bit)) {
	    for (intr_list =
		     pcibr_soft->bs_intr[pcibr_int_bit].bsi_pcibr_intr_wrap.iw_list;
		 intr_list != NULL;
		 intr_list = intr_list->il_next)
		if (compare_and_swap_ptr((void **) &intr_list->il_intr,
					 pcibr_intr,
					 NULL)) {

		    PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_INTR_ALLOC,
				pcibr_intr->bi_dev,
				"pcibr_intr_free: cleared hdlr from bit 0x%x\n",
				pcibr_int_bit));
		}
	    /* If this interrupt line is not being shared between multiple
	     * devices, release the xtalk interrupt resources.
	     */
	    intr_shared =
		pcibr_soft->bs_intr[pcibr_int_bit].bsi_pcibr_intr_wrap.iw_shared;
	    xtalk_intrp = &pcibr_soft->bs_intr[pcibr_int_bit].bsi_xtalk_intr;

	    if ((!intr_shared) && (*xtalk_intrp)) {

		bridge_t	*bridge = pcibr_soft->bs_base;
		bridgereg_t	int_dev;

		xtalk_intr_free(*xtalk_intrp);
		*xtalk_intrp = 0;

		/* Clear the PCI device interrupt to bridge interrupt pin
		 * mapping.
		 */
		int_dev = bridge->b_int_device;
		int_dev &= ~BRIDGE_INT_DEV_MASK(pcibr_int_bit);
		bridge->b_int_device = int_dev;

	    }
	}
    }
    DEL(pcibr_intr);
}

void
pcibr_setpciint(xtalk_intr_t xtalk_intr)
{
    iopaddr_t		addr;
    xtalk_intr_vector_t	vect;
    vertex_hdl_t	vhdl;
    bridge_t		*bridge;
    picreg_t		*int_addr;

    addr = xtalk_intr_addr_get(xtalk_intr);
    vect = xtalk_intr_vector_get(xtalk_intr);
    vhdl = xtalk_intr_dev_get(xtalk_intr);
    bridge = (bridge_t *)xtalk_piotrans_addr(vhdl, 0, 0, sizeof(bridge_t), 0);

    int_addr = (picreg_t *)xtalk_intr_sfarg_get(xtalk_intr);
    *int_addr = ((PIC_INT_ADDR_FLD & ((uint64_t)vect << 48)) |
		 (PIC_INT_ADDR_HOST & addr));
}
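
/*
 * Encoding note: the PIC interrupt address register packs the xtalk
 * vector into the PIC_INT_ADDR_FLD field (bits 48 and up) and the host
 * address into PIC_INT_ADDR_HOST.  For a hypothetical vector of 0x55,
 * the field contribution is 0x55ULL << 48 == 0x0055000000000000, OR'd
 * with the masked host address.
 */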

/*ARGSUSED */
int
pcibr_intr_connect(pcibr_intr_t pcibr_intr, intr_func_t intr_func, intr_arg_t intr_arg)
{
    pcibr_soft_t	pcibr_soft;
    bridge_t		*bridge;
    unsigned		pcibr_int_bits;
    unsigned		pcibr_int_bit;
    uint64_t		int_enable;
    unsigned long	s;

    if (pcibr_intr == NULL)
	return -1;

    /* check for NULL before dereferencing */
    pcibr_soft = pcibr_intr->bi_soft;
    bridge = pcibr_soft->bs_base;
    pcibr_int_bits = pcibr_intr->bi_ibits;

    PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_INTR_ALLOC, pcibr_intr->bi_dev,
		"pcibr_intr_connect: pcibr_intr=0x%x\n",
		pcibr_intr));

    pcibr_intr->bi_func = intr_func;
    pcibr_intr->bi_arg = intr_arg;
    *((volatile unsigned *)&pcibr_intr->bi_flags) |= PCIIO_INTR_CONNECTED;

    /*
     * For each PCI interrupt line requested, figure
     * out which Bridge PCI Interrupt Line it maps
     * to, and connect the interrupt.
     */
    for (pcibr_int_bit = 0; pcibr_int_bit < 8; pcibr_int_bit++)
	if (pcibr_int_bits & (1 << pcibr_int_bit)) {
	    pcibr_intr_wrap_t	intr_wrap;
	    xtalk_intr_t	xtalk_intr;
	    void		*int_addr;

	    xtalk_intr = pcibr_soft->bs_intr[pcibr_int_bit].bsi_xtalk_intr;
	    intr_wrap = &pcibr_soft->bs_intr[pcibr_int_bit].bsi_pcibr_intr_wrap;

	    /*
	     * If this interrupt line is being shared and the connect has
	     * already been done, no need to do it again.
	     */
	    if (pcibr_soft->bs_intr[pcibr_int_bit].bsi_pcibr_intr_wrap.iw_connected)
		continue;

	    /*
	     * Use the pcibr wrapper function to handle all Bridge interrupts
	     * regardless of whether the interrupt line is shared or not.
	     */
	    if (IS_PIC_SOFT(pcibr_soft))
		int_addr = (void *)&(bridge->p_int_addr_64[pcibr_int_bit]);
	    else
		int_addr = (void *)&(bridge->b_int_addr[pcibr_int_bit].addr);

	    xtalk_intr_connect(xtalk_intr, pcibr_intr_func, (intr_arg_t) intr_wrap,
			       (xtalk_intr_setfunc_t) pcibr_setpciint,
			       (void *)int_addr);

	    pcibr_soft->bs_intr[pcibr_int_bit].bsi_pcibr_intr_wrap.iw_connected = 1;

	    PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_INTR_ALLOC, pcibr_intr->bi_dev,
			"pcibr_setpciint: int_addr=0x%x, *int_addr=0x%x, "
			"pcibr_int_bit=0x%x\n", int_addr,
			*(picreg_t *)int_addr,
			pcibr_int_bit));
	}

    /* PIC WAR. PV# 854697
     * On PIC we must write 64-bit MMRs with 64-bit stores.
     */
    s = pcibr_lock(pcibr_soft);
    if (IS_PIC_SOFT(pcibr_soft) &&
		PCIBR_WAR_ENABLED(PV854697, pcibr_soft)) {
	int_enable = bridge->p_int_enable_64;
	int_enable |= pcibr_int_bits;
	bridge->p_int_enable_64 = int_enable;
    } else {
	bridgereg_t int_enable;

	int_enable = bridge->b_int_enable;
	int_enable |= pcibr_int_bits;
	bridge->b_int_enable = int_enable;
    }
    bridge->b_wid_tflush;	/* wait until Bridge PIO complete */
    pcibr_unlock(pcibr_soft, s);

    return 0;
}

/*ARGSUSED */
void
pcibr_intr_disconnect(pcibr_intr_t pcibr_intr)
{
    pcibr_soft_t	pcibr_soft = pcibr_intr->bi_soft;
    bridge_t		*bridge = pcibr_soft->bs_base;
    unsigned		pcibr_int_bits = pcibr_intr->bi_ibits;
    unsigned		pcibr_int_bit;
    pcibr_intr_wrap_t	intr_wrap;
    uint64_t		int_enable;
    unsigned long	s;

    /* Stop calling the function.  Now.
     */
    *((volatile unsigned *)&pcibr_intr->bi_flags) &= ~PCIIO_INTR_CONNECTED;
    pcibr_intr->bi_func = 0;
    pcibr_intr->bi_arg = 0;
    /*
     * For each PCI interrupt line requested, figure
     * out which Bridge PCI Interrupt Line it maps
     * to, and disconnect the interrupt.
     */

    /* don't disable interrupts for lines that
     * are shared between devices.
     */
    for (pcibr_int_bit = 0; pcibr_int_bit < 8; pcibr_int_bit++)
	if ((pcibr_int_bits & (1 << pcibr_int_bit)) &&
	    (pcibr_soft->bs_intr[pcibr_int_bit].bsi_pcibr_intr_wrap.iw_shared))
	    pcibr_int_bits &= ~(1 << pcibr_int_bit);
    if (!pcibr_int_bits)
	return;

    /* PIC WAR. PV# 854697
     * On PIC we must write 64-bit MMRs with 64-bit stores.
     */
    s = pcibr_lock(pcibr_soft);
    if (IS_PIC_SOFT(pcibr_soft) && PCIBR_WAR_ENABLED(PV854697, pcibr_soft)) {
	int_enable = bridge->p_int_enable_64;
	int_enable &= ~pcibr_int_bits;
	bridge->p_int_enable_64 = int_enable;
    } else {
	int_enable = (uint64_t)bridge->b_int_enable;
	int_enable &= ~pcibr_int_bits;
	bridge->b_int_enable = (bridgereg_t)int_enable;
    }
    bridge->b_wid_tflush;	/* wait until Bridge PIO complete */
    pcibr_unlock(pcibr_soft, s);

    PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_INTR_ALLOC, pcibr_intr->bi_dev,
		"pcibr_intr_disconnect: disabled int_bits=0x%x\n",
		pcibr_int_bits));

    for (pcibr_int_bit = 0; pcibr_int_bit < 8; pcibr_int_bit++)
	if (pcibr_int_bits & (1 << pcibr_int_bit)) {
	    void	*int_addr;

	    /* if the interrupt line is now shared,
	     * do not disconnect it.
	     */
	    if (pcibr_soft->bs_intr[pcibr_int_bit].bsi_pcibr_intr_wrap.iw_shared)
		continue;

	    xtalk_intr_disconnect(pcibr_soft->bs_intr[pcibr_int_bit].bsi_xtalk_intr);
	    pcibr_soft->bs_intr[pcibr_int_bit].bsi_pcibr_intr_wrap.iw_connected = 0;

	    PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_INTR_ALLOC, pcibr_intr->bi_dev,
			"pcibr_intr_disconnect: disconnect int_bits=0x%x\n",
			pcibr_int_bits));

	    /* if we are now sharing the interrupt line,
	     * reconnect us; this closes the hole where
	     * another pcibr_intr_alloc() was in progress
	     * as we disconnected.
	     */
	    if (!pcibr_soft->bs_intr[pcibr_int_bit].bsi_pcibr_intr_wrap.iw_shared)
		continue;

	    intr_wrap = &pcibr_soft->bs_intr[pcibr_int_bit].bsi_pcibr_intr_wrap;

	    if (IS_PIC_SOFT(pcibr_soft))
		int_addr = (void *)&(bridge->p_int_addr_64[pcibr_int_bit]);
	    else
		int_addr = (void *)&(bridge->b_int_addr[pcibr_int_bit].addr);

	    xtalk_intr_connect(pcibr_soft->bs_intr[pcibr_int_bit].bsi_xtalk_intr,
			       pcibr_intr_func, (intr_arg_t) intr_wrap,
			       (xtalk_intr_setfunc_t)pcibr_setpciint,
			       (void *)(long)pcibr_int_bit);
	    PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_INTR_ALLOC, pcibr_intr->bi_dev,
			"pcibr_intr_disconnect: now-sharing int_bits=0x%x\n",
			pcibr_int_bit));
	}
}

/*ARGSUSED */
vertex_hdl_t
pcibr_intr_cpu_get(pcibr_intr_t pcibr_intr)
{
    pcibr_soft_t	pcibr_soft = pcibr_intr->bi_soft;
    unsigned		pcibr_int_bits = pcibr_intr->bi_ibits;
    unsigned		pcibr_int_bit;

    for (pcibr_int_bit = 0; pcibr_int_bit < 8; pcibr_int_bit++)
	if (pcibr_int_bits & (1 << pcibr_int_bit))
	    return xtalk_intr_cpu_get(pcibr_soft->bs_intr[pcibr_int_bit].bsi_xtalk_intr);
    return 0;
}

/* =====================================================================
 *    INTERRUPT HANDLING
 */
void
pcibr_clearwidint(bridge_t *bridge)
{
    bridge->b_wid_int_upper = 0;
    bridge->b_wid_int_lower = 0;
}


void
pcibr_setwidint(xtalk_intr_t intr)
{
    xwidgetnum_t	targ = xtalk_intr_target_get(intr);
    iopaddr_t		addr = xtalk_intr_addr_get(intr);
    xtalk_intr_vector_t	vect = xtalk_intr_vector_get(intr);
    widgetreg_t		NEW_b_wid_int_upper, NEW_b_wid_int_lower;
    widgetreg_t		OLD_b_wid_int_upper, OLD_b_wid_int_lower;

    bridge_t		*bridge = (bridge_t *)xtalk_intr_sfarg_get(intr);

    NEW_b_wid_int_upper = ((0x000F0000 & (targ << 16)) |
			   XTALK_ADDR_TO_UPPER(addr));
    NEW_b_wid_int_lower = XTALK_ADDR_TO_LOWER(addr);

    OLD_b_wid_int_upper = bridge->b_wid_int_upper;
    OLD_b_wid_int_lower = bridge->b_wid_int_lower;

    /* Verify that all interrupts from this Bridge are using a single PI */
    if ((OLD_b_wid_int_upper != 0) && (OLD_b_wid_int_lower != 0)) {
	/*
	 * Once set, these registers shouldn't change; they should
	 * only be set multiple times with the same values.
	 *
	 * If we're attempting to change these registers, it means
	 * that our heuristics for allocating interrupts in a way
	 * appropriate for IP35 have failed, and the admin needs to
	 * explicitly direct some interrupts (or we need to make the
	 * heuristics more clever).
	 *
	 * In practice, we hope this doesn't happen very often, if
	 * at all.
	 */
	if ((OLD_b_wid_int_upper != NEW_b_wid_int_upper) ||
	    (OLD_b_wid_int_lower != NEW_b_wid_int_lower)) {
	    printk(KERN_WARNING "Interrupt allocation is too complex.\n");
	    printk(KERN_WARNING "Use explicit administrative interrupt targeting.\n");
	    printk(KERN_WARNING "bridge=0x%lx targ=0x%x\n", (unsigned long)bridge, targ);
	    printk(KERN_WARNING "NEW=0x%x/0x%x  OLD=0x%x/0x%x\n",
		   NEW_b_wid_int_upper, NEW_b_wid_int_lower,
		   OLD_b_wid_int_upper, OLD_b_wid_int_lower);
	    panic("PCI Bridge interrupt targeting error\n");
	}
    }

    bridge->b_wid_int_upper = NEW_b_wid_int_upper;
    bridge->b_wid_int_lower = NEW_b_wid_int_lower;
    bridge->b_int_host_err = vect;

}
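
/*
 * Encoding note: the target widget number lands in bits 16..19 of
 * b_wid_int_upper, above the upper bits of the xtalk address.  For a
 * hypothetical targ of 0x8, the contribution is
 * 0x000F0000 & (0x8 << 16) == 0x00080000, OR'd with
 * XTALK_ADDR_TO_UPPER(addr), while b_wid_int_lower takes the low part
 * of the address (XTALK_ADDR_TO_LOWER).
 */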

/*
 * pcibr_xintr_preset: called during mlreset time
 * if the platform specific code needs to route
 * one of the Bridge's xtalk interrupts before the
 * xtalk infrastructure is available.
 */
void
pcibr_xintr_preset(void *which_widget,
		   int which_widget_intr,
		   xwidgetnum_t targ,
		   iopaddr_t addr,
		   xtalk_intr_vector_t vect)
{
    bridge_t	*bridge = (bridge_t *) which_widget;

    if (which_widget_intr == -1) {
	/* bridge widget error interrupt */
	bridge->b_wid_int_upper = ((0x000F0000 & (targ << 16)) |
				   XTALK_ADDR_TO_UPPER(addr));
	bridge->b_wid_int_lower = XTALK_ADDR_TO_LOWER(addr);
	bridge->b_int_host_err = vect;
	printk("pcibr_xintr_preset: b_wid_int_upper 0x%lx b_wid_int_lower 0x%lx b_int_host_err 0x%x\n",
		((0x000F0000 & (targ << 16)) | XTALK_ADDR_TO_UPPER(addr)),
		XTALK_ADDR_TO_LOWER(addr), vect);

	/* turn on all interrupts except
	 * the PCI interrupt requests,
	 * at least at heart.
	 */
	bridge->b_int_enable |= ~BRIDGE_IMR_INT_MSK;

    } else {
	/* routing a PCI device interrupt.
	 * targ and low 38 bits of addr must
	 * be the same as the already set
	 * value for the widget error interrupt.
	 */
	bridge->b_int_addr[which_widget_intr].addr =
	    ((BRIDGE_INT_ADDR_HOST & (addr >> 30)) |
	     (BRIDGE_INT_ADDR_FLD & vect));
	/*
	 * now bridge can let it through;
	 * NB: still should be blocked at
	 * xtalk provider end, until the service
	 * function is set.
	 */
	bridge->b_int_enable |= 1 << vect;
    }
    bridge->b_wid_tflush;	/* wait until Bridge PIO complete */
}
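
/*
 * Encoding note (Bridge, pre-PIC path above): unlike the PIC encoding
 * in pcibr_setpciint(), each per-pin b_int_addr register folds the
 * host address down by 30 bits into BRIDGE_INT_ADDR_HOST and stores
 * the vector in BRIDGE_INT_ADDR_FLD; e.g. a hypothetical addr of
 * 0x100000000 contributes addr >> 30 == 0x4 to the host field, which
 * is why only the low 38 address bits are significant per pin.
 */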

/*
 * pcibr_intr_func()
 *
 * This is the pcibr interrupt "wrapper" function that is called,
 * in interrupt context, to initiate the interrupt handler(s) registered
 * (via pcibr_intr_alloc/connect) for the occurring interrupt.  Non-threaded
 * handlers will be called directly, and threaded handlers will have their
 * thread woken up.
 */
void
pcibr_intr_func(intr_arg_t arg)
{
    pcibr_intr_wrap_t	wrap = (pcibr_intr_wrap_t) arg;
    reg_p		wrbf;
    intr_func_t		func;
    pcibr_intr_t	intr;
    pcibr_intr_list_t	list;
    int			clearit;
    int			do_nonthreaded = 1;
    int			is_threaded = 0;
    int			x = 0;
    pcibr_soft_t	pcibr_soft = wrap->iw_soft;
    bridge_t		*bridge = pcibr_soft->bs_base;
    uint64_t		p_enable = pcibr_soft->bs_int_enable;
    int			bit = wrap->iw_ibit;

    /*
     * PIC WAR. PV# 855272
     * Early attempt at a workaround for the runaway
     * interrupt problem.  Briefly disable the enable bit for
     * this device.
     */
    if (IS_PIC_SOFT(pcibr_soft) &&
		PCIBR_WAR_ENABLED(PV855272, pcibr_soft)) {
	unsigned long s;

	/* disable-enable interrupts for this bridge pin */

	p_enable &= ~(1 << bit);
	s = pcibr_lock(pcibr_soft);
	bridge->p_int_enable_64 = p_enable;
	p_enable |= (1 << bit);
	bridge->p_int_enable_64 = p_enable;
	pcibr_unlock(pcibr_soft, s);
    }

    /*
     * If any handler is still running from a previous interrupt
     * just return.  If there's a need to call the handler(s) again,
     * another interrupt will be generated either by the device or by
     * pcibr_force_interrupt().
     */

    if (wrap->iw_hdlrcnt) {
	return;
    }

    /*
     * Call all interrupt handlers registered.
     * First, the pcibr_intrd threads for any threaded handlers will be
     * awoken, then any non-threaded handlers will be called sequentially.
     */

    clearit = 1;
    while (do_nonthreaded) {
	for (list = wrap->iw_list; list != NULL; list = list->il_next) {
	    if ((intr = list->il_intr) && (intr->bi_flags & PCIIO_INTR_CONNECTED)) {

		/*
		 * This device may have initiated write
		 * requests since the bridge last saw
		 * an edge on this interrupt input; flushing
		 * the buffer prior to invoking the handler
		 * should help but may not be sufficient if we
		 * get more requests after the flush, followed
		 * by the card deciding it wants service, before
		 * the interrupt handler checks to see if things need
		 * to be done.
		 *
		 * There is a similar race condition if
		 * an interrupt handler loops around and
		 * notices further service is required.
		 * Perhaps we need to have an explicit
		 * call that interrupt handlers need to
		 * do between noticing that DMA to memory
		 * has completed, but before observing the
		 * contents of memory?
		 */

		if ((do_nonthreaded) && (!is_threaded)) {
		    /* Non-threaded.  Call the interrupt handler at
		     * interrupt level.  Only need to flush write
		     * buffers if sharing.
		     */

		    if ((wrap->iw_shared) && (wrbf = list->il_wrbf)) {
			if ((x = *wrbf))	/* write request buffer flush */
#ifdef SUPPORT_PRINTING_V_FORMAT
			    printk(KERN_ALERT "pcibr_intr_func %v: \n"
				    "write buffer flush failed, wrbf=0x%x\n",
				    list->il_intr->bi_dev, wrbf);
#else
			    printk(KERN_ALERT "pcibr_intr_func %p: \n"
				    "write buffer flush failed, wrbf=0x%lx\n",
				    (void *)list->il_intr->bi_dev, (long) wrbf);
#endif
		    }
		    func = intr->bi_func;
		    if (func)
			func(intr->bi_arg);
		}
		clearit = 0;
	    }
	}
	do_nonthreaded = 0;

	/*
	 * If the non-threaded handler was the last to complete
	 * (i.e., no threaded handlers still running), force an
	 * interrupt to avoid a potential deadlock situation.
	 */
	if (wrap->iw_hdlrcnt == 0) {
	    pcibr_force_interrupt((pcibr_intr_t) wrap);
	}
    }

    /* If there were no handlers,
     * disable the interrupt and return.
     * It will get enabled again after
     * a handler is connected.
     * If we don't do this, we would
     * sit here and spin through the
     * list forever.
     */
    if (clearit) {
	pcibr_soft_t	pcibr_soft = wrap->iw_soft;
	bridge_t	*bridge = pcibr_soft->bs_base;
	bridgereg_t	int_enable;
	bridgereg_t	mask = 1 << wrap->iw_ibit;
	unsigned long	s;

	/* PIC BRINGUP WAR (PV# 854697):
	 * On PIC we must write 64-bit MMRs with 64-bit stores.
	 */
	s = pcibr_lock(pcibr_soft);
	if (IS_PIC_SOFT(pcibr_soft) &&
		    PCIBR_WAR_ENABLED(PV854697, pcibr_soft)) {
	    int_enable = bridge->p_int_enable_64;
	    int_enable &= ~mask;
	    bridge->p_int_enable_64 = int_enable;
	} else {
	    int_enable = (uint64_t)bridge->b_int_enable;
	    int_enable &= ~mask;
	    bridge->b_int_enable = (bridgereg_t)int_enable;
	}
	bridge->b_wid_tflush;	/* wait until Bridge PIO complete */
	pcibr_unlock(pcibr_soft, s);
	return;
    }
}