1 /*
2 *
3 * This file is subject to the terms and conditions of the GNU General Public
4 * License. See the file "COPYING" in the main directory of this archive
5 * for more details.
6 *
7 * Copyright (C) 2001-2003 Silicon Graphics, Inc. All rights reserved.
8 */
9
10 #include <linux/types.h>
11 #include <linux/slab.h>
12 #include <linux/module.h>
13 #include <linux/string.h>
14 #if 0
15 #include <linux/ioport.h>
16 #include <linux/interrupt.h>
17 #endif
18 #include <asm/sn/sgi.h>
19 #include <asm/sn/sn_sal.h>
20 #include <asm/sn/sn_cpuid.h>
21 #include <asm/sn/addrs.h>
22 #include <asm/sn/arch.h>
23 #include <asm/sn/iograph.h>
24 #include <asm/sn/invent.h>
25 #include <asm/sn/hcl.h>
26 #include <asm/sn/labelcl.h>
27 #include <asm/sn/klconfig.h>
28 #include <asm/sn/xtalk/xwidget.h>
29 #include <asm/sn/pci/bridge.h>
30 #include <asm/sn/pci/pciio.h>
31 #include <asm/sn/pci/pcibr.h>
32 #include <asm/sn/pci/pcibr_private.h>
33 #include <asm/sn/pci/pci_defs.h>
34 #include <asm/sn/prio.h>
35 #include <asm/sn/xtalk/xbow.h>
36 #include <asm/sn/ioc3.h>
37 #include <asm/sn/io.h>
38 #include <asm/sn/sn_private.h>
39
40 /*
41 * global variables to toggle the different levels of pcibr debugging.
42 * -pcibr_debug_mask is the mask of the different types of debugging
43 * you want to enable. See sys/PCI/pcibr_private.h
44 * -pcibr_debug_module is the module you want to trace. By default
45 * all modules are trace. For IP35 this value has the format of
46 * something like "001c10". For IP27 this value is a node number,
47 * i.e. "1", "2"... For IP30 this is undefined and should be set to
48 * 'all'.
49 * -pcibr_debug_widget is the widget you want to trace. For IP27
50 * the widget isn't exposed in the hwpath so use the xio slot num.
51 * i.e. for 'io2' set pcibr_debug_widget to "2".
52 * -pcibr_debug_slot is the pci slot you want to trace.
53 */
uint32_t pcibr_debug_mask = 0x0;	/* bitmask of debug categories; 0x00000000 to disable */
char *pcibr_debug_module = "all";	/* hwpath substring of module to trace; 'all' for all modules */
int pcibr_debug_widget = -1;		/* xtalk widget number to trace; '-1' for all widgets */
int pcibr_debug_slot = -1;		/* PCI slot number to trace; '-1' for all slots */
58
59 /*
60 * Macros related to the Lucent USS 302/312 usb timeout workaround. It
61 * appears that if the lucent part can get into a retry loop if it sees a
62 * DAC on the bus during a pio read retry. The loop is broken after about
63 * 1ms, so we need to set up bridges holding this part to allow at least
64 * 1ms for pio.
65 */
66
67 #define USS302_TIMEOUT_WAR
68
69 #ifdef USS302_TIMEOUT_WAR
70 #define LUCENT_USBHC_VENDOR_ID_NUM 0x11c1
71 #define LUCENT_USBHC302_DEVICE_ID_NUM 0x5801
72 #define LUCENT_USBHC312_DEVICE_ID_NUM 0x5802
73 #define USS302_BRIDGE_TIMEOUT_HLD 4
74 #endif
75
/* kbrick widgetnum-to-bus layout: indexed by xtalk widget number
 * (0x0 - 0xf); used by pcibr_bus_cnvlink() to build the
 * ".../<iobrick>/bus/<busnum>" convenience link.  Widgets below
 * XBOW_PORT_8 carry no PCI bus and map to 0.
 */
int p_busnum[MAX_PORT_NUM] = {			/* widget#      */
	0, 0, 0, 0, 0, 0, 0, 0,			/* 0x0 - 0x7    */
	2,					/* 0x8          */
	1,					/* 0x9          */
	0, 0,					/* 0xa - 0xb    */
	5,					/* 0xc          */
	6,					/* 0xd          */
	4,					/* 0xe          */
	3,					/* 0xf          */
};
87
88 #if PCIBR_SOFT_LIST
89 pcibr_list_p pcibr_list = 0;
90 #endif
91
92 extern int hwgraph_vertex_name_get(vertex_hdl_t vhdl, char *buf, uint buflen);
93 extern long atoi(register char *p);
94 extern cnodeid_t nodevertex_to_cnodeid(vertex_hdl_t vhdl);
95 extern char *dev_to_name(vertex_hdl_t dev, char *buf, uint buflen);
96 extern struct map *atemapalloc(uint64_t);
97 extern void atefree(struct map *, size_t, uint64_t);
98 extern void atemapfree(struct map *);
99 extern pciio_dmamap_t get_free_pciio_dmamap(vertex_hdl_t);
100 extern void free_pciio_dmamap(pcibr_dmamap_t);
101 extern void xwidget_error_register(vertex_hdl_t, error_handler_f *, error_handler_arg_t);
102
103 #define ATE_WRITE() ate_write(pcibr_soft, ate_ptr, ate_count, ate)
104 #if PCIBR_FREEZE_TIME
105 #define ATE_FREEZE() s = ate_freeze(pcibr_dmamap, &freeze_time, cmd_regs)
106 #else
107 #define ATE_FREEZE() s = ate_freeze(pcibr_dmamap, cmd_regs)
108 #endif /* PCIBR_FREEZE_TIME */
109
110 #if PCIBR_FREEZE_TIME
111 #define ATE_THAW() ate_thaw(pcibr_dmamap, ate_index, ate, ate_total, freeze_time, cmd_regs, s)
112 #else
113 #define ATE_THAW() ate_thaw(pcibr_dmamap, ate_index, cmd_regs, s)
114 #endif
115
116 /* =====================================================================
117 * Function Table of Contents
118 *
119 * The order of functions in this file has stopped
120 * making much sense. We might want to take a look
121 * at it some time and bring back some sanity, or
122 * perhaps bust this file into smaller chunks.
123 */
124
125 extern int do_pcibr_rrb_free_all(pcibr_soft_t, bridge_t *, pciio_slot_t);
126 extern void do_pcibr_rrb_autoalloc(pcibr_soft_t, int, int, int);
127
128 extern int pcibr_wrb_flush(vertex_hdl_t);
129 extern int pcibr_rrb_alloc(vertex_hdl_t, int *, int *);
130 extern void pcibr_rrb_flush(vertex_hdl_t);
131
132 static int pcibr_try_set_device(pcibr_soft_t, pciio_slot_t, unsigned, bridgereg_t);
133 void pcibr_release_device(pcibr_soft_t, pciio_slot_t, bridgereg_t);
134
135 extern void pcibr_setwidint(xtalk_intr_t);
136 extern void pcibr_clearwidint(bridge_t *);
137
138 extern iopaddr_t pcibr_bus_addr_alloc(pcibr_soft_t, pciio_win_info_t,
139 pciio_space_t, int, int, int);
140
141 int pcibr_attach(vertex_hdl_t);
142 int pcibr_attach2(vertex_hdl_t, bridge_t *, vertex_hdl_t,
143 int, pcibr_soft_t *);
144 int pcibr_detach(vertex_hdl_t);
145 int pcibr_pcix_rbars_calc(pcibr_soft_t);
146 extern int pcibr_init_ext_ate_ram(bridge_t *);
147 extern int pcibr_ate_alloc(pcibr_soft_t, int);
148 extern void pcibr_ate_free(pcibr_soft_t, int, int);
149 extern int pcibr_widget_to_bus(vertex_hdl_t pcibr_vhdl);
150
151 extern unsigned ate_freeze(pcibr_dmamap_t pcibr_dmamap,
152 #if PCIBR_FREEZE_TIME
153 unsigned *freeze_time_ptr,
154 #endif
155 unsigned *cmd_regs);
156 extern void ate_write(pcibr_soft_t pcibr_soft, bridge_ate_p ate_ptr, int ate_count, bridge_ate_t ate);
157 extern void ate_thaw(pcibr_dmamap_t pcibr_dmamap, int ate_index,
158 #if PCIBR_FREEZE_TIME
159 bridge_ate_t ate,
160 int ate_total,
161 unsigned freeze_time_start,
162 #endif
163 unsigned *cmd_regs,
164 unsigned s);
165
166 pcibr_info_t pcibr_info_get(vertex_hdl_t);
167
168 static iopaddr_t pcibr_addr_pci_to_xio(vertex_hdl_t, pciio_slot_t, pciio_space_t, iopaddr_t, size_t, unsigned);
169
170 pcibr_piomap_t pcibr_piomap_alloc(vertex_hdl_t, device_desc_t, pciio_space_t, iopaddr_t, size_t, size_t, unsigned);
171 void pcibr_piomap_free(pcibr_piomap_t);
172 caddr_t pcibr_piomap_addr(pcibr_piomap_t, iopaddr_t, size_t);
173 void pcibr_piomap_done(pcibr_piomap_t);
174 caddr_t pcibr_piotrans_addr(vertex_hdl_t, device_desc_t, pciio_space_t, iopaddr_t, size_t, unsigned);
175 iopaddr_t pcibr_piospace_alloc(vertex_hdl_t, device_desc_t, pciio_space_t, size_t, size_t);
176 void pcibr_piospace_free(vertex_hdl_t, pciio_space_t, iopaddr_t, size_t);
177
178 static iopaddr_t pcibr_flags_to_d64(unsigned, pcibr_soft_t);
179 extern bridge_ate_t pcibr_flags_to_ate(unsigned);
180
181 pcibr_dmamap_t pcibr_dmamap_alloc(vertex_hdl_t, device_desc_t, size_t, unsigned);
182 void pcibr_dmamap_free(pcibr_dmamap_t);
183 extern bridge_ate_p pcibr_ate_addr(pcibr_soft_t, int);
184 static iopaddr_t pcibr_addr_xio_to_pci(pcibr_soft_t, iopaddr_t, size_t);
185 iopaddr_t pcibr_dmamap_addr(pcibr_dmamap_t, paddr_t, size_t);
186 void pcibr_dmamap_done(pcibr_dmamap_t);
187 cnodeid_t pcibr_get_dmatrans_node(vertex_hdl_t);
188 iopaddr_t pcibr_dmatrans_addr(vertex_hdl_t, device_desc_t, paddr_t, size_t, unsigned);
189 void pcibr_dmamap_drain(pcibr_dmamap_t);
190 void pcibr_dmaaddr_drain(vertex_hdl_t, paddr_t, size_t);
191 void pcibr_dmalist_drain(vertex_hdl_t, alenlist_t);
192 iopaddr_t pcibr_dmamap_pciaddr_get(pcibr_dmamap_t);
193
194 extern unsigned pcibr_intr_bits(pciio_info_t info,
195 pciio_intr_line_t lines, int nslots);
196 extern pcibr_intr_t pcibr_intr_alloc(vertex_hdl_t, device_desc_t, pciio_intr_line_t, vertex_hdl_t);
197 extern void pcibr_intr_free(pcibr_intr_t);
198 extern void pcibr_setpciint(xtalk_intr_t);
199 extern int pcibr_intr_connect(pcibr_intr_t, intr_func_t, intr_arg_t);
200 extern void pcibr_intr_disconnect(pcibr_intr_t);
201
202 extern vertex_hdl_t pcibr_intr_cpu_get(pcibr_intr_t);
203 extern void pcibr_intr_func(intr_arg_t);
204
205 extern void print_bridge_errcmd(uint32_t, char *);
206
207 extern void pcibr_error_dump(pcibr_soft_t);
208 extern uint32_t pcibr_errintr_group(uint32_t);
209 extern void pcibr_pioerr_check(pcibr_soft_t);
210 extern void pcibr_error_intr_handler(int, void *, struct pt_regs *);
211
212 extern int pcibr_addr_toslot(pcibr_soft_t, iopaddr_t, pciio_space_t *, iopaddr_t *, pciio_function_t *);
213 extern void pcibr_error_cleanup(pcibr_soft_t, int);
214 extern void pcibr_device_disable(pcibr_soft_t, int);
215 extern int pcibr_pioerror(pcibr_soft_t, int, ioerror_mode_t, ioerror_t *);
216 extern int pcibr_dmard_error(pcibr_soft_t, int, ioerror_mode_t, ioerror_t *);
217 extern int pcibr_dmawr_error(pcibr_soft_t, int, ioerror_mode_t, ioerror_t *);
218 extern int pcibr_error_handler(error_handler_arg_t, int, ioerror_mode_t, ioerror_t *);
219 extern int pcibr_error_handler_wrapper(error_handler_arg_t, int, ioerror_mode_t, ioerror_t *);
220 void pcibr_provider_startup(vertex_hdl_t);
221 void pcibr_provider_shutdown(vertex_hdl_t);
222
223 int pcibr_reset(vertex_hdl_t);
224 pciio_endian_t pcibr_endian_set(vertex_hdl_t, pciio_endian_t, pciio_endian_t);
225 int pcibr_device_flags_set(vertex_hdl_t, pcibr_device_flags_t);
226
227 extern cfg_p pcibr_config_addr(vertex_hdl_t, unsigned);
228 extern uint64_t pcibr_config_get(vertex_hdl_t, unsigned, unsigned);
229 extern void pcibr_config_set(vertex_hdl_t, unsigned, unsigned, uint64_t);
230
231 extern pcibr_hints_t pcibr_hints_get(vertex_hdl_t, int);
232 extern void pcibr_hints_fix_rrbs(vertex_hdl_t);
233 extern void pcibr_hints_dualslot(vertex_hdl_t, pciio_slot_t, pciio_slot_t);
234 extern void pcibr_hints_intr_bits(vertex_hdl_t, pcibr_intr_bits_f *);
235 extern void pcibr_set_rrb_callback(vertex_hdl_t, rrb_alloc_funct_t);
236 extern void pcibr_hints_handsoff(vertex_hdl_t);
237 extern void pcibr_hints_subdevs(vertex_hdl_t, pciio_slot_t, uint64_t);
238
239 extern int pcibr_slot_info_init(vertex_hdl_t,pciio_slot_t);
240 extern int pcibr_slot_info_free(vertex_hdl_t,pciio_slot_t);
241 extern int pcibr_slot_info_return(pcibr_soft_t, pciio_slot_t,
242 pcibr_slot_info_resp_t);
243 extern void pcibr_slot_func_info_return(pcibr_info_h, int,
244 pcibr_slot_func_info_resp_t);
245 extern int pcibr_slot_addr_space_init(vertex_hdl_t,pciio_slot_t);
246 extern int pcibr_slot_pcix_rbar_init(pcibr_soft_t, pciio_slot_t);
247 extern int pcibr_slot_device_init(vertex_hdl_t, pciio_slot_t);
248 extern int pcibr_slot_guest_info_init(vertex_hdl_t,pciio_slot_t);
249 extern int pcibr_slot_call_device_attach(vertex_hdl_t,
250 pciio_slot_t, int);
251 extern int pcibr_slot_call_device_detach(vertex_hdl_t,
252 pciio_slot_t, int);
253 extern int pcibr_slot_attach(vertex_hdl_t, pciio_slot_t, int,
254 char *, int *);
255 extern int pcibr_slot_detach(vertex_hdl_t, pciio_slot_t, int,
256 char *, int *);
257
258 extern int pcibr_slot_initial_rrb_alloc(vertex_hdl_t, pciio_slot_t);
259 extern int pcibr_initial_rrb(vertex_hdl_t, pciio_slot_t, pciio_slot_t);
260
261 /* =====================================================================
262 * Device(x) register management
263 */
264
/* pcibr_try_set_device: attempt to modify Device(x)
 * for the specified slot on the specified bridge
 * as requested in flags, limited to the specified
 * bits. Returns which BRIDGE bits were in conflict,
 * or ZERO if everything went OK.
 *
 * NOTE(review): an older note here said "Caller MUST hold
 * pcibr_lock", but the body acquires and releases pcibr_lock
 * itself -- callers apparently must NOT already hold it; confirm
 * against the lock implementation.
 */
static int
pcibr_try_set_device(pcibr_soft_t pcibr_soft,
		     pciio_slot_t slot,
		     unsigned flags,
		     bridgereg_t mask)
{
    bridge_t               *bridge;
    pcibr_soft_slot_t       slotp;
    bridgereg_t             old;	/* Device(x) shadow on entry */
    bridgereg_t             new;	/* desired Device(x) contents */
    bridgereg_t             chg;	/* bits that would change */
    bridgereg_t             bad;	/* changes conflicting with other users */
    bridgereg_t             badpmu;	/* conflicts against PMU-map users */
    bridgereg_t             badd32;	/* conflicts against Direct32 users */
    bridgereg_t             badd64;	/* conflicts against Direct64 users */
    bridgereg_t             fix;	/* conflicts resolvable by forcing bits */
    unsigned long           s;		/* IRQ state saved by pcibr_lock */
    bridgereg_t             xmask;	/* 'mask', widened for XBridge/PIC */

    /* XBridge/PIC define extra Device(x) bits in the PMU and
     * Direct64 groups, so widen the caller's mask accordingly
     * before conflict detection.
     */
    xmask = mask;
    if (IS_XBRIDGE_OR_PIC_SOFT(pcibr_soft)) {
	if (mask == BRIDGE_DEV_PMU_BITS)
	    xmask = XBRIDGE_DEV_PMU_BITS;
	if (mask == BRIDGE_DEV_D64_BITS)
	    xmask = XBRIDGE_DEV_D64_BITS;
    }

    slotp = &pcibr_soft->bs_slot[slot];

    s = pcibr_lock(pcibr_soft);

    bridge = pcibr_soft->bs_base;

    old = slotp->bss_device;

    /* figure out what the desired
     * Device(x) bits are based on
     * the flags specified.
     */

    new = old;

    /* Currently, we inherit anything that
     * the new caller has not specified in
     * one way or another, unless we take
     * action here to not inherit.
     *
     * This is needed for the "swap" stuff,
     * since it could have been set via
     * pcibr_endian_set -- altho note that
     * any explicit PCIBR_BYTE_STREAM or
     * PCIBR_WORD_VALUES will freely override
     * the effect of that call (and vice
     * versa, no protection either way).
     *
     * I want to get rid of pcibr_endian_set
     * in favor of tracking DMA endianness
     * using the flags specified when DMA
     * channels are created.
     */

#define BRIDGE_DEV_WRGA_BITS	(BRIDGE_DEV_PMU_WRGA_EN | BRIDGE_DEV_DIR_WRGA_EN)
#define BRIDGE_DEV_SWAP_BITS	(BRIDGE_DEV_SWAP_PMU | BRIDGE_DEV_SWAP_DIR)

    /* Do not use Barrier, Write Gather,
     * or Prefetch unless asked.
     * Leave everything else as it
     * was from the last time.
     */
    new = new
	& ~BRIDGE_DEV_BARRIER
	& ~BRIDGE_DEV_WRGA_BITS
	& ~BRIDGE_DEV_PREF
	;

    /* Generic macro flags
     */
    if (flags & PCIIO_DMA_DATA) {
	new = (new
	       & ~BRIDGE_DEV_BARRIER)	/* barrier off */
	    | BRIDGE_DEV_PREF;		/* prefetch on */

    }
    if (flags & PCIIO_DMA_CMD) {
	new = ((new
		& ~BRIDGE_DEV_PREF)	/* prefetch off */
	       & ~BRIDGE_DEV_WRGA_BITS)	/* write gather off */
	    | BRIDGE_DEV_BARRIER;	/* barrier on */
    }
    /* Generic detail flags
     */
    if (flags & PCIIO_WRITE_GATHER)
	new |= BRIDGE_DEV_WRGA_BITS;
    if (flags & PCIIO_NOWRITE_GATHER)
	new &= ~BRIDGE_DEV_WRGA_BITS;

    if (flags & PCIIO_PREFETCH)
	new |= BRIDGE_DEV_PREF;
    if (flags & PCIIO_NOPREFETCH)
	new &= ~BRIDGE_DEV_PREF;

    if (flags & PCIBR_WRITE_GATHER)
	new |= BRIDGE_DEV_WRGA_BITS;
    if (flags & PCIBR_NOWRITE_GATHER)
	new &= ~BRIDGE_DEV_WRGA_BITS;

    /* On XBridge/PIC only the "direct" swap bit is touched here;
     * on the original Bridge both PMU and direct swap bits are.
     */
    if (flags & PCIIO_BYTE_STREAM)
	new |= (IS_XBRIDGE_OR_PIC_SOFT(pcibr_soft)) ?
			BRIDGE_DEV_SWAP_DIR : BRIDGE_DEV_SWAP_BITS;
    if (flags & PCIIO_WORD_VALUES)
	new &= (IS_XBRIDGE_OR_PIC_SOFT(pcibr_soft)) ?
			~BRIDGE_DEV_SWAP_DIR : ~BRIDGE_DEV_SWAP_BITS;

    /* Provider-specific flags
     */
    if (flags & PCIBR_PREFETCH)
	new |= BRIDGE_DEV_PREF;
    if (flags & PCIBR_NOPREFETCH)
	new &= ~BRIDGE_DEV_PREF;

    if (flags & PCIBR_PRECISE)
	new |= BRIDGE_DEV_PRECISE;
    if (flags & PCIBR_NOPRECISE)
	new &= ~BRIDGE_DEV_PRECISE;

    if (flags & PCIBR_BARRIER)
	new |= BRIDGE_DEV_BARRIER;
    if (flags & PCIBR_NOBARRIER)
	new &= ~BRIDGE_DEV_BARRIER;

    if (flags & PCIBR_64BIT)
	new |= BRIDGE_DEV_DEV_SIZE;
    if (flags & PCIBR_NO64BIT)
	new &= ~BRIDGE_DEV_DEV_SIZE;

    /*
     * PIC BRINGUP WAR (PV# 855271):
     * Allow setting BRIDGE_DEV_VIRTUAL_EN on PIC iff we're a 64-bit
     * device.  The bit is only intended for 64-bit devices and, on
     * PIC, can cause problems for 32-bit devices.
     */
    if (IS_PIC_SOFT(pcibr_soft) && mask == BRIDGE_DEV_D64_BITS &&
				PCIBR_WAR_ENABLED(PV855271, pcibr_soft)) {
	if (flags & PCIBR_VCHAN1) {
	    new |= BRIDGE_DEV_VIRTUAL_EN;
	    xmask |= BRIDGE_DEV_VIRTUAL_EN;
	}
    }

    chg = old ^ new;			/* what are we changing, */
    chg &= xmask;			/* of the interesting bits */

    if (chg) {

	/* A change only conflicts with a register-group's current
	 * settings if that group has active users (uctr != 0).
	 */
	badd32 = slotp->bss_d32_uctr ? (BRIDGE_DEV_D32_BITS & chg) : 0;
	if (IS_XBRIDGE_OR_PIC_SOFT(pcibr_soft)) {
	    badpmu = slotp->bss_pmu_uctr ? (XBRIDGE_DEV_PMU_BITS & chg) : 0;
	    badd64 = slotp->bss_d64_uctr ? (XBRIDGE_DEV_D64_BITS & chg) : 0;
	} else {
	    badpmu = slotp->bss_pmu_uctr ? (BRIDGE_DEV_PMU_BITS & chg) : 0;
	    badd64 = slotp->bss_d64_uctr ? (BRIDGE_DEV_D64_BITS & chg) : 0;
	}
	bad = badpmu | badd32 | badd64;

	if (bad) {

	    /* some conflicts can be resolved by
	     * forcing the bit on. this may cause
	     * some performance degredation in
	     * the stream(s) that want the bit off,
	     * but the alternative is not allowing
	     * the new stream at all.
	     */
	    if ( (fix = bad & (BRIDGE_DEV_PRECISE |
			       BRIDGE_DEV_BARRIER)) ) {
		bad &= ~fix;
		/* don't change these bits if
		 * they are already set in "old"
		 */
		chg &= ~(fix & old);
	    }
	    /* some conflicts can be resolved by
	     * forcing the bit off. this may cause
	     * some performance degredation in
	     * the stream(s) that want the bit on,
	     * but the alternative is not allowing
	     * the new stream at all.
	     */
	    if ( (fix = bad & (BRIDGE_DEV_WRGA_BITS |
			       BRIDGE_DEV_PREF)) ) {
		bad &= ~fix;
		/* don't change these bits if
		 * we wanted to turn them on.
		 */
		chg &= ~(fix & new);
	    }
	    /* conflicts in other bits mean
	     * we can not establish this DMA
	     * channel while the other(s) are
	     * still present.
	     */
	    if (bad) {
		pcibr_unlock(pcibr_soft, s);
#ifdef PIC_LATER
		PCIBR_DEBUG((PCIBR_DEBUG_DEVREG, pcibr_soft->bs_vhdl,
			    "pcibr_try_set_device: mod blocked by %x\n",
			    bad, device_bits));
#endif
		return bad;
	    }
	}
    }
    /* No irreconcilable conflicts: count the new user of the
     * selected register group before committing the change.
     */
    if (mask == BRIDGE_DEV_PMU_BITS)
	slotp->bss_pmu_uctr++;
    if (mask == BRIDGE_DEV_D32_BITS)
	slotp->bss_d32_uctr++;
    if (mask == BRIDGE_DEV_D64_BITS)
	slotp->bss_d64_uctr++;

    /* the value we want to write is the
     * original value, with the bits for
     * our selected changes flipped, and
     * with any disabled features turned off.
     */
    new = old ^ chg;			/* only change what we want to change */

    if (slotp->bss_device == new) {
	pcibr_unlock(pcibr_soft, s);
	return 0;
    }
    if ( IS_PIC_SOFT(pcibr_soft) ) {
	bridge->b_device[slot].reg = new;
	slotp->bss_device = new;
	bridge->b_wid_tflush;		/* wait until Bridge PIO complete */
    }
    pcibr_unlock(pcibr_soft, s);

#ifdef PIC_LATER
    PCIBR_DEBUG((PCIBR_DEBUG_DEVREG, pcibr_soft->bs_vhdl,
		"pcibr_try_set_device: Device(%d): %x\n",
		slot, new, device_bits));
#else
    /* NOTE(review): unconditional printk on every successful update */
    printk("pcibr_try_set_device: Device(%d): %x\n", slot, new);
#endif
    return 0;
}
520
521 void
pcibr_release_device(pcibr_soft_t pcibr_soft,pciio_slot_t slot,bridgereg_t mask)522 pcibr_release_device(pcibr_soft_t pcibr_soft,
523 pciio_slot_t slot,
524 bridgereg_t mask)
525 {
526 pcibr_soft_slot_t slotp;
527 unsigned long s;
528
529 slotp = &pcibr_soft->bs_slot[slot];
530
531 s = pcibr_lock(pcibr_soft);
532
533 if (mask == BRIDGE_DEV_PMU_BITS)
534 slotp->bss_pmu_uctr--;
535 if (mask == BRIDGE_DEV_D32_BITS)
536 slotp->bss_d32_uctr--;
537 if (mask == BRIDGE_DEV_D64_BITS)
538 slotp->bss_d64_uctr--;
539
540 pcibr_unlock(pcibr_soft, s);
541 }
542
543
544 /* =====================================================================
545 * Bridge (pcibr) "Device Driver" entry points
546 */
547
548
549 static int
pcibr_mmap(struct file * file,struct vm_area_struct * vma)550 pcibr_mmap(struct file * file, struct vm_area_struct * vma)
551 {
552 vertex_hdl_t pcibr_vhdl;
553 pcibr_soft_t pcibr_soft;
554 bridge_t *bridge;
555 unsigned long phys_addr;
556 int error = 0;
557
558 #ifdef CONFIG_HWGFS_FS
559 pcibr_vhdl = (vertex_hdl_t) file->f_dentry->d_fsdata;
560 #else
561 pcibr_vhdl = (vertex_hdl_t) file->private_data;
562 #endif
563 pcibr_soft = pcibr_soft_get(pcibr_vhdl);
564 bridge = pcibr_soft->bs_base;
565 phys_addr = (unsigned long)bridge & ~0xc000000000000000; /* Mask out the Uncache bits */
566 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
567 vma->vm_flags |= VM_NONCACHED | VM_RESERVED | VM_IO;
568 error = io_remap_page_range(vma->vm_start, phys_addr,
569 vma->vm_end-vma->vm_start,
570 vma->vm_page_prot);
571 return(error);
572 }
573
574 /*
575 * This is the file operation table for the pcibr driver.
576 * As each of the functions are implemented, put the
577 * appropriate function name below.
578 */
579 static int pcibr_mmap(struct file * file, struct vm_area_struct * vma);
580 struct file_operations pcibr_fops = {
581 .owner = THIS_MODULE,
582 .mmap = pcibr_mmap,
583 };
584
585 /* This is special case code used by grio. There are plans to make
586 * this a bit more general in the future, but till then this should
587 * be sufficient.
588 */
589 pciio_slot_t
pcibr_device_slot_get(vertex_hdl_t dev_vhdl)590 pcibr_device_slot_get(vertex_hdl_t dev_vhdl)
591 {
592 char devname[MAXDEVNAME];
593 vertex_hdl_t tdev;
594 pciio_info_t pciio_info;
595 pciio_slot_t slot = PCIIO_SLOT_NONE;
596
597 vertex_to_name(dev_vhdl, devname, MAXDEVNAME);
598
599 /* run back along the canonical path
600 * until we find a PCI connection point.
601 */
602 tdev = hwgraph_connectpt_get(dev_vhdl);
603 while (tdev != GRAPH_VERTEX_NONE) {
604 pciio_info = pciio_info_chk(tdev);
605 if (pciio_info) {
606 slot = PCIBR_INFO_SLOT_GET_INT(pciio_info);
607 break;
608 }
609 hwgraph_vertex_unref(tdev);
610 tdev = hwgraph_connectpt_get(tdev);
611 }
612 hwgraph_vertex_unref(tdev);
613
614 return slot;
615 }
616
617 pcibr_info_t
pcibr_info_get(vertex_hdl_t vhdl)618 pcibr_info_get(vertex_hdl_t vhdl)
619 {
620 return (pcibr_info_t) pciio_info_get(vhdl);
621 }
622
623 pcibr_info_t
pcibr_device_info_new(pcibr_soft_t pcibr_soft,pciio_slot_t slot,pciio_function_t rfunc,pciio_vendor_id_t vendor,pciio_device_id_t device)624 pcibr_device_info_new(
625 pcibr_soft_t pcibr_soft,
626 pciio_slot_t slot,
627 pciio_function_t rfunc,
628 pciio_vendor_id_t vendor,
629 pciio_device_id_t device)
630 {
631 pcibr_info_t pcibr_info;
632 pciio_function_t func;
633 int ibit;
634
635 func = (rfunc == PCIIO_FUNC_NONE) ? 0 : rfunc;
636
637 /*
638 * Create a pciio_info_s for this device. pciio_device_info_new()
639 * will set the c_slot (which is suppose to represent the external
640 * slot (i.e the slot number silk screened on the back of the I/O
641 * brick)). So for PIC we need to adjust this "internal slot" num
642 * passed into us, into its external representation. See comment
643 * for the PCIBR_DEVICE_TO_SLOT macro for more information.
644 */
645 NEW(pcibr_info);
646 pciio_device_info_new(&pcibr_info->f_c, pcibr_soft->bs_vhdl,
647 PCIBR_DEVICE_TO_SLOT(pcibr_soft, slot),
648 rfunc, vendor, device);
649 pcibr_info->f_dev = slot;
650
651 /* Set PCI bus number */
652 pcibr_info->f_bus = pcibr_widget_to_bus(pcibr_soft->bs_vhdl);
653
654 if (slot != PCIIO_SLOT_NONE) {
655
656 /*
657 * Currently favored mapping from PCI
658 * slot number and INTA/B/C/D to Bridge
659 * PCI Interrupt Bit Number:
660 *
661 * SLOT A B C D
662 * 0 0 4 0 4
663 * 1 1 5 1 5
664 * 2 2 6 2 6
665 * 3 3 7 3 7
666 * 4 4 0 4 0
667 * 5 5 1 5 1
668 * 6 6 2 6 2
669 * 7 7 3 7 3
670 *
671 * XXX- allow pcibr_hints to override default
672 * XXX- allow ADMIN to override pcibr_hints
673 */
674 for (ibit = 0; ibit < 4; ++ibit)
675 pcibr_info->f_ibit[ibit] =
676 (slot + 4 * ibit) & 7;
677
678 /*
679 * Record the info in the sparse func info space.
680 */
681 if (func < pcibr_soft->bs_slot[slot].bss_ninfo)
682 pcibr_soft->bs_slot[slot].bss_infos[func] = pcibr_info;
683 }
684 return pcibr_info;
685 }
686
687
688 /*
689 * pcibr_device_unregister
690 * This frees up any hardware resources reserved for this PCI device
691 * and removes any PCI infrastructural information setup for it.
692 * This is usually used at the time of shutting down of the PCI card.
693 */
694 int
pcibr_device_unregister(vertex_hdl_t pconn_vhdl)695 pcibr_device_unregister(vertex_hdl_t pconn_vhdl)
696 {
697 pciio_info_t pciio_info;
698 vertex_hdl_t pcibr_vhdl;
699 pciio_slot_t slot;
700 pcibr_soft_t pcibr_soft;
701 bridge_t *bridge;
702 int count_vchan0, count_vchan1;
703 unsigned s;
704 int error_call;
705 int error = 0;
706
707 pciio_info = pciio_info_get(pconn_vhdl);
708
709 pcibr_vhdl = pciio_info_master_get(pciio_info);
710 slot = PCIBR_INFO_SLOT_GET_INT(pciio_info);
711
712 pcibr_soft = pcibr_soft_get(pcibr_vhdl);
713 bridge = pcibr_soft->bs_base;
714
715 /* Clear all the hardware xtalk resources for this device */
716 xtalk_widgetdev_shutdown(pcibr_soft->bs_conn, slot);
717
718 /* Flush all the rrbs */
719 pcibr_rrb_flush(pconn_vhdl);
720
721 /*
722 * If the RRB configuration for this slot has changed, set it
723 * back to the boot-time default
724 */
725 if (pcibr_soft->bs_rrb_valid_dflt[slot][VCHAN0] >= 0) {
726
727 s = pcibr_lock(pcibr_soft);
728
729 /* PIC NOTE: If this is a BRIDGE, VCHAN2 & VCHAN3 will be zero so
730 * no need to conditionalize this (ie. "if (IS_PIC_SOFT())" ).
731 */
732 pcibr_soft->bs_rrb_res[slot] = pcibr_soft->bs_rrb_res[slot] +
733 pcibr_soft->bs_rrb_valid[slot][VCHAN0] +
734 pcibr_soft->bs_rrb_valid[slot][VCHAN1] +
735 pcibr_soft->bs_rrb_valid[slot][VCHAN2] +
736 pcibr_soft->bs_rrb_valid[slot][VCHAN3];
737
738 /* Free the rrbs allocated to this slot, both the normal & virtual */
739 do_pcibr_rrb_free_all(pcibr_soft, bridge, slot);
740
741 count_vchan0 = pcibr_soft->bs_rrb_valid_dflt[slot][VCHAN0];
742 count_vchan1 = pcibr_soft->bs_rrb_valid_dflt[slot][VCHAN1];
743
744 pcibr_unlock(pcibr_soft, s);
745
746 pcibr_rrb_alloc(pconn_vhdl, &count_vchan0, &count_vchan1);
747
748 }
749
750 /* Flush the write buffers !! */
751 error_call = pcibr_wrb_flush(pconn_vhdl);
752
753 if (error_call)
754 error = error_call;
755
756 /* Clear the information specific to the slot */
757 error_call = pcibr_slot_info_free(pcibr_vhdl, slot);
758
759 if (error_call)
760 error = error_call;
761
762 return(error);
763
764 }
765
766 /*
767 * pcibr_driver_reg_callback
768 * CDL will call this function for each device found in the PCI
769 * registry that matches the vendor/device IDs supported by
770 * the driver being registered. The device's connection vertex
771 * and the driver's attach function return status enable the
772 * slot's device status to be set.
773 */
774 void
pcibr_driver_reg_callback(vertex_hdl_t pconn_vhdl,int key1,int key2,int error)775 pcibr_driver_reg_callback(vertex_hdl_t pconn_vhdl,
776 int key1, int key2, int error)
777 {
778 pciio_info_t pciio_info;
779 pcibr_info_t pcibr_info;
780 vertex_hdl_t pcibr_vhdl;
781 pciio_slot_t slot;
782 pcibr_soft_t pcibr_soft;
783
784 /* Do not set slot status for vendor/device ID wildcard drivers */
785 if ((key1 == -1) || (key2 == -1))
786 return;
787
788 pciio_info = pciio_info_get(pconn_vhdl);
789 pcibr_info = pcibr_info_get(pconn_vhdl);
790
791 pcibr_vhdl = pciio_info_master_get(pciio_info);
792 slot = PCIBR_INFO_SLOT_GET_INT(pciio_info);
793
794 pcibr_soft = pcibr_soft_get(pcibr_vhdl);
795
796 #ifdef PIC_LATER
797 /* This may be a loadable driver so lock out any pciconfig actions */
798 mrlock(pcibr_soft->bs_bus_lock, MR_UPDATE, PZERO);
799 #endif
800
801 pcibr_info->f_att_det_error = error;
802
803 pcibr_soft->bs_slot[slot].slot_status &= ~SLOT_STATUS_MASK;
804
805 if (error) {
806 pcibr_soft->bs_slot[slot].slot_status |= SLOT_STARTUP_INCMPLT;
807 } else {
808 pcibr_soft->bs_slot[slot].slot_status |= SLOT_STARTUP_CMPLT;
809 }
810
811 #ifdef PIC_LATER
812 /* Release the bus lock */
813 mrunlock(pcibr_soft->bs_bus_lock);
814 #endif
815 }
816
817 /*
818 * pcibr_driver_unreg_callback
819 * CDL will call this function for each device found in the PCI
820 * registry that matches the vendor/device IDs supported by
821 * the driver being unregistered. The device's connection vertex
822 * and the driver's detach function return status enable the
823 * slot's device status to be set.
824 */
825 void
pcibr_driver_unreg_callback(vertex_hdl_t pconn_vhdl,int key1,int key2,int error)826 pcibr_driver_unreg_callback(vertex_hdl_t pconn_vhdl,
827 int key1, int key2, int error)
828 {
829 pciio_info_t pciio_info;
830 pcibr_info_t pcibr_info;
831 vertex_hdl_t pcibr_vhdl;
832 pciio_slot_t slot;
833 pcibr_soft_t pcibr_soft;
834
835 /* Do not set slot status for vendor/device ID wildcard drivers */
836 if ((key1 == -1) || (key2 == -1))
837 return;
838
839 pciio_info = pciio_info_get(pconn_vhdl);
840 pcibr_info = pcibr_info_get(pconn_vhdl);
841
842 pcibr_vhdl = pciio_info_master_get(pciio_info);
843 slot = PCIBR_INFO_SLOT_GET_INT(pciio_info);
844
845 pcibr_soft = pcibr_soft_get(pcibr_vhdl);
846
847 #ifdef PIC_LATER
848 /* This may be a loadable driver so lock out any pciconfig actions */
849 mrlock(pcibr_soft->bs_bus_lock, MR_UPDATE, PZERO);
850 #endif
851
852 pcibr_info->f_att_det_error = error;
853
854 pcibr_soft->bs_slot[slot].slot_status &= ~SLOT_STATUS_MASK;
855
856 if (error) {
857 pcibr_soft->bs_slot[slot].slot_status |= SLOT_SHUTDOWN_INCMPLT;
858 } else {
859 pcibr_soft->bs_slot[slot].slot_status |= SLOT_SHUTDOWN_CMPLT;
860 }
861
862 #ifdef PIC_LATER
863 /* Release the bus lock */
864 mrunlock(pcibr_soft->bs_bus_lock);
865 #endif
866 }
867
868 /*
869 * build a convenience link path in the
870 * form of ".../<iobrick>/bus/<busnum>"
871 *
872 * returns 1 on success, 0 otherwise
873 *
874 * depends on hwgraph separator == '/'
875 */
876 int
pcibr_bus_cnvlink(vertex_hdl_t f_c)877 pcibr_bus_cnvlink(vertex_hdl_t f_c)
878 {
879 char dst[MAXDEVNAME];
880 char *dp = dst;
881 char *cp, *xp;
882 int widgetnum;
883 char pcibus[8];
884 vertex_hdl_t nvtx, svtx;
885 int rv;
886
887 PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_ATTACH, f_c, "pcibr_bus_cnvlink\n"));
888
889 if (GRAPH_SUCCESS != hwgraph_vertex_name_get(f_c, dst, MAXDEVNAME)) {
890 return 0;
891 }
892
893 /* dst example == /hw/module/001c02/Pbrick/xtalk/8/pci/direct */
894
895 /* find the widget number */
896 xp = strstr(dst, "/"EDGE_LBL_XTALK"/");
897 if (xp == NULL) {
898 return 0;
899 }
900 widgetnum = simple_strtoul(xp+7, NULL, 0);
901 if (widgetnum < XBOW_PORT_8 || widgetnum > XBOW_PORT_F) {
902 return 0;
903 }
904
905 /* remove "/pci/direct" from path */
906 cp = strstr(dst, "/" EDGE_LBL_PCI "/" EDGE_LBL_DIRECT);
907 if (cp == NULL) {
908 return 0;
909 }
910 *cp = (char)NULL;
911
912 /* get the vertex for the widget */
913 if (GRAPH_SUCCESS != hwgraph_traverse(NULL, dp, &svtx)) {
914 return 0;
915 }
916
917 *xp = (char)NULL; /* remove "/xtalk/..." from path */
918
919 /* dst example now == /hw/module/001c02/Pbrick */
920
921 /* get the bus number */
922 strcat(dst, "/");
923 strcat(dst, EDGE_LBL_BUS);
924 sprintf(pcibus, "%d", p_busnum[widgetnum]);
925
926 /* link to bus to widget */
927 rv = hwgraph_path_add(NULL, dp, &nvtx);
928 if (GRAPH_SUCCESS == rv)
929 rv = hwgraph_edge_add(nvtx, svtx, pcibus);
930
931 return (rv == GRAPH_SUCCESS);
932 }
933
934
935 /*
936 * pcibr_attach: called every time the crosstalk
937 * infrastructure is asked to initialize a widget
938 * that matches the part number we handed to the
939 * registration routine above.
940 */
941 /*ARGSUSED */
942 int
pcibr_attach(vertex_hdl_t xconn_vhdl)943 pcibr_attach(vertex_hdl_t xconn_vhdl)
944 {
945 /* REFERENCED */
946 graph_error_t rc;
947 vertex_hdl_t pcibr_vhdl;
948 bridge_t *bridge;
949
950 PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_ATTACH, xconn_vhdl, "pcibr_attach\n"));
951
952 bridge = (bridge_t *)
953 xtalk_piotrans_addr(xconn_vhdl, NULL,
954 0, sizeof(bridge_t), 0);
955 /*
956 * Create the vertex for the PCI bus, which we
957 * will also use to hold the pcibr_soft and
958 * which will be the "master" vertex for all the
959 * pciio connection points we will hang off it.
960 * This needs to happen before we call nic_bridge_vertex_info
961 * as we are some of the *_vmc functions need access to the edges.
962 *
963 * Opening this vertex will provide access to
964 * the Bridge registers themselves.
965 */
966 rc = hwgraph_path_add(xconn_vhdl, EDGE_LBL_PCI, &pcibr_vhdl);
967 ASSERT(rc == GRAPH_SUCCESS);
968
969 pciio_provider_register(pcibr_vhdl, &pcibr_provider);
970 pciio_provider_startup(pcibr_vhdl);
971
972 return pcibr_attach2(xconn_vhdl, bridge, pcibr_vhdl, 0, NULL);
973 }
974
975
/*
 * pcibr_attach2: perform the full bring-up of one Bridge/XBridge/PIC PCI
 * bus: allocate and populate the per-bus soft state, program the ASIC's
 * control/interrupt/direct-map registers, initialize ATE resources and
 * PCI I/O / memory resource maps, create device connection points for
 * every slot, pre-allocate RRBs for known brick configurations, and
 * finally attach the devices found on the bus.
 *
 * xconn_vhdl - crosstalk connection point for the widget
 * bridge     - PIO-mapped bridge ASIC register block
 * pcibr_vhdl - hwgraph vertex for this PCI bus
 * busnum     - bus number under the widget (PIC has two busses)
 * ret_softp  - optional out-param receiving the new pcibr_soft_t
 *
 * Returns 0 on success, -1 if hints mark the vertex "hands off".
 * NOTE(review): most error paths in here panic or oops rather than
 * unwinding; this matches the attach-time expectations of this driver.
 */
/*ARGSUSED */
int
pcibr_attach2(vertex_hdl_t xconn_vhdl, bridge_t *bridge,
              vertex_hdl_t pcibr_vhdl, int busnum, pcibr_soft_t *ret_softp)
{
    /* REFERENCED */
    vertex_hdl_t ctlr_vhdl;
    bridgereg_t id;
    int rev;
    pcibr_soft_t pcibr_soft;
    pcibr_info_t pcibr_info;
    xwidget_info_t info;
    xtalk_intr_t xtalk_intr;
    int slot;
    int ibit;
    vertex_hdl_t noslot_conn;
    char devnm[MAXDEVNAME], *s;
    pcibr_hints_t pcibr_hints;
    uint64_t int_enable;
    picreg_t int_enable_64;
    unsigned rrb_fixed = 0;

#if PCI_FBBE
    int fast_back_to_back_enable;
#endif
    nasid_t nasid;
    int iobrick_type_get_nasid(nasid_t nasid);
    int iobrick_module_get_nasid(nasid_t nasid);

    PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_ATTACH, pcibr_vhdl,
                "pcibr_attach2: bridge=0x%p, busnum=%d\n", bridge, busnum));

    /* Register a character device node for user-level access to the bus. */
    ctlr_vhdl = NULL;
    ctlr_vhdl = hwgraph_register(pcibr_vhdl, EDGE_LBL_CONTROLLER, 0,
                DEVFS_FL_AUTO_DEVNUM, 0, 0,
                S_IFCHR | S_IRUSR | S_IWUSR | S_IRGRP, 0, 0,
                (struct file_operations *)&pcibr_fops, (void *)pcibr_vhdl);
    ASSERT(ctlr_vhdl != NULL);

    /*
     * Get the hint structure; if some NIC callback
     * marked this vertex as "hands-off" then we
     * just return here, before doing anything else.
     */
    pcibr_hints = pcibr_hints_get(xconn_vhdl, 0);

    if (pcibr_hints && pcibr_hints->ph_hands_off)
        return -1;                      /* generic operations disabled */

    id = bridge->b_wid_id;
    rev = XWIDGET_PART_REV_NUM(id);

    /* Record the ASIC revision on the vertex; pcibr_asic_rev() reads it. */
    hwgraph_info_add_LBL(pcibr_vhdl, INFO_LBL_PCIBR_ASIC_REV, (arbitrary_info_t) rev);

    /*
     * allocate soft state structure, fill in some
     * fields, and hook it up to our vertex.
     */
    NEW(pcibr_soft);
    if (ret_softp)
        *ret_softp = pcibr_soft;
    memset(pcibr_soft, 0, sizeof *pcibr_soft);
    pcibr_soft_set(pcibr_vhdl, pcibr_soft);
    pcibr_soft->bs_conn = xconn_vhdl;
    pcibr_soft->bs_vhdl = pcibr_vhdl;
    pcibr_soft->bs_base = bridge;
    pcibr_soft->bs_rev_num = rev;
    pcibr_soft->bs_intr_bits = (pcibr_intr_bits_f *)pcibr_intr_bits;

    pcibr_soft->bs_min_slot = 0;        /* lowest possible slot# */
    pcibr_soft->bs_max_slot = 7;        /* highest possible slot# */
    pcibr_soft->bs_busnum = busnum;
    /* NOTE(review): bridge type is hard-wired to PIC here; the other
     * switch arms below appear to be kept for reference/older ASICs.
     */
    pcibr_soft->bs_bridge_type = PCIBR_BRIDGETYPE_PIC;
    switch(pcibr_soft->bs_bridge_type) {
    case PCIBR_BRIDGETYPE_BRIDGE:
        pcibr_soft->bs_int_ate_size = BRIDGE_INTERNAL_ATES;
        pcibr_soft->bs_bridge_mode = 0; /* speed is not available in bridge */
        break;
    case PCIBR_BRIDGETYPE_PIC:
        pcibr_soft->bs_min_slot = 0;
        pcibr_soft->bs_max_slot = 3;    /* PIC busses have only 4 slots */
        pcibr_soft->bs_int_ate_size = XBRIDGE_INTERNAL_ATES;
        /* Combine the PCI-X speed and active status bits into the mode. */
        pcibr_soft->bs_bridge_mode =
            (((bridge->p_wid_stat_64 & PIC_STAT_PCIX_SPEED) >> 33) |
             ((bridge->p_wid_stat_64 & PIC_STAT_PCIX_ACTIVE) >> 33));

        /* We have to clear PIC's write request buffer to avoid parity
         * errors. See PV#854845.
         */
        {
            int i;

            for (i=0; i < PIC_WR_REQ_BUFSIZE; i++) {
                bridge->p_wr_req_lower[i] = 0;
                bridge->p_wr_req_upper[i] = 0;
                bridge->p_wr_req_parity[i] = 0;
            }
        }

        break;
    case PCIBR_BRIDGETYPE_XBRIDGE:
        pcibr_soft->bs_int_ate_size = XBRIDGE_INTERNAL_ATES;
        pcibr_soft->bs_bridge_mode =
            ((bridge->b_wid_control & BRIDGE_CTRL_PCI_SPEED) >> 3);
        break;
    }

    PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_ATTACH, pcibr_vhdl,
                "pcibr_attach2: pcibr_soft=0x%x, mode=0x%x\n",
                pcibr_soft, pcibr_soft->bs_bridge_mode));
    pcibr_soft->bsi_err_intr = 0;

    /* Bridges up through REV C
     * are unable to set the direct
     * byteswappers to BYTE_STREAM.
     */
    if (pcibr_soft->bs_rev_num <= BRIDGE_PART_REV_C) {
        pcibr_soft->bs_pio_end_io = PCIIO_WORD_VALUES;
        pcibr_soft->bs_pio_end_mem = PCIIO_WORD_VALUES;
    }
#if PCIBR_SOFT_LIST
    /*
     * link all the pcibr_soft structs
     */
    {
        pcibr_list_p self;

        NEW(self);
        self->bl_soft = pcibr_soft;
        self->bl_vhdl = pcibr_vhdl;
        self->bl_next = pcibr_list;
        pcibr_list = self;
    }
#endif /* PCIBR_SOFT_LIST */

    /*
     * get the name of this bridge vertex and keep the info. Use this
     * only where it is really needed now: like error interrupts.
     * NOTE(review): kmalloc return is not checked here; an OOM at attach
     * time would oops in the strcpy below -- confirm this is acceptable.
     */
    s = dev_to_name(pcibr_vhdl, devnm, MAXDEVNAME);
    pcibr_soft->bs_name = kmalloc(strlen(s) + 1, GFP_KERNEL);
    strcpy(pcibr_soft->bs_name, s);

    PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_ATTACH, pcibr_vhdl,
                "pcibr_attach2: %s ASIC: rev %s (code=0x%x)\n",
                IS_XBRIDGE_SOFT(pcibr_soft) ? "XBridge" :
                IS_PIC_SOFT(pcibr_soft) ? "PIC" : "Bridge",
                (rev == BRIDGE_PART_REV_A) ? "A" :
                (rev == BRIDGE_PART_REV_B) ? "B" :
                (rev == BRIDGE_PART_REV_C) ? "C" :
                (rev == BRIDGE_PART_REV_D) ? "D" :
                (rev == XBRIDGE_PART_REV_A) ? "A" :
                (rev == XBRIDGE_PART_REV_B) ? "B" :
                (IS_PIC_PART_REV_A(rev)) ? "A" :
                "unknown", rev, pcibr_soft->bs_name));

    /* Cache this widget's identity and its master (hub) linkage. */
    info = xwidget_info_get(xconn_vhdl);
    pcibr_soft->bs_xid = xwidget_info_id_get(info);
    pcibr_soft->bs_master = xwidget_info_master_get(info);
    pcibr_soft->bs_mxid = xwidget_info_masterid_get(info);

    pcibr_soft->bs_first_slot = pcibr_soft->bs_min_slot;
    pcibr_soft->bs_last_slot = pcibr_soft->bs_max_slot;
    /*
     * Bridge can only reset slots 0, 1, 2, and 3. Ibrick internal
     * slots 4, 5, 6, and 7 must be reset as a group, so do not
     * reset them.
     */
    pcibr_soft->bs_last_reset = 3;

    nasid = NASID_GET(bridge);

    if ((pcibr_soft->bs_bricktype = iobrick_type_get_nasid(nasid)) < 0)
        printk(KERN_WARNING "0x%p: Unknown bricktype : 0x%x\n", (void *)xconn_vhdl,
               (unsigned int)pcibr_soft->bs_bricktype);

    pcibr_soft->bs_moduleid = iobrick_module_get_nasid(nasid);

    /* Narrow the usable/resettable slot range per brick type. */
    if (pcibr_soft->bs_bricktype > 0) {
        switch (pcibr_soft->bs_bricktype) {
        case MODULE_PXBRICK:
        case MODULE_IXBRICK:
        case MODULE_OPUSBRICK:
            pcibr_soft->bs_first_slot = 0;
            pcibr_soft->bs_last_slot = 1;
            pcibr_soft->bs_last_reset = 1;

            /* If Bus 1 has IO9 then there are 4 devices in that bus. Note
             * we figure this out from klconfig since the kernel has yet to
             * probe
             */
            if (pcibr_widget_to_bus(pcibr_vhdl) == 1) {
                lboard_t *brd = (lboard_t *)KL_CONFIG_INFO(nasid);

                while (brd) {
                    if (brd->brd_flags & LOCAL_MASTER_IO6) {
                        pcibr_soft->bs_last_slot = 3;
                        pcibr_soft->bs_last_reset = 3;
                    }
                    brd = KLCF_NEXT(brd);
                }
            }
            break;
        case MODULE_PBRICK:
            pcibr_soft->bs_first_slot = 1;
            pcibr_soft->bs_last_slot = 2;
            pcibr_soft->bs_last_reset = 2;
            break;

        case MODULE_IBRICK:
            /*
             * Here's the current baseio layout for SN1 style systems:
             *
             *    0    1    2    3    4    5    6    7         slot#
             *
             *    x    scsi x    x    ioc3 usb  x    x         O300 Ibrick
             *
             * x == never occupied
             * E == external (add-in) slot
             *
             */
            pcibr_soft->bs_first_slot = 1;      /* Ibrick first slot == 1 */
            if (pcibr_soft->bs_xid == 0xe) {
                pcibr_soft->bs_last_slot = 2;
                pcibr_soft->bs_last_reset = 2;
            } else {
                pcibr_soft->bs_last_slot = 6;
            }
            break;

        case MODULE_CGBRICK:
            pcibr_soft->bs_first_slot = 0;
            pcibr_soft->bs_last_slot = 0;
            pcibr_soft->bs_last_reset = 0;
            break;

        default:
            break;
        }

        PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_ATTACH, pcibr_vhdl,
                    "pcibr_attach2: %cbrick, slots %d-%d\n",
                    MODULE_GET_BTCHAR(pcibr_soft->bs_moduleid),
                    pcibr_soft->bs_first_slot, pcibr_soft->bs_last_slot));
    }

    /*
     * Initialize bridge and bus locks
     */
    spin_lock_init(&pcibr_soft->bs_lock);
#ifdef PIC_LATER
    mrinit(pcibr_soft->bs_bus_lock, "bus_lock");
#endif
    /*
     * If we have one, process the hints structure.
     */
    if (pcibr_hints) {
        PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_HINTS, pcibr_vhdl,
                    "pcibr_attach2: pcibr_hints=0x%x\n", pcibr_hints));

        rrb_fixed = pcibr_hints->ph_rrb_fixed;

        pcibr_soft->bs_rrb_fixed = rrb_fixed;

        if (pcibr_hints->ph_intr_bits) {
            pcibr_soft->bs_intr_bits = pcibr_hints->ph_intr_bits;
        }

        /* ph_host_slot entries are 1-based; 0 means "no host slot". */
        for (slot = pcibr_soft->bs_min_slot;
             slot < PCIBR_NUM_SLOTS(pcibr_soft); ++slot) {
            int hslot = pcibr_hints->ph_host_slot[slot] - 1;

            if (hslot < 0) {
                pcibr_soft->bs_slot[slot].host_slot = slot;
            } else {
                pcibr_soft->bs_slot[slot].has_host = 1;
                pcibr_soft->bs_slot[slot].host_slot = hslot;
            }
        }
    }
    /*
     * Set-up initial values for state fields
     */
    for (slot = pcibr_soft->bs_min_slot;
         slot < PCIBR_NUM_SLOTS(pcibr_soft); ++slot) {
        pcibr_soft->bs_slot[slot].bss_devio.bssd_space = PCIIO_SPACE_NONE;
        pcibr_soft->bs_slot[slot].bss_devio.bssd_ref_cnt = 0;
        pcibr_soft->bs_slot[slot].bss_d64_base = PCIBR_D64_BASE_UNSET;
        pcibr_soft->bs_slot[slot].bss_d32_base = PCIBR_D32_BASE_UNSET;
        pcibr_soft->bs_slot[slot].bss_ext_ates_active = ATOMIC_INIT(0);
        pcibr_soft->bs_rrb_valid_dflt[slot][VCHAN0] = -1;
    }

    /* Per-interrupt-bit wrapper state used when dispatching PCI ints. */
    for (ibit = 0; ibit < 8; ++ibit) {
        pcibr_soft->bs_intr[ibit].bsi_xtalk_intr = 0;
        pcibr_soft->bs_intr[ibit].bsi_pcibr_intr_wrap.iw_soft = pcibr_soft;
        pcibr_soft->bs_intr[ibit].bsi_pcibr_intr_wrap.iw_list = NULL;
        pcibr_soft->bs_intr[ibit].bsi_pcibr_intr_wrap.iw_stat =
            &(bridge->b_int_status);
        pcibr_soft->bs_intr[ibit].bsi_pcibr_intr_wrap.iw_ibit = ibit;
        pcibr_soft->bs_intr[ibit].bsi_pcibr_intr_wrap.iw_hdlrcnt = 0;
        pcibr_soft->bs_intr[ibit].bsi_pcibr_intr_wrap.iw_shared = 0;
        pcibr_soft->bs_intr[ibit].bsi_pcibr_intr_wrap.iw_connected = 0;
    }

    /*
     * connect up our error handler. PIC has 2 busses (thus resulting in 2
     * pcibr_soft structs under 1 widget), so only register a xwidget error
     * handler for PIC's bus0. NOTE: for PIC pcibr_error_handler_wrapper()
     * is a wrapper routine we register that will call the real error handler
     * pcibr_error_handler() with the correct pcibr_soft struct.
     */
    if (IS_PIC_SOFT(pcibr_soft)) {
        if (busnum == 0) {
            xwidget_error_register(xconn_vhdl, pcibr_error_handler_wrapper, pcibr_soft);
        }
    }

    /*
     * Initialize various Bridge registers.
     */

    /*
     * On pre-Rev.D bridges, set the PCI_RETRY_CNT
     * to zero to avoid dropping stores. (#475347)
     */
    if (rev < BRIDGE_PART_REV_D)
        bridge->b_bus_timeout &= ~BRIDGE_BUS_PCI_RETRY_MASK;

    /*
     * Clear all pending interrupts.
     */
    bridge->b_int_rst_stat = (BRIDGE_IRR_ALL_CLR);

    /* Initialize some PIC specific registers. */
    if (IS_PIC_SOFT(pcibr_soft)) {
        picreg_t pic_ctrl_reg = bridge->p_wid_control_64;

        /* Bridges Requester ID: bus = busnum, dev = 0, func = 0 */
        pic_ctrl_reg &= ~PIC_CTRL_BUS_NUM_MASK;
        pic_ctrl_reg |= PIC_CTRL_BUS_NUM(busnum);
        pic_ctrl_reg &= ~PIC_CTRL_DEV_NUM_MASK;
        pic_ctrl_reg &= ~PIC_CTRL_FUN_NUM_MASK;

        pic_ctrl_reg &= ~PIC_CTRL_NO_SNOOP;
        pic_ctrl_reg &= ~PIC_CTRL_RELAX_ORDER;

        /* enable parity checking on PICs internal RAM */
        pic_ctrl_reg |= PIC_CTRL_PAR_EN_RESP;
        pic_ctrl_reg |= PIC_CTRL_PAR_EN_ATE;
        /* PIC BRINGUP WAR (PV# 862253): dont enable write request
         * parity checking.
         */
        if (!PCIBR_WAR_ENABLED(PV862253, pcibr_soft)) {
            pic_ctrl_reg |= PIC_CTRL_PAR_EN_REQ;
        }

        bridge->p_wid_control_64 = pic_ctrl_reg;
    }

    /*
     * Until otherwise set up,
     * assume all interrupts are
     * from slot 7(Bridge/Xbridge) or 3(PIC).
     * XXX. Not sure why we're doing this, made change for PIC
     * just to avoid setting reserved bits.
     */
    if (IS_PIC_SOFT(pcibr_soft))
        bridge->b_int_device = (uint32_t) 0x006db6db;

    {
        bridgereg_t dirmap;
        paddr_t paddr;
        iopaddr_t xbase;
        xwidgetnum_t xport;
        iopaddr_t offset;
        int num_entries = 0;
        int entry;
        cnodeid_t cnodeid;
        nasid_t nasid;

        /* Set the Bridge's 32-bit PCI to XTalk
         * Direct Map register to the most useful
         * value we can determine. Note that we
         * must use a single xid for all of:
         *      direct-mapped 32-bit DMA accesses
         *      direct-mapped 64-bit DMA accesses
         *      DMA accesses through the PMU
         *      interrupts
         * This is the only way to guarantee that
         * completion interrupts will reach a CPU
         * after all DMA data has reached memory.
         * (Of course, there may be a few special
         * drivers/controllers that explicitly manage
         * this ordering problem.)
         */

        cnodeid = 0;  /* default node id */
        nasid = COMPACT_TO_NASID_NODEID(cnodeid);
        paddr = NODE_OFFSET(nasid) + 0;

        /* currently, we just assume that if we ask
         * for a DMA mapping to "zero" the XIO
         * host will transmute this into a request
         * for the lowest hunk of memory.
         */
        xbase = xtalk_dmatrans_addr(xconn_vhdl, 0,
                                    paddr, PAGE_SIZE, 0);

        if (xbase != XIO_NOWHERE) {
            if (XIO_PACKED(xbase)) {
                xport = XIO_PORT(xbase);
                xbase = XIO_ADDR(xbase);
            } else
                xport = pcibr_soft->bs_mxid;

            offset = xbase & ((1ull << BRIDGE_DIRMAP_OFF_ADDRSHFT) - 1ull);
            xbase >>= BRIDGE_DIRMAP_OFF_ADDRSHFT;

            dirmap = xport << BRIDGE_DIRMAP_W_ID_SHFT;

            if (xbase)
                dirmap |= BRIDGE_DIRMAP_OFF & xbase;
            else if (offset >= (512 << 20))
                dirmap |= BRIDGE_DIRMAP_ADD512;

            bridge->b_dir_map = dirmap;
        }
        /*
         * Set bridge's idea of page size according to the system's
         * idea of "IO page size". TBD: The idea of IO page size
         * should really go away.
         */
        /*
         * ensure that we write and read without any interruption.
         * The read following the write is required for the Bridge war
         */
#if IOPGSIZE == 4096
        if (IS_PIC_SOFT(pcibr_soft)) {
            bridge->p_wid_control_64 &= ~BRIDGE_CTRL_PAGE_SIZE;
        }
#elif IOPGSIZE == 16384
        if (IS_PIC_SOFT(pcibr_soft)) {
            bridge->p_wid_control_64 |= BRIDGE_CTRL_PAGE_SIZE;
        }
#else
        <<<Unable to deal with IOPGSIZE >>>;
#endif
        bridge->b_wid_control;          /* inval addr bug war */

        /* Initialize internal mapping entries */
        for (entry = 0; entry < pcibr_soft->bs_int_ate_size; entry++) {
            bridge->b_int_ate_ram[entry].wr = 0;
        }

        /*
         * Determine if there's external mapping SSRAM on this
         * bridge. Set up Bridge control register appropriately,
         * initialize SSRAM, and set software up to manage RAM
         * entries as an allocatable resource.
         *
         * Currently, we just use the rm* routines to manage ATE
         * allocation. We should probably replace this with a
         * Best Fit allocator.
         *
         * For now, if we have external SSRAM, avoid using
         * the internal ssram: we can't turn PREFETCH on
         * when we use the internal SSRAM; and besides,
         * this also guarantees that no allocation will
         * straddle the internal/external line, so we
         * can increment ATE write addresses rather than
         * recomparing against BRIDGE_INTERNAL_ATES every
         * time.
         */

        if (IS_XBRIDGE_OR_PIC_SOFT(pcibr_soft))
            num_entries = 0;
        else
            num_entries = pcibr_init_ext_ate_ram(bridge);

        /* we always have 128 ATEs (512 for Xbridge) inside the chip
         * even if disabled for debugging.
         */
        pcibr_soft->bs_int_ate_resource.start = 0;
        pcibr_soft->bs_int_ate_resource.end = pcibr_soft->bs_int_ate_size - 1;

        if (num_entries > pcibr_soft->bs_int_ate_size) {
#if PCIBR_ATE_NOTBOTH                   /* for debug -- forces us to use external ates */
            printk("pcibr_attach: disabling internal ATEs.\n");
            pcibr_ate_alloc(pcibr_soft, pcibr_soft->bs_int_ate_size);
#endif
            pcibr_soft->bs_ext_ate_resource.start = pcibr_soft->bs_int_ate_size;
            pcibr_soft->bs_ext_ate_resource.end = num_entries;
        }

        /* NOTE(review): kmalloc result is used unchecked by the memset
         * below -- an attach-time OOM would oops here.
         */
        pcibr_soft->bs_allocated_ate_res = (void *) kmalloc(pcibr_soft->bs_int_ate_size * sizeof(unsigned long), GFP_KERNEL);
        memset(pcibr_soft->bs_allocated_ate_res, 0x0, pcibr_soft->bs_int_ate_size * sizeof(unsigned long));

        PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_ATE, pcibr_vhdl,
                    "pcibr_attach2: %d ATEs, %d internal & %d external\n",
                    num_entries ? num_entries : pcibr_soft->bs_int_ate_size,
                    pcibr_soft->bs_int_ate_size,
                    num_entries ? num_entries-pcibr_soft->bs_int_ate_size : 0));
    }

    {
        bridgereg_t dirmap;
        iopaddr_t xbase;

        /*
         * now figure the *real* xtalk base address
         * that dirmap sends us to.
         */
        dirmap = bridge->b_dir_map;
        if (dirmap & BRIDGE_DIRMAP_OFF)
            xbase = (iopaddr_t)(dirmap & BRIDGE_DIRMAP_OFF)
                        << BRIDGE_DIRMAP_OFF_ADDRSHFT;
        else if (dirmap & BRIDGE_DIRMAP_ADD512)
            xbase = 512 << 20;
        else
            xbase = 0;

        pcibr_soft->bs_dir_xbase = xbase;

        /* it is entirely possible that we may, at this
         * point, have our dirmap pointing somewhere
         * other than our "master" port.
         */
        pcibr_soft->bs_dir_xport =
            (dirmap & BRIDGE_DIRMAP_W_ID) >> BRIDGE_DIRMAP_W_ID_SHFT;
    }

    /* pcibr sources an error interrupt;
     * figure out where to send it.
     *
     * If any interrupts are enabled in bridge,
     * then the prom set us up and our interrupt
     * has already been reconnected in mlreset
     * above.
     *
     * Need to set the D_INTR_ISERR flag
     * in the dev_desc used for allocating the
     * error interrupt, so our interrupt will
     * be properly routed and prioritized.
     *
     * If our crosstalk provider wants to
     * fix widget error interrupts to specific
     * destinations, D_INTR_ISERR is how it
     * knows to do this.
     */

    xtalk_intr = xtalk_intr_alloc(xconn_vhdl, (device_desc_t)0, pcibr_vhdl);
    {
        /* Rehome the allocated interrupt onto the dedicated
         * SGI_PCIBR_ERROR vector.
         */
        int irq = ((hub_intr_t)xtalk_intr)->i_bit;
        int cpu = ((hub_intr_t)xtalk_intr)->i_cpuid;

        intr_unreserve_level(cpu, irq);
        ((hub_intr_t)xtalk_intr)->i_bit = SGI_PCIBR_ERROR;
    }
    ASSERT(xtalk_intr != NULL);

    pcibr_soft->bsi_err_intr = xtalk_intr;

    /*
     * On IP35 with XBridge, we do some extra checks in pcibr_setwidint
     * in order to work around some addressing limitations. In order
     * for that fire wall to work properly, we need to make sure we
     * start from a known clean state.
     */
    pcibr_clearwidint(bridge);

    xtalk_intr_connect(xtalk_intr, (intr_func_t) pcibr_error_intr_handler,
                       (intr_arg_t) pcibr_soft, (xtalk_intr_setfunc_t)pcibr_setwidint, (void *)bridge);

    request_irq(SGI_PCIBR_ERROR, (void *)pcibr_error_intr_handler, SA_SHIRQ, "PCIBR error",
                (intr_arg_t) pcibr_soft);

    PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_INTR_ALLOC, pcibr_vhdl,
                "pcibr_setwidint: b_wid_int_upper=0x%x, b_wid_int_lower=0x%x\n",
                bridge->b_wid_int_upper, bridge->b_wid_int_lower));

    /*
     * now we can start handling error interrupts;
     * enable all of them.
     * NOTE: some PCI ints may already be enabled.
     */
    /* We read the INT_ENABLE register as a 64bit picreg_t for PIC and a
     * 32bit bridgereg_t for BRIDGE, but always process the result as a
     * 64bit value so the code can be "common" for both PIC and BRIDGE...
     */
    if (IS_PIC_SOFT(pcibr_soft)) {
        int_enable_64 = bridge->p_int_enable_64 | BRIDGE_ISR_ERRORS;
        int_enable = (uint64_t)int_enable_64;
#ifdef PFG_TEST
        int_enable = (uint64_t)0x7ffffeff7ffffeff;
#endif
    }


#if BRIDGE_ERROR_INTR_WAR
    if (pcibr_soft->bs_rev_num == BRIDGE_PART_REV_A) {
        /*
         * We commonly get master timeouts when talking to ql.
         * We also see RESP_XTALK_ERROR and LLP_TX_RETRY interrupts.
         * Insure that these are all disabled for now.
         */
        int_enable &= ~(BRIDGE_IMR_PCI_MST_TIMEOUT |
                        BRIDGE_ISR_RESP_XTLK_ERR |
                        BRIDGE_ISR_LLP_TX_RETRY);
    }
    if (pcibr_soft->bs_rev_num < BRIDGE_PART_REV_C) {
        int_enable &= ~BRIDGE_ISR_BAD_XRESP_PKT;
    }
#endif /* BRIDGE_ERROR_INTR_WAR */

#ifdef QL_SCSI_CTRL_WAR                 /* for IP30 only */
    /* Really a QL rev A issue, but all newer hearts have newer QLs.
     * Forces all IO6/MSCSI to be new.
     */
    if (heart_rev() == HEART_REV_A)
        int_enable &= ~BRIDGE_IMR_PCI_MST_TIMEOUT;
#endif

#ifdef BRIDGE1_TIMEOUT_WAR
    if (pcibr_soft->bs_rev_num == BRIDGE_PART_REV_A) {
        /*
         * Turn off these interrupts. They can't be trusted in bridge 1
         */
        int_enable &= ~(BRIDGE_IMR_XREAD_REQ_TIMEOUT |
                        BRIDGE_IMR_UNEXP_RESP);
    }
#endif

    /* PIC BRINGUP WAR (PV# 856864 & 856865): allow the tnums that are
     * locked out to be freed up sooner (by timing out) so that the
     * read tnums are never completely used up.
     */
    if (IS_PIC_SOFT(pcibr_soft) && PCIBR_WAR_ENABLED(PV856864, pcibr_soft)) {
        int_enable &= ~PIC_ISR_PCIX_REQ_TOUT;
        int_enable &= ~BRIDGE_ISR_XREAD_REQ_TIMEOUT;

        bridge->b_wid_req_timeout = 0x750;
    }

    /*
     * PIC BRINGUP WAR (PV# 856866, 859504, 861476, 861478): Don't use
     * RRB0, RRB8, RRB1, and RRB9. Assign them to DEVICE[2|3]--VCHAN3
     * so they are not used
     */
    if (IS_PIC_SOFT(pcibr_soft) && PCIBR_WAR_ENABLED(PV856866, pcibr_soft)) {
        bridge->b_even_resp |= 0x000f000f;
        bridge->b_odd_resp |= 0x000f000f;
    }

    if (IS_PIC_SOFT(pcibr_soft)) {
        bridge->p_int_enable_64 = (picreg_t)int_enable;
    }
    bridge->b_int_mode = 0;             /* do not send "clear interrupt" packets */

    bridge->b_wid_tflush;               /* wait until Bridge PIO complete */

    /*
     * Depending on the rev of bridge, disable certain features.
     * Easiest way seems to be to force the PCIBR_NOwhatever
     * flag to be on for all DMA calls, which overrides any
     * PCIBR_whatever flag or even the setting of whatever
     * from the PCIIO_DMA_class flags (or even from the other
     * PCIBR flags, since NO overrides YES).
     */
    pcibr_soft->bs_dma_flags = 0;

    /* PREFETCH:
     * Always completely disabled for REV.A;
     * at "pcibr_prefetch_enable_rev", anyone
     * asking for PCIIO_PREFETCH gets it.
     * Between these two points, you have to ask
     * for PCIBR_PREFETCH, which promises that
     * your driver knows about known Bridge WARs.
     */
    if (pcibr_soft->bs_rev_num < BRIDGE_PART_REV_B)
        pcibr_soft->bs_dma_flags |= PCIBR_NOPREFETCH;
    else if (pcibr_soft->bs_rev_num <
             (BRIDGE_WIDGET_PART_NUM << 4))
        pcibr_soft->bs_dma_flags |= PCIIO_NOPREFETCH;

    /* WRITE_GATHER: Disabled */
    if (pcibr_soft->bs_rev_num <
        (BRIDGE_WIDGET_PART_NUM << 4))
        pcibr_soft->bs_dma_flags |= PCIBR_NOWRITE_GATHER;

    /* PIC only supports 64-bit direct mapping in PCI-X mode. Since
     * all PCI-X devices that initiate memory transactions must be
     * capable of generating 64-bit addressed, we force 64-bit DMAs.
     */
    if (IS_PCIX(pcibr_soft)) {
        pcibr_soft->bs_dma_flags |= PCIIO_DMA_A64;
    }

    {

        iopaddr_t prom_base_addr = pcibr_soft->bs_xid << 24;
        int prom_base_size = 0x1000000;
        int status;
        struct resource *res;

        /* Allocate resource maps based on bus page size; for I/O and memory
         * space, free all pages except those in the base area and in the
         * range set by the PROM.
         *
         * PROM creates BAR addresses in this format: 0x0ws00000 where w is
         * the widget number and s is the device register offset for the slot.
         */

        /* Setup the Bus's PCI IO Root Resource. */
        pcibr_soft->bs_io_win_root_resource.start = PCIBR_BUS_IO_BASE;
        pcibr_soft->bs_io_win_root_resource.end = 0xffffffff;
        res = (struct resource *) kmalloc( sizeof(struct resource), GFP_KERNEL);
        if (!res)
            panic("PCIBR:Unable to allocate resource structure\n");

        /* Block off the range used by PROM. */
        res->start = prom_base_addr;
        res->end = prom_base_addr + (prom_base_size - 1);
        status = request_resource(&pcibr_soft->bs_io_win_root_resource, res);
        if (status)
            panic("PCIBR:Unable to request_resource()\n");

        /* Setup the Small Window Root Resource */
        pcibr_soft->bs_swin_root_resource.start = PAGE_SIZE;
        pcibr_soft->bs_swin_root_resource.end = 0x000FFFFF;

        /* Setup the Bus's PCI Memory Root Resource */
        pcibr_soft->bs_mem_win_root_resource.start = 0x200000;
        pcibr_soft->bs_mem_win_root_resource.end = 0xffffffff;
        res = (struct resource *) kmalloc( sizeof(struct resource), GFP_KERNEL);
        if (!res)
            panic("PCIBR:Unable to allocate resource structure\n");

        /* Block off the range used by PROM. */
        res->start = prom_base_addr;
        /* NOTE(review): stray double semicolon below -- harmless. */
        res->end = prom_base_addr + (prom_base_size - 1);;
        status = request_resource(&pcibr_soft->bs_mem_win_root_resource, res);
        if (status)
            panic("PCIBR:Unable to request_resource()\n");

    }

    /* build "no-slot" connection point
     */
    pcibr_info = pcibr_device_info_new
        (pcibr_soft, PCIIO_SLOT_NONE, PCIIO_FUNC_NONE,
         PCIIO_VENDOR_ID_NONE, PCIIO_DEVICE_ID_NONE);
    noslot_conn = pciio_device_info_register
        (pcibr_vhdl, &pcibr_info->f_c);

    /* Remember the no slot connection point info for tearing it
     * down during detach.
     */
    pcibr_soft->bs_noslot_conn = noslot_conn;
    pcibr_soft->bs_noslot_info = pcibr_info;
#if PCI_FBBE
    fast_back_to_back_enable = 1;
#endif

#if PCI_FBBE
    if (fast_back_to_back_enable) {
        /*
         * All devices on the bus are capable of fast back to back, so
         * we need to set the fast back to back bit in all devices on
         * the bus that are capable of doing such accesses.
         */
    }
#endif

    for (slot = pcibr_soft->bs_min_slot;
         slot < PCIBR_NUM_SLOTS(pcibr_soft); ++slot) {
        /* Find out what is out there */
        (void)pcibr_slot_info_init(pcibr_vhdl,slot);
    }
    for (slot = pcibr_soft->bs_min_slot;
         slot < PCIBR_NUM_SLOTS(pcibr_soft); ++slot)
        /* Set up the address space for this slot in the PCI land */
        (void)pcibr_slot_addr_space_init(pcibr_vhdl, slot);

    for (slot = pcibr_soft->bs_min_slot;
         slot < PCIBR_NUM_SLOTS(pcibr_soft); ++slot)
        /* Setup the device register */
        (void)pcibr_slot_device_init(pcibr_vhdl, slot);

    if (IS_PCIX(pcibr_soft)) {
        pcibr_soft->bs_pcix_rbar_inuse = 0;
        pcibr_soft->bs_pcix_rbar_avail = NUM_RBAR;
        pcibr_soft->bs_pcix_rbar_percent_allowed =
            pcibr_pcix_rbars_calc(pcibr_soft);

        for (slot = pcibr_soft->bs_min_slot;
             slot < PCIBR_NUM_SLOTS(pcibr_soft); ++slot)
            /* Setup the PCI-X Read Buffer Attribute Registers (RBARs) */
            (void)pcibr_slot_pcix_rbar_init(pcibr_soft, slot);
    }

    /* Set up convenience links */
    if (IS_XBRIDGE_OR_PIC_SOFT(pcibr_soft))
        pcibr_bus_cnvlink(pcibr_soft->bs_vhdl);

    for (slot = pcibr_soft->bs_min_slot;
         slot < PCIBR_NUM_SLOTS(pcibr_soft); ++slot)
        /* Setup host/guest relations */
        (void)pcibr_slot_guest_info_init(pcibr_vhdl, slot);

    /* Handle initial RRB management for Bridge and Xbridge */
    pcibr_initial_rrb(pcibr_vhdl,
                      pcibr_soft->bs_first_slot, pcibr_soft->bs_last_slot);

    {   /* Before any drivers get called that may want to re-allocate
         * RRB's, let's get some special cases pre-allocated. Drivers
         * may override these pre-allocations, but by doing pre-allocations
         * now we're assured not to step all over what the driver intended.
         *
         * Note: Someday this should probably be moved over to pcibr_rrb.c
         */
        /*
         * Each Pbrick PCI bus only has slots 1 and 2. Similarly for
         * widget 0xe on Ibricks. Allocate RRB's accordingly.
         */
        if (pcibr_soft->bs_bricktype > 0) {
            switch (pcibr_soft->bs_bricktype) {
            case MODULE_PBRICK:
                do_pcibr_rrb_autoalloc(pcibr_soft, 1, VCHAN0, 8);
                do_pcibr_rrb_autoalloc(pcibr_soft, 2, VCHAN0, 8);
                break;
            case MODULE_IBRICK:
                /* port 0xe on the Ibrick only has slots 1 and 2 */
                if (pcibr_soft->bs_xid == 0xe) {
                    do_pcibr_rrb_autoalloc(pcibr_soft, 1, VCHAN0, 8);
                    do_pcibr_rrb_autoalloc(pcibr_soft, 2, VCHAN0, 8);
                }
                else {
                    /* allocate one RRB for the serial port */
                    do_pcibr_rrb_autoalloc(pcibr_soft, 0, VCHAN0, 1);
                }
                break;
            case MODULE_PXBRICK:
            case MODULE_IXBRICK:
            case MODULE_OPUSBRICK:
                /*
                 * If the IO9 is in the PXBrick (bus1, slot1) allocate
                 * RRBs to all the devices
                 */
                if ((pcibr_widget_to_bus(pcibr_vhdl) == 1) &&
                    (pcibr_soft->bs_slot[0].bss_vendor_id == 0x10A9) &&
                    (pcibr_soft->bs_slot[0].bss_device_id == 0x100A)) {
                    do_pcibr_rrb_autoalloc(pcibr_soft, 0, VCHAN0, 4);
                    do_pcibr_rrb_autoalloc(pcibr_soft, 1, VCHAN0, 4);
                    do_pcibr_rrb_autoalloc(pcibr_soft, 2, VCHAN0, 4);
                    do_pcibr_rrb_autoalloc(pcibr_soft, 3, VCHAN0, 4);
                } else {
                    do_pcibr_rrb_autoalloc(pcibr_soft, 0, VCHAN0, 4);
                    do_pcibr_rrb_autoalloc(pcibr_soft, 1, VCHAN0, 4);
                }
                break;

            case MODULE_CGBRICK:
                do_pcibr_rrb_autoalloc(pcibr_soft, 0, VCHAN0, 8);
                break;
            }                           /* switch */
        }
    }                                   /* OK Special RRB allocations are done. */

    for (slot = pcibr_soft->bs_min_slot;
         slot < PCIBR_NUM_SLOTS(pcibr_soft); ++slot)
        /* Call the device attach */
        (void)pcibr_slot_call_device_attach(pcibr_vhdl, slot, 0);

    pciio_device_attach(noslot_conn, (int)0);

    return 0;
}
1854
1855 /*
1856 * pcibr_detach:
1857 * Detach the bridge device from the hwgraph after cleaning out all the
1858 * underlying vertices.
1859 */
1860
/*
 * pcibr_detach: tear down the bridge hanging off xtalk connection point
 * 'xconn'.  Interrupts are masked, every PCI slot is detached, the
 * no-slot connect point is unregistered, the error interrupt is
 * released, and finally the driver soft state and hwgraph edges are
 * removed.  Returns 0 on success, 1 if the PCI vertex cannot be found.
 */
int
pcibr_detach(vertex_hdl_t xconn)
{
    pciio_slot_t slot;
    vertex_hdl_t pcibr_vhdl;
    pcibr_soft_t pcibr_soft;
    bridge_t *bridge;
    unsigned s;

    PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_DETACH, xconn, "pcibr_detach\n"));

    /* Get the bridge vertex from its xtalk connection point */
    if (hwgraph_traverse(xconn, EDGE_LBL_PCI, &pcibr_vhdl) != GRAPH_SUCCESS)
	return(1);

    pcibr_soft = pcibr_soft_get(pcibr_vhdl);
    bridge = pcibr_soft->bs_base;


    /* Disable the interrupts from the bridge while holding the bridge
     * lock, so no new interrupts arrive mid-teardown.  Only PIC parts
     * have the 64-bit enable register; other ASICs are left untouched
     * here.
     */
    s = pcibr_lock(pcibr_soft);
    if (IS_PIC_SOFT(pcibr_soft)) {
	bridge->p_int_enable_64 = 0;
    }
    pcibr_unlock(pcibr_soft, s);

    /* Detach all the PCI devices talking to this bridge */
    for (slot = pcibr_soft->bs_min_slot;
	 slot < PCIBR_NUM_SLOTS(pcibr_soft); ++slot) {
	pcibr_slot_detach(pcibr_vhdl, slot, 0, (char *)NULL, (int *)NULL);
    }

    /* Unregister the no-slot connection point */
    pciio_device_info_unregister(pcibr_vhdl,
				 &(pcibr_soft->bs_noslot_info->f_c));

    kfree(pcibr_soft->bs_name);

    /* Disconnect the error interrupt and free the xtalk resources
     * associated with it.
     */
    xtalk_intr_disconnect(pcibr_soft->bsi_err_intr);
    xtalk_intr_free(pcibr_soft->bsi_err_intr);

    /* Clear the software state maintained by the bridge driver for this
     * bridge.  (pcibr_soft must not be referenced past this point.)
     */
    DEL(pcibr_soft);
    /* Remove the Bridge revision labelled info */
    (void)hwgraph_info_remove_LBL(pcibr_vhdl, INFO_LBL_PCIBR_ASIC_REV, NULL);
    /* Remove the character device associated with this bridge */
    (void)hwgraph_edge_remove(pcibr_vhdl, EDGE_LBL_CONTROLLER, NULL);
    /* Remove the PCI bridge vertex */
    (void)hwgraph_edge_remove(xconn, EDGE_LBL_PCI, NULL);

    return(0);
}
1918
1919 int
pcibr_asic_rev(vertex_hdl_t pconn_vhdl)1920 pcibr_asic_rev(vertex_hdl_t pconn_vhdl)
1921 {
1922 vertex_hdl_t pcibr_vhdl;
1923 int tmp_vhdl;
1924 arbitrary_info_t ainfo;
1925
1926 if (GRAPH_SUCCESS !=
1927 hwgraph_traverse(pconn_vhdl, EDGE_LBL_MASTER, &pcibr_vhdl))
1928 return -1;
1929
1930 tmp_vhdl = hwgraph_info_get_LBL(pcibr_vhdl, INFO_LBL_PCIBR_ASIC_REV, &ainfo);
1931
1932 /*
1933 * Any hwgraph function that returns a vertex handle will implicity
1934 * increment that vertex's reference count. The caller must explicity
1935 * decrement the vertex's referece count after the last reference to
1936 * that vertex.
1937 *
1938 * Decrement reference count incremented by call to hwgraph_traverse().
1939 *
1940 */
1941 hwgraph_vertex_unref(pcibr_vhdl);
1942
1943 if (tmp_vhdl != GRAPH_SUCCESS)
1944 return -1;
1945 return (int) ainfo;
1946 }
1947
1948 /* =====================================================================
1949 * PIO MANAGEMENT
1950 */
1951
/*
 * pcibr_addr_pci_to_xio: translate a PIO request (space, pci_addr,
 * req_size) for the PCI device in 'slot' into an XIO address.  Config
 * and ROM space are handled specially; window spaces are reduced to
 * their raw space first.  The routine then tries to reuse or program a
 * DevIO(x) window, and finally falls back to the large direct-mapped
 * areas.  Returns XIO_NOWHERE when no translation can be provided.
 * Takes and releases the bridge lock internally.
 */
static iopaddr_t
pcibr_addr_pci_to_xio(vertex_hdl_t pconn_vhdl,
		      pciio_slot_t slot,
		      pciio_space_t space,
		      iopaddr_t pci_addr,
		      size_t req_size,
		      unsigned flags)
{
    pcibr_info_t pcibr_info = pcibr_info_get(pconn_vhdl);
    pciio_info_t pciio_info = &pcibr_info->f_c;
    pcibr_soft_t pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
    bridge_t *bridge = pcibr_soft->bs_base;

    unsigned bar;		/* which BASE reg on device is decoding */
    iopaddr_t xio_addr = XIO_NOWHERE;
    iopaddr_t base;		/* base of direct-mapped (non-DevIO) area */
    iopaddr_t limit;		/* limit of direct-mapped (non-DevIO) area */

    pciio_space_t wspace;	/* which space device is decoding */
    iopaddr_t wbase;		/* base of device decode on PCI */
    size_t wsize;		/* size of device decode on PCI */

    int try;			/* DevIO(x) window scanning order control */
    int maxtry, halftry;
    int win;			/* which DevIO(x) window is being used */
    pciio_space_t mspace;	/* target space for devio(x) register */
    iopaddr_t mbase;		/* base of devio(x) mapped area on PCI */
    size_t msize;		/* size of devio(x) mapped area on PCI */
    size_t mmask;		/* addr bits stored in Device(x) */
    char tmp_str[512];

    unsigned long s;

    s = pcibr_lock(pcibr_soft);

    if (pcibr_soft->bs_slot[slot].has_host) {
	slot = pcibr_soft->bs_slot[slot].host_slot;
	pcibr_info = pcibr_soft->bs_slot[slot].bss_infos[0];

	/*
	 * Special case for dual-slot pci devices such as ioc3 on IP27
	 * baseio. In these cases, pconn_vhdl should never be for a pci
	 * function on a subordinate PCI bus, so we can safely reset
	 * pciio_info to be the info struct embedded in pcibr_info.
	 * Failure to do this results in using a bogus pciio_info_t for
	 * calculations done later in this routine.
	 */

	pciio_info = &pcibr_info->f_c;
    }
    if (space == PCIIO_SPACE_NONE)
	goto done;

    if (space == PCIIO_SPACE_CFG) {
	/*
	 * Usually, the first mapping
	 * established to a PCI device
	 * is to its config space.
	 *
	 * In any case, we definitely
	 * do NOT need to worry about
	 * PCI BASE registers, and
	 * MUST NOT attempt to point
	 * the DevIO(x) window at
	 * this access ...
	 */
	if (((flags & PCIIO_BYTE_STREAM) == 0) &&
	    ((pci_addr + req_size) <= BRIDGE_TYPE0_CFG_FUNC_OFF))
	    xio_addr = pci_addr + PCIBR_TYPE0_CFG_DEV(pcibr_soft, slot);

	goto done;
    }
    if (space == PCIIO_SPACE_ROM) {
	/* PIO to the Expansion Rom.
	 * Driver is responsible for
	 * enabling and disabling
	 * decodes properly.
	 */
	wbase = pciio_info->c_rbase;
	wsize = pciio_info->c_rsize;

	/*
	 * While the driver should know better
	 * than to attempt to map more space
	 * than the device is decoding, he might
	 * do it; better to bail out here.
	 */
	if ((pci_addr + req_size) > wsize)
	    goto done;

	pci_addr += wbase;
	space = PCIIO_SPACE_MEM;
    }
    /*
     * reduce window mappings to raw
     * space mappings (maybe allocating
     * windows), and try for DevIO(x)
     * usage (setting it if it is available).
     */
    bar = space - PCIIO_SPACE_WIN0;
    if (bar < 6) {
	wspace = pciio_info->c_window[bar].w_space;
	if (wspace == PCIIO_SPACE_NONE)
	    goto done;

	/* get PCI base and size */
	wbase = pciio_info->c_window[bar].w_base;
	wsize = pciio_info->c_window[bar].w_size;

	/*
	 * While the driver should know better
	 * than to attempt to map more space
	 * than the device is decoding, he might
	 * do it; better to bail out here.
	 */
	if ((pci_addr + req_size) > wsize)
	    goto done;

	/* shift from window relative to
	 * decoded space relative.
	 */
	pci_addr += wbase;
	space = wspace;
    } else
	bar = -1;

    /* Scan all the DevIO(x) windows twice looking for one
     * that can satisfy our request. The first time through,
     * only look at assigned windows; the second time, also
     * look at PCIIO_SPACE_NONE windows. Arrange the order
     * so we always look at our own window first.
     *
     * We will not attempt to satisfy a single request
     * by concatenating multiple windows.
     */
    maxtry = PCIBR_NUM_SLOTS(pcibr_soft) * 2;
    halftry = PCIBR_NUM_SLOTS(pcibr_soft) - 1;
    for (try = 0; try < maxtry; ++try) {
	bridgereg_t devreg;
	unsigned offset;

	/* calculate win based on slot, attempt, and max possible
	   devices on bus */
	win = (try + slot) % PCIBR_NUM_SLOTS(pcibr_soft);

	/* If this DevIO(x) mapping area can provide
	 * a mapping to this address, use it.
	 * DevIO windows 0 and 1 are 2MB, the rest are 1MB.
	 */
	msize = (win < 2) ? 0x200000 : 0x100000;
	mmask = -msize;
	if (space != PCIIO_SPACE_IO)
	    mmask &= 0x3FFFFFFF;

	offset = pci_addr & (msize - 1);

	/* If this window can't possibly handle that request,
	 * go on to the next window.
	 */
	if (((pci_addr & (msize - 1)) + req_size) > msize)
	    continue;

	devreg = pcibr_soft->bs_slot[win].bss_device;

	/* Is this window "nailed down"?
	 * If not, maybe we can use it.
	 * (only check this the second time through)
	 */
	mspace = pcibr_soft->bs_slot[win].bss_devio.bssd_space;
	if ((try > halftry) && (mspace == PCIIO_SPACE_NONE)) {

	    /* If this is the primary DevIO(x) window
	     * for some other device, skip it.
	     */
	    if ((win != slot) &&
		(PCIIO_VENDOR_ID_NONE !=
		 pcibr_soft->bs_slot[win].bss_vendor_id))
		continue;

	    /* It's a free window, and we fit in it.
	     * Set up Device(win) to our taste.
	     */
	    mbase = pci_addr & mmask;

	    /* check that we would really get from
	     * here to there.
	     */
	    if ((mbase | offset) != pci_addr)
		continue;

	    devreg &= ~BRIDGE_DEV_OFF_MASK;
	    if (space != PCIIO_SPACE_IO)
		devreg |= BRIDGE_DEV_DEV_IO_MEM;
	    else
		devreg &= ~BRIDGE_DEV_DEV_IO_MEM;
	    devreg |= (mbase >> 20) & BRIDGE_DEV_OFF_MASK;

	    /* default is WORD_VALUES.
	     * if you specify both,
	     * operation is undefined.
	     */
	    if (flags & PCIIO_BYTE_STREAM)
		devreg |= BRIDGE_DEV_DEV_SWAP;
	    else
		devreg &= ~BRIDGE_DEV_DEV_SWAP;

	    if (pcibr_soft->bs_slot[win].bss_device != devreg) {
		if ( IS_PIC_SOFT(pcibr_soft) ) {
		    bridge->b_device[win].reg = devreg;
		    pcibr_soft->bs_slot[win].bss_device = devreg;
		    bridge->b_wid_tflush;	/* wait until Bridge PIO complete */
		}

		/* NOTE(review): "PCI_LATER" here differs from the
		 * "PIC_LATER" guard used elsewhere in this file —
		 * possibly a typo; verify which symbol is intended.
		 */
#ifdef PCI_LATER
		PCIBR_DEBUG((PCIBR_DEBUG_DEVREG, pconn_vhdl,
			    "pcibr_addr_pci_to_xio: Device(%d): %x\n",
			    win, devreg, device_bits));
#endif
	    }
	    pcibr_soft->bs_slot[win].bss_devio.bssd_space = space;
	    pcibr_soft->bs_slot[win].bss_devio.bssd_base = mbase;
	    xio_addr = PCIBR_BRIDGE_DEVIO(pcibr_soft, win) + (pci_addr - mbase);

	    /* Increment this DevIO's use count */
	    pcibr_soft->bs_slot[win].bss_devio.bssd_ref_cnt++;

	    /* Save the DevIO register index used to access this BAR */
	    if (bar != -1)
		pcibr_info->f_window[bar].w_devio_index = win;

	    /*
	     * The kernel only allows functions to have so many variable args,
	     * attempting to call PCIBR_DEBUG_ALWAYS() with more than 5 printk
	     * arguments fails so sprintf() it into a temporary string.
	     */
	    if (pcibr_debug_mask & PCIBR_DEBUG_PIOMAP) {
#ifdef PIC_LATER
		sprintf(tmp_str, "pcibr_addr_pci_to_xio: map to %x[%x..%x] for "
			"slot %d allocates DevIO(%d) Device(%d) set to %x\n",
			space, space_desc, pci_addr, pci_addr + req_size - 1,
			slot, win, win, devreg, device_bits);
#else
		sprintf(tmp_str, "pcibr_addr_pci_to_xio: map to [%lx..%lx] for "
			"slot %d allocates DevIO(%d) Device(%d) set to %lx\n",
			(unsigned long)pci_addr, (unsigned long)(pci_addr + req_size - 1),
			(unsigned int)slot, win, win, (unsigned long)devreg);
#endif
		PCIBR_DEBUG((PCIBR_DEBUG_PIOMAP, pconn_vhdl, "%s", tmp_str));
	    }
	    goto done;
	}			/* endif DevIO(x) not pointed */
	mbase = pcibr_soft->bs_slot[win].bss_devio.bssd_base;

	/* Now check for request incompat with DevIO(x)
	 */
	if ((mspace != space) ||
	    (pci_addr < mbase) ||
	    ((pci_addr + req_size) > (mbase + msize)) ||
	    ((flags & PCIIO_BYTE_STREAM) && !(devreg & BRIDGE_DEV_DEV_SWAP)) ||
	    (!(flags & PCIIO_BYTE_STREAM) && (devreg & BRIDGE_DEV_DEV_SWAP)))
	    continue;

	/* DevIO(x) window is pointed at PCI space
	 * that includes our target. Calculate the
	 * final XIO address, release the lock and
	 * return.
	 */
	xio_addr = PCIBR_BRIDGE_DEVIO(pcibr_soft, win) + (pci_addr - mbase);

	/* Increment this DevIO's use count */
	pcibr_soft->bs_slot[win].bss_devio.bssd_ref_cnt++;

	/* Save the DevIO register index used to access this BAR */
	if (bar != -1)
	    pcibr_info->f_window[bar].w_devio_index = win;

	if (pcibr_debug_mask & PCIBR_DEBUG_PIOMAP) {
	    /* NOTE(review): with PIC_LATER undefined, tmp_str is printed
	     * here without being filled in on this path — verify.
	     */
#ifdef PIC_LATER
	    sprintf(tmp_str, "pcibr_addr_pci_to_xio: map to %x[%x..%x] for "
		    "slot %d uses DevIO(%d)\n", space, space_desc, pci_addr,
		    pci_addr + req_size - 1, slot, win);
#endif
	    PCIBR_DEBUG((PCIBR_DEBUG_PIOMAP, pconn_vhdl, "%s", tmp_str));
	}
	goto done;
    }

    switch (space) {
	/*
	 * Accesses to device decode
	 * areas that do not fit
	 * within the DevIO(x) space are
	 * modified to be accesses via
	 * the direct mapping areas.
	 *
	 * If necessary, drivers can
	 * explicitly ask for mappings
	 * into these address spaces,
	 * but this should never be needed.
	 */
    case PCIIO_SPACE_MEM:	/* "mem space" */
    case PCIIO_SPACE_MEM32:	/* "mem, use 32-bit-wide bus" */
	if (IS_PIC_BUSNUM_SOFT(pcibr_soft, 0)) {	/* PIC bus 0 */
	    base = PICBRIDGE0_PCI_MEM32_BASE;
	    limit = PICBRIDGE0_PCI_MEM32_LIMIT;
	} else if (IS_PIC_BUSNUM_SOFT(pcibr_soft, 1)) {	/* PIC bus 1 */
	    base = PICBRIDGE1_PCI_MEM32_BASE;
	    limit = PICBRIDGE1_PCI_MEM32_LIMIT;
	} else {					/* Bridge/Xbridge */
	    base = BRIDGE_PCI_MEM32_BASE;
	    limit = BRIDGE_PCI_MEM32_LIMIT;
	}

	if ((pci_addr + base + req_size - 1) <= limit)
	    xio_addr = pci_addr + base;
	break;

    case PCIIO_SPACE_MEM64:	/* "mem, use 64-bit-wide bus" */
	if (IS_PIC_BUSNUM_SOFT(pcibr_soft, 0)) {	/* PIC bus 0 */
	    base = PICBRIDGE0_PCI_MEM64_BASE;
	    limit = PICBRIDGE0_PCI_MEM64_LIMIT;
	} else if (IS_PIC_BUSNUM_SOFT(pcibr_soft, 1)) {	/* PIC bus 1 */
	    base = PICBRIDGE1_PCI_MEM64_BASE;
	    limit = PICBRIDGE1_PCI_MEM64_LIMIT;
	} else {					/* Bridge/Xbridge */
	    base = BRIDGE_PCI_MEM64_BASE;
	    limit = BRIDGE_PCI_MEM64_LIMIT;
	}

	if ((pci_addr + base + req_size - 1) <= limit)
	    xio_addr = pci_addr + base;
	break;

    case PCIIO_SPACE_IO:	/* "i/o space" */
	/*
	 * PIC bridges do not support big-window aliases into PCI I/O space
	 */
	if (IS_PIC_SOFT(pcibr_soft)) {
	    xio_addr = XIO_NOWHERE;
	    break;
	}

	/* Bridge Hardware Bug WAR #482741:
	 * The 4G area that maps directly from
	 * XIO space to PCI I/O space is busted
	 * until Bridge Rev D.
	 */
	if ((pcibr_soft->bs_rev_num > BRIDGE_PART_REV_C) &&
	    ((pci_addr + BRIDGE_PCI_IO_BASE + req_size - 1) <=
	     BRIDGE_PCI_IO_LIMIT))
	    xio_addr = pci_addr + BRIDGE_PCI_IO_BASE;
	break;
    }

    /* Check that "Direct PIO" byteswapping matches,
     * try to change it if it does not.
     */
    if (xio_addr != XIO_NOWHERE) {
	unsigned bst;		/* nonzero to set bytestream */
	unsigned *bfp;		/* addr of record of how swapper is set */
	unsigned swb;		/* which control bit to mung */
	unsigned bfo;		/* current swapper setting */
	unsigned bfn;		/* desired swapper setting */

	bfp = ((space == PCIIO_SPACE_IO)
	       ? (&pcibr_soft->bs_pio_end_io)
	       : (&pcibr_soft->bs_pio_end_mem));

	bfo = *bfp;

	bst = flags & PCIIO_BYTE_STREAM;

	bfn = bst ? PCIIO_BYTE_STREAM : PCIIO_WORD_VALUES;

	if (bfn == bfo) {	/* we already match. */
	    ;
	} else if (bfo != 0) {	/* we have a conflict. */
	    if (pcibr_debug_mask & PCIBR_DEBUG_PIOMAP) {
#ifdef PIC_LATER
		sprintf(tmp_str, "pcibr_addr_pci_to_xio: swap conflict in %x, "
			"was%s%s, want%s%s\n", space, space_desc,
			bfo & PCIIO_BYTE_STREAM ? " BYTE_STREAM" : "",
			bfo & PCIIO_WORD_VALUES ? " WORD_VALUES" : "",
			bfn & PCIIO_BYTE_STREAM ? " BYTE_STREAM" : "",
			bfn & PCIIO_WORD_VALUES ? " WORD_VALUES" : "");
#endif
		PCIBR_DEBUG((PCIBR_DEBUG_PIOMAP, pconn_vhdl, "%s", tmp_str));
	    }
	    xio_addr = XIO_NOWHERE;
	} else {		/* OK to make the change. */
	    swb = (space == PCIIO_SPACE_IO) ? BRIDGE_CTRL_IO_SWAP : BRIDGE_CTRL_MEM_SWAP;
	    if ( IS_PIC_SOFT(pcibr_soft) ) {
		picreg_t octl, nctl;
		/* NOTE(review): reads p_wid_control_64 but writes
		 * b_wid_control — presumably aliases of the same
		 * register; confirm against the register layout.
		 */
		octl = bridge->p_wid_control_64;
		nctl = bst ? octl | (uint64_t)swb : octl & ((uint64_t)~swb);

		if (octl != nctl)	/* make the change if any */
		    bridge->b_wid_control = nctl;
	    }
	    *bfp = bfn;			/* record the assignment */

	    if (pcibr_debug_mask & PCIBR_DEBUG_PIOMAP) {
#ifdef PIC_LATER
		sprintf(tmp_str, "pcibr_addr_pci_to_xio: swap for %x set "
			"to%s%s\n", space, space_desc,
			bfn & PCIIO_BYTE_STREAM ? " BYTE_STREAM" : "",
			bfn & PCIIO_WORD_VALUES ? " WORD_VALUES" : "");
#endif
		PCIBR_DEBUG((PCIBR_DEBUG_PIOMAP, pconn_vhdl, "%s", tmp_str));
	    }
	}
    }
  done:
    pcibr_unlock(pcibr_soft, s);
    return xio_addr;
}
2367
2368 /*ARGSUSED6 */
2369 pcibr_piomap_t
pcibr_piomap_alloc(vertex_hdl_t pconn_vhdl,device_desc_t dev_desc,pciio_space_t space,iopaddr_t pci_addr,size_t req_size,size_t req_size_max,unsigned flags)2370 pcibr_piomap_alloc(vertex_hdl_t pconn_vhdl,
2371 device_desc_t dev_desc,
2372 pciio_space_t space,
2373 iopaddr_t pci_addr,
2374 size_t req_size,
2375 size_t req_size_max,
2376 unsigned flags)
2377 {
2378 pcibr_info_t pcibr_info = pcibr_info_get(pconn_vhdl);
2379 pciio_info_t pciio_info = &pcibr_info->f_c;
2380 pciio_slot_t pciio_slot = PCIBR_INFO_SLOT_GET_INT(pciio_info);
2381 pcibr_soft_t pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
2382 vertex_hdl_t xconn_vhdl = pcibr_soft->bs_conn;
2383
2384 pcibr_piomap_t *mapptr;
2385 pcibr_piomap_t maplist;
2386 pcibr_piomap_t pcibr_piomap;
2387 iopaddr_t xio_addr;
2388 xtalk_piomap_t xtalk_piomap;
2389 unsigned long s;
2390
2391 /* Make sure that the req sizes are non-zero */
2392 if ((req_size < 1) || (req_size_max < 1)) {
2393 PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_PIOMAP, pconn_vhdl,
2394 "pcibr_piomap_alloc: req_size | req_size_max < 1\n"));
2395 return NULL;
2396 }
2397
2398 /*
2399 * Code to translate slot/space/addr
2400 * into xio_addr is common between
2401 * this routine and pcibr_piotrans_addr.
2402 */
2403 xio_addr = pcibr_addr_pci_to_xio(pconn_vhdl, pciio_slot, space, pci_addr, req_size, flags);
2404
2405 if (xio_addr == XIO_NOWHERE) {
2406 PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_PIOMAP, pconn_vhdl,
2407 "pcibr_piomap_alloc: xio_addr == XIO_NOWHERE\n"));
2408 return NULL;
2409 }
2410
2411 /* Check the piomap list to see if there is already an allocated
2412 * piomap entry but not in use. If so use that one. Otherwise
2413 * allocate a new piomap entry and add it to the piomap list
2414 */
2415 mapptr = &(pcibr_info->f_piomap);
2416
2417 s = pcibr_lock(pcibr_soft);
2418 for (pcibr_piomap = *mapptr;
2419 pcibr_piomap != NULL;
2420 pcibr_piomap = pcibr_piomap->bp_next) {
2421 if (pcibr_piomap->bp_mapsz == 0)
2422 break;
2423 }
2424
2425 if (pcibr_piomap)
2426 mapptr = NULL;
2427 else {
2428 pcibr_unlock(pcibr_soft, s);
2429 NEW(pcibr_piomap);
2430 }
2431
2432 pcibr_piomap->bp_dev = pconn_vhdl;
2433 pcibr_piomap->bp_slot = PCIBR_DEVICE_TO_SLOT(pcibr_soft, pciio_slot);
2434 pcibr_piomap->bp_flags = flags;
2435 pcibr_piomap->bp_space = space;
2436 pcibr_piomap->bp_pciaddr = pci_addr;
2437 pcibr_piomap->bp_mapsz = req_size;
2438 pcibr_piomap->bp_soft = pcibr_soft;
2439 pcibr_piomap->bp_toc[0] = ATOMIC_INIT(0);
2440
2441 if (mapptr) {
2442 s = pcibr_lock(pcibr_soft);
2443 maplist = *mapptr;
2444 pcibr_piomap->bp_next = maplist;
2445 *mapptr = pcibr_piomap;
2446 }
2447 pcibr_unlock(pcibr_soft, s);
2448
2449
2450 if (pcibr_piomap) {
2451 xtalk_piomap =
2452 xtalk_piomap_alloc(xconn_vhdl, 0,
2453 xio_addr,
2454 req_size, req_size_max,
2455 flags & PIOMAP_FLAGS);
2456 if (xtalk_piomap) {
2457 pcibr_piomap->bp_xtalk_addr = xio_addr;
2458 pcibr_piomap->bp_xtalk_pio = xtalk_piomap;
2459 } else {
2460 pcibr_piomap->bp_mapsz = 0;
2461 pcibr_piomap = 0;
2462 }
2463 }
2464
2465 PCIBR_DEBUG((PCIBR_DEBUG_PIOMAP, pconn_vhdl,
2466 "pcibr_piomap_alloc: map=0x%x\n", pcibr_piomap));
2467
2468 return pcibr_piomap;
2469 }
2470
2471 /*ARGSUSED */
2472 void
pcibr_piomap_free(pcibr_piomap_t pcibr_piomap)2473 pcibr_piomap_free(pcibr_piomap_t pcibr_piomap)
2474 {
2475 PCIBR_DEBUG((PCIBR_DEBUG_PIOMAP, pcibr_piomap->bp_dev,
2476 "pcibr_piomap_free: map=0x%x\n", pcibr_piomap));
2477
2478 xtalk_piomap_free(pcibr_piomap->bp_xtalk_pio);
2479 pcibr_piomap->bp_xtalk_pio = 0;
2480 pcibr_piomap->bp_mapsz = 0;
2481 }
2482
2483 /*ARGSUSED */
2484 caddr_t
pcibr_piomap_addr(pcibr_piomap_t pcibr_piomap,iopaddr_t pci_addr,size_t req_size)2485 pcibr_piomap_addr(pcibr_piomap_t pcibr_piomap,
2486 iopaddr_t pci_addr,
2487 size_t req_size)
2488 {
2489 caddr_t addr;
2490 addr = xtalk_piomap_addr(pcibr_piomap->bp_xtalk_pio,
2491 pcibr_piomap->bp_xtalk_addr +
2492 pci_addr - pcibr_piomap->bp_pciaddr,
2493 req_size);
2494 PCIBR_DEBUG((PCIBR_DEBUG_PIOMAP, pcibr_piomap->bp_dev,
2495 "pcibr_piomap_free: map=0x%x, addr=0x%x\n",
2496 pcibr_piomap, addr));
2497
2498 return(addr);
2499 }
2500
2501 /*ARGSUSED */
/*
 * pcibr_piomap_done: signal completion of PIO through this map; simply
 * forwards the notification to the underlying xtalk PIO map.
 */
void
pcibr_piomap_done(pcibr_piomap_t pcibr_piomap)
{
    PCIBR_DEBUG((PCIBR_DEBUG_PIOMAP, pcibr_piomap->bp_dev,
		"pcibr_piomap_done: map=0x%x\n", pcibr_piomap));
    xtalk_piomap_done(pcibr_piomap->bp_xtalk_pio);
}
2509
2510 /*ARGSUSED */
2511 caddr_t
pcibr_piotrans_addr(vertex_hdl_t pconn_vhdl,device_desc_t dev_desc,pciio_space_t space,iopaddr_t pci_addr,size_t req_size,unsigned flags)2512 pcibr_piotrans_addr(vertex_hdl_t pconn_vhdl,
2513 device_desc_t dev_desc,
2514 pciio_space_t space,
2515 iopaddr_t pci_addr,
2516 size_t req_size,
2517 unsigned flags)
2518 {
2519 pciio_info_t pciio_info = pciio_info_get(pconn_vhdl);
2520 pciio_slot_t pciio_slot = PCIBR_INFO_SLOT_GET_INT(pciio_info);
2521 pcibr_soft_t pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
2522 vertex_hdl_t xconn_vhdl = pcibr_soft->bs_conn;
2523
2524 iopaddr_t xio_addr;
2525 caddr_t addr;
2526
2527 xio_addr = pcibr_addr_pci_to_xio(pconn_vhdl, pciio_slot, space, pci_addr, req_size, flags);
2528
2529 if (xio_addr == XIO_NOWHERE) {
2530 PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_PIODIR, pconn_vhdl,
2531 "pcibr_piotrans_addr: xio_addr == XIO_NOWHERE\n"));
2532 return NULL;
2533 }
2534
2535 addr = xtalk_piotrans_addr(xconn_vhdl, 0, xio_addr, req_size, flags & PIOMAP_FLAGS);
2536 PCIBR_DEBUG((PCIBR_DEBUG_PIODIR, pconn_vhdl,
2537 "pcibr_piotrans_addr: xio_addr=0x%x, addr=0x%x\n",
2538 xio_addr, addr));
2539 return(addr);
2540 }
2541
2542 /*
2543 * PIO Space allocation and management.
2544 * Allocate and Manage the PCI PIO space (mem and io space)
2545 * This routine is pretty simplistic at this time, and
2546 * does pretty trivial management of allocation and freeing.
2547 * The current scheme is prone for fragmentation.
2548 * Change the scheme to use bitmaps.
2549 */
2550
2551 /*ARGSUSED */
2552 iopaddr_t
pcibr_piospace_alloc(vertex_hdl_t pconn_vhdl,device_desc_t dev_desc,pciio_space_t space,size_t req_size,size_t alignment)2553 pcibr_piospace_alloc(vertex_hdl_t pconn_vhdl,
2554 device_desc_t dev_desc,
2555 pciio_space_t space,
2556 size_t req_size,
2557 size_t alignment)
2558 {
2559 pcibr_info_t pcibr_info = pcibr_info_get(pconn_vhdl);
2560 pciio_info_t pciio_info = &pcibr_info->f_c;
2561 pcibr_soft_t pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
2562
2563 pciio_piospace_t piosp;
2564 unsigned long s;
2565
2566 iopaddr_t start_addr;
2567 size_t align_mask;
2568
2569 /*
2570 * Check for proper alignment
2571 */
2572 ASSERT(alignment >= PAGE_SIZE);
2573 ASSERT((alignment & (alignment - 1)) == 0);
2574
2575 align_mask = alignment - 1;
2576 s = pcibr_lock(pcibr_soft);
2577
2578 /*
2579 * First look if a previously allocated chunk exists.
2580 */
2581 if ((piosp = pcibr_info->f_piospace)) {
2582 /*
2583 * Look through the list for a right sized free chunk.
2584 */
2585 do {
2586 if (piosp->free &&
2587 (piosp->space == space) &&
2588 (piosp->count >= req_size) &&
2589 !(piosp->start & align_mask)) {
2590 piosp->free = 0;
2591 pcibr_unlock(pcibr_soft, s);
2592 return piosp->start;
2593 }
2594 piosp = piosp->next;
2595 } while (piosp);
2596 }
2597 ASSERT(!piosp);
2598
2599 /*
2600 * Allocate PCI bus address, usually for the Universe chip driver;
2601 * do not pass window info since the actual PCI bus address
2602 * space will never be freed. The space may be reused after it
2603 * is logically released by pcibr_piospace_free().
2604 */
2605 switch (space) {
2606 case PCIIO_SPACE_IO:
2607 start_addr = pcibr_bus_addr_alloc(pcibr_soft, NULL,
2608 PCIIO_SPACE_IO,
2609 0, req_size, alignment);
2610 break;
2611
2612 case PCIIO_SPACE_MEM:
2613 case PCIIO_SPACE_MEM32:
2614 start_addr = pcibr_bus_addr_alloc(pcibr_soft, NULL,
2615 PCIIO_SPACE_MEM32,
2616 0, req_size, alignment);
2617 break;
2618
2619 default:
2620 ASSERT(0);
2621 pcibr_unlock(pcibr_soft, s);
2622 PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_PIOMAP, pconn_vhdl,
2623 "pcibr_piospace_alloc: unknown space %d\n", space));
2624 return 0;
2625 }
2626
2627 /*
2628 * If too big a request, reject it.
2629 */
2630 if (!start_addr) {
2631 pcibr_unlock(pcibr_soft, s);
2632 PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_PIOMAP, pconn_vhdl,
2633 "pcibr_piospace_alloc: request 0x%x to big\n", req_size));
2634 return 0;
2635 }
2636
2637 NEW(piosp);
2638 piosp->free = 0;
2639 piosp->space = space;
2640 piosp->start = start_addr;
2641 piosp->count = req_size;
2642 piosp->next = pcibr_info->f_piospace;
2643 pcibr_info->f_piospace = piosp;
2644
2645 pcibr_unlock(pcibr_soft, s);
2646
2647 PCIBR_DEBUG((PCIBR_DEBUG_PIOMAP, pconn_vhdl,
2648 "pcibr_piospace_alloc: piosp=0x%x\n", piosp));
2649
2650 return start_addr;
2651 }
2652
2653 /*ARGSUSED */
2654 void
pcibr_piospace_free(vertex_hdl_t pconn_vhdl,pciio_space_t space,iopaddr_t pciaddr,size_t req_size)2655 pcibr_piospace_free(vertex_hdl_t pconn_vhdl,
2656 pciio_space_t space,
2657 iopaddr_t pciaddr,
2658 size_t req_size)
2659 {
2660 pcibr_info_t pcibr_info = pcibr_info_get(pconn_vhdl);
2661 #ifdef PIC_LATER
2662 pcibr_soft_t pcibr_soft = (pcibr_soft_t) pcibr_info->f_mfast;
2663 #endif
2664
2665 pciio_piospace_t piosp;
2666 unsigned long s;
2667 char name[1024];
2668
2669 /*
2670 * Look through the bridge data structures for the pciio_piospace_t
2671 * structure corresponding to 'pciaddr'
2672 */
2673 s = pcibr_lock(pcibr_soft);
2674 piosp = pcibr_info->f_piospace;
2675 while (piosp) {
2676 /*
2677 * Piospace free can only be for the complete
2678 * chunk and not parts of it..
2679 */
2680 if (piosp->start == pciaddr) {
2681 if (piosp->count == req_size)
2682 break;
2683 /*
2684 * Improper size passed for freeing..
2685 * Print a message and break;
2686 */
2687 hwgraph_vertex_name_get(pconn_vhdl, name, 1024);
2688 printk(KERN_WARNING "pcibr_piospace_free: error");
2689 printk(KERN_WARNING "Device %s freeing size (0x%lx) different than allocated (0x%lx)",
2690 name, req_size, piosp->count);
2691 printk(KERN_WARNING "Freeing 0x%lx instead", piosp->count);
2692 break;
2693 }
2694 piosp = piosp->next;
2695 }
2696
2697 if (!piosp) {
2698 printk(KERN_WARNING
2699 "pcibr_piospace_free: Address 0x%lx size 0x%lx - No match\n",
2700 pciaddr, req_size);
2701 pcibr_unlock(pcibr_soft, s);
2702 return;
2703 }
2704 piosp->free = 1;
2705 pcibr_unlock(pcibr_soft, s);
2706
2707 PCIBR_DEBUG((PCIBR_DEBUG_PIOMAP, pconn_vhdl,
2708 "pcibr_piospace_free: piosp=0x%x\n", piosp));
2709 return;
2710 }
2711
2712 /* =====================================================================
2713 * DMA MANAGEMENT
2714 *
2715 * The Bridge ASIC provides three methods of doing
2716 * DMA: via a "direct map" register available in
2717 * 32-bit PCI space (which selects a contiguous 2G
2718 * address space on some other widget), via
2719 * "direct" addressing via 64-bit PCI space (all
2720 * destination information comes from the PCI
2721 * address, including transfer attributes), and via
2722 * a "mapped" region that allows a bunch of
2723 * different small mappings to be established with
2724 * the PMU.
2725 *
2726 * For efficiency, we most prefer to use the 32-bit
2727 * direct mapping facility, since it requires no
2728 * resource allocations. The advantage of using the
2729 * PMU over the 64-bit direct is that single-cycle
2730 * PCI addressing can be used; the advantage of
2731 * using 64-bit direct over PMU addressing is that
2732 * we do not have to allocate entries in the PMU.
2733 */
2734
2735 /*
2736 * Convert PCI-generic software flags and Bridge-specific software flags
2737 * into Bridge-specific Direct Map attribute bits.
2738 */
2739 static iopaddr_t
pcibr_flags_to_d64(unsigned flags,pcibr_soft_t pcibr_soft)2740 pcibr_flags_to_d64(unsigned flags, pcibr_soft_t pcibr_soft)
2741 {
2742 iopaddr_t attributes = 0;
2743
2744 /* Sanity check: Bridge only allows use of VCHAN1 via 64-bit addrs */
2745 #ifdef LATER
2746 ASSERT_ALWAYS(!(flags & PCIBR_VCHAN1) || (flags & PCIIO_DMA_A64));
2747 #endif
2748
2749 /* Generic macro flags
2750 */
2751 if (flags & PCIIO_DMA_DATA) { /* standard data channel */
2752 attributes &= ~PCI64_ATTR_BAR; /* no barrier bit */
2753 attributes |= PCI64_ATTR_PREF; /* prefetch on */
2754 }
2755 if (flags & PCIIO_DMA_CMD) { /* standard command channel */
2756 attributes |= PCI64_ATTR_BAR; /* barrier bit on */
2757 attributes &= ~PCI64_ATTR_PREF; /* disable prefetch */
2758 }
2759 /* Generic detail flags
2760 */
2761 if (flags & PCIIO_PREFETCH)
2762 attributes |= PCI64_ATTR_PREF;
2763 if (flags & PCIIO_NOPREFETCH)
2764 attributes &= ~PCI64_ATTR_PREF;
2765
2766 /* the swap bit is in the address attributes for xbridge */
2767 if (IS_XBRIDGE_OR_PIC_SOFT(pcibr_soft)) {
2768 if (flags & PCIIO_BYTE_STREAM)
2769 attributes |= PCI64_ATTR_SWAP;
2770 if (flags & PCIIO_WORD_VALUES)
2771 attributes &= ~PCI64_ATTR_SWAP;
2772 }
2773
2774 /* Provider-specific flags
2775 */
2776 if (flags & PCIBR_BARRIER)
2777 attributes |= PCI64_ATTR_BAR;
2778 if (flags & PCIBR_NOBARRIER)
2779 attributes &= ~PCI64_ATTR_BAR;
2780
2781 if (flags & PCIBR_PREFETCH)
2782 attributes |= PCI64_ATTR_PREF;
2783 if (flags & PCIBR_NOPREFETCH)
2784 attributes &= ~PCI64_ATTR_PREF;
2785
2786 if (flags & PCIBR_PRECISE)
2787 attributes |= PCI64_ATTR_PREC;
2788 if (flags & PCIBR_NOPRECISE)
2789 attributes &= ~PCI64_ATTR_PREC;
2790
2791 if (flags & PCIBR_VCHAN1)
2792 attributes |= PCI64_ATTR_VIRTUAL;
2793 if (flags & PCIBR_VCHAN0)
2794 attributes &= ~PCI64_ATTR_VIRTUAL;
2795
2796 /* PIC in PCI-X mode only supports barrier & swap */
2797 if (IS_PCIX(pcibr_soft)) {
2798 attributes &= (PCI64_ATTR_BAR | PCI64_ATTR_SWAP);
2799 }
2800
2801 return (attributes);
2802 }
2803
2804 /*ARGSUSED */
2805 pcibr_dmamap_t
pcibr_dmamap_alloc(vertex_hdl_t pconn_vhdl,device_desc_t dev_desc,size_t req_size_max,unsigned flags)2806 pcibr_dmamap_alloc(vertex_hdl_t pconn_vhdl,
2807 device_desc_t dev_desc,
2808 size_t req_size_max,
2809 unsigned flags)
2810 {
2811 pciio_info_t pciio_info = pciio_info_get(pconn_vhdl);
2812 pcibr_soft_t pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
2813 vertex_hdl_t xconn_vhdl = pcibr_soft->bs_conn;
2814 pciio_slot_t slot;
2815 xwidgetnum_t xio_port;
2816
2817 xtalk_dmamap_t xtalk_dmamap;
2818 pcibr_dmamap_t pcibr_dmamap;
2819 int ate_count;
2820 int ate_index;
2821 int vchan = VCHAN0;
2822
2823 /* merge in forced flags */
2824 flags |= pcibr_soft->bs_dma_flags;
2825
2826 /*
2827 * On SNIA64, these maps are pre-allocated because pcibr_dmamap_alloc()
2828 * can be called within an interrupt thread.
2829 */
2830 pcibr_dmamap = (pcibr_dmamap_t)get_free_pciio_dmamap(pcibr_soft->bs_vhdl);
2831
2832 if (!pcibr_dmamap)
2833 return 0;
2834
2835 xtalk_dmamap = xtalk_dmamap_alloc(xconn_vhdl, dev_desc, req_size_max,
2836 flags & DMAMAP_FLAGS);
2837 if (!xtalk_dmamap) {
2838 PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_DMAMAP, pconn_vhdl,
2839 "pcibr_dmamap_alloc: xtalk_dmamap_alloc failed\n"));
2840 free_pciio_dmamap(pcibr_dmamap);
2841 return 0;
2842 }
2843 xio_port = pcibr_soft->bs_mxid;
2844 slot = PCIBR_INFO_SLOT_GET_INT(pciio_info);
2845
2846 pcibr_dmamap->bd_dev = pconn_vhdl;
2847 pcibr_dmamap->bd_slot = PCIBR_DEVICE_TO_SLOT(pcibr_soft, slot);
2848 pcibr_dmamap->bd_soft = pcibr_soft;
2849 pcibr_dmamap->bd_xtalk = xtalk_dmamap;
2850 pcibr_dmamap->bd_max_size = req_size_max;
2851 pcibr_dmamap->bd_xio_port = xio_port;
2852
2853 if (flags & PCIIO_DMA_A64) {
2854 if (!pcibr_try_set_device(pcibr_soft, slot, flags, BRIDGE_DEV_D64_BITS)) {
2855 iopaddr_t pci_addr;
2856 int have_rrbs;
2857 int min_rrbs;
2858
2859 /* Device is capable of A64 operations,
2860 * and the attributes of the DMA are
2861 * consistent with any previous DMA
2862 * mappings using shared resources.
2863 */
2864
2865 pci_addr = pcibr_flags_to_d64(flags, pcibr_soft);
2866
2867 pcibr_dmamap->bd_flags = flags;
2868 pcibr_dmamap->bd_xio_addr = 0;
2869 pcibr_dmamap->bd_pci_addr = pci_addr;
2870
2871 /* If in PCI mode, make sure we have an RRB (or two).
2872 */
2873 if (IS_PCI(pcibr_soft) &&
2874 !(pcibr_soft->bs_rrb_fixed & (1 << slot))) {
2875 if (flags & PCIBR_VCHAN1)
2876 vchan = VCHAN1;
2877 have_rrbs = pcibr_soft->bs_rrb_valid[slot][vchan];
2878 if (have_rrbs < 2) {
2879 if (pci_addr & PCI64_ATTR_PREF)
2880 min_rrbs = 2;
2881 else
2882 min_rrbs = 1;
2883 if (have_rrbs < min_rrbs)
2884 do_pcibr_rrb_autoalloc(pcibr_soft, slot, vchan,
2885 min_rrbs - have_rrbs);
2886 }
2887 }
2888 PCIBR_DEBUG((PCIBR_DEBUG_DMAMAP | PCIBR_DEBUG_DMADIR, pconn_vhdl,
2889 "pcibr_dmamap_alloc: using direct64, map=0x%x\n",
2890 pcibr_dmamap));
2891 return pcibr_dmamap;
2892 }
2893 PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_DMAMAP | PCIBR_DEBUG_DMADIR, pconn_vhdl,
2894 "pcibr_dmamap_alloc: unable to use direct64\n"));
2895
2896 /* PIC only supports 64-bit direct mapping in PCI-X mode. */
2897 if (IS_PCIX(pcibr_soft)) {
2898 DEL(pcibr_dmamap);
2899 return 0;
2900 }
2901
2902 flags &= ~PCIIO_DMA_A64;
2903 }
2904 if (flags & PCIIO_FIXED) {
2905 /* warning: mappings may fail later,
2906 * if direct32 can't get to the address.
2907 */
2908 if (!pcibr_try_set_device(pcibr_soft, slot, flags, BRIDGE_DEV_D32_BITS)) {
2909 /* User desires DIRECT A32 operations,
2910 * and the attributes of the DMA are
2911 * consistent with any previous DMA
2912 * mappings using shared resources.
2913 * Mapping calls may fail if target
2914 * is outside the direct32 range.
2915 */
2916 PCIBR_DEBUG((PCIBR_DEBUG_DMAMAP | PCIBR_DEBUG_DMADIR, pconn_vhdl,
2917 "pcibr_dmamap_alloc: using direct32, map=0x%x\n",
2918 pcibr_dmamap));
2919 pcibr_dmamap->bd_flags = flags;
2920 pcibr_dmamap->bd_xio_addr = pcibr_soft->bs_dir_xbase;
2921 pcibr_dmamap->bd_pci_addr = PCI32_DIRECT_BASE;
2922 return pcibr_dmamap;
2923 }
2924 PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_DMAMAP | PCIBR_DEBUG_DMADIR, pconn_vhdl,
2925 "pcibr_dmamap_alloc: unable to use direct32\n"));
2926
2927 /* If the user demands FIXED and we can't
2928 * give it to him, fail.
2929 */
2930 xtalk_dmamap_free(xtalk_dmamap);
2931 free_pciio_dmamap(pcibr_dmamap);
2932 return 0;
2933 }
2934 /*
2935 * Allocate Address Translation Entries from the mapping RAM.
2936 * Unless the PCIBR_NO_ATE_ROUNDUP flag is specified,
2937 * the maximum number of ATEs is based on the worst-case
2938 * scenario, where the requested target is in the
2939 * last byte of an ATE; thus, mapping IOPGSIZE+2
2940 * does end up requiring three ATEs.
2941 */
2942 if (!(flags & PCIBR_NO_ATE_ROUNDUP)) {
2943 ate_count = IOPG((IOPGSIZE - 1) /* worst case start offset */
2944 +req_size_max /* max mapping bytes */
2945 - 1) + 1; /* round UP */
2946 } else { /* assume requested target is page aligned */
2947 ate_count = IOPG(req_size_max /* max mapping bytes */
2948 - 1) + 1; /* round UP */
2949 }
2950
2951 ate_index = pcibr_ate_alloc(pcibr_soft, ate_count);
2952
2953 if (ate_index != -1) {
2954 if (!pcibr_try_set_device(pcibr_soft, slot, flags, BRIDGE_DEV_PMU_BITS)) {
2955 bridge_ate_t ate_proto;
2956 int have_rrbs;
2957 int min_rrbs;
2958
2959 PCIBR_DEBUG((PCIBR_DEBUG_DMAMAP, pconn_vhdl,
2960 "pcibr_dmamap_alloc: using PMU, ate_index=%d, "
2961 "pcibr_dmamap=0x%x\n", ate_index, pcibr_dmamap));
2962
2963 ate_proto = pcibr_flags_to_ate(flags);
2964
2965 pcibr_dmamap->bd_flags = flags;
2966 pcibr_dmamap->bd_pci_addr =
2967 PCI32_MAPPED_BASE + IOPGSIZE * ate_index;
2968 /*
2969 * for xbridge the byte-swap bit == bit 29 of PCI address
2970 */
2971 if (IS_XBRIDGE_OR_PIC_SOFT(pcibr_soft)) {
2972 if (flags & PCIIO_BYTE_STREAM)
2973 ATE_SWAP_ON(pcibr_dmamap->bd_pci_addr);
2974 /*
2975 * If swap was set in bss_device in pcibr_endian_set()
2976 * we need to change the address bit.
2977 */
2978 if (pcibr_soft->bs_slot[slot].bss_device &
2979 BRIDGE_DEV_SWAP_PMU)
2980 ATE_SWAP_ON(pcibr_dmamap->bd_pci_addr);
2981 if (flags & PCIIO_WORD_VALUES)
2982 ATE_SWAP_OFF(pcibr_dmamap->bd_pci_addr);
2983 }
2984 pcibr_dmamap->bd_xio_addr = 0;
2985 pcibr_dmamap->bd_ate_ptr = pcibr_ate_addr(pcibr_soft, ate_index);
2986 pcibr_dmamap->bd_ate_index = ate_index;
2987 pcibr_dmamap->bd_ate_count = ate_count;
2988 pcibr_dmamap->bd_ate_proto = ate_proto;
2989
2990 /* Make sure we have an RRB (or two).
2991 */
2992 if (!(pcibr_soft->bs_rrb_fixed & (1 << slot))) {
2993 have_rrbs = pcibr_soft->bs_rrb_valid[slot][vchan];
2994 if (have_rrbs < 2) {
2995 if (ate_proto & ATE_PREF)
2996 min_rrbs = 2;
2997 else
2998 min_rrbs = 1;
2999 if (have_rrbs < min_rrbs)
3000 do_pcibr_rrb_autoalloc(pcibr_soft, slot, vchan,
3001 min_rrbs - have_rrbs);
3002 }
3003 }
3004 if (ate_index >= pcibr_soft->bs_int_ate_size &&
3005 !IS_XBRIDGE_OR_PIC_SOFT(pcibr_soft)) {
3006 bridge_t *bridge = pcibr_soft->bs_base;
3007 volatile unsigned *cmd_regp;
3008 unsigned cmd_reg = 0;
3009 unsigned long s;
3010
3011 pcibr_dmamap->bd_flags |= PCIBR_DMAMAP_SSRAM;
3012
3013 s = pcibr_lock(pcibr_soft);
3014 cmd_regp = pcibr_slot_config_addr(bridge, slot,
3015 PCI_CFG_COMMAND/4);
3016 if ( IS_PIC_SOFT(pcibr_soft) ) {
3017 cmd_reg = pcibr_slot_config_get(bridge, slot, PCI_CFG_COMMAND/4);
3018 }
3019 pcibr_soft->bs_slot[slot].bss_cmd_pointer = cmd_regp;
3020 pcibr_soft->bs_slot[slot].bss_cmd_shadow = cmd_reg;
3021 pcibr_unlock(pcibr_soft, s);
3022 }
3023 return pcibr_dmamap;
3024 }
3025 PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_DMAMAP, pconn_vhdl,
3026 "pcibr_dmamap_alloc: PMU use failed, ate_index=%d\n",
3027 ate_index));
3028
3029 pcibr_ate_free(pcibr_soft, ate_index, ate_count);
3030 }
3031 /* total failure: sorry, you just can't
3032 * get from here to there that way.
3033 */
3034 PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_DMAMAP, pconn_vhdl,
3035 "pcibr_dmamap_alloc: complete failure.\n"));
3036 xtalk_dmamap_free(xtalk_dmamap);
3037 free_pciio_dmamap(pcibr_dmamap);
3038 return 0;
3039 }
3040
3041 /*ARGSUSED */
3042 void
pcibr_dmamap_free(pcibr_dmamap_t pcibr_dmamap)3043 pcibr_dmamap_free(pcibr_dmamap_t pcibr_dmamap)
3044 {
3045 pcibr_soft_t pcibr_soft = pcibr_dmamap->bd_soft;
3046 pciio_slot_t slot = PCIBR_SLOT_TO_DEVICE(pcibr_soft,
3047 pcibr_dmamap->bd_slot);
3048
3049 unsigned flags = pcibr_dmamap->bd_flags;
3050
3051 /* Make sure that bss_ext_ates_active
3052 * is properly kept up to date.
3053 */
3054
3055 if (PCIBR_DMAMAP_BUSY & flags)
3056 if (PCIBR_DMAMAP_SSRAM & flags)
3057 atomic_dec(&(pcibr_soft->bs_slot[slot]. bss_ext_ates_active));
3058
3059 xtalk_dmamap_free(pcibr_dmamap->bd_xtalk);
3060
3061 if (pcibr_dmamap->bd_flags & PCIIO_DMA_A64) {
3062 pcibr_release_device(pcibr_soft, slot, BRIDGE_DEV_D64_BITS);
3063 }
3064 if (pcibr_dmamap->bd_ate_count) {
3065 pcibr_ate_free(pcibr_dmamap->bd_soft,
3066 pcibr_dmamap->bd_ate_index,
3067 pcibr_dmamap->bd_ate_count);
3068 pcibr_release_device(pcibr_soft, slot, BRIDGE_DEV_PMU_BITS);
3069 }
3070
3071 PCIBR_DEBUG((PCIBR_DEBUG_DMAMAP, pcibr_dmamap->bd_dev,
3072 "pcibr_dmamap_free: pcibr_dmamap=0x%x\n", pcibr_dmamap));
3073
3074 free_pciio_dmamap(pcibr_dmamap);
3075 }
3076
3077 /*
3078 * pcibr_addr_xio_to_pci: given a PIO range, hand
3079 * back the corresponding base PCI MEM address;
3080 * this is used to short-circuit DMA requests that
3081 * loop back onto this PCI bus.
3082 */
3083 static iopaddr_t
pcibr_addr_xio_to_pci(pcibr_soft_t soft,iopaddr_t xio_addr,size_t req_size)3084 pcibr_addr_xio_to_pci(pcibr_soft_t soft,
3085 iopaddr_t xio_addr,
3086 size_t req_size)
3087 {
3088 iopaddr_t xio_lim = xio_addr + req_size - 1;
3089 iopaddr_t pci_addr;
3090 pciio_slot_t slot;
3091
3092 if (IS_PIC_BUSNUM_SOFT(soft, 0)) {
3093 if ((xio_addr >= PICBRIDGE0_PCI_MEM32_BASE) &&
3094 (xio_lim <= PICBRIDGE0_PCI_MEM32_LIMIT)) {
3095 pci_addr = xio_addr - PICBRIDGE0_PCI_MEM32_BASE;
3096 return pci_addr;
3097 }
3098 if ((xio_addr >= PICBRIDGE0_PCI_MEM64_BASE) &&
3099 (xio_lim <= PICBRIDGE0_PCI_MEM64_LIMIT)) {
3100 pci_addr = xio_addr - PICBRIDGE0_PCI_MEM64_BASE;
3101 return pci_addr;
3102 }
3103 } else if (IS_PIC_BUSNUM_SOFT(soft, 1)) {
3104 if ((xio_addr >= PICBRIDGE1_PCI_MEM32_BASE) &&
3105 (xio_lim <= PICBRIDGE1_PCI_MEM32_LIMIT)) {
3106 pci_addr = xio_addr - PICBRIDGE1_PCI_MEM32_BASE;
3107 return pci_addr;
3108 }
3109 if ((xio_addr >= PICBRIDGE1_PCI_MEM64_BASE) &&
3110 (xio_lim <= PICBRIDGE1_PCI_MEM64_LIMIT)) {
3111 pci_addr = xio_addr - PICBRIDGE1_PCI_MEM64_BASE;
3112 return pci_addr;
3113 }
3114 } else {
3115 if ((xio_addr >= BRIDGE_PCI_MEM32_BASE) &&
3116 (xio_lim <= BRIDGE_PCI_MEM32_LIMIT)) {
3117 pci_addr = xio_addr - BRIDGE_PCI_MEM32_BASE;
3118 return pci_addr;
3119 }
3120 if ((xio_addr >= BRIDGE_PCI_MEM64_BASE) &&
3121 (xio_lim <= BRIDGE_PCI_MEM64_LIMIT)) {
3122 pci_addr = xio_addr - BRIDGE_PCI_MEM64_BASE;
3123 return pci_addr;
3124 }
3125 }
3126 for (slot = soft->bs_min_slot; slot < PCIBR_NUM_SLOTS(soft); ++slot)
3127 if ((xio_addr >= PCIBR_BRIDGE_DEVIO(soft, slot)) &&
3128 (xio_lim < PCIBR_BRIDGE_DEVIO(soft, slot + 1))) {
3129 bridgereg_t dev;
3130
3131 dev = soft->bs_slot[slot].bss_device;
3132 pci_addr = dev & BRIDGE_DEV_OFF_MASK;
3133 pci_addr <<= BRIDGE_DEV_OFF_ADDR_SHFT;
3134 pci_addr += xio_addr - PCIBR_BRIDGE_DEVIO(soft, slot);
3135 return (dev & BRIDGE_DEV_DEV_IO_MEM) ? pci_addr : PCI_NOWHERE;
3136 }
3137 return 0;
3138 }
3139
3140 /*ARGSUSED */
/*
 * pcibr_dmamap_addr: establish the DMA mapping for the physical range
 * [paddr .. paddr+req_size) using a map previously set up by
 * pcibr_dmamap_alloc(), and return the PCI bus address the device
 * should use.  Returns 0 when no usable PCI address can be produced.
 *
 * Strategy is chosen by the flags recorded at allocation time:
 *   - target loops back to this bridge: translate XIO back to PCI MEM
 *   - PCIIO_DMA_A64: 64-bit direct mapping
 *   - PCIIO_FIXED:   32-bit direct mapping (may be out of range)
 *   - otherwise:     page-mapped through ATEs in the mapping RAM
 */
iopaddr_t
pcibr_dmamap_addr(pcibr_dmamap_t pcibr_dmamap,
                  paddr_t paddr,
                  size_t req_size)
{
    pcibr_soft_t pcibr_soft;
    iopaddr_t xio_addr;
    xwidgetnum_t xio_port;
    iopaddr_t pci_addr;
    unsigned flags;

    ASSERT(pcibr_dmamap != NULL);
    ASSERT(req_size > 0);
    ASSERT(req_size <= pcibr_dmamap->bd_max_size);

    pcibr_soft = pcibr_dmamap->bd_soft;

    flags = pcibr_dmamap->bd_flags;

    /* Ask the xtalk layer for the XIO address; the result may carry a
     * packed target port, else use the port chosen at allocation time.
     */
    xio_addr = xtalk_dmamap_addr(pcibr_dmamap->bd_xtalk, paddr, req_size);
    if (XIO_PACKED(xio_addr)) {
        xio_port = XIO_PORT(xio_addr);
        xio_addr = XIO_ADDR(xio_addr);
    } else
        xio_port = pcibr_dmamap->bd_xio_port;

    /* If this DMA is to an address that
     * refers back to this Bridge chip,
     * reduce it back to the correct
     * PCI MEM address.
     */
    if (xio_port == pcibr_soft->bs_xid) {
        pci_addr = pcibr_addr_xio_to_pci(pcibr_soft, xio_addr, req_size);
    } else if (flags & PCIIO_DMA_A64) {
        /* A64 DMA:
         * always use 64-bit direct mapping,
         * which always works.
         * Device(x) was set up during
         * dmamap allocation.
         */

        /* attributes are already bundled up into bd_pci_addr.
         */
        pci_addr = pcibr_dmamap->bd_pci_addr
            | ((uint64_t) xio_port << PCI64_ATTR_TARG_SHFT)
            | xio_addr;

        /* Bridge Hardware WAR #482836:
         * If the transfer is not cache aligned
         * and the Bridge Rev is <= B, force
         * prefetch to be off.
         */
        if (flags & PCIBR_NOPREFETCH)
            pci_addr &= ~PCI64_ATTR_PREF;

        PCIBR_DEBUG((PCIBR_DEBUG_DMAMAP | PCIBR_DEBUG_DMADIR,
                    pcibr_dmamap->bd_dev,
                    "pcibr_dmamap_addr: (direct64): wanted paddr [0x%x..0x%x] "
                    "XIO port 0x%x offset 0x%x, returning PCI 0x%x\n",
                    paddr, paddr + req_size - 1, xio_port, xio_addr, pci_addr));

    } else if (flags & PCIIO_FIXED) {
        /* A32 direct DMA:
         * always use 32-bit direct mapping,
         * which may fail.
         * Device(x) was set up during
         * dmamap allocation.
         */

        if (xio_port != pcibr_soft->bs_dir_xport)
            pci_addr = 0;       /* wrong DIDN */
        else if (xio_addr < pcibr_dmamap->bd_xio_addr)
            pci_addr = 0;       /* out of range */
        else if ((xio_addr + req_size) >
                 (pcibr_dmamap->bd_xio_addr + BRIDGE_DMA_DIRECT_SIZE))
            pci_addr = 0;       /* out of range */
        else
            pci_addr = pcibr_dmamap->bd_pci_addr +
                xio_addr - pcibr_dmamap->bd_xio_addr;

        PCIBR_DEBUG((PCIBR_DEBUG_DMAMAP | PCIBR_DEBUG_DMADIR,
                    pcibr_dmamap->bd_dev,
                    "pcibr_dmamap_addr (direct32): wanted paddr [0x%x..0x%x] "
                    "XIO port 0x%x offset 0x%x, returning PCI 0x%x\n",
                    paddr, paddr + req_size - 1, xio_port, xio_addr, pci_addr));

    } else {
        bridge_t *bridge = pcibr_soft->bs_base;
        iopaddr_t offset = IOPGOFF(xio_addr);
        bridge_ate_t ate_proto = pcibr_dmamap->bd_ate_proto;
        int ate_count = IOPG(offset + req_size - 1) + 1;

        int ate_index = pcibr_dmamap->bd_ate_index;
        unsigned cmd_regs[8];
        unsigned s;

#if PCIBR_FREEZE_TIME
        int ate_total = ate_count;
        unsigned freeze_time;
#endif
        bridge_ate_p ate_ptr = pcibr_dmamap->bd_ate_ptr;
        bridge_ate_t ate;

        /* Bridge Hardware WAR #482836:
         * If the transfer is not cache aligned
         * and the Bridge Rev is <= B, force
         * prefetch to be off.
         */
        if (flags & PCIBR_NOPREFETCH)
            ate_proto &= ~ATE_PREF;

        /* Build the ATE value: attributes + target widget + page frame. */
        ate = ate_proto
            | (xio_port << ATE_TIDSHIFT)
            | (xio_addr - offset);

        pci_addr = pcibr_dmamap->bd_pci_addr + offset;

        /* Fill in our mapping registers
         * with the appropriate xtalk data,
         * and hand back the PCI address.
         */

        ASSERT(ate_count > 0);
        if (ate_count <= pcibr_dmamap->bd_ate_count) {
            /* NOTE(review): ATE_FREEZE/ATE_WRITE/ATE_THAW are macros
             * that expand in terms of the exact local names declared
             * above (cmd_regs, s, ate_ptr, ate_index, ate_count, ate,
             * bridge, ...); do not rename these locals.
             */
            ATE_FREEZE();
            ATE_WRITE();
            ATE_THAW();
            if ( IS_PIC_SOFT(pcibr_soft) ) {
                bridge->b_wid_tflush;   /* wait until Bridge PIO complete */
            }
            PCIBR_DEBUG((PCIBR_DEBUG_DMAMAP, pcibr_dmamap->bd_dev,
                        "pcibr_dmamap_addr (PMU) : wanted paddr "
                        "[0x%x..0x%x] returning PCI 0x%x\n",
                        paddr, paddr + req_size - 1, pci_addr));

        } else {
            /* The number of ATE's required is greater than the number
             * allocated for this map. One way this can happen is if
             * pcibr_dmamap_alloc() was called with the PCIBR_NO_ATE_ROUNDUP
             * flag, and then when that map is used (right now), the
             * target address tells us we really did need to roundup.
             * The other possibility is that the map is just plain too
             * small to handle the requested target area.
             */
            PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_DMAMAP, pcibr_dmamap->bd_dev,
                        "pcibr_dmamap_addr (PMU) : wanted paddr "
                        "[0x%x..0x%x] ate_count 0x%x bd_ate_count 0x%x "
                        "ATE's required > number allocated\n",
                        paddr, paddr + req_size - 1,
                        ate_count, pcibr_dmamap->bd_ate_count));
            pci_addr = 0;
        }

    }
    return pci_addr;
}
3297
3298 /*ARGSUSED */
3299 void
pcibr_dmamap_done(pcibr_dmamap_t pcibr_dmamap)3300 pcibr_dmamap_done(pcibr_dmamap_t pcibr_dmamap)
3301 {
3302 #ifdef PIC_LATER
3303 pcibr_soft_t pcibr_soft = pcibr_dmamap->bd_soft;
3304 pciio_slot_t slot = PCIBR_SLOT_TO_DEVICE(pcibr_soft,
3305 #endif
3306 /*
3307 * We could go through and invalidate ATEs here;
3308 * for performance reasons, we don't.
3309 * We also don't enforce the strict alternation
3310 * between _addr/_list and _done, but Hub does.
3311 */
3312
3313 if (pcibr_dmamap->bd_flags & PCIBR_DMAMAP_BUSY) {
3314 pcibr_dmamap->bd_flags &= ~PCIBR_DMAMAP_BUSY;
3315
3316 if (pcibr_dmamap->bd_flags & PCIBR_DMAMAP_SSRAM)
3317 atomic_dec(&(pcibr_dmamap->bd_soft->bs_slot[pcibr_dmamap->bd_slot]. bss_ext_ates_active));
3318 }
3319 xtalk_dmamap_done(pcibr_dmamap->bd_xtalk);
3320
3321 PCIBR_DEBUG((PCIBR_DEBUG_DMAMAP, pcibr_dmamap->bd_dev,
3322 "pcibr_dmamap_done: pcibr_dmamap=0x%x\n", pcibr_dmamap));
3323 }
3324
3325
3326 /*
3327 * For each bridge, the DIR_OFF value in the Direct Mapping Register
3328 * determines the PCI to Crosstalk memory mapping to be used for all
3329 * 32-bit Direct Mapping memory accesses. This mapping can be to any
3330 * node in the system. This function will return that compact node id.
3331 */
3332
3333 /*ARGSUSED */
3334 cnodeid_t
3335 pcibr_get_dmatrans_node(vertex_hdl_t pconn_vhdl)
3336 {
3337
3338 pciio_info_t pciio_info = pciio_info_get(pconn_vhdl);
3339 pcibr_soft_t pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
3340
3341 return(NASID_TO_COMPACT_NODEID(NASID_GET(pcibr_soft->bs_dir_xbase)));
3342 }
3343
3344 /*ARGSUSED */
/*
 * pcibr_dmatrans_addr: translate a physical memory range to a PCI bus
 * address directly, without allocating a per-transfer DMA map.  Tries,
 * in order: loopback onto this same bridge, the 64-bit direct window
 * (if the caller allows A64), then the 32-bit direct (DIR_OFF) window.
 * Returns 0 when no direct translation is possible; the caller must
 * then fall back to a mapped (ATE) approach.
 */
iopaddr_t
pcibr_dmatrans_addr(vertex_hdl_t pconn_vhdl,
                    device_desc_t dev_desc,
                    paddr_t paddr,
                    size_t req_size,
                    unsigned flags)
{
    pciio_info_t pciio_info = pciio_info_get(pconn_vhdl);
    pcibr_soft_t pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
    vertex_hdl_t xconn_vhdl = pcibr_soft->bs_conn;
    pciio_slot_t pciio_slot = PCIBR_INFO_SLOT_GET_INT(pciio_info);
    pcibr_soft_slot_t slotp = &pcibr_soft->bs_slot[pciio_slot];

    xwidgetnum_t xio_port;
    iopaddr_t xio_addr;
    iopaddr_t pci_addr;

    int have_rrbs;
    int min_rrbs;
    int vchan = VCHAN0;

    /* merge in forced flags */
    flags |= pcibr_soft->bs_dma_flags;

    xio_addr = xtalk_dmatrans_addr(xconn_vhdl, 0, paddr, req_size,
                                   flags & DMAMAP_FLAGS);
    if (!xio_addr) {
        PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_DMADIR, pconn_vhdl,
                    "pcibr_dmatrans_addr: wanted paddr [0x%x..0x%x], "
                    "xtalk_dmatrans_addr failed with 0x%x\n",
                    paddr, paddr + req_size - 1, xio_addr));
        return 0;
    }
    /*
     * find which XIO port this goes to.
     */
    if (XIO_PACKED(xio_addr)) {
        if (xio_addr == XIO_NOWHERE) {
            PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_DMADIR, pconn_vhdl,
                        "pcibr_dmatrans_addr: wanted paddr [0x%x..0x%x], "
                        "xtalk_dmatrans_addr failed with XIO_NOWHERE\n",
                        paddr, paddr + req_size - 1));
            return 0;
        }
        xio_port = XIO_PORT(xio_addr);
        xio_addr = XIO_ADDR(xio_addr);

    } else
        xio_port = pcibr_soft->bs_mxid;

    /*
     * If this DMA comes back to us,
     * return the PCI MEM address on
     * which it would land, or NULL
     * if the target is something
     * on bridge other than PCI MEM.
     */
    if (xio_port == pcibr_soft->bs_xid) {
        pci_addr = pcibr_addr_xio_to_pci(pcibr_soft, xio_addr, req_size);
        PCIBR_DEBUG((PCIBR_DEBUG_DMADIR, pconn_vhdl,
                    "pcibr_dmatrans_addr: wanted paddr [0x%x..0x%x], "
                    "xio_port=0x%x, pci_addr=0x%x\n",
                    paddr, paddr + req_size - 1, xio_port, pci_addr));
        return pci_addr;
    }
    /* If the caller can use A64, try to
     * satisfy the request with the 64-bit
     * direct map. This can fail if the
     * configuration bits in Device(x)
     * conflict with our flags.
     */

    if (flags & PCIIO_DMA_A64) {
        pci_addr = slotp->bss_d64_base;
        if (!(flags & PCIBR_VCHAN1))
            flags |= PCIBR_VCHAN0;
        /* Fast path: this slot's cached D64 base/flags already match. */
        if ((pci_addr != PCIBR_D64_BASE_UNSET) &&
            (flags == slotp->bss_d64_flags)) {

            pci_addr |= xio_addr
                | ((uint64_t) xio_port << PCI64_ATTR_TARG_SHFT);

#if HWG_PERF_CHECK
            if (xio_addr != 0x20000000)
#endif
                PCIBR_DEBUG((PCIBR_DEBUG_DMADIR, pconn_vhdl,
                            "pcibr_dmatrans_addr: wanted paddr [0x%x..0x%x], "
                            "xio_port=0x%x, direct64: pci_addr=0x%x\n",
                            paddr, paddr + req_size - 1, xio_addr, pci_addr));
            return (pci_addr);
        }
        /* Otherwise try to (re)program Device(x) for these D64 attrs. */
        if (!pcibr_try_set_device(pcibr_soft, pciio_slot, flags, BRIDGE_DEV_D64_BITS)) {
            pci_addr = pcibr_flags_to_d64(flags, pcibr_soft);
            slotp->bss_d64_flags = flags;
            slotp->bss_d64_base = pci_addr;
            pci_addr |= xio_addr
                | ((uint64_t) xio_port << PCI64_ATTR_TARG_SHFT);

            /* If in PCI mode, make sure we have an RRB (or two).
             */
            if (IS_PCI(pcibr_soft) &&
                !(pcibr_soft->bs_rrb_fixed & (1 << pciio_slot))) {
                if (flags & PCIBR_VCHAN1)
                    vchan = VCHAN1;
                have_rrbs = pcibr_soft->bs_rrb_valid[pciio_slot][vchan];
                if (have_rrbs < 2) {
                    /* Prefetching transfers want two read buffers. */
                    if (pci_addr & PCI64_ATTR_PREF)
                        min_rrbs = 2;
                    else
                        min_rrbs = 1;
                    if (have_rrbs < min_rrbs)
                        do_pcibr_rrb_autoalloc(pcibr_soft, pciio_slot, vchan,
                                               min_rrbs - have_rrbs);
                }
            }
#if HWG_PERF_CHECK
            if (xio_addr != 0x20000000)
#endif
                PCIBR_DEBUG((PCIBR_DEBUG_DMADIR, pconn_vhdl,
                            "pcibr_dmatrans_addr: wanted paddr [0x%x..0x%x], "
                            "xio_port=0x%x, direct64: pci_addr=0x%x, "
                            "new flags: 0x%x\n", paddr, paddr + req_size - 1,
                            xio_addr, pci_addr, (uint64_t) flags));
            return (pci_addr);
        }

        PCIBR_DEBUG((PCIBR_DEBUG_DMADIR, pconn_vhdl,
                    "pcibr_dmatrans_addr: wanted paddr [0x%x..0x%x], "
                    "xio_port=0x%x, Unable to set direct64 Device(x) bits\n",
                    paddr, paddr + req_size - 1, xio_addr));

        /* PIC only supports 64-bit direct mapping in PCI-X mode */
        if (IS_PCIX(pcibr_soft)) {
            return 0;
        }

        /* our flags conflict with Device(x). try direct32*/
        flags = flags & ~(PCIIO_DMA_A64 | PCIBR_VCHAN0);
    }
    /* Try to satisfy the request with the 32-bit direct
     * map. This can fail if the configuration bits in
     * Device(x) conflict with our flags, or if the
     * target address is outside where DIR_OFF points.
     */
    {
        size_t map_size = 1ULL << 31;    /* direct32 window is 2 GB */
        iopaddr_t xio_base = pcibr_soft->bs_dir_xbase;
        iopaddr_t offset = xio_addr - xio_base;
        iopaddr_t endoff = req_size + offset;

        if ((req_size > map_size) ||
            (xio_addr < xio_base) ||
            (xio_port != pcibr_soft->bs_dir_xport) ||
            (endoff > map_size)) {

            PCIBR_DEBUG((PCIBR_DEBUG_DMADIR, pconn_vhdl,
                        "pcibr_dmatrans_addr: wanted paddr [0x%x..0x%x], "
                        "xio_port=0x%x, xio region outside direct32 target\n",
                        paddr, paddr + req_size - 1, xio_addr));
        } else {
            pci_addr = slotp->bss_d32_base;
            /* Fast path: cached D32 base/flags for this slot match. */
            if ((pci_addr != PCIBR_D32_BASE_UNSET) &&
                (flags == slotp->bss_d32_flags)) {

                pci_addr |= offset;

                PCIBR_DEBUG((PCIBR_DEBUG_DMADIR, pconn_vhdl,
                            "pcibr_dmatrans_addr: wanted paddr [0x%x..0x%x], "
                            "xio_port=0x%x, direct32: pci_addr=0x%x\n",
                            paddr, paddr + req_size - 1, xio_addr, pci_addr));

                return (pci_addr);
            }
            if (!pcibr_try_set_device(pcibr_soft, pciio_slot, flags, BRIDGE_DEV_D32_BITS)) {

                pci_addr = PCI32_DIRECT_BASE;
                slotp->bss_d32_flags = flags;
                slotp->bss_d32_base = pci_addr;
                pci_addr |= offset;

                /* Make sure we have an RRB (or two).
                 */
                if (!(pcibr_soft->bs_rrb_fixed & (1 << pciio_slot))) {
                    have_rrbs = pcibr_soft->bs_rrb_valid[pciio_slot][vchan];
                    if (have_rrbs < 2) {
                        if (slotp->bss_device & BRIDGE_DEV_PREF)
                            min_rrbs = 2;
                        else
                            min_rrbs = 1;
                        if (have_rrbs < min_rrbs)
                            do_pcibr_rrb_autoalloc(pcibr_soft, pciio_slot,
                                                   vchan, min_rrbs - have_rrbs);
                    }
                }
#if HWG_PERF_CHECK
                if (xio_addr != 0x20000000)
#endif
                    PCIBR_DEBUG((PCIBR_DEBUG_DMADIR, pconn_vhdl,
                                "pcibr_dmatrans_addr: wanted paddr [0x%x..0x%x], "
                                "xio_port=0x%x, direct32: pci_addr=0x%x, "
                                "new flags: 0x%x\n", paddr, paddr + req_size - 1,
                                xio_addr, pci_addr, (uint64_t) flags));

                return (pci_addr);
            }
            /* our flags conflict with Device(x).
             */
            PCIBR_DEBUG((PCIBR_DEBUG_DMADIR, pconn_vhdl,
                        "pcibr_dmatrans_addr: wanted paddr [0x%x..0x%x], "
                        "xio_port=0x%x, Unable to set direct32 Device(x) bits\n",
                        paddr, paddr + req_size - 1, xio_port));
        }
    }

    PCIBR_DEBUG((PCIBR_DEBUG_DMADIR, pconn_vhdl,
                "pcibr_dmatrans_addr: wanted paddr [0x%x..0x%x], "
                "xio_port=0x%x, No acceptable PCI address found\n",
                paddr, paddr + req_size - 1, xio_port));

    return 0;
}
3566
/*
 * pcibr_dmamap_drain: forward the drain request for this DMA map to
 * the underlying xtalk DMA map.
 */
void
pcibr_dmamap_drain(pcibr_dmamap_t map)
{
    xtalk_dmamap_drain(map->bd_xtalk);
}
3572
3573 void
3574 pcibr_dmaaddr_drain(vertex_hdl_t pconn_vhdl,
3575 paddr_t paddr,
3576 size_t bytes)
3577 {
3578 pciio_info_t pciio_info = pciio_info_get(pconn_vhdl);
3579 pcibr_soft_t pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
3580 vertex_hdl_t xconn_vhdl = pcibr_soft->bs_conn;
3581
3582 xtalk_dmaaddr_drain(xconn_vhdl, paddr, bytes);
3583 }
3584
3585 void
3586 pcibr_dmalist_drain(vertex_hdl_t pconn_vhdl,
3587 alenlist_t list)
3588 {
3589 pciio_info_t pciio_info = pciio_info_get(pconn_vhdl);
3590 pcibr_soft_t pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
3591 vertex_hdl_t xconn_vhdl = pcibr_soft->bs_conn;
3592
3593 xtalk_dmalist_drain(xconn_vhdl, list);
3594 }
3595
3596 /*
3597 * Get the starting PCIbus address out of the given DMA map.
3598 * This function is supposed to be used by a close friend of PCI bridge
3599 * since it relies on the fact that the starting address of the map is fixed at
3600 * the allocation time in the current implementation of PCI bridge.
3601 */
3602 iopaddr_t
3603 pcibr_dmamap_pciaddr_get(pcibr_dmamap_t pcibr_dmamap)
3604 {
3605 return (pcibr_dmamap->bd_pci_addr);
3606 }
3607
3608 /* =====================================================================
3609 * CONFIGURATION MANAGEMENT
3610 */
3611 /*ARGSUSED */
/* pcibr_provider_startup: provider-startup hook; intentionally empty. */
void
pcibr_provider_startup(vertex_hdl_t pcibr)
{
}
3616
3617 /*ARGSUSED */
/* pcibr_provider_shutdown: provider-shutdown hook; intentionally empty. */
void
pcibr_provider_shutdown(vertex_hdl_t pcibr)
{
}
3622
/*
 * pcibr_reset: reset the PCI device behind the given connection point.
 * Currently unconditionally BUG()s; the intended implementation (save
 * config command registers, pulse the slot's RST pin, restore BARs and
 * command registers) is compiled out under PIC_LATER.
 * Returns 0 on success, -1 on failure.
 */
int
pcibr_reset(vertex_hdl_t conn)
{
#ifdef PIC_LATER
    pciio_info_t pciio_info = pciio_info_get(conn);
    pciio_slot_t pciio_slot = PCIBR_INFO_SLOT_GET_INT(pciio_info);
    pcibr_soft_t pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
    bridge_t *bridge = pcibr_soft->bs_base;
    bridgereg_t ctlreg;
    unsigned cfgctl[8];
    unsigned long s;
    int f, nf;
    pcibr_info_h pcibr_infoh;
    pcibr_info_t pcibr_info;
    int win;
    int error = 0;
#endif /* PIC_LATER */

    BUG();
#ifdef PIC_LATER
    /* Multi-function cards are reset through their host slot. */
    if (pcibr_soft->bs_slot[pciio_slot].has_host) {
        pciio_slot = pcibr_soft->bs_slot[pciio_slot].host_slot;
        pcibr_info = pcibr_soft->bs_slot[pciio_slot].bss_infos[0];
    }

    if ((pciio_slot >= pcibr_soft->bs_first_slot) &&
        (pciio_slot <= pcibr_soft->bs_last_reset)) {
        s = pcibr_lock(pcibr_soft);
        nf = pcibr_soft->bs_slot[pciio_slot].bss_ninfo;
        pcibr_infoh = pcibr_soft->bs_slot[pciio_slot].bss_infos;
        /* Save each function's config command register across the reset. */
        for (f = 0; f < nf; ++f)
            if (pcibr_infoh[f])
                cfgctl[f] = pcibr_func_config_get(bridge, pciio_slot, f,
                                                  PCI_CFG_COMMAND/4);

        error = iobrick_pci_slot_rst(pcibr_soft->bs_l1sc,
                                     pcibr_widget_to_bus(pcibr_soft->bs_vhdl),
                                     PCIBR_DEVICE_TO_SLOT(pcibr_soft,pciio_slot),
                                     NULL);

        /* Pulse the slot's reset pin: drop it, wait, raise it, wait. */
        ctlreg = bridge->b_wid_control;
        bridge->b_wid_control = ctlreg & ~BRIDGE_CTRL_RST_PIN(pciio_slot);
        /* NOTE(review): 'ts' is not declared anywhere in this function;
         * this PIC_LATER code cannot compile as-is -- fix before
         * enabling PIC_LATER.
         */
        nano_delay(&ts);
        bridge->b_wid_control = ctlreg | BRIDGE_CTRL_RST_PIN(pciio_slot);
        nano_delay(&ts);

        /* Restore each function's BARs, then its command register. */
        for (f = 0; f < nf; ++f)
            if ((pcibr_info = pcibr_infoh[f]))
                for (win = 0; win < 6; ++win)
                    if (pcibr_info->f_window[win].w_base != 0)
                        pcibr_func_config_set(bridge, pciio_slot, f,
                                              PCI_CFG_BASE_ADDR(win) / 4,
                                              pcibr_info->f_window[win].w_base);
        for (f = 0; f < nf; ++f)
            if (pcibr_infoh[f])
                pcibr_func_config_set(bridge, pciio_slot, f,
                                      PCI_CFG_COMMAND / 4,
                                      cfgctl[f]);
        pcibr_unlock(pcibr_soft, s);

        if (error)
            return(-1);

        return 0;
    }
    PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_DETACH, conn,
                "pcibr_reset unimplemented for slot %d\n", conn, pciio_slot));
#endif /* PIC_LATER */
    return -1;
}
3693
3694 pciio_endian_t
3695 pcibr_endian_set(vertex_hdl_t pconn_vhdl,
3696 pciio_endian_t device_end,
3697 pciio_endian_t desired_end)
3698 {
3699 pciio_info_t pciio_info = pciio_info_get(pconn_vhdl);
3700 pciio_slot_t pciio_slot = PCIBR_INFO_SLOT_GET_INT(pciio_info);
3701 pcibr_soft_t pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
3702 bridgereg_t devreg;
3703 unsigned long s;
3704
3705 /*
3706 * Bridge supports hardware swapping; so we can always
3707 * arrange for the caller's desired endianness.
3708 */
3709
3710 s = pcibr_lock(pcibr_soft);
3711 devreg = pcibr_soft->bs_slot[pciio_slot].bss_device;
3712 if (device_end != desired_end)
3713 devreg |= BRIDGE_DEV_SWAP_BITS;
3714 else
3715 devreg &= ~BRIDGE_DEV_SWAP_BITS;
3716
3717 /* NOTE- if we ever put SWAP bits
3718 * onto the disabled list, we will
3719 * have to change the logic here.
3720 */
3721 if (pcibr_soft->bs_slot[pciio_slot].bss_device != devreg) {
3722 bridge_t *bridge = pcibr_soft->bs_base;
3723
3724 if ( IS_PIC_SOFT(pcibr_soft) ) {
3725 bridge->b_device[pciio_slot].reg = devreg;
3726 pcibr_soft->bs_slot[pciio_slot].bss_device = devreg;
3727 bridge->b_wid_tflush; /* wait until Bridge PIO complete */
3728 }
3729 }
3730 pcibr_unlock(pcibr_soft, s);
3731
3732 #ifdef PIC_LATER
3733 PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_DEVREG, pconn_vhdl,
3734 "pcibr_endian_set: Device(%d): %x\n",
3735 pciio_slot, devreg, device_bits));
3736 #else
3737 printk("pcibr_endian_set: Device(%d): %x\n", pciio_slot, devreg);
3738 #endif
3739 return desired_end;
3740 }
3741
3742 /*
3743 * Interfaces to allow special (e.g. SGI) drivers to set/clear
3744 * Bridge-specific device flags. Many flags are modified through
3745 * PCI-generic interfaces; we don't allow them to be directly
3746 * manipulated here. Only flags that at this point seem pretty
3747 * Bridge-specific can be set through these special interfaces.
3748 * We may add more flags as the need arises, or remove flags and
3749 * create PCI-generic interfaces as the need arises.
3750 *
3751 * Returns 0 on failure, 1 on success
3752 */
3753 int
3754 pcibr_device_flags_set(vertex_hdl_t pconn_vhdl,
3755 pcibr_device_flags_t flags)
3756 {
3757 pciio_info_t pciio_info = pciio_info_get(pconn_vhdl);
3758 pciio_slot_t pciio_slot = PCIBR_INFO_SLOT_GET_INT(pciio_info);
3759 pcibr_soft_t pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
3760 bridgereg_t set = 0;
3761 bridgereg_t clr = 0;
3762
3763 ASSERT((flags & PCIBR_DEVICE_FLAGS) == flags);
3764
3765 if (flags & PCIBR_WRITE_GATHER)
3766 set |= BRIDGE_DEV_PMU_WRGA_EN;
3767 if (flags & PCIBR_NOWRITE_GATHER)
3768 clr |= BRIDGE_DEV_PMU_WRGA_EN;
3769
3770 if (flags & PCIBR_WRITE_GATHER)
3771 set |= BRIDGE_DEV_DIR_WRGA_EN;
3772 if (flags & PCIBR_NOWRITE_GATHER)
3773 clr |= BRIDGE_DEV_DIR_WRGA_EN;
3774
3775 if (flags & PCIBR_PREFETCH)
3776 set |= BRIDGE_DEV_PREF;
3777 if (flags & PCIBR_NOPREFETCH)
3778 clr |= BRIDGE_DEV_PREF;
3779
3780 if (flags & PCIBR_PRECISE)
3781 set |= BRIDGE_DEV_PRECISE;
3782 if (flags & PCIBR_NOPRECISE)
3783 clr |= BRIDGE_DEV_PRECISE;
3784
3785 if (flags & PCIBR_BARRIER)
3786 set |= BRIDGE_DEV_BARRIER;
3787 if (flags & PCIBR_NOBARRIER)
3788 clr |= BRIDGE_DEV_BARRIER;
3789
3790 if (flags & PCIBR_64BIT)
3791 set |= BRIDGE_DEV_DEV_SIZE;
3792 if (flags & PCIBR_NO64BIT)
3793 clr |= BRIDGE_DEV_DEV_SIZE;
3794
3795 if (set || clr) {
3796 bridgereg_t devreg;
3797 unsigned long s;
3798
3799 s = pcibr_lock(pcibr_soft);
3800 devreg = pcibr_soft->bs_slot[pciio_slot].bss_device;
3801 devreg = (devreg & ~clr) | set;
3802 if (pcibr_soft->bs_slot[pciio_slot].bss_device != devreg) {
3803 bridge_t *bridge = pcibr_soft->bs_base;
3804
3805 if ( IS_PIC_SOFT(pcibr_soft) ) {
3806 bridge->b_device[pciio_slot].reg = devreg;
3807 pcibr_soft->bs_slot[pciio_slot].bss_device = devreg;
3808 bridge->b_wid_tflush; /* wait until Bridge PIO complete */
3809 }
3810 }
3811 pcibr_unlock(pcibr_soft, s);
3812 #ifdef PIC_LATER
3813 PCIBR_DEBUG_ALWAYS((PCIBR_DEBUG_DEVREG, pconn_vhdl,
3814 "pcibr_device_flags_set: Device(%d): %x\n",
3815 pciio_slot, devreg, device_bits));
3816 #else
3817 printk("pcibr_device_flags_set: Device(%d): %x\n", pciio_slot, devreg);
3818 #endif
3819 }
3820 return (1);
3821 }
3822
3823 /*
3824 * PIC has 16 RBARs per bus; meaning it can have a total of 16 outstanding
3825 * split transactions. If the functions on the bus have requested a total
3826 * of 16 or less, then we can give them what they requested (ie. 100%).
3827 * Otherwise we have make sure each function can get at least one buffer
3828 * and then divide the rest of the buffers up among the functions as ``A
3829 * PERCENTAGE OF WHAT THEY REQUESTED'' (i.e. 0% - 100% of a function's
3830 * pcix_type0_status.max_out_split). This percentage does not include the
3831 * one RBAR that all functions get by default.
3832 */
3833 int
3834 pcibr_pcix_rbars_calc(pcibr_soft_t pcibr_soft)
3835 {
3836 /* 'percent_allowed' is the percentage of requested RBARs that functions
3837 * are allowed, ***less the 1 RBAR that all functions get by default***
3838 */
3839 int percent_allowed;
3840
3841 if (pcibr_soft->bs_pcix_num_funcs) {
3842 if (pcibr_soft->bs_pcix_num_funcs > NUM_RBAR) {
3843 printk(KERN_WARNING
3844 "%lx: Must oversubscribe Read Buffer Attribute Registers"
3845 "(RBAR). Bus has %d RBARs but %d funcs need them.\n",
3846 (unsigned long)pcibr_soft->bs_vhdl, NUM_RBAR, pcibr_soft->bs_pcix_num_funcs);
3847 percent_allowed = 0;
3848 } else {
3849 percent_allowed = (((NUM_RBAR-pcibr_soft->bs_pcix_num_funcs)*100) /
3850 pcibr_soft->bs_pcix_split_tot);
3851
3852 /* +1 to percentage to solve rounding errors that occur because
3853 * we're not doing fractional math. (ie. ((3 * 66%) / 100) = 1)
3854 * but should be "2" if doing true fractional math. NOTE: Since
3855 * the greatest number of outstanding transactions a function
3856 * can request is 32, this "+1" will always work (i.e. we won't
3857 * accidentally oversubscribe the RBARs because of this rounding
3858 * of the percentage).
3859 */
3860 percent_allowed=(percent_allowed > 100) ? 100 : percent_allowed+1;
3861 }
3862 } else {
3863 return(ENODEV);
3864 }
3865
3866 return(percent_allowed);
3867 }
3868
/*
 * pcibr's implementation of the generic pciio provider interface.
 * Entries are positional — the order must match the function-pointer
 * fields of pciio_provider_t (declared in pciio.h); NULL entries are
 * services this provider does not implement.
 */
pciio_provider_t pcibr_provider =
{
    /* PIO mapping services */
    (pciio_piomap_alloc_f *) pcibr_piomap_alloc,
    (pciio_piomap_free_f *) pcibr_piomap_free,
    (pciio_piomap_addr_f *) pcibr_piomap_addr,
    (pciio_piomap_done_f *) pcibr_piomap_done,
    (pciio_piotrans_addr_f *) pcibr_piotrans_addr,
    (pciio_piospace_alloc_f *) pcibr_piospace_alloc,
    (pciio_piospace_free_f *) pcibr_piospace_free,

    /* DMA mapping and drain services */
    (pciio_dmamap_alloc_f *) pcibr_dmamap_alloc,
    (pciio_dmamap_free_f *) pcibr_dmamap_free,
    (pciio_dmamap_addr_f *) pcibr_dmamap_addr,
    (pciio_dmamap_done_f *) pcibr_dmamap_done,
    (pciio_dmatrans_addr_f *) pcibr_dmatrans_addr,
    (pciio_dmamap_drain_f *) pcibr_dmamap_drain,
    (pciio_dmaaddr_drain_f *) pcibr_dmaaddr_drain,
    (pciio_dmalist_drain_f *) pcibr_dmalist_drain,

    /* interrupt services */
    (pciio_intr_alloc_f *) pcibr_intr_alloc,
    (pciio_intr_free_f *) pcibr_intr_free,
    (pciio_intr_connect_f *) pcibr_intr_connect,
    (pciio_intr_disconnect_f *) pcibr_intr_disconnect,
    (pciio_intr_cpu_get_f *) pcibr_intr_cpu_get,

    /* provider lifecycle, configuration, and misc services */
    (pciio_provider_startup_f *) pcibr_provider_startup,
    (pciio_provider_shutdown_f *) pcibr_provider_shutdown,
    (pciio_reset_f *) pcibr_reset,
    (pciio_endian_set_f *) pcibr_endian_set,
    (pciio_config_get_f *) pcibr_config_get,
    (pciio_config_set_f *) pcibr_config_set,
    /* error and driver-registration hooks: not implemented by pcibr */
    (pciio_error_devenable_f *) 0,
    (pciio_error_extract_f *) 0,
    (pciio_driver_reg_callback_f *) 0,
    (pciio_driver_unreg_callback_f *) 0,
    (pciio_device_unregister_f *) 0,
    (pciio_dma_enabled_f *) pcibr_dma_enabled,
};
3907
3908 int
3909 pcibr_dma_enabled(vertex_hdl_t pconn_vhdl)
3910 {
3911 pciio_info_t pciio_info = pciio_info_get(pconn_vhdl);
3912 pcibr_soft_t pcibr_soft = (pcibr_soft_t) pciio_info_mfast_get(pciio_info);
3913
3914
3915 return xtalk_dma_enabled(pcibr_soft->bs_conn);
3916 }
3917
3918
3919 /*
3920 * pcibr_debug() is used to print pcibr debug messages to the console. A
3921 * user enables tracing by setting the following global variables:
3922 *
3923 * pcibr_debug_mask -Bitmask of what to trace. see pcibr_private.h
3924 * pcibr_debug_module -Module to trace. 'all' means trace all modules
3925 * pcibr_debug_widget -Widget to trace. '-1' means trace all widgets
3926 * pcibr_debug_slot -Slot to trace. '-1' means trace all slots
3927 *
3928 * 'type' is the type of debugging that the current PCIBR_DEBUG macro is
3929 * tracing. 'vhdl' (which can be NULL) is the vhdl associated with the
3930 * debug statement. If there is a 'vhdl' associated with this debug
3931 * statement, it is parsed to obtain the module, widget, and slot. If the
3932 * globals above match the PCIBR_DEBUG params, then the debug info in the
3933 * parameter 'format' is sent to the console.
3934 */
3935 void
3936 pcibr_debug(uint32_t type, vertex_hdl_t vhdl, char *format, ...)
3937 {
3938 char hwpath[MAXDEVNAME] = "\0";
3939 char copy_of_hwpath[MAXDEVNAME];
3940 char *module = "all";
3941 short widget = -1;
3942 short slot = -1;
3943 va_list ap;
3944
3945 if (pcibr_debug_mask & type) {
3946 if (vhdl) {
3947 if (!hwgraph_vertex_name_get(vhdl, hwpath, MAXDEVNAME)) {
3948 char *cp;
3949
3950 if (strcmp(module, pcibr_debug_module)) {
3951 /* use a copy */
3952 (void)strcpy(copy_of_hwpath, hwpath);
3953 cp = strstr(copy_of_hwpath, "/module/");
3954 if (cp) {
3955 cp += strlen("/module");
3956 module = strsep(&cp, "/");
3957 }
3958 }
3959 if (pcibr_debug_widget != -1) {
3960 cp = strstr(hwpath, "/xtalk/");
3961 if (cp) {
3962 cp += strlen("/xtalk/");
3963 widget = simple_strtoul(cp, NULL, 0);
3964 }
3965 }
3966 if (pcibr_debug_slot != -1) {
3967 cp = strstr(hwpath, "/pci/");
3968 if (cp) {
3969 cp += strlen("/pci/");
3970 slot = simple_strtoul(cp, NULL, 0);
3971 }
3972 }
3973 }
3974 }
3975 if ((vhdl == NULL) ||
3976 (!strcmp(module, pcibr_debug_module) &&
3977 (widget == pcibr_debug_widget) &&
3978 (slot == pcibr_debug_slot))) {
3979 #ifdef LATER
3980 printk("PCIBR_DEBUG<%d>\t: %s :", cpuid(), hwpath);
3981 #else
3982 printk("PCIBR_DEBUG\t: %s :", hwpath);
3983 #endif
3984 /*
3985 * Kernel printk translates to this 3 line sequence.
3986 * Since we have a variable length argument list, we
3987 * need to call printk this way rather than directly
3988 */
3989 {
3990 char buffer[500];
3991
3992 va_start(ap, format);
3993 vsnprintf(buffer, 500, format, ap);
3994 va_end(ap);
3995 buffer[499] = (char)0; /* just to be safe */
3996 printk("%s", buffer);
3997 }
3998 }
3999 }
4000 }
4001
4002 int
4003 isIO9(nasid_t nasid) {
4004 lboard_t *brd = (lboard_t *)KL_CONFIG_INFO(nasid);
4005
4006 while (brd) {
4007 if (brd->brd_flags & LOCAL_MASTER_IO6) {
4008 return 1;
4009 }
4010 brd = KLCF_NEXT(brd);
4011 }
4012 /* if it's dual ported, check the peer also */
4013 nasid = NODEPDA(NASID_TO_COMPACT_NODEID(nasid))->xbow_peer;
4014 if (nasid < 0) return 0;
4015 brd = (lboard_t *)KL_CONFIG_INFO(nasid);
4016 while (brd) {
4017 if (brd->brd_flags & LOCAL_MASTER_IO6) {
4018 return 1;
4019 }
4020 brd = KLCF_NEXT(brd);
4021 }
4022 return 0;
4023 }
4024