1 /*
2 * linux/arch/alpha/kernel/core_wildfire.c
3 *
4 * Wildfire support.
5 *
6 * Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
7 */
8
9 #include <linux/kernel.h>
10 #include <linux/types.h>
11 #include <linux/pci.h>
12 #include <linux/sched.h>
13 #include <linux/init.h>
14
15 #include <asm/ptrace.h>
16 #include <asm/system.h>
17 #include <asm/smp.h>
18
19 #define __EXTERN_INLINE inline
20 #include <asm/io.h>
21 #include <asm/core_wildfire.h>
22 #undef __EXTERN_INLINE
23
24 #include "proto.h"
25 #include "pci_impl.h"
26
27 #define DEBUG_MCHECK 0 /* 0 = minimal, 1 = debug, 2 = debug+dump. */
28 #define DEBUG_CONFIG 0
29 #define DEBUG_DUMP_REGS 0
30 #define DEBUG_DUMP_CONFIG 1
31
32 #if DEBUG_CONFIG
33 # define DBG_CFG(args) printk args
34 #else
35 # define DBG_CFG(args)
36 #endif
37
38 #if DEBUG_DUMP_REGS
39 static void wildfire_dump_pci_regs(int qbbno, int hoseno);
40 static void wildfire_dump_pca_regs(int qbbno, int pcano);
41 static void wildfire_dump_qsa_regs(int qbbno);
42 static void wildfire_dump_qsd_regs(int qbbno);
43 static void wildfire_dump_iop_regs(int qbbno);
44 static void wildfire_dump_gp_regs(int qbbno);
45 #endif
46 #if DEBUG_DUMP_CONFIG
47 static void wildfire_dump_hardware_config(void);
48 #endif
49
/*
 * Hard (backplane position) <-> soft (firmware-assigned) QBB number maps,
 * filled in by wildfire_hardware_probe().  QBB_MAP_EMPTY marks a slot
 * with no QBB present.
 */
unsigned char wildfire_hard_qbb_map[WILDFIRE_MAX_QBB];
unsigned char wildfire_soft_qbb_map[WILDFIRE_MAX_QBB];
#define QBB_MAP_EMPTY 0xff

/* Presence bitmasks, also filled in by wildfire_hardware_probe(). */
unsigned long wildfire_hard_qbb_mask;	/* one bit per hard QBB present */
unsigned long wildfire_soft_qbb_mask;	/* one bit per soft QBB present */
unsigned long wildfire_gp_mask;		/* soft QBBs with a GP (global port) */
unsigned long wildfire_hs_mask;		/* set when QSA_QBB_ID bit 0x40 ("HS") is on */
unsigned long wildfire_iop_mask;	/* one bit per soft QBB with an IOP */
unsigned long wildfire_ior_mask;	/* 4 bits per soft QBB, from QSA_QBB_POP[1] */
unsigned long wildfire_pca_mask;	/* 4 bits per soft QBB, one per probed PCA */
unsigned long wildfire_cpu_mask;	/* 4 bits per soft QBB, from QSA_QBB_POP[0] */
unsigned long wildfire_mem_mask;	/* 4 bits per soft QBB, from QSA_QBB_POP[0] */
63
/*
 * Bring up one PCI hose on a QBB: allocate the pci_controller and its
 * IO/MEM resources, publish the dense-space bases for userland, create
 * the scatter-gather arenas, and program the hose's four PCI-to-memory
 * DMA windows in its CSRs.
 */
void __init
wildfire_init_hose(int qbbno, int hoseno)
{
	struct pci_controller *hose;
	wildfire_pci *pci;

	hose = alloc_pci_controller();
	hose->io_space = alloc_resource();
	hose->mem_space = alloc_resource();

	/* This is for userland consumption. */
	hose->sparse_mem_base = 0;
	hose->sparse_io_base = 0;
	hose->dense_mem_base = WILDFIRE_MEM(qbbno, hoseno);
	hose->dense_io_base = WILDFIRE_IO(qbbno, hoseno);

	hose->config_space_base = WILDFIRE_CONF(qbbno, hoseno);
	/* Both coordinates packed in one index: qbb in bits 3+, hose in 0-2
	   (wildfire_pci_tbi() unpacks this). */
	hose->index = (qbbno << 3) + hoseno;

	hose->io_space->start = WILDFIRE_IO(qbbno, hoseno) - WILDFIRE_IO_BIAS;
	hose->io_space->end = hose->io_space->start + WILDFIRE_IO_SPACE - 1;
	hose->io_space->name = pci_io_names[hoseno];
	hose->io_space->flags = IORESOURCE_IO;

	hose->mem_space->start = WILDFIRE_MEM(qbbno, hoseno)-WILDFIRE_MEM_BIAS;
	hose->mem_space->end = hose->mem_space->start + 0xffffffff;
	hose->mem_space->name = pci_mem_names[hoseno];
	hose->mem_space->flags = IORESOURCE_MEM;

	/* Resource claim failures are reported but deliberately non-fatal. */
	if (request_resource(&ioport_resource, hose->io_space) < 0)
		printk(KERN_ERR "Failed to request IO on qbb %d hose %d\n",
		       qbbno, hoseno);
	if (request_resource(&iomem_resource, hose->mem_space) < 0)
		printk(KERN_ERR "Failed to request MEM on qbb %d hose %d\n",
		       qbbno, hoseno);

#if DEBUG_DUMP_REGS
	wildfire_dump_pci_regs(qbbno, hoseno);
#endif

	/*
	 * Set up the PCI to main memory translation windows.
	 *
	 * Note: Window 3 is scatter-gather only
	 *
	 * Window 0 is scatter-gather 8MB at 8MB (for isa)
	 * Window 1 is direct access 1GB at 1GB
	 * Window 2 is direct access 1GB at 2GB
	 * Window 3 is scatter-gather 128MB at 3GB
	 * ??? We ought to scale window 3 memory.
	 *
	 */
	hose->sg_isa = iommu_arena_new(hose, 0x00800000, 0x00800000, 0);
	hose->sg_pci = iommu_arena_new(hose, 0xc0000000, 0x08000000, 0);

	pci = WILDFIRE_pci(qbbno, hoseno);

	/* Scatter-gather windows OR in 3, direct windows OR in 1 --
	   presumably enable + SG-mode bits; confirm in core_wildfire.h. */
	pci->pci_window[0].wbase.csr = hose->sg_isa->dma_base | 3;
	pci->pci_window[0].wmask.csr = (hose->sg_isa->size - 1) & 0xfff00000;
	pci->pci_window[0].tbase.csr = virt_to_phys(hose->sg_isa->ptes);

	pci->pci_window[1].wbase.csr = 0x40000000 | 1;
	pci->pci_window[1].wmask.csr = (0x40000000 -1) & 0xfff00000;
	pci->pci_window[1].tbase.csr = 0;

	pci->pci_window[2].wbase.csr = 0x80000000 | 1;
	pci->pci_window[2].wmask.csr = (0x40000000 -1) & 0xfff00000;
	pci->pci_window[2].tbase.csr = 0x40000000;

	pci->pci_window[3].wbase.csr = hose->sg_pci->dma_base | 3;
	pci->pci_window[3].wmask.csr = (hose->sg_pci->size - 1) & 0xfff00000;
	pci->pci_window[3].tbase.csr = virt_to_phys(hose->sg_pci->ptes);

	wildfire_pci_tbi(hose, 0, 0); /* Flush TLB at the end. */
}
139
140 void __init
wildfire_init_pca(int qbbno,int pcano)141 wildfire_init_pca(int qbbno, int pcano)
142 {
143
144 /* Test for PCA existence first. */
145 if (!WILDFIRE_PCA_EXISTS(qbbno, pcano))
146 return;
147
148 #if DEBUG_DUMP_REGS
149 wildfire_dump_pca_regs(qbbno, pcano);
150 #endif
151
152 /* Do both hoses of the PCA. */
153 wildfire_init_hose(qbbno, (pcano << 1) + 0);
154 wildfire_init_hose(qbbno, (pcano << 1) + 1);
155 }
156
157 void __init
wildfire_init_qbb(int qbbno)158 wildfire_init_qbb(int qbbno)
159 {
160 int pcano;
161
162 /* Test for QBB existence first. */
163 if (!WILDFIRE_QBB_EXISTS(qbbno))
164 return;
165
166 #if DEBUG_DUMP_REGS
167 wildfire_dump_qsa_regs(qbbno);
168 wildfire_dump_qsd_regs(qbbno);
169 wildfire_dump_iop_regs(qbbno);
170 wildfire_dump_gp_regs(qbbno);
171 #endif
172
173 /* Init all PCAs here. */
174 for (pcano = 0; pcano < WILDFIRE_PCA_PER_QBB; pcano++) {
175 wildfire_init_pca(qbbno, pcano);
176 }
177 }
178
/*
 * Discover the machine's configuration and fill in the wildfire_*_mask
 * globals and the hard/soft QBB maps.  Starts from the local QSD WHAMI
 * register, then (if a GP is present) reads the GP's QBB map to find
 * remote QBBs, then walks every existing QBB reading its population
 * registers and probing each PCA slot.  The exact read order of these
 * CSRs is kept as-is.
 */
void __init
wildfire_hardware_probe(void)
{
	unsigned long temp;
	unsigned int hard_qbb, soft_qbb;
	wildfire_fast_qsd *fast = WILDFIRE_fast_qsd();
	wildfire_qsd *qsd;
	wildfire_qsa *qsa;
	wildfire_iop *iop;
	wildfire_gp *gp;
	wildfire_ne *ne;
	wildfire_fe *fe;
	int i;

	/* WHAMI tells us who we are: hard QBB in bits 10:8, soft in 6:4. */
	temp = fast->qsd_whami.csr;
#if 0
	printk(KERN_ERR "fast QSD_WHAMI at base %p is 0x%lx\n", fast, temp);
#endif

	hard_qbb = (temp >> 8) & 7;
	soft_qbb = (temp >> 4) & 7;

	/* Init the HW configuration variables. */
	wildfire_hard_qbb_mask = (1 << hard_qbb);
	wildfire_soft_qbb_mask = (1 << soft_qbb);

	wildfire_gp_mask = 0;
	wildfire_hs_mask = 0;
	wildfire_iop_mask = 0;
	wildfire_ior_mask = 0;
	wildfire_pca_mask = 0;

	wildfire_cpu_mask = 0;
	wildfire_mem_mask = 0;

	/* No QBB mappings known yet. */
	memset(wildfire_hard_qbb_map, QBB_MAP_EMPTY, WILDFIRE_MAX_QBB);
	memset(wildfire_soft_qbb_map, QBB_MAP_EMPTY, WILDFIRE_MAX_QBB);

	/* First, determine which QBBs are present. */
	qsa = WILDFIRE_qsa(soft_qbb);

	temp = qsa->qsa_qbb_id.csr;
#if 0
	printk(KERN_ERR "QSA_QBB_ID at base %p is 0x%lx\n", qsa, temp);
#endif

	if (temp & 0x40) /* Is there an HS? */
		wildfire_hs_mask = 1;

	if (temp & 0x20) { /* Is there a GP? */
		gp = WILDFIRE_gp(soft_qbb);
		temp = 0;
		/* Assemble the four 8-bit GPA_QBB_MAP registers into one
		   64-bit map word: one 4-bit entry per hard QBB. */
		for (i = 0; i < 4; i++) {
			temp |= gp->gpa_qbb_map[i].csr << (i * 8);
#if 0
			printk(KERN_ERR "GPA_QBB_MAP[%d] at base %p is 0x%lx\n",
			       i, gp, temp);
#endif
		}

		/* Each nibble: bit 3 = QBB present, bits 2:0 = soft number. */
		for (hard_qbb = 0; hard_qbb < WILDFIRE_MAX_QBB; hard_qbb++) {
			if (temp & 8) { /* Is there a QBB? */
				soft_qbb = temp & 7;
				wildfire_hard_qbb_mask |= (1 << hard_qbb);
				wildfire_soft_qbb_mask |= (1 << soft_qbb);
			}
			temp >>= 4;
		}
		wildfire_gp_mask = wildfire_soft_qbb_mask;
	}

	/* Next determine each QBBs resources. */
	for (soft_qbb = 0; soft_qbb < WILDFIRE_MAX_QBB; soft_qbb++) {
	    if (WILDFIRE_QBB_EXISTS(soft_qbb)) {
	        qsd = WILDFIRE_qsd(soft_qbb);
		temp = qsd->qsd_whami.csr;
#if 0
	printk(KERN_ERR "QSD_WHAMI at base %p is 0x%lx\n", qsd, temp);
#endif
		/* Record the hard<->soft mapping both ways. */
		hard_qbb = (temp >> 8) & 7;
		wildfire_hard_qbb_map[hard_qbb] = soft_qbb;
		wildfire_soft_qbb_map[soft_qbb] = hard_qbb;

		qsa = WILDFIRE_qsa(soft_qbb);
		/* QBB_POP[0]: CPU population in bits 3:0, memory in 7:4. */
		temp = qsa->qsa_qbb_pop[0].csr;
#if 0
	printk(KERN_ERR "QSA_QBB_POP_0 at base %p is 0x%lx\n", qsa, temp);
#endif
		wildfire_cpu_mask |= ((temp >> 0) & 0xf) << (soft_qbb << 2);
		wildfire_mem_mask |= ((temp >> 4) & 0xf) << (soft_qbb << 2);

		/* QBB_POP[1]: IOR population in bits 7:4.  The IOP itself is
		   assumed present on every existing QBB. */
		temp = qsa->qsa_qbb_pop[1].csr;
#if 0
	printk(KERN_ERR "QSA_QBB_POP_1 at base %p is 0x%lx\n", qsa, temp);
#endif
		wildfire_iop_mask |= (1 << soft_qbb);
		wildfire_ior_mask |= ((temp >> 4) & 0xf) << (soft_qbb << 2);

		temp = qsa->qsa_qbb_id.csr;
#if 0
	printk(KERN_ERR "QSA_QBB_ID at %p is 0x%lx\n", qsa, temp);
#endif
		if (temp & 0x20)	/* Is there a GP? */
			wildfire_gp_mask |= (1 << soft_qbb);

		/* Probe for PCA existence here. */
		for (i = 0; i < WILDFIRE_PCA_PER_QBB; i++) {
			iop = WILDFIRE_iop(soft_qbb);
			ne = WILDFIRE_ne(soft_qbb, i);
			fe = WILDFIRE_fe(soft_qbb, i);

			/* A PCA exists when its hose is initialized and both
			   near/far-end WHAT_AM_I registers answer sanely. */
			if ((iop->iop_hose[i].init.csr & 1) == 1 &&
			    ((ne->ne_what_am_i.csr & 0xf00000300) == 0x100000300) &&
			    ((fe->fe_what_am_i.csr & 0xf00000300) == 0x100000200))
			{
				wildfire_pca_mask |= 1 << ((soft_qbb << 2) + i);
			}
		}

	    }
	}
#if DEBUG_DUMP_CONFIG
	wildfire_dump_hardware_config();
#endif
}
304
305 void __init
wildfire_init_arch(void)306 wildfire_init_arch(void)
307 {
308 int qbbno;
309
310 /* With multiple PCI buses, we play with I/O as physical addrs. */
311 ioport_resource.end = ~0UL;
312 iomem_resource.end = ~0UL;
313
314
315 /* Probe the hardware for info about configuration. */
316 wildfire_hardware_probe();
317
318 /* Now init all the found QBBs. */
319 for (qbbno = 0; qbbno < WILDFIRE_MAX_QBB; qbbno++) {
320 wildfire_init_qbb(qbbno);
321 }
322
323 /* Normal direct PCI DMA mapping. */
324 __direct_map_base = 0x40000000UL;
325 __direct_map_size = 0x80000000UL;
326 }
327
/*
 * Machine-check handler.  The barrier/draina/wrmces sequence (including
 * the double mb) is kept exactly as-is; it quiesces outstanding
 * accesses and re-arms machine checks before handing the logout frame
 * to the common reporting code.
 */
void
wildfire_machine_check(unsigned long vector, unsigned long la_ptr,
		       struct pt_regs * regs)
{
	mb();
	mb();  /* magic */
	draina();
	/* FIXME: clear pci errors */
	wrmces(0x7);
	mb();

	process_mcheck_info(vector, la_ptr, regs, "WILDFIRE",
			    mcheck_expected(smp_processor_id()));
}
342
/* Platform shutdown hook: nothing special to do on Wildfire. */
void
wildfire_kill_arch(int mode)
{
}
347
/*
 * Invalidate the scatter-gather TLB of one hose.  The QBB/hose pair is
 * unpacked from hose->index (qbb in bits 3+, hose in bits 2:0, as
 * packed by wildfire_init_hose).  The start/end arguments are ignored:
 * the whole TLB is always flushed, triggered by merely reading the
 * PCI_FLUSH_TLB CSR.
 * NOTE(review): the bare read relies on the csr field being volatile
 * so it is not optimized away -- confirm in core_wildfire.h.
 */
void
wildfire_pci_tbi(struct pci_controller *hose, dma_addr_t start, dma_addr_t end)
{
	int qbbno = hose->index >> 3;
	int hoseno = hose->index & 7;
	wildfire_pci *pci = WILDFIRE_pci(qbbno, hoseno);

	mb();
	pci->pci_flush_tlb.csr; /* reading does the trick */
}
358
359 static int
mk_conf_addr(struct pci_dev * dev,int where,unsigned long * pci_addr,unsigned char * type1)360 mk_conf_addr(struct pci_dev *dev, int where, unsigned long *pci_addr,
361 unsigned char *type1)
362 {
363 struct pci_controller *hose = dev->sysdata;
364 unsigned long addr;
365 u8 bus = dev->bus->number;
366 u8 device_fn = dev->devfn;
367
368 DBG_CFG(("mk_conf_addr(bus=%d ,device_fn=0x%x, where=0x%x, "
369 "pci_addr=0x%p, type1=0x%p)\n",
370 bus, device_fn, where, pci_addr, type1));
371
372 if (hose->first_busno == dev->bus->number)
373 bus = 0;
374 *type1 = (bus != 0);
375
376 addr = (bus << 16) | (device_fn << 8) | where;
377 addr |= hose->config_space_base;
378
379 *pci_addr = addr;
380 DBG_CFG(("mk_conf_addr: returning pci_addr 0x%lx\n", addr));
381 return 0;
382 }
383
384 static int
wildfire_read_config_byte(struct pci_dev * dev,int where,u8 * value)385 wildfire_read_config_byte(struct pci_dev *dev, int where, u8 *value)
386 {
387 unsigned long addr;
388 unsigned char type1;
389
390 if (mk_conf_addr(dev, where, &addr, &type1))
391 return PCIBIOS_DEVICE_NOT_FOUND;
392
393 *value = __kernel_ldbu(*(vucp)addr);
394 return PCIBIOS_SUCCESSFUL;
395 }
396
397 static int
wildfire_read_config_word(struct pci_dev * dev,int where,u16 * value)398 wildfire_read_config_word(struct pci_dev *dev, int where, u16 *value)
399 {
400 unsigned long addr;
401 unsigned char type1;
402
403 if (mk_conf_addr(dev, where, &addr, &type1))
404 return PCIBIOS_DEVICE_NOT_FOUND;
405
406 *value = __kernel_ldwu(*(vusp)addr);
407 return PCIBIOS_SUCCESSFUL;
408 }
409
410 static int
wildfire_read_config_dword(struct pci_dev * dev,int where,u32 * value)411 wildfire_read_config_dword(struct pci_dev *dev, int where, u32 *value)
412 {
413 unsigned long addr;
414 unsigned char type1;
415
416 if (mk_conf_addr(dev, where, &addr, &type1))
417 return PCIBIOS_DEVICE_NOT_FOUND;
418
419 *value = *(vuip)addr;
420 return PCIBIOS_SUCCESSFUL;
421 }
422
423 static int
wildfire_write_config_byte(struct pci_dev * dev,int where,u8 value)424 wildfire_write_config_byte(struct pci_dev *dev, int where, u8 value)
425 {
426 unsigned long addr;
427 unsigned char type1;
428
429 if (mk_conf_addr(dev, where, &addr, &type1))
430 return PCIBIOS_DEVICE_NOT_FOUND;
431
432 __kernel_stb(value, *(vucp)addr);
433 mb();
434 __kernel_ldbu(*(vucp)addr);
435 return PCIBIOS_SUCCESSFUL;
436 }
437
438 static int
wildfire_write_config_word(struct pci_dev * dev,int where,u16 value)439 wildfire_write_config_word(struct pci_dev *dev, int where, u16 value)
440 {
441 unsigned long addr;
442 unsigned char type1;
443
444 if (mk_conf_addr(dev, where, &addr, &type1))
445 return PCIBIOS_DEVICE_NOT_FOUND;
446
447 __kernel_stw(value, *(vusp)addr);
448 mb();
449 __kernel_ldwu(*(vusp)addr);
450 return PCIBIOS_SUCCESSFUL;
451 }
452
453 static int
wildfire_write_config_dword(struct pci_dev * dev,int where,u32 value)454 wildfire_write_config_dword(struct pci_dev *dev, int where, u32 value)
455 {
456 unsigned long addr;
457 unsigned char type1;
458
459 if (mk_conf_addr(dev, where, &addr, &type1))
460 return PCIBIOS_DEVICE_NOT_FOUND;
461
462 *(vuip)addr = value;
463 mb();
464 *(vuip)addr;
465 return PCIBIOS_SUCCESSFUL;
466 }
467
468 struct pci_ops wildfire_pci_ops =
469 {
470 read_byte: wildfire_read_config_byte,
471 read_word: wildfire_read_config_word,
472 read_dword: wildfire_read_config_dword,
473 write_byte: wildfire_write_config_byte,
474 write_word: wildfire_write_config_word,
475 write_dword: wildfire_write_config_dword
476 };
477
478
479 /*
480 * NUMA Support
481 */
/* Physical address to NUMA node id: the node number is pa >> 36
   (each node owns a 64GB slice of the physical address space). */
int wildfire_pa_to_nid(unsigned long pa)
{
	return (int)(pa >> 36);
}
486
/* Map a cpuid to its NUMA node id, assuming 4 CPUs per node. */
int wildfire_cpuid_to_nid(int cpuid)
{
	return cpuid >> 2;	/* 4 CPUs per node */
}
492
/* First physical address of a node's memory: nodes are laid out back
   to back, 64GB (1 << 36 bytes) apiece. */
unsigned long wildfire_node_mem_start(int nid)
{
	/* 64GB per node */
	return (unsigned long)nid << 36;
}
498
/* Size of a node's memory slice: a fixed 64GB regardless of nid. */
unsigned long wildfire_node_mem_size(int nid)
{
	return 64UL << 30;	/* 64GB per node */
}
504
505 #if DEBUG_DUMP_REGS
506
/*
 * Dump the PCI CSRs and the four DMA window registers of one hose.
 * Debug aid, compiled only when DEBUG_DUMP_REGS is set; output order
 * is deliberate and preserved.
 */
static void __init
wildfire_dump_pci_regs(int qbbno, int hoseno)
{
	wildfire_pci *pci = WILDFIRE_pci(qbbno, hoseno);
	int i;

	printk(KERN_ERR "PCI registers for QBB %d hose %d (%p)\n",
	       qbbno, hoseno, pci);

	printk(KERN_ERR " PCI_IO_ADDR_EXT: 0x%16lx\n",
	       pci->pci_io_addr_ext.csr);
	printk(KERN_ERR " PCI_CTRL: 0x%16lx\n", pci->pci_ctrl.csr);
	printk(KERN_ERR " PCI_ERR_SUM: 0x%16lx\n", pci->pci_err_sum.csr);
	printk(KERN_ERR " PCI_ERR_ADDR: 0x%16lx\n", pci->pci_err_addr.csr);
	printk(KERN_ERR " PCI_STALL_CNT: 0x%16lx\n", pci->pci_stall_cnt.csr);
	printk(KERN_ERR " PCI_PEND_INT: 0x%16lx\n", pci->pci_pend_int.csr);
	printk(KERN_ERR " PCI_SENT_INT: 0x%16lx\n", pci->pci_sent_int.csr);

	printk(KERN_ERR " DMA window registers for QBB %d hose %d (%p)\n",
	       qbbno, hoseno, pci);
	for (i = 0; i < 4; i++) {
		printk(KERN_ERR " window %d: 0x%16lx 0x%16lx 0x%16lx\n", i,
		       pci->pci_window[i].wbase.csr,
		       pci->pci_window[i].wmask.csr,
		       pci->pci_window[i].tbase.csr);
	}
	printk(KERN_ERR "\n");
}
535
/*
 * Dump the PCA CSRs and interrupt target registers of one PCA.
 * Debug aid, compiled only when DEBUG_DUMP_REGS is set.
 */
static void __init
wildfire_dump_pca_regs(int qbbno, int pcano)
{
	wildfire_pca *pca = WILDFIRE_pca(qbbno, pcano);
	int i;

	printk(KERN_ERR "PCA registers for QBB %d PCA %d (%p)\n",
	       qbbno, pcano, pca);

	printk(KERN_ERR " PCA_WHAT_AM_I: 0x%16lx\n", pca->pca_what_am_i.csr);
	printk(KERN_ERR " PCA_ERR_SUM: 0x%16lx\n", pca->pca_err_sum.csr);
	printk(KERN_ERR " PCA_PEND_INT: 0x%16lx\n", pca->pca_pend_int.csr);
	printk(KERN_ERR " PCA_SENT_INT: 0x%16lx\n", pca->pca_sent_int.csr);
	printk(KERN_ERR " PCA_STDIO_EL: 0x%16lx\n",
	       pca->pca_stdio_edge_level.csr);

	printk(KERN_ERR " PCA target registers for QBB %d PCA %d (%p)\n",
	       qbbno, pcano, pca);
	for (i = 0; i < 4; i++) {
		printk(KERN_ERR " target %d: 0x%16lx 0x%16lx\n", i,
		       pca->pca_int[i].target.csr,
		       pca->pca_int[i].enable.csr);
	}

	printk(KERN_ERR "\n");
}
562
563 static void __init
wildfire_dump_qsa_regs(int qbbno)564 wildfire_dump_qsa_regs(int qbbno)
565 {
566 wildfire_qsa *qsa = WILDFIRE_qsa(qbbno);
567 int i;
568
569 printk(KERN_ERR "QSA registers for QBB %d (%p)\n", qbbno, qsa);
570
571 printk(KERN_ERR " QSA_QBB_ID: 0x%16lx\n", qsa->qsa_qbb_id.csr);
572 printk(KERN_ERR " QSA_PORT_ENA: 0x%16lx\n", qsa->qsa_port_ena.csr);
573 printk(KERN_ERR " QSA_REF_INT: 0x%16lx\n", qsa->qsa_ref_int.csr);
574
575 for (i = 0; i < 5; i++)
576 printk(KERN_ERR " QSA_CONFIG_%d: 0x%16lx\n",
577 i, qsa->qsa_config[i].csr);
578
579 for (i = 0; i < 2; i++)
580 printk(KERN_ERR " QSA_QBB_POP_%d: 0x%16lx\n",
581 i, qsa->qsa_qbb_pop[0].csr);
582
583 printk(KERN_ERR "\n");
584 }
585
586 static void __init
wildfire_dump_qsd_regs(int qbbno)587 wildfire_dump_qsd_regs(int qbbno)
588 {
589 wildfire_qsd *qsd = WILDFIRE_qsd(qbbno);
590
591 printk(KERN_ERR "QSD registers for QBB %d (%p)\n", qbbno, qsd);
592
593 printk(KERN_ERR " QSD_WHAMI: 0x%16lx\n", qsd->qsd_whami.csr);
594 printk(KERN_ERR " QSD_REV: 0x%16lx\n", qsd->qsd_rev.csr);
595 printk(KERN_ERR " QSD_PORT_PRESENT: 0x%16lx\n",
596 qsd->qsd_port_present.csr);
597 printk(KERN_ERR " QSD_PORT_ACTUVE: 0x%16lx\n",
598 qsd->qsd_port_active.csr);
599 printk(KERN_ERR " QSD_FAULT_ENA: 0x%16lx\n",
600 qsd->qsd_fault_ena.csr);
601 printk(KERN_ERR " QSD_CPU_INT_ENA: 0x%16lx\n",
602 qsd->qsd_cpu_int_ena.csr);
603 printk(KERN_ERR " QSD_MEM_CONFIG: 0x%16lx\n",
604 qsd->qsd_mem_config.csr);
605 printk(KERN_ERR " QSD_ERR_SUM: 0x%16lx\n",
606 qsd->qsd_err_sum.csr);
607
608 printk(KERN_ERR "\n");
609 }
610
/*
 * Dump the IOP CSRs of one QBB, including per-hose init and interrupt
 * target registers.  Debug aid, compiled only when DEBUG_DUMP_REGS is
 * set.
 */
static void __init
wildfire_dump_iop_regs(int qbbno)
{
	wildfire_iop *iop = WILDFIRE_iop(qbbno);
	int i;

	printk(KERN_ERR "IOP registers for QBB %d (%p)\n", qbbno, iop);

	printk(KERN_ERR " IOA_CONFIG: 0x%16lx\n", iop->ioa_config.csr);
	printk(KERN_ERR " IOD_CONFIG: 0x%16lx\n", iop->iod_config.csr);
	printk(KERN_ERR " IOP_SWITCH_CREDITS: 0x%16lx\n",
	       iop->iop_switch_credits.csr);
	printk(KERN_ERR " IOP_HOSE_CREDITS: 0x%16lx\n",
	       iop->iop_hose_credits.csr);

	for (i = 0; i < 4; i++)
		printk(KERN_ERR " IOP_HOSE_%d_INIT: 0x%16lx\n",
		       i, iop->iop_hose[i].init.csr);
	for (i = 0; i < 4; i++)
		printk(KERN_ERR " IOP_DEV_INT_TARGET_%d: 0x%16lx\n",
		       i, iop->iop_dev_int[i].target.csr);

	printk(KERN_ERR "\n");
}
635
/*
 * Dump the GP (global port) CSRs of one QBB.  Debug aid, compiled only
 * when DEBUG_DUMP_REGS is set.
 */
static void __init
wildfire_dump_gp_regs(int qbbno)
{
	wildfire_gp *gp = WILDFIRE_gp(qbbno);
	int i;

	printk(KERN_ERR "GP registers for QBB %d (%p)\n", qbbno, gp);
	for (i = 0; i < 4; i++)
		printk(KERN_ERR " GPA_QBB_MAP_%d: 0x%16lx\n",
		       i, gp->gpa_qbb_map[i].csr);

	printk(KERN_ERR " GPA_MEM_POP_MAP: 0x%16lx\n",
	       gp->gpa_mem_pop_map.csr);
	printk(KERN_ERR " GPA_SCRATCH: 0x%16lx\n", gp->gpa_scratch.csr);
	printk(KERN_ERR " GPA_DIAG: 0x%16lx\n", gp->gpa_diag.csr);
	printk(KERN_ERR " GPA_CONFIG_0: 0x%16lx\n", gp->gpa_config_0.csr);
	printk(KERN_ERR " GPA_INIT_ID: 0x%16lx\n", gp->gpa_init_id.csr);
	printk(KERN_ERR " GPA_CONFIG_2: 0x%16lx\n", gp->gpa_config_2.csr);

	printk(KERN_ERR "\n");
}
657 #endif /* DUMP_REGS */
658
659 #if DEBUG_DUMP_CONFIG
/*
 * Print everything wildfire_hardware_probe() discovered: the presence
 * masks and the hard/soft QBB maps.  Compiled only when
 * DEBUG_DUMP_CONFIG is set.  The map lines use plain printk so the
 * per-entry pieces form continuation text on one output line.
 */
static void __init
wildfire_dump_hardware_config(void)
{
	int i;

	printk(KERN_ERR "Probed Hardware Configuration\n");

	printk(KERN_ERR " hard_qbb_mask: 0x%16lx\n", wildfire_hard_qbb_mask);
	printk(KERN_ERR " soft_qbb_mask: 0x%16lx\n", wildfire_soft_qbb_mask);

	printk(KERN_ERR " gp_mask: 0x%16lx\n", wildfire_gp_mask);
	printk(KERN_ERR " hs_mask: 0x%16lx\n", wildfire_hs_mask);
	printk(KERN_ERR " iop_mask: 0x%16lx\n", wildfire_iop_mask);
	printk(KERN_ERR " ior_mask: 0x%16lx\n", wildfire_ior_mask);
	printk(KERN_ERR " pca_mask: 0x%16lx\n", wildfire_pca_mask);

	printk(KERN_ERR " cpu_mask: 0x%16lx\n", wildfire_cpu_mask);
	printk(KERN_ERR " mem_mask: 0x%16lx\n", wildfire_mem_mask);

	printk(" hard_qbb_map: ");
	for (i = 0; i < WILDFIRE_MAX_QBB; i++)
	    if (wildfire_hard_qbb_map[i] == QBB_MAP_EMPTY)
		printk("--- ");
	    else
		printk("%3d ", wildfire_hard_qbb_map[i]);
	printk("\n");

	printk(" soft_qbb_map: ");
	for (i = 0; i < WILDFIRE_MAX_QBB; i++)
	    if (wildfire_soft_qbb_map[i] == QBB_MAP_EMPTY)
		printk("--- ");
	    else
		printk("%3d ", wildfire_soft_qbb_map[i]);
	printk("\n");
}
695 #endif /* DUMP_CONFIG */
696