1 /*
2  *	linux/arch/alpha/kernel/core_cia.c
3  *
4  * Written by David A Rusling (david.rusling@reo.mts.dec.com).
5  * December 1995.
6  *
7  *	Copyright (C) 1995  David A Rusling
8  *	Copyright (C) 1997, 1998  Jay Estabrook
9  *	Copyright (C) 1998, 1999, 2000  Richard Henderson
10  *
11  * Code common to all CIA core logic chips.
12  */
13 
14 #define __EXTERN_INLINE inline
15 #include <asm/io.h>
16 #include <asm/core_cia.h>
17 #undef __EXTERN_INLINE
18 
19 #include <linux/types.h>
20 #include <linux/pci.h>
21 #include <linux/sched.h>
22 #include <linux/init.h>
23 #include <linux/bootmem.h>
24 
25 #include <asm/ptrace.h>
26 
27 #include "proto.h"
28 #include "pci_impl.h"
29 
30 
31 /*
32  * NOTE: Herein lie back-to-back mb instructions.  They are magic.
33  * One plausible explanation is that the i/o controller does not properly
34  * handle the system transaction.  Another involves timing.  Ho hum.
35  */
36 
37 #define DEBUG_CONFIG 0
38 #if DEBUG_CONFIG
39 # define DBGC(args)	printk args
40 #else
41 # define DBGC(args)
42 #endif
43 
44 #define vip	volatile int  *
45 
46 /*
47  * Given a bus, device, and function number, compute resulting
48  * configuration space address.  It is therefore not safe to have
49  * concurrent invocations to configuration space access routines, but
50  * there really shouldn't be any need for this.
51  *
52  * Type 0:
53  *
54  *  3 3|3 3 2 2|2 2 2 2|2 2 2 2|1 1 1 1|1 1 1 1|1 1
55  *  3 2|1 0 9 8|7 6 5 4|3 2 1 0|9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0
56  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
57  * | | |D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|F|F|F|R|R|R|R|R|R|0|0|
58  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
59  *
60  *	31:11	Device select bit.
61  * 	10:8	Function number
62  * 	 7:2	Register number
63  *
64  * Type 1:
65  *
66  *  3 3|3 3 2 2|2 2 2 2|2 2 2 2|1 1 1 1|1 1 1 1|1 1
67  *  3 2|1 0 9 8|7 6 5 4|3 2 1 0|9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0
68  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
69  * | | | | | | | | | | |B|B|B|B|B|B|B|B|D|D|D|D|D|F|F|F|R|R|R|R|R|R|0|1|
70  * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
71  *
72  *	31:24	reserved
 *	23:16	bus number (8 bits = 256 possible buses)
74  *	15:11	Device number (5 bits)
75  *	10:8	function number
76  *	 7:2	register number
77  *
78  * Notes:
79  *	The function number selects which function of a multi-function device
80  *	(e.g., SCSI and Ethernet).
81  *
82  *	The register selects a DWORD (32 bit) register offset.  Hence it
83  *	doesn't get shifted by 2 bits as we want to "drop" the bottom two
84  *	bits.
85  */
86 
87 static int
mk_conf_addr(struct pci_bus * bus_dev,unsigned int device_fn,int where,unsigned long * pci_addr,unsigned char * type1)88 mk_conf_addr(struct pci_bus *bus_dev, unsigned int device_fn, int where,
89 	     unsigned long *pci_addr, unsigned char *type1)
90 {
91 	u8 bus = bus_dev->number;
92 
93 	*type1 = (bus != 0);
94 	*pci_addr = (bus << 16) | (device_fn << 8) | where;
95 
96 	DBGC(("mk_conf_addr(bus=%d ,device_fn=0x%x, where=0x%x,"
97 	      " returning address 0x%p\n"
98 	      bus, device_fn, where, *pci_addr));
99 
100 	return 0;
101 }
102 
/*
 * Perform one PCI configuration space read cycle at the encoded
 * address ADDR.  TYPE1 requests a type 1 (peer-bridge) cycle via the
 * CIA CFG register.  Machine checks are expected and tolerated here:
 * a cycle that no device claims aborts, and we return all-ones as
 * the PCI spec requires.  Runs with interrupts disabled; the
 * CFG-register dance means this must not be re-entered.
 */
static unsigned int
conf_read(unsigned long addr, unsigned char type1)
{
	unsigned long flags;
	int stat0, value;
	int cia_cfg = 0;

	DBGC(("conf_read(addr=0x%lx, type1=%d) ", addr, type1));
	local_irq_save(flags);

	/* Reset status register to avoid losing errors.  */
	stat0 = *(vip)CIA_IOC_CIA_ERR;
	*(vip)CIA_IOC_CIA_ERR = stat0;
	mb();
	*(vip)CIA_IOC_CIA_ERR; /* re-read to force write */

	/* If Type1 access, must set CIA CFG. */
	if (type1) {
		cia_cfg = *(vip)CIA_IOC_CFG;
		*(vip)CIA_IOC_CFG = (cia_cfg & ~3) | 1;
		mb();
		*(vip)CIA_IOC_CFG;
	}

	/* Drain outstanding memory traffic, then arm the machine-check
	   handler so an expected mcheck is swallowed, not fatal.  */
	mb();
	draina();
	mcheck_expected(0) = 1;
	mcheck_taken(0) = 0;
	mb();

	/* Access configuration space.  */
	value = *(vip)addr;
	mb();
	mb();  /* magic */
	if (mcheck_taken(0)) {
		/* The cycle machine-checked: no device responded.
		   Return the all-ones a master abort would yield.  */
		mcheck_taken(0) = 0;
		value = 0xffffffff;
		mb();
	}
	mcheck_expected(0) = 0;
	mb();

	/* If Type1 access, must reset IOC CFG so normal IO space ops work.  */
	if (type1) {
		*(vip)CIA_IOC_CFG = cia_cfg;
		mb();
		*(vip)CIA_IOC_CFG;
	}

	local_irq_restore(flags);
	DBGC(("done\n"));

	return value;
}
157 
/*
 * Perform one PCI configuration space write cycle of VALUE at the
 * encoded address ADDR.  TYPE1 requests a type 1 cycle via the CIA
 * CFG register.  Mirrors conf_read(): interrupts disabled, expected
 * machine checks armed, and the write pushed out by a read-back.
 */
static void
conf_write(unsigned long addr, unsigned int value, unsigned char type1)
{
	unsigned long flags;
	int stat0, cia_cfg = 0;

	DBGC(("conf_write(addr=0x%lx, type1=%d) ", addr, type1));
	local_irq_save(flags);

	/* Reset status register to avoid losing errors.  */
	stat0 = *(vip)CIA_IOC_CIA_ERR;
	*(vip)CIA_IOC_CIA_ERR = stat0;
	mb();
	*(vip)CIA_IOC_CIA_ERR; /* re-read to force write */

	/* If Type1 access, must set CIA CFG.  */
	if (type1) {
		cia_cfg = *(vip)CIA_IOC_CFG;
		*(vip)CIA_IOC_CFG = (cia_cfg & ~3) | 1;
		mb();
		*(vip)CIA_IOC_CFG;
	}

	/* Drain outstanding memory traffic and arm the expected-mcheck
	   machinery, as in conf_read().  */
	mb();
	draina();
	mcheck_expected(0) = 1;
	mcheck_taken(0) = 0;
	mb();

	/* Access configuration space.  */
	*(vip)addr = value;
	mb();
	*(vip)addr; /* read back to force the write */

	mcheck_expected(0) = 0;
	mb();

	/* If Type1 access, must reset IOC CFG so normal IO space ops work.  */
	if (type1) {
		*(vip)CIA_IOC_CFG = cia_cfg;
		mb();
		*(vip)CIA_IOC_CFG;
	}

	local_irq_restore(flags);
	DBGC(("done\n"));
}
205 
206 static int
cia_read_config(struct pci_bus * bus,unsigned int devfn,int where,int size,u32 * value)207 cia_read_config(struct pci_bus *bus, unsigned int devfn, int where, int size,
208 		u32 *value)
209 {
210 	unsigned long addr, pci_addr;
211 	long mask;
212 	unsigned char type1;
213 	int shift;
214 
215 	if (mk_conf_addr(bus, devfn, where, &pci_addr, &type1))
216 		return PCIBIOS_DEVICE_NOT_FOUND;
217 
218 	mask = (size - 1) * 8;
219 	shift = (where & 3) * 8;
220 	addr = (pci_addr << 5) + mask + CIA_CONF;
221 	*value = conf_read(addr, type1) >> (shift);
222 	return PCIBIOS_SUCCESSFUL;
223 }
224 
225 static int
cia_write_config(struct pci_bus * bus,unsigned int devfn,int where,int size,u32 value)226 cia_write_config(struct pci_bus *bus, unsigned int devfn, int where, int size,
227 		 u32 value)
228 {
229 	unsigned long addr, pci_addr;
230 	long mask;
231 	unsigned char type1;
232 
233 	if (mk_conf_addr(bus, devfn, where, &pci_addr, &type1))
234 		return PCIBIOS_DEVICE_NOT_FOUND;
235 
236 	mask = (size - 1) * 8;
237 	addr = (pci_addr << 5) + mask + CIA_CONF;
238 	conf_write(addr, value << ((where & 3) * 8), type1);
239 	return PCIBIOS_SUCCESSFUL;
240 }
241 
/* Config-space accessors exported to the generic PCI layer.  */
struct pci_ops cia_pci_ops =
{
	.read = 	cia_read_config,
	.write =	cia_write_config,
};
247 
248 /*
249  * CIA Pass 1 and PYXIS Pass 1 and 2 have a broken scatter-gather tlb.
250  * It cannot be invalidated.  Rather than hard code the pass numbers,
251  * actually try the tbia to see if it works.
252  */
253 
/*
 * Invalidate the scatter-gather TLB (tbia).  START/END are unused:
 * the whole TLB is flushed.  Only safe on chips whose tbia actually
 * works -- verify_tb_operation() checks this at boot.
 */
void
cia_pci_tbi(struct pci_controller *hose, dma_addr_t start, dma_addr_t end)
{
	wmb();
	*(vip)CIA_IOC_PCI_TBIA = 3;	/* Flush all locked and unlocked.  */
	mb();
	*(vip)CIA_IOC_PCI_TBIA;
}
262 
263 /*
264  * On PYXIS, even if the tbia works, we cannot use it. It effectively locks
265  * the chip (as well as direct write to the tag registers) if there is a
266  * SG DMA operation in progress. This is true at least for PYXIS rev. 1,
267  * so always use the method below.
268  */
269 /*
270  * This is the method NT and NetBSD use.
271  *
272  * Allocate mappings, and put the chip into DMA loopback mode to read a
273  * garbage page.  This works by causing TLB misses, causing old entries to
274  * be purged to make room for the new entries coming in for the garbage page.
275  */
276 
277 #define CIA_BROKEN_TBIA_BASE	0x30000000
278 #define CIA_BROKEN_TBIA_SIZE	1024
279 
280 /* Always called with interrupts disabled */
/*
 * SG TLB invalidate for chips with a broken tbia.  Forces the TLB
 * entries out by generating misses in PCI loopback mode against the
 * window prepared by cia_prepare_tbia_workaround().  START/END are
 * unused; everything is flushed.
 * Always called with interrupts disabled.
 */
void
cia_pci_tbi_try2(struct pci_controller *hose,
		 dma_addr_t start, dma_addr_t end)
{
	void __iomem *bus_addr;
	int ctrl;

	/* Put the chip into PCI loopback mode.  */
	mb();
	ctrl = *(vip)CIA_IOC_CIA_CTRL;
	*(vip)CIA_IOC_CIA_CTRL = ctrl | CIA_CTRL_PCI_LOOP_EN;
	mb();
	*(vip)CIA_IOC_CIA_CTRL;
	mb();

	/* Read from PCI dense memory space at TBI_ADDR, skipping 32k on
	   each read.  This forces SG TLB misses.  NetBSD claims that the
	   TLB entries are not quite LRU, meaning that we need to read more
	   times than there are actual tags.  The 2117x docs claim strict
	   round-robin.  Oh well, we've come this far...  */
	/* Even better - as seen on the PYXIS rev 1 the TLB tags 0-3 can
	   be filled by the TLB misses *only once* after being invalidated
	   (by tbia or direct write). Next misses won't update them even
	   though the lock bits are cleared. Tags 4-7 are "quite LRU" though,
	   so use them and read at window 3 base exactly 4 times. Reading
	   more sometimes makes the chip crazy.  -ink */

	bus_addr = cia_ioremap(CIA_BROKEN_TBIA_BASE, 32768 * 4);

	/* Exactly four reads, 32K apart -- see the note above.  */
	cia_readl(bus_addr + 0x00000);
	cia_readl(bus_addr + 0x08000);
	cia_readl(bus_addr + 0x10000);
	cia_readl(bus_addr + 0x18000);

	cia_iounmap(bus_addr);

	/* Restore normal PCI operation.  */
	mb();
	*(vip)CIA_IOC_CIA_CTRL = ctrl;
	mb();
	*(vip)CIA_IOC_CIA_CTRL;
	mb();
}
324 
/*
 * Set up the SG window used by the broken-tbia workaround: a small
 * page table whose every entry maps the same garbage page, installed
 * in DMA window WINDOW at CIA_BROKEN_TBIA_BASE.  Reads through this
 * window (see cia_pci_tbi_try2) then evict real TLB entries.
 */
static inline void
cia_prepare_tbia_workaround(int window)
{
	unsigned long *ppte, pte;
	long i;

	/* Use minimal 1K map. */
	/* NetBSD hints the table must be 32K-aligned (see do_init_arch). */
	ppte = __alloc_bootmem(CIA_BROKEN_TBIA_SIZE, 32768, 0);
	pte = (virt_to_phys(ppte) >> (PAGE_SHIFT - 1)) | 1;

	/* Every PTE points at the table itself -- any page works.  */
	for (i = 0; i < CIA_BROKEN_TBIA_SIZE / sizeof(unsigned long); ++i)
		ppte[i] = pte;

	*(vip)CIA_IOC_PCI_Wn_BASE(window) = CIA_BROKEN_TBIA_BASE | 3;
	*(vip)CIA_IOC_PCI_Wn_MASK(window)
	  = (CIA_BROKEN_TBIA_SIZE*1024 - 1) & 0xfff00000;
	*(vip)CIA_IOC_PCI_Tn_BASE(window) = virt_to_phys(ppte) >> 2;
}
343 
/*
 * Boot-time self-test of the scatter-gather TLB, run in PCI loopback
 * mode.  Verifies: (1) TLB registers can be written and read back,
 * (2) I/O through an SG entry works, (3) tbia actually invalidates,
 * (4) PTE writes are snooped from the EV5 caches, (5) invalid PTEs
 * are reloaded from the table, (6) machine checks fire.  On failure
 * of the essential tests, SG translation is disabled entirely; if
 * only tbia fails, the cia_pci_tbi_try2 workaround is installed.
 */
static void __init
verify_tb_operation(void)
{
	static int page[PAGE_SIZE/4]
		__attribute__((aligned(PAGE_SIZE)))
		__initdata = { 0 };

	struct pci_iommu_arena *arena = pci_isa_hose->sg_isa;
	int ctrl, addr0, tag0, pte0, data0;
	int temp, use_tbia_try2 = 0;
	void __iomem *bus_addr;

	/* pyxis -- tbia is broken */
	if (pci_isa_hose->dense_io_base)
		use_tbia_try2 = 1;

	/* Put the chip into PCI loopback mode.  */
	mb();
	ctrl = *(vip)CIA_IOC_CIA_CTRL;
	*(vip)CIA_IOC_CIA_CTRL = ctrl | CIA_CTRL_PCI_LOOP_EN;
	mb();
	*(vip)CIA_IOC_CIA_CTRL;
	mb();

	/* Write a valid entry directly into the TLB registers.  */

	addr0 = arena->dma_base;
	tag0 = addr0 | 1;
	pte0 = (virt_to_phys(page) >> (PAGE_SHIFT - 1)) | 1;

	*(vip)CIA_IOC_TB_TAGn(0) = tag0;
	*(vip)CIA_IOC_TB_TAGn(1) = 0;
	*(vip)CIA_IOC_TB_TAGn(2) = 0;
	*(vip)CIA_IOC_TB_TAGn(3) = 0;
	*(vip)CIA_IOC_TB_TAGn(4) = 0;
	*(vip)CIA_IOC_TB_TAGn(5) = 0;
	*(vip)CIA_IOC_TB_TAGn(6) = 0;
	*(vip)CIA_IOC_TB_TAGn(7) = 0;
	*(vip)CIA_IOC_TBn_PAGEm(0,0) = pte0;
	*(vip)CIA_IOC_TBn_PAGEm(0,1) = 0;
	*(vip)CIA_IOC_TBn_PAGEm(0,2) = 0;
	*(vip)CIA_IOC_TBn_PAGEm(0,3) = 0;
	mb();

	/* Get a usable bus address */
	bus_addr = cia_ioremap(addr0, 8*PAGE_SIZE);

	/* First, verify we can read back what we've written.  If
	   this fails, we can't be sure of any of the other testing
	   we're going to do, so bail.  */
	/* ??? Actually, we could do the work with machine checks.
	   By passing this register update test, we pretty much
	   guarantee that cia_pci_tbi_try1 works.  If this test
	   fails, cia_pci_tbi_try2 might still work.  */

	temp = *(vip)CIA_IOC_TB_TAGn(0);
	if (temp != tag0) {
		printk("pci: failed tb register update test "
		       "(tag0 %#x != %#x)\n", temp, tag0);
		goto failed;
	}
	temp = *(vip)CIA_IOC_TB_TAGn(1);
	if (temp != 0) {
		printk("pci: failed tb register update test "
		       "(tag1 %#x != 0)\n", temp);
		goto failed;
	}
	temp = *(vip)CIA_IOC_TBn_PAGEm(0,0);
	if (temp != pte0) {
		printk("pci: failed tb register update test "
		       "(pte0 %#x != %#x)\n", temp, pte0);
		goto failed;
	}
	printk("pci: passed tb register update test\n");

	/* Second, verify we can actually do I/O through this entry.
	   The loopback read should return what we stored in page[0],
	   translated through the TLB entry written above.  */

	data0 = 0xdeadbeef;
	page[0] = data0;
	mcheck_expected(0) = 1;
	mcheck_taken(0) = 0;
	mb();
	temp = cia_readl(bus_addr);
	mb();
	mcheck_expected(0) = 0;
	mb();
	if (mcheck_taken(0)) {
		printk("pci: failed sg loopback i/o read test (mcheck)\n");
		goto failed;
	}
	if (temp != data0) {
		printk("pci: failed sg loopback i/o read test "
		       "(%#x != %#x)\n", temp, data0);
		goto failed;
	}
	printk("pci: passed sg loopback i/o read test\n");

	/* Third, try to invalidate the TLB.  If tag 0 still reads back
	   valid after the tbia, the chip's tbia is broken and we must
	   use the loopback workaround instead.  */

	if (! use_tbia_try2) {
		cia_pci_tbi(arena->hose, 0, -1);
		temp = *(vip)CIA_IOC_TB_TAGn(0);
		if (temp & 1) {
			use_tbia_try2 = 1;
			printk("pci: failed tbia test; workaround available\n");
		} else {
			printk("pci: passed tbia test\n");
		}
	}

	/* Fourth, verify the TLB snoops the EV5's caches when
	   doing a tlb fill.  */

	data0 = 0x5adda15e;
	page[0] = data0;
	arena->ptes[4] = pte0;
	mcheck_expected(0) = 1;
	mcheck_taken(0) = 0;
	mb();
	temp = cia_readl(bus_addr + 4*PAGE_SIZE);
	mb();
	mcheck_expected(0) = 0;
	mb();
	if (mcheck_taken(0)) {
		printk("pci: failed pte write cache snoop test (mcheck)\n");
		goto failed;
	}
	if (temp != data0) {
		printk("pci: failed pte write cache snoop test "
		       "(%#x != %#x)\n", temp, data0);
		goto failed;
	}
	printk("pci: passed pte write cache snoop test\n");

	/* Fifth, verify that a previously invalid PTE entry gets
	   filled from the page table.  */

	data0 = 0xabcdef12;
	page[0] = data0;
	arena->ptes[5] = pte0;
	mcheck_expected(0) = 1;
	mcheck_taken(0) = 0;
	mb();
	temp = cia_readl(bus_addr + 5*PAGE_SIZE);
	mb();
	mcheck_expected(0) = 0;
	mb();
	if (mcheck_taken(0)) {
		printk("pci: failed valid tag invalid pte reload test "
		       "(mcheck; workaround available)\n");
		/* Work around this bug by aligning new allocations
		   on 4 page boundaries.  */
		arena->align_entry = 4;
	} else if (temp != data0) {
		printk("pci: failed valid tag invalid pte reload test "
		       "(%#x != %#x)\n", temp, data0);
		goto failed;
	} else {
		printk("pci: passed valid tag invalid pte reload test\n");
	}

	/* Sixth, verify machine checks are working.  Test invalid
	   pte under the same valid tag as we used above.  */

	mcheck_expected(0) = 1;
	mcheck_taken(0) = 0;
	mb();
	temp = cia_readl(bus_addr + 6*PAGE_SIZE);
	mb();
	mcheck_expected(0) = 0;
	mb();
	printk("pci: %s pci machine check test\n",
	       mcheck_taken(0) ? "passed" : "failed");

	/* Clean up after the tests.  */
	arena->ptes[4] = 0;
	arena->ptes[5] = 0;

	if (use_tbia_try2) {
		alpha_mv.mv_pci_tbi = cia_pci_tbi_try2;

		/* Tags 0-3 must be disabled if we use this workaround. */
		wmb();
		*(vip)CIA_IOC_TB_TAGn(0) = 2;
		*(vip)CIA_IOC_TB_TAGn(1) = 2;
		*(vip)CIA_IOC_TB_TAGn(2) = 2;
		*(vip)CIA_IOC_TB_TAGn(3) = 2;

		printk("pci: tbia workaround enabled\n");
	}
	alpha_mv.mv_pci_tbi(arena->hose, 0, -1);

exit:
	/* unmap the bus addr */
	cia_iounmap(bus_addr);

	/* Restore normal PCI operation.  */
	mb();
	*(vip)CIA_IOC_CIA_CTRL = ctrl;
	mb();
	*(vip)CIA_IOC_CIA_CTRL;
	mb();
	return;

failed:
	printk("pci: disabling sg translation window\n");
	*(vip)CIA_IOC_PCI_W0_BASE = 0;
	*(vip)CIA_IOC_PCI_W1_BASE = 0;
	pci_isa_hose->sg_isa = NULL;
	alpha_mv.mv_pci_tbi = NULL;
	goto exit;
}
556 
557 #if defined(ALPHA_RESTORE_SRM_SETUP)
558 /* Save CIA configuration data as the console had it set up.  */
/* Snapshot of the chipset registers as SRM left them, captured by
   cia_save_srm_settings() and written back by
   cia_restore_srm_settings() before returning to the console.  */
struct
{
    unsigned int hae_mem;		/* sparse memory space extension */
    unsigned int hae_io;		/* sparse I/O space extension */
    unsigned int pci_dac_offset;	/* W_DAC high address bits */
    unsigned int err_mask;		/* error reporting mask */
    unsigned int cia_ctrl;		/* main control register */
    unsigned int cia_cnfg;		/* PYXIS only; 0 on plain CIA */
    struct {
	unsigned int w_base;		/* DMA window base/enable */
	unsigned int w_mask;		/* DMA window size mask */
	unsigned int t_base;		/* SG translation table base */
    } window[4];
} saved_config __attribute((common));
573 
/*
 * Record the chipset configuration as the SRM console set it up,
 * so cia_kill_arch() can restore it before handing control back.
 * IS_PYXIS selects whether the PYXIS-only CNFG register exists.
 */
void
cia_save_srm_settings(int is_pyxis)
{
	int i;

	/* Save some important registers. */
	saved_config.err_mask       = *(vip)CIA_IOC_ERR_MASK;
	saved_config.cia_ctrl       = *(vip)CIA_IOC_CIA_CTRL;
	saved_config.hae_mem        = *(vip)CIA_IOC_HAE_MEM;
	saved_config.hae_io         = *(vip)CIA_IOC_HAE_IO;
	saved_config.pci_dac_offset = *(vip)CIA_IOC_PCI_W_DAC;

	/* CNFG exists only on PYXIS; 0 marks "not pyxis" for restore. */
	if (is_pyxis)
	    saved_config.cia_cnfg   = *(vip)CIA_IOC_CIA_CNFG;
	else
	    saved_config.cia_cnfg   = 0;

	/* Save DMA windows configuration. */
	for (i = 0; i < 4; i++) {
	    saved_config.window[i].w_base = *(vip)CIA_IOC_PCI_Wn_BASE(i);
	    saved_config.window[i].w_mask = *(vip)CIA_IOC_PCI_Wn_MASK(i);
	    saved_config.window[i].t_base = *(vip)CIA_IOC_PCI_Tn_BASE(i);
	}
	mb();
}
599 
/*
 * Write back the register snapshot taken by cia_save_srm_settings(),
 * returning the chipset to the state the SRM console expects.
 */
void
cia_restore_srm_settings(void)
{
	int i;

	/* DMA windows first, then the global registers.  */
	for (i = 0; i < 4; i++) {
	    *(vip)CIA_IOC_PCI_Wn_BASE(i) = saved_config.window[i].w_base;
	    *(vip)CIA_IOC_PCI_Wn_MASK(i) = saved_config.window[i].w_mask;
	    *(vip)CIA_IOC_PCI_Tn_BASE(i) = saved_config.window[i].t_base;
	}

	*(vip)CIA_IOC_HAE_MEM   = saved_config.hae_mem;
	*(vip)CIA_IOC_HAE_IO    = saved_config.hae_io;
	*(vip)CIA_IOC_PCI_W_DAC = saved_config.pci_dac_offset;
	*(vip)CIA_IOC_ERR_MASK  = saved_config.err_mask;
	*(vip)CIA_IOC_CIA_CTRL  = saved_config.cia_ctrl;

	if (saved_config.cia_cnfg) /* Must be pyxis. */
	    *(vip)CIA_IOC_CIA_CNFG  = saved_config.cia_cnfg;

	mb();
}
622 #else /* ALPHA_RESTORE_SRM_SETUP */
623 #define cia_save_srm_settings(p)	do {} while (0)
624 #define cia_restore_srm_settings()	do {} while (0)
625 #endif /* ALPHA_RESTORE_SRM_SETUP */
626 
627 
/*
 * Common CIA/PYXIS chipset initialization: error reporting, machine
 * checks, HAEs, the single PCI hose, and the PCI-to-memory DMA
 * windows.  IS_PYXIS selects the PYXIS-specific setup (BWX access,
 * monster window, dense-only address spaces).
 */
static void __init
do_init_arch(int is_pyxis)
{
	struct pci_controller *hose;
	int temp, cia_rev, tbia_window;

	cia_rev = *(vip)CIA_IOC_CIA_REV & CIA_REV_MASK;
	printk("pci: cia revision %d%s\n",
	       cia_rev, is_pyxis ? " (pyxis)" : "");

	if (alpha_using_srm)
		cia_save_srm_settings(is_pyxis);

	/* Set up error reporting.  */
	temp = *(vip)CIA_IOC_ERR_MASK;
	temp &= ~(CIA_ERR_CPU_PE | CIA_ERR_MEM_NEM | CIA_ERR_PA_PTE_INV
		  | CIA_ERR_RCVD_MAS_ABT | CIA_ERR_RCVD_TAR_ABT);
	*(vip)CIA_IOC_ERR_MASK = temp;

	/* Clear all currently pending errors.  */
	temp = *(vip)CIA_IOC_CIA_ERR;
	*(vip)CIA_IOC_CIA_ERR = temp;

	/* Turn on mchecks.  */
	temp = *(vip)CIA_IOC_CIA_CTRL;
	temp |= CIA_CTRL_FILL_ERR_EN | CIA_CTRL_MCHK_ERR_EN;
	*(vip)CIA_IOC_CIA_CTRL = temp;

	/* Clear the CFG register, which gets used for PCI config space
	   accesses.  That is the way we want to use it, and we do not
	   want to depend on what ARC or SRM might have left behind.  */
	*(vip)CIA_IOC_CFG = 0;

	/* Zero the HAEs.  */
	*(vip)CIA_IOC_HAE_MEM = 0;
	*(vip)CIA_IOC_HAE_IO = 0;

	/* For PYXIS, we always use BWX bus and i/o accesses.  To that end,
	   make sure they're enabled on the controller.  At the same time,
	   enable the monster window.  */
	if (is_pyxis) {
		temp = *(vip)CIA_IOC_CIA_CNFG;
		temp |= CIA_CNFG_IOA_BWEN | CIA_CNFG_PCI_MWEN;
		*(vip)CIA_IOC_CIA_CNFG = temp;
	}

	/* Synchronize with all previous changes.  */
	mb();
	*(vip)CIA_IOC_CIA_REV;

	/*
	 * Create our single hose.
	 */

	pci_isa_hose = hose = alloc_pci_controller();
	hose->io_space = &ioport_resource;
	hose->mem_space = &iomem_resource;
	hose->index = 0;

	/* Plain CIA uses sparse spaces limited by the HAE; PYXIS gets
	   full dense (BWX) access.  */
	if (! is_pyxis) {
		struct resource *hae_mem = alloc_resource();
		hose->mem_space = hae_mem;

		hae_mem->start = 0;
		hae_mem->end = CIA_MEM_R1_MASK;
		hae_mem->name = pci_hae0_name;
		hae_mem->flags = IORESOURCE_MEM;

		if (request_resource(&iomem_resource, hae_mem) < 0)
			printk(KERN_ERR "Failed to request HAE_MEM\n");

		hose->sparse_mem_base = CIA_SPARSE_MEM - IDENT_ADDR;
		hose->dense_mem_base = CIA_DENSE_MEM - IDENT_ADDR;
		hose->sparse_io_base = CIA_IO - IDENT_ADDR;
		hose->dense_io_base = 0;
	} else {
		hose->sparse_mem_base = 0;
		hose->dense_mem_base = CIA_BW_MEM - IDENT_ADDR;
		hose->sparse_io_base = 0;
		hose->dense_io_base = CIA_BW_IO - IDENT_ADDR;
	}

	/*
	 * Set up the PCI to main memory translation windows.
	 *
	 * Window 0 is S/G 8MB at 8MB (for isa)
	 * Window 1 is S/G 1MB at 768MB (for tbia) (unused for CIA rev 1)
	 * Window 2 is direct access 2GB at 2GB
	 * Window 3 is DAC access 4GB at 8GB (or S/G for tbia if CIA rev 1)
	 *
	 * ??? NetBSD hints that page tables must be aligned to 32K,
	 * possibly due to a hardware bug.  This is over-aligned
	 * from the 8K alignment one would expect for an 8MB window.
	 * No description of what revisions affected.
	 */

	hose->sg_pci = NULL;
	hose->sg_isa = iommu_arena_new(hose, 0x00800000, 0x00800000, 32768);

	__direct_map_base = 0x80000000;
	__direct_map_size = 0x80000000;

	/* Window 0: SG for ISA.  Base | 3 = enabled + SG.  */
	*(vip)CIA_IOC_PCI_W0_BASE = hose->sg_isa->dma_base | 3;
	*(vip)CIA_IOC_PCI_W0_MASK = (hose->sg_isa->size - 1) & 0xfff00000;
	*(vip)CIA_IOC_PCI_T0_BASE = virt_to_phys(hose->sg_isa->ptes) >> 2;

	/* Window 2: direct-mapped.  Base | 1 = enabled, no SG.  */
	*(vip)CIA_IOC_PCI_W2_BASE = __direct_map_base | 1;
	*(vip)CIA_IOC_PCI_W2_MASK = (__direct_map_size - 1) & 0xfff00000;
	*(vip)CIA_IOC_PCI_T2_BASE = 0 >> 2;

	/* On PYXIS we have the monster window, selected by bit 40, so
	   there is no need for window3 to be enabled.

	   On CIA, we don't have true arbitrary addressing -- bits <39:32>
	   are compared against W_DAC.  We can, however, directly map 4GB,
	   which is better than before.  However, due to assumptions made
	   elsewhere, we should not claim that we support DAC unless that
	   4GB covers all of physical memory.

	   On CIA rev 1, apparently W1 and W2 can't be used for SG.
	   At least, there are reports that it doesn't work for Alcor.
	   In that case, we have no choice but to use W3 for the TBIA
	   workaround, which means we can't use DAC at all. */

	tbia_window = 1;
	if (is_pyxis) {
		*(vip)CIA_IOC_PCI_W3_BASE = 0;
	} else if (cia_rev == 1) {
		*(vip)CIA_IOC_PCI_W1_BASE = 0;
		tbia_window = 3;
	} else if (max_low_pfn > (0x100000000UL >> PAGE_SHIFT)) {
		*(vip)CIA_IOC_PCI_W3_BASE = 0;
	} else {
		/* Window 3: 4GB DAC window.  1 = enable, 8 = DAC.  */
		*(vip)CIA_IOC_PCI_W3_BASE = 0x00000000 | 1 | 8;
		*(vip)CIA_IOC_PCI_W3_MASK = 0xfff00000;
		*(vip)CIA_IOC_PCI_T3_BASE = 0 >> 2;

		alpha_mv.pci_dac_offset = 0x200000000UL;
		*(vip)CIA_IOC_PCI_W_DAC = alpha_mv.pci_dac_offset >> 32;
	}

	/* Prepare workaround for apparently broken tbia. */
	cia_prepare_tbia_workaround(tbia_window);
}
772 
void __init
cia_init_arch(void)
{
	/* Plain CIA (e.g. Alcor): no PYXIS-specific setup.  */
	do_init_arch(0);
}
778 
/*
 * PYXIS entry point: first calibrate the CPU clock frequency against
 * the chipset's real-time counter, then run the common init.
 */
void __init
pyxis_init_arch(void)
{
	/* On pyxis machines we can precisely calculate the
	   CPU clock frequency using pyxis real time counter.
	   It's especially useful for SX164 with broken RTC.

	   Both CPU and chipset are driven by the single 16.666M
	   or 16.667M crystal oscillator. PYXIS_RT_COUNT clock is
	   66.66 MHz. -ink */

	unsigned int cc0, cc1;
	unsigned long pyxis_cc;

	/* Count CPU cycles (rpcc) across 4096 ticks of the 66.66 MHz
	   RT counter, then scale: freq = (cc1/4096) * 66666666.  The
	   (cc1 >> 11) * 100000000 / 3 form computes exactly that.  */
	__asm__ __volatile__ ("rpcc %0" : "=r"(cc0));
	pyxis_cc = *(vulp)PYXIS_RT_COUNT;
	do { } while(*(vulp)PYXIS_RT_COUNT - pyxis_cc < 4096);
	__asm__ __volatile__ ("rpcc %0" : "=r"(cc1));
	cc1 -= cc0;
	hwrpb->cycle_freq = ((cc1 >> 11) * 100000000UL) / 3;
	hwrpb_update_checksum(hwrpb);

	do_init_arch(1);
}
803 
/*
 * Shutdown/reboot hook: hand the chipset back to the SRM console in
 * the state it was saved in at boot.  MODE is unused here.
 */
void
cia_kill_arch(int mode)
{
	if (alpha_using_srm)
		cia_restore_srm_settings();
}
810 
/*
 * Late PCI init: run the SG TLB self-test (which requires working
 * machine-check handling, hence not done in init_arch) and then the
 * generic Alpha PCI setup.
 */
void __init
cia_init_pci(void)
{
	/* Must delay this from init_arch, as we need machine checks.  */
	verify_tb_operation();
	common_init_pci();
}
818 
/*
 * Acknowledge all pending chipset errors by writing the error status
 * back to itself (write-one-to-clear).
 */
static inline void
cia_pci_clr_err(void)
{
	int jd;

	jd = *(vip)CIA_IOC_CIA_ERR;
	*(vip)CIA_IOC_CIA_ERR = jd;
	mb();
	*(vip)CIA_IOC_CIA_ERR;		/* re-read to force write.  */
}
829 
830 #ifdef CONFIG_VERBOSE_MCHECK
831 static void
cia_decode_pci_error(struct el_CIA_sysdata_mcheck * cia,const char * msg)832 cia_decode_pci_error(struct el_CIA_sysdata_mcheck *cia, const char *msg)
833 {
834 	static const char * const pci_cmd_desc[16] = {
835 		"Interrupt Acknowledge", "Special Cycle", "I/O Read",
836 		"I/O Write", "Reserved 0x4", "Reserved 0x5", "Memory Read",
837 		"Memory Write", "Reserved 0x8", "Reserved 0x9",
838 		"Configuration Read", "Configuration Write",
839 		"Memory Read Multiple", "Dual Address Cycle",
840 		"Memory Read Line", "Memory Write and Invalidate"
841 	};
842 
843 	if (cia->cia_err & (CIA_ERR_COR_ERR
844 			    | CIA_ERR_UN_COR_ERR
845 			    | CIA_ERR_MEM_NEM
846 			    | CIA_ERR_PA_PTE_INV)) {
847 		static const char * const window_desc[6] = {
848 			"No window active", "Window 0 hit", "Window 1 hit",
849 			"Window 2 hit", "Window 3 hit", "Monster window hit"
850 		};
851 
852 		const char *window;
853 		const char *cmd;
854 		unsigned long addr, tmp;
855 		int lock, dac;
856 
857 		cmd = pci_cmd_desc[cia->pci_err0 & 0x7];
858 		lock = (cia->pci_err0 >> 4) & 1;
859 		dac = (cia->pci_err0 >> 5) & 1;
860 
861 		tmp = (cia->pci_err0 >> 8) & 0x1F;
862 		tmp = ffs(tmp);
863 		window = window_desc[tmp];
864 
865 		addr = cia->pci_err1;
866 		if (dac) {
867 			tmp = *(vip)CIA_IOC_PCI_W_DAC & 0xFFUL;
868 			addr |= tmp << 32;
869 		}
870 
871 		printk(KERN_CRIT "CIA machine check: %s\n", msg);
872 		printk(KERN_CRIT "  DMA command: %s\n", cmd);
873 		printk(KERN_CRIT "  PCI address: %#010lx\n", addr);
874 		printk(KERN_CRIT "  %s, Lock: %d, DAC: %d\n",
875 		       window, lock, dac);
876 	} else if (cia->cia_err & (CIA_ERR_PERR
877 				   | CIA_ERR_PCI_ADDR_PE
878 				   | CIA_ERR_RCVD_MAS_ABT
879 				   | CIA_ERR_RCVD_TAR_ABT
880 				   | CIA_ERR_IOA_TIMEOUT)) {
881 		static const char * const master_st_desc[16] = {
882 			"Idle", "Drive bus", "Address step cycle",
883 			"Address cycle", "Data cycle", "Last read data cycle",
884 			"Last write data cycle", "Read stop cycle",
885 			"Write stop cycle", "Read turnaround cycle",
886 			"Write turnaround cycle", "Reserved 0xB",
887 			"Reserved 0xC", "Reserved 0xD", "Reserved 0xE",
888 			"Unknown state"
889 		};
890 		static const char * const target_st_desc[16] = {
891 			"Idle", "Busy", "Read data cycle", "Write data cycle",
892 			"Read stop cycle", "Write stop cycle",
893 			"Read turnaround cycle", "Write turnaround cycle",
894 			"Read wait cycle", "Write wait cycle",
895 			"Reserved 0xA", "Reserved 0xB", "Reserved 0xC",
896 			"Reserved 0xD", "Reserved 0xE", "Unknown state"
897 		};
898 
899 		const char *cmd;
900 		const char *master, *target;
901 		unsigned long addr, tmp;
902 		int dac;
903 
904 		master = master_st_desc[(cia->pci_err0 >> 16) & 0xF];
905 		target = target_st_desc[(cia->pci_err0 >> 20) & 0xF];
906 		cmd = pci_cmd_desc[(cia->pci_err0 >> 24) & 0xF];
907 		dac = (cia->pci_err0 >> 28) & 1;
908 
909 		addr = cia->pci_err2;
910 		if (dac) {
911 			tmp = *(volatile int *)CIA_IOC_PCI_W_DAC & 0xFFUL;
912 			addr |= tmp << 32;
913 		}
914 
915 		printk(KERN_CRIT "CIA machine check: %s\n", msg);
916 		printk(KERN_CRIT "  PCI command: %s\n", cmd);
917 		printk(KERN_CRIT "  Master state: %s, Target state: %s\n",
918 		       master, target);
919 		printk(KERN_CRIT "  PCI address: %#010lx, DAC: %d\n",
920 		       addr, dac);
921 	} else {
922 		printk(KERN_CRIT "CIA machine check: %s\n", msg);
923 		printk(KERN_CRIT "  Unknown PCI error\n");
924 		printk(KERN_CRIT "  PCI_ERR0 = %#08lx", cia->pci_err0);
925 		printk(KERN_CRIT "  PCI_ERR1 = %#08lx", cia->pci_err1);
926 		printk(KERN_CRIT "  PCI_ERR2 = %#08lx", cia->pci_err2);
927 	}
928 }
929 
/*
 * Decode and log a memory-port machine check: command, address and
 * mask, sequencer state, and selected memory set, all extracted from
 * the MEM_ERR0/MEM_ERR1 fields of the logout frame.  If the failing
 * command was a DMA, the PCI side is decoded first.
 */
static void
cia_decode_mem_error(struct el_CIA_sysdata_mcheck *cia, const char *msg)
{
	unsigned long mem_port_addr;
	unsigned long mem_port_mask;
	const char *mem_port_cmd;
	const char *seq_state;
	const char *set_select;
	unsigned long tmp;

	/* If this is a DMA command, also decode the PCI bits.  */
	if ((cia->mem_err1 >> 20) & 1)
		cia_decode_pci_error(cia, msg);
	else
		printk(KERN_CRIT "CIA machine check: %s\n", msg);

	/* Address: low 28 bits (16-byte granular) from MEM_ERR0, high
	   bits from MEM_ERR1.  */
	mem_port_addr = cia->mem_err0 & 0xfffffff0;
	mem_port_addr |= (cia->mem_err1 & 0x83UL) << 32;

	mem_port_mask = (cia->mem_err1 >> 12) & 0xF;

	/* Command: 4 bits plus the DMA flag as bit 4.  */
	tmp = (cia->mem_err1 >> 8) & 0xF;
	tmp |= ((cia->mem_err1 >> 20) & 1) << 4;
	if ((tmp & 0x1E) == 0x06)
		mem_port_cmd = "WRITE BLOCK or WRITE BLOCK LOCK";
	else if ((tmp & 0x1C) == 0x08)
		mem_port_cmd = "READ MISS or READ MISS MODIFY";
	else if (tmp == 0x1C)
		mem_port_cmd = "BC VICTIM";
	else if ((tmp & 0x1E) == 0x0E)
		mem_port_cmd = "READ MISS MODIFY";
	else if ((tmp & 0x1C) == 0x18)
		mem_port_cmd = "DMA READ or DMA READ MODIFY";
	else if ((tmp & 0x1E) == 0x12)
		mem_port_cmd = "DMA WRITE";
	else
		mem_port_cmd = "Unknown";

	/* Memory sequencer state at the time of the error.  */
	tmp = (cia->mem_err1 >> 16) & 0xF;
	switch (tmp) {
	case 0x0:
		seq_state = "Idle";
		break;
	case 0x1:
		seq_state = "DMA READ or DMA WRITE";
		break;
	case 0x2: case 0x3:
		seq_state = "READ MISS (or READ MISS MODIFY) with victim";
		break;
	case 0x4: case 0x5: case 0x6:
		seq_state = "READ MISS (or READ MISS MODIFY) with no victim";
		break;
	case 0x8: case 0x9: case 0xB:
		seq_state = "Refresh";
		break;
	case 0xC:
		seq_state = "Idle, waiting for DMA pending read";
		break;
	case 0xE: case 0xF:
		seq_state = "Idle, ras precharge";
		break;
	default:
		seq_state = "Unknown";
		break;
	}

	/* Which memory set (bank) was selected.  */
	tmp = (cia->mem_err1 >> 24) & 0x1F;
	switch (tmp) {
	case 0x00: set_select = "Set 0 selected"; break;
	case 0x01: set_select = "Set 1 selected"; break;
	case 0x02: set_select = "Set 2 selected"; break;
	case 0x03: set_select = "Set 3 selected"; break;
	case 0x04: set_select = "Set 4 selected"; break;
	case 0x05: set_select = "Set 5 selected"; break;
	case 0x06: set_select = "Set 6 selected"; break;
	case 0x07: set_select = "Set 7 selected"; break;
	case 0x08: set_select = "Set 8 selected"; break;
	case 0x09: set_select = "Set 9 selected"; break;
	case 0x0A: set_select = "Set A selected"; break;
	case 0x0B: set_select = "Set B selected"; break;
	case 0x0C: set_select = "Set C selected"; break;
	case 0x0D: set_select = "Set D selected"; break;
	case 0x0E: set_select = "Set E selected"; break;
	case 0x0F: set_select = "Set F selected"; break;
	case 0x10: set_select = "No set selected"; break;
	case 0x1F: set_select = "Refresh cycle"; break;
	default:   set_select = "Unknown"; break;
	}

	printk(KERN_CRIT "  Memory port command: %s\n", mem_port_cmd);
	printk(KERN_CRIT "  Memory port address: %#010lx, mask: %#lx\n",
	       mem_port_addr, mem_port_mask);
	printk(KERN_CRIT "  Memory sequencer state: %s\n", seq_state);
	printk(KERN_CRIT "  Memory set: %s\n", set_select);
}
1025 
1026 static void
cia_decode_ecc_error(struct el_CIA_sysdata_mcheck * cia,const char * msg)1027 cia_decode_ecc_error(struct el_CIA_sysdata_mcheck *cia, const char *msg)
1028 {
1029 	long syn;
1030 	long i;
1031 	const char *fmt;
1032 
1033 	cia_decode_mem_error(cia, msg);
1034 
1035 	syn = cia->cia_syn & 0xff;
1036 	if (syn == (syn & -syn)) {
1037 		fmt = KERN_CRIT "  ECC syndrome %#x -- check bit %d\n";
1038 		i = ffs(syn) - 1;
1039 	} else {
1040 		static unsigned char const data_bit[64] = {
1041 			0xCE, 0xCB, 0xD3, 0xD5,
1042 			0xD6, 0xD9, 0xDA, 0xDC,
1043 			0x23, 0x25, 0x26, 0x29,
1044 			0x2A, 0x2C, 0x31, 0x34,
1045 			0x0E, 0x0B, 0x13, 0x15,
1046 			0x16, 0x19, 0x1A, 0x1C,
1047 			0xE3, 0xE5, 0xE6, 0xE9,
1048 			0xEA, 0xEC, 0xF1, 0xF4,
1049 			0x4F, 0x4A, 0x52, 0x54,
1050 			0x57, 0x58, 0x5B, 0x5D,
1051 			0xA2, 0xA4, 0xA7, 0xA8,
1052 			0xAB, 0xAD, 0xB0, 0xB5,
1053 			0x8F, 0x8A, 0x92, 0x94,
1054 			0x97, 0x98, 0x9B, 0x9D,
1055 			0x62, 0x64, 0x67, 0x68,
1056 			0x6B, 0x6D, 0x70, 0x75
1057 		};
1058 
1059 		for (i = 0; i < 64; ++i)
1060 			if (data_bit[i] == syn)
1061 				break;
1062 
1063 		if (i < 64)
1064 			fmt = KERN_CRIT "  ECC syndrome %#x -- data bit %d\n";
1065 		else
1066 			fmt = KERN_CRIT "  ECC syndrome %#x -- unknown bit\n";
1067 	}
1068 
1069 	printk (fmt, syn, i);
1070 }
1071 
1072 static void
cia_decode_parity_error(struct el_CIA_sysdata_mcheck * cia)1073 cia_decode_parity_error(struct el_CIA_sysdata_mcheck *cia)
1074 {
1075 	static const char * const cmd_desc[16] = {
1076 		"NOP", "LOCK", "FETCH", "FETCH_M", "MEMORY BARRIER",
1077 		"SET DIRTY", "WRITE BLOCK", "WRITE BLOCK LOCK",
1078 		"READ MISS0", "READ MISS1", "READ MISS MOD0",
1079 		"READ MISS MOD1", "BCACHE VICTIM", "Spare",
1080 		"READ MISS MOD STC0", "READ MISS MOD STC1"
1081 	};
1082 
1083 	unsigned long addr;
1084 	unsigned long mask;
1085 	const char *cmd;
1086 	int par;
1087 
1088 	addr = cia->cpu_err0 & 0xfffffff0;
1089 	addr |= (cia->cpu_err1 & 0x83UL) << 32;
1090 	cmd = cmd_desc[(cia->cpu_err1 >> 8) & 0xF];
1091 	mask = (cia->cpu_err1 >> 12) & 0xF;
1092 	par = (cia->cpu_err1 >> 21) & 1;
1093 
1094 	printk(KERN_CRIT "CIA machine check: System bus parity error\n");
1095 	printk(KERN_CRIT "  Command: %s, Parity bit: %d\n", cmd, par);
1096 	printk(KERN_CRIT "  Address: %#010lx, Mask: %#lx\n", addr, mask);
1097 }
1098 #endif /* CONFIG_VERBOSE_MCHECK */
1099 
1100 
1101 static int
cia_decode_mchk(unsigned long la_ptr)1102 cia_decode_mchk(unsigned long la_ptr)
1103 {
1104 	struct el_common *com;
1105 	struct el_CIA_sysdata_mcheck *cia;
1106 
1107 	com = (void *)la_ptr;
1108 	cia = (void *)(la_ptr + com->sys_offset);
1109 
1110 	if ((cia->cia_err & CIA_ERR_VALID) == 0)
1111 		return 0;
1112 
1113 #ifdef CONFIG_VERBOSE_MCHECK
1114 	if (!alpha_verbose_mcheck)
1115 		return 1;
1116 
1117 	switch (ffs(cia->cia_err & 0xfff) - 1) {
1118 	case 0: /* CIA_ERR_COR_ERR */
1119 		cia_decode_ecc_error(cia, "Corrected ECC error");
1120 		break;
1121 	case 1: /* CIA_ERR_UN_COR_ERR */
1122 		cia_decode_ecc_error(cia, "Uncorrected ECC error");
1123 		break;
1124 	case 2: /* CIA_ERR_CPU_PE */
1125 		cia_decode_parity_error(cia);
1126 		break;
1127 	case 3: /* CIA_ERR_MEM_NEM */
1128 		cia_decode_mem_error(cia, "Access to nonexistent memory");
1129 		break;
1130 	case 4: /* CIA_ERR_PCI_SERR */
1131 		cia_decode_pci_error(cia, "PCI bus system error");
1132 		break;
1133 	case 5: /* CIA_ERR_PERR */
1134 		cia_decode_pci_error(cia, "PCI data parity error");
1135 		break;
1136 	case 6: /* CIA_ERR_PCI_ADDR_PE */
1137 		cia_decode_pci_error(cia, "PCI address parity error");
1138 		break;
1139 	case 7: /* CIA_ERR_RCVD_MAS_ABT */
1140 		cia_decode_pci_error(cia, "PCI master abort");
1141 		break;
1142 	case 8: /* CIA_ERR_RCVD_TAR_ABT */
1143 		cia_decode_pci_error(cia, "PCI target abort");
1144 		break;
1145 	case 9: /* CIA_ERR_PA_PTE_INV */
1146 		cia_decode_pci_error(cia, "PCI invalid PTE");
1147 		break;
1148 	case 10: /* CIA_ERR_FROM_WRT_ERR */
1149 		cia_decode_mem_error(cia, "Write to flash ROM attempted");
1150 		break;
1151 	case 11: /* CIA_ERR_IOA_TIMEOUT */
1152 		cia_decode_pci_error(cia, "I/O timeout");
1153 		break;
1154 	}
1155 
1156 	if (cia->cia_err & CIA_ERR_LOST_CORR_ERR)
1157 		printk(KERN_CRIT "CIA lost machine check: "
1158 		       "Correctable ECC error\n");
1159 	if (cia->cia_err & CIA_ERR_LOST_UN_CORR_ERR)
1160 		printk(KERN_CRIT "CIA lost machine check: "
1161 		       "Uncorrectable ECC error\n");
1162 	if (cia->cia_err & CIA_ERR_LOST_CPU_PE)
1163 		printk(KERN_CRIT "CIA lost machine check: "
1164 		       "System bus parity error\n");
1165 	if (cia->cia_err & CIA_ERR_LOST_MEM_NEM)
1166 		printk(KERN_CRIT "CIA lost machine check: "
1167 		       "Access to nonexistent memory\n");
1168 	if (cia->cia_err & CIA_ERR_LOST_PERR)
1169 		printk(KERN_CRIT "CIA lost machine check: "
1170 		       "PCI data parity error\n");
1171 	if (cia->cia_err & CIA_ERR_LOST_PCI_ADDR_PE)
1172 		printk(KERN_CRIT "CIA lost machine check: "
1173 		       "PCI address parity error\n");
1174 	if (cia->cia_err & CIA_ERR_LOST_RCVD_MAS_ABT)
1175 		printk(KERN_CRIT "CIA lost machine check: "
1176 		       "PCI master abort\n");
1177 	if (cia->cia_err & CIA_ERR_LOST_RCVD_TAR_ABT)
1178 		printk(KERN_CRIT "CIA lost machine check: "
1179 		       "PCI target abort\n");
1180 	if (cia->cia_err & CIA_ERR_LOST_PA_PTE_INV)
1181 		printk(KERN_CRIT "CIA lost machine check: "
1182 		       "PCI invalid PTE\n");
1183 	if (cia->cia_err & CIA_ERR_LOST_FROM_WRT_ERR)
1184 		printk(KERN_CRIT "CIA lost machine check: "
1185 		       "Write to flash ROM attempted\n");
1186 	if (cia->cia_err & CIA_ERR_LOST_IOA_TIMEOUT)
1187 		printk(KERN_CRIT "CIA lost machine check: "
1188 		       "I/O timeout\n");
1189 #endif /* CONFIG_VERBOSE_MCHECK */
1190 
1191 	return 1;
1192 }
1193 
void
cia_machine_check(unsigned long vector, unsigned long la_ptr)
{
	int expected;

	/* Clear the error before any reporting.  */
	/* NOTE: the exact sequence below is order-sensitive hardware
	   choreography (see the "back-to-back mb" note at the top of
	   this file); do not reorder or coalesce these calls.  */
	mb();
	mb();  /* magic */
	draina();
	cia_pci_clr_err();
	wrmces(rdmces());	/* reset machine check pending flag.  */
	mb();

	/* An "expected" machine check is one a probe routine armed via
	   mcheck_expected(); otherwise, for the system-event vector
	   0x660, try to decode the CIA logout frame at la_ptr.  */
	expected = mcheck_expected(0);
	if (!expected && vector == 0x660)
		expected = cia_decode_mchk(la_ptr);
	process_mcheck_info(vector, la_ptr, "CIA", expected);
}
1212