1 /* $Id: traps.c,v 1.82 2001/11/18 00:12:56 davem Exp $
2  * arch/sparc64/kernel/traps.c
3  *
4  * Copyright (C) 1995,1997 David S. Miller (davem@caip.rutgers.edu)
5  * Copyright (C) 1997,1999,2000 Jakub Jelinek (jakub@redhat.com)
6  */
7 
8 /*
9  * I like traps on v9, :))))
10  */
11 
12 #include <linux/config.h>
13 #include <linux/sched.h>  /* for jiffies */
14 #include <linux/kernel.h>
15 #include <linux/signal.h>
16 #include <linux/smp.h>
17 #include <linux/smp_lock.h>
18 #include <linux/mm.h>
19 
20 #include <asm/delay.h>
21 #include <asm/system.h>
22 #include <asm/ptrace.h>
23 #include <asm/oplib.h>
24 #include <asm/page.h>
25 #include <asm/pgtable.h>
26 #include <asm/unistd.h>
27 #include <asm/uaccess.h>
28 #include <asm/fpumacro.h>
29 #include <asm/lsu.h>
30 #include <asm/dcu.h>
31 #include <asm/estate.h>
32 #include <asm/chafsr.h>
33 #include <asm/sfafsr.h>
34 #include <asm/psrcompat.h>
35 #include <asm/processor.h>
36 #ifdef CONFIG_KMOD
37 #include <linux/kmod.h>
38 #endif
39 
40 /* When an irrecoverable trap occurs at tl > 0, the trap entry
41  * code logs the trap state registers at every level in the trap
42  * stack.  It is found at (pt_regs + sizeof(pt_regs)) and the layout
43  * is as follows:
44  */
/* One entry per trap level (1..4), plus the level the error hit at.
 * Filled in by the assembly trap entry code (see comment above), so
 * the layout must not change.
 */
struct tl1_traplog {
	struct {
		unsigned long tstate;	/* trap-time TSTATE */
		unsigned long tpc;	/* trap PC */
		unsigned long tnpc;	/* trap next-PC */
		unsigned long tt;	/* trap type */
	} trapstack[4];
	unsigned long tl;	/* trap level at which the error occurred */
};
54 
dump_tl1_traplog(struct tl1_traplog * p)55 static void dump_tl1_traplog(struct tl1_traplog *p)
56 {
57 	int i;
58 
59 	printk("TRAPLOG: Error at trap level 0x%lx, dumping track stack.\n",
60 	       p->tl);
61 	for (i = 0; i < 4; i++) {
62 		printk(KERN_CRIT
63 		       "TRAPLOG: Trap level %d TSTATE[%016lx] TPC[%016lx] "
64 		       "TNPC[%016lx] TT[%lx]\n",
65 		       i + 1,
66 		       p->trapstack[i].tstate, p->trapstack[i].tpc,
67 		       p->trapstack[i].tnpc, p->trapstack[i].tt);
68 	}
69 }
70 
bad_trap(struct pt_regs * regs,long lvl)71 void bad_trap (struct pt_regs *regs, long lvl)
72 {
73 	char buffer[32];
74 	siginfo_t info;
75 
76 	if (lvl < 0x100) {
77 		sprintf(buffer, "Bad hw trap %lx at tl0\n", lvl);
78 		die_if_kernel(buffer, regs);
79 	}
80 
81 	lvl -= 0x100;
82 	if (regs->tstate & TSTATE_PRIV) {
83 		sprintf(buffer, "Kernel bad sw trap %lx", lvl);
84 		die_if_kernel (buffer, regs);
85 	}
86 	if ((current->thread.flags & SPARC_FLAG_32BIT) != 0) {
87 		regs->tpc &= 0xffffffff;
88 		regs->tnpc &= 0xffffffff;
89 	}
90 	info.si_signo = SIGILL;
91 	info.si_errno = 0;
92 	info.si_code = ILL_ILLTRP;
93 	info.si_addr = (void *)regs->tpc;
94 	info.si_trapno = lvl;
95 	force_sig_info(SIGILL, &info, current);
96 }
97 
bad_trap_tl1(struct pt_regs * regs,long lvl)98 void bad_trap_tl1 (struct pt_regs *regs, long lvl)
99 {
100 	char buffer[32];
101 
102 	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
103 
104 	sprintf (buffer, "Bad trap %lx at tl>0", lvl);
105 	die_if_kernel (buffer, regs);
106 }
107 
108 #ifdef CONFIG_DEBUG_BUGVERBOSE
/* Called from the BUG() macro: report the source location of the
 * failed assertion.  bust_spinlocks(1) forces console output through
 * even if console/printk locks are held, so the message gets out
 * before the trap that follows kills the kernel.
 */
void do_BUG(const char *file, int line)
{
	bust_spinlocks(1);
	printk("kernel BUG at %s:%d!\n", file, line);
}
114 #endif
115 
/* Spitfire instruction access exception.  'sfsr'/'sfar' are the
 * Synchronous Fault Status/Address register values captured by the
 * trap entry code.  Fatal in kernel mode; user mode gets SIGSEGV
 * with si_addr set to the faulting PC.
 */
void spitfire_insn_access_exception(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
{
	siginfo_t info;

	if (regs->tstate & TSTATE_PRIV) {
		printk("spitfire_insn_access_exception: SFSR[%016lx] "
		       "SFAR[%016lx], going.\n", sfsr, sfar);
		die_if_kernel("Iax", regs);
	}
	if ((current->thread.flags & SPARC_FLAG_32BIT) != 0) {
		/* 32-bit tasks only see the low 32 bits of the PC. */
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code = SEGV_MAPERR;
	info.si_addr = (void *)regs->tpc;
	info.si_trapno = 0;
	force_sig_info(SIGSEGV, &info, current);
}
136 
spitfire_insn_access_exception_tl1(struct pt_regs * regs,unsigned long sfsr,unsigned long sfar)137 void spitfire_insn_access_exception_tl1(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
138 {
139 	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
140 	spitfire_insn_access_exception(regs, sfsr, sfar);
141 }
142 
/* Spitfire data access exception.  In kernel mode, first check
 * whether the faulting PC has an exception-table entry (uaccess
 * fixups); if so, resume at the fixup address instead of dying.
 * Otherwise fatal in kernel mode; user mode gets SIGSEGV at the
 * faulting data address (SFAR).
 */
void spitfire_data_access_exception(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
{
	siginfo_t info;

	if (regs->tstate & TSTATE_PRIV) {
		/* Test if this comes from uaccess places. */
		unsigned long fixup, g2;

		g2 = regs->u_regs[UREG_G2];
		if ((fixup = search_exception_table (regs->tpc, &g2))) {
			/* Ouch, somebody is trying ugly VM hole tricks on us... */
#ifdef DEBUG_EXCEPTIONS
			printk("Exception: PC<%016lx> faddr<UNKNOWN>\n", regs->tpc);
			printk("EX_TABLE: insn<%016lx> fixup<%016lx> "
			       "g2<%016lx>\n", regs->tpc, fixup, g2);
#endif
			/* Resume at the fixup handler.  g2 was passed by
			 * reference and may have been rewritten by
			 * search_exception_table(), so write it back.
			 */
			regs->tpc = fixup;
			regs->tnpc = regs->tpc + 4;
			regs->u_regs[UREG_G2] = g2;
			return;
		}
		/* Shit... */
		printk("spitfire_data_access_exception: SFSR[%016lx] "
		       "SFAR[%016lx], going.\n", sfsr, sfar);
		die_if_kernel("Dax", regs);
	}

	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code = SEGV_MAPERR;
	info.si_addr = (void *)sfar;
	info.si_trapno = 0;
	force_sig_info(SIGSEGV, &info, current);
}
177 
spitfire_data_access_exception_tl1(struct pt_regs * regs,unsigned long sfsr,unsigned long sfar)178 void spitfire_data_access_exception_tl1(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
179 {
180 	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
181 	spitfire_data_access_exception(regs, sfsr, sfar);
182 }
183 
184 #ifdef CONFIG_PCI
185 /* This is really pathetic... */
186 extern volatile int pci_poke_in_progress;
187 extern volatile int pci_poke_cpu;
188 extern volatile int pci_poke_faulted;
189 #endif
190 
191 /* When access exceptions happen, we must do this. */
/* Invalidate every L1 I- and D-cache tag via the diagnostic tag-write
 * accessors, then re-enable the caches (LSU_CONTROL_IC/DC) and the
 * I/D MMUs (LSU_CONTROL_IM/DM) in the LSU control register.
 * Spitfire-only: BUG() on any other tlb_type.
 */
static void spitfire_clean_and_reenable_l1_caches(void)
{
	unsigned long va;

	if (tlb_type != spitfire)
		BUG();

	/* Clean 'em. */
	/* NOTE(review): loop covers 2*PAGE_SIZE of tag space in 32-byte
	 * steps -- presumably one pass each over the 16K I- and D-cache
	 * tag arrays; confirm against the UltraSPARC-I/II manual.
	 */
	for (va =  0; va < (PAGE_SIZE << 1); va += 32) {
		spitfire_put_icache_tag(va, 0x0);
		spitfire_put_dcache_tag(va, 0x0);
	}

	/* Re-enable in LSU. */
	__asm__ __volatile__("flush %%g6\n\t"
			     "membar #Sync\n\t"
			     "stxa %0, [%%g0] %1\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "r" (LSU_CONTROL_IC | LSU_CONTROL_DC |
				    LSU_CONTROL_IM | LSU_CONTROL_DM),
			     "i" (ASI_LSU_CONTROL)
			     : "memory");
}
216 
/* Re-arm all E-state error reporting by writing ESTATE_ERR_ALL to the
 * ESTATE error-enable register; the error trap had disabled it.
 */
static void spitfire_enable_estate_errors(void)
{
	__asm__ __volatile__("stxa	%0, [%%g0] %1\n\t"
			     "membar	#Sync"
			     : /* no outputs */
			     : "r" (ESTATE_ERR_ALL),
			       "i" (ASI_ESTATE_ERROR_EN));
}
225 
/* Map an 8-bit UDB ECC syndrome to a code passed to prom_getunumber()
 * for memory-module identification.  NOTE(review): values below 0x48
 * appear to name the failing bit, while 0x48-0x4c look like
 * multi-bit/special cases -- confirm against the UltraSPARC UDB ECC
 * documentation before relying on the encoding.
 */
static char ecc_syndrome_table[] = {
	0x4c, 0x40, 0x41, 0x48, 0x42, 0x48, 0x48, 0x49,
	0x43, 0x48, 0x48, 0x49, 0x48, 0x49, 0x49, 0x4a,
	0x44, 0x48, 0x48, 0x20, 0x48, 0x39, 0x4b, 0x48,
	0x48, 0x25, 0x31, 0x48, 0x28, 0x48, 0x48, 0x2c,
	0x45, 0x48, 0x48, 0x21, 0x48, 0x3d, 0x04, 0x48,
	0x48, 0x4b, 0x35, 0x48, 0x2d, 0x48, 0x48, 0x29,
	0x48, 0x00, 0x01, 0x48, 0x0a, 0x48, 0x48, 0x4b,
	0x0f, 0x48, 0x48, 0x4b, 0x48, 0x49, 0x49, 0x48,
	0x46, 0x48, 0x48, 0x2a, 0x48, 0x3b, 0x27, 0x48,
	0x48, 0x4b, 0x33, 0x48, 0x22, 0x48, 0x48, 0x2e,
	0x48, 0x19, 0x1d, 0x48, 0x1b, 0x4a, 0x48, 0x4b,
	0x1f, 0x48, 0x4a, 0x4b, 0x48, 0x4b, 0x4b, 0x48,
	0x48, 0x4b, 0x24, 0x48, 0x07, 0x48, 0x48, 0x36,
	0x4b, 0x48, 0x48, 0x3e, 0x48, 0x30, 0x38, 0x48,
	0x49, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x16, 0x48,
	0x48, 0x12, 0x4b, 0x48, 0x49, 0x48, 0x48, 0x4b,
	0x47, 0x48, 0x48, 0x2f, 0x48, 0x3f, 0x4b, 0x48,
	0x48, 0x06, 0x37, 0x48, 0x23, 0x48, 0x48, 0x2b,
	0x48, 0x05, 0x4b, 0x48, 0x4b, 0x48, 0x48, 0x32,
	0x26, 0x48, 0x48, 0x3a, 0x48, 0x34, 0x3c, 0x48,
	0x48, 0x11, 0x15, 0x48, 0x13, 0x4a, 0x48, 0x4b,
	0x17, 0x48, 0x4a, 0x4b, 0x48, 0x4b, 0x4b, 0x48,
	0x49, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x1e, 0x48,
	0x48, 0x1a, 0x4b, 0x48, 0x49, 0x48, 0x48, 0x4b,
	0x48, 0x08, 0x0d, 0x48, 0x02, 0x48, 0x48, 0x49,
	0x03, 0x48, 0x48, 0x49, 0x48, 0x4b, 0x4b, 0x48,
	0x49, 0x48, 0x48, 0x49, 0x48, 0x4b, 0x10, 0x48,
	0x48, 0x14, 0x4b, 0x48, 0x4b, 0x48, 0x48, 0x4b,
	0x49, 0x48, 0x48, 0x49, 0x48, 0x4b, 0x18, 0x48,
	0x48, 0x1c, 0x4b, 0x48, 0x4b, 0x48, 0x48, 0x4b,
	0x4a, 0x0c, 0x09, 0x48, 0x0e, 0x48, 0x48, 0x4b,
	0x0b, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x4b, 0x4a
};
260 
/* Fallback memory-module name when prom_getunumber() fails. */
static char *syndrome_unknown = "<Unknown>";
262 
spitfire_log_udb_syndrome(unsigned long afar,unsigned long udbh,unsigned long udbl,unsigned long bit)263 static void spitfire_log_udb_syndrome(unsigned long afar, unsigned long udbh, unsigned long udbl, unsigned long bit)
264 {
265 	unsigned short scode;
266 	char memmod_str[64], *p;
267 
268 	if (udbl & bit) {
269 		scode = ecc_syndrome_table[udbl & 0xff];
270 		if (prom_getunumber(scode, afar,
271 				    memmod_str, sizeof(memmod_str)) == -1)
272 			p = syndrome_unknown;
273 		else
274 			p = memmod_str;
275 		printk(KERN_WARNING "CPU[%d]: UDBL Syndrome[%x] "
276 		       "Memory Module \"%s\"\n",
277 		       smp_processor_id(), scode, p);
278 	}
279 
280 	if (udbh & bit) {
281 		scode = ecc_syndrome_table[udbh & 0xff];
282 		if (prom_getunumber(scode, afar,
283 				    memmod_str, sizeof(memmod_str)) == -1)
284 			p = syndrome_unknown;
285 		else
286 			p = memmod_str;
287 		printk(KERN_WARNING "CPU[%d]: UDBH Syndrome[%x] "
288 		       "Memory Module \"%s\"\n",
289 		       smp_processor_id(), scode, p);
290 	}
291 
292 }
293 
/* Log a Correctable ECC Error (CEE): print the fault registers, name
 * the offending memory module(s), and re-arm error reporting.
 */
static void spitfire_cee_log(unsigned long afsr, unsigned long afar, unsigned long udbh, unsigned long udbl, int tl1, struct pt_regs *regs)
{

	printk(KERN_WARNING "CPU[%d]: Correctable ECC Error "
	       "AFSR[%lx] AFAR[%016lx] UDBL[%lx] UDBH[%lx] TL>1[%d]\n",
	       smp_processor_id(), afsr, afar, udbl, udbh, tl1);

	spitfire_log_udb_syndrome(afar, udbh, udbl, UDBE_CE);

	/* The Correctable ECC Error trap does not disable I/D caches.  So
	 * we only have to restore the ESTATE Error Enable register.
	 */
	spitfire_enable_estate_errors();
}
308 
spitfire_ue_log(unsigned long afsr,unsigned long afar,unsigned long udbh,unsigned long udbl,unsigned long tt,int tl1,struct pt_regs * regs)309 static void spitfire_ue_log(unsigned long afsr, unsigned long afar, unsigned long udbh, unsigned long udbl, unsigned long tt, int tl1, struct pt_regs *regs)
310 {
311 	siginfo_t info;
312 
313 	printk(KERN_WARNING "CPU[%d]: Uncorrectable Error AFSR[%lx] "
314 	       "AFAR[%lx] UDBL[%lx] UDBH[%ld] TT[%lx] TL>1[%d]\n",
315 	       smp_processor_id(), afsr, afar, udbl, udbh, tt, tl1);
316 
317 	/* XXX add more human friendly logging of the error status
318 	 * XXX as is implemented for cheetah
319 	 */
320 
321 	spitfire_log_udb_syndrome(afar, udbh, udbl, UDBE_UE);
322 
323 	if (regs->tstate & TSTATE_PRIV) {
324 		if (tl1)
325 			dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
326 		die_if_kernel("UE", regs);
327 	}
328 
329 	/* XXX need more intelligent processing here, such as is implemented
330 	 * XXX for cheetah errors, in fact if the E-cache still holds the
331 	 * XXX line with bad parity this will loop
332 	 */
333 
334 	spitfire_clean_and_reenable_l1_caches();
335 	spitfire_enable_estate_errors();
336 
337 	if (current->thread.flags & SPARC_FLAG_32BIT) {
338 		regs->tpc &= 0xffffffff;
339 		regs->tnpc &= 0xffffffff;
340 	}
341 	info.si_signo = SIGBUS;
342 	info.si_errno = 0;
343 	info.si_code = BUS_OBJERR;
344 	info.si_addr = (void *)0;
345 	info.si_trapno = 0;
346 	force_sig_info(SIGBUS, &info, current);
347 }
348 
/* Main spitfire async access-error handler.  'status_encoded' packs
 * the AFSR, trap type, TL>1 flag, and both UDB error registers as
 * captured by the trap entry code; decode them and dispatch to the
 * UE and/or CEE loggers.
 */
void spitfire_access_error(struct pt_regs *regs, unsigned long status_encoded, unsigned long afar)
{
	unsigned long afsr, tt, udbh, udbl;
	int tl1;

	afsr = (status_encoded & SFSTAT_AFSR_MASK) >> SFSTAT_AFSR_SHIFT;
	tt = (status_encoded & SFSTAT_TRAP_TYPE) >> SFSTAT_TRAP_TYPE_SHIFT;
	tl1 = (status_encoded & SFSTAT_TL_GT_ONE) ? 1 : 0;
	udbl = (status_encoded & SFSTAT_UDBL_MASK) >> SFSTAT_UDBL_SHIFT;
	udbh = (status_encoded & SFSTAT_UDBH_MASK) >> SFSTAT_UDBH_SHIFT;

#ifdef CONFIG_PCI
	/* A data access error during a deliberate PCI config-space
	 * poke is expected: flag the fault, skip the faulting
	 * instruction, and carry on.
	 */
	if (tt == TRAP_TYPE_DAE &&
	    pci_poke_in_progress && pci_poke_cpu == smp_processor_id()) {
		spitfire_clean_and_reenable_l1_caches();
		spitfire_enable_estate_errors();

		pci_poke_faulted = 1;
		regs->tnpc = regs->tpc + 4;
		return;
	}
#endif

	/* May not return if this was a kernel-mode UE. */
	if (afsr & SFAFSR_UE)
		spitfire_ue_log(afsr, afar, udbh, udbl, tt, tl1, regs);

	if (tt == TRAP_TYPE_CEE) {
		/* Handle the case where we took a CEE trap, but ACK'd
		 * only the UE state in the UDB error registers.
		 */
		if (afsr & SFAFSR_UE) {
			/* Write the CE bits back to each UDB error
			 * register (offsets 0x0 = high, 0x18 = low) to
			 * acknowledge the correctable state too.
			 */
			if (udbh & UDBE_CE) {
				__asm__ __volatile__(
					"stxa	%0, [%1] %2\n\t"
					"membar	#Sync"
					: /* no outputs */
					: "r" (udbh & UDBE_CE),
					  "r" (0x0), "i" (ASI_UDB_ERROR_W));
			}
			if (udbl & UDBE_CE) {
				__asm__ __volatile__(
					"stxa	%0, [%1] %2\n\t"
					"membar	#Sync"
					: /* no outputs */
					: "r" (udbl & UDBE_CE),
					  "r" (0x18), "i" (ASI_UDB_ERROR_W));
			}
		}

		spitfire_cee_log(afsr, afar, udbh, udbl, tl1, regs);
	}
}
401 
402 /* Cheetah error trap handling. */
/* E-cache flush area parameters, set up once by
 * cheetah_ecache_flush_init() below.
 */
static unsigned long ecache_flush_physbase;	/* phys base of contiguous flush area */
static unsigned long ecache_flush_linesize;	/* smallest E-cache line size found */
static unsigned long ecache_flush_size;		/* 2 * largest E-cache size found */
406 
407 /* WARNING: The error trap handlers in assembly know the precise
408  *	    layout of the following structure.
409  *
410  * C-level handlers below use this information to log the error
411  * and then determine how to recover (if possible).
412  */
struct cheetah_err_info {
/*0x00*/u64 afsr;
/*0x08*/u64 afar;

	/* D-cache state */
/*0x10*/u64 dcache_data[4];	/* The actual data	*/
/*0x30*/u64 dcache_index;	/* D-cache index	*/
/*0x38*/u64 dcache_tag;		/* D-cache tag/valid	*/
/*0x40*/u64 dcache_utag;	/* D-cache microtag	*/
/*0x48*/u64 dcache_stag;	/* D-cache snooptag	*/

	/* I-cache state */
/*0x50*/u64 icache_data[8];	/* The actual insns + predecode	*/
/*0x90*/u64 icache_index;	/* I-cache index	*/
/*0x98*/u64 icache_tag;		/* I-cache phys tag	*/
/*0xa0*/u64 icache_utag;	/* I-cache microtag	*/
/*0xa8*/u64 icache_stag;	/* I-cache snooptag	*/
/*0xb0*/u64 icache_upper;	/* I-cache upper-tag	*/
/*0xb8*/u64 icache_lower;	/* I-cache lower-tag	*/

	/* E-cache state */
/*0xc0*/u64 ecache_data[4];	/* 32 bytes from staging registers */
/*0xe0*/u64 ecache_index;	/* E-cache index	*/
/*0xe8*/u64 ecache_tag;		/* E-cache tag/state	*/

/*0xf0*/u64 __pad[32 - 30];	/* pad entry to 32 u64s (0x100 bytes) */
};
/* Sentinel AFSR value marking a log entry as unused/invalid. */
#define CHAFSR_INVALID		((u64)-1L)
441 
442 /* This table is ordered in priority of errors and matches the
443  * AFAR overwrite policy as well.
444  */
445 
/* One row per AFSR error bit: the bit mask and its human-readable
 * description.  Tables below are terminated by a { 0, NULL } row.
 */
struct afsr_error_table {
	unsigned long mask;
	const char *name;
};
450 
/* AFSR error descriptions and priority-ordered table for plain
 * Cheetah (see the AFAR-overwrite-policy comment above).
 */
static const char CHAFSR_PERR_msg[] =
	"System interface protocol error";
static const char CHAFSR_IERR_msg[] =
	"Internal processor error";
static const char CHAFSR_ISAP_msg[] =
	"System request parity error on incoming addresss";
static const char CHAFSR_UCU_msg[] =
	"Uncorrectable E-cache ECC error for ifetch/data";
static const char CHAFSR_UCC_msg[] =
	"SW Correctable E-cache ECC error for ifetch/data";
static const char CHAFSR_UE_msg[] =
	"Uncorrectable system bus data ECC error for read";
static const char CHAFSR_EDU_msg[] =
	"Uncorrectable E-cache ECC error for stmerge/blkld";
static const char CHAFSR_EMU_msg[] =
	"Uncorrectable system bus MTAG error";
static const char CHAFSR_WDU_msg[] =
	"Uncorrectable E-cache ECC error for writeback";
static const char CHAFSR_CPU_msg[] =
	"Uncorrectable ECC error for copyout";
static const char CHAFSR_CE_msg[] =
	"HW corrected system bus data ECC error for read";
static const char CHAFSR_EDC_msg[] =
	"HW corrected E-cache ECC error for stmerge/blkld";
static const char CHAFSR_EMC_msg[] =
	"HW corrected system bus MTAG ECC error";
static const char CHAFSR_WDC_msg[] =
	"HW corrected E-cache ECC error for writeback";
static const char CHAFSR_CPC_msg[] =
	"HW corrected ECC error for copyout";
static const char CHAFSR_TO_msg[] =
	"Unmapped error from system bus";
static const char CHAFSR_BERR_msg[] =
	"Bus error response from system bus";
static const char CHAFSR_IVC_msg[] =
	"HW corrected system bus data ECC error for ivec read";
static const char CHAFSR_IVU_msg[] =
	"Uncorrectable system bus data ECC error for ivec read";
static struct afsr_error_table __cheetah_error_table[] = {
	{	CHAFSR_PERR,	CHAFSR_PERR_msg		},
	{	CHAFSR_IERR,	CHAFSR_IERR_msg		},
	{	CHAFSR_ISAP,	CHAFSR_ISAP_msg		},
	{	CHAFSR_UCU,	CHAFSR_UCU_msg		},
	{	CHAFSR_UCC,	CHAFSR_UCC_msg		},
	{	CHAFSR_UE,	CHAFSR_UE_msg		},
	{	CHAFSR_EDU,	CHAFSR_EDU_msg		},
	{	CHAFSR_EMU,	CHAFSR_EMU_msg		},
	{	CHAFSR_WDU,	CHAFSR_WDU_msg		},
	{	CHAFSR_CPU,	CHAFSR_CPU_msg		},
	{	CHAFSR_CE,	CHAFSR_CE_msg		},
	{	CHAFSR_EDC,	CHAFSR_EDC_msg		},
	{	CHAFSR_EMC,	CHAFSR_EMC_msg		},
	{	CHAFSR_WDC,	CHAFSR_WDC_msg		},
	{	CHAFSR_CPC,	CHAFSR_CPC_msg		},
	{	CHAFSR_TO,	CHAFSR_TO_msg		},
	{	CHAFSR_BERR,	CHAFSR_BERR_msg		},
	/* These two do not update the AFAR. */
	{	CHAFSR_IVC,	CHAFSR_IVC_msg		},
	{	CHAFSR_IVU,	CHAFSR_IVU_msg		},
	{	0,		NULL			},
};
/* Additional Cheetah-plus AFSR error descriptions, and the
 * priority-ordered table for Cheetah-plus (plain Cheetah bits plus
 * the CHPAFSR_* extensions).
 */
static const char CHPAFSR_DTO_msg[] =
	"System bus unmapped error for prefetch/storequeue-read";
static const char CHPAFSR_DBERR_msg[] =
	"System bus error for prefetch/storequeue-read";
static const char CHPAFSR_THCE_msg[] =
	"Hardware corrected E-cache Tag ECC error";
static const char CHPAFSR_TSCE_msg[] =
	"SW handled correctable E-cache Tag ECC error";
static const char CHPAFSR_TUE_msg[] =
	"Uncorrectable E-cache Tag ECC error";
static const char CHPAFSR_DUE_msg[] =
	"System bus uncorrectable data ECC error due to prefetch/store-fill";
static struct afsr_error_table __cheetah_plus_error_table[] = {
	{	CHAFSR_PERR,	CHAFSR_PERR_msg		},
	{	CHAFSR_IERR,	CHAFSR_IERR_msg		},
	{	CHAFSR_ISAP,	CHAFSR_ISAP_msg		},
	{	CHAFSR_UCU,	CHAFSR_UCU_msg		},
	{	CHAFSR_UCC,	CHAFSR_UCC_msg		},
	{	CHAFSR_UE,	CHAFSR_UE_msg		},
	{	CHAFSR_EDU,	CHAFSR_EDU_msg		},
	{	CHAFSR_EMU,	CHAFSR_EMU_msg		},
	{	CHAFSR_WDU,	CHAFSR_WDU_msg		},
	{	CHAFSR_CPU,	CHAFSR_CPU_msg		},
	{	CHAFSR_CE,	CHAFSR_CE_msg		},
	{	CHAFSR_EDC,	CHAFSR_EDC_msg		},
	{	CHAFSR_EMC,	CHAFSR_EMC_msg		},
	{	CHAFSR_WDC,	CHAFSR_WDC_msg		},
	{	CHAFSR_CPC,	CHAFSR_CPC_msg		},
	{	CHAFSR_TO,	CHAFSR_TO_msg		},
	{	CHAFSR_BERR,	CHAFSR_BERR_msg		},
	{	CHPAFSR_DTO,	CHPAFSR_DTO_msg		},
	{	CHPAFSR_DBERR,	CHPAFSR_DBERR_msg	},
	{	CHPAFSR_THCE,	CHPAFSR_THCE_msg	},
	{	CHPAFSR_TSCE,	CHPAFSR_TSCE_msg	},
	{	CHPAFSR_TUE,	CHPAFSR_TUE_msg		},
	{	CHPAFSR_DUE,	CHPAFSR_DUE_msg		},
	/* These two do not update the AFAR. */
	{	CHAFSR_IVC,	CHAFSR_IVC_msg		},
	{	CHAFSR_IVU,	CHAFSR_IVU_msg		},
	{	0,		NULL			},
};
/* Jalapeno-specific AFSR error descriptions and its priority-ordered
 * table (JBUS-based systems; mixes JPAFSR_* and shared CHAFSR_* bits).
 */
static const char JPAFSR_JETO_msg[] =
	"System interface protocol error, hw timeout caused";
static const char JPAFSR_SCE_msg[] =
	"Parity error on system snoop results";
static const char JPAFSR_JEIC_msg[] =
	"System interface protocol error, illegal command detected";
static const char JPAFSR_JEIT_msg[] =
	"System interface protocol error, illegal ADTYPE detected";
static const char JPAFSR_OM_msg[] =
	"Out of range memory error has occurred";
static const char JPAFSR_ETP_msg[] =
	"Parity error on L2 cache tag SRAM";
static const char JPAFSR_UMS_msg[] =
	"Error due to unsupported store";
static const char JPAFSR_RUE_msg[] =
	"Uncorrectable ECC error from remote cache/memory";
static const char JPAFSR_RCE_msg[] =
	"Correctable ECC error from remote cache/memory";
static const char JPAFSR_BP_msg[] =
	"JBUS parity error on returned read data";
static const char JPAFSR_WBP_msg[] =
	"JBUS parity error on data for writeback or block store";
static const char JPAFSR_FRC_msg[] =
	"Foreign read to DRAM incurring correctable ECC error";
static const char JPAFSR_FRU_msg[] =
	"Foreign read to DRAM incurring uncorrectable ECC error";
static struct afsr_error_table __jalapeno_error_table[] = {
	{	JPAFSR_JETO,	JPAFSR_JETO_msg		},
	{	JPAFSR_SCE,	JPAFSR_SCE_msg		},
	{	JPAFSR_JEIC,	JPAFSR_JEIC_msg		},
	{	JPAFSR_JEIT,	JPAFSR_JEIT_msg		},
	{	CHAFSR_PERR,	CHAFSR_PERR_msg		},
	{	CHAFSR_IERR,	CHAFSR_IERR_msg		},
	{	CHAFSR_ISAP,	CHAFSR_ISAP_msg		},
	{	CHAFSR_UCU,	CHAFSR_UCU_msg		},
	{	CHAFSR_UCC,	CHAFSR_UCC_msg		},
	{	CHAFSR_UE,	CHAFSR_UE_msg		},
	{	CHAFSR_EDU,	CHAFSR_EDU_msg		},
	{	JPAFSR_OM,	JPAFSR_OM_msg		},
	{	CHAFSR_WDU,	CHAFSR_WDU_msg		},
	{	CHAFSR_CPU,	CHAFSR_CPU_msg		},
	{	CHAFSR_CE,	CHAFSR_CE_msg		},
	{	CHAFSR_EDC,	CHAFSR_EDC_msg		},
	{	JPAFSR_ETP,	JPAFSR_ETP_msg		},
	{	CHAFSR_WDC,	CHAFSR_WDC_msg		},
	{	CHAFSR_CPC,	CHAFSR_CPC_msg		},
	{	CHAFSR_TO,	CHAFSR_TO_msg		},
	{	CHAFSR_BERR,	CHAFSR_BERR_msg		},
	{	JPAFSR_UMS,	JPAFSR_UMS_msg		},
	{	JPAFSR_RUE,	JPAFSR_RUE_msg		},
	{	JPAFSR_RCE,	JPAFSR_RCE_msg		},
	{	JPAFSR_BP,	JPAFSR_BP_msg		},
	{	JPAFSR_WBP,	JPAFSR_WBP_msg		},
	{	JPAFSR_FRC,	JPAFSR_FRC_msg		},
	{	JPAFSR_FRU,	JPAFSR_FRU_msg		},
	/* This one does not update the AFAR (the IVC entry present in
	 * the other tables is absent here).
	 */
	{	CHAFSR_IVU,	CHAFSR_IVU_msg		},
	{	0,		NULL			},
};
static struct afsr_error_table *cheetah_error_table;	/* selected at boot by cpu type */
static unsigned long cheetah_afsr_errors;		/* valid AFSR error bits for this cpu */

/* This is allocated at boot time based upon the largest hardware
 * cpu ID in the system.  We allocate two entries per cpu, one for
 * TL==0 logging and one for TL >= 1 logging.
 */
struct cheetah_err_info *cheetah_error_log;
620 
cheetah_get_error_log(unsigned long afsr)621 static __inline__ struct cheetah_err_info *cheetah_get_error_log(unsigned long afsr)
622 {
623 	struct cheetah_err_info *p;
624 	int cpu = smp_processor_id();
625 
626 	if (!cheetah_error_log)
627 		return NULL;
628 
629 	p = cheetah_error_log + (cpu * 2);
630 	if ((afsr & CHAFSR_TL1) != 0UL)
631 		p++;
632 
633 	return p;
634 }
635 
636 extern unsigned int tl0_icpe[], tl1_icpe[];
637 extern unsigned int tl0_dcpe[], tl1_dcpe[];
638 extern unsigned int tl0_fecc[], tl1_fecc[];
639 extern unsigned int tl0_cee[], tl1_cee[];
640 extern unsigned int tl0_iae[], tl1_iae[];
641 extern unsigned int tl0_dae[], tl1_dae[];
642 extern unsigned int cheetah_plus_icpe_trap_vector[], cheetah_plus_icpe_trap_vector_tl1[];
643 extern unsigned int cheetah_plus_dcpe_trap_vector[], cheetah_plus_dcpe_trap_vector_tl1[];
644 extern unsigned int cheetah_fecc_trap_vector[], cheetah_fecc_trap_vector_tl1[];
645 extern unsigned int cheetah_cee_trap_vector[], cheetah_cee_trap_vector_tl1[];
646 extern unsigned int cheetah_deferred_trap_vector[], cheetah_deferred_trap_vector_tl1[];
647 
/* Boot-time setup for cheetah error handling:
 *  1) probe cpu nodes for E-cache size/linesize and pick a physically
 *     contiguous flush area;
 *  2) allocate and invalidate the per-cpu error-log scoreboard;
 *  3) select the AFSR error table by cpu version;
 *  4) patch the trap table with the cheetah error vectors.
 * Halts via prom on any failure -- this must succeed to boot.
 */
void cheetah_ecache_flush_init(void)
{
	unsigned long largest_size, smallest_linesize, order, ver;
	char type[16];
	int node, highest_cpu, i;

	/* Scan all cpu device tree nodes, note two values:
	 * 1) largest E-cache size
	 * 2) smallest E-cache line size
	 */
	largest_size = 0UL;
	smallest_linesize = ~0UL;
	node = prom_getchild(prom_root_node);
	/* NOTE(review): prom_getsibling() is applied before the first
	 * iteration, so the root's first child is never examined --
	 * harmless only if that node is never a cpu; confirm.
	 */
	while ((node = prom_getsibling(node)) != 0) {
		prom_getstring(node, "device_type", type, sizeof(type));
		if (!strcmp(type, "cpu")) {
			unsigned long val;

			/* Defaults: 2MB E-cache, 64-byte lines. */
			val = prom_getintdefault(node, "ecache-size",
						 (2 * 1024 * 1024));
			if (val > largest_size)
				largest_size = val;
			val = prom_getintdefault(node, "ecache-line-size", 64);
			if (val < smallest_linesize)
				smallest_linesize = val;
		}
	}
	if (largest_size == 0UL || smallest_linesize == ~0UL) {
		prom_printf("cheetah_ecache_flush_init: Cannot probe cpu E-cache "
			    "parameters.\n");
		prom_halt();
	}

	/* Flush area must be twice the largest E-cache (displacement
	 * flush of both halves -- see cheetah_flush_ecache_line()).
	 */
	ecache_flush_size = (2 * largest_size);
	ecache_flush_linesize = smallest_linesize;

	/* Discover a physically contiguous chunk of physical
	 * memory in 'sp_banks' of size ecache_flush_size calculated
	 * above.  Store the physical base of this area at
	 * ecache_flush_physbase.
	 */
	for (node = 0; ; node++) {
		if (sp_banks[node].num_bytes == 0)
			break;
		if (sp_banks[node].num_bytes >= ecache_flush_size) {
			ecache_flush_physbase = sp_banks[node].base_addr;
			break;
		}
	}

	/* Note: Zero would be a valid value of ecache_flush_physbase so
	 * don't use that as the success test. :-)
	 */
	if (sp_banks[node].num_bytes == 0) {
		/* NOTE(review): "%d" with an unsigned long argument --
		 * works only because prom_printf is not type-checked;
		 * should be "%lu" if ever touched.
		 */
		prom_printf("cheetah_ecache_flush_init: Cannot find %d byte "
			    "contiguous physical memory.\n", ecache_flush_size);
		prom_halt();
	}

	/* Now allocate error trap reporting scoreboard. */
	highest_cpu = 0;
#ifdef CONFIG_SMP
	/* NOTE(review): cpu_present_map used as a plain bitmask --
	 * assumes NR_CPUS <= BITS_PER_LONG on this architecture.
	 */
	for (i = 0; i < NR_CPUS; i++) {
		if ((1UL << i) & cpu_present_map)
			highest_cpu = i;
	}
#endif
	highest_cpu++;
	/* 'node' reused: total scoreboard size in bytes. */
	node = highest_cpu * (2 * sizeof(struct cheetah_err_info));
	for (order = 0; order < MAX_ORDER; order++) {
		if ((PAGE_SIZE << order) >= node)
			break;
	}
	cheetah_error_log = (struct cheetah_err_info *)
		__get_free_pages(GFP_KERNEL, order);
	if (!cheetah_error_log) {
		prom_printf("cheetah_ecache_flush_init: Failed to allocate "
			    "error logging scoreboard (%d bytes).\n", node);
		prom_halt();
	}
	memset(cheetah_error_log, 0, PAGE_SIZE << order);

	/* Mark all AFSRs as invalid so that the trap handler will
	 * log new new information there.
	 */
	for (i = 0; i < 2 * highest_cpu; i++)
		cheetah_error_log[i].afsr = CHAFSR_INVALID;

	/* Select error table by implementation (upper half of %ver). */
	__asm__ ("rdpr %%ver, %0" : "=r" (ver));
	if ((ver >> 32) == 0x003e0016) {
		cheetah_error_table = &__jalapeno_error_table[0];
		cheetah_afsr_errors = JPAFSR_ERRORS;
	} else if ((ver >> 32) == 0x003e0015) {
		cheetah_error_table = &__cheetah_plus_error_table[0];
		cheetah_afsr_errors = CHPAFSR_ERRORS;
	} else {
		cheetah_error_table = &__cheetah_error_table[0];
		cheetah_afsr_errors = CHAFSR_ERRORS;
	}

	/* Now patch trap tables.  Each vector is 8 instructions
	 * (8 * 4 bytes).
	 */
	memcpy(tl0_fecc, cheetah_fecc_trap_vector, (8 * 4));
	memcpy(tl1_fecc, cheetah_fecc_trap_vector_tl1, (8 * 4));
	memcpy(tl0_cee, cheetah_cee_trap_vector, (8 * 4));
	memcpy(tl1_cee, cheetah_cee_trap_vector_tl1, (8 * 4));
	memcpy(tl0_iae, cheetah_deferred_trap_vector, (8 * 4));
	memcpy(tl1_iae, cheetah_deferred_trap_vector_tl1, (8 * 4));
	memcpy(tl0_dae, cheetah_deferred_trap_vector, (8 * 4));
	memcpy(tl1_dae, cheetah_deferred_trap_vector_tl1, (8 * 4));
	if (tlb_type == cheetah_plus) {
		memcpy(tl0_dcpe, cheetah_plus_dcpe_trap_vector, (8 * 4));
		memcpy(tl1_dcpe, cheetah_plus_dcpe_trap_vector_tl1, (8 * 4));
		memcpy(tl0_icpe, cheetah_plus_icpe_trap_vector, (8 * 4));
		memcpy(tl1_icpe, cheetah_plus_icpe_trap_vector_tl1, (8 * 4));
	}
	/* Make the patched trap table visible to instruction fetch. */
	flushi(PAGE_OFFSET);
}
765 
/* Displacement-flush the entire E-cache: load (with ASI_PHYS_USE_EC)
 * every line of the pre-computed physically contiguous flush area,
 * walking downward from the end in linesize steps.
 */
static void cheetah_flush_ecache(void)
{
	unsigned long flush_base = ecache_flush_physbase;
	unsigned long flush_linesize = ecache_flush_linesize;
	unsigned long flush_size = ecache_flush_size;

	__asm__ __volatile__("1: subcc	%0, %4, %0\n\t"
			     "   bne,pt	%%xcc, 1b\n\t"
			     "    ldxa	[%2 + %0] %3, %%g0\n\t"
			     : "=&r" (flush_size)
			     : "0" (flush_size), "r" (flush_base),
			       "i" (ASI_PHYS_USE_EC), "r" (flush_linesize))
;
}
779 
/* Displacement-flush the single E-cache line holding 'physaddr':
 * map the address into the flush area modulo half its size, and load
 * both that address and its alias one half-size away so whichever way
 * holds the line is displaced.
 */
static void cheetah_flush_ecache_line(unsigned long physaddr)
{
	unsigned long alias;

	physaddr &= ~(8UL - 1UL);	/* 8-byte align for ldxa */
	physaddr = (ecache_flush_physbase +
		    (physaddr & ((ecache_flush_size>>1UL) - 1UL)));
	alias = physaddr + (ecache_flush_size >> 1UL);
	__asm__ __volatile__("ldxa [%0] %2, %%g0\n\t"
			     "ldxa [%1] %2, %%g0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "r" (physaddr), "r" (alias),
			       "i" (ASI_PHYS_USE_EC));
}
795 
796 /* Unfortunately, the diagnostic access to the I-cache tags we need to
797  * use to clear the thing interferes with I-cache coherency transactions.
798  *
799  * So we must only flush the I-cache when it is disabled.
800  */
/* Invalidate the whole I-cache by zeroing every tag via diagnostic
 * ASI_IC_TAG stores.  Caller must have the I-cache disabled (see the
 * coherency comment above) -- use cheetah_flush_icache() instead.
 * NOTE(review): covers 32K of tag space in 32-byte steps; the
 * (2 << 3) in the address presumably selects the valid-bit tag field
 * -- confirm against the Cheetah diagnostic-access documentation.
 */
static void __cheetah_flush_icache(void)
{
	unsigned long i;

	/* Clear the valid bits in all the tags. */
	for (i = 0; i < (1 << 15); i += (1 << 5)) {
		__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "r" (i | (2 << 3)), "i" (ASI_IC_TAG));
	}
}
813 
/* Safely flush the I-cache: disable it via the DCU control register
 * (required -- tag diagnostics interfere with coherency while the
 * cache is enabled), invalidate all tags, then restore the previous
 * DCU value.
 */
static void cheetah_flush_icache(void)
{
	unsigned long dcu_save;

	/* Save current DCU, disable I-cache. */
	/* NOTE(review): "or %0, %2, %%g1" sets DCU_IC rather than
	 * clearing it -- matches the original code; verify the DCU_IC
	 * bit polarity against the DCU control register definition.
	 */
	__asm__ __volatile__("ldxa [%%g0] %1, %0\n\t"
			     "or %0, %2, %%g1\n\t"
			     "stxa %%g1, [%%g0] %1\n\t"
			     "membar #Sync"
			     : "=r" (dcu_save)
			     : "i" (ASI_DCU_CONTROL_REG), "i" (DCU_IC)
			     : "g1");

	__cheetah_flush_icache();

	/* Restore DCU register */
	__asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "r" (dcu_save), "i" (ASI_DCU_CONTROL_REG));
}
835 
/* Invalidate the whole D-cache by zeroing every tag via diagnostic
 * ASI_DCACHE_TAG stores (64K of tag space, 32-byte line steps).
 */
static void cheetah_flush_dcache(void)
{
	unsigned long i;

	for (i = 0; i < (1 << 16); i += (1 << 5)) {
		__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "r" (i), "i" (ASI_DCACHE_TAG));
	}
}
847 
848 /* In order to make the even parity correct we must do two things.
849  * First, we clear DC_data_parity and set DC_utag to an appropriate value.
850  * Next, we clear out all 32-bytes of data for that line.  Data of
851  * all-zero + tag parity value of zero == correct parity.
852  */
/* See the parity recipe in the comment above: for every 32-byte
 * D-cache line, write a benign microtag, then zero all four 8-byte
 * data words so data and tag parity come out correct.
 */
static void cheetah_plus_zap_dcache_parity(void)
{
	unsigned long i;

	for (i = 0; i < (1 << 16); i += (1 << 5)) {
		/* Utag value derived from the line index (i >> 14). */
		unsigned long tag = (i >> 14);
		unsigned long j;

		__asm__ __volatile__("membar	#Sync\n\t"
				     "stxa	%0, [%1] %2\n\t"
				     "membar	#Sync"
				     : /* no outputs */
				     : "r" (tag), "r" (i),
				       "i" (ASI_DCACHE_UTAG));
		/* Zero the line's four 8-byte data words. */
		for (j = i; j < i + (1 << 5); j += (1 << 3))
			__asm__ __volatile__("membar	#Sync\n\t"
					     "stxa	%%g0, [%0] %1\n\t"
					     "membar	#Sync"
					     : /* no outputs */
					     : "r" (j), "i" (ASI_DCACHE_DATA));
	}
}
875 
/* Conversion tables used to frob Cheetah AFSR syndrome values into
 * something palatable to the memory controller driver get_unumber
 * routine.
 */
/* Values 0-127 in the tables below name the single bit in error.
 * The codes here are out-of-band markers: C0-C8 presumably name ECC
 * check bits, M/M2/M3/M4 multi-bit errors, MT*/MTC* mtag syndromes,
 * and NONE "no error".  (Inferred from usage; confirm against the
 * memory controller driver's get_unumber routine.)
 */
#define MT0	137
#define MT1	138
#define MT2	139
#define NONE	254
#define MTC0	140
#define MTC1	141
#define MTC2	142
#define MTC3	143
#define C0	128
#define C1	129
#define C2	130
#define C3	131
#define C4	132
#define C5	133
#define C6	134
#define C7	135
#define C8	136
#define M2	144
#define M3	145
#define M4	146
#define M	147
/* 512-entry table indexed by the AFSR E_SYND field. */
static unsigned char cheetah_ecc_syntab[] = {
/*00*/NONE, C0, C1, M2, C2, M2, M3, 47, C3, M2, M2, 53, M2, 41, 29, M,
/*01*/C4, M, M, 50, M2, 38, 25, M2, M2, 33, 24, M2, 11, M, M2, 16,
/*02*/C5, M, M, 46, M2, 37, 19, M2, M, 31, 32, M, 7, M2, M2, 10,
/*03*/M2, 40, 13, M2, 59, M, M2, 66, M, M2, M2, 0, M2, 67, 71, M,
/*04*/C6, M, M, 43, M, 36, 18, M, M2, 49, 15, M, 63, M2, M2, 6,
/*05*/M2, 44, 28, M2, M, M2, M2, 52, 68, M2, M2, 62, M2, M3, M3, M4,
/*06*/M2, 26, 106, M2, 64, M, M2, 2, 120, M, M2, M3, M, M3, M3, M4,
/*07*/116, M2, M2, M3, M2, M3, M, M4, M2, 58, 54, M2, M, M4, M4, M3,
/*08*/C7, M2, M, 42, M, 35, 17, M2, M, 45, 14, M2, 21, M2, M2, 5,
/*09*/M, 27, M, M, 99, M, M, 3, 114, M2, M2, 20, M2, M3, M3, M,
/*0a*/M2, 23, 113, M2, 112, M2, M, 51, 95, M, M2, M3, M2, M3, M3, M2,
/*0b*/103, M, M2, M3, M2, M3, M3, M4, M2, 48, M, M, 73, M2, M, M3,
/*0c*/M2, 22, 110, M2, 109, M2, M, 9, 108, M2, M, M3, M2, M3, M3, M,
/*0d*/102, M2, M, M, M2, M3, M3, M, M2, M3, M3, M2, M, M4, M, M3,
/*0e*/98, M, M2, M3, M2, M, M3, M4, M2, M3, M3, M4, M3, M, M, M,
/*0f*/M2, M3, M3, M, M3, M, M, M, 56, M4, M, M3, M4, M, M, M,
/*10*/C8, M, M2, 39, M, 34, 105, M2, M, 30, 104, M, 101, M, M, 4,
/*11*/M, M, 100, M, 83, M, M2, 12, 87, M, M, 57, M2, M, M3, M,
/*12*/M2, 97, 82, M2, 78, M2, M2, 1, 96, M, M, M, M, M, M3, M2,
/*13*/94, M, M2, M3, M2, M, M3, M, M2, M, 79, M, 69, M, M4, M,
/*14*/M2, 93, 92, M, 91, M, M2, 8, 90, M2, M2, M, M, M, M, M4,
/*15*/89, M, M, M3, M2, M3, M3, M, M, M, M3, M2, M3, M2, M, M3,
/*16*/86, M, M2, M3, M2, M, M3, M, M2, M, M3, M, M3, M, M, M3,
/*17*/M, M, M3, M2, M3, M2, M4, M, 60, M, M2, M3, M4, M, M, M2,
/*18*/M2, 88, 85, M2, 84, M, M2, 55, 81, M2, M2, M3, M2, M3, M3, M4,
/*19*/77, M, M, M, M2, M3, M, M, M2, M3, M3, M4, M3, M2, M, M,
/*1a*/74, M, M2, M3, M, M, M3, M, M, M, M3, M, M3, M, M4, M3,
/*1b*/M2, 70, 107, M4, 65, M2, M2, M, 127, M, M, M, M2, M3, M3, M,
/*1c*/80, M2, M2, 72, M, 119, 118, M, M2, 126, 76, M, 125, M, M4, M3,
/*1d*/M2, 115, 124, M, 75, M, M, M3, 61, M, M4, M, M4, M, M, M,
/*1e*/M, 123, 122, M4, 121, M4, M, M3, 117, M2, M2, M3, M4, M3, M, M,
/*1f*/111, M, M, M, M4, M3, M3, M, M, M, M3, M, M3, M2, M, M
};
/* 16-entry table indexed by the AFSR M_SYND field. */
static unsigned char cheetah_mtag_syntab[] = {
       NONE, MTC0,
       MTC1, NONE,
       MTC2, NONE,
       NONE, MT0,
       MTC3, NONE,
       NONE, MT1,
       NONE, MT2,
       NONE, NONE
};
945 
/* Return the highest priority error condition mentioned. */
cheetah_get_hipri(unsigned long afsr)947 static __inline__ unsigned long cheetah_get_hipri(unsigned long afsr)
948 {
949 	unsigned long tmp = 0;
950 	int i;
951 
952 	for (i = 0; cheetah_error_table[i].mask; i++) {
953 		if ((tmp = (afsr & cheetah_error_table[i].mask)) != 0UL)
954 			return tmp;
955 	}
956 	return tmp;
957 }
958 
cheetah_get_string(unsigned long bit)959 static const char *cheetah_get_string(unsigned long bit)
960 {
961 	int i;
962 
963 	for (i = 0; cheetah_error_table[i].mask; i++) {
964 		if ((bit & cheetah_error_table[i].mask) != 0UL)
965 			return cheetah_error_table[i].name;
966 	}
967 	return "???";
968 }
969 
970 extern int chmc_getunumber(int, unsigned long, char *, int);
971 
/* Dump a full report of a Cheetah error trap to the kernel log:
 * AFSR/AFAR and trap registers, syndromes (translated to a unumber
 * when the memory controller driver can), the cache snapshots logged
 * by the trap handler, and any remaining AFSR error bits.
 * RECOVERABLE selects the printk severity (KERN_WARNING vs KERN_CRIT).
 */
static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *info,
			       unsigned long afsr, unsigned long afar, int recoverable)
{
	unsigned long hipri;
	char unum[256];

	printk("%s" "ERROR(%d): Cheetah error trap taken afsr[%016lx] afar[%016lx] TL1(%d)\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       afsr, afar,
	       (afsr & CHAFSR_TL1) ? 1 : 0);
	printk("%s" "ERROR(%d): TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       regs->tpc, regs->tnpc, regs->tstate);
	printk("%s" "ERROR(%d): M_SYND(%lx),  E_SYND(%lx)%s%s\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
	       (afsr & CHAFSR_E_SYNDROME) >> CHAFSR_E_SYNDROME_SHIFT,
	       (afsr & CHAFSR_ME) ? ", Multiple Errors" : "",
	       (afsr & CHAFSR_PRIV) ? ", Privileged" : "");
	hipri = cheetah_get_hipri(afsr);
	printk("%s" "ERROR(%d): Highest priority error (%016lx) \"%s\"\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       hipri, cheetah_get_string(hipri));

	/* Try to get unumber if relevant. */
#define ESYND_ERRORS	(CHAFSR_IVC | CHAFSR_IVU | \
			 CHAFSR_CPC | CHAFSR_CPU | \
			 CHAFSR_UE  | CHAFSR_CE  | \
			 CHAFSR_EDC | CHAFSR_EDU  | \
			 CHAFSR_UCC | CHAFSR_UCU  | \
			 CHAFSR_WDU | CHAFSR_WDC)
#define MSYND_ERRORS	(CHAFSR_EMC | CHAFSR_EMU)
	if (afsr & ESYND_ERRORS) {
		/* Data ECC error: translate E_SYND via the syndrome
		 * table before asking the memory controller driver.
		 */
		int syndrome;
		int ret;

		syndrome = (afsr & CHAFSR_E_SYNDROME) >> CHAFSR_E_SYNDROME_SHIFT;
		syndrome = cheetah_ecc_syntab[syndrome];
		ret = chmc_getunumber(syndrome, afar, unum, sizeof(unum));
		if (ret != -1)
			printk("%s" "ERROR(%d): AFAR E-syndrome [%s]\n",
			       (recoverable ? KERN_WARNING : KERN_CRIT),
			       smp_processor_id(), unum);
	} else if (afsr & MSYND_ERRORS) {
		/* Mtag error: translate M_SYND similarly. */
		int syndrome;
		int ret;

		syndrome = (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT;
		syndrome = cheetah_mtag_syntab[syndrome];
		ret = chmc_getunumber(syndrome, afar, unum, sizeof(unum));
		if (ret != -1)
			printk("%s" "ERROR(%d): AFAR M-syndrome [%s]\n",
			       (recoverable ? KERN_WARNING : KERN_CRIT),
			       smp_processor_id(), unum);
	}

	/* Now dump the cache snapshots. */
	printk("%s" "ERROR(%d): D-cache idx[%x] tag[%016lx] utag[%016lx] stag[%016lx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       (int) info->dcache_index,
	       info->dcache_tag,
	       info->dcache_utag,
	       info->dcache_stag);
	printk("%s" "ERROR(%d): D-cache data0[%016lx] data1[%016lx] data2[%016lx] data3[%016lx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       info->dcache_data[0],
	       info->dcache_data[1],
	       info->dcache_data[2],
	       info->dcache_data[3]);
	printk("%s" "ERROR(%d): I-cache idx[%x] tag[%016lx] utag[%016lx] stag[%016lx] "
	       "u[%016lx] l[%016lx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       (int) info->icache_index,
	       info->icache_tag,
	       info->icache_utag,
	       info->icache_stag,
	       info->icache_upper,
	       info->icache_lower);
	printk("%s" "ERROR(%d): I-cache INSN0[%016lx] INSN1[%016lx] INSN2[%016lx] INSN3[%016lx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       info->icache_data[0],
	       info->icache_data[1],
	       info->icache_data[2],
	       info->icache_data[3]);
	printk("%s" "ERROR(%d): I-cache INSN4[%016lx] INSN5[%016lx] INSN6[%016lx] INSN7[%016lx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       info->icache_data[4],
	       info->icache_data[5],
	       info->icache_data[6],
	       info->icache_data[7]);
	printk("%s" "ERROR(%d): E-cache idx[%x] tag[%016lx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       (int) info->ecache_index, info->ecache_tag);
	printk("%s" "ERROR(%d): E-cache data0[%016lx] data1[%016lx] data2[%016lx] data3[%016lx]\n",
	       (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
	       info->ecache_data[0],
	       info->ecache_data[1],
	       info->ecache_data[2],
	       info->ecache_data[3]);

	/* Report any remaining error bits below the highest priority
	 * one, highest priority first.
	 */
	afsr = (afsr & ~hipri) & cheetah_afsr_errors;
	while (afsr != 0UL) {
		unsigned long bit = cheetah_get_hipri(afsr);

		printk("%s" "ERROR: Multiple-error (%016lx) \"%s\"\n",
		       (recoverable ? KERN_WARNING : KERN_CRIT),
		       bit, cheetah_get_string(bit));

		afsr &= ~bit;
	}

	if (!recoverable)
		printk(KERN_CRIT "ERROR: This condition is not recoverable.\n");
}
1086 
/* Re-read the AFSR and, if any error bits are set, record the
 * AFSR/AFAR pair into *LOGP (when non-NULL).  The observed AFSR
 * value is then written back, which clears the latched error bits.
 * Returns non-zero when new errors were found.
 */
static int cheetah_recheck_errors(struct cheetah_err_info *logp)
{
	unsigned long afsr, afar;
	int ret = 0;

	__asm__ __volatile__("ldxa [%%g0] %1, %0\n\t"
			     : "=r" (afsr)
			     : "i" (ASI_AFSR));
	if ((afsr & cheetah_afsr_errors) != 0) {
		if (logp != NULL) {
			/* Only read the AFAR when there is something
			 * to log it against.
			 */
			__asm__ __volatile__("ldxa [%%g0] %1, %0\n\t"
					     : "=r" (afar)
					     : "i" (ASI_AFAR));
			logp->afsr = afsr;
			logp->afar = afar;
		}
		ret = 1;
	}
	/* Write back what we read to clear the latched bits. */
	__asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
			     "membar #Sync\n\t"
			     : : "r" (afsr), "i" (ASI_AFSR));

	return ret;
}
1111 
/* Handle a Cheetah Fast-ECC error trap at AFSR/AFAR: flush the
 * E-cache and I/D caches, re-enable the caches and error reporting,
 * log everything, and panic if the condition is not recoverable.
 */
void cheetah_fecc_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar)
{
	struct cheetah_err_info local_snapshot, *p;
	int recoverable;

	/* Flush E-cache */
	cheetah_flush_ecache();

	p = cheetah_get_error_log(afsr);
	if (!p) {
		/* Error taken before the log area exists; report via
		 * the PROM and halt.
		 */
		prom_printf("ERROR: Early Fast-ECC error afsr[%016lx] afar[%016lx]\n",
			    afsr, afar);
		prom_printf("ERROR: CPU(%d) TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
			    smp_processor_id(), regs->tpc, regs->tnpc, regs->tstate);
		prom_halt();
	}

	/* Grab snapshot of logged error. */
	memcpy(&local_snapshot, p, sizeof(local_snapshot));

	/* If the current trap snapshot does not match what the
	 * trap handler passed along into our args, big trouble.
	 * In such a case, mark the local copy as invalid.
	 *
	 * Else, it matches and we mark the afsr in the non-local
	 * copy as invalid so we may log new error traps there.
	 */
	if (p->afsr != afsr || p->afar != afar)
		local_snapshot.afsr = CHAFSR_INVALID;
	else
		p->afsr = CHAFSR_INVALID;

	cheetah_flush_icache();
	cheetah_flush_dcache();

	/* Re-enable I-cache/D-cache */
	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
			     "or %%g1, %1, %%g1\n\t"
			     "stxa %%g1, [%%g0] %0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "i" (ASI_DCU_CONTROL_REG),
			       "i" (DCU_DC | DCU_IC)
			     : "g1");

	/* Re-enable error reporting */
	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
			     "or %%g1, %1, %%g1\n\t"
			     "stxa %%g1, [%%g0] %0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "i" (ASI_ESTATE_ERROR_EN),
			       "i" (ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN)
			     : "g1");

	/* Decide if we can continue after handling this trap and
	 * logging the error.
	 */
	recoverable = 1;
	if (afsr & (CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP))
		recoverable = 0;

	/* Re-check AFSR/AFAR.  What we are looking for here is whether a new
	 * error was logged while we had error reporting traps disabled.
	 */
	if (cheetah_recheck_errors(&local_snapshot)) {
		unsigned long new_afsr = local_snapshot.afsr;

		/* If we got a new asynchronous error, die... */
		if (new_afsr & (CHAFSR_EMU | CHAFSR_EDU |
				CHAFSR_WDU | CHAFSR_CPU |
				CHAFSR_IVU | CHAFSR_UE |
				CHAFSR_BERR | CHAFSR_TO))
			recoverable = 0;
	}

	/* Log errors. */
	cheetah_log_errors(regs, &local_snapshot, afsr, afar, recoverable);

	if (!recoverable)
		panic("Irrecoverable Fast-ECC error trap.\n");

	/* Flush E-cache to kick the error trap handlers out. */
	cheetah_flush_ecache();
}
1197 
1198 /* Try to fix a correctable error by pushing the line out from
1199  * the E-cache.  Recheck error reporting registers to see if the
1200  * problem is intermittent.
1201  */
/* Returns 0 if no new error was observed after the displacement
 * (intermittent problem), 1 if an error recurred but cleared after
 * one more displacement load, and 2 if it persisted even then.
 */
static int cheetah_fix_ce(unsigned long physaddr)
{
	unsigned long orig_estate;
	unsigned long alias1, alias2;
	int ret;

	/* Make sure correctable error traps are disabled. */
	__asm__ __volatile__("ldxa	[%%g0] %2, %0\n\t"
			     "andn	%0, %1, %%g1\n\t"
			     "stxa	%%g1, [%%g0] %2\n\t"
			     "membar	#Sync"
			     : "=&r" (orig_estate)
			     : "i" (ESTATE_ERROR_CEEN),
			       "i" (ASI_ESTATE_ERROR_EN)
			     : "g1");

	/* We calculate alias addresses that will force the
	 * cache line in question out of the E-cache.  Then
	 * we bring it back in with an atomic instruction so
	 * that we get it in some modified/exclusive state,
	 * then we displace it again to try and get proper ECC
	 * pushed back into the system.
	 */
	physaddr &= ~(8UL - 1UL);
	alias1 = (ecache_flush_physbase +
		  (physaddr & ((ecache_flush_size >> 1) - 1)));
	alias2 = alias1 + (ecache_flush_size >> 1);
	__asm__ __volatile__("ldxa	[%0] %3, %%g0\n\t"
			     "ldxa	[%1] %3, %%g0\n\t"
			     "casxa	[%2] %3, %%g0, %%g0\n\t"
			     "membar	#StoreLoad | #StoreStore\n\t"
			     "ldxa	[%0] %3, %%g0\n\t"
			     "ldxa	[%1] %3, %%g0\n\t"
			     "membar	#Sync"
			     : /* no outputs */
			     : "r" (alias1), "r" (alias2),
			       "r" (physaddr), "i" (ASI_PHYS_USE_EC));

	/* Did that trigger another error? */
	if (cheetah_recheck_errors(NULL)) {
		/* Try one more time. */
		__asm__ __volatile__("ldxa [%0] %1, %%g0\n\t"
				     "membar #Sync"
				     : : "r" (physaddr), "i" (ASI_PHYS_USE_EC));
		if (cheetah_recheck_errors(NULL))
			ret = 2;
		else
			ret = 1;
	} else {
		/* No new error, intermittent problem. */
		ret = 0;
	}

	/* Restore error enables. */
	__asm__ __volatile__("stxa	%0, [%%g0] %1\n\t"
			     "membar	#Sync"
			     : : "r" (orig_estate), "i" (ASI_ESTATE_ERROR_EN));

	return ret;
}
1262 
1263 /* Return non-zero if PADDR is a valid physical memory address. */
cheetah_check_main_memory(unsigned long paddr)1264 static int cheetah_check_main_memory(unsigned long paddr)
1265 {
1266 	int i;
1267 
1268 	for (i = 0; ; i++) {
1269 		if (sp_banks[i].num_bytes == 0)
1270 			break;
1271 		if (paddr >= sp_banks[i].base_addr &&
1272 		    paddr < (sp_banks[i].base_addr + sp_banks[i].num_bytes))
1273 			return 1;
1274 	}
1275 	return 0;
1276 }
1277 
/* Handle a Cheetah correctable-ECC error trap at AFSR/AFAR: try to
 * scrub the offending line if it is main memory, flush and re-enable
 * the I-cache, re-enable correctable error reporting, and log.
 */
void cheetah_cee_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar)
{
	struct cheetah_err_info local_snapshot, *p;
	int recoverable, is_memory;

	p = cheetah_get_error_log(afsr);
	if (!p) {
		/* Error taken before the log area exists; report via
		 * the PROM and halt.
		 */
		prom_printf("ERROR: Early CEE error afsr[%016lx] afar[%016lx]\n",
			    afsr, afar);
		prom_printf("ERROR: CPU(%d) TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
			    smp_processor_id(), regs->tpc, regs->tnpc, regs->tstate);
		prom_halt();
	}

	/* Grab snapshot of logged error. */
	memcpy(&local_snapshot, p, sizeof(local_snapshot));

	/* If the current trap snapshot does not match what the
	 * trap handler passed along into our args, big trouble.
	 * In such a case, mark the local copy as invalid.
	 *
	 * Else, it matches and we mark the afsr in the non-local
	 * copy as invalid so we may log new error traps there.
	 */
	if (p->afsr != afsr || p->afar != afar)
		local_snapshot.afsr = CHAFSR_INVALID;
	else
		p->afsr = CHAFSR_INVALID;

	is_memory = cheetah_check_main_memory(afar);

	if (is_memory && (afsr & CHAFSR_CE) != 0UL) {
		/* XXX Might want to log the results of this operation
		 * XXX somewhere... -DaveM
		 */
		cheetah_fix_ce(afar);
	}

	{
		int flush_all, flush_line;

		/* Flush just the offending line if this is the only
		 * error reported, else flush the whole E-cache.
		 */
		flush_all = flush_line = 0;
		if ((afsr & CHAFSR_EDC) != 0UL) {
			if ((afsr & cheetah_afsr_errors) == CHAFSR_EDC)
				flush_line = 1;
			else
				flush_all = 1;
		} else if ((afsr & CHAFSR_CPC) != 0UL) {
			if ((afsr & cheetah_afsr_errors) == CHAFSR_CPC)
				flush_line = 1;
			else
				flush_all = 1;
		}

		/* Trap handler only disabled I-cache, flush it. */
		cheetah_flush_icache();

		/* Re-enable I-cache */
		__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
				     "or %%g1, %1, %%g1\n\t"
				     "stxa %%g1, [%%g0] %0\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "i" (ASI_DCU_CONTROL_REG),
				     "i" (DCU_IC)
				     : "g1");

		if (flush_all)
			cheetah_flush_ecache();
		else if (flush_line)
			cheetah_flush_ecache_line(afar);
	}

	/* Re-enable error reporting */
	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
			     "or %%g1, %1, %%g1\n\t"
			     "stxa %%g1, [%%g0] %0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "i" (ASI_ESTATE_ERROR_EN),
			       "i" (ESTATE_ERROR_CEEN)
			     : "g1");

	/* Decide if we can continue after handling this trap and
	 * logging the error.
	 */
	recoverable = 1;
	if (afsr & (CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP))
		recoverable = 0;

	/* Re-check AFSR/AFAR */
	(void) cheetah_recheck_errors(&local_snapshot);

	/* Log errors. */
	cheetah_log_errors(regs, &local_snapshot, afsr, afar, recoverable);

	if (!recoverable)
		panic("Irrecoverable Correctable-ECC error trap.\n");
}
1377 
/* Handle a Cheetah deferred asynchronous error trap at AFSR/AFAR.
 * Handles the special PCI config-space poke sequence, flushes and
 * re-enables the caches and error reporting, logs the error, and
 * decides whether execution can continue (see the long comment near
 * the bottom for the recovery policy).
 */
void cheetah_deferred_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar)
{
	struct cheetah_err_info local_snapshot, *p;
	int recoverable, is_memory;

#ifdef CONFIG_PCI
	/* Check for the special PCI poke sequence. */
	if (pci_poke_in_progress && pci_poke_cpu == smp_processor_id()) {
		cheetah_flush_icache();
		cheetah_flush_dcache();

		/* Re-enable I-cache/D-cache */
		__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
				     "or %%g1, %1, %%g1\n\t"
				     "stxa %%g1, [%%g0] %0\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "i" (ASI_DCU_CONTROL_REG),
				       "i" (DCU_DC | DCU_IC)
				     : "g1");

		/* Re-enable error reporting */
		__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
				     "or %%g1, %1, %%g1\n\t"
				     "stxa %%g1, [%%g0] %0\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "i" (ASI_ESTATE_ERROR_EN),
				       "i" (ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN)
				     : "g1");

		/* Clear the latched error state; the poke code only
		 * cares that a fault occurred, not which one.
		 */
		(void) cheetah_recheck_errors(NULL);

		/* Flag the fault and skip the poking instruction. */
		pci_poke_faulted = 1;
		regs->tpc += 4;
		regs->tnpc = regs->tpc + 4;
		return;
	}
#endif

	p = cheetah_get_error_log(afsr);
	if (!p) {
		/* Error taken before the log area exists; report via
		 * the PROM and halt.
		 */
		prom_printf("ERROR: Early deferred error afsr[%016lx] afar[%016lx]\n",
			    afsr, afar);
		prom_printf("ERROR: CPU(%d) TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
			    smp_processor_id(), regs->tpc, regs->tnpc, regs->tstate);
		prom_halt();
	}

	/* Grab snapshot of logged error. */
	memcpy(&local_snapshot, p, sizeof(local_snapshot));

	/* If the current trap snapshot does not match what the
	 * trap handler passed along into our args, big trouble.
	 * In such a case, mark the local copy as invalid.
	 *
	 * Else, it matches and we mark the afsr in the non-local
	 * copy as invalid so we may log new error traps there.
	 */
	if (p->afsr != afsr || p->afar != afar)
		local_snapshot.afsr = CHAFSR_INVALID;
	else
		p->afsr = CHAFSR_INVALID;

	is_memory = cheetah_check_main_memory(afar);

	{
		int flush_all, flush_line;

		/* Flush just the offending line if this is the only
		 * error reported, else flush the whole E-cache.
		 */
		flush_all = flush_line = 0;
		if ((afsr & CHAFSR_EDU) != 0UL) {
			if ((afsr & cheetah_afsr_errors) == CHAFSR_EDU)
				flush_line = 1;
			else
				flush_all = 1;
		} else if ((afsr & CHAFSR_BERR) != 0UL) {
			if ((afsr & cheetah_afsr_errors) == CHAFSR_BERR)
				flush_line = 1;
			else
				flush_all = 1;
		}

		cheetah_flush_icache();
		cheetah_flush_dcache();

		/* Re-enable I/D caches */
		__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
				     "or %%g1, %1, %%g1\n\t"
				     "stxa %%g1, [%%g0] %0\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "i" (ASI_DCU_CONTROL_REG),
				     "i" (DCU_IC | DCU_DC)
				     : "g1");

		if (flush_all)
			cheetah_flush_ecache();
		else if (flush_line)
			cheetah_flush_ecache_line(afar);
	}

	/* Re-enable error reporting */
	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
			     "or %%g1, %1, %%g1\n\t"
			     "stxa %%g1, [%%g0] %0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "i" (ASI_ESTATE_ERROR_EN),
			     "i" (ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN)
			     : "g1");

	/* Decide if we can continue after handling this trap and
	 * logging the error.
	 */
	recoverable = 1;
	if (afsr & (CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP))
		recoverable = 0;

	/* Re-check AFSR/AFAR.  What we are looking for here is whether a new
	 * error was logged while we had error reporting traps disabled.
	 */
	if (cheetah_recheck_errors(&local_snapshot)) {
		unsigned long new_afsr = local_snapshot.afsr;

		/* If we got a new asynchronous error, die... */
		if (new_afsr & (CHAFSR_EMU | CHAFSR_EDU |
				CHAFSR_WDU | CHAFSR_CPU |
				CHAFSR_IVU | CHAFSR_UE |
				CHAFSR_BERR | CHAFSR_TO))
			recoverable = 0;
	}

	/* Log errors. */
	cheetah_log_errors(regs, &local_snapshot, afsr, afar, recoverable);

	/* "Recoverable" here means we try to yank the page from ever
	 * being newly used again.  This depends upon a few things:
	 * 1) Must be main memory, and AFAR must be valid.
	 * 2) If we trapped from user, OK.
	 * 3) Else, if we trapped from kernel we must find exception
	 *    table entry (ie. we have to have been accessing user
	 *    space).
	 *
	 * If AFAR is not in main memory, or we trapped from kernel
	 * and cannot find an exception table entry, it is unacceptable
	 * to try and continue.
	 */
	if (recoverable && is_memory) {
		if ((regs->tstate & TSTATE_PRIV) == 0UL) {
			/* OK, usermode access. */
			recoverable = 1;
		} else {
			unsigned long g2 = regs->u_regs[UREG_G2];
			unsigned long fixup = search_exception_table(regs->tpc, &g2);

			if (fixup != 0UL) {
				/* OK, kernel access to userspace. */
				recoverable = 1;

			} else {
				/* BAD, privileged state is corrupted. */
				recoverable = 0;
			}

			if (recoverable) {
				/* Pin the bad page so it is never
				 * handed out again.
				 */
				struct page *page = virt_to_page(__va(afar));

				if (VALID_PAGE(page))
					get_page(page);
				else
					recoverable = 0;

				/* Only perform fixup if we still have a
				 * recoverable condition.
				 */
				if (fixup != 0UL && recoverable) {
					regs->tpc = fixup;
					regs->tnpc = regs->tpc + 4;
					regs->u_regs[UREG_G2] = g2;
				}
			}
		}
	} else {
		recoverable = 0;
	}

	if (!recoverable)
		panic("Irrecoverable deferred error trap.\n");
}
1567 
1568 /* Handle a D/I cache parity error trap.  TYPE is encoded as:
1569  *
1570  * Bit0:	0=dcache,1=icache
1571  * Bit1:	0=recoverable,1=unrecoverable
1572  *
1573  * The hardware has disabled both the I-cache and D-cache in
1574  * the %dcr register.
1575  */
void cheetah_plus_parity_error(int type, struct pt_regs *regs)
{
	/* Clean up the affected cache: a plain flush suffices for the
	 * I-cache, the D-cache needs its parity rewritten first (see
	 * cheetah_plus_zap_dcache_parity above).
	 */
	if (type & 0x1)
		__cheetah_flush_icache();
	else
		cheetah_plus_zap_dcache_parity();
	cheetah_flush_dcache();

	/* Re-enable I-cache/D-cache */
	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
			     "or %%g1, %1, %%g1\n\t"
			     "stxa %%g1, [%%g0] %0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "i" (ASI_DCU_CONTROL_REG),
			       "i" (DCU_DC | DCU_IC)
			     : "g1");

	/* Bit1 set means the hardware deemed the error unrecoverable. */
	if (type & 0x2) {
		printk(KERN_EMERG "CPU[%d]: Cheetah+ %c-cache parity error at TPC[%016lx]\n",
		       smp_processor_id(),
		       (type & 0x1) ? 'I' : 'D',
		       regs->tpc);
		panic("Irrecoverable Cheetah+ parity error.");
	}

	printk(KERN_WARNING "CPU[%d]: Cheetah+ %c-cache parity error at TPC[%016lx]\n",
	       smp_processor_id(),
	       (type & 0x1) ? 'I' : 'D',
	       regs->tpc);
}
1607 
do_fpe_common(struct pt_regs * regs)1608 void do_fpe_common(struct pt_regs *regs)
1609 {
1610 	if(regs->tstate & TSTATE_PRIV) {
1611 		regs->tpc = regs->tnpc;
1612 		regs->tnpc += 4;
1613 	} else {
1614 		unsigned long fsr = current->thread.xfsr[0];
1615 		siginfo_t info;
1616 
1617 		if ((current->thread.flags & SPARC_FLAG_32BIT) != 0) {
1618 			regs->tpc &= 0xffffffff;
1619 			regs->tnpc &= 0xffffffff;
1620 		}
1621 		info.si_signo = SIGFPE;
1622 		info.si_errno = 0;
1623 		info.si_addr = (void *)regs->tpc;
1624 		info.si_trapno = 0;
1625 		info.si_code = __SI_FAULT;
1626 		if ((fsr & 0x1c000) == (1 << 14)) {
1627 			if (fsr & 0x10)
1628 				info.si_code = FPE_FLTINV;
1629 			else if (fsr & 0x08)
1630 				info.si_code = FPE_FLTOVF;
1631 			else if (fsr & 0x04)
1632 				info.si_code = FPE_FLTUND;
1633 			else if (fsr & 0x02)
1634 				info.si_code = FPE_FLTDIV;
1635 			else if (fsr & 0x01)
1636 				info.si_code = FPE_FLTRES;
1637 		}
1638 		force_sig_info(SIGFPE, &info, current);
1639 	}
1640 }
1641 
/* IEEE-754 FP exception trap: no special handling beyond the common
 * FPE path.
 */
void do_fpieee(struct pt_regs *regs)
{
	do_fpe_common(regs);
}
1646 
1647 extern int do_mathemu(struct pt_regs *, struct fpustate *);
1648 
do_fpother(struct pt_regs * regs)1649 void do_fpother(struct pt_regs *regs)
1650 {
1651 	struct fpustate *f = FPUSTATE;
1652 	int ret = 0;
1653 
1654 	switch ((current->thread.xfsr[0] & 0x1c000)) {
1655 	case (2 << 14): /* unfinished_FPop */
1656 	case (3 << 14): /* unimplemented_FPop */
1657 		ret = do_mathemu(regs, f);
1658 		break;
1659 	}
1660 	if (ret)
1661 		return;
1662 	do_fpe_common(regs);
1663 }
1664 
do_tof(struct pt_regs * regs)1665 void do_tof(struct pt_regs *regs)
1666 {
1667 	siginfo_t info;
1668 
1669 	if(regs->tstate & TSTATE_PRIV)
1670 		die_if_kernel("Penguin overflow trap from kernel mode", regs);
1671 	if ((current->thread.flags & SPARC_FLAG_32BIT) != 0) {
1672 		regs->tpc &= 0xffffffff;
1673 		regs->tnpc &= 0xffffffff;
1674 	}
1675 	info.si_signo = SIGEMT;
1676 	info.si_errno = 0;
1677 	info.si_code = EMT_TAGOVF;
1678 	info.si_addr = (void *)regs->tpc;
1679 	info.si_trapno = 0;
1680 	force_sig_info(SIGEMT, &info, current);
1681 }
1682 
do_div0(struct pt_regs * regs)1683 void do_div0(struct pt_regs *regs)
1684 {
1685 	siginfo_t info;
1686 
1687 	if (regs->tstate & TSTATE_PRIV)
1688 		die_if_kernel("TL0: Kernel divide by zero.", regs);
1689 	if ((current->thread.flags & SPARC_FLAG_32BIT) != 0) {
1690 		regs->tpc &= 0xffffffff;
1691 		regs->tnpc &= 0xffffffff;
1692 	}
1693 	info.si_signo = SIGFPE;
1694 	info.si_errno = 0;
1695 	info.si_code = FPE_INTDIV;
1696 	info.si_addr = (void *)regs->tpc;
1697 	info.si_trapno = 0;
1698 	force_sig_info(SIGFPE, &info, current);
1699 }
1700 
/* Print the nine instruction words around a kernel PC, bracketing
 * the faulting one (offset 0) with '<' '>'.
 */
void instruction_dump (unsigned int *pc)
{
	int i;

	/* Instructions are word aligned; bail on a bogus PC. */
	if (((unsigned long) pc) & 3)
		return;

	printk("Instruction DUMP:");
	for (i = -3; i < 6; i++) {
		printk("%c%08x%c", i ? ' ' : '<', pc[i], i ? ' ' : '>');
	}
	printk("\n");
}
1713 
/* Print the nine instruction words around a user PC, bracketing the
 * faulting one with '<' '>'.  The words are fetched with
 * copy_from_user since the PC is an untrusted user address; buf[3]
 * holds the instruction at PC itself (we copy from pc - 3).
 */
void user_instruction_dump (unsigned int *pc)
{
	int i;
	unsigned int buf[9];

	/* Instructions are word aligned; bail on a bogus PC. */
	if((((unsigned long) pc) & 3))
		return;

	if(copy_from_user(buf, pc - 3, sizeof(buf)))
		return;

	printk("Instruction DUMP:");
	for(i = 0; i < 9; i++)
		/* Bracket only the faulting word (i == 3), matching
		 * instruction_dump()'s output convention.  The old
		 * ternaries were inverted and bracketed every word
		 * EXCEPT the faulting one.
		 */
		printk("%c%08x%c",i==3?'<':' ',buf[i],i==3?'>':' ');
	printk("\n");
}
1730 
show_trace_raw(struct task_struct * tsk,unsigned long ksp)1731 void show_trace_raw(struct task_struct *tsk, unsigned long ksp)
1732 {
1733 	unsigned long pc, fp;
1734 	unsigned long task_base = (unsigned long)tsk;
1735 	struct reg_window *rw;
1736 	int count = 0;
1737 
1738 	if (tsk == current)
1739 		flushw_all();
1740 
1741 	fp = ksp + STACK_BIAS;
1742 	do {
1743 		/* Bogus frame pointer? */
1744 		if (fp < (task_base + sizeof(struct task_struct)) ||
1745 		    fp >= (task_base + THREAD_SIZE))
1746 			break;
1747 		rw = (struct reg_window *)fp;
1748 		pc = rw->ins[7];
1749 		printk("[%016lx] ", pc);
1750 		fp = rw->ins[6] + STACK_BIAS;
1751 	} while (++count < 16);
1752 	printk("\n");
1753 }
1754 
show_trace_task(struct task_struct * tsk)1755 void show_trace_task(struct task_struct *tsk)
1756 {
1757 	if (tsk)
1758 		show_trace_raw(tsk, tsk->thread.ksp);
1759 }
1760 
/* Print a backtrace of the current context, starting from the live
 * frame pointer register.
 */
void dump_stack(void)
{
	unsigned long ksp;

	/* Grab the current frame pointer (%fp). */
	__asm__ __volatile__("mov	%%fp, %0"
			     : "=r" (ksp));
	show_trace_raw(current, ksp);
}
1769 
/* Fatal trap reporting: print a banner, the registers, a caller
 * backtrace (kernel mode) and an instruction dump, then terminate
 * the current task -- SIGKILL for kernel-mode traps, SIGSEGV
 * otherwise.  Despite the name, this is reached for fatal user-mode
 * conditions too.
 */
void die_if_kernel(char *str, struct pt_regs *regs)
{
	extern void __show_regs(struct pt_regs * regs);
	extern void smp_report_regs(void);
	int count = 0;
	struct reg_window *lastrw;

	/* Amuse the user. */
	printk(
"              \\|/ ____ \\|/\n"
"              \"@'/ .. \\`@\"\n"
"              /_| \\__/ |_\\\n"
"                 \\__U_/\n");

	printk("%s(%d): %s\n", current->comm, current->pid, str);
	/* Spill all register windows to the stack so the backtrace
	 * below sees up-to-date frames.
	 */
	__asm__ __volatile__("flushw");
	__show_regs(regs);
	if(regs->tstate & TSTATE_PRIV) {
		struct reg_window *rw = (struct reg_window *)
			(regs->u_regs[UREG_FP] + STACK_BIAS);

		/* Stop the back trace when we hit userland or we
		 * find some badly aligned kernel stack.
		 */
		lastrw = (struct reg_window *)current;
		while(rw					&&
		      count++ < 30				&&
		      rw >= lastrw				&&
		      (char *) rw < ((char *) current)
		        + sizeof (union task_union) 		&&
		      !(((unsigned long) rw) & 0x7)) {
			printk("Caller[%016lx]\n", rw->ins[7]);
			lastrw = rw;
			rw = (struct reg_window *)
				(rw->ins[6] + STACK_BIAS);
		}
		instruction_dump ((unsigned int *) regs->tpc);
	} else {
		/* 32-bit tasks only use the low 32 bits of PC/NPC. */
		if ((current->thread.flags & SPARC_FLAG_32BIT) != 0) {
			regs->tpc &= 0xffffffff;
			regs->tnpc &= 0xffffffff;
		}
		user_instruction_dump ((unsigned int *) regs->tpc);
	}
#ifdef CONFIG_SMP
	smp_report_regs();
#endif

	if(regs->tstate & TSTATE_PRIV)
		do_exit(SIGKILL);
	do_exit(SIGSEGV);
}
1822 
1823 extern int handle_popc(u32 insn, struct pt_regs *regs);
1824 extern int handle_ldf_stq(u32 insn, struct pt_regs *regs);
1825 
do_illegal_instruction(struct pt_regs * regs)1826 void do_illegal_instruction(struct pt_regs *regs)
1827 {
1828 	unsigned long pc = regs->tpc;
1829 	unsigned long tstate = regs->tstate;
1830 	u32 insn;
1831 	siginfo_t info;
1832 
1833 	if(tstate & TSTATE_PRIV)
1834 		die_if_kernel("Kernel illegal instruction", regs);
1835 	if(current->thread.flags & SPARC_FLAG_32BIT)
1836 		pc = (u32)pc;
1837 	if (get_user(insn, (u32 *)pc) != -EFAULT) {
1838 		if ((insn & 0xc1ffc000) == 0x81700000) /* POPC */ {
1839 			if (handle_popc(insn, regs))
1840 				return;
1841 		} else if ((insn & 0xc1580000) == 0xc1100000) /* LDQ/STQ */ {
1842 			if (handle_ldf_stq(insn, regs))
1843 				return;
1844 		}
1845 	}
1846 	info.si_signo = SIGILL;
1847 	info.si_errno = 0;
1848 	info.si_code = ILL_ILLOPC;
1849 	info.si_addr = (void *)pc;
1850 	info.si_trapno = 0;
1851 	force_sig_info(SIGILL, &info, current);
1852 }
1853 
mem_address_unaligned(struct pt_regs * regs,unsigned long sfar,unsigned long sfsr)1854 void mem_address_unaligned(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr)
1855 {
1856 	siginfo_t info;
1857 
1858 	if(regs->tstate & TSTATE_PRIV) {
1859 		extern void kernel_unaligned_trap(struct pt_regs *regs,
1860 						  unsigned int insn,
1861 						  unsigned long sfar, unsigned long sfsr);
1862 
1863 		return kernel_unaligned_trap(regs, *((unsigned int *)regs->tpc), sfar, sfsr);
1864 	}
1865 	info.si_signo = SIGBUS;
1866 	info.si_errno = 0;
1867 	info.si_code = BUS_ADRALN;
1868 	info.si_addr = (void *)sfar;
1869 	info.si_trapno = 0;
1870 	force_sig_info(SIGBUS, &info, current);
1871 }
1872 
do_privop(struct pt_regs * regs)1873 void do_privop(struct pt_regs *regs)
1874 {
1875 	siginfo_t info;
1876 
1877 	if ((current->thread.flags & SPARC_FLAG_32BIT) != 0) {
1878 		regs->tpc &= 0xffffffff;
1879 		regs->tnpc &= 0xffffffff;
1880 	}
1881 	info.si_signo = SIGILL;
1882 	info.si_errno = 0;
1883 	info.si_code = ILL_PRVOPC;
1884 	info.si_addr = (void *)regs->tpc;
1885 	info.si_trapno = 0;
1886 	force_sig_info(SIGILL, &info, current);
1887 }
1888 
/* Privileged-action trap: handled identically to a privileged
 * opcode, i.e. delivers SIGILL.
 */
void do_privact(struct pt_regs *regs)
{
	do_privop(regs);
}
1893 
1894 /* Trap level 1 stuff or other traps we should never see... */
/* Cache error at trap level 0: always fatal. */
void do_cee(struct pt_regs *regs)
{
	die_if_kernel("TL0: Cache Error Exception", regs);
}
1899 
/* Cache error at TL>0: dump the per-level trap log, then die. */
void do_cee_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: Cache Error Exception", regs);
}
1905 
/* Data access exception at TL>0: dump the trap log, then die. */
void do_dae_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: Data Access Exception", regs);
}
1911 
/* Instruction access exception at TL>0: dump the trap log, then die. */
void do_iae_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: Instruction Access Exception", regs);
}
1917 
/* Division-by-zero at TL>0: dump the trap log, then die. */
void do_div0_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: DIV0 Exception", regs);
}
1923 
/* FPU-disabled trap at TL>0: dump the trap log, then die. */
void do_fpdis_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: FPU Disabled", regs);
}
1929 
/* FPU IEEE exception at TL>0: dump the trap log, then die. */
void do_fpieee_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: FPU IEEE Exception", regs);
}
1935 
/* FPU "other" exception at TL>0: dump the trap log, then die. */
void do_fpother_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: FPU Other Exception", regs);
}
1941 
/* Illegal instruction at TL>0: dump the trap log, then die. */
void do_ill_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: Illegal Instruction Exception", regs);
}
1947 
/* Interrupt taken at TL>0 (should never happen): dump and die. */
void do_irq_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: IRQ Exception", regs);
}
1953 
/* Misaligned LDDF at TL>0: dump the trap log, then die. */
void do_lddfmna_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: LDDF Exception", regs);
}
1959 
/* Misaligned STDF at TL>0: dump the trap log, then die. */
void do_stdfmna_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: STDF Exception", regs);
}
1965 
/* Physical-address watchpoint hit at TL0: always fatal. */
void do_paw(struct pt_regs *regs)
{
	die_if_kernel("TL0: Phys Watchpoint Exception", regs);
}
1970 
/* Physical-address watchpoint hit at TL>0: dump and die. */
void do_paw_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: Phys Watchpoint Exception", regs);
}
1976 
/* Virtual-address watchpoint hit at TL0: always fatal. */
void do_vaw(struct pt_regs *regs)
{
	die_if_kernel("TL0: Virt Watchpoint Exception", regs);
}
1981 
/* Virtual-address watchpoint hit at TL>0: dump and die. */
void do_vaw_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: Virt Watchpoint Exception", regs);
}
1987 
/* Tag overflow at TL>0: dump the trap log, then die. */
void do_tof_tl1(struct pt_regs *regs)
{
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	die_if_kernel("TL1: Tag Overflow Exception", regs);
}
1993 
do_getpsr(struct pt_regs * regs)1994 void do_getpsr(struct pt_regs *regs)
1995 {
1996 	regs->u_regs[UREG_I0] = tstate_to_psr(regs->tstate);
1997 	regs->tpc   = regs->tnpc;
1998 	regs->tnpc += 4;
1999 	if ((current->thread.flags & SPARC_FLAG_32BIT) != 0) {
2000 		regs->tpc &= 0xffffffff;
2001 		regs->tnpc &= 0xffffffff;
2002 	}
2003 }
2004 
/* Boot-time trap initialization for the boot cpu: take a reference
 * on init_mm and make it the active address space of the current
 * (init) task.
 */
void trap_init(void)
{
	/* Attach to the address space of init_task. */
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;

	/* NOTE: Other cpus have this done as they are started
	 *       up on SMP.
	 */
}
2015