/* Provide some functions which dump the trace buffer in a nice way for people
 * to read, so they can understand what is going on.
 *
 * Copyright 2004-2010 Analog Devices Inc.
 *
 * Licensed under the GPL-2 or later
 */

#include <linux/kernel.h>
#include <linux/hardirq.h>
#include <linux/thread_info.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/irq.h>
#include <asm/dma.h>
#include <asm/trace.h>
#include <asm/fixed_code.h>
#include <asm/traps.h>
#include <asm/irq_handler.h>
#include <asm/pda.h>

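/*
 * decode_address() below turns a raw address into a human-readable
 * annotation for the dumps in this file: a "<0x%08lx> " prefix followed by
 * either a kallsyms symbol, a note naming the MMR/on-chip/memory region the
 * address falls in, or the process mapping that contains it.  (Summary of
 * the cases handled in the function body; see below for the exact strings.)
 */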
void decode_address(char *buf, unsigned long address)
{
	struct task_struct *p;
	struct mm_struct *mm;
	unsigned long flags, offset;
	unsigned char in_atomic = (bfin_read_IPEND() & 0x10) || in_atomic();
	struct rb_node *n;

#ifdef CONFIG_KALLSYMS
	unsigned long symsize;
	const char *symname;
	char *modname;
	char *delim = ":";
	char namebuf[128];
#endif

	buf += sprintf(buf, "<0x%08lx> ", address);

#ifdef CONFIG_KALLSYMS
	/* look up the address and see if we are in kernel space */
	symname = kallsyms_lookup(address, &symsize, &offset, &modname, namebuf);

	if (symname) {
		/* yeah! kernel space! */
		if (!modname)
			modname = delim = "";
		sprintf(buf, "{ %s%s%s%s + 0x%lx }",
			delim, modname, delim, symname,
			(unsigned long)offset);
		return;
	}
#endif

	if (address >= FIXED_CODE_START && address < FIXED_CODE_END) {
		/* Problem in fixed code section? */
		strcat(buf, "/* Maybe fixed code section */");
		return;

	} else if (address < CONFIG_BOOT_LOAD) {
		/* Problem somewhere before the kernel start address */
		strcat(buf, "/* Maybe null pointer? */");
		return;

	} else if (address >= COREMMR_BASE) {
		strcat(buf, "/* core mmrs */");
		return;

	} else if (address >= SYSMMR_BASE) {
		strcat(buf, "/* system mmrs */");
		return;

	} else if (address >= L1_ROM_START && address < L1_ROM_START + L1_ROM_LENGTH) {
		strcat(buf, "/* on-chip L1 ROM */");
		return;

	} else if (address >= L1_SCRATCH_START && address < L1_SCRATCH_START + L1_SCRATCH_LENGTH) {
		strcat(buf, "/* on-chip scratchpad */");
		return;

	} else if (address >= physical_mem_end && address < ASYNC_BANK0_BASE) {
		strcat(buf, "/* unconnected memory */");
		return;

	} else if (address >= ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE && address < BOOT_ROM_START) {
		strcat(buf, "/* reserved memory */");
		return;

	} else if (address >= L1_DATA_A_START && address < L1_DATA_A_START + L1_DATA_A_LENGTH) {
		strcat(buf, "/* on-chip Data Bank A */");
		return;

	} else if (address >= L1_DATA_B_START && address < L1_DATA_B_START + L1_DATA_B_LENGTH) {
		strcat(buf, "/* on-chip Data Bank B */");
		return;
	}

	/*
	 * Don't walk any of the vmas if we are oopsing; it has been known
	 * to cause problems - corrupt vmas (kernel crashes) cause double faults.
	 */
	if (oops_in_progress) {
		strcat(buf, "/* kernel dynamic memory (maybe user-space) */");
		return;
	}

	/* looks like we're off in user-land, so let's walk all the
	 * mappings of all our processes and see if we can't be a wee
	 * bit more specific
	 */
	write_lock_irqsave(&tasklist_lock, flags);
	for_each_process(p) {
		mm = (in_atomic ? p->mm : get_task_mm(p));
		if (!mm)
			continue;

		if (!down_read_trylock(&mm->mmap_sem)) {
			if (!in_atomic)
				mmput(mm);
			continue;
		}

		for (n = rb_first(&mm->mm_rb); n; n = rb_next(n)) {
			struct vm_area_struct *vma;

			vma = rb_entry(n, struct vm_area_struct, vm_rb);

			if (address >= vma->vm_start && address < vma->vm_end) {
				char _tmpbuf[256];
				char *name = p->comm;
				struct file *file = vma->vm_file;

				if (file) {
					char *d_name = d_path(&file->f_path, _tmpbuf,
						      sizeof(_tmpbuf));
					if (!IS_ERR(d_name))
						name = d_name;
				}

				/* FLAT does not have its text aligned to the start of
				 * the map while FDPIC ELF does ...
				 */

				/* before we can check flat/fdpic, we need to
				 * make sure current is valid
				 */
				if ((unsigned long)current >= FIXED_CODE_START &&
				    !((unsigned long)current & 0x3)) {
					if (current->mm &&
					    (address > current->mm->start_code) &&
					    (address < current->mm->end_code))
						offset = address - current->mm->start_code;
					else
						offset = (address - vma->vm_start) +
							 (vma->vm_pgoff << PAGE_SHIFT);

					sprintf(buf, "[ %s + 0x%lx ]", name, offset);
				} else
					sprintf(buf, "[ %s vma:0x%lx-0x%lx]",
						name, vma->vm_start, vma->vm_end);

				up_read(&mm->mmap_sem);
				if (!in_atomic)
					mmput(mm);

				if (buf[0] == '\0')
					sprintf(buf, "[ %s ] dynamic memory", name);

				goto done;
			}
		}

		up_read(&mm->mmap_sem);
		if (!in_atomic)
			mmput(mm);
	}

	/*
	 * we were unable to find this address anywhere,
	 * or some MMs were skipped because they were in use.
	 */
	sprintf(buf, "/* kernel dynamic memory */");

done:
	write_unlock_irqrestore(&tasklist_lock, flags);
}

#define EXPAND_LEN ((1 << CONFIG_DEBUG_BFIN_HWTRACE_EXPAND_LEN) * 256 - 1)
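/*
 * Note: EXPAND_LEN is used below as the highest valid index into
 * software_trace_buff[], i.e. the expanded buffer is assumed to hold
 * (1 << CONFIG_DEBUG_BFIN_HWTRACE_EXPAND_LEN) * 256 entries - for example,
 * 512 entries (EXPAND_LEN = 511) when CONFIG_DEBUG_BFIN_HWTRACE_EXPAND_LEN
 * is 1.
 */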

/*
 * Similar to get_user, do some address checking, then dereference.
 * Return true on success, false on bad address.
 */
bool get_mem16(unsigned short *val, unsigned short *address)
{
	unsigned long addr = (unsigned long)address;

	/* Check for odd addresses */
	if (addr & 0x1)
		return false;

	switch (bfin_mem_access_type(addr, 2)) {
	case BFIN_MEM_ACCESS_CORE:
	case BFIN_MEM_ACCESS_CORE_ONLY:
		*val = *address;
		return true;
	case BFIN_MEM_ACCESS_DMA:
		dma_memcpy(val, address, 2);
		return true;
	case BFIN_MEM_ACCESS_ITEST:
		isram_memcpy(val, address, 2);
		return true;
	default: /* invalid access */
		return false;
	}
}
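/*
 * Minimal usage sketch (mirroring how the dump code below calls it): read
 * one 16-bit word and bail out if the address cannot safely be read, rather
 * than risk another fault while we are already dumping state.
 *
 *	unsigned short val;
 *
 *	if (!get_mem16(&val, (unsigned short *)addr))
 *		return;
 */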

bool get_instruction(unsigned int *val, unsigned short *address)
{
	unsigned long addr = (unsigned long)address;
	unsigned short opcode0, opcode1;

	/* Check for odd addresses */
	if (addr & 0x1)
		return false;

	/* MMR region will never have instructions */
	if (addr >= SYSMMR_BASE)
		return false;

	/* Scratchpad will never have instructions */
	if (addr >= L1_SCRATCH_START && addr < L1_SCRATCH_START + L1_SCRATCH_LENGTH)
		return false;

	/* Data banks will never have instructions */
	if (addr >= BOOT_ROM_START + BOOT_ROM_LENGTH && addr < L1_CODE_START)
		return false;

	if (!get_mem16(&opcode0, address))
		return false;

	/* was this a 32-bit instruction? If so, get the next 16 bits */
	if ((opcode0 & 0xc000) == 0xc000) {
		if (!get_mem16(&opcode1, address + 1))
			return false;
		*val = (opcode0 << 16) + opcode1;
	} else
		*val = opcode0;

	return true;
}
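/*
 * In other words (a summary of the logic above, not an ISA reference): the
 * first 16-bit word decides the size.  If its top two bits are both set
 * ((opcode0 & 0xc000) == 0xc000), the following word is fetched and *val is
 * packed as (opcode0 << 16) + opcode1; otherwise *val is just the 16-bit
 * opcode0.  Callers can therefore tell the two apart by checking whether
 * *val has any bits set above bit 15, as decode_opcode() does.
 */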

#if defined(CONFIG_DEBUG_BFIN_HWTRACE_ON)
/*
 * Decode the instruction if we are printing out the trace, as it makes
 * things easier to follow without running it through objdump.  Decode the
 * change-of-flow and the common load/store instructions, which are the main
 * cause of faults and discontinuities in the trace buffer.
 */

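/*
 * Each instruction group below follows the same pattern: a <group>_opcode
 * value identifying the group, plus <field>_bits / <field>_mask pairs giving
 * the shift and width of each field, so a field is always extracted as
 * ((opcode >> <field>_bits) & <field>_mask).
 */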
#define ProgCtrl_opcode		0x0000
#define ProgCtrl_poprnd_bits	0
#define ProgCtrl_poprnd_mask	0xf
#define ProgCtrl_prgfunc_bits	4
#define ProgCtrl_prgfunc_mask	0xf
#define ProgCtrl_code_bits	8
#define ProgCtrl_code_mask	0xff

static void decode_ProgCtrl_0(unsigned int opcode)
{
	int poprnd = ((opcode >> ProgCtrl_poprnd_bits) & ProgCtrl_poprnd_mask);
	int prgfunc = ((opcode >> ProgCtrl_prgfunc_bits) & ProgCtrl_prgfunc_mask);

	if (prgfunc == 0 && poprnd == 0)
		pr_cont("NOP");
	else if (prgfunc == 1 && poprnd == 0)
		pr_cont("RTS");
	else if (prgfunc == 1 && poprnd == 1)
		pr_cont("RTI");
	else if (prgfunc == 1 && poprnd == 2)
		pr_cont("RTX");
	else if (prgfunc == 1 && poprnd == 3)
		pr_cont("RTN");
	else if (prgfunc == 1 && poprnd == 4)
		pr_cont("RTE");
	else if (prgfunc == 2 && poprnd == 0)
		pr_cont("IDLE");
	else if (prgfunc == 2 && poprnd == 3)
		pr_cont("CSYNC");
	else if (prgfunc == 2 && poprnd == 4)
		pr_cont("SSYNC");
	else if (prgfunc == 2 && poprnd == 5)
		pr_cont("EMUEXCPT");
	else if (prgfunc == 3)
		pr_cont("CLI R%i", poprnd);
	else if (prgfunc == 4)
		pr_cont("STI R%i", poprnd);
	else if (prgfunc == 5)
		pr_cont("JUMP (P%i)", poprnd);
	else if (prgfunc == 6)
		pr_cont("CALL (P%i)", poprnd);
	else if (prgfunc == 7)
		pr_cont("CALL (PC + P%i)", poprnd);
	else if (prgfunc == 8)
		pr_cont("JUMP (PC + P%i)", poprnd);
	else if (prgfunc == 9)
		pr_cont("RAISE %i", poprnd);
	else if (prgfunc == 10)
		pr_cont("EXCPT %i", poprnd);
	else
		pr_cont("0x%04x", opcode);
}
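/*
 * Worked example (simply tracing the decoder above): for a raw opcode of
 * 0x0010, poprnd = (0x0010 >> 0) & 0xf = 0 and prgfunc = (0x0010 >> 4) & 0xf
 * = 1, so the "prgfunc == 1 && poprnd == 0" arm prints "RTS".
 */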

#define BRCC_opcode		0x1000
#define BRCC_offset_bits	0
#define BRCC_offset_mask	0x3ff
#define BRCC_B_bits		10
#define BRCC_B_mask		0x1
#define BRCC_T_bits		11
#define BRCC_T_mask		0x1
#define BRCC_code_bits		12
#define BRCC_code_mask		0xf

static void decode_BRCC_0(unsigned int opcode)
{
	int B = ((opcode >> BRCC_B_bits) & BRCC_B_mask);
	int T = ((opcode >> BRCC_T_bits) & BRCC_T_mask);

	pr_cont("IF %sCC JUMP pcrel %s", T ? "" : "!", B ? "(BP)" : "");
}

#define CALLa_opcode	0xe2000000
#define CALLa_addr_bits	0
#define CALLa_addr_mask	0xffffff
#define CALLa_S_bits	24
#define CALLa_S_mask	0x1
#define CALLa_code_bits	25
#define CALLa_code_mask	0x7f

static void decode_CALLa_0(unsigned int opcode)
{
	int S = ((opcode >> (CALLa_S_bits - 16)) & CALLa_S_mask);

	if (S)
		pr_cont("CALL pcrel");
	else
		pr_cont("JUMP.L");
}

#define LoopSetup_opcode	0xe0800000
#define LoopSetup_eoffset_bits	0
#define LoopSetup_eoffset_mask	0x3ff
#define LoopSetup_dontcare_bits	10
#define LoopSetup_dontcare_mask	0x3
#define LoopSetup_reg_bits	12
#define LoopSetup_reg_mask	0xf
#define LoopSetup_soffset_bits	16
#define LoopSetup_soffset_mask	0xf
#define LoopSetup_c_bits	20
#define LoopSetup_c_mask	0x1
#define LoopSetup_rop_bits	21
#define LoopSetup_rop_mask	0x3
#define LoopSetup_code_bits	23
#define LoopSetup_code_mask	0x1ff

static void decode_LoopSetup_0(unsigned int opcode)
{
	int c = ((opcode >> LoopSetup_c_bits) & LoopSetup_c_mask);
	int reg = ((opcode >> LoopSetup_reg_bits) & LoopSetup_reg_mask);
	int rop = ((opcode >> LoopSetup_rop_bits) & LoopSetup_rop_mask);

	pr_cont("LSETUP <> LC%i", c);
	if ((rop & 1) == 1)
		pr_cont("= P%i", reg);
	if ((rop & 2) == 2)
		pr_cont(" >> 0x1");
}

#define DspLDST_opcode		0x9c00
#define DspLDST_reg_bits	0
#define DspLDST_reg_mask	0x7
#define DspLDST_i_bits		3
#define DspLDST_i_mask		0x3
#define DspLDST_m_bits		5
#define DspLDST_m_mask		0x3
#define DspLDST_aop_bits	7
#define DspLDST_aop_mask	0x3
#define DspLDST_W_bits		9
#define DspLDST_W_mask		0x1
#define DspLDST_code_bits	10
#define DspLDST_code_mask	0x3f

static void decode_dspLDST_0(unsigned int opcode)
{
	int i = ((opcode >> DspLDST_i_bits) & DspLDST_i_mask);
	int m = ((opcode >> DspLDST_m_bits) & DspLDST_m_mask);
	int W = ((opcode >> DspLDST_W_bits) & DspLDST_W_mask);
	int aop = ((opcode >> DspLDST_aop_bits) & DspLDST_aop_mask);
	int reg = ((opcode >> DspLDST_reg_bits) & DspLDST_reg_mask);

	if (W == 0) {
		pr_cont("R%i", reg);
		switch (m) {
		case 0:
			pr_cont(" = ");
			break;
		case 1:
			pr_cont(".L = ");
			break;
		case 2:
			pr_cont(".W = ");
			break;
		}
	}

	pr_cont("[ I%i", i);

	switch (aop) {
	case 0:
		pr_cont("++ ]");
		break;
	case 1:
		pr_cont("-- ]");
		break;
	}

	if (W == 1) {
		pr_cont(" = R%i", reg);
		switch (m) {
		case 1:
			pr_cont(".L");
			break;
		case 2:
			pr_cont(".W");
			break;
		}
	}
}
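/*
 * Decoder-trace example (following the code above, not quoting the ISA
 * manual): an opcode of 0x9c20 has W = 0, aop = 0, m = 1, i = 0 and reg = 0,
 * so the output is "R0.L = [ I0++ ]".
 */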

#define LDST_opcode	0x9000
#define LDST_reg_bits	0
#define LDST_reg_mask	0x7
#define LDST_ptr_bits	3
#define LDST_ptr_mask	0x7
#define LDST_Z_bits	6
#define LDST_Z_mask	0x1
#define LDST_aop_bits	7
#define LDST_aop_mask	0x3
#define LDST_W_bits	9
#define LDST_W_mask	0x1
#define LDST_sz_bits	10
#define LDST_sz_mask	0x3
#define LDST_code_bits	12
#define LDST_code_mask	0xf

static void decode_LDST_0(unsigned int opcode)
{
	int Z = ((opcode >> LDST_Z_bits) & LDST_Z_mask);
	int W = ((opcode >> LDST_W_bits) & LDST_W_mask);
	int sz = ((opcode >> LDST_sz_bits) & LDST_sz_mask);
	int aop = ((opcode >> LDST_aop_bits) & LDST_aop_mask);
	int reg = ((opcode >> LDST_reg_bits) & LDST_reg_mask);
	int ptr = ((opcode >> LDST_ptr_bits) & LDST_ptr_mask);

	if (W == 0)
		pr_cont("%s%i = ", (sz == 0 && Z == 1) ? "P" : "R", reg);

	switch (sz) {
	case 1:
		pr_cont("W");
		break;
	case 2:
		pr_cont("B");
		break;
	}

	pr_cont("[P%i", ptr);

	switch (aop) {
	case 0:
		pr_cont("++");
		break;
	case 1:
		pr_cont("--");
		break;
	}
	pr_cont("]");

	if (W == 1)
		pr_cont(" = %s%i ", (sz == 0 && Z == 1) ? "P" : "R", reg);

	if (sz) {
		if (Z)
			pr_cont(" (X)");
		else
			pr_cont(" (Z)");
	}
}
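/*
 * Decoder-trace example (again just following the code above): an opcode of
 * 0x9040 has W = 0, sz = 0, aop = 0, Z = 1, ptr = 0 and reg = 0, so the
 * output is "P0 = [P0++]".
 */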

#define LDSTii_opcode		0xa000
#define LDSTii_reg_bit		0
#define LDSTii_reg_mask		0x7
#define LDSTii_ptr_bit		3
#define LDSTii_ptr_mask		0x7
#define LDSTii_offset_bit	6
#define LDSTii_offset_mask	0xf
#define LDSTii_op_bit		10
#define LDSTii_op_mask		0x3
#define LDSTii_W_bit		12
#define LDSTii_W_mask		0x1
#define LDSTii_code_bit		13
#define LDSTii_code_mask	0x7

static void decode_LDSTii_0(unsigned int opcode)
{
	int reg = ((opcode >> LDSTii_reg_bit) & LDSTii_reg_mask);
	int ptr = ((opcode >> LDSTii_ptr_bit) & LDSTii_ptr_mask);
	int offset = ((opcode >> LDSTii_offset_bit) & LDSTii_offset_mask);
	int op = ((opcode >> LDSTii_op_bit) & LDSTii_op_mask);
	int W = ((opcode >> LDSTii_W_bit) & LDSTii_W_mask);

	if (W == 0) {
		pr_cont("%s%i = %s[P%i + %i]", op == 3 ? "R" : "P", reg,
			op == 1 || op == 2 ? "" : "W", ptr, offset);
		if (op == 2)
			pr_cont("(Z)");
		if (op == 3)
			pr_cont("(X)");
	} else {
		pr_cont("%s[P%i + %i] = %s%i", op == 0 ? "" : "W", ptr,
			offset, op == 3 ? "P" : "R", reg);
	}
}

#define LDSTidxI_opcode		0xe4000000
#define LDSTidxI_offset_bits	0
#define LDSTidxI_offset_mask	0xffff
#define LDSTidxI_reg_bits	16
#define LDSTidxI_reg_mask	0x7
#define LDSTidxI_ptr_bits	19
#define LDSTidxI_ptr_mask	0x7
#define LDSTidxI_sz_bits	22
#define LDSTidxI_sz_mask	0x3
#define LDSTidxI_Z_bits		24
#define LDSTidxI_Z_mask		0x1
#define LDSTidxI_W_bits		25
#define LDSTidxI_W_mask		0x1
#define LDSTidxI_code_bits	26
#define LDSTidxI_code_mask	0x3f

static void decode_LDSTidxI_0(unsigned int opcode)
{
	int Z = ((opcode >> LDSTidxI_Z_bits) & LDSTidxI_Z_mask);
	int W = ((opcode >> LDSTidxI_W_bits) & LDSTidxI_W_mask);
	int sz = ((opcode >> LDSTidxI_sz_bits) & LDSTidxI_sz_mask);
	int reg = ((opcode >> LDSTidxI_reg_bits) & LDSTidxI_reg_mask);
	int ptr = ((opcode >> LDSTidxI_ptr_bits) & LDSTidxI_ptr_mask);
	int offset = ((opcode >> LDSTidxI_offset_bits) & LDSTidxI_offset_mask);

	if (W == 0)
		pr_cont("%s%i = ", sz == 0 && Z == 1 ? "P" : "R", reg);

	if (sz == 1)
		pr_cont("W");
	if (sz == 2)
		pr_cont("B");

	pr_cont("[P%i + %s0x%x]", ptr, offset & 0x20 ? "-" : "",
		(offset & 0x1f) << 2);

	if (W == 0 && sz != 0) {
		if (Z)
			pr_cont("(X)");
		else
			pr_cont("(Z)");
	}

	if (W == 1)
		pr_cont("= %s%i", (sz == 0 && Z == 1) ? "P" : "R", reg);
}

static void decode_opcode(unsigned int opcode)
{
#ifdef CONFIG_BUG
	if (opcode == BFIN_BUG_OPCODE)
		pr_cont("BUG");
	else
#endif
	if ((opcode & 0xffffff00) == ProgCtrl_opcode)
		decode_ProgCtrl_0(opcode);
	else if ((opcode & 0xfffff000) == BRCC_opcode)
		decode_BRCC_0(opcode);
	else if ((opcode & 0xfffff000) == 0x2000)
		pr_cont("JUMP.S");
	else if ((opcode & 0xfe000000) == CALLa_opcode)
		decode_CALLa_0(opcode);
	else if ((opcode & 0xff8000C0) == LoopSetup_opcode)
		decode_LoopSetup_0(opcode);
	else if ((opcode & 0xfffffc00) == DspLDST_opcode)
		decode_dspLDST_0(opcode);
	else if ((opcode & 0xfffff000) == LDST_opcode)
		decode_LDST_0(opcode);
	else if ((opcode & 0xffffe000) == LDSTii_opcode)
		decode_LDSTii_0(opcode);
	else if ((opcode & 0xfc000000) == LDSTidxI_opcode)
		decode_LDSTidxI_0(opcode);
	else if (opcode & 0xffff0000)
		pr_cont("0x%08x", opcode);
	else
		pr_cont("0x%04x", opcode);
}

#define BIT_MULTI_INS 0x08000000
static void decode_instruction(unsigned short *address)
{
	unsigned int opcode;

	if (!get_instruction(&opcode, address))
		return;

	decode_opcode(opcode);

	/* If this is a 32-bit instruction, it may be part of a multi-issue
	 * instruction (one 32-bit and two 16-bit instructions).  This test
	 * collides with the unlink instruction, so disallow that.
	 */
	if ((opcode & 0xc0000000) == 0xc0000000 &&
	    (opcode & BIT_MULTI_INS) &&
	    (opcode & 0xe8000000) != 0xe8000000) {
		pr_cont(" || ");
		if (!get_instruction(&opcode, address + 2))
			return;
		decode_opcode(opcode);
		pr_cont(" || ");
		if (!get_instruction(&opcode, address + 3))
			return;
		decode_opcode(opcode);
	}
}
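/*
 * As decoded above, a multi-issue line is printed as the 32-bit slot
 * followed by the two 16-bit slots ("insn32 || insn16 || insn16").  Since
 * address is a pointer to unsigned short, address + 2 and address + 3 are
 * the 16-bit words 4 and 6 bytes into the 8-byte packet.
 */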
#endif

void dump_bfin_trace_buffer(void)
{
#ifdef CONFIG_DEBUG_BFIN_HWTRACE_ON
	int tflags, i = 0, fault = 0;
	char buf[150];
	unsigned short *addr;
	unsigned int cpu = raw_smp_processor_id();
#ifdef CONFIG_DEBUG_BFIN_HWTRACE_EXPAND
	int j, index;
#endif

	trace_buffer_save(tflags);

	pr_notice("Hardware Trace:\n");

#ifdef CONFIG_DEBUG_BFIN_HWTRACE_EXPAND
	pr_notice("WARNING: Expanded trace turned on - can not trace exceptions\n");
#endif

	if (likely(bfin_read_TBUFSTAT() & TBUFCNT)) {
		for (; bfin_read_TBUFSTAT() & TBUFCNT; i++) {
			addr = (unsigned short *)bfin_read_TBUF();
			decode_address(buf, (unsigned long)addr);
			pr_notice("%4i Target : %s\n", i, buf);
			/* Normally, the faulting instruction doesn't go into
			 * the trace buffer (since it doesn't commit), so
			 * we print out the fault address here
			 */
			if (!fault && addr == ((unsigned short *)evt_ivhw)) {
				addr = (unsigned short *)bfin_read_TBUF();
				decode_address(buf, (unsigned long)addr);
				pr_notice(" FAULT : %s ", buf);
				decode_instruction(addr);
				pr_cont("\n");
				fault = 1;
				continue;
			}
			if (!fault && addr == (unsigned short *)trap &&
			    (cpu_pda[cpu].seqstat & SEQSTAT_EXCAUSE) > VEC_EXCPT15) {
				decode_address(buf, cpu_pda[cpu].icplb_fault_addr);
				pr_notice(" FAULT : %s ", buf);
				decode_instruction((unsigned short *)cpu_pda[cpu].icplb_fault_addr);
				pr_cont("\n");
				fault = 1;
			}
			addr = (unsigned short *)bfin_read_TBUF();
			decode_address(buf, (unsigned long)addr);
			pr_notice(" Source : %s ", buf);
			decode_instruction(addr);
			pr_cont("\n");
		}
	}

#ifdef CONFIG_DEBUG_BFIN_HWTRACE_EXPAND
	if (trace_buff_offset)
		index = trace_buff_offset / 4;
	else
		index = EXPAND_LEN;

	j = (1 << CONFIG_DEBUG_BFIN_HWTRACE_EXPAND_LEN) * 128;
	while (j) {
		decode_address(buf, software_trace_buff[index]);
		pr_notice("%4i Target : %s\n", i, buf);
		index -= 1;
		if (index < 0)
			index = EXPAND_LEN;
		decode_address(buf, software_trace_buff[index]);
		pr_notice(" Source : %s ", buf);
		decode_instruction((unsigned short *)software_trace_buff[index]);
		pr_cont("\n");
		index -= 1;
		if (index < 0)
			index = EXPAND_LEN;
		j--;
		i++;
	}
#endif

	trace_buffer_restore(tflags);
#endif
}
EXPORT_SYMBOL(dump_bfin_trace_buffer);
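/*
 * The resulting dump from dump_bfin_trace_buffer() is one
 * "Target : <decoded address>" line per trace entry followed by a
 * "Source : <decoded address> <decoded instruction>" line, with an extra
 * "FAULT :" line inserted once if the faulting address itself can be
 * determined (a description of the printks above, not a literal transcript).
 */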

void dump_bfin_process(struct pt_regs *fp)
{
	/* We should be able to look at fp->ipend, but we don't push it on the
	 * stack all the time, so do this until we fix that */
	unsigned int context = bfin_read_IPEND();

	if (oops_in_progress)
		pr_emerg("Kernel OOPS in progress\n");

	if (context & 0x0020 && (fp->seqstat & SEQSTAT_EXCAUSE) == VEC_HWERR)
		pr_notice("HW Error context\n");
	else if (context & 0x0020)
		pr_notice("Deferred Exception context\n");
	else if (context & 0x3FC0)
		pr_notice("Interrupt context\n");
	else if (context & 0x4000)
		pr_notice("Deferred Interrupt context\n");
	else if (context & 0x8000)
		pr_notice("Kernel process context\n");

	/* Because we are crashing, and pointers could be bad, we check things
	 * pretty closely before we use them
	 */
	if ((unsigned long)current >= FIXED_CODE_START &&
	    !((unsigned long)current & 0x3) && current->pid) {
		pr_notice("CURRENT PROCESS:\n");
		if (current->comm >= (char *)FIXED_CODE_START)
			pr_notice("COMM=%s PID=%d",
				current->comm, current->pid);
		else
			pr_notice("COMM= invalid");

		pr_cont(" CPU=%d\n", current_thread_info()->cpu);
		if (!((unsigned long)current->mm & 0x3) &&
		    (unsigned long)current->mm >= FIXED_CODE_START) {
			pr_notice("TEXT = 0x%p-0x%p DATA = 0x%p-0x%p\n",
				(void *)current->mm->start_code,
				(void *)current->mm->end_code,
				(void *)current->mm->start_data,
				(void *)current->mm->end_data);
			pr_notice(" BSS = 0x%p-0x%p USER-STACK = 0x%p\n\n",
				(void *)current->mm->end_data,
				(void *)current->mm->brk,
				(void *)current->mm->start_stack);
		} else
			pr_notice("invalid mm\n");
	} else
		pr_notice("No Valid process in current context\n");
}

void dump_bfin_mem(struct pt_regs *fp)
{
	unsigned short *addr, *erraddr, val = 0, err = 0;
	char sti = 0, buf[6];

	erraddr = (void *)fp->pc;

	pr_notice("return address: [0x%p]; contents of:", erraddr);

	for (addr = (unsigned short *)((unsigned long)erraddr & ~0xF) - 0x10;
	     addr < (unsigned short *)((unsigned long)erraddr & ~0xF) + 0x10;
	     addr++) {
		if (!((unsigned long)addr & 0xF))
			pr_notice("0x%p: ", addr);

		if (!get_mem16(&val, addr)) {
			val = 0;
			sprintf(buf, "????");
		} else
			sprintf(buf, "%04x", val);

		if (addr == erraddr) {
			pr_cont("[%s]", buf);
			err = val;
		} else
			pr_cont(" %s ", buf);

		/* Do any previous instructions turn on interrupts? */
		if (addr <= erraddr &&				/* in the past */
		    ((val >= 0x0040 && val <= 0x0047) ||	/* STI instruction */
		     val == 0x017b))				/* [SP++] = RETI */
			sti = 1;
	}

	pr_cont("\n");

	/* Hardware error interrupts can be deferred */
	if (unlikely(sti && (fp->seqstat & SEQSTAT_EXCAUSE) == VEC_HWERR &&
	    oops_in_progress)) {
		pr_notice("Looks like this was a deferred error - sorry\n");
#ifndef CONFIG_DEBUG_HWERR
		pr_notice("The remaining message may be meaningless\n");
		pr_notice("You should enable CONFIG_DEBUG_HWERR to get a better idea where it came from\n");
#else
		/* If we are handling only one peripheral interrupt, the
		 * current mm and pid are valid, and the last error was in
		 * that user space process's text area, print it out,
		 * because that is where the problem exists.
		 */
		if ((!(((fp)->ipend & ~0x30) & (((fp)->ipend & ~0x30) - 1))) &&
		    (current->pid && current->mm)) {
			/* And the last RETI points to the current userspace context */
			if ((fp + 1)->pc >= current->mm->start_code &&
			    (fp + 1)->pc <= current->mm->end_code) {
				pr_notice("It might be better to look around here :\n");
				pr_notice("-------------------------------------------\n");
				show_regs(fp + 1);
				pr_notice("-------------------------------------------\n");
			}
		}
#endif
	}
}
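/*
 * The loop above hexdumps the 64 bytes (0x20 16-bit words) around the
 * 16-byte-aligned faulting PC, printing the word at the return address in
 * square brackets and "????" for anything get_mem16() refuses to read.
 */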

void show_regs(struct pt_regs *fp)
{
	char buf[150];
	struct irqaction *action;
	unsigned int i;
	unsigned long flags = 0;
	unsigned int cpu = raw_smp_processor_id();
	unsigned char in_atomic = (bfin_read_IPEND() & 0x10) || in_atomic();

	pr_notice("\n");
	if (CPUID != bfin_cpuid())
		pr_notice("Compiled for cpu family 0x%04x (Rev %d), "
			"but running on:0x%04x (Rev %d)\n",
			CPUID, bfin_compiled_revid(), bfin_cpuid(), bfin_revid());

	pr_notice("ADSP-%s-0.%d",
		CPU, bfin_compiled_revid());

	if (bfin_compiled_revid() != bfin_revid())
		pr_cont("(Detected 0.%d)", bfin_revid());

	pr_cont(" %lu(MHz CCLK) %lu(MHz SCLK) (%s)\n",
		get_cclk()/1000000, get_sclk()/1000000,
#ifdef CONFIG_MPU
		"mpu on"
#else
		"mpu off"
#endif
		);

	pr_notice("%s", linux_banner);

	pr_notice("\nSEQUENCER STATUS:\t\t%s\n", print_tainted());
	pr_notice(" SEQSTAT: %08lx IPEND: %04lx IMASK: %04lx SYSCFG: %04lx\n",
		(long)fp->seqstat, fp->ipend, cpu_pda[raw_smp_processor_id()].ex_imask, fp->syscfg);
	if (fp->ipend & EVT_IRPTEN)
		pr_notice(" Global Interrupts Disabled (IPEND[4])\n");
	if (!(cpu_pda[raw_smp_processor_id()].ex_imask & (EVT_IVG13 | EVT_IVG12 | EVT_IVG11 |
	      EVT_IVG10 | EVT_IVG9 | EVT_IVG8 | EVT_IVG7 | EVT_IVTMR)))
		pr_notice(" Peripheral interrupts masked off\n");
	if (!(cpu_pda[raw_smp_processor_id()].ex_imask & (EVT_IVG15 | EVT_IVG14)))
		pr_notice(" Kernel interrupts masked off\n");
	if ((fp->seqstat & SEQSTAT_EXCAUSE) == VEC_HWERR) {
		pr_notice(" HWERRCAUSE: 0x%lx\n",
			(fp->seqstat & SEQSTAT_HWERRCAUSE) >> 14);
#ifdef EBIU_ERRMST
		/* If the error was from the EBIU, print it out */
		if (bfin_read_EBIU_ERRMST() & CORE_ERROR) {
			pr_notice(" EBIU Error Reason : 0x%04x\n",
				bfin_read_EBIU_ERRMST());
			pr_notice(" EBIU Error Address : 0x%08x\n",
				bfin_read_EBIU_ERRADD());
		}
#endif
	}
	pr_notice(" EXCAUSE : 0x%lx\n",
		fp->seqstat & SEQSTAT_EXCAUSE);
	for (i = 2; i <= 15; i++) {
		if (fp->ipend & (1 << i)) {
			if (i != 4) {
				decode_address(buf, bfin_read32(EVT0 + 4*i));
				pr_notice(" physical IVG%i asserted : %s\n", i, buf);
			} else
				pr_notice(" interrupts disabled\n");
		}
	}

	/* if no interrupts are going off, don't print this out */
	if (fp->ipend & ~0x3F) {
		for (i = 0; i < (NR_IRQS - 1); i++) {
			struct irq_desc *desc = irq_to_desc(i);
			if (!in_atomic)
				raw_spin_lock_irqsave(&desc->lock, flags);

			action = desc->action;
			if (!action)
				goto unlock;

			decode_address(buf, (unsigned int)action->handler);
			pr_notice(" logical irq %3d mapped : %s", i, buf);
			for (action = action->next; action; action = action->next) {
				decode_address(buf, (unsigned int)action->handler);
				pr_cont(", %s", buf);
			}
			pr_cont("\n");
unlock:
			if (!in_atomic)
				raw_spin_unlock_irqrestore(&desc->lock, flags);
		}
	}

	decode_address(buf, fp->rete);
	pr_notice(" RETE: %s\n", buf);
	decode_address(buf, fp->retn);
	pr_notice(" RETN: %s\n", buf);
	decode_address(buf, fp->retx);
	pr_notice(" RETX: %s\n", buf);
	decode_address(buf, fp->rets);
	pr_notice(" RETS: %s\n", buf);
	decode_address(buf, fp->pc);
	pr_notice(" PC : %s\n", buf);

	if (((long)fp->seqstat & SEQSTAT_EXCAUSE) &&
	    (((long)fp->seqstat & SEQSTAT_EXCAUSE) != VEC_HWERR)) {
		decode_address(buf, cpu_pda[cpu].dcplb_fault_addr);
		pr_notice("DCPLB_FAULT_ADDR: %s\n", buf);
		decode_address(buf, cpu_pda[cpu].icplb_fault_addr);
		pr_notice("ICPLB_FAULT_ADDR: %s\n", buf);
	}

	pr_notice("PROCESSOR STATE:\n");
	pr_notice(" R0 : %08lx R1 : %08lx R2 : %08lx R3 : %08lx\n",
		fp->r0, fp->r1, fp->r2, fp->r3);
	pr_notice(" R4 : %08lx R5 : %08lx R6 : %08lx R7 : %08lx\n",
		fp->r4, fp->r5, fp->r6, fp->r7);
	pr_notice(" P0 : %08lx P1 : %08lx P2 : %08lx P3 : %08lx\n",
		fp->p0, fp->p1, fp->p2, fp->p3);
	pr_notice(" P4 : %08lx P5 : %08lx FP : %08lx SP : %08lx\n",
		fp->p4, fp->p5, fp->fp, (long)fp);
	pr_notice(" LB0: %08lx LT0: %08lx LC0: %08lx\n",
		fp->lb0, fp->lt0, fp->lc0);
	pr_notice(" LB1: %08lx LT1: %08lx LC1: %08lx\n",
		fp->lb1, fp->lt1, fp->lc1);
	pr_notice(" B0 : %08lx L0 : %08lx M0 : %08lx I0 : %08lx\n",
		fp->b0, fp->l0, fp->m0, fp->i0);
	pr_notice(" B1 : %08lx L1 : %08lx M1 : %08lx I1 : %08lx\n",
		fp->b1, fp->l1, fp->m1, fp->i1);
	pr_notice(" B2 : %08lx L2 : %08lx M2 : %08lx I2 : %08lx\n",
		fp->b2, fp->l2, fp->m2, fp->i2);
	pr_notice(" B3 : %08lx L3 : %08lx M3 : %08lx I3 : %08lx\n",
		fp->b3, fp->l3, fp->m3, fp->i3);
	pr_notice("A0.w: %08lx A0.x: %08lx A1.w: %08lx A1.x: %08lx\n",
		fp->a0w, fp->a0x, fp->a1w, fp->a1x);

	pr_notice("USP : %08lx ASTAT: %08lx\n",
		rdusp(), fp->astat);

	pr_notice("\n");
}