1 /*
2 * Copyright (C) 1999-2003 Hewlett-Packard Co
3 * David Mosberger-Tang <davidm@hpl.hp.com>
4 * Copyright (C) 2003 Fenghua Yu <fenghua.yu@intel.com>
5 * - Change pt_regs_off() to make it less dependent on pt_regs structure.
6 */
7 /*
8 * This file implements call frame unwind support for the Linux
9 * kernel. Parsing and processing the unwind information is
10 * time-consuming, so this implementation translates the unwind
11 * descriptors into unwind scripts. These scripts are very simple
12 * (basically a sequence of assignments) and efficient to execute.
13 * They are cached for later re-use. Each script is specific for a
14 * given instruction pointer address and the set of predicate values
15 * that the script depends on (most unwind descriptors are
16 * unconditional and scripts often do not depend on predicates at
17 * all). This code is based on the unwind conventions described in
18 * the "IA-64 Software Conventions and Runtime Architecture" manual.
19 *
20 * SMP conventions:
21 * o updates to the global unwind data (in structure "unw") are serialized
22 * by the unw.lock spinlock
23 * o each unwind script has its own read-write lock; a thread must acquire
24 * a read lock before executing a script and must acquire a write lock
25 * before modifying a script
26 * o if both the unw.lock spinlock and a script's read-write lock must be
27 * acquired, then the read-write lock must be acquired first.
28 */
29 #include <linux/module.h>
30 #include <linux/bootmem.h>
31 #include <linux/elf.h>
32 #include <linux/kernel.h>
33 #include <linux/sched.h>
34 #include <linux/slab.h>
35
36 #include <asm/unwind.h>
37
38 #include <asm/delay.h>
39 #include <asm/page.h>
40 #include <asm/ptrace.h>
41 #include <asm/ptrace_offsets.h>
42 #include <asm/rse.h>
43 #include <asm/system.h>
44 #include <asm/uaccess.h>
45
46 #include "entry.h"
47 #include "unwind_i.h"
48
49 #define MIN(a,b) ((a) < (b) ? (a) : (b))
50 #define p5 5
51 #define PRED_USER_STACK pUser
52 #define p3 3 /* for pUser */
53
54 #define UNW_LOG_CACHE_SIZE 7 /* each unw_script is ~256 bytes in size */
55 #define UNW_CACHE_SIZE (1 << UNW_LOG_CACHE_SIZE)
56
57 #define UNW_LOG_HASH_SIZE (UNW_LOG_CACHE_SIZE + 1)
58 #define UNW_HASH_SIZE (1 << UNW_LOG_HASH_SIZE)
59
60 #define UNW_STATS 0 /* WARNING: this disables interrupts for long time-spans!! */
61
62 #ifdef UNW_DEBUG
63 static unsigned int unw_debug_level = UNW_DEBUG;
64 # define UNW_DEBUG_ON(n) unw_debug_level >= n
65 /* Do not code a printk level, not all debug lines end in newline */
66 # define UNW_DPRINT(n, ...) if (UNW_DEBUG_ON(n)) printk(__VA_ARGS__)
67 # define inline
68 #else /* !UNW_DEBUG */
69 # define UNW_DEBUG_ON(n) 0
70 # define UNW_DPRINT(n, ...)
71 #endif /* UNW_DEBUG */
72
73 #if UNW_STATS
74 # define STAT(x...) x
75 #else
76 # define STAT(x...)
77 #endif
78
79 #define alloc_reg_state() kmalloc(sizeof(struct unw_reg_state), GFP_ATOMIC)
80 #define free_reg_state(usr) kfree(usr)
81 #define alloc_labeled_state() kmalloc(sizeof(struct unw_labeled_state), GFP_ATOMIC)
82 #define free_labeled_state(usr) kfree(usr)
83
84 typedef unsigned long unw_word;
85 typedef unsigned char unw_hash_index_t;
86
87 static struct {
88 spinlock_t lock; /* spinlock for unwind data */
89
90 /* list of unwind tables (one per load-module) */
91 struct unw_table *tables;
92
93 /* table of registers that prologues can save (and order in which they're saved): */
94 const unsigned char save_order[8];
95
96 /* maps a preserved register index (preg_index) to corresponding switch_stack offset: */
97 unsigned short sw_off[sizeof(struct unw_frame_info) / 8];
98
99 unsigned short lru_head; /* index of least-recently used script */
100 unsigned short lru_tail; /* index of most-recently used script */
101
102 /* index into unw_frame_info for preserved register i */
103 unsigned short preg_index[UNW_NUM_REGS];
104
105 short pt_regs_offsets[32];
106
107 /* unwind table for the kernel: */
108 struct unw_table kernel_table;
109
110 /* unwind table describing the gate page (kernel code that is mapped into user space): */
111 size_t gate_table_size;
112 unsigned long *gate_table;
113
114 /* hash table that maps instruction pointer to script index: */
115 unsigned short hash[UNW_HASH_SIZE];
116
117 /* script cache: */
118 struct unw_script cache[UNW_CACHE_SIZE];
119
120 # ifdef UNW_DEBUG
121 const char *preg_name[UNW_NUM_REGS];
122 # endif
123 # if UNW_STATS
124 struct {
125 struct {
126 int lookups;
127 int hinted_hits;
128 int normal_hits;
129 int collision_chain_traversals;
130 } cache;
131 struct {
132 unsigned long build_time;
133 unsigned long run_time;
134 unsigned long parse_time;
135 int builds;
136 int news;
137 int collisions;
138 int runs;
139 } script;
140 struct {
141 unsigned long init_time;
142 unsigned long unwind_time;
143 int inits;
144 int unwinds;
145 } api;
146 } stat;
147 # endif
148 } unw = {
149 .tables = &unw.kernel_table,
150 .lock = SPIN_LOCK_UNLOCKED,
151 .save_order = {
152 UNW_REG_RP, UNW_REG_PFS, UNW_REG_PSP, UNW_REG_PR,
153 UNW_REG_UNAT, UNW_REG_LC, UNW_REG_FPSR, UNW_REG_PRI_UNAT_GR
154 },
155 .preg_index = {
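/* Indices are in 8-byte words: run_script() below treats struct unw_frame_info as a flat array of unsigned longs. */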
156 offsetof(struct unw_frame_info, pri_unat_loc)/8, /* PRI_UNAT_GR */
157 offsetof(struct unw_frame_info, pri_unat_loc)/8, /* PRI_UNAT_MEM */
158 offsetof(struct unw_frame_info, bsp_loc)/8,
159 offsetof(struct unw_frame_info, bspstore_loc)/8,
160 offsetof(struct unw_frame_info, pfs_loc)/8,
161 offsetof(struct unw_frame_info, rnat_loc)/8,
162 offsetof(struct unw_frame_info, psp)/8,
163 offsetof(struct unw_frame_info, rp_loc)/8,
164 offsetof(struct unw_frame_info, r4)/8,
165 offsetof(struct unw_frame_info, r5)/8,
166 offsetof(struct unw_frame_info, r6)/8,
167 offsetof(struct unw_frame_info, r7)/8,
168 offsetof(struct unw_frame_info, unat_loc)/8,
169 offsetof(struct unw_frame_info, pr_loc)/8,
170 offsetof(struct unw_frame_info, lc_loc)/8,
171 offsetof(struct unw_frame_info, fpsr_loc)/8,
172 offsetof(struct unw_frame_info, b1_loc)/8,
173 offsetof(struct unw_frame_info, b2_loc)/8,
174 offsetof(struct unw_frame_info, b3_loc)/8,
175 offsetof(struct unw_frame_info, b4_loc)/8,
176 offsetof(struct unw_frame_info, b5_loc)/8,
177 offsetof(struct unw_frame_info, f2_loc)/8,
178 offsetof(struct unw_frame_info, f3_loc)/8,
179 offsetof(struct unw_frame_info, f4_loc)/8,
180 offsetof(struct unw_frame_info, f5_loc)/8,
181 offsetof(struct unw_frame_info, fr_loc[16 - 16])/8,
182 offsetof(struct unw_frame_info, fr_loc[17 - 16])/8,
183 offsetof(struct unw_frame_info, fr_loc[18 - 16])/8,
184 offsetof(struct unw_frame_info, fr_loc[19 - 16])/8,
185 offsetof(struct unw_frame_info, fr_loc[20 - 16])/8,
186 offsetof(struct unw_frame_info, fr_loc[21 - 16])/8,
187 offsetof(struct unw_frame_info, fr_loc[22 - 16])/8,
188 offsetof(struct unw_frame_info, fr_loc[23 - 16])/8,
189 offsetof(struct unw_frame_info, fr_loc[24 - 16])/8,
190 offsetof(struct unw_frame_info, fr_loc[25 - 16])/8,
191 offsetof(struct unw_frame_info, fr_loc[26 - 16])/8,
192 offsetof(struct unw_frame_info, fr_loc[27 - 16])/8,
193 offsetof(struct unw_frame_info, fr_loc[28 - 16])/8,
194 offsetof(struct unw_frame_info, fr_loc[29 - 16])/8,
195 offsetof(struct unw_frame_info, fr_loc[30 - 16])/8,
196 offsetof(struct unw_frame_info, fr_loc[31 - 16])/8,
197 },
198 .pt_regs_offsets = {
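/* -1 marks registers with no pt_regs slot: r0 is hardwired to zero and r4-r7 are preserved registers kept in switch_stack. */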
199 [0] = -1,
200 offsetof(struct pt_regs, r1),
201 offsetof(struct pt_regs, r2),
202 offsetof(struct pt_regs, r3),
203 [4] = -1, [5] = -1, [6] = -1, [7] = -1,
204 offsetof(struct pt_regs, r8),
205 offsetof(struct pt_regs, r9),
206 offsetof(struct pt_regs, r10),
207 offsetof(struct pt_regs, r11),
208 offsetof(struct pt_regs, r12),
209 offsetof(struct pt_regs, r13),
210 offsetof(struct pt_regs, r14),
211 offsetof(struct pt_regs, r15),
212 offsetof(struct pt_regs, r16),
213 offsetof(struct pt_regs, r17),
214 offsetof(struct pt_regs, r18),
215 offsetof(struct pt_regs, r19),
216 offsetof(struct pt_regs, r20),
217 offsetof(struct pt_regs, r21),
218 offsetof(struct pt_regs, r22),
219 offsetof(struct pt_regs, r23),
220 offsetof(struct pt_regs, r24),
221 offsetof(struct pt_regs, r25),
222 offsetof(struct pt_regs, r26),
223 offsetof(struct pt_regs, r27),
224 offsetof(struct pt_regs, r28),
225 offsetof(struct pt_regs, r29),
226 offsetof(struct pt_regs, r30),
227 offsetof(struct pt_regs, r31),
228 },
229 .hash = { [0 ... UNW_HASH_SIZE - 1] = -1 },
230 #ifdef UNW_DEBUG
231 .preg_name = {
232 "pri_unat_gr", "pri_unat_mem", "bsp", "bspstore", "ar.pfs", "ar.rnat", "psp", "rp",
233 "r4", "r5", "r6", "r7",
234 "ar.unat", "pr", "ar.lc", "ar.fpsr",
235 "b1", "b2", "b3", "b4", "b5",
236 "f2", "f3", "f4", "f5",
237 "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23",
238 "f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31"
239 }
240 #endif
241 };
242
243 /* Unwind accessors. */
244
245 /*
246 * Returns offset of rREG in struct pt_regs.
247 */
248 static inline unsigned long
249 pt_regs_off (unsigned long reg)
250 {
251 short off = -1;
252
253 if (reg < ARRAY_SIZE(unw.pt_regs_offsets))
254 off = unw.pt_regs_offsets[reg];
255
256 if (off < 0) {
257 UNW_DPRINT(0, "unwind.%s: bad scratch reg r%lu\n", __FUNCTION__, reg);
258 off = 0;
259 }
260 return (unsigned long) off;
261 }
262
263 static inline struct pt_regs *
264 get_scratch_regs (struct unw_frame_info *info)
265 {
266 if (!info->pt) {
267 /* This should not happen with valid unwind info. */
268 UNW_DPRINT(0, "unwind.%s: bad unwind info: resetting info->pt\n", __FUNCTION__);
269 if (info->flags & UNW_FLAG_INTERRUPT_FRAME)
270 info->pt = (unsigned long) ((struct pt_regs *) info->psp - 1);
271 else
272 info->pt = info->sp - 16;
273 }
274 UNW_DPRINT(3, "unwind.%s: sp 0x%lx pt 0x%lx\n", __FUNCTION__, info->sp, info->pt);
275 return (struct pt_regs *) info->pt;
276 }
277
278 int
279 unw_access_gr (struct unw_frame_info *info, int regnum, unsigned long *val, char *nat, int write)
280 {
281 unsigned long *addr, *nat_addr, nat_mask = 0, dummy_nat;
282 struct unw_ireg *ireg;
283 struct pt_regs *pt;
284
285 if ((unsigned) regnum - 1 >= 127) {
286 if (regnum == 0 && !write) {
287 *val = 0; /* read r0 always returns 0 */
288 *nat = 0;
289 return 0;
290 }
291 UNW_DPRINT(0, "unwind.%s: trying to access non-existent r%u\n",
292 __FUNCTION__, regnum);
293 return -1;
294 }
295
296 if (regnum < 32) {
297 if (regnum >= 4 && regnum <= 7) {
298 /* access a preserved register */
299 ireg = &info->r4 + (regnum - 4);
300 addr = ireg->loc;
301 if (addr) {
302 nat_addr = addr + ireg->nat.off;
303 switch (ireg->nat.type) {
304 case UNW_NAT_VAL:
305 /* simulate getf.sig/setf.sig */
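/* NaTVal is represented as significand 0 with exponent 0x1fffe; 0x1003e is the biased exponent setf.sig produces for an ordinary integer. */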
306 if (write) {
307 if (*nat) {
308 /* write NaTVal and be done with it */
309 addr[0] = 0;
310 addr[1] = 0x1fffe;
311 return 0;
312 }
313 addr[1] = 0x1003e;
314 } else {
315 if (addr[0] == 0 && addr[1] == 0x1fffe) {
316 /* return NaT and be done with it */
317 *val = 0;
318 *nat = 1;
319 return 0;
320 }
321 }
322 /* fall through */
323 case UNW_NAT_NONE:
324 dummy_nat = 0;
325 nat_addr = &dummy_nat;
326 break;
327
328 case UNW_NAT_MEMSTK:
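/* For a memory spill, the UNaT bit is selected by bits 3..8 of the spill address, i.e. one bit per 8-byte slot within a 512-byte-aligned region. */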
329 nat_mask = (1UL << ((long) addr & 0x1f8)/8);
330 break;
331
332 case UNW_NAT_REGSTK:
333 nat_addr = ia64_rse_rnat_addr(addr);
334 if ((unsigned long) addr < info->regstk.limit
335 || (unsigned long) addr >= info->regstk.top)
336 {
337 UNW_DPRINT(0, "unwind.%s: %p outside of regstk "
338 "[0x%lx-0x%lx)\n",
339 __FUNCTION__, (void *) addr,
340 info->regstk.limit,
341 info->regstk.top);
342 return -1;
343 }
344 if ((unsigned long) nat_addr >= info->regstk.top)
345 nat_addr = &info->sw->ar_rnat;
346 nat_mask = (1UL << ia64_rse_slot_num(addr));
347 break;
348 }
349 } else {
350 addr = &info->sw->r4 + (regnum - 4);
351 nat_addr = &info->sw->ar_unat;
352 nat_mask = (1UL << ((long) addr & 0x1f8)/8);
353 }
354 } else {
355 /* access a scratch register */
356 pt = get_scratch_regs(info);
357 addr = (unsigned long *) ((unsigned long)pt + pt_regs_off(regnum));
358 if (info->pri_unat_loc)
359 nat_addr = info->pri_unat_loc;
360 else
361 nat_addr = &info->sw->ar_unat;
362 nat_mask = (1UL << ((long) addr & 0x1f8)/8);
363 }
364 } else {
365 /* access a stacked register */
366 addr = ia64_rse_skip_regs((unsigned long *) info->bsp, regnum - 32);
367 nat_addr = ia64_rse_rnat_addr(addr);
368 if ((unsigned long) addr < info->regstk.limit
369 || (unsigned long) addr >= info->regstk.top)
370 {
371 UNW_DPRINT(0, "unwind.%s: ignoring attempt to access register outside "
372 "of rbs\n", __FUNCTION__);
373 return -1;
374 }
375 if ((unsigned long) nat_addr >= info->regstk.top)
376 nat_addr = &info->sw->ar_rnat;
377 nat_mask = (1UL << ia64_rse_slot_num(addr));
378 }
379
380 if (write) {
381 *addr = *val;
382 if (*nat)
383 *nat_addr |= nat_mask;
384 else
385 *nat_addr &= ~nat_mask;
386 } else {
387 if ((*nat_addr & nat_mask) == 0) {
388 *val = *addr;
389 *nat = 0;
390 } else {
391 *val = 0; /* if register is a NaT, *addr may contain kernel data! */
392 *nat = 1;
393 }
394 }
395 return 0;
396 }
397 EXPORT_SYMBOL(unw_access_gr);
398
399 int
400 unw_access_br (struct unw_frame_info *info, int regnum, unsigned long *val, int write)
401 {
402 unsigned long *addr;
403 struct pt_regs *pt;
404
405 switch (regnum) {
406 /* scratch: */
407 case 0: pt = get_scratch_regs(info); addr = &pt->b0; break;
408 case 6: pt = get_scratch_regs(info); addr = &pt->b6; break;
409 case 7: pt = get_scratch_regs(info); addr = &pt->b7; break;
410
411 /* preserved: */
412 case 1: case 2: case 3: case 4: case 5:
413 addr = *(&info->b1_loc + (regnum - 1));
414 if (!addr)
415 addr = &info->sw->b1 + (regnum - 1);
416 break;
417
418 default:
419 UNW_DPRINT(0, "unwind.%s: trying to access non-existent b%u\n",
420 __FUNCTION__, regnum);
421 return -1;
422 }
423 if (write)
424 *addr = *val;
425 else
426 *val = *addr;
427 return 0;
428 }
429 EXPORT_SYMBOL(unw_access_br);
430
431 int
432 unw_access_fr (struct unw_frame_info *info, int regnum, struct ia64_fpreg *val, int write)
433 {
434 struct ia64_fpreg *addr = 0;
435 struct pt_regs *pt;
436
437 if ((unsigned) (regnum - 2) >= 126) {
438 UNW_DPRINT(0, "unwind.%s: trying to access non-existent f%u\n",
439 __FUNCTION__, regnum);
440 return -1;
441 }
442
443 if (regnum <= 5) {
444 addr = *(&info->f2_loc + (regnum - 2));
445 if (!addr)
446 addr = &info->sw->f2 + (regnum - 2);
447 } else if (regnum <= 15) {
448 if (regnum <= 11) {
449 pt = get_scratch_regs(info);
450 addr = &pt->f6 + (regnum - 6);
451 }
452 else
453 addr = &info->sw->f12 + (regnum - 12);
454 } else if (regnum <= 31) {
455 addr = info->fr_loc[regnum - 16];
456 if (!addr)
457 addr = &info->sw->f16 + (regnum - 16);
458 } else {
459 struct task_struct *t = info->task;
460
461 if (write)
462 ia64_sync_fph(t);
463 else
464 ia64_flush_fph(t);
465 addr = t->thread.fph + (regnum - 32);
466 }
467
468 if (write)
469 *addr = *val;
470 else
471 *val = *addr;
472 return 0;
473 }
474 EXPORT_SYMBOL(unw_access_fr);
475
476 int
477 unw_access_ar (struct unw_frame_info *info, int regnum, unsigned long *val, int write)
478 {
479 unsigned long *addr;
480 struct pt_regs *pt;
481
482 switch (regnum) {
483 case UNW_AR_BSP:
484 addr = info->bsp_loc;
485 if (!addr)
486 addr = &info->sw->ar_bspstore;
487 break;
488
489 case UNW_AR_BSPSTORE:
490 addr = info->bspstore_loc;
491 if (!addr)
492 addr = &info->sw->ar_bspstore;
493 break;
494
495 case UNW_AR_PFS:
496 addr = info->pfs_loc;
497 if (!addr)
498 addr = &info->sw->ar_pfs;
499 break;
500
501 case UNW_AR_RNAT:
502 addr = info->rnat_loc;
503 if (!addr)
504 addr = &info->sw->ar_rnat;
505 break;
506
507 case UNW_AR_UNAT:
508 addr = info->unat_loc;
509 if (!addr)
510 addr = &info->sw->ar_unat;
511 break;
512
513 case UNW_AR_LC:
514 addr = info->lc_loc;
515 if (!addr)
516 addr = &info->sw->ar_lc;
517 break;
518
519 case UNW_AR_EC:
520 if (!info->cfm_loc)
521 return -1;
522 if (write)
523 *info->cfm_loc =
524 (*info->cfm_loc & ~(0x3fUL << 52)) | ((*val & 0x3f) << 52);
525 else
526 *val = (*info->cfm_loc >> 52) & 0x3f;
527 return 0;
528
529 case UNW_AR_FPSR:
530 addr = info->fpsr_loc;
531 if (!addr)
532 addr = &info->sw->ar_fpsr;
533 break;
534
535 case UNW_AR_RSC:
536 pt = get_scratch_regs(info);
537 addr = &pt->ar_rsc;
538 break;
539
540 case UNW_AR_CCV:
541 pt = get_scratch_regs(info);
542 addr = &pt->ar_ccv;
543 break;
544
545 case UNW_AR_CSD:
546 pt = get_scratch_regs(info);
547 addr = &pt->ar_csd;
548 break;
549
550 case UNW_AR_SSD:
551 pt = get_scratch_regs(info);
552 addr = &pt->ar_ssd;
553 break;
554
555 default:
556 UNW_DPRINT(0, "unwind.%s: trying to access non-existent ar%u\n",
557 __FUNCTION__, regnum);
558 return -1;
559 }
560
561 if (write)
562 *addr = *val;
563 else
564 *val = *addr;
565 return 0;
566 }
567 EXPORT_SYMBOL(unw_access_ar);
568
569 int
570 unw_access_pr (struct unw_frame_info *info, unsigned long *val, int write)
571 {
572 unsigned long *addr;
573
574 addr = info->pr_loc;
575 if (!addr)
576 addr = &info->sw->pr;
577
578 if (write)
579 *addr = *val;
580 else
581 *val = *addr;
582 return 0;
583 }
584 EXPORT_SYMBOL(unw_access_pr);
585
586
587 /* Routines to manipulate the state stack. */
588
589 static inline void
590 push (struct unw_state_record *sr)
591 {
592 struct unw_reg_state *rs;
593
594 rs = alloc_reg_state();
595 if (!rs) {
596 printk(KERN_ERR "unwind: cannot stack reg state!\n");
597 return;
598 }
599 memcpy(rs, &sr->curr, sizeof(*rs));
600 sr->curr.next = rs;
601 }
602
603 static void
604 pop (struct unw_state_record *sr)
605 {
606 struct unw_reg_state *rs = sr->curr.next;
607
608 if (!rs) {
609 printk(KERN_ERR "unwind: stack underflow!\n");
610 return;
611 }
612 memcpy(&sr->curr, rs, sizeof(*rs));
613 free_reg_state(rs);
614 }
615
616 /* Make a copy of the state stack. Non-recursive to avoid stack overflows. */
617 static struct unw_reg_state *
618 dup_state_stack (struct unw_reg_state *rs)
619 {
620 struct unw_reg_state *copy, *prev = NULL, *first = NULL;
621
622 while (rs) {
623 copy = alloc_reg_state();
624 if (!copy) {
625 printk(KERN_ERR "unwind.dup_state_stack: out of memory\n");
626 return NULL;
627 }
628 memcpy(copy, rs, sizeof(*copy));
629 if (first)
630 prev->next = copy;
631 else
632 first = copy;
633 rs = rs->next;
634 prev = copy;
635 }
636 return first;
637 }
638
639 /* Free all stacked register states (but not RS itself). */
640 static void
641 free_state_stack (struct unw_reg_state *rs)
642 {
643 struct unw_reg_state *p, *next;
644
645 for (p = rs->next; p != NULL; p = next) {
646 next = p->next;
647 free_reg_state(p);
648 }
649 rs->next = NULL;
650 }
651
652 /* Unwind decoder routines */
653
654 static enum unw_register_index __attribute__((const))
655 decode_abreg (unsigned char abreg, int memory)
656 {
657 switch (abreg) {
658 case 0x04 ... 0x07: return UNW_REG_R4 + (abreg - 0x04);
659 case 0x22 ... 0x25: return UNW_REG_F2 + (abreg - 0x22);
660 case 0x30 ... 0x3f: return UNW_REG_F16 + (abreg - 0x30);
661 case 0x41 ... 0x45: return UNW_REG_B1 + (abreg - 0x41);
662 case 0x60: return UNW_REG_PR;
663 case 0x61: return UNW_REG_PSP;
664 case 0x62: return memory ? UNW_REG_PRI_UNAT_MEM : UNW_REG_PRI_UNAT_GR;
665 case 0x63: return UNW_REG_RP;
666 case 0x64: return UNW_REG_BSP;
667 case 0x65: return UNW_REG_BSPSTORE;
668 case 0x66: return UNW_REG_RNAT;
669 case 0x67: return UNW_REG_UNAT;
670 case 0x68: return UNW_REG_FPSR;
671 case 0x69: return UNW_REG_PFS;
672 case 0x6a: return UNW_REG_LC;
673 default:
674 break;
675 }
676 UNW_DPRINT(0, "unwind.%s: bad abreg=0x%x\n", __FUNCTION__, abreg);
677 return UNW_REG_LC;
678 }
679
680 static void
681 set_reg (struct unw_reg_info *reg, enum unw_where where, int when, unsigned long val)
682 {
683 reg->val = val;
684 reg->where = where;
685 if (reg->when == UNW_WHEN_NEVER)
686 reg->when = when;
687 }
688
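/* Assign psp-relative spill slots to every register in [lo..hi] that was spilled to its "spill home": slots are allocated downwards from *offp, highest-numbered register first. */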
689 static void
690 alloc_spill_area (unsigned long *offp, unsigned long regsize,
691 struct unw_reg_info *lo, struct unw_reg_info *hi)
692 {
693 struct unw_reg_info *reg;
694
695 for (reg = hi; reg >= lo; --reg) {
696 if (reg->where == UNW_WHERE_SPILL_HOME) {
697 reg->where = UNW_WHERE_PSPREL;
698 *offp -= regsize;
699 reg->val = *offp;
700 }
701 }
702 }
703
704 static inline void
705 spill_next_when (struct unw_reg_info **regp, struct unw_reg_info *lim, unw_word t)
706 {
707 struct unw_reg_info *reg;
708
709 for (reg = *regp; reg <= lim; ++reg) {
710 if (reg->where == UNW_WHERE_SPILL_HOME) {
711 reg->when = t;
712 *regp = reg + 1;
713 return;
714 }
715 }
716 UNW_DPRINT(0, "unwind.%s: excess spill!\n", __FUNCTION__);
717 }
718
719 static inline void
720 finish_prologue (struct unw_state_record *sr)
721 {
722 struct unw_reg_info *reg;
723 unsigned long off;
724 int i;
725
726 /*
727 * First, resolve implicit register save locations (see Section "11.4.2.3 Rules
728 * for Using Unwind Descriptors", rule 3):
729 */
730 for (i = 0; i < (int) ARRAY_SIZE(unw.save_order); ++i) {
731 reg = sr->curr.reg + unw.save_order[i];
732 if (reg->where == UNW_WHERE_GR_SAVE) {
733 reg->where = UNW_WHERE_GR;
734 reg->val = sr->gr_save_loc++;
735 }
736 }
737
738 /*
739 * Next, compute when the fp, general, and branch registers get
740 * saved. This must come before alloc_spill_area() because
741 * we need to know which registers are spilled to their home
742 * locations.
743 */
744 if (sr->imask) {
745 unsigned char kind, mask = 0, *cp = sr->imask;
746 int t;
747 static const unsigned char limit[3] = {
748 UNW_REG_F31, UNW_REG_R7, UNW_REG_B5
749 };
750 struct unw_reg_info *(regs[3]);
751
752 regs[0] = sr->curr.reg + UNW_REG_F2;
753 regs[1] = sr->curr.reg + UNW_REG_R4;
754 regs[2] = sr->curr.reg + UNW_REG_B1;
755
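/* imask packs one 2-bit code per prologue instruction, four per byte: 0 = no spill here, 1 = FR, 2 = GR, 3 = BR spilled at this instruction. */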
756 for (t = 0; t < sr->region_len; ++t) {
757 if ((t & 3) == 0)
758 mask = *cp++;
759 kind = (mask >> 2*(3-(t & 3))) & 3;
760 if (kind > 0)
761 spill_next_when(&regs[kind - 1], sr->curr.reg + limit[kind - 1],
762 sr->region_start + t);
763 }
764 }
765 /*
766 * Next, lay out the memory stack spill area:
767 */
768 if (sr->any_spills) {
769 off = sr->spill_offset;
770 alloc_spill_area(&off, 16, sr->curr.reg + UNW_REG_F2, sr->curr.reg + UNW_REG_F31);
771 alloc_spill_area(&off, 8, sr->curr.reg + UNW_REG_B1, sr->curr.reg + UNW_REG_B5);
772 alloc_spill_area(&off, 8, sr->curr.reg + UNW_REG_R4, sr->curr.reg + UNW_REG_R7);
773 }
774 }
775
776 /*
777 * Region header descriptors.
778 */
779
780 static void
781 desc_prologue (int body, unw_word rlen, unsigned char mask, unsigned char grsave,
782 struct unw_state_record *sr)
783 {
784 int i, region_start;
785
786 if (!(sr->in_body || sr->first_region))
787 finish_prologue(sr);
788 sr->first_region = 0;
789
790 /* check if we're done: */
791 if (sr->when_target < sr->region_start + sr->region_len) {
792 sr->done = 1;
793 return;
794 }
795
796 region_start = sr->region_start + sr->region_len;
797
798 for (i = 0; i < sr->epilogue_count; ++i)
799 pop(sr);
800 sr->epilogue_count = 0;
801 sr->epilogue_start = UNW_WHEN_NEVER;
802
803 sr->region_start = region_start;
804 sr->region_len = rlen;
805 sr->in_body = body;
806
807 if (!body) {
808 push(sr);
809
810 for (i = 0; i < 4; ++i) {
811 if (mask & 0x8)
812 set_reg(sr->curr.reg + unw.save_order[i], UNW_WHERE_GR,
813 sr->region_start + sr->region_len - 1, grsave++);
814 mask <<= 1;
815 }
816 sr->gr_save_loc = grsave;
817 sr->any_spills = 0;
818 sr->imask = 0;
819 sr->spill_offset = 0x10; /* default to psp+16 */
820 }
821 }
822
823 /*
824 * Prologue descriptors.
825 */
826
827 static inline void
828 desc_abi (unsigned char abi, unsigned char context, struct unw_state_record *sr)
829 {
830 if (abi == 3 && context == 'i') {
831 sr->flags |= UNW_FLAG_INTERRUPT_FRAME;
832 UNW_DPRINT(3, "unwind.%s: interrupt frame\n", __FUNCTION__);
833 }
834 else
835 UNW_DPRINT(0, "unwind.%s: ignoring unwabi(abi=0x%x,context=0x%x)\n",
836 __FUNCTION__, abi, context);
837 }
838
839 static inline void
840 desc_br_gr (unsigned char brmask, unsigned char gr, struct unw_state_record *sr)
841 {
842 int i;
843
844 for (i = 0; i < 5; ++i) {
845 if (brmask & 1)
846 set_reg(sr->curr.reg + UNW_REG_B1 + i, UNW_WHERE_GR,
847 sr->region_start + sr->region_len - 1, gr++);
848 brmask >>= 1;
849 }
850 }
851
852 static inline void
853 desc_br_mem (unsigned char brmask, struct unw_state_record *sr)
854 {
855 int i;
856
857 for (i = 0; i < 5; ++i) {
858 if (brmask & 1) {
859 set_reg(sr->curr.reg + UNW_REG_B1 + i, UNW_WHERE_SPILL_HOME,
860 sr->region_start + sr->region_len - 1, 0);
861 sr->any_spills = 1;
862 }
863 brmask >>= 1;
864 }
865 }
866
867 static inline void
868 desc_frgr_mem (unsigned char grmask, unw_word frmask, struct unw_state_record *sr)
869 {
870 int i;
871
872 for (i = 0; i < 4; ++i) {
873 if ((grmask & 1) != 0) {
874 set_reg(sr->curr.reg + UNW_REG_R4 + i, UNW_WHERE_SPILL_HOME,
875 sr->region_start + sr->region_len - 1, 0);
876 sr->any_spills = 1;
877 }
878 grmask >>= 1;
879 }
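/* frmask covers the 20 preserved FRs: bits 0-3 map to f2-f5, bits 4-19 to f16-f31. */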
880 for (i = 0; i < 20; ++i) {
881 if ((frmask & 1) != 0) {
882 int base = (i < 4) ? UNW_REG_F2 : UNW_REG_F16 - 4;
883 set_reg(sr->curr.reg + base + i, UNW_WHERE_SPILL_HOME,
884 sr->region_start + sr->region_len - 1, 0);
885 sr->any_spills = 1;
886 }
887 frmask >>= 1;
888 }
889 }
890
891 static inline void
892 desc_fr_mem (unsigned char frmask, struct unw_state_record *sr)
893 {
894 int i;
895
896 for (i = 0; i < 4; ++i) {
897 if ((frmask & 1) != 0) {
898 set_reg(sr->curr.reg + UNW_REG_F2 + i, UNW_WHERE_SPILL_HOME,
899 sr->region_start + sr->region_len - 1, 0);
900 sr->any_spills = 1;
901 }
902 frmask >>= 1;
903 }
904 }
905
906 static inline void
907 desc_gr_gr (unsigned char grmask, unsigned char gr, struct unw_state_record *sr)
908 {
909 int i;
910
911 for (i = 0; i < 4; ++i) {
912 if ((grmask & 1) != 0)
913 set_reg(sr->curr.reg + UNW_REG_R4 + i, UNW_WHERE_GR,
914 sr->region_start + sr->region_len - 1, gr++);
915 grmask >>= 1;
916 }
917 }
918
919 static inline void
920 desc_gr_mem (unsigned char grmask, struct unw_state_record *sr)
921 {
922 int i;
923
924 for (i = 0; i < 4; ++i) {
925 if ((grmask & 1) != 0) {
926 set_reg(sr->curr.reg + UNW_REG_R4 + i, UNW_WHERE_SPILL_HOME,
927 sr->region_start + sr->region_len - 1, 0);
928 sr->any_spills = 1;
929 }
930 grmask >>= 1;
931 }
932 }
933
934 static inline void
935 desc_mem_stack_f (unw_word t, unw_word size, struct unw_state_record *sr)
936 {
937 set_reg(sr->curr.reg + UNW_REG_PSP, UNW_WHERE_NONE,
938 sr->region_start + MIN((int)t, sr->region_len - 1), 16*size);
939 }
940
941 static inline void
942 desc_mem_stack_v (unw_word t, struct unw_state_record *sr)
943 {
944 sr->curr.reg[UNW_REG_PSP].when = sr->region_start + MIN((int)t, sr->region_len - 1);
945 }
946
947 static inline void
948 desc_reg_gr (unsigned char reg, unsigned char dst, struct unw_state_record *sr)
949 {
950 set_reg(sr->curr.reg + reg, UNW_WHERE_GR, sr->region_start + sr->region_len - 1, dst);
951 }
952
953 static inline void
954 desc_reg_psprel (unsigned char reg, unw_word pspoff, struct unw_state_record *sr)
955 {
956 set_reg(sr->curr.reg + reg, UNW_WHERE_PSPREL, sr->region_start + sr->region_len - 1,
957 0x10 - 4*pspoff);
958 }
959
960 static inline void
961 desc_reg_sprel (unsigned char reg, unw_word spoff, struct unw_state_record *sr)
962 {
963 set_reg(sr->curr.reg + reg, UNW_WHERE_SPREL, sr->region_start + sr->region_len - 1,
964 4*spoff);
965 }
966
967 static inline void
968 desc_rp_br (unsigned char dst, struct unw_state_record *sr)
969 {
970 sr->return_link_reg = dst;
971 }
972
973 static inline void
974 desc_reg_when (unsigned char regnum, unw_word t, struct unw_state_record *sr)
975 {
976 struct unw_reg_info *reg = sr->curr.reg + regnum;
977
978 if (reg->where == UNW_WHERE_NONE)
979 reg->where = UNW_WHERE_GR_SAVE;
980 reg->when = sr->region_start + MIN((int)t, sr->region_len - 1);
981 }
982
983 static inline void
984 desc_spill_base (unw_word pspoff, struct unw_state_record *sr)
985 {
986 sr->spill_offset = 0x10 - 4*pspoff;
987 }
988
989 static inline unsigned char *
990 desc_spill_mask (unsigned char *imaskp, struct unw_state_record *sr)
991 {
992 sr->imask = imaskp;
993 return imaskp + (2*sr->region_len + 7)/8;
994 }
995
996 /*
997 * Body descriptors.
998 */
999 static inline void
1000 desc_epilogue (unw_word t, unw_word ecount, struct unw_state_record *sr)
1001 {
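/* t counts instructions back from the end of the current region; the epilogue pops ecount+1 levels of nested prologue state (see desc_prologue()). */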
1002 sr->epilogue_start = sr->region_start + sr->region_len - 1 - t;
1003 sr->epilogue_count = ecount + 1;
1004 }
1005
1006 static inline void
1007 desc_copy_state (unw_word label, struct unw_state_record *sr)
1008 {
1009 struct unw_labeled_state *ls;
1010
1011 for (ls = sr->labeled_states; ls; ls = ls->next) {
1012 if (ls->label == label) {
1013 free_state_stack(&sr->curr);
1014 memcpy(&sr->curr, &ls->saved_state, sizeof(sr->curr));
1015 sr->curr.next = dup_state_stack(ls->saved_state.next);
1016 return;
1017 }
1018 }
1019 printk(KERN_ERR "unwind: failed to find state labeled 0x%lx\n", label);
1020 }
1021
1022 static inline void
1023 desc_label_state (unw_word label, struct unw_state_record *sr)
1024 {
1025 struct unw_labeled_state *ls;
1026
1027 ls = alloc_labeled_state();
1028 if (!ls) {
1029 printk(KERN_ERR "unwind.desc_label_state(): out of memory\n");
1030 return;
1031 }
1032 ls->label = label;
1033 memcpy(&ls->saved_state, &sr->curr, sizeof(ls->saved_state));
1034 ls->saved_state.next = dup_state_stack(sr->curr.next);
1035
1036 /* insert into list of labeled states: */
1037 ls->next = sr->labeled_states;
1038 sr->labeled_states = ls;
1039 }
1040
1041 /*
1042 * General descriptors.
1043 */
1044
1045 static inline int
1046 desc_is_active (unsigned char qp, unw_word t, struct unw_state_record *sr)
1047 {
1048 if (sr->when_target <= sr->region_start + MIN((int)t, sr->region_len - 1))
1049 return 0;
1050 if (qp > 0) {
1051 if ((sr->pr_val & (1UL << qp)) == 0)
1052 return 0;
1053 sr->pr_mask |= (1UL << qp);
1054 }
1055 return 1;
1056 }
1057
1058 static inline void
1059 desc_restore_p (unsigned char qp, unw_word t, unsigned char abreg, struct unw_state_record *sr)
1060 {
1061 struct unw_reg_info *r;
1062
1063 if (!desc_is_active(qp, t, sr))
1064 return;
1065
1066 r = sr->curr.reg + decode_abreg(abreg, 0);
1067 r->where = UNW_WHERE_NONE;
1068 r->when = UNW_WHEN_NEVER;
1069 r->val = 0;
1070 }
1071
1072 static inline void
1073 desc_spill_reg_p (unsigned char qp, unw_word t, unsigned char abreg, unsigned char x,
1074 unsigned char ytreg, struct unw_state_record *sr)
1075 {
1076 enum unw_where where = UNW_WHERE_GR;
1077 struct unw_reg_info *r;
1078
1079 if (!desc_is_active(qp, t, sr))
1080 return;
1081
1082 if (x)
1083 where = UNW_WHERE_BR;
1084 else if (ytreg & 0x80)
1085 where = UNW_WHERE_FR;
1086
1087 r = sr->curr.reg + decode_abreg(abreg, 0);
1088 r->where = where;
1089 r->when = sr->region_start + MIN((int)t, sr->region_len - 1);
1090 r->val = (ytreg & 0x7f);
1091 }
1092
1093 static inline void
1094 desc_spill_psprel_p (unsigned char qp, unw_word t, unsigned char abreg, unw_word pspoff,
1095 struct unw_state_record *sr)
1096 {
1097 struct unw_reg_info *r;
1098
1099 if (!desc_is_active(qp, t, sr))
1100 return;
1101
1102 r = sr->curr.reg + decode_abreg(abreg, 1);
1103 r->where = UNW_WHERE_PSPREL;
1104 r->when = sr->region_start + MIN((int)t, sr->region_len - 1);
1105 r->val = 0x10 - 4*pspoff;
1106 }
1107
1108 static inline void
1109 desc_spill_sprel_p (unsigned char qp, unw_word t, unsigned char abreg, unw_word spoff,
1110 struct unw_state_record *sr)
1111 {
1112 struct unw_reg_info *r;
1113
1114 if (!desc_is_active(qp, t, sr))
1115 return;
1116
1117 r = sr->curr.reg + decode_abreg(abreg, 1);
1118 r->where = UNW_WHERE_SPREL;
1119 r->when = sr->region_start + MIN((int)t, sr->region_len - 1);
1120 r->val = 4*spoff;
1121 }
1122
1123 #define UNW_DEC_BAD_CODE(code) printk(KERN_ERR "unwind: unknown code 0x%02x\n", \
1124 code);
1125
1126 /*
1127 * region headers:
1128 */
1129 #define UNW_DEC_PROLOGUE_GR(fmt,r,m,gr,arg) desc_prologue(0,r,m,gr,arg)
1130 #define UNW_DEC_PROLOGUE(fmt,b,r,arg) desc_prologue(b,r,0,32,arg)
1131 /*
1132 * prologue descriptors:
1133 */
1134 #define UNW_DEC_ABI(fmt,a,c,arg) desc_abi(a,c,arg)
1135 #define UNW_DEC_BR_GR(fmt,b,g,arg) desc_br_gr(b,g,arg)
1136 #define UNW_DEC_BR_MEM(fmt,b,arg) desc_br_mem(b,arg)
1137 #define UNW_DEC_FRGR_MEM(fmt,g,f,arg) desc_frgr_mem(g,f,arg)
1138 #define UNW_DEC_FR_MEM(fmt,f,arg) desc_fr_mem(f,arg)
1139 #define UNW_DEC_GR_GR(fmt,m,g,arg) desc_gr_gr(m,g,arg)
1140 #define UNW_DEC_GR_MEM(fmt,m,arg) desc_gr_mem(m,arg)
1141 #define UNW_DEC_MEM_STACK_F(fmt,t,s,arg) desc_mem_stack_f(t,s,arg)
1142 #define UNW_DEC_MEM_STACK_V(fmt,t,arg) desc_mem_stack_v(t,arg)
1143 #define UNW_DEC_REG_GR(fmt,r,d,arg) desc_reg_gr(r,d,arg)
1144 #define UNW_DEC_REG_PSPREL(fmt,r,o,arg) desc_reg_psprel(r,o,arg)
1145 #define UNW_DEC_REG_SPREL(fmt,r,o,arg) desc_reg_sprel(r,o,arg)
1146 #define UNW_DEC_REG_WHEN(fmt,r,t,arg) desc_reg_when(r,t,arg)
1147 #define UNW_DEC_PRIUNAT_WHEN_GR(fmt,t,arg) desc_reg_when(UNW_REG_PRI_UNAT_GR,t,arg)
1148 #define UNW_DEC_PRIUNAT_WHEN_MEM(fmt,t,arg) desc_reg_when(UNW_REG_PRI_UNAT_MEM,t,arg)
1149 #define UNW_DEC_PRIUNAT_GR(fmt,r,arg) desc_reg_gr(UNW_REG_PRI_UNAT_GR,r,arg)
1150 #define UNW_DEC_PRIUNAT_PSPREL(fmt,o,arg) desc_reg_psprel(UNW_REG_PRI_UNAT_MEM,o,arg)
1151 #define UNW_DEC_PRIUNAT_SPREL(fmt,o,arg) desc_reg_sprel(UNW_REG_PRI_UNAT_MEM,o,arg)
1152 #define UNW_DEC_RP_BR(fmt,d,arg) desc_rp_br(d,arg)
1153 #define UNW_DEC_SPILL_BASE(fmt,o,arg) desc_spill_base(o,arg)
1154 #define UNW_DEC_SPILL_MASK(fmt,m,arg) (m = desc_spill_mask(m,arg))
1155 /*
1156 * body descriptors:
1157 */
1158 #define UNW_DEC_EPILOGUE(fmt,t,c,arg) desc_epilogue(t,c,arg)
1159 #define UNW_DEC_COPY_STATE(fmt,l,arg) desc_copy_state(l,arg)
1160 #define UNW_DEC_LABEL_STATE(fmt,l,arg) desc_label_state(l,arg)
1161 /*
1162 * general unwind descriptors:
1163 */
1164 #define UNW_DEC_SPILL_REG_P(f,p,t,a,x,y,arg) desc_spill_reg_p(p,t,a,x,y,arg)
1165 #define UNW_DEC_SPILL_REG(f,t,a,x,y,arg) desc_spill_reg_p(0,t,a,x,y,arg)
1166 #define UNW_DEC_SPILL_PSPREL_P(f,p,t,a,o,arg) desc_spill_psprel_p(p,t,a,o,arg)
1167 #define UNW_DEC_SPILL_PSPREL(f,t,a,o,arg) desc_spill_psprel_p(0,t,a,o,arg)
1168 #define UNW_DEC_SPILL_SPREL_P(f,p,t,a,o,arg) desc_spill_sprel_p(p,t,a,o,arg)
1169 #define UNW_DEC_SPILL_SPREL(f,t,a,o,arg) desc_spill_sprel_p(0,t,a,o,arg)
1170 #define UNW_DEC_RESTORE_P(f,p,t,a,arg) desc_restore_p(p,t,a,arg)
1171 #define UNW_DEC_RESTORE(f,t,a,arg) desc_restore_p(0,t,a,arg)
1172
1173 #include "unwind_decoder.c"
1174
1175
1176 /* Unwind scripts. */
1177
1178 static inline unw_hash_index_t
1179 hash (unsigned long ip)
1180 {
1181 # define hashmagic 0x9e3779b97f4a7c16 /* based on (sqrt(5)/2-1)*2^64 */
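/* Drop the 4 slot bits (bundles are 16 bytes) and spread the bundle address with a Fibonacci (multiplicative) hash, keeping the top UNW_LOG_HASH_SIZE bits. */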
1182
1183 return (ip >> 4)*hashmagic >> (64 - UNW_LOG_HASH_SIZE);
1184 #undef hashmagic
1185 }
1186
1187 static inline long
1188 cache_match (struct unw_script *script, unsigned long ip, unsigned long pr)
1189 {
1190 read_lock(&script->lock);
1191 if (ip == script->ip && ((pr ^ script->pr_val) & script->pr_mask) == 0)
1192 /* keep the read lock... */
1193 return 1;
1194 read_unlock(&script->lock);
1195 return 0;
1196 }
1197
1198 static inline struct unw_script *
1199 script_lookup (struct unw_frame_info *info)
1200 {
1201 struct unw_script *script = unw.cache + info->hint;
1202 unsigned short index;
1203 unsigned long ip, pr;
1204
1205 if (UNW_DEBUG_ON(0))
1206 return 0; /* Always regenerate scripts in debug mode */
1207
1208 STAT(++unw.stat.cache.lookups);
1209
1210 ip = info->ip;
1211 pr = info->pr;
1212
1213 if (cache_match(script, ip, pr)) {
1214 STAT(++unw.stat.cache.hinted_hits);
1215 return script;
1216 }
1217
1218 index = unw.hash[hash(ip)];
1219 if (index >= UNW_CACHE_SIZE)
1220 return 0;
1221
1222 script = unw.cache + index;
1223 while (1) {
1224 if (cache_match(script, ip, pr)) {
1225 /* update hint; no locking required as single-word writes are atomic */
1226 STAT(++unw.stat.cache.normal_hits);
1227 unw.cache[info->prev_script].hint = script - unw.cache;
1228 return script;
1229 }
1230 if (script->coll_chain >= UNW_HASH_SIZE)
1231 return 0;
1232 script = unw.cache + script->coll_chain;
1233 STAT(++unw.stat.cache.collision_chain_traversals);
1234 }
1235 }
1236
1237 /*
1238 * On returning, a write lock for the SCRIPT is still being held.
1239 */
1240 static inline struct unw_script *
1241 script_new (unsigned long ip)
1242 {
1243 struct unw_script *script, *prev, *tmp;
1244 unw_hash_index_t index;
1245 unsigned long flags;
1246 unsigned short head;
1247
1248 STAT(++unw.stat.script.news);
1249
1250 /*
1251 * Can't (easily) use cmpxchg() here because of ABA problem
1252 * that is intrinsic in cmpxchg()...
1253 */
1254 spin_lock_irqsave(&unw.lock, flags);
1255 {
1256 head = unw.lru_head;
1257 script = unw.cache + head;
1258 unw.lru_head = script->lru_chain;
1259 }
1260 spin_unlock(&unw.lock);
1261
1262 /*
1263 * We'd deadlock here if we interrupted a thread that is holding a read lock on
1264 * script->lock. Thus, if the write_trylock() fails, we simply bail out. The
1265 * alternative would be to disable interrupts whenever we hold a read-lock, but
1266 * that seems silly.
1267 */
1268 /* Kludge: 2.4 has down_write_trylock on semaphores but not write_trylock on
1269 * spinlocks, even though they are both in 2.6 and are identical. Pretend
1270 * that script lock is a rw_semaphore so we can use the only 2.4 code that
1271 * avoids a deadlock. KAO.
1272 */
1273 if (!down_write_trylock((struct rw_semaphore *)(&script->lock)))
1274 return NULL;
1275
1276 spin_lock(&unw.lock);
1277 {
1278 /* re-insert script at the tail of the LRU chain: */
1279 unw.cache[unw.lru_tail].lru_chain = head;
1280 unw.lru_tail = head;
1281
1282 /* remove the old script from the hash table (if it's there): */
1283 if (script->ip) {
1284 index = hash(script->ip);
1285 tmp = unw.cache + unw.hash[index];
1286 prev = 0;
1287 while (1) {
1288 if (tmp == script) {
1289 if (prev)
1290 prev->coll_chain = tmp->coll_chain;
1291 else
1292 unw.hash[index] = tmp->coll_chain;
1293 break;
1294 } else
1295 prev = tmp;
1296 if (tmp->coll_chain >= UNW_CACHE_SIZE)
1297 /* old script wasn't in the hash-table */
1298 break;
1299 tmp = unw.cache + tmp->coll_chain;
1300 }
1301 }
1302
1303 /* enter new script in the hash table */
1304 index = hash(ip);
1305 script->coll_chain = unw.hash[index];
1306 unw.hash[index] = script - unw.cache;
1307
1308 script->ip = ip; /* set new IP while we're holding the locks */
1309
1310 STAT(if (script->coll_chain < UNW_CACHE_SIZE) ++unw.stat.script.collisions);
1311 }
1312 spin_unlock_irqrestore(&unw.lock, flags);
1313
1314 script->flags = 0;
1315 script->hint = 0;
1316 script->count = 0;
1317 return script;
1318 }
1319
1320 static void
1321 script_finalize (struct unw_script *script, struct unw_state_record *sr)
1322 {
1323 script->pr_mask = sr->pr_mask;
1324 script->pr_val = sr->pr_val;
1325 /*
1326 * We could down-grade our write-lock on script->lock here but
1327 * the rwlock API doesn't offer atomic lock downgrading, so
1328 * we'll just keep the write-lock and release it later when
1329 * we're done using the script.
1330 */
1331 }
1332
1333 static inline void
1334 script_emit (struct unw_script *script, struct unw_insn insn)
1335 {
1336 if (script->count >= UNW_MAX_SCRIPT_LEN) {
1337 UNW_DPRINT(0, "unwind.%s: script exceeds maximum size of %u instructions!\n",
1338 __FUNCTION__, UNW_MAX_SCRIPT_LEN);
1339 return;
1340 }
1341 script->insn[script->count++] = insn;
1342 }
1343
1344 static inline void
1345 emit_nat_info (struct unw_state_record *sr, int i, struct unw_script *script)
1346 {
1347 struct unw_reg_info *r = sr->curr.reg + i;
1348 enum unw_insn_opcode opc;
1349 struct unw_insn insn;
1350 unsigned long val = 0;
1351
1352 switch (r->where) {
1353 case UNW_WHERE_GR:
1354 if (r->val >= 32) {
1355 /* register got spilled to a stacked register */
1356 opc = UNW_INSN_SETNAT_TYPE;
1357 val = UNW_NAT_REGSTK;
1358 } else
1359 /* register got spilled to a scratch register */
1360 opc = UNW_INSN_SETNAT_MEMSTK;
1361 break;
1362
1363 case UNW_WHERE_FR:
1364 opc = UNW_INSN_SETNAT_TYPE;
1365 val = UNW_NAT_VAL;
1366 break;
1367
1368 case UNW_WHERE_BR:
1369 opc = UNW_INSN_SETNAT_TYPE;
1370 val = UNW_NAT_NONE;
1371 break;
1372
1373 case UNW_WHERE_PSPREL:
1374 case UNW_WHERE_SPREL:
1375 opc = UNW_INSN_SETNAT_MEMSTK;
1376 break;
1377
1378 default:
1379 UNW_DPRINT(0, "unwind.%s: don't know how to emit nat info for where = %u\n",
1380 __FUNCTION__, r->where);
1381 return;
1382 }
1383 insn.opc = opc;
1384 insn.dst = unw.preg_index[i];
1385 insn.val = val;
1386 script_emit(script, insn);
1387 }
1388
1389 static void
1390 compile_reg (struct unw_state_record *sr, int i, struct unw_script *script)
1391 {
1392 struct unw_reg_info *r = sr->curr.reg + i;
1393 enum unw_insn_opcode opc;
1394 unsigned long val, rval;
1395 struct unw_insn insn;
1396 long need_nat_info;
1397
1398 if (r->where == UNW_WHERE_NONE || r->when >= sr->when_target)
1399 return;
1400
1401 opc = UNW_INSN_MOVE;
1402 val = rval = r->val;
1403 need_nat_info = (i >= UNW_REG_R4 && i <= UNW_REG_R7);
1404
1405 switch (r->where) {
1406 case UNW_WHERE_GR:
1407 if (rval >= 32) {
1408 opc = UNW_INSN_MOVE_STACKED;
1409 val = rval - 32;
1410 } else if (rval >= 4 && rval <= 7) {
1411 if (need_nat_info) {
1412 opc = UNW_INSN_MOVE2;
1413 need_nat_info = 0;
1414 }
1415 val = unw.preg_index[UNW_REG_R4 + (rval - 4)];
1416 } else {
1417 /* register got spilled to a scratch register */
1418 opc = UNW_INSN_MOVE_SCRATCH;
1419 val = pt_regs_off(rval);
1420 }
1421 break;
1422
1423 case UNW_WHERE_FR:
1424 if (rval <= 5)
1425 val = unw.preg_index[UNW_REG_F2 + (rval - 2)];
1426 else if (rval >= 16 && rval <= 31)
1427 val = unw.preg_index[UNW_REG_F16 + (rval - 16)];
1428 else {
1429 opc = UNW_INSN_MOVE_SCRATCH;
1430 if (rval <= 11)
1431 val = offsetof(struct pt_regs, f6) + 16*(rval - 6);
1432 else
1433 UNW_DPRINT(0, "unwind.%s: kernel may not touch f%lu\n",
1434 __FUNCTION__, rval);
1435 }
1436 break;
1437
1438 case UNW_WHERE_BR:
1439 if (rval >= 1 && rval <= 5)
1440 val = unw.preg_index[UNW_REG_B1 + (rval - 1)];
1441 else {
1442 opc = UNW_INSN_MOVE_SCRATCH;
1443 if (rval == 0)
1444 val = offsetof(struct pt_regs, b0);
1445 else if (rval == 6)
1446 val = offsetof(struct pt_regs, b6);
1447 else
1448 val = offsetof(struct pt_regs, b7);
1449 }
1450 break;
1451
1452 case UNW_WHERE_SPREL:
1453 opc = UNW_INSN_ADD_SP;
1454 break;
1455
1456 case UNW_WHERE_PSPREL:
1457 opc = UNW_INSN_ADD_PSP;
1458 break;
1459
1460 default:
1461 UNW_DPRINT(0, "unwind.%s: register %u has unexpected `where' value of %u\n",
1462 __FUNCTION__, i, r->where);
1463 break;
1464 }
1465 insn.opc = opc;
1466 insn.dst = unw.preg_index[i];
1467 insn.val = val;
1468 script_emit(script, insn);
1469 if (need_nat_info)
1470 emit_nat_info(sr, i, script);
1471
1472 if (i == UNW_REG_PSP) {
1473 /*
1474 * info->psp must contain the _value_ of the previous
1475 * sp, not its save location. We get this by
1476 * dereferencing the value we just stored in
1477 * info->psp:
1478 */
1479 insn.opc = UNW_INSN_LOAD;
1480 insn.dst = insn.val = unw.preg_index[UNW_REG_PSP];
1481 script_emit(script, insn);
1482 }
1483 }
1484
1485 static inline const struct unw_table_entry *
1486 lookup (struct unw_table *table, unsigned long rel_ip)
1487 {
1488 const struct unw_table_entry *e = 0;
1489 unsigned long lo, hi, mid;
1490
1491 /* do a binary search for right entry: */
1492 for (lo = 0, hi = table->length; lo < hi; ) {
1493 mid = (lo + hi) / 2;
1494 e = &table->array[mid];
1495 if (rel_ip < e->start_offset)
1496 hi = mid;
1497 else if (rel_ip >= e->end_offset)
1498 lo = mid + 1;
1499 else
1500 break;
1501 }
1502 if (rel_ip < e->start_offset || rel_ip >= e->end_offset)
1503 return NULL;
1504 return e;
1505 }
1506
1507 /*
1508 * Build an unwind script that unwinds from state OLD_STATE to the
1509 * entrypoint of the function that called OLD_STATE.
1510 */
1511 static inline struct unw_script *
1512 build_script (struct unw_frame_info *info)
1513 {
1514 const struct unw_table_entry *e = 0;
1515 struct unw_script *script = 0;
1516 struct unw_labeled_state *ls, *next;
1517 unsigned long ip = info->ip;
1518 struct unw_state_record sr;
1519 struct unw_table *table;
1520 struct unw_reg_info *r;
1521 struct unw_insn insn;
1522 u8 *dp, *desc_end;
1523 u64 hdr;
1524 int i;
1525 STAT(unsigned long start, parse_start;)
1526
1527 STAT(++unw.stat.script.builds; start = ia64_get_itc());
1528
1529 /* build state record */
1530 memset(&sr, 0, sizeof(sr));
1531 for (r = sr.curr.reg; r < sr.curr.reg + UNW_NUM_REGS; ++r)
1532 r->when = UNW_WHEN_NEVER;
1533 sr.pr_val = info->pr;
1534
1535 UNW_DPRINT(3, "unwind.%s: ip 0x%lx\n", __FUNCTION__, ip);
1536 script = script_new(ip);
1537 if (!script) {
1538 UNW_DPRINT(0, "unwind.%s: failed to create unwind script\n", __FUNCTION__);
1539 STAT(unw.stat.script.build_time += ia64_get_itc() - start);
1540 return 0;
1541 }
1542 unw.cache[info->prev_script].hint = script - unw.cache;
1543
1544 /* search the kernel's and the modules' unwind tables for IP: */
1545
1546 STAT(parse_start = ia64_get_itc());
1547
1548 for (table = unw.tables; table; table = table->next) {
1549 if (ip >= table->start && ip < table->end) {
1550 e = lookup(table, ip - table->segment_base);
1551 break;
1552 }
1553 }
1554 if (!e) {
1555 /* no info, return default unwinder (leaf proc, no mem stack, no saved regs) */
1556 UNW_DPRINT(1, "unwind.%s: no unwind info for ip=0x%lx (prev ip=0x%lx)\n",
1557 __FUNCTION__, ip, unw.cache[info->prev_script].ip);
1558 sr.curr.reg[UNW_REG_RP].where = UNW_WHERE_BR;
1559 sr.curr.reg[UNW_REG_RP].when = -1;
1560 sr.curr.reg[UNW_REG_RP].val = 0;
1561 compile_reg(&sr, UNW_REG_RP, script);
1562 script_finalize(script, &sr);
1563 STAT(unw.stat.script.parse_time += ia64_get_itc() - parse_start);
1564 STAT(unw.stat.script.build_time += ia64_get_itc() - start);
1565 return script;
1566 }
1567
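/* Convert IP into an instruction "time": three slots per 16-byte bundle from the start of the function, plus the slot number in the low bits of IP. */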
1568 sr.when_target = (3*((ip & ~0xfUL) - (table->segment_base + e->start_offset))/16
1569 + (ip & 0xfUL));
1570 hdr = *(u64 *) (table->segment_base + e->info_offset);
1571 dp = (u8 *) (table->segment_base + e->info_offset + 8);
1572 desc_end = dp + 8*UNW_LENGTH(hdr);
1573
1574 while (!sr.done && dp < desc_end)
1575 dp = unw_decode(dp, sr.in_body, &sr);
1576
1577 if (sr.when_target > sr.epilogue_start) {
1578 /*
1579 * sp has been restored and all values on the memory stack below
1580 * psp also have been restored.
1581 */
1582 sr.curr.reg[UNW_REG_PSP].val = 0;
1583 sr.curr.reg[UNW_REG_PSP].where = UNW_WHERE_NONE;
1584 sr.curr.reg[UNW_REG_PSP].when = UNW_WHEN_NEVER;
1585 for (r = sr.curr.reg; r < sr.curr.reg + UNW_NUM_REGS; ++r)
1586 if ((r->where == UNW_WHERE_PSPREL && r->val <= 0x10)
1587 || r->where == UNW_WHERE_SPREL)
1588 {
1589 r->val = 0;
1590 r->where = UNW_WHERE_NONE;
1591 r->when = UNW_WHEN_NEVER;
1592 }
1593 }
1594
1595 script->flags = sr.flags;
1596
1597 /*
1598 * If RP didn't get saved, generate entry for the return link
1599 * register.
1600 */
1601 if (sr.curr.reg[UNW_REG_RP].when >= sr.when_target) {
1602 sr.curr.reg[UNW_REG_RP].where = UNW_WHERE_BR;
1603 sr.curr.reg[UNW_REG_RP].when = -1;
1604 sr.curr.reg[UNW_REG_RP].val = sr.return_link_reg;
1605 UNW_DPRINT(1, "unwind.%s: using default for rp at ip=0x%lx where=%d val=0x%lx\n",
1606 __FUNCTION__, ip, sr.curr.reg[UNW_REG_RP].where,
1607 sr.curr.reg[UNW_REG_RP].val);
1608 }
1609
1610 #ifdef UNW_DEBUG
1611 UNW_DPRINT(1, "unwind.%s: state record for func 0x%lx, t=%u:\n",
1612 __FUNCTION__, table->segment_base + e->start_offset, sr.when_target);
1613 for (r = sr.curr.reg; r < sr.curr.reg + UNW_NUM_REGS; ++r) {
1614 if (r->where != UNW_WHERE_NONE || r->when != UNW_WHEN_NEVER) {
1615 UNW_DPRINT(1, " %s <- ", unw.preg_name[r - sr.curr.reg]);
1616 switch (r->where) {
1617 case UNW_WHERE_GR: UNW_DPRINT(1, "r%lu", r->val); break;
1618 case UNW_WHERE_FR: UNW_DPRINT(1, "f%lu", r->val); break;
1619 case UNW_WHERE_BR: UNW_DPRINT(1, "b%lu", r->val); break;
1620 case UNW_WHERE_SPREL: UNW_DPRINT(1, "[sp+0x%lx]", r->val); break;
1621 case UNW_WHERE_PSPREL: UNW_DPRINT(1, "[psp+0x%lx]", r->val); break;
1622 case UNW_WHERE_NONE:
1623 UNW_DPRINT(1, "%s+0x%lx", unw.preg_name[r - sr.curr.reg], r->val);
1624 break;
1625
1626 default:
1627 UNW_DPRINT(1, "BADWHERE(%d)", r->where);
1628 break;
1629 }
1630 UNW_DPRINT(1, "\t\t%d\n", r->when);
1631 }
1632 }
1633 #endif
1634
1635 STAT(unw.stat.script.parse_time += ia64_get_itc() - parse_start);
1636
1637 /* translate state record into unwinder instructions: */
1638
1639 /*
1640 * First, set psp if we're dealing with a fixed-size frame;
1641 * subsequent instructions may depend on this value.
1642 */
1643 if (sr.when_target > sr.curr.reg[UNW_REG_PSP].when
1644 && (sr.curr.reg[UNW_REG_PSP].where == UNW_WHERE_NONE)
1645 && sr.curr.reg[UNW_REG_PSP].val != 0) {
1646 /* new psp is sp plus frame size */
1647 insn.opc = UNW_INSN_ADD;
1648 insn.dst = offsetof(struct unw_frame_info, psp)/8;
1649 insn.val = sr.curr.reg[UNW_REG_PSP].val; /* frame size */
1650 script_emit(script, insn);
1651 }
1652
1653 /* determine where the primary UNaT is: */
1654 if (sr.when_target < sr.curr.reg[UNW_REG_PRI_UNAT_GR].when)
1655 i = UNW_REG_PRI_UNAT_MEM;
1656 else if (sr.when_target < sr.curr.reg[UNW_REG_PRI_UNAT_MEM].when)
1657 i = UNW_REG_PRI_UNAT_GR;
1658 else if (sr.curr.reg[UNW_REG_PRI_UNAT_MEM].when > sr.curr.reg[UNW_REG_PRI_UNAT_GR].when)
1659 i = UNW_REG_PRI_UNAT_MEM;
1660 else
1661 i = UNW_REG_PRI_UNAT_GR;
1662
1663 compile_reg(&sr, i, script);
1664
1665 for (i = UNW_REG_BSP; i < UNW_NUM_REGS; ++i)
1666 compile_reg(&sr, i, script);
1667
1668 /* free labeled register states & stack: */
1669
1670 STAT(parse_start = ia64_get_itc());
1671 for (ls = sr.labeled_states; ls; ls = next) {
1672 next = ls->next;
1673 free_state_stack(&ls->saved_state);
1674 free_labeled_state(ls);
1675 }
1676 free_state_stack(&sr.curr);
1677 STAT(unw.stat.script.parse_time += ia64_get_itc() - parse_start);
1678
1679 script_finalize(script, &sr);
1680 STAT(unw.stat.script.build_time += ia64_get_itc() - start);
1681 return script;
1682 }
1683
1684 /*
1685 * Run the unwind script SCRIPT and update STATE to reflect the machine
1686 * state that existed on entry to the function whose frame STATE
1687 * currently describes.
1688 */
1689 static inline void
1690 run_script (struct unw_script *script, struct unw_frame_info *state)
1691 {
1692 struct unw_insn *ip, *limit, next_insn;
1693 unsigned long opc, dst, val, off;
1694 unsigned long *s = (unsigned long *) state;
1695 STAT(unsigned long start;)
1696
1697 STAT(++unw.stat.script.runs; start = ia64_get_itc());
1698 state->flags = script->flags;
1699 ip = script->insn;
1700 limit = script->insn + script->count;
1701 next_insn = *ip;
1702
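/* One-instruction lookahead: the current instruction's operands stay in opc/dst/val, so lazy_init below can jump back to "redo" and re-execute it once the missing save location has been filled in. */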
1703 while (ip++ < limit) {
1704 opc = next_insn.opc;
1705 dst = next_insn.dst;
1706 val = next_insn.val;
1707 next_insn = *ip;
1708
1709 redo:
1710 switch (opc) {
1711 case UNW_INSN_ADD:
1712 s[dst] += val;
1713 break;
1714
1715 case UNW_INSN_MOVE2:
1716 if (!s[val])
1717 goto lazy_init;
1718 s[dst+1] = s[val+1];
1719 s[dst] = s[val];
1720 break;
1721
1722 case UNW_INSN_MOVE:
1723 if (!s[val])
1724 goto lazy_init;
1725 s[dst] = s[val];
1726 break;
1727
1728 case UNW_INSN_MOVE_SCRATCH:
1729 if (state->pt) {
1730 s[dst] = (unsigned long) get_scratch_regs(state) + val;
1731 } else {
1732 s[dst] = 0;
1733 UNW_DPRINT(0, "unwind.%s: no state->pt, dst=%ld, val=%ld\n",
1734 __FUNCTION__, dst, val);
1735 }
1736 break;
1737
1738 case UNW_INSN_MOVE_STACKED:
1739 s[dst] = (unsigned long) ia64_rse_skip_regs((unsigned long *)state->bsp,
1740 val);
1741 break;
1742
1743 case UNW_INSN_ADD_PSP:
1744 s[dst] = state->psp + val;
1745 break;
1746
1747 case UNW_INSN_ADD_SP:
1748 s[dst] = state->sp + val;
1749 break;
1750
1751 case UNW_INSN_SETNAT_MEMSTK:
1752 if (!state->pri_unat_loc)
1753 state->pri_unat_loc = &state->sw->ar_unat;
1754 /* register off. is a multiple of 8, so the least 3 bits (type) are 0 */
1755 s[dst+1] = ((unsigned long) state->pri_unat_loc - s[dst]) | UNW_NAT_MEMSTK;
1756 break;
1757
1758 case UNW_INSN_SETNAT_TYPE:
1759 s[dst+1] = val;
1760 break;
1761
1762 case UNW_INSN_LOAD:
1763 #ifdef UNW_DEBUG
1764 if ((s[val] & (local_cpu_data->unimpl_va_mask | 0x7)) != 0
1765 || s[val] < TASK_SIZE)
1766 {
1767 UNW_DPRINT(0, "unwind.%s: rejecting bad psp=0x%lx\n",
1768 __FUNCTION__, s[val]);
1769 break;
1770 }
1771 #endif
1772 s[dst] = *(unsigned long *) s[val];
1773 break;
1774 }
1775 }
1776 STAT(unw.stat.script.run_time += ia64_get_itc() - start);
1777 return;
1778
1779 lazy_init:
1780 off = unw.sw_off[val];
1781 s[val] = (unsigned long) state->sw + off;
1782 if (off >= offsetof(struct switch_stack, r4) && off <= offsetof(struct switch_stack, r7))
1783 /*
1784 * We're initializing a general register: init NaT info, too. Note that
1785 * the offset is a multiple of 8 which gives us the 3 bits needed for
1786 * the type field.
1787 */
1788 s[val+1] = (offsetof(struct switch_stack, ar_unat) - off) | UNW_NAT_MEMSTK;
1789 goto redo;
1790 }
1791
1792 static int
1793 find_save_locs (struct unw_frame_info *info)
1794 {
1795 int have_write_lock = 0;
1796 struct unw_script *scr;
1797
1798 if ((info->ip & (local_cpu_data->unimpl_va_mask | 0xf)) || info->ip < TASK_SIZE) {
1799 /* don't let obviously bad addresses pollute the cache */
1800 /* FIXME: should really be level 0 but it occurs too often. KAO */
1801 UNW_DPRINT(1, "unwind.%s: rejecting bad ip=0x%lx\n", __FUNCTION__, info->ip);
1802 info->rp_loc = 0;
1803 return -1;
1804 }
1805
1806 scr = script_lookup(info);
1807 if (!scr) {
1808 scr = build_script(info);
1809 if (!scr) {
1810 UNW_DPRINT(0,
1811 "unwind.%s: failed to locate/build unwind script for ip %lx\n",
1812 __FUNCTION__, info->ip);
1813 return -1;
1814 }
1815 have_write_lock = 1;
1816 }
	info->hint = scr->hint;
	info->prev_script = scr - unw.cache;

	run_script(scr, info);

	if (have_write_lock)
		write_unlock(&scr->lock);
	else
		read_unlock(&scr->lock);
	return 0;
}

int
unw_unwind (struct unw_frame_info *info)
{
	unsigned long prev_ip, prev_sp, prev_bsp;
	unsigned long ip, pr, num_regs;
	STAT(unsigned long start, flags;)
	int retval;

	STAT(local_irq_save(flags); ++unw.stat.api.unwinds; start = ia64_get_itc());

	prev_ip = info->ip;
	prev_sp = info->sp;
	prev_bsp = info->bsp;

	/* restore the ip */
	if (!info->rp_loc) {
		/* FIXME: should really be level 0 but it occurs too often. KAO */
		UNW_DPRINT(1, "unwind.%s: failed to locate return link (ip=0x%lx)!\n",
			   __FUNCTION__, info->ip);
		STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
		return -1;
	}
	ip = info->ip = *info->rp_loc;
	if (ip < GATE_ADDR + PAGE_SIZE) {
		/*
		 * We don't have unwind info for the gate page, so we consider that part
		 * of user-space for the purpose of unwinding.
		 */
		UNW_DPRINT(2, "unwind.%s: reached user-space (ip=0x%lx)\n", __FUNCTION__, ip);
		STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
		return -1;
	}

	/* restore the cfm: */
	if (!info->pfs_loc) {
		UNW_DPRINT(0, "unwind.%s: failed to locate ar.pfs!\n", __FUNCTION__);
		STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
		return -1;
	}
	info->cfm_loc = info->pfs_loc;

	/* restore the bsp: */
	pr = info->pr;
	num_regs = 0;
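	/*
	 * CFM layout: bits 0-6 hold sof (size of frame), bits 7-13 hold sol
	 * (size of locals).  Across an interruption the entire interrupted
	 * frame lies below the current bsp (it was covered), so we step back
	 * by sof; across an ordinary call only the caller's locals do, so we
	 * step back by sol.
	 */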
	if ((info->flags & UNW_FLAG_INTERRUPT_FRAME)) {
		info->pt = info->sp + 16;
		if ((pr & (1UL << pNonSys)) != 0)
			num_regs = *info->cfm_loc & 0x7f;	/* size of frame */
		info->pfs_loc =
			(unsigned long *) (info->pt + offsetof(struct pt_regs, ar_pfs));
		UNW_DPRINT(3, "unwind.%s: interrupt_frame pt 0x%lx\n", __FUNCTION__, info->pt);
	} else
		num_regs = (*info->cfm_loc >> 7) & 0x7f;	/* size of locals */
	info->bsp = (unsigned long) ia64_rse_skip_regs((unsigned long *) info->bsp, -num_regs);
	if (info->bsp < info->regstk.limit || info->bsp > info->regstk.top) {
		UNW_DPRINT(0, "unwind.%s: bsp (0x%lx) out of range [0x%lx-0x%lx]\n",
			   __FUNCTION__, info->bsp, info->regstk.limit, info->regstk.top);
		STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
		return -1;
	}

	/* restore the sp: */
	info->sp = info->psp;
	if (info->sp < info->memstk.top || info->sp > info->memstk.limit) {
		UNW_DPRINT(0, "unwind.%s: sp (0x%lx) out of range [0x%lx-0x%lx]\n",
			   __FUNCTION__, info->sp, info->memstk.top, info->memstk.limit);
		STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
		return -1;
	}

	if (info->ip == prev_ip && info->sp == prev_sp && info->bsp == prev_bsp) {
		UNW_DPRINT(0, "unwind.%s: ip, sp, bsp unchanged; stopping here (ip=0x%lx)\n",
			   __FUNCTION__, ip);
		STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
		return -1;
	}

	/* as we unwind, the saved ar.unat becomes the primary unat: */
	info->pri_unat_loc = info->unat_loc;

	/* finally, restore the predicates: */
	unw_get_pr(info, &info->pr);

	retval = find_save_locs(info);
	STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
	return retval;
}
EXPORT_SYMBOL(unw_unwind);

int
unw_unwind_to_user (struct unw_frame_info *info)
{
	unsigned long ip, sp, pr = 0;

	while (unw_unwind(info) >= 0) {
		unw_get_sp(info, &sp);
		if ((long)((unsigned long)info->task + IA64_STK_OFFSET - sp)
		    < IA64_PT_REGS_SIZE) {
			UNW_DPRINT(0, "unwind.%s: ran off the top of the kernel stack\n",
				   __FUNCTION__);
			break;
		}
		if (unw_is_intr_frame(info) &&
		    (pr & (1UL << PRED_USER_STACK)))
			return 0;
		if (unw_get_pr (info, &pr) < 0) {
			unw_get_rp(info, &ip);
			UNW_DPRINT(0, "unwind.%s: failed to read "
				   "predicate register (ip=0x%lx)\n",
				   __FUNCTION__, ip);
			return -1;
		}
	}
	unw_get_ip(info, &ip);
	UNW_DPRINT(0, "unwind.%s: failed to unwind to user-level (ip=0x%lx)\n",
		   __FUNCTION__, ip);
	return -1;
}
EXPORT_SYMBOL(unw_unwind_to_user);

static void
init_frame_info (struct unw_frame_info *info, struct task_struct *t,
		 struct switch_stack *sw, unsigned long stktop)
{
	unsigned long rbslimit, rbstop, stklimit;
	STAT(unsigned long start, flags;)

	STAT(local_irq_save(flags); ++unw.stat.api.inits; start = ia64_get_itc());

	/*
	 * Subtle stuff here: we _could_ unwind through the switch_stack frame but we
	 * don't want to do that because it would be slow as each preserved register would
	 * have to be processed.  Instead, what we do here is zero out the frame info and
	 * start the unwind process at the function that created the switch_stack frame.
	 * When a preserved value in switch_stack needs to be accessed, run_script() will
	 * initialize the appropriate pointer on demand.
	 */
	memset(info, 0, sizeof(*info));

	rbslimit = (unsigned long) t + IA64_RBS_OFFSET;
	rbstop = sw->ar_bspstore;
	if (rbstop - (unsigned long) t >= IA64_STK_OFFSET)
		rbstop = rbslimit;

	stklimit = (unsigned long) t + IA64_STK_OFFSET;
	if (stktop <= rbstop)
		stktop = rbstop;

	info->regstk.limit = rbslimit;
	info->regstk.top = rbstop;
	info->memstk.limit = stklimit;
	info->memstk.top = stktop;
	info->task = t;
	info->sw = sw;
	info->sp = info->psp = stktop;
	info->pr = sw->pr;
	UNW_DPRINT(3, "unwind.%s:\n"
		   "  task   0x%lx\n"
		   "  rbs = [0x%lx-0x%lx)\n"
		   "  stk = [0x%lx-0x%lx)\n"
		   "  pr     0x%lx\n"
		   "  sw     0x%lx\n"
		   "  sp     0x%lx\n",
		   __FUNCTION__, (unsigned long) t, rbslimit, rbstop, stktop, stklimit,
		   info->pr, (unsigned long) info->sw, info->sp);
	STAT(unw.stat.api.init_time += ia64_get_itc() - start; local_irq_restore(flags));
}

void
unw_init_from_interruption (struct unw_frame_info *info, struct task_struct *t,
			    struct pt_regs *pt, struct switch_stack *sw)
{
	unsigned long sof;

	init_frame_info(info, t, sw, pt->r12);
	info->cfm_loc = &pt->cr_ifs;
	info->unat_loc = &pt->ar_unat;
	info->pfs_loc = &pt->ar_pfs;
	sof = *info->cfm_loc & 0x7f;
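	/*
	 * regstk.top is the ar.bspstore value recorded in the switch_stack; the
	 * interrupted frame's base is found by stepping back sof register slots
	 * (ia64_rse_skip_regs() accounts for intervening NaT collections).
	 */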
	info->bsp = (unsigned long) ia64_rse_skip_regs((unsigned long *) info->regstk.top, -sof);
	info->ip = pt->cr_iip + ia64_psr(pt)->ri;
	info->pt = (unsigned long) pt;
	UNW_DPRINT(3, "unwind.%s:\n"
		   "  bsp    0x%lx\n"
		   "  sof    0x%lx\n"
		   "  ip     0x%lx\n",
		   __FUNCTION__, info->bsp, sof, info->ip);
	find_save_locs(info);
}

void
unw_init_frame_info (struct unw_frame_info *info, struct task_struct *t, struct switch_stack *sw)
{
	unsigned long sol;

	init_frame_info(info, t, sw, (unsigned long) (sw + 1) - 16);
	info->cfm_loc = &sw->ar_pfs;
	sol = (*info->cfm_loc >> 7) & 0x7f;
	info->bsp = (unsigned long) ia64_rse_skip_regs((unsigned long *) info->regstk.top, -sol);
	info->ip = sw->b0;
	UNW_DPRINT(3, "unwind.%s:\n"
		   "  bsp    0x%lx\n"
		   "  sol    0x%lx\n"
		   "  ip     0x%lx\n",
		   __FUNCTION__, info->bsp, sol, info->ip);
	find_save_locs(info);
}

void
unw_init_from_blocked_task (struct unw_frame_info *info, struct task_struct *t)
{
	struct switch_stack *sw = (struct switch_stack *) (t->thread.ksp + 16);

	UNW_DPRINT(1, "unwind.%s\n", __FUNCTION__);
	unw_init_frame_info(info, t, sw);
}
EXPORT_SYMBOL(unw_init_from_blocked_task);
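
/*
 * Illustrative sketch (not referenced by this file, hence under "#if 0"): a
 * typical consumer of the API above walks a blocked task's kernel stack by
 * initializing a frame-info structure and calling unw_unwind() until it
 * fails.  The function name and printk format below are illustrative only.
 */
#if 0
static void
example_backtrace_blocked_task (struct task_struct *t)
{
	struct unw_frame_info info;
	unsigned long ip;

	unw_init_from_blocked_task(&info, t);
	do {
		unw_get_ip(&info, &ip);
		if (ip == 0)
			break;
		printk(KERN_DEBUG "  [<%016lx>]\n", ip);
	} while (unw_unwind(&info) >= 0);
}
#endif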

static void
init_unwind_table (struct unw_table *table, const char *name, unsigned long segment_base,
		   unsigned long gp, const void *table_start, const void *table_end)
{
	const struct unw_table_entry *start = table_start, *end = table_end;

	table->name = name;
	table->segment_base = segment_base;
	table->gp = gp;
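	/*
	 * Unwind-table entries are sorted by start_offset, so the first entry
	 * gives the lowest and the last entry the highest text address covered
	 * by this table.
	 */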
	table->start = segment_base + start[0].start_offset;
	table->end = segment_base + end[-1].end_offset;
	table->array = start;
	table->length = end - start;
}

void *
unw_add_unwind_table (const char *name, unsigned long segment_base, unsigned long gp,
		      const void *table_start, const void *table_end)
{
	const struct unw_table_entry *start = table_start, *end = table_end;
	struct unw_table *table;
	unsigned long flags;

	if (end - start <= 0) {
		UNW_DPRINT(0, "unwind.%s: ignoring attempt to insert empty unwind table\n",
			   __FUNCTION__);
		return 0;
	}

	table = kmalloc(sizeof(*table), GFP_USER);
	if (!table)
		return 0;

	init_unwind_table(table, name, segment_base, gp, table_start, table_end);

	spin_lock_irqsave(&unw.lock, flags);
	{
		/* keep kernel unwind table at the front (it's searched most commonly): */
		table->next = unw.tables->next;
		unw.tables->next = table;
	}
	spin_unlock_irqrestore(&unw.lock, flags);

	return table;
}

void
unw_remove_unwind_table (void *handle)
{
	struct unw_table *table, *prev;
	struct unw_script *tmp;
	unsigned long flags;
	long index;

	if (!handle) {
		UNW_DPRINT(0, "unwind.%s: ignoring attempt to remove non-existent unwind table\n",
			   __FUNCTION__);
		return;
	}

	table = handle;
	if (table == &unw.kernel_table) {
		UNW_DPRINT(0, "unwind.%s: sorry, freeing the kernel's unwind table is a "
			   "no-can-do!\n", __FUNCTION__);
		return;
	}

	spin_lock_irqsave(&unw.lock, flags);
	{
		/* first, delete the table: */

		for (prev = (struct unw_table *) &unw.tables; prev; prev = prev->next)
			if (prev->next == table)
				break;
		if (!prev) {
			UNW_DPRINT(0, "unwind.%s: failed to find unwind table %p\n",
				   __FUNCTION__, (void *) table);
			spin_unlock_irqrestore(&unw.lock, flags);
			return;
		}
		prev->next = table->next;
	}
	spin_unlock_irqrestore(&unw.lock, flags);

	/* next, remove hash table entries for this table */

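	/*
	 * The first range check is done without holding the script's lock and
	 * only filters out uninteresting entries; the check is repeated under
	 * the write lock before the entry is actually unhashed.
	 */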
	for (index = 0; index < UNW_HASH_SIZE; ++index) {
		tmp = unw.cache + unw.hash[index];
		if (unw.hash[index] >= UNW_CACHE_SIZE
		    || tmp->ip < table->start || tmp->ip >= table->end)
			continue;

		write_lock(&tmp->lock);
		{
			if (tmp->ip >= table->start && tmp->ip < table->end) {
				unw.hash[index] = tmp->coll_chain;
				tmp->ip = 0;
			}
		}
		write_unlock(&tmp->lock);
	}

	kfree(table);
}

void
unw_create_gate_table (void)
{
	extern char __start_gate_section[], __stop_gate_section[];
	unsigned long *lp, start, end, segbase = unw.kernel_table.segment_base;
	const struct unw_table_entry *entry, *first, *unw_table_end;
	extern int ia64_unw_end;
	size_t info_size, size;
	char *info;

	start = (unsigned long) __start_gate_section - segbase;
	end = (unsigned long) __stop_gate_section - segbase;
	unw_table_end = (struct unw_table_entry *) &ia64_unw_end;
	size = 0;
	first = lookup(&unw.kernel_table, start);

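	/*
	 * Per entry: 3*8 bytes for the (start, end, info) table triple, plus
	 * 8 bytes for the unwind-info header word and 8*UNW_LENGTH() bytes for
	 * the unwind-info body it describes.
	 */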
	for (entry = first; entry < unw_table_end && entry->start_offset < end; ++entry)
		size += 3*8 + 8 + 8*UNW_LENGTH(*(u64 *) (segbase + entry->info_offset));
	size += 8;	/* reserve space for "end of table" marker */

	unw.gate_table = alloc_bootmem(size);
	if (!unw.gate_table) {
		unw.gate_table_size = 0;
		printk(KERN_ERR "unwind: unable to create unwind data for gate page!\n");
		return;
	}
	unw.gate_table_size = size;

	lp = unw.gate_table;
	info = (char *) unw.gate_table + size;

	for (entry = first; entry < unw_table_end && entry->start_offset < end; ++entry, lp += 3) {
		info_size = 8 + 8*UNW_LENGTH(*(u64 *) (segbase + entry->info_offset));
		info -= info_size;
		memcpy(info, (char *) segbase + entry->info_offset, info_size);

		lp[0] = entry->start_offset - start + GATE_ADDR;	/* start */
		lp[1] = entry->end_offset - start + GATE_ADDR;		/* end */
		lp[2] = info - (char *) unw.gate_table;			/* info */
	}
	*lp = 0;	/* end-of-table marker */
}

void
unw_init (void)
{
	extern int ia64_unw_start, ia64_unw_end, __gp;
	extern void unw_hash_index_t_is_too_narrow (void);
	long i, off;

	if (8*sizeof(unw_hash_index_t) < UNW_LOG_HASH_SIZE)
		unw_hash_index_t_is_too_narrow();
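	/*
	 * unw_hash_index_t_is_too_narrow() is deliberately left undefined: the
	 * condition above is a compile-time constant, so if unw_hash_index_t can
	 * hold every hash value the call is optimized away; otherwise the build
	 * fails at link time.
	 */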

	unw.sw_off[unw.preg_index[UNW_REG_PRI_UNAT_GR]] = SW(AR_UNAT);
	unw.sw_off[unw.preg_index[UNW_REG_BSPSTORE]] = SW(AR_BSPSTORE);
	unw.sw_off[unw.preg_index[UNW_REG_PFS]] = SW(AR_PFS);
	unw.sw_off[unw.preg_index[UNW_REG_RP]] = SW(B0);
	unw.sw_off[unw.preg_index[UNW_REG_UNAT]] = SW(AR_UNAT);
	unw.sw_off[unw.preg_index[UNW_REG_PR]] = SW(PR);
	unw.sw_off[unw.preg_index[UNW_REG_LC]] = SW(AR_LC);
	unw.sw_off[unw.preg_index[UNW_REG_FPSR]] = SW(AR_FPSR);
	for (i = UNW_REG_R4, off = SW(R4); i <= UNW_REG_R7; ++i, off += 8)
		unw.sw_off[unw.preg_index[i]] = off;
	for (i = UNW_REG_B1, off = SW(B1); i <= UNW_REG_B5; ++i, off += 8)
		unw.sw_off[unw.preg_index[i]] = off;
	for (i = UNW_REG_F2, off = SW(F2); i <= UNW_REG_F5; ++i, off += 16)
		unw.sw_off[unw.preg_index[i]] = off;
	for (i = UNW_REG_F16, off = SW(F16); i <= UNW_REG_F31; ++i, off += 16)
		unw.sw_off[unw.preg_index[i]] = off;

	for (i = 0; i < UNW_CACHE_SIZE; ++i) {
		if (i > 0)
			unw.cache[i].lru_chain = (i - 1);
		unw.cache[i].coll_chain = -1;
		unw.cache[i].lock = RW_LOCK_UNLOCKED;
	}
	unw.lru_head = UNW_CACHE_SIZE - 1;
	unw.lru_tail = 0;

	init_unwind_table(&unw.kernel_table, "kernel", KERNEL_START, (unsigned long) &__gp,
			  &ia64_unw_start, &ia64_unw_end);
}

/*
 * This system call copies the unwind data into the buffer pointed to by BUF and returns
 * the size of the unwind data.  If BUF_SIZE is smaller than the size of the unwind data
 * or if BUF is NULL, nothing is copied, but the system call still returns the size of the
 * unwind data.
 *
 * The first portion of the unwind data contains an unwind table and the rest contains the
 * associated unwind info (in no particular order).  The unwind table consists of a table
 * of entries of the form:
 *
 *	u64 start;	(64-bit address of start of function)
 *	u64 end;	(64-bit address of end of function)
 *	u64 info;	(BUF-relative offset to unwind info)
 *
 * The end of the unwind table is indicated by an entry with a START address of zero.
 *
 * Please see the IA-64 Software Conventions and Runtime Architecture manual for details
 * on the format of the unwind info.
 *
 * ERRORS
 *	EFAULT	BUF points outside your accessible address space.
 */
asmlinkage long
sys_getunwind (void *buf, size_t buf_size)
{
	if (buf && buf_size >= unw.gate_table_size)
		if (copy_to_user(buf, unw.gate_table, unw.gate_table_size) != 0)
			return -EFAULT;
	return unw.gate_table_size;
}
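
/*
 * Illustrative sketch (user-level, not compiled into the kernel, hence under
 * "#if 0"): given a buffer filled in by the system call above, the unwind
 * table at the start of the buffer can be walked as (start, end, info)
 * triples until an entry with start == 0 is reached.  The names "buf",
 * "find_entry" and "gate_unwind_entry" are hypothetical; how the buffer is
 * obtained (two calls to the getunwind system call, one to learn the size
 * and one to fill the buffer) is left out.
 */
#if 0
struct gate_unwind_entry {
	unsigned long start;	/* address of first instruction covered */
	unsigned long end;	/* address past the last instruction covered */
	unsigned long info;	/* offset of unwind info, relative to buf */
};

static const void *
find_entry (const char *buf, unsigned long ip)
{
	const struct gate_unwind_entry *e;

	for (e = (const struct gate_unwind_entry *) buf; e->start != 0; ++e)
		if (ip >= e->start && ip < e->end)
			return buf + e->info;	/* unwind info covering ip */
	return NULL;
}
#endif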