1 /*
2 * Kernel support for the ptrace() and syscall tracing interfaces.
3 *
4 * Copyright (C) 1999-2003 Hewlett-Packard Co
5 * David Mosberger-Tang <davidm@hpl.hp.com>
6 *
7 * Derived from the x86 and Alpha versions. Most of the code in here
8 * could actually be factored into a common set of routines.
9 */
10 #include <linux/config.h>
11 #include <linux/kernel.h>
12 #include <linux/sched.h>
13 #include <linux/slab.h>
14 #include <linux/mm.h>
15 #include <linux/errno.h>
16 #include <linux/ptrace.h>
17 #include <linux/smp_lock.h>
18 #include <linux/user.h>
19
20 #include <asm/pgtable.h>
21 #include <asm/processor.h>
22 #include <asm/ptrace_offsets.h>
23 #include <asm/rse.h>
24 #include <asm/system.h>
25 #include <asm/uaccess.h>
26 #include <asm/unwind.h>
27 #ifdef CONFIG_PERFMON
28 #include <asm/perfmon.h>
29 #endif
30
31 #define offsetof(type,field) ((unsigned long) &((type *) 0)->field)
32
33 /*
34 * Bits in the PSR that we allow ptrace() to change:
35 * be, up, ac, mfl, mfh (the user mask; five bits total)
36 * db (debug breakpoint fault; one bit)
37 * id (instruction debug fault disable; one bit)
38 * dd (data debug fault disable; one bit)
39 * ri (restart instruction; two bits)
40 * is (instruction set; one bit)
41 */
42 #define IPSR_WRITE_MASK \
43 (IA64_PSR_UM | IA64_PSR_DB | IA64_PSR_IS | IA64_PSR_ID | IA64_PSR_DD | IA64_PSR_RI)
44 #define IPSR_READ_MASK IPSR_WRITE_MASK
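/*
 * Illustrative note (not in the original source): a ptrace() write to PT_CR_IPSR only
 * ever changes the bits covered by IPSR_WRITE_MASK; everything else in cr.ipsr is
 * preserved.  See the PT_CR_IPSR case in access_uarea() below, which effectively does:
 *
 *	new_ipsr = (*data & IPSR_WRITE_MASK) | (old_ipsr & ~IPSR_WRITE_MASK);
 */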
45
46 #define PTRACE_DEBUG 1
47
48 #if PTRACE_DEBUG
49 # define dprintk(format...) printk(format)
50 # define inline
51 #else
52 # define dprintk(format...)
53 #endif
54
55 /*
56 * Collect the NaT bits for r1-r31 from scratch_unat and return a NaT
57 * bitset where bit i is set iff the NaT bit of register i is set.
58 */
59 unsigned long
60 ia64_get_scratch_nat_bits (struct pt_regs *pt, unsigned long scratch_unat)
61 {
62 # define GET_BITS(first, last, unat) \
63 ({ \
64 unsigned long bit = ia64_unat_pos(&pt->r##first); \
65 unsigned long mask = ((1UL << (last - first + 1)) - 1) << first; \
66 unsigned long dist; \
67 if (bit < first) \
68 dist = 64 + bit - first; \
69 else \
70 dist = bit - first; \
71 ia64_rotr(unat, dist) & mask; \
72 })
73 unsigned long val;
74
75 /*
76 * Registers that are stored consecutively in struct pt_regs can be handled in
77 * parallel. If the register order in struct pt_regs changes, this code MUST be
78 * updated.
79 */
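/*
 * Worked example (hypothetical layout): if the unat bit for pt->r16 were bit 21 of
 * ar.unat, then GET_BITS(16, 31, scratch_unat) would compute bit = 21 and
 * dist = 21 - 16 = 5; rotating scratch_unat right by 5 moves unat bits 21..36 down
 * to positions 16..31, where the mask extracts them.
 */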
80 val = GET_BITS( 1, 1, scratch_unat);
81 val |= GET_BITS( 2, 3, scratch_unat);
82 val |= GET_BITS(12, 13, scratch_unat);
83 val |= GET_BITS(14, 14, scratch_unat);
84 val |= GET_BITS(15, 15, scratch_unat);
85 val |= GET_BITS( 8, 11, scratch_unat);
86 val |= GET_BITS(16, 31, scratch_unat);
87 return val;
88
89 # undef GET_BITS
90 }
91
92 /*
93 * Set the NaT bits for the scratch registers according to NAT and
94 * return the resulting unat (assuming the scratch registers are
95 * stored in PT).
96 */
97 unsigned long
98 ia64_put_scratch_nat_bits (struct pt_regs *pt, unsigned long nat)
99 {
100 # define PUT_BITS(first, last, nat) \
101 ({ \
102 unsigned long bit = ia64_unat_pos(&pt->r##first); \
103 unsigned long mask = ((1UL << (last - first + 1)) - 1) << first; \
104 long dist; \
105 if (bit < first) \
106 dist = 64 + bit - first; \
107 else \
108 dist = bit - first; \
109 ia64_rotl(nat & mask, dist); \
110 })
111 unsigned long scratch_unat;
112
113 /*
114 * Registers that are stored consecutively in struct pt_regs can be handled in
115 * parallel. If the register order in struct pt_regs changes, this code MUST be
116 * updated.
117 */
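/*
 * PUT_BITS is the inverse of GET_BITS above: it uses the same distance but rotates
 * left, so that NaT bit "first" ends up back on the unat position of pt->r"first".
 */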
118 scratch_unat = PUT_BITS( 1, 1, nat);
119 scratch_unat |= PUT_BITS( 2, 3, nat);
120 scratch_unat |= PUT_BITS(12, 13, nat);
121 scratch_unat |= PUT_BITS(14, 14, nat);
122 scratch_unat |= PUT_BITS(15, 15, nat);
123 scratch_unat |= PUT_BITS( 8, 11, nat);
124 scratch_unat |= PUT_BITS(16, 31, nat);
125
126 return scratch_unat;
127
128 # undef PUT_BITS
129 }
130
131 #define IA64_MLX_TEMPLATE 0x2
132 #define IA64_MOVL_OPCODE 6
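/*
 * Background (informational): instructions come in 16-byte bundles of three slots, and
 * psr.ri selects which slot (0-2) execution resumes at.  In an MLX bundle the movl
 * occupies slots 1 and 2, which is why the code below never leaves psr.ri pointing at
 * slot 2 of such a bundle.
 */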
133
134 void
135 ia64_increment_ip (struct pt_regs *regs)
136 {
137 unsigned long w0, ri = ia64_psr(regs)->ri + 1;
138
139 if (ri > 2) {
140 ri = 0;
141 regs->cr_iip += 16;
142 } else if (ri == 2) {
143 get_user(w0, (char *) regs->cr_iip + 0);
144 if (((w0 >> 1) & 0xf) == IA64_MLX_TEMPLATE) {
145 /*
146 * rfi'ing to slot 2 of an MLX bundle causes
147 * an illegal operation fault. We don't want
148 * that to happen...
149 */
150 ri = 0;
151 regs->cr_iip += 16;
152 }
153 }
154 ia64_psr(regs)->ri = ri;
155 }
156
157 void
158 ia64_decrement_ip (struct pt_regs *regs)
159 {
160 unsigned long w0, ri = ia64_psr(regs)->ri - 1;
161
162 if (ia64_psr(regs)->ri == 0) {
163 regs->cr_iip -= 16;
164 ri = 2;
165 get_user(w0, (char *) regs->cr_iip + 0);
166 if (((w0 >> 1) & 0xf) == IA64_MLX_TEMPLATE) {
167 /*
168 * rfi'ing to slot 2 of an MLX bundle causes
169 * an illegal operation fault. We don't want
170 * that to happen...
171 */
172 ri = 1;
173 }
174 }
175 ia64_psr(regs)->ri = ri;
176 }
177
178 /*
179 * This routine is used to read the rnat bits that are stored on the kernel backing store.
180 * Since, in general, the alignments of the user and kernel backing stores differ, this is not
181 * completely trivial. In essence, we need to construct the user RNAT based on up to two
182 * kernel RNAT values and/or the RNAT value saved in the child's pt_regs.
183 *
184 * user rbs
185 *
186 * +--------+ <-- lowest address
187 * | slot62 |
188 * +--------+
189 * | rnat | 0x....1f8
190 * +--------+
191 * | slot00 | \
192 * +--------+ |
193 * | slot01 | > child_regs->ar_rnat
194 * +--------+ |
195 * | slot02 | / kernel rbs
196 * +--------+ +--------+
197 * <- child_regs->ar_bspstore | slot61 | <-- krbs
198 * +- - - - + +--------+
199 * | slot62 |
200 * +- - - - + +--------+
201 * | rnat |
202 * +- - - - + +--------+
203 * vrnat | slot00 |
204 * +- - - - + +--------+
205 * = =
206 * +--------+
207 * | slot00 | \
208 * +--------+ |
209 * | slot01 | > child_stack->ar_rnat
210 * +--------+ |
211 * | slot02 | /
212 * +--------+
213 * <--- child_stack->ar_bspstore
214 *
215 * The way to think of this code is as follows: bit 0 in the user rnat corresponds to some
216 * bit N (0 <= N <= 62) in one of the kernel rnat values. The kernel rnat value holding
217 * this bit is stored in variable rnat0. rnat1 is loaded with the kernel rnat value that
218 * forms the upper bits of the user rnat value.
219 *
220 * Boundary cases:
221 *
222 * o when reading the rnat "below" the first rnat slot on the kernel backing store,
223 * rnat0/rnat1 are set to 0 and the low order bits are merged in from pt->ar_rnat.
224 *
225 * o when reading the rnat "above" the last rnat slot on the kernel backing store,
226 * rnat0/rnat1 get their values from sw->ar_rnat.
227 */
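/*
 * Numerical illustration (hypothetical shift value): if shift were 10, user rnat bits
 * 0..52 would come from bits 10..62 of rnat0 and user rnat bits 53..62 from bits 0..9
 * of rnat1, matching the two masked-shift steps at the end of get_rnat() below.
 */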
228 static unsigned long
229 get_rnat (struct task_struct *task, struct switch_stack *sw,
230 unsigned long *krbs, unsigned long *urnat_addr, unsigned long *urbs_end)
231 {
232 unsigned long rnat0 = 0, rnat1 = 0, urnat = 0, *slot0_kaddr, umask = 0, mask, m;
233 unsigned long *kbsp, *ubspstore, *rnat0_kaddr, *rnat1_kaddr, shift;
234 long num_regs, nbits;
235 struct pt_regs *pt;
236
237 pt = ia64_task_regs(task);
238 kbsp = (unsigned long *) sw->ar_bspstore;
239 ubspstore = (unsigned long *) pt->ar_bspstore;
240
241 if (urbs_end < urnat_addr)
242 nbits = ia64_rse_num_regs(urnat_addr - 63, urbs_end);
243 else
244 nbits = 63;
245 mask = (1UL << nbits) - 1;
246 /*
247 * First, figure out which bit number slot 0 in user-land maps to in the kernel
248 * rnat. Do this by figuring out how many register slots we're beyond the user's
249 * backingstore and then computing the equivalent address in kernel space.
250 */
251 num_regs = ia64_rse_num_regs(ubspstore, urnat_addr + 1);
252 slot0_kaddr = ia64_rse_skip_regs(krbs, num_regs);
253 shift = ia64_rse_slot_num(slot0_kaddr);
254 rnat1_kaddr = ia64_rse_rnat_addr(slot0_kaddr);
255 rnat0_kaddr = rnat1_kaddr - 64;
256
257 if (ubspstore + 63 > urnat_addr) {
258 /* some bits need to be merged in from pt->ar_rnat */
259 umask = ((1UL << ia64_rse_slot_num(ubspstore)) - 1) & mask;
260 urnat = (pt->ar_rnat & umask);
261 mask &= ~umask;
262 if (!mask)
263 return urnat;
264 }
265
266 m = mask << shift;
267 if (rnat0_kaddr >= kbsp)
268 rnat0 = sw->ar_rnat;
269 else if (rnat0_kaddr > krbs)
270 rnat0 = *rnat0_kaddr;
271 urnat |= (rnat0 & m) >> shift;
272
273 m = mask >> (63 - shift);
274 if (rnat1_kaddr >= kbsp)
275 rnat1 = sw->ar_rnat;
276 else if (rnat1_kaddr > krbs)
277 rnat1 = *rnat1_kaddr;
278 urnat |= (rnat1 & m) << (63 - shift);
279 return urnat;
280 }
281
282 /*
283 * The reverse of get_rnat.
284 */
285 static void
286 put_rnat (struct task_struct *task, struct switch_stack *sw,
287 unsigned long *krbs, unsigned long *urnat_addr, unsigned long urnat,
288 unsigned long *urbs_end)
289 {
290 unsigned long rnat0 = 0, rnat1 = 0, *slot0_kaddr, umask = 0, mask, m;
291 unsigned long *kbsp, *ubspstore, *rnat0_kaddr, *rnat1_kaddr, shift;
292 long num_regs, nbits;
293 struct pt_regs *pt;
294 unsigned long cfm, *urbs_kargs;
295 struct unw_frame_info info;
296
297 pt = ia64_task_regs(task);
298 kbsp = (unsigned long *) sw->ar_bspstore;
299 ubspstore = (unsigned long *) pt->ar_bspstore;
300
301 urbs_kargs = urbs_end;
302 if ((long)pt->cr_ifs >= 0) {
303 /*
304 * If entered via syscall, don't allow user to set rnat bits
305 * for syscall args.
306 */
307 unw_init_from_blocked_task(&info,task);
308 if (unw_unwind_to_user(&info) == 0) {
309 unw_get_cfm(&info,&cfm);
310 urbs_kargs = ia64_rse_skip_regs(urbs_end,-(cfm & 0x7f));
311 }
312 }
313
314 if (urbs_kargs >= urnat_addr)
315 nbits = 63;
316 else {
317 if ((urnat_addr - 63) >= urbs_kargs)
318 return;
319 nbits = ia64_rse_num_regs(urnat_addr - 63, urbs_kargs);
320 }
321 mask = (1UL << nbits) - 1;
322
323 /*
324 * First, figure out which bit number slot 0 in user-land maps to in the kernel
325 * rnat. Do this by figuring out how many register slots we're beyond the user's
326 * backingstore and then computing the equivalent address in kernel space.
327 */
328 num_regs = ia64_rse_num_regs(ubspstore, urnat_addr + 1);
329 slot0_kaddr = ia64_rse_skip_regs(krbs, num_regs);
330 shift = ia64_rse_slot_num(slot0_kaddr);
331 rnat1_kaddr = ia64_rse_rnat_addr(slot0_kaddr);
332 rnat0_kaddr = rnat1_kaddr - 64;
333
334 if (ubspstore + 63 > urnat_addr) {
335 /* some bits need to be placed in pt->ar_rnat: */
336 umask = ((1UL << ia64_rse_slot_num(ubspstore)) - 1) & mask;
337 pt->ar_rnat = (pt->ar_rnat & ~umask) | (urnat & umask);
338 mask &= ~umask;
339 if (!mask)
340 return;
341 }
342 /*
343 * Note: Section 11.1 of the EAS guarantees that bit 63 of an
344 * rnat slot is ignored, so we don't have to clear it here.
345 */
346 rnat0 = (urnat << shift);
347 m = mask << shift;
348 if (rnat0_kaddr >= kbsp)
349 sw->ar_rnat = (sw->ar_rnat & ~m) | (rnat0 & m);
350 else if (rnat0_kaddr > krbs)
351 *rnat0_kaddr = ((*rnat0_kaddr & ~m) | (rnat0 & m));
352
353 rnat1 = (urnat >> (63 - shift));
354 m = mask >> (63 - shift);
355 if (rnat1_kaddr >= kbsp)
356 sw->ar_rnat = (sw->ar_rnat & ~m) | (rnat1 & m);
357 else if (rnat1_kaddr > krbs)
358 *rnat1_kaddr = ((*rnat1_kaddr & ~m) | (rnat1 & m));
359 }
360
361 /*
362 * Read a word from the user-level backing store of task CHILD. ADDR is the user-level
363 * address to read the word from, VAL a pointer to the return value, and USER_RBS_END gives
364 * the end of the user-level backing store (i.e., it's the address that would be in ar.bsp
365 * after the user executed a "cover" instruction).
366 *
367 * This routine takes care of accessing the kernel register backing store for those
368 * registers that got spilled there. It also takes care of calculating the appropriate
369 * RNaT collection words.
370 */
371 long
372 ia64_peek (struct task_struct *child, struct switch_stack *child_stack, unsigned long user_rbs_end,
373 unsigned long addr, long *val)
374 {
375 unsigned long *bspstore, *krbs, regnum, *laddr, *urbs_end, *rnat_addr;
376 struct pt_regs *child_regs;
377 size_t copied;
378 long ret;
379
380 urbs_end = (long *) user_rbs_end;
381 laddr = (unsigned long *) addr;
382 child_regs = ia64_task_regs(child);
383 bspstore = (unsigned long *) child_regs->ar_bspstore;
384 krbs = (unsigned long *) child + IA64_RBS_OFFSET/8;
385 if (laddr >= bspstore && laddr <= ia64_rse_rnat_addr(urbs_end)) {
386 /*
387 * Attempt to read the RBS in an area that's actually on the kernel RBS =>
388 * read the corresponding bits in the kernel RBS.
389 */
390 rnat_addr = ia64_rse_rnat_addr(laddr);
391 ret = get_rnat(child, child_stack, krbs, rnat_addr, urbs_end);
392
393 if (laddr == rnat_addr) {
394 /* return NaT collection word itself */
395 *val = ret;
396 return 0;
397 }
398
399 if (((1UL << ia64_rse_slot_num(laddr)) & ret) != 0) {
400 /*
401 * It is implementation dependent whether the data portion of a
402 * NaT value gets saved on a st8.spill or RSE spill (e.g., see
403 * EAS 2.6, 4.4.4.6 Register Spill and Fill). To get consistent
404 * behavior across all possible IA-64 implementations, we return
405 * zero in this case.
406 */
407 *val = 0;
408 return 0;
409 }
410
411 if (laddr < urbs_end) {
412 /* the desired word is on the kernel RBS and is not a NaT */
413 regnum = ia64_rse_num_regs(bspstore, laddr);
414 *val = *ia64_rse_skip_regs(krbs, regnum);
415 return 0;
416 }
417 }
418 copied = access_process_vm(child, addr, &ret, sizeof(ret), 0);
419 if (copied != sizeof(ret))
420 return -EIO;
421 *val = ret;
422 return 0;
423 }
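/*
 * Typical call sequence (as used by the PTRACE_PEEK* cases in sys_ptrace() below;
 * variable names illustrative):
 *
 *	urbs_end = ia64_get_user_rbs_end(child, pt, NULL);
 *	ret = ia64_peek(child, sw, urbs_end, addr, &val);
 */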
424
425 long
426 ia64_poke (struct task_struct *child, struct switch_stack *child_stack, unsigned long user_rbs_end,
427 unsigned long addr, long val)
428 {
429 unsigned long *bspstore, *krbs, regnum, *laddr, *urbs_end = (long *) user_rbs_end;
430 struct pt_regs *child_regs;
431
432 laddr = (unsigned long *) addr;
433 child_regs = ia64_task_regs(child);
434 bspstore = (unsigned long *) child_regs->ar_bspstore;
435 krbs = (unsigned long *) child + IA64_RBS_OFFSET/8;
436 if (laddr >= bspstore && laddr <= ia64_rse_rnat_addr(urbs_end)) {
437 /*
438 * Attempt to write the RBS in an area that's actually on the kernel RBS
439 * => write the corresponding bits in the kernel RBS.
440 */
441 if (ia64_rse_is_rnat_slot(laddr))
442 put_rnat(child, child_stack, krbs, laddr, val, urbs_end);
443 else {
444 if (laddr < urbs_end) {
445 regnum = ia64_rse_num_regs(bspstore, laddr);
446 *ia64_rse_skip_regs(krbs, regnum) = val;
447 }
448 }
449 } else if (access_process_vm(child, addr, &val, sizeof(val), 1) != sizeof(val)) {
450 return -EIO;
451 }
452 return 0;
453 }
454
455 /*
456 * Calculate the address of the end of the user-level register backing store. This is the
457 * address that would have been stored in ar.bsp if the user had executed a "cover"
458 * instruction right before entering the kernel. If CFMP is not NULL, it is used to
459 * return the "current frame mask" that was active at the time the kernel was entered.
460 */
461 unsigned long
462 ia64_get_user_rbs_end (struct task_struct *child, struct pt_regs *pt, unsigned long *cfmp)
463 {
464 unsigned long *krbs, *bspstore, cfm;
465 struct unw_frame_info info;
466 long ndirty;
467
468 krbs = (unsigned long *) child + IA64_RBS_OFFSET/8;
469 bspstore = (unsigned long *) pt->ar_bspstore;
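/*
 * pt->loadrs is kept in ar.rsc format, i.e., the loadrs field starts at bit 16, so
 * shifting right by 19 converts the dirty-partition size from bytes into 8-byte
 * register slots.
 */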
470 ndirty = ia64_rse_num_regs(krbs, krbs + (pt->loadrs >> 19));
471 cfm = pt->cr_ifs & ~(1UL << 63);
472
473 if ((long) pt->cr_ifs >= 0) {
474 /*
475 * If bit 63 of cr.ifs is cleared, the kernel was entered via a system
476 * call and we need to recover the CFM that existed on entry to the
477 * kernel by unwinding the kernel stack.
478 */
479 unw_init_from_blocked_task(&info, child);
480 if (unw_unwind_to_user(&info) == 0) {
481 unw_get_cfm(&info, &cfm);
482 ndirty += (cfm & 0x7f);
483 }
484 }
485 if (cfmp)
486 *cfmp = cfm;
487 return (unsigned long) ia64_rse_skip_regs(bspstore, ndirty);
488 }
489
490 /*
491 * Synchronize (i.e., write) the RSE backing store living in kernel space to the VM of the
492 * CHILD task. SW and PT are the pointers to the switch_stack and pt_regs structures,
493 * respectively. USER_RBS_END is the user-level address at which the backing store ends.
494 */
495 long
496 ia64_sync_user_rbs (struct task_struct *child, struct switch_stack *sw,
497 unsigned long user_rbs_start, unsigned long user_rbs_end)
498 {
499 unsigned long addr, val;
500 long ret;
501
502 /* now copy word for word from kernel rbs to user rbs: */
503 for (addr = user_rbs_start; addr < user_rbs_end; addr += 8) {
504 ret = ia64_peek(child, sw, user_rbs_end, addr, &val);
505 if (ret < 0)
506 return ret;
507 if (access_process_vm(child, addr, &val, sizeof(val), 1) != sizeof(val))
508 return -EIO;
509 }
510 return 0;
511 }
512
513 /*
514 * Simulate user-level "flushrs". Note: we can't just add pt->loadrs>>16 to
515 * pt->ar_bspstore because the kernel backing store and the user-level backing store may
516 * have different alignments (and therefore a different number of intervening rnat slots).
517 */
518 static void
519 user_flushrs (struct task_struct *task, struct pt_regs *pt)
520 {
521 unsigned long *krbs;
522 long ndirty;
523
524 krbs = (unsigned long *) task + IA64_RBS_OFFSET/8;
525 ndirty = ia64_rse_num_regs(krbs, krbs + (pt->loadrs >> 19));
526
527 pt->ar_bspstore = (unsigned long) ia64_rse_skip_regs((unsigned long *) pt->ar_bspstore,
528 ndirty);
529 pt->loadrs = 0;
530 }
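/*
 * After user_flushrs(), ar.bspstore points past what used to be the dirty partition
 * and loadrs is zero, so once the task returns to user level the RSE behaves as if
 * the task itself had just executed "flushrs" (the dirty registers having already been
 * written out by ia64_sync_user_rbs()).
 */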
531
532 static inline void
533 sync_user_rbs_one_thread (struct task_struct *p, int make_writable)
534 {
535 struct switch_stack *sw;
536 unsigned long urbs_end;
537 struct pt_regs *pt;
538
539 sw = (struct switch_stack *) (p->thread.ksp + 16);
540 pt = ia64_task_regs(p);
541 urbs_end = ia64_get_user_rbs_end(p, pt, NULL);
542 ia64_sync_user_rbs(p, sw, pt->ar_bspstore, urbs_end);
543 if (make_writable)
544 user_flushrs(p, pt);
545 }
546
547 struct task_list {
548 struct task_list *next;
549 struct task_struct *task;
550 };
551
552 #ifdef CONFIG_SMP
553
554 static inline void
555 collect_task (struct task_list **listp, struct task_struct *p, int make_writable)
556 {
557 struct task_list *e;
558
559 e = kmalloc(sizeof(*e), GFP_KERNEL);
560 if (!e)
561 /* oops, can't collect more: finish at least what we collected so far... */
562 return;
563
564 get_task_struct(p);
565 e->task = p;
566 e->next = *listp;
567 *listp = e;
568 }
569
570 static inline struct task_list *
571 finish_task (struct task_list *list, int make_writable)
572 {
573 struct task_list *next = list->next;
574
575 sync_user_rbs_one_thread(list->task, make_writable);
576 free_task_struct(list->task);
577 kfree(list);
578 return next;
579 }
580
581 #else
582 # define collect_task(list, p, make_writable) sync_user_rbs_one_thread(p, make_writable)
583 # define finish_task(list, make_writable) (NULL)
584 #endif
585
586 /*
587 * Synchronize the RSE backing store of CHILD and all tasks that share the address space
588 * with it. CHILD_URBS_END is the address of the end of the register backing store of
589 * CHILD. If MAKE_WRITABLE is set, a user-level "flushrs" is simulated such that the VM
590 * can be written via ptrace() and the tasks will pick up the newly written values. It
591 * would be OK to unconditionally simulate a "flushrs", but this would be more intrusive
592 * than strictly necessary (e.g., it would make it impossible to obtain the original value
593 * of ar.bspstore).
594 */
595 static void
596 threads_sync_user_rbs (struct task_struct *child, unsigned long child_urbs_end, int make_writable)
597 {
598 struct switch_stack *sw;
599 struct task_struct *p;
600 struct mm_struct *mm;
601 struct pt_regs *pt;
602 long multi_threaded;
603
604 task_lock(child);
605 {
606 mm = child->mm;
607 multi_threaded = mm && (atomic_read(&mm->mm_users) > 1);
608 }
609 task_unlock(child);
610
611 if (!multi_threaded) {
612 sw = (struct switch_stack *) (child->thread.ksp + 16);
613 pt = ia64_task_regs(child);
614 ia64_sync_user_rbs(child, sw, pt->ar_bspstore, child_urbs_end);
615 if (make_writable)
616 user_flushrs(child, pt);
617 } else {
618 /*
619 * Note: we can't call ia64_sync_user_rbs() while holding the
620 * tasklist_lock because that may cause a dead-lock: ia64_sync_user_rbs()
621 * may indirectly call tlb_flush_all(), which triggers an IPI.
622 * Furthermore, tasklist_lock is acquired by fork() with interrupts
623 * disabled, so with the right timing, the IPI never completes, hence
624 * tasklist_lock never gets released, hence fork() never completes...
625 */
626 struct task_list *list = NULL;
627
628 read_lock(&tasklist_lock);
629 {
630 for_each_task(p) {
631 if (p->mm == mm && p->state != TASK_RUNNING)
632 collect_task(&list, p, make_writable);
633 }
634 }
635 read_unlock(&tasklist_lock);
636
637 while (list)
638 list = finish_task(list, make_writable);
639 }
640 child->thread.flags |= IA64_THREAD_KRBS_SYNCED; /* set the flag in the child thread only */
641 }
642
643 /*
644 * Write f32-f127 back to task->thread.fph if it has been modified.
645 */
646 inline void
647 ia64_flush_fph (struct task_struct *task)
648 {
649 struct ia64_psr *psr = ia64_psr(ia64_task_regs(task));
650
651 if (ia64_is_local_fpu_owner(task) && psr->mfh) {
652 psr->mfh = 0;
653 task->thread.flags |= IA64_THREAD_FPH_VALID;
654 ia64_save_fpu(&task->thread.fph[0]);
655 }
656 }
657
658 /*
659 * Sync the fph state of the task so that it can be manipulated
660 * through thread.fph. If necessary, f32-f127 are written back to
661 * thread.fph or, if the fph state hasn't been used before, thread.fph
662 * is cleared to zeroes. Also, access to f32-f127 is disabled to
663 * ensure that the task picks up the state from thread.fph when it
664 * executes again.
665 */
666 void
667 ia64_sync_fph (struct task_struct *task)
668 {
669 struct ia64_psr *psr = ia64_psr(ia64_task_regs(task));
670
671 ia64_flush_fph(task);
672 if (!(task->thread.flags & IA64_THREAD_FPH_VALID)) {
673 task->thread.flags |= IA64_THREAD_FPH_VALID;
674 memset(&task->thread.fph, 0, sizeof(task->thread.fph));
675 }
676 ia64_drop_fpu(task);
677 psr->dfh = 1;
678 }
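/*
 * Usage note: code that only needs to read f32-f127 calls ia64_flush_fph() first;
 * code that is about to modify them calls ia64_sync_fph() so that the task reloads
 * the registers from thread.fph when it runs again (see access_uarea() below).
 */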
679
680 static int
681 access_fr (struct unw_frame_info *info, int regnum, int hi, unsigned long *data, int write_access)
682 {
683 struct ia64_fpreg fpval;
684 int ret;
685
686 ret = unw_get_fr(info, regnum, &fpval);
687 if (ret < 0)
688 return ret;
689
690 if (write_access) {
691 fpval.u.bits[hi] = *data;
692 ret = unw_set_fr(info, regnum, fpval);
693 } else
694 *data = fpval.u.bits[hi];
695 return ret;
696 }
697
698 static int
699 access_uarea (struct task_struct *child, unsigned long addr, unsigned long *data, int write_access)
700 {
701 unsigned long *ptr, regnum, urbs_end, rnat_addr;
702 struct switch_stack *sw;
703 struct unw_frame_info info;
704 struct pt_regs *pt;
705
706 pt = ia64_task_regs(child);
707 sw = (struct switch_stack *) (child->thread.ksp + 16);
708
709 if ((addr & 0x7) != 0) {
710 dprintk("ptrace: unaligned register address 0x%lx\n", addr);
711 return -1;
712 }
713
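/*
 * Overview of the address ranges handled below: f32-f127 live in thread.fph; f10-f11
 * and f12-f15 are scratch registers saved in pt_regs and switch_stack, respectively;
 * the preserved state (NaT bits, r4-r7, b1-b5, ar.ec, ar.lc, f2-f5, f16-f31) is reached
 * through the unwinder; the remaining scratch state comes straight out of pt_regs
 * (including ar.csd/ar.ssd); and anything beyond PT_AR_SSD is a debug register.
 */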
714 if (addr < PT_F127 + 16) {
715 /* accessing fph */
716 if (write_access)
717 ia64_sync_fph(child);
718 else
719 ia64_flush_fph(child);
720 ptr = (unsigned long *) ((unsigned long) &child->thread.fph + addr);
721 } else if ((addr >= PT_F10) && (addr < PT_F11 + 16)) {
722 /* scratch registers untouched by kernel (saved in pt_regs) */
723 ptr = (unsigned long *) ((long) pt + offsetof(struct pt_regs, f10) + addr - PT_F10);
724 } else if (addr >= PT_F12 && addr < PT_F15 + 16) {
725 /* scratch registers untouched by kernel (saved in switch_stack) */
726 ptr = (unsigned long *) ((long) sw + (addr - PT_NAT_BITS - 32));
727 } else if (addr < PT_AR_LC + 8) {
728 /* preserved state: */
729 unsigned long nat_bits, scratch_unat, dummy = 0;
730 struct unw_frame_info info;
731 char nat = 0;
732 int ret;
733
734 unw_init_from_blocked_task(&info, child);
735 if (unw_unwind_to_user(&info) < 0)
736 return -1;
737
738 switch (addr) {
739 case PT_NAT_BITS:
740 if (write_access) {
741 nat_bits = *data;
742 scratch_unat = ia64_put_scratch_nat_bits(pt, nat_bits);
743 if (unw_set_ar(&info, UNW_AR_UNAT, scratch_unat) < 0) {
744 dprintk("ptrace: failed to set ar.unat\n");
745 return -1;
746 }
747 for (regnum = 4; regnum <= 7; ++regnum) {
748 unw_get_gr(&info, regnum, &dummy, &nat);
749 unw_set_gr(&info, regnum, dummy, (nat_bits >> regnum) & 1);
750 }
751 } else {
752 if (unw_get_ar(&info, UNW_AR_UNAT, &scratch_unat) < 0) {
753 dprintk("ptrace: failed to read ar.unat\n");
754 return -1;
755 }
756 nat_bits = ia64_get_scratch_nat_bits(pt, scratch_unat);
757 for (regnum = 4; regnum <= 7; ++regnum) {
758 unw_get_gr(&info, regnum, &dummy, &nat);
759 nat_bits |= (nat != 0) << regnum;
760 }
761 *data = nat_bits;
762 }
763 return 0;
764
765 case PT_R4: case PT_R5: case PT_R6: case PT_R7:
766 if (write_access) {
767 /* read NaT bit first: */
768 unsigned long dummy;
769
770 ret = unw_get_gr(&info, (addr - PT_R4)/8 + 4, &dummy, &nat);
771 if (ret < 0)
772 return ret;
773 }
774 return unw_access_gr(&info, (addr - PT_R4)/8 + 4, data, &nat,
775 write_access);
776
777 case PT_B1: case PT_B2: case PT_B3: case PT_B4: case PT_B5:
778 return unw_access_br(&info, (addr - PT_B1)/8 + 1, data, write_access);
779
780 case PT_AR_EC:
781 return unw_access_ar(&info, UNW_AR_EC, data, write_access);
782
783 case PT_AR_LC:
784 return unw_access_ar(&info, UNW_AR_LC, data, write_access);
785
786 default:
787 if (addr >= PT_F2 && addr < PT_F5 + 16)
788 return access_fr(&info, (addr - PT_F2)/16 + 2, (addr & 8) != 0,
789 data, write_access);
790 else if (addr >= PT_F16 && addr < PT_F31 + 16)
791 return access_fr(&info, (addr - PT_F16)/16 + 16, (addr & 8) != 0,
792 data, write_access);
793 else {
794 dprintk("ptrace: rejecting access to register address 0x%lx\n",
795 addr);
796 return -1;
797 }
798 }
799 } else if (addr < PT_F9+16) {
800 /* scratch state */
801 switch (addr) {
802 case PT_AR_BSP:
803 /*
804 * By convention, we use PT_AR_BSP to refer to the end of the user-level
805 * backing store. Use ia64_rse_skip_regs(PT_AR_BSP, -CFM.sof) to get
806 * the real value of ar.bsp at the time the kernel was entered.
807 */
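/*
 * Example (illustrative): a debugger that wants the ar.bsp value at the moment of
 * kernel entry can read PT_AR_BSP and PT_CFM and compute
 * ia64_rse_skip_regs(bsp, -(cfm & 0x7f)) itself, as described above.
 */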
808 urbs_end = ia64_get_user_rbs_end(child, pt, NULL);
809 if (write_access) {
810 if (*data != urbs_end) {
811 if (ia64_sync_user_rbs(child, sw,
812 pt->ar_bspstore, urbs_end) < 0)
813 return -1;
814 /* simulate user-level write of ar.bsp: */
815 pt->loadrs = 0;
816 pt->ar_bspstore = *data;
817 }
818 } else
819 *data = urbs_end;
820 return 0;
821
822 case PT_CFM:
823 if ((long) pt->cr_ifs < 0) {
824 if (write_access)
825 pt->cr_ifs = ((pt->cr_ifs & ~0x3fffffffffUL)
826 | (*data & 0x3fffffffffUL));
827 else
828 *data = pt->cr_ifs & 0x3fffffffffUL;
829 } else {
830 /* kernel was entered through a system call */
831 unsigned long cfm;
832
833 unw_init_from_blocked_task(&info, child);
834 if (unw_unwind_to_user(&info) < 0)
835 return -1;
836
837 unw_get_cfm(&info, &cfm);
838 if (write_access)
839 unw_set_cfm(&info, ((cfm & ~0x3fffffffffU)
840 | (*data & 0x3fffffffffUL)));
841 else
842 *data = cfm;
843 }
844 return 0;
845
846 case PT_CR_IPSR:
847 if (write_access)
848 pt->cr_ipsr = ((*data & IPSR_WRITE_MASK)
849 | (pt->cr_ipsr & ~IPSR_WRITE_MASK));
850 else
851 *data = (pt->cr_ipsr & IPSR_READ_MASK);
852 return 0;
853
854 case PT_AR_RNAT:
855 urbs_end = ia64_get_user_rbs_end(child, pt, NULL);
856 rnat_addr = (long) ia64_rse_rnat_addr((long *) urbs_end);
857 if (write_access)
858 return ia64_poke(child, sw, urbs_end, rnat_addr, *data);
859 else
860 return ia64_peek(child, sw, urbs_end, rnat_addr, data);
861
862 case PT_R1:
863 ptr = (unsigned long *) ((long) pt + offsetof(struct pt_regs, r1));
864 break;
865
866 case PT_R2: case PT_R3:
867 ptr = (unsigned long *) ((long) pt + offsetof(struct pt_regs, r2) + addr - PT_R2);
868 break;
869 case PT_R8: case PT_R9: case PT_R10: case PT_R11:
870 ptr = (unsigned long *) ((long) pt + offsetof(struct pt_regs, r8)+ addr - PT_R8);
871 break;
872 case PT_R12: case PT_R13:
873 ptr = (unsigned long *) ((long) pt + offsetof(struct pt_regs, r12)+ addr - PT_R12);
874 break;
875 case PT_R14:
876 ptr = (unsigned long *) ((long) pt + offsetof(struct pt_regs, r14));
877 break;
878 case PT_R15:
879 ptr = (unsigned long *) ((long) pt + offsetof(struct pt_regs, r15));
880 break;
881 case PT_R16: case PT_R17: case PT_R18: case PT_R19:
882 case PT_R20: case PT_R21: case PT_R22: case PT_R23:
883 case PT_R24: case PT_R25: case PT_R26: case PT_R27:
884 case PT_R28: case PT_R29: case PT_R30: case PT_R31:
885 ptr = (unsigned long *) ((long) pt + offsetof(struct pt_regs, r16) + addr - PT_R16);
886 break;
887 case PT_B0:
888 ptr = (unsigned long *) ((long) pt + offsetof(struct pt_regs, b0));
889 break;
890 case PT_B6:
891 ptr = (unsigned long *) ((long) pt + offsetof(struct pt_regs, b6));
892 break;
893 case PT_B7:
894 ptr = (unsigned long *) ((long) pt + offsetof(struct pt_regs, b7));
895 break;
896 case PT_F6: case PT_F6+8: case PT_F7: case PT_F7+8:
897 case PT_F8: case PT_F8+8: case PT_F9: case PT_F9+8:
898 ptr = (unsigned long *) ((long) pt + offsetof(struct pt_regs, f6) + addr - PT_F6);
899 break;
900 case PT_AR_BSPSTORE:
901 ptr = (unsigned long *) ((long) pt + offsetof(struct pt_regs, ar_bspstore));
902 break;
903 case PT_AR_RSC:
904 ptr = (unsigned long *) ((long) pt + offsetof(struct pt_regs, ar_rsc));
905 break;
906 case PT_AR_UNAT:
907 ptr = (unsigned long *) ((long) pt + offsetof(struct pt_regs, ar_unat));
908 break;
909 case PT_AR_PFS:
910 ptr = (unsigned long *) ((long) pt + offsetof(struct pt_regs, ar_pfs));
911 break;
912 case PT_AR_CCV:
913 ptr = (unsigned long *) ((long) pt + offsetof(struct pt_regs, ar_ccv));
914 break;
915 case PT_AR_FPSR:
916 ptr = (unsigned long *) ((long) pt + offsetof(struct pt_regs, ar_fpsr));
917 break;
918 case PT_CR_IIP:
919 ptr = (unsigned long *) ((long) pt + offsetof(struct pt_regs, cr_iip));
920 break;
921 case PT_PR:
922 ptr = (unsigned long *) ((long) pt + offsetof(struct pt_regs, pr));
923 break;
924 /* scratch register */
925
926 default:
927 /* disallow accessing anything else... */
928 dprintk("ptrace: rejecting access to register address 0x%lx\n",
929 addr);
930 return -1;
931 }
932 } else if (addr <= PT_AR_SSD) {
933 ptr = (unsigned long *) ((long) pt + offsetof(struct pt_regs, ar_csd) + addr - PT_AR_CSD);
934 } else {
935 /* access debug registers */
936
937 if (addr >= PT_IBR) {
938 regnum = (addr - PT_IBR) >> 3;
939 ptr = &child->thread.ibr[0];
940 } else {
941 regnum = (addr - PT_DBR) >> 3;
942 ptr = &child->thread.dbr[0];
943 }
944
945 if (regnum >= 8) {
946 dprintk("ptrace: rejecting access to register address 0x%lx\n", addr);
947 return -1;
948 }
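/*
 * Note (informational): regnum indexes thread.ibr[] or thread.dbr[] (eight 64-bit
 * registers each).  The write path below clears bits 56-58, which in the breakpoint
 * mask registers are the privilege-level match bits for rings 0-2; this is how
 * "kernel-level breakpoints" are kept out of the tracer's reach.
 */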
949 #ifdef CONFIG_PERFMON
950 /*
951 * Check if debug registers are used by perfmon. This test must be done
952 * once we know that we can do the operation, i.e. the arguments are all
953 * valid, but before we start modifying the state.
954 *
955 * Perfmon needs to keep a count of how many processes are trying to
956 * modify the debug registers for system wide monitoring sessions.
957 *
958 * We also include read accesses here, because they may cause the
959 * PMU-installed debug register state (dbr[], ibr[]) to be reset. The two
960 * arrays are also used by perfmon, but we do not use
961 * IA64_THREAD_DBG_VALID. The registers are restored by the PMU context
962 * switch code.
963 */
964 if (pfm_use_debug_registers(child)) return -1;
965 #endif
966
967 if (!(child->thread.flags & IA64_THREAD_DBG_VALID)) {
968 child->thread.flags |= IA64_THREAD_DBG_VALID;
969 memset(child->thread.dbr, 0, sizeof(child->thread.dbr));
970 memset(child->thread.ibr, 0, sizeof(child->thread.ibr));
971 }
972
973 ptr += regnum;
974
975 if (write_access)
976 /* don't let the user set kernel-level breakpoints... */
977 *ptr = *data & ~(7UL << 56);
978 else
979 *data = *ptr;
980 return 0;
981 }
982 if (write_access)
983 *ptr = *data;
984 else
985 *data = *ptr;
986 return 0;
987 }
988
989 static long
990 ptrace_getregs (struct task_struct *child, struct pt_all_user_regs *ppr)
991 {
992 struct switch_stack *sw;
993 struct pt_regs *pt;
994 long ret, retval;
995 struct unw_frame_info info;
996 char nat = 0;
997 int i;
998
999 retval = verify_area(VERIFY_WRITE, ppr, sizeof(struct pt_all_user_regs));
1000 if (retval != 0) {
1001 return -EIO;
1002 }
1003
1004 pt = ia64_task_regs(child);
1005 sw = (struct switch_stack *) (child->thread.ksp + 16);
1006 unw_init_from_blocked_task(&info, child);
1007 if (unw_unwind_to_user(&info) < 0) {
1008 return -EIO;
1009 }
1010
1011 if (((unsigned long) ppr & 0x7) != 0) {
1012 dprintk("ptrace:unaligned register address %p\n", ppr);
1013 return -EIO;
1014 }
1015
1016 retval = 0;
1017
1018 /* control regs */
1019
1020 retval |= __put_user(pt->cr_iip, &ppr->cr_iip);
1021 retval |= access_uarea(child, PT_CR_IPSR, &ppr->cr_ipsr, 0);
1022
1023 /* app regs */
1024
1025 retval |= __put_user(pt->ar_pfs, &ppr->ar[PT_AUR_PFS]);
1026 retval |= __put_user(pt->ar_rsc, &ppr->ar[PT_AUR_RSC]);
1027 retval |= __put_user(pt->ar_bspstore, &ppr->ar[PT_AUR_BSPSTORE]);
1028 retval |= __put_user(pt->ar_unat, &ppr->ar[PT_AUR_UNAT]);
1029 retval |= __put_user(pt->ar_ccv, &ppr->ar[PT_AUR_CCV]);
1030 retval |= __put_user(pt->ar_fpsr, &ppr->ar[PT_AUR_FPSR]);
1031
1032 retval |= access_uarea(child, PT_AR_EC, &ppr->ar[PT_AUR_EC], 0);
1033 retval |= access_uarea(child, PT_AR_LC, &ppr->ar[PT_AUR_LC], 0);
1034 retval |= access_uarea(child, PT_AR_RNAT, &ppr->ar[PT_AUR_RNAT], 0);
1035 retval |= access_uarea(child, PT_AR_BSP, &ppr->ar[PT_AUR_BSP], 0);
1036 retval |= access_uarea(child, PT_CFM, &ppr->cfm, 0);
1037
1038 /* gr1-gr3 */
1039
1040 retval |= __copy_to_user(&ppr->gr[1], &pt->r1, sizeof(long));
1041 retval |= __copy_to_user(&ppr->gr[2], &pt->r2, sizeof(long) *2);
1042
1043 /* gr4-gr7 */
1044
1045 for (i = 4; i < 8; i++) {
1046 retval |= unw_access_gr(&info, i, &ppr->gr[i], &nat, 0);
1047 }
1048
1049 /* gr8-gr11 */
1050
1051 retval |= __copy_to_user(&ppr->gr[8], &pt->r8, sizeof(long) * 4);
1052
1053 /* gr12-gr15 */
1054
1055 retval |= __copy_to_user(&ppr->gr[12], &pt->r12, sizeof(long) * 2);
1056 retval |= __copy_to_user(&ppr->gr[14], &pt->r14, sizeof(long));
1057 retval |= __copy_to_user(&ppr->gr[15], &pt->r15, sizeof(long));
1058
1059 /* gr16-gr31 */
1060
1061 retval |= __copy_to_user(&ppr->gr[16], &pt->r16, sizeof(long) * 16);
1062
1063 /* b0 */
1064
1065 retval |= __put_user(pt->b0, &ppr->br[0]);
1066
1067 /* b1-b5 */
1068
1069 for (i = 1; i < 6; i++) {
1070 retval |= unw_access_br(&info, i, &ppr->br[i], 0);
1071 }
1072
1073 /* b6-b7 */
1074
1075 retval |= __put_user(pt->b6, &ppr->br[6]);
1076 retval |= __put_user(pt->b7, &ppr->br[7]);
1077
1078 /* fr2-fr5 */
1079
1080 for (i = 2; i < 6; i++) {
1081 retval |= access_fr(&info, i, 0, (unsigned long *) &ppr->fr[i], 0);
1082 retval |= access_fr(&info, i, 1, (unsigned long *) &ppr->fr[i] + 1, 0);
1083 }
1084
1085 /* fr6-fr11 */
1086
1087 retval |= __copy_to_user(&ppr->fr[6], &pt->f6, sizeof(struct ia64_fpreg) * 6);
1088
1089 /* fp scratch regs(12-15) */
1090
1091 retval |= __copy_to_user(&ppr->fr[12], &sw->f12, sizeof(struct ia64_fpreg) * 4);
1092
1093 /* fr16-fr31 */
1094
1095 for (i = 16; i < 32; i++) {
1096 retval |= access_fr(&info, i, 0, (unsigned long *) &ppr->fr[i], 0);
1097 retval |= access_fr(&info, i, 1, (unsigned long *) &ppr->fr[i] + 1, 0);
1098 }
1099
1100 /* fph */
1101
1102 ia64_flush_fph(child);
1103 retval |= __copy_to_user(&ppr->fr[32], &child->thread.fph, sizeof(ppr->fr[32]) * 96);
1104
1105 /* preds */
1106
1107 retval |= __put_user(pt->pr, &ppr->pr);
1108
1109 /* nat bits */
1110
1111 retval |= access_uarea(child, PT_NAT_BITS, &ppr->nat, 0);
1112
1113 ret = retval ? -EIO : 0;
1114 return ret;
1115 }
1116
1117 static long
1118 ptrace_setregs (struct task_struct *child, struct pt_all_user_regs *ppr)
1119 {
1120 struct switch_stack *sw;
1121 struct pt_regs *pt;
1122 long ret, retval;
1123 struct unw_frame_info info;
1124 char nat = 0;
1125 int i;
1126
1127 retval = verify_area(VERIFY_READ, ppr, sizeof(struct pt_all_user_regs));
1128 if (retval != 0) {
1129 return -EIO;
1130 }
1131
1132 pt = ia64_task_regs(child);
1133 sw = (struct switch_stack *) (child->thread.ksp + 16);
1134 unw_init_from_blocked_task(&info, child);
1135 if (unw_unwind_to_user(&info) < 0) {
1136 return -EIO;
1137 }
1138
1139 if (((unsigned long) ppr & 0x7) != 0) {
1140 dprintk("ptrace:unaligned register address %p\n", ppr);
1141 return -EIO;
1142 }
1143
1144 retval = 0;
1145
1146 /* control regs */
1147
1148 retval |= __get_user(pt->cr_iip, &ppr->cr_iip);
1149 retval |= access_uarea(child, PT_CR_IPSR, &ppr->cr_ipsr, 1);
1150
1151 /* app regs */
1152
1153 retval |= __get_user(pt->ar_pfs, &ppr->ar[PT_AUR_PFS]);
1154 retval |= __get_user(pt->ar_rsc, &ppr->ar[PT_AUR_RSC]);
1155 retval |= __get_user(pt->ar_bspstore, &ppr->ar[PT_AUR_BSPSTORE]);
1156 retval |= __get_user(pt->ar_unat, &ppr->ar[PT_AUR_UNAT]);
1157 retval |= __get_user(pt->ar_ccv, &ppr->ar[PT_AUR_CCV]);
1158 retval |= __get_user(pt->ar_fpsr, &ppr->ar[PT_AUR_FPSR]);
1159
1160 retval |= access_uarea(child, PT_AR_EC, &ppr->ar[PT_AUR_EC], 1);
1161 retval |= access_uarea(child, PT_AR_LC, &ppr->ar[PT_AUR_LC], 1);
1162 retval |= access_uarea(child, PT_AR_RNAT, &ppr->ar[PT_AUR_RNAT], 1);
1163 retval |= access_uarea(child, PT_AR_BSP, &ppr->ar[PT_AUR_BSP], 1);
1164 retval |= access_uarea(child, PT_CFM, &ppr->cfm, 1);
1165
1166 /* gr1-gr3 */
1167
1168 retval |= __copy_from_user(&pt->r1, &ppr->gr[1], sizeof(long));
1169 retval |= __copy_from_user(&pt->r2, &ppr->gr[2], sizeof(long) * 2);
1170
1171 /* gr4-gr7 */
1172
1173 for (i = 4; i < 8; i++) {
1174 long ret = unw_get_gr(&info, i, &ppr->gr[i], &nat);
1175 if (ret < 0) {
1176 return ret;
1177 }
1178 retval |= unw_access_gr(&info, i, &ppr->gr[i], &nat, 1);
1179 }
1180
1181 /* gr8-gr11 */
1182
1183 retval |= __copy_from_user(&pt->r8, &ppr->gr[8], sizeof(long) * 4);
1184
1185 /* gr12-gr15 */
1186
1187 retval |= __copy_from_user(&pt->r12, &ppr->gr[12], sizeof(long) * 2);
1188 retval |= __copy_from_user(&pt->r14, &ppr->gr[14], sizeof(long));
1189 retval |= __copy_from_user(&pt->r15, &ppr->gr[15], sizeof(long));
1190
1191 /* gr16-gr31 */
1192
1193 retval |= __copy_from_user(&pt->r16, &ppr->gr[16], sizeof(long) * 16);
1194
1195 /* b0 */
1196
1197 retval |= __get_user(pt->b0, &ppr->br[0]);
1198
1199 /* b1-b5 */
1200
1201 for (i = 1; i < 6; i++) {
1202 retval |= unw_access_br(&info, i, &ppr->br[i], 1);
1203 }
1204
1205 /* b6-b7 */
1206
1207 retval |= __get_user(pt->b6, &ppr->br[6]);
1208 retval |= __get_user(pt->b7, &ppr->br[7]);
1209
1210 /* fr2-fr5 */
1211
1212 for (i = 2; i < 6; i++) {
1213 retval |= access_fr(&info, i, 0, (unsigned long *) &ppr->fr[i], 1);
1214 retval |= access_fr(&info, i, 1, (unsigned long *) &ppr->fr[i] + 1, 1);
1215 }
1216
1217 /* fr6-fr11 */
1218
1219 retval |= __copy_from_user(&pt->f6, &ppr->fr[6], sizeof(ppr->fr[6]) * 6);
1220
1221 /* fp scratch regs(12-15) */
1222
1223 retval |= __copy_from_user(&sw->f12, &ppr->fr[12], sizeof(ppr->fr[12]) * 4);
1224
1225 /* fr16-fr31 */
1226
1227 for (i = 16; i < 32; i++) {
1228 retval |= access_fr(&info, i, 0, (unsigned long *) &ppr->fr[i], 1);
1229 retval |= access_fr(&info, i, 1, (unsigned long *) &ppr->fr[i] + 1, 1);
1230 }
1231
1232 /* fph */
1233
1234 ia64_sync_fph(child);
1235 retval |= __copy_from_user(&child->thread.fph, &ppr->fr[32], sizeof(ppr->fr[32]) * 96);
1236
1237 /* preds */
1238
1239 retval |= __get_user(pt->pr, &ppr->pr);
1240
1241 /* nat bits */
1242
1243 retval |= access_uarea(child, PT_NAT_BITS, &ppr->nat, 1);
1244
1245 ret = retval ? -EIO : 0;
1246 return ret;
1247 }
1248
1249 /*
1250 * Called by kernel/ptrace.c when detaching..
1251 *
1252 * Make sure the single step bit is not set.
1253 */
1254 void
1255 ptrace_disable (struct task_struct *child)
1256 {
1257 struct ia64_psr *child_psr = ia64_psr(ia64_task_regs(child));
1258
1259 /* make sure the single-step/taken-branch trap bits are not set: */
1260 child_psr->ss = 0;
1261 child_psr->tb = 0;
1262
1263 /* Turn off flag indicating that the KRBS is sync'd with child's VM: */
1264 child->thread.flags &= ~IA64_THREAD_KRBS_SYNCED;
1265 }
1266
1267 asmlinkage long
1268 sys_ptrace (long request, pid_t pid, unsigned long addr, unsigned long data,
1269 long arg4, long arg5, long arg6, long arg7, long stack)
1270 {
1271 struct pt_regs *pt, *regs = (struct pt_regs *) &stack;
1272 unsigned long urbs_end;
1273 struct task_struct *child;
1274 struct switch_stack *sw;
1275 long ret;
1276
1277 lock_kernel();
1278 ret = -EPERM;
1279 if (request == PTRACE_TRACEME) {
1280 /* are we already being traced? */
1281 if (current->ptrace & PT_PTRACED)
1282 goto out;
1283 current->ptrace |= PT_PTRACED;
1284 ret = 0;
1285 goto out;
1286 }
1287
1288 ret = -ESRCH;
1289 read_lock(&tasklist_lock);
1290 {
1291 child = find_task_by_pid(pid);
1292 if (child)
1293 get_task_struct(child);
1294 }
1295 read_unlock(&tasklist_lock);
1296 if (!child)
1297 goto out;
1298 ret = -EPERM;
1299 if (pid == 1) /* no messing around with init! */
1300 goto out_tsk;
1301
1302 if (request == PTRACE_ATTACH) {
1303 ret = ptrace_attach(child);
1304 goto out_tsk;
1305 }
1306
1307 ret = ptrace_check_attach(child, request == PTRACE_KILL);
1308 if (ret < 0)
1309 goto out_tsk;
1310
1311 pt = ia64_task_regs(child);
1312 sw = (struct switch_stack *) (child->thread.ksp + 16);
1313
1314 switch (request) {
1315 case PTRACE_PEEKTEXT:
1316 case PTRACE_PEEKDATA: /* read word at location addr */
1317 urbs_end = ia64_get_user_rbs_end(child, pt, NULL);
1318
1319 if (!(child->thread.flags & IA64_THREAD_KRBS_SYNCED))
1320 threads_sync_user_rbs(child, urbs_end, 0);
1321
1322 ret = ia64_peek(child, sw, urbs_end, addr, &data);
1323 if (ret == 0) {
1324 ret = data;
1325 regs->r8 = 0; /* ensure "ret" is not mistaken as an error code */
1326 }
1327 goto out_tsk;
1328
1329 case PTRACE_POKETEXT:
1330 case PTRACE_POKEDATA: /* write the word at location addr */
1331 urbs_end = ia64_get_user_rbs_end(child, pt, NULL);
1332 if (!(child->thread.flags & IA64_THREAD_KRBS_SYNCED))
1333 threads_sync_user_rbs(child, urbs_end, 1);
1334
1335 ret = ia64_poke(child, sw, urbs_end, addr, data);
1336 goto out_tsk;
1337
1338 case PTRACE_PEEKUSR: /* read the word at addr in the USER area */
1339 if (access_uarea(child, addr, &data, 0) < 0) {
1340 ret = -EIO;
1341 goto out_tsk;
1342 }
1343 ret = data;
1344 regs->r8 = 0; /* ensure "ret" is not mistaken as an error code */
1345 goto out_tsk;
1346
1347 case PTRACE_POKEUSR: /* write the word at addr in the USER area */
1348 if (access_uarea(child, addr, &data, 1) < 0) {
1349 ret = -EIO;
1350 goto out_tsk;
1351 }
1352 ret = 0;
1353 goto out_tsk;
1354
1355 case PTRACE_GETSIGINFO:
1356 ret = -EIO;
1357 if (!access_ok(VERIFY_WRITE, data, sizeof (siginfo_t)) || !child->thread.siginfo)
1358 goto out_tsk;
1359 ret = copy_siginfo_to_user((siginfo_t *) data, child->thread.siginfo);
1360 goto out_tsk;
1361
1362 case PTRACE_SETSIGINFO:
1363 ret = -EIO;
1364 if (!access_ok(VERIFY_READ, data, sizeof (siginfo_t))
1365 || child->thread.siginfo == 0)
1366 goto out_tsk;
1367 ret = copy_siginfo_from_user(child->thread.siginfo, (siginfo_t *) data);
1368 goto out_tsk;
1369
1370 case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */
1371 case PTRACE_CONT: /* restart after signal. */
1372 ret = -EIO;
1373 if (data > _NSIG)
1374 goto out_tsk;
1375 if (request == PTRACE_SYSCALL)
1376 child->ptrace |= PT_TRACESYS;
1377 else
1378 child->ptrace &= ~PT_TRACESYS;
1379 child->exit_code = data;
1380
1381 /* make sure the single step/taken-branch trap bits are not set: */
1382 ia64_psr(pt)->ss = 0;
1383 ia64_psr(pt)->tb = 0;
1384
1385 /* Turn off flag indicating that the KRBS is sync'd with child's VM: */
1386 child->thread.flags &= ~IA64_THREAD_KRBS_SYNCED;
1387
1388 wake_up_process(child);
1389 ret = 0;
1390 goto out_tsk;
1391
1392 case PTRACE_KILL:
1393 /*
1394 * Make the child exit. Best I can do is send it a
1395 * sigkill. Perhaps it should be put in the status
1396 * that it wants to exit.
1397 */
1398 if (child->state == TASK_ZOMBIE) /* already dead */
1399 goto out_tsk;
1400 child->exit_code = SIGKILL;
1401
1402 /* make sure the single-step/taken-branch trap bits are not set: */
1403 ia64_psr(pt)->ss = 0;
1404 ia64_psr(pt)->tb = 0;
1405
1406 /* Turn off flag indicating that the KRBS is sync'd with child's VM: */
1407 child->thread.flags &= ~IA64_THREAD_KRBS_SYNCED;
1408
1409 wake_up_process(child);
1410 ret = 0;
1411 goto out_tsk;
1412
1413 case PTRACE_SINGLESTEP: /* let child execute for one instruction */
1414 case PTRACE_SINGLEBLOCK:
1415 ret = -EIO;
1416 if (data > _NSIG)
1417 goto out_tsk;
1418
1419 child->ptrace &= ~PT_TRACESYS;
1420 if (request == PTRACE_SINGLESTEP) {
1421 ia64_psr(pt)->ss = 1;
1422 } else {
1423 ia64_psr(pt)->tb = 1;
1424 }
1425 child->exit_code = data;
1426
1427 /* Turn off flag indicating that the KRBS is sync'd with child's VM: */
1428 child->thread.flags &= ~IA64_THREAD_KRBS_SYNCED;
1429
1430 /* give it a chance to run. */
1431 wake_up_process(child);
1432 ret = 0;
1433 goto out_tsk;
1434
1435 case PTRACE_DETACH: /* detach a process that was attached. */
1436 ret = ptrace_detach(child, data);
1437 goto out_tsk;
1438
1439 case PTRACE_GETREGS:
1440 ret = ptrace_getregs(child, (struct pt_all_user_regs*) data);
1441 goto out_tsk;
1442
1443 case PTRACE_SETREGS:
1444 ret = ptrace_setregs(child, (struct pt_all_user_regs*) data);
1445 goto out_tsk;
1446
1447 default:
1448 ret = -EIO;
1449 goto out_tsk;
1450 }
1451 out_tsk:
1452 free_task_struct(child);
1453 out:
1454 unlock_kernel();
1455 return ret;
1456 }
1457
1458 void
1459 syscall_trace (void)
1460 {
1461 if ((current->ptrace & (PT_PTRACED|PT_TRACESYS)) != (PT_PTRACED|PT_TRACESYS))
1462 return;
1463 current->exit_code = SIGTRAP;
1464 set_current_state(TASK_STOPPED);
1465 notify_parent(current, SIGCHLD);
1466 schedule();
1467
1468 /*
1469 * This isn't the same as continuing with a signal, but it will do for normal use.
1470 * strace only continues with a signal if the stopping signal is not SIGTRAP.
1471 * -brl
1472 */
1473 if (current->exit_code) {
1474 send_sig(current->exit_code, current, 1);
1475 current->exit_code = 0;
1476 }
1477 }
1478