/*
 *  Ptrace user space interface.
 *
 *    Copyright IBM Corp. 1999,2010
 *    Author(s): Denis Joseph Barrow
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/security.h>
#include <linux/audit.h>
#include <linux/signal.h>
#include <linux/elf.h>
#include <linux/regset.h>
#include <linux/tracehook.h>
#include <linux/seccomp.h>
#include <trace/syscall.h>
#include <asm/compat.h>
#include <asm/segment.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include "entry.h"

#ifdef CONFIG_COMPAT
#include "compat_ptrace.h"
#endif

#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>

enum s390_regset {
	REGSET_GENERAL,
	REGSET_FP,
	REGSET_LAST_BREAK,
	REGSET_GENERAL_EXTENDED,
};

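/*
 * Update the PER (Program Event Recording) state of a task: pick the
 * active control set, adjust the PER bit in the task's PSW and reload
 * control registers 9-11 if the currently loaded set differs.
 * TIF_SINGLE_STEP selects a fixed instruction-fetch trap set and takes
 * precedence over the user specified registers in thread->per_user.
 */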
void update_per_regs(struct task_struct *task)
{
	static const struct per_regs per_single_step = {
		.control = PER_EVENT_IFETCH,
		.start = 0,
		.end = PSW_ADDR_INSN,
	};
	struct pt_regs *regs = task_pt_regs(task);
	struct thread_struct *thread = &task->thread;
	const struct per_regs *new;
	struct per_regs old;

	/* TIF_SINGLE_STEP overrides the user specified PER registers. */
	new = test_tsk_thread_flag(task, TIF_SINGLE_STEP) ?
		&per_single_step : &thread->per_user;

	/* Take care of the PER enablement bit in the PSW. */
	if (!(new->control & PER_EVENT_MASK)) {
		regs->psw.mask &= ~PSW_MASK_PER;
		return;
	}
	regs->psw.mask |= PSW_MASK_PER;
	__ctl_store(old, 9, 11);
	if (memcmp(new, &old, sizeof(struct per_regs)) != 0)
		__ctl_load(*new, 9, 11);
}

void user_enable_single_step(struct task_struct *task)
{
	set_tsk_thread_flag(task, TIF_SINGLE_STEP);
	if (task == current)
		update_per_regs(task);
}

void user_disable_single_step(struct task_struct *task)
{
	clear_tsk_thread_flag(task, TIF_SINGLE_STEP);
	if (task == current)
		update_per_regs(task);
}

/*
 * Called by kernel/ptrace.c when detaching..
 *
 * Clear all debugging related fields.
 */
void ptrace_disable(struct task_struct *task)
{
	memset(&task->thread.per_user, 0, sizeof(task->thread.per_user));
	memset(&task->thread.per_event, 0, sizeof(task->thread.per_event));
	clear_tsk_thread_flag(task, TIF_SINGLE_STEP);
	clear_tsk_thread_flag(task, TIF_PER_TRAP);
}

#ifndef CONFIG_64BIT
# define __ADDR_MASK 3
#else
# define __ADDR_MASK 7
#endif

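/*
 * Return the value of one word within the per_info part of struct user.
 * The answer is synthesized from thread.per_user and thread.per_event;
 * while single stepping is active the control fields report the
 * instruction-fetch trap set instead of the user specified values.
 */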
static inline unsigned long __peek_user_per(struct task_struct *child,
					    addr_t addr)
{
	struct per_struct_kernel *dummy = NULL;

	if (addr == (addr_t) &dummy->cr9)
		/* Control bits of the active per set. */
		return test_thread_flag(TIF_SINGLE_STEP) ?
			PER_EVENT_IFETCH : child->thread.per_user.control;
	else if (addr == (addr_t) &dummy->cr10)
		/* Start address of the active per set. */
		return test_thread_flag(TIF_SINGLE_STEP) ?
			0 : child->thread.per_user.start;
	else if (addr == (addr_t) &dummy->cr11)
		/* End address of the active per set. */
		return test_thread_flag(TIF_SINGLE_STEP) ?
			PSW_ADDR_INSN : child->thread.per_user.end;
	else if (addr == (addr_t) &dummy->bits)
		/* Single-step bit. */
		return test_thread_flag(TIF_SINGLE_STEP) ?
			(1UL << (BITS_PER_LONG - 1)) : 0;
	else if (addr == (addr_t) &dummy->starting_addr)
		/* Start address of the user specified per set. */
		return child->thread.per_user.start;
	else if (addr == (addr_t) &dummy->ending_addr)
		/* End address of the user specified per set. */
		return child->thread.per_user.end;
	else if (addr == (addr_t) &dummy->perc_atmid)
		/* PER code, ATMID and AI of the last PER trap */
		return (unsigned long)
			child->thread.per_event.cause << (BITS_PER_LONG - 16);
	else if (addr == (addr_t) &dummy->address)
		/* Address of the last PER trap */
		return child->thread.per_event.address;
	else if (addr == (addr_t) &dummy->access_id)
		/* Access id of the last PER trap */
		return (unsigned long)
			child->thread.per_event.paid << (BITS_PER_LONG - 8);
	return 0;
}

/*
 * Read the word at offset addr from the user area of a process. The
 * trouble here is that the information is littered over different
 * locations. The process registers are found on the kernel stack,
 * the floating point stuff and the trace settings are stored in
 * the task structure. In addition the different structures in
 * struct user contain pad bytes that should be read as zeroes.
 * Lovely...
 */
static unsigned long __peek_user(struct task_struct *child, addr_t addr)
{
	struct user *dummy = NULL;
	addr_t offset, tmp;

	if (addr < (addr_t) &dummy->regs.acrs) {
		/*
		 * psw and gprs are stored on the stack
		 */
		tmp = *(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr);
		if (addr == (addr_t) &dummy->regs.psw.mask)
			/* Remove per bit from user psw. */
			tmp &= ~PSW_MASK_PER;

	} else if (addr < (addr_t) &dummy->regs.orig_gpr2) {
		/*
		 * access registers are stored in the thread structure
		 */
		offset = addr - (addr_t) &dummy->regs.acrs;
#ifdef CONFIG_64BIT
		/*
		 * Very special case: old & broken 64 bit gdb reading
		 * from acrs[15]. Result is a 64 bit value. Read the
		 * 32 bit acrs[15] value and shift it by 32. Sick...
		 */
		if (addr == (addr_t) &dummy->regs.acrs[15])
			tmp = ((unsigned long) child->thread.acrs[15]) << 32;
		else
#endif
		tmp = *(addr_t *)((addr_t) &child->thread.acrs + offset);

	} else if (addr == (addr_t) &dummy->regs.orig_gpr2) {
		/*
		 * orig_gpr2 is stored on the kernel stack
		 */
		tmp = (addr_t) task_pt_regs(child)->orig_gpr2;

	} else if (addr < (addr_t) &dummy->regs.fp_regs) {
		/*
		 * prevent reads of padding hole between
		 * orig_gpr2 and fp_regs on s390.
		 */
		tmp = 0;

	} else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) {
		/*
		 * floating point regs. are stored in the thread structure
		 */
		offset = addr - (addr_t) &dummy->regs.fp_regs;
		tmp = *(addr_t *)((addr_t) &child->thread.fp_regs + offset);
		if (addr == (addr_t) &dummy->regs.fp_regs.fpc)
			tmp &= (unsigned long) FPC_VALID_MASK
				<< (BITS_PER_LONG - 32);

	} else if (addr < (addr_t) (&dummy->regs.per_info + 1)) {
		/*
		 * Handle access to the per_info structure.
		 */
		addr -= (addr_t) &dummy->regs.per_info;
		tmp = __peek_user_per(child, addr);

	} else
		tmp = 0;

	return tmp;
}

static int
peek_user(struct task_struct *child, addr_t addr, addr_t data)
{
	addr_t tmp, mask;

	/*
	 * Stupid gdb peeks/pokes the access registers in 64 bit with
	 * an alignment of 4. Programmers from hell...
	 */
	mask = __ADDR_MASK;
#ifdef CONFIG_64BIT
	if (addr >= (addr_t) &((struct user *) NULL)->regs.acrs &&
	    addr < (addr_t) &((struct user *) NULL)->regs.orig_gpr2)
		mask = 3;
#endif
	if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK)
		return -EIO;

	tmp = __peek_user(child, addr);
	return put_user(tmp, (addr_t __user *) data);
}

static inline void __poke_user_per(struct task_struct *child,
				   addr_t addr, addr_t data)
{
	struct per_struct_kernel *dummy = NULL;

	/*
	 * There are only three fields in the per_info struct that the
	 * debugger user can write to.
	 * 1) cr9: the debugger wants to set a new PER event mask
	 * 2) starting_addr: the debugger wants to set a new starting
	 *    address to use with the PER event mask.
	 * 3) ending_addr: the debugger wants to set a new ending
	 *    address to use with the PER event mask.
	 * The user specified PER event mask and the start and end
	 * addresses are used only if single stepping is not in effect.
	 * Writes to any other field in per_info are ignored.
	 */
	if (addr == (addr_t) &dummy->cr9)
		/* PER event mask of the user specified per set. */
		child->thread.per_user.control =
			data & (PER_EVENT_MASK | PER_CONTROL_MASK);
	else if (addr == (addr_t) &dummy->starting_addr)
		/* Starting address of the user specified per set. */
		child->thread.per_user.start = data;
	else if (addr == (addr_t) &dummy->ending_addr)
		/* Ending address of the user specified per set. */
		child->thread.per_user.end = data;
}

/*
 * Write a word to the user area of a process at location addr. This
 * operation does have an additional problem compared to peek_user.
 * Stores to the program status word and to the floating point
 * control register need to be checked for validity.
 */
static int __poke_user(struct task_struct *child, addr_t addr, addr_t data)
{
	struct user *dummy = NULL;
	addr_t offset;

	if (addr < (addr_t) &dummy->regs.acrs) {
		/*
		 * psw and gprs are stored on the stack
		 */
		if (addr == (addr_t) &dummy->regs.psw.mask &&
#ifdef CONFIG_COMPAT
		    data != PSW_MASK_MERGE(psw_user32_bits, data) &&
#endif
		    data != PSW_MASK_MERGE(psw_user_bits, data))
			/* Invalid psw mask. */
			return -EINVAL;
#ifndef CONFIG_64BIT
		if (addr == (addr_t) &dummy->regs.psw.addr)
			/* I'd like to reject addresses without the
			   high order bit but older gdb's rely on it */
			data |= PSW_ADDR_AMODE;
#endif
		*(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr) = data;

	} else if (addr < (addr_t) (&dummy->regs.orig_gpr2)) {
		/*
		 * access registers are stored in the thread structure
		 */
		offset = addr - (addr_t) &dummy->regs.acrs;
#ifdef CONFIG_64BIT
		/*
		 * Very special case: old & broken 64 bit gdb writing
		 * to acrs[15] with a 64 bit value. Ignore the lower
		 * half of the value and write the upper 32 bit to
		 * acrs[15]. Sick...
		 */
		if (addr == (addr_t) &dummy->regs.acrs[15])
			child->thread.acrs[15] = (unsigned int) (data >> 32);
		else
#endif
		*(addr_t *)((addr_t) &child->thread.acrs + offset) = data;

	} else if (addr == (addr_t) &dummy->regs.orig_gpr2) {
		/*
		 * orig_gpr2 is stored on the kernel stack
		 */
		task_pt_regs(child)->orig_gpr2 = data;

	} else if (addr < (addr_t) &dummy->regs.fp_regs) {
		/*
		 * prevent writes of padding hole between
		 * orig_gpr2 and fp_regs on s390.
		 */
		return 0;

	} else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) {
		/*
		 * floating point regs. are stored in the thread structure
		 */
		if (addr == (addr_t) &dummy->regs.fp_regs.fpc &&
		    (data & ~((unsigned long) FPC_VALID_MASK
			      << (BITS_PER_LONG - 32))) != 0)
			return -EINVAL;
		offset = addr - (addr_t) &dummy->regs.fp_regs;
		*(addr_t *)((addr_t) &child->thread.fp_regs + offset) = data;

	} else if (addr < (addr_t) (&dummy->regs.per_info + 1)) {
		/*
		 * Handle access to the per_info structure.
		 */
		addr -= (addr_t) &dummy->regs.per_info;
		__poke_user_per(child, addr, data);

	}

	return 0;
}

static int poke_user(struct task_struct *child, addr_t addr, addr_t data)
{
	addr_t mask;

	/*
	 * Stupid gdb peeks/pokes the access registers in 64 bit with
	 * an alignment of 4. Programmers from hell indeed...
	 */
	mask = __ADDR_MASK;
#ifdef CONFIG_64BIT
	if (addr >= (addr_t) &((struct user *) NULL)->regs.acrs &&
	    addr < (addr_t) &((struct user *) NULL)->regs.orig_gpr2)
		mask = 3;
#endif
	if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK)
		return -EIO;

	return __poke_user(child, addr, data);
}

long arch_ptrace(struct task_struct *child, long request,
		 unsigned long addr, unsigned long data)
{
	ptrace_area parea;
	int copied, ret;

	switch (request) {
	case PTRACE_PEEKUSR:
		/* read the word at location addr in the USER area. */
		return peek_user(child, addr, data);

	case PTRACE_POKEUSR:
		/* write the word at location addr in the USER area */
		return poke_user(child, addr, data);

	case PTRACE_PEEKUSR_AREA:
	case PTRACE_POKEUSR_AREA:
		if (copy_from_user(&parea, (void __force __user *) addr,
							sizeof(parea)))
			return -EFAULT;
		addr = parea.kernel_addr;
		data = parea.process_addr;
		copied = 0;
		while (copied < parea.len) {
			if (request == PTRACE_PEEKUSR_AREA)
				ret = peek_user(child, addr, data);
			else {
				addr_t utmp;
				if (get_user(utmp,
					     (addr_t __force __user *) data))
					return -EFAULT;
				ret = poke_user(child, addr, utmp);
			}
			if (ret)
				return ret;
			addr += sizeof(unsigned long);
			data += sizeof(unsigned long);
			copied += sizeof(unsigned long);
		}
		return 0;
	case PTRACE_GET_LAST_BREAK:
		put_user(task_thread_info(child)->last_break,
			 (unsigned long __user *) data);
		return 0;
	default:
		/* Removing high order bit from addr (only for 31 bit). */
		addr &= PSW_ADDR_INSN;
		return ptrace_request(child, request, addr, data);
	}
}
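
/*
 * Illustrative sketch (not kernel code): with PTRACE_PEEKUSR_AREA a tracer
 * can copy a whole block of the USER area in a single ptrace() call instead
 * of looping over PTRACE_PEEKUSR. The addr argument points to a ptrace_area
 * descriptor in the tracer; kernel_addr is an offset within struct user and
 * process_addr is a buffer in the tracer. Struct and field names are taken
 * from the s390 ptrace headers; error handling is omitted.
 *
 *	unsigned long buf[NUM_GPRS];
 *	ptrace_area area = {
 *		.len          = sizeof(buf),
 *		.kernel_addr  = offsetof(struct user, regs.gprs),
 *		.process_addr = (addr_t) buf,
 *	};
 *	ptrace(PTRACE_PEEKUSR_AREA, pid, &area, NULL);
 */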

#ifdef CONFIG_COMPAT
/*
 * Now the fun part starts... a 31 bit program running in the
 * 31 bit emulation tracing another program. PTRACE_PEEKTEXT,
 * PTRACE_PEEKDATA, PTRACE_POKETEXT and PTRACE_POKEDATA are easy
 * to handle, the difference to the 64 bit versions of the requests
 * is that the access is done in multiples of 4 bytes instead of
 * 8 bytes (sizeof(unsigned long) on 31/64 bit).
 * The ugly parts are PTRACE_PEEKUSR, PTRACE_PEEKUSR_AREA,
 * PTRACE_POKEUSR and PTRACE_POKEUSR_AREA. If the traced program
 * is a 31 bit program too, the content of struct user can be
 * emulated. A 31 bit program peeking into the struct user of
 * a 64 bit program is a no-no.
 */

/*
 * Same as peek_user_per but for a 31 bit program.
 */
static inline __u32 __peek_user_per_compat(struct task_struct *child,
					   addr_t addr)
{
	struct compat_per_struct_kernel *dummy32 = NULL;

	if (addr == (addr_t) &dummy32->cr9)
		/* Control bits of the active per set. */
		return (__u32) test_thread_flag(TIF_SINGLE_STEP) ?
			PER_EVENT_IFETCH : child->thread.per_user.control;
	else if (addr == (addr_t) &dummy32->cr10)
		/* Start address of the active per set. */
		return (__u32) test_thread_flag(TIF_SINGLE_STEP) ?
			0 : child->thread.per_user.start;
	else if (addr == (addr_t) &dummy32->cr11)
		/* End address of the active per set. */
		return test_thread_flag(TIF_SINGLE_STEP) ?
			PSW32_ADDR_INSN : child->thread.per_user.end;
	else if (addr == (addr_t) &dummy32->bits)
		/* Single-step bit. */
		return (__u32) test_thread_flag(TIF_SINGLE_STEP) ?
			0x80000000 : 0;
	else if (addr == (addr_t) &dummy32->starting_addr)
		/* Start address of the user specified per set. */
		return (__u32) child->thread.per_user.start;
	else if (addr == (addr_t) &dummy32->ending_addr)
		/* End address of the user specified per set. */
		return (__u32) child->thread.per_user.end;
	else if (addr == (addr_t) &dummy32->perc_atmid)
		/* PER code, ATMID and AI of the last PER trap */
		return (__u32) child->thread.per_event.cause << 16;
	else if (addr == (addr_t) &dummy32->address)
		/* Address of the last PER trap */
		return (__u32) child->thread.per_event.address;
	else if (addr == (addr_t) &dummy32->access_id)
		/* Access id of the last PER trap */
		return (__u32) child->thread.per_event.paid << 24;
	return 0;
}

/*
 * Same as peek_user but for a 31 bit program.
 */
static u32 __peek_user_compat(struct task_struct *child, addr_t addr)
{
	struct compat_user *dummy32 = NULL;
	addr_t offset;
	__u32 tmp;

	if (addr < (addr_t) &dummy32->regs.acrs) {
		/*
		 * psw and gprs are stored on the stack
		 */
		if (addr == (addr_t) &dummy32->regs.psw.mask) {
			/* Fake a 31 bit psw mask. */
			tmp = (__u32)(task_pt_regs(child)->psw.mask >> 32);
			tmp = PSW32_MASK_MERGE(psw32_user_bits, tmp);
		} else if (addr == (addr_t) &dummy32->regs.psw.addr) {
			/* Fake a 31 bit psw address. */
			tmp = (__u32) task_pt_regs(child)->psw.addr |
				PSW32_ADDR_AMODE31;
		} else {
			/*
			 * gpr 0-15: each 4 byte slot of the 31 bit user
			 * area maps to an 8 byte slot in pt_regs, so the
			 * offset is doubled and +4 selects the low half.
			 */
			tmp = *(__u32 *)((addr_t) &task_pt_regs(child)->psw +
					 addr*2 + 4);
		}
	} else if (addr < (addr_t) (&dummy32->regs.orig_gpr2)) {
		/*
		 * access registers are stored in the thread structure
		 */
		offset = addr - (addr_t) &dummy32->regs.acrs;
		tmp = *(__u32*)((addr_t) &child->thread.acrs + offset);

	} else if (addr == (addr_t) (&dummy32->regs.orig_gpr2)) {
		/*
		 * orig_gpr2 is stored on the kernel stack
		 */
		tmp = *(__u32*)((addr_t) &task_pt_regs(child)->orig_gpr2 + 4);

	} else if (addr < (addr_t) &dummy32->regs.fp_regs) {
		/*
		 * prevent reads of padding hole between
		 * orig_gpr2 and fp_regs on s390.
		 */
		tmp = 0;

	} else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) {
		/*
		 * floating point regs. are stored in the thread structure
		 */
		offset = addr - (addr_t) &dummy32->regs.fp_regs;
		tmp = *(__u32 *)((addr_t) &child->thread.fp_regs + offset);

	} else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) {
		/*
		 * Handle access to the per_info structure.
		 */
		addr -= (addr_t) &dummy32->regs.per_info;
		tmp = __peek_user_per_compat(child, addr);

	} else
		tmp = 0;

	return tmp;
}

static int peek_user_compat(struct task_struct *child,
			    addr_t addr, addr_t data)
{
	__u32 tmp;

	if (!is_compat_task() || (addr & 3) || addr > sizeof(struct user) - 3)
		return -EIO;

	tmp = __peek_user_compat(child, addr);
	return put_user(tmp, (__u32 __user *) data);
}

/*
 * Same as poke_user_per but for a 31 bit program.
 */
static inline void __poke_user_per_compat(struct task_struct *child,
					  addr_t addr, __u32 data)
{
	struct compat_per_struct_kernel *dummy32 = NULL;

	if (addr == (addr_t) &dummy32->cr9)
		/* PER event mask of the user specified per set. */
		child->thread.per_user.control =
			data & (PER_EVENT_MASK | PER_CONTROL_MASK);
	else if (addr == (addr_t) &dummy32->starting_addr)
		/* Starting address of the user specified per set. */
		child->thread.per_user.start = data;
	else if (addr == (addr_t) &dummy32->ending_addr)
		/* Ending address of the user specified per set. */
		child->thread.per_user.end = data;
}

/*
 * Same as poke_user but for a 31 bit program.
 */
static int __poke_user_compat(struct task_struct *child,
			      addr_t addr, addr_t data)
{
	struct compat_user *dummy32 = NULL;
	__u32 tmp = (__u32) data;
	addr_t offset;

	if (addr < (addr_t) &dummy32->regs.acrs) {
		/*
		 * psw, gprs, acrs and orig_gpr2 are stored on the stack
		 */
		if (addr == (addr_t) &dummy32->regs.psw.mask) {
			/* Build a 64 bit psw mask from 31 bit mask. */
			if (tmp != PSW32_MASK_MERGE(psw32_user_bits, tmp))
				/* Invalid psw mask. */
				return -EINVAL;
			task_pt_regs(child)->psw.mask =
				PSW_MASK_MERGE(psw_user32_bits, (__u64) tmp << 32);
		} else if (addr == (addr_t) &dummy32->regs.psw.addr) {
			/* Build a 64 bit psw address from 31 bit address. */
			task_pt_regs(child)->psw.addr =
				(__u64) tmp & PSW32_ADDR_INSN;
		} else {
			/*
			 * gpr 0-15: write the low half of the 8 byte
			 * pt_regs slot that corresponds to the 4 byte
			 * slot in the 31 bit user area.
			 */
			*(__u32*)((addr_t) &task_pt_regs(child)->psw
				  + addr*2 + 4) = tmp;
		}
	} else if (addr < (addr_t) (&dummy32->regs.orig_gpr2)) {
		/*
		 * access registers are stored in the thread structure
		 */
		offset = addr - (addr_t) &dummy32->regs.acrs;
		*(__u32*)((addr_t) &child->thread.acrs + offset) = tmp;

	} else if (addr == (addr_t) (&dummy32->regs.orig_gpr2)) {
		/*
		 * orig_gpr2 is stored on the kernel stack
		 */
		*(__u32*)((addr_t) &task_pt_regs(child)->orig_gpr2 + 4) = tmp;

	} else if (addr < (addr_t) &dummy32->regs.fp_regs) {
		/*
		 * prevent writes of padding hole between
		 * orig_gpr2 and fp_regs on s390.
		 */
		return 0;

	} else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) {
		/*
		 * floating point regs. are stored in the thread structure
		 */
		if (addr == (addr_t) &dummy32->regs.fp_regs.fpc &&
		    (tmp & ~FPC_VALID_MASK) != 0)
			/* Invalid floating point control. */
			return -EINVAL;
		offset = addr - (addr_t) &dummy32->regs.fp_regs;
		*(__u32 *)((addr_t) &child->thread.fp_regs + offset) = tmp;

	} else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) {
		/*
		 * Handle access to the per_info structure.
		 */
		addr -= (addr_t) &dummy32->regs.per_info;
		__poke_user_per_compat(child, addr, data);
	}

	return 0;
}

static int poke_user_compat(struct task_struct *child,
			    addr_t addr, addr_t data)
{
	if (!is_compat_task() || (addr & 3) ||
	    addr > sizeof(struct compat_user) - 3)
		return -EIO;

	return __poke_user_compat(child, addr, data);
}

long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
			compat_ulong_t caddr, compat_ulong_t cdata)
{
	unsigned long addr = caddr;
	unsigned long data = cdata;
	compat_ptrace_area parea;
	int copied, ret;

	switch (request) {
	case PTRACE_PEEKUSR:
		/* read the word at location addr in the USER area. */
		return peek_user_compat(child, addr, data);

	case PTRACE_POKEUSR:
		/* write the word at location addr in the USER area */
		return poke_user_compat(child, addr, data);

	case PTRACE_PEEKUSR_AREA:
	case PTRACE_POKEUSR_AREA:
		if (copy_from_user(&parea, (void __force __user *) addr,
							sizeof(parea)))
			return -EFAULT;
		addr = parea.kernel_addr;
		data = parea.process_addr;
		copied = 0;
		while (copied < parea.len) {
			if (request == PTRACE_PEEKUSR_AREA)
				ret = peek_user_compat(child, addr, data);
			else {
				__u32 utmp;
				if (get_user(utmp,
					     (__u32 __force __user *) data))
					return -EFAULT;
				ret = poke_user_compat(child, addr, utmp);
			}
			if (ret)
				return ret;
			addr += sizeof(unsigned int);
			data += sizeof(unsigned int);
			copied += sizeof(unsigned int);
		}
		return 0;
	case PTRACE_GET_LAST_BREAK:
		put_user(task_thread_info(child)->last_break,
			 (unsigned int __user *) data);
		return 0;
	}
	return compat_ptrace_request(child, request, addr, data);
}
#endif

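/*
 * Syscall tracing and auditing hooks, called from the sysc_tracesys
 * path in entry.S. do_syscall_trace_enter() returns the system call
 * number to execute, or -1 if the tracer decided the call should be
 * skipped (svcnr is cleared as well so that no restart is attempted).
 */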
asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
{
	long ret = 0;

	/* Do the secure computing check first. */
	secure_computing(regs->gprs[2]);

	/*
	 * The sysc_tracesys code in entry.S stored the system
	 * call number to gprs[2].
	 */
	if (test_thread_flag(TIF_SYSCALL_TRACE) &&
	    (tracehook_report_syscall_entry(regs) ||
	     regs->gprs[2] >= NR_syscalls)) {
		/*
		 * Tracing decided this syscall should not happen or the
		 * debugger stored an invalid system call number. Skip
		 * the system call and the system call restart handling.
		 */
		regs->svcnr = 0;
		ret = -1;
	}

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_enter(regs, regs->gprs[2]);

	if (unlikely(current->audit_context))
		audit_syscall_entry(is_compat_task() ?
					AUDIT_ARCH_S390 : AUDIT_ARCH_S390X,
				    regs->gprs[2], regs->orig_gpr2,
				    regs->gprs[3], regs->gprs[4],
				    regs->gprs[5]);
	return ret ?: regs->gprs[2];
}

asmlinkage void do_syscall_trace_exit(struct pt_regs *regs)
{
	if (unlikely(current->audit_context))
		audit_syscall_exit(AUDITSC_RESULT(regs->gprs[2]),
				   regs->gprs[2]);

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_exit(regs, regs->gprs[2]);

	if (test_thread_flag(TIF_SYSCALL_TRACE))
		tracehook_report_syscall_exit(regs, 0);
}

/*
 * user_regset definitions.
 */

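/*
 * The regsets below back the generic PTRACE_GETREGSET/PTRACE_SETREGSET
 * requests and the ELF core dump notes. The general purpose getters and
 * setters reuse __peek_user()/__poke_user() word by word, so the layout
 * seen by debuggers matches the classic USER area.
 */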
static int s390_regs_get(struct task_struct *target,
			 const struct user_regset *regset,
			 unsigned int pos, unsigned int count,
			 void *kbuf, void __user *ubuf)
{
	if (target == current)
		save_access_regs(target->thread.acrs);

	if (kbuf) {
		unsigned long *k = kbuf;
		while (count > 0) {
			*k++ = __peek_user(target, pos);
			count -= sizeof(*k);
			pos += sizeof(*k);
		}
	} else {
		unsigned long __user *u = ubuf;
		while (count > 0) {
			if (__put_user(__peek_user(target, pos), u++))
				return -EFAULT;
			count -= sizeof(*u);
			pos += sizeof(*u);
		}
	}
	return 0;
}

static int s390_regs_set(struct task_struct *target,
			 const struct user_regset *regset,
			 unsigned int pos, unsigned int count,
			 const void *kbuf, const void __user *ubuf)
{
	int rc = 0;

	if (target == current)
		save_access_regs(target->thread.acrs);

	if (kbuf) {
		const unsigned long *k = kbuf;
		while (count > 0 && !rc) {
			rc = __poke_user(target, pos, *k++);
			count -= sizeof(*k);
			pos += sizeof(*k);
		}
	} else {
		const unsigned long  __user *u = ubuf;
		while (count > 0 && !rc) {
			unsigned long word;
			rc = __get_user(word, u++);
			if (rc)
				break;
			rc = __poke_user(target, pos, word);
			count -= sizeof(*u);
			pos += sizeof(*u);
		}
	}

	if (rc == 0 && target == current)
		restore_access_regs(target->thread.acrs);

	return rc;
}

static int s390_fpregs_get(struct task_struct *target,
			   const struct user_regset *regset, unsigned int pos,
			   unsigned int count, void *kbuf, void __user *ubuf)
{
	if (target == current)
		save_fp_regs(&target->thread.fp_regs);

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   &target->thread.fp_regs, 0, -1);
}

static int s390_fpregs_set(struct task_struct *target,
			   const struct user_regset *regset, unsigned int pos,
			   unsigned int count, const void *kbuf,
			   const void __user *ubuf)
{
	int rc = 0;

	if (target == current)
		save_fp_regs(&target->thread.fp_regs);

	/* If setting FPC, must validate it first. */
	if (count > 0 && pos < offsetof(s390_fp_regs, fprs)) {
		u32 fpc[2] = { target->thread.fp_regs.fpc, 0 };
		rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &fpc,
					0, offsetof(s390_fp_regs, fprs));
		if (rc)
			return rc;
		if ((fpc[0] & ~FPC_VALID_MASK) != 0 || fpc[1] != 0)
			return -EINVAL;
		target->thread.fp_regs.fpc = fpc[0];
	}

	if (rc == 0 && count > 0)
		rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					target->thread.fp_regs.fprs,
					offsetof(s390_fp_regs, fprs), -1);

	if (rc == 0 && target == current)
		restore_fp_regs(&target->thread.fp_regs);

	return rc;
}

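/*
 * The "last break" value is the address of the last breaking event
 * (e.g. a taken branch) recorded by the CPU before the task entered
 * the kernel; the entry code saves it in the task's thread_info. It is
 * exported via PTRACE_GET_LAST_BREAK and the NT_S390_LAST_BREAK regset.
 */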
#ifdef CONFIG_64BIT

static int s390_last_break_get(struct task_struct *target,
			       const struct user_regset *regset,
			       unsigned int pos, unsigned int count,
			       void *kbuf, void __user *ubuf)
{
	if (count > 0) {
		if (kbuf) {
			unsigned long *k = kbuf;
			*k = task_thread_info(target)->last_break;
		} else {
			unsigned long  __user *u = ubuf;
			if (__put_user(task_thread_info(target)->last_break, u))
				return -EFAULT;
		}
	}
	return 0;
}

#endif

static const struct user_regset s390_regsets[] = {
	[REGSET_GENERAL] = {
		.core_note_type = NT_PRSTATUS,
		.n = sizeof(s390_regs) / sizeof(long),
		.size = sizeof(long),
		.align = sizeof(long),
		.get = s390_regs_get,
		.set = s390_regs_set,
	},
	[REGSET_FP] = {
		.core_note_type = NT_PRFPREG,
		.n = sizeof(s390_fp_regs) / sizeof(long),
		.size = sizeof(long),
		.align = sizeof(long),
		.get = s390_fpregs_get,
		.set = s390_fpregs_set,
	},
#ifdef CONFIG_64BIT
	[REGSET_LAST_BREAK] = {
		.core_note_type = NT_S390_LAST_BREAK,
		.n = 1,
		.size = sizeof(long),
		.align = sizeof(long),
		.get = s390_last_break_get,
	},
#endif
};

static const struct user_regset_view user_s390_view = {
	.name = UTS_MACHINE,
	.e_machine = EM_S390,
	.regsets = s390_regsets,
	.n = ARRAY_SIZE(s390_regsets)
};

#ifdef CONFIG_COMPAT
static int s390_compat_regs_get(struct task_struct *target,
				const struct user_regset *regset,
				unsigned int pos, unsigned int count,
				void *kbuf, void __user *ubuf)
{
	if (target == current)
		save_access_regs(target->thread.acrs);

	if (kbuf) {
		compat_ulong_t *k = kbuf;
		while (count > 0) {
			*k++ = __peek_user_compat(target, pos);
			count -= sizeof(*k);
			pos += sizeof(*k);
		}
	} else {
		compat_ulong_t __user *u = ubuf;
		while (count > 0) {
			if (__put_user(__peek_user_compat(target, pos), u++))
				return -EFAULT;
			count -= sizeof(*u);
			pos += sizeof(*u);
		}
	}
	return 0;
}

static int s390_compat_regs_set(struct task_struct *target,
				const struct user_regset *regset,
				unsigned int pos, unsigned int count,
				const void *kbuf, const void __user *ubuf)
{
	int rc = 0;

	if (target == current)
		save_access_regs(target->thread.acrs);

	if (kbuf) {
		const compat_ulong_t *k = kbuf;
		while (count > 0 && !rc) {
			rc = __poke_user_compat(target, pos, *k++);
			count -= sizeof(*k);
			pos += sizeof(*k);
		}
	} else {
		const compat_ulong_t  __user *u = ubuf;
		while (count > 0 && !rc) {
			compat_ulong_t word;
			rc = __get_user(word, u++);
			if (rc)
				break;
			rc = __poke_user_compat(target, pos, word);
			count -= sizeof(*u);
			pos += sizeof(*u);
		}
	}

	if (rc == 0 && target == current)
		restore_access_regs(target->thread.acrs);

	return rc;
}

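/*
 * NT_S390_HIGH_GPRS: the upper 32 bits of the 64 bit general purpose
 * registers of a 31 bit task. pt_regs always holds full 64 bit gprs on
 * a 64 bit kernel, so gprs_high points at the high word of gprs[n] and
 * advances by two compat_ulong_t per register.
 */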
static int s390_compat_regs_high_get(struct task_struct *target,
				     const struct user_regset *regset,
				     unsigned int pos, unsigned int count,
				     void *kbuf, void __user *ubuf)
{
	compat_ulong_t *gprs_high;

	gprs_high = (compat_ulong_t *)
		&task_pt_regs(target)->gprs[pos / sizeof(compat_ulong_t)];
	if (kbuf) {
		compat_ulong_t *k = kbuf;
		while (count > 0) {
			*k++ = *gprs_high;
			gprs_high += 2;
			count -= sizeof(*k);
		}
	} else {
		compat_ulong_t __user *u = ubuf;
		while (count > 0) {
			if (__put_user(*gprs_high, u++))
				return -EFAULT;
			gprs_high += 2;
			count -= sizeof(*u);
		}
	}
	return 0;
}

static int s390_compat_regs_high_set(struct task_struct *target,
				     const struct user_regset *regset,
				     unsigned int pos, unsigned int count,
				     const void *kbuf, const void __user *ubuf)
{
	compat_ulong_t *gprs_high;
	int rc = 0;

	gprs_high = (compat_ulong_t *)
		&task_pt_regs(target)->gprs[pos / sizeof(compat_ulong_t)];
	if (kbuf) {
		const compat_ulong_t *k = kbuf;
		while (count > 0) {
			*gprs_high = *k++;
			/* Advance to the high word of the next gpr. */
			gprs_high += 2;
			count -= sizeof(*k);
		}
	} else {
		const compat_ulong_t  __user *u = ubuf;
		while (count > 0 && !rc) {
			unsigned long word;
			rc = __get_user(word, u++);
			if (rc)
				break;
			*gprs_high = word;
			/* Advance to the high word of the next gpr. */
			gprs_high += 2;
			count -= sizeof(*u);
		}
	}

	return rc;
}

static int s390_compat_last_break_get(struct task_struct *target,
				      const struct user_regset *regset,
				      unsigned int pos, unsigned int count,
				      void *kbuf, void __user *ubuf)
{
	compat_ulong_t last_break;

	if (count > 0) {
		last_break = task_thread_info(target)->last_break;
		if (kbuf) {
			unsigned long *k = kbuf;
			*k = last_break;
		} else {
			unsigned long  __user *u = ubuf;
			if (__put_user(last_break, u))
				return -EFAULT;
		}
	}
	return 0;
}

static const struct user_regset s390_compat_regsets[] = {
	[REGSET_GENERAL] = {
		.core_note_type = NT_PRSTATUS,
		.n = sizeof(s390_compat_regs) / sizeof(compat_long_t),
		.size = sizeof(compat_long_t),
		.align = sizeof(compat_long_t),
		.get = s390_compat_regs_get,
		.set = s390_compat_regs_set,
	},
	[REGSET_FP] = {
		.core_note_type = NT_PRFPREG,
		.n = sizeof(s390_fp_regs) / sizeof(compat_long_t),
		.size = sizeof(compat_long_t),
		.align = sizeof(compat_long_t),
		.get = s390_fpregs_get,
		.set = s390_fpregs_set,
	},
	[REGSET_LAST_BREAK] = {
		.core_note_type = NT_S390_LAST_BREAK,
		.n = 1,
		.size = sizeof(long),
		.align = sizeof(long),
		.get = s390_compat_last_break_get,
	},
	[REGSET_GENERAL_EXTENDED] = {
		.core_note_type = NT_S390_HIGH_GPRS,
		.n = sizeof(s390_compat_regs_high) / sizeof(compat_long_t),
		.size = sizeof(compat_long_t),
		.align = sizeof(compat_long_t),
		.get = s390_compat_regs_high_get,
		.set = s390_compat_regs_high_set,
	},
};

static const struct user_regset_view user_s390_compat_view = {
	.name = "s390",
	.e_machine = EM_S390,
	.regsets = s390_compat_regsets,
	.n = ARRAY_SIZE(s390_compat_regsets)
};
#endif

const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
#ifdef CONFIG_COMPAT
	if (test_tsk_thread_flag(task, TIF_31BIT))
		return &user_s390_compat_view;
#endif
	return &user_s390_view;
}

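/*
 * Register name/offset helpers used by the kprobe based event tracer
 * to fetch register values and kernel stack entries by name or index.
 */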
static const char *gpr_names[NUM_GPRS] = {
	"r0", "r1",  "r2",  "r3",  "r4",  "r5",  "r6",  "r7",
	"r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
};

unsigned long regs_get_register(struct pt_regs *regs, unsigned int offset)
{
	if (offset >= NUM_GPRS)
		return 0;
	return regs->gprs[offset];
}

int regs_query_register_offset(const char *name)
{
	unsigned long offset;

	if (!name || *name != 'r')
		return -EINVAL;
	if (strict_strtoul(name + 1, 10, &offset))
		return -EINVAL;
	if (offset >= NUM_GPRS)
		return -EINVAL;
	return offset;
}

const char *regs_query_register_name(unsigned int offset)
{
	if (offset >= NUM_GPRS)
		return NULL;
	return gpr_names[offset];
}

static int regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr)
{
	unsigned long ksp = kernel_stack_pointer(regs);

	return (addr & ~(THREAD_SIZE - 1)) == (ksp & ~(THREAD_SIZE - 1));
}

/**
 * regs_get_kernel_stack_nth() - get Nth entry of the stack
 * @regs:pt_regs which contains kernel stack pointer.
 * @n:stack entry number.
 *
 * regs_get_kernel_stack_nth() returns the @n-th entry of the kernel stack
 * which is specified by @regs. If the @n-th entry is NOT in the kernel
 * stack, this returns 0.
 */
unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n)
{
	unsigned long addr;

	addr = kernel_stack_pointer(regs) + n * sizeof(long);
	if (!regs_within_kernel_stack(regs, addr))
		return 0;
	return *(unsigned long *)addr;
}