1 /*
2  * Signal handling for 32bit PPC and 32bit tasks on 64bit PPC
3  *
4  *  PowerPC version
5  *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
6  * Copyright (C) 2001 IBM
7  * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
8  * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
9  *
10  *  Derived from "arch/i386/kernel/signal.c"
11  *    Copyright (C) 1991, 1992 Linus Torvalds
12  *    1997-11-28  Modified for POSIX.1b signals by Richard Henderson
13  *
14  *  This program is free software; you can redistribute it and/or
15  *  modify it under the terms of the GNU General Public License
16  *  as published by the Free Software Foundation; either version
17  *  2 of the License, or (at your option) any later version.
18  */
19 
20 #include <linux/sched.h>
21 #include <linux/mm.h>
22 #include <linux/smp.h>
23 #include <linux/kernel.h>
24 #include <linux/signal.h>
25 #include <linux/errno.h>
26 #include <linux/elf.h>
27 #include <linux/ptrace.h>
28 #include <linux/ratelimit.h>
29 #ifdef CONFIG_PPC64
30 #include <linux/syscalls.h>
31 #include <linux/compat.h>
32 #else
33 #include <linux/wait.h>
34 #include <linux/unistd.h>
35 #include <linux/stddef.h>
36 #include <linux/tty.h>
37 #include <linux/binfmts.h>
38 #include <linux/freezer.h>
39 #endif
40 
41 #include <asm/uaccess.h>
42 #include <asm/cacheflush.h>
43 #include <asm/syscalls.h>
44 #include <asm/sigcontext.h>
45 #include <asm/vdso.h>
46 #include <asm/switch_to.h>
47 #ifdef CONFIG_PPC64
48 #include "ppc32.h"
49 #include <asm/unistd.h>
50 #else
51 #include <asm/ucontext.h>
52 #include <asm/pgtable.h>
53 #endif
54 
55 #include "signal.h"
56 
57 #undef DEBUG_SIG
58 
59 #ifdef CONFIG_PPC64
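/*
 * On PPC64 this file is built for 32-bit (compat) tasks: map the syscall
 * entry points and structure names to their compat/32-bit equivalents so
 * that the code below can be shared with the native 32-bit kernel.
 */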
60 #define sys_sigsuspend	compat_sys_sigsuspend
61 #define sys_rt_sigsuspend	compat_sys_rt_sigsuspend
62 #define sys_rt_sigreturn	compat_sys_rt_sigreturn
63 #define sys_sigaction	compat_sys_sigaction
64 #define sys_swapcontext	compat_sys_swapcontext
65 #define sys_sigreturn	compat_sys_sigreturn
66 
67 #define old_sigaction	old_sigaction32
68 #define sigcontext	sigcontext32
69 #define mcontext	mcontext32
70 #define ucontext	ucontext32
71 
72 /*
73  * Userspace code may pass a ucontext which doesn't include the VSX state
74  * added at the end.  We need to check for this case.
75  */
76 #define UCONTEXTSIZEWITHOUTVSX \
77 		(sizeof(struct ucontext) - sizeof(elf_vsrreghalf_t32))
78 
79 /*
80  * Returning 0 means we return to userspace via
81  * ret_from_except and thus restore all user
82  * registers from *regs.  This is what we need
83  * to do when a signal has been delivered.
84  */
85 
86 #define GP_REGS_SIZE	min(sizeof(elf_gregset_t32), sizeof(struct pt_regs32))
87 #undef __SIGNAL_FRAMESIZE
88 #define __SIGNAL_FRAMESIZE	__SIGNAL_FRAMESIZE32
89 #undef ELF_NVRREG
90 #define ELF_NVRREG	ELF_NVRREG32
91 
92 /*
93  * Functions for flipping sigsets (thanks to brain dead generic
94  * implementation that makes things simple for little endian only)
95  */
96 static inline int put_sigset_t(compat_sigset_t __user *uset, sigset_t *set)
97 {
98 	compat_sigset_t	cset;
99 
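	/* Each case deliberately falls through so the lower words are converted too. */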
100 	switch (_NSIG_WORDS) {
101 	case 4: cset.sig[6] = set->sig[3] & 0xffffffffull;
102 		cset.sig[7] = set->sig[3] >> 32;
103 	case 3: cset.sig[4] = set->sig[2] & 0xffffffffull;
104 		cset.sig[5] = set->sig[2] >> 32;
105 	case 2: cset.sig[2] = set->sig[1] & 0xffffffffull;
106 		cset.sig[3] = set->sig[1] >> 32;
107 	case 1: cset.sig[0] = set->sig[0] & 0xffffffffull;
108 		cset.sig[1] = set->sig[0] >> 32;
109 	}
110 	return copy_to_user(uset, &cset, sizeof(*uset));
111 }
112 
113 static inline int get_sigset_t(sigset_t *set,
114 			       const compat_sigset_t __user *uset)
115 {
116 	compat_sigset_t s32;
117 
118 	if (copy_from_user(&s32, uset, sizeof(*uset)))
119 		return -EFAULT;
120 
121 	/*
122 	 * Swap the 2 words of the 64-bit sigset_t (they are stored
123 	 * in the "wrong" endian in 32-bit user storage).
124 	 */
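	/* Deliberate fallthrough: each case also converts the lower-numbered words. */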
125 	switch (_NSIG_WORDS) {
126 	case 4: set->sig[3] = s32.sig[6] | (((long)s32.sig[7]) << 32);
127 	case 3: set->sig[2] = s32.sig[4] | (((long)s32.sig[5]) << 32);
128 	case 2: set->sig[1] = s32.sig[2] | (((long)s32.sig[3]) << 32);
129 	case 1: set->sig[0] = s32.sig[0] | (((long)s32.sig[1]) << 32);
130 	}
131 	return 0;
132 }
133 
134 static inline int get_old_sigaction(struct k_sigaction *new_ka,
135 		struct old_sigaction __user *act)
136 {
137 	compat_old_sigset_t mask;
138 	compat_uptr_t handler, restorer;
139 
140 	if (get_user(handler, &act->sa_handler) ||
141 	    __get_user(restorer, &act->sa_restorer) ||
142 	    __get_user(new_ka->sa.sa_flags, &act->sa_flags) ||
143 	    __get_user(mask, &act->sa_mask))
144 		return -EFAULT;
145 	new_ka->sa.sa_handler = compat_ptr(handler);
146 	new_ka->sa.sa_restorer = compat_ptr(restorer);
147 	siginitset(&new_ka->sa.sa_mask, mask);
148 	return 0;
149 }
150 
151 #define to_user_ptr(p)		ptr_to_compat(p)
152 #define from_user_ptr(p)	compat_ptr(p)
153 
154 static inline int save_general_regs(struct pt_regs *regs,
155 		struct mcontext __user *frame)
156 {
157 	elf_greg_t64 *gregs = (elf_greg_t64 *)regs;
158 	int i;
159 
160 	WARN_ON(!FULL_REGS(regs));
161 
162 	for (i = 0; i <= PT_RESULT; i++) {
163 		if (i == 14 && !FULL_REGS(regs))
164 			i = 32;
165 		if (__put_user((unsigned int)gregs[i], &frame->mc_gregs[i]))
166 			return -EFAULT;
167 	}
168 	return 0;
169 }
170 
171 static inline int restore_general_regs(struct pt_regs *regs,
172 		struct mcontext __user *sr)
173 {
174 	elf_greg_t64 *gregs = (elf_greg_t64 *)regs;
175 	int i;
176 
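	/* MSR and SOFTE are skipped below: the MSR is handled separately by the
	 * callers, and the soft-interrupt-enable state is not something user
	 * space should be able to set. */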
177 	for (i = 0; i <= PT_RESULT; i++) {
178 		if ((i == PT_MSR) || (i == PT_SOFTE))
179 			continue;
180 		if (__get_user(gregs[i], &sr->mc_gregs[i]))
181 			return -EFAULT;
182 	}
183 	return 0;
184 }
185 
186 #else /* CONFIG_PPC64 */
187 
188 #define GP_REGS_SIZE	min(sizeof(elf_gregset_t), sizeof(struct pt_regs))
189 
190 static inline int put_sigset_t(sigset_t __user *uset, sigset_t *set)
191 {
192 	return copy_to_user(uset, set, sizeof(*uset));
193 }
194 
195 static inline int get_sigset_t(sigset_t *set, const sigset_t __user *uset)
196 {
197 	return copy_from_user(set, uset, sizeof(*uset));
198 }
199 
200 static inline int get_old_sigaction(struct k_sigaction *new_ka,
201 		struct old_sigaction __user *act)
202 {
203 	old_sigset_t mask;
204 
205 	if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
206 			__get_user(new_ka->sa.sa_handler, &act->sa_handler) ||
207 			__get_user(new_ka->sa.sa_restorer, &act->sa_restorer))
208 		return -EFAULT;
209 	__get_user(new_ka->sa.sa_flags, &act->sa_flags);
210 	__get_user(mask, &act->sa_mask);
211 	siginitset(&new_ka->sa.sa_mask, mask);
212 	return 0;
213 }
214 
215 #define to_user_ptr(p)		((unsigned long)(p))
216 #define from_user_ptr(p)	((void __user *)(p))
217 
218 static inline int save_general_regs(struct pt_regs *regs,
219 		struct mcontext __user *frame)
220 {
221 	WARN_ON(!FULL_REGS(regs));
222 	return __copy_to_user(&frame->mc_gregs, regs, GP_REGS_SIZE);
223 }
224 
225 static inline int restore_general_regs(struct pt_regs *regs,
226 		struct mcontext __user *sr)
227 {
228 	/* copy up to but not including MSR */
229 	if (__copy_from_user(regs, &sr->mc_gregs,
230 				PT_MSR * sizeof(elf_greg_t)))
231 		return -EFAULT;
232 	/* copy from orig_r3 (the word after the MSR) up to the end */
233 	if (__copy_from_user(&regs->orig_gpr3, &sr->mc_gregs[PT_ORIG_R3],
234 				GP_REGS_SIZE - PT_ORIG_R3 * sizeof(elf_greg_t)))
235 		return -EFAULT;
236 	return 0;
237 }
238 
239 #endif /* CONFIG_PPC64 */
240 
241 /*
242  * Atomically swap in the new signal mask, and wait for a signal.
243  */
244 long sys_sigsuspend(old_sigset_t mask)
245 {
246 	sigset_t blocked;
247 
248 	current->saved_sigmask = current->blocked;
249 
250 	mask &= _BLOCKABLE;
251 	siginitset(&blocked, mask);
252 	set_current_blocked(&blocked);
253 
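	/* Sleep until a signal is delivered.  The original mask saved above in
	 * current->saved_sigmask is put back by the signal code once the
	 * signal has been handled (see set_restore_sigmask() below). */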
254 	current->state = TASK_INTERRUPTIBLE;
255 	schedule();
256 	set_restore_sigmask();
257 	return -ERESTARTNOHAND;
258 }
259 
260 long sys_sigaction(int sig, struct old_sigaction __user *act,
261 		struct old_sigaction __user *oact)
262 {
263 	struct k_sigaction new_ka, old_ka;
264 	int ret;
265 
266 #ifdef CONFIG_PPC64
267 	if (sig < 0)
268 		sig = -sig;
269 #endif
270 
271 	if (act) {
272 		if (get_old_sigaction(&new_ka, act))
273 			return -EFAULT;
274 	}
275 
276 	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
277 	if (!ret && oact) {
278 		if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
279 		    __put_user(to_user_ptr(old_ka.sa.sa_handler),
280 			    &oact->sa_handler) ||
281 		    __put_user(to_user_ptr(old_ka.sa.sa_restorer),
282 			    &oact->sa_restorer) ||
283 		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
284 		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
285 			return -EFAULT;
286 	}
287 
288 	return ret;
289 }
290 
291 /*
292  * When we have signals to deliver, we set up on the
293  * user stack, going down from the original stack pointer:
294  *	an ABI gap of 56 words
295  *	an mcontext struct
296  *	a sigcontext struct
297  *	a gap of __SIGNAL_FRAMESIZE bytes
298  *
299  * Each of these things must be a multiple of 16 bytes in size. The following
300  * structure represents all of this except the __SIGNAL_FRAMESIZE gap.
301  *
302  */
303 struct sigframe {
304 	struct sigcontext sctx;		/* the sigcontext */
305 	struct mcontext	mctx;		/* all the register values */
306 	/*
307 	 * Programs using the rs6000/xcoff abi can save up to 19 gp
308 	 * regs and 18 fp regs below sp before decrementing it.
309 	 */
310 	int			abigap[56];
311 };
312 
313 /* We use the mc_pad field for the signal return trampoline. */
314 #define tramp	mc_pad
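/*
 * mc_pad is spare space in struct mcontext, so it is a convenient place to
 * put the two-instruction sigreturn trampoline when the vDSO trampoline
 * cannot be used (see save_user_regs() below).
 */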
315 
316 /*
317  *  When we have rt signals to deliver, we set up on the
318  *  user stack, going down from the original stack pointer:
319  *	one rt_sigframe struct (siginfo + ucontext + ABI gap)
320  *	a gap of __SIGNAL_FRAMESIZE+16 bytes
321  *  (the +16 is to get the siginfo and ucontext in the same
322  *  positions as in older kernels).
323  *
324  *  Each of these things must be a multiple of 16 bytes in size.
325  *
326  */
327 struct rt_sigframe {
328 #ifdef CONFIG_PPC64
329 	compat_siginfo_t info;
330 #else
331 	struct siginfo info;
332 #endif
333 	struct ucontext	uc;
334 	/*
335 	 * Programs using the rs6000/xcoff abi can save up to 19 gp
336 	 * regs and 18 fp regs below sp before decrementing it.
337 	 */
338 	int			abigap[56];
339 };
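/*
 * sys_rt_sigreturn() below finds this frame again at
 * regs->gpr[1] + __SIGNAL_FRAMESIZE + 16 (see the layout comment above).
 */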
340 
341 #ifdef CONFIG_VSX
342 unsigned long copy_fpr_to_user(void __user *to,
343 			       struct task_struct *task)
344 {
345 	double buf[ELF_NFPREG];
346 	int i;
347 
348 	/* copy the FPRs and FPSCR from the thread_struct to a local buffer, then write to user space */
349 	for (i = 0; i < (ELF_NFPREG - 1) ; i++)
350 		buf[i] = task->thread.TS_FPR(i);
351 	memcpy(&buf[i], &task->thread.fpscr, sizeof(double));
352 	return __copy_to_user(to, buf, ELF_NFPREG * sizeof(double));
353 }
354 
355 unsigned long copy_fpr_from_user(struct task_struct *task,
356 				 void __user *from)
357 {
358 	double buf[ELF_NFPREG];
359 	int i;
360 
361 	if (__copy_from_user(buf, from, ELF_NFPREG * sizeof(double)))
362 		return 1;
363 	for (i = 0; i < (ELF_NFPREG - 1) ; i++)
364 		task->thread.TS_FPR(i) = buf[i];
365 	memcpy(&task->thread.fpscr, &buf[i], sizeof(double));
366 
367 	return 0;
368 }
369 
370 unsigned long copy_vsx_to_user(void __user *to,
371 			       struct task_struct *task)
372 {
373 	double buf[ELF_NVSRHALFREG];
374 	int i;
375 
376 	/* copy the VSR 0-31 upper halves from the thread_struct to a local buffer, then write to user space */
377 	for (i = 0; i < ELF_NVSRHALFREG; i++)
378 		buf[i] = task->thread.fpr[i][TS_VSRLOWOFFSET];
379 	return __copy_to_user(to, buf, ELF_NVSRHALFREG * sizeof(double));
380 }
381 
382 unsigned long copy_vsx_from_user(struct task_struct *task,
383 				 void __user *from)
384 {
385 	double buf[ELF_NVSRHALFREG];
386 	int i;
387 
388 	if (__copy_from_user(buf, from, ELF_NVSRHALFREG * sizeof(double)))
389 		return 1;
390 	for (i = 0; i < ELF_NVSRHALFREG ; i++)
391 		task->thread.fpr[i][TS_VSRLOWOFFSET] = buf[i];
392 	return 0;
393 }
394 #else
395 inline unsigned long copy_fpr_to_user(void __user *to,
396 				      struct task_struct *task)
397 {
398 	return __copy_to_user(to, task->thread.fpr,
399 			      ELF_NFPREG * sizeof(double));
400 }
401 
402 inline unsigned long copy_fpr_from_user(struct task_struct *task,
403 					void __user *from)
404 {
405 	return __copy_from_user(task->thread.fpr, from,
406 			      ELF_NFPREG * sizeof(double));
407 }
408 #endif
409 
410 /*
411  * Save the current user registers on the user stack.
412  * We only save the altivec/spe registers if the process has used
413  * altivec/spe instructions at some point.
414  */
415 static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame,
416 		int sigret, int ctx_has_vsx_region)
417 {
418 	unsigned long msr = regs->msr;
419 
420 	/* Make sure floating point registers are stored in regs */
421 	flush_fp_to_thread(current);
422 
423 	/* save general registers */
424 	if (save_general_regs(regs, frame))
425 		return 1;
426 
427 #ifdef CONFIG_ALTIVEC
428 	/* save altivec registers */
429 	if (current->thread.used_vr) {
430 		flush_altivec_to_thread(current);
431 		if (__copy_to_user(&frame->mc_vregs, current->thread.vr,
432 				   ELF_NVRREG * sizeof(vector128)))
433 			return 1;
434 		/* set MSR_VEC in the saved MSR value to indicate that
435 		   frame->mc_vregs contains valid data */
436 		msr |= MSR_VEC;
437 	}
438 	/* else assert((regs->msr & MSR_VEC) == 0) */
439 
440 	/* We always copy to/from vrsave; it's 0 if we don't have or don't
441 	 * use altivec. Since VSCR only contains 32 bits saved in the least
442 	 * significant bits of a vector, we "cheat" and stuff VRSAVE in the
443 	 * most significant bits of that same vector. --BenH
444 	 */
445 	if (__put_user(current->thread.vrsave, (u32 __user *)&frame->mc_vregs[32]))
446 		return 1;
447 #endif /* CONFIG_ALTIVEC */
448 	if (copy_fpr_to_user(&frame->mc_fregs, current))
449 		return 1;
450 
451 	/*
452 	 * Clear the MSR VSX bit to indicate there is no valid state attached
453 	 * to this context, except in the specific case below where we set it.
454 	 */
455 	msr &= ~MSR_VSX;
456 #ifdef CONFIG_VSX
457 	/*
458 	 * Copy VSR 0-31 upper half from thread_struct to local
459 	 * buffer, then write that to userspace.  Also set MSR_VSX in
460 	 * the saved MSR value to indicate that frame->mc_vregs
461 	 * contains valid data
462 	 */
463 	if (current->thread.used_vsr && ctx_has_vsx_region) {
464 		__giveup_vsx(current);
465 		if (copy_vsx_to_user(&frame->mc_vsregs, current))
466 			return 1;
467 		msr |= MSR_VSX;
468 	}
469 #endif /* CONFIG_VSX */
470 #ifdef CONFIG_SPE
471 	/* save spe registers */
472 	if (current->thread.used_spe) {
473 		flush_spe_to_thread(current);
474 		if (__copy_to_user(&frame->mc_vregs, current->thread.evr,
475 				   ELF_NEVRREG * sizeof(u32)))
476 			return 1;
477 		/* set MSR_SPE in the saved MSR value to indicate that
478 		   frame->mc_vregs contains valid data */
479 		msr |= MSR_SPE;
480 	}
481 	/* else assert((regs->msr & MSR_SPE) == 0) */
482 
483 	/* We always copy to/from spefscr */
484 	if (__put_user(current->thread.spefscr, (u32 __user *)&frame->mc_vregs + ELF_NEVRREG))
485 		return 1;
486 #endif /* CONFIG_SPE */
487 
488 	if (__put_user(msr, &frame->mc_gregs[PT_MSR]))
489 		return 1;
490 	if (sigret) {
491 		/* Set up the sigreturn trampoline: li r0,sigret; sc */
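		/* 0x38000000 is "li r0,0" (addi r0,0,0); adding sigret fills in
		 * the immediate field, and 0x44000002 is the "sc" instruction. */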
492 		if (__put_user(0x38000000UL + sigret, &frame->tramp[0])
493 		    || __put_user(0x44000002UL, &frame->tramp[1]))
494 			return 1;
495 		flush_icache_range((unsigned long) &frame->tramp[0],
496 				   (unsigned long) &frame->tramp[2]);
497 	}
498 
499 	return 0;
500 }
501 
502 /*
503  * Restore the current user register values from the user stack,
504  * (except for MSR).
505  */
506 static long restore_user_regs(struct pt_regs *regs,
507 			      struct mcontext __user *sr, int sig)
508 {
509 	long err;
510 	unsigned int save_r2 = 0;
511 	unsigned long msr;
512 #ifdef CONFIG_VSX
513 	int i;
514 #endif
515 
516 	/*
517 	 * Restore the general registers, but not MSR or SOFTE.  Also take
518 	 * care to keep r2 (the TLS pointer) intact if this is not a signal return.
519 	 */
520 	if (!sig)
521 		save_r2 = (unsigned int)regs->gpr[2];
522 	err = restore_general_regs(regs, sr);
523 	regs->trap = 0;
524 	err |= __get_user(msr, &sr->mc_gregs[PT_MSR]);
525 	if (!sig)
526 		regs->gpr[2] = (unsigned long) save_r2;
527 	if (err)
528 		return 1;
529 
530 	/* if doing a signal return, restore the previous endian mode (MSR_LE) */
531 	if (sig)
532 		regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);
533 
534 	/*
535 	 * Do this before updating the thread state in
536 	 * current->thread.fpr/vr/evr.  That way, if we get preempted
537 	 * and another task grabs the FPU/Altivec/SPE, it won't be
538 	 * tempted to save the current CPU state into the thread_struct
539 	 * and corrupt what we are writing there.
540 	 */
541 	discard_lazy_cpu_state();
542 
543 #ifdef CONFIG_ALTIVEC
544 	/*
545 	 * Force the process to reload the altivec registers from
546 	 * current->thread when it next does altivec instructions
547 	 */
548 	regs->msr &= ~MSR_VEC;
549 	if (msr & MSR_VEC) {
550 		/* restore altivec registers from the stack */
551 		if (__copy_from_user(current->thread.vr, &sr->mc_vregs,
552 				     sizeof(sr->mc_vregs)))
553 			return 1;
554 	} else if (current->thread.used_vr)
555 		memset(current->thread.vr, 0, ELF_NVRREG * sizeof(vector128));
556 
557 	/* Always get VRSAVE back */
558 	if (__get_user(current->thread.vrsave, (u32 __user *)&sr->mc_vregs[32]))
559 		return 1;
560 #endif /* CONFIG_ALTIVEC */
561 	if (copy_fpr_from_user(current, &sr->mc_fregs))
562 		return 1;
563 
564 #ifdef CONFIG_VSX
565 	/*
566 	 * Force the process to reload the VSX registers from
567 	 * current->thread when it next does VSX instruction.
568 	 */
569 	regs->msr &= ~MSR_VSX;
570 	if (msr & MSR_VSX) {
571 		/*
572 		 * Restore the VSR 0-31 upper halves from the stack to a local
573 		 * buffer, then write this out to the thread_struct
574 		 */
575 		if (copy_vsx_from_user(current, &sr->mc_vsregs))
576 			return 1;
577 	} else if (current->thread.used_vsr)
578 		for (i = 0; i < 32 ; i++)
579 			current->thread.fpr[i][TS_VSRLOWOFFSET] = 0;
580 #endif /* CONFIG_VSX */
581 	/*
582 	 * force the process to reload the FP registers from
583 	 * current->thread when it next does FP instructions
584 	 */
585 	regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1);
586 
587 #ifdef CONFIG_SPE
588 	/* force the process to reload the spe registers from
589 	   current->thread when it next does spe instructions */
590 	regs->msr &= ~MSR_SPE;
591 	if (msr & MSR_SPE) {
592 		/* restore spe registers from the stack */
593 		if (__copy_from_user(current->thread.evr, &sr->mc_vregs,
594 				     ELF_NEVRREG * sizeof(u32)))
595 			return 1;
596 	} else if (current->thread.used_spe)
597 		memset(current->thread.evr, 0, ELF_NEVRREG * sizeof(u32));
598 
599 	/* Always get SPEFSCR back */
600 	if (__get_user(current->thread.spefscr, (u32 __user *)&sr->mc_vregs + ELF_NEVRREG))
601 		return 1;
602 #endif /* CONFIG_SPE */
603 
604 	return 0;
605 }
606 
607 #ifdef CONFIG_PPC64
608 long compat_sys_rt_sigaction(int sig, const struct sigaction32 __user *act,
609 		struct sigaction32 __user *oact, size_t sigsetsize)
610 {
611 	struct k_sigaction new_ka, old_ka;
612 	int ret;
613 
614 	/* XXX: Don't preclude handling different sized sigset_t's.  */
615 	if (sigsetsize != sizeof(compat_sigset_t))
616 		return -EINVAL;
617 
618 	if (act) {
619 		compat_uptr_t handler;
620 
621 		ret = get_user(handler, &act->sa_handler);
622 		new_ka.sa.sa_handler = compat_ptr(handler);
623 		ret |= get_sigset_t(&new_ka.sa.sa_mask, &act->sa_mask);
624 		ret |= __get_user(new_ka.sa.sa_flags, &act->sa_flags);
625 		if (ret)
626 			return -EFAULT;
627 	}
628 
629 	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
630 	if (!ret && oact) {
631 		ret = put_user(to_user_ptr(old_ka.sa.sa_handler), &oact->sa_handler);
632 		ret |= put_sigset_t(&oact->sa_mask, &old_ka.sa.sa_mask);
633 		ret |= __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
634 	}
635 	return ret;
636 }
637 
638 /*
639  * Note: it is necessary to treat how as an unsigned int, with the
640  * corresponding cast to a signed int to ensure that the proper
641  * conversion (sign extension) between the register representation
642  * of a signed int (how in 32-bit mode) and the register representation
643  * of a signed int (how in 64-bit mode) is performed.
644  */
645 long compat_sys_rt_sigprocmask(u32 how, compat_sigset_t __user *set,
646 		compat_sigset_t __user *oset, size_t sigsetsize)
647 {
648 	sigset_t s;
649 	sigset_t __user *up;
650 	int ret;
651 	mm_segment_t old_fs = get_fs();
652 
653 	if (set) {
654 		if (get_sigset_t(&s, set))
655 			return -EFAULT;
656 	}
657 
658 	set_fs(KERNEL_DS);
659 	/* This is valid because of the set_fs() */
660 	up = (sigset_t __user *) &s;
661 	ret = sys_rt_sigprocmask((int)how, set ? up : NULL, oset ? up : NULL,
662 				 sigsetsize);
663 	set_fs(old_fs);
664 	if (ret)
665 		return ret;
666 	if (oset) {
667 		if (put_sigset_t(oset, &s))
668 			return -EFAULT;
669 	}
670 	return 0;
671 }
672 
673 long compat_sys_rt_sigpending(compat_sigset_t __user *set, compat_size_t sigsetsize)
674 {
675 	sigset_t s;
676 	int ret;
677 	mm_segment_t old_fs = get_fs();
678 
679 	set_fs(KERNEL_DS);
680 	/* The __user pointer cast is valid because of the set_fs() */
681 	ret = sys_rt_sigpending((sigset_t __user *) &s, sigsetsize);
682 	set_fs(old_fs);
683 	if (!ret) {
684 		if (put_sigset_t(set, &s))
685 			return -EFAULT;
686 	}
687 	return ret;
688 }
689 
690 
691 int copy_siginfo_to_user32(struct compat_siginfo __user *d, siginfo_t *s)
692 {
693 	int err;
694 
695 	if (!access_ok (VERIFY_WRITE, d, sizeof(*d)))
696 		return -EFAULT;
697 
698 	/* If you change siginfo_t structure, please be sure
699 	 * this code is fixed accordingly.
700 	 * It should never copy any pad contained in the structure
701 	 * to avoid security leaks, but must copy the generic
702 	 * 3 ints plus the relevant union member.
703 	 * This routine must convert siginfo from 64bit to 32bit as well
704 	 * at the same time.
705 	 */
706 	err = __put_user(s->si_signo, &d->si_signo);
707 	err |= __put_user(s->si_errno, &d->si_errno);
708 	err |= __put_user((short)s->si_code, &d->si_code);
709 	if (s->si_code < 0)
710 		err |= __copy_to_user(&d->_sifields._pad, &s->_sifields._pad,
711 				      SI_PAD_SIZE32);
712 	else switch(s->si_code >> 16) {
713 	case __SI_CHLD >> 16:
714 		err |= __put_user(s->si_pid, &d->si_pid);
715 		err |= __put_user(s->si_uid, &d->si_uid);
716 		err |= __put_user(s->si_utime, &d->si_utime);
717 		err |= __put_user(s->si_stime, &d->si_stime);
718 		err |= __put_user(s->si_status, &d->si_status);
719 		break;
720 	case __SI_FAULT >> 16:
721 		err |= __put_user((unsigned int)(unsigned long)s->si_addr,
722 				  &d->si_addr);
723 		break;
724 	case __SI_POLL >> 16:
725 		err |= __put_user(s->si_band, &d->si_band);
726 		err |= __put_user(s->si_fd, &d->si_fd);
727 		break;
728 	case __SI_TIMER >> 16:
729 		err |= __put_user(s->si_tid, &d->si_tid);
730 		err |= __put_user(s->si_overrun, &d->si_overrun);
731 		err |= __put_user(s->si_int, &d->si_int);
732 		break;
733 	case __SI_RT >> 16: /* This is not generated by the kernel as of now.  */
734 	case __SI_MESGQ >> 16:
735 		err |= __put_user(s->si_int, &d->si_int);
736 		/* fallthrough */
737 	case __SI_KILL >> 16:
738 	default:
739 		err |= __put_user(s->si_pid, &d->si_pid);
740 		err |= __put_user(s->si_uid, &d->si_uid);
741 		break;
742 	}
743 	return err;
744 }
745 
746 #define copy_siginfo_to_user	copy_siginfo_to_user32
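/* On PPC64, handle_rt_signal32() below therefore performs the 64->32-bit
 * siginfo conversion when it copies siginfo to the user stack. */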
747 
748 int copy_siginfo_from_user32(siginfo_t *to, struct compat_siginfo __user *from)
749 {
750 	memset(to, 0, sizeof *to);
751 
752 	if (copy_from_user(to, from, 3*sizeof(int)) ||
753 	    copy_from_user(to->_sifields._pad,
754 			   from->_sifields._pad, SI_PAD_SIZE32))
755 		return -EFAULT;
756 
757 	return 0;
758 }
759 
760 /*
761  * Note: it is necessary to treat pid and sig as unsigned ints, with the
762  * corresponding cast to a signed int to ensure that the proper conversion
763  * (sign extension) between the register representation of a signed int
764  * (pid/sig in 32-bit mode) and the register representation of a signed int
765  * (pid/sig in 64-bit mode) is performed.
766  */
767 long compat_sys_rt_sigqueueinfo(u32 pid, u32 sig, compat_siginfo_t __user *uinfo)
768 {
769 	siginfo_t info;
770 	int ret;
771 	mm_segment_t old_fs = get_fs();
772 
773 	ret = copy_siginfo_from_user32(&info, uinfo);
774 	if (unlikely(ret))
775 		return ret;
776 
777 	set_fs (KERNEL_DS);
778 	/* The __user pointer cast is valid because of the set_fs() */
779 	ret = sys_rt_sigqueueinfo((int)pid, (int)sig, (siginfo_t __user *) &info);
780 	set_fs (old_fs);
781 	return ret;
782 }
783 /*
784  *  Start Alternate signal stack support
785  *
786  *  System Calls
787  *       sigaltstack              compat_sys_sigaltstack
788  */
789 
790 int compat_sys_sigaltstack(u32 __new, u32 __old, int r5,
791 		      int r6, int r7, int r8, struct pt_regs *regs)
792 {
793 	stack_32_t __user * newstack = compat_ptr(__new);
794 	stack_32_t __user * oldstack = compat_ptr(__old);
795 	stack_t uss, uoss;
796 	int ret;
797 	mm_segment_t old_fs;
798 	unsigned long sp;
799 	compat_uptr_t ss_sp;
800 
801 	/*
802 	 * Set sp to the user stack pointer on entry to the system call;
803 	 * the system call router sets R9 to the saved registers.
804 	 */
805 	sp = regs->gpr[1];
806 
807 	/* Put new stack info in local 64 bit stack struct */
808 	if (newstack) {
809 		if (get_user(ss_sp, &newstack->ss_sp) ||
810 		    __get_user(uss.ss_flags, &newstack->ss_flags) ||
811 		    __get_user(uss.ss_size, &newstack->ss_size))
812 			return -EFAULT;
813 		uss.ss_sp = compat_ptr(ss_sp);
814 	}
815 
816 	old_fs = get_fs();
817 	set_fs(KERNEL_DS);
818 	/* The __user pointer casts are valid because of the set_fs() */
819 	ret = do_sigaltstack(
820 		newstack ? (stack_t __user *) &uss : NULL,
821 		oldstack ? (stack_t __user *) &uoss : NULL,
822 		sp);
823 	set_fs(old_fs);
824 	/* Copy the stack information to the user output buffer */
825 	if (!ret && oldstack  &&
826 		(put_user(ptr_to_compat(uoss.ss_sp), &oldstack->ss_sp) ||
827 		 __put_user(uoss.ss_flags, &oldstack->ss_flags) ||
828 		 __put_user(uoss.ss_size, &oldstack->ss_size)))
829 		return -EFAULT;
830 	return ret;
831 }
832 #endif /* CONFIG_PPC64 */
833 
834 /*
835  * Set up a signal frame for a "real-time" signal handler
836  * (one which gets siginfo).
837  */
838 int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
839 		siginfo_t *info, sigset_t *oldset,
840 		struct pt_regs *regs)
841 {
842 	struct rt_sigframe __user *rt_sf;
843 	struct mcontext __user *frame;
844 	void __user *addr;
845 	unsigned long newsp = 0;
846 
847 	/* Set up Signal Frame */
848 	/* Put a Real Time Context onto stack */
849 	rt_sf = get_sigframe(ka, regs, sizeof(*rt_sf), 1);
850 	addr = rt_sf;
851 	if (unlikely(rt_sf == NULL))
852 		goto badframe;
853 
854 	/* Put the siginfo & fill in most of the ucontext */
855 	if (copy_siginfo_to_user(&rt_sf->info, info)
856 	    || __put_user(0, &rt_sf->uc.uc_flags)
857 	    || __put_user(0, &rt_sf->uc.uc_link)
858 	    || __put_user(current->sas_ss_sp, &rt_sf->uc.uc_stack.ss_sp)
859 	    || __put_user(sas_ss_flags(regs->gpr[1]),
860 			  &rt_sf->uc.uc_stack.ss_flags)
861 	    || __put_user(current->sas_ss_size, &rt_sf->uc.uc_stack.ss_size)
862 	    || __put_user(to_user_ptr(&rt_sf->uc.uc_mcontext),
863 		    &rt_sf->uc.uc_regs)
864 	    || put_sigset_t(&rt_sf->uc.uc_sigmask, oldset))
865 		goto badframe;
866 
867 	/* Save user registers on the stack */
868 	frame = &rt_sf->uc.uc_mcontext;
869 	addr = frame;
870 	if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
871 		if (save_user_regs(regs, frame, 0, 1))
872 			goto badframe;
873 		regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
874 	} else {
875 		if (save_user_regs(regs, frame, __NR_rt_sigreturn, 1))
876 			goto badframe;
877 		regs->link = (unsigned long) frame->tramp;
878 	}
879 
880 	current->thread.fpscr.val = 0;	/* turn off all fp exceptions */
881 
882 	/* create a stack frame for the caller of the handler */
883 	newsp = ((unsigned long)rt_sf) - (__SIGNAL_FRAMESIZE + 16);
884 	addr = (void __user *)regs->gpr[1];
885 	if (put_user(regs->gpr[1], (u32 __user *)newsp))
886 		goto badframe;
887 
888 	/* Fill registers for signal handler */
889 	regs->gpr[1] = newsp;
890 	regs->gpr[3] = sig;
891 	regs->gpr[4] = (unsigned long) &rt_sf->info;
892 	regs->gpr[5] = (unsigned long) &rt_sf->uc;
893 	regs->gpr[6] = (unsigned long) rt_sf;
894 	regs->nip = (unsigned long) ka->sa.sa_handler;
895 	/* enter the signal handler in big-endian mode */
896 	regs->msr &= ~MSR_LE;
897 	return 1;
898 
899 badframe:
900 #ifdef DEBUG_SIG
901 	printk("badframe in handle_rt_signal, regs=%p frame=%p newsp=%lx\n",
902 	       regs, frame, newsp);
903 #endif
904 	if (show_unhandled_signals)
905 		printk_ratelimited(KERN_INFO
906 				   "%s[%d]: bad frame in handle_rt_signal32: "
907 				   "%p nip %08lx lr %08lx\n",
908 				   current->comm, current->pid,
909 				   addr, regs->nip, regs->link);
910 
911 	force_sigsegv(sig, current);
912 	return 0;
913 }
914 
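/*
 * Restore the user context pointed to by ucp.  "sig" is non-zero for a
 * real signal return and zero for swapcontext(); restore_user_regs() uses
 * it to decide whether to preserve r2 (the TLS pointer) and whether to
 * restore the saved MSR_LE bit.
 */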
915 static int do_setcontext(struct ucontext __user *ucp, struct pt_regs *regs, int sig)
916 {
917 	sigset_t set;
918 	struct mcontext __user *mcp;
919 
920 	if (get_sigset_t(&set, &ucp->uc_sigmask))
921 		return -EFAULT;
922 #ifdef CONFIG_PPC64
923 	{
924 		u32 cmcp;
925 
926 		if (__get_user(cmcp, &ucp->uc_regs))
927 			return -EFAULT;
928 		mcp = (struct mcontext __user *)(u64)cmcp;
929 		/* no need to check access_ok(mcp), since mcp < 4GB */
930 	}
931 #else
932 	if (__get_user(mcp, &ucp->uc_regs))
933 		return -EFAULT;
934 	if (!access_ok(VERIFY_READ, mcp, sizeof(*mcp)))
935 		return -EFAULT;
936 #endif
937 	restore_sigmask(&set);
938 	if (restore_user_regs(regs, mcp, sig))
939 		return -EFAULT;
940 
941 	return 0;
942 }
943 
944 long sys_swapcontext(struct ucontext __user *old_ctx,
945 		     struct ucontext __user *new_ctx,
946 		     int ctx_size, int r6, int r7, int r8, struct pt_regs *regs)
947 {
948 	unsigned char tmp;
949 	int ctx_has_vsx_region = 0;
950 
951 #ifdef CONFIG_PPC64
952 	unsigned long new_msr = 0;
953 
954 	if (new_ctx) {
955 		struct mcontext __user *mcp;
956 		u32 cmcp;
957 
958 		/*
959 		 * Get pointer to the real mcontext.  No need for
960 		 * access_ok since we are dealing with compat
961 		 * pointers.
962 		 */
963 		if (__get_user(cmcp, &new_ctx->uc_regs))
964 			return -EFAULT;
965 		mcp = (struct mcontext __user *)(u64)cmcp;
966 		if (__get_user(new_msr, &mcp->mc_gregs[PT_MSR]))
967 			return -EFAULT;
968 	}
969 	/*
970 	 * Check that the context is not smaller than the original
971 	 * size (with VMX but without VSX)
972 	 */
973 	if (ctx_size < UCONTEXTSIZEWITHOUTVSX)
974 		return -EINVAL;
975 	/*
976 	 * Reject the new context if it sets the MSR VSX bit but
977 	 * doesn't provide enough room for the VSX state.
978 	 */
979 	if ((ctx_size < sizeof(struct ucontext)) &&
980 	    (new_msr & MSR_VSX))
981 		return -EINVAL;
982 	/* Does the context have enough room to store VSX data? */
983 	if (ctx_size >= sizeof(struct ucontext))
984 		ctx_has_vsx_region = 1;
985 #else
986 	/* Context size is for future use. Right now, we only make sure
987 	 * we are passed something we understand
988 	 */
989 	if (ctx_size < sizeof(struct ucontext))
990 		return -EINVAL;
991 #endif
992 	if (old_ctx != NULL) {
993 		struct mcontext __user *mctx;
994 
995 		/*
996 		 * old_ctx might not be 16-byte aligned, in which
997 		 * case old_ctx->uc_mcontext won't be either.
998 		 * Because we have the old_ctx->uc_pad2 field
999 		 * before old_ctx->uc_mcontext, we need to round down
1000 		 * from &old_ctx->uc_mcontext to a 16-byte boundary.
1001 		 */
1002 		mctx = (struct mcontext __user *)
1003 			((unsigned long) &old_ctx->uc_mcontext & ~0xfUL);
1004 		if (!access_ok(VERIFY_WRITE, old_ctx, ctx_size)
1005 		    || save_user_regs(regs, mctx, 0, ctx_has_vsx_region)
1006 		    || put_sigset_t(&old_ctx->uc_sigmask, &current->blocked)
1007 		    || __put_user(to_user_ptr(mctx), &old_ctx->uc_regs))
1008 			return -EFAULT;
1009 	}
1010 	if (new_ctx == NULL)
1011 		return 0;
1012 	if (!access_ok(VERIFY_READ, new_ctx, ctx_size)
1013 	    || __get_user(tmp, (u8 __user *) new_ctx)
1014 	    || __get_user(tmp, (u8 __user *) new_ctx + ctx_size - 1))
1015 		return -EFAULT;
1016 
1017 	/*
1018 	 * If we get a fault copying the context into the kernel's
1019 	 * image of the user's registers, we can't just return -EFAULT
1020 	 * because the user's registers will be corrupted.  For instance
1021 	 * the NIP value may have been updated but not some of the
1022 	 * other registers.  Given that we have done the access_ok
1023 	 * and successfully read the first and last bytes of the region
1024 	 * above, this should only happen in an out-of-memory situation
1025 	 * or if another thread unmaps the region containing the context.
1026 	 * We kill the task with a SIGSEGV in this situation.
1027 	 */
1028 	if (do_setcontext(new_ctx, regs, 0))
1029 		do_exit(SIGSEGV);
1030 
1031 	set_thread_flag(TIF_RESTOREALL);
1032 	return 0;
1033 }
1034 
1035 long sys_rt_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8,
1036 		     struct pt_regs *regs)
1037 {
1038 	struct rt_sigframe __user *rt_sf;
1039 
1040 	/* Always make any pending restarted system calls return -EINTR */
1041 	current_thread_info()->restart_block.fn = do_no_restart_syscall;
1042 
1043 	rt_sf = (struct rt_sigframe __user *)
1044 		(regs->gpr[1] + __SIGNAL_FRAMESIZE + 16);
1045 	if (!access_ok(VERIFY_READ, rt_sf, sizeof(*rt_sf)))
1046 		goto bad;
1047 	if (do_setcontext(&rt_sf->uc, regs, 1))
1048 		goto bad;
1049 
1050 	/*
1051 	 * It's not clear whether or why it is desirable to save the
1052 	 * sigaltstack setting on signal delivery and restore it on
1053 	 * signal return.  But other architectures do this and we have
1054 	 * always done it up until now so it is probably better not to
1055 	 * change it.  -- paulus
1056 	 */
1057 #ifdef CONFIG_PPC64
1058 	/*
1059 	 * We use the compat_sys_ version that does the 32/64-bit conversion
1060 	 * and takes the userland pointer directly. What about error checking?
1061 	 * Nobody does any...
1062 	 */
1063 	compat_sys_sigaltstack((u32)(u64)&rt_sf->uc.uc_stack, 0, 0, 0, 0, 0, regs);
1064 #else
1065 	do_sigaltstack(&rt_sf->uc.uc_stack, NULL, regs->gpr[1]);
1066 #endif
1067 	set_thread_flag(TIF_RESTOREALL);
1068 	return 0;
1069 
1070  bad:
1071 	if (show_unhandled_signals)
1072 		printk_ratelimited(KERN_INFO
1073 				   "%s[%d]: bad frame in sys_rt_sigreturn: "
1074 				   "%p nip %08lx lr %08lx\n",
1075 				   current->comm, current->pid,
1076 				   rt_sf, regs->nip, regs->link);
1077 
1078 	force_sig(SIGSEGV, current);
1079 	return 0;
1080 }
1081 
1082 #ifdef CONFIG_PPC32
1083 int sys_debug_setcontext(struct ucontext __user *ctx,
1084 			 int ndbg, struct sig_dbg_op __user *dbg,
1085 			 int r6, int r7, int r8,
1086 			 struct pt_regs *regs)
1087 {
1088 	struct sig_dbg_op op;
1089 	int i;
1090 	unsigned char tmp;
1091 	unsigned long new_msr = regs->msr;
1092 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
1093 	unsigned long new_dbcr0 = current->thread.dbcr0;
1094 #endif
1095 
1096 	for (i=0; i<ndbg; i++) {
1097 		if (copy_from_user(&op, dbg + i, sizeof(op)))
1098 			return -EFAULT;
1099 		switch (op.dbg_type) {
1100 		case SIG_DBG_SINGLE_STEPPING:
1101 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
1102 			if (op.dbg_value) {
1103 				new_msr |= MSR_DE;
1104 				new_dbcr0 |= (DBCR0_IDM | DBCR0_IC);
1105 			} else {
1106 				new_dbcr0 &= ~DBCR0_IC;
1107 				if (!DBCR_ACTIVE_EVENTS(new_dbcr0,
1108 						current->thread.dbcr1)) {
1109 					new_msr &= ~MSR_DE;
1110 					new_dbcr0 &= ~DBCR0_IDM;
1111 				}
1112 			}
1113 #else
1114 			if (op.dbg_value)
1115 				new_msr |= MSR_SE;
1116 			else
1117 				new_msr &= ~MSR_SE;
1118 #endif
1119 			break;
1120 		case SIG_DBG_BRANCH_TRACING:
1121 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
1122 			return -EINVAL;
1123 #else
1124 			if (op.dbg_value)
1125 				new_msr |= MSR_BE;
1126 			else
1127 				new_msr &= ~MSR_BE;
1128 #endif
1129 			break;
1130 
1131 		default:
1132 			return -EINVAL;
1133 		}
1134 	}
1135 
1136 	/* We wait until here to actually install the values in the
1137 	   registers so that a failure in the loop above will not
1138 	   affect the contents of these registers.  After this point,
1139 	   failure is a problem, anyway, and it's very unlikely unless
1140 	   the user is really doing something wrong. */
1141 	regs->msr = new_msr;
1142 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
1143 	current->thread.dbcr0 = new_dbcr0;
1144 #endif
1145 
1146 	if (!access_ok(VERIFY_READ, ctx, sizeof(*ctx))
1147 	    || __get_user(tmp, (u8 __user *) ctx)
1148 	    || __get_user(tmp, (u8 __user *) (ctx + 1) - 1))
1149 		return -EFAULT;
1150 
1151 	/*
1152 	 * If we get a fault copying the context into the kernel's
1153 	 * image of the user's registers, we can't just return -EFAULT
1154 	 * because the user's registers will be corrupted.  For instance
1155 	 * the NIP value may have been updated but not some of the
1156 	 * other registers.  Given that we have done the access_ok
1157 	 * and successfully read the first and last bytes of the region
1158 	 * above, this should only happen in an out-of-memory situation
1159 	 * or if another thread unmaps the region containing the context.
1160 	 * We kill the task with a SIGSEGV in this situation.
1161 	 */
1162 	if (do_setcontext(ctx, regs, 1)) {
1163 		if (show_unhandled_signals)
1164 			printk_ratelimited(KERN_INFO "%s[%d]: bad frame in "
1165 					   "sys_debug_setcontext: %p nip %08lx "
1166 					   "lr %08lx\n",
1167 					   current->comm, current->pid,
1168 					   ctx, regs->nip, regs->link);
1169 
1170 		force_sig(SIGSEGV, current);
1171 		goto out;
1172 	}
1173 
1174 	/*
1175 	 * It's not clear whether or why it is desirable to save the
1176 	 * sigaltstack setting on signal delivery and restore it on
1177 	 * signal return.  But other architectures do this and we have
1178 	 * always done it up until now so it is probably better not to
1179 	 * change it.  -- paulus
1180 	 */
1181 	do_sigaltstack(&ctx->uc_stack, NULL, regs->gpr[1]);
1182 
1183 	set_thread_flag(TIF_RESTOREALL);
1184  out:
1185 	return 0;
1186 }
1187 #endif
1188 
1189 /*
1190  * OK, we're invoking a handler
1191  */
1192 int handle_signal32(unsigned long sig, struct k_sigaction *ka,
1193 		    siginfo_t *info, sigset_t *oldset, struct pt_regs *regs)
1194 {
1195 	struct sigcontext __user *sc;
1196 	struct sigframe __user *frame;
1197 	unsigned long newsp = 0;
1198 
1199 	/* Set up Signal Frame */
1200 	frame = get_sigframe(ka, regs, sizeof(*frame), 1);
1201 	if (unlikely(frame == NULL))
1202 		goto badframe;
1203 	sc = (struct sigcontext __user *) &frame->sctx;
1204 
1205 #if _NSIG != 64
1206 #error "Please adjust handle_signal()"
1207 #endif
1208 	if (__put_user(to_user_ptr(ka->sa.sa_handler), &sc->handler)
1209 	    || __put_user(oldset->sig[0], &sc->oldmask)
1210 #ifdef CONFIG_PPC64
1211 	    || __put_user((oldset->sig[0] >> 32), &sc->_unused[3])
1212 #else
1213 	    || __put_user(oldset->sig[1], &sc->_unused[3])
1214 #endif
1215 	    || __put_user(to_user_ptr(&frame->mctx), &sc->regs)
1216 	    || __put_user(sig, &sc->signal))
1217 		goto badframe;
1218 
1219 	if (vdso32_sigtramp && current->mm->context.vdso_base) {
1220 		if (save_user_regs(regs, &frame->mctx, 0, 1))
1221 			goto badframe;
1222 		regs->link = current->mm->context.vdso_base + vdso32_sigtramp;
1223 	} else {
1224 		if (save_user_regs(regs, &frame->mctx, __NR_sigreturn, 1))
1225 			goto badframe;
1226 		regs->link = (unsigned long) frame->mctx.tramp;
1227 	}
1228 
1229 	current->thread.fpscr.val = 0;	/* turn off all fp exceptions */
1230 
1231 	/* create a stack frame for the caller of the handler */
1232 	newsp = ((unsigned long)frame) - __SIGNAL_FRAMESIZE;
1233 	if (put_user(regs->gpr[1], (u32 __user *)newsp))
1234 		goto badframe;
1235 
1236 	regs->gpr[1] = newsp;
1237 	regs->gpr[3] = sig;
1238 	regs->gpr[4] = (unsigned long) sc;
1239 	regs->nip = (unsigned long) ka->sa.sa_handler;
1240 	/* enter the signal handler in big-endian mode */
1241 	regs->msr &= ~MSR_LE;
1242 
1243 	return 1;
1244 
1245 badframe:
1246 #ifdef DEBUG_SIG
1247 	printk("badframe in handle_signal, regs=%p frame=%p newsp=%lx\n",
1248 	       regs, frame, newsp);
1249 #endif
1250 	if (show_unhandled_signals)
1251 		printk_ratelimited(KERN_INFO
1252 				   "%s[%d]: bad frame in handle_signal32: "
1253 				   "%p nip %08lx lr %08lx\n",
1254 				   current->comm, current->pid,
1255 				   frame, regs->nip, regs->link);
1256 
1257 	force_sigsegv(sig, current);
1258 	return 0;
1259 }
1260 
1261 /*
1262  * Do a signal return; undo the signal stack.
1263  */
1264 long sys_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8,
1265 		       struct pt_regs *regs)
1266 {
1267 	struct sigcontext __user *sc;
1268 	struct sigcontext sigctx;
1269 	struct mcontext __user *sr;
1270 	void __user *addr;
1271 	sigset_t set;
1272 
1273 	/* Always make any pending restarted system calls return -EINTR */
1274 	current_thread_info()->restart_block.fn = do_no_restart_syscall;
1275 
1276 	sc = (struct sigcontext __user *)(regs->gpr[1] + __SIGNAL_FRAMESIZE);
1277 	addr = sc;
1278 	if (copy_from_user(&sigctx, sc, sizeof(sigctx)))
1279 		goto badframe;
1280 
1281 #ifdef CONFIG_PPC64
1282 	/*
1283 	 * Note that PPC32 puts the upper 32 bits of the sigmask in the
1284 	 * unused part of the signal stackframe
1285 	 */
1286 	set.sig[0] = sigctx.oldmask + ((long)(sigctx._unused[3]) << 32);
1287 #else
1288 	set.sig[0] = sigctx.oldmask;
1289 	set.sig[1] = sigctx._unused[3];
1290 #endif
1291 	restore_sigmask(&set);
1292 
1293 	sr = (struct mcontext __user *)from_user_ptr(sigctx.regs);
1294 	addr = sr;
1295 	if (!access_ok(VERIFY_READ, sr, sizeof(*sr))
1296 	    || restore_user_regs(regs, sr, 1))
1297 		goto badframe;
1298 
1299 	set_thread_flag(TIF_RESTOREALL);
1300 	return 0;
1301 
1302 badframe:
1303 	if (show_unhandled_signals)
1304 		printk_ratelimited(KERN_INFO
1305 				   "%s[%d]: bad frame in sys_sigreturn: "
1306 				   "%p nip %08lx lr %08lx\n",
1307 				   current->comm, current->pid,
1308 				   addr, regs->nip, regs->link);
1309 
1310 	force_sig(SIGSEGV, current);
1311 	return 0;
1312 }
1313