1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Based on arch/arm/kernel/signal.c
4 *
5 * Copyright (C) 1995-2009 Russell King
6 * Copyright (C) 2012 ARM Ltd.
7 */
8
9 #include <linux/cache.h>
10 #include <linux/compat.h>
11 #include <linux/errno.h>
12 #include <linux/kernel.h>
13 #include <linux/signal.h>
14 #include <linux/freezer.h>
15 #include <linux/stddef.h>
16 #include <linux/uaccess.h>
17 #include <linux/sizes.h>
18 #include <linux/string.h>
19 #include <linux/resume_user_mode.h>
20 #include <linux/ratelimit.h>
21 #include <linux/syscalls.h>
22
23 #include <asm/daifflags.h>
24 #include <asm/debug-monitors.h>
25 #include <asm/elf.h>
26 #include <asm/cacheflush.h>
27 #include <asm/ucontext.h>
28 #include <asm/unistd.h>
29 #include <asm/fpsimd.h>
30 #include <asm/ptrace.h>
31 #include <asm/syscall.h>
32 #include <asm/signal32.h>
33 #include <asm/traps.h>
34 #include <asm/vdso.h>
35
36 /*
37 * Do a signal return; undo the signal stack. These are aligned to 128-bit.
38 */
39 struct rt_sigframe {
40 struct siginfo info;
41 struct ucontext uc;
42 };
43
44 struct frame_record {
45 u64 fp;
46 u64 lr;
47 };
48
49 struct rt_sigframe_user_layout {
50 struct rt_sigframe __user *sigframe;
51 struct frame_record __user *next_frame;
52
53 unsigned long size; /* size of allocated sigframe data */
54 unsigned long limit; /* largest allowed size */
55
56 unsigned long fpsimd_offset;
57 unsigned long esr_offset;
58 unsigned long sve_offset;
59 unsigned long za_offset;
60 unsigned long extra_offset;
61 unsigned long end_offset;
62 };
63
64 #define BASE_SIGFRAME_SIZE round_up(sizeof(struct rt_sigframe), 16)
65 #define TERMINATOR_SIZE round_up(sizeof(struct _aarch64_ctx), 16)
66 #define EXTRA_CONTEXT_SIZE round_up(sizeof(struct extra_context), 16)
67
68 static void init_user_layout(struct rt_sigframe_user_layout *user)
69 {
70 const size_t reserved_size =
71 sizeof(user->sigframe->uc.uc_mcontext.__reserved);
72
73 memset(user, 0, sizeof(*user));
74 user->size = offsetof(struct rt_sigframe, uc.uc_mcontext.__reserved);
75
76 user->limit = user->size + reserved_size;
77
78 user->limit -= TERMINATOR_SIZE;
79 user->limit -= EXTRA_CONTEXT_SIZE;
80 /* Reserve space for extension and terminator ^ */
81 }
82
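/*
 * Size of the frame that will actually be written to the user stack:
 * the allocated records, rounded up to 16 bytes and never smaller than
 * the fixed part of struct rt_sigframe.
 */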
83 static size_t sigframe_size(struct rt_sigframe_user_layout const *user)
84 {
85 return round_up(max(user->size, sizeof(struct rt_sigframe)), 16);
86 }
87
88 /*
89 * Sanity limit on the approximate maximum size of signal frame we'll
90 * try to generate. Stack alignment padding and the frame record are
91 * not taken into account. This limit is not a guarantee and is
92 * NOT ABI.
93 */
94 #define SIGFRAME_MAXSZ SZ_64K
95
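/*
 * Reserve a 16-byte aligned block of <size> bytes for a record in the
 * user signal frame. If the standard __reserved[] area is exhausted and
 * @extend is true, an extra_context record is allocated and subsequent
 * records are placed in the extra space, up to SIGFRAME_MAXSZ.
 */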
96 static int __sigframe_alloc(struct rt_sigframe_user_layout *user,
97 unsigned long *offset, size_t size, bool extend)
98 {
99 size_t padded_size = round_up(size, 16);
100
101 if (padded_size > user->limit - user->size &&
102 !user->extra_offset &&
103 extend) {
104 int ret;
105
106 user->limit += EXTRA_CONTEXT_SIZE;
107 ret = __sigframe_alloc(user, &user->extra_offset,
108 sizeof(struct extra_context), false);
109 if (ret) {
110 user->limit -= EXTRA_CONTEXT_SIZE;
111 return ret;
112 }
113
114 /* Reserve space for the __reserved[] terminator */
115 user->size += TERMINATOR_SIZE;
116
117 /*
118 * Allow expansion up to SIGFRAME_MAXSZ, ensuring space for
119 * the terminator:
120 */
121 user->limit = SIGFRAME_MAXSZ - TERMINATOR_SIZE;
122 }
123
124 /* Still not enough space? Bad luck! */
125 if (padded_size > user->limit - user->size)
126 return -ENOMEM;
127
128 *offset = user->size;
129 user->size += padded_size;
130
131 return 0;
132 }
133
134 /*
135 * Allocate space for an optional record of <size> bytes in the user
136 * signal frame. The offset from the signal frame base address to the
137 * allocated block is assigned to *offset.
138 */
139 static int sigframe_alloc(struct rt_sigframe_user_layout *user,
140 unsigned long *offset, size_t size)
141 {
142 return __sigframe_alloc(user, offset, size, true);
143 }
144
145 /* Allocate the null terminator record and prevent further allocations */
146 static int sigframe_alloc_end(struct rt_sigframe_user_layout *user)
147 {
148 int ret;
149
150 /* Un-reserve the space reserved for the terminator: */
151 user->limit += TERMINATOR_SIZE;
152
153 ret = sigframe_alloc(user, &user->end_offset,
154 sizeof(struct _aarch64_ctx));
155 if (ret)
156 return ret;
157
158 /* Prevent further allocation: */
159 user->limit = user->size;
160 return 0;
161 }
162
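/* Turn an offset returned by sigframe_alloc() into a user pointer */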
163 static void __user *apply_user_offset(
164 struct rt_sigframe_user_layout const *user, unsigned long offset)
165 {
166 char __user *base = (char __user *)user->sigframe;
167
168 return base + offset;
169 }
170
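/*
 * Write the FPSIMD state saved in current->thread.uw.fpsimd_state out to
 * the user fpsimd_context record, along with its magic/size header.
 */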
171 static int preserve_fpsimd_context(struct fpsimd_context __user *ctx)
172 {
173 struct user_fpsimd_state const *fpsimd =
174 &current->thread.uw.fpsimd_state;
175 int err;
176
177 /* copy the FP and status/control registers */
178 err = __copy_to_user(ctx->vregs, fpsimd->vregs, sizeof(fpsimd->vregs));
179 __put_user_error(fpsimd->fpsr, &ctx->fpsr, err);
180 __put_user_error(fpsimd->fpcr, &ctx->fpcr, err);
181
182 /* copy the magic/size information */
183 __put_user_error(FPSIMD_MAGIC, &ctx->head.magic, err);
184 __put_user_error(sizeof(struct fpsimd_context), &ctx->head.size, err);
185
186 return err ? -EFAULT : 0;
187 }
188
189 static int restore_fpsimd_context(struct fpsimd_context __user *ctx)
190 {
191 struct user_fpsimd_state fpsimd;
192 __u32 magic, size;
193 int err = 0;
194
195 /* check the magic/size information */
196 __get_user_error(magic, &ctx->head.magic, err);
197 __get_user_error(size, &ctx->head.size, err);
198 if (err)
199 return -EFAULT;
200 if (magic != FPSIMD_MAGIC || size != sizeof(struct fpsimd_context))
201 return -EINVAL;
202
203 /* copy the FP and status/control registers */
204 err = __copy_from_user(fpsimd.vregs, ctx->vregs,
205 sizeof(fpsimd.vregs));
206 __get_user_error(fpsimd.fpsr, &ctx->fpsr, err);
207 __get_user_error(fpsimd.fpcr, &ctx->fpcr, err);
208
209 clear_thread_flag(TIF_SVE);
210
211 /* load the hardware registers from the fpsimd_state structure */
212 if (!err)
213 fpsimd_update_current_state(&fpsimd);
214
215 return err ? -EFAULT : 0;
216 }
217
218
219 struct user_ctxs {
220 struct fpsimd_context __user *fpsimd;
221 struct sve_context __user *sve;
222 struct za_context __user *za;
223 };
224
225 #ifdef CONFIG_ARM64_SVE
226
227 static int preserve_sve_context(struct sve_context __user *ctx)
228 {
229 int err = 0;
230 u16 reserved[ARRAY_SIZE(ctx->__reserved)];
231 u16 flags = 0;
232 unsigned int vl = task_get_sve_vl(current);
233 unsigned int vq = 0;
234
235 if (thread_sm_enabled(&current->thread)) {
236 vl = task_get_sme_vl(current);
237 vq = sve_vq_from_vl(vl);
238 flags |= SVE_SIG_FLAG_SM;
239 } else if (test_thread_flag(TIF_SVE)) {
240 vq = sve_vq_from_vl(vl);
241 }
242
243 memset(reserved, 0, sizeof(reserved));
244
245 __put_user_error(SVE_MAGIC, &ctx->head.magic, err);
246 __put_user_error(round_up(SVE_SIG_CONTEXT_SIZE(vq), 16),
247 &ctx->head.size, err);
248 __put_user_error(vl, &ctx->vl, err);
249 __put_user_error(flags, &ctx->flags, err);
250 BUILD_BUG_ON(sizeof(ctx->__reserved) != sizeof(reserved));
251 err |= __copy_to_user(&ctx->__reserved, reserved, sizeof(reserved));
252
253 if (vq) {
254 /*
255 * This assumes that the SVE state has already been saved to
256 * the task struct by calling the function
257 * fpsimd_signal_preserve_current_state().
258 */
259 err |= __copy_to_user((char __user *)ctx + SVE_SIG_REGS_OFFSET,
260 current->thread.sve_state,
261 SVE_SIG_REGS_SIZE(vq));
262 }
263
264 return err ? -EFAULT : 0;
265 }
266
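/*
 * Restore SVE (or streaming SVE) state from the user sve_context record.
 * A record too small to carry register data selects plain FPSIMD state
 * instead; in all cases the V-register view is taken from the fpsimd
 * record already located by parse_user_sigframe().
 */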
267 static int restore_sve_fpsimd_context(struct user_ctxs *user)
268 {
269 int err;
270 unsigned int vl, vq;
271 struct user_fpsimd_state fpsimd;
272 struct sve_context sve;
273
274 if (__copy_from_user(&sve, user->sve, sizeof(sve)))
275 return -EFAULT;
276
277 if (sve.flags & SVE_SIG_FLAG_SM) {
278 if (!system_supports_sme())
279 return -EINVAL;
280
281 vl = task_get_sme_vl(current);
282 } else {
283 vl = task_get_sve_vl(current);
284 }
285
286 if (sve.vl != vl)
287 return -EINVAL;
288
289 if (sve.head.size <= sizeof(*user->sve)) {
290 clear_thread_flag(TIF_SVE);
291 current->thread.svcr &= ~SVCR_SM_MASK;
292 goto fpsimd_only;
293 }
294
295 vq = sve_vq_from_vl(sve.vl);
296
297 if (sve.head.size < SVE_SIG_CONTEXT_SIZE(vq))
298 return -EINVAL;
299
300 /*
301 * Careful: we are about to __copy_from_user() directly into
302 * thread.sve_state with preemption enabled, so protection is
303 * needed to prevent a racing context switch from writing stale
304 * registers back over the new data.
305 */
306
307 fpsimd_flush_task_state(current);
308 /* From now, fpsimd_thread_switch() won't touch thread.sve_state */
309
310 sve_alloc(current, true);
311 if (!current->thread.sve_state) {
312 clear_thread_flag(TIF_SVE);
313 return -ENOMEM;
314 }
315
316 err = __copy_from_user(current->thread.sve_state,
317 (char __user const *)user->sve +
318 SVE_SIG_REGS_OFFSET,
319 SVE_SIG_REGS_SIZE(vq));
320 if (err)
321 return -EFAULT;
322
323 if (sve.flags & SVE_SIG_FLAG_SM)
324 current->thread.svcr |= SVCR_SM_MASK;
325 else
326 set_thread_flag(TIF_SVE);
327
328 fpsimd_only:
329 /* copy the FP and status/control registers */
330 /* restore_sigframe() already checked that user->fpsimd != NULL. */
331 err = __copy_from_user(fpsimd.vregs, user->fpsimd->vregs,
332 sizeof(fpsimd.vregs));
333 __get_user_error(fpsimd.fpsr, &user->fpsimd->fpsr, err);
334 __get_user_error(fpsimd.fpcr, &user->fpsimd->fpcr, err);
335
336 /* load the hardware registers from the fpsimd_state structure */
337 if (!err)
338 fpsimd_update_current_state(&fpsimd);
339
340 return err ? -EFAULT : 0;
341 }
342
343 #else /* ! CONFIG_ARM64_SVE */
344
345 /* Turn any non-optimised out attempts to use these into a link error: */
346 extern int preserve_sve_context(void __user *ctx);
347 extern int restore_sve_fpsimd_context(struct user_ctxs *user);
348
349 #endif /* ! CONFIG_ARM64_SVE */
350
351 #ifdef CONFIG_ARM64_SME
352
353 static int preserve_za_context(struct za_context __user *ctx)
354 {
355 int err = 0;
356 u16 reserved[ARRAY_SIZE(ctx->__reserved)];
357 unsigned int vl = task_get_sme_vl(current);
358 unsigned int vq;
359
360 if (thread_za_enabled(&current->thread))
361 vq = sve_vq_from_vl(vl);
362 else
363 vq = 0;
364
365 memset(reserved, 0, sizeof(reserved));
366
367 __put_user_error(ZA_MAGIC, &ctx->head.magic, err);
368 __put_user_error(round_up(ZA_SIG_CONTEXT_SIZE(vq), 16),
369 &ctx->head.size, err);
370 __put_user_error(vl, &ctx->vl, err);
371 BUILD_BUG_ON(sizeof(ctx->__reserved) != sizeof(reserved));
372 err |= __copy_to_user(&ctx->__reserved, reserved, sizeof(reserved));
373
374 if (vq) {
375 /*
376 * This assumes that the ZA state has already been saved to
377 * the task struct by calling the function
378 * fpsimd_signal_preserve_current_state().
379 */
380 err |= __copy_to_user((char __user *)ctx + ZA_SIG_REGS_OFFSET,
381 current->thread.za_state,
382 ZA_SIG_REGS_SIZE(vq));
383 }
384
385 return err ? -EFAULT : 0;
386 }
387
388 static int restore_za_context(struct user_ctxs *user)
389 {
390 int err;
391 unsigned int vq;
392 struct za_context za;
393
394 if (__copy_from_user(&za, user->za, sizeof(za)))
395 return -EFAULT;
396
397 if (za.vl != task_get_sme_vl(current))
398 return -EINVAL;
399
400 if (za.head.size <= sizeof(*user->za)) {
401 current->thread.svcr &= ~SVCR_ZA_MASK;
402 return 0;
403 }
404
405 vq = sve_vq_from_vl(za.vl);
406
407 if (za.head.size < ZA_SIG_CONTEXT_SIZE(vq))
408 return -EINVAL;
409
410 /*
411 * Careful: we are about to __copy_from_user() directly into
412 * thread.za_state with preemption enabled, so protection is
413 * needed to prevent a racing context switch from writing stale
414 * registers back over the new data.
415 */
416
417 fpsimd_flush_task_state(current);
418 /* From now, fpsimd_thread_switch() won't touch thread.za_state */
419
420 sme_alloc(current);
421 if (!current->thread.za_state) {
422 current->thread.svcr &= ~SVCR_ZA_MASK;
423 clear_thread_flag(TIF_SME);
424 return -ENOMEM;
425 }
426
427 err = __copy_from_user(current->thread.za_state,
428 (char __user const *)user->za +
429 ZA_SIG_REGS_OFFSET,
430 ZA_SIG_REGS_SIZE(vq));
431 if (err)
432 return -EFAULT;
433
434 set_thread_flag(TIF_SME);
435 current->thread.svcr |= SVCR_ZA_MASK;
436
437 return 0;
438 }
439 #else /* ! CONFIG_ARM64_SME */
440
441 /* Turn any non-optimised out attempts to use these into a link error: */
442 extern int preserve_za_context(void __user *ctx);
443 extern int restore_za_context(struct user_ctxs *user);
444
445 #endif /* ! CONFIG_ARM64_SME */
446
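/*
 * Walk the records in the __reserved[] area of the user sigcontext (and
 * in at most one extra_context area), validating each magic/size header
 * and noting where the FPSIMD, SVE and ZA records live.
 */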
447 static int parse_user_sigframe(struct user_ctxs *user,
448 struct rt_sigframe __user *sf)
449 {
450 struct sigcontext __user *const sc = &sf->uc.uc_mcontext;
451 struct _aarch64_ctx __user *head;
452 char __user *base = (char __user *)&sc->__reserved;
453 size_t offset = 0;
454 size_t limit = sizeof(sc->__reserved);
455 bool have_extra_context = false;
456 char const __user *const sfp = (char const __user *)sf;
457
458 user->fpsimd = NULL;
459 user->sve = NULL;
460 user->za = NULL;
461
462 if (!IS_ALIGNED((unsigned long)base, 16))
463 goto invalid;
464
465 while (1) {
466 int err = 0;
467 u32 magic, size;
468 char const __user *userp;
469 struct extra_context const __user *extra;
470 u64 extra_datap;
471 u32 extra_size;
472 struct _aarch64_ctx const __user *end;
473 u32 end_magic, end_size;
474
475 if (limit - offset < sizeof(*head))
476 goto invalid;
477
478 if (!IS_ALIGNED(offset, 16))
479 goto invalid;
480
481 head = (struct _aarch64_ctx __user *)(base + offset);
482 __get_user_error(magic, &head->magic, err);
483 __get_user_error(size, &head->size, err);
484 if (err)
485 return err;
486
487 if (limit - offset < size)
488 goto invalid;
489
490 switch (magic) {
491 case 0:
492 if (size)
493 goto invalid;
494
495 goto done;
496
497 case FPSIMD_MAGIC:
498 if (!system_supports_fpsimd())
499 goto invalid;
500 if (user->fpsimd)
501 goto invalid;
502
503 if (size < sizeof(*user->fpsimd))
504 goto invalid;
505
506 user->fpsimd = (struct fpsimd_context __user *)head;
507 break;
508
509 case ESR_MAGIC:
510 /* ignore */
511 break;
512
513 case SVE_MAGIC:
514 if (!system_supports_sve() && !system_supports_sme())
515 goto invalid;
516
517 if (user->sve)
518 goto invalid;
519
520 if (size < sizeof(*user->sve))
521 goto invalid;
522
523 user->sve = (struct sve_context __user *)head;
524 break;
525
526 case ZA_MAGIC:
527 if (!system_supports_sme())
528 goto invalid;
529
530 if (user->za)
531 goto invalid;
532
533 if (size < sizeof(*user->za))
534 goto invalid;
535
536 user->za = (struct za_context __user *)head;
537 break;
538
539 case EXTRA_MAGIC:
540 if (have_extra_context)
541 goto invalid;
542
543 if (size < sizeof(*extra))
544 goto invalid;
545
546 userp = (char const __user *)head;
547
548 extra = (struct extra_context const __user *)userp;
549 userp += size;
550
551 __get_user_error(extra_datap, &extra->datap, err);
552 __get_user_error(extra_size, &extra->size, err);
553 if (err)
554 return err;
555
556 /* Check for the dummy terminator in __reserved[]: */
557
558 if (limit - offset - size < TERMINATOR_SIZE)
559 goto invalid;
560
561 end = (struct _aarch64_ctx const __user *)userp;
562 userp += TERMINATOR_SIZE;
563
564 __get_user_error(end_magic, &end->magic, err);
565 __get_user_error(end_size, &end->size, err);
566 if (err)
567 return err;
568
569 if (end_magic || end_size)
570 goto invalid;
571
572 /* Prevent looping/repeated parsing of extra_context */
573 have_extra_context = true;
574
575 base = (__force void __user *)extra_datap;
576 if (!IS_ALIGNED((unsigned long)base, 16))
577 goto invalid;
578
579 if (!IS_ALIGNED(extra_size, 16))
580 goto invalid;
581
582 if (base != userp)
583 goto invalid;
584
585 /* Reject "unreasonably large" frames: */
586 if (extra_size > sfp + SIGFRAME_MAXSZ - userp)
587 goto invalid;
588
589 /*
590 * Ignore trailing terminator in __reserved[]
591 * and start parsing extra data:
592 */
593 offset = 0;
594 limit = extra_size;
595
596 if (!access_ok(base, limit))
597 goto invalid;
598
599 continue;
600
601 default:
602 goto invalid;
603 }
604
605 if (size < sizeof(*head))
606 goto invalid;
607
608 if (limit - offset < size)
609 goto invalid;
610
611 offset += size;
612 }
613
614 done:
615 return 0;
616
617 invalid:
618 return -EINVAL;
619 }
620
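/*
 * Restore the signal mask and general purpose register state from the
 * user frame, then parse the context records and restore any FPSIMD,
 * SVE and ZA state they describe.
 */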
621 static int restore_sigframe(struct pt_regs *regs,
622 struct rt_sigframe __user *sf)
623 {
624 sigset_t set;
625 int i, err;
626 struct user_ctxs user;
627
628 err = __copy_from_user(&set, &sf->uc.uc_sigmask, sizeof(set));
629 if (err == 0)
630 set_current_blocked(&set);
631
632 for (i = 0; i < 31; i++)
633 __get_user_error(regs->regs[i], &sf->uc.uc_mcontext.regs[i],
634 err);
635 __get_user_error(regs->sp, &sf->uc.uc_mcontext.sp, err);
636 __get_user_error(regs->pc, &sf->uc.uc_mcontext.pc, err);
637 __get_user_error(regs->pstate, &sf->uc.uc_mcontext.pstate, err);
638
639 /*
640 * Avoid sys_rt_sigreturn() restarting.
641 */
642 forget_syscall(regs);
643
644 err |= !valid_user_regs(&regs->user_regs, current);
645 if (err == 0)
646 err = parse_user_sigframe(&user, sf);
647
648 if (err == 0 && system_supports_fpsimd()) {
649 if (!user.fpsimd)
650 return -EINVAL;
651
652 if (user.sve) {
653 if (!system_supports_sve())
654 return -EINVAL;
655
656 err = restore_sve_fpsimd_context(&user);
657 } else {
658 err = restore_fpsimd_context(user.fpsimd);
659 }
660 }
661
662 if (err == 0 && system_supports_sme() && user.za)
663 err = restore_za_context(&user);
664
665 return err;
666 }
667
668 SYSCALL_DEFINE0(rt_sigreturn)
669 {
670 struct pt_regs *regs = current_pt_regs();
671 struct rt_sigframe __user *frame;
672
673 /* Always make any pending restarted system calls return -EINTR */
674 current->restart_block.fn = do_no_restart_syscall;
675
676 /*
677 * Since we stacked the signal frame on a 128-bit boundary, 'sp' should
678 * be 128-bit aligned here.
679 */
680 if (regs->sp & 15)
681 goto badframe;
682
683 frame = (struct rt_sigframe __user *)regs->sp;
684
685 if (!access_ok(frame, sizeof (*frame)))
686 goto badframe;
687
688 if (restore_sigframe(regs, frame))
689 goto badframe;
690
691 if (restore_altstack(&frame->uc.uc_stack))
692 goto badframe;
693
694 return regs->regs[0];
695
696 badframe:
697 arm64_notify_segfault(regs->sp);
698 return 0;
699 }
700
701 /*
702 * Determine the layout of optional records in the signal frame
703 *
704 * add_all: if true, lays out the biggest possible signal frame for
705 * this task; otherwise, generates a layout for the current state
706 * of the task.
707 */
708 static int setup_sigframe_layout(struct rt_sigframe_user_layout *user,
709 bool add_all)
710 {
711 int err;
712
713 if (system_supports_fpsimd()) {
714 err = sigframe_alloc(user, &user->fpsimd_offset,
715 sizeof(struct fpsimd_context));
716 if (err)
717 return err;
718 }
719
720 /* fault information, if valid */
721 if (add_all || current->thread.fault_code) {
722 err = sigframe_alloc(user, &user->esr_offset,
723 sizeof(struct esr_context));
724 if (err)
725 return err;
726 }
727
728 if (system_supports_sve()) {
729 unsigned int vq = 0;
730
731 if (add_all || test_thread_flag(TIF_SVE) ||
732 thread_sm_enabled(&current->thread)) {
733 int vl = max(sve_max_vl(), sme_max_vl());
734
735 if (!add_all)
736 vl = thread_get_cur_vl(&current->thread);
737
738 vq = sve_vq_from_vl(vl);
739 }
740
741 err = sigframe_alloc(user, &user->sve_offset,
742 SVE_SIG_CONTEXT_SIZE(vq));
743 if (err)
744 return err;
745 }
746
747 if (system_supports_sme()) {
748 unsigned int vl;
749 unsigned int vq = 0;
750
751 if (add_all)
752 vl = sme_max_vl();
753 else
754 vl = task_get_sme_vl(current);
755
756 if (thread_za_enabled(&current->thread))
757 vq = sve_vq_from_vl(vl);
758
759 err = sigframe_alloc(user, &user->za_offset,
760 ZA_SIG_CONTEXT_SIZE(vq));
761 if (err)
762 return err;
763 }
764
765 return sigframe_alloc_end(user);
766 }
767
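/*
 * Populate the signal frame laid out by setup_sigframe_layout(): the
 * unwinder frame record, the general purpose registers, the signal mask
 * and each optional context record, finishing with the terminator.
 */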
768 static int setup_sigframe(struct rt_sigframe_user_layout *user,
769 struct pt_regs *regs, sigset_t *set)
770 {
771 int i, err = 0;
772 struct rt_sigframe __user *sf = user->sigframe;
773
774 /* set up the stack frame for unwinding */
775 __put_user_error(regs->regs[29], &user->next_frame->fp, err);
776 __put_user_error(regs->regs[30], &user->next_frame->lr, err);
777
778 for (i = 0; i < 31; i++)
779 __put_user_error(regs->regs[i], &sf->uc.uc_mcontext.regs[i],
780 err);
781 __put_user_error(regs->sp, &sf->uc.uc_mcontext.sp, err);
782 __put_user_error(regs->pc, &sf->uc.uc_mcontext.pc, err);
783 __put_user_error(regs->pstate, &sf->uc.uc_mcontext.pstate, err);
784
785 __put_user_error(current->thread.fault_address, &sf->uc.uc_mcontext.fault_address, err);
786
787 err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(*set));
788
789 if (err == 0 && system_supports_fpsimd()) {
790 struct fpsimd_context __user *fpsimd_ctx =
791 apply_user_offset(user, user->fpsimd_offset);
792 err |= preserve_fpsimd_context(fpsimd_ctx);
793 }
794
795 /* fault information, if valid */
796 if (err == 0 && user->esr_offset) {
797 struct esr_context __user *esr_ctx =
798 apply_user_offset(user, user->esr_offset);
799
800 __put_user_error(ESR_MAGIC, &esr_ctx->head.magic, err);
801 __put_user_error(sizeof(*esr_ctx), &esr_ctx->head.size, err);
802 __put_user_error(current->thread.fault_code, &esr_ctx->esr, err);
803 }
804
805 /* Scalable Vector Extension state (including streaming), if present */
806 if ((system_supports_sve() || system_supports_sme()) &&
807 err == 0 && user->sve_offset) {
808 struct sve_context __user *sve_ctx =
809 apply_user_offset(user, user->sve_offset);
810 err |= preserve_sve_context(sve_ctx);
811 }
812
813 /* ZA state if present */
814 if (system_supports_sme() && err == 0 && user->za_offset) {
815 struct za_context __user *za_ctx =
816 apply_user_offset(user, user->za_offset);
817 err |= preserve_za_context(za_ctx);
818 }
819
820 if (err == 0 && user->extra_offset) {
821 char __user *sfp = (char __user *)user->sigframe;
822 char __user *userp =
823 apply_user_offset(user, user->extra_offset);
824
825 struct extra_context __user *extra;
826 struct _aarch64_ctx __user *end;
827 u64 extra_datap;
828 u32 extra_size;
829
830 extra = (struct extra_context __user *)userp;
831 userp += EXTRA_CONTEXT_SIZE;
832
833 end = (struct _aarch64_ctx __user *)userp;
834 userp += TERMINATOR_SIZE;
835
836 /*
837 * extra_datap is just written to the signal frame.
838 * The value gets cast back to a void __user *
839 * during sigreturn.
840 */
841 extra_datap = (__force u64)userp;
842 extra_size = sfp + round_up(user->size, 16) - userp;
843
844 __put_user_error(EXTRA_MAGIC, &extra->head.magic, err);
845 __put_user_error(EXTRA_CONTEXT_SIZE, &extra->head.size, err);
846 __put_user_error(extra_datap, &extra->datap, err);
847 __put_user_error(extra_size, &extra->size, err);
848
849 /* Add the terminator */
850 __put_user_error(0, &end->magic, err);
851 __put_user_error(0, &end->size, err);
852 }
853
854 /* set the "end" magic */
855 if (err == 0) {
856 struct _aarch64_ctx __user *end =
857 apply_user_offset(user, user->end_offset);
858
859 __put_user_error(0, &end->magic, err);
860 __put_user_error(0, &end->size, err);
861 }
862
863 return err;
864 }
865
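/*
 * Compute the frame layout and carve the signal frame plus a frame
 * record out of the (possibly alternate) signal stack, checking that the
 * resulting area is writable.
 */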
866 static int get_sigframe(struct rt_sigframe_user_layout *user,
867 struct ksignal *ksig, struct pt_regs *regs)
868 {
869 unsigned long sp, sp_top;
870 int err;
871
872 init_user_layout(user);
873 err = setup_sigframe_layout(user, false);
874 if (err)
875 return err;
876
877 sp = sp_top = sigsp(regs->sp, ksig);
878
879 sp = round_down(sp - sizeof(struct frame_record), 16);
880 user->next_frame = (struct frame_record __user *)sp;
881
882 sp = round_down(sp, 16) - sigframe_size(user);
883 user->sigframe = (struct rt_sigframe __user *)sp;
884
885 /*
886 * Check that we can actually write to the signal frame.
887 */
888 if (!access_ok(user->sigframe, sp_top - sp))
889 return -EFAULT;
890
891 return 0;
892 }
893
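/*
 * Point the registers at the handler: x0 = signal number, sp/fp at the
 * new frame, pc at the handler and lr at the sigreturn trampoline, with
 * PSTATE and SME state adjusted for handler entry.
 */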
894 static void setup_return(struct pt_regs *regs, struct k_sigaction *ka,
895 struct rt_sigframe_user_layout *user, int usig)
896 {
897 __sigrestore_t sigtramp;
898
899 regs->regs[0] = usig;
900 regs->sp = (unsigned long)user->sigframe;
901 regs->regs[29] = (unsigned long)&user->next_frame->fp;
902 regs->pc = (unsigned long)ka->sa.sa_handler;
903
904 /*
905 * Signal delivery is a (wacky) indirect function call in
906 * userspace, so simulate the same setting of BTYPE as a BLR
907 * <register containing the signal handler entry point>.
908 * Signal delivery to a location in a PROT_BTI guarded page
909 * that is not a function entry point will now trigger a
910 * SIGILL in userspace.
911 *
912 * If the signal handler entry point is not in a PROT_BTI
913 * guarded page, this is harmless.
914 */
915 if (system_supports_bti()) {
916 regs->pstate &= ~PSR_BTYPE_MASK;
917 regs->pstate |= PSR_BTYPE_C;
918 }
919
920 /* TCO (Tag Check Override) always cleared for signal handlers */
921 regs->pstate &= ~PSR_TCO_BIT;
922
923 /* Signal handlers are invoked with ZA and streaming mode disabled */
924 if (system_supports_sme()) {
925 /*
926 * If we were in streaming mode the saved register
927 * state was SVE but we will exit SM and use the
928 * FPSIMD register state - flush the saved FPSIMD
929 * register state in case it gets loaded.
930 */
931 if (current->thread.svcr & SVCR_SM_MASK)
932 memset(&current->thread.uw.fpsimd_state, 0,
933 sizeof(current->thread.uw.fpsimd_state));
934
935 current->thread.svcr &= ~(SVCR_ZA_MASK |
936 SVCR_SM_MASK);
937 sme_smstop();
938 }
939
940 if (ka->sa.sa_flags & SA_RESTORER)
941 sigtramp = ka->sa.sa_restorer;
942 else
943 sigtramp = VDSO_SYMBOL(current->mm->context.vdso, sigtramp);
944
945 regs->regs[30] = (unsigned long)sigtramp;
946 }
947
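/* Build the rt signal frame on the user stack and prepare handler entry */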
948 static int setup_rt_frame(int usig, struct ksignal *ksig, sigset_t *set,
949 struct pt_regs *regs)
950 {
951 struct rt_sigframe_user_layout user;
952 struct rt_sigframe __user *frame;
953 int err = 0;
954
955 fpsimd_signal_preserve_current_state();
956
957 if (get_sigframe(&user, ksig, regs))
958 return 1;
959
960 frame = user.sigframe;
961
962 __put_user_error(0, &frame->uc.uc_flags, err);
963 __put_user_error(NULL, &frame->uc.uc_link, err);
964
965 err |= __save_altstack(&frame->uc.uc_stack, regs->sp);
966 err |= setup_sigframe(&user, regs, set);
967 if (err == 0) {
968 setup_return(regs, &ksig->ka, &user, usig);
969 if (ksig->ka.sa.sa_flags & SA_SIGINFO) {
970 err |= copy_siginfo_to_user(&frame->info, &ksig->info);
971 regs->regs[1] = (unsigned long)&frame->info;
972 regs->regs[2] = (unsigned long)&frame->uc;
973 }
974 }
975
976 return err;
977 }
978
979 static void setup_restart_syscall(struct pt_regs *regs)
980 {
981 if (is_compat_task())
982 compat_setup_restart_syscall(regs);
983 else
984 regs->regs[8] = __NR_restart_syscall;
985 }
986
987 /*
988 * OK, we're invoking a handler
989 */
990 static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
991 {
992 sigset_t *oldset = sigmask_to_save();
993 int usig = ksig->sig;
994 int ret;
995
996 rseq_signal_deliver(ksig, regs);
997
998 /*
999 * Set up the stack frame
1000 */
1001 if (is_compat_task()) {
1002 if (ksig->ka.sa.sa_flags & SA_SIGINFO)
1003 ret = compat_setup_rt_frame(usig, ksig, oldset, regs);
1004 else
1005 ret = compat_setup_frame(usig, ksig, oldset, regs);
1006 } else {
1007 ret = setup_rt_frame(usig, ksig, oldset, regs);
1008 }
1009
1010 /*
1011 * Check that the resulting registers are actually sane.
1012 */
1013 ret |= !valid_user_regs(&regs->user_regs, current);
1014
1015 /* Step into the signal handler if we are stepping */
1016 signal_setup_done(ret, ksig, test_thread_flag(TIF_SINGLESTEP));
1017 }
1018
1019 /*
1020 * Note that 'init' is a special process: it doesn't get signals it doesn't
1021 * want to handle. Thus you cannot kill init even with a SIGKILL even by
1022 * mistake.
1023 *
1024 * Note that we go through the signals twice: once to check the signals that
1025 * the kernel can handle, and then we build all the user-level signal handling
1026 * stack-frames in one go after that.
1027 */
1028 static void do_signal(struct pt_regs *regs)
1029 {
1030 unsigned long continue_addr = 0, restart_addr = 0;
1031 int retval = 0;
1032 struct ksignal ksig;
1033 bool syscall = in_syscall(regs);
1034
1035 /*
1036 * If we were from a system call, check for system call restarting...
1037 */
1038 if (syscall) {
1039 continue_addr = regs->pc;
1040 restart_addr = continue_addr - (compat_thumb_mode(regs) ? 2 : 4);
1041 retval = regs->regs[0];
1042
1043 /*
1044 * Avoid additional syscall restarting via ret_to_user.
1045 */
1046 forget_syscall(regs);
1047
1048 /*
1049 * Prepare for system call restart. We do this here so that a
1050 * debugger will see the already changed PC.
1051 */
1052 switch (retval) {
1053 case -ERESTARTNOHAND:
1054 case -ERESTARTSYS:
1055 case -ERESTARTNOINTR:
1056 case -ERESTART_RESTARTBLOCK:
1057 regs->regs[0] = regs->orig_x0;
1058 regs->pc = restart_addr;
1059 break;
1060 }
1061 }
1062
1063 /*
1064 * Get the signal to deliver. When running under ptrace, at this point
1065 * the debugger may change all of our registers.
1066 */
1067 if (get_signal(&ksig)) {
1068 /*
1069 * Depending on the signal settings, we may need to revert the
1070 * decision to restart the system call, but skip this if a
1071 * debugger has chosen to restart at a different PC.
1072 */
1073 if (regs->pc == restart_addr &&
1074 (retval == -ERESTARTNOHAND ||
1075 retval == -ERESTART_RESTARTBLOCK ||
1076 (retval == -ERESTARTSYS &&
1077 !(ksig.ka.sa.sa_flags & SA_RESTART)))) {
1078 syscall_set_return_value(current, regs, -EINTR, 0);
1079 regs->pc = continue_addr;
1080 }
1081
1082 handle_signal(&ksig, regs);
1083 return;
1084 }
1085
1086 /*
1087 * Handle restarting a different system call. As above, if a debugger
1088 * has chosen to restart at a different PC, ignore the restart.
1089 */
1090 if (syscall && regs->pc == restart_addr) {
1091 if (retval == -ERESTART_RESTARTBLOCK)
1092 setup_restart_syscall(regs);
1093 user_rewind_single_step(current);
1094 }
1095
1096 restore_saved_sigmask();
1097 }
1098
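/*
 * Pending-work loop run on return to userspace: handles rescheduling,
 * uprobes, asynchronous MTE faults, signal delivery and stale FP state,
 * re-checking the thread flags with DAIF masked between iterations.
 */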
1099 void do_notify_resume(struct pt_regs *regs, unsigned long thread_flags)
1100 {
1101 do {
1102 if (thread_flags & _TIF_NEED_RESCHED) {
1103 /* Unmask Debug and SError for the next task */
1104 local_daif_restore(DAIF_PROCCTX_NOIRQ);
1105
1106 schedule();
1107 } else {
1108 local_daif_restore(DAIF_PROCCTX);
1109
1110 if (thread_flags & _TIF_UPROBE)
1111 uprobe_notify_resume(regs);
1112
1113 if (thread_flags & _TIF_MTE_ASYNC_FAULT) {
1114 clear_thread_flag(TIF_MTE_ASYNC_FAULT);
1115 send_sig_fault(SIGSEGV, SEGV_MTEAERR,
1116 (void __user *)NULL, current);
1117 }
1118
1119 if (thread_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL))
1120 do_signal(regs);
1121
1122 if (thread_flags & _TIF_NOTIFY_RESUME)
1123 resume_user_mode_work(regs);
1124
1125 if (thread_flags & _TIF_FOREIGN_FPSTATE)
1126 fpsimd_restore_current_state();
1127 }
1128
1129 local_daif_mask();
1130 thread_flags = read_thread_flags();
1131 } while (thread_flags & _TIF_WORK_MASK);
1132 }
1133
1134 unsigned long __ro_after_init signal_minsigstksz;
1135
1136 /*
1137 * Determine the stack space required for guaranteed signal delivery.
1138 * This function is used to populate AT_MINSIGSTKSZ at process startup.
1139 * cpufeatures setup is assumed to be complete.
1140 */
1141 void __init minsigstksz_setup(void)
1142 {
1143 struct rt_sigframe_user_layout user;
1144
1145 init_user_layout(&user);
1146
1147 /*
1148 * If this fails, SIGFRAME_MAXSZ needs to be enlarged. It won't
1149 * be big enough, but it's our best guess:
1150 */
1151 if (WARN_ON(setup_sigframe_layout(&user, true)))
1152 return;
1153
1154 signal_minsigstksz = sigframe_size(&user) +
1155 round_up(sizeof(struct frame_record), 16) +
1156 16; /* max alignment padding */
1157 }
1158
1159 /*
1160 * Compile-time assertions for siginfo_t offsets. Check NSIG* as well, as
1161 * changes likely come with new fields that should be added below.
1162 */
1163 static_assert(NSIGILL == 11);
1164 static_assert(NSIGFPE == 15);
1165 static_assert(NSIGSEGV == 9);
1166 static_assert(NSIGBUS == 5);
1167 static_assert(NSIGTRAP == 6);
1168 static_assert(NSIGCHLD == 6);
1169 static_assert(NSIGSYS == 2);
1170 static_assert(sizeof(siginfo_t) == 128);
1171 static_assert(__alignof__(siginfo_t) == 8);
1172 static_assert(offsetof(siginfo_t, si_signo) == 0x00);
1173 static_assert(offsetof(siginfo_t, si_errno) == 0x04);
1174 static_assert(offsetof(siginfo_t, si_code) == 0x08);
1175 static_assert(offsetof(siginfo_t, si_pid) == 0x10);
1176 static_assert(offsetof(siginfo_t, si_uid) == 0x14);
1177 static_assert(offsetof(siginfo_t, si_tid) == 0x10);
1178 static_assert(offsetof(siginfo_t, si_overrun) == 0x14);
1179 static_assert(offsetof(siginfo_t, si_status) == 0x18);
1180 static_assert(offsetof(siginfo_t, si_utime) == 0x20);
1181 static_assert(offsetof(siginfo_t, si_stime) == 0x28);
1182 static_assert(offsetof(siginfo_t, si_value) == 0x18);
1183 static_assert(offsetof(siginfo_t, si_int) == 0x18);
1184 static_assert(offsetof(siginfo_t, si_ptr) == 0x18);
1185 static_assert(offsetof(siginfo_t, si_addr) == 0x10);
1186 static_assert(offsetof(siginfo_t, si_addr_lsb) == 0x18);
1187 static_assert(offsetof(siginfo_t, si_lower) == 0x20);
1188 static_assert(offsetof(siginfo_t, si_upper) == 0x28);
1189 static_assert(offsetof(siginfo_t, si_pkey) == 0x20);
1190 static_assert(offsetof(siginfo_t, si_perf_data) == 0x18);
1191 static_assert(offsetof(siginfo_t, si_perf_type) == 0x20);
1192 static_assert(offsetof(siginfo_t, si_perf_flags) == 0x24);
1193 static_assert(offsetof(siginfo_t, si_band) == 0x10);
1194 static_assert(offsetof(siginfo_t, si_fd) == 0x18);
1195 static_assert(offsetof(siginfo_t, si_call_addr) == 0x10);
1196 static_assert(offsetof(siginfo_t, si_syscall) == 0x18);
1197 static_assert(offsetof(siginfo_t, si_arch) == 0x1c);
1198