/* Install given context.
   Copyright (C) 2002-2022 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

#include <sysdep.h>
#include <asm/prctl.h>

#include "ucontext_i.h"


/*  int __setcontext (const ucontext_t *ucp)

  Restores the machine context in UCP and thereby resumes execution
  in that context.

  This implementation is intended to be used for *synchronous* context
  switches only.  Therefore, it does not have to restore anything
  other than the PRESERVED state.  */
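/* For illustration only: a minimal C-level sketch of how this routine
   is typically reached through the standard <ucontext.h> interfaces.
   The function name "entry" and the stack size are placeholders, not
   part of this file; setcontext does not return on success.

     ucontext_t ctx;
     static char stack[64 * 1024];

     getcontext (&ctx);
     ctx.uc_stack.ss_sp = stack;
     ctx.uc_stack.ss_size = sizeof stack;
     ctx.uc_link = NULL;
     makecontext (&ctx, entry, 0);
     setcontext (&ctx);  */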

ENTRY(__setcontext)
	/* Save argument since syscall will destroy it.  */
	pushq	%rdi
	cfi_adjust_cfa_offset(8)

	/* Set the signal mask with
	   rt_sigprocmask (SIG_SETMASK, mask, NULL, _NSIG/8).  */
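	/* In C terms this is roughly
	   sigprocmask (SIG_SETMASK, &ucp->uc_sigmask, NULL);
	   oSIGMASK is the offset of uc_sigmask within ucontext_t.  */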
	leaq	oSIGMASK(%rdi), %rsi
	xorl	%edx, %edx
	movl	$SIG_SETMASK, %edi
	movl	$_NSIG8,%r10d
	movl	$__NR_rt_sigprocmask, %eax
	syscall
	/* Pop the pointer into RDX. The choice is arbitrary, but
	   leaving RDI and RSI available for use later can avoid
	   shuffling values.  */
	popq	%rdx
	cfi_adjust_cfa_offset(-8)
	cmpq	$-4095, %rax		/* Check %rax for error.  */
	jae	SYSCALL_ERROR_LABEL	/* Jump to error handler if error.  */

	/* Restore the floating-point context: only the control and
	   status environment (the x87 environment and MXCSR), not the
	   register contents.  */
	movq	oFPREGS(%rdx), %rcx
	fldenv	(%rcx)
	ldmxcsr oMXCSR(%rdx)


	/* Load the new stack pointer, the preserved registers and
	   registers used for passing args.  */
	cfi_def_cfa(%rdx, 0)
	cfi_offset(%rbx,oRBX)
	cfi_offset(%rbp,oRBP)
	cfi_offset(%r12,oR12)
	cfi_offset(%r13,oR13)
	cfi_offset(%r14,oR14)
	cfi_offset(%r15,oR15)
	cfi_offset(%rsp,oRSP)
	cfi_offset(%rip,oRIP)

	movq	oRSP(%rdx), %rsp
	movq	oRBX(%rdx), %rbx
	movq	oRBP(%rdx), %rbp
	movq	oR12(%rdx), %r12
	movq	oR13(%rdx), %r13
	movq	oR14(%rdx), %r14
	movq	oR15(%rdx), %r15

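	/* With the Intel CET shadow stack enabled, the hardware keeps a
	   separate stack of return addresses that has to be brought in
	   sync with the target context as well: the block below either
	   unwinds the current shadow stack or switches to the target
	   one via a restore token.  */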
#if SHSTK_ENABLED
	/* Check if shadow stack is enabled.  */
	testl	$X86_FEATURE_1_SHSTK, %fs:FEATURE_1_OFFSET
	jz	L(no_shstk)

	/* If the base of the target shadow stack is the same as the
	   base of the current shadow stack, we unwind the shadow
	   stack.  Otherwise it is a stack switch and we look for a
	   restore token.  */
	movq	oSSP(%rdx), %rsi
	movq	%rsi, %rdi

	/* Get the base of the target shadow stack.  */
	movq	(oSSP + 8)(%rdx), %rcx
	cmpq	%fs:SSP_BASE_OFFSET, %rcx
	je	L(unwind_shadow_stack)

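	/* A shadow-stack restore token holds, apart from its low flag
	   bits, the shadow stack pointer value just above the token
	   itself, so masking off the low three bits and comparing the
	   result against the candidate SSP detects a token.  */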
L(find_restore_token_loop):
	/* Look for a restore token.  */
	movq	-8(%rsi), %rax
	andq	$-8, %rax
	cmpq	%rsi, %rax
	je	L(restore_shadow_stack)

	/* Try the next slot.  */
	subq	$8, %rsi
	jmp	L(find_restore_token_loop)

L(restore_shadow_stack):
	/* Pop return address from the shadow stack since setcontext
	   will not return.  */
	movq	$1, %rax
	incsspq	%rax

	/* Use the restore token to restore the target shadow stack.  */
	rstorssp -8(%rsi)

	/* Save the restore token on the old shadow stack.  NB: This
	   restore token may be checked by setcontext or swapcontext
	   later.  */
	saveprevssp

	/* Record the new shadow stack base that was switched to.  */
	movq	(oSSP + 8)(%rdx), %rax
	movq	%rax, %fs:SSP_BASE_OFFSET

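	/* Unwind the current shadow stack up to the target SSP.
	   INCSSPQ only uses the low 8 bits of its operand, so the
	   entries are popped in chunks of at most 255.  */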
L(unwind_shadow_stack):
	rdsspq	%rcx
	subq	%rdi, %rcx
	je	L(skip_unwind_shadow_stack)
	negq	%rcx
	shrq	$3, %rcx
	movl	$255, %esi
L(loop):
	cmpq	%rsi, %rcx
	cmovb	%rcx, %rsi
	incsspq	%rsi
	subq	%rsi, %rcx
	ja	L(loop)

L(skip_unwind_shadow_stack):
	movq	oRSI(%rdx), %rsi
	movq	oRDI(%rdx), %rdi
	movq	oRCX(%rdx), %rcx
	movq	oR8(%rdx), %r8
	movq	oR9(%rdx), %r9

	/* Get the return address set with getcontext.  */
	movq	oRIP(%rdx), %r10

	/* Set up %rdx last since it still holds the context pointer.  */
	movq	oRDX(%rdx), %rdx

	/* Check if return address is valid for the case when setcontext
	   is invoked from __start_context with linked context.  */
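	/* RDSSPQ reads the current shadow stack pointer.  If the target
	   RIP matches the return address on top of the shadow stack, a
	   normal push/ret keeps the two stacks consistent; otherwise
	   control transfers with an indirect jump to avoid a shadow
	   stack mismatch on ret.  */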
	rdsspq	%rax
	cmpq	(%rax), %r10
	/* Clear RAX to indicate success.  NB: Don't use xorl to keep
	   EFLAGS for jne.  */
	movl	$0, %eax
	jne	L(jmp)
	/* Return to the new context if return address valid.  */
	pushq	%r10
	ret

L(jmp):
	/* Jump to the new context directly.  */
	jmp	*%r10

L(no_shstk):
#endif
	/* The following ret should return to the address set with
	   getcontext.  Therefore push the address on the stack.  */
	movq	oRIP(%rdx), %rcx
	pushq	%rcx

	movq	oRSI(%rdx), %rsi
	movq	oRDI(%rdx), %rdi
	movq	oRCX(%rdx), %rcx
	movq	oR8(%rdx), %r8
	movq	oR9(%rdx), %r9

	/* Set up %rdx last since it still holds the context pointer.  */
	movq	oRDX(%rdx), %rdx

	/* End FDE here, we fall into another context.  */
	cfi_endproc
	cfi_startproc

	/* Clear rax to indicate success.  */
	xorl	%eax, %eax
	ret
PSEUDO_END(__setcontext)

weak_alias (__setcontext, setcontext)