/* Save current context and install the given one.
   Copyright (C) 2002-2022 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

#include <sysdep.h>
#include <asm/prctl.h>

#include "ucontext_i.h"


/* int __swapcontext (ucontext_t *oucp, const ucontext_t *ucp);

   Saves the machine context in oucp such that when it is activated,
   it appears as if __swapcontext() returned again, restores the
   machine context in ucp and thereby resumes execution in that
   context.

   This implementation is intended to be used for *synchronous* context
   switches only.  Therefore, it does not have to save anything
   other than the PRESERVED state.  */

ENTRY(__swapcontext)
	/* Save the preserved registers, the registers used for passing args,
	   and the return address.  */
	movq	%rbx, oRBX(%rdi)
	movq	%rbp, oRBP(%rdi)
	movq	%r12, oR12(%rdi)
	movq	%r13, oR13(%rdi)
	movq	%r14, oR14(%rdi)
	movq	%r15, oR15(%rdi)

	movq	%rdi, oRDI(%rdi)
	movq	%rsi, oRSI(%rdi)
	movq	%rdx, oRDX(%rdi)
	movq	%rcx, oRCX(%rdi)
	movq	%r8, oR8(%rdi)
	movq	%r9, oR9(%rdi)

	movq	(%rsp), %rcx
	movq	%rcx, oRIP(%rdi)
	leaq	8(%rsp), %rcx		/* Exclude the return address.  */
	movq	%rcx, oRSP(%rdi)

	/* We have separate floating-point register content memory on the
	   stack.  We use the __fpregs_mem block in the context.  Set the
	   links up correctly.  */
	leaq	oFPREGSMEM(%rdi), %rcx
	movq	%rcx, oFPREGS(%rdi)
	/* Save the floating-point environment.  */
	fnstenv	(%rcx)
	stmxcsr oMXCSR(%rdi)


	/* The syscall destroys some registers, save them.  */
	movq	%rsi, %r12
	movq	%rdi, %r9

	/* Save the current signal mask and install the new one with
	   rt_sigprocmask (SIG_SETMASK, newset, oldset, _NSIG/8).  */
	leaq	oSIGMASK(%rdi), %rdx
	leaq	oSIGMASK(%rsi), %rsi
	movl	$SIG_SETMASK, %edi
	movl	$_NSIG8,%r10d
	movl	$__NR_rt_sigprocmask, %eax
	syscall
	cmpq	$-4095, %rax		/* Check %rax for error.  */
	jae	SYSCALL_ERROR_LABEL	/* Jump to error handler if error.  */

	/* Restore destroyed register into RDX.  The choice is arbitrary,
	   but leaving RDI and RSI available for use later can avoid
	   shuffling values.  */
	movq	%r12, %rdx

	/* Restore the floating-point context.  Not the registers, only the
	   rest.  */
	movq	oFPREGS(%rdx), %rcx
	fldenv	(%rcx)
	ldmxcsr oMXCSR(%rdx)

	/* Load the new stack pointer and the preserved registers.  */
	movq	oRSP(%rdx), %rsp
	movq	oRBX(%rdx), %rbx
	movq	oRBP(%rdx), %rbp
	movq	oR12(%rdx), %r12
	movq	oR13(%rdx), %r13
	movq	oR14(%rdx), %r14
	movq	oR15(%rdx), %r15

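	/* With CET shadow stacks enabled, ret checks the return address
	   against the top of the shadow stack, so restoring RSP and RIP
	   alone is not enough: the shadow stack pointer must be unwound
	   or switched to match the target context.  The block below
	   records the current SSP and shadow stack base in oucp and then
	   restores the target's.  */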
#if SHSTK_ENABLED
	/* Check if shadow stack is enabled.  */
	testl	$X86_FEATURE_1_SHSTK, %fs:FEATURE_1_OFFSET
	jz	L(no_shstk)

	xorl	%eax, %eax
	cmpq	%fs:SSP_BASE_OFFSET, %rax
	jnz	L(shadow_stack_bound_recorded)

	/* Get the base address and size of the default shadow stack
	   which must be the current shadow stack since nothing has
	   been recorded yet.  */
	sub	$24, %RSP_LP
	mov	%RSP_LP, %RSI_LP
	movl	$ARCH_CET_STATUS, %edi
	movl	$__NR_arch_prctl, %eax
	syscall
	testq	%rax, %rax
	jz	L(continue_no_err)

	/* This should never happen.  */
	hlt

L(continue_no_err):
	/* Record the base of the current shadow stack.  */
	movq	8(%rsp), %rax
	movq	%rax, %fs:SSP_BASE_OFFSET
	add	$24, %RSP_LP

L(shadow_stack_bound_recorded):
	/* If we unwind the stack, we can't undo stack unwinding.  Just
	   save the target shadow stack pointer as the current shadow
	   stack pointer.  */
	movq	oSSP(%rdx), %rcx
	movq	%rcx, oSSP(%r9)

	/* Save the base of the current shadow stack.  */
	movq	%fs:SSP_BASE_OFFSET, %rax
	movq	%rax, (oSSP + 8)(%r9)

	/* If the base of the target shadow stack is the same as the
	   base of the current shadow stack, we unwind the shadow
	   stack.  Otherwise it is a stack switch and we look for a
	   restore token.  */
	movq	oSSP(%rdx), %rsi
	movq	%rsi, %rdi

	/* Get the base of the target shadow stack.  */
	movq	(oSSP + 8)(%rdx), %rcx
	cmpq	%fs:SSP_BASE_OFFSET, %rcx
	je	L(unwind_shadow_stack)

L(find_restore_token_loop):
	/* Look for a restore token.  */
	movq	-8(%rsi), %rax
	andq	$-8, %rax
	cmpq	%rsi, %rax
	je	L(restore_shadow_stack)

	/* Try the next slot.  */
	subq	$8, %rsi
	jmp	L(find_restore_token_loop)

L(restore_shadow_stack):
	/* The target shadow stack will be restored.  Save the current
	   shadow stack pointer.  */
	rdsspq	%rcx
	movq	%rcx, oSSP(%r9)

	/* Restore the target shadow stack.  */
	rstorssp	-8(%rsi)

	/* Save the restore token on the old shadow stack.  NB: This
	   restore token may be checked by setcontext or swapcontext
	   later.  */
	saveprevssp

	/* Record the new shadow stack base that was switched to.  */
	movq	(oSSP + 8)(%rdx), %rax
	movq	%rax, %fs:SSP_BASE_OFFSET

L(unwind_shadow_stack):
	rdsspq	%rcx
	subq	%rdi, %rcx
	je	L(skip_unwind_shadow_stack)
	negq	%rcx
	shrq	$3, %rcx
	movl	$255, %esi
L(loop):
	cmpq	%rsi, %rcx
	cmovb	%rcx, %rsi
	incsspq	%rsi
	subq	%rsi, %rcx
	ja	L(loop)

L(skip_unwind_shadow_stack):
	/* Setup registers used for passing args.  */
	movq	oRDI(%rdx), %rdi
	movq	oRSI(%rdx), %rsi
	movq	oRCX(%rdx), %rcx
	movq	oR8(%rdx), %r8
	movq	oR9(%rdx), %r9

	/* Get the return address set with getcontext.  */
	movq	oRIP(%rdx), %r10

	/* Setup finally %rdx.  */
	movq	oRDX(%rdx), %rdx

	/* Check if return address is valid for the case when setcontext
	   is invoked from __start_context with linked context.  */
	rdsspq	%rax
	cmpq	(%rax), %r10
	/* Clear rax to indicate success.  NB: Don't use xorl to keep
	   EFLAGS for jne.  */
	movl	$0, %eax
	jne	L(jmp)
	/* Return to the new context if return address valid.  */
	pushq	%r10
	ret

L(jmp):
	/* Jump to the new context directly.  */
	jmp	*%r10

L(no_shstk):
#endif
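	/* This point is reached when shadow stacks are disabled or not
	   compiled in.  %rdx still holds ucp here, so it is reloaded
	   last, after every other field has been read from the context.  */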
	/* The following ret should return to the address set with
	   getcontext.  Therefore push the address on the stack.  */
	movq	oRIP(%rdx), %rcx
	pushq	%rcx

	/* Setup registers used for passing args.  */
	movq	oRDI(%rdx), %rdi
	movq	oRSI(%rdx), %rsi
	movq	oRCX(%rdx), %rcx
	movq	oR8(%rdx), %r8
	movq	oR9(%rdx), %r9

	/* Setup finally %rdx.  */
	movq	oRDX(%rdx), %rdx

	/* Clear rax to indicate success.  */
	xorl	%eax, %eax
	ret
PSEUDO_END(__swapcontext)

weak_alias (__swapcontext, swapcontext)
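
/* A minimal usage sketch for reference (not part of this file's build):
   swapcontext pairs with getcontext/makecontext to switch between
   cooperatively scheduled contexts.  The names coroutine, co_stack,
   main_ctx and co_ctx as well as the 64 KiB stack size are arbitrary
   illustrations; error checking is omitted.

     #include <stdio.h>
     #include <ucontext.h>

     static ucontext_t main_ctx, co_ctx;
     static char co_stack[64 * 1024];

     static void coroutine (void)
     {
       puts ("in coroutine");
       // Save this context in co_ctx and resume main_ctx right after
       // its swapcontext call.
       swapcontext (&co_ctx, &main_ctx);
     }

     int main (void)
     {
       getcontext (&co_ctx);
       co_ctx.uc_stack.ss_sp = co_stack;
       co_ctx.uc_stack.ss_size = sizeof co_stack;
       co_ctx.uc_link = &main_ctx;
       makecontext (&co_ctx, coroutine, 0);
       swapcontext (&main_ctx, &co_ctx);  // runs coroutine, then returns here
       puts ("back in main");
       return 0;
     }
*/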