// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012-2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#ifndef __ARM64_KVM_HYP_SYSREG_SR_H__
#define __ARM64_KVM_HYP_SYSREG_SR_H__

#include <linux/compiler.h>
#include <linux/kvm_host.h>

#include <asm/kprobes.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>

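/*
 * Save the state that is common to host and guest and not covered by a
 * more specific helper: currently just MDSCR_EL1, the monitor debug
 * system control register.
 */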
static inline void __sysreg_save_common_state(struct kvm_cpu_context *ctxt)
{
	ctxt_sys_reg(ctxt, MDSCR_EL1) = read_sysreg(mdscr_el1);
}

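/*
 * Save the EL0 thread pointer registers, which are directly accessible
 * to userspace and are therefore handled separately from the rest of
 * the EL1 state.
 */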
static inline void __sysreg_save_user_state(struct kvm_cpu_context *ctxt)
{
	ctxt_sys_reg(ctxt, TPIDR_EL0) = read_sysreg(tpidr_el0);
	ctxt_sys_reg(ctxt, TPIDRRO_EL0) = read_sysreg(tpidrro_el0);
}

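/*
 * Find the vcpu that owns @ctxt so the VM's MTE feature can be
 * checked: a host context carries a pointer to the running vcpu in
 * __hyp_running_vcpu, while a guest context is embedded in its vcpu
 * and is recovered with container_of(). kern_hyp_va() makes the kvm
 * pointer usable at hyp (it is a no-op on VHE).
 */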
static inline bool ctxt_has_mte(struct kvm_cpu_context *ctxt)
{
	struct kvm_vcpu *vcpu = ctxt->__hyp_running_vcpu;

	if (!vcpu)
		vcpu = container_of(ctxt, struct kvm_vcpu, arch.ctxt);

	return kvm_has_mte(kern_hyp_va(vcpu->kvm));
}

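/*
 * Save the guest-visible EL1 system register state. The
 * read_sysreg_el1() accessors resolve to the _EL12 encodings when
 * running VHE, so the real EL1 registers are accessed in either case;
 * PAR_EL1 goes through read_sysreg_par(), which applies the relevant
 * erratum workaround on affected CPUs.
 */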
static inline void __sysreg_save_el1_state(struct kvm_cpu_context *ctxt)
{
	ctxt_sys_reg(ctxt, CSSELR_EL1) = read_sysreg(csselr_el1);
	ctxt_sys_reg(ctxt, SCTLR_EL1) = read_sysreg_el1(SYS_SCTLR);
	ctxt_sys_reg(ctxt, CPACR_EL1) = read_sysreg_el1(SYS_CPACR);
	ctxt_sys_reg(ctxt, TTBR0_EL1) = read_sysreg_el1(SYS_TTBR0);
	ctxt_sys_reg(ctxt, TTBR1_EL1) = read_sysreg_el1(SYS_TTBR1);
	ctxt_sys_reg(ctxt, TCR_EL1) = read_sysreg_el1(SYS_TCR);
	ctxt_sys_reg(ctxt, ESR_EL1) = read_sysreg_el1(SYS_ESR);
	ctxt_sys_reg(ctxt, AFSR0_EL1) = read_sysreg_el1(SYS_AFSR0);
	ctxt_sys_reg(ctxt, AFSR1_EL1) = read_sysreg_el1(SYS_AFSR1);
	ctxt_sys_reg(ctxt, FAR_EL1) = read_sysreg_el1(SYS_FAR);
	ctxt_sys_reg(ctxt, MAIR_EL1) = read_sysreg_el1(SYS_MAIR);
	ctxt_sys_reg(ctxt, VBAR_EL1) = read_sysreg_el1(SYS_VBAR);
	ctxt_sys_reg(ctxt, CONTEXTIDR_EL1) = read_sysreg_el1(SYS_CONTEXTIDR);
	ctxt_sys_reg(ctxt, AMAIR_EL1) = read_sysreg_el1(SYS_AMAIR);
	ctxt_sys_reg(ctxt, CNTKCTL_EL1) = read_sysreg_el1(SYS_CNTKCTL);
	ctxt_sys_reg(ctxt, PAR_EL1) = read_sysreg_par();
	ctxt_sys_reg(ctxt, TPIDR_EL1) = read_sysreg(tpidr_el1);

	if (ctxt_has_mte(ctxt)) {
		ctxt_sys_reg(ctxt, TFSR_EL1) = read_sysreg_el1(SYS_TFSR);
		ctxt_sys_reg(ctxt, TFSRE0_EL1) = read_sysreg_s(SYS_TFSRE0_EL1);
	}

	ctxt_sys_reg(ctxt, SP_EL1) = read_sysreg(sp_el1);
	ctxt_sys_reg(ctxt, ELR_EL1) = read_sysreg_el1(SYS_ELR);
	ctxt_sys_reg(ctxt, SPSR_EL1) = read_sysreg_el1(SYS_SPSR);
}

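/*
 * Save the state needed to return to the guest: the PC (from ELR_EL2),
 * the PSTATE (from SPSR_EL2, only needed here for the nVHE host side)
 * and, with RAS, the deferred SError state that the guest sees as
 * DISR_EL1 via VDISR_EL2.
 */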
static inline void __sysreg_save_el2_return_state(struct kvm_cpu_context *ctxt)
{
	ctxt->regs.pc = read_sysreg_el2(SYS_ELR);
	/*
	 * Guest PSTATE gets saved at guest fixup time in all
	 * cases. We still need to handle the nVHE host side here.
	 */
	if (!has_vhe() && ctxt->__hyp_running_vcpu)
		ctxt->regs.pstate = read_sysreg_el2(SYS_SPSR);

	if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN))
		ctxt_sys_reg(ctxt, DISR_EL1) = read_sysreg_s(SYS_VDISR_EL2);
}

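/* Restore the common state saved by __sysreg_save_common_state(). */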
static inline void __sysreg_restore_common_state(struct kvm_cpu_context *ctxt)
{
	write_sysreg(ctxt_sys_reg(ctxt, MDSCR_EL1), mdscr_el1);
}

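/* Restore the EL0 thread pointer registers. */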
static inline void __sysreg_restore_user_state(struct kvm_cpu_context *ctxt)
{
	write_sysreg(ctxt_sys_reg(ctxt, TPIDR_EL0), tpidr_el0);
	write_sysreg(ctxt_sys_reg(ctxt, TPIDRRO_EL0), tpidrro_el0);
}

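/*
 * Restore the guest-visible EL1 state. On CPUs affected by the
 * ARM64_WORKAROUND_SPECULATIVE_AT errata, the EL1 translation controls
 * (SCTLR_EL1/TCR_EL1) have to be sequenced carefully against the rest
 * of the context switch; the comments in the two workaround branches
 * below explain which side (guest entry vs. host exit) each handles.
 */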
static inline void __sysreg_restore_el1_state(struct kvm_cpu_context *ctxt)
{
	write_sysreg(ctxt_sys_reg(ctxt, MPIDR_EL1), vmpidr_el2);
	write_sysreg(ctxt_sys_reg(ctxt, CSSELR_EL1), csselr_el1);

	if (has_vhe() ||
	    !cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
		write_sysreg_el1(ctxt_sys_reg(ctxt, SCTLR_EL1), SYS_SCTLR);
		write_sysreg_el1(ctxt_sys_reg(ctxt, TCR_EL1), SYS_TCR);
	} else if (!ctxt->__hyp_running_vcpu) {
		/*
		 * Must only be done for guest registers, hence the context
		 * test. We're coming from the host, so SCTLR.M is already
		 * set. Pairs with nVHE's __activate_traps().
		 */
		write_sysreg_el1((ctxt_sys_reg(ctxt, TCR_EL1) |
				  TCR_EPD1_MASK | TCR_EPD0_MASK),
				 SYS_TCR);
		isb();
	}

	write_sysreg_el1(ctxt_sys_reg(ctxt, CPACR_EL1), SYS_CPACR);
	write_sysreg_el1(ctxt_sys_reg(ctxt, TTBR0_EL1), SYS_TTBR0);
	write_sysreg_el1(ctxt_sys_reg(ctxt, TTBR1_EL1), SYS_TTBR1);
	write_sysreg_el1(ctxt_sys_reg(ctxt, ESR_EL1), SYS_ESR);
	write_sysreg_el1(ctxt_sys_reg(ctxt, AFSR0_EL1), SYS_AFSR0);
	write_sysreg_el1(ctxt_sys_reg(ctxt, AFSR1_EL1), SYS_AFSR1);
	write_sysreg_el1(ctxt_sys_reg(ctxt, FAR_EL1), SYS_FAR);
	write_sysreg_el1(ctxt_sys_reg(ctxt, MAIR_EL1), SYS_MAIR);
	write_sysreg_el1(ctxt_sys_reg(ctxt, VBAR_EL1), SYS_VBAR);
	write_sysreg_el1(ctxt_sys_reg(ctxt, CONTEXTIDR_EL1), SYS_CONTEXTIDR);
	write_sysreg_el1(ctxt_sys_reg(ctxt, AMAIR_EL1), SYS_AMAIR);
	write_sysreg_el1(ctxt_sys_reg(ctxt, CNTKCTL_EL1), SYS_CNTKCTL);
	write_sysreg(ctxt_sys_reg(ctxt, PAR_EL1), par_el1);
	write_sysreg(ctxt_sys_reg(ctxt, TPIDR_EL1), tpidr_el1);

	if (ctxt_has_mte(ctxt)) {
		write_sysreg_el1(ctxt_sys_reg(ctxt, TFSR_EL1), SYS_TFSR);
		write_sysreg_s(ctxt_sys_reg(ctxt, TFSRE0_EL1), SYS_TFSRE0_EL1);
	}

	if (!has_vhe() &&
	    cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT) &&
	    ctxt->__hyp_running_vcpu) {
		/*
		 * Must only be done for host registers, hence the context
		 * test. Pairs with nVHE's __deactivate_traps().
		 */
		isb();
		/*
		 * At this stage, and thanks to the above isb(), S2 is
		 * deconfigured and disabled. We can now restore the host's
		 * S1 configuration: SCTLR, and only then TCR.
		 */
		write_sysreg_el1(ctxt_sys_reg(ctxt, SCTLR_EL1), SYS_SCTLR);
		isb();
		write_sysreg_el1(ctxt_sys_reg(ctxt, TCR_EL1), SYS_TCR);
	}

	write_sysreg(ctxt_sys_reg(ctxt, SP_EL1), sp_el1);
	write_sysreg_el1(ctxt_sys_reg(ctxt, ELR_EL1), SYS_ELR);
	write_sysreg_el1(ctxt_sys_reg(ctxt, SPSR_EL1), SYS_SPSR);
}

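/*
 * Restore the exception return state: the PC and PSTATE that the
 * eventual ERET will use, with PSTATE sanitised so that a corrupted
 * value cannot be used to enter the guest at EL2 or above.
 */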
static inline void __sysreg_restore_el2_return_state(struct kvm_cpu_context *ctxt)
{
	u64 pstate = ctxt->regs.pstate;
	u64 mode = pstate & PSR_AA32_MODE_MASK;

	/*
	 * Safety check to ensure we're setting the CPU up to enter the guest
	 * in a less privileged mode.
	 *
	 * If we are attempting a return to EL2 or higher in AArch64 state,
	 * program SPSR_EL2 with M=EL2h and the IL bit set which ensures that
	 * we'll take an illegal exception state exception immediately after
	 * the ERET to the guest. Attempts to return to AArch32 Hyp will
	 * result in an illegal exception return because EL2's execution state
	 * is determined by SCR_EL3.RW.
	 */
	if (!(mode & PSR_MODE32_BIT) && mode >= PSR_MODE_EL2t)
		pstate = PSR_MODE_EL2h | PSR_IL_BIT;

	write_sysreg_el2(ctxt->regs.pc, SYS_ELR);
	write_sysreg_el2(pstate, SYS_SPSR);

	if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN))
		write_sysreg_s(ctxt_sys_reg(ctxt, DISR_EL1), SYS_VDISR_EL2);
}

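/*
 * Save the AArch32-specific state of a 32-bit EL1 guest: the banked
 * SPSRs plus the 32-bit-only DACR and IFSR registers. DBGVCR32_EL2 is
 * only saved when the debug registers are live, i.e. always on VHE,
 * or when the vcpu's debug state is dirty on nVHE.
 */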
static inline void __sysreg32_save_state(struct kvm_vcpu *vcpu)
{
	if (!vcpu_el1_is_32bit(vcpu))
		return;

	vcpu->arch.ctxt.spsr_abt = read_sysreg(spsr_abt);
	vcpu->arch.ctxt.spsr_und = read_sysreg(spsr_und);
	vcpu->arch.ctxt.spsr_irq = read_sysreg(spsr_irq);
	vcpu->arch.ctxt.spsr_fiq = read_sysreg(spsr_fiq);

	__vcpu_sys_reg(vcpu, DACR32_EL2) = read_sysreg(dacr32_el2);
	__vcpu_sys_reg(vcpu, IFSR32_EL2) = read_sysreg(ifsr32_el2);

	if (has_vhe() || vcpu_get_flag(vcpu, DEBUG_DIRTY))
		__vcpu_sys_reg(vcpu, DBGVCR32_EL2) = read_sysreg(dbgvcr32_el2);
}

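/* Restore the AArch32 state saved by __sysreg32_save_state(). */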
static inline void __sysreg32_restore_state(struct kvm_vcpu *vcpu)
{
	if (!vcpu_el1_is_32bit(vcpu))
		return;

	write_sysreg(vcpu->arch.ctxt.spsr_abt, spsr_abt);
	write_sysreg(vcpu->arch.ctxt.spsr_und, spsr_und);
	write_sysreg(vcpu->arch.ctxt.spsr_irq, spsr_irq);
	write_sysreg(vcpu->arch.ctxt.spsr_fiq, spsr_fiq);

	write_sysreg(__vcpu_sys_reg(vcpu, DACR32_EL2), dacr32_el2);
	write_sysreg(__vcpu_sys_reg(vcpu, IFSR32_EL2), ifsr32_el2);

	if (has_vhe() || vcpu_get_flag(vcpu, DEBUG_DIRTY))
		write_sysreg(__vcpu_sys_reg(vcpu, DBGVCR32_EL2), dbgvcr32_el2);
}

#endif /* __ARM64_KVM_HYP_SYSREG_SR_H__ */