// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <hyp/switch.h>

#include <linux/arm-smccc.h>
#include <linux/kvm_host.h>
#include <linux/types.h>
#include <linux/jump_label.h>
#include <linux/percpu.h>
#include <uapi/linux/psci.h>

#include <kvm/arm_psci.h>

#include <asm/barrier.h>
#include <asm/cpufeature.h>
#include <asm/kprobes.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/fpsimd.h>
#include <asm/debug-monitors.h>
#include <asm/processor.h>
#include <asm/thread_info.h>
#include <asm/vectors.h>

/* VHE specific context */
DEFINE_PER_CPU(struct kvm_host_data, kvm_host_data);
DEFINE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt);
DEFINE_PER_CPU(unsigned long, kvm_hyp_vector);
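
/*
 * Configure the EL2 trap controls for running the guest: trace and AMU
 * accesses are always trapped, and FP/SIMD/SVE traps are only disabled
 * when the guest owns the FP registers. The hyp exception vectors are
 * installed last.
 */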
static void __activate_traps(struct kvm_vcpu *vcpu)
{
	u64 val;

	___activate_traps(vcpu);

	val = read_sysreg(cpacr_el1);
	val |= CPACR_EL1_TTA;
	val &= ~(CPACR_EL1_ZEN_EL0EN | CPACR_EL1_ZEN_EL1EN |
		 CPACR_EL1_SMEN_EL0EN | CPACR_EL1_SMEN_EL1EN);

	/*
	 * With VHE (HCR.E2H == 1), accesses to CPACR_EL1 are routed to
	 * CPTR_EL2. In general, CPACR_EL1 has the same layout as CPTR_EL2,
	 * except for some missing controls, such as TAM.
	 * In this case, CPTR_EL2.TAM has the same bit position with or
	 * without VHE (HCR.E2H == 1), which allows us to use the CPTR_EL2.TAM
	 * shift value here to trap AMU accesses.
	 */

	val |= CPTR_EL2_TAM;

	if (guest_owns_fp_regs(vcpu)) {
		if (vcpu_has_sve(vcpu))
			val |= CPACR_EL1_ZEN_EL0EN | CPACR_EL1_ZEN_EL1EN;
	} else {
		val &= ~(CPACR_EL1_FPEN_EL0EN | CPACR_EL1_FPEN_EL1EN);
		__activate_traps_fpsimd32(vcpu);
	}

	write_sysreg(val, cpacr_el1);

	write_sysreg(__this_cpu_read(kvm_hyp_vector), vbar_el1);
}
NOKPROBE_SYMBOL(__activate_traps);
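
/*
 * Undo the trap configuration on exit: restore the host's HCR_EL2 and
 * CPACR_EL1 values and switch back to the host exception vectors.
 */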
static void __deactivate_traps(struct kvm_vcpu *vcpu)
{
	const char *host_vectors = vectors;

	___deactivate_traps(vcpu);

	write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2);

	/*
	 * ARM errata 1165522 and 1530923 require the actual execution of the
	 * above before we can switch to the EL2/EL0 translation regime used by
	 * the host.
	 */
	asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_SPECULATIVE_AT));

	write_sysreg(CPACR_EL1_DEFAULT, cpacr_el1);

	if (!arm64_kernel_unmapped_at_el0())
		host_vectors = __this_cpu_read(this_cpu_vector);
	write_sysreg(host_vectors, vbar_el1);
}
NOKPROBE_SYMBOL(__deactivate_traps);
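
/*
 * Traps that only need to be toggled on vcpu_load()/vcpu_put(), rather
 * than on every guest entry/exit, are handled by these two helpers.
 */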
void activate_traps_vhe_load(struct kvm_vcpu *vcpu)
{
	__activate_traps_common(vcpu);
}

void deactivate_traps_vhe_put(struct kvm_vcpu *vcpu)
{
	__deactivate_traps_common(vcpu);
}
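
/*
 * Exit handlers, indexed by exception class (ESR_ELx.EC). A NULL entry
 * means the exit cannot be handled here and is propagated back to the
 * host run loop.
 */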
static const exit_handler_fn hyp_exit_handlers[] = {
	[0 ... ESR_ELx_EC_MAX]		= NULL,
	[ESR_ELx_EC_CP15_32]		= kvm_hyp_handle_cp15_32,
	[ESR_ELx_EC_SYS64]		= kvm_hyp_handle_sysreg,
	[ESR_ELx_EC_SVE]		= kvm_hyp_handle_fpsimd,
	[ESR_ELx_EC_FP_ASIMD]		= kvm_hyp_handle_fpsimd,
	[ESR_ELx_EC_IABT_LOW]		= kvm_hyp_handle_iabt_low,
	[ESR_ELx_EC_DABT_LOW]		= kvm_hyp_handle_dabt_low,
	[ESR_ELx_EC_PAC]		= kvm_hyp_handle_ptrauth,
};
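
/*
 * VHE uses the same set of exit handlers for all VMs.
 */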
static const exit_handler_fn *kvm_get_exit_handler_array(struct kvm_vcpu *vcpu)
{
	return hyp_exit_handlers;
}
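
/*
 * Nothing to filter early on VHE; the hook only exists because the
 * shared exit-handling code expects it.
 */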
static void early_exit_filter(struct kvm_vcpu *vcpu, u64 *exit_code)
{
}

/* Switch to the guest for VHE systems running in EL2 */
static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
{
	struct kvm_cpu_context *host_ctxt;
	struct kvm_cpu_context *guest_ctxt;
	u64 exit_code;

	host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
	host_ctxt->__hyp_running_vcpu = vcpu;
	guest_ctxt = &vcpu->arch.ctxt;

	sysreg_save_host_state_vhe(host_ctxt);

	/*
	 * ARM erratum 1165522 requires us to configure both stage 1 and
	 * stage 2 translation for the guest context before we clear
	 * HCR_EL2.TGE.
	 *
	 * We have already configured the guest's stage 1 translation in
	 * kvm_vcpu_load_sysregs_vhe above.  We must now call
	 * __load_stage2 before __activate_traps, because
	 * __load_stage2 configures stage 2 translation, and
	 * __activate_traps clears HCR_EL2.TGE (among other things).
	 */
	__load_stage2(vcpu->arch.hw_mmu, vcpu->arch.hw_mmu->arch);
	__activate_traps(vcpu);

	__kvm_adjust_pc(vcpu);

	sysreg_restore_guest_state_vhe(guest_ctxt);
	__debug_switch_to_guest(vcpu);

	do {
		/* Jump in the fire! */
		exit_code = __guest_enter(vcpu);

		/* And we're baaack! */
	} while (fixup_guest_exit(vcpu, &exit_code));

	sysreg_save_guest_state_vhe(guest_ctxt);

	__deactivate_traps(vcpu);

	sysreg_restore_host_state_vhe(host_ctxt);

	if (vcpu->arch.fp_state == FP_STATE_GUEST_OWNED)
		__fpsimd_save_fpexc32(vcpu);

	__debug_switch_to_host(vcpu);

	return exit_code;
}
NOKPROBE_SYMBOL(__kvm_vcpu_run_vhe);
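
/*
 * Top-level vcpu run entry point: mask interrupts, run the guest, and
 * restore the host's interrupt state afterwards.
 */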
int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
{
	int ret;

	local_daif_mask();

	/*
	 * Having IRQs masked via PMR when entering the guest means the GIC
	 * will not signal the CPU of interrupts of lower priority, and the
	 * only way to get out will be via guest exceptions.
	 * Naturally, we want to avoid this.
	 *
	 * local_daif_mask() already sets GIC_PRIO_PSR_I_SET, we just need a
	 * dsb to ensure that the redistributor forwards EL2 IRQs to the CPU.
	 */
	pmr_sync();

	ret = __kvm_vcpu_run_vhe(vcpu);

	/*
	 * local_daif_restore() takes care to properly restore PSTATE.DAIF
	 * and the GIC PMR if the host is using IRQ priorities.
	 */
	local_daif_restore(DAIF_PROCCTX_NOIRQ);

	/*
	 * When we exit from the guest we change a number of CPU configuration
	 * parameters, such as traps.  Make sure these changes take effect
	 * before running the host or additional guests.
	 */
	isb();

	return ret;
}
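
/*
 * Restore enough host context to produce a meaningful panic message
 * when something goes wrong at EL2.
 */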
static void __hyp_call_panic(u64 spsr, u64 elr, u64 par)
{
	struct kvm_cpu_context *host_ctxt;
	struct kvm_vcpu *vcpu;

	host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
	vcpu = host_ctxt->__hyp_running_vcpu;

	__deactivate_traps(vcpu);
	sysreg_restore_host_state_vhe(host_ctxt);

	panic("HYP panic:\nPS:%08llx PC:%016llx ESR:%08llx\nFAR:%016llx HPFAR:%016llx PAR:%016llx\nVCPU:%p\n",
	      spsr, elr,
	      read_sysreg_el2(SYS_ESR), read_sysreg_el2(SYS_FAR),
	      read_sysreg(hpfar_el2), par, vcpu);
}
NOKPROBE_SYMBOL(__hyp_call_panic);
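
/*
 * Fatal error at EL2: snapshot the exception state and hand over to
 * __hyp_call_panic().
 */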
void __noreturn hyp_panic(void)
{
	u64 spsr = read_sysreg_el2(SYS_SPSR);
	u64 elr = read_sysreg_el2(SYS_ELR);
	u64 par = read_sysreg_par();

	__hyp_call_panic(spsr, elr, par);
	unreachable();
}
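
/*
 * Called from the EL2 exception vectors when an unexpected exception
 * is taken; the actual handling lives in the shared hyp code.
 */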
asmlinkage void kvm_unexpected_el2_exception(void)
{
	__kvm_unexpected_el2_exception();
}