// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/kvm/handle_exit.c:
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#include <linux/kvm.h>
#include <linux/kvm_host.h>

#include <asm/esr.h>
#include <asm/exception.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_mmu.h>
#include <asm/debug-monitors.h>
#include <asm/traps.h>

#include <kvm/arm_hypercalls.h>

#define CREATE_TRACE_POINTS
#include "trace_handle_exit.h"

typedef int (*exit_handle_fn)(struct kvm_vcpu *);

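/*
 * A guest SError is forwarded as a virtual abort unless it is a
 * RAS-classified error whose severity the RAS helpers deem non-fatal,
 * in which case it can be safely absorbed here.
 */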
static void kvm_handle_guest_serror(struct kvm_vcpu *vcpu, u64 esr)
{
	if (!arm64_is_ras_serror(esr) || arm64_is_fatal_ras_serror(NULL, esr))
		kvm_inject_vabt(vcpu);
}

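/*
 * Unlike a trapped SMC (see handle_smc() below), an HVC exception
 * already reports the address of the following instruction as its
 * preferred return address, so the PC needs no adjustment here.
 */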
static int handle_hvc(struct kvm_vcpu *vcpu)
{
	int ret;

	trace_kvm_hvc_arm64(*vcpu_pc(vcpu), vcpu_get_reg(vcpu, 0),
			    kvm_vcpu_hvc_get_imm(vcpu));
	vcpu->stat.hvc_exit_stat++;

	ret = kvm_hvc_call_handler(vcpu);
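	/*
	 * A negative return means the call could not be handled: report
	 * SMCCC_RET_NOT_SUPPORTED (-1) back to the guest in x0 and resume
	 * it. A zero return propagates an exit to userspace (e.g. for a
	 * PSCI system event).
	 */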
	if (ret < 0) {
		vcpu_set_reg(vcpu, 0, ~0UL);
		return 1;
	}

	return ret;
}

static int handle_smc(struct kvm_vcpu *vcpu)
{
	/*
	 * "If an SMC instruction executed at Non-secure EL1 is
	 * trapped to EL2 because HCR_EL2.TSC is 1, the exception is a
	 * Trap exception, not a Secure Monitor Call exception [...]"
	 *
	 * We need to advance the PC after the trap, as it would
	 * otherwise return to the same address...
	 */
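	/* No SMC conduit is offered; flag NOT_SUPPORTED (-1) in x0. */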
	vcpu_set_reg(vcpu, 0, ~0UL);
	kvm_incr_pc(vcpu);
	return 1;
}

/*
 * Guest access to FP/ASIMD registers is routed to this handler only
 * when the system doesn't support FP/ASIMD.
 */
static int handle_no_fpsimd(struct kvm_vcpu *vcpu)
{
	kvm_inject_undefined(vcpu);
	return 1;
}

/**
 * kvm_handle_wfx - handle a wait-for-interrupts or wait-for-event
 *		    instruction executed by a guest
 *
 * @vcpu: the vcpu pointer
 *
 * WFE[T]: Yield the CPU and come back to this vcpu when the scheduler
 * decides to.
 * WFI: Simply call kvm_vcpu_halt(), which will halt execution of
 * world-switches and schedule other host processes until there is an
 * incoming IRQ or FIQ to the VM.
 * WFIT: Same as WFI, with a timed wakeup implemented as a background timer
 *
 * WF{I,E}T can immediately return if the deadline has already expired.
 */
static int kvm_handle_wfx(struct kvm_vcpu *vcpu)
{
	u64 esr = kvm_vcpu_get_esr(vcpu);

	if (esr & ESR_ELx_WFx_ISS_WFE) {
		trace_kvm_wfx_arm64(*vcpu_pc(vcpu), true);
		vcpu->stat.wfe_exit_stat++;
	} else {
		trace_kvm_wfx_arm64(*vcpu_pc(vcpu), false);
		vcpu->stat.wfi_exit_stat++;
	}

	if (esr & ESR_ELx_WFx_ISS_WFxT) {
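		/*
		 * The Rn field of the ISS (holding the timeout value) is
		 * only meaningful when the "register valid" bit is set.
		 */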
		if (esr & ESR_ELx_WFx_ISS_RV) {
			u64 val, now;

			now = kvm_arm_timer_get_reg(vcpu, KVM_REG_ARM_TIMER_CNT);
			val = vcpu_get_reg(vcpu, kvm_vcpu_sys_get_rt(vcpu));

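			/* The deadline has already passed: skip the sleep. */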
			if (now >= val)
				goto out;
		} else {
			/* Treat WFxT as WFx if RN is invalid */
			esr &= ~ESR_ELx_WFx_ISS_WFxT;
		}
	}

	if (esr & ESR_ELx_WFx_ISS_WFE) {
		kvm_vcpu_on_spin(vcpu, vcpu_mode_priv(vcpu));
	} else {
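		/*
		 * Flag the vcpu so that the timer code arms a background
		 * timer for the WFIT deadline.
		 */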
		if (esr & ESR_ELx_WFx_ISS_WFxT)
			vcpu->arch.flags |= KVM_ARM64_WFIT;

		kvm_vcpu_wfi(vcpu);
	}
out:
	kvm_incr_pc(vcpu);

	return 1;
}

/**
 * kvm_handle_guest_debug - handle a debug exception instruction
 *
 * @vcpu: the vcpu pointer
 *
 * We route all debug exceptions through the same handler. If both the
 * guest and host are using the same debug facilities it will be up to
 * userspace to re-inject the correct exception for guest delivery.
 *
 * Return: 0 (while setting vcpu->run->exit_reason)
 */
static int kvm_handle_guest_debug(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u64 esr = kvm_vcpu_get_esr(vcpu);

	run->exit_reason = KVM_EXIT_DEBUG;
	run->debug.arch.hsr = lower_32_bits(esr);
	run->debug.arch.hsr_high = upper_32_bits(esr);
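	/* Tell userspace that hsr_high holds the top 32 ESR bits. */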
	run->flags = KVM_DEBUG_ARCH_HSR_HIGH_VALID;

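	/* Watchpoints additionally report the faulting data address. */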
	if (ESR_ELx_EC(esr) == ESR_ELx_EC_WATCHPT_LOW)
		run->debug.arch.far = vcpu->arch.fault.far_el2;

	return 0;
}

static int kvm_handle_unknown_ec(struct kvm_vcpu *vcpu)
{
	u64 esr = kvm_vcpu_get_esr(vcpu);

	kvm_pr_unimpl("Unknown exception class: esr: %#016llx -- %s\n",
		      esr, esr_get_class_string(esr));

	kvm_inject_undefined(vcpu);
	return 1;
}

/*
 * Guest access to SVE registers should be routed to this handler only
 * when the system doesn't support SVE.
 */
static int handle_sve(struct kvm_vcpu *vcpu)
{
	kvm_inject_undefined(vcpu);
	return 1;
}

/*
 * Guest usage of a ptrauth instruction (which the guest EL1 did not
 * turn into a NOP). If we get here, it means we didn't fix up ptrauth
 * on exit, and all we can do is give the guest an UNDEF.
 */
static int kvm_handle_ptrauth(struct kvm_vcpu *vcpu)
{
	kvm_inject_undefined(vcpu);
	return 1;
}

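/*
 * The handler table is indexed by the exception class (EC) field of
 * ESR_EL2; the GNU range initializer below makes any class without an
 * explicit entry fall through to kvm_handle_unknown_ec().
 */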
static exit_handle_fn arm_exit_handlers[] = {
	[0 ... ESR_ELx_EC_MAX]	= kvm_handle_unknown_ec,
	[ESR_ELx_EC_WFx]	= kvm_handle_wfx,
	[ESR_ELx_EC_CP15_32]	= kvm_handle_cp15_32,
	[ESR_ELx_EC_CP15_64]	= kvm_handle_cp15_64,
	[ESR_ELx_EC_CP14_MR]	= kvm_handle_cp14_32,
	[ESR_ELx_EC_CP14_LS]	= kvm_handle_cp14_load_store,
	[ESR_ELx_EC_CP10_ID]	= kvm_handle_cp10_id,
	[ESR_ELx_EC_CP14_64]	= kvm_handle_cp14_64,
	[ESR_ELx_EC_HVC32]	= handle_hvc,
	[ESR_ELx_EC_SMC32]	= handle_smc,
	[ESR_ELx_EC_HVC64]	= handle_hvc,
	[ESR_ELx_EC_SMC64]	= handle_smc,
	[ESR_ELx_EC_SYS64]	= kvm_handle_sys_reg,
	[ESR_ELx_EC_SVE]	= handle_sve,
	[ESR_ELx_EC_IABT_LOW]	= kvm_handle_guest_abort,
	[ESR_ELx_EC_DABT_LOW]	= kvm_handle_guest_abort,
	[ESR_ELx_EC_SOFTSTP_LOW]= kvm_handle_guest_debug,
	[ESR_ELx_EC_WATCHPT_LOW]= kvm_handle_guest_debug,
	[ESR_ELx_EC_BREAKPT_LOW]= kvm_handle_guest_debug,
	[ESR_ELx_EC_BKPT32]	= kvm_handle_guest_debug,
	[ESR_ELx_EC_BRK64]	= kvm_handle_guest_debug,
	[ESR_ELx_EC_FP_ASIMD]	= handle_no_fpsimd,
	[ESR_ELx_EC_PAC]	= kvm_handle_ptrauth,
};

static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu)
{
	u64 esr = kvm_vcpu_get_esr(vcpu);
	u8 esr_ec = ESR_ELx_EC(esr);

	return arm_exit_handlers[esr_ec];
}

/*
 * We may be single-stepping an emulated instruction. If the emulation
 * has been completed in the kernel, we can return to userspace with a
 * KVM_EXIT_DEBUG, otherwise userspace needs to complete its
 * emulation first.
 */
static int handle_trap_exceptions(struct kvm_vcpu *vcpu)
{
	int handled;

	/*
	 * See ARM ARM B1.14.1: "Hyp traps on instructions
	 * that fail their condition code check"
	 */
	if (!kvm_condition_valid(vcpu)) {
		kvm_incr_pc(vcpu);
		handled = 1;
	} else {
		exit_handle_fn exit_handler;

		exit_handler = kvm_get_exit_handler(vcpu);
		handled = exit_handler(vcpu);
	}

	return handled;
}

/*
 * Return > 0 to return to guest, < 0 on error, 0 (and set exit_reason) on
 * proper exit to userspace.
 */
int handle_exit(struct kvm_vcpu *vcpu, int exception_index)
{
	struct kvm_run *run = vcpu->run;

	if (ARM_SERROR_PENDING(exception_index)) {
		/*
		 * The SError is handled by handle_exit_early(). If the guest
		 * survives it will re-execute the original instruction.
		 */
		return 1;
	}

	exception_index = ARM_EXCEPTION_CODE(exception_index);

	switch (exception_index) {
	case ARM_EXCEPTION_IRQ:
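		/* The interrupt was already handled by the host; resume. */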
		return 1;
	case ARM_EXCEPTION_EL1_SERROR:
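		/* Already processed in handle_exit_early(); resume. */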
		return 1;
	case ARM_EXCEPTION_TRAP:
		return handle_trap_exceptions(vcpu);
	case ARM_EXCEPTION_HYP_GONE:
		/*
		 * EL2 has been reset to the hyp-stub. This happens when a guest
		 * is pre-empted by kvm_reboot()'s shutdown call.
		 */
		run->exit_reason = KVM_EXIT_FAIL_ENTRY;
		return 0;
	case ARM_EXCEPTION_IL:
		/*
		 * We attempted an illegal exception return. Guest state must
		 * have been corrupted somehow. Give up.
		 */
		run->exit_reason = KVM_EXIT_FAIL_ENTRY;
		return -EINVAL;
	default:
		kvm_pr_unimpl("Unsupported exception type: %d",
			      exception_index);
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		return 0;
	}
}

/* For exit types that need handling before we can be preempted */
void handle_exit_early(struct kvm_vcpu *vcpu, int exception_index)
{
	if (ARM_SERROR_PENDING(exception_index)) {
		if (this_cpu_has_cap(ARM64_HAS_RAS_EXTN)) {
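			/*
			 * With the RAS extension, the SError has been
			 * deferred and its syndrome recorded in DISR;
			 * convert that to an ESR before classifying it.
			 */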
			u64 disr = kvm_vcpu_get_disr(vcpu);

			kvm_handle_guest_serror(vcpu, disr_to_esr(disr));
		} else {
			kvm_inject_vabt(vcpu);
		}

		return;
	}

	exception_index = ARM_EXCEPTION_CODE(exception_index);

	if (exception_index == ARM_EXCEPTION_EL1_SERROR)
		kvm_handle_guest_serror(vcpu, kvm_vcpu_get_esr(vcpu));
}

void __noreturn __cold nvhe_hyp_panic_handler(u64 esr, u64 spsr,
					      u64 elr_virt, u64 elr_phys,
					      u64 par, uintptr_t vcpu,
					      u64 far, u64 hpfar)
{
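	/*
	 * Use the physical ELR to recover the kernel-image alias of the
	 * faulting hyp address: panic_addr then ends up as the
	 * KASLR-stripped vmlinux address, and adding kaslr_offset() back
	 * gives the runtime address used for %pB symbolization below.
	 */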
	u64 elr_in_kimg = __phys_to_kimg(elr_phys);
	u64 hyp_offset = elr_in_kimg - kaslr_offset() - elr_virt;
	u64 mode = spsr & PSR_MODE_MASK;
	u64 panic_addr = elr_virt + hyp_offset;

	if (mode != PSR_MODE_EL2t && mode != PSR_MODE_EL2h) {
		kvm_err("Invalid host exception to nVHE hyp!\n");
	} else if (ESR_ELx_EC(esr) == ESR_ELx_EC_BRK64 &&
		   (esr & ESR_ELx_BRK64_ISS_COMMENT_MASK) == BUG_BRK_IMM) {
		const char *file = NULL;
		unsigned int line = 0;

		/* All hyp bugs, including warnings, are treated as fatal. */
		if (!is_protected_kvm_enabled() ||
		    IS_ENABLED(CONFIG_NVHE_EL2_DEBUG)) {
			struct bug_entry *bug = find_bug(elr_in_kimg);

			if (bug)
				bug_get_file_line(bug, &file, &line);
		}

		if (file)
			kvm_err("nVHE hyp BUG at: %s:%u!\n", file, line);
		else
			kvm_err("nVHE hyp BUG at: [<%016llx>] %pB!\n", panic_addr,
					(void *)(panic_addr + kaslr_offset()));
	} else {
		kvm_err("nVHE hyp panic at: [<%016llx>] %pB!\n", panic_addr,
				(void *)(panic_addr + kaslr_offset()));
	}

	/*
	 * Hyp has panicked and we're going to handle that by panicking the
	 * kernel. The kernel offset will be revealed in the panic so we're
	 * also safe to reveal the hyp offset as a debugging aid for translating
	 * hyp VAs to vmlinux addresses.
	 */
	kvm_err("Hyp Offset: 0x%llx\n", hyp_offset);

	panic("HYP panic:\nPS:%08llx PC:%016llx ESR:%016llx\nFAR:%016llx HPFAR:%016llx PAR:%016llx\nVCPU:%016lx\n",
	      spsr, elr_virt, esr, far, hpfar, par, vcpu);
}