/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/include/kvm_emulate.h
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#ifndef __ARM64_KVM_EMULATE_H__
#define __ARM64_KVM_EMULATE_H__

#include <linux/kvm_host.h>

#include <asm/debug-monitors.h>
#include <asm/esr.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_hyp.h>
#include <asm/ptrace.h>
#include <asm/cputype.h>
#include <asm/virt.h>

#define CURRENT_EL_SP_EL0_VECTOR	0x0
#define CURRENT_EL_SP_ELx_VECTOR	0x200
#define LOWER_EL_AArch64_VECTOR		0x400
#define LOWER_EL_AArch32_VECTOR		0x600

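/*
 * An AArch64 vector table holds four groups of four vectors: the group is
 * selected by where the exception is taken from (the offsets above), the
 * entry within the group by the exception type (the offsets below). As an
 * illustrative example, injecting an IRQ into a lower EL running AArch64
 * uses vector offset LOWER_EL_AArch64_VECTOR + except_type_irq = 0x480.
 */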
enum exception_type {
	except_type_sync	= 0,
	except_type_irq		= 0x80,
	except_type_fiq		= 0x100,
	except_type_serror	= 0x180,
};

bool kvm_condition_valid32(const struct kvm_vcpu *vcpu);
void kvm_skip_instr32(struct kvm_vcpu *vcpu);

void kvm_inject_undefined(struct kvm_vcpu *vcpu);
void kvm_inject_vabt(struct kvm_vcpu *vcpu);
void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_size_fault(struct kvm_vcpu *vcpu);

void kvm_vcpu_wfi(struct kvm_vcpu *vcpu);

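/*
 * Two views of the guest's EL1 register width: code running at EL2 avoids
 * dereferencing vcpu->kvm and instead derives the answer from the vcpu's
 * HCR_EL2 shadow (RW clear means an AArch32 EL1), while host code consults
 * the VM-wide flag, warning if the width has not been configured yet.
 */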
#if defined(__KVM_VHE_HYPERVISOR__) || defined(__KVM_NVHE_HYPERVISOR__)
static __always_inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu)
{
	return !(vcpu->arch.hcr_el2 & HCR_RW);
}
#else
static __always_inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;

	WARN_ON_ONCE(!test_bit(KVM_ARCH_FLAG_REG_WIDTH_CONFIGURED,
			       &kvm->arch.flags));

	return test_bit(KVM_ARCH_FLAG_EL1_32BIT, &kvm->arch.flags);
}
#endif

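/*
 * Establish the baseline HCR_EL2 trap configuration for this vcpu at
 * reset time; the helpers below then flip individual bits (WFx traps,
 * pointer authentication, ...) at run time.
 */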
static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;
	if (is_kernel_in_hyp_mode())
		vcpu->arch.hcr_el2 |= HCR_E2H;
	if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN)) {
		/* route synchronous external abort exceptions to EL2 */
		vcpu->arch.hcr_el2 |= HCR_TEA;
		/* trap error record accesses */
		vcpu->arch.hcr_el2 |= HCR_TERR;
	}

	if (cpus_have_const_cap(ARM64_HAS_STAGE2_FWB)) {
		vcpu->arch.hcr_el2 |= HCR_FWB;
	} else {
		/*
		 * For non-FWB CPUs, we trap VM ops (HCR_EL2.TVM) until M+C
		 * get set in SCTLR_EL1 such that we can detect when the guest
		 * MMU gets turned on and do the necessary cache maintenance
		 * then.
		 */
		vcpu->arch.hcr_el2 |= HCR_TVM;
	}

	if (vcpu_el1_is_32bit(vcpu))
		vcpu->arch.hcr_el2 &= ~HCR_RW;

	if (cpus_have_const_cap(ARM64_MISMATCHED_CACHE_TYPE) ||
	    vcpu_el1_is_32bit(vcpu))
		vcpu->arch.hcr_el2 |= HCR_TID2;

	if (kvm_has_mte(vcpu->kvm))
		vcpu->arch.hcr_el2 |= HCR_ATA;
}

static inline unsigned long *vcpu_hcr(struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu->arch.hcr_el2;
}

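/*
 * WFx trapping is adjusted dynamically, typically based on whether the
 * physical CPU is oversubscribed: traps are cleared when the guest may as
 * well wait natively, and set when a trapped WFx should yield the CPU
 * instead. WFI additionally stays untrapped while direct interrupt
 * injection (vLPIs, or GICv4.1 vSGIs when nassgireq is set) is in use, as
 * those interrupts are delivered without software involvement.
 */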
static inline void vcpu_clear_wfx_traps(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 &= ~HCR_TWE;
	if (atomic_read(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vlpi_count) ||
	    vcpu->kvm->arch.vgic.nassgireq)
		vcpu->arch.hcr_el2 &= ~HCR_TWI;
	else
		vcpu->arch.hcr_el2 |= HCR_TWI;
}

static inline void vcpu_set_wfx_traps(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 |= HCR_TWE;
	vcpu->arch.hcr_el2 |= HCR_TWI;
}

static inline void vcpu_ptrauth_enable(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 |= (HCR_API | HCR_APK);
}

static inline void vcpu_ptrauth_disable(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 &= ~(HCR_API | HCR_APK);
}

static inline unsigned long vcpu_get_vsesr(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.vsesr_el2;
}

static inline void vcpu_set_vsesr(struct kvm_vcpu *vcpu, u64 vsesr)
{
	vcpu->arch.vsesr_el2 = vsesr;
}

static __always_inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu_gp_regs(vcpu)->pc;
}

static __always_inline unsigned long *vcpu_cpsr(const struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu_gp_regs(vcpu)->pstate;
}

static __always_inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu)
{
	return !!(*vcpu_cpsr(vcpu) & PSR_MODE32_BIT);
}

static __always_inline bool kvm_condition_valid(const struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu))
		return kvm_condition_valid32(vcpu);

	return true;
}

static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
{
	*vcpu_cpsr(vcpu) |= PSR_AA32_T_BIT;
}

/*
 * vcpu_get_reg and vcpu_set_reg should always be passed a register number
 * coming from a read of ESR_EL2. Otherwise, it may give the wrong result on
 * AArch32 with banked registers.
 */
static __always_inline unsigned long vcpu_get_reg(const struct kvm_vcpu *vcpu,
						  u8 reg_num)
{
	return (reg_num == 31) ? 0 : vcpu_gp_regs(vcpu)->regs[reg_num];
}

static __always_inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num,
					 unsigned long val)
{
	if (reg_num != 31)
		vcpu_gp_regs(vcpu)->regs[reg_num] = val;
}

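/*
 * Illustrative sketch (mirroring the MMIO emulation pattern, not a
 * definition from this file), with the register number taken from ESR_EL2
 * as required above:
 *
 *	int rd = kvm_vcpu_dabt_get_rd(vcpu);
 *
 *	if (kvm_vcpu_dabt_iswrite(vcpu))
 *		data = vcpu_get_reg(vcpu, rd);		// guest store
 *	else
 *		vcpu_set_reg(vcpu, rd, data);		// guest load
 */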
/*
 * The layout of SPSR for an AArch32 state is different when observed from an
 * AArch64 SPSR_ELx or an AArch32 SPSR_*. This function generates the AArch32
 * view given an AArch64 view.
 *
 * In ARM DDI 0487E.a see:
 *
 * - The AArch64 view (SPSR_EL2) in section C5.2.18, page C5-426
 * - The AArch32 view (SPSR_abt) in section G8.2.126, page G8-6256
 * - The AArch32 view (SPSR_und) in section G8.2.132, page G8-6280
 *
 * Which show the following differences:
 *
 * | Bit | AA64 | AA32 | Notes                       |
 * +-----+------+------+-----------------------------+
 * | 24  | DIT  | J    | J is RES0 in ARMv8          |
 * | 21  | SS   | DIT  | SS doesn't exist in AArch32 |
 *
 * ... and all other bits are (currently) common.
 */
static inline unsigned long host_spsr_to_spsr32(unsigned long spsr)
{
	const unsigned long overlap = BIT(24) | BIT(21);
	unsigned long dit = !!(spsr & PSR_AA32_DIT_BIT);

	spsr &= ~overlap;

	spsr |= dit << 21;

	return spsr;
}
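/*
 * Worked example: given an AArch64 view with DIT set, both overlap bits
 * are cleared and DIT is placed at bit 21, matching the AArch32 layout
 * described in the table above.
 */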

static inline bool vcpu_mode_priv(const struct kvm_vcpu *vcpu)
{
	u32 mode;

	if (vcpu_mode_is_32bit(vcpu)) {
		mode = *vcpu_cpsr(vcpu) & PSR_AA32_MODE_MASK;
		return mode > PSR_AA32_MODE_USR;
	}

	mode = *vcpu_cpsr(vcpu) & PSR_MODE_MASK;

	return mode != PSR_MODE_EL0t;
}

static __always_inline u64 kvm_vcpu_get_esr(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault.esr_el2;
}

static __always_inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
{
	u64 esr = kvm_vcpu_get_esr(vcpu);

	if (esr & ESR_ELx_CV)
		return (esr & ESR_ELx_COND_MASK) >> ESR_ELx_COND_SHIFT;

	return -1;
}

static __always_inline unsigned long kvm_vcpu_get_hfar(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault.far_el2;
}

static __always_inline phys_addr_t kvm_vcpu_get_fault_ipa(const struct kvm_vcpu *vcpu)
{
	return ((phys_addr_t)vcpu->arch.fault.hpfar_el2 & HPFAR_MASK) << 8;
}
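/*
 * HPFAR_EL2 holds the faulting IPA: IPA[47:12] lives in the FIPA field at
 * register bits [43:4], so shifting the masked value left by 8 rebuilds
 * the page-aligned address. The offset within the page, when needed,
 * comes from FAR_EL2 via kvm_vcpu_get_hfar().
 */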

static inline u64 kvm_vcpu_get_disr(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault.disr_el1;
}

static inline u32 kvm_vcpu_hvc_get_imm(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_xVC_IMM_MASK;
}

static __always_inline bool kvm_vcpu_dabt_isvalid(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_ISV);
}

static inline unsigned long kvm_vcpu_dabt_iss_nisv_sanitized(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_esr(vcpu) & (ESR_ELx_CM | ESR_ELx_WNR | ESR_ELx_FSC);
}

static inline bool kvm_vcpu_dabt_issext(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_SSE);
}

static inline bool kvm_vcpu_dabt_issf(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_SF);
}

static __always_inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu)
{
	return (kvm_vcpu_get_esr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT;
}

static __always_inline bool kvm_vcpu_abt_iss1tw(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_S1PTW);
}

/* Always check for S1PTW *before* using this. */
static __always_inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_WNR;
}

static inline bool kvm_vcpu_dabt_is_cm(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_CM);
}

static __always_inline unsigned int kvm_vcpu_dabt_get_as(const struct kvm_vcpu *vcpu)
{
	return 1 << ((kvm_vcpu_get_esr(vcpu) & ESR_ELx_SAS) >> ESR_ELx_SAS_SHIFT);
}
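/*
 * The SAS field encodes log2 of the access size, so kvm_vcpu_dabt_get_as()
 * yields 1, 2, 4 or 8 bytes.
 */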

/* This one is not specific to Data Abort */
static __always_inline bool kvm_vcpu_trap_il_is32bit(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_IL);
}

static __always_inline u8 kvm_vcpu_trap_get_class(const struct kvm_vcpu *vcpu)
{
	return ESR_ELx_EC(kvm_vcpu_get_esr(vcpu));
}

static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_IABT_LOW;
}

static inline bool kvm_vcpu_trap_is_exec_fault(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_trap_is_iabt(vcpu) && !kvm_vcpu_abt_iss1tw(vcpu);
}

static __always_inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_FSC;
}

static __always_inline u8 kvm_vcpu_trap_get_fault_type(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_FSC_TYPE;
}

static __always_inline u8 kvm_vcpu_trap_get_fault_level(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_FSC_LEVEL;
}
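/*
 * The fault status code decomposes into a type and a level, which the two
 * accessors above extract: an FSC of 0x07, for example, is a translation
 * fault (type 0x04) at level 3.
 */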

static __always_inline bool kvm_vcpu_abt_issea(const struct kvm_vcpu *vcpu)
{
	switch (kvm_vcpu_trap_get_fault(vcpu)) {
	case FSC_SEA:
	case FSC_SEA_TTW0:
	case FSC_SEA_TTW1:
	case FSC_SEA_TTW2:
	case FSC_SEA_TTW3:
	case FSC_SECC:
	case FSC_SECC_TTW0:
	case FSC_SECC_TTW1:
	case FSC_SECC_TTW2:
	case FSC_SECC_TTW3:
		return true;
	default:
		return false;
	}
}

static __always_inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu)
{
	u64 esr = kvm_vcpu_get_esr(vcpu);

	return ESR_ELx_SYS64_ISS_RT(esr);
}

static inline bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
{
	if (kvm_vcpu_abt_iss1tw(vcpu))
		return true;

	if (kvm_vcpu_trap_is_iabt(vcpu))
		return false;

	return kvm_vcpu_dabt_iswrite(vcpu);
}
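/*
 * Note the ordering in kvm_is_write_fault(): a stage 2 fault during a
 * stage 1 page table walk (S1PTW) is always treated as a write, since the
 * walker may need to update access flag or dirty state; only after that
 * check is WNR meaningful, and an instruction abort is never a write.
 */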

static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu)
{
	return vcpu_read_sys_reg(vcpu, MPIDR_EL1) & MPIDR_HWID_BITMASK;
}

static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu)) {
		*vcpu_cpsr(vcpu) |= PSR_AA32_E_BIT;
	} else {
		u64 sctlr = vcpu_read_sys_reg(vcpu, SCTLR_EL1);

		sctlr |= SCTLR_ELx_EE;
		vcpu_write_sys_reg(vcpu, sctlr, SCTLR_EL1);
	}
}

static inline bool kvm_vcpu_is_be(struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu))
		return !!(*vcpu_cpsr(vcpu) & PSR_AA32_E_BIT);

	if (vcpu_mode_priv(vcpu))
		return !!(vcpu_read_sys_reg(vcpu, SCTLR_EL1) & SCTLR_ELx_EE);
	else
		return !!(vcpu_read_sys_reg(vcpu, SCTLR_EL1) & SCTLR_EL1_E0E);
}

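/*
 * MMIO data is handled in host CPU byte order; the two helpers below
 * convert to and from the guest's current data endianness. Illustrative
 * sketch (not a definition from this file) of completing a trapped guest
 * load of 'len' bytes with device data 'data':
 *
 *	vcpu_set_reg(vcpu, kvm_vcpu_dabt_get_rd(vcpu),
 *		     vcpu_data_host_to_guest(vcpu, data, len));
 */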
static inline unsigned long vcpu_data_guest_to_host(struct kvm_vcpu *vcpu,
						    unsigned long data,
						    unsigned int len)
{
	if (kvm_vcpu_is_be(vcpu)) {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return be16_to_cpu(data & 0xffff);
		case 4:
			return be32_to_cpu(data & 0xffffffff);
		default:
			return be64_to_cpu(data);
		}
	} else {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return le16_to_cpu(data & 0xffff);
		case 4:
			return le32_to_cpu(data & 0xffffffff);
		default:
			return le64_to_cpu(data);
		}
	}

	return data;	/* Leave LE untouched */
}

static inline unsigned long vcpu_data_host_to_guest(struct kvm_vcpu *vcpu,
						    unsigned long data,
						    unsigned int len)
{
	if (kvm_vcpu_is_be(vcpu)) {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return cpu_to_be16(data & 0xffff);
		case 4:
			return cpu_to_be32(data & 0xffffffff);
		default:
			return cpu_to_be64(data);
		}
	} else {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return cpu_to_le16(data & 0xffff);
		case 4:
			return cpu_to_le32(data & 0xffffffff);
		default:
			return cpu_to_le64(data);
		}
	}

	return data;	/* Leave LE untouched */
}

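/*
 * Skipping an emulated instruction is deferred: kvm_incr_pc() only raises
 * a flag, and the actual PC (and AArch32 IT state) adjustment is performed
 * on the next guest entry, so that it composes correctly with any exception
 * that may be pending injection at the same time.
 */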
static __always_inline void kvm_incr_pc(struct kvm_vcpu *vcpu)
{
	vcpu->arch.flags |= KVM_ARM64_INCREMENT_PC;
}

static inline bool vcpu_has_feature(struct kvm_vcpu *vcpu, int feature)
{
	return test_bit(feature, vcpu->arch.features);
}

#endif /* __ARM64_KVM_EMULATE_H__ */