/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/include/asm/kvm_host.h:
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#ifndef __ARM64_KVM_HOST_H__
#define __ARM64_KVM_HOST_H__

#include <linux/arm-smccc.h>
#include <linux/bitmap.h>
#include <linux/types.h>
#include <linux/jump_label.h>
#include <linux/kvm_types.h>
#include <linux/percpu.h>
#include <linux/psci.h>
#include <asm/arch_gicv3.h>
#include <asm/barrier.h>
#include <asm/cpufeature.h>
#include <asm/cputype.h>
#include <asm/daifflags.h>
#include <asm/fpsimd.h>
#include <asm/kvm.h>
#include <asm/kvm_asm.h>

#define __KVM_HAVE_ARCH_INTC_INITIALIZED

#define KVM_HALT_POLL_NS_DEFAULT 500000

#include <kvm/arm_vgic.h>
#include <kvm/arm_arch_timer.h>
#include <kvm/arm_pmu.h>

#define KVM_MAX_VCPUS VGIC_V3_MAX_CPUS

#define KVM_VCPU_MAX_FEATURES 7

#define KVM_REQ_SLEEP \
	KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_IRQ_PENDING	KVM_ARCH_REQ(1)
#define KVM_REQ_VCPU_RESET	KVM_ARCH_REQ(2)
#define KVM_REQ_RECORD_STEAL	KVM_ARCH_REQ(3)
#define KVM_REQ_RELOAD_GICv4	KVM_ARCH_REQ(4)
#define KVM_REQ_RELOAD_PMU	KVM_ARCH_REQ(5)
#define KVM_REQ_SUSPEND		KVM_ARCH_REQ(6)
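/*
 * Illustrative sketch (not part of this header's API): the arch
 * requests above are posted with the generic KVM request machinery;
 * kvm_make_request() and kvm_vcpu_kick() come from <linux/kvm_host.h>.
 *
 *	kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
 *	kvm_vcpu_kick(vcpu);	\/\* force the vCPU out of the guest \*\/
 */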

#define KVM_DIRTY_LOG_MANUAL_CAPS (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE | \
				   KVM_DIRTY_LOG_INITIALLY_SET)

#define KVM_HAVE_MMU_RWLOCK

/*
 * Mode of operation configurable with kvm-arm.mode early param.
 * See Documentation/admin-guide/kernel-parameters.txt for more information.
 */
enum kvm_mode {
	KVM_MODE_DEFAULT,
	KVM_MODE_PROTECTED,
	KVM_MODE_NONE,
};
enum kvm_mode kvm_get_mode(void);
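
/*
 * For example (per kernel-parameters.txt): booting with
 * "kvm-arm.mode=protected" selects KVM_MODE_PROTECTED, while
 * "kvm-arm.mode=none" selects KVM_MODE_NONE and disables KVM entirely.
 */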

DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use);

extern unsigned int kvm_sve_max_vl;
int kvm_arm_init_sve(void);

u32 __attribute_const__ kvm_target_cpu(void);
int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
void kvm_arm_vcpu_destroy(struct kvm_vcpu *vcpu);

struct kvm_vmid {
	atomic64_t id;
};

struct kvm_s2_mmu {
	struct kvm_vmid vmid;

	/*
	 * stage2 entry level table
	 *
	 * Two kvm_s2_mmu structures in the same VM can point to the same
	 * pgd here. This happens when running a guest using a
	 * translation regime that isn't affected by its own stage-2
	 * translation, such as a non-VHE hypervisor running at vEL2, or
	 * for vEL1/EL0 with vHCR_EL2.VM == 0. In that case, we use the
	 * canonical stage-2 page tables.
	 */
	phys_addr_t	pgd_phys;
	struct kvm_pgtable *pgt;

	/* The last vcpu id that ran on each physical CPU */
	int __percpu *last_vcpu_ran;

	struct kvm_arch *arch;
};

struct kvm_arch_memory_slot {
};

/**
 * struct kvm_smccc_features: Descriptor of the hypercall services exposed to the guests
 *
 * @std_bmap: Bitmap of standard secure service calls
 * @std_hyp_bmap: Bitmap of standard hypervisor service calls
 * @vendor_hyp_bmap: Bitmap of vendor specific hypervisor service calls
 */
struct kvm_smccc_features {
	unsigned long std_bmap;
	unsigned long std_hyp_bmap;
	unsigned long vendor_hyp_bmap;
};
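
/*
 * Illustrative sketch: hypercall filtering tests individual services
 * against these bitmaps. Assuming the UAPI bit names (e.g.
 * KVM_REG_ARM_STD_BIT_TRNG_V1_0), a check might look like:
 *
 *	if (test_bit(KVM_REG_ARM_STD_BIT_TRNG_V1_0,
 *		     &kvm->arch.smccc_feat.std_bmap))
 *		... service the TRNG call ...
 */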

struct kvm_arch {
	struct kvm_s2_mmu mmu;

	/* VTCR_EL2 value for this VM */
	u64    vtcr;

	/* Interrupt controller */
	struct vgic_dist	vgic;

	/* Mandated version of PSCI */
	u32 psci_version;

	/*
	 * If we encounter a data abort without valid instruction syndrome
	 * information, report this to user space. User space can (and
	 * should) opt in to this feature if KVM_CAP_ARM_NISV_TO_USER is
	 * supported.
	 */
#define KVM_ARCH_FLAG_RETURN_NISV_IO_ABORT_TO_USER	0
	/* Memory Tagging Extension enabled for the guest */
#define KVM_ARCH_FLAG_MTE_ENABLED			1
	/* At least one vCPU has run in the VM */
#define KVM_ARCH_FLAG_HAS_RAN_ONCE			2
	/*
	 * The following two bits are used to indicate the guest's EL1
	 * register width configuration. KVM_ARCH_FLAG_EL1_32BIT is
	 * valid only when KVM_ARCH_FLAG_REG_WIDTH_CONFIGURED is set;
	 * otherwise, the guest's EL1 register width has not yet been
	 * determined.
	 */
#define KVM_ARCH_FLAG_REG_WIDTH_CONFIGURED		3
#define KVM_ARCH_FLAG_EL1_32BIT				4
	/* PSCI SYSTEM_SUSPEND enabled for the guest */
#define KVM_ARCH_FLAG_SYSTEM_SUSPEND_ENABLED		5

	unsigned long flags;

	/*
	 * VM-wide PMU filter, implemented as a bitmap and big enough for
	 * up to 2^10 events (ARMv8.0) or 2^16 events (ARMv8.1+).
	 */
	unsigned long *pmu_filter;
	struct arm_pmu *arm_pmu;

	cpumask_var_t supported_cpus;

	u8 pfr0_csv2;
	u8 pfr0_csv3;

	/* Hypercall features firmware registers' descriptor */
	struct kvm_smccc_features smccc_feat;
};

struct kvm_vcpu_fault_info {
	u64 esr_el2;	/* Hyp Syndrome Register */
	u64 far_el2;	/* Hyp Fault Address Register */
	u64 hpfar_el2;	/* Hyp IPA Fault Address Register */
	u64 disr_el1;	/* Deferred [SError] Status Register */
};
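
/*
 * Illustrative sketch: the exception class of a recorded fault can be
 * extracted with the ESR_ELx_EC() helper from <asm/esr.h>, e.g.:
 *
 *	u8 ec = ESR_ELx_EC(vcpu->arch.fault.esr_el2);
 */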

enum vcpu_sysreg {
	__INVALID_SYSREG__,	/* 0 is reserved as an invalid value */
	MPIDR_EL1,	/* MultiProcessor Affinity Register */
	CSSELR_EL1,	/* Cache Size Selection Register */
	SCTLR_EL1,	/* System Control Register */
	ACTLR_EL1,	/* Auxiliary Control Register */
	CPACR_EL1,	/* Coprocessor Access Control */
	ZCR_EL1,	/* SVE Control */
	TTBR0_EL1,	/* Translation Table Base Register 0 */
	TTBR1_EL1,	/* Translation Table Base Register 1 */
	TCR_EL1,	/* Translation Control Register */
	ESR_EL1,	/* Exception Syndrome Register */
	AFSR0_EL1,	/* Auxiliary Fault Status Register 0 */
	AFSR1_EL1,	/* Auxiliary Fault Status Register 1 */
	FAR_EL1,	/* Fault Address Register */
	MAIR_EL1,	/* Memory Attribute Indirection Register */
	VBAR_EL1,	/* Vector Base Address Register */
	CONTEXTIDR_EL1,	/* Context ID Register */
	TPIDR_EL0,	/* Thread ID, User R/W */
	TPIDRRO_EL0,	/* Thread ID, User R/O */
	TPIDR_EL1,	/* Thread ID, Privileged */
	AMAIR_EL1,	/* Aux Memory Attribute Indirection Register */
	CNTKCTL_EL1,	/* Timer Control Register (EL1) */
	PAR_EL1,	/* Physical Address Register */
	MDSCR_EL1,	/* Monitor Debug System Control Register */
	MDCCINT_EL1,	/* Monitor Debug Comms Channel Interrupt Enable Reg */
	OSLSR_EL1,	/* OS Lock Status Register */
	DISR_EL1,	/* Deferred Interrupt Status Register */

	/* Performance Monitors Registers */
	PMCR_EL0,	/* Control Register */
	PMSELR_EL0,	/* Event Counter Selection Register */
	PMEVCNTR0_EL0,	/* Event Counter Register (0-30) */
	PMEVCNTR30_EL0 = PMEVCNTR0_EL0 + 30,
	PMCCNTR_EL0,	/* Cycle Counter Register */
	PMEVTYPER0_EL0,	/* Event Type Register (0-30) */
	PMEVTYPER30_EL0 = PMEVTYPER0_EL0 + 30,
	PMCCFILTR_EL0,	/* Cycle Count Filter Register */
	PMCNTENSET_EL0,	/* Count Enable Set Register */
	PMINTENSET_EL1,	/* Interrupt Enable Set Register */
	PMOVSSET_EL0,	/* Overflow Flag Status Set Register */
	PMUSERENR_EL0,	/* User Enable Register */

	/* Pointer Authentication Registers in a strict increasing order. */
	APIAKEYLO_EL1,
	APIAKEYHI_EL1,
	APIBKEYLO_EL1,
	APIBKEYHI_EL1,
	APDAKEYLO_EL1,
	APDAKEYHI_EL1,
	APDBKEYLO_EL1,
	APDBKEYHI_EL1,
	APGAKEYLO_EL1,
	APGAKEYHI_EL1,

	ELR_EL1,
	SP_EL1,
	SPSR_EL1,

	CNTVOFF_EL2,
	CNTV_CVAL_EL0,
	CNTV_CTL_EL0,
	CNTP_CVAL_EL0,
	CNTP_CTL_EL0,

	/* Memory Tagging Extension registers */
	RGSR_EL1,	/* Random Allocation Tag Seed Register */
	GCR_EL1,	/* Tag Control Register */
	TFSR_EL1,	/* Tag Fault Status Register (EL1) */
	TFSRE0_EL1,	/* Tag Fault Status Register (EL0) */

	/* 32bit specific registers. Keep them at the end of the range */
	DACR32_EL2,	/* Domain Access Control Register */
	IFSR32_EL2,	/* Instruction Fault Status Register */
	FPEXC32_EL2,	/* Floating-Point Exception Control Register */
	DBGVCR32_EL2,	/* Debug Vector Catch Register */

	NR_SYS_REGS	/* Nothing after this line! */
};

struct kvm_cpu_context {
	struct user_pt_regs regs;	/* sp = sp_el0 */

	u64	spsr_abt;
	u64	spsr_und;
	u64	spsr_irq;
	u64	spsr_fiq;

	struct user_fpsimd_state fp_regs;

	u64 sys_regs[NR_SYS_REGS];

	struct kvm_vcpu *__hyp_running_vcpu;
};

struct kvm_host_data {
	struct kvm_cpu_context host_ctxt;
};

struct kvm_host_psci_config {
	/* PSCI version used by host. */
	u32 version;

	/* Function IDs used by host if version is v0.1. */
	struct psci_0_1_function_ids function_ids_0_1;

	bool psci_0_1_cpu_suspend_implemented;
	bool psci_0_1_cpu_on_implemented;
	bool psci_0_1_cpu_off_implemented;
	bool psci_0_1_migrate_implemented;
};

extern struct kvm_host_psci_config kvm_nvhe_sym(kvm_host_psci_config);
#define kvm_host_psci_config CHOOSE_NVHE_SYM(kvm_host_psci_config)

extern s64 kvm_nvhe_sym(hyp_physvirt_offset);
#define hyp_physvirt_offset CHOOSE_NVHE_SYM(hyp_physvirt_offset)

extern u64 kvm_nvhe_sym(hyp_cpu_logical_map)[NR_CPUS];
#define hyp_cpu_logical_map CHOOSE_NVHE_SYM(hyp_cpu_logical_map)

struct vcpu_reset_state {
	unsigned long	pc;
	unsigned long	r0;
	bool		be;
	bool		reset;
};

struct kvm_vcpu_arch {
	struct kvm_cpu_context ctxt;

	/* Guest floating point state */
	void *sve_state;
	unsigned int sve_max_vl;
	u64 svcr;

	/* Stage 2 paging state used by the hardware on next switch */
	struct kvm_s2_mmu *hw_mmu;

	/* Values of trap registers for the guest. */
	u64 hcr_el2;
	u64 mdcr_el2;
	u64 cptr_el2;

	/* Values of trap registers for the host before guest entry. */
	u64 mdcr_el2_host;

	/* Exception Information */
	struct kvm_vcpu_fault_info fault;

	/* Miscellaneous vcpu state flags */
	u64 flags;

	/*
	 * We maintain more than a single set of debug registers to support
	 * debugging the guest from the host and to maintain separate host and
	 * guest state during world switches. vcpu_debug_state are the debug
	 * registers of the vcpu as the guest sees them. host_debug_state are
	 * the host registers which are saved and restored during
	 * world switches. external_debug_state contains the debug values
	 * with which userspace wants to debug the guest; it is set via the
	 * KVM_SET_GUEST_DEBUG ioctl.
	 *
	 * debug_ptr points to the set of debug registers that should be loaded
	 * onto the hardware when running the guest.
	 */
	struct kvm_guest_debug_arch *debug_ptr;
	struct kvm_guest_debug_arch vcpu_debug_state;
	struct kvm_guest_debug_arch external_debug_state;

	struct user_fpsimd_state *host_fpsimd_state;	/* hyp VA */
	struct task_struct *parent_task;

	struct {
		/* {Break,watch}point registers */
		struct kvm_guest_debug_arch regs;
		/* Statistical profiling extension */
		u64 pmscr_el1;
		/* Self-hosted trace */
		u64 trfcr_el1;
	} host_debug_state;

	/* VGIC state */
	struct vgic_cpu vgic_cpu;
	struct arch_timer_cpu timer_cpu;
	struct kvm_pmu pmu;

	/*
	 * Guest registers we preserve during guest debugging.
	 *
	 * These shadow registers are updated by the kvm_handle_sys_reg
	 * trap handler if the guest accesses or updates them while we
	 * are using guest debug.
	 */
	struct {
		u32	mdscr_el1;
	} guest_debug_preserved;

	/* vcpu power state */
	struct kvm_mp_state mp_state;

	/* Don't run the guest (internal implementation need) */
	bool pause;

	/* Cache some mmu pages needed inside spinlock regions */
	struct kvm_mmu_memory_cache mmu_page_cache;

	/* Target CPU and feature flags */
	int target;
	DECLARE_BITMAP(features, KVM_VCPU_MAX_FEATURES);

	/* Virtual SError ESR to restore when HCR_EL2.VSE is set */
	u64 vsesr_el2;

	/* Additional reset state */
	struct vcpu_reset_state	reset_state;

	/*
	 * True when deferrable sysregs are loaded on the physical CPU,
	 * see kvm_vcpu_load_sysregs_vhe and kvm_vcpu_put_sysregs_vhe.
	 */
	bool sysregs_loaded_on_cpu;

	/* Guest PV state */
	struct {
		u64 last_steal;
		gpa_t base;
	} steal;
};

/* Pointer to the vcpu's SVE FFR for sve_{save,load}_state() */
#define vcpu_sve_pffr(vcpu) (kern_hyp_va((vcpu)->arch.sve_state) +	\
			     sve_ffr_offset((vcpu)->arch.sve_max_vl))

#define vcpu_sve_max_vq(vcpu)	sve_vq_from_vl((vcpu)->arch.sve_max_vl)

#define vcpu_sve_state_size(vcpu) ({					\
	size_t __size_ret;						\
	unsigned int __vcpu_vq;						\
									\
	if (WARN_ON(!sve_vl_valid((vcpu)->arch.sve_max_vl))) {		\
		__size_ret = 0;						\
	} else {							\
		__vcpu_vq = vcpu_sve_max_vq(vcpu);			\
		__size_ret = SVE_SIG_REGS_SIZE(__vcpu_vq);		\
	}								\
									\
	__size_ret;							\
})
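
/*
 * Illustrative sketch: this size is what the vCPU finalization path
 * uses when allocating the sve_state backing buffer, roughly:
 *
 *	vcpu->arch.sve_state = kzalloc(vcpu_sve_state_size(vcpu),
 *				       GFP_KERNEL_ACCOUNT);
 *
 * (The exact allocation flags here are an assumption; see the vCPU
 * reset/finalize code for the real thing.)
 */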

/* vcpu_arch flags field values: */
#define KVM_ARM64_DEBUG_DIRTY		(1 << 0)
#define KVM_ARM64_FP_ENABLED		(1 << 1) /* guest FP regs loaded */
#define KVM_ARM64_FP_HOST		(1 << 2) /* host FP regs loaded */
#define KVM_ARM64_HOST_SVE_ENABLED	(1 << 4) /* SVE enabled for EL0 */
#define KVM_ARM64_GUEST_HAS_SVE		(1 << 5) /* SVE exposed to guest */
#define KVM_ARM64_VCPU_SVE_FINALIZED	(1 << 6) /* SVE config completed */
#define KVM_ARM64_GUEST_HAS_PTRAUTH	(1 << 7) /* PTRAUTH exposed to guest */
#define KVM_ARM64_PENDING_EXCEPTION	(1 << 8) /* Exception pending */
/*
 * Overlaps with KVM_ARM64_EXCEPT_MASK on purpose so that it can't be
 * set together with an exception...
 */
#define KVM_ARM64_INCREMENT_PC		(1 << 9) /* Increment PC */
#define KVM_ARM64_EXCEPT_MASK		(7 << 9) /* Target EL/MODE */
/*
 * When KVM_ARM64_PENDING_EXCEPTION is set, KVM_ARM64_EXCEPT_MASK can
 * take the following values:
 *
 * For AArch32 EL1:
 */
#define KVM_ARM64_EXCEPT_AA32_UND	(0 << 9)
#define KVM_ARM64_EXCEPT_AA32_IABT	(1 << 9)
#define KVM_ARM64_EXCEPT_AA32_DABT	(2 << 9)
/* For AArch64: */
#define KVM_ARM64_EXCEPT_AA64_ELx_SYNC	(0 << 9)
#define KVM_ARM64_EXCEPT_AA64_ELx_IRQ	(1 << 9)
#define KVM_ARM64_EXCEPT_AA64_ELx_FIQ	(2 << 9)
#define KVM_ARM64_EXCEPT_AA64_ELx_SERR	(3 << 9)
#define KVM_ARM64_EXCEPT_AA64_EL1	(0 << 11)
#define KVM_ARM64_EXCEPT_AA64_EL2	(1 << 11)
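
/*
 * Illustrative sketch: pending a synchronous exception for the guest's
 * EL1 combines the flags above, along the lines of what the fault
 * injection code does:
 *
 *	vcpu->arch.flags |= (KVM_ARM64_EXCEPT_AA64_EL1      |
 *			     KVM_ARM64_EXCEPT_AA64_ELx_SYNC |
 *			     KVM_ARM64_PENDING_EXCEPTION);
 */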

#define KVM_ARM64_DEBUG_STATE_SAVE_SPE	(1 << 12) /* Save SPE context if active */
#define KVM_ARM64_DEBUG_STATE_SAVE_TRBE	(1 << 13) /* Save TRBE context if active */
#define KVM_ARM64_FP_FOREIGN_FPSTATE	(1 << 14)
#define KVM_ARM64_ON_UNSUPPORTED_CPU	(1 << 15) /* Physical CPU not in supported_cpus */
#define KVM_ARM64_HOST_SME_ENABLED	(1 << 16) /* SME enabled for EL0 */
#define KVM_ARM64_WFIT			(1 << 17) /* WFIT instruction trapped */

#define KVM_GUESTDBG_VALID_MASK (KVM_GUESTDBG_ENABLE | \
				 KVM_GUESTDBG_USE_SW_BP | \
				 KVM_GUESTDBG_USE_HW | \
				 KVM_GUESTDBG_SINGLESTEP)

#define vcpu_has_sve(vcpu) (system_supports_sve() &&			\
			    ((vcpu)->arch.flags & KVM_ARM64_GUEST_HAS_SVE))

#ifdef CONFIG_ARM64_PTR_AUTH
#define vcpu_has_ptrauth(vcpu)						\
	((cpus_have_final_cap(ARM64_HAS_ADDRESS_AUTH) ||		\
	  cpus_have_final_cap(ARM64_HAS_GENERIC_AUTH)) &&		\
	 (vcpu)->arch.flags & KVM_ARM64_GUEST_HAS_PTRAUTH)
#else
#define vcpu_has_ptrauth(vcpu)		false
#endif

#define vcpu_on_unsupported_cpu(vcpu)					\
	((vcpu)->arch.flags & KVM_ARM64_ON_UNSUPPORTED_CPU)

#define vcpu_set_on_unsupported_cpu(vcpu)				\
	((vcpu)->arch.flags |= KVM_ARM64_ON_UNSUPPORTED_CPU)

#define vcpu_clear_on_unsupported_cpu(vcpu)				\
	((vcpu)->arch.flags &= ~KVM_ARM64_ON_UNSUPPORTED_CPU)

#define vcpu_gp_regs(v)		(&(v)->arch.ctxt.regs)

/*
 * Only use __vcpu_sys_reg/ctxt_sys_reg if you know you want the
 * memory backed version of a register, and not the one most recently
 * accessed by a running VCPU. For example, for userspace access or
 * for system registers that are never context switched, but only
 * emulated.
 */
#define __ctxt_sys_reg(c,r)	(&(c)->sys_regs[(r)])

#define ctxt_sys_reg(c,r)	(*__ctxt_sys_reg(c,r))

#define __vcpu_sys_reg(v,r)	(ctxt_sys_reg(&(v)->arch.ctxt, (r)))
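
/*
 * Illustrative sketch: userspace register accessors operate on the
 * memory-backed copy, e.g.:
 *
 *	u64 val = __vcpu_sys_reg(vcpu, MPIDR_EL1);
 *
 * whereas emulation on behalf of a running vCPU should go through
 * vcpu_read_sys_reg()/vcpu_write_sys_reg() below, which prefer the
 * value currently loaded on the CPU when it is valid.
 */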

u64 vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, int reg);
void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg);

static inline bool __vcpu_read_sys_reg_from_cpu(int reg, u64 *val)
{
	/*
	 * *** VHE ONLY ***
	 *
	 * System registers listed in the switch are not saved on every
	 * exit from the guest but are only saved on vcpu_put.
	 *
	 * Note that MPIDR_EL1 for the guest is set by KVM via VMPIDR_EL2 but
	 * should never be listed below, because the guest cannot modify its
	 * own MPIDR_EL1 and MPIDR_EL1 is accessed for VCPU A from VCPU B's
	 * thread when emulating cross-VCPU communication.
	 */
	if (!has_vhe())
		return false;

	switch (reg) {
	case CSSELR_EL1:	*val = read_sysreg_s(SYS_CSSELR_EL1);	break;
	case SCTLR_EL1:		*val = read_sysreg_s(SYS_SCTLR_EL12);	break;
	case CPACR_EL1:		*val = read_sysreg_s(SYS_CPACR_EL12);	break;
	case TTBR0_EL1:		*val = read_sysreg_s(SYS_TTBR0_EL12);	break;
	case TTBR1_EL1:		*val = read_sysreg_s(SYS_TTBR1_EL12);	break;
	case TCR_EL1:		*val = read_sysreg_s(SYS_TCR_EL12);	break;
	case ESR_EL1:		*val = read_sysreg_s(SYS_ESR_EL12);	break;
	case AFSR0_EL1:		*val = read_sysreg_s(SYS_AFSR0_EL12);	break;
	case AFSR1_EL1:		*val = read_sysreg_s(SYS_AFSR1_EL12);	break;
	case FAR_EL1:		*val = read_sysreg_s(SYS_FAR_EL12);	break;
	case MAIR_EL1:		*val = read_sysreg_s(SYS_MAIR_EL12);	break;
	case VBAR_EL1:		*val = read_sysreg_s(SYS_VBAR_EL12);	break;
	case CONTEXTIDR_EL1:	*val = read_sysreg_s(SYS_CONTEXTIDR_EL12); break;
	case TPIDR_EL0:		*val = read_sysreg_s(SYS_TPIDR_EL0);	break;
	case TPIDRRO_EL0:	*val = read_sysreg_s(SYS_TPIDRRO_EL0);	break;
	case TPIDR_EL1:		*val = read_sysreg_s(SYS_TPIDR_EL1);	break;
	case AMAIR_EL1:		*val = read_sysreg_s(SYS_AMAIR_EL12);	break;
	case CNTKCTL_EL1:	*val = read_sysreg_s(SYS_CNTKCTL_EL12);	break;
	case ELR_EL1:		*val = read_sysreg_s(SYS_ELR_EL12);	break;
	case PAR_EL1:		*val = read_sysreg_par();		break;
	case DACR32_EL2:	*val = read_sysreg_s(SYS_DACR32_EL2);	break;
	case IFSR32_EL2:	*val = read_sysreg_s(SYS_IFSR32_EL2);	break;
	case DBGVCR32_EL2:	*val = read_sysreg_s(SYS_DBGVCR32_EL2);	break;
	default:		return false;
	}

	return true;
}
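
/*
 * Illustrative sketch of a typical caller: vcpu_read_sys_reg() can try
 * the live CPU copy first and fall back to the memory-backed value,
 * along these lines (see sys_regs.c for the real implementation):
 *
 *	u64 val;
 *
 *	if (vcpu->arch.sysregs_loaded_on_cpu &&
 *	    __vcpu_read_sys_reg_from_cpu(reg, &val))
 *		return val;
 *
 *	return __vcpu_sys_reg(vcpu, reg);
 */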

static inline bool __vcpu_write_sys_reg_to_cpu(u64 val, int reg)
{
	/*
	 * *** VHE ONLY ***
	 *
	 * System registers listed in the switch are not restored on every
	 * entry to the guest but are only restored on vcpu_load.
	 *
	 * Note that MPIDR_EL1 for the guest is set by KVM via VMPIDR_EL2 but
	 * should never be listed below, because the MPIDR should only be set
	 * once, before running the VCPU, and never changed later.
	 */
	if (!has_vhe())
		return false;

	switch (reg) {
	case CSSELR_EL1:	write_sysreg_s(val, SYS_CSSELR_EL1);	break;
	case SCTLR_EL1:		write_sysreg_s(val, SYS_SCTLR_EL12);	break;
	case CPACR_EL1:		write_sysreg_s(val, SYS_CPACR_EL12);	break;
	case TTBR0_EL1:		write_sysreg_s(val, SYS_TTBR0_EL12);	break;
	case TTBR1_EL1:		write_sysreg_s(val, SYS_TTBR1_EL12);	break;
	case TCR_EL1:		write_sysreg_s(val, SYS_TCR_EL12);	break;
	case ESR_EL1:		write_sysreg_s(val, SYS_ESR_EL12);	break;
	case AFSR0_EL1:		write_sysreg_s(val, SYS_AFSR0_EL12);	break;
	case AFSR1_EL1:		write_sysreg_s(val, SYS_AFSR1_EL12);	break;
	case FAR_EL1:		write_sysreg_s(val, SYS_FAR_EL12);	break;
	case MAIR_EL1:		write_sysreg_s(val, SYS_MAIR_EL12);	break;
	case VBAR_EL1:		write_sysreg_s(val, SYS_VBAR_EL12);	break;
	case CONTEXTIDR_EL1:	write_sysreg_s(val, SYS_CONTEXTIDR_EL12); break;
	case TPIDR_EL0:		write_sysreg_s(val, SYS_TPIDR_EL0);	break;
	case TPIDRRO_EL0:	write_sysreg_s(val, SYS_TPIDRRO_EL0);	break;
	case TPIDR_EL1:		write_sysreg_s(val, SYS_TPIDR_EL1);	break;
	case AMAIR_EL1:		write_sysreg_s(val, SYS_AMAIR_EL12);	break;
	case CNTKCTL_EL1:	write_sysreg_s(val, SYS_CNTKCTL_EL12);	break;
	case ELR_EL1:		write_sysreg_s(val, SYS_ELR_EL12);	break;
	case PAR_EL1:		write_sysreg_s(val, SYS_PAR_EL1);	break;
	case DACR32_EL2:	write_sysreg_s(val, SYS_DACR32_EL2);	break;
	case IFSR32_EL2:	write_sysreg_s(val, SYS_IFSR32_EL2);	break;
	case DBGVCR32_EL2:	write_sysreg_s(val, SYS_DBGVCR32_EL2);	break;
	default:		return false;
	}

	return true;
}

struct kvm_vm_stat {
	struct kvm_vm_stat_generic generic;
};

struct kvm_vcpu_stat {
	struct kvm_vcpu_stat_generic generic;
	u64 hvc_exit_stat;
	u64 wfe_exit_stat;
	u64 wfi_exit_stat;
	u64 mmio_exit_user;
	u64 mmio_exit_kernel;
	u64 signal_exits;
	u64 exits;
};

void kvm_vcpu_preferred_target(struct kvm_vcpu_init *init);
unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);

unsigned long kvm_arm_num_sys_reg_descs(struct kvm_vcpu *vcpu);
int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices);
int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);

int __kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
			      struct kvm_vcpu_events *events);

int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
			      struct kvm_vcpu_events *events);

#define KVM_ARCH_WANT_MMU_NOTIFIER

void kvm_arm_halt_guest(struct kvm *kvm);
void kvm_arm_resume_guest(struct kvm *kvm);

#define vcpu_has_run_once(vcpu)	!!rcu_access_pointer((vcpu)->pid)

#ifndef __KVM_NVHE_HYPERVISOR__
#define kvm_call_hyp_nvhe(f, ...)					\
	({								\
		struct arm_smccc_res res;				\
									\
		arm_smccc_1_1_hvc(KVM_HOST_SMCCC_FUNC(f),		\
				  ##__VA_ARGS__, &res);			\
		WARN_ON(res.a0 != SMCCC_RET_SUCCESS);			\
									\
		res.a1;							\
	})

/*
 * The couple of isb() below are there to guarantee the same behaviour
 * on VHE as on !VHE, where the eret to EL1 acts as a context
 * synchronization event.
 */
#define kvm_call_hyp(f, ...)						\
	do {								\
		if (has_vhe()) {					\
			f(__VA_ARGS__);					\
			isb();						\
		} else {						\
			kvm_call_hyp_nvhe(f, ##__VA_ARGS__);		\
		}							\
	} while(0)

#define kvm_call_hyp_ret(f, ...)					\
	({								\
		typeof(f(__VA_ARGS__)) ret;				\
									\
		if (has_vhe()) {					\
			ret = f(__VA_ARGS__);				\
			isb();						\
		} else {						\
			ret = kvm_call_hyp_nvhe(f, ##__VA_ARGS__);	\
		}							\
									\
		ret;							\
	})
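
/*
 * Illustrative sketch: hyp functions are invoked the same way on both
 * VHE and nVHE, e.g.:
 *
 *	kvm_call_hyp(__kvm_flush_vm_context);
 *	mdcr = kvm_call_hyp_ret(__kvm_get_mdcr_el2);
 *
 * (Both __kvm_* symbols are declared in <asm/kvm_asm.h>.)
 */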
#else /* __KVM_NVHE_HYPERVISOR__ */
#define kvm_call_hyp(f, ...) f(__VA_ARGS__)
#define kvm_call_hyp_ret(f, ...) f(__VA_ARGS__)
#define kvm_call_hyp_nvhe(f, ...) f(__VA_ARGS__)
#endif /* __KVM_NVHE_HYPERVISOR__ */

void force_vm_exit(const cpumask_t *mask);

int handle_exit(struct kvm_vcpu *vcpu, int exception_index);
void handle_exit_early(struct kvm_vcpu *vcpu, int exception_index);

int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu);
int kvm_handle_cp14_32(struct kvm_vcpu *vcpu);
int kvm_handle_cp14_64(struct kvm_vcpu *vcpu);
int kvm_handle_cp15_32(struct kvm_vcpu *vcpu);
int kvm_handle_cp15_64(struct kvm_vcpu *vcpu);
int kvm_handle_sys_reg(struct kvm_vcpu *vcpu);
int kvm_handle_cp10_id(struct kvm_vcpu *vcpu);

void kvm_reset_sys_regs(struct kvm_vcpu *vcpu);

int kvm_sys_reg_table_init(void);

/* MMIO helpers */
void kvm_mmio_write_buf(void *buf, unsigned int len, unsigned long data);
unsigned long kvm_mmio_read_buf(const void *buf, unsigned int len);

int kvm_handle_mmio_return(struct kvm_vcpu *vcpu);
int io_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa);

/*
 * Returns true if a Performance Monitoring Interrupt (PMI), a.k.a. perf event,
 * arrived in guest context. For arm64, any event that arrives while a vCPU is
 * loaded is considered to be "in guest".
 */
static inline bool kvm_arch_pmi_in_guest(struct kvm_vcpu *vcpu)
{
	return IS_ENABLED(CONFIG_GUEST_PERF_EVENTS) && !!vcpu;
}

long kvm_hypercall_pv_features(struct kvm_vcpu *vcpu);
gpa_t kvm_init_stolen_time(struct kvm_vcpu *vcpu);
void kvm_update_stolen_time(struct kvm_vcpu *vcpu);

bool kvm_arm_pvtime_supported(void);
int kvm_arm_pvtime_set_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);
int kvm_arm_pvtime_get_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);
int kvm_arm_pvtime_has_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);

extern unsigned int kvm_arm_vmid_bits;
int kvm_arm_vmid_alloc_init(void);
void kvm_arm_vmid_alloc_free(void);
void kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid);
void kvm_arm_vmid_clear_active(void);

static inline void kvm_arm_pvtime_vcpu_init(struct kvm_vcpu_arch *vcpu_arch)
{
	vcpu_arch->steal.base = GPA_INVALID;
}

static inline bool kvm_arm_is_pvtime_enabled(struct kvm_vcpu_arch *vcpu_arch)
{
	return (vcpu_arch->steal.base != GPA_INVALID);
}
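
/*
 * Illustrative sketch: stolen time stays disabled until userspace sets
 * the PV time base address via the vCPU device attribute; from a VMM
 * this might look like (local variable names are made up):
 *
 *	struct kvm_device_attr attr = {
 *		.group	= KVM_ARM_VCPU_PVTIME_CTRL,
 *		.attr	= KVM_ARM_VCPU_PVTIME_IPA,
 *		.addr	= (u64)&ipa,
 *	};
 *	ioctl(vcpu_fd, KVM_SET_DEVICE_ATTR, &attr);
 */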

void kvm_set_sei_esr(struct kvm_vcpu *vcpu, u64 syndrome);

struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);

DECLARE_KVM_HYP_PER_CPU(struct kvm_host_data, kvm_host_data);

static inline void kvm_init_host_cpu_context(struct kvm_cpu_context *cpu_ctxt)
{
	/* The host's MPIDR is immutable, so let's set it up at boot time */
	ctxt_sys_reg(cpu_ctxt, MPIDR_EL1) = read_cpuid_mpidr();
}

static inline bool kvm_system_needs_idmapped_vectors(void)
{
	return cpus_have_const_cap(ARM64_SPECTRE_V3A);
}

void kvm_arm_vcpu_ptrauth_trap(struct kvm_vcpu *vcpu);

static inline void kvm_arch_hardware_unsetup(void) {}
static inline void kvm_arch_sync_events(struct kvm *kvm) {}
static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}

void kvm_arm_init_debug(void);
void kvm_arm_vcpu_init_debug(struct kvm_vcpu *vcpu);
void kvm_arm_setup_debug(struct kvm_vcpu *vcpu);
void kvm_arm_clear_debug(struct kvm_vcpu *vcpu);
void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu);

#define kvm_vcpu_os_lock_enabled(vcpu)		\
	(!!(__vcpu_sys_reg(vcpu, OSLSR_EL1) & SYS_OSLSR_OSLK))

int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr);
int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr);
int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
			       struct kvm_device_attr *attr);

long kvm_vm_ioctl_mte_copy_tags(struct kvm *kvm,
				struct kvm_arm_copy_mte_tags *copy_tags);

/* Guest/host FPSIMD coordination helpers */
int kvm_arch_vcpu_run_map_fp(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_ctxflush_fp(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu);
void kvm_vcpu_unshare_task_fp(struct kvm_vcpu *vcpu);

static inline bool kvm_pmu_counter_deferred(struct perf_event_attr *attr)
{
	return (!has_vhe() && attr->exclude_host);
}

/* Flags for host debug state */
void kvm_arch_vcpu_load_debug_state_flags(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_put_debug_state_flags(struct kvm_vcpu *vcpu);

#ifdef CONFIG_KVM
void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr);
void kvm_clr_pmu_events(u32 clr);
#else
static inline void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr) {}
static inline void kvm_clr_pmu_events(u32 clr) {}
#endif

void kvm_vcpu_load_sysregs_vhe(struct kvm_vcpu *vcpu);
void kvm_vcpu_put_sysregs_vhe(struct kvm_vcpu *vcpu);

int kvm_set_ipa_limit(void);

#define __KVM_HAVE_ARCH_VM_ALLOC
struct kvm *kvm_arch_alloc_vm(void);

int kvm_arm_setup_stage2(struct kvm *kvm, unsigned long type);

static inline bool kvm_vm_is_protected(struct kvm *kvm)
{
	return false;
}

void kvm_init_protected_traps(struct kvm_vcpu *vcpu);

int kvm_arm_vcpu_finalize(struct kvm_vcpu *vcpu, int feature);
bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu);

#define kvm_arm_vcpu_sve_finalized(vcpu) \
	((vcpu)->arch.flags & KVM_ARM64_VCPU_SVE_FINALIZED)

#define kvm_has_mte(kvm)					\
	(system_supports_mte() &&				\
	 test_bit(KVM_ARCH_FLAG_MTE_ENABLED, &(kvm)->arch.flags))

#define kvm_supports_32bit_el0()				\
	(system_supports_32bit_el0() &&				\
	 !static_branch_unlikely(&arm64_mismatched_32bit_el0))
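
/*
 * Illustrative sketch: a gate like the above is typically consulted
 * before honouring the corresponding vCPU feature, e.g. rejecting
 * KVM_ARM_VCPU_EL1_32BIT when 32-bit EL0 support is absent or
 * asymmetric (see the vCPU reset code for the real check):
 *
 *	if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features) &&
 *	    !kvm_supports_32bit_el0())
 *		return -EINVAL;
 */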

int kvm_trng_call(struct kvm_vcpu *vcpu);
#ifdef CONFIG_KVM
extern phys_addr_t hyp_mem_base;
extern phys_addr_t hyp_mem_size;
void __init kvm_hyp_reserve(void);
#else
static inline void kvm_hyp_reserve(void) { }
#endif

void kvm_arm_vcpu_power_off(struct kvm_vcpu *vcpu);
bool kvm_arm_vcpu_stopped(struct kvm_vcpu *vcpu);

#endif /* __ARM64_KVM_HOST_H__ */