/linux-6.1.9/arch/arm64/kvm/hyp/nvhe/

hyp-main.c
     22  void __kvm_hyp_host_forward_smc(struct kvm_cpu_context *host_ctxt);
     24  static void handle___kvm_vcpu_run(struct kvm_cpu_context *host_ctxt)  in handle___kvm_vcpu_run()
     31  static void handle___kvm_adjust_pc(struct kvm_cpu_context *host_ctxt)  in handle___kvm_adjust_pc()
     38  static void handle___kvm_flush_vm_context(struct kvm_cpu_context *host_ctxt)  in handle___kvm_flush_vm_context()
     43  static void handle___kvm_tlb_flush_vmid_ipa(struct kvm_cpu_context *host_ctxt)  in handle___kvm_tlb_flush_vmid_ipa()
     52  static void handle___kvm_tlb_flush_vmid(struct kvm_cpu_context *host_ctxt)  in handle___kvm_tlb_flush_vmid()
     59  static void handle___kvm_flush_cpu_context(struct kvm_cpu_context *host_ctxt)  in handle___kvm_flush_cpu_context()
     66  static void handle___kvm_timer_set_cntvoff(struct kvm_cpu_context *host_ctxt)  in handle___kvm_timer_set_cntvoff()
     71  static void handle___kvm_enable_ssbs(struct kvm_cpu_context *host_ctxt)  in handle___kvm_enable_ssbs()
     80  static void handle___vgic_v3_get_gic_config(struct kvm_cpu_context *host_ctxt)  in handle___vgic_v3_get_gic_config()
    [all …]

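All of these handle___*() hypercall handlers share one calling convention: the host issues an HVC, the hyp entry code saves the host's registers into its kvm_cpu_context, and the handler pulls its arguments out of (and writes its result back into) that saved register file. A sketch of the pattern, paraphrased from handle___kvm_vcpu_run() rather than quoted verbatim (DECLARE_REG()/cpu_reg() are nVHE's accessors for the saved GPRs):

/* Sketch of the nVHE hypercall-handler pattern (paraphrased). */
static void handle___kvm_vcpu_run(struct kvm_cpu_context *host_ctxt)
{
	/* The host passed the vcpu pointer in its x1. */
	DECLARE_REG(struct kvm_vcpu *, vcpu, host_ctxt, 1);

	/* Translate the host kernel pointer to a hyp VA, run the
	 * guest, and hand the exit code back in the host's x1. */
	cpu_reg(host_ctxt, 1) = __kvm_vcpu_run(kern_hyp_va(vcpu));
}
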
psci-relay.c
     20  void __noreturn __host_enter(struct kvm_cpu_context *host_ctxt);
     72  static unsigned long psci_forward(struct kvm_cpu_context *host_ctxt)  in psci_forward()
    107  static int psci_cpu_on(u64 func_id, struct kvm_cpu_context *host_ctxt)  in psci_cpu_on()
    151  static int psci_cpu_suspend(u64 func_id, struct kvm_cpu_context *host_ctxt)  in psci_cpu_suspend()
    179  static int psci_system_suspend(u64 func_id, struct kvm_cpu_context *host_ctxt)  in psci_system_suspend()
    206  struct kvm_cpu_context *host_ctxt;  in kvm_host_psci_cpu_entry()
    224  static unsigned long psci_0_1_handler(u64 func_id, struct kvm_cpu_context *host_ctxt)  in psci_0_1_handler()
    236  static unsigned long psci_0_2_handler(u64 func_id, struct kvm_cpu_context *host_ctxt)  in psci_0_2_handler()
    262  static unsigned long psci_1_0_handler(u64 func_id, struct kvm_cpu_context *host_ctxt)  in psci_1_0_handler()
    276  bool kvm_host_psci_handler(struct kvm_cpu_context *host_ctxt)  in kvm_host_psci_handler()

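psci_forward() shows why every relay routine takes the host context: the host's PSCI arguments are still sitting in its saved x0..x3, so forwarding the call to EL3 is just a matter of re-reading them. Roughly (a sketch; psci_call() is the relay's thin wrapper around the actual SMC):

/* Sketch: relay a host PSCI request to EL3 unchanged. */
static unsigned long psci_forward(struct kvm_cpu_context *host_ctxt)
{
	return psci_call(cpu_reg(host_ctxt, 0), cpu_reg(host_ctxt, 1),
			 cpu_reg(host_ctxt, 2), cpu_reg(host_ctxt, 3));
}
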
sysreg-sr.c
     21  void __sysreg_save_state_nvhe(struct kvm_cpu_context *ctxt)  in __sysreg_save_state_nvhe()
     29  void __sysreg_restore_state_nvhe(struct kvm_cpu_context *ctxt)  in __sysreg_restore_state_nvhe()

switch.c
     34  DEFINE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt);
     59  struct kvm_cpu_context *ctxt = &vcpu->arch.ctxt;  in __activate_traps()
    242  struct kvm_cpu_context *host_ctxt;  in __kvm_vcpu_run()
    243  struct kvm_cpu_context *guest_ctxt;  in __kvm_vcpu_run()
    341  struct kvm_cpu_context *host_ctxt;  in hyp_panic()

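__kvm_vcpu_run() is where the two contexts meet: the host's register state is parked in the per-CPU kvm_host_data host context, the guest's lives in vcpu->arch.ctxt, and __hyp_running_vcpu is the back-pointer that lets hyp_panic() find the vCPU that was live when things went wrong. A heavily condensed sketch of the flow (trap configuration, FP, debug, timer and GIC handling all omitted):

/* Condensed sketch of the nVHE world switch; not the full function. */
int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
{
	struct kvm_cpu_context *host_ctxt;
	struct kvm_cpu_context *guest_ctxt;
	u64 exit_code;

	host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
	host_ctxt->__hyp_running_vcpu = vcpu;
	guest_ctxt = &vcpu->arch.ctxt;

	__sysreg_save_state_nvhe(host_ctxt);
	__sysreg_restore_state_nvhe(guest_ctxt);

	do {
		/* Enter the guest; returns with its exit reason. */
		exit_code = __guest_enter(vcpu);
	} while (fixup_guest_exit(vcpu, &exit_code));

	__sysreg_save_state_nvhe(guest_ctxt);
	__sysreg_restore_state_nvhe(host_ctxt);

	host_ctxt->__hyp_running_vcpu = NULL;
	return exit_code;
}
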
setup.c
    264  struct kvm_cpu_context *host_ctxt = &host_data->host_ctxt;  in __pkvm_init_finalise()

/linux-6.1.9/arch/riscv/include/asm/

kvm_vcpu_fp.h
     15  struct kvm_cpu_context;
     18  void __kvm_riscv_fp_f_save(struct kvm_cpu_context *context);
     19  void __kvm_riscv_fp_f_restore(struct kvm_cpu_context *context);
     20  void __kvm_riscv_fp_d_save(struct kvm_cpu_context *context);
     21  void __kvm_riscv_fp_d_restore(struct kvm_cpu_context *context);
     24  void kvm_riscv_vcpu_guest_fp_save(struct kvm_cpu_context *cntx,
     26  void kvm_riscv_vcpu_guest_fp_restore(struct kvm_cpu_context *cntx,
     28  void kvm_riscv_vcpu_host_fp_save(struct kvm_cpu_context *cntx);
     29  void kvm_riscv_vcpu_host_fp_restore(struct kvm_cpu_context *cntx);
     34  static inline void kvm_riscv_vcpu_guest_fp_save(struct kvm_cpu_context *cntx,  in kvm_riscv_vcpu_guest_fp_save()
    [all …]

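The header declares two layers: raw assembly primitives (__kvm_riscv_fp_{f,d}_save/restore) and the C wrappers around them. The inline definition at line 34 is the tail of the usual CONFIG_FPU split: kernels built without FP support get empty static-inline stubs so callers never need an #ifdef. Schematically (the second parameter is truncated in the listing above; it is reconstructed here as the vCPU's ISA bitmap, per the 6.1 sources):

#ifdef CONFIG_FPU
void kvm_riscv_vcpu_guest_fp_save(struct kvm_cpu_context *cntx,
				  const unsigned long *isa);
#else
/* Without CONFIG_FPU the wrapper compiles away to nothing. */
static inline void kvm_riscv_vcpu_guest_fp_save(struct kvm_cpu_context *cntx,
						const unsigned long *isa)
{
}
#endif
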
kvm_host.h
    110  struct kvm_cpu_context {
    178  struct kvm_cpu_context host_context;
    181  struct kvm_cpu_context guest_context;
    187  struct kvm_cpu_context guest_reset_context;

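riscv thus keeps three full contexts per vCPU: the host's, the guest's, and a pristine reset image. The definition at line 110 is essentially one slot per guest GPR in architectural x1..x31 order, followed by the trap CSRs and the FP state, which is what lets the asm-offsets.c entries below address every field by constant offset. An abridged sketch (fields grouped for brevity, not the verbatim definition; union __riscv_fp_state comes from asm/ptrace.h):

struct kvm_cpu_context {
	/* Guest general-purpose registers, in architectural order. */
	unsigned long zero, ra, sp, gp, tp;
	unsigned long t0, t1, t2;
	unsigned long s0, s1;
	unsigned long a0, a1, a2, a3, a4, a5, a6, a7;
	unsigned long s2, s3, s4, s5, s6, s7, s8, s9, s10, s11;
	unsigned long t3, t4, t5, t6;

	unsigned long sepc;	/* guest PC at the time of the trap */
	unsigned long sstatus;	/* guest supervisor status */
	unsigned long hstatus;	/* hypervisor-status shadow */

	union __riscv_fp_state fp;	/* F/D register file */
};
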
/linux-6.1.9/arch/riscv/kernel/

asm-offsets.c
    203  OFFSET(KVM_ARCH_FP_F_F0, kvm_cpu_context, fp.f.f[0]);  in asm_offsets()
    204  OFFSET(KVM_ARCH_FP_F_F1, kvm_cpu_context, fp.f.f[1]);  in asm_offsets()
    205  OFFSET(KVM_ARCH_FP_F_F2, kvm_cpu_context, fp.f.f[2]);  in asm_offsets()
    206  OFFSET(KVM_ARCH_FP_F_F3, kvm_cpu_context, fp.f.f[3]);  in asm_offsets()
    207  OFFSET(KVM_ARCH_FP_F_F4, kvm_cpu_context, fp.f.f[4]);  in asm_offsets()
    208  OFFSET(KVM_ARCH_FP_F_F5, kvm_cpu_context, fp.f.f[5]);  in asm_offsets()
    209  OFFSET(KVM_ARCH_FP_F_F6, kvm_cpu_context, fp.f.f[6]);  in asm_offsets()
    210  OFFSET(KVM_ARCH_FP_F_F7, kvm_cpu_context, fp.f.f[7]);  in asm_offsets()
    211  OFFSET(KVM_ARCH_FP_F_F8, kvm_cpu_context, fp.f.f[8]);  in asm_offsets()
    212  OFFSET(KVM_ARCH_FP_F_F9, kvm_cpu_context, fp.f.f[9]);  in asm_offsets()
    [all …]

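OFFSET() is the standard asm-offsets trick from include/linux/kbuild.h: each invocation plants a marker string in the compiler-generated assembly, and the build's sed pass turns those markers into #define lines in the generated asm-offsets.h. That is how KVM's FP assembly can save and restore each register by immediate offset without knowing the C layout. The mechanism, roughly:

/* From include/linux/kbuild.h (sketch of the mechanism). */
#define DEFINE(sym, val) \
	asm volatile("\n.ascii \"->" #sym " %0 " #val "\"" : : "i" (val))

#define OFFSET(sym, str, mem) \
	DEFINE(sym, offsetof(struct str, mem))

/*
 * KVM_ARCH_FP_F_F0 thus becomes a build-time constant equal to
 * offsetof(struct kvm_cpu_context, fp.f.f[0]), so the FP assembly
 * can write something like:
 *
 *	fsw f0, KVM_ARCH_FP_F_F0(a0)
 */
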
/linux-6.1.9/arch/arm64/kvm/hyp/vhe/

sysreg-sr.c
     27  void sysreg_save_host_state_vhe(struct kvm_cpu_context *ctxt)  in sysreg_save_host_state_vhe()
     33  void sysreg_save_guest_state_vhe(struct kvm_cpu_context *ctxt)  in sysreg_save_guest_state_vhe()
     40  void sysreg_restore_host_state_vhe(struct kvm_cpu_context *ctxt)  in sysreg_restore_host_state_vhe()
     46  void sysreg_restore_guest_state_vhe(struct kvm_cpu_context *ctxt)  in sysreg_restore_guest_state_vhe()
     66  struct kvm_cpu_context *guest_ctxt = &vcpu->arch.ctxt;  in kvm_vcpu_load_sysregs_vhe()
     67  struct kvm_cpu_context *host_ctxt;  in kvm_vcpu_load_sysregs_vhe()
    100  struct kvm_cpu_context *guest_ctxt = &vcpu->arch.ctxt;  in kvm_vcpu_put_sysregs_vhe()
    101  struct kvm_cpu_context *host_ctxt;  in kvm_vcpu_put_sysregs_vhe()

switch.c
     33  DEFINE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt);
    128  struct kvm_cpu_context *host_ctxt;  in __kvm_vcpu_run_vhe()
    129  struct kvm_cpu_context *guest_ctxt;  in __kvm_vcpu_run_vhe()
    216  struct kvm_cpu_context *host_ctxt;  in __hyp_call_panic()

/linux-6.1.9/arch/arm64/include/asm/

kvm_hyp.h
     15  DECLARE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt);
     74  void __sysreg_save_state_nvhe(struct kvm_cpu_context *ctxt);
     75  void __sysreg_restore_state_nvhe(struct kvm_cpu_context *ctxt);
     77  void sysreg_save_host_state_vhe(struct kvm_cpu_context *ctxt);
     78  void sysreg_restore_host_state_vhe(struct kvm_cpu_context *ctxt);
     79  void sysreg_save_guest_state_vhe(struct kvm_cpu_context *ctxt);
     80  void sysreg_restore_guest_state_vhe(struct kvm_cpu_context *ctxt);
    102  bool kvm_host_psci_handler(struct kvm_cpu_context *host_ctxt);
    105  void __noreturn __hyp_do_panic(struct kvm_cpu_context *host_ctxt, u64 spsr,
    114  void __noreturn __host_enter(struct kvm_cpu_context *host_ctxt);

kvm_host.h
    258  struct kvm_cpu_context {
    274  struct kvm_cpu_context host_ctxt;
    307  struct kvm_cpu_context ctxt;
    849  static inline void kvm_init_host_cpu_context(struct kvm_cpu_context *cpu_ctxt)  in kvm_init_host_cpu_context()

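For comparison with the riscv layout above, the arm64 definition at line 258 wraps the GPRs in user_pt_regs and keeps every shadowed system register in a single sys_regs[] array indexed by enum vcpu_sysreg; __hyp_running_vcpu is the back-pointer used by the hyp panic path. A sketch, abridged from the 6.1 sources:

struct kvm_cpu_context {
	struct user_pt_regs regs;	/* x0..x30, sp (= sp_el0), pc, pstate */

	u64	spsr_abt;
	u64	spsr_und;
	u64	spsr_irq;
	u64	spsr_fiq;

	struct user_fpsimd_state fp_regs;

	u64	sys_regs[NR_SYS_REGS];	/* indexed by enum vcpu_sysreg */

	struct kvm_vcpu *__hyp_running_vcpu;	/* set while the guest runs */
};
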
/linux-6.1.9/arch/riscv/kvm/

vcpu_fp.c
     19  struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;  in kvm_riscv_vcpu_fp_reset()
     29  static void kvm_riscv_vcpu_fp_clean(struct kvm_cpu_context *cntx)  in kvm_riscv_vcpu_fp_clean()
     35  void kvm_riscv_vcpu_guest_fp_save(struct kvm_cpu_context *cntx,  in kvm_riscv_vcpu_guest_fp_save()
     47  void kvm_riscv_vcpu_guest_fp_restore(struct kvm_cpu_context *cntx,  in kvm_riscv_vcpu_guest_fp_restore()
     59  void kvm_riscv_vcpu_host_fp_save(struct kvm_cpu_context *cntx)  in kvm_riscv_vcpu_host_fp_save()
     68  void kvm_riscv_vcpu_host_fp_restore(struct kvm_cpu_context *cntx)  in kvm_riscv_vcpu_host_fp_restore()
     81  struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;  in kvm_riscv_vcpu_get_reg_fp()
    126  struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;  in kvm_riscv_vcpu_set_reg_fp()

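The guest-FP wrappers are lazy: they only dump or reload the register file when the context's sstatus.FS field says the guest actually dirtied it, and they choose the D or F assembly primitive from the vCPU's ISA bitmap. A paraphrased sketch of the save side (SR_FS/SR_FS_DIRTY are from asm/csr.h):

void kvm_riscv_vcpu_guest_fp_save(struct kvm_cpu_context *cntx,
				  const unsigned long *isa)
{
	/* Skip the expensive register-file dump unless the guest
	 * has written FP state since it was last cleaned. */
	if ((cntx->sstatus & SR_FS) == SR_FS_DIRTY) {
		if (riscv_isa_extension_available(isa, d))
			__kvm_riscv_fp_d_save(cntx);
		else if (riscv_isa_extension_available(isa, f))
			__kvm_riscv_fp_f_save(cntx);
		kvm_riscv_vcpu_fp_clean(cntx);
	}
}
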
vcpu_sbi_hsm.c
     18  struct kvm_cpu_context *reset_cntx;  in kvm_sbi_hsm_vcpu_start()
     19  struct kvm_cpu_context *cp = &vcpu->arch.guest_context;  in kvm_sbi_hsm_vcpu_start()
     55  struct kvm_cpu_context *cp = &vcpu->arch.guest_context;  in kvm_sbi_hsm_vcpu_get_status()
     76  struct kvm_cpu_context *cp = &vcpu->arch.guest_context;  in kvm_sbi_ext_hsm_handler()

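This is where guest_reset_context earns its keep: SBI HSM's cpu_start call supplies an entry point and an opaque argument, which are staged into the target vCPU's reset context; a reset request then makes kvm_riscv_reset_vcpu() copy that image into the live guest_context. A sketch of the idea (demo_hsm_start is a hypothetical wrapper, loosely paraphrasing kvm_sbi_hsm_vcpu_start()):

/* Hypothetical wrapper sketching the HSM start path. */
static void demo_hsm_start(struct kvm_vcpu *caller, struct kvm_vcpu *target,
			   unsigned long target_vcpuid)
{
	struct kvm_cpu_context *cp = &caller->arch.guest_context;
	struct kvm_cpu_context *reset_cntx = &target->arch.guest_reset_context;

	reset_cntx->sepc = cp->a1;	/* guest entry point */
	reset_cntx->a0 = target_vcpuid;	/* hartid, per the HSM contract */
	reset_cntx->a1 = cp->a2;	/* opaque value passed through */

	/* The copy into guest_context happens when the target vCPU
	 * processes the reset request. */
	kvm_make_request(KVM_REQ_VCPU_RESET, target);
}
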
vcpu_sbi_replace.c
     22  struct kvm_cpu_context *cp = &vcpu->arch.guest_context;  in kvm_sbi_ext_time_handler()
     51  struct kvm_cpu_context *cp = &vcpu->arch.guest_context;  in kvm_sbi_ext_ipi_handler()
     84  struct kvm_cpu_context *cp = &vcpu->arch.guest_context;  in kvm_sbi_ext_rfence_handler()
    137  struct kvm_cpu_context *cp = &vcpu->arch.guest_context;  in kvm_sbi_ext_srst_handler()

vcpu_sbi.c
     57  struct kvm_cpu_context *cp = &vcpu->arch.guest_context;  in kvm_riscv_vcpu_sbi_forward()
     94  struct kvm_cpu_context *cp = &vcpu->arch.guest_context;  in kvm_riscv_vcpu_sbi_return()
    129  struct kvm_cpu_context *cp = &vcpu->arch.guest_context;  in kvm_riscv_vcpu_sbi_ecall()

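Every SBI handler reads the call out of guest_context because the saved GPRs encode the SBI binary interface directly: a7 carries the extension ID, a6 the function ID, a0..a5 the arguments, and the (error, value) result pair goes back in a0/a1 before sepc is advanced past the trapping ecall. A hypothetical decoder illustrating the conventions (sbi_demo_dispatch is not a kernel function):

/* Hypothetical illustration of the SBI register conventions. */
static void sbi_demo_dispatch(struct kvm_vcpu *vcpu)
{
	struct kvm_cpu_context *cp = &vcpu->arch.guest_context;

	unsigned long ext = cp->a7;	/* SBI extension ID */
	unsigned long fid = cp->a6;	/* function ID within the extension */
	unsigned long arg0 = cp->a0;	/* first argument */

	(void)ext; (void)fid; (void)arg0;	/* ... dispatch here ... */

	cp->a0 = 0;	/* error code: SBI_SUCCESS */
	cp->a1 = 0;	/* return value */
	cp->sepc += 4;	/* resume the guest after the 4-byte ecall */
}
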
vcpu_sbi_base.c
     23  struct kvm_cpu_context *cp = &vcpu->arch.guest_context;  in kvm_sbi_ext_base_handler()

vcpu.c
    113  struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;  in kvm_riscv_reset_vcpu()
    114  struct kvm_cpu_context *reset_cntx = &vcpu->arch.guest_reset_context;  in kvm_riscv_reset_vcpu()
    157  struct kvm_cpu_context *cntx;  in kvm_arch_vcpu_create()
    345  struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;  in kvm_riscv_vcpu_get_reg_core()
    378  struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;  in kvm_riscv_vcpu_set_reg_core()

vcpu_sbi_v01.c
     27  struct kvm_cpu_context *cp = &vcpu->arch.guest_context;  in kvm_sbi_ext_v01_handler()

vcpu_insn.c
    412  struct kvm_cpu_context *ct;  in kvm_riscv_vcpu_virtual_insn()
    458  struct kvm_cpu_context *ct = &vcpu->arch.guest_context;  in kvm_riscv_vcpu_mmio_load()
    584  struct kvm_cpu_context *ct = &vcpu->arch.guest_context;  in kvm_riscv_vcpu_mmio_store()

/linux-6.1.9/arch/arm64/kvm/hyp/include/hyp/

sysreg-sr.h
     19  static inline void __sysreg_save_common_state(struct kvm_cpu_context *ctxt)  in __sysreg_save_common_state()
     24  static inline void __sysreg_save_user_state(struct kvm_cpu_context *ctxt)  in __sysreg_save_user_state()
     30  static inline bool ctxt_has_mte(struct kvm_cpu_context *ctxt)  in ctxt_has_mte()
     40  static inline void __sysreg_save_el1_state(struct kvm_cpu_context *ctxt)  in __sysreg_save_el1_state()
     70  static inline void __sysreg_save_el2_return_state(struct kvm_cpu_context *ctxt)  in __sysreg_save_el2_return_state()
     84  static inline void __sysreg_restore_common_state(struct kvm_cpu_context *ctxt)  in __sysreg_restore_common_state()
     89  static inline void __sysreg_restore_user_state(struct kvm_cpu_context *ctxt)  in __sysreg_restore_user_state()
     95  static inline void __sysreg_restore_el1_state(struct kvm_cpu_context *ctxt)  in __sysreg_restore_el1_state()
    159  static inline void __sysreg_restore_el2_return_state(struct kvm_cpu_context *ctxt)  in __sysreg_restore_el2_return_state()

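Each of these helpers is a straight-line block of ctxt_sys_reg() assignments: read_sysreg() into the context's sys_regs[] slot on save, write_sysreg() back out of it on restore; keeping them in this shared header lets the VHE and nVHE callers above reuse identical bodies. The smallest pair, the EL0 thread-pointer state, shows the whole idiom (quoted from memory, so treat it as a sketch):

static inline void __sysreg_save_user_state(struct kvm_cpu_context *ctxt)
{
	ctxt_sys_reg(ctxt, TPIDR_EL0)	= read_sysreg(tpidr_el0);
	ctxt_sys_reg(ctxt, TPIDRRO_EL0)	= read_sysreg(tpidrro_el0);
}

static inline void __sysreg_restore_user_state(struct kvm_cpu_context *ctxt)
{
	write_sysreg(ctxt_sys_reg(ctxt, TPIDR_EL0),	tpidr_el0);
	write_sysreg(ctxt_sys_reg(ctxt, TPIDRRO_EL0),	tpidrro_el0);
}
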
debug-sr.h
     92  struct kvm_cpu_context *ctxt)  in __debug_save_state()
    110  struct kvm_cpu_context *ctxt)  in __debug_restore_state()
    130  struct kvm_cpu_context *host_ctxt;  in __debug_switch_to_guest_common()
    131  struct kvm_cpu_context *guest_ctxt;  in __debug_switch_to_guest_common()
    149  struct kvm_cpu_context *host_ctxt;  in __debug_switch_to_host_common()
    150  struct kvm_cpu_context *guest_ctxt;  in __debug_switch_to_host_common()

switch.h
    303  DECLARE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt);
    307  struct kvm_cpu_context *ctxt;  in kvm_hyp_handle_ptrauth()

/linux-6.1.9/arch/arm64/kernel/

asm-offsets.c
    115  DEFINE(CPU_USER_PT_REGS, offsetof(struct kvm_cpu_context, regs));  in main()
    116  DEFINE(CPU_RGSR_EL1, offsetof(struct kvm_cpu_context, sys_regs[RGSR_EL1]));  in main()
    117  DEFINE(CPU_GCR_EL1, offsetof(struct kvm_cpu_context, sys_regs[GCR_EL1]));  in main()
    118  DEFINE(CPU_APIAKEYLO_EL1, offsetof(struct kvm_cpu_context, sys_regs[APIAKEYLO_EL1]));  in main()
    119  DEFINE(CPU_APIBKEYLO_EL1, offsetof(struct kvm_cpu_context, sys_regs[APIBKEYLO_EL1]));  in main()
    120  DEFINE(CPU_APDAKEYLO_EL1, offsetof(struct kvm_cpu_context, sys_regs[APDAKEYLO_EL1]));  in main()
    121  DEFINE(CPU_APDBKEYLO_EL1, offsetof(struct kvm_cpu_context, sys_regs[APDBKEYLO_EL1]));  in main()
    122  DEFINE(CPU_APGAKEYLO_EL1, offsetof(struct kvm_cpu_context, sys_regs[APGAKEYLO_EL1]));  in main()
    123  DEFINE(HOST_CONTEXT_VCPU, offsetof(struct kvm_cpu_context, __hyp_running_vcpu));  in main()

/linux-6.1.9/arch/arm64/kvm/hyp/include/nvhe/

mem_protect.h
     64  void handle_host_mem_abort(struct kvm_cpu_context *host_ctxt);
