Searched refs:guest_context (Results 1 – 10 of 10) sorted by relevance
/linux-5.19.10/arch/riscv/kvm/
vcpu_exit.c
  133  utrap.sepc = vcpu->arch.guest_context.sepc;              in truly_illegal_insn()
  148  vcpu->arch.guest_context.sepc += INSN_LEN(insn);         in system_opcode_insn()
  164  ct = &vcpu->arch.guest_context;                          in virtual_inst_fault()
  193  struct kvm_cpu_context *ct = &vcpu->arch.guest_context;  in emulate_load()
  306  struct kvm_cpu_context *ct = &vcpu->arch.guest_context;  in emulate_store()
  332  data = GET_RS2(insn, &vcpu->arch.guest_context);         in emulate_store()
  348  data64 = GET_RS2S(insn, &vcpu->arch.guest_context);      in emulate_store()
  352  data64 = GET_RS2C(insn, &vcpu->arch.guest_context);      in emulate_store()
  356  data32 = GET_RS2S(insn, &vcpu->arch.guest_context);      in emulate_store()
  360  data32 = GET_RS2C(insn, &vcpu->arch.guest_context);      in emulate_store()
  [all …]
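These vcpu_exit.c hits are the trap-emulation path: the trapped guest PC is read out of guest_context.sepc, the faulting instruction is decoded against the saved GPRs (the GET_RS2* accessors), and sepc is advanced past the instruction once it has been emulated. A minimal sketch of that last step, assuming only the INSN_LEN() logic implied by the hits (RISC-V encodes full-width instructions with both low bits set; anything else is a 16-bit compressed instruction):

#include <linux/kvm_host.h>

/* Advance the guest PC past the instruction that just trapped.
 * Sketch only: the real code uses the INSN_LEN() macro seen above;
 * the bit test here is the standard RISC-V instruction-length rule. */
static void skip_guest_insn(struct kvm_vcpu *vcpu, ulong insn)
{
	vcpu->arch.guest_context.sepc += ((insn & 0x3) == 0x3) ? 4 : 2;
}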
vcpu_sbi_replace.c
   22  struct kvm_cpu_context *cp = &vcpu->arch.guest_context;  in kvm_sbi_ext_time_handler()
   51  struct kvm_cpu_context *cp = &vcpu->arch.guest_context;  in kvm_sbi_ext_ipi_handler()
   84  struct kvm_cpu_context *cp = &vcpu->arch.guest_context;  in kvm_sbi_ext_rfence_handler()
  137  struct kvm_cpu_context *cp = &vcpu->arch.guest_context;  in kvm_sbi_ext_srst_handler()
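Every SBI extension handler opens the same way because the SBI calling convention lives entirely in the saved GPRs: a7 carries the extension ID, a6 the function ID, and a0–a5 the arguments. A sketch of how a handler in the spirit of kvm_sbi_ext_time_handler() consumes them; program_guest_timer() is a hypothetical placeholder, not the kernel's API, and the RV32 case (64-bit value split across a1:a0) is glossed over:

#include <linux/kvm_host.h>

static void program_guest_timer(struct kvm_vcpu *vcpu, u64 next_cycle); /* hypothetical */

static int sbi_time_handler_sketch(struct kvm_vcpu *vcpu)
{
	struct kvm_cpu_context *cp = &vcpu->arch.guest_context;

	/* a6 selects the function within the TIME extension (FID 0 is
	 * SET_TIMER in the SBI spec); a0 is the first argument. */
	if (cp->a6 != 0)
		return -EOPNOTSUPP;

	program_guest_timer(vcpu, (u64)cp->a0);
	return 0;
}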
vcpu_sbi.c
   57  struct kvm_cpu_context *cp = &vcpu->arch.guest_context;  in kvm_riscv_vcpu_sbi_forward()
   94  struct kvm_cpu_context *cp = &vcpu->arch.guest_context;  in kvm_riscv_vcpu_sbi_return()
  106  vcpu->arch.guest_context.sepc += 4;                      in kvm_riscv_vcpu_sbi_return()
  129  struct kvm_cpu_context *cp = &vcpu->arch.guest_context;  in kvm_riscv_vcpu_sbi_ecall()
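Note the fixed `sepc += 4` at line 106: unlike the INSN_LEN() case in vcpu_exit.c, an SBI trap is always raised by ECALL, which has no compressed encoding, so the skip width is constant. A sketch of the completion path, with the a0/a1 roles taken from the SBI spec (a0 = error code, a1 = return value); the exact writes beyond sepc are assumptions about the surrounding code:

#include <linux/kvm_host.h>

static void sbi_return_sketch(struct kvm_vcpu *vcpu, long error, long value)
{
	struct kvm_cpu_context *cp = &vcpu->arch.guest_context;

	cp->a0 = error;		/* SBI error code back to the guest */
	cp->a1 = value;		/* SBI return value */
	cp->sepc += 4;		/* ECALL is always 4 bytes wide */
}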
vcpu_sbi_hsm.c
   19  struct kvm_cpu_context *cp = &vcpu->arch.guest_context;  in kvm_sbi_hsm_vcpu_start()
   55  struct kvm_cpu_context *cp = &vcpu->arch.guest_context;  in kvm_sbi_hsm_vcpu_get_status()
   76  struct kvm_cpu_context *cp = &vcpu->arch.guest_context;  in kvm_sbi_ext_hsm_handler()
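HSM (Hart State Management) is the one extension where two guest contexts meet: the caller's context supplies the arguments (a0 = target hartid, a1 = resume address, a2 = opaque value, per the SBI HSM contract) and the target vCPU's context is seeded from them. A sketch of that hand-off; the real code routes it through a reset context rather than writing the live one, so treat this as illustrative:

#include <linux/kvm_host.h>

static void hsm_start_sketch(struct kvm_vcpu *caller, struct kvm_vcpu *target)
{
	struct kvm_cpu_context *cp  = &caller->arch.guest_context;
	struct kvm_cpu_context *tgt = &target->arch.guest_context;

	tgt->sepc = cp->a1;	/* target hart resumes at the given address */
	tgt->a0   = cp->a0;	/* with its own hartid in a0 ... */
	tgt->a1   = cp->a2;	/* ... and the opaque argument in a1 */
}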
vcpu_fp.c
   20  struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;  in kvm_riscv_vcpu_fp_reset()
   82  struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;  in kvm_riscv_vcpu_get_reg_fp()
  128  struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;  in kvm_riscv_vcpu_set_reg_fp()
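guest_context also carries the guest's sstatus image, which is where the FP code keeps the FS (floating-point state) field. A sketch of a reset in the spirit of kvm_riscv_vcpu_fp_reset(), using the kernel's SR_FS* masks from asm/csr.h; whether FS ends up Initial or Off depends on ISA support, which this sketch glosses over:

#include <linux/kvm_host.h>
#include <asm/csr.h>

static void fp_reset_sketch(struct kvm_vcpu *vcpu)
{
	struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;

	cntx->sstatus &= ~SR_FS;		/* clear the FS field ... */
	cntx->sstatus |= SR_FS_INITIAL;		/* ... and mark FP state Initial */
}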
vcpu.c
   56  struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;  in kvm_riscv_reset_vcpu()
  179  return (vcpu->arch.guest_context.sstatus & SR_SPP) ? true : false;  in kvm_arch_vcpu_in_kernel()
  252  struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;  in kvm_riscv_vcpu_get_reg_core()
  285  struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;  in kvm_riscv_vcpu_set_reg_core()
  751  kvm_riscv_vcpu_guest_fp_restore(&vcpu->arch.guest_context,  in kvm_arch_vcpu_load()
  763  kvm_riscv_vcpu_guest_fp_save(&vcpu->arch.guest_context,  in kvm_arch_vcpu_put()
  956  trap.sepc = vcpu->arch.guest_context.sepc;  in kvm_arch_vcpu_ioctl_run()
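The hit at line 179 is worth spelling out: SPP in the saved sstatus records the privilege mode the guest was in when it trapped, so "is the guest in kernel mode" reduces to one bit test on guest_context (the `? true : false` in the original is redundant). An equivalent sketch:

#include <linux/kvm_host.h>
#include <asm/csr.h>

static bool vcpu_in_kernel_sketch(struct kvm_vcpu *vcpu)
{
	/* SR_SPP set => the guest trapped out of S-mode (kernel mode) */
	return vcpu->arch.guest_context.sstatus & SR_SPP;
}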
vcpu_sbi_base.c
   23  struct kvm_cpu_context *cp = &vcpu->arch.guest_context;  in kvm_sbi_ext_base_handler()
vcpu_sbi_v01.c
   27  struct kvm_cpu_context *cp = &vcpu->arch.guest_context;  in kvm_sbi_ext_v01_handler()
/linux-5.19.10/arch/riscv/kernel/
asm-offsets.c
  119  OFFSET(KVM_ARCH_GUEST_ZERO, kvm_vcpu_arch, guest_context.zero);  in asm_offsets()
  120  OFFSET(KVM_ARCH_GUEST_RA, kvm_vcpu_arch, guest_context.ra);  in asm_offsets()
  121  OFFSET(KVM_ARCH_GUEST_SP, kvm_vcpu_arch, guest_context.sp);  in asm_offsets()
  122  OFFSET(KVM_ARCH_GUEST_GP, kvm_vcpu_arch, guest_context.gp);  in asm_offsets()
  123  OFFSET(KVM_ARCH_GUEST_TP, kvm_vcpu_arch, guest_context.tp);  in asm_offsets()
  124  OFFSET(KVM_ARCH_GUEST_T0, kvm_vcpu_arch, guest_context.t0);  in asm_offsets()
  125  OFFSET(KVM_ARCH_GUEST_T1, kvm_vcpu_arch, guest_context.t1);  in asm_offsets()
  126  OFFSET(KVM_ARCH_GUEST_T2, kvm_vcpu_arch, guest_context.t2);  in asm_offsets()
  127  OFFSET(KVM_ARCH_GUEST_S0, kvm_vcpu_arch, guest_context.s0);  in asm_offsets()
  128  OFFSET(KVM_ARCH_GUEST_S1, kvm_vcpu_arch, guest_context.s1);  in asm_offsets()
  [all …]
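These OFFSET() lines exist because the low-level world-switch assembly must save and restore guest_context without any knowledge of the C struct layout: OFFSET(sym, str, mem) is the kbuild idiom that turns offsetof(struct str, mem) into a named constant in the generated asm-offsets.h. A sketch of the producer, plus a plausible (illustrative, not quoted) assembly consumer:

#include <linux/kbuild.h>	/* OFFSET() => DEFINE(sym, offsetof(...)) */
#include <linux/kvm_host.h>

void asm_offsets_sketch(void)
{
	/* becomes "#define KVM_ARCH_GUEST_RA <byte offset>" in the
	 * generated asm-offsets.h at build time */
	OFFSET(KVM_ARCH_GUEST_RA, kvm_vcpu_arch, guest_context.ra);
}

/* assembly side (illustrative):
 *	REG_S	ra, KVM_ARCH_GUEST_RA(a0)	// save guest ra
 *	REG_L	ra, KVM_ARCH_GUEST_RA(a0)	// restore guest ra
 */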
/linux-5.19.10/arch/riscv/include/asm/
kvm_host.h
  184  struct kvm_cpu_context guest_context;  member
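This is the single definition all ten results point back to: guest_context is the per-vCPU register file that KVM saves on every guest trap and reloads on every guest entry. A minimal sketch of its shape; the members shown first are confirmed by the asm-offsets and emulation hits above, while the remaining GPRs are assumptions from the RISC-V register set, and the real struct also carries the FP state:

#include <linux/types.h>

/* Sketch of struct kvm_cpu_context (see kvm_host.h:184 for the real one). */
struct kvm_cpu_context_sketch {
	unsigned long zero, ra, sp, gp, tp;		/* confirmed by asm-offsets.c */
	unsigned long t0, t1, t2;
	unsigned long s0, s1;
	unsigned long a0, a1, a2, a3, a4, a5, a6, a7;	/* assumed: rest of the GPRs */
	unsigned long s2, s3, s4, s5, s6, s7, s8, s9, s10, s11;
	unsigned long t3, t4, t5, t6;
	unsigned long sepc;				/* guest PC, per vcpu_exit.c hits */
	unsigned long sstatus;				/* guest status, per vcpu.c:179 */
};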