Searched refs:vmcb (Results 1 – 22 of 22) sorted by relevance

/linux-6.1.9/arch/x86/kvm/svm/
svm.h
114 struct vmcb *ptr;
209 struct vmcb *vmcb; member
292 struct vmcb *current_vmcb;
295 struct vmcb **sev_vmcbs;
329 static inline void vmcb_mark_all_dirty(struct vmcb *vmcb) in vmcb_mark_all_dirty() argument
331 vmcb->control.clean = 0; in vmcb_mark_all_dirty()
334 static inline void vmcb_mark_all_clean(struct vmcb *vmcb) in vmcb_mark_all_clean() argument
336 vmcb->control.clean = VMCB_ALL_CLEAN_MASK in vmcb_mark_all_clean()
340 static inline void vmcb_mark_dirty(struct vmcb *vmcb, int bit) in vmcb_mark_dirty() argument
342 vmcb->control.clean &= ~(1 << bit); in vmcb_mark_dirty()
[all …]
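
The svm.h helpers above implement KVM's VMCB clean-bits protocol: each set bit in control.clean tells the CPU that the matching VMCB area is unchanged since the last VMRUN and may be served from its cache, so software clears the bit for any area it rewrites. A minimal userspace model of the pattern (the mask value and bit index below are illustrative, not the kernel's):

/* Minimal userspace model of the clean-bits pattern above; the mask value
 * and bit index are illustrative, not the kernel's definitions. */
#include <stdint.h>
#include <stdio.h>

#define VMCB_ALL_CLEAN_MASK 0xfffu /* hypothetical: every tracked area clean */
#define VMCB_CR 3                  /* hypothetical bit for the CR fields */

struct vmcb_model {
    uint32_t clean; /* set bit = CPU may keep using its cached copy */
};

static void mark_all_dirty(struct vmcb_model *v) { v->clean = 0; }
static void mark_all_clean(struct vmcb_model *v) { v->clean = VMCB_ALL_CLEAN_MASK; }
static void mark_dirty(struct vmcb_model *v, int bit) { v->clean &= ~(1u << bit); }

int main(void)
{
    struct vmcb_model v;

    mark_all_clean(&v);      /* state cached after a successful VMRUN */
    mark_dirty(&v, VMCB_CR); /* we just rewrote a control register field */
    printf("clean = %#x\n", (unsigned)v.clean);
    mark_all_dirty(&v);      /* e.g. after switching to a different VMCB */
    return 0;
}
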
svm_onhyperv.h
18 static inline void svm_hv_init_vmcb(struct vmcb *vmcb) in svm_hv_init_vmcb() argument
21 (struct hv_enlightenments *)vmcb->control.reserved_sw; in svm_hv_init_vmcb()
62 struct vmcb *vmcb = to_svm(vcpu)->vmcb; in svm_hv_vmcb_dirty_nested_enlightenments() local
64 (struct hv_enlightenments *)vmcb->control.reserved_sw; in svm_hv_vmcb_dirty_nested_enlightenments()
67 vmcb_mark_dirty(vmcb, VMCB_HV_NESTED_ENLIGHTENMENTS); in svm_hv_vmcb_dirty_nested_enlightenments()
70 static inline void svm_hv_update_vp_id(struct vmcb *vmcb, in svm_hv_update_vp_id() argument
74 (struct hv_enlightenments *)vmcb->control.reserved_sw; in svm_hv_update_vp_id()
79 vmcb_mark_dirty(vmcb, VMCB_HV_NESTED_ENLIGHTENMENTS); in svm_hv_update_vp_id()
84 static inline void svm_hv_init_vmcb(struct vmcb *vmcb) in svm_hv_init_vmcb() argument
97 static inline void svm_hv_update_vp_id(struct vmcb *vmcb, in svm_hv_update_vp_id() argument
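
The stubs above show how Hyper-V-on-SVM reuses the spare reserved_sw space in the VMCB control area as a struct hv_enlightenments, marking VMCB_HV_NESTED_ENLIGHTENMENTS dirty after each update. A standalone model of that aliasing (both struct layouts here are made up for illustration):

/* Standalone model of the reserved_sw aliasing above: the Hyper-V code
 * reinterprets spare space in the VMCB control area as its own
 * enlightenments struct. Both layouts are made up for illustration. */
#include <stdint.h>
#include <string.h>
#include <stdio.h>

struct hv_enlightenments_model {
    uint64_t nested_flags; /* e.g. "enlightened TLB flush" capability bits */
    uint32_t vp_id;        /* Hyper-V virtual processor id */
};

struct vmcb_control_model {
    _Alignas(8) uint8_t reserved_sw[32]; /* spare bytes the paravisor reuses */
};

int main(void)
{
    struct vmcb_control_model ctl;
    struct hv_enlightenments_model *hve =
        (struct hv_enlightenments_model *)ctl.reserved_sw;

    memset(&ctl, 0, sizeof(ctl));
    hve->vp_id = 42; /* analogue of svm_hv_update_vp_id() */
    printf("vp_id = %u\n", hve->vp_id);
    return 0;
}
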
svm.c
344 svm->vmcb->save.efer = efer | EFER_SVME; in svm_set_efer()
345 vmcb_mark_dirty(svm->vmcb, VMCB_CR); in svm_set_efer()
354 if (svm->vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) in svm_get_interrupt_shadow()
364 svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK; in svm_set_interrupt_shadow()
366 svm->vmcb->control.int_state |= SVM_INTERRUPT_SHADOW_MASK; in svm_set_interrupt_shadow()
383 if (nrips && svm->vmcb->control.next_rip != 0) { in __svm_skip_emulated_instruction()
385 svm->next_rip = svm->vmcb->control.next_rip; in __svm_skip_emulated_instruction()
390 old_rflags = svm->vmcb->save.rflags; in __svm_skip_emulated_instruction()
396 svm->vmcb->save.rflags = old_rflags; in __svm_skip_emulated_instruction()
445 svm->soft_int_csbase = svm->vmcb->save.cs.base; in svm_update_soft_interrupt_rip()
[all …]
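
In svm.c, svm_set_efer() always ORs EFER_SVME into the saved EFER and then marks the VMCB_CR area dirty, while the interrupt-shadow accessors reduce to single-bit tests and updates on control.int_state. A plain-C sketch of those accessors (the mask value is an illustrative stand-in):

/* Plain-C sketch of the interrupt-shadow accessors above; the mask value
 * is an illustrative stand-in. */
#include <stdint.h>
#include <stdbool.h>
#include <assert.h>

#define SVM_INTERRUPT_SHADOW_MASK 0x1u /* hypothetical bit in int_state */

static uint32_t get_interrupt_shadow(uint32_t int_state)
{
    return (int_state & SVM_INTERRUPT_SHADOW_MASK) ? 1 : 0;
}

static uint32_t set_interrupt_shadow(uint32_t int_state, bool shadow)
{
    int_state &= ~SVM_INTERRUPT_SHADOW_MASK; /* always clear first */
    if (shadow)
        int_state |= SVM_INTERRUPT_SHADOW_MASK;
    return int_state;
}

int main(void)
{
    uint32_t s = set_interrupt_shadow(0, true);

    assert(get_interrupt_shadow(s) == 1);
    s = set_interrupt_shadow(s, false);
    assert(get_interrupt_shadow(s) == 0);
    return 0;
}
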
nested.c
39 struct vmcb *vmcb = svm->vmcb; in nested_svm_inject_npf_exit() local
41 if (vmcb->control.exit_code != SVM_EXIT_NPF) { in nested_svm_inject_npf_exit()
46 vmcb->control.exit_code = SVM_EXIT_NPF; in nested_svm_inject_npf_exit()
47 vmcb->control.exit_code_hi = 0; in nested_svm_inject_npf_exit()
48 vmcb->control.exit_info_1 = (1ULL << 32); in nested_svm_inject_npf_exit()
49 vmcb->control.exit_info_2 = fault->address; in nested_svm_inject_npf_exit()
52 vmcb->control.exit_info_1 &= ~0xffffffffULL; in nested_svm_inject_npf_exit()
53 vmcb->control.exit_info_1 |= fault->error_code; in nested_svm_inject_npf_exit()
127 vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS); in recalc_intercepts()
132 c = &svm->vmcb->control; in recalc_intercepts()
[all …]
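
nested_svm_inject_npf_exit() above synthesizes an SVM_EXIT_NPF exit for L1: exit_info_1 keeps bit 32 set as a flag while its low 32 bits are replaced by the fault's error code, and exit_info_2 carries the faulting address. A standalone sketch of that packing (the helper name is ours):

/* Standalone sketch of the exit_info_1 packing in
 * nested_svm_inject_npf_exit() above; the helper name is ours. */
#include <stdint.h>
#include <stdio.h>

static uint64_t pack_npf_exit_info_1(uint64_t info, uint32_t error_code)
{
    info &= ~0xffffffffULL; /* drop any stale error code */
    info |= error_code;     /* low 32 bits carry the page-fault error code */
    return info;
}

int main(void)
{
    uint64_t info = 1ULL << 32; /* synthesized exit: flag bit 32 first */

    info = pack_npf_exit_info_1(info, 0x7);
    printf("exit_info_1 = %#llx\n", (unsigned long long)info); /* 0x100000007 */
    return 0;
}
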
avic.c
68 struct vmcb *vmcb = svm->vmcb01.ptr; in avic_activate_vmcb() local
70 vmcb->control.int_ctl &= ~(AVIC_ENABLE_MASK | X2APIC_MODE_MASK); in avic_activate_vmcb()
71 vmcb->control.avic_physical_id &= ~AVIC_PHYSICAL_MAX_INDEX_MASK; in avic_activate_vmcb()
73 vmcb->control.int_ctl |= AVIC_ENABLE_MASK; in avic_activate_vmcb()
84 vmcb->control.int_ctl |= X2APIC_MODE_MASK; in avic_activate_vmcb()
85 vmcb->control.avic_physical_id |= X2AVIC_MAX_PHYSICAL_ID; in avic_activate_vmcb()
90 vmcb->control.avic_physical_id |= AVIC_MAX_PHYSICAL_ID; in avic_activate_vmcb()
98 struct vmcb *vmcb = svm->vmcb01.ptr; in avic_deactivate_vmcb() local
100 vmcb->control.int_ctl &= ~(AVIC_ENABLE_MASK | X2APIC_MODE_MASK); in avic_deactivate_vmcb()
101 vmcb->control.avic_physical_id &= ~AVIC_PHYSICAL_MAX_INDEX_MASK; in avic_deactivate_vmcb()
[all …]
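
avic_activate_vmcb() above first clears both mode bits in int_ctl, then re-enables AVIC and, for x2APIC guests, additionally sets the x2AVIC mode bit; avic_physical_id's max-index field is resized to match. A small model of the int_ctl half (the mask values are illustrative stand-ins):

/* Small model of the int_ctl half of avic_activate_vmcb(); the mask
 * values are illustrative stand-ins for the kernel's definitions. */
#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

#define AVIC_ENABLE_MASK (1u << 31) /* hypothetical bit positions */
#define X2APIC_MODE_MASK (1u << 30)

static uint32_t avic_activate_int_ctl(uint32_t int_ctl, bool x2apic)
{
    int_ctl &= ~(AVIC_ENABLE_MASK | X2APIC_MODE_MASK); /* start clean */
    int_ctl |= AVIC_ENABLE_MASK;
    if (x2apic)
        int_ctl |= X2APIC_MODE_MASK; /* the x2AVIC variant */
    return int_ctl;
}

int main(void)
{
    printf("xAPIC:  %#x\n", (unsigned)avic_activate_int_ctl(0, false));
    printf("x2APIC: %#x\n", (unsigned)avic_activate_int_ctl(0, true));
    return 0;
}
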
svm_onhyperv.c
29 hve = (struct hv_enlightenments *)to_svm(vcpu)->vmcb->control.reserved_sw; in svm_hv_enable_direct_tlbflush()
35 vmcb_mark_dirty(to_svm(vcpu)->vmcb, VMCB_HV_NESTED_ENLIGHTENMENTS); in svm_hv_enable_direct_tlbflush()
sev.c
569 if (svm->vcpu.guest_debug || (svm->vmcb->save.dr7 & ~DR7_FIXED_1)) in sev_es_sync_vmsa()
578 memcpy(save, &svm->vmcb->save, sizeof(svm->vmcb->save)); in sev_es_sync_vmsa()
1735 dst_svm->vmcb->control.ghcb_gpa = src_svm->vmcb->control.ghcb_gpa; in sev_migrate_from()
1736 dst_svm->vmcb->control.vmsa_pa = src_svm->vmcb->control.vmsa_pa; in sev_migrate_from()
1740 src_svm->vmcb->control.ghcb_gpa = INVALID_PAGE; in sev_migrate_from()
1741 src_svm->vmcb->control.vmsa_pa = INVALID_PAGE; in sev_migrate_from()
2361 pr_err("GHCB (GPA=%016llx):\n", svm->vmcb->control.ghcb_gpa); in dump_ghcb()
2394 struct vmcb_control_area *control = &svm->vmcb->control; in sev_es_sync_from_ghcb()
2419 svm->vmcb->save.cpl = ghcb_get_cpl_if_valid(ghcb); in sev_es_sync_from_ghcb()
2615 if (sd->sev_vmcbs[asid] == svm->vmcb && in pre_sev_run()
[all …]
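
The sev.c hits show, among other things, how sev_migrate_from() hands the GHCB and VMSA physical addresses from the source vCPU to the destination and then poisons the source's copies so a stale VMCB cannot be run again. A minimal model of that handoff (INVALID_PAGE's value here is illustrative):

/* Minimal model of the handoff in sev_migrate_from() above; INVALID_PAGE's
 * value here is illustrative. */
#include <stdint.h>
#include <stdio.h>

#define INVALID_PAGE ((uint64_t)-1) /* illustrative poison value */

struct sev_ctl_model {
    uint64_t ghcb_gpa; /* guest-physical address of the GHCB */
    uint64_t vmsa_pa;  /* physical address of the encrypted VMSA */
};

static void migrate(struct sev_ctl_model *dst, struct sev_ctl_model *src)
{
    dst->ghcb_gpa = src->ghcb_gpa; /* destination takes ownership */
    dst->vmsa_pa = src->vmsa_pa;
    src->ghcb_gpa = INVALID_PAGE;  /* the source must never run again */
    src->vmsa_pa = INVALID_PAGE;
}

int main(void)
{
    struct sev_ctl_model src = { 0x8000, 0x9000 }, dst = { 0, 0 };

    migrate(&dst, &src);
    printf("dst: ghcb=%#llx vmsa=%#llx\n",
           (unsigned long long)dst.ghcb_gpa,
           (unsigned long long)dst.vmsa_pa);
    return 0;
}
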
/linux-6.1.9/tools/testing/selftests/kvm/x86_64/
hyperv_svm_test.c
66 struct vmcb *vmcb = svm->vmcb; in guest_code() local
68 (struct hv_enlightenments *)vmcb->control.reserved_sw; in guest_code()
80 run_guest(vmcb, svm->vmcb_gpa); in guest_code()
81 GUEST_ASSERT(vmcb->control.exit_code == SVM_EXIT_VMMCALL); in guest_code()
83 vmcb->save.rip += 3; in guest_code()
86 vmcb->control.intercept |= 1ULL << INTERCEPT_MSR_PROT; in guest_code()
88 run_guest(vmcb, svm->vmcb_gpa); in guest_code()
89 GUEST_ASSERT(vmcb->control.exit_code == SVM_EXIT_MSR); in guest_code()
90 vmcb->save.rip += 2; /* rdmsr */ in guest_code()
94 run_guest(vmcb, svm->vmcb_gpa); in guest_code()
[all …]
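
The guest code above flips single bits in the 64-bit intercept word (1ULL << INTERCEPT_MSR_PROT) and resumes the nested guest by bumping save.rip past the intercepted instruction (3 bytes for VMMCALL, 2 for RDMSR). A standalone model of the intercept toggle (the bit index is a hypothetical stand-in):

/* Standalone model of the intercept toggle above; the bit index is a
 * hypothetical stand-in for the selftest's INTERCEPT_MSR_PROT. */
#include <stdint.h>
#include <stdio.h>

#define INTERCEPT_MSR_PROT 28 /* hypothetical bit index */

int main(void)
{
    uint64_t intercept = 0;

    intercept |= 1ULL << INTERCEPT_MSR_PROT; /* start trapping MSR accesses */
    printf("MSR intercept %s\n",
           (intercept >> INTERCEPT_MSR_PROT) & 1 ? "on" : "off");
    intercept &= ~(1ULL << INTERCEPT_MSR_PROT); /* and turn it back off */
    return 0;
}
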
svm_nested_soft_inject_test.c
76 struct vmcb *vmcb = svm->vmcb; in l1_guest_code() local
86 vmcb->control.intercept_exceptions |= BIT(PF_VECTOR) | BIT(UD_VECTOR); in l1_guest_code()
87 vmcb->control.intercept |= BIT(INTERCEPT_NMI) | BIT(INTERCEPT_HLT); in l1_guest_code()
90 vmcb->control.event_inj = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI; in l1_guest_code()
92 vmcb->control.event_inj = INT_NR | SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_SOFT; in l1_guest_code()
94 vmcb->control.next_rip = vmcb->save.rip; in l1_guest_code()
97 run_guest(vmcb, svm->vmcb_gpa); in l1_guest_code()
98 GUEST_ASSERT_3(vmcb->control.exit_code == SVM_EXIT_VMMCALL, in l1_guest_code()
99 vmcb->control.exit_code, in l1_guest_code()
100 vmcb->control.exit_info_1, vmcb->control.exit_info_2); in l1_guest_code()
[all …]
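
This test builds control.event_inj by hand: the vector sits in the low byte, the event type in bits 10:8, and the valid flag at bit 31; for soft interrupts it also points next_rip at the return address. A standalone sketch of that encoding (the vector below is a made-up example):

/* Standalone sketch of the EVENTINJ encoding used above: vector in bits
 * 7:0, event type in bits 10:8, valid bit at 31. The vector below is a
 * made-up example. */
#include <stdint.h>
#include <stdio.h>

#define SVM_EVTINJ_VEC_MASK   0xffu
#define SVM_EVTINJ_TYPE_SHIFT 8
#define SVM_EVTINJ_TYPE_NMI   (2u << SVM_EVTINJ_TYPE_SHIFT)
#define SVM_EVTINJ_TYPE_SOFT  (4u << SVM_EVTINJ_TYPE_SHIFT)
#define SVM_EVTINJ_VALID      (1u << 31)

int main(void)
{
    uint32_t nmi = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI;
    uint32_t soft = 0x20 | SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_SOFT; /* INT 0x20 */

    printf("NMI inject:  %#x\n", (unsigned)nmi);
    printf("soft inject: %#x (vector %u)\n",
           (unsigned)soft, (unsigned)(soft & SVM_EVTINJ_VEC_MASK));
    return 0;
}
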
svm_int_ctl_test.c
62 struct vmcb *vmcb = svm->vmcb; in l1_guest_code() local
71 vmcb->control.int_ctl &= ~V_INTR_MASKING_MASK; in l1_guest_code()
74 vmcb->control.intercept &= ~(BIT(INTERCEPT_INTR) | BIT(INTERCEPT_VINTR)); in l1_guest_code()
77 vmcb->control.int_ctl |= V_IRQ_MASK | (0x1 << V_INTR_PRIO_SHIFT); in l1_guest_code()
78 vmcb->control.int_vector = VINTR_IRQ_NUMBER; in l1_guest_code()
80 run_guest(vmcb, svm->vmcb_gpa); in l1_guest_code()
81 GUEST_ASSERT(vmcb->control.exit_code == SVM_EXIT_VMMCALL); in l1_guest_code()
svm_vmcall_test.c
24 struct vmcb *vmcb = svm->vmcb; in l1_guest_code() local
30 run_guest(vmcb, svm->vmcb_gpa); in l1_guest_code()
32 GUEST_ASSERT(vmcb->control.exit_code == SVM_EXIT_VMMCALL); in l1_guest_code()
svm_nested_shutdown_test.c
24 struct vmcb *vmcb = svm->vmcb; in l1_guest_code() local
29 vmcb->control.intercept &= ~(BIT(INTERCEPT_SHUTDOWN)); in l1_guest_code()
36 run_guest(vmcb, svm->vmcb_gpa); in l1_guest_code()
state_test.c
38 struct vmcb *vmcb = svm->vmcb; in svm_l1_guest_code() local
46 run_guest(vmcb, svm->vmcb_gpa); in svm_l1_guest_code()
47 GUEST_ASSERT(vmcb->control.exit_code == SVM_EXIT_VMMCALL); in svm_l1_guest_code()
49 vmcb->save.rip += 3; in svm_l1_guest_code()
50 run_guest(vmcb, svm->vmcb_gpa); in svm_l1_guest_code()
51 GUEST_ASSERT(vmcb->control.exit_code == SVM_EXIT_VMMCALL); in svm_l1_guest_code()
triple_fault_event_test.c
45 struct vmcb *vmcb = svm->vmcb; in l1_guest_code_svm() local
51 vmcb->control.intercept &= ~(BIT(INTERCEPT_SHUTDOWN)); in l1_guest_code_svm()
53 run_guest(vmcb, svm->vmcb_gpa); in l1_guest_code_svm()
nested_exceptions_test.c
79 struct vmcb *vmcb = svm->vmcb; in svm_run_l2() local
80 struct vmcb_control_area *ctrl = &vmcb->control; in svm_run_l2()
82 vmcb->save.rip = (u64)l2_code; in svm_run_l2()
83 run_guest(vmcb, svm->vmcb_gpa); in svm_run_l2()
94 struct vmcb_control_area *ctrl = &svm->vmcb->control; in l1_svm_code()
98 svm->vmcb->save.idtr.limit = 0; in l1_svm_code()
smm_test.c
103 run_guest(svm->vmcb, svm->vmcb_gpa); in guest_code()
104 run_guest(svm->vmcb, svm->vmcb_gpa); in guest_code()
/linux-6.1.9/tools/testing/selftests/kvm/lib/x86_64/
svm.c
37 svm->vmcb = (void *)vm_vaddr_alloc_page(vm); in vcpu_alloc_svm()
38 svm->vmcb_hva = addr_gva2hva(vm, (uintptr_t)svm->vmcb); in vcpu_alloc_svm()
39 svm->vmcb_gpa = addr_gva2gpa(vm, (uintptr_t)svm->vmcb); in vcpu_alloc_svm()
65 struct vmcb *vmcb = svm->vmcb; in generic_svm_setup() local
67 struct vmcb_save_area *save = &vmcb->save; in generic_svm_setup()
68 struct vmcb_control_area *ctrl = &vmcb->control; in generic_svm_setup()
79 memset(vmcb, 0, sizeof(*vmcb)); in generic_svm_setup()
103 vmcb->save.rip = (u64)guest_rip; in generic_svm_setup()
104 vmcb->save.rsp = (u64)guest_rsp; in generic_svm_setup()
135 void run_guest(struct vmcb *vmcb, uint64_t vmcb_gpa) in run_guest() argument
[all …]
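
vcpu_alloc_svm() above tracks a single VMCB page under three names: the guest-virtual address handed to guest code, the host mapping used to prepare it, and the guest-physical address that VMRUN actually consumes; generic_svm_setup() then zeroes the VMCB and seeds rip/rsp. A userspace model of that bookkeeping (the address translations are faked purely for illustration):

/* Userspace model of the VMCB bookkeeping above; the address translations
 * are faked with fixed values purely for illustration. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct svm_test_data_model {
    void *vmcb;        /* guest-virtual address in the real selftest */
    void *vmcb_hva;    /* host mapping used to prepare the VMCB */
    uint64_t vmcb_gpa; /* guest-physical address that VMRUN consumes */
};

int main(void)
{
    struct svm_test_data_model svm;
    void *page = calloc(1, 4096); /* stands in for vm_vaddr_alloc_page() */

    if (!page)
        return 1;
    svm.vmcb = page;
    svm.vmcb_hva = page;           /* identity "translation" in this model */
    svm.vmcb_gpa = 0x1000;         /* fake gpa */
    memset(svm.vmcb_hva, 0, 4096); /* generic_svm_setup() zeroes the VMCB */
    printf("vmcb gpa = %#llx\n", (unsigned long long)svm.vmcb_gpa);
    free(page);
    return 0;
}
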
/linux-6.1.9/tools/testing/selftests/kvm/include/x86_64/
svm_util.h
20 struct vmcb *vmcb; /* gva */ member
47 void run_guest(struct vmcb *vmcb, uint64_t vmcb_gpa);
svm.h
216 struct __attribute__ ((__packed__)) vmcb { struct
/linux-6.1.9/arch/x86/kvm/
trace.h
598 TP_PROTO(__u64 rip, __u64 vmcb, __u64 nested_rip, __u32 int_ctl,
601 TP_ARGS(rip, vmcb, nested_rip, int_ctl, event_inj, tdp_enabled,
606 __field( __u64, vmcb )
617 __entry->vmcb = vmcb;
630 __entry->vmcb,
/linux-6.1.9/arch/x86/include/asm/
svm.h
514 struct vmcb { struct
/linux-6.1.9/Documentation/virt/kvm/
locking.rst
242 - tsc offset in vmcb