Lines matching refs: to_vmx
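
For context, every hit below resolves the same helper: to_vmx() recovers the VMX-specific struct vcpu_vmx from a generic struct kvm_vcpu pointer. A minimal sketch of the accessor follows; it relies on the standard container_of() embedding pattern (in current kernels the definition lives in arch/x86/kvm/vmx/vmx.h).

/*
 * struct vcpu_vmx embeds the generic struct kvm_vcpu as a member named
 * vcpu, so container_of() can walk back from the embedded member to the
 * enclosing VMX-private structure. Sketch only; depends on the KVM VMX
 * headers for the struct definitions and <linux/container_of.h>.
 */
static __always_inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
{
	return container_of(vcpu, struct vcpu_vmx, vcpu);
}
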
547 evmcs = (struct hv_enlightened_vmcs *)to_vmx(vcpu)->loaded_vmcs->vmcs; in hv_enable_l2_tlb_flush()
899 if (to_vmx(vcpu)->rmode.vm86_active) in vmx_update_exception_bitmap()
1291 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_prepare_switch_to_guest()
1425 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_vcpu_load_vmcs()
1495 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_vcpu_load()
1508 vmx_prepare_switch_to_host(to_vmx(vcpu)); in vmx_vcpu_put()
1518 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_get_rflags()
1536 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_set_rflags()
1599 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_rtit_ctl_check()
1679 if (to_vmx(vcpu)->exit_reason.enclave_mode) { in vmx_can_emulate_instruction()
1688 union vmx_exit_reason exit_reason = to_vmx(vcpu)->exit_reason; in skip_emulated_instruction()
1757 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_update_emulated_instruction()
1808 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_inject_exception()
1992 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_get_msr()
2026 msr_info->data = to_vmx(vcpu)->spec_ctrl; in vmx_get_msr()
2058 msr_info->data = to_vmx(vcpu)->msr_ia32_sgxlepubkeyhash in vmx_get_msr()
2171 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_set_msr()
2250 if (intel_pmu_lbr_is_enabled(vcpu) && !to_vmx(vcpu)->lbr_desc.event && in vmx_set_msr()
2330 !(to_vmx(vcpu)->msr_ia32_feature_control & in vmx_set_msr()
2500 if (!(exec_controls_get(to_vmx(vcpu)) & CPU_BASED_CR3_LOAD_EXITING)) in vmx_cache_reg()
3005 struct vcpu_vmx *vmx = to_vmx(vcpu); in enter_pmode()
3075 struct vcpu_vmx *vmx = to_vmx(vcpu); in enter_rmode()
3122 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_set_efer()
3149 vmx_segment_cache_clear(to_vmx(vcpu)); in enter_lmode()
3171 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_flush_tlb_all()
3196 return to_vmx(vcpu)->vpid; in vmx_get_current_vpid()
3274 if (to_vmx(vcpu)->nested.vmxon) in vmx_is_valid_cr0()
3282 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_set_cr0()
3430 if (to_vmx(vcpu)->nested.vmxon && !nested_cr4_valid(vcpu, cr4)) in vmx_is_valid_cr4()
3439 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_set_cr4()
3502 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_get_segment()
3540 if (to_vmx(vcpu)->rmode.vm86_active) { in vmx_get_segment_base()
3544 return vmx_read_guest_seg_base(to_vmx(vcpu), seg); in vmx_get_segment_base()
3549 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_get_cpl()
3578 struct vcpu_vmx *vmx = to_vmx(vcpu); in __vmx_set_segment()
3617 to_vmx(vcpu)->emulation_required = vmx_emulation_required(vcpu); in vmx_set_segment()
3622 u32 ar = vmx_read_guest_seg_ar(to_vmx(vcpu), VCPU_SREG_CS); in vmx_get_cs_db_l_bits()
3968 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_disable_intercept_for_msr()
4012 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_enable_intercept_for_msr()
4051 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_update_msr_bitmap_x2apic()
4104 struct vcpu_vmx *vmx = to_vmx(vcpu); in pt_update_intercept_for_msr()
4120 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_guest_apic_has_interrupt()
4140 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_msr_filter_changed()
4213 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_deliver_nested_posted_interrupt()
4251 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_deliver_posted_interrupt()
4444 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_refresh_apicv_exec_ctrl()
4833 struct vcpu_vmx *vmx = to_vmx(vcpu); in __vmx_vcpu_reset()
4860 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_vcpu_reset()
4919 exec_controls_setbit(to_vmx(vcpu), CPU_BASED_INTR_WINDOW_EXITING); in vmx_enable_irq_window()
4930 exec_controls_setbit(to_vmx(vcpu), CPU_BASED_NMI_WINDOW_EXITING); in vmx_enable_nmi_window()
4935 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_inject_irq()
4963 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_inject_nmi()
4994 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_get_nmi_mask()
5008 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_set_nmi_mask()
5031 if (!enable_vnmi && to_vmx(vcpu)->loaded_vmcs->soft_vnmi_blocked) in vmx_nmi_blocked()
5041 if (to_vmx(vcpu)->nested.nested_run_pending) in vmx_nmi_allowed()
5063 if (to_vmx(vcpu)->nested.nested_run_pending) in vmx_interrupt_allowed()
5110 to_vmx(vcpu)->vcpu.arch.event_exit_inst_len = in rmode_exception()
5186 struct vcpu_vmx *vmx = to_vmx(vcpu); in handle_exception_nmi()
5566 exec_controls_clearbit(to_vmx(vcpu), CPU_BASED_MOV_DR_EXITING); in handle_dr()
5602 exec_controls_setbit(to_vmx(vcpu), CPU_BASED_MOV_DR_EXITING); in vmx_sync_dirty_debug_regs()
5624 exec_controls_clearbit(to_vmx(vcpu), CPU_BASED_INTR_WINDOW_EXITING); in handle_interrupt_window()
5691 struct vcpu_vmx *vmx = to_vmx(vcpu); in handle_task_switch()
5760 if (!(to_vmx(vcpu)->idt_vectoring_info & VECTORING_INFO_VALID_MASK) && in handle_ept_violation()
5826 exec_controls_clearbit(to_vmx(vcpu), CPU_BASED_NMI_WINDOW_EXITING); in handle_nmi_window()
5835 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_emulation_required_with_pending_exception()
5843 struct vcpu_vmx *vmx = to_vmx(vcpu); in handle_invalid_guest_state()
5894 struct vcpu_vmx *vmx = to_vmx(vcpu); in grow_ple_window()
5910 struct vcpu_vmx *vmx = to_vmx(vcpu); in shrink_ple_window()
5991 if (!(to_vmx(vcpu)->idt_vectoring_info & VECTORING_INFO_VALID_MASK) && in handle_pml_full()
6006 struct vcpu_vmx *vmx = to_vmx(vcpu); in handle_fastpath_preemption_timer()
6053 to_vmx(vcpu)->exit_reason.bus_lock_detected = true; in handle_bus_lock_vmexit()
6150 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_get_exit_info()
6178 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_flush_pml_buffer()
6235 struct vcpu_vmx *vmx = to_vmx(vcpu); in dump_vmcs()
6414 struct vcpu_vmx *vmx = to_vmx(vcpu); in __vmx_handle_exit()
6599 if (to_vmx(vcpu)->exit_reason.bus_lock_detected) { in vmx_handle_exit()
6690 to_vmx(vcpu)->nested.l1_tpr_threshold = tpr_threshold; in vmx_update_cr8_intercept()
6697 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_set_virtual_apic_mode()
6760 to_vmx(vcpu)->nested.reload_vmcs01_apic_access_page = true; in vmx_set_apic_access_page_addr()
6764 if (!(secondary_exec_controls_get(to_vmx(vcpu)) & in vmx_set_apic_access_page_addr()
6869 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_sync_pir_to_irr()
6926 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_apicv_pre_state_restore()
6989 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_handle_exit_irqoff()
7162 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_update_hv_timer()
7221 switch (to_vmx(vcpu)->exit_reason.basic) { in vmx_exit_handlers_fastpath()
7234 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_vcpu_enter_exit()
7287 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_vcpu_run()
7443 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_vcpu_free()
7459 vmx = to_vmx(vcpu); in vmx_vcpu_create()
7657 struct vcpu_vmx *vmx = to_vmx(vcpu); in nested_vmx_cr_fixed1_bits_update()
7697 struct vcpu_vmx *vmx = to_vmx(vcpu); in update_intel_pt_cfg()
7766 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_vcpu_after_set_cpuid()
7919 to_vmx(vcpu)->req_immediate_exit = true; in vmx_request_immediate_exit()
8046 vmx = to_vmx(vcpu); in vmx_set_hv_timer()
8081 to_vmx(vcpu)->hv_deadline_tsc = -1; in vmx_cancel_hv_timer()
8093 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_update_cpu_dirty_logging()
8117 to_vmx(vcpu)->msr_ia32_feature_control_valid_bits |= in vmx_setup_mce()
8120 to_vmx(vcpu)->msr_ia32_feature_control_valid_bits &= in vmx_setup_mce()
8128 if (to_vmx(vcpu)->nested.nested_run_pending) in vmx_smi_allowed()
8135 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_enter_smm()
8156 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_leave_smm()
8183 return to_vmx(vcpu)->nested.vmxon && !is_guest_mode(vcpu); in vmx_apic_init_signal_blocked()
8189 struct hrtimer *timer = &to_vmx(vcpu)->nested.preemption_timer; in vmx_migrate_timers()
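One pattern is visible throughout the listing: callbacks that touch VMX-private state more than once cache the result in a local (as in vmx_prepare_switch_to_guest() or vmx_vcpu_load_vmcs()), while single accesses inline the call (as in vmx_update_exception_bitmap() or vmx_cancel_hv_timer()). An illustrative sketch of the cached form; vmx_example_callback is a hypothetical name, not a function from the listing, though the fields it touches appear above:

/*
 * Resolve the VMX-private state once at the top of the callback, then
 * reuse the local pointer. Illustrative only.
 */
static void vmx_example_callback(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (vmx->rmode.vm86_active)
		vmx->emulation_required = vmx_emulation_required(vcpu);
}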