Lines matching refs: gva

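All of the matches below come from the x86 KVM MMU (arch/x86/kvm/mmu.c); gva_t carries a guest virtual address through the page fault, async page fault, and INVLPG paths summarized here. The sketches interleaved with the matches reconstruct the surrounding definitions as they looked in kernels of this vintage; they are not verbatim, and helper names may differ between versions.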
1372 static void nonpaging_invlpg(struct kvm_vcpu *vcpu, gva_t gva)  in nonpaging_invlpg()  argument
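nonpaging_invlpg() is the invlpg callback installed while the guest runs without paging. Because the nonpaging MMU keeps no shadow state keyed by guest virtual address, the handler is empty; a sketch of the definition (reconstructed, not verbatim):

    /*
     * With guest paging disabled, gva maps 1:1 to gfn and no per-gva
     * shadow entries exist, so a guest INVLPG requires no work here.
     */
    static void nonpaging_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
    {
    }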
2564 static bool handle_abnormal_pfn(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn, in handle_abnormal_pfn() argument
2576 vcpu_cache_mmio_info(vcpu, gva, gfn, access); in handle_abnormal_pfn()
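handle_abnormal_pfn() screens the pfn returned by the fault path before a mapping is installed; the match at line 2576 caches the gva/gfn pair for an MMIO access so a later exit can be emulated without rewalking the tables. A hedged reconstruction (the error/no-slot predicates were renamed more than once across versions, so treat the helper names as assumptions):

    static bool handle_abnormal_pfn(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn,
                                    pfn_t pfn, unsigned access, int *ret_val)
    {
            bool ret = true;

            /* The pfn is invalid: report the error to the caller. */
            if (unlikely(is_error_pfn(pfn))) {
                    *ret_val = kvm_handle_bad_page(vcpu, gfn, pfn);
                    goto exit;
            }

            /* No memslot backs this gfn: remember it as MMIO. */
            if (unlikely(is_noslot_pfn(pfn)))
                    vcpu_cache_mmio_info(vcpu, gva, gfn, access);

            ret = false;
    exit:
            return ret;
    }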
2584 gva_t gva, pfn_t *pfn, bool write, bool *writable);
2978 static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva, in nonpaging_page_fault() argument
2984 pgprintk("%s: gva %lx error %x\n", __func__, gva, error_code); in nonpaging_page_fault()
2987 return handle_mmio_page_fault(vcpu, gva, error_code, true); in nonpaging_page_fault()
2996 gfn = gva >> PAGE_SHIFT; in nonpaging_page_fault()
2998 return nonpaging_map(vcpu, gva & PAGE_MASK, in nonpaging_page_fault()
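nonpaging_page_fault() shows why the nonpaging invlpg handler can be empty: with no guest paging the translation is the identity map, so the gfn is just gva >> PAGE_SHIFT and the page-aligned gva is handed straight to nonpaging_map(). A reconstruction of the body around the matched lines (the mmu_topup_memory_caches() call and PFERR_RSVD_MASK check are as remembered from this era):

    static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
                                    u32 error_code, bool prefault)
    {
            gfn_t gfn;
            int r;

            pgprintk("%s: gva %lx error %x\n", __func__, gva, error_code);

            /* A reserved-bit fault on a present SPTE marks MMIO. */
            if (unlikely(error_code & PFERR_RSVD_MASK))
                    return handle_mmio_page_fault(vcpu, gva, error_code, true);

            r = mmu_topup_memory_caches(vcpu);
            if (r)
                    return r;

            /* No guest paging: guest virtual == guest physical. */
            gfn = gva >> PAGE_SHIFT;

            return nonpaging_map(vcpu, gva & PAGE_MASK,
                                 error_code, gfn, prefault);
    }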
3002 static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn) in kvm_arch_setup_async_pf() argument
3011 return kvm_setup_async_pf(vcpu, gva, gfn, &arch); in kvm_arch_setup_async_pf()
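kvm_arch_setup_async_pf() packs the x86-specific context (a token to match the completion event, the gfn, and the paging mode) and hands the gva/gfn pair to the generic kvm_setup_async_pf(), which queues the host-side page-in work. Sketch, with the token encoding given as an assumption:

    static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn)
    {
            struct kvm_arch_async_pf arch;

            /* The token lets the "page ready" event be matched later. */
            arch.token = (vcpu->arch.apf.id++ << 12) | vcpu->vcpu_id;
            arch.gfn = gfn;
            arch.direct_map = vcpu->arch.mmu.direct_map;
            arch.cr3 = vcpu->arch.mmu.get_cr3(vcpu);

            return kvm_setup_async_pf(vcpu, gva, gfn, &arch);
    }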
3024 gva_t gva, pfn_t *pfn, bool write, bool *writable) in try_async_pf() argument
3036 trace_kvm_try_async_get_page(gva, gfn); in try_async_pf()
3038 trace_kvm_async_pf_doublefault(gva, gfn); in try_async_pf()
3041 } else if (kvm_arch_setup_async_pf(vcpu, gva, gfn)) in try_async_pf()
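try_async_pf(), declared at line 2584 above and defined here, first attempts a non-blocking pfn lookup. If the page would need I/O and the vcpu may take an async fault, it either records a double fault when the same gfn is already queued (halting the vcpu until the page arrives) or queues new work via kvm_arch_setup_async_pf(); otherwise it falls back to a blocking lookup. Reconstructed sketch; gfn_to_pfn_async()/gfn_to_pfn_prot() reflect the API of this period:

    static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
                             gva_t gva, pfn_t *pfn, bool write, bool *writable)
    {
            bool async;

            /* Non-blocking attempt; async is set if I/O would be needed. */
            *pfn = gfn_to_pfn_async(vcpu->kvm, gfn, &async, write, writable);
            if (!async)
                    return false; /* *pfn is already usable */

            if (!prefault && can_do_async_pf(vcpu)) {
                    trace_kvm_try_async_get_page(gva, gfn);
                    if (kvm_find_async_pf_gfn(vcpu, gfn)) {
                            /* Same gfn already in flight: halt until it lands. */
                            trace_kvm_async_pf_doublefault(gva, gfn);
                            kvm_make_request(KVM_REQ_APF_HALT, vcpu);
                            return true;
                    } else if (kvm_arch_setup_async_pf(vcpu, gva, gfn))
                            return true;
            }

            /* Fall back to a blocking lookup. */
            *pfn = gfn_to_pfn_prot(vcpu->kvm, gfn, write, writable);
            return false;
    }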
3694 int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva) in kvm_mmu_unprotect_page_virt() argument
3702 gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, NULL); in kvm_mmu_unprotect_page_virt()
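kvm_mmu_unprotect_page_virt() is used when emulation keeps refaulting on a write-protected guest page table: it translates the faulting gva to a gpa with a read walk (the matched line), then zaps the shadow pages covering that gfn so the guest can make progress. With a direct map there are no shadowed guest page tables, so it does nothing. Sketch:

    int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
    {
            gpa_t gpa;
            int r;

            /* Direct-map MMUs shadow no guest page tables. */
            if (vcpu->arch.mmu.direct_map)
                    return 0;

            gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, NULL);

            r = kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);

            return r;
    }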
3770 void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva) in kvm_mmu_invlpg() argument
3772 vcpu->arch.mmu.invlpg(vcpu, gva); in kvm_mmu_invlpg()
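kvm_mmu_invlpg() is the exported entry point for a guest INVLPG: the matched line dispatches through the per-mode invlpg callback (nonpaging_invlpg above, or a paging variant from paging_tmpl.h), after which the TLB is flushed and the invlpg statistic is bumped. Sketch of the full body, as remembered:

    void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
    {
            /* Mode-specific handler installed during MMU setup. */
            vcpu->arch.mmu.invlpg(vcpu, gva);
            kvm_mmu_flush_tlb(vcpu);
            ++vcpu->stat.invlpg;
    }
    EXPORT_SYMBOL_GPL(kvm_mmu_invlpg);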