
Searched refs:gva (Results 1 – 25 of 31) sorted by relevance

/linux-6.1.9/include/trace/events/
kvm.h
261 TP_PROTO(u64 gva, u64 gfn),
263 TP_ARGS(gva, gfn),
266 __field(__u64, gva)
271 __entry->gva = gva;
275 TP_printk("gva = %#llx, gfn = %#llx", __entry->gva, __entry->gfn)
280 TP_PROTO(u64 gva, u64 gfn),
282 TP_ARGS(gva, gfn)
287 TP_PROTO(u64 gva, u64 gfn),
289 TP_ARGS(gva, gfn)
294 TP_PROTO(u64 token, u64 gva),
[all …]
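The fragments above are standard kernel tracepoint boilerplate from kvm.h. As a sketch of how the TP_* pieces assemble (the class name is assumed from context; the body is a reconstruction, not a verbatim copy):

    DECLARE_EVENT_CLASS(kvm_async_get_page_class,

        TP_PROTO(u64 gva, u64 gfn),

        TP_ARGS(gva, gfn),

        /* fields recorded into the trace ring buffer */
        TP_STRUCT__entry(
            __field(__u64, gva)
            __field(u64, gfn)
        ),

        /* executed at the trace site */
        TP_fast_assign(
            __entry->gva = gva;
            __entry->gfn = gfn;
        ),

        TP_printk("gva = %#llx, gfn = %#llx", __entry->gva, __entry->gfn)
    );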
/linux-6.1.9/tools/testing/selftests/kvm/lib/s390x/
processor.c
49 void virt_arch_pg_map(struct kvm_vm *vm, uint64_t gva, uint64_t gpa) in virt_arch_pg_map() argument
54 TEST_ASSERT((gva % vm->page_size) == 0, in virt_arch_pg_map()
57 gva, vm->page_size); in virt_arch_pg_map()
59 (gva >> vm->page_shift)), in virt_arch_pg_map()
61 gva); in virt_arch_pg_map()
65 gva, vm->page_size); in virt_arch_pg_map()
69 gva, vm->max_gfn, vm->page_size); in virt_arch_pg_map()
74 idx = (gva >> (64 - 11 * ri)) & 0x7ffu; in virt_arch_pg_map()
81 idx = (gva >> 12) & 0x0ffu; /* page index */ in virt_arch_pg_map()
88 vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva) in addr_arch_gva2gpa() argument
[all …]
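For context on the index arithmetic in virt_arch_pg_map(): s390 dynamic address translation walks up to four region/segment table levels of 11 bits each, plus an 8-bit page index at bit 12. A minimal standalone sketch of the two expressions above (ri is the region/segment level, counted from 1):

    #include <stdint.h>

    /* 11-bit region/segment table index at level ri, as computed in
     * virt_arch_pg_map() above */
    static uint64_t s390_table_index(uint64_t gva, int ri)
    {
        return (gva >> (64 - 11 * ri)) & 0x7ffu;
    }

    /* 8-bit page index for 4 KiB pages */
    static uint64_t s390_page_index(uint64_t gva)
    {
        return (gva >> 12) & 0x0ffu;
    }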
/linux-6.1.9/arch/riscv/kvm/
tlb.c
80 unsigned long gva, in kvm_riscv_local_hfence_vvma_asid_gva() argument
95 for (pos = gva; pos < (gva + gvsz); pos += BIT(order)) in kvm_riscv_local_hfence_vvma_asid_gva()
100 for (pos = gva; pos < (gva + gvsz); pos += BIT(order)) in kvm_riscv_local_hfence_vvma_asid_gva()
121 unsigned long gva, unsigned long gvsz, in kvm_riscv_local_hfence_vvma_gva() argument
135 for (pos = gva; pos < (gva + gvsz); pos += BIT(order)) in kvm_riscv_local_hfence_vvma_gva()
140 for (pos = gva; pos < (gva + gvsz); pos += BIT(order)) in kvm_riscv_local_hfence_vvma_gva()
354 unsigned long gva, unsigned long gvsz, in kvm_riscv_hfence_vvma_asid_gva() argument
361 data.addr = gva; in kvm_riscv_hfence_vvma_asid_gva()
383 unsigned long gva, unsigned long gvsz, in kvm_riscv_hfence_vvma_gva() argument
390 data.addr = gva; in kvm_riscv_hfence_vvma_gva()
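Both loops in tlb.c share one pattern: split the guest virtual range into 2^order-byte blocks and fence each block. A hedged sketch, with the inline-assembly HFENCE.VVMA wrapper replaced by a stub:

    /* Placeholder for the HFENCE.VVMA wrapper (an assumption; the kernel
     * open-codes the instruction in inline assembly). */
    static void hfence_vvma_one(unsigned long va) { (void)va; }

    /* Flush [gva, gva + gvsz) one naturally sized block at a time,
     * mirroring kvm_riscv_local_hfence_vvma_gva() above. */
    static void hfence_vvma_range(unsigned long gva, unsigned long gvsz,
                                  unsigned long order)
    {
        unsigned long pos;

        for (pos = gva; pos < (gva + gvsz); pos += (1UL << order))
            hfence_vvma_one(pos);
    }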
/linux-6.1.9/arch/x86/kvm/vmx/
sgx.c
24 int size, int alignment, gva_t *gva) in sgx_get_encls_gva() argument
30 *gva = offset; in sgx_get_encls_gva()
33 *gva += s.base; in sgx_get_encls_gva()
36 if (!IS_ALIGNED(*gva, alignment)) { in sgx_get_encls_gva()
39 fault = is_noncanonical_address(*gva, vcpu); in sgx_get_encls_gva()
41 *gva &= 0xffffffff; in sgx_get_encls_gva()
44 (*gva > s.limit) || in sgx_get_encls_gva()
46 (((u64)*gva + size - 1) > s.limit + 1)); in sgx_get_encls_gva()
72 static int sgx_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t gva, bool write, in sgx_gva_to_gpa() argument
78 *gpa = kvm_mmu_gva_to_gpa_write(vcpu, gva, &ex); in sgx_gva_to_gpa()
[all …]
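sgx_get_encls_gva() forms the linear address of an ENCLS memory operand (segment base + offset) and validates it: alignment always, canonicality in 64-bit mode, truncation plus a segment-limit check otherwise. A condensed standalone sketch (parameter names and the long_mode flag are descriptive assumptions, not the kernel's signature; the canonicality test is elided):

    #include <stdbool.h>
    #include <stdint.h>

    static bool encls_gva_ok(uint64_t base, uint64_t limit, uint64_t offset,
                             int size, int alignment, bool long_mode,
                             uint64_t *gva)
    {
        *gva = base + offset;
        if (!long_mode)
            *gva &= 0xffffffff;          /* 32-bit truncation */
        if (*gva & (alignment - 1))
            return false;                /* misaligned operand */
        if (long_mode)
            return true;                 /* canonicality check elided */
        return *gva <= limit && *gva + size - 1 <= limit + 1;
    }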
vmx_ops.h
19 void invvpid_error(unsigned long ext, u16 vpid, gva_t gva);
282 static inline void __invvpid(unsigned long ext, u16 vpid, gva_t gva) in __invvpid() argument
287 u64 gva; in __invvpid() member
288 } operand = { vpid, 0, gva }; in __invvpid()
290 vmx_asm2(invvpid, "r"(ext), "m"(operand), ext, vpid, gva); in __invvpid()
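The anonymous struct built in __invvpid() is the 128-bit memory operand the INVVPID instruction consumes: VPID in bits 15:0, reserved bits that must be zero, and the linear address in the high quadword. Restated as a named type for clarity:

    /* INVVPID memory-operand layout, per the initializer { vpid, 0, gva }
     * in __invvpid() above (u64 is the kernel's 64-bit type) */
    struct invvpid_operand {
        u64 vpid : 16;  /* virtual processor ID */
        u64 rsvd : 48;  /* reserved, must be zero */
        u64 gva;        /* linear address, used by the single-address
                         * invalidation type */
    };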
nested.c
4994 gva_t gva; in nested_vmx_get_vmptr() local
5000 sizeof(*vmpointer), &gva)) { in nested_vmx_get_vmptr()
5005 r = kvm_read_guest_virt(vcpu, gva, vmpointer, sizeof(*vmpointer), &e); in nested_vmx_get_vmptr()
5295 gva_t gva = 0; in handle_vmread() local
5356 instr_info, true, len, &gva)) in handle_vmread()
5359 r = kvm_write_guest_virt_system(vcpu, gva, &value, len, &e); in handle_vmread()
5401 gva_t gva; in handle_vmwrite() local
5430 instr_info, false, len, &gva)) in handle_vmwrite()
5432 r = kvm_read_guest_virt(vcpu, gva, &value, len, &e); in handle_vmwrite()
5588 gva_t gva; in handle_vmptrst() local
[all …]
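The nested.c hits all follow one operand pattern: decode the VMX instruction's memory operand into a gva, then copy through the guest's page tables. A condensed sketch of the handle_vmread() tail (surrounding declarations and error handling abbreviated; not a verbatim excerpt):

    gva_t gva = 0;
    struct x86_exception e;

    /* decode the instruction's memory operand into a guest virtual address */
    if (get_vmx_mem_address(vcpu, exit_qualification, instr_info,
                            true /* write */, len, &gva))
        return 1;
    /* store the VMCS field value out through guest virtual memory */
    r = kvm_write_guest_virt_system(vcpu, gva, &value, len, &e);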
/linux-6.1.9/tools/testing/selftests/kvm/lib/aarch64/
processor.c
24 static uint64_t pgd_index(struct kvm_vm *vm, vm_vaddr_t gva) in pgd_index() argument
29 return (gva >> shift) & mask; in pgd_index()
32 static uint64_t pud_index(struct kvm_vm *vm, vm_vaddr_t gva) in pud_index() argument
40 return (gva >> shift) & mask; in pud_index()
43 static uint64_t pmd_index(struct kvm_vm *vm, vm_vaddr_t gva) in pmd_index() argument
51 return (gva >> shift) & mask; in pmd_index()
54 static uint64_t pte_index(struct kvm_vm *vm, vm_vaddr_t gva) in pte_index() argument
57 return (gva >> vm->page_shift) & mask; in pte_index()
141 vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva) in addr_arch_gva2gpa() argument
148 ptep = addr_gpa2hva(vm, vm->pgd) + pgd_index(vm, gva) * 8; in addr_arch_gva2gpa()
[all …]
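The four index helpers in the aarch64 library each extract a 9-bit slice of the gva at a level-dependent shift. With a 4 KiB granule and four levels, the shifts are the usual 39/30/21/12 split; a standalone sketch under that assumption (the real helpers derive shift and mask from vm->page_shift and vm->va_bits):

    #include <stdint.h>

    /* 9-bit translation-table index, level 0 = pgd ... level 3 = pte,
     * 4 KiB granule assumed */
    static uint64_t table_index(uint64_t gva, unsigned int level)
    {
        unsigned int shift = 12 + 9 * (3 - level);

        return (gva >> shift) & 0x1ffu;
    }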
ucall.c
100 vm_vaddr_t gva; in get_ucall() local
104 memcpy(&gva, run->mmio.data, sizeof(gva)); in get_ucall()
105 memcpy(&ucall, addr_gva2hva(vcpu->vm, gva), sizeof(ucall)); in get_ucall()
/linux-6.1.9/tools/testing/selftests/kvm/lib/
perf_test_util.c
49 uint64_t gva; in perf_test_guest_code() local
53 gva = vcpu_args->gva; in perf_test_guest_code()
61 uint64_t addr = gva + (i * pta->guest_page_size); in perf_test_guest_code()
89 vcpu_args->gva = guest_test_virt_mem + in perf_test_setup_vcpus()
95 vcpu_args->gva = guest_test_virt_mem; in perf_test_setup_vcpus()
/linux-6.1.9/arch/x86/kvm/
x86.h
204 gva_t gva, gfn_t gfn, unsigned access) in vcpu_cache_mmio_info() argument
215 vcpu->arch.mmio_gva = mmu_is_nested(vcpu) ? 0 : gva & PAGE_MASK; in vcpu_cache_mmio_info()
232 static inline void vcpu_clear_mmio_info(struct kvm_vcpu *vcpu, gva_t gva) in vcpu_clear_mmio_info() argument
234 if (gva != MMIO_GVA_ANY && vcpu->arch.mmio_gva != (gva & PAGE_MASK)) in vcpu_clear_mmio_info()
240 static inline bool vcpu_match_mmio_gva(struct kvm_vcpu *vcpu, unsigned long gva) in vcpu_match_mmio_gva() argument
243 vcpu->arch.mmio_gva == (gva & PAGE_MASK)) in vcpu_match_mmio_gva()
303 void kvm_fixup_and_inject_pf_error(struct kvm_vcpu *vcpu, gva_t gva, u16 error_code);
444 int kvm_handle_invpcid(struct kvm_vcpu *vcpu, unsigned long type, gva_t gva);
trace.h
850 TP_PROTO(gva_t gva, gpa_t gpa, bool write, bool gpa_match),
851 TP_ARGS(gva, gpa, write, gpa_match),
854 __field(gva_t, gva)
861 __entry->gva = gva;
867 TP_printk("gva %#lx gpa %#llx %s %s", __entry->gva, __entry->gpa,
/linux-6.1.9/tools/testing/selftests/kvm/
access_tracking_perf_test.c
97 static uint64_t lookup_pfn(int pagemap_fd, struct kvm_vm *vm, uint64_t gva) in lookup_pfn() argument
99 uint64_t hva = (uint64_t) addr_gva2hva(vm, gva); in lookup_pfn()
132 uint64_t base_gva = vcpu_args->gva; in mark_vcpu_memory_idle()
151 uint64_t gva = base_gva + page * perf_test_args.guest_page_size; in mark_vcpu_memory_idle() local
152 uint64_t pfn = lookup_pfn(pagemap_fd, vm, gva); in mark_vcpu_memory_idle()
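lookup_pfn() works in two steps: translate the gva to a host virtual address with addr_gva2hva(), then read that hva's entry from /proc/<pid>/pagemap. A standalone user-space sketch of the pagemap half (entry format per Documentation/admin-guide/mm/pagemap.rst: 64 bits per page, pfn in bits 54:0, present flag in bit 63):

    #include <stdint.h>
    #include <unistd.h>

    /* Return the host pfn backing hva, or 0 if the page is not present. */
    static uint64_t hva_to_pfn(int pagemap_fd, uint64_t hva)
    {
        uint64_t entry;
        off_t offset = (hva / sysconf(_SC_PAGESIZE)) * sizeof(entry);

        if (pread(pagemap_fd, &entry, sizeof(entry), offset) != sizeof(entry))
            return 0;
        if (!(entry & (1ULL << 63)))            /* present bit */
            return 0;
        return entry & ((1ULL << 55) - 1);      /* pfn, bits 54:0 */
    }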
/linux-6.1.9/arch/riscv/include/asm/
kvm_host.h
249 unsigned long gva,
255 unsigned long gva, unsigned long gvsz,
276 unsigned long gva, unsigned long gvsz,
283 unsigned long gva, unsigned long gvsz,
/linux-6.1.9/tools/testing/selftests/kvm/lib/riscv/
processor.c
46 static uint64_t pte_index(struct kvm_vm *vm, vm_vaddr_t gva, int level) in pte_index() argument
53 return (gva & pte_index_mask[level]) >> pte_index_shift[level]; in pte_index()
111 vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva) in addr_arch_gva2gpa() argument
119 ptep = addr_gpa2hva(vm, vm->pgd) + pte_index(vm, gva, level) * 8; in addr_arch_gva2gpa()
126 pte_index(vm, gva, level) * 8; in addr_arch_gva2gpa()
132 return pte_addr(vm, *ptep) + (gva & (vm->page_size - 1)); in addr_arch_gva2gpa()
136 gva, level); in addr_arch_gva2gpa()
/linux-6.1.9/arch/s390/kvm/
gaccess.c
496 static int trans_exc_ending(struct kvm_vcpu *vcpu, int code, unsigned long gva, u8 ar, in trans_exc_ending() argument
546 tec->addr = gva >> PAGE_SHIFT; in trans_exc_ending()
566 static int trans_exc(struct kvm_vcpu *vcpu, int code, unsigned long gva, u8 ar, in trans_exc() argument
569 return trans_exc_ending(vcpu, code, gva, ar, mode, prot, false); in trans_exc()
632 static unsigned long guest_translate(struct kvm_vcpu *vcpu, unsigned long gva, in guest_translate() argument
636 union vaddress vaddr = {.addr = gva}; in guest_translate()
637 union raddress raddr = {.addr = gva}; in guest_translate()
1179 int guest_translate_address_with_key(struct kvm_vcpu *vcpu, unsigned long gva, u8 ar, in guest_translate_address_with_key() argument
1186 gva = kvm_s390_logical_to_effective(vcpu, gva); in guest_translate_address_with_key()
1187 rc = get_vcpu_asce(vcpu, &asce, gva, ar, mode); in guest_translate_address_with_key()
[all …]
gaccess.h
189 int guest_translate_address_with_key(struct kvm_vcpu *vcpu, unsigned long gva, u8 ar,
193 int check_gva_range(struct kvm_vcpu *vcpu, unsigned long gva, u8 ar,
/linux-6.1.9/arch/mips/kvm/
tlb.c
166 int kvm_vz_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long gva, in kvm_vz_guest_tlb_lookup() argument
184 write_gc0_entryhi((o_entryhi & 0x3ff) | (gva & ~0xfffl)); in kvm_vz_guest_tlb_lookup()
226 pa = entrylo[!!(gva & pagemaskbit)]; in kvm_vz_guest_tlb_lookup()
240 pa |= gva & ~(pagemask | pagemaskbit); in kvm_vz_guest_tlb_lookup()
vz.c
197 static gpa_t kvm_vz_gva_to_gpa_cb(gva_t gva) in kvm_vz_gva_to_gpa_cb() argument
200 return gva; in kvm_vz_gva_to_gpa_cb()
689 static int kvm_vz_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva, in kvm_vz_gva_to_gpa() argument
692 u32 gva32 = gva; in kvm_vz_gva_to_gpa()
695 if ((long)gva == (s32)gva32) { in kvm_vz_gva_to_gpa()
750 } else if ((gva & 0xc000000000000000) == 0x8000000000000000) { in kvm_vz_gva_to_gpa()
758 if (segctl & (1ull << (56 + ((gva >> 59) & 0x7)))) { in kvm_vz_gva_to_gpa()
772 *gpa = gva & 0x07ffffffffffffff; in kvm_vz_gva_to_gpa()
778 return kvm_vz_guest_tlb_lookup(vcpu, gva, gpa); in kvm_vz_gva_to_gpa()
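kvm_vz_gva_to_gpa() handles MIPS unmapped segments before falling back to a guest TLB lookup; the xkphys case at lines 750 and 772 is simple enough to restate standalone (masks copied from the fragment):

    #include <stdbool.h>

    /* Top two bits 0b10 => xkphys, an unmapped segment */
    static bool gva_is_xkphys(unsigned long gva)
    {
        return (gva & 0xc000000000000000ul) == 0x8000000000000000ul;
    }

    /* Within xkphys the gpa is just the low 59 bits of the gva */
    static unsigned long xkphys_gva_to_gpa(unsigned long gva)
    {
        return gva & 0x07ffffffffffffff;
    }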
/linux-6.1.9/tools/testing/selftests/kvm/x86_64/
cpuid_test.c
138 vm_vaddr_t gva = vm_vaddr_alloc(vm, size, KVM_UTIL_MIN_VADDR); in vcpu_alloc_cpuid() local
139 struct kvm_cpuid2 *guest_cpuids = addr_gva2hva(vm, gva); in vcpu_alloc_cpuid()
143 *p_gva = gva; in vcpu_alloc_cpuid()
/linux-6.1.9/tools/testing/selftests/kvm/include/
perf_test_util.h
24 uint64_t gva; member
kvm_util_base.h
283 int kvm_memcmp_hva_gva(void *hva, struct kvm_vm *vm, const vm_vaddr_t gva,
393 void *addr_gva2hva(struct kvm_vm *vm, vm_vaddr_t gva);
806 vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva);
808 static inline vm_paddr_t addr_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva) in addr_gva2gpa() argument
810 return addr_arch_gva2gpa(vm, gva); in addr_gva2gpa()
/linux-6.1.9/arch/x86/include/asm/
kvm_host.h
440 void (*invlpg)(struct kvm_vcpu *vcpu, gva_t gva, hpa_t root_hpa);
1910 gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva,
1912 gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva,
1914 gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva,
1916 gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva,
1943 void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva);
1945 gva_t gva, hpa_t root_hpa);
1946 void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid);
/linux-6.1.9/tools/testing/selftests/kvm/lib/x86_64/
processor.c
513 vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva) in addr_arch_gva2gpa() argument
522 index[0] = (gva >> 12) & 0x1ffu; in addr_arch_gva2gpa()
523 index[1] = (gva >> 21) & 0x1ffu; in addr_arch_gva2gpa()
524 index[2] = (gva >> 30) & 0x1ffu; in addr_arch_gva2gpa()
525 index[3] = (gva >> 39) & 0x1ffu; in addr_arch_gva2gpa()
545 return (PTE_GET_PFN(pte[index[0]]) * vm->page_size) + (gva & ~PAGE_MASK); in addr_arch_gva2gpa()
548 TEST_FAIL("No mapping for vm virtual address, gva: 0x%lx", gva); in addr_arch_gva2gpa()
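addr_arch_gva2gpa() in the x86_64 library is a plain 4-level walk: four 9-bit indices above a 12-bit page offset, and the final gpa is the PTE's pfn recombined with that offset. A sketch of just the address arithmetic (4 KiB pages assumed):

    #include <stdint.h>

    /* 9-bit table index for walk level 0 (PTE) .. 3 (PGD), matching
     * index[0..3] above */
    static uint64_t x86_index(uint64_t gva, int level)
    {
        return (gva >> (12 + 9 * level)) & 0x1ffu;
    }

    /* Final translation once the walk reaches the mapping PTE */
    static uint64_t x86_gpa(uint64_t pfn, uint64_t gva)
    {
        return (pfn << 12) | (gva & 0xfffu);
    }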
/linux-6.1.9/arch/x86/kvm/mmu/
mmu.c
2648 static int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva) in kvm_mmu_unprotect_page_virt() argument
2656 gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, NULL); in kvm_mmu_unprotect_page_virt()
3178 gva_t gva = fault->is_tdp ? 0 : fault->addr; in handle_abnormal_pfn() local
3180 vcpu_cache_mmio_info(vcpu, gva, fault->gfn, in handle_abnormal_pfn()
5594 gva_t gva, hpa_t root_hpa) in kvm_mmu_invalidate_gva() argument
5601 if (is_noncanonical_address(gva, vcpu)) in kvm_mmu_invalidate_gva()
5604 static_call(kvm_x86_flush_tlb_gva)(vcpu, gva); in kvm_mmu_invalidate_gva()
5611 mmu->invlpg(vcpu, gva, mmu->root.hpa); in kvm_mmu_invalidate_gva()
5626 mmu->invlpg(vcpu, gva, mmu->prev_roots[i].hpa); in kvm_mmu_invalidate_gva()
5628 mmu->invlpg(vcpu, gva, root_hpa); in kvm_mmu_invalidate_gva()
[all …]
paging_tmpl.h
900 static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva, hpa_t root_hpa) in FNAME()
908 vcpu_clear_mmio_info(vcpu, gva); in FNAME()
922 for_each_shadow_entry_using_root(vcpu, root_hpa, gva, iterator) { in FNAME()
