
Searched refs: base_gfn (Results 1 – 25 of 26), sorted by relevance


/linux-6.1.9/arch/x86/kvm/
mmu.h:243 static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level) in gfn_to_index() argument
247 (base_gfn >> KVM_HPAGE_GFN_SHIFT(level)); in gfn_to_index()
254 return gfn_to_index(slot->base_gfn + npages - 1, in __kvm_mmu_slot_lpages()
255 slot->base_gfn, level) + 1; in __kvm_mmu_slot_lpages()
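
The two x86 hits above turn a gfn into a slot-relative index at a given hugepage level. Below is a minimal standalone sketch of that arithmetic; the typedef, the KVM_HPAGE_GFN_SHIFT stand-in, and the sample numbers are simplified assumptions, not the kernel definitions.

/*
 * Sketch of the indexing arithmetic shown in gfn_to_index() /
 * __kvm_mmu_slot_lpages() above. Simplified stand-ins, not kernel code.
 */
#include <stdio.h>

typedef unsigned long long gfn_t;

#define PG_LEVEL_4K	1
/* Assumption: each level above 4K covers 512x more frames, i.e. 9 more GFN bits. */
#define KVM_HPAGE_GFN_SHIFT(level)	(((level) - PG_LEVEL_4K) * 9)

static gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
{
        /* Index of the huge page containing gfn, relative to the slot start. */
        return (gfn >> KVM_HPAGE_GFN_SHIFT(level)) -
               (base_gfn >> KVM_HPAGE_GFN_SHIFT(level));
}

int main(void)
{
        gfn_t base_gfn = 0x1000, npages = 0x800;

        /* Entries needed at the 2M level (level 2) to cover the whole slot. */
        printf("%llu\n", gfn_to_index(base_gfn + npages - 1, base_gfn, 2) + 1);
        return 0;
}
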
/linux-6.1.9/arch/riscv/kvm/
mmu.c:337 phys_addr_t start = memslot->base_gfn << PAGE_SHIFT; in gstage_wp_memory_region()
338 phys_addr_t end = (memslot->base_gfn + memslot->npages) << PAGE_SHIFT; in gstage_wp_memory_region()
398 phys_addr_t base_gfn = slot->base_gfn + gfn_offset; in kvm_arch_mmu_enable_log_dirty_pt_masked() local
399 phys_addr_t start = (base_gfn + __ffs(mask)) << PAGE_SHIFT; in kvm_arch_mmu_enable_log_dirty_pt_masked()
400 phys_addr_t end = (base_gfn + __fls(mask) + 1) << PAGE_SHIFT; in kvm_arch_mmu_enable_log_dirty_pt_masked()
431 gpa_t gpa = slot->base_gfn << PAGE_SHIFT; in kvm_arch_flush_shadow_memslot()
471 if ((new->base_gfn + new->npages) >= in kvm_arch_prepare_memory_region()
478 base_gpa = new->base_gfn << PAGE_SHIFT; in kvm_arch_prepare_memory_region()
/linux-6.1.9/arch/arm64/kvm/
mmu.c:231 phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT; in stage2_flush_memslot()
726 phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT; in stage2_unmap_memslot()
890 start = memslot->base_gfn << PAGE_SHIFT; in kvm_mmu_wp_memory_region()
891 end = (memslot->base_gfn + memslot->npages) << PAGE_SHIFT; in kvm_mmu_wp_memory_region()
914 phys_addr_t base_gfn = slot->base_gfn + gfn_offset; in kvm_mmu_write_protect_pt_masked() local
915 phys_addr_t start = (base_gfn + __ffs(mask)) << PAGE_SHIFT; in kvm_mmu_write_protect_pt_masked()
916 phys_addr_t end = (base_gfn + __fls(mask) + 1) << PAGE_SHIFT; in kvm_mmu_write_protect_pt_masked()
954 gpa_start = memslot->base_gfn << PAGE_SHIFT; in fault_supports_stage2_huge_mapping()
1718 if ((new->base_gfn + new->npages) > (kvm_phys_size(kvm) >> PAGE_SHIFT)) in kvm_arch_prepare_memory_region()
1783 gpa_t gpa = slot->base_gfn << PAGE_SHIFT; in kvm_arch_flush_shadow_memslot()
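
The arm64 hits (and the matching riscv and mips ones) convert one word of the dirty bitmap into a guest-physical range: base_gfn plus the first and last set bit of the mask, shifted by PAGE_SHIFT. A rough userspace sketch of that conversion follows; compiler builtins stand in for __ffs()/__fls(), and the mask, offsets, and PAGE_SHIFT value are hypothetical.

/*
 * Sketch of the mask-to-range conversion used by the masked dirty-log
 * helpers above. Builtins replace __ffs()/__fls(); values are made up.
 */
#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
        unsigned long long slot_base_gfn = 0x100000;  /* hypothetical memslot base */
        unsigned long long gfn_offset = 128;          /* offset of this bitmap word */
        unsigned long long mask = 0xf0;               /* dirty bits 4..7 set */

        unsigned long long base_gfn = slot_base_gfn + gfn_offset;
        /* First and last set bit of the mask bound the range to write-protect. */
        unsigned long long start = (base_gfn + __builtin_ctzll(mask)) << PAGE_SHIFT;
        unsigned long long end = (base_gfn + (63 - __builtin_clzll(mask)) + 1) << PAGE_SHIFT;

        printf("write-protect GPAs 0x%llx - 0x%llx\n", start, end);
        return 0;
}
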
/linux-6.1.9/arch/powerpc/kvm/
trace_hv.h:305 __field(u64, base_gfn)
317 __entry->base_gfn = memslot ? memslot->base_gfn : -1UL;
325 __entry->base_gfn, __entry->slot_flags)
book3s_hv_uvmem.c:261 p->base_pfn = slot->base_gfn; in kvmppc_uvmem_slot_init()
279 if (p->base_pfn == slot->base_gfn) { in kvmppc_uvmem_slot_free()
394 unsigned long gfn = memslot->base_gfn; in kvmppc_memslot_page_merge()
445 memslot->base_gfn << PAGE_SHIFT, in __kvmppc_uvmem_memslot_create()
619 gfn = slot->base_gfn; in kvmppc_uvmem_drop_pages()
792 unsigned long gfn = memslot->base_gfn; in kvmppc_uv_migrate_mem_slot()
book3s_64_mmu_hv.c:577 if (gfn_base < memslot->base_gfn) in kvmppc_book3s_hv_page_fault()
691 rmap = &memslot->arch.rmap[gfn_base - memslot->base_gfn]; in kvmppc_book3s_hv_page_fault()
813 rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn]; in kvm_unmap_rmapp()
864 gfn = memslot->base_gfn; in kvmppc_core_flush_memslot_hv()
893 rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn]; in kvm_age_rmapp()
964 rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn]; in kvm_test_age_rmapp()
1109 if (gfn < memslot->base_gfn || in kvmppc_harvest_vpa_dirty()
1110 gfn >= memslot->base_gfn + memslot->npages) in kvmppc_harvest_vpa_dirty()
1115 __set_bit_le(gfn - memslot->base_gfn, map); in kvmppc_harvest_vpa_dirty()
1190 set_bit_le(gfn - memslot->base_gfn, memslot->dirty_bitmap); in kvmppc_unpin_guest_page()
[all …]
book3s_64_mmu_radix.c:1058 rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn]; in kvm_age_radix()
1090 unsigned long gfn = memslot->base_gfn + pagenum; in kvm_radix_test_clear_dirty()
1133 rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn]; in kvm_radix_test_clear_dirty()
1181 gpa = memslot->base_gfn << PAGE_SHIFT; in kvmppc_radix_flush_memslot()
book3s_hv_rm_mmu.c:104 gfn -= memslot->base_gfn; in kvmppc_update_dirty_map()
142 rmap = real_vmalloc_addr(&memslot->arch.rmap[gfn - memslot->base_gfn]); in revmap_for_hpte()
242 slot_fn = gfn - memslot->base_gfn; in kvmppc_do_h_enter()
e500_mmu_host.c:381 slot_start = pfn - (gfn - slot->base_gfn); in kvmppc_e500_shadow_map()
book3s_hv_nested.c:1008 gfn = (gpa >> PAGE_SHIFT) - memslot->base_gfn; in kvmhv_remove_nest_rmap_range()
1640 rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn]; in __kvmhv_nested_page_fault()
book3s_pr.c:1886 ga = memslot->base_gfn << PAGE_SHIFT; in kvm_vm_ioctl_get_dirty_log_pr()
book3s_hv.c:925 if ((from + len) >= ((from_memslot->base_gfn + from_memslot->npages) in kvmppc_copy_guest()
937 if ((to + len) >= ((to_memslot->base_gfn + to_memslot->npages) in kvmppc_copy_guest()
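
Most of the powerpc hits index per-slot arrays (arch.rmap, the dirty bitmap) by the slot-relative frame number gfn - base_gfn. A small illustrative model of that pattern; the structure layout, helper name, and sizes are made up for the example.

/*
 * Sketch of per-slot array indexing by gfn - base_gfn, as seen in the
 * rmap and dirty-bitmap hits above. Toy structure, not the kernel's.
 */
#include <stdio.h>

struct memslot {
        unsigned long long base_gfn;
        unsigned long long npages;
        unsigned long *rmap;           /* one entry per page in the slot */
};

static unsigned long *gfn_to_rmap_entry(struct memslot *slot, unsigned long long gfn)
{
        /* Valid only for gfn in [base_gfn, base_gfn + npages). */
        return &slot->rmap[gfn - slot->base_gfn];
}

int main(void)
{
        unsigned long rmap[16] = { 0 };
        struct memslot slot = { .base_gfn = 0x2000, .npages = 16, .rmap = rmap };

        *gfn_to_rmap_entry(&slot, 0x2003) = 1;
        printf("%lu\n", rmap[3]);
        return 0;
}
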
/linux-6.1.9/arch/mips/kvm/
mmu.c:419 gfn_t base_gfn = slot->base_gfn + gfn_offset; in kvm_arch_mmu_enable_log_dirty_pt_masked() local
420 gfn_t start = base_gfn + __ffs(mask); in kvm_arch_mmu_enable_log_dirty_pt_masked()
421 gfn_t end = base_gfn + __fls(mask); in kvm_arch_mmu_enable_log_dirty_pt_masked()
mips.c:210 kvm_mips_flush_gpa_pt(kvm, slot->base_gfn, in kvm_arch_flush_shadow_memslot()
211 slot->base_gfn + slot->npages - 1); in kvm_arch_flush_shadow_memslot()
245 needs_flush = kvm_mips_mkclean_gpa_pt(kvm, new->base_gfn, in kvm_arch_commit_memory_region()
246 new->base_gfn + new->npages - 1); in kvm_arch_commit_memory_region()
/linux-6.1.9/arch/x86/kvm/mmu/
page_track.c:92 index = gfn_to_index(gfn, slot->base_gfn, PG_LEVEL_4K); in update_gfn_track()
193 index = gfn_to_index(gfn, slot->base_gfn, PG_LEVEL_4K); in kvm_slot_page_track_is_active()
tdp_mmu.c:432 gfn_t base_gfn = sp->gfn; in handle_removed_pt() local
441 gfn_t gfn = base_gfn + i * KVM_PAGES_PER_HPAGE(level); in handle_removed_pt()
1411 spte_set |= wrprot_gfn_range(kvm, root, slot->base_gfn, in kvm_tdp_mmu_wrprot_slot()
1412 slot->base_gfn + slot->npages, min_level); in kvm_tdp_mmu_wrprot_slot()
1663 spte_set |= clear_dirty_gfn_range(kvm, root, slot->base_gfn, in kvm_tdp_mmu_clear_dirty_slot()
1664 slot->base_gfn + slot->npages); in kvm_tdp_mmu_clear_dirty_slot()
1736 gfn_t start = slot->base_gfn; in zap_collapsible_spte_range()
paging_tmpl.h:625 gfn_t base_gfn = fault->gfn; in FNAME() local
627 WARN_ON_ONCE(gw->gfn != base_gfn); in FNAME()
704 base_gfn = fault->gfn & ~(KVM_PAGES_PER_HPAGE(it.level) - 1); in FNAME()
710 sp = kvm_mmu_get_child_sp(vcpu, it.sptep, base_gfn, in FNAME()
725 base_gfn, fault->pfn, fault); in FNAME()
mmu.c:756 idx = gfn_to_index(gfn, slot->base_gfn, level); in lpage_info_slot()
1021 idx = gfn_to_index(gfn, slot->base_gfn, level); in gfn_to_rmap()
1258 slot->base_gfn + gfn_offset, mask, true); in kvm_mmu_write_protect_pt_masked()
1264 rmap_head = gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask), in kvm_mmu_write_protect_pt_masked()
1291 slot->base_gfn + gfn_offset, mask, false); in kvm_mmu_clear_dirty_pt_masked()
1297 rmap_head = gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask), in kvm_mmu_clear_dirty_pt_masked()
1330 gfn_t start = slot->base_gfn + gfn_offset + __ffs(mask); in kvm_arch_mmu_enable_log_dirty_pt_masked()
1331 gfn_t end = slot->base_gfn + gfn_offset + __fls(mask); in kvm_arch_mmu_enable_log_dirty_pt_masked()
3108 gfn_t base_gfn = fault->gfn; in __direct_map() local
3121 base_gfn = fault->gfn & ~(KVM_PAGES_PER_HPAGE(it.level) - 1); in __direct_map()
[all …]
/linux-6.1.9/include/linux/
kvm_host.h:570 gfn_t base_gfn; member
1027 if (start < slot->base_gfn) { in kvm_memslot_iter_start()
1065 if (iter->slot->base_gfn + iter->slot->npages <= start) in kvm_memslot_iter_start()
1079 return iter->slot->base_gfn < end; in kvm_memslot_iter_is_valid()
1624 if (gfn >= slot->base_gfn && gfn < slot->base_gfn + slot->npages) in try_get_memslot()
1647 if (gfn >= slot->base_gfn) { in search_memslots()
1648 if (gfn < slot->base_gfn + slot->npages) in search_memslots()
1697 unsigned long offset = gfn - slot->base_gfn; in __gfn_to_hva_memslot()
1712 return slot->base_gfn + gfn_offset; in hva_to_gfn_memslot()
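
kvm_host.h is where base_gfn is declared, and the inline helpers above bound-check a gfn against [base_gfn, base_gfn + npages) and translate between gfn and host virtual address. A simplified model of those two operations follows; the struct keeps only the fields the snippets use, and the field names, helper names, and values in main() are illustrative assumptions.

/*
 * Simplified model of the lookups referenced above (try_get_memslot(),
 * __gfn_to_hva_memslot()). Toy struct and values, not kernel definitions.
 */
#include <stdbool.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1ULL << PAGE_SHIFT)

struct memslot {
        unsigned long long base_gfn;        /* first guest frame in the slot */
        unsigned long long npages;          /* slot length in pages */
        unsigned long long userspace_addr;  /* HVA backing base_gfn */
};

static bool gfn_in_slot(const struct memslot *slot, unsigned long long gfn)
{
        return gfn >= slot->base_gfn && gfn < slot->base_gfn + slot->npages;
}

static unsigned long long gfn_to_hva(const struct memslot *slot, unsigned long long gfn)
{
        unsigned long long offset = gfn - slot->base_gfn;

        return slot->userspace_addr + offset * PAGE_SIZE;
}

int main(void)
{
        struct memslot slot = { .base_gfn = 0x100, .npages = 0x40,
                                .userspace_addr = 0x7f0000000000ULL };

        printf("%d 0x%llx\n", gfn_in_slot(&slot, 0x120), gfn_to_hva(&slot, 0x120));
        return 0;
}
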
/linux-6.1.9/virt/kvm/
kvm_main.c:1435 if (slot->base_gfn < tmp->base_gfn) in kvm_insert_gfn_node()
1437 else if (slot->base_gfn > tmp->base_gfn) in kvm_insert_gfn_node()
1459 WARN_ON_ONCE(old->base_gfn != new->base_gfn); in kvm_replace_gfn_node()
1518 if (old && old->base_gfn == new->base_gfn) { in kvm_replace_memslot()
1706 dest->base_gfn = src->base_gfn; in kvm_copy_memslot()
1927 gfn_t base_gfn; in __kvm_set_memory_region() local
1975 base_gfn = (mem->guest_phys_addr >> PAGE_SHIFT); in __kvm_set_memory_region()
1993 if (base_gfn != old->base_gfn) in __kvm_set_memory_region()
2002 kvm_check_memslot_overlap(slots, id, base_gfn, base_gfn + npages)) in __kvm_set_memory_region()
2012 new->base_gfn = base_gfn; in __kvm_set_memory_region()
[all …]
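
In __kvm_set_memory_region(), base_gfn comes from the userspace-supplied guest_phys_addr shifted right by PAGE_SHIFT, and the new slot is rejected if its gfn range overlaps an existing slot (kvm_check_memslot_overlap above). A sketch of that bookkeeping follows; the struct, helper names, and sample sizes are illustrative, not the kernel's.

/*
 * Sketch of the range bookkeeping hinted at above: derive base_gfn from a
 * guest physical address and test half-open ranges for overlap.
 */
#include <stdbool.h>
#include <stdio.h>

#define PAGE_SHIFT 12

struct slot_range {
        unsigned long long base_gfn;
        unsigned long long npages;
};

/* Half-open intervals [base_gfn, base_gfn + npages) intersect. */
static bool ranges_overlap(const struct slot_range *a, const struct slot_range *b)
{
        return a->base_gfn < b->base_gfn + b->npages &&
               b->base_gfn < a->base_gfn + a->npages;
}

int main(void)
{
        /* Existing slot at GPA 1 GiB, 256 MiB long; new slot just above 1 GiB, 64 MiB. */
        struct slot_range old = { .base_gfn = 0x40000000ULL >> PAGE_SHIFT,
                                  .npages = 0x10000000ULL >> PAGE_SHIFT };
        struct slot_range new = { .base_gfn = 0x46000000ULL >> PAGE_SHIFT,
                                  .npages = 0x4000000ULL >> PAGE_SHIFT };

        printf("overlap: %d\n", ranges_overlap(&old, &new));
        return 0;
}
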
/linux-6.1.9/arch/s390/kvm/
kvm-s390.c:663 cur_gfn = memslot->base_gfn; in kvm_arch_sync_dirty_log()
664 last_gfn = memslot->base_gfn + memslot->npages; in kvm_arch_sync_dirty_log()
2138 unsigned long ofs = cur_gfn - ms->base_gfn; in kvm_s390_next_dirty_cmma()
2141 if (ms->base_gfn + ms->npages <= cur_gfn) { in kvm_s390_next_dirty_cmma()
2155 return ms->base_gfn + ofs; in kvm_s390_next_dirty_cmma()
2182 if (test_and_clear_bit(cur_gfn - ms->base_gfn, kvm_second_dirty_bitmap(ms))) in kvm_s390_get_cmma()
2200 if (cur_gfn - ms->base_gfn >= ms->npages) { in kvm_s390_get_cmma()
5597 if ((new->base_gfn * PAGE_SIZE) + size > kvm->arch.mem_limit) in kvm_arch_prepare_memory_region()
5612 rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE, in kvm_arch_commit_memory_region()
5616 rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE, in kvm_arch_commit_memory_region()
[all …]
kvm-s390.h:231 return ms->base_gfn + ms->npages; in kvm_s390_get_gfn_end()
priv.c:1210 if (ms && !test_and_set_bit(gfn - ms->base_gfn, kvm_second_dirty_bitmap(ms))) in __do_essa()
/linux-6.1.9/arch/powerpc/include/asm/
kvm_book3s_64.h:495 return !(memslot->base_gfn & mask) && !(memslot->npages & mask); in slot_is_aligned()
/linux-6.1.9/drivers/gpu/drm/i915/gvt/
kvmgt.c:1631 gfn = slot->base_gfn + i; in kvmgt_page_track_flush_slot()
