Searched refs:gfn (Results 1 – 25 of 84) sorted by relevance

/linux-6.6.21/arch/x86/kvm/mmu/
mmutrace.h
13 __field(__u64, gfn) \
20 __entry->gfn = sp->gfn; \
37 __entry->gfn, role.level, \
212 TP_PROTO(u64 *sptep, gfn_t gfn, u64 spte),
213 TP_ARGS(sptep, gfn, spte),
217 __field(gfn_t, gfn)
224 __entry->gfn = gfn;
230 __entry->gfn, __entry->access, __entry->gen)
235 TP_PROTO(u64 addr, gfn_t gfn, unsigned access),
236 TP_ARGS(addr, gfn, access),
[all …]
page_track.c
62 static void update_gfn_write_track(struct kvm_memory_slot *slot, gfn_t gfn, in update_gfn_write_track() argument
67 index = gfn_to_index(gfn, slot->base_gfn, PG_LEVEL_4K); in update_gfn_write_track()
78 gfn_t gfn) in __kvm_write_track_add_gfn() argument
88 update_gfn_write_track(slot, gfn, 1); in __kvm_write_track_add_gfn()
94 kvm_mmu_gfn_disallow_lpage(slot, gfn); in __kvm_write_track_add_gfn()
96 if (kvm_mmu_slot_gfn_write_protect(kvm, slot, gfn, PG_LEVEL_4K)) in __kvm_write_track_add_gfn()
101 struct kvm_memory_slot *slot, gfn_t gfn) in __kvm_write_track_remove_gfn() argument
111 update_gfn_write_track(slot, gfn, -1); in __kvm_write_track_remove_gfn()
117 kvm_mmu_gfn_allow_lpage(slot, gfn); in __kvm_write_track_remove_gfn()
124 const struct kvm_memory_slot *slot, gfn_t gfn) in kvm_gfn_is_write_tracked() argument
[all …]
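
Note: the page_track.c hits above form a per-slot refcount: __kvm_write_track_add_gfn() bumps a counter at the gfn's 4K index, disallows large pages over that gfn, and write-protects its mapping; removal reverses each step. A minimal user-space model of the counter half (array size and names are stand-ins, not the kernel's):

    /* Model of the per-gfn write-track refcount; 'track' stands in for
     * slot->arch.gfn_write_track.  Hypothetical sizes, user space only. */
    #include <assert.h>
    #include <stdio.h>

    #define SLOT_PAGES 512
    static unsigned short track[SLOT_PAGES];

    static void update_gfn_write_track(unsigned long index, short count)
    {
        unsigned short old = track[index];
        track[index] += count;
        /* catch refcount over/underflow, as the kernel's WARN_ON_ONCE() does */
        assert(count > 0 ? track[index] > old : track[index] < old);
    }

    int main(void)
    {
        update_gfn_write_track(7, 1);    /* first tracker: the kernel would
                                            also write-protect the gfn here */
        update_gfn_write_track(7, 1);    /* second tracker: count only */
        update_gfn_write_track(7, -1);
        printf("trackers left on gfn 7: %u\n", (unsigned)track[7]);   /* 1 */
        return 0;
    }
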
tdp_mmu.c
192 gfn_t gfn, union kvm_mmu_page_role role) in tdp_mmu_init_sp() argument
199 sp->gfn = gfn; in tdp_mmu_init_sp()
217 tdp_mmu_init_sp(child_sp, iter->sptep, iter->gfn, role); in tdp_mmu_init_child_sp()
258 static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
324 gfn_t base_gfn = sp->gfn; in handle_removed_pt()
333 gfn_t gfn = base_gfn + i * KVM_PAGES_PER_HPAGE(level); in handle_removed_pt() local
396 handle_changed_spte(kvm, kvm_mmu_page_as_id(sp), gfn, in handle_removed_pt()
419 static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn, in handle_changed_spte() argument
431 WARN_ON_ONCE(gfn & (KVM_PAGES_PER_HPAGE(level) - 1)); in handle_changed_spte()
447 as_id, gfn, old_spte, new_spte, level); in handle_changed_spte()
[all …]
mmu_internal.h
80 gfn_t gfn; member
160 static inline gfn_t gfn_round_for_level(gfn_t gfn, int level) in gfn_round_for_level() argument
162 return gfn & -KVM_PAGES_PER_HPAGE(level); in gfn_round_for_level()
166 gfn_t gfn, bool can_unsync, bool prefetch);
168 void kvm_mmu_gfn_disallow_lpage(const struct kvm_memory_slot *slot, gfn_t gfn);
169 void kvm_mmu_gfn_allow_lpage(const struct kvm_memory_slot *slot, gfn_t gfn);
171 struct kvm_memory_slot *slot, u64 gfn,
175 static inline void kvm_flush_remote_tlbs_gfn(struct kvm *kvm, gfn_t gfn, int level) in kvm_flush_remote_tlbs_gfn() argument
177 kvm_flush_remote_tlbs_range(kvm, gfn_round_for_level(gfn, level), in kvm_flush_remote_tlbs_gfn()
231 gfn_t gfn; member
[all …]
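
Note: gfn_round_for_level() (line 160 above) leans on two's complement: -KVM_PAGES_PER_HPAGE(level) is an all-ones mask above the huge-page boundary, so the AND clears the in-page offset bits. A standalone check, assuming x86's 512-entry (9-bit) page tables:

    /* Sketch of gfn_round_for_level(); PAGES_PER_HPAGE mirrors
     * KVM_PAGES_PER_HPAGE under the x86 assumption of 9 bits per level. */
    #include <stdio.h>

    typedef unsigned long long gfn_t;
    #define PAGES_PER_HPAGE(level) (1ULL << (((level) - 1) * 9))

    static gfn_t gfn_round_for_level(gfn_t gfn, int level)
    {
        return gfn & -PAGES_PER_HPAGE(level);
    }

    int main(void)
    {
        /* level 2 (2M pages): low 9 bits cleared, 0x12345 -> 0x12200 */
        printf("%#llx\n", gfn_round_for_level(0x12345, 2));
        return 0;
    }
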
tdp_iter.c
15 SPTE_INDEX(iter->gfn << PAGE_SHIFT, iter->level); in tdp_iter_refresh_sptep()
29 iter->gfn = gfn_round_for_level(iter->next_last_level_gfn, iter->level); in tdp_iter_restart()
97 iter->gfn = gfn_round_for_level(iter->next_last_level_gfn, iter->level); in try_step_down()
116 if (SPTE_INDEX(iter->gfn << PAGE_SHIFT, iter->level) == in try_step_side()
120 iter->gfn += KVM_PAGES_PER_HPAGE(iter->level); in try_step_side()
121 iter->next_last_level_gfn = iter->gfn; in try_step_side()
139 iter->gfn = gfn_round_for_level(iter->gfn, iter->level); in try_step_up()
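Note: the iterator's side-step is plain gfn arithmetic: advance by one huge page's worth of gfns, and use SPTE_INDEX to tell when the walk has run off the end of the current table. A model of that index math, again assuming 9 bits per level:

    /* Model of tdp_iter's try_step_side() index math; SPTE_INDEX here
     * mirrors the kernel's (gpa >> shift) & 511 under x86 assumptions. */
    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define LEVEL_SHIFT(level) (PAGE_SHIFT + ((level) - 1) * 9)
    #define SPTE_INDEX(gpa, level) (((gpa) >> LEVEL_SHIFT(level)) & 511)

    int main(void)
    {
        unsigned long long gfn = 0x3fe00;
        int level = 2;
        printf("index %llu\n", SPTE_INDEX(gfn << PAGE_SHIFT, level)); /* 511 */
        gfn += 1ULL << ((level - 1) * 9);           /* step sideways */
        printf("index %llu\n", SPTE_INDEX(gfn << PAGE_SHIFT, level)); /* wraps to 0 */
        return 0;
    }
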
mmu.c
277 int kvm_arch_flush_remote_tlbs_range(struct kvm *kvm, gfn_t gfn, u64 nr_pages) in kvm_arch_flush_remote_tlbs_range() argument
282 return static_call(kvm_x86_flush_remote_tlbs_range)(kvm, gfn, nr_pages); in kvm_arch_flush_remote_tlbs_range()
291 gfn_t gfn = kvm_mmu_page_get_gfn(sp, spte_index(sptep)); in kvm_flush_remote_tlbs_sptep() local
293 kvm_flush_remote_tlbs_gfn(kvm, gfn, sp->role.level); in kvm_flush_remote_tlbs_sptep()
296 static void mark_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 gfn, in mark_mmio_spte() argument
299 u64 spte = make_mmio_spte(vcpu, gfn, access); in mark_mmio_spte()
301 trace_mark_mmio_spte(sptep, gfn, spte); in mark_mmio_spte()
724 return sp->gfn; in kvm_mmu_page_get_gfn()
729 return sp->gfn + (index << ((sp->role.level - 1) * SPTE_LEVEL_BITS)); in kvm_mmu_page_get_gfn()
759 gfn_t gfn, unsigned int access) in kvm_mmu_page_set_translation() argument
[all …]
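
Note: kvm_mmu_page_get_gfn() (line 729 above) recovers the gfn governed by one spte: the shadow page starts at sp->gfn, and entry 'index' sits index-times-pages-per-entry beyond it. A quick check of that arithmetic, assuming SPTE_LEVEL_BITS == 9:

    /* Model of kvm_mmu_page_get_gfn()'s index -> gfn arithmetic. */
    #include <stdio.h>

    #define SPTE_LEVEL_BITS 9

    static unsigned long long page_get_gfn(unsigned long long base_gfn,
                                           int index, int level)
    {
        return base_gfn + ((unsigned long long)index
                           << ((level - 1) * SPTE_LEVEL_BITS));
    }

    int main(void)
    {
        /* level-2 table at gfn 0x1000: entry 3 covers gfn 0x1600 */
        printf("%#llx\n", page_get_gfn(0x1000, 3, 2));
        return 0;
    }
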
paging_tmpl.h
91 gfn_t gfn; member
322 gfn_t gfn; in FNAME() local
440 gfn = gpte_to_gfn_lvl(pte, walker->level); in FNAME()
441 gfn += (addr & PT_LVL_OFFSET_MASK(walker->level)) >> PAGE_SHIFT; in FNAME()
445 gfn += pse36_gfn_delta(pte); in FNAME()
448 real_gpa = kvm_translate_gpa(vcpu, mmu, gfn_to_gpa(gfn), access, &walker->fault); in FNAME()
452 walker->gfn = real_gpa >> PAGE_SHIFT; in FNAME()
538 gfn_t gfn; in FNAME() local
544 gfn = gpte_to_gfn(gpte); in FNAME()
548 slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn, pte_access & ACC_WRITE_MASK); in FNAME()
[all …]
page_track.h
19 gfn_t gfn);
21 struct kvm_memory_slot *slot, gfn_t gfn);
24 const struct kvm_memory_slot *slot, gfn_t gfn);
/linux-6.6.21/drivers/gpu/drm/i915/gvt/
page_track.c
35 struct intel_vgpu *vgpu, unsigned long gfn) in intel_vgpu_find_page_track() argument
37 return radix_tree_lookup(&vgpu->page_track_tree, gfn); in intel_vgpu_find_page_track()
50 int intel_vgpu_register_page_track(struct intel_vgpu *vgpu, unsigned long gfn, in intel_vgpu_register_page_track() argument
56 track = intel_vgpu_find_page_track(vgpu, gfn); in intel_vgpu_register_page_track()
67 ret = radix_tree_insert(&vgpu->page_track_tree, gfn, track); in intel_vgpu_register_page_track()
83 unsigned long gfn) in intel_vgpu_unregister_page_track() argument
87 track = radix_tree_delete(&vgpu->page_track_tree, gfn); in intel_vgpu_unregister_page_track()
90 intel_gvt_page_track_remove(vgpu, gfn); in intel_vgpu_unregister_page_track()
103 int intel_vgpu_enable_page_track(struct intel_vgpu *vgpu, unsigned long gfn) in intel_vgpu_enable_page_track() argument
108 track = intel_vgpu_find_page_track(vgpu, gfn); in intel_vgpu_enable_page_track()
[all …]
page_track.h
45 struct intel_vgpu *vgpu, unsigned long gfn);
48 unsigned long gfn, gvt_page_track_handler_t handler,
51 unsigned long gfn);
53 int intel_vgpu_enable_page_track(struct intel_vgpu *vgpu, unsigned long gfn);
54 int intel_vgpu_disable_page_track(struct intel_vgpu *vgpu, unsigned long gfn);
kvmgt.c
92 gfn_t gfn; member
100 gfn_t gfn; member
111 static void kvmgt_page_track_remove_region(gfn_t gfn, unsigned long nr_pages,
128 static void gvt_unpin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn, in gvt_unpin_guest_page() argument
131 vfio_unpin_pages(&vgpu->vfio_device, gfn << PAGE_SHIFT, in gvt_unpin_guest_page()
136 static int gvt_pin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn, in gvt_pin_guest_page() argument
149 dma_addr_t cur_iova = (gfn + npage) << PAGE_SHIFT; in gvt_pin_guest_page()
173 gvt_unpin_guest_page(vgpu, gfn, npage * PAGE_SIZE); in gvt_pin_guest_page()
177 static int gvt_dma_map_page(struct intel_vgpu *vgpu, unsigned long gfn, in gvt_dma_map_page() argument
184 ret = gvt_pin_guest_page(vgpu, gfn, size, &page); in gvt_dma_map_page()
[all …]
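
Note: throughout kvmgt.c the IOVA passed to vfio_pin_pages()/vfio_unpin_pages() is simply the gfn shifted by PAGE_SHIFT, i.e. the guest-physical address of the frame; a multi-page pin walks cur_iova = (gfn + npage) << PAGE_SHIFT. That address math in isolation:

    /* Model of kvmgt's gfn -> IOVA math (here IOVA == guest physical). */
    #include <stdio.h>

    #define PAGE_SHIFT 12

    int main(void)
    {
        unsigned long gfn = 0x1234;
        for (unsigned long npage = 0; npage < 3; npage++) {
            unsigned long cur_iova = (gfn + npage) << PAGE_SHIFT;
            printf("page %lu -> iova %#lx\n", npage, cur_iova);
        }
        return 0;
    }
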
/linux-6.6.21/arch/powerpc/kvm/
book3s_hv_uvmem.c
289 static void kvmppc_mark_gfn(unsigned long gfn, struct kvm *kvm, in kvmppc_mark_gfn() argument
295 if (gfn >= p->base_pfn && gfn < p->base_pfn + p->nr_pfns) { in kvmppc_mark_gfn()
296 unsigned long index = gfn - p->base_pfn; in kvmppc_mark_gfn()
308 static void kvmppc_gfn_secure_uvmem_pfn(unsigned long gfn, in kvmppc_gfn_secure_uvmem_pfn() argument
311 kvmppc_mark_gfn(gfn, kvm, KVMPPC_GFN_UVMEM_PFN, uvmem_pfn); in kvmppc_gfn_secure_uvmem_pfn()
315 static void kvmppc_gfn_secure_mem_pfn(unsigned long gfn, struct kvm *kvm) in kvmppc_gfn_secure_mem_pfn() argument
317 kvmppc_mark_gfn(gfn, kvm, KVMPPC_GFN_MEM_PFN, 0); in kvmppc_gfn_secure_mem_pfn()
321 static void kvmppc_gfn_shared(unsigned long gfn, struct kvm *kvm) in kvmppc_gfn_shared() argument
323 kvmppc_mark_gfn(gfn, kvm, KVMPPC_GFN_SHARED, 0); in kvmppc_gfn_shared()
327 static void kvmppc_gfn_remove(unsigned long gfn, struct kvm *kvm) in kvmppc_gfn_remove() argument
[all …]
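
Note: kvmppc_mark_gfn() records per-gfn state (secure, shared, uvmem pfn) by scanning range descriptors: find the one whose [base_pfn, base_pfn + nr_pfns) interval covers the gfn, then index its array by gfn - base_pfn. A compact model of that lookup (the struct and flag value are hypothetical stand-ins):

    /* Model of the base_pfn/nr_pfns range lookup in kvmppc_mark_gfn(). */
    #include <stdio.h>

    struct gfn_range {
        unsigned long base_pfn, nr_pfns;
        unsigned long state[64];          /* per-gfn flags */
    };

    static void mark_gfn(struct gfn_range *r, int nranges,
                         unsigned long gfn, unsigned long flag)
    {
        for (int i = 0; i < nranges; i++, r++) {
            if (gfn >= r->base_pfn && gfn < r->base_pfn + r->nr_pfns) {
                r->state[gfn - r->base_pfn] = flag;
                return;
            }
        }
    }

    int main(void)
    {
        struct gfn_range r = { .base_pfn = 0x100, .nr_pfns = 64 };
        mark_gfn(&r, 1, 0x105, 1 /* e.g. KVMPPC_GFN_SHARED */);
        printf("gfn 0x105 state: %lu\n", r.state[5]);
        return 0;
    }
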
book3s_64_mmu_hv.c
515 unsigned long gpa, gfn, hva, pfn, hpa; in kvmppc_book3s_hv_page_fault() local
578 gfn = gpa >> PAGE_SHIFT; in kvmppc_book3s_hv_page_fault()
579 memslot = gfn_to_memslot(kvm, gfn); in kvmppc_book3s_hv_page_fault()
604 hva = gfn_to_hva_memslot(memslot, gfn); in kvmppc_book3s_hv_page_fault()
616 pfn = __gfn_to_pfn_memslot(memslot, gfn, false, false, NULL, in kvmppc_book3s_hv_page_fault()
781 unsigned long *rmapp, unsigned long gfn) in kvmppc_unmap_hpte() argument
805 hpte_rpn(ptel, psize) == gfn) { in kvmppc_unmap_hpte()
813 kvmppc_update_dirty_map(memslot, gfn, psize); in kvmppc_unmap_hpte()
822 unsigned long gfn) in kvm_unmap_rmapp() argument
828 rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn]; in kvm_unmap_rmapp()
[all …]
e500_mmu_host.c
323 u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe, in kvmppc_e500_shadow_map() argument
353 slot = gfn_to_memslot(vcpu_e500->vcpu.kvm, gfn); in kvmppc_e500_shadow_map()
354 hva = gfn_to_hva_memslot(slot, gfn); in kvmppc_e500_shadow_map()
381 slot_start = pfn - (gfn - slot->base_gfn); in kvmppc_e500_shadow_map()
409 gfn_start = gfn & ~(tsize_pages - 1); in kvmppc_e500_shadow_map()
412 if (gfn_start + pfn - gfn < start) in kvmppc_e500_shadow_map()
414 if (gfn_end + pfn - gfn > end) in kvmppc_e500_shadow_map()
416 if ((gfn & (tsize_pages - 1)) != in kvmppc_e500_shadow_map()
449 pfn = gfn_to_pfn_memslot(slot, gfn); in kvmppc_e500_shadow_map()
453 __func__, (long)gfn); in kvmppc_e500_shadow_map()
[all …]
book3s_hv_rm_mmu.c
97 unsigned long gfn, unsigned long psize) in kvmppc_update_dirty_map() argument
104 gfn -= memslot->base_gfn; in kvmppc_update_dirty_map()
105 set_dirty_bits_atomic(memslot->dirty_bitmap, gfn, npages); in kvmppc_update_dirty_map()
113 unsigned long gfn; in kvmppc_set_dirty_from_hpte() local
117 gfn = hpte_rpn(hpte_gr, psize); in kvmppc_set_dirty_from_hpte()
118 memslot = __gfn_to_memslot(kvm_memslots_raw(kvm), gfn); in kvmppc_set_dirty_from_hpte()
120 kvmppc_update_dirty_map(memslot, gfn, psize); in kvmppc_set_dirty_from_hpte()
131 unsigned long gfn; in revmap_for_hpte() local
133 gfn = hpte_rpn(hpte_gr, kvmppc_actual_pgsz(hpte_v, hpte_gr)); in revmap_for_hpte()
134 memslot = __gfn_to_memslot(kvm_memslots_raw(kvm), gfn); in revmap_for_hpte()
[all …]
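
Note: the dirty-map update is standard slot-relative indexing: subtract memslot->base_gfn, then set one bit per 4K page that the (possibly large) HPTE spans. A non-atomic sketch of what set_dirty_bits_atomic() is handed, assuming psize is a multiple of 4K:

    /* Model of kvmppc_update_dirty_map()'s slot-relative bitmap math. */
    #include <stdio.h>

    #define PAGE_SHIFT 12

    static void update_dirty_map(unsigned long *bitmap, unsigned long base_gfn,
                                 unsigned long gfn, unsigned long psize)
    {
        unsigned long npages = psize >> PAGE_SHIFT;  /* 4K pages spanned */
        gfn -= base_gfn;                             /* slot-relative index */
        for (unsigned long i = 0; i < npages; i++)
            bitmap[(gfn + i) / 64] |= 1UL << ((gfn + i) % 64);
    }

    int main(void)
    {
        unsigned long bitmap[2] = { 0 };
        update_dirty_map(bitmap, 0x1000, 0x1010, 1 << 16);  /* 64K HPTE */
        printf("%#lx\n", bitmap[0]);   /* bits 16..31 set */
        return 0;
    }
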
book3s_64_mmu_radix.c
425 unsigned long gfn = gpa >> PAGE_SHIFT; in kvmppc_unmap_pte() local
437 memslot = gfn_to_memslot(kvm, gfn); in kvmppc_unmap_pte()
454 kvmppc_update_dirty_map(memslot, gfn, page_size); in kvmppc_unmap_pte()
825 unsigned long hva, gfn = gpa >> PAGE_SHIFT; in kvmppc_book3s_instantiate_page() local
843 hva = gfn_to_hva_memslot(memslot, gfn); in kvmppc_book3s_instantiate_page()
850 pfn = __gfn_to_pfn_memslot(memslot, gfn, false, false, NULL, in kvmppc_book3s_instantiate_page()
944 unsigned long gpa, gfn; in kvmppc_book3s_radix_page_fault() local
967 gfn = gpa >> PAGE_SHIFT; in kvmppc_book3s_radix_page_fault()
972 return kvmppc_send_page_to_uv(kvm, gfn); in kvmppc_book3s_radix_page_fault()
975 memslot = gfn_to_memslot(kvm, gfn); in kvmppc_book3s_radix_page_fault()
[all …]
/linux-6.6.21/include/linux/
kvm_host.h
295 kvm_pfn_t gfn; member
1168 int gfn_to_page_many_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
1171 struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
1172 unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
1173 unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable);
1174 unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
1175 unsigned long gfn_to_hva_memslot_prot(struct kvm_memory_slot *slot, gfn_t gfn,
1180 kvm_pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
1181 kvm_pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
1183 kvm_pfn_t gfn_to_pfn_memslot(const struct kvm_memory_slot *slot, gfn_t gfn);
[all …]
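
Note: these declarations are the canonical translation chain: gfn -> memslot -> host virtual address (hva) -> host pfn. The memslot-to-hva half is pure arithmetic; a user-space model of it (field names follow struct kvm_memory_slot, and the slot lookup itself is elided):

    /* Model of the gfn -> hva arithmetic behind gfn_to_hva_memslot():
     * the slot's userspace_addr plus the gfn's page offset into the slot. */
    #include <stdio.h>

    #define PAGE_SHIFT 12

    struct memslot {
        unsigned long long base_gfn;
        unsigned long npages;
        unsigned long userspace_addr;
    };

    static unsigned long gfn_to_hva_memslot(const struct memslot *slot,
                                            unsigned long long gfn)
    {
        return slot->userspace_addr +
               ((unsigned long)(gfn - slot->base_gfn) << PAGE_SHIFT);
    }

    int main(void)
    {
        struct memslot slot = {
            .base_gfn = 0x100, .npages = 256, .userspace_addr = 0x7f0000000000,
        };
        printf("%#lx\n", gfn_to_hva_memslot(&slot, 0x105)); /* 5 pages in */
        return 0;
    }
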
/linux-6.6.21/include/xen/
xen-ops.h
66 xen_pfn_t *gfn, int nr,
79 xen_pfn_t *gfn, int nr, in xen_xlate_remap_gfn_array() argument
116 xen_pfn_t *gfn, int nr, in xen_remap_domain_gfn_array() argument
122 return xen_xlate_remap_gfn_array(vma, addr, gfn, nr, err_ptr, in xen_remap_domain_gfn_array()
130 return xen_remap_pfn(vma, addr, gfn, nr, err_ptr, prot, domid, in xen_remap_domain_gfn_array()
176 xen_pfn_t gfn, int nr, in xen_remap_domain_gfn_range() argument
183 return xen_remap_pfn(vma, addr, &gfn, nr, NULL, prot, domid, false); in xen_remap_domain_gfn_range()
/linux-6.6.21/drivers/xen/
xlate_mmu.c
45 typedef void (*xen_gfn_fn_t)(unsigned long gfn, void *data);
84 static void setup_hparams(unsigned long gfn, void *data) in setup_hparams() argument
89 info->h_gpfns[info->h_iter] = gfn; in setup_hparams()
145 xen_pfn_t *gfn, int nr, in xen_xlate_remap_gfn_array() argument
158 data.fgfn = gfn; in xen_xlate_remap_gfn_array()
174 static void unmap_gfn(unsigned long gfn, void *data) in unmap_gfn() argument
179 xrp.gpfn = gfn; in unmap_gfn()
197 static void setup_balloon_gfn(unsigned long gfn, void *data) in setup_balloon_gfn() argument
201 info->pfns[info->idx++] = gfn; in setup_balloon_gfn()
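Note: xlate_mmu.c funnels everything through one idiom: walk a frame range and hand each gfn to an xen_gfn_fn_t callback, which fills a hypercall batch (setup_hparams), unmaps (unmap_gfn), or collects balloon pages (setup_balloon_gfn). The idiom in miniature (the collector and walker here are illustrative, not the kernel's):

    /* Model of the xen_gfn_fn_t callback idiom used by xlate_mmu.c. */
    #include <stdio.h>

    typedef void (*xen_gfn_fn_t)(unsigned long gfn, void *data);

    struct collect { unsigned long gfns[8]; int idx; };

    static void collect_gfn(unsigned long gfn, void *data)
    {
        struct collect *c = data;        /* cf. setup_balloon_gfn() */
        c->gfns[c->idx++] = gfn;
    }

    static void for_each_gfn(unsigned long start, int nr,
                             xen_gfn_fn_t fn, void *data)
    {
        for (int i = 0; i < nr; i++)
            fn(start + i, data);
    }

    int main(void)
    {
        struct collect c = { .idx = 0 };
        for_each_gfn(0x2000, 4, collect_gfn, &c);
        printf("collected %d gfns, first %#lx\n", c.idx, c.gfns[0]);
        return 0;
    }
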
/linux-6.6.21/virt/kvm/
kvm_main.c
369 void kvm_flush_remote_tlbs_range(struct kvm *kvm, gfn_t gfn, u64 nr_pages) in kvm_flush_remote_tlbs_range() argument
371 if (!kvm_arch_flush_remote_tlbs_range(kvm, gfn, nr_pages)) in kvm_flush_remote_tlbs_range()
2345 struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn) in gfn_to_memslot() argument
2347 return __gfn_to_memslot(kvm_memslots(kvm), gfn); in gfn_to_memslot()
2351 struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn) in kvm_vcpu_gfn_to_memslot() argument
2366 slot = try_get_memslot(vcpu->last_used_slot, gfn); in kvm_vcpu_gfn_to_memslot()
2375 slot = search_memslots(slots, gfn, false); in kvm_vcpu_gfn_to_memslot()
2384 bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn) in kvm_is_visible_gfn() argument
2386 struct kvm_memory_slot *memslot = gfn_to_memslot(kvm, gfn); in kvm_is_visible_gfn()
2392 bool kvm_vcpu_is_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn) in kvm_vcpu_is_visible_gfn() argument
[all …]
dirty_ring.c
89 static inline void kvm_dirty_gfn_set_invalid(struct kvm_dirty_gfn *gfn) in kvm_dirty_gfn_set_invalid() argument
91 smp_store_release(&gfn->flags, 0); in kvm_dirty_gfn_set_invalid()
94 static inline void kvm_dirty_gfn_set_dirtied(struct kvm_dirty_gfn *gfn) in kvm_dirty_gfn_set_dirtied() argument
96 gfn->flags = KVM_DIRTY_GFN_F_DIRTY; in kvm_dirty_gfn_set_dirtied()
99 static inline bool kvm_dirty_gfn_harvested(struct kvm_dirty_gfn *gfn) in kvm_dirty_gfn_harvested() argument
101 return smp_load_acquire(&gfn->flags) & KVM_DIRTY_GFN_F_RESET; in kvm_dirty_gfn_harvested()
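Note: these three helpers encode the dirty-ring handshake: KVM publishes an entry with KVM_DIRTY_GFN_F_DIRTY, user space acknowledges with KVM_DIRTY_GFN_F_RESET, and the release/acquire pair on gfn->flags orders the flag against the entry's payload. A user-space model with C11 atomics standing in for smp_store_release()/smp_load_acquire() (struct layout simplified):

    /* Model of the dirty-ring flag handshake. */
    #include <stdatomic.h>
    #include <stdio.h>

    #define DIRTY_GFN_F_DIRTY (1u << 0)
    #define DIRTY_GFN_F_RESET (1u << 1)

    struct dirty_gfn {
        unsigned int slot;
        unsigned long long offset;
        _Atomic unsigned int flags;
    };

    static void set_invalid(struct dirty_gfn *g)   /* reclaim the entry */
    {
        atomic_store_explicit(&g->flags, 0, memory_order_release);
    }

    static int harvested(struct dirty_gfn *g)      /* user space done? */
    {
        return atomic_load_explicit(&g->flags, memory_order_acquire)
               & DIRTY_GFN_F_RESET;
    }

    int main(void)
    {
        struct dirty_gfn g = { .slot = 1, .offset = 0x42 };
        atomic_store_explicit(&g.flags, DIRTY_GFN_F_DIRTY,
                              memory_order_release);   /* KVM publishes */
        atomic_store_explicit(&g.flags, DIRTY_GFN_F_RESET,
                              memory_order_release);   /* user space acks */
        if (harvested(&g))
            set_invalid(&g);
        printf("flags now %u\n", atomic_load(&g.flags));   /* 0 */
        return 0;
    }
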
/linux-6.6.21/arch/x86/include/asm/
kvm_page_track.h
43 void (*track_remove_region)(gfn_t gfn, unsigned long nr_pages,
52 int kvm_write_track_add_gfn(struct kvm *kvm, gfn_t gfn);
53 int kvm_write_track_remove_gfn(struct kvm *kvm, gfn_t gfn);
sev-common.h
86 #define GHCB_MSR_PSC_REQ_GFN(gfn, op) \ argument
90 ((u64)((gfn) & GENMASK_ULL(39, 0)) << 12) | \
125 gfn : 40, member
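Note: in the GHCB MSR protocol the gfn rides inside the MSR value itself: 40 bits of frame number at GHCBData[51:12], so masking and shifting left by 12 rebuilds the page-aligned guest-physical address within the request. Checking just that field (the op and request-code bits of the full macro are omitted here):

    /* Model of the gfn field of GHCB_MSR_PSC_REQ_GFN(): 40 bits of gfn
     * placed at GHCBData[51:12].  Other fields of the MSR are omitted. */
    #include <stdio.h>

    #define GENMASK_ULL(h, l) \
        (((~0ULL) << (l)) & (~0ULL >> (63 - (h))))

    int main(void)
    {
        unsigned long long gfn = 0x123456789abcdef0ULL;
        unsigned long long field = (gfn & GENMASK_ULL(39, 0)) << 12;
        printf("%#llx\n", field);   /* 0x789abcdef0000: top 24 bits dropped */
        return 0;
    }
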
/linux-6.6.21/include/trace/events/
kvm.h
261 TP_PROTO(u64 gva, u64 gfn),
263 TP_ARGS(gva, gfn),
267 __field(u64, gfn)
272 __entry->gfn = gfn;
275 TP_printk("gva = %#llx, gfn = %#llx", __entry->gva, __entry->gfn)
280 TP_PROTO(u64 gva, u64 gfn),
282 TP_ARGS(gva, gfn)
287 TP_PROTO(u64 gva, u64 gfn),
289 TP_ARGS(gva, gfn)
/linux-6.6.21/arch/riscv/kvm/
vcpu_exit.c
19 gfn_t gfn; in gstage_page_fault() local
23 gfn = fault_addr >> PAGE_SHIFT; in gstage_page_fault()
24 memslot = gfn_to_memslot(vcpu->kvm, gfn); in gstage_page_fault()
25 hva = gfn_to_hva_memslot_prot(memslot, gfn, &writable); in gstage_page_fault()
