Lines matching defs:kvm: definitions of the identifier kvm, i.e. struct kvm * function parameters and locals, in KVM's x86 MMU (arch/x86/kvm/mmu/mmu.c). Each entry gives the source line, the matched declaration, and the enclosing function.
277 int kvm_arch_flush_remote_tlbs_range(struct kvm *kvm, gfn_t gfn, u64 nr_pages) in kvm_arch_flush_remote_tlbs_range()
288 static void kvm_flush_remote_tlbs_sptep(struct kvm *kvm, u64 *sptep) in kvm_flush_remote_tlbs_sptep()
565 static u64 mmu_spte_clear_track_bits(struct kvm *kvm, u64 *sptep) in mmu_spte_clear_track_bits()
821 static void account_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp) in account_shadowed()
842 void track_possible_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp) in track_possible_nx_huge_page()
860 static void account_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp, in account_nx_huge_page()
869 static void unaccount_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp) in unaccount_shadowed()
885 void untrack_possible_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp) in untrack_possible_nx_huge_page()
894 static void unaccount_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp) in unaccount_nx_huge_page()
963 static void pte_list_desc_remove_entry(struct kvm *kvm, in pte_list_desc_remove_entry()
1000 static void pte_list_remove(struct kvm *kvm, u64 *spte, in pte_list_remove()
1031 static void kvm_zap_one_rmap_spte(struct kvm *kvm, in kvm_zap_one_rmap_spte()
1039 static bool kvm_zap_all_rmap_sptes(struct kvm *kvm, in kvm_zap_all_rmap_sptes()
1089 static void rmap_remove(struct kvm *kvm, u64 *spte) in rmap_remove()
1189 static void drop_spte(struct kvm *kvm, u64 *sptep) in drop_spte()
1197 static void drop_large_spte(struct kvm *kvm, u64 *sptep, bool flush) in drop_large_spte()
1276 static bool __rmap_clear_dirty(struct kvm *kvm, struct kvm_rmap_head *rmap_head, in __rmap_clear_dirty()
1301 static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm, in kvm_mmu_write_protect_pt_masked()
1334 static void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm, in kvm_mmu_clear_dirty_pt_masked()
1367 void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm, in kvm_arch_mmu_enable_log_dirty_pt_masked()
1408 bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm, in kvm_mmu_slot_gfn_write_protect()
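
kvm_mmu_write_protect_pt_masked() and kvm_mmu_clear_dirty_pt_masked() above both consume one 64-bit word of the dirty bitmap and visit a single 4K gfn per set bit. Below is a minimal user-space sketch of that bit-walk, not the kernel code: visit_masked_gfns() and protect() are illustrative stand-ins, and __builtin_ctzll() plays the role the kernel's __ffs() plays.

#include <stdint.h>
#include <stdio.h>

typedef uint64_t gfn_t;

static void visit_masked_gfns(gfn_t gfn_offset, uint64_t mask,
                              void (*handler)(gfn_t gfn))
{
        while (mask) {
                handler(gfn_offset + __builtin_ctzll(mask));
                mask &= mask - 1;       /* clear lowest set bit */
        }
}

static void protect(gfn_t gfn)
{
        printf("write-protect gfn %llu\n", (unsigned long long)gfn);
}

int main(void)
{
        /* bits 0, 3 and 9 dirty => gfns 100, 103, 109 */
        visit_masked_gfns(100, (1ULL << 0) | (1ULL << 3) | (1ULL << 9), protect);
        return 0;
}
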
1438 static bool __kvm_zap_rmap(struct kvm *kvm, struct kvm_rmap_head *rmap_head, in __kvm_zap_rmap()
1444 static bool kvm_zap_rmap(struct kvm *kvm, struct kvm_rmap_head *rmap_head, in kvm_zap_rmap()
1451 static bool kvm_set_pte_rmap(struct kvm *kvm, struct kvm_rmap_head *rmap_head, in kvm_set_pte_rmap()
1561 static __always_inline bool kvm_handle_gfn_range(struct kvm *kvm, in kvm_handle_gfn_range()
1576 bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range) in kvm_unmap_gfn_range()
1593 bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range) in kvm_set_spte_gfn()
1606 static bool kvm_age_rmap(struct kvm *kvm, struct kvm_rmap_head *rmap_head, in kvm_age_rmap()
1620 static bool kvm_test_age_rmap(struct kvm *kvm, struct kvm_rmap_head *rmap_head, in kvm_test_age_rmap()
1635 static void __rmap_add(struct kvm *kvm, in __rmap_add()
1667 bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range) in kvm_age_gfn()
1680 bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range) in kvm_test_age_gfn()
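
kvm_age_rmap() and kvm_test_age_rmap() (behind the kvm_age_gfn()/kvm_test_age_gfn() wrappers) differ only in whether the accessed bit is cleared or merely sampled. A hedged sketch of that pair, with the rmap chain flattened to a plain array and the accessed-bit position assumed to match x86 PTEs:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define SPTE_ACCESSED (1ULL << 5)       /* bit 5, as on x86 PTEs */

static bool age_sptes(uint64_t *sptes, size_t n, bool clear)
{
        bool young = false;
        size_t i;

        for (i = 0; i < n; i++) {
                if (sptes[i] & SPTE_ACCESSED) {
                        young = true;
                        if (clear)
                                sptes[i] &= ~SPTE_ACCESSED; /* kvm_age_rmap() */
                        else
                                break;          /* kvm_test_age_rmap() */
                }
        }
        return young;
}

int main(void)
{
        uint64_t sptes[3] = { SPTE_ACCESSED, 0, SPTE_ACCESSED };

        /* first pass finds and clears; second pass finds nothing */
        return !(age_sptes(sptes, 3, true) && !age_sptes(sptes, 3, false));
}
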
1713 static inline void kvm_mod_used_mmu_pages(struct kvm *kvm, long nr) in kvm_mod_used_mmu_pages()
1719 static void kvm_account_mmu_page(struct kvm *kvm, struct kvm_mmu_page *sp) in kvm_account_mmu_page()
1725 static void kvm_unaccount_mmu_page(struct kvm *kvm, struct kvm_mmu_page *sp) in kvm_unaccount_mmu_page()
1757 static void mmu_page_remove_parent_pte(struct kvm *kvm, struct kvm_mmu_page *sp, in mmu_page_remove_parent_pte()
1763 static void drop_parent_pte(struct kvm *kvm, struct kvm_mmu_page *sp, in drop_parent_pte()
1878 static void kvm_unlink_unsync_page(struct kvm *kvm, struct kvm_mmu_page *sp) in kvm_unlink_unsync_page()
1991 static bool kvm_mmu_remote_flush_or_zap(struct kvm *kvm, in kvm_mmu_remote_flush_or_zap()
2005 static bool is_obsolete_sp(struct kvm *kvm, struct kvm_mmu_page *sp) in is_obsolete_sp()
2145 static struct kvm_mmu_page *kvm_mmu_find_shadow_page(struct kvm *kvm, in kvm_mmu_find_shadow_page()
2230 static struct kvm_mmu_page *kvm_mmu_alloc_shadow_page(struct kvm *kvm, in kvm_mmu_alloc_shadow_page()
2266 static struct kvm_mmu_page *__kvm_mmu_get_shadow_page(struct kvm *kvm, in __kvm_mmu_get_shadow_page()
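
kvm_mmu_find_shadow_page(), kvm_mmu_alloc_shadow_page() and __kvm_mmu_get_shadow_page() implement a lookup-or-create over a hash of shadow pages keyed by the shadowed gfn, reusing a page only when its role matches exactly. A sketch of that pattern under simplified assumptions: the role is collapsed to one integer, there is no locking, and all names are illustrative.

#include <stdint.h>
#include <stdlib.h>

#define HASH_BITS 4
#define HASH_SIZE (1u << HASH_BITS)

struct shadow_page {
        uint64_t gfn;
        uint32_t role;                  /* stand-in for union kvm_mmu_page_role */
        struct shadow_page *next;       /* hash-bucket chain */
};

static struct shadow_page *hash[HASH_SIZE];

static unsigned bucket(uint64_t gfn)
{
        return (unsigned)(gfn & (HASH_SIZE - 1));
}

/* Return an existing page with a matching role, or allocate a fresh one. */
static struct shadow_page *get_shadow_page(uint64_t gfn, uint32_t role)
{
        struct shadow_page *sp;

        for (sp = hash[bucket(gfn)]; sp; sp = sp->next)
                if (sp->gfn == gfn && sp->role == role)
                        return sp;      /* the kvm_mmu_find_shadow_page() path */

        sp = calloc(1, sizeof(*sp));    /* the kvm_mmu_alloc_shadow_page() path */
        sp->gfn = gfn;
        sp->role = role;
        sp->next = hash[bucket(gfn)];
        hash[bucket(gfn)] = sp;
        return sp;
}

int main(void)
{
        struct shadow_page *a = get_shadow_page(42, 3);
        struct shadow_page *b = get_shadow_page(42, 3); /* same role: reused */
        struct shadow_page *c = get_shadow_page(42, 4); /* new role: fresh page */

        return !(a == b && a != c);
}
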
2423 static void __link_shadow_page(struct kvm *kvm, in __link_shadow_page()
2487 static int mmu_page_zap_pte(struct kvm *kvm, struct kvm_mmu_page *sp, in mmu_page_zap_pte()
2517 static int kvm_mmu_page_unlink_children(struct kvm *kvm, in kvm_mmu_page_unlink_children()
2530 static void kvm_mmu_unlink_parents(struct kvm *kvm, struct kvm_mmu_page *sp) in kvm_mmu_unlink_parents()
2539 static int mmu_zap_unsync_children(struct kvm *kvm, in mmu_zap_unsync_children()
2563 static bool __kvm_mmu_prepare_zap_page(struct kvm *kvm, in __kvm_mmu_prepare_zap_page()
2628 static bool kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp, in kvm_mmu_prepare_zap_page()
2637 static void kvm_mmu_commit_zap_page(struct kvm *kvm, in kvm_mmu_commit_zap_page()
2662 static unsigned long kvm_mmu_zap_oldest_mmu_pages(struct kvm *kvm, in kvm_mmu_zap_oldest_mmu_pages()
2699 static inline unsigned long kvm_mmu_available_pages(struct kvm *kvm) in kvm_mmu_available_pages()
2735 void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned long goal_nr_mmu_pages) in kvm_mmu_change_mmu_pages()
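
kvm_mmu_change_mmu_pages() enforces a ceiling on shadow-page usage: when the new goal is below the current count, the oldest pages are zapped first via kvm_mmu_zap_oldest_mmu_pages() until the count fits. A toy model of that policy, with the age-ordered page list reduced to a counter and all names illustrative:

#include <stdint.h>

struct mmu_state {
        unsigned long used_pages;
        unsigned long goal_pages;
};

static void zap_oldest(struct mmu_state *s, unsigned long nr)
{
        /* stand-in for walking the active list from its oldest end */
        s->used_pages -= (nr < s->used_pages) ? nr : s->used_pages;
}

static void change_mmu_pages(struct mmu_state *s, unsigned long goal)
{
        if (s->used_pages > goal)
                zap_oldest(s, s->used_pages - goal);
        s->goal_pages = goal;
}

int main(void)
{
        struct mmu_state s = { .used_pages = 100, .goal_pages = 0 };

        change_mmu_pages(&s, 40);       /* zaps the 60 oldest pages */
        return s.used_pages != 40;
}
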
2751 int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn) in kvm_mmu_unprotect_page()
2784 static void kvm_unsync_page(struct kvm *kvm, struct kvm_mmu_page *sp) in kvm_unsync_page()
2799 int mmu_try_to_unsync_pages(struct kvm *kvm, const struct kvm_memory_slot *slot, in mmu_try_to_unsync_pages()
3076 static int host_pfn_mapping_level(struct kvm *kvm, gfn_t gfn, in host_pfn_mapping_level()
3140 int kvm_mmu_max_mapping_level(struct kvm *kvm, in kvm_mmu_max_mapping_level()
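
host_pfn_mapping_level() and kvm_mmu_max_mapping_level() decide how large a mapping the fault path may install. One necessary condition is that gfn and pfn be equally aligned within the candidate huge range; the sketch below shows only that clamp (x86's 512 entries per level assumed) and elides the memslot-boundary and host-mapping checks the real code also applies.

#include <stdint.h>

#define PG_LEVEL_4K 1

static uint64_t pages_per_level(int level)
{
        return 1ULL << ((level - 1) * 9);       /* 1, 512, 512*512, ... */
}

static int max_mapping_level(uint64_t gfn, uint64_t pfn, int host_level)
{
        int level;

        for (level = host_level; level > PG_LEVEL_4K; level--)
                if (((gfn ^ pfn) & (pages_per_level(level) - 1)) == 0)
                        break;          /* gfn and pfn equally aligned */
        return level;
}

int main(void)
{
        /* gfn 512 and pfn 1536 are both 512-aligned: a 2M mapping fits */
        return max_mapping_level(512, 1536, 2) != 2;
}
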
3538 static void mmu_free_root_page(struct kvm *kvm, hpa_t *root_hpa, in mmu_free_root_page()
3559 void kvm_mmu_free_roots(struct kvm *kvm, struct kvm_mmu *mmu, in kvm_mmu_free_roots()
3615 void kvm_mmu_free_guest_mode_roots(struct kvm *kvm, struct kvm_mmu *mmu) in kvm_mmu_free_guest_mode_roots()
3707 static int mmu_first_shadow_root_alloc(struct kvm *kvm) in mmu_first_shadow_root_alloc()
4548 static bool cached_root_find_and_keep_current(struct kvm *kvm, struct kvm_mmu *mmu, in cached_root_find_and_keep_current()
4582 static bool cached_root_find_without_current(struct kvm *kvm, struct kvm_mmu *mmu, in cached_root_find_without_current()
4603 static bool fast_pgd_switch(struct kvm *kvm, struct kvm_mmu *mmu, in fast_pgd_switch()
5489 struct kvm *kvm = vcpu->kvm; in kvm_mmu_unload() local
5498 static bool is_obsolete_root(struct kvm *kvm, hpa_t root_hpa) in is_obsolete_root()
5525 static void __kvm_mmu_free_obsolete_roots(struct kvm *kvm, struct kvm_mmu *mmu) in __kvm_mmu_free_obsolete_roots()
5909 static __always_inline bool __walk_slot_rmaps(struct kvm *kvm, in __walk_slot_rmaps()
5938 static __always_inline bool walk_slot_rmaps(struct kvm *kvm, in walk_slot_rmaps()
5949 static __always_inline bool walk_slot_rmaps_4k(struct kvm *kvm, in walk_slot_rmaps_4k()
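
__walk_slot_rmaps() and its wrappers apply a handler to every rmap bucket of a memslot, once per page-table level in a requested range, OR-ing together the handlers' need-a-TLB-flush results. A self-contained sketch of that iteration shape; the rmap lookup is stubbed out and every name here is illustrative, not the kernel's:

#include <stdbool.h>
#include <stdint.h>

typedef uint64_t gfn_t;
#define PG_LEVEL_4K 1

struct rmap_head { void *list; };

struct memslot {
        gfn_t base_gfn;
        uint64_t npages;
};

static struct rmap_head *gfn_to_rmap(struct memslot *slot, gfn_t gfn, int level)
{
        static struct rmap_head dummy;
        (void)slot; (void)gfn; (void)level;
        return &dummy;          /* stand-in for the real per-level lookup */
}

typedef bool (*rmap_handler_t)(struct rmap_head *head);

static bool walk_slot_rmaps(struct memslot *slot, rmap_handler_t fn,
                            int min_level, int max_level)
{
        bool flush = false;
        int level;
        gfn_t gfn;

        for (level = min_level; level <= max_level; level++) {
                /* stride by the number of 4K pages one level-`level` entry maps */
                uint64_t stride = 1ULL << ((level - 1) * 9);

                for (gfn = slot->base_gfn;
                     gfn < slot->base_gfn + slot->npages; gfn += stride)
                        flush |= fn(gfn_to_rmap(slot, gfn, level));
        }
        return flush;
}

static bool count_bucket(struct rmap_head *head)
{
        (void)head;
        return false;           /* "no flush needed" in this toy handler */
}

int main(void)
{
        struct memslot slot = { .base_gfn = 0, .npages = 1024 };

        return walk_slot_rmaps(&slot, count_bucket, PG_LEVEL_4K, 2 /* 2M */);
}
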
6049 static void kvm_zap_obsolete_pages(struct kvm *kvm) in kvm_zap_obsolete_pages()
6114 static void kvm_mmu_zap_all_fast(struct kvm *kvm) in kvm_mmu_zap_all_fast()
6165 static bool kvm_has_zapped_obsolete_pages(struct kvm *kvm) in kvm_has_zapped_obsolete_pages()
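
kvm_mmu_zap_all_fast(), together with kvm_zap_obsolete_pages() and is_obsolete_sp() at line 2005 above, avoids tearing down every shadow page eagerly: bumping a VM-wide generation marks all existing pages obsolete at once, and they are skipped by lookups and reclaimed lazily afterwards. The core of that trick, as a hedged user-space sketch:

#include <stdbool.h>
#include <stdint.h>

struct vm   { uint64_t mmu_valid_gen; };
struct page { uint64_t mmu_valid_gen; bool invalid; };

static bool is_obsolete(const struct vm *vm, const struct page *sp)
{
        return sp->invalid || sp->mmu_valid_gen != vm->mmu_valid_gen;
}

/* "Zap all fast": one store invalidates every existing page at once. */
static void zap_all_fast(struct vm *vm)
{
        vm->mmu_valid_gen++;
}

int main(void)
{
        struct vm vm = { .mmu_valid_gen = 0 };
        struct page sp = { .mmu_valid_gen = 0, .invalid = false };

        zap_all_fast(&vm);      /* sp is now stale without being touched */
        return !is_obsolete(&vm, &sp);
}
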
6170 void kvm_mmu_init_vm(struct kvm *kvm) in kvm_mmu_init_vm()
6189 static void mmu_free_vm_memory_caches(struct kvm *kvm) in mmu_free_vm_memory_caches()
6196 void kvm_mmu_uninit_vm(struct kvm *kvm) in kvm_mmu_uninit_vm()
6204 static bool kvm_rmap_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end) in kvm_rmap_zap_gfn_range()
6239 void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end) in kvm_zap_gfn_range()
6263 static bool slot_rmap_write_protect(struct kvm *kvm, in slot_rmap_write_protect()
6270 void kvm_mmu_slot_remove_write_access(struct kvm *kvm, in kvm_mmu_slot_remove_write_access()
6293 static bool need_topup_split_caches_or_resched(struct kvm *kvm) in need_topup_split_caches_or_resched()
6308 static int topup_split_caches(struct kvm *kvm) in topup_split_caches()
6341 static struct kvm_mmu_page *shadow_mmu_get_sp_for_split(struct kvm *kvm, u64 *huge_sptep) in shadow_mmu_get_sp_for_split()
6368 static void shadow_mmu_split_huge_page(struct kvm *kvm, in shadow_mmu_split_huge_page()
6414 static int shadow_mmu_try_split_huge_page(struct kvm *kvm, in shadow_mmu_try_split_huge_page()
6453 static bool shadow_mmu_try_split_huge_pages(struct kvm *kvm, in shadow_mmu_try_split_huge_pages()
6499 static void kvm_shadow_mmu_try_split_huge_pages(struct kvm *kvm, in kvm_shadow_mmu_try_split_huge_pages()
6518 void kvm_mmu_try_split_huge_pages(struct kvm *kvm, in kvm_mmu_try_split_huge_pages()
6537 void kvm_mmu_slot_try_split_huge_pages(struct kvm *kvm, in kvm_mmu_slot_try_split_huge_pages()
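
The splitting path (topup_split_caches() through kvm_mmu_slot_try_split_huge_pages()) replaces one huge spte with a child table of 512 entries covering the same physical range, so dirty logging can track 4K granules without a write-fault storm. A hedged sketch of the end result: the spte encoding here is illustrative, and the real code also clears the huge-page bit and handles accessed/dirty and exec state, which this omits.

#include <stdint.h>

#define SPTE_FLAGS_MASK 0xfffULL
#define SPTES_PER_TABLE 512

static uint64_t make_spte(uint64_t pfn, uint64_t flags)
{
        return (pfn << 12) | flags;     /* frame number from bit 12, low-bit flags */
}

static void split_huge_spte(uint64_t huge_spte, uint64_t child[SPTES_PER_TABLE])
{
        uint64_t pfn = huge_spte >> 12;
        uint64_t flags = huge_spte & SPTE_FLAGS_MASK;
        int i;

        /* each child maps one small frame of the original huge range */
        for (i = 0; i < SPTES_PER_TABLE; i++)
                child[i] = make_spte(pfn + i, flags);
}

int main(void)
{
        uint64_t child[SPTES_PER_TABLE];

        split_huge_spte(make_spte(0x1000, 0x7), child);
        return child[511] != make_spte(0x1000 + 511, 0x7);
}
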
6568 static bool kvm_mmu_zap_collapsible_spte(struct kvm *kvm, in kvm_mmu_zap_collapsible_spte()
6605 static void kvm_rmap_zap_collapsible_sptes(struct kvm *kvm, in kvm_rmap_zap_collapsible_sptes()
6617 void kvm_mmu_zap_collapsible_sptes(struct kvm *kvm, in kvm_mmu_zap_collapsible_sptes()
6633 void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm, in kvm_mmu_slot_leaf_clear_dirty()
6662 static void kvm_mmu_zap_all(struct kvm *kvm) in kvm_mmu_zap_all()
6687 void kvm_arch_flush_shadow_all(struct kvm *kvm) in kvm_arch_flush_shadow_all()
6692 void kvm_arch_flush_shadow_memslot(struct kvm *kvm, in kvm_arch_flush_shadow_memslot()
6698 void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen) in kvm_mmu_invalidate_mmio_sptes()
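
kvm_mmu_invalidate_mmio_sptes() guards cached MMIO sptes, which embed a truncated copy of the memslot generation: once the truncated counter wraps back to zero, a stale spte could alias a live generation, so the MMU zaps everything defensively. A sketch of the wrap check, with an assumed field width rather than the kernel's exact one:

#include <stdbool.h>
#include <stdint.h>

#define MMIO_GEN_BITS 19        /* illustrative width, not the kernel's value */

static bool must_zap_mmio(uint64_t memslot_gen)
{
        /* zap when the generation, truncated to the spte field, wraps to 0 */
        return (memslot_gen & ((1ULL << MMIO_GEN_BITS) - 1)) == 0;
}

int main(void)
{
        return must_zap_mmio(1) || !must_zap_mmio(1ULL << MMIO_GEN_BITS);
}
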
6726 struct kvm *kvm; in mmu_shrink_scan() local
6851 struct kvm *kvm; in set_nx_huge_pages() local
6997 struct kvm *kvm; in set_nx_huge_pages_recovery_param() local
7010 static void kvm_recover_nx_huge_pages(struct kvm *kvm) in kvm_recover_nx_huge_pages()
7116 static int kvm_nx_huge_page_recovery_worker(struct kvm *kvm, uintptr_t data) in kvm_nx_huge_page_recovery_worker()
7141 int kvm_mmu_post_init_vm(struct kvm *kvm) in kvm_mmu_post_init_vm()
7157 void kvm_mmu_pre_destroy_vm(struct kvm *kvm) in kvm_mmu_pre_destroy_vm()
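
The NX huge-page machinery at the end of the list (set_nx_huge_pages*, kvm_recover_nx_huge_pages(), kvm_nx_huge_page_recovery_worker()) paces reclamation: each wakeup of the worker zaps only 1/ratio of the pages that were split by the NX mitigation, spreading the cost over time, and a ratio of 0 disables recovery. A sketch of that pacing computation; the names are illustrative.

#include <stdint.h>

static unsigned long nx_pages_to_recover(unsigned long nr_possible,
                                         unsigned int ratio)
{
        return ratio ? nr_possible / ratio : 0;
}

int main(void)
{
        return nx_pages_to_recover(1000, 4) != 250 ||
               nx_pages_to_recover(1000, 0) != 0;
}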