// SPDX-License-Identifier: GPL-2.0

#ifndef __KVM_X86_MMU_TDP_MMU_H
#define __KVM_X86_MMU_TDP_MMU_H

#include <linux/kvm_host.h>

hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu);

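/*
 * Root references (usage sketch): kvm_tdp_mmu_get_root() takes a reference
 * on a TDP MMU root and fails (returns false) once the root's refcount has
 * already dropped to zero, i.e. the root is being torn down.  Each
 * successful get is expected to be paired with a kvm_tdp_mmu_put_root().
 */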
__must_check static inline bool kvm_tdp_mmu_get_root(struct kvm_mmu_page *root)
{
	return refcount_inc_not_zero(&root->tdp_mmu_root_count);
}

void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root,
			  bool shared);

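/*
 * Zapping helpers (rough sketch of intent): the "invalidate then zap" pair
 * below is meant to be used together; kvm_tdp_mmu_invalidate_all_roots()
 * marks all valid roots invalid, and kvm_tdp_mmu_zap_invalidated_roots()
 * then tears those roots down, typically after mmu_lock has been dropped
 * or demoted so vCPUs can make forward progress in the meantime.
 */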
bool kvm_tdp_mmu_zap_leafs(struct kvm *kvm, int as_id, gfn_t start,
			   gfn_t end, bool can_yield, bool flush);
bool kvm_tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp);
void kvm_tdp_mmu_zap_all(struct kvm *kvm);
void kvm_tdp_mmu_invalidate_all_roots(struct kvm *kvm);
void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm);

int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault);

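/*
 * GFN-range hooks: the four handlers below are the TDP MMU's side of KVM's
 * mmu_notifier-driven gfn-range callbacks (unmap, age, test-age, and
 * change-pte style updates) over a range of GFNs.
 */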
bool kvm_tdp_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range,
				 bool flush);
bool kvm_tdp_mmu_age_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range);
bool kvm_tdp_mmu_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
bool kvm_tdp_mmu_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range);

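/*
 * Dirty logging and memslot-wide operations (summary, not a full contract):
 * write-protect a slot down to min_level, clear dirty state for a whole
 * slot or for a masked set of GFNs (wrprot selects write-protection vs.
 * D-bit clearing), and zap SPTEs that could be collapsed back into huge
 * pages once dirty logging is disabled.
 */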
bool kvm_tdp_mmu_wrprot_slot(struct kvm *kvm,
			     const struct kvm_memory_slot *slot, int min_level);
bool kvm_tdp_mmu_clear_dirty_slot(struct kvm *kvm,
				  const struct kvm_memory_slot *slot);
void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm,
				       struct kvm_memory_slot *slot,
				       gfn_t gfn, unsigned long mask,
				       bool wrprot);
void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
				       const struct kvm_memory_slot *slot);

bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
				   struct kvm_memory_slot *slot, gfn_t gfn,
				   int min_level);

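/*
 * Eager page splitting (sketch): proactively split huge pages in
 * [start, end) down to target_level so dirty logging does not have to
 * split them lazily on write faults.  @shared indicates whether mmu_lock
 * is held for read (shared) rather than write.
 */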
void kvm_tdp_mmu_try_split_huge_pages(struct kvm *kvm,
				      const struct kvm_memory_slot *slot,
				      gfn_t start, gfn_t end,
				      int target_level, bool shared);

static inline void kvm_tdp_mmu_walk_lockless_begin(void)
{
	rcu_read_lock();
}

static inline void kvm_tdp_mmu_walk_lockless_end(void)
{
	rcu_read_unlock();
}
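
/*
 * Rough usage sketch for a lockless walk (illustrative only; "leaf",
 * "level" and "sptes[]" are caller-provided):
 *
 *	kvm_tdp_mmu_walk_lockless_begin();
 *	leaf = kvm_tdp_mmu_get_walk(vcpu, addr, sptes, &level);
 *	... inspect sptes[] ...
 *	kvm_tdp_mmu_walk_lockless_end();
 *
 * The begin/end helpers only enter/leave an RCU read-side critical
 * section, so the walk must not sleep and the SPTE values read may be
 * stale by the time they are consumed.
 */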

int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
			 int *root_level);
u64 *kvm_tdp_mmu_fast_pf_get_last_sptep(struct kvm_vcpu *vcpu, u64 addr,
					u64 *spte);

#ifdef CONFIG_X86_64
int kvm_mmu_init_tdp_mmu(struct kvm *kvm);
void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm);
static inline bool is_tdp_mmu_page(struct kvm_mmu_page *sp) { return sp->tdp_mmu_page; }

static inline bool is_tdp_mmu(struct kvm_mmu *mmu)
{
	struct kvm_mmu_page *sp;
	hpa_t hpa = mmu->root.hpa;

	if (WARN_ON(!VALID_PAGE(hpa)))
		return false;

	/*
	 * A NULL shadow page is legal when shadowing a non-paging guest with
	 * PAE paging, as the MMU will be direct with root_hpa pointing at the
	 * pae_root page, not a shadow page.
	 */
	sp = to_shadow_page(hpa);
	return sp && is_tdp_mmu_page(sp) && sp->root_count;
}
#else
static inline int kvm_mmu_init_tdp_mmu(struct kvm *kvm) { return 0; }
static inline void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm) {}
static inline bool is_tdp_mmu_page(struct kvm_mmu_page *sp) { return false; }
static inline bool is_tdp_mmu(struct kvm_mmu *mmu) { return false; }
#endif

#endif /* __KVM_X86_MMU_TDP_MMU_H */