xref: /DragonOS/kernel/src/arch/x86_64/kvm/vmx/mmu.rs (revision 83ed0ebc293d5a10245089f627f52770fd5b9dd4)
use crate::{
    arch::kvm::vmx::ept::EptMapper,
    kdebug,
    libs::mutex::Mutex,
    mm::{page::PageFlags, syscall::ProtFlags},
    syscall::SystemError,
    virt::kvm::host_mem::{__gfn_to_pfn, kvm_vcpu_gfn_to_memslot, PAGE_MASK, PAGE_SHIFT},
};
use bitfield_struct::bitfield;

use super::{
    ept::check_ept_features,
    vcpu::VmxVcpu,
    vmcs::VmcsFields,
    vmx_asm_wrapper::{vmx_vmread, vmx_vmwrite},
};
use crate::arch::kvm::vmx::mmu::VmcsFields::CTRL_EPTP_PTR;

// pub const PT64_ROOT_LEVEL: u32 = 4;
// pub const PT32_ROOT_LEVEL: u32 = 2;
// pub const PT32E_ROOT_LEVEL: u32 = 3;

// pub struct KvmMmuPage{
//     gfn: u64, // gfn corresponding to the start of the address range managed by this page
//     role: KvmMmuPageRole, // basic information, such as hardware features and the level this page belongs to
//     // spt: *mut u64, // spt: shadow page table, the address of the struct page holding all page table entries (PTEs); page->private points back to this kvm_mmu_page
// }

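/// Packed description of the role a shadow/EPT page plays, mirroring the
/// field layout of Linux KVM's `kvm_mmu_page_role`; stored in a single `u32`
/// via `bitfield_struct` so roles can be compared cheaply.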
#[bitfield(u32)]
pub struct KvmMmuPageRole {
    #[bits(4)]
    level: usize, // level of this page in the paging hierarchy
    cr4_pae: bool, // cr4.pae; 1 means 64-bit gptes are in use
    #[bits(2)]
    quadrant: usize, // when cr4.pae=0 a gpte is 32 bits while an spte is 64 bits, so several sptes are needed per gpte; this field records which part of the gpte this page maps
    direct: bool,
    #[bits(3)]
    access: usize, // access permissions
    invalid: bool,        // invalid; destroyed as soon as it is unpinned
    nxe: bool,            // efer.nxe, no-execute
    cr0_wp: bool,         // cr0.wp, write protect
    smep_andnot_wp: bool, // smep && !cr0.wp; with SMEP enabled, supervisor-mode code cannot execute instructions located in user address space
    smap_andnot_wp: bool, // smap && !cr0.wp
    #[bits(8)]
    unused: usize,
    #[bits(8)]
    smm: usize, // 1 if in system management mode, 0 otherwise
}

// We don't want allocation failures within the mmu code, so we preallocate
// enough memory for a single page fault in a cache.
// pub struct KvmMmuMemoryCache {
//     num_objs: u32,
//     objs: [*mut u8; KVM_NR_MEM_OBJS as usize],
// }

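/// Per-vCPU MMU context. `root_hpa` is the host-physical address of the root
/// of the active paging structure (the EPT PML4 when TDP is used); the
/// callbacks below are installed by `init_kvm_tdp_mmu`.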
#[derive(Default)]
pub struct KvmMmu {
    pub root_hpa: u64,
    pub root_level: u32,
    pub base_role: KvmMmuPageRole,
    // ...there are further fields here whose purpose is not yet clear
    pub get_cr3: Option<fn(&VmxVcpu) -> u64>,
    pub set_eptp: Option<fn(u64) -> Result<(), SystemError>>,
    pub page_fault: Option<
        fn(
            vcpu: &mut VmxVcpu,
            gpa: u64,
            error_code: u32,
            prefault: bool,
        ) -> Result<(), SystemError>,
    >,
    // get_pdptr: Option<fn(&VmxVcpu, index: u32) -> u64>, // Page Directory Pointer Table Register? Not yet clear how it differs from CR3
    // inject_page_fault: Option<fn(&mut VmxVcpu, fault: &X86Exception)>,
    // gva_to_gpa: Option<fn(&mut VmxVcpu, gva: u64, access: u32, exception: &X86Exception) -> u64>,
    // translate_gpa: Option<fn(&mut VmxVcpu, gpa: u64, access: u32, exception: &X86Exception) -> u64>,
    // sync_page: Option<fn(&mut VmxVcpu, &mut KvmMmuPage)>,
    // invlpg: Option<fn(&mut VmxVcpu, gva: u64)>, // invalidate an entry
    // update_pte: Option<fn(&mut VmxVcpu, sp: &KvmMmuPage, spte: u64, pte: u64)>,
}

impl core::fmt::Debug for KvmMmu {
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        f.debug_struct("KvmMmu")
            .field("root_hpa", &self.root_hpa)
            .field("root_level", &self.root_level)
            .field("base_role", &self.base_role)
            .finish()
    }
}

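/// With TDP/EPT the guest owns its CR3, so reading it is a plain VMREAD of
/// the GUEST_CR3 field from the current VMCS.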
fn tdp_get_cr3(_vcpu: &VmxVcpu) -> u64 {
    let guest_cr3 = vmx_vmread(VmcsFields::GUEST_CR3 as u32).expect("Failed to read guest CR3");
    return guest_cr3;
}

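/// Build an EPT pointer (EPTP) for the given root table and write it into the
/// VMCS: bits 2:0 select the EPT paging-structure memory type, bits 5:3 hold
/// the page-walk length minus one, and the upper bits hold the physical
/// address of the top-level EPT table.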
fn tdp_set_eptp(root_hpa: u64) -> Result<(), SystemError> {
    // Permission bits are hard-coded for now: readable, writable and executable.
    // EPT paging-structure memory type: Uncacheable
    let mut eptp = 0x0 as u64;
    // This value is 1 less than the EPT page-walk length. 3 means 4-level paging.
    eptp |= 0x3 << 3;
    eptp |= root_hpa & (PAGE_MASK as u64);
    vmx_vmwrite(CTRL_EPTP_PTR as u32, eptp)?;
    Ok(())
}

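/// EPT-violation handler for two-dimensional paging (TDP): derive the gfn
/// from the faulting guest-physical address, resolve it to a host pfn through
/// the memslots, then install the mapping in the EPT tables via `__direct_map`.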
fn tdp_page_fault(
    vcpu: &mut VmxVcpu,
    gpa: u64,
    error_code: u32,
    prefault: bool,
) -> Result<(), SystemError> {
    kdebug!("tdp_page_fault");
    // Shift the guest-physical address right by PAGE_SHIFT (12) bits to get the guest page frame number
    let gfn = gpa >> PAGE_SHIFT;
    // Top up the memory caches in advance so that allocations cannot fail while handling the fault
    mmu_topup_memory_caches(vcpu)?;
    // TODO: determine the level this gfn should be mapped at and handle hugepages
    let level = 1; // 4KB page
    // TODO: fast path for violations caused by read/write access, i.e. non-MMIO page faults
    // where the page is present and writable
    // fast_page_fault(vcpu, gpa, level, error_code)
    // gfn -> pfn
    let mut map_writable = false;
    let write = error_code & ((1 as u32) << 1);
    let pfn = mmu_gfn_to_pfn_fast(vcpu, gpa, prefault, gfn, write == 0, &mut map_writable)?;
    // Direct map: install the gpa -> hpa translation in the EPT page tables
    __direct_map(vcpu, gpa, write, map_writable, level, gfn, pfn, prefault)?;
    Ok(())
}

/*
 * Calculate the number of mmu pages needed for kvm.
 */
// pub fn kvm_mmu_calculate_mmu_pages() -> u32 {
//     let mut nr_mmu_pages: u32;
//     let mut nr_pages = 0;

//     let kvm = vm(0).unwrap();
//     for as_id in 0..KVM_ADDRESS_SPACE_NUM {
//         let slots = kvm.memslots[as_id];
//         for i in 0..KVM_MEM_SLOTS_NUM {
//             let memslot = slots.memslots[i as usize];
//             nr_pages += memslot.npages;
//         }
//     }

//     nr_mmu_pages = (nr_pages as u32) * KVM_PERMILLE_MMU_PAGES / 1000;
//     nr_mmu_pages = nr_mmu_pages.max(KVM_MIN_ALLOC_MMU_PAGES);
//     return nr_mmu_pages;
// }

// pub fn kvm_mmu_change_mmu_pages(mut goal_nr_mmu_pages: u32){
//     let kvm = KVM();
//     // free the surplus mmu pages
//     if kvm.lock().arch.n_used_mmu_pages > goal_nr_mmu_pages {
//         while kvm.lock().arch.n_used_mmu_pages > goal_nr_mmu_pages {
//             if !prepare_zap_oldest_mmu_page() {
//                 break;
//             }
//         }
//         // kvm_mmu_commit_zap_page();
//         goal_nr_mmu_pages = kvm.lock().arch.n_used_mmu_pages;

//     }
//     kvm.lock().arch.n_max_mmu_pages = goal_nr_mmu_pages;
// }

// pub fn prepare_zap_oldest_mmu_page() -> bool {
//     return false;
// }

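/// Set up the vCPU's MMU. Only the TDP (EPT-backed) MMU is wired up for now;
/// the softmmu and nested-mmu paths are still TODO.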
pub fn kvm_mmu_setup(vcpu: &Mutex<VmxVcpu>) {
    // TODO: init_kvm_softmmu(vcpu), init_kvm_nested_mmu(vcpu)
    init_kvm_tdp_mmu(vcpu);
}

pub fn kvm_vcpu_mtrr_init(_vcpu: &Mutex<VmxVcpu>) -> Result<(), SystemError> {
    check_ept_features()?;
    Ok(())
}

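/// Install the TDP callbacks into the vCPU's MMU context: page faults are
/// handled by `tdp_page_fault`, guest CR3 reads by `tdp_get_cr3`, and EPTP
/// loading by `tdp_set_eptp`.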
pub fn init_kvm_tdp_mmu(vcpu: &Mutex<VmxVcpu>) {
    let context = &mut vcpu.lock().mmu;
    context.page_fault = Some(tdp_page_fault);
    context.get_cr3 = Some(tdp_get_cr3);
    context.set_eptp = Some(tdp_set_eptp);
    // context.inject_page_fault = kvm_inject_page_fault; TODO: inject_page_fault
    // context.invlpg = nonpaging_invlpg;
    // context.sync_page = nonpaging_sync_page;
    // context.update_pte = nonpaging_update_pte;

    // TODO: gva to gpa in kvm
    // if !is_paging(vcpu) { // the vcpu has paging disabled
    //     context.gva_to_gpa = nonpaging_gva_to_gpa;
    //     context.root_level = 0;
    // } else if (is_long_mode(vcpu)) {
    //     context.gva_to_gpa = paging64_gva_to_gpa;
    //     context.root_level = PT64_ROOT_LEVEL;
    // TODO: different paging strategies
    // } else if (is_pae(vcpu)) {
    //     context.gva_to_gpa = paging64_gva_to_gpa;
    //     context.root_level = PT32E_ROOT_LEVEL;
    // } else {
    //     context.gva_to_gpa = paging32_gva_to_gpa;
    //     context.root_level = PT32_ROOT_LEVEL;
    // }
}

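/// Map a guest-physical address to a host page frame in the EPT. Currently a
/// single 4KiB page is mapped with hard-coded RWX permissions; several of the
/// parameters are placeholders for a more complete implementation.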
pub fn __direct_map(
    vcpu: &mut VmxVcpu,
    gpa: u64,
    _write: u32,
    _map_writable: bool,
    _level: i32,
    _gfn: u64,
    pfn: u64,
    _prefault: bool,
) -> Result<u32, SystemError> {
    kdebug!("gpa={}, pfn={}, root_hpa={:x}", gpa, pfn, vcpu.mmu.root_hpa);
    // Check that vcpu.mmu.root_hpa is valid
    if vcpu.mmu.root_hpa == 0 {
        return Err(SystemError::KVM_HVA_ERR_BAD);
    }
    // Map the gpa to the hpa in the EPT page tables
    let mut ept_mapper = EptMapper::lock();
    let page_flags = PageFlags::from_prot_flags(ProtFlags::from_bits_truncate(0x7 as u64), false);
    unsafe {
        assert!(ept_mapper.walk(gpa, pfn << PAGE_SHIFT, page_flags).is_ok());
    }
    drop(ept_mapper);
    return Ok(0);
}

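/// Translate a guest page frame number into a host page frame number by
/// looking up the memslot covering the gfn and resolving it through the host
/// address space.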
pub fn mmu_gfn_to_pfn_fast(
    vcpu: &mut VmxVcpu,
    _gpa: u64,
    _prefault: bool,
    gfn: u64,
    write: bool,
    writable: &mut bool,
) -> Result<u64, SystemError> {
    let slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
    let pfn = __gfn_to_pfn(slot, gfn, false, write, writable)?;
    Ok(pfn)
}

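/// Pre-fill the MMU memory caches so that page-fault handling never has to
/// allocate at run time; currently a stub.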
// TODO: add a cache
pub fn mmu_topup_memory_caches(_vcpu: &mut VmxVcpu) -> Result<(), SystemError> {
    // If vcpu->arch.mmu_page_header_cache is running low, refill it from mmu_page_header_cache.
    // The two global slab caches, pte_list_desc_cache and mmu_page_header_cache, are created in kvm_mmu_module_init.
    // mmu_topup_memory_cache(vcpu.mmu_page_header_cache,
    //     mmu_page_header_cache, 4);
    Ok(())
}
255