xref: /DragonOS/kernel/src/virt/kvm/host_mem.rs (revision cf7f801e1d50ee5b04cb728e4251a57f4183bfbc)
use log::debug;
use system_error::SystemError;

use super::{vcpu::Vcpu, vm};
use crate::mm::{kernel_mapper::KernelMapper, page::EntryFlags, VirtAddr};

/*
 * Address types:
 *
 *  gva - guest virtual address
 *  gpa - guest physical address
 *  gfn - guest frame number
 *  hva - host virtual address
 *  hpa - host physical address
 *  hfn - host frame number
 */
pub const KVM_USER_MEM_SLOTS: u32 = 16;
pub const KVM_PRIVATE_MEM_SLOTS: u32 = 3;
pub const KVM_MEM_SLOTS_NUM: u32 = KVM_USER_MEM_SLOTS + KVM_PRIVATE_MEM_SLOTS;
pub const KVM_ADDRESS_SPACE_NUM: usize = 2;

pub const KVM_MEM_LOG_DIRTY_PAGES: u32 = 1 << 0;
pub const KVM_MEM_READONLY: u32 = 1 << 1;
pub const KVM_MEM_MAX_NR_PAGES: u32 = (1 << 31) - 1;

/*
 * The bit 16 ~ bit 31 of kvm_memory_region::flags are internally used
 * in kvm, other bits are visible for userspace which are defined in
 * include/linux/kvm.h.
 */
pub const KVM_MEMSLOT_INVALID: u32 = 1 << 16;
// pub const KVM_MEMSLOT_INCOHERENT: u32 = 1 << 17;

// pub const KVM_PERMILLE_MMU_PAGES: u32 = 20; // number of MMU pages required per 1000 guest memory pages
// pub const KVM_MIN_ALLOC_MMU_PAGES: u32 = 64;

pub const PAGE_SHIFT: u32 = 12;
pub const PAGE_SIZE: u32 = 1 << PAGE_SHIFT;
pub const PAGE_MASK: u32 = !(PAGE_SIZE - 1);
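
// Illustrative sketch (not part of the original source), assuming a hosted test
// harness can build this module: shows how the page constants above relate a
// guest physical address (gpa) to its frame number (gfn) and in-page offset.
// The sample gpa value is arbitrary.
#[cfg(test)]
mod page_math_sketch {
    use super::*;

    #[test]
    fn gpa_gfn_roundtrip() {
        let gpa: u64 = 0x1234_5678;
        let gfn = gpa >> PAGE_SHIFT; // drop the 12 offset bits
        let offset = gpa & (PAGE_SIZE as u64 - 1); // keep only the offset bits
        assert_eq!(gfn, 0x12345);
        assert_eq!(offset, 0x678);
        // Rebuilding the gpa from (gfn, offset) is lossless.
        assert_eq!((gfn << PAGE_SHIFT) | offset, gpa);
        // PAGE_MASK clears the offset bits of a 32-bit address.
        assert_eq!(0x1234_5678u32 & PAGE_MASK, 0x1234_5000);
    }
}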
/// Maps guest physical memory to the virtual address space of a userspace process.
/// Describes one region of the guest's physical memory.
#[repr(C)]
#[derive(Default)]
pub struct KvmUserspaceMemoryRegion {
    pub slot: u32, // which slot this memory region is registered in
    // flags has two possible bits, KVM_MEM_LOG_DIRTY_PAGES and KVM_MEM_READONLY, which tell KVM how to treat this memory:
    // KVM_MEM_LOG_DIRTY_PAGES enables dirty-page tracking, KVM_MEM_READONLY makes the memory read-only.
    pub flags: u32,
    pub guest_phys_addr: u64, // starting guest physical address of the region
    pub memory_size: u64,     // size of the region
    pub userspace_addr: u64,  // host virtual address backing the region
}
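
// Illustrative sketch (not part of the original source), assuming a hosted test
// harness can build this module: how a caller might fill in a
// KvmUserspaceMemoryRegion to register 16 MiB of guest RAM at GPA 0. The slot
// index and the host address 0x7f00_0000_0000 are made-up values.
#[cfg(test)]
mod userspace_region_sketch {
    use super::*;

    #[test]
    fn build_region_descriptor() {
        let region = KvmUserspaceMemoryRegion {
            slot: 0,                          // register into slot 0
            flags: KVM_MEM_LOG_DIRTY_PAGES,   // ask KVM to track dirty pages
            guest_phys_addr: 0x0,             // guest-physical start of the region
            memory_size: 16 * 1024 * 1024,    // 16 MiB of guest memory
            userspace_addr: 0x7f00_0000_0000, // hypothetical host virtual address backing it
        };
        // The region must cover a whole number of pages.
        assert_eq!(region.memory_size % PAGE_SIZE as u64, 0);
        assert_eq!(region.memory_size >> PAGE_SHIFT, 4096); // 4096 guest page frames
    }
}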

#[derive(Default, Clone, Copy, Debug)]
pub struct KvmMemorySlot {
    pub base_gfn: u64,       // first guest page frame number of the region
    pub npages: u64,         // number of pages in the region, i.e. its size
    pub userspace_addr: u64, // host virtual address backing the region
    pub flags: u32,          // region attributes
    pub id: u16,             // region id
                             // Dirty-page bitmap of the region: one bit per page; 1 means the page is dirty, 0 means it is clean.
                             // pub dirty_bitmap: *mut u8,
                             // unsigned long *rmap[KVM_NR_PAGE_SIZES]; reverse-mapping structure: when an EPT page-table entry is created, record the page-table entry address for the GPA (GPA --> page-table entry address); not needed for now
}

#[derive(Default, Clone, Copy, Debug)]
pub struct KvmMemorySlots {
    pub memslots: [KvmMemorySlot; KVM_MEM_SLOTS_NUM as usize], // array of guest memory regions
    pub used_slots: u32,                                       // number of slots already in use
}

#[derive(PartialEq, Eq, Debug)]
pub enum KvmMemoryChange {
    Create,
    Delete,
    Move,
    FlagsOnly,
}

/// Return the memory slots of the VM this vcpu belongs to.
/// Currently only VM 0 and address space 0 are used.
pub fn kvm_vcpu_memslots(_vcpu: &mut dyn Vcpu) -> KvmMemorySlots {
    let kvm = vm(0).unwrap();
    let as_id = 0;
    return kvm.memslots[as_id];
}

/// Find the memory slot that contains `gfn` by scanning the slots in use.
fn __gfn_to_memslot(slots: KvmMemorySlots, gfn: u64) -> Option<KvmMemorySlot> {
    debug!("__gfn_to_memslot");
    // TODO: optimize this lookup with a binary search
    for i in 0..slots.used_slots {
        let memslot = slots.memslots[i as usize];
        if gfn >= memslot.base_gfn && gfn < memslot.base_gfn + memslot.npages {
            return Some(memslot);
        }
    }
    return None;
}

/// Translate a gfn into the host virtual address backing it inside `slot`.
fn __gfn_to_hva(slot: KvmMemorySlot, gfn: u64) -> u64 {
    return slot.userspace_addr + (gfn - slot.base_gfn) * (PAGE_SIZE as u64);
}
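
// Illustrative sketch (not part of the original source), assuming a hosted test
// harness can build this module: exercises the linear slot lookup and the
// gfn -> hva formula above with a hand-built slot. The slot geometry and the
// host base address 0x7f00_0000_0000 are made-up values.
#[cfg(test)]
mod gfn_lookup_sketch {
    use super::*;

    #[test]
    fn lookup_and_translate() {
        let mut slots = KvmMemorySlots::default();
        slots.memslots[0] = KvmMemorySlot {
            base_gfn: 0x100, // slot covers gfn 0x100..0x110
            npages: 0x10,
            userspace_addr: 0x7f00_0000_0000, // hypothetical host virtual base
            flags: 0,
            id: 0,
        };
        slots.used_slots = 1;

        // A gfn inside the slot resolves; one outside does not.
        let slot = __gfn_to_memslot(slots, 0x105).expect("gfn should be covered");
        assert!(__gfn_to_memslot(slots, 0x200).is_none());

        // hva = userspace_addr + (gfn - base_gfn) * PAGE_SIZE
        assert_eq!(__gfn_to_hva(slot, 0x105), 0x7f00_0000_0000 + 5 * PAGE_SIZE as u64);
    }
}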

/// Translate a gfn into an hva, rejecting invalid slots and write access to
/// read-only slots. If `nr_pages` is given, report how many pages remain in
/// the slot starting at `gfn`.
fn __gfn_to_hva_many(
    slot: Option<KvmMemorySlot>,
    gfn: u64,
    nr_pages: Option<&mut u64>,
    write: bool,
) -> Result<u64, SystemError> {
    debug!("__gfn_to_hva_many");
    if slot.is_none() {
        return Err(SystemError::KVM_HVA_ERR_BAD);
    }
    let slot = slot.unwrap();
    if slot.flags & KVM_MEMSLOT_INVALID != 0 || (slot.flags & KVM_MEM_READONLY != 0) && write {
        return Err(SystemError::KVM_HVA_ERR_BAD);
    }

    if let Some(nr_pages) = nr_pages {
        *nr_pages = slot.npages - (gfn - slot.base_gfn);
    }

    return Ok(__gfn_to_hva(slot, gfn));
}

/* From Linux kernel
 * Pin guest page in memory and return its pfn.
 * @addr: host virtual address which maps memory to the guest
 * @atomic: whether this function can sleep
 * @async: whether this function need to wait IO complete if the
 *         host page is not in the memory
 * @write_fault: whether we should get a writable host page
 * @writable: whether it allows to map a writable host page for !@write_fault
 *
 * The function will map a writable host page for these two cases:
 * 1): @write_fault = true
 * 2): @write_fault = false && @writable, @writable will tell the caller
 *     whether the mapping is writable.
 */
// Compute the pfn for an HVA and make sure the backing physical page is present in memory.
// Host virtual-to-physical translation can be done in two ways: hva_to_pfn_fast and hva_to_pfn_slow.
// Correctness still to be verified.
fn hva_to_pfn(addr: u64, _atomic: bool, _writable: &mut bool) -> Result<u64, SystemError> {
    debug!("hva_to_pfn");
    unsafe {
        let raw = addr as *const i32;
        debug!("raw={:x}", *raw);
    }
    // let hpa = MMArch::virt_2_phys(VirtAddr::new(addr)).unwrap().data() as u64;
    let hva = VirtAddr::new(addr as usize);
    let mut mapper = KernelMapper::lock();
    let mapper = mapper.as_mut().unwrap();
    // If the hva is already mapped, derive the pfn from its physical address.
    if let Some((hpa, _)) = mapper.translate(hva) {
        return Ok(hpa.data() as u64 >> PAGE_SHIFT);
    }
    // Otherwise map the page first, then translate it.
    unsafe {
        mapper.map(hva, EntryFlags::mmio_flags());
    }
    let (hpa, _) = mapper.translate(hva).unwrap();
    return Ok(hpa.data() as u64 >> PAGE_SHIFT);
}

/// Translate a gfn into a host pfn: resolve the hva through the memory slot,
/// then convert that hva to a pfn with hva_to_pfn.
pub fn __gfn_to_pfn(
    slot: Option<KvmMemorySlot>,
    gfn: u64,
    atomic: bool,
    write: bool,
    writable: &mut bool,
) -> Result<u64, SystemError> {
    debug!("__gfn_to_pfn");
    let mut nr_pages = 0;
    let addr = __gfn_to_hva_many(slot, gfn, Some(&mut nr_pages), write)?;
    let pfn = hva_to_pfn(addr, atomic, writable)?;
    debug!("hva={}, pfn={}", addr, pfn);
    return Ok(pfn);
}

/// Find the memory slot containing `gfn` for the given vcpu.
pub fn kvm_vcpu_gfn_to_memslot(vcpu: &mut dyn Vcpu, gfn: u64) -> Option<KvmMemorySlot> {
    return __gfn_to_memslot(kvm_vcpu_memslots(vcpu), gfn);
}
179