use system_error::SystemError;

use super::{vcpu::Vcpu, vm};
use crate::{
    kdebug,
    mm::{kernel_mapper::KernelMapper, page::PageFlags, VirtAddr},
};

/*
 * Address types:
 *
 *  gva - guest virtual address
 *  gpa - guest physical address
 *  gfn - guest frame number
 *  hva - host virtual address
 *  hpa - host physical address
 *  hfn - host frame number
 */

// Memory-slot counts: slots userspace may register, plus slots reserved for
// internal (kernel-private) use. Layout mirrors Linux KVM.
pub const KVM_USER_MEM_SLOTS: u32 = 16;
pub const KVM_PRIVATE_MEM_SLOTS: u32 = 3;
pub const KVM_MEM_SLOTS_NUM: u32 = KVM_USER_MEM_SLOTS + KVM_PRIVATE_MEM_SLOTS;
// Number of guest physical address spaces per VM.
// NOTE(review): only address space 0 is ever indexed in this file — confirm
// whether the second address space is actually supported yet.
pub const KVM_ADDRESS_SPACE_NUM: usize = 2;

// Userspace-visible memory-region flags.
pub const KVM_MEM_LOG_DIRTY_PAGES: u32 = 1 << 0; // enable dirty-page tracking for the region
pub const KVM_MEM_READONLY: u32 = 1 << 1; // region is read-only
// Maximum number of pages a single memory region may cover.
pub const KVM_MEM_MAX_NR_PAGES: u32 = (1 << 31) - 1;

/*
 * The bit 16 ~ bit 31 of kvm_memory_region::flags are internally used
 * in kvm, other bits are visible for userspace which are defined in
 * include/linux/kvm_h.
 */
pub const KVM_MEMSLOT_INVALID: u32 = 1 << 16;
// pub const KVM_MEMSLOT_INCOHERENT:u32 = 1 << 17;

// pub const KVM_PERMILLE_MMU_PAGES: u32 = 20; // the proportion of MMU pages required per thousand (out of 1000) memory pages.
37 // pub const KVM_MIN_ALLOC_MMU_PAGES: u32 = 64; 38 39 pub const PAGE_SHIFT: u32 = 12; 40 pub const PAGE_SIZE: u32 = 1 << PAGE_SHIFT; 41 pub const PAGE_MASK: u32 = !(PAGE_SIZE - 1); 42 43 #[repr(C)] 44 /// 通过这个结构可以将虚拟机的物理地址对应到用户进程的虚拟地址 45 /// 用来表示虚拟机的一段物理内存 46 pub struct KvmUserspaceMemoryRegion { 47 pub slot: u32, // 要在哪个slot上注册内存区间 48 // flags有两个取值,KVM_MEM_LOG_DIRTY_PAGES和KVM_MEM_READONLY,用来指示kvm针对这段内存应该做的事情。 49 // KVM_MEM_LOG_DIRTY_PAGES用来开启内存脏页,KVM_MEM_READONLY用来开启内存只读。 50 pub flags: u32, 51 pub guest_phys_addr: u64, // 虚机内存区间起始物理地址 52 pub memory_size: u64, // 虚机内存区间大小 53 pub userspace_addr: u64, // 虚机内存区间对应的主机虚拟地址 54 } 55 56 #[derive(Default, Clone, Copy, Debug)] 57 pub struct KvmMemorySlot { 58 pub base_gfn: u64, // 虚机内存区间起始物理页框号 59 pub npages: u64, // 虚机内存区间页数,即内存区间的大小 60 pub userspace_addr: u64, // 虚机内存区间对应的主机虚拟地址 61 pub flags: u32, // 虚机内存区间属性 62 pub id: u16, // 虚机内存区间id 63 // 用来记录虚机内存区间的脏页信息,每个bit对应一个页,如果bit为1,表示对应的页是脏页,如果bit为0,表示对应的页是干净页。 64 // pub dirty_bitmap: *mut u8, 65 // unsigned long *rmap[KVM_NR_PAGE_SIZES]; 反向映射相关的结构, 创建EPT页表项时就记录GPA对应的页表项地址(GPA-->页表项地址),暂时不需要 66 } 67 68 #[derive(Default, Clone, Copy, Debug)] 69 pub struct KvmMemorySlots { 70 pub memslots: [KvmMemorySlot; KVM_MEM_SLOTS_NUM as usize], // 虚机内存区间数组 71 pub used_slots: u32, // 已经使用的slot数量 72 } 73 74 #[derive(PartialEq, Eq, Debug)] 75 pub enum KvmMemoryChange { 76 Create, 77 Delete, 78 Move, 79 FlagsOnly, 80 } 81 82 impl Default for KvmUserspaceMemoryRegion { 83 fn default() -> KvmUserspaceMemoryRegion { 84 KvmUserspaceMemoryRegion { 85 slot: 0, 86 flags: 0, 87 guest_phys_addr: 0, 88 memory_size: 0, 89 userspace_addr: 0, 90 } 91 } 92 } 93 94 pub fn kvm_vcpu_memslots(_vcpu: &mut dyn Vcpu) -> KvmMemorySlots { 95 let kvm = vm(0).unwrap(); 96 let as_id = 0; 97 return kvm.memslots[as_id]; 98 } 99 100 fn __gfn_to_memslot(slots: KvmMemorySlots, gfn: u64) -> Option<KvmMemorySlot> { 101 kdebug!("__gfn_to_memslot"); 102 // TODO: 使用二分查找的方式优化 103 for i in 0..slots.used_slots { 104 let memslot = 
slots.memslots[i as usize]; 105 if gfn >= memslot.base_gfn && gfn < memslot.base_gfn + memslot.npages { 106 return Some(memslot); 107 } 108 } 109 return None; 110 } 111 112 fn __gfn_to_hva(slot: KvmMemorySlot, gfn: u64) -> u64 { 113 return slot.userspace_addr + (gfn - slot.base_gfn) * (PAGE_SIZE as u64); 114 } 115 fn __gfn_to_hva_many( 116 slot: Option<KvmMemorySlot>, 117 gfn: u64, 118 nr_pages: Option<&mut u64>, 119 write: bool, 120 ) -> Result<u64, SystemError> { 121 kdebug!("__gfn_to_hva_many"); 122 if slot.is_none() { 123 return Err(SystemError::KVM_HVA_ERR_BAD); 124 } 125 let slot = slot.unwrap(); 126 if slot.flags & KVM_MEMSLOT_INVALID != 0 || (slot.flags & KVM_MEM_READONLY != 0) && write { 127 return Err(SystemError::KVM_HVA_ERR_BAD); 128 } 129 130 if nr_pages.is_some() { 131 let nr_pages = nr_pages.unwrap(); 132 *nr_pages = slot.npages - (gfn - slot.base_gfn); 133 } 134 return Ok(__gfn_to_hva(slot, gfn)); 135 } 136 137 /* From Linux kernel 138 * Pin guest page in memory and return its pfn. 139 * @addr: host virtual address which maps memory to the guest 140 * @atomic: whether this function can sleep 141 * @async: whether this function need to wait IO complete if the 142 * host page is not in the memory 143 * @write_fault: whether we should get a writable host page 144 * @writable: whether it allows to map a writable host page for !@write_fault 145 * 146 * The function will map a writable host page for these two cases: 147 * 1): @write_fault = true 148 * 2): @write_fault = false && @writable, @writable will tell the caller 149 * whether the mapping is writable. 
 */
// Compute the pfn for an HVA, while making sure the physical page is present
// in memory. (Host virtual -> physical translation; Linux splits this into
// hva_to_pfn_fast / hva_to_pfn_slow.)
// NOTE(review): correctness not yet verified (per the original author's note).
fn hva_to_pfn(addr: u64, _atomic: bool, _writable: &mut bool) -> Result<u64, SystemError> {
    kdebug!("hva_to_pfn");
    // Debug probe: read one i32 through the raw HVA.
    // NOTE(review): this dereference happens *before* checking that `addr` is
    // mapped below — it can fault on an unmapped address. Confirm intended.
    unsafe {
        let raw = addr as *const i32;
        kdebug!("raw={:x}", *raw);
    }
    // let hpa = MMArch::virt_2_phys(VirtAddr::new(addr)).unwrap().data() as u64;
    let hva = VirtAddr::new(addr as usize);
    let mut mapper = KernelMapper::lock();
    let mapper = mapper.as_mut().unwrap();
    // Fast path: the HVA is already mapped in the kernel page table.
    if let Some((hpa, _)) = mapper.translate(hva) {
        return Ok(hpa.data() as u64 >> PAGE_SHIFT);
    }
    // Slow path: map the page, then translate again.
    // NOTE(review): the value returned by `map` is discarded — if it is a TLB
    // flusher (as in other mappers), the new mapping may not be flushed; and
    // the `unwrap` below panics if mapping failed. Verify against the
    // KernelMapper API.
    unsafe {
        mapper.map(hva, PageFlags::mmio_flags());
    }
    let (hpa, _) = mapper.translate(hva).unwrap();
    return Ok(hpa.data() as u64 >> PAGE_SHIFT);
}

// Resolve `gfn` to a host pfn: slot -> HVA -> pfn.
// `write` requests a writable mapping; `writable`/`atomic` are currently
// forwarded to `hva_to_pfn`, which ignores them.
pub fn __gfn_to_pfn(
    slot: Option<KvmMemorySlot>,
    gfn: u64,
    atomic: bool,
    write: bool,
    writable: &mut bool,
) -> Result<u64, SystemError> {
    kdebug!("__gfn_to_pfn");
    let mut nr_pages = 0;
    let addr = __gfn_to_hva_many(slot, gfn, Some(&mut nr_pages), write)?;
    let pfn = hva_to_pfn(addr, atomic, writable)?;
    kdebug!("hva={}, pfn={}", addr, pfn);
    return Ok(pfn);
}

// Find the memory slot containing `gfn` within the vcpu's VM's slots.
pub fn kvm_vcpu_gfn_to_memslot(vcpu: &mut dyn Vcpu, gfn: u64) -> Option<KvmMemorySlot> {
    return __gfn_to_memslot(kvm_vcpu_memslots(vcpu), gfn);
}