use super::vmcs::{
    VMCSRegion, VmcsFields, VmxEntryCtrl, VmxPrimaryExitCtrl, VmxPrimaryProcessBasedExecuteCtrl,
    VmxSecondaryProcessBasedExecuteCtrl,
};
use super::vmx_asm_wrapper::{vmx_vmclear, vmx_vmptrld, vmx_vmread, vmx_vmwrite, vmxoff, vmxon};
use crate::arch::kvm::vmx::mmu::KvmMmu;
use crate::arch::kvm::vmx::seg::{seg_setup, Sreg};
use crate::arch::kvm::vmx::{VcpuRegIndex, X86_CR0};
use crate::arch::mm::{LockedFrameAllocator, PageMapper};
use crate::arch::x86_64::mm::X86_64MMArch;
use crate::arch::MMArch;

use crate::mm::{MemoryManagementArch, PageTableKind};
use crate::mm::{PhysAddr, VirtAddr};
use crate::virt::kvm::vcpu::Vcpu;
use crate::virt::kvm::vm::Vm;
use alloc::alloc::Global;
use alloc::boxed::Box;
use core::slice;
use log::debug;
use raw_cpuid::CpuId;
use system_error::SystemError;
use x86;
use x86::{controlregs, msr, segmentation};
// use crate::arch::kvm::vmx::seg::RMODE_TSS_SIZE;
// use crate::virt::kvm::{KVM};

// KERNEL_ALLOCATOR
pub const PAGE_SIZE: usize = 0x1000;
pub const NR_VCPU_REGS: usize = 16;

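/// A naturally aligned 4-KByte VMXON region (Intel Manual: 25.11.5 VMXON Region).
/// The first 32-bit field holds the VMCS revision identifier; the remaining bytes
/// are reserved for use by the processor.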
#[repr(C, align(4096))]
#[derive(Debug)]
pub struct VmxonRegion {
    pub revision_id: u32,
    pub data: [u8; PAGE_SIZE - 4],
}

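/// A 4-KByte MSR bitmap. When the "use MSR bitmaps" processor-based execution control
/// is set, this bitmap selects which guest RDMSR/WRMSR accesses cause VM exits.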
#[repr(C, align(4096))]
#[derive(Debug)]
pub struct MSRBitmap {
    pub data: [u8; PAGE_SIZE],
}

#[allow(dead_code)]
#[derive(Debug)]
pub struct VcpuData {
    /// The VMXON region: a naturally aligned 4-KByte region of memory, together with its physical address
    pub vmxon_region: Box<VmxonRegion>,
    pub vmxon_region_physical_address: u64, // physical address required by VMXON
    /// The VMCS region: a naturally aligned 4-KByte region of memory that holds the complete
    /// CPU state of both the host and the guest, including the segment registers, GDT, IDT, TR,
    /// various MSRs, and the control fields for handling VM exits and entries
    pub vmcs_region: Box<VMCSRegion>,
    pub vmcs_region_physical_address: u64, // physical address required by VMPTRLD / VMCLEAR
    pub msr_bitmap: Box<MSRBitmap>,
    pub msr_bitmap_physical_address: u64,
}

#[derive(Default, Debug)]
#[repr(C)]
pub struct VcpuContextFrame {
    pub regs: [usize; NR_VCPU_REGS], // general-purpose registers
    pub rip: usize,
    pub rflags: usize,
}

#[derive(Debug)]
#[allow(dead_code)]
pub enum VcpuState {
    Inv = 0,
    Pend = 1,
    Act = 2,
}

#[allow(dead_code)]
#[derive(Debug)]
pub struct VmxVcpu {
    pub vcpu_id: u32,
    pub vcpu_ctx: VcpuContextFrame, // context saved across vcpu switches (general-purpose registers, etc.)
    pub vcpu_state: VcpuState,      // current run state of the vcpu
    pub mmu: KvmMmu,                // memory management unit of the vcpu
    pub data: VcpuData,             // per-vcpu data (VMXON/VMCS regions, MSR bitmap)
    pub parent_vm: Vm,              // parent KVM
}

impl VcpuData {
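    /// Allocates the zeroed, page-aligned VMXON region, VMCS region and MSR bitmap,
    /// records their physical addresses, and writes the VMCS revision identifier into
    /// the VMXON/VMCS regions via `init_region`.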
    pub fn alloc() -> Result<Self, SystemError> {
        let vmxon_region: Box<VmxonRegion> = unsafe {
            Box::try_new_zeroed_in(Global)
                .expect("Try new zeroed fail!")
                .assume_init()
        };
        let vmcs_region: Box<VMCSRegion> = unsafe {
            Box::try_new_zeroed_in(Global)
                .expect("Try new zeroed fail!")
                .assume_init()
        };
        let msr_bitmap: Box<MSRBitmap> = unsafe {
            Box::try_new_zeroed_in(Global)
                .expect("Try new zeroed fail!")
                .assume_init()
        };
        // FIXME: the correctness of this virt_2_phys conversion is questionable
        let vmxon_region_physical_address = {
            let vaddr = VirtAddr::new(vmxon_region.as_ref() as *const _ as _);
            unsafe { MMArch::virt_2_phys(vaddr).unwrap().data() as u64 }
        };
        let vmcs_region_physical_address = {
            let vaddr = VirtAddr::new(vmcs_region.as_ref() as *const _ as _);
            unsafe { MMArch::virt_2_phys(vaddr).unwrap().data() as u64 }
        };
        let msr_bitmap_physical_address = {
            let vaddr = VirtAddr::new(msr_bitmap.as_ref() as *const _ as _);
            unsafe { MMArch::virt_2_phys(vaddr).unwrap().data() as u64 }
        };

        let mut instance = Self {
            // Allocate a naturally aligned 4-KByte VMXON region of memory to enable VMX operation (Intel Manual: 25.11.5 VMXON Region)
            vmxon_region,
            vmxon_region_physical_address,
            // Allocate a naturally aligned 4-KByte VMCS region of memory
            vmcs_region,
            vmcs_region_physical_address,
            msr_bitmap,
            msr_bitmap_physical_address,
        };
        // printk_color!(GREEN, BLACK, "[+] init_region\n");
        instance.init_region()?;
        Ok(instance)
    }

    pub fn init_region(&mut self) -> Result<(), SystemError> {
        // Get the Virtual Machine Control Structure revision identifier (VMCS revision ID)
        // (Intel Manual: 25.11.5 VMXON Region)
        let revision_id = unsafe { (msr::rdmsr(msr::IA32_VMX_BASIC) as u32) & 0x7FFF_FFFF };
        debug!("[+] VMXON Region Virtual Address: {:p}", self.vmxon_region);
        debug!(
            "[+] VMXON Region Physical Address: 0x{:x}",
            self.vmxon_region_physical_address
        );
        debug!("[+] VMCS Region Virtual Address: {:p}", self.vmcs_region);
        debug!(
            "[+] VMCS Region Physical Address: 0x{:x}",
            self.vmcs_region_physical_address
        );
        self.vmxon_region.revision_id = revision_id;
        self.vmcs_region.revision_id = revision_id;
        return Ok(());
    }
}

impl VmxVcpu {
    pub fn new(vcpu_id: u32, parent_vm: Vm) -> Result<Self, SystemError> {
        debug!("Creating processor {}", vcpu_id);
        let instance = Self {
            vcpu_id,
            vcpu_ctx: VcpuContextFrame {
                regs: [0; NR_VCPU_REGS],
                rip: 0,
                rflags: 0,
            },
            vcpu_state: VcpuState::Inv,
            mmu: KvmMmu::default(),
            data: VcpuData::alloc()?,
            parent_vm,
        };
        Ok(instance)
    }

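    /// Writes the guest CR0 into the VMCS. CD and NW are masked off and WP/NE are
    /// forced on before the value is written to GUEST_CR0.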
    pub fn vmx_set_cr0(cr0: X86_CR0) -> Result<(), SystemError> {
        let mut hw_cr0 = cr0 & !(X86_CR0::CR0_NW | X86_CR0::CR0_CD);
        hw_cr0 |= X86_CR0::CR0_WP | X86_CR0::CR0_NE;

        vmx_vmwrite(VmcsFields::GUEST_CR0 as u32, hw_cr0.bits() as u64)?;
        Ok(())
    }

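    /// Initializes the guest-state area of the current VMCS to (approximately) the x86
    /// power-on/reset state: segment registers, descriptor tables, RFLAGS, control and
    /// debug registers, plus RIP/RSP taken from this vcpu's context frame.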
    pub fn vmcs_init_guest(&self) -> Result<(), SystemError> {
        // https://www.sandpile.org/x86/initial.htm
        // segment field initialization
        seg_setup(Sreg::CS as usize)?;
        vmx_vmwrite(VmcsFields::GUEST_CS_SELECTOR as u32, 0xf000)?;
        vmx_vmwrite(VmcsFields::GUEST_CS_BASE as u32, 0xffff0000)?;

        seg_setup(Sreg::DS as usize)?;
        seg_setup(Sreg::ES as usize)?;
        seg_setup(Sreg::FS as usize)?;
        seg_setup(Sreg::GS as usize)?;
        seg_setup(Sreg::SS as usize)?;

        vmx_vmwrite(VmcsFields::GUEST_TR_SELECTOR as u32, 0)?;
        vmx_vmwrite(VmcsFields::GUEST_TR_BASE as u32, 0)?;
        vmx_vmwrite(VmcsFields::GUEST_TR_LIMIT as u32, 0xffff)?;
        vmx_vmwrite(VmcsFields::GUEST_TR_ACCESS_RIGHTS as u32, 0x008b)?;

        vmx_vmwrite(VmcsFields::GUEST_LDTR_SELECTOR as u32, 0)?;
        vmx_vmwrite(VmcsFields::GUEST_LDTR_BASE as u32, 0)?;
        vmx_vmwrite(VmcsFields::GUEST_LDTR_LIMIT as u32, 0xffff)?;
        vmx_vmwrite(VmcsFields::GUEST_LDTR_ACCESS_RIGHTS as u32, 0x0082)?;

        vmx_vmwrite(VmcsFields::GUEST_RFLAGS as u32, 2)?;

        vmx_vmwrite(VmcsFields::GUEST_GDTR_BASE as u32, 0)?;
        vmx_vmwrite(VmcsFields::GUEST_GDTR_LIMIT as u32, 0x0000_FFFF_u64)?;

        vmx_vmwrite(VmcsFields::GUEST_IDTR_BASE as u32, 0)?;
        vmx_vmwrite(VmcsFields::GUEST_IDTR_LIMIT as u32, 0x0000_FFFF_u64)?;

        vmx_vmwrite(VmcsFields::GUEST_ACTIVITY_STATE as u32, 0)?; // State = Active
        vmx_vmwrite(VmcsFields::GUEST_INTERRUPTIBILITY_STATE as u32, 0)?;
        vmx_vmwrite(VmcsFields::GUEST_PENDING_DBG_EXCEPTIONS as u32, 0)?;

        vmx_vmwrite(VmcsFields::CTRL_VM_ENTRY_INTR_INFO_FIELD as u32, 0)?;

        let cr0 = X86_CR0::CR0_NW | X86_CR0::CR0_CD | X86_CR0::CR0_ET;
        Self::vmx_set_cr0(cr0)?;

        vmx_vmwrite(
            VmcsFields::GUEST_SYSENTER_CS as u32,
            vmx_vmread(VmcsFields::HOST_SYSENTER_CS as u32).unwrap(),
        )?;
        vmx_vmwrite(VmcsFields::GUEST_VMX_PREEMPT_TIMER_VALUE as u32, 0)?;

        vmx_vmwrite(VmcsFields::GUEST_INTR_STATUS as u32, 0)?;
        vmx_vmwrite(VmcsFields::GUEST_PML_INDEX as u32, 0)?;

        vmx_vmwrite(VmcsFields::GUEST_VMCS_LINK_PTR as u32, u64::MAX)?;
        vmx_vmwrite(VmcsFields::GUEST_DEBUGCTL as u32, unsafe {
            msr::rdmsr(msr::IA32_DEBUGCTL)
        })?;

        vmx_vmwrite(
            VmcsFields::GUEST_SYSENTER_ESP as u32,
            vmx_vmread(VmcsFields::HOST_SYSENTER_ESP as u32).unwrap(),
        )?;
        vmx_vmwrite(
            VmcsFields::GUEST_SYSENTER_EIP as u32,
            vmx_vmread(VmcsFields::HOST_SYSENTER_EIP as u32).unwrap(),
        )?;

        // Self::vmx_set_cr0();
        vmx_vmwrite(VmcsFields::GUEST_CR3 as u32, 0)?;
        vmx_vmwrite(
            VmcsFields::GUEST_CR4 as u32,
            1, // enable vme
        )?;
        vmx_vmwrite(VmcsFields::GUEST_DR7 as u32, 0x0000_0000_0000_0400)?;
        vmx_vmwrite(
            VmcsFields::GUEST_RSP as u32,
            self.vcpu_ctx.regs[VcpuRegIndex::Rsp as usize] as u64,
        )?;
        vmx_vmwrite(VmcsFields::GUEST_RIP as u32, self.vcpu_ctx.rip as u64)?;
        debug!("vmcs init guest rip: {:#x}", self.vcpu_ctx.rip as u64);
        debug!(
            "vmcs init guest rsp: {:#x}",
            self.vcpu_ctx.regs[VcpuRegIndex::Rsp as usize] as u64
        );

        // vmx_vmwrite(VmcsFields::GUEST_RFLAGS as u32, x86::bits64::rflags::read().bits())?;
        Ok(())
    }

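    /// Initializes the host-state area of the current VMCS from the running kernel's
    /// control registers, segment selectors, descriptor-table bases, and SYSENTER MSRs,
    /// so that VM exits return into the current host context.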
    #[allow(deprecated)]
    pub fn vmcs_init_host(&self) -> Result<(), SystemError> {
        vmx_vmwrite(VmcsFields::HOST_CR0 as u32, unsafe {
            controlregs::cr0().bits().try_into().unwrap()
        })?;
        vmx_vmwrite(VmcsFields::HOST_CR3 as u32, unsafe { controlregs::cr3() })?;
        vmx_vmwrite(VmcsFields::HOST_CR4 as u32, unsafe {
            controlregs::cr4().bits().try_into().unwrap()
        })?;
        vmx_vmwrite(
            VmcsFields::HOST_ES_SELECTOR as u32,
            (segmentation::es().bits() & (!0x07)).into(),
        )?;
        vmx_vmwrite(
            VmcsFields::HOST_CS_SELECTOR as u32,
            (segmentation::cs().bits() & (!0x07)).into(),
        )?;
        vmx_vmwrite(
            VmcsFields::HOST_SS_SELECTOR as u32,
            (segmentation::ss().bits() & (!0x07)).into(),
        )?;
        vmx_vmwrite(
            VmcsFields::HOST_DS_SELECTOR as u32,
            (segmentation::ds().bits() & (!0x07)).into(),
        )?;
        vmx_vmwrite(
            VmcsFields::HOST_FS_SELECTOR as u32,
            (segmentation::fs().bits() & (!0x07)).into(),
        )?;
        vmx_vmwrite(
            VmcsFields::HOST_GS_SELECTOR as u32,
            (segmentation::gs().bits() & (!0x07)).into(),
        )?;
        vmx_vmwrite(VmcsFields::HOST_TR_SELECTOR as u32, unsafe {
            (x86::task::tr().bits() & (!0x07)).into()
        })?;
        vmx_vmwrite(VmcsFields::HOST_FS_BASE as u32, unsafe {
            msr::rdmsr(msr::IA32_FS_BASE)
        })?;
        vmx_vmwrite(VmcsFields::HOST_GS_BASE as u32, unsafe {
            msr::rdmsr(msr::IA32_GS_BASE)
        })?;

        let mut pseudo_descriptor: x86::dtables::DescriptorTablePointer<u64> = Default::default();
        unsafe {
            x86::dtables::sgdt(&mut pseudo_descriptor);
        };

        vmx_vmwrite(
            VmcsFields::HOST_TR_BASE as u32,
            get_segment_base(pseudo_descriptor.base, pseudo_descriptor.limit, unsafe {
                x86::task::tr().bits()
            }),
        )?;
        vmx_vmwrite(
            VmcsFields::HOST_GDTR_BASE as u32,
            pseudo_descriptor.base as usize as u64,
        )?;
        vmx_vmwrite(VmcsFields::HOST_IDTR_BASE as u32, unsafe {
            let mut pseudo_descriptor: x86::dtables::DescriptorTablePointer<u64> =
                Default::default();
            x86::dtables::sidt(&mut pseudo_descriptor);
            pseudo_descriptor.base as usize as u64
        })?;

        // fast entry into the kernel
        vmx_vmwrite(VmcsFields::HOST_SYSENTER_ESP as u32, unsafe {
            msr::rdmsr(msr::IA32_SYSENTER_ESP)
        })?;
        vmx_vmwrite(VmcsFields::HOST_SYSENTER_EIP as u32, unsafe {
            msr::rdmsr(msr::IA32_SYSENTER_EIP)
        })?;
        vmx_vmwrite(VmcsFields::HOST_SYSENTER_CS as u32, unsafe {
            msr::rdmsr(msr::IA32_SYSENTER_CS)
        })?;

        // vmx_vmwrite(VmcsFields::HOST_RIP as u32, vmx_return as *const () as u64)?;
        // debug!("vmcs init host rip: {:#x}", vmx_return as *const () as u64);

        Ok(())
    }

    // Intel SDM Volume 3C Chapter 25.3 “Organization of VMCS Data”
    pub fn vmcs_init(&self) -> Result<(), SystemError> {
        vmx_vmwrite(VmcsFields::CTRL_PAGE_FAULT_ERR_CODE_MASK as u32, 0)?;
        vmx_vmwrite(VmcsFields::CTRL_PAGE_FAULT_ERR_CODE_MATCH as u32, 0)?;
        vmx_vmwrite(VmcsFields::CTRL_CR3_TARGET_COUNT as u32, 0)?;

        vmx_vmwrite(
            VmcsFields::CTRL_PIN_BASED_VM_EXEC_CTRLS as u32,
            adjust_vmx_pinbased_controls() as u64,
        )?;

        vmx_vmwrite(
            VmcsFields::CTRL_MSR_BITMAP_ADDR as u32,
            self.data.msr_bitmap_physical_address,
        )?;

        vmx_vmwrite(VmcsFields::CTRL_CR0_READ_SHADOW as u32, unsafe {
            controlregs::cr0().bits().try_into().unwrap()
        })?;
        vmx_vmwrite(VmcsFields::CTRL_CR4_READ_SHADOW as u32, unsafe {
            controlregs::cr4().bits().try_into().unwrap()
        })?;
        vmx_vmwrite(
            VmcsFields::CTRL_VM_ENTRY_CTRLS as u32,
            adjust_vmx_entry_controls() as u64,
        )?;
        vmx_vmwrite(
            VmcsFields::CTRL_PRIMARY_VM_EXIT_CTRLS as u32,
            adjust_vmx_exit_controls() as u64,
        )?;
        vmx_vmwrite(
            VmcsFields::CTRL_PRIMARY_PROCESSOR_VM_EXEC_CTRLS as u32,
            adjust_vmx_primary_process_exec_controls() as u64,
        )?;
        vmx_vmwrite(
            VmcsFields::CTRL_SECONDARY_PROCESSOR_VM_EXEC_CTRLS as u32,
            adjust_vmx_secondary_process_exec_controls() as u64,
        )?;

        self.vmcs_init_host()?;
        self.vmcs_init_guest()?;
        Ok(())
    }

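    /// Creates a fresh EPT paging structure and loads its root table's physical address
    /// into the VMCS via the MMU's `set_eptp` callback, recording it in `mmu.root_hpa`.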
    fn kvm_mmu_load(&mut self) -> Result<(), SystemError> {
        debug!("kvm_mmu_load!");
        // Allocate and create a new (EPT) page table
        let mapper: crate::mm::page::PageMapper<X86_64MMArch, LockedFrameAllocator> = unsafe {
            PageMapper::create(PageTableKind::EPT, LockedFrameAllocator)
                .ok_or(SystemError::ENOMEM)?
        };

        let ept_root_hpa = mapper.table().phys();
        let set_eptp_fn = self.mmu.set_eptp.unwrap();
        set_eptp_fn(ept_root_hpa.data() as u64)?;
        self.mmu.root_hpa = ept_root_hpa.data() as u64;
        debug!("ept_root_hpa: {:x}!", ept_root_hpa.data() as u64);

        return Ok(());
    }

    pub fn set_regs(&mut self, regs: VcpuContextFrame) -> Result<(), SystemError> {
        self.vcpu_ctx = regs;
        Ok(())
    }
}

impl Vcpu for VmxVcpu {
    /// Virtualize the CPU
    fn virtualize_cpu(&mut self) -> Result<(), SystemError> {
        match has_intel_vmx_support() {
            Ok(_) => {
                debug!("[+] CPU supports Intel VMX");
            }
            Err(e) => {
                debug!("[-] CPU does not support Intel VMX: {:?}", e);
                return Err(SystemError::ENOSYS);
            }
        };

        match enable_vmx_operation() {
            Ok(_) => {
                debug!("[+] Enabling Virtual Machine Extensions (VMX)");
            }
            Err(_) => {
                debug!("[-] VMX operation is not supported on this processor.");
                return Err(SystemError::ENOSYS);
            }
        }

        vmxon(self.data.vmxon_region_physical_address)?;
        debug!("[+] VMXON successful!");
        vmx_vmclear(self.data.vmcs_region_physical_address)?;
        vmx_vmptrld(self.data.vmcs_region_physical_address)?;
        debug!("[+] VMPTRLD successful!");
        self.vmcs_init().expect("vmcs_init failed");
        debug!("[+] VMCS init!");
        // debug!("vmcs init host rip: {:#x}", vmx_return as *const () as u64);
        // debug!("vmcs init host rsp: {:#x}", x86::bits64::registers::rsp());
        // vmx_vmwrite(VmcsFields::HOST_RSP as u32, x86::bits64::registers::rsp())?;
        // vmx_vmwrite(VmcsFields::HOST_RIP as u32, vmx_return as *const () as u64)?;
        // vmx_vmwrite(VmcsFields::HOST_RSP as u32, x86::bits64::registers::rsp())?;
        self.kvm_mmu_load()?;
        Ok(())
    }

    fn devirtualize_cpu(&self) -> Result<(), SystemError> {
        vmxoff()?;
        Ok(())
    }

    /// Gets the index of the current logical/virtual processor
    fn id(&self) -> u32 {
        self.vcpu_id
    }
}

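/// Reads the descriptor selected by `segment_selector` from the descriptor table at
/// `gdt_base` (with limit `gdt_size`) and reassembles the 32-bit segment base from the
/// descriptor's base fields; the result is translated with `phys_2_virt` before it is returned.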
pub fn get_segment_base(gdt_base: *const u64, gdt_size: u16, segment_selector: u16) -> u64 {
    let table = segment_selector & 0x0004; // get table indicator in selector
    let index = (segment_selector >> 3) as usize; // get index in selector
    if table == 0 && index == 0 {
        return 0;
    }
    // view the table as 8-byte descriptors; the limit is in bytes
    let descriptor_table =
        unsafe { slice::from_raw_parts(gdt_base, (gdt_size as usize + 1) / 8) };
    let descriptor = descriptor_table[index];

    let base_high = (descriptor & 0xFF00_0000_0000_0000) >> 32;
    let base_mid = (descriptor & 0x0000_00FF_0000_0000) >> 16;
    let base_low = (descriptor & 0x0000_0000_FFFF_0000) >> 16;
    let segment_base = (base_high | base_mid | base_low) & 0xFFFFFFFF;
    let virtaddr = unsafe { MMArch::phys_2_virt(PhysAddr::new(segment_base as usize)).unwrap() };

    return virtaddr.data() as u64;
}

// FIXME: may have bug
// pub fn read_segment_access_rights(segement_selector: u16) -> u32 {
//     let table = segement_selector & 0x0004; // get table indicator in selector
//     let index = segement_selector & 0xFFF8; // get index in selector
//     let mut flag: u16;
//     if table == 0 && index == 0 {
//         return 0;
//     }
//     unsafe {
//         asm!(
//             "lar {0:r}, rcx",
//             "mov {1:r}, {0:r}",
//             in(reg) segement_selector,
//             out(reg) flag,
//         );
//     }
//     return (flag >> 8) as u32;
// }
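
/// Adjusts the requested control bits (`ctl_min | ctl_opt`) against a VMX capability MSR:
/// the MSR's low 32 bits are the allowed-0 settings (bits that must be 1) and its high
/// 32 bits are the allowed-1 settings (bits that must stay 0 when reported as 0).
/// The adjusted value is stored in `*result`.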
pub fn adjust_vmx_controls(ctl_min: u32, ctl_opt: u32, msr: u32, result: &mut u32) {
    let vmx_msr = unsafe { msr::rdmsr(msr) };
    let vmx_msr_low: u32 = (vmx_msr & 0x0000_0000_FFFF_FFFF) as u32;
    let vmx_msr_high: u32 = (vmx_msr >> 32) as u32;
    let mut ctl: u32 = ctl_min | ctl_opt;
    ctl &= vmx_msr_high; /* bit == 0 in high word ==> must be zero */
    ctl |= vmx_msr_low; /* bit == 1 in low word ==> must be one */
    *result = ctl;
}

pub fn adjust_vmx_entry_controls() -> u32 {
    let mut entry_controls: u32 = 0;
    adjust_vmx_controls(
        VmxEntryCtrl::LOAD_DBG_CTRLS.bits(),
        VmxEntryCtrl::IA32E_MODE_GUEST.bits(),
        msr::IA32_VMX_ENTRY_CTLS, // Capability Reporting Register of VM-entry Controls (R/O)
        &mut entry_controls,
    );
    return entry_controls;
    // msr::IA32_VMX_TRUE_ENTRY_CTLS // Capability Reporting Register of VM-entry Flex Controls (R/O), see Table 35-2
}

pub fn adjust_vmx_exit_controls() -> u32 {
    let mut exit_controls: u32 = 0;
    adjust_vmx_controls(
        VmxPrimaryExitCtrl::SAVE_DBG_CTRLS.bits(),
        VmxPrimaryExitCtrl::HOST_ADDR_SPACE_SIZE.bits(),
        msr::IA32_VMX_EXIT_CTLS,
        &mut exit_controls,
    );
    return exit_controls;
}

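/// Builds the pin-based VM-execution controls by adjusting an empty request against the
/// IA32_VMX_TRUE_PINBASED_CTLS capability MSR, so only the bits the hardware mandates end up set.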
pub fn adjust_vmx_pinbased_controls() -> u32 {
    let mut controls: u32 = 16;
    adjust_vmx_controls(0, 0, msr::IA32_VMX_TRUE_PINBASED_CTLS, &mut controls);
    // debug!("adjust_vmx_pinbased_controls: {:x}", controls);
    return controls;
}

pub fn adjust_vmx_primary_process_exec_controls() -> u32 {
    let mut controls: u32 = 0;
    adjust_vmx_controls(
        0,
        VmxPrimaryProcessBasedExecuteCtrl::USE_MSR_BITMAPS.bits()
            | VmxPrimaryProcessBasedExecuteCtrl::ACTIVATE_SECONDARY_CONTROLS.bits(),
        msr::IA32_VMX_PROCBASED_CTLS,
        &mut controls,
    );
    return controls;
}

pub fn adjust_vmx_secondary_process_exec_controls() -> u32 {
    let mut controls: u32 = 0;
    adjust_vmx_controls(
        0,
        VmxSecondaryProcessBasedExecuteCtrl::ENABLE_RDTSCP.bits()
            | VmxSecondaryProcessBasedExecuteCtrl::ENABLE_XSAVES_XRSTORS.bits()
            | VmxSecondaryProcessBasedExecuteCtrl::ENABLE_INVPCID.bits()
            | VmxSecondaryProcessBasedExecuteCtrl::ENABLE_EPT.bits()
            | VmxSecondaryProcessBasedExecuteCtrl::UNRESTRICTED_GUEST.bits(),
        msr::IA32_VMX_PROCBASED_CTLS2,
        &mut controls,
    );
    return controls;
}

/// Check that the CPU is Intel (“GenuineIntel”) and that the processor supports
/// Virtual Machine Extension (VMX) technology.
// CPUID.1:ECX.VMX[bit 5] = 1 (Intel Manual: 24.6 Discovering Support for VMX)
pub fn has_intel_vmx_support() -> Result<(), SystemError> {
    let cpuid = CpuId::new();
    if let Some(vi) = cpuid.get_vendor_info() {
        if vi.as_str() != "GenuineIntel" {
            return Err(SystemError::ENOSYS);
        }
    }
    if let Some(fi) = cpuid.get_feature_info() {
        if !fi.has_vmx() {
            return Err(SystemError::ENOSYS);
        }
    }
    Ok(())
}

/// Enables Virtual Machine Extensions
// - CR4.VMXE[bit 13] = 1 (Intel Manual: 24.7 Enabling and Entering VMX Operation)
pub fn enable_vmx_operation() -> Result<(), SystemError> {
    let mut cr4 = unsafe { controlregs::cr4() };
    cr4.set(controlregs::Cr4::CR4_ENABLE_VMX, true);
    unsafe { controlregs::cr4_write(cr4) };

    set_lock_bit()?;
    debug!("[+] Lock bit set via IA32_FEATURE_CONTROL");
    set_cr0_bits();
    debug!("[+] Mandatory bits in CR0 set/cleared");
    set_cr4_bits();
    debug!("[+] Mandatory bits in CR4 set/cleared");

    Ok(())
}

/// Check if we need to set bits in IA32_FEATURE_CONTROL
// (Intel Manual: 24.7 Enabling and Entering VMX Operation)
fn set_lock_bit() -> Result<(), SystemError> {
    const VMX_LOCK_BIT: u64 = 1 << 0;
    const VMXON_OUTSIDE_SMX: u64 = 1 << 2;

    let ia32_feature_control = unsafe { msr::rdmsr(msr::IA32_FEATURE_CONTROL) };

    if (ia32_feature_control & VMX_LOCK_BIT) == 0 {
        unsafe {
            msr::wrmsr(
                msr::IA32_FEATURE_CONTROL,
                VMXON_OUTSIDE_SMX | VMX_LOCK_BIT | ia32_feature_control,
            )
        };
    } else if (ia32_feature_control & VMXON_OUTSIDE_SMX) == 0 {
        return Err(SystemError::EPERM);
    }

    Ok(())
}

/// Set the mandatory bits in CR0 and clear bits that are mandatory zero
/// (Intel Manual: 24.8 Restrictions on VMX Operation)
fn set_cr0_bits() {
    let ia32_vmx_cr0_fixed0 = unsafe { msr::rdmsr(msr::IA32_VMX_CR0_FIXED0) };
    let ia32_vmx_cr0_fixed1 = unsafe { msr::rdmsr(msr::IA32_VMX_CR0_FIXED1) };

    let mut cr0 = unsafe { controlregs::cr0() };

    cr0 |= controlregs::Cr0::from_bits_truncate(ia32_vmx_cr0_fixed0 as usize);
    cr0 &= controlregs::Cr0::from_bits_truncate(ia32_vmx_cr0_fixed1 as usize);

    unsafe { controlregs::cr0_write(cr0) };
}

/// Set the mandatory bits in CR4 and clear bits that are mandatory zero
/// (Intel Manual: 24.8 Restrictions on VMX Operation)
fn set_cr4_bits() {
    let ia32_vmx_cr4_fixed0 = unsafe { msr::rdmsr(msr::IA32_VMX_CR4_FIXED0) };
    let ia32_vmx_cr4_fixed1 = unsafe { msr::rdmsr(msr::IA32_VMX_CR4_FIXED1) };

    let mut cr4 = unsafe { controlregs::cr4() };

    cr4 |= controlregs::Cr4::from_bits_truncate(ia32_vmx_cr4_fixed0 as usize);
    cr4 &= controlregs::Cr4::from_bits_truncate(ia32_vmx_cr4_fixed1 as usize);

    unsafe { controlregs::cr4_write(cr4) };
}