// xref: /DragonOS/kernel/src/arch/x86_64/kvm/vmx/vcpu.rs (revision 1ea2daad8121b77ed704e6d7c3a09f478147441d)
use super::vmcs::{
    VMCSRegion, VmcsFields, VmxEntryCtrl, VmxPrimaryExitCtrl, VmxPrimaryProcessBasedExecuteCtrl,
    VmxSecondaryProcessBasedExecuteCtrl,
};
use super::vmx_asm_wrapper::{vmx_vmclear, vmx_vmptrld, vmx_vmread, vmx_vmwrite, vmxoff, vmxon};
use crate::arch::kvm::vmx::mmu::KvmMmu;
use crate::arch::kvm::vmx::seg::{seg_setup, Sreg};
use crate::arch::kvm::vmx::{VcpuRegIndex, X86_CR0};
use crate::arch::mm::{LockedFrameAllocator, PageMapper};
use crate::arch::x86_64::mm::X86_64MMArch;
use crate::arch::MMArch;

use crate::mm::{phys_2_virt, VirtAddr};
use crate::mm::{MemoryManagementArch, PageTableKind};
use crate::virt::kvm::vcpu::Vcpu;
use crate::virt::kvm::vm::Vm;
use alloc::alloc::Global;
use alloc::boxed::Box;
use core::slice;
use log::debug;
use raw_cpuid::CpuId;
use system_error::SystemError;
use x86;
use x86::{controlregs, msr, segmentation};
// use crate::arch::kvm::vmx::seg::RMODE_TSS_SIZE;
// use crate::virt::kvm::{KVM};

// KERNEL_ALLOCATOR
pub const PAGE_SIZE: usize = 0x1000;
pub const NR_VCPU_REGS: usize = 16;

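/// A naturally aligned, 4-KByte VMXON region (Intel Manual: 25.11.5 VMXON Region).
/// The first 4 bytes hold the VMCS revision identifier; the rest of the page is
/// reserved for the processor and is only exposed here as raw bytes.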
#[repr(C, align(4096))]
#[derive(Debug)]
pub struct VmxonRegion {
    pub revision_id: u32,
    pub data: [u8; PAGE_SIZE - 4],
}

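/// A 4-KByte MSR bitmap, referenced by the CTRL_MSR_BITMAP_ADDR VMCS field when
/// the "use MSR bitmaps" execution control is enabled. A clear bit means the
/// corresponding RDMSR/WRMSR does not cause a VM exit; the bitmap is allocated
/// zeroed, so covered MSR accesses are passed through.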
#[repr(C, align(4096))]
#[derive(Debug)]
pub struct MSRBitmap {
    pub data: [u8; PAGE_SIZE],
}

#[derive(Debug)]
pub struct VcpuData {
    /// The VMXON region: a naturally aligned 4-KByte region of memory used to enter VMX operation
    pub vmxon_region: Box<VmxonRegion>,
    pub vmxon_region_physical_address: u64, // physical address required by the VMXON instruction
    /// The VMCS region: a naturally aligned 4-KByte region of memory that
    /// holds the complete CPU state of both the host and the guest.
    /// It includes the segment registers, GDT, IDT, TR, various MSRs,
    /// and the control field structures for handling exit and entry operations.
    pub vmcs_region: Box<VMCSRegion>,
    pub vmcs_region_physical_address: u64, // physical address required by VMPTRLD and VMCLEAR
    pub msr_bitmap: Box<MSRBitmap>,
    pub msr_bitmap_physical_address: u64,
}

#[derive(Default, Debug)]
#[repr(C)]
pub struct VcpuContextFrame {
    pub regs: [usize; NR_VCPU_REGS], // general-purpose registers
    pub rip: usize,
    pub rflags: usize,
}

#[derive(Debug)]
#[allow(dead_code)]
pub enum VcpuState {
    Inv = 0,
    Pend = 1,
    Act = 2,
}

#[derive(Debug)]
pub struct VmxVcpu {
    pub vcpu_id: u32,
    pub vcpu_ctx: VcpuContextFrame, // context saved on a vCPU switch (general-purpose registers, etc.)
    pub vcpu_state: VcpuState,      // current run state of the vCPU
    pub mmu: KvmMmu,                // memory management unit of this vCPU
    pub data: VcpuData,             // per-vCPU data: VMXON/VMCS regions and MSR bitmap
    pub parent_vm: Vm,              // parent KVM
}

impl VcpuData {
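    /// Allocates the zeroed VMXON region, VMCS region and MSR bitmap for this vCPU,
    /// records their physical addresses (needed by VMXON, VMPTRLD/VMCLEAR and the
    /// MSR-bitmap VMCS field), and stamps both regions with the VMCS revision
    /// identifier via `init_region`.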
    pub fn alloc() -> Result<Self, SystemError> {
        let vmxon_region: Box<VmxonRegion> = unsafe {
            Box::try_new_zeroed_in(Global)
                .expect("try_new_zeroed_in failed!")
                .assume_init()
        };
        let vmcs_region: Box<VMCSRegion> = unsafe {
            Box::try_new_zeroed_in(Global)
                .expect("try_new_zeroed_in failed!")
                .assume_init()
        };
        let msr_bitmap: Box<MSRBitmap> = unsafe {
            Box::try_new_zeroed_in(Global)
                .expect("try_new_zeroed_in failed!")
                .assume_init()
        };
        // FIXME: the correctness of the virt_2_phys conversion here is questionable
        let vmxon_region_physical_address = {
            let vaddr = VirtAddr::new(vmxon_region.as_ref() as *const _ as _);
            unsafe { MMArch::virt_2_phys(vaddr).unwrap().data() as u64 }
        };
        let vmcs_region_physical_address = {
            let vaddr = VirtAddr::new(vmcs_region.as_ref() as *const _ as _);
            unsafe { MMArch::virt_2_phys(vaddr).unwrap().data() as u64 }
        };
        let msr_bitmap_physical_address = {
            let vaddr = VirtAddr::new(msr_bitmap.as_ref() as *const _ as _);
            unsafe { MMArch::virt_2_phys(vaddr).unwrap().data() as u64 }
        };

        let mut instance = Self {
            // Allocate a naturally aligned 4-KByte VMXON region of memory to enable VMX operation (Intel Manual: 25.11.5 VMXON Region)
            vmxon_region,
            vmxon_region_physical_address,
            // Allocate a naturally aligned 4-KByte VMCS region of memory
            vmcs_region,
            vmcs_region_physical_address,
            msr_bitmap,
            msr_bitmap_physical_address,
        };
        // printk_color!(GREEN, BLACK, "[+] init_region\n");
        instance.init_region()?;
        Ok(instance)
    }

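    /// Writes the VMCS revision identifier (bits 30:0 of IA32_VMX_BASIC) into the
    /// first 4 bytes of both the VMXON region and the VMCS region, as required
    /// before they can be used by VMXON and VMPTRLD.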
    pub fn init_region(&mut self) -> Result<(), SystemError> {
        // Get the Virtual Machine Control Structure revision identifier (VMCS revision ID)
        // (Intel Manual: 25.11.5 VMXON Region)
        let revision_id = unsafe { (msr::rdmsr(msr::IA32_VMX_BASIC) as u32) & 0x7FFF_FFFF };
        debug!("[+] VMXON Region Virtual Address: {:p}", self.vmxon_region);
        debug!(
            "[+] VMXON Region Physical Address: 0x{:x}",
            self.vmxon_region_physical_address
        );
        debug!("[+] VMCS Region Virtual Address: {:p}", self.vmcs_region);
        debug!(
            "[+] VMCS Region Physical Address: 0x{:x}",
            self.vmcs_region_physical_address
        );
        self.vmxon_region.revision_id = revision_id;
        self.vmcs_region.revision_id = revision_id;
        return Ok(());
    }
}

impl VmxVcpu {
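    /// Creates a vCPU with a zeroed register context, a default MMU and freshly
    /// allocated VMXON/VMCS/MSR-bitmap regions.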
    pub fn new(vcpu_id: u32, parent_vm: Vm) -> Result<Self, SystemError> {
        debug!("Creating processor {}", vcpu_id);
        let instance = Self {
            vcpu_id,
            vcpu_ctx: VcpuContextFrame {
                regs: [0; NR_VCPU_REGS],
                rip: 0,
                rflags: 0,
            },
            vcpu_state: VcpuState::Inv,
            mmu: KvmMmu::default(),
            data: VcpuData::alloc()?,
            parent_vm,
        };
        Ok(instance)
    }

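    /// Writes the guest CR0 VMCS field: NW and CD are masked out and WP/NE are
    /// forced on before the value is loaded into GUEST_CR0.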
    pub fn vmx_set_cr0(cr0: X86_CR0) -> Result<(), SystemError> {
        let mut hw_cr0 = cr0 & !(X86_CR0::CR0_NW | X86_CR0::CR0_CD);
        hw_cr0 |= X86_CR0::CR0_WP | X86_CR0::CR0_NE;

        vmx_vmwrite(VmcsFields::GUEST_CR0 as u32, hw_cr0.bits() as u64)?;
        Ok(())
    }

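    /// Initializes the guest-state area of the current VMCS to the x86 reset state
    /// (CS selector 0xF000 with base 0xFFFF0000, real-mode data segments, RFLAGS
    /// with only bit 1 set, DR7 = 0x400), copies the SYSENTER fields from the host
    /// state, and loads RIP/RSP from the saved vCPU context.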
    pub fn vmcs_init_guest(&self) -> Result<(), SystemError> {
        // https://www.sandpile.org/x86/initial.htm
        // segment field initialization
        seg_setup(Sreg::CS as usize)?;
        vmx_vmwrite(VmcsFields::GUEST_CS_SELECTOR as u32, 0xf000)?;
        vmx_vmwrite(VmcsFields::GUEST_CS_BASE as u32, 0xffff0000)?;

        seg_setup(Sreg::DS as usize)?;
        seg_setup(Sreg::ES as usize)?;
        seg_setup(Sreg::FS as usize)?;
        seg_setup(Sreg::GS as usize)?;
        seg_setup(Sreg::SS as usize)?;

        vmx_vmwrite(VmcsFields::GUEST_TR_SELECTOR as u32, 0)?;
        vmx_vmwrite(VmcsFields::GUEST_TR_BASE as u32, 0)?;
        vmx_vmwrite(VmcsFields::GUEST_TR_LIMIT as u32, 0xffff)?;
        vmx_vmwrite(VmcsFields::GUEST_TR_ACCESS_RIGHTS as u32, 0x008b)?;

        vmx_vmwrite(VmcsFields::GUEST_LDTR_SELECTOR as u32, 0)?;
        vmx_vmwrite(VmcsFields::GUEST_LDTR_BASE as u32, 0)?;
        vmx_vmwrite(VmcsFields::GUEST_LDTR_LIMIT as u32, 0xffff)?;
        vmx_vmwrite(VmcsFields::GUEST_LDTR_ACCESS_RIGHTS as u32, 0x0082)?;

        vmx_vmwrite(VmcsFields::GUEST_RFLAGS as u32, 2)?;

        vmx_vmwrite(VmcsFields::GUEST_GDTR_BASE as u32, 0)?;
        vmx_vmwrite(VmcsFields::GUEST_GDTR_LIMIT as u32, 0x0000_FFFF_u64)?;

        vmx_vmwrite(VmcsFields::GUEST_IDTR_BASE as u32, 0)?;
        vmx_vmwrite(VmcsFields::GUEST_IDTR_LIMIT as u32, 0x0000_FFFF_u64)?;

        vmx_vmwrite(VmcsFields::GUEST_ACTIVITY_STATE as u32, 0)?; // State = Active
        vmx_vmwrite(VmcsFields::GUEST_INTERRUPTIBILITY_STATE as u32, 0)?;
        vmx_vmwrite(VmcsFields::GUEST_PENDING_DBG_EXCEPTIONS as u32, 0)?;

        vmx_vmwrite(VmcsFields::CTRL_VM_ENTRY_INTR_INFO_FIELD as u32, 0)?;

        let cr0 = X86_CR0::CR0_NW | X86_CR0::CR0_CD | X86_CR0::CR0_ET;
        // vmx_set_cr0 masks NW/CD and adds WP/NE before writing GUEST_CR0.
        Self::vmx_set_cr0(cr0)?;

        vmx_vmwrite(
            VmcsFields::GUEST_SYSENTER_CS as u32,
            vmx_vmread(VmcsFields::HOST_SYSENTER_CS as u32).unwrap(),
        )?;
        vmx_vmwrite(VmcsFields::GUEST_VMX_PREEMPT_TIMER_VALUE as u32, 0)?;

        vmx_vmwrite(VmcsFields::GUEST_INTR_STATUS as u32, 0)?;
        vmx_vmwrite(VmcsFields::GUEST_PML_INDEX as u32, 0)?;

        vmx_vmwrite(VmcsFields::GUEST_VMCS_LINK_PTR as u32, u64::MAX)?;
        vmx_vmwrite(VmcsFields::GUEST_DEBUGCTL as u32, unsafe {
            msr::rdmsr(msr::IA32_DEBUGCTL)
        })?;

        vmx_vmwrite(
            VmcsFields::GUEST_SYSENTER_ESP as u32,
            vmx_vmread(VmcsFields::HOST_SYSENTER_ESP as u32).unwrap(),
        )?;
        vmx_vmwrite(
            VmcsFields::GUEST_SYSENTER_EIP as u32,
            vmx_vmread(VmcsFields::HOST_SYSENTER_EIP as u32).unwrap(),
        )?;

        // Self::vmx_set_cr0();
        vmx_vmwrite(VmcsFields::GUEST_CR3 as u32, 0)?;
        vmx_vmwrite(
            VmcsFields::GUEST_CR4 as u32,
            1, // enable vme
        )?;
        vmx_vmwrite(VmcsFields::GUEST_DR7 as u32, 0x0000_0000_0000_0400)?;
        vmx_vmwrite(
            VmcsFields::GUEST_RSP as u32,
            self.vcpu_ctx.regs[VcpuRegIndex::Rsp as usize] as u64,
        )?;
        vmx_vmwrite(VmcsFields::GUEST_RIP as u32, self.vcpu_ctx.rip as u64)?;
        debug!("vmcs init guest rip: {:#x}", self.vcpu_ctx.rip as u64);
        debug!(
            "vmcs init guest rsp: {:#x}",
            self.vcpu_ctx.regs[VcpuRegIndex::Rsp as usize] as u64
        );

        // vmx_vmwrite(VmcsFields::GUEST_RFLAGS as u32, x86::bits64::rflags::read().bits())?;
        Ok(())
    }

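    /// Initializes the host-state area of the current VMCS from the running CPU:
    /// control registers, segment selectors (with the RPL/TI bits cleared), FS/GS
    /// bases, TR base, GDTR/IDTR bases and the SYSENTER MSRs. This is the state
    /// the processor loads on every VM exit.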
    #[allow(deprecated)]
    pub fn vmcs_init_host(&self) -> Result<(), SystemError> {
        vmx_vmwrite(VmcsFields::HOST_CR0 as u32, unsafe {
            controlregs::cr0().bits().try_into().unwrap()
        })?;
        vmx_vmwrite(VmcsFields::HOST_CR3 as u32, unsafe { controlregs::cr3() })?;
        vmx_vmwrite(VmcsFields::HOST_CR4 as u32, unsafe {
            controlregs::cr4().bits().try_into().unwrap()
        })?;
        vmx_vmwrite(
            VmcsFields::HOST_ES_SELECTOR as u32,
            (segmentation::es().bits() & (!0x07)).into(),
        )?;
        vmx_vmwrite(
            VmcsFields::HOST_CS_SELECTOR as u32,
            (segmentation::cs().bits() & (!0x07)).into(),
        )?;
        vmx_vmwrite(
            VmcsFields::HOST_SS_SELECTOR as u32,
            (segmentation::ss().bits() & (!0x07)).into(),
        )?;
        vmx_vmwrite(
            VmcsFields::HOST_DS_SELECTOR as u32,
            (segmentation::ds().bits() & (!0x07)).into(),
        )?;
        vmx_vmwrite(
            VmcsFields::HOST_FS_SELECTOR as u32,
            (segmentation::fs().bits() & (!0x07)).into(),
        )?;
        vmx_vmwrite(
            VmcsFields::HOST_GS_SELECTOR as u32,
            (segmentation::gs().bits() & (!0x07)).into(),
        )?;
        vmx_vmwrite(VmcsFields::HOST_TR_SELECTOR as u32, unsafe {
            (x86::task::tr().bits() & (!0x07)).into()
        })?;
        vmx_vmwrite(VmcsFields::HOST_FS_BASE as u32, unsafe {
            msr::rdmsr(msr::IA32_FS_BASE)
        })?;
        vmx_vmwrite(VmcsFields::HOST_GS_BASE as u32, unsafe {
            msr::rdmsr(msr::IA32_GS_BASE)
        })?;

        let mut pseudo_descriptor: x86::dtables::DescriptorTablePointer<u64> = Default::default();
        unsafe {
            x86::dtables::sgdt(&mut pseudo_descriptor);
        };

        vmx_vmwrite(
            VmcsFields::HOST_TR_BASE as u32,
            get_segment_base(pseudo_descriptor.base, pseudo_descriptor.limit, unsafe {
                x86::task::tr().bits()
            }),
        )?;
        vmx_vmwrite(
            VmcsFields::HOST_GDTR_BASE as u32,
            pseudo_descriptor.base.to_bits() as u64,
        )?;
        vmx_vmwrite(VmcsFields::HOST_IDTR_BASE as u32, unsafe {
            let mut pseudo_descriptor: x86::dtables::DescriptorTablePointer<u64> =
                Default::default();
            x86::dtables::sidt(&mut pseudo_descriptor);
            pseudo_descriptor.base.to_bits() as u64
        })?;

        // fast entry into the kernel
        vmx_vmwrite(VmcsFields::HOST_SYSENTER_ESP as u32, unsafe {
            msr::rdmsr(msr::IA32_SYSENTER_ESP)
        })?;
        vmx_vmwrite(VmcsFields::HOST_SYSENTER_EIP as u32, unsafe {
            msr::rdmsr(msr::IA32_SYSENTER_EIP)
        })?;
        vmx_vmwrite(VmcsFields::HOST_SYSENTER_CS as u32, unsafe {
            msr::rdmsr(msr::IA32_SYSENTER_CS)
        })?;

        // vmx_vmwrite(VmcsFields::HOST_RIP as u32, vmx_return as *const () as u64)?;
        // debug!("vmcs init host rip: {:#x}", vmx_return as *const () as u64);

        Ok(())
    }

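    /// Fills in the VM-execution, VM-entry and VM-exit control fields (adjusted
    /// against their capability MSRs), points the VMCS at the MSR bitmap, sets the
    /// CR0/CR4 read shadows to the current host values, and then initializes the
    /// host- and guest-state areas.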
    // Intel SDM Volume 3C Chapter 25.3 “Organization of VMCS Data”
    pub fn vmcs_init(&self) -> Result<(), SystemError> {
        vmx_vmwrite(VmcsFields::CTRL_PAGE_FAULT_ERR_CODE_MASK as u32, 0)?;
        vmx_vmwrite(VmcsFields::CTRL_PAGE_FAULT_ERR_CODE_MATCH as u32, 0)?;
        vmx_vmwrite(VmcsFields::CTRL_CR3_TARGET_COUNT as u32, 0)?;

        vmx_vmwrite(
            VmcsFields::CTRL_PIN_BASED_VM_EXEC_CTRLS as u32,
            adjust_vmx_pinbased_controls() as u64,
        )?;

        vmx_vmwrite(
            VmcsFields::CTRL_MSR_BITMAP_ADDR as u32,
            self.data.msr_bitmap_physical_address,
        )?;

        vmx_vmwrite(VmcsFields::CTRL_CR0_READ_SHADOW as u32, unsafe {
            controlregs::cr0().bits().try_into().unwrap()
        })?;
        vmx_vmwrite(VmcsFields::CTRL_CR4_READ_SHADOW as u32, unsafe {
            controlregs::cr4().bits().try_into().unwrap()
        })?;
        vmx_vmwrite(
            VmcsFields::CTRL_VM_ENTRY_CTRLS as u32,
            adjust_vmx_entry_controls() as u64,
        )?;
        vmx_vmwrite(
            VmcsFields::CTRL_PRIMARY_VM_EXIT_CTRLS as u32,
            adjust_vmx_exit_controls() as u64,
        )?;
        vmx_vmwrite(
            VmcsFields::CTRL_PRIMARY_PROCESSOR_VM_EXEC_CTRLS as u32,
            adjust_vmx_primary_process_exec_controls() as u64,
        )?;
        vmx_vmwrite(
            VmcsFields::CTRL_SECONDARY_PROCESSOR_VM_EXEC_CTRLS as u32,
            adjust_vmx_secondary_process_exec_controls() as u64,
        )?;

        self.vmcs_init_host()?;
        self.vmcs_init_guest()?;
        Ok(())
    }

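    /// Builds a fresh EPT paging structure with the page mapper, programs its root
    /// into the VMCS through the MMU's `set_eptp` callback, and records the root
    /// HPA in `self.mmu.root_hpa`.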
    fn kvm_mmu_load(&mut self) -> Result<(), SystemError> {
        debug!("kvm_mmu_load!");
        // Allocate and create a new EPT page table
        let mapper: crate::mm::page::PageMapper<X86_64MMArch, LockedFrameAllocator> = unsafe {
            PageMapper::create(PageTableKind::EPT, LockedFrameAllocator)
                .ok_or(SystemError::ENOMEM)?
        };

        let ept_root_hpa = mapper.table().phys();
        let set_eptp_fn = self.mmu.set_eptp.unwrap();
        set_eptp_fn(ept_root_hpa.data() as u64)?;
        self.mmu.root_hpa = ept_root_hpa.data() as u64;
        debug!("ept_root_hpa:{:x}!", ept_root_hpa.data() as u64);

        return Ok(());
    }

    pub fn set_regs(&mut self, regs: VcpuContextFrame) -> Result<(), SystemError> {
        self.vcpu_ctx = regs;
        Ok(())
    }
}

impl Vcpu for VmxVcpu {
    /// Virtualize the CPU
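    ///
    /// Checks for Intel VMX support, enables VMX operation (CR4.VMXE, the
    /// IA32_FEATURE_CONTROL lock bit and the CR0/CR4 fixed bits), executes VMXON,
    /// makes this vCPU's VMCS current with VMCLEAR + VMPTRLD, initializes the
    /// VMCS and finally loads the EPT-based MMU.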
    fn virtualize_cpu(&mut self) -> Result<(), SystemError> {
        match has_intel_vmx_support() {
            Ok(_) => {
                debug!("[+] CPU supports Intel VMX");
            }
            Err(e) => {
                debug!("[-] CPU does not support Intel VMX: {:?}", e);
                return Err(SystemError::ENOSYS);
            }
        };

        match enable_vmx_operation() {
            Ok(_) => {
                debug!("[+] Enabling Virtual Machine Extensions (VMX)");
            }
            Err(_) => {
                debug!("[-] VMX operation is not supported on this processor.");
                return Err(SystemError::ENOSYS);
            }
        }

        vmxon(self.data.vmxon_region_physical_address)?;
        debug!("[+] VMXON successful!");
        vmx_vmclear(self.data.vmcs_region_physical_address)?;
        vmx_vmptrld(self.data.vmcs_region_physical_address)?;
        debug!("[+] VMPTRLD successful!");
        self.vmcs_init().expect("vmcs_init failed");
        debug!("[+] VMCS init!");
        // debug!("vmcs init host rip: {:#x}", vmx_return as *const () as u64);
        // debug!("vmcs init host rsp: {:#x}", x86::bits64::registers::rsp());
        // vmx_vmwrite(VmcsFields::HOST_RSP as u32, x86::bits64::registers::rsp())?;
        // vmx_vmwrite(VmcsFields::HOST_RIP as u32, vmx_return as *const () as u64)?;
        // vmx_vmwrite(VmcsFields::HOST_RSP as u32,  x86::bits64::registers::rsp())?;
        self.kvm_mmu_load()?;
        Ok(())
    }

    fn devirtualize_cpu(&self) -> Result<(), SystemError> {
        vmxoff()?;
        Ok(())
    }

    /// Gets the index of the current logical/virtual processor
    fn id(&self) -> u32 {
        self.vcpu_id
    }
}

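/// Reads the descriptor selected by `segment_selector` from the descriptor table
/// at `gdt_base`, reassembles its 32-bit base address from the split base fields,
/// and returns that base converted through `phys_2_virt`. The null selector
/// yields 0.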
pub fn get_segment_base(gdt_base: *const u64, gdt_size: u16, segment_selector: u16) -> u64 {
    let table = segment_selector & 0x0004; // get table indicator in selector
    let index = (segment_selector >> 3) as usize; // get index in selector
    if table == 0 && index == 0 {
        return 0;
    }
    let descriptor_table = unsafe { slice::from_raw_parts(gdt_base, gdt_size.into()) };
    let descriptor = descriptor_table[index];

    let base_high = (descriptor & 0xFF00_0000_0000_0000) >> 32;
    let base_mid = (descriptor & 0x0000_00FF_0000_0000) >> 16;
    let base_low = (descriptor & 0x0000_0000_FFFF_0000) >> 16;
    let segment_base = (base_high | base_mid | base_low) & 0xFFFFFFFF;
    let virtaddr = phys_2_virt(segment_base.try_into().unwrap())
        .try_into()
        .unwrap();
    debug!(
        "segment_base={:x}",
        phys_2_virt(segment_base.try_into().unwrap())
    );
    return virtaddr;
}

// FIXME: may have bug
// pub fn read_segment_access_rights(segement_selector: u16) -> u32{
//     let table = segement_selector & 0x0004; // get table indicator in selector
//     let index = segement_selector & 0xFFF8; // get index in selector
//     let mut flag: u16;
//     if table==0 && index==0 {
//         return 0;
//     }
//     unsafe{
//         asm!(
//             "lar {0:r}, rcx",
//             "mov {1:r}, {0:r}",
//             in(reg) segement_selector,
//             out(reg) flag,
//         );
//     }
//     return (flag >> 8) as u32;
// }
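/// Adjusts a VMX control value against its capability MSR: bits that are 0 in the
/// high dword of the MSR (allowed-1 settings) are cleared, and bits that are 1 in
/// the low dword (allowed-0 settings) are forced on. `ctl_min` holds the bits the
/// caller requires, `ctl_opt` the bits it would like to enable; for example,
/// requesting USE_MSR_BITMAPS in `ctl_opt` keeps that bit only if the MSR's
/// allowed-1 half permits it.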
pub fn adjust_vmx_controls(ctl_min: u32, ctl_opt: u32, msr: u32, result: &mut u32) {
    let vmx_msr_low: u32 = unsafe { (msr::rdmsr(msr) & 0x0000_0000_FFFF_FFFF) as u32 };
    let vmx_msr_high: u32 = unsafe { (msr::rdmsr(msr) >> 32) as u32 };
    let mut ctl: u32 = ctl_min | ctl_opt;
    ctl &= vmx_msr_high; /* bit == 0 in high word ==> must be zero */
    ctl |= vmx_msr_low; /* bit == 1 in low word  ==> must be one  */
    *result = ctl;
}

pub fn adjust_vmx_entry_controls() -> u32 {
    let mut entry_controls: u32 = 0;
    adjust_vmx_controls(
        VmxEntryCtrl::LOAD_DBG_CTRLS.bits(),
        VmxEntryCtrl::IA32E_MODE_GUEST.bits(),
        msr::IA32_VMX_ENTRY_CTLS, // Capability Reporting Register of VM-entry Controls (R/O)
        &mut entry_controls,
    );
    return entry_controls;
    // msr::IA32_VMX_TRUE_ENTRY_CTLS // Capability Reporting Register of VM-entry Flex Controls (R/O). See Table 35-2.
}

pub fn adjust_vmx_exit_controls() -> u32 {
    let mut exit_controls: u32 = 0;
    adjust_vmx_controls(
        VmxPrimaryExitCtrl::SAVE_DBG_CTRLS.bits(),
        VmxPrimaryExitCtrl::HOST_ADDR_SPACE_SIZE.bits(),
        msr::IA32_VMX_EXIT_CTLS,
        &mut exit_controls,
    );
    return exit_controls;
}

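/// Returns the pin-based VM-execution controls with no extra bits requested,
/// i.e. just the settings mandated by IA32_VMX_TRUE_PINBASED_CTLS (the initial
/// value of `controls` is overwritten by `adjust_vmx_controls`).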
pub fn adjust_vmx_pinbased_controls() -> u32 {
    let mut controls: u32 = 16;
    adjust_vmx_controls(0, 0, msr::IA32_VMX_TRUE_PINBASED_CTLS, &mut controls);
    // debug!("adjust_vmx_pinbased_controls: {:x}", controls);
    return controls;
}

pub fn adjust_vmx_primary_process_exec_controls() -> u32 {
    let mut controls: u32 = 0;
    adjust_vmx_controls(
        0,
        VmxPrimaryProcessBasedExecuteCtrl::USE_MSR_BITMAPS.bits()
            | VmxPrimaryProcessBasedExecuteCtrl::ACTIVATE_SECONDARY_CONTROLS.bits(),
        msr::IA32_VMX_PROCBASED_CTLS,
        &mut controls,
    );
    return controls;
}

pub fn adjust_vmx_secondary_process_exec_controls() -> u32 {
    let mut controls: u32 = 0;
    adjust_vmx_controls(
        0,
        VmxSecondaryProcessBasedExecuteCtrl::ENABLE_RDTSCP.bits()
            | VmxSecondaryProcessBasedExecuteCtrl::ENABLE_XSAVES_XRSTORS.bits()
            | VmxSecondaryProcessBasedExecuteCtrl::ENABLE_INVPCID.bits()
            | VmxSecondaryProcessBasedExecuteCtrl::ENABLE_EPT.bits()
            | VmxSecondaryProcessBasedExecuteCtrl::UNRESTRICTED_GUEST.bits(),
        msr::IA32_VMX_PROCBASED_CTLS2,
        &mut controls,
    );
    return controls;
}

/// Check whether the CPU is Intel ("GenuineIntel") and whether the processor
/// supports Virtual Machine Extension (VMX) technology:
/// CPUID.1:ECX.VMX[bit 5] = 1 (Intel Manual: 24.6 Discovering Support for VMX)
pub fn has_intel_vmx_support() -> Result<(), SystemError> {
    let cpuid = CpuId::new();
    if let Some(vi) = cpuid.get_vendor_info() {
        if vi.as_str() != "GenuineIntel" {
            return Err(SystemError::ENOSYS);
        }
    }
    if let Some(fi) = cpuid.get_feature_info() {
        if !fi.has_vmx() {
            return Err(SystemError::ENOSYS);
        }
    }
    Ok(())
}

/// Enables Virtual Machine Extensions:
/// CR4.VMXE[bit 13] = 1 (Intel Manual: 24.7 Enabling and Entering VMX Operation)
pub fn enable_vmx_operation() -> Result<(), SystemError> {
    let mut cr4 = unsafe { controlregs::cr4() };
    cr4.set(controlregs::Cr4::CR4_ENABLE_VMX, true);
    unsafe { controlregs::cr4_write(cr4) };

    set_lock_bit()?;
    debug!("[+] Lock bit set via IA32_FEATURE_CONTROL");
    set_cr0_bits();
    debug!("[+] Mandatory bits in CR0 set/cleared");
    set_cr4_bits();
    debug!("[+] Mandatory bits in CR4 set/cleared");

    Ok(())
}

/// Check whether the required bits in IA32_FEATURE_CONTROL are set, and set
/// them (lock bit + VMXON outside SMX) if the MSR is not yet locked.
/// (Intel Manual: 24.7 Enabling and Entering VMX Operation)
fn set_lock_bit() -> Result<(), SystemError> {
    const VMX_LOCK_BIT: u64 = 1 << 0;
    const VMXON_OUTSIDE_SMX: u64 = 1 << 2;

    let ia32_feature_control = unsafe { msr::rdmsr(msr::IA32_FEATURE_CONTROL) };

    if (ia32_feature_control & VMX_LOCK_BIT) == 0 {
        unsafe {
            msr::wrmsr(
                msr::IA32_FEATURE_CONTROL,
                VMXON_OUTSIDE_SMX | VMX_LOCK_BIT | ia32_feature_control,
            )
        };
    } else if (ia32_feature_control & VMXON_OUTSIDE_SMX) == 0 {
        return Err(SystemError::EPERM);
    }

    Ok(())
}

/// Set the mandatory bits in CR0 and clear bits that are mandatory zero
/// (Intel Manual: 24.8 Restrictions on VMX Operation)
fn set_cr0_bits() {
    let ia32_vmx_cr0_fixed0 = unsafe { msr::rdmsr(msr::IA32_VMX_CR0_FIXED0) };
    let ia32_vmx_cr0_fixed1 = unsafe { msr::rdmsr(msr::IA32_VMX_CR0_FIXED1) };

    let mut cr0 = unsafe { controlregs::cr0() };

    cr0 |= controlregs::Cr0::from_bits_truncate(ia32_vmx_cr0_fixed0 as usize);
    cr0 &= controlregs::Cr0::from_bits_truncate(ia32_vmx_cr0_fixed1 as usize);

    unsafe { controlregs::cr0_write(cr0) };
}

/// Set the mandatory bits in CR4 and clear bits that are mandatory zero
/// (Intel Manual: 24.8 Restrictions on VMX Operation)
fn set_cr4_bits() {
    let ia32_vmx_cr4_fixed0 = unsafe { msr::rdmsr(msr::IA32_VMX_CR4_FIXED0) };
    let ia32_vmx_cr4_fixed1 = unsafe { msr::rdmsr(msr::IA32_VMX_CR4_FIXED1) };

    let mut cr4 = unsafe { controlregs::cr4() };

    cr4 |= controlregs::Cr4::from_bits_truncate(ia32_vmx_cr4_fixed0 as usize);
    cr4 &= controlregs::Cr4::from_bits_truncate(ia32_vmx_cr4_fixed1 as usize);

    unsafe { controlregs::cr4_write(cr4) };
}
655