// SPDX-License-Identifier: GPL-2.0-only
/*
 * tools/testing/selftests/kvm/lib/x86_64/vmx.c
 *
 * Copyright (C) 2018, Google LLC.
 */

#include <asm/msr-index.h>

#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"
#include "vmx.h"

#define PAGE_SHIFT_4K  12

#define KVM_EPT_PAGE_TABLE_MIN_PADDR 0x1c0000

bool enable_evmcs;

struct hv_enlightened_vmcs *current_evmcs;
struct hv_vp_assist_page *current_vp_assist;

struct eptPageTableEntry {
        uint64_t readable:1;
        uint64_t writable:1;
        uint64_t executable:1;
        uint64_t memory_type:3;
        uint64_t ignore_pat:1;
        uint64_t page_size:1;
        uint64_t accessed:1;
        uint64_t dirty:1;
        uint64_t ignored_11_10:2;
        uint64_t address:40;
        uint64_t ignored_62_52:11;
        uint64_t suppress_ve:1;
};

struct eptPageTablePointer {
        uint64_t memory_type:3;
        uint64_t page_walk_length:3;
        uint64_t ad_enabled:1;
        uint64_t reserved_11_07:5;
        uint64_t address:40;
        uint64_t reserved_63_52:12;
};
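
/*
 * Illustrative decode (not used by the library): with the bit-field layout
 * above, an EPTP value of 0x5e (an assumed example) has memory_type = 6
 * (write-back), page_walk_length = 3 (i.e. a 4-level walk) and
 * ad_enabled = 1, with the EPT root table address taken from bits 51:12.
 */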

int vcpu_enable_evmcs(struct kvm_vcpu *vcpu)
{
        uint16_t evmcs_ver;

        vcpu_enable_cap(vcpu, KVM_CAP_HYPERV_ENLIGHTENED_VMCS,
                        (unsigned long)&evmcs_ver);

        /* KVM should return supported EVMCS version range */
        TEST_ASSERT(((evmcs_ver >> 8) >= (evmcs_ver & 0xff)) &&
                    (evmcs_ver & 0xff) > 0,
                    "Incorrect EVMCS version range: %x:%x\n",
                    evmcs_ver & 0xff, evmcs_ver >> 8);

        return evmcs_ver;
}
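
/*
 * Typical usage (a minimal sketch; exact placement varies by test): a test
 * that wants the enlightened VMCS path enables the capability on the vCPU
 * and sets the global enable_evmcs flag consumed by load_vmcs(), e.g.:
 *
 *      enable_evmcs = true;
 *      vcpu_enable_evmcs(vcpu);
 */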

/* Allocate memory regions for nested VMX tests.
 *
 * Input Args:
 *   vm - The VM to allocate guest-virtual addresses in.
 *
 * Output Args:
 *   p_vmx_gva - The guest virtual address for the struct vmx_pages.
 *
 * Return:
 *   Pointer to structure with the addresses of the VMX areas.
 */
struct vmx_pages *
vcpu_alloc_vmx(struct kvm_vm *vm, vm_vaddr_t *p_vmx_gva)
{
        vm_vaddr_t vmx_gva = vm_vaddr_alloc_page(vm);
        struct vmx_pages *vmx = addr_gva2hva(vm, vmx_gva);

        /* Setup of a region of guest memory for the vmxon region. */
        vmx->vmxon = (void *)vm_vaddr_alloc_page(vm);
        vmx->vmxon_hva = addr_gva2hva(vm, (uintptr_t)vmx->vmxon);
        vmx->vmxon_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->vmxon);

        /* Setup of a region of guest memory for a vmcs. */
        vmx->vmcs = (void *)vm_vaddr_alloc_page(vm);
        vmx->vmcs_hva = addr_gva2hva(vm, (uintptr_t)vmx->vmcs);
        vmx->vmcs_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->vmcs);

        /* Setup of a region of guest memory for the MSR bitmap. */
        vmx->msr = (void *)vm_vaddr_alloc_page(vm);
        vmx->msr_hva = addr_gva2hva(vm, (uintptr_t)vmx->msr);
        vmx->msr_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->msr);
        memset(vmx->msr_hva, 0, getpagesize());

        /* Setup of a region of guest memory for the shadow VMCS. */
        vmx->shadow_vmcs = (void *)vm_vaddr_alloc_page(vm);
        vmx->shadow_vmcs_hva = addr_gva2hva(vm, (uintptr_t)vmx->shadow_vmcs);
        vmx->shadow_vmcs_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->shadow_vmcs);

        /* Setup of a region of guest memory for the VMREAD and VMWRITE bitmaps. */
        vmx->vmread = (void *)vm_vaddr_alloc_page(vm);
        vmx->vmread_hva = addr_gva2hva(vm, (uintptr_t)vmx->vmread);
        vmx->vmread_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->vmread);
        memset(vmx->vmread_hva, 0, getpagesize());

        vmx->vmwrite = (void *)vm_vaddr_alloc_page(vm);
        vmx->vmwrite_hva = addr_gva2hva(vm, (uintptr_t)vmx->vmwrite);
        vmx->vmwrite_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->vmwrite);
        memset(vmx->vmwrite_hva, 0, getpagesize());

        /* Setup of a region of guest memory for the VP Assist page. */
        vmx->vp_assist = (void *)vm_vaddr_alloc_page(vm);
        vmx->vp_assist_hva = addr_gva2hva(vm, (uintptr_t)vmx->vp_assist);
        vmx->vp_assist_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->vp_assist);

        /* Setup of a region of guest memory for the enlightened VMCS. */
        vmx->enlightened_vmcs = (void *)vm_vaddr_alloc_page(vm);
        vmx->enlightened_vmcs_hva =
                addr_gva2hva(vm, (uintptr_t)vmx->enlightened_vmcs);
        vmx->enlightened_vmcs_gpa =
                addr_gva2gpa(vm, (uintptr_t)vmx->enlightened_vmcs);

        *p_vmx_gva = vmx_gva;
        return vmx;
}
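
/*
 * Example flow (a sketch, assuming the usual selftest structure): the host
 * side allocates the VMX pages and passes the guest-virtual address of the
 * struct vmx_pages to the guest, e.g. via vcpu_args_set():
 *
 *      vm_vaddr_t vmx_gva;
 *
 *      vcpu_alloc_vmx(vm, &vmx_gva);
 *      vcpu_args_set(vcpu, 1, vmx_gva);
 */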

bool prepare_for_vmx_operation(struct vmx_pages *vmx)
{
        uint64_t feature_control;
        uint64_t required;
        unsigned long cr0;
        unsigned long cr4;

        /*
         * Ensure bits in CR0 and CR4 are valid in VMX operation:
         * - Bit X is 1 in _FIXED0: bit X is fixed to 1 in CRx.
         * - Bit X is 0 in _FIXED1: bit X is fixed to 0 in CRx.
         */
        __asm__ __volatile__("mov %%cr0, %0" : "=r"(cr0) : : "memory");
        cr0 &= rdmsr(MSR_IA32_VMX_CR0_FIXED1);
        cr0 |= rdmsr(MSR_IA32_VMX_CR0_FIXED0);
        __asm__ __volatile__("mov %0, %%cr0" : : "r"(cr0) : "memory");

        __asm__ __volatile__("mov %%cr4, %0" : "=r"(cr4) : : "memory");
        cr4 &= rdmsr(MSR_IA32_VMX_CR4_FIXED1);
        cr4 |= rdmsr(MSR_IA32_VMX_CR4_FIXED0);
        /* Enable VMX operation */
        cr4 |= X86_CR4_VMXE;
        __asm__ __volatile__("mov %0, %%cr4" : : "r"(cr4) : "memory");

        /*
         * Configure IA32_FEATURE_CONTROL MSR to allow VMXON:
         *  Bit 0: Lock bit. If clear, VMXON causes a #GP.
         *  Bit 2: Enables VMXON outside of SMX operation. If clear, VMXON
         *    outside of SMX causes a #GP.
         */
        required = FEAT_CTL_VMX_ENABLED_OUTSIDE_SMX;
        required |= FEAT_CTL_LOCKED;
        feature_control = rdmsr(MSR_IA32_FEAT_CTL);
        if ((feature_control & required) != required)
                wrmsr(MSR_IA32_FEAT_CTL, feature_control | required);

        /* Enter VMX root operation. */
        *(uint32_t *)(vmx->vmxon) = vmcs_revision();
        if (vmxon(vmx->vmxon_gpa))
                return false;

        return true;
}

bool load_vmcs(struct vmx_pages *vmx)
{
        if (!enable_evmcs) {
                /* Load a VMCS. */
                *(uint32_t *)(vmx->vmcs) = vmcs_revision();
                if (vmclear(vmx->vmcs_gpa))
                        return false;

                if (vmptrld(vmx->vmcs_gpa))
                        return false;

                /* Setup shadow VMCS, do not load it yet. */
                *(uint32_t *)(vmx->shadow_vmcs) =
                        vmcs_revision() | 0x80000000ul;
                if (vmclear(vmx->shadow_vmcs_gpa))
                        return false;
        } else {
                if (evmcs_vmptrld(vmx->enlightened_vmcs_gpa,
                                  vmx->enlightened_vmcs))
                        return false;
                current_evmcs->revision_id = EVMCS_VERSION;
        }

        return true;
}

static bool ept_vpid_cap_supported(uint64_t mask)
{
        return rdmsr(MSR_IA32_VMX_EPT_VPID_CAP) & mask;
}

bool ept_1g_pages_supported(void)
{
        return ept_vpid_cap_supported(VMX_EPT_VPID_CAP_1G_PAGES);
}

/*
 * Initialize the control fields to the most basic settings possible.
 */
static inline void init_vmcs_control_fields(struct vmx_pages *vmx)
{
        uint32_t sec_exec_ctl = 0;

        vmwrite(VIRTUAL_PROCESSOR_ID, 0);
        vmwrite(POSTED_INTR_NV, 0);

        vmwrite(PIN_BASED_VM_EXEC_CONTROL, rdmsr(MSR_IA32_VMX_TRUE_PINBASED_CTLS));

        if (vmx->eptp_gpa) {
                uint64_t ept_paddr;
                struct eptPageTablePointer eptp = {
                        .memory_type = VMX_BASIC_MEM_TYPE_WB,
                        .page_walk_length = 3, /* + 1 */
                        .ad_enabled = ept_vpid_cap_supported(VMX_EPT_VPID_CAP_AD_BITS),
                        .address = vmx->eptp_gpa >> PAGE_SHIFT_4K,
                };

                memcpy(&ept_paddr, &eptp, sizeof(ept_paddr));
                vmwrite(EPT_POINTER, ept_paddr);
                sec_exec_ctl |= SECONDARY_EXEC_ENABLE_EPT;
        }

        if (!vmwrite(SECONDARY_VM_EXEC_CONTROL, sec_exec_ctl))
                vmwrite(CPU_BASED_VM_EXEC_CONTROL,
                        rdmsr(MSR_IA32_VMX_TRUE_PROCBASED_CTLS) | CPU_BASED_ACTIVATE_SECONDARY_CONTROLS);
        else {
                vmwrite(CPU_BASED_VM_EXEC_CONTROL, rdmsr(MSR_IA32_VMX_TRUE_PROCBASED_CTLS));
                GUEST_ASSERT(!sec_exec_ctl);
        }

        vmwrite(EXCEPTION_BITMAP, 0);
        vmwrite(PAGE_FAULT_ERROR_CODE_MASK, 0);
        vmwrite(PAGE_FAULT_ERROR_CODE_MATCH, -1); /* Never match */
        vmwrite(CR3_TARGET_COUNT, 0);
        vmwrite(VM_EXIT_CONTROLS, rdmsr(MSR_IA32_VMX_EXIT_CTLS) |
                VM_EXIT_HOST_ADDR_SPACE_SIZE);    /* 64-bit host */
        vmwrite(VM_EXIT_MSR_STORE_COUNT, 0);
        vmwrite(VM_EXIT_MSR_LOAD_COUNT, 0);
        vmwrite(VM_ENTRY_CONTROLS, rdmsr(MSR_IA32_VMX_ENTRY_CTLS) |
                VM_ENTRY_IA32E_MODE);             /* 64-bit guest */
        vmwrite(VM_ENTRY_MSR_LOAD_COUNT, 0);
        vmwrite(VM_ENTRY_INTR_INFO_FIELD, 0);
        vmwrite(TPR_THRESHOLD, 0);

        vmwrite(CR0_GUEST_HOST_MASK, 0);
        vmwrite(CR4_GUEST_HOST_MASK, 0);
        vmwrite(CR0_READ_SHADOW, get_cr0());
        vmwrite(CR4_READ_SHADOW, get_cr4());

        vmwrite(MSR_BITMAP, vmx->msr_gpa);
        vmwrite(VMREAD_BITMAP, vmx->vmread_gpa);
        vmwrite(VMWRITE_BITMAP, vmx->vmwrite_gpa);
}

/*
 * Initialize the host state fields based on the current host state, with
 * the exception of HOST_RSP and HOST_RIP, which should be set by vmlaunch
 * or vmresume.
 */
static inline void init_vmcs_host_state(void)
{
        uint32_t exit_controls = vmreadz(VM_EXIT_CONTROLS);

        vmwrite(HOST_ES_SELECTOR, get_es());
        vmwrite(HOST_CS_SELECTOR, get_cs());
        vmwrite(HOST_SS_SELECTOR, get_ss());
        vmwrite(HOST_DS_SELECTOR, get_ds());
        vmwrite(HOST_FS_SELECTOR, get_fs());
        vmwrite(HOST_GS_SELECTOR, get_gs());
        vmwrite(HOST_TR_SELECTOR, get_tr());

        if (exit_controls & VM_EXIT_LOAD_IA32_PAT)
                vmwrite(HOST_IA32_PAT, rdmsr(MSR_IA32_CR_PAT));
        if (exit_controls & VM_EXIT_LOAD_IA32_EFER)
                vmwrite(HOST_IA32_EFER, rdmsr(MSR_EFER));
        if (exit_controls & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL)
                vmwrite(HOST_IA32_PERF_GLOBAL_CTRL,
                        rdmsr(MSR_CORE_PERF_GLOBAL_CTRL));

        vmwrite(HOST_IA32_SYSENTER_CS, rdmsr(MSR_IA32_SYSENTER_CS));

        vmwrite(HOST_CR0, get_cr0());
        vmwrite(HOST_CR3, get_cr3());
        vmwrite(HOST_CR4, get_cr4());
        vmwrite(HOST_FS_BASE, rdmsr(MSR_FS_BASE));
        vmwrite(HOST_GS_BASE, rdmsr(MSR_GS_BASE));
        vmwrite(HOST_TR_BASE,
                get_desc64_base((struct desc64 *)(get_gdt().address + get_tr())));
        vmwrite(HOST_GDTR_BASE, get_gdt().address);
        vmwrite(HOST_IDTR_BASE, get_idt().address);
        vmwrite(HOST_IA32_SYSENTER_ESP, rdmsr(MSR_IA32_SYSENTER_ESP));
        vmwrite(HOST_IA32_SYSENTER_EIP, rdmsr(MSR_IA32_SYSENTER_EIP));
}

/*
 * Initialize the guest state fields essentially as a clone of
 * the host state fields. Some host state fields have fixed
 * values, and we set the corresponding guest state fields accordingly.
 */
static inline void init_vmcs_guest_state(void *rip, void *rsp)
{
        vmwrite(GUEST_ES_SELECTOR, vmreadz(HOST_ES_SELECTOR));
        vmwrite(GUEST_CS_SELECTOR, vmreadz(HOST_CS_SELECTOR));
        vmwrite(GUEST_SS_SELECTOR, vmreadz(HOST_SS_SELECTOR));
        vmwrite(GUEST_DS_SELECTOR, vmreadz(HOST_DS_SELECTOR));
        vmwrite(GUEST_FS_SELECTOR, vmreadz(HOST_FS_SELECTOR));
        vmwrite(GUEST_GS_SELECTOR, vmreadz(HOST_GS_SELECTOR));
        vmwrite(GUEST_LDTR_SELECTOR, 0);
        vmwrite(GUEST_TR_SELECTOR, vmreadz(HOST_TR_SELECTOR));
        vmwrite(GUEST_INTR_STATUS, 0);
        vmwrite(GUEST_PML_INDEX, 0);

        vmwrite(VMCS_LINK_POINTER, -1ll);
        vmwrite(GUEST_IA32_DEBUGCTL, 0);
        vmwrite(GUEST_IA32_PAT, vmreadz(HOST_IA32_PAT));
        vmwrite(GUEST_IA32_EFER, vmreadz(HOST_IA32_EFER));
        vmwrite(GUEST_IA32_PERF_GLOBAL_CTRL,
                vmreadz(HOST_IA32_PERF_GLOBAL_CTRL));

        vmwrite(GUEST_ES_LIMIT, -1);
        vmwrite(GUEST_CS_LIMIT, -1);
        vmwrite(GUEST_SS_LIMIT, -1);
        vmwrite(GUEST_DS_LIMIT, -1);
        vmwrite(GUEST_FS_LIMIT, -1);
        vmwrite(GUEST_GS_LIMIT, -1);
        vmwrite(GUEST_LDTR_LIMIT, -1);
        vmwrite(GUEST_TR_LIMIT, 0x67);
        vmwrite(GUEST_GDTR_LIMIT, 0xffff);
        vmwrite(GUEST_IDTR_LIMIT, 0xffff);
        vmwrite(GUEST_ES_AR_BYTES,
                vmreadz(GUEST_ES_SELECTOR) == 0 ? 0x10000 : 0xc093);
        vmwrite(GUEST_CS_AR_BYTES, 0xa09b);
        vmwrite(GUEST_SS_AR_BYTES, 0xc093);
        vmwrite(GUEST_DS_AR_BYTES,
                vmreadz(GUEST_DS_SELECTOR) == 0 ? 0x10000 : 0xc093);
        vmwrite(GUEST_FS_AR_BYTES,
                vmreadz(GUEST_FS_SELECTOR) == 0 ? 0x10000 : 0xc093);
        vmwrite(GUEST_GS_AR_BYTES,
                vmreadz(GUEST_GS_SELECTOR) == 0 ? 0x10000 : 0xc093);
        vmwrite(GUEST_LDTR_AR_BYTES, 0x10000);
        vmwrite(GUEST_TR_AR_BYTES, 0x8b);
        vmwrite(GUEST_INTERRUPTIBILITY_INFO, 0);
        vmwrite(GUEST_ACTIVITY_STATE, 0);
        vmwrite(GUEST_SYSENTER_CS, vmreadz(HOST_IA32_SYSENTER_CS));
        vmwrite(VMX_PREEMPTION_TIMER_VALUE, 0);

        vmwrite(GUEST_CR0, vmreadz(HOST_CR0));
        vmwrite(GUEST_CR3, vmreadz(HOST_CR3));
        vmwrite(GUEST_CR4, vmreadz(HOST_CR4));
        vmwrite(GUEST_ES_BASE, 0);
        vmwrite(GUEST_CS_BASE, 0);
        vmwrite(GUEST_SS_BASE, 0);
        vmwrite(GUEST_DS_BASE, 0);
        vmwrite(GUEST_FS_BASE, vmreadz(HOST_FS_BASE));
        vmwrite(GUEST_GS_BASE, vmreadz(HOST_GS_BASE));
        vmwrite(GUEST_LDTR_BASE, 0);
        vmwrite(GUEST_TR_BASE, vmreadz(HOST_TR_BASE));
        vmwrite(GUEST_GDTR_BASE, vmreadz(HOST_GDTR_BASE));
        vmwrite(GUEST_IDTR_BASE, vmreadz(HOST_IDTR_BASE));
        vmwrite(GUEST_DR7, 0x400);
        vmwrite(GUEST_RSP, (uint64_t)rsp);
        vmwrite(GUEST_RIP, (uint64_t)rip);
        vmwrite(GUEST_RFLAGS, 2);
        vmwrite(GUEST_PENDING_DBG_EXCEPTIONS, 0);
        vmwrite(GUEST_SYSENTER_ESP, vmreadz(HOST_IA32_SYSENTER_ESP));
        vmwrite(GUEST_SYSENTER_EIP, vmreadz(HOST_IA32_SYSENTER_EIP));
}

void prepare_vmcs(struct vmx_pages *vmx, void *guest_rip, void *guest_rsp)
{
        init_vmcs_control_fields(vmx);
        init_vmcs_host_state();
        init_vmcs_guest_state(guest_rip, guest_rsp);
}
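
/*
 * Typical guest-side flow (a minimal sketch): a guest_code() that received
 * the vmx_pages pointer, with an illustrative l2_guest_code() and stack
 * (L2_GUEST_STACK_SIZE is a per-test constant, not defined here), would do:
 *
 *      GUEST_ASSERT(prepare_for_vmx_operation(vmx));
 *      GUEST_ASSERT(load_vmcs(vmx));
 *      prepare_vmcs(vmx, l2_guest_code, &l2_guest_stack[L2_GUEST_STACK_SIZE]);
 *      GUEST_ASSERT(!vmlaunch());
 */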

static void nested_create_pte(struct kvm_vm *vm,
                              struct eptPageTableEntry *pte,
                              uint64_t nested_paddr,
                              uint64_t paddr,
                              int current_level,
                              int target_level)
{
        if (!pte->readable) {
                pte->writable = true;
                pte->readable = true;
                pte->executable = true;
                pte->page_size = (current_level == target_level);
                if (pte->page_size)
                        pte->address = paddr >> vm->page_shift;
                else
                        pte->address = vm_alloc_page_table(vm) >> vm->page_shift;
        } else {
                /*
                 * Entry already present.  Assert that the caller doesn't want
                 * a hugepage at this level, and that there isn't a hugepage at
                 * this level.
                 */
                TEST_ASSERT(current_level != target_level,
                            "Cannot create hugepage at level: %u, nested_paddr: 0x%lx\n",
                            current_level, nested_paddr);
                TEST_ASSERT(!pte->page_size,
                            "Cannot create page table at level: %u, nested_paddr: 0x%lx\n",
                            current_level, nested_paddr);
        }
}

void __nested_pg_map(struct vmx_pages *vmx, struct kvm_vm *vm,
                     uint64_t nested_paddr, uint64_t paddr, int target_level)
{
        const uint64_t page_size = PG_LEVEL_SIZE(target_level);
        struct eptPageTableEntry *pt = vmx->eptp_hva, *pte;
        uint16_t index;

        TEST_ASSERT(vm->mode == VM_MODE_PXXV48_4K, "Attempt to use "
                    "unknown or unsupported guest mode, mode: 0x%x", vm->mode);

        TEST_ASSERT((nested_paddr >> 48) == 0,
                    "Nested physical address 0x%lx requires 5-level paging",
                    nested_paddr);
        TEST_ASSERT((nested_paddr % page_size) == 0,
                    "Nested physical address not on page boundary,\n"
                    "  nested_paddr: 0x%lx page_size: 0x%lx",
                    nested_paddr, page_size);
        TEST_ASSERT((nested_paddr >> vm->page_shift) <= vm->max_gfn,
                    "Nested physical address beyond maximum supported,\n"
                    "  nested_paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
                    nested_paddr, vm->max_gfn, vm->page_size);
        TEST_ASSERT((paddr % page_size) == 0,
                    "Physical address not on page boundary,\n"
                    "  paddr: 0x%lx page_size: 0x%lx",
                    paddr, page_size);
        TEST_ASSERT((paddr >> vm->page_shift) <= vm->max_gfn,
                    "Physical address beyond maximum supported,\n"
                    "  paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
                    paddr, vm->max_gfn, vm->page_size);

        for (int level = PG_LEVEL_512G; level >= PG_LEVEL_4K; level--) {
                index = (nested_paddr >> PG_LEVEL_SHIFT(level)) & 0x1ffu;
                pte = &pt[index];

                nested_create_pte(vm, pte, nested_paddr, paddr, level, target_level);

                if (pte->page_size)
                        break;

                pt = addr_gpa2hva(vm, pte->address * vm->page_size);
        }

        /*
         * For now mark these as accessed and dirty because the only
         * testcase we have needs that.  Can be reconsidered later.
         */
        pte->accessed = true;
        pte->dirty = true;
}

void nested_pg_map(struct vmx_pages *vmx, struct kvm_vm *vm,
                   uint64_t nested_paddr, uint64_t paddr)
{
        __nested_pg_map(vmx, vm, nested_paddr, paddr, PG_LEVEL_4K);
}

/*
 * Map a range of EPT guest physical addresses to the VM's physical address
 *
 * Input Args:
 *   vm - Virtual Machine
 *   nested_paddr - Nested guest physical address to map
 *   paddr - VM Physical Address
 *   size - The size of the range to map
 *   level - The level at which to map the range
 *
 * Output Args: None
 *
 * Return: None
 *
 * Within the VM given by vm, creates a nested guest translation for the
 * page range starting at nested_paddr to the page range starting at paddr.
 */
void __nested_map(struct vmx_pages *vmx, struct kvm_vm *vm,
                  uint64_t nested_paddr, uint64_t paddr, uint64_t size,
                  int level)
{
        size_t page_size = PG_LEVEL_SIZE(level);
        size_t npages = size / page_size;

        TEST_ASSERT(nested_paddr + size > nested_paddr, "Vaddr overflow");
        TEST_ASSERT(paddr + size > paddr, "Paddr overflow");

        while (npages--) {
                __nested_pg_map(vmx, vm, nested_paddr, paddr, level);
                nested_paddr += page_size;
                paddr += page_size;
        }
}

void nested_map(struct vmx_pages *vmx, struct kvm_vm *vm,
                uint64_t nested_paddr, uint64_t paddr, uint64_t size)
{
        __nested_map(vmx, vm, nested_paddr, paddr, size, PG_LEVEL_4K);
}
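
/*
 * For example (illustrative, page-aligned values): identity-mapping a single
 * 4KiB page at nested guest physical address 0x10000 would be
 *
 *      nested_map(vmx, vm, 0x10000, 0x10000, 0x1000);
 */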

/* Prepare an identity extended page table that maps all the
 * physical pages in the VM.
 */
void nested_map_memslot(struct vmx_pages *vmx, struct kvm_vm *vm,
                        uint32_t memslot)
{
        sparsebit_idx_t i, last;
        struct userspace_mem_region *region =
                memslot2region(vm, memslot);

        i = (region->region.guest_phys_addr >> vm->page_shift) - 1;
        last = i + (region->region.memory_size >> vm->page_shift);
        for (;;) {
                i = sparsebit_next_clear(region->unused_phy_pages, i);
                if (i > last)
                        break;

                nested_map(vmx, vm,
                           (uint64_t)i << vm->page_shift,
                           (uint64_t)i << vm->page_shift,
                           1 << vm->page_shift);
        }
}

/* Identity map a region with 1GiB Pages. */
void nested_identity_map_1g(struct vmx_pages *vmx, struct kvm_vm *vm,
                            uint64_t addr, uint64_t size)
{
        __nested_map(vmx, vm, addr, addr, size, PG_LEVEL_1G);
}

bool kvm_vm_has_ept(struct kvm_vm *vm)
{
        struct kvm_vcpu *vcpu;
        uint64_t ctrl;

        vcpu = list_first_entry(&vm->vcpus, struct kvm_vcpu, list);
        TEST_ASSERT(vcpu, "Cannot determine EPT support without vCPUs.\n");

        ctrl = vcpu_get_msr(vcpu, MSR_IA32_VMX_TRUE_PROCBASED_CTLS) >> 32;
        if (!(ctrl & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS))
                return false;

        ctrl = vcpu_get_msr(vcpu, MSR_IA32_VMX_PROCBASED_CTLS2) >> 32;
        return ctrl & SECONDARY_EXEC_ENABLE_EPT;
}

void prepare_eptp(struct vmx_pages *vmx, struct kvm_vm *vm,
                  uint32_t eptp_memslot)
{
        TEST_REQUIRE(kvm_vm_has_ept(vm));

        vmx->eptp = (void *)vm_vaddr_alloc_page(vm);
        vmx->eptp_hva = addr_gva2hva(vm, (uintptr_t)vmx->eptp);
        vmx->eptp_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->eptp);
}
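
/*
 * Typical host-side EPT setup (a sketch, assuming memslot 0 holds the guest's
 * memory and an identity mapping is desired for L2):
 *
 *      prepare_eptp(vmx, vm, 0);
 *      nested_map_memslot(vmx, vm, 0);
 */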

void prepare_virtualize_apic_accesses(struct vmx_pages *vmx, struct kvm_vm *vm)
{
        vmx->apic_access = (void *)vm_vaddr_alloc_page(vm);
        vmx->apic_access_hva = addr_gva2hva(vm, (uintptr_t)vmx->apic_access);
        vmx->apic_access_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->apic_access);
}