// SPDX-License-Identifier: GPL-2.0-only
/*
 * tools/testing/selftests/kvm/lib/x86_64/processor.c
 *
 * Copyright (C) 2018, Google LLC.
 */

#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"

#ifndef NUM_INTERRUPTS
#define NUM_INTERRUPTS 256
#endif

#define DEFAULT_CODE_SELECTOR 0x8
#define DEFAULT_DATA_SELECTOR 0x10

#define MAX_NR_CPUID_ENTRIES 100

vm_vaddr_t exception_handlers;

static void regs_dump(FILE *stream, struct kvm_regs *regs, uint8_t indent)
{
	fprintf(stream, "%*srax: 0x%.16llx rbx: 0x%.16llx "
		"rcx: 0x%.16llx rdx: 0x%.16llx\n",
		indent, "",
		regs->rax, regs->rbx, regs->rcx, regs->rdx);
	fprintf(stream, "%*srsi: 0x%.16llx rdi: 0x%.16llx "
		"rsp: 0x%.16llx rbp: 0x%.16llx\n",
		indent, "",
		regs->rsi, regs->rdi, regs->rsp, regs->rbp);
	fprintf(stream, "%*sr8: 0x%.16llx r9: 0x%.16llx "
		"r10: 0x%.16llx r11: 0x%.16llx\n",
		indent, "",
		regs->r8, regs->r9, regs->r10, regs->r11);
	fprintf(stream, "%*sr12: 0x%.16llx r13: 0x%.16llx "
		"r14: 0x%.16llx r15: 0x%.16llx\n",
		indent, "",
		regs->r12, regs->r13, regs->r14, regs->r15);
	fprintf(stream, "%*srip: 0x%.16llx rfl: 0x%.16llx\n",
		indent, "",
		regs->rip, regs->rflags);
}

static void segment_dump(FILE *stream, struct kvm_segment *segment,
			 uint8_t indent)
{
	fprintf(stream, "%*sbase: 0x%.16llx limit: 0x%.8x "
		"selector: 0x%.4x type: 0x%.2x\n",
		indent, "", segment->base, segment->limit,
		segment->selector, segment->type);
	fprintf(stream, "%*spresent: 0x%.2x dpl: 0x%.2x "
		"db: 0x%.2x s: 0x%.2x l: 0x%.2x\n",
		indent, "", segment->present, segment->dpl,
		segment->db, segment->s, segment->l);
	fprintf(stream, "%*sg: 0x%.2x avl: 0x%.2x "
		"unusable: 0x%.2x padding: 0x%.2x\n",
		indent, "", segment->g, segment->avl,
		segment->unusable, segment->padding);
}

static void dtable_dump(FILE *stream, struct kvm_dtable *dtable,
			uint8_t indent)
{
	fprintf(stream, "%*sbase: 0x%.16llx limit: 0x%.4x "
		"padding: 0x%.4x 0x%.4x 0x%.4x\n",
		indent, "", dtable->base, dtable->limit,
		dtable->padding[0], dtable->padding[1], dtable->padding[2]);
}

static void sregs_dump(FILE *stream, struct kvm_sregs *sregs, uint8_t indent)
{
	unsigned int i;

	fprintf(stream, "%*scs:\n", indent, "");
	segment_dump(stream, &sregs->cs, indent + 2);
	fprintf(stream, "%*sds:\n", indent, "");
	segment_dump(stream, &sregs->ds, indent + 2);
	fprintf(stream, "%*ses:\n", indent, "");
	segment_dump(stream, &sregs->es, indent + 2);
	fprintf(stream, "%*sfs:\n", indent, "");
	segment_dump(stream, &sregs->fs, indent + 2);
	fprintf(stream, "%*sgs:\n", indent, "");
	segment_dump(stream, &sregs->gs, indent + 2);
	fprintf(stream, "%*sss:\n", indent, "");
	segment_dump(stream, &sregs->ss, indent + 2);
	fprintf(stream, "%*str:\n", indent, "");
	segment_dump(stream, &sregs->tr, indent + 2);
	fprintf(stream, "%*sldt:\n", indent, "");
	segment_dump(stream, &sregs->ldt, indent + 2);

	fprintf(stream, "%*sgdt:\n", indent, "");
	dtable_dump(stream, &sregs->gdt, indent + 2);
	fprintf(stream, "%*sidt:\n", indent, "");
	dtable_dump(stream, &sregs->idt, indent + 2);

	fprintf(stream, "%*scr0: 0x%.16llx cr2: 0x%.16llx "
		"cr3: 0x%.16llx cr4: 0x%.16llx\n",
		indent, "",
		sregs->cr0, sregs->cr2, sregs->cr3, sregs->cr4);
	fprintf(stream, "%*scr8: 0x%.16llx efer: 0x%.16llx "
		"apic_base: 0x%.16llx\n",
		indent, "",
		sregs->cr8, sregs->efer, sregs->apic_base);

	fprintf(stream, "%*sinterrupt_bitmap:\n", indent, "");
	for (i = 0; i < (KVM_NR_INTERRUPTS + 63) / 64; i++) {
		fprintf(stream, "%*s%.16llx\n", indent + 2, "",
			sregs->interrupt_bitmap[i]);
	}
}

bool kvm_is_tdp_enabled(void)
{
	if (is_intel_cpu())
		return get_kvm_intel_param_bool("ept");
	else
		return get_kvm_amd_param_bool("npt");
}

void virt_arch_pgd_alloc(struct kvm_vm *vm)
{
	TEST_ASSERT(vm->mode == VM_MODE_PXXV48_4K, "Attempt to use "
		"unknown or unsupported guest mode, mode: 0x%x", vm->mode);

	/* If needed, create page map l4 table. */
	if (!vm->pgd_created) {
		vm->pgd = vm_alloc_page_table(vm);
		vm->pgd_created = true;
	}
}

static void *virt_get_pte(struct kvm_vm *vm, uint64_t pt_pfn, uint64_t vaddr,
			  int level)
{
	uint64_t *page_table = addr_gpa2hva(vm, pt_pfn << vm->page_shift);
	int index = (vaddr >> PG_LEVEL_SHIFT(level)) & 0x1ffu;

	return &page_table[index];
}

static uint64_t *virt_create_upper_pte(struct kvm_vm *vm,
				       uint64_t pt_pfn,
				       uint64_t vaddr,
				       uint64_t paddr,
				       int current_level,
				       int target_level)
{
	uint64_t *pte = virt_get_pte(vm, pt_pfn, vaddr, current_level);

	if (!(*pte & PTE_PRESENT_MASK)) {
		*pte = PTE_PRESENT_MASK | PTE_WRITABLE_MASK;
		if (current_level == target_level)
			*pte |= PTE_LARGE_MASK | (paddr & PHYSICAL_PAGE_MASK);
		else
			*pte |= vm_alloc_page_table(vm) & PHYSICAL_PAGE_MASK;
	} else {
		/*
		 * Entry already present. Assert that the caller doesn't want
		 * a hugepage at this level, and that there isn't a hugepage at
		 * this level.
		 */
		TEST_ASSERT(current_level != target_level,
			    "Cannot create hugepage at level: %u, vaddr: 0x%lx\n",
			    current_level, vaddr);
		TEST_ASSERT(!(*pte & PTE_LARGE_MASK),
			    "Cannot create page table at level: %u, vaddr: 0x%lx\n",
			    current_level, vaddr);
	}
	return pte;
}

void __virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr, int level)
{
	const uint64_t pg_size = PG_LEVEL_SIZE(level);
	uint64_t *pml4e, *pdpe, *pde;
	uint64_t *pte;

	TEST_ASSERT(vm->mode == VM_MODE_PXXV48_4K,
		    "Unknown or unsupported guest mode, mode: 0x%x", vm->mode);

	TEST_ASSERT((vaddr % pg_size) == 0,
		    "Virtual address not aligned,\n"
		    "vaddr: 0x%lx page size: 0x%lx", vaddr, pg_size);
	TEST_ASSERT(sparsebit_is_set(vm->vpages_valid, (vaddr >> vm->page_shift)),
		    "Invalid virtual address, vaddr: 0x%lx", vaddr);
	TEST_ASSERT((paddr % pg_size) == 0,
		    "Physical address not aligned,\n"
		    " paddr: 0x%lx page size: 0x%lx", paddr, pg_size);
	TEST_ASSERT((paddr >> vm->page_shift) <= vm->max_gfn,
		    "Physical address beyond maximum supported,\n"
		    " paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
		    paddr, vm->max_gfn, vm->page_size);

	/*
	 * Allocate upper level page tables, if not already present. Return
	 * early if a hugepage was created.
	 */
	pml4e = virt_create_upper_pte(vm, vm->pgd >> vm->page_shift,
				      vaddr, paddr, PG_LEVEL_512G, level);
	if (*pml4e & PTE_LARGE_MASK)
		return;

	pdpe = virt_create_upper_pte(vm, PTE_GET_PFN(*pml4e), vaddr, paddr, PG_LEVEL_1G, level);
	if (*pdpe & PTE_LARGE_MASK)
		return;

	pde = virt_create_upper_pte(vm, PTE_GET_PFN(*pdpe), vaddr, paddr, PG_LEVEL_2M, level);
	if (*pde & PTE_LARGE_MASK)
		return;

	/* Fill in page table entry. */
	pte = virt_get_pte(vm, PTE_GET_PFN(*pde), vaddr, PG_LEVEL_4K);
	TEST_ASSERT(!(*pte & PTE_PRESENT_MASK),
		    "PTE already present for 4k page at vaddr: 0x%lx\n", vaddr);
	*pte = PTE_PRESENT_MASK | PTE_WRITABLE_MASK | (paddr & PHYSICAL_PAGE_MASK);
}

void virt_arch_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr)
{
	__virt_pg_map(vm, vaddr, paddr, PG_LEVEL_4K);
}

void virt_map_level(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
		    uint64_t nr_bytes, int level)
{
	uint64_t pg_size = PG_LEVEL_SIZE(level);
	uint64_t nr_pages = nr_bytes / pg_size;
	int i;

	TEST_ASSERT(nr_bytes % pg_size == 0,
		    "Region size not aligned: nr_bytes: 0x%lx, page size: 0x%lx",
		    nr_bytes, pg_size);

	for (i = 0; i < nr_pages; i++) {
		__virt_pg_map(vm, vaddr, paddr, level);

		vaddr += pg_size;
		paddr += pg_size;
	}
}
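
/*
 * Example (illustrative): to back a large region with 2 MiB mappings instead
 * of 4 KiB ones, a test can call e.g.
 *
 *	virt_map_level(vm, gva, gpa, size, PG_LEVEL_2M);
 *
 * where gva, gpa and size must all be 2 MiB aligned; the assert above and the
 * ones in __virt_pg_map() enforce this.
 */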

static uint64_t *_vm_get_page_table_entry(struct kvm_vm *vm,
					  struct kvm_vcpu *vcpu,
					  uint64_t vaddr)
{
	uint16_t index[4];
	uint64_t *pml4e, *pdpe, *pde;
	uint64_t *pte;
	struct kvm_sregs sregs;
	uint64_t rsvd_mask = 0;

	/* Set the high bits in the reserved mask. */
	if (vm->pa_bits < 52)
		rsvd_mask = GENMASK_ULL(51, vm->pa_bits);

	/*
	 * SDM vol 3, fig 4-11 "Formats of CR3 and Paging-Structure Entries
	 * with 4-Level Paging and 5-Level Paging".
	 * If IA32_EFER.NXE = 0 and the P flag of a paging-structure entry is 1,
	 * the XD flag (bit 63) is reserved.
	 */
	vcpu_sregs_get(vcpu, &sregs);
	if ((sregs.efer & EFER_NX) == 0) {
		rsvd_mask |= PTE_NX_MASK;
	}

	TEST_ASSERT(vm->mode == VM_MODE_PXXV48_4K, "Attempt to use "
		"unknown or unsupported guest mode, mode: 0x%x", vm->mode);
	TEST_ASSERT(sparsebit_is_set(vm->vpages_valid,
		(vaddr >> vm->page_shift)),
		"Invalid virtual address, vaddr: 0x%lx",
		vaddr);
	/*
	 * Based on the mode check above there are 48 bits in the vaddr, so
	 * shift 16 to sign extend the last bit (bit-47).
	 */
	TEST_ASSERT(vaddr == (((int64_t)vaddr << 16) >> 16),
		"Canonical check failed. The virtual address is invalid.");

	index[0] = (vaddr >> 12) & 0x1ffu;
	index[1] = (vaddr >> 21) & 0x1ffu;
	index[2] = (vaddr >> 30) & 0x1ffu;
	index[3] = (vaddr >> 39) & 0x1ffu;

	pml4e = addr_gpa2hva(vm, vm->pgd);
	TEST_ASSERT(pml4e[index[3]] & PTE_PRESENT_MASK,
		"Expected pml4e to be present for gva: 0x%08lx", vaddr);
	TEST_ASSERT((pml4e[index[3]] & (rsvd_mask | PTE_LARGE_MASK)) == 0,
		"Unexpected reserved bits set.");

	pdpe = addr_gpa2hva(vm, PTE_GET_PFN(pml4e[index[3]]) * vm->page_size);
	TEST_ASSERT(pdpe[index[2]] & PTE_PRESENT_MASK,
		"Expected pdpe to be present for gva: 0x%08lx", vaddr);
	TEST_ASSERT(!(pdpe[index[2]] & PTE_LARGE_MASK),
		"Expected pdpe to map a pde not a 1-GByte page.");
	TEST_ASSERT((pdpe[index[2]] & rsvd_mask) == 0,
		"Unexpected reserved bits set.");

	pde = addr_gpa2hva(vm, PTE_GET_PFN(pdpe[index[2]]) * vm->page_size);
	TEST_ASSERT(pde[index[1]] & PTE_PRESENT_MASK,
		"Expected pde to be present for gva: 0x%08lx", vaddr);
	TEST_ASSERT(!(pde[index[1]] & PTE_LARGE_MASK),
		"Expected pde to map a pte not a 2-MByte page.");
	TEST_ASSERT((pde[index[1]] & rsvd_mask) == 0,
		"Unexpected reserved bits set.");

	pte = addr_gpa2hva(vm, PTE_GET_PFN(pde[index[1]]) * vm->page_size);
	TEST_ASSERT(pte[index[0]] & PTE_PRESENT_MASK,
		"Expected pte to be present for gva: 0x%08lx", vaddr);

	return &pte[index[0]];
}

uint64_t vm_get_page_table_entry(struct kvm_vm *vm, struct kvm_vcpu *vcpu,
				 uint64_t vaddr)
{
	uint64_t *pte = _vm_get_page_table_entry(vm, vcpu, vaddr);

	return *(uint64_t *)pte;
}

void vm_set_page_table_entry(struct kvm_vm *vm, struct kvm_vcpu *vcpu,
			     uint64_t vaddr, uint64_t pte)
{
	uint64_t *new_pte = _vm_get_page_table_entry(vm, vcpu, vaddr);

	*(uint64_t *)new_pte = pte;
}
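
/*
 * Example (illustrative): a test can fetch a guest PTE, flip a permission bit
 * and write it back, e.g. to make a page read-only and provoke a #PF:
 *
 *	uint64_t pte = vm_get_page_table_entry(vm, vcpu, gva);
 *
 *	vm_set_page_table_entry(vm, vcpu, gva, pte & ~PTE_WRITABLE_MASK);
 *
 * Note that nothing here invalidates the guest's TLB; the test is responsible
 * for any required flush.
 */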

void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
{
	uint64_t *pml4e, *pml4e_start;
	uint64_t *pdpe, *pdpe_start;
	uint64_t *pde, *pde_start;
	uint64_t *pte, *pte_start;

	if (!vm->pgd_created)
		return;

	fprintf(stream, "%*s "
		" no\n", indent, "");
	fprintf(stream, "%*s index hvaddr gpaddr "
		"addr w exec dirty\n",
		indent, "");
	pml4e_start = (uint64_t *) addr_gpa2hva(vm, vm->pgd);
	for (uint16_t n1 = 0; n1 <= 0x1ffu; n1++) {
		pml4e = &pml4e_start[n1];
		if (!(*pml4e & PTE_PRESENT_MASK))
			continue;
		fprintf(stream, "%*spml4e 0x%-3zx %p 0x%-12lx 0x%-10llx %u "
			" %u\n",
			indent, "",
			pml4e - pml4e_start, pml4e,
			addr_hva2gpa(vm, pml4e), PTE_GET_PFN(*pml4e),
			!!(*pml4e & PTE_WRITABLE_MASK), !!(*pml4e & PTE_NX_MASK));

		pdpe_start = addr_gpa2hva(vm, *pml4e & PHYSICAL_PAGE_MASK);
		for (uint16_t n2 = 0; n2 <= 0x1ffu; n2++) {
			pdpe = &pdpe_start[n2];
			if (!(*pdpe & PTE_PRESENT_MASK))
				continue;
			fprintf(stream, "%*spdpe 0x%-3zx %p 0x%-12lx 0x%-10llx "
				"%u %u\n",
				indent, "",
				pdpe - pdpe_start, pdpe,
				addr_hva2gpa(vm, pdpe),
				PTE_GET_PFN(*pdpe), !!(*pdpe & PTE_WRITABLE_MASK),
				!!(*pdpe & PTE_NX_MASK));

			pde_start = addr_gpa2hva(vm, *pdpe & PHYSICAL_PAGE_MASK);
			for (uint16_t n3 = 0; n3 <= 0x1ffu; n3++) {
				pde = &pde_start[n3];
				if (!(*pde & PTE_PRESENT_MASK))
					continue;
				fprintf(stream, "%*spde 0x%-3zx %p "
					"0x%-12lx 0x%-10llx %u %u\n",
					indent, "", pde - pde_start, pde,
					addr_hva2gpa(vm, pde),
					PTE_GET_PFN(*pde), !!(*pde & PTE_WRITABLE_MASK),
					!!(*pde & PTE_NX_MASK));

				pte_start = addr_gpa2hva(vm, *pde & PHYSICAL_PAGE_MASK);
				for (uint16_t n4 = 0; n4 <= 0x1ffu; n4++) {
					pte = &pte_start[n4];
					if (!(*pte & PTE_PRESENT_MASK))
						continue;
					fprintf(stream, "%*spte 0x%-3zx %p "
						"0x%-12lx 0x%-10llx %u %u "
						" %u 0x%-10lx\n",
						indent, "",
						pte - pte_start, pte,
						addr_hva2gpa(vm, pte),
						PTE_GET_PFN(*pte),
						!!(*pte & PTE_WRITABLE_MASK),
						!!(*pte & PTE_NX_MASK),
						!!(*pte & PTE_DIRTY_MASK),
						((uint64_t) n1 << 27)
						| ((uint64_t) n2 << 18)
						| ((uint64_t) n3 << 9)
						| ((uint64_t) n4));
				}
			}
		}
	}
}

/*
 * Set Unusable Segment
 *
 * Input Args: None
 *
 * Output Args:
 *   segp - Pointer to segment register
 *
 * Return: None
 *
 * Sets the segment register pointed to by @segp to an unusable state.
 */
static void kvm_seg_set_unusable(struct kvm_segment *segp)
{
	memset(segp, 0, sizeof(*segp));
	segp->unusable = true;
}

static void kvm_seg_fill_gdt_64bit(struct kvm_vm *vm, struct kvm_segment *segp)
{
	void *gdt = addr_gva2hva(vm, vm->gdt);
	struct desc64 *desc = gdt + (segp->selector >> 3) * 8;

	desc->limit0 = segp->limit & 0xFFFF;
	desc->base0 = segp->base & 0xFFFF;
	desc->base1 = segp->base >> 16;
	desc->type = segp->type;
	desc->s = segp->s;
	desc->dpl = segp->dpl;
	desc->p = segp->present;
	desc->limit1 = segp->limit >> 16;
	desc->avl = segp->avl;
	desc->l = segp->l;
	desc->db = segp->db;
	desc->g = segp->g;
	desc->base2 = segp->base >> 24;
	if (!segp->s)
		desc->base3 = segp->base >> 32;
}


/*
 * Set Long Mode Flat Kernel Code Segment
 *
 * Input Args:
 *   vm - VM whose GDT is being filled, or NULL to only write segp
 *   selector - selector value
 *
 * Output Args:
 *   segp - Pointer to KVM segment
 *
 * Return: None
 *
 * Sets up the KVM segment pointed to by @segp, to be a code segment
 * with the selector value given by @selector.
 */
static void kvm_seg_set_kernel_code_64bit(struct kvm_vm *vm, uint16_t selector,
					  struct kvm_segment *segp)
{
	memset(segp, 0, sizeof(*segp));
	segp->selector = selector;
	segp->limit = 0xFFFFFFFFu;
	segp->s = 0x1; /* kTypeCodeData */
	segp->type = 0x08 | 0x01 | 0x02; /* kFlagCode | kFlagCodeAccessed
					  * | kFlagCodeReadable
					  */
	segp->g = true;
	segp->l = true;
	segp->present = 1;
	if (vm)
		kvm_seg_fill_gdt_64bit(vm, segp);
}

/*
 * Set Long Mode Flat Kernel Data Segment
 *
 * Input Args:
 *   vm - VM whose GDT is being filled, or NULL to only write segp
 *   selector - selector value
 *
 * Output Args:
 *   segp - Pointer to KVM segment
 *
 * Return: None
 *
 * Sets up the KVM segment pointed to by @segp, to be a data segment
 * with the selector value given by @selector.
 */
static void kvm_seg_set_kernel_data_64bit(struct kvm_vm *vm, uint16_t selector,
					  struct kvm_segment *segp)
{
	memset(segp, 0, sizeof(*segp));
	segp->selector = selector;
	segp->limit = 0xFFFFFFFFu;
	segp->s = 0x1; /* kTypeCodeData */
	segp->type = 0x00 | 0x01 | 0x02; /* kFlagData | kFlagDataAccessed
					  * | kFlagDataWritable
					  */
	segp->g = true;
	segp->present = true;
	if (vm)
		kvm_seg_fill_gdt_64bit(vm, segp);
}

vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
{
	uint16_t index[4];
	uint64_t *pml4e, *pdpe, *pde;
	uint64_t *pte;

	TEST_ASSERT(vm->mode == VM_MODE_PXXV48_4K, "Attempt to use "
		"unknown or unsupported guest mode, mode: 0x%x", vm->mode);

	index[0] = (gva >> 12) & 0x1ffu;
	index[1] = (gva >> 21) & 0x1ffu;
	index[2] = (gva >> 30) & 0x1ffu;
	index[3] = (gva >> 39) & 0x1ffu;

	if (!vm->pgd_created)
		goto unmapped_gva;
	pml4e = addr_gpa2hva(vm, vm->pgd);
	if (!(pml4e[index[3]] & PTE_PRESENT_MASK))
		goto unmapped_gva;

	pdpe = addr_gpa2hva(vm, PTE_GET_PFN(pml4e[index[3]]) * vm->page_size);
	if (!(pdpe[index[2]] & PTE_PRESENT_MASK))
		goto unmapped_gva;

	pde = addr_gpa2hva(vm, PTE_GET_PFN(pdpe[index[2]]) * vm->page_size);
	if (!(pde[index[1]] & PTE_PRESENT_MASK))
		goto unmapped_gva;

	pte = addr_gpa2hva(vm, PTE_GET_PFN(pde[index[1]]) * vm->page_size);
	if (!(pte[index[0]] & PTE_PRESENT_MASK))
		goto unmapped_gva;

	return (PTE_GET_PFN(pte[index[0]]) * vm->page_size) + (gva & ~PAGE_MASK);

unmapped_gva:
	TEST_FAIL("No mapping for vm virtual address, gva: 0x%lx", gva);
	exit(EXIT_FAILURE);
}

static void kvm_setup_gdt(struct kvm_vm *vm, struct kvm_dtable *dt)
{
	if (!vm->gdt)
		vm->gdt = vm_vaddr_alloc_page(vm);

	dt->base = vm->gdt;
	dt->limit = getpagesize();
}

static void kvm_setup_tss_64bit(struct kvm_vm *vm, struct kvm_segment *segp,
				int selector)
{
	if (!vm->tss)
		vm->tss = vm_vaddr_alloc_page(vm);

	memset(segp, 0, sizeof(*segp));
	segp->base = vm->tss;
	segp->limit = 0x67;
	segp->selector = selector;
	segp->type = 0xb;
	segp->present = 1;
	kvm_seg_fill_gdt_64bit(vm, segp);
}

static void vcpu_setup(struct kvm_vm *vm, struct kvm_vcpu *vcpu)
{
	struct kvm_sregs sregs;

	/* Set mode specific system register values. */
	vcpu_sregs_get(vcpu, &sregs);

	sregs.idt.limit = 0;

	kvm_setup_gdt(vm, &sregs.gdt);

	switch (vm->mode) {
	case VM_MODE_PXXV48_4K:
		sregs.cr0 = X86_CR0_PE | X86_CR0_NE | X86_CR0_PG;
		sregs.cr4 |= X86_CR4_PAE | X86_CR4_OSFXSR;
		sregs.efer |= (EFER_LME | EFER_LMA | EFER_NX);

		kvm_seg_set_unusable(&sregs.ldt);
		kvm_seg_set_kernel_code_64bit(vm, DEFAULT_CODE_SELECTOR, &sregs.cs);
		kvm_seg_set_kernel_data_64bit(vm, DEFAULT_DATA_SELECTOR, &sregs.ds);
		kvm_seg_set_kernel_data_64bit(vm, DEFAULT_DATA_SELECTOR, &sregs.es);
		kvm_setup_tss_64bit(vm, &sregs.tr, 0x18);
		break;

	default:
		TEST_FAIL("Unknown guest mode, mode: 0x%x", vm->mode);
	}

	sregs.cr3 = vm->pgd;
	vcpu_sregs_set(vcpu, &sregs);
}

void __vm_xsave_require_permission(int bit, const char *name)
{
	int kvm_fd;
	u64 bitmask;
	long rc;
	struct kvm_device_attr attr = {
		.group = 0,
		.attr = KVM_X86_XCOMP_GUEST_SUPP,
		.addr = (unsigned long) &bitmask
	};

	TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_XFD));

	kvm_fd = open_kvm_dev_path_or_exit();
	rc = __kvm_ioctl(kvm_fd, KVM_GET_DEVICE_ATTR, &attr);
	close(kvm_fd);

	if (rc == -1 && (errno == ENXIO || errno == EINVAL))
		__TEST_REQUIRE(0, "KVM_X86_XCOMP_GUEST_SUPP not supported");

	TEST_ASSERT(rc == 0, "KVM_GET_DEVICE_ATTR(0, KVM_X86_XCOMP_GUEST_SUPP) error: %ld", rc);

	__TEST_REQUIRE(bitmask & (1ULL << bit),
		       "Required XSAVE feature '%s' not supported", name);

	TEST_REQUIRE(!syscall(SYS_arch_prctl, ARCH_REQ_XCOMP_GUEST_PERM, bit));

	rc = syscall(SYS_arch_prctl, ARCH_GET_XCOMP_GUEST_PERM, &bitmask);
	TEST_ASSERT(rc == 0, "prctl(ARCH_GET_XCOMP_GUEST_PERM) error: %ld", rc);
	TEST_ASSERT(bitmask & (1ULL << bit),
		    "prctl(ARCH_REQ_XCOMP_GUEST_PERM) failure bitmask=0x%lx",
		    bitmask);
}
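
/*
 * Example (illustrative): tests that depend on a dynamically enabled XSAVE
 * component call this before creating vCPUs. An AMX test, for instance,
 * would request the XTILE_DATA component (XCR0 bit 18):
 *
 *	__vm_xsave_require_permission(18, "XTILE_DATA");
 *
 * In practice the bit is usually passed via a named constant from the test's
 * headers rather than a bare number.
 */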

struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
				  void *guest_code)
{
	struct kvm_mp_state mp_state;
	struct kvm_regs regs;
	vm_vaddr_t stack_vaddr;
	struct kvm_vcpu *vcpu;

	stack_vaddr = vm_vaddr_alloc(vm, DEFAULT_STACK_PGS * getpagesize(),
				     DEFAULT_GUEST_STACK_VADDR_MIN);

	vcpu = __vm_vcpu_add(vm, vcpu_id);
	vcpu_init_cpuid(vcpu, kvm_get_supported_cpuid());
	vcpu_setup(vm, vcpu);

	/* Setup guest general purpose registers */
	vcpu_regs_get(vcpu, &regs);
	regs.rflags = regs.rflags | 0x2;
	regs.rsp = stack_vaddr + (DEFAULT_STACK_PGS * getpagesize());
	regs.rip = (unsigned long) guest_code;
	vcpu_regs_set(vcpu, &regs);

	/* Setup the MP state */
	mp_state.mp_state = 0;
	vcpu_mp_state_set(vcpu, &mp_state);

	return vcpu;
}

struct kvm_vcpu *vm_arch_vcpu_recreate(struct kvm_vm *vm, uint32_t vcpu_id)
{
	struct kvm_vcpu *vcpu = __vm_vcpu_add(vm, vcpu_id);

	vcpu_init_cpuid(vcpu, kvm_get_supported_cpuid());

	return vcpu;
}

void vcpu_arch_free(struct kvm_vcpu *vcpu)
{
	if (vcpu->cpuid)
		free(vcpu->cpuid);
}

const struct kvm_cpuid2 *kvm_get_supported_cpuid(void)
{
	static struct kvm_cpuid2 *cpuid;
	int kvm_fd;

	if (cpuid)
		return cpuid;

	cpuid = allocate_kvm_cpuid2(MAX_NR_CPUID_ENTRIES);
	kvm_fd = open_kvm_dev_path_or_exit();

	kvm_ioctl(kvm_fd, KVM_GET_SUPPORTED_CPUID, cpuid);

	close(kvm_fd);
	return cpuid;
}

bool kvm_cpuid_has(const struct kvm_cpuid2 *cpuid,
		   struct kvm_x86_cpu_feature feature)
{
	const struct kvm_cpuid_entry2 *entry;
	int i;

	for (i = 0; i < cpuid->nent; i++) {
		entry = &cpuid->entries[i];

		/*
		 * The output registers in kvm_cpuid_entry2 are in alphabetical
		 * order, but kvm_x86_cpu_feature matches that mess, so yay
		 * pointer shenanigans!
		 */
		if (entry->function == feature.function &&
		    entry->index == feature.index)
			return (&entry->eax)[feature.reg] & BIT(feature.bit);
	}

	return false;
}
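
/*
 * Example (illustrative): checking whether KVM reports a feature to the
 * guest, e.g.
 *
 *	if (kvm_cpuid_has(kvm_get_supported_cpuid(), X86_FEATURE_XSAVE))
 *		...;
 *
 * Most callers use the kvm_cpu_has() helper instead (as done above for
 * X86_FEATURE_XFD), which passes the cached supported-CPUID list for them.
 */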

uint64_t kvm_get_feature_msr(uint64_t msr_index)
{
	struct {
		struct kvm_msrs header;
		struct kvm_msr_entry entry;
	} buffer = {};
	int r, kvm_fd;

	buffer.header.nmsrs = 1;
	buffer.entry.index = msr_index;
	kvm_fd = open_kvm_dev_path_or_exit();

	r = __kvm_ioctl(kvm_fd, KVM_GET_MSRS, &buffer.header);
	TEST_ASSERT(r == 1, KVM_IOCTL_ERROR(KVM_GET_MSRS, r));

	close(kvm_fd);
	return buffer.entry.data;
}

void vcpu_init_cpuid(struct kvm_vcpu *vcpu, const struct kvm_cpuid2 *cpuid)
{
	TEST_ASSERT(cpuid != vcpu->cpuid, "@cpuid can't be the vCPU's CPUID");

	/* Allow overriding the default CPUID. */
	if (vcpu->cpuid && vcpu->cpuid->nent < cpuid->nent) {
		free(vcpu->cpuid);
		vcpu->cpuid = NULL;
	}

	if (!vcpu->cpuid)
		vcpu->cpuid = allocate_kvm_cpuid2(cpuid->nent);

	memcpy(vcpu->cpuid, cpuid, kvm_cpuid2_size(cpuid->nent));
	vcpu_set_cpuid(vcpu);
}

void vcpu_set_cpuid_maxphyaddr(struct kvm_vcpu *vcpu, uint8_t maxphyaddr)
{
	struct kvm_cpuid_entry2 *entry = vcpu_get_cpuid_entry(vcpu, 0x80000008);

	entry->eax = (entry->eax & ~0xff) | maxphyaddr;
	vcpu_set_cpuid(vcpu);
}

void vcpu_clear_cpuid_entry(struct kvm_vcpu *vcpu, uint32_t function)
{
	struct kvm_cpuid_entry2 *entry = vcpu_get_cpuid_entry(vcpu, function);

	entry->eax = 0;
	entry->ebx = 0;
	entry->ecx = 0;
	entry->edx = 0;
	vcpu_set_cpuid(vcpu);
}

void vcpu_set_or_clear_cpuid_feature(struct kvm_vcpu *vcpu,
				     struct kvm_x86_cpu_feature feature,
				     bool set)
{
	struct kvm_cpuid_entry2 *entry;
	u32 *reg;

	entry = __vcpu_get_cpuid_entry(vcpu, feature.function, feature.index);
	reg = (&entry->eax) + feature.reg;

	if (set)
		*reg |= BIT(feature.bit);
	else
		*reg &= ~BIT(feature.bit);

	vcpu_set_cpuid(vcpu);
}

uint64_t vcpu_get_msr(struct kvm_vcpu *vcpu, uint64_t msr_index)
{
	struct {
		struct kvm_msrs header;
		struct kvm_msr_entry entry;
	} buffer = {};

	buffer.header.nmsrs = 1;
	buffer.entry.index = msr_index;

	vcpu_msrs_get(vcpu, &buffer.header);

	return buffer.entry.data;
}

int _vcpu_set_msr(struct kvm_vcpu *vcpu, uint64_t msr_index, uint64_t msr_value)
{
	struct {
		struct kvm_msrs header;
		struct kvm_msr_entry entry;
	} buffer = {};

	memset(&buffer, 0, sizeof(buffer));
	buffer.header.nmsrs = 1;
	buffer.entry.index = msr_index;
	buffer.entry.data = msr_value;

	return __vcpu_ioctl(vcpu, KVM_SET_MSRS, &buffer.header);
}

void vcpu_args_set(struct kvm_vcpu *vcpu, unsigned int num, ...)
{
	va_list ap;
	struct kvm_regs regs;

	TEST_ASSERT(num >= 1 && num <= 6, "Unsupported number of args,\n"
		    " num: %u\n",
		    num);

	va_start(ap, num);
	vcpu_regs_get(vcpu, &regs);

	if (num >= 1)
		regs.rdi = va_arg(ap, uint64_t);

	if (num >= 2)
		regs.rsi = va_arg(ap, uint64_t);

	if (num >= 3)
		regs.rdx = va_arg(ap, uint64_t);

	if (num >= 4)
		regs.rcx = va_arg(ap, uint64_t);

	if (num >= 5)
		regs.r8 = va_arg(ap, uint64_t);

	if (num >= 6)
		regs.r9 = va_arg(ap, uint64_t);

	vcpu_regs_set(vcpu, &regs);
	va_end(ap);
}
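
/*
 * Example (illustrative): the arguments land in the guest's rdi, rsi, rdx,
 * rcx, r8 and r9 (in that order), matching the System V x86-64 calling
 * convention, so a guest_code(uint64_t a, uint64_t b) entry point can be
 * fed with e.g.
 *
 *	vcpu_args_set(vcpu, 2, a_value, b_value);
 */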

void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu, uint8_t indent)
{
	struct kvm_regs regs;
	struct kvm_sregs sregs;

	fprintf(stream, "%*svCPU ID: %u\n", indent, "", vcpu->id);

	fprintf(stream, "%*sregs:\n", indent + 2, "");
	vcpu_regs_get(vcpu, &regs);
	regs_dump(stream, &regs, indent + 4);

	fprintf(stream, "%*ssregs:\n", indent + 2, "");
	vcpu_sregs_get(vcpu, &sregs);
	sregs_dump(stream, &sregs, indent + 4);
}

static struct kvm_msr_list *__kvm_get_msr_index_list(bool feature_msrs)
{
	struct kvm_msr_list *list;
	struct kvm_msr_list nmsrs;
	int kvm_fd, r;

	kvm_fd = open_kvm_dev_path_or_exit();

	nmsrs.nmsrs = 0;
	if (!feature_msrs)
		r = __kvm_ioctl(kvm_fd, KVM_GET_MSR_INDEX_LIST, &nmsrs);
	else
		r = __kvm_ioctl(kvm_fd, KVM_GET_MSR_FEATURE_INDEX_LIST, &nmsrs);

	TEST_ASSERT(r == -1 && errno == E2BIG,
		    "Expected -E2BIG, got rc: %i errno: %i (%s)",
		    r, errno, strerror(errno));

	list = malloc(sizeof(*list) + nmsrs.nmsrs * sizeof(list->indices[0]));
	TEST_ASSERT(list, "-ENOMEM when allocating MSR index list");
	list->nmsrs = nmsrs.nmsrs;

	if (!feature_msrs)
		kvm_ioctl(kvm_fd, KVM_GET_MSR_INDEX_LIST, list);
	else
		kvm_ioctl(kvm_fd, KVM_GET_MSR_FEATURE_INDEX_LIST, list);
	close(kvm_fd);

	TEST_ASSERT(list->nmsrs == nmsrs.nmsrs,
		    "Number of MSRs in list changed, was %d, now %d",
		    nmsrs.nmsrs, list->nmsrs);
	return list;
}

const struct kvm_msr_list *kvm_get_msr_index_list(void)
{
	static const struct kvm_msr_list *list;

	if (!list)
		list = __kvm_get_msr_index_list(false);
	return list;
}


const struct kvm_msr_list *kvm_get_feature_msr_index_list(void)
{
	static const struct kvm_msr_list *list;

	if (!list)
		list = __kvm_get_msr_index_list(true);
	return list;
}

bool kvm_msr_is_in_save_restore_list(uint32_t msr_index)
{
	const struct kvm_msr_list *list = kvm_get_msr_index_list();
	int i;

	for (i = 0; i < list->nmsrs; ++i) {
		if (list->indices[i] == msr_index)
			return true;
	}

	return false;
}

static void vcpu_save_xsave_state(struct kvm_vcpu *vcpu,
				  struct kvm_x86_state *state)
{
	int size = vm_check_cap(vcpu->vm, KVM_CAP_XSAVE2);

	if (size) {
		state->xsave = malloc(size);
		vcpu_xsave2_get(vcpu, state->xsave);
	} else {
		state->xsave = malloc(sizeof(struct kvm_xsave));
		vcpu_xsave_get(vcpu, state->xsave);
	}
}

struct kvm_x86_state *vcpu_save_state(struct kvm_vcpu *vcpu)
{
	const struct kvm_msr_list *msr_list = kvm_get_msr_index_list();
	struct kvm_x86_state *state;
	int i;

	static int nested_size = -1;

	if (nested_size == -1) {
		nested_size = kvm_check_cap(KVM_CAP_NESTED_STATE);
		TEST_ASSERT(nested_size <= sizeof(state->nested_),
			    "Nested state size too big, %i > %zi",
			    nested_size, sizeof(state->nested_));
	}

	/*
	 * When KVM exits to userspace with KVM_EXIT_IO, KVM guarantees
	 * guest state is consistent only after userspace re-enters the
	 * kernel with KVM_RUN. Complete IO prior to migrating state
	 * to a new VM.
	 */
	vcpu_run_complete_io(vcpu);

	state = malloc(sizeof(*state) + msr_list->nmsrs * sizeof(state->msrs.entries[0]));

	vcpu_events_get(vcpu, &state->events);
	vcpu_mp_state_get(vcpu, &state->mp_state);
	vcpu_regs_get(vcpu, &state->regs);
	vcpu_save_xsave_state(vcpu, state);

	if (kvm_has_cap(KVM_CAP_XCRS))
		vcpu_xcrs_get(vcpu, &state->xcrs);

	vcpu_sregs_get(vcpu, &state->sregs);

	if (nested_size) {
		state->nested.size = sizeof(state->nested_);

		vcpu_nested_state_get(vcpu, &state->nested);
		TEST_ASSERT(state->nested.size <= nested_size,
			    "Nested state size too big, %i (KVM_CHECK_CAP gave %i)",
			    state->nested.size, nested_size);
	} else {
		state->nested.size = 0;
	}

	state->msrs.nmsrs = msr_list->nmsrs;
	for (i = 0; i < msr_list->nmsrs; i++)
		state->msrs.entries[i].index = msr_list->indices[i];
	vcpu_msrs_get(vcpu, &state->msrs);

	vcpu_debugregs_get(vcpu, &state->debugregs);

	return state;
}

void vcpu_load_state(struct kvm_vcpu *vcpu, struct kvm_x86_state *state)
{
	vcpu_sregs_set(vcpu, &state->sregs);
	vcpu_msrs_set(vcpu, &state->msrs);

	if (kvm_has_cap(KVM_CAP_XCRS))
		vcpu_xcrs_set(vcpu, &state->xcrs);

	vcpu_xsave_set(vcpu, state->xsave);
	vcpu_events_set(vcpu, &state->events);
	vcpu_mp_state_set(vcpu, &state->mp_state);
	vcpu_debugregs_set(vcpu, &state->debugregs);
	vcpu_regs_set(vcpu, &state->regs);

	if (state->nested.size)
		vcpu_nested_state_set(vcpu, &state->nested);
}

void kvm_x86_state_cleanup(struct kvm_x86_state *state)
{
	free(state->xsave);
	free(state);
}

static bool cpu_vendor_string_is(const char *vendor)
{
	const uint32_t *chunk = (const uint32_t *)vendor;
	uint32_t eax, ebx, ecx, edx;

	cpuid(0, &eax, &ebx, &ecx, &edx);
	return (ebx == chunk[0] && edx == chunk[1] && ecx == chunk[2]);
}

bool is_intel_cpu(void)
{
	return cpu_vendor_string_is("GenuineIntel");
}

/*
 * Exclude early K5 samples with a vendor string of "AMDisbetter!"
 */
bool is_amd_cpu(void)
{
	return cpu_vendor_string_is("AuthenticAMD");
}

void kvm_get_cpu_address_width(unsigned int *pa_bits, unsigned int *va_bits)
{
	const struct kvm_cpuid_entry2 *entry;
	bool pae;

	/* SDM 4.1.4 */
	if (kvm_get_cpuid_max_extended() < 0x80000008) {
		pae = kvm_get_supported_cpuid_entry(1)->edx & (1 << 6);
		*pa_bits = pae ? 36 : 32;
		*va_bits = 32;
	} else {
		entry = kvm_get_supported_cpuid_entry(0x80000008);
		*pa_bits = entry->eax & 0xff;
		*va_bits = (entry->eax >> 8) & 0xff;
	}
}

static void set_idt_entry(struct kvm_vm *vm, int vector, unsigned long addr,
			  int dpl, unsigned short selector)
{
	struct idt_entry *base =
		(struct idt_entry *)addr_gva2hva(vm, vm->idt);
	struct idt_entry *e = &base[vector];

	memset(e, 0, sizeof(*e));
	e->offset0 = addr;
	e->selector = selector;
	e->ist = 0;
	e->type = 14;
	e->dpl = dpl;
	e->p = 1;
	e->offset1 = addr >> 16;
	e->offset2 = addr >> 32;
}


static bool kvm_fixup_exception(struct ex_regs *regs)
{
	if (regs->r9 != KVM_EXCEPTION_MAGIC || regs->rip != regs->r10)
		return false;

	if (regs->vector == DE_VECTOR)
		return false;

	regs->rip = regs->r11;
	regs->r9 = regs->vector;
	return true;
}
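
/*
 * The fixup above is driven entirely by guest-side convention: before issuing
 * an instruction that may fault, the guest loads KVM_EXCEPTION_MAGIC into r9,
 * the address of the possibly-faulting instruction into r10, and the address
 * to resume at into r11 (the KVM_ASM_SAFE() helpers in the selftest headers
 * set these up). On a match the handler resumes at the fixup target and hands
 * the vector back in r9; #DE is deliberately left to the normal unhandled
 * path.
 */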

void kvm_exit_unexpected_vector(uint32_t value)
{
	ucall(UCALL_UNHANDLED, 1, value);
}

void route_exception(struct ex_regs *regs)
{
	typedef void(*handler)(struct ex_regs *);
	handler *handlers = (handler *)exception_handlers;

	if (handlers && handlers[regs->vector]) {
		handlers[regs->vector](regs);
		return;
	}

	if (kvm_fixup_exception(regs))
		return;

	kvm_exit_unexpected_vector(regs->vector);
}

void vm_init_descriptor_tables(struct kvm_vm *vm)
{
	extern void *idt_handlers;
	int i;

	vm->idt = vm_vaddr_alloc_page(vm);
	vm->handlers = vm_vaddr_alloc_page(vm);
	/* Handlers have the same address in both address spaces. */
	for (i = 0; i < NUM_INTERRUPTS; i++)
		set_idt_entry(vm, i, (unsigned long)(&idt_handlers)[i], 0,
			      DEFAULT_CODE_SELECTOR);
}

void vcpu_init_descriptor_tables(struct kvm_vcpu *vcpu)
{
	struct kvm_vm *vm = vcpu->vm;
	struct kvm_sregs sregs;

	vcpu_sregs_get(vcpu, &sregs);
	sregs.idt.base = vm->idt;
	sregs.idt.limit = NUM_INTERRUPTS * sizeof(struct idt_entry) - 1;
	sregs.gdt.base = vm->gdt;
	sregs.gdt.limit = getpagesize() - 1;
	kvm_seg_set_kernel_data_64bit(NULL, DEFAULT_DATA_SELECTOR, &sregs.gs);
	vcpu_sregs_set(vcpu, &sregs);
	*(vm_vaddr_t *)addr_gva2hva(vm, (vm_vaddr_t)(&exception_handlers)) = vm->handlers;
}

void vm_install_exception_handler(struct kvm_vm *vm, int vector,
				  void (*handler)(struct ex_regs *))
{
	vm_vaddr_t *handlers = (vm_vaddr_t *)addr_gva2hva(vm, vm->handlers);

	handlers[vector] = (vm_vaddr_t)handler;
}
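
/*
 * Example (illustrative): a test that expects a #GP can register a guest
 * handler and have it report back via the regs it receives, e.g.
 *
 *	static void guest_gp_handler(struct ex_regs *regs)
 *	{
 *		GUEST_SYNC(regs->vector);
 *	}
 *
 *	vm_init_descriptor_tables(vm);
 *	vcpu_init_descriptor_tables(vcpu);
 *	vm_install_exception_handler(vm, GP_VECTOR, guest_gp_handler);
 */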

void assert_on_unhandled_exception(struct kvm_vcpu *vcpu)
{
	struct ucall uc;

	if (get_ucall(vcpu, &uc) == UCALL_UNHANDLED) {
		uint64_t vector = uc.args[0];

		TEST_FAIL("Unexpected vectored event in guest (vector:0x%lx)",
			  vector);
	}
}

const struct kvm_cpuid_entry2 *get_cpuid_entry(const struct kvm_cpuid2 *cpuid,
					       uint32_t function, uint32_t index)
{
	int i;

	for (i = 0; i < cpuid->nent; i++) {
		if (cpuid->entries[i].function == function &&
		    cpuid->entries[i].index == index)
			return &cpuid->entries[i];
	}

	TEST_FAIL("CPUID function 0x%x index 0x%x not found ", function, index);

	return NULL;
}

uint64_t kvm_hypercall(uint64_t nr, uint64_t a0, uint64_t a1, uint64_t a2,
		       uint64_t a3)
{
	uint64_t r;

	asm volatile("vmcall"
		     : "=a"(r)
		     : "a"(nr), "b"(a0), "c"(a1), "d"(a2), "S"(a3));
	return r;
}
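
/*
 * Example (illustrative): guest code issues hypercalls as e.g.
 *
 *	ret = kvm_hypercall(KVM_HC_KICK_CPU, flags, apic_id, 0, 0);
 *
 * The helper always emits VMCALL, even on AMD hardware which natively uses
 * VMMCALL; KVM is expected to cope with the mismatched instruction, so tests
 * don't special-case the vendor here.
 */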

const struct kvm_cpuid2 *kvm_get_supported_hv_cpuid(void)
{
	static struct kvm_cpuid2 *cpuid;
	int kvm_fd;

	if (cpuid)
		return cpuid;

	cpuid = allocate_kvm_cpuid2(MAX_NR_CPUID_ENTRIES);
	kvm_fd = open_kvm_dev_path_or_exit();

	kvm_ioctl(kvm_fd, KVM_GET_SUPPORTED_HV_CPUID, cpuid);

	close(kvm_fd);
	return cpuid;
}

void vcpu_set_hv_cpuid(struct kvm_vcpu *vcpu)
{
	static struct kvm_cpuid2 *cpuid_full;
	const struct kvm_cpuid2 *cpuid_sys, *cpuid_hv;
	int i, nent = 0;

	if (!cpuid_full) {
		cpuid_sys = kvm_get_supported_cpuid();
		cpuid_hv = kvm_get_supported_hv_cpuid();

		cpuid_full = allocate_kvm_cpuid2(cpuid_sys->nent + cpuid_hv->nent);
		if (!cpuid_full) {
			perror("malloc");
			abort();
		}

		/* Need to skip KVM CPUID leaves 0x400000xx */
		for (i = 0; i < cpuid_sys->nent; i++) {
			if (cpuid_sys->entries[i].function >= 0x40000000 &&
			    cpuid_sys->entries[i].function < 0x40000100)
				continue;
			cpuid_full->entries[nent] = cpuid_sys->entries[i];
			nent++;
		}

		memcpy(&cpuid_full->entries[nent], cpuid_hv->entries,
		       cpuid_hv->nent * sizeof(struct kvm_cpuid_entry2));
		cpuid_full->nent = nent + cpuid_hv->nent;
	}

	vcpu_init_cpuid(vcpu, cpuid_full);
}

const struct kvm_cpuid2 *vcpu_get_supported_hv_cpuid(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid2 *cpuid = allocate_kvm_cpuid2(MAX_NR_CPUID_ENTRIES);

	vcpu_ioctl(vcpu, KVM_GET_SUPPORTED_HV_CPUID, cpuid);

	return cpuid;
}

unsigned long vm_compute_max_gfn(struct kvm_vm *vm)
{
	const unsigned long num_ht_pages = 12 << (30 - vm->page_shift); /* 12 GiB */
	unsigned long ht_gfn, max_gfn, max_pfn;
	uint32_t eax, ebx, ecx, edx, max_ext_leaf;

	max_gfn = (1ULL << (vm->pa_bits - vm->page_shift)) - 1;

	/* Avoid reserved HyperTransport region on AMD processors. */
	if (!is_amd_cpu())
		return max_gfn;

	/* On parts with <40 physical address bits, the area is fully hidden */
	if (vm->pa_bits < 40)
		return max_gfn;

	/* Before family 17h, the HyperTransport area is just below 1T. */
	ht_gfn = (1 << 28) - num_ht_pages;
	cpuid(1, &eax, &ebx, &ecx, &edx);
	if (x86_family(eax) < 0x17)
		goto done;

	/*
	 * Otherwise it's at the top of the physical address space, possibly
	 * reduced due to SME by bits 11:6 of CPUID[0x8000001f].EBX. Use
	 * the old conservative value if MAXPHYADDR is not enumerated.
	 */
	cpuid(0x80000000, &eax, &ebx, &ecx, &edx);
	max_ext_leaf = eax;
	if (max_ext_leaf < 0x80000008)
		goto done;

	cpuid(0x80000008, &eax, &ebx, &ecx, &edx);
	max_pfn = (1ULL << ((eax & 0xff) - vm->page_shift)) - 1;
	if (max_ext_leaf >= 0x8000001f) {
		cpuid(0x8000001f, &eax, &ebx, &ecx, &edx);
		max_pfn >>= (ebx >> 6) & 0x3f;
	}

	ht_gfn = max_pfn - num_ht_pages;
done:
	return min(max_gfn, ht_gfn - 1);
}

/* Returns true if kvm_intel was loaded with unrestricted_guest=1. */
bool vm_is_unrestricted_guest(struct kvm_vm *vm)
{
	/* Ensure that a KVM vendor-specific module is loaded. */
	if (vm == NULL)
		close(open_kvm_dev_path_or_exit());

	return get_kvm_intel_param_bool("unrestricted_guest");
}