1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * vmx_tsc_adjust_test
4  *
5  * Copyright (C) 2018, Google LLC.
6  *
7  * IA32_TSC_ADJUST test
8  *
9  * According to the SDM, "if an execution of WRMSR to the
10  * IA32_TIME_STAMP_COUNTER MSR adds (or subtracts) value X from the TSC,
11  * the logical processor also adds (or subtracts) value X from the
12  * IA32_TSC_ADJUST MSR.
13  *
14  * Note that when L1 doesn't intercept writes to IA32_TSC, a
15  * WRMSR(IA32_TSC) from L2 sets L1's TSC value, not L2's perceived TSC
16  * value.
17  *
18  * This test verifies that this unusual case is handled correctly.
19  */
20 
#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"
#include "vmx.h"

#include <inttypes.h>
#include <string.h>
#include <sys/ioctl.h>

#include "kselftest.h"
30 
/* Fallback definition in case the toolchain's headers lack this MSR index. */
#ifndef MSR_IA32_TSC_ADJUST
#define MSR_IA32_TSC_ADJUST 0x3b
#endif

/* Amount subtracted from the TSC via WRMSR(IA32_TSC): 2^32 cycles. */
#define TSC_ADJUST_VALUE (1ll << 32)
/* VMCS TSC offset applied to L2; negative so L2's TSC lags L1's. */
#define TSC_OFFSET_VALUE -(1ll << 48)

/*
 * Guest->host I/O port protocol.
 * NOTE(review): these ports appear unused in this file — reporting goes
 * through ucalls (GUEST_SYNC/GUEST_DONE) instead; confirm before removing.
 */
enum {
	PORT_ABORT = 0x1000,
	PORT_REPORT,
	PORT_DONE,
};

/* Indices of the per-vCPU VMX setup pages. */
enum {
	VMXON_PAGE = 0,
	VMCS_PAGE,
	MSR_BITMAP_PAGE,

	NUM_VMX_PAGES,
};

/* A KVM_SET_MSRS payload carrying exactly one MSR entry. */
struct kvm_single_msr {
	struct kvm_msrs header;
	struct kvm_msr_entry entry;
} __attribute__((packed));

/* The virtual machine object. */
static struct kvm_vm *vm;
59 
check_ia32_tsc_adjust(int64_t max)60 static void check_ia32_tsc_adjust(int64_t max)
61 {
62 	int64_t adjust;
63 
64 	adjust = rdmsr(MSR_IA32_TSC_ADJUST);
65 	GUEST_SYNC(adjust);
66 	GUEST_ASSERT(adjust <= max);
67 }
68 
l2_guest_code(void)69 static void l2_guest_code(void)
70 {
71 	uint64_t l1_tsc = rdtsc() - TSC_OFFSET_VALUE;
72 
73 	wrmsr(MSR_IA32_TSC, l1_tsc - TSC_ADJUST_VALUE);
74 	check_ia32_tsc_adjust(-2 * TSC_ADJUST_VALUE);
75 
76 	/* Exit to L1 */
77 	__asm__ __volatile__("vmcall");
78 }
79 
/*
 * L1 entry point: verify IA32_TSC_ADJUST tracking in L1 itself, then set up
 * L2 with TSC offsetting and check the adjust value after both a failed and
 * a successful VM-entry.
 */
static void l1_guest_code(struct vmx_pages *vmx_pages)
{
#define L2_GUEST_STACK_SIZE 64
	unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
	uint32_t control;
	uintptr_t save_cr3;

	/*
	 * Subtract TSC_ADJUST_VALUE via WRMSR(IA32_TSC); per the SDM the
	 * same delta must appear in IA32_TSC_ADJUST.
	 */
	GUEST_ASSERT(rdtsc() < TSC_ADJUST_VALUE);
	wrmsr(MSR_IA32_TSC, rdtsc() - TSC_ADJUST_VALUE);
	check_ia32_tsc_adjust(-1 * TSC_ADJUST_VALUE);

	GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));
	GUEST_ASSERT(load_vmcs(vmx_pages));

	/* Prepare the VMCS for L2 execution. */
	prepare_vmcs(vmx_pages, l2_guest_code,
		     &l2_guest_stack[L2_GUEST_STACK_SIZE]);
	control = vmreadz(CPU_BASED_VM_EXEC_CONTROL);
	control |= CPU_BASED_USE_MSR_BITMAPS | CPU_BASED_USE_TSC_OFFSETTING;
	vmwrite(CPU_BASED_VM_EXEC_CONTROL, control);
	vmwrite(TSC_OFFSET, TSC_OFFSET_VALUE);

	/* Jump into L2.  First, test failure to load guest CR3.  */
	save_cr3 = vmreadz(GUEST_CR3);
	vmwrite(GUEST_CR3, -1ull);
	GUEST_ASSERT(!vmlaunch());
	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) ==
		     (EXIT_REASON_FAILED_VMENTRY | EXIT_REASON_INVALID_STATE));
	/* A failed VM-entry must not have disturbed IA32_TSC_ADJUST. */
	check_ia32_tsc_adjust(-1 * TSC_ADJUST_VALUE);
	vmwrite(GUEST_CR3, save_cr3);

	GUEST_ASSERT(!vmlaunch());
	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);

	/* L2 wrote IA32_TSC; the extra -TSC_ADJUST_VALUE must be visible. */
	check_ia32_tsc_adjust(-2 * TSC_ADJUST_VALUE);

	GUEST_DONE();
}
118 
report(int64_t val)119 static void report(int64_t val)
120 {
121 	pr_info("IA32_TSC_ADJUST is %ld (%lld * TSC_ADJUST_VALUE + %lld).\n",
122 		val, val / TSC_ADJUST_VALUE, val % TSC_ADJUST_VALUE);
123 }
124 
main(int argc,char * argv[])125 int main(int argc, char *argv[])
126 {
127 	vm_vaddr_t vmx_pages_gva;
128 	struct kvm_vcpu *vcpu;
129 
130 	TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_VMX));
131 
132 	vm = vm_create_with_one_vcpu(&vcpu, (void *) l1_guest_code);
133 
134 	/* Allocate VMX pages and shared descriptors (vmx_pages). */
135 	vcpu_alloc_vmx(vm, &vmx_pages_gva);
136 	vcpu_args_set(vcpu, 1, vmx_pages_gva);
137 
138 	for (;;) {
139 		volatile struct kvm_run *run = vcpu->run;
140 		struct ucall uc;
141 
142 		vcpu_run(vcpu);
143 		TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
144 			    "Got exit_reason other than KVM_EXIT_IO: %u (%s)\n",
145 			    run->exit_reason,
146 			    exit_reason_str(run->exit_reason));
147 
148 		switch (get_ucall(vcpu, &uc)) {
149 		case UCALL_ABORT:
150 			REPORT_GUEST_ASSERT(uc);
151 			/* NOT REACHED */
152 		case UCALL_SYNC:
153 			report(uc.args[1]);
154 			break;
155 		case UCALL_DONE:
156 			goto done;
157 		default:
158 			TEST_FAIL("Unknown ucall %lu", uc.cmd);
159 		}
160 	}
161 
162 done:
163 	kvm_vm_free(vm);
164 	return 0;
165 }
166