// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2021, Google LLC.
 *
 * Tests for adjusting the KVM clock from userspace
 */
#include <asm/kvm_para.h>
#include <asm/pvclock.h>
#include <asm/pvclock-abi.h>
#include <stdint.h>
#include <string.h>
#include <sys/stat.h>
#include <time.h>

#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"

#define VCPU_ID 0

struct test_case {
	uint64_t kvmclock_base;
	int64_t realtime_offset;
};

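/*
 * Each case programs the KVM clock to a given base value and, optionally,
 * pairs it with the host's CLOCK_REALTIME shifted by the given offset.
 */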
static struct test_case test_cases[] = {
	{ .kvmclock_base = 0 },
	{ .kvmclock_base = 180 * NSEC_PER_SEC },
	{ .kvmclock_base = 0, .realtime_offset = -180 * NSEC_PER_SEC },
	{ .kvmclock_base = 0, .realtime_offset = 180 * NSEC_PER_SEC },
};

#define GUEST_SYNC_CLOCK(__stage, __val)			\
		GUEST_SYNC_ARGS(__stage, __val, 0, 0, 0)

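/*
 * Guest code: enable kvmclock by writing the guest physical address of the
 * pvclock structure (with the enable bit set) to MSR_KVM_SYSTEM_TIME_NEW,
 * then report one pvclock reading back to the host per test case.
 */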
static void guest_main(vm_paddr_t pvti_pa, struct pvclock_vcpu_time_info *pvti)
{
	int i;

	wrmsr(MSR_KVM_SYSTEM_TIME_NEW, pvti_pa | KVM_MSR_ENABLED);
	for (i = 0; i < ARRAY_SIZE(test_cases); i++)
		GUEST_SYNC_CLOCK(i, __pvclock_read_cycles(pvti, rdtsc()));
}

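/*
 * With the host running on the TSC clocksource, KVM_GET_CLOCK is expected to
 * report both a realtime pairing and a host TSC reading.
 */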
#define EXPECTED_FLAGS (KVM_CLOCK_REALTIME | KVM_CLOCK_HOST_TSC)

static inline void assert_flags(struct kvm_clock_data *data)
{
	TEST_ASSERT((data->flags & EXPECTED_FLAGS) == EXPECTED_FLAGS,
		    "unexpected clock data flags: %x (want set: %x)",
		    data->flags, EXPECTED_FLAGS);
}

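/*
 * The value the guest read from kvmclock must fall within the host's
 * KVM_GET_CLOCK readings taken immediately before and after the vCPU ran.
 */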
static void handle_sync(struct ucall *uc, struct kvm_clock_data *start,
			struct kvm_clock_data *end)
{
	uint64_t obs, exp_lo, exp_hi;

	obs = uc->args[2];
	exp_lo = start->clock;
	exp_hi = end->clock;

	assert_flags(start);
	assert_flags(end);

	TEST_ASSERT(exp_lo <= obs && obs <= exp_hi,
		    "unexpected kvm-clock value: %"PRIu64" expected range: [%"PRIu64", %"PRIu64"]",
		    obs, exp_lo, exp_hi);

	pr_info("kvm-clock value: %"PRIu64" expected range [%"PRIu64", %"PRIu64"]\n",
		obs, exp_lo, exp_hi);
}

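/* Report a guest-side assertion failure as a host test failure. */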
static void handle_abort(struct ucall *uc)
{
	TEST_FAIL("%s at %s:%ld", (const char *)uc->args[0],
		  __FILE__, uc->args[1]);
}

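/*
 * Program the KVM clock for the current test case via KVM_SET_CLOCK.  When a
 * realtime offset is requested, pair the kvmclock value with the host's
 * CLOCK_REALTIME shifted by that offset.
 */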
static void setup_clock(struct kvm_vm *vm, struct test_case *test_case)
{
	struct kvm_clock_data data;

	memset(&data, 0, sizeof(data));

	data.clock = test_case->kvmclock_base;
	if (test_case->realtime_offset) {
		struct timespec ts;
		int r;

		data.flags |= KVM_CLOCK_REALTIME;
		do {
			r = clock_gettime(CLOCK_REALTIME, &ts);
			if (!r)
				break;
		} while (errno == EINTR);

		TEST_ASSERT(!r, "clock_gettime() failed: %d\n", r);

		data.realtime = ts.tv_sec * NSEC_PER_SEC;
		data.realtime += ts.tv_nsec;
		data.realtime += test_case->realtime_offset;
	}

	vm_ioctl(vm, KVM_SET_CLOCK, &data);
}

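/*
 * For each test case: program the clock, sample it with KVM_GET_CLOCK before
 * and after running the vCPU, and check the guest's observation against
 * those bounds.
 */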
static void enter_guest(struct kvm_vm *vm)
{
	struct kvm_clock_data start, end;
	struct kvm_run *run;
	struct ucall uc;
	int i, r;

	run = vcpu_state(vm, VCPU_ID);

	for (i = 0; i < ARRAY_SIZE(test_cases); i++) {
		setup_clock(vm, &test_cases[i]);

		vm_ioctl(vm, KVM_GET_CLOCK, &start);

		r = _vcpu_run(vm, VCPU_ID);
		vm_ioctl(vm, KVM_GET_CLOCK, &end);

		TEST_ASSERT(!r, "vcpu_run failed: %d\n", r);
		TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
			    "unexpected exit reason: %u (%s)",
			    run->exit_reason, exit_reason_str(run->exit_reason));

		switch (get_ucall(vm, VCPU_ID, &uc)) {
		case UCALL_SYNC:
			handle_sync(&uc, &start, &end);
			break;
		case UCALL_ABORT:
			handle_abort(&uc);
			return;
		default:
			TEST_ASSERT(0, "unhandled ucall: %ld\n", uc.cmd);
		}
	}
}

#define CLOCKSOURCE_PATH "/sys/devices/system/clocksource/clocksource0/current_clocksource"

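/*
 * KVM_CLOCK_HOST_TSC is only meaningful when the host itself uses the TSC
 * clocksource.  If the sysfs file cannot be read, carry on assuming TSC; if
 * it names a different clocksource, fail the test.
 */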
static void check_clocksource(void)
{
	char *clk_name;
	struct stat st;
	FILE *fp;

	fp = fopen(CLOCKSOURCE_PATH, "r");
	if (!fp) {
		pr_info("failed to open clocksource file: %d; assuming TSC.\n",
			errno);
		return;
	}

	if (fstat(fileno(fp), &st)) {
		pr_info("failed to stat clocksource file: %d; assuming TSC.\n",
			errno);
		goto out;
	}

	clk_name = malloc(st.st_size);
	TEST_ASSERT(clk_name, "failed to allocate buffer to read file\n");

	if (!fgets(clk_name, st.st_size, fp)) {
		pr_info("failed to read clocksource file: %d; assuming TSC.\n",
			ferror(fp));
		goto out;
	}

	TEST_ASSERT(!strncmp(clk_name, "tsc\n", st.st_size),
		    "clocksource not supported: %s", clk_name);
out:
	fclose(fp);
}

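/*
 * Skip the test unless KVM_CAP_ADJUST_CLOCK advertises KVM_CLOCK_REALTIME,
 * then hand the guest a page for its pvclock structure (GPA for the MSR
 * write, GVA for reading it back) and run the test cases.
 */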
int main(void)
{
	vm_vaddr_t pvti_gva;
	vm_paddr_t pvti_gpa;
	struct kvm_vm *vm;
	int flags;

	flags = kvm_check_cap(KVM_CAP_ADJUST_CLOCK);
	if (!(flags & KVM_CLOCK_REALTIME)) {
		print_skip("KVM_CLOCK_REALTIME not supported; flags: %x",
			   flags);
		exit(KSFT_SKIP);
	}

	check_clocksource();

	vm = vm_create_default(VCPU_ID, 0, guest_main);

	pvti_gva = vm_vaddr_alloc(vm, getpagesize(), 0x10000);
	pvti_gpa = addr_gva2gpa(vm, pvti_gva);
	vcpu_args_set(vm, VCPU_ID, 2, pvti_gpa, pvti_gva);

	enter_guest(vm);
	kvm_vm_free(vm);
}