1 // SPDX-License-Identifier: GPL-2.0-only
#include <linux/kvm.h>
#include <linux/psp-sev.h>
#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <unistd.h>
9
10 #include "test_util.h"
11 #include "kvm_util.h"
12 #include "processor.h"
13 #include "svm_util.h"
14 #include "kselftest.h"
15 #include "../lib/kvm_util_internal.h"
16
#define SEV_POLICY_ES 0b100	/* bit 2 of the launch policy: guest requires SEV-ES */

/* Sizing for the migration, mirror, and locking stress tests below. */
#define NR_MIGRATE_TEST_VCPUS 4
#define NR_MIGRATE_TEST_VMS 3
#define NR_LOCK_TESTING_THREADS 3
#define NR_LOCK_TESTING_ITERATIONS 10000

/* Set in main() from CPUID 0x8000001f:EAX; gates all SEV-ES test variants. */
bool have_sev_es;
25
/*
 * Issue a raw KVM_MEMORY_ENCRYPT_OP (SEV command) against a VM fd.
 *
 * Returns the ioctl()'s return value (errno set on failure) and stores the
 * PSP firmware error code in @fw_error.  @data may be NULL for commands
 * that take no payload.
 */
static int __sev_ioctl(int vm_fd, int cmd_id, void *data, __u32 *fw_error)
{
	struct kvm_sev_cmd cmd = {
		.id = cmd_id,
		.data = (uint64_t)data,
		.sev_fd = open_sev_dev_path_or_exit(),
	};
	int ret, saved_errno;

	ret = ioctl(vm_fd, KVM_MEMORY_ENCRYPT_OP, &cmd);
	*fw_error = cmd.error;

	/*
	 * Don't leak the /dev/sev fd opened above; this helper is called
	 * thousands of times by the locking test.  Preserve the ioctl()'s
	 * errno across close() so callers can still report it.
	 */
	saved_errno = errno;
	close(cmd.sev_fd);
	errno = saved_errno;

	return ret;
}
39
sev_ioctl(int vm_fd,int cmd_id,void * data)40 static void sev_ioctl(int vm_fd, int cmd_id, void *data)
41 {
42 int ret;
43 __u32 fw_error;
44
45 ret = __sev_ioctl(vm_fd, cmd_id, data, &fw_error);
46 TEST_ASSERT(ret == 0 && fw_error == SEV_RET_SUCCESS,
47 "%d failed: return code: %d, errno: %d, fw error: %d",
48 cmd_id, ret, errno, fw_error);
49 }
50
/*
 * Create a VM with NR_MIGRATE_TEST_VCPUS vCPUs and take it through SEV
 * (or SEV-ES, when @es is true) guest launch.
 */
static struct kvm_vm *sev_vm_create(bool es)
{
	struct kvm_sev_launch_start start = {
		.policy = es ? SEV_POLICY_ES : 0,
	};
	struct kvm_vm *vm;
	int vcpu_id;

	vm = vm_create(VM_MODE_DEFAULT, 0, O_RDWR);
	sev_ioctl(vm->fd, es ? KVM_SEV_ES_INIT : KVM_SEV_INIT, NULL);
	for (vcpu_id = 0; vcpu_id < NR_MIGRATE_TEST_VCPUS; ++vcpu_id)
		vm_vcpu_add(vm, vcpu_id);
	sev_ioctl(vm->fd, KVM_SEV_LAUNCH_START, &start);
	/* SEV-ES guests additionally need their VMSAs encrypted. */
	if (es)
		sev_ioctl(vm->fd, KVM_SEV_LAUNCH_UPDATE_VMSA, NULL);
	return vm;
}
68
/* Create a plain (non-SEV) VM, optionally populated with test vCPUs. */
static struct kvm_vm *aux_vm_create(bool with_vcpus)
{
	struct kvm_vm *vm = vm_create(VM_MODE_DEFAULT, 0, O_RDWR);
	int vcpu_id;

	if (with_vcpus) {
		for (vcpu_id = 0; vcpu_id < NR_MIGRATE_TEST_VCPUS; ++vcpu_id)
			vm_vcpu_add(vm, vcpu_id);
	}

	return vm;
}
83
__sev_migrate_from(int dst_fd,int src_fd)84 static int __sev_migrate_from(int dst_fd, int src_fd)
85 {
86 struct kvm_enable_cap cap = {
87 .cap = KVM_CAP_VM_MOVE_ENC_CONTEXT_FROM,
88 .args = { src_fd }
89 };
90
91 return ioctl(dst_fd, KVM_ENABLE_CAP, &cap);
92 }
93
94
/* Migrate the encryption context and assert the migration succeeded. */
static void sev_migrate_from(int dst_fd, int src_fd)
{
	int ret = __sev_migrate_from(dst_fd, src_fd);

	TEST_ASSERT(!ret, "Migration failed, ret: %d, errno: %d\n", ret, errno);
}
102
/*
 * Daisy-chain an SEV (or SEV-ES) context through a series of destination
 * VMs, then verify the drained source VM can no longer be migrated from.
 */
static void test_sev_migrate_from(bool es)
{
	struct kvm_vm *dst_vms[NR_MIGRATE_TEST_VMS];
	struct kvm_vm *src_vm = sev_vm_create(es);
	int i, ret;

	for (i = 0; i < NR_MIGRATE_TEST_VMS; ++i)
		dst_vms[i] = aux_vm_create(true);

	/* Initial migration from the src to the first dst. */
	sev_migrate_from(dst_vms[0]->fd, src_vm->fd);

	/* Then pass the context down the chain of destinations. */
	for (i = 1; i < NR_MIGRATE_TEST_VMS; i++)
		sev_migrate_from(dst_vms[i]->fd, dst_vms[i - 1]->fd);

	/* Migrate the guest back to the original VM. */
	ret = __sev_migrate_from(src_vm->fd, dst_vms[NR_MIGRATE_TEST_VMS - 1]->fd);
	TEST_ASSERT(ret == -1 && errno == EIO,
		    "VM that was migrated from should be dead. ret %d, errno: %d\n", ret,
		    errno);

	kvm_vm_free(src_vm);
	for (i = 0; i < NR_MIGRATE_TEST_VMS; ++i)
		kvm_vm_free(dst_vms[i]);
}
129
/* Per-thread argument for locking_test_thread(). */
struct locking_thread_input {
	struct kvm_vm *vm;	/* destination VM this thread migrates into */
	/* fds of all the test VMs; identical copy in every thread's input */
	int source_fds[NR_LOCK_TESTING_THREADS];
};
134
locking_test_thread(void * arg)135 static void *locking_test_thread(void *arg)
136 {
137 int i, j;
138 struct locking_thread_input *input = (struct locking_thread_input *)arg;
139
140 for (i = 0; i < NR_LOCK_TESTING_ITERATIONS; ++i) {
141 j = i % NR_LOCK_TESTING_THREADS;
142 __sev_migrate_from(input->vm->fd, input->source_fds[j]);
143 }
144
145 return NULL;
146 }
147
test_sev_migrate_locking(void)148 static void test_sev_migrate_locking(void)
149 {
150 struct locking_thread_input input[NR_LOCK_TESTING_THREADS];
151 pthread_t pt[NR_LOCK_TESTING_THREADS];
152 int i;
153
154 for (i = 0; i < NR_LOCK_TESTING_THREADS; ++i) {
155 input[i].vm = sev_vm_create(/* es= */ false);
156 input[0].source_fds[i] = input[i].vm->fd;
157 }
158 for (i = 1; i < NR_LOCK_TESTING_THREADS; ++i)
159 memcpy(input[i].source_fds, input[0].source_fds,
160 sizeof(input[i].source_fds));
161
162 for (i = 0; i < NR_LOCK_TESTING_THREADS; ++i)
163 pthread_create(&pt[i], NULL, locking_test_thread, &input[i]);
164
165 for (i = 0; i < NR_LOCK_TESTING_THREADS; ++i)
166 pthread_join(pt[i], NULL);
167 for (i = 0; i < NR_LOCK_TESTING_THREADS; ++i)
168 kvm_vm_free(input[i].vm);
169 }
170
/*
 * Negative tests: each migration below violates one ABI requirement and
 * must be rejected with EINVAL (no SEV context, SEV vs. SEV-ES mismatch,
 * mismatched vCPU counts, or a missing LAUNCH_UPDATE_VMSA).
 */
static void test_sev_migrate_parameters(void)
{
	struct kvm_vm *sev_vm, *sev_es_vm, *vm_no_vcpu, *vm_no_sev,
		*sev_es_vm_no_vmsa;
	int ret;

	vm_no_vcpu = vm_create(VM_MODE_DEFAULT, 0, O_RDWR);
	vm_no_sev = aux_vm_create(true);
	/* Source VM never ran SEV_INIT, so it has no context to move. */
	ret = __sev_migrate_from(vm_no_vcpu->fd, vm_no_sev->fd);
	TEST_ASSERT(ret == -1 && errno == EINVAL,
		    "Migrations require SEV enabled. ret %d, errno: %d\n", ret,
		    errno);

	/* The remaining cases all require SEV-ES support. */
	if (!have_sev_es)
		goto out;

	sev_vm = sev_vm_create(/* es= */ false);
	sev_es_vm = sev_vm_create(/* es= */ true);
	/* An SEV-ES VM that is initialized but never ran LAUNCH_UPDATE_VMSA. */
	sev_es_vm_no_vmsa = vm_create(VM_MODE_DEFAULT, 0, O_RDWR);
	sev_ioctl(sev_es_vm_no_vmsa->fd, KVM_SEV_ES_INIT, NULL);
	vm_vcpu_add(sev_es_vm_no_vmsa, 1);

	/* SEV-ES context into a plain SEV VM must be rejected. */
	ret = __sev_migrate_from(sev_vm->fd, sev_es_vm->fd);
	TEST_ASSERT(
		ret == -1 && errno == EINVAL,
		"Should not be able migrate to SEV enabled VM. ret: %d, errno: %d\n",
		ret, errno);

	/* ...and vice versa: SEV context into an SEV-ES VM. */
	ret = __sev_migrate_from(sev_es_vm->fd, sev_vm->fd);
	TEST_ASSERT(
		ret == -1 && errno == EINVAL,
		"Should not be able migrate to SEV-ES enabled VM. ret: %d, errno: %d\n",
		ret, errno);

	/* Destination has zero vCPUs while the SEV-ES source has several. */
	ret = __sev_migrate_from(vm_no_vcpu->fd, sev_es_vm->fd);
	TEST_ASSERT(
		ret == -1 && errno == EINVAL,
		"SEV-ES migrations require same number of vCPUS. ret: %d, errno: %d\n",
		ret, errno);

	/* Source never encrypted its VMSAs. */
	ret = __sev_migrate_from(vm_no_vcpu->fd, sev_es_vm_no_vmsa->fd);
	TEST_ASSERT(
		ret == -1 && errno == EINVAL,
		"SEV-ES migrations require UPDATE_VMSA. ret %d, errno: %d\n",
		ret, errno);

	kvm_vm_free(sev_vm);
	kvm_vm_free(sev_es_vm);
	kvm_vm_free(sev_es_vm_no_vmsa);
out:
	kvm_vm_free(vm_no_vcpu);
	kvm_vm_free(vm_no_sev);
}
224
__sev_mirror_create(int dst_fd,int src_fd)225 static int __sev_mirror_create(int dst_fd, int src_fd)
226 {
227 struct kvm_enable_cap cap = {
228 .cap = KVM_CAP_VM_COPY_ENC_CONTEXT_FROM,
229 .args = { src_fd }
230 };
231
232 return ioctl(dst_fd, KVM_ENABLE_CAP, &cap);
233 }
234
235
/* Mirror the encryption context and assert the copy succeeded. */
static void sev_mirror_create(int dst_fd, int src_fd)
{
	int ret = __sev_mirror_create(dst_fd, src_fd);

	TEST_ASSERT(!ret, "Copying context failed, ret: %d, errno: %d\n", ret, errno);
}
243
verify_mirror_allowed_cmds(int vm_fd)244 static void verify_mirror_allowed_cmds(int vm_fd)
245 {
246 struct kvm_sev_guest_status status;
247
248 for (int cmd_id = KVM_SEV_INIT; cmd_id < KVM_SEV_NR_MAX; ++cmd_id) {
249 int ret;
250 __u32 fw_error;
251
252 /*
253 * These commands are allowed for mirror VMs, all others are
254 * not.
255 */
256 switch (cmd_id) {
257 case KVM_SEV_LAUNCH_UPDATE_VMSA:
258 case KVM_SEV_GUEST_STATUS:
259 case KVM_SEV_DBG_DECRYPT:
260 case KVM_SEV_DBG_ENCRYPT:
261 continue;
262 default:
263 break;
264 }
265
266 /*
267 * These commands should be disallowed before the data
268 * parameter is examined so NULL is OK here.
269 */
270 ret = __sev_ioctl(vm_fd, cmd_id, NULL, &fw_error);
271 TEST_ASSERT(
272 ret == -1 && errno == EINVAL,
273 "Should not be able call command: %d. ret: %d, errno: %d\n",
274 cmd_id, ret, errno);
275 }
276
277 sev_ioctl(vm_fd, KVM_SEV_GUEST_STATUS, &status);
278 }
279
/*
 * Mirror an SEV (or SEV-ES) context into a fresh VM, finish bringing the
 * mirror up, and verify its restricted SEV command set.
 */
static void test_sev_mirror(bool es)
{
	struct kvm_vm *src_vm = sev_vm_create(es);
	struct kvm_vm *dst_vm = aux_vm_create(false);
	int vcpu_id;

	sev_mirror_create(dst_vm->fd, src_vm->fd);

	/* Check that we can complete creation of the mirror VM. */
	for (vcpu_id = 0; vcpu_id < NR_MIGRATE_TEST_VCPUS; ++vcpu_id)
		vm_vcpu_add(dst_vm, vcpu_id);

	if (es)
		sev_ioctl(dst_vm->fd, KVM_SEV_LAUNCH_UPDATE_VMSA, NULL);

	verify_mirror_allowed_cmds(dst_vm->fd);

	kvm_vm_free(src_vm);
	kvm_vm_free(dst_vm);
}
302
/*
 * Negative tests for context copying: every __sev_mirror_create() below
 * violates one ABI requirement and must be rejected with EINVAL
 * (self-copy, no SEV source, destination already has vCPUs, or the
 * destination already has an SEV/SEV-ES context).
 */
static void test_sev_mirror_parameters(void)
{
	struct kvm_vm *sev_vm, *sev_es_vm, *vm_no_vcpu, *vm_with_vcpu;
	int ret;

	sev_vm = sev_vm_create(/* es= */ false);
	vm_with_vcpu = aux_vm_create(true);
	vm_no_vcpu = aux_vm_create(false);

	/* Copying a VM's context onto itself must fail. */
	ret = __sev_mirror_create(sev_vm->fd, sev_vm->fd);
	TEST_ASSERT(
		ret == -1 && errno == EINVAL,
		"Should not be able copy context to self. ret: %d, errno: %d\n",
		ret, errno);

	/* Source VM has no SEV context to copy. */
	ret = __sev_mirror_create(vm_no_vcpu->fd, vm_with_vcpu->fd);
	TEST_ASSERT(ret == -1 && errno == EINVAL,
		    "Copy context requires SEV enabled. ret %d, errno: %d\n", ret,
		    errno);

	/* Destination must not have any vCPUs yet. */
	ret = __sev_mirror_create(vm_with_vcpu->fd, sev_vm->fd);
	TEST_ASSERT(
		ret == -1 && errno == EINVAL,
		"SEV copy context requires no vCPUS on the destination. ret: %d, errno: %d\n",
		ret, errno);

	/* The remaining cases require SEV-ES support. */
	if (!have_sev_es)
		goto out;

	sev_es_vm = sev_vm_create(/* es= */ true);
	/* Destination already holds an SEV context. */
	ret = __sev_mirror_create(sev_vm->fd, sev_es_vm->fd);
	TEST_ASSERT(
		ret == -1 && errno == EINVAL,
		"Should not be able copy context to SEV enabled VM. ret: %d, errno: %d\n",
		ret, errno);

	/* Destination already holds an SEV-ES context. */
	ret = __sev_mirror_create(sev_es_vm->fd, sev_vm->fd);
	TEST_ASSERT(
		ret == -1 && errno == EINVAL,
		"Should not be able copy context to SEV-ES enabled VM. ret: %d, errno: %d\n",
		ret, errno);

	kvm_vm_free(sev_es_vm);

out:
	kvm_vm_free(sev_vm);
	kvm_vm_free(vm_with_vcpu);
	kvm_vm_free(vm_no_vcpu);
}
352
/*
 * Interleave context moves (migrations) of a VM and of its mirror, then
 * exercise both teardown orders: mirrored VMs destroyed before their
 * mirrors, and mirrors destroyed first.
 */
static void test_sev_move_copy(void)
{
	struct kvm_vm *dst_vm, *dst2_vm, *dst3_vm, *sev_vm, *mirror_vm,
		*dst_mirror_vm, *dst2_mirror_vm, *dst3_mirror_vm;

	sev_vm = sev_vm_create(/* es= */ false);
	dst_vm = aux_vm_create(true);
	dst2_vm = aux_vm_create(true);
	dst3_vm = aux_vm_create(true);
	mirror_vm = aux_vm_create(false);
	dst_mirror_vm = aux_vm_create(false);
	dst2_mirror_vm = aux_vm_create(false);
	dst3_mirror_vm = aux_vm_create(false);

	sev_mirror_create(mirror_vm->fd, sev_vm->fd);

	/* Migrate the mirror and the original, in alternating orders. */
	sev_migrate_from(dst_mirror_vm->fd, mirror_vm->fd);
	sev_migrate_from(dst_vm->fd, sev_vm->fd);

	sev_migrate_from(dst2_vm->fd, dst_vm->fd);
	sev_migrate_from(dst2_mirror_vm->fd, dst_mirror_vm->fd);

	sev_migrate_from(dst3_mirror_vm->fd, dst2_mirror_vm->fd);
	sev_migrate_from(dst3_vm->fd, dst2_vm->fd);

	kvm_vm_free(dst_vm);
	kvm_vm_free(sev_vm);
	kvm_vm_free(dst2_vm);
	kvm_vm_free(dst3_vm);
	kvm_vm_free(mirror_vm);
	kvm_vm_free(dst_mirror_vm);
	kvm_vm_free(dst2_mirror_vm);
	kvm_vm_free(dst3_mirror_vm);

	/*
	 * Run a similar test, but destroy the mirrors before the mirrored
	 * VMs to ensure destruction is done safely in that order too.
	 */
	sev_vm = sev_vm_create(/* es= */ false);
	dst_vm = aux_vm_create(true);
	mirror_vm = aux_vm_create(false);
	dst_mirror_vm = aux_vm_create(false);

	sev_mirror_create(mirror_vm->fd, sev_vm->fd);

	sev_migrate_from(dst_mirror_vm->fd, mirror_vm->fd);
	sev_migrate_from(dst_vm->fd, sev_vm->fd);

	kvm_vm_free(mirror_vm);
	kvm_vm_free(dst_mirror_vm);
	kvm_vm_free(dst_vm);
	kvm_vm_free(sev_vm);
}
406
/* Feature bits in CPUID 0x8000001f:EAX, checked in main(). */
#define X86_FEATURE_SEV (1 << 1)
#define X86_FEATURE_SEV_ES (1 << 3)
409
main(int argc,char * argv[])410 int main(int argc, char *argv[])
411 {
412 struct kvm_cpuid_entry2 *cpuid;
413
414 if (!kvm_check_cap(KVM_CAP_VM_MOVE_ENC_CONTEXT_FROM) &&
415 !kvm_check_cap(KVM_CAP_VM_COPY_ENC_CONTEXT_FROM)) {
416 print_skip("Capabilities not available");
417 exit(KSFT_SKIP);
418 }
419
420 cpuid = kvm_get_supported_cpuid_entry(0x80000000);
421 if (cpuid->eax < 0x8000001f) {
422 print_skip("AMD memory encryption not available");
423 exit(KSFT_SKIP);
424 }
425 cpuid = kvm_get_supported_cpuid_entry(0x8000001f);
426 if (!(cpuid->eax & X86_FEATURE_SEV)) {
427 print_skip("AMD SEV not available");
428 exit(KSFT_SKIP);
429 }
430 have_sev_es = !!(cpuid->eax & X86_FEATURE_SEV_ES);
431
432 if (kvm_check_cap(KVM_CAP_VM_MOVE_ENC_CONTEXT_FROM)) {
433 test_sev_migrate_from(/* es= */ false);
434 if (have_sev_es)
435 test_sev_migrate_from(/* es= */ true);
436 test_sev_migrate_locking();
437 test_sev_migrate_parameters();
438 if (kvm_check_cap(KVM_CAP_VM_COPY_ENC_CONTEXT_FROM))
439 test_sev_move_copy();
440 }
441 if (kvm_check_cap(KVM_CAP_VM_COPY_ENC_CONTEXT_FROM)) {
442 test_sev_mirror(/* es= */ false);
443 if (have_sev_es)
444 test_sev_mirror(/* es= */ true);
445 test_sev_mirror_parameters();
446 }
447 return 0;
448 }
449