/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * tools/testing/selftests/kvm/include/kvm_util_base.h
 *
 * Copyright (C) 2018, Google LLC.
 */
#ifndef SELFTEST_KVM_UTIL_BASE_H
#define SELFTEST_KVM_UTIL_BASE_H

#include "test_util.h"

#include <linux/compiler.h>
#include "linux/hashtable.h"
#include "linux/list.h"
#include <linux/kernel.h>
#include <linux/kvm.h>
#include "linux/rbtree.h"

#include <sys/ioctl.h>

#include "sparsebit.h"

#define KVM_DEV_PATH "/dev/kvm"
#define KVM_MAX_VCPUS 512

#define NSEC_PER_SEC 1000000000L

typedef uint64_t vm_paddr_t; /* Virtual Machine (Guest) physical address */
typedef uint64_t vm_vaddr_t; /* Virtual Machine (Guest) virtual address */

struct userspace_mem_region {
	struct kvm_userspace_memory_region region;
	struct sparsebit *unused_phy_pages;
	int fd;
	off_t offset;
	void *host_mem;
	void *host_alias;
	void *mmap_start;
	void *mmap_alias;
	size_t mmap_size;
	struct rb_node gpa_node;
	struct rb_node hva_node;
	struct hlist_node slot_node;
};

struct kvm_vcpu {
	struct list_head list;
	uint32_t id;
	int fd;
	struct kvm_vm *vm;
	struct kvm_run *run;
#ifdef __x86_64__
	struct kvm_cpuid2 *cpuid;
#endif
	struct kvm_dirty_gfn *dirty_gfns;
	uint32_t fetch_index;
	uint32_t dirty_gfns_count;
};

struct userspace_mem_regions {
	struct rb_root gpa_tree;
	struct rb_root hva_tree;
	DECLARE_HASHTABLE(slot_hash, 9);
};

struct kvm_vm {
	int mode;
	unsigned long type;
	int kvm_fd;
	int fd;
	unsigned int pgtable_levels;
	unsigned int page_size;
	unsigned int page_shift;
	unsigned int pa_bits;
	unsigned int va_bits;
	uint64_t max_gfn;
	struct list_head vcpus;
	struct userspace_mem_regions regions;
	struct sparsebit *vpages_valid;
	struct sparsebit *vpages_mapped;
	bool has_irqchip;
	bool pgd_created;
	vm_paddr_t pgd;
	vm_vaddr_t gdt;
	vm_vaddr_t tss;
	vm_vaddr_t idt;
	vm_vaddr_t handlers;
	uint32_t dirty_ring_size;

	/* Cache of information for binary stats interface */
	int stats_fd;
	struct kvm_stats_header stats_header;
	struct kvm_stats_desc *stats_desc;
};

#define kvm_for_each_vcpu(vm, i, vcpu)			\
	for ((i) = 0; (i) <= (vm)->last_vcpu_id; (i)++)	\
		if (!((vcpu) = vm->vcpus[i]))		\
			continue;			\
		else
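
/*
 * Illustrative usage (a sketch, assuming the test has already added its
 * vCPUs): run every vCPU that exists in @vm once.
 *
 *	int i;
 *	struct kvm_vcpu *vcpu;
 *
 *	kvm_for_each_vcpu(vm, i, vcpu)
 *		vcpu_run(vcpu);
 */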

struct userspace_mem_region *
memslot2region(struct kvm_vm *vm, uint32_t memslot);

/* Minimum allocated guest virtual and physical addresses */
#define KVM_UTIL_MIN_VADDR 0x2000
#define KVM_GUEST_PAGE_TABLE_MIN_PADDR 0x180000

#define DEFAULT_GUEST_STACK_VADDR_MIN 0xab6000
#define DEFAULT_STACK_PGS 5

enum vm_guest_mode {
	VM_MODE_P52V48_4K,
	VM_MODE_P52V48_64K,
	VM_MODE_P48V48_4K,
	VM_MODE_P48V48_16K,
	VM_MODE_P48V48_64K,
	VM_MODE_P40V48_4K,
	VM_MODE_P40V48_16K,
	VM_MODE_P40V48_64K,
	VM_MODE_PXXV48_4K,	/* For 48-bit VA with any number of PA bits */
	VM_MODE_P47V64_4K,
	VM_MODE_P44V64_4K,
	VM_MODE_P36V48_4K,
	VM_MODE_P36V48_16K,
	VM_MODE_P36V48_64K,
	VM_MODE_P36V47_16K,
	NUM_VM_MODES,
};

#if defined(__aarch64__)

extern enum vm_guest_mode vm_mode_default;

#define VM_MODE_DEFAULT vm_mode_default
#define MIN_PAGE_SHIFT 12U
#define ptes_per_page(page_size) ((page_size) / 8)

#elif defined(__x86_64__)

#define VM_MODE_DEFAULT VM_MODE_PXXV48_4K
#define MIN_PAGE_SHIFT 12U
#define ptes_per_page(page_size) ((page_size) / 8)

#elif defined(__s390x__)

#define VM_MODE_DEFAULT VM_MODE_P44V64_4K
#define MIN_PAGE_SHIFT 12U
#define ptes_per_page(page_size) ((page_size) / 16)

#elif defined(__riscv)

#if __riscv_xlen == 32
#error "RISC-V 32-bit kvm selftests not supported"
#endif

#define VM_MODE_DEFAULT VM_MODE_P40V48_4K
#define MIN_PAGE_SHIFT 12U
#define ptes_per_page(page_size) ((page_size) / 8)

#endif

#define MIN_PAGE_SIZE (1U << MIN_PAGE_SHIFT)
#define PTES_PER_MIN_PAGE ptes_per_page(MIN_PAGE_SIZE)

struct vm_guest_mode_params {
	unsigned int pa_bits;
	unsigned int va_bits;
	unsigned int page_size;
	unsigned int page_shift;
};
extern const struct vm_guest_mode_params vm_guest_mode_params[];

int open_path_or_exit(const char *path, int flags);
int open_kvm_dev_path_or_exit(void);

bool get_kvm_intel_param_bool(const char *param);
bool get_kvm_amd_param_bool(const char *param);

unsigned int kvm_check_cap(long cap);

static inline bool kvm_has_cap(long cap)
{
	return kvm_check_cap(cap);
}
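
/*
 * Illustrative usage: skip a test that depends on an optional capability,
 * or query a capability whose return value is a count rather than a
 * boolean.  TEST_REQUIRE() is assumed to come from test_util.h.
 *
 *	TEST_REQUIRE(kvm_has_cap(KVM_CAP_DIRTY_LOG_RING));
 *	max_vcpus = kvm_check_cap(KVM_CAP_MAX_VCPUS);
 */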

#define __KVM_SYSCALL_ERROR(_name, _ret) \
	"%s failed, rc: %i errno: %i (%s)", (_name), (_ret), errno, strerror(errno)

#define __KVM_IOCTL_ERROR(_name, _ret)	__KVM_SYSCALL_ERROR(_name, _ret)
#define KVM_IOCTL_ERROR(_ioctl, _ret) __KVM_IOCTL_ERROR(#_ioctl, _ret)

#define kvm_do_ioctl(fd, cmd, arg)						\
({										\
	static_assert(!_IOC_SIZE(cmd) || sizeof(*arg) == _IOC_SIZE(cmd), "");	\
	ioctl(fd, cmd, arg);							\
})

#define __kvm_ioctl(kvm_fd, cmd, arg)				\
	kvm_do_ioctl(kvm_fd, cmd, arg)

#define _kvm_ioctl(kvm_fd, cmd, name, arg)			\
({								\
	int ret = __kvm_ioctl(kvm_fd, cmd, arg);		\
								\
	TEST_ASSERT(!ret, __KVM_IOCTL_ERROR(name, ret));	\
})

#define kvm_ioctl(kvm_fd, cmd, arg) \
	_kvm_ioctl(kvm_fd, cmd, #cmd, arg)

static __always_inline void static_assert_is_vm(struct kvm_vm *vm) { }

#define __vm_ioctl(vm, cmd, arg)				\
({								\
	static_assert_is_vm(vm);				\
	kvm_do_ioctl((vm)->fd, cmd, arg);			\
})

#define _vm_ioctl(vm, cmd, name, arg)				\
({								\
	int ret = __vm_ioctl(vm, cmd, arg);			\
								\
	TEST_ASSERT(!ret, __KVM_IOCTL_ERROR(name, ret));	\
})

#define vm_ioctl(vm, cmd, arg) \
	_vm_ioctl(vm, cmd, #cmd, arg)

static __always_inline void static_assert_is_vcpu(struct kvm_vcpu *vcpu) { }

#define __vcpu_ioctl(vcpu, cmd, arg)				\
({								\
	static_assert_is_vcpu(vcpu);				\
	kvm_do_ioctl((vcpu)->fd, cmd, arg);			\
})

#define _vcpu_ioctl(vcpu, cmd, name, arg)			\
({								\
	int ret = __vcpu_ioctl(vcpu, cmd, arg);			\
								\
	TEST_ASSERT(!ret, __KVM_IOCTL_ERROR(name, ret));	\
})

#define vcpu_ioctl(vcpu, cmd, arg) \
	_vcpu_ioctl(vcpu, cmd, #cmd, arg)
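
/*
 * Naming convention, shown by example: the double-underscore variants
 * return the raw ioctl() result and leave error handling to the caller,
 * while the plain variants assert success and kill the test on failure.
 *
 *	// Caller handles failure, e.g. to probe for an expected -EINVAL:
 *	int ret = __vcpu_ioctl(vcpu, KVM_SET_SREGS, &sregs);
 *
 *	// Test dies with a descriptive message on any failure:
 *	vcpu_ioctl(vcpu, KVM_SET_SREGS, &sregs);
 */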

/*
 * Looks up and returns the value corresponding to the capability
 * (KVM_CAP_*) given by cap.
 */
static inline int vm_check_cap(struct kvm_vm *vm, long cap)
{
	int ret = __vm_ioctl(vm, KVM_CHECK_EXTENSION, (void *)cap);

	TEST_ASSERT(ret >= 0, KVM_IOCTL_ERROR(KVM_CHECK_EXTENSION, ret));
	return ret;
}

static inline int __vm_enable_cap(struct kvm_vm *vm, uint32_t cap, uint64_t arg0)
{
	struct kvm_enable_cap enable_cap = { .cap = cap, .args = { arg0 } };

	return __vm_ioctl(vm, KVM_ENABLE_CAP, &enable_cap);
}

static inline void vm_enable_cap(struct kvm_vm *vm, uint32_t cap, uint64_t arg0)
{
	struct kvm_enable_cap enable_cap = { .cap = cap, .args = { arg0 } };

	vm_ioctl(vm, KVM_ENABLE_CAP, &enable_cap);
}

void vm_enable_dirty_ring(struct kvm_vm *vm, uint32_t ring_size);
const char *vm_guest_mode_string(uint32_t i);

void kvm_vm_free(struct kvm_vm *vmp);
void kvm_vm_restart(struct kvm_vm *vmp);
void kvm_vm_release(struct kvm_vm *vmp);
int kvm_memcmp_hva_gva(void *hva, struct kvm_vm *vm, const vm_vaddr_t gva,
		       size_t len);
void kvm_vm_elf_load(struct kvm_vm *vm, const char *filename);
int kvm_memfd_alloc(size_t size, bool hugepages);

void vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent);

static inline void kvm_vm_get_dirty_log(struct kvm_vm *vm, int slot, void *log)
{
	struct kvm_dirty_log args = { .dirty_bitmap = log, .slot = slot };

	vm_ioctl(vm, KVM_GET_DIRTY_LOG, &args);
}

static inline void kvm_vm_clear_dirty_log(struct kvm_vm *vm, int slot, void *log,
					  uint64_t first_page, uint32_t num_pages)
{
	struct kvm_clear_dirty_log args = {
		.dirty_bitmap = log,
		.slot = slot,
		.first_page = first_page,
		.num_pages = num_pages
	};

	vm_ioctl(vm, KVM_CLEAR_DIRTY_LOG, &args);
}
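
/*
 * Illustrative sketch: harvest and reset the dirty bitmap for a memslot.
 * TEST_MEM_SLOT, bitmap and num_pages are placeholders; the clear step
 * assumes KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 was enabled on the VM.
 *
 *	kvm_vm_get_dirty_log(vm, TEST_MEM_SLOT, bitmap);
 *	... walk bitmap and verify the expected pages were dirtied ...
 *	kvm_vm_clear_dirty_log(vm, TEST_MEM_SLOT, bitmap, 0, num_pages);
 */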

static inline uint32_t kvm_vm_reset_dirty_ring(struct kvm_vm *vm)
{
	return __vm_ioctl(vm, KVM_RESET_DIRTY_RINGS, NULL);
}

static inline int vm_get_stats_fd(struct kvm_vm *vm)
{
	int fd = __vm_ioctl(vm, KVM_GET_STATS_FD, NULL);

	TEST_ASSERT(fd >= 0, KVM_IOCTL_ERROR(KVM_GET_STATS_FD, fd));
	return fd;
}

static inline void read_stats_header(int stats_fd, struct kvm_stats_header *header)
{
	ssize_t ret;

	ret = read(stats_fd, header, sizeof(*header));
	TEST_ASSERT(ret == sizeof(*header), "Read stats header");
}

struct kvm_stats_desc *read_stats_descriptors(int stats_fd,
					      struct kvm_stats_header *header);

static inline ssize_t get_stats_descriptor_size(struct kvm_stats_header *header)
{
	/*
	 * The base size of the descriptor is defined by KVM's ABI, but the
	 * size of the name field is variable, as far as KVM's ABI is
	 * concerned.  For a given instance of KVM, the name field is the
	 * same size for all stats and is provided in the overall stats
	 * header.
	 */
	return sizeof(struct kvm_stats_desc) + header->name_size;
}

static inline struct kvm_stats_desc *get_stats_descriptor(struct kvm_stats_desc *stats,
							   int index,
							   struct kvm_stats_header *header)
{
	/*
	 * Note, size_desc includes the size of the name field, which is
	 * variable, i.e. this is NOT equivalent to &stats_desc[i].
	 */
	return (void *)stats + index * get_stats_descriptor_size(header);
}

void read_stat_data(int stats_fd, struct kvm_stats_header *header,
		    struct kvm_stats_desc *desc, uint64_t *data,
		    size_t max_elements);

void __vm_get_stat(struct kvm_vm *vm, const char *stat_name, uint64_t *data,
		   size_t max_elements);

static inline uint64_t vm_get_stat(struct kvm_vm *vm, const char *stat_name)
{
	uint64_t data;

	__vm_get_stat(vm, stat_name, &data, 1);
	return data;
}
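
/*
 * Illustrative sketch: walk every stat exposed by the binary stats
 * interface and read the first data element of each.
 *
 *	int stats_fd = vm_get_stats_fd(vm);
 *	struct kvm_stats_header header;
 *	struct kvm_stats_desc *descs, *desc;
 *	uint64_t val;
 *	int i;
 *
 *	read_stats_header(stats_fd, &header);
 *	descs = read_stats_descriptors(stats_fd, &header);
 *	for (i = 0; i < header.num_desc; i++) {
 *		desc = get_stats_descriptor(descs, i, &header);
 *		read_stat_data(stats_fd, &header, desc, &val, 1);
 *	}
 *
 * For a single stat of known name, vm_get_stat() is simpler (stat names
 * vary by architecture).
 */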

void vm_create_irqchip(struct kvm_vm *vm);

void vm_set_user_memory_region(struct kvm_vm *vm, uint32_t slot, uint32_t flags,
			       uint64_t gpa, uint64_t size, void *hva);
int __vm_set_user_memory_region(struct kvm_vm *vm, uint32_t slot, uint32_t flags,
				uint64_t gpa, uint64_t size, void *hva);
void vm_userspace_mem_region_add(struct kvm_vm *vm,
				 enum vm_mem_backing_src_type src_type,
				 uint64_t guest_paddr, uint32_t slot,
				 uint64_t npages, uint32_t flags);

void vm_mem_region_set_flags(struct kvm_vm *vm, uint32_t slot, uint32_t flags);
void vm_mem_region_move(struct kvm_vm *vm, uint32_t slot, uint64_t new_gpa);
void vm_mem_region_delete(struct kvm_vm *vm, uint32_t slot);
struct kvm_vcpu *__vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id);
vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min);
vm_vaddr_t vm_vaddr_alloc_pages(struct kvm_vm *vm, int nr_pages);
vm_vaddr_t vm_vaddr_alloc_page(struct kvm_vm *vm);

void virt_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
	      unsigned int npages);
void *addr_gpa2hva(struct kvm_vm *vm, vm_paddr_t gpa);
void *addr_gva2hva(struct kvm_vm *vm, vm_vaddr_t gva);
vm_paddr_t addr_hva2gpa(struct kvm_vm *vm, void *hva);
void *addr_gpa2alias(struct kvm_vm *vm, vm_paddr_t gpa);
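
/*
 * Illustrative sketch: add a memslot backed by anonymous memory, map it
 * 1:1 into the guest's page tables, and poke it from the host.  The slot
 * number, GPA and page count are arbitrary placeholders.
 *
 *	vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
 *				    0x10000000, 1, 512, 0);
 *	virt_map(vm, 0x10000000, 0x10000000, 512);
 *	memset(addr_gpa2hva(vm, 0x10000000), 0xaa, 512 * vm->page_size);
 */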

void vcpu_run(struct kvm_vcpu *vcpu);
int _vcpu_run(struct kvm_vcpu *vcpu);

static inline int __vcpu_run(struct kvm_vcpu *vcpu)
{
	return __vcpu_ioctl(vcpu, KVM_RUN, NULL);
}

void vcpu_run_complete_io(struct kvm_vcpu *vcpu);
struct kvm_reg_list *vcpu_get_reg_list(struct kvm_vcpu *vcpu);

static inline void vcpu_enable_cap(struct kvm_vcpu *vcpu, uint32_t cap,
				   uint64_t arg0)
{
	struct kvm_enable_cap enable_cap = { .cap = cap, .args = { arg0 } };

	vcpu_ioctl(vcpu, KVM_ENABLE_CAP, &enable_cap);
}

static inline void vcpu_guest_debug_set(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *debug)
{
	vcpu_ioctl(vcpu, KVM_SET_GUEST_DEBUG, debug);
}

static inline void vcpu_mp_state_get(struct kvm_vcpu *vcpu,
				     struct kvm_mp_state *mp_state)
{
	vcpu_ioctl(vcpu, KVM_GET_MP_STATE, mp_state);
}

static inline void vcpu_mp_state_set(struct kvm_vcpu *vcpu,
				     struct kvm_mp_state *mp_state)
{
	vcpu_ioctl(vcpu, KVM_SET_MP_STATE, mp_state);
}

static inline void vcpu_regs_get(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	vcpu_ioctl(vcpu, KVM_GET_REGS, regs);
}

static inline void vcpu_regs_set(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	vcpu_ioctl(vcpu, KVM_SET_REGS, regs);
}

static inline void vcpu_sregs_get(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
	vcpu_ioctl(vcpu, KVM_GET_SREGS, sregs);
}

static inline void vcpu_sregs_set(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
	vcpu_ioctl(vcpu, KVM_SET_SREGS, sregs);
}

static inline int _vcpu_sregs_set(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
	return __vcpu_ioctl(vcpu, KVM_SET_SREGS, sregs);
}

static inline void vcpu_fpu_get(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	vcpu_ioctl(vcpu, KVM_GET_FPU, fpu);
}

static inline void vcpu_fpu_set(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	vcpu_ioctl(vcpu, KVM_SET_FPU, fpu);
}

static inline int __vcpu_get_reg(struct kvm_vcpu *vcpu, uint64_t id, void *addr)
{
	struct kvm_one_reg reg = { .id = id, .addr = (uint64_t)addr };

	return __vcpu_ioctl(vcpu, KVM_GET_ONE_REG, &reg);
}

static inline int __vcpu_set_reg(struct kvm_vcpu *vcpu, uint64_t id, uint64_t val)
{
	struct kvm_one_reg reg = { .id = id, .addr = (uint64_t)&val };

	return __vcpu_ioctl(vcpu, KVM_SET_ONE_REG, &reg);
}

static inline void vcpu_get_reg(struct kvm_vcpu *vcpu, uint64_t id, void *addr)
{
	struct kvm_one_reg reg = { .id = id, .addr = (uint64_t)addr };

	vcpu_ioctl(vcpu, KVM_GET_ONE_REG, &reg);
}

static inline void vcpu_set_reg(struct kvm_vcpu *vcpu, uint64_t id, uint64_t val)
{
	struct kvm_one_reg reg = { .id = id, .addr = (uint64_t)&val };

	vcpu_ioctl(vcpu, KVM_SET_ONE_REG, &reg);
}
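
/*
 * Illustrative sketch: read and rewrite a register via the ONE_REG API.
 * reg_id is a placeholder; register IDs are architecture-specific and the
 * helpers to build them live in the per-arch headers.
 *
 *	uint64_t val;
 *
 *	vcpu_get_reg(vcpu, reg_id, &val);
 *	vcpu_set_reg(vcpu, reg_id, val + 4);
 *
 * Use __vcpu_set_reg() instead when the test must tolerate or check for
 * failure, e.g. when probing for unsupported registers.
 */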

#ifdef __KVM_HAVE_VCPU_EVENTS
static inline void vcpu_events_get(struct kvm_vcpu *vcpu,
				   struct kvm_vcpu_events *events)
{
	vcpu_ioctl(vcpu, KVM_GET_VCPU_EVENTS, events);
}

static inline void vcpu_events_set(struct kvm_vcpu *vcpu,
				   struct kvm_vcpu_events *events)
{
	vcpu_ioctl(vcpu, KVM_SET_VCPU_EVENTS, events);
}
#endif

#ifdef __x86_64__
static inline void vcpu_nested_state_get(struct kvm_vcpu *vcpu,
					 struct kvm_nested_state *state)
{
	vcpu_ioctl(vcpu, KVM_GET_NESTED_STATE, state);
}

static inline int __vcpu_nested_state_set(struct kvm_vcpu *vcpu,
					  struct kvm_nested_state *state)
{
	return __vcpu_ioctl(vcpu, KVM_SET_NESTED_STATE, state);
}

static inline void vcpu_nested_state_set(struct kvm_vcpu *vcpu,
					 struct kvm_nested_state *state)
{
	vcpu_ioctl(vcpu, KVM_SET_NESTED_STATE, state);
}
#endif

static inline int vcpu_get_stats_fd(struct kvm_vcpu *vcpu)
{
	int fd = __vcpu_ioctl(vcpu, KVM_GET_STATS_FD, NULL);

	TEST_ASSERT(fd >= 0, KVM_IOCTL_ERROR(KVM_GET_STATS_FD, fd));
	return fd;
}

int __kvm_has_device_attr(int dev_fd, uint32_t group, uint64_t attr);

static inline void kvm_has_device_attr(int dev_fd, uint32_t group, uint64_t attr)
{
	int ret = __kvm_has_device_attr(dev_fd, group, attr);

	TEST_ASSERT(!ret, "KVM_HAS_DEVICE_ATTR failed, rc: %i errno: %i", ret, errno);
}

int __kvm_device_attr_get(int dev_fd, uint32_t group, uint64_t attr, void *val);

static inline void kvm_device_attr_get(int dev_fd, uint32_t group,
				       uint64_t attr, void *val)
{
	int ret = __kvm_device_attr_get(dev_fd, group, attr, val);

	TEST_ASSERT(!ret, KVM_IOCTL_ERROR(KVM_GET_DEVICE_ATTR, ret));
}

int __kvm_device_attr_set(int dev_fd, uint32_t group, uint64_t attr, void *val);

static inline void kvm_device_attr_set(int dev_fd, uint32_t group,
				       uint64_t attr, void *val)
{
	int ret = __kvm_device_attr_set(dev_fd, group, attr, val);

	TEST_ASSERT(!ret, KVM_IOCTL_ERROR(KVM_SET_DEVICE_ATTR, ret));
}

static inline int __vcpu_has_device_attr(struct kvm_vcpu *vcpu, uint32_t group,
					 uint64_t attr)
{
	return __kvm_has_device_attr(vcpu->fd, group, attr);
}

static inline void vcpu_has_device_attr(struct kvm_vcpu *vcpu, uint32_t group,
					uint64_t attr)
{
	kvm_has_device_attr(vcpu->fd, group, attr);
}

static inline int __vcpu_device_attr_get(struct kvm_vcpu *vcpu, uint32_t group,
					 uint64_t attr, void *val)
{
	return __kvm_device_attr_get(vcpu->fd, group, attr, val);
}

static inline void vcpu_device_attr_get(struct kvm_vcpu *vcpu, uint32_t group,
					uint64_t attr, void *val)
{
	kvm_device_attr_get(vcpu->fd, group, attr, val);
}

static inline int __vcpu_device_attr_set(struct kvm_vcpu *vcpu, uint32_t group,
					 uint64_t attr, void *val)
{
	return __kvm_device_attr_set(vcpu->fd, group, attr, val);
}

static inline void vcpu_device_attr_set(struct kvm_vcpu *vcpu, uint32_t group,
					uint64_t attr, void *val)
{
	kvm_device_attr_set(vcpu->fd, group, attr, val);
}

int __kvm_test_create_device(struct kvm_vm *vm, uint64_t type);
int __kvm_create_device(struct kvm_vm *vm, uint64_t type);

static inline int kvm_create_device(struct kvm_vm *vm, uint64_t type)
{
	int fd = __kvm_create_device(vm, type);

	TEST_ASSERT(fd >= 0, KVM_IOCTL_ERROR(KVM_CREATE_DEVICE, fd));
	return fd;
}
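
/*
 * Illustrative sketch: create an in-kernel device and program one of its
 * attributes, using the arm64 vGIC as a stand-in.  Group/attr values are
 * device-specific; consult the uAPI headers for the device in question.
 *
 *	int gic_fd = kvm_create_device(vm, KVM_DEV_TYPE_ARM_VGIC_V3);
 *	uint64_t redist_base = 0x80000000;
 *
 *	kvm_device_attr_set(gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR,
 *			    KVM_VGIC_V3_ADDR_TYPE_REDIST, &redist_base);
 */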

void *vcpu_map_dirty_ring(struct kvm_vcpu *vcpu);

/*
 * VM VCPU Args Set
 *
 * Input Args:
 *   vm - Virtual Machine
 *   num - number of arguments
 *   ... - arguments, each of type uint64_t
 *
 * Output Args: None
 *
 * Return: None
 *
 * Sets the first @num input parameters for the function at @vcpu's entry
 * point, per the C calling convention of the architecture, to the values
 * given as variable args.  Each of the variable args is expected to be of
 * type uint64_t.  The maximum @num is architecture-specific.
 */
void vcpu_args_set(struct kvm_vcpu *vcpu, unsigned int num, ...);
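
/*
 * Illustrative usage: pass two parameters to a guest entry point declared
 * as void guest_code(uint64_t base, uint64_t nr_pages) (placeholders).
 *
 *	vcpu_args_set(vcpu, 2, guest_test_mem, guest_test_pages);
 */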

void kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level);
int _kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level);

#define KVM_MAX_IRQ_ROUTES 4096

struct kvm_irq_routing *kvm_gsi_routing_create(void);
void kvm_gsi_routing_irqchip_add(struct kvm_irq_routing *routing,
				 uint32_t gsi, uint32_t pin);
int _kvm_gsi_routing_write(struct kvm_vm *vm, struct kvm_irq_routing *routing);
void kvm_gsi_routing_write(struct kvm_vm *vm, struct kvm_irq_routing *routing);
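
/*
 * Illustrative sketch: route GSI 0 to irqchip pin 0 and commit the
 * routing table to the VM.
 *
 *	struct kvm_irq_routing *routing = kvm_gsi_routing_create();
 *
 *	kvm_gsi_routing_irqchip_add(routing, 0, 0);
 *	kvm_gsi_routing_write(vm, routing);
 */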

const char *exit_reason_str(unsigned int exit_reason);

vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min,
			     uint32_t memslot);
vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
			      vm_paddr_t paddr_min, uint32_t memslot);
vm_paddr_t vm_alloc_page_table(struct kvm_vm *vm);

/*
 * ____vm_create() does KVM_CREATE_VM and little else.  __vm_create() also
 * loads the test binary into guest memory and creates an IRQ chip (x86 only).
 * __vm_create() does NOT create vCPUs; @nr_runnable_vcpus is used purely to
 * calculate the amount of memory needed for per-vCPU data, e.g. stacks.
 */
struct kvm_vm *____vm_create(enum vm_guest_mode mode, uint64_t nr_pages);
struct kvm_vm *__vm_create(enum vm_guest_mode mode, uint32_t nr_runnable_vcpus,
			   uint64_t nr_extra_pages);

static inline struct kvm_vm *vm_create_barebones(void)
{
	return ____vm_create(VM_MODE_DEFAULT, 0);
}

static inline struct kvm_vm *vm_create(uint32_t nr_runnable_vcpus)
{
	return __vm_create(VM_MODE_DEFAULT, nr_runnable_vcpus, 0);
}

struct kvm_vm *__vm_create_with_vcpus(enum vm_guest_mode mode, uint32_t nr_vcpus,
				      uint64_t extra_mem_pages,
				      void *guest_code, struct kvm_vcpu *vcpus[]);

static inline struct kvm_vm *vm_create_with_vcpus(uint32_t nr_vcpus,
						  void *guest_code,
						  struct kvm_vcpu *vcpus[])
{
	return __vm_create_with_vcpus(VM_MODE_DEFAULT, nr_vcpus, 0,
				      guest_code, vcpus);
}

/*
 * Create a VM with a single vCPU with reasonable defaults and @extra_mem_pages
 * additional pages of guest memory.  Returns the VM and vCPU (via out param).
 */
struct kvm_vm *__vm_create_with_one_vcpu(struct kvm_vcpu **vcpu,
					 uint64_t extra_mem_pages,
					 void *guest_code);

static inline struct kvm_vm *vm_create_with_one_vcpu(struct kvm_vcpu **vcpu,
						     void *guest_code)
{
	return __vm_create_with_one_vcpu(vcpu, 0, guest_code);
}
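
/*
 * Illustrative sketch of the typical shape of a selftest main() built on
 * these helpers.  guest_main is a placeholder for the test's guest code;
 * exit handling (e.g. ucall-based GUEST_SYNC/GUEST_DONE) is test-specific
 * and omitted here.
 *
 *	struct kvm_vcpu *vcpu;
 *	struct kvm_vm *vm;
 *
 *	vm = vm_create_with_one_vcpu(&vcpu, guest_main);
 *	vcpu_run(vcpu);
 *	... inspect vcpu->run->exit_reason, handle ucalls ...
 *	kvm_vm_free(vm);
 */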

struct kvm_vcpu *vm_recreate_with_one_vcpu(struct kvm_vm *vm);

unsigned long vm_compute_max_gfn(struct kvm_vm *vm);
unsigned int vm_calc_num_guest_pages(enum vm_guest_mode mode, size_t size);
unsigned int vm_num_host_pages(enum vm_guest_mode mode, unsigned int num_guest_pages);
unsigned int vm_num_guest_pages(enum vm_guest_mode mode, unsigned int num_host_pages);

static inline unsigned int
vm_adjust_num_guest_pages(enum vm_guest_mode mode, unsigned int num_guest_pages)
{
	unsigned int n;

	n = vm_num_guest_pages(mode, vm_num_host_pages(mode, num_guest_pages));
#ifdef __s390x__
	/* s390 requires 1M aligned guest sizes */
	n = (n + 255) & ~255;
#endif
	return n;
}

struct kvm_userspace_memory_region *
kvm_userspace_memory_region_find(struct kvm_vm *vm, uint64_t start,
				 uint64_t end);

#define sync_global_to_guest(vm, g) ({				\
	typeof(g) *_p = addr_gva2hva(vm, (vm_vaddr_t)&(g));	\
	memcpy(_p, &(g), sizeof(g));				\
})

#define sync_global_from_guest(vm, g) ({			\
	typeof(g) *_p = addr_gva2hva(vm, (vm_vaddr_t)&(g));	\
	memcpy(&(g), _p, sizeof(g));				\
})
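
/*
 * Illustrative usage: host and guest are linked into the same binary, so a
 * global shared with the guest must be explicitly copied into (and, if the
 * guest writes it, back out of) guest memory.
 *
 *	static uint64_t test_iterations;	// placeholder global
 *
 *	test_iterations = 100;
 *	sync_global_to_guest(vm, test_iterations);
 *	... run guest ...
 *	sync_global_from_guest(vm, test_iterations);
 */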

void assert_on_unhandled_exception(struct kvm_vcpu *vcpu);

void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu,
		    uint8_t indent);

static inline void vcpu_dump(FILE *stream, struct kvm_vcpu *vcpu,
			     uint8_t indent)
{
	vcpu_arch_dump(stream, vcpu, indent);
}

/*
 * Adds a vCPU with reasonable defaults (e.g. a stack)
 *
 * Input Args:
 *   vm - Virtual Machine
 *   vcpu_id - The id of the VCPU to add to the VM.
 *   guest_code - The vCPU's entry point
 */
struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
				  void *guest_code);

static inline struct kvm_vcpu *vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
					   void *guest_code)
{
	return vm_arch_vcpu_add(vm, vcpu_id, guest_code);
}

/* Re-create a vCPU after restarting a VM, e.g. for state save/restore tests. */
struct kvm_vcpu *vm_arch_vcpu_recreate(struct kvm_vm *vm, uint32_t vcpu_id);

static inline struct kvm_vcpu *vm_vcpu_recreate(struct kvm_vm *vm,
						uint32_t vcpu_id)
{
	return vm_arch_vcpu_recreate(vm, vcpu_id);
}

void vcpu_arch_free(struct kvm_vcpu *vcpu);

void virt_arch_pgd_alloc(struct kvm_vm *vm);

static inline void virt_pgd_alloc(struct kvm_vm *vm)
{
	virt_arch_pgd_alloc(vm);
}

/*
 * VM Virtual Page Map
 *
 * Input Args:
 *   vm - Virtual Machine
 *   vaddr - VM Virtual Address
 *   paddr - VM Physical Address
 *
 * Output Args: None
 *
 * Return: None
 *
 * Within @vm, creates a virtual translation for the page starting
 * at @vaddr to the page starting at @paddr.
 */
void virt_arch_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr);

static inline void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr)
{
	virt_arch_pg_map(vm, vaddr, paddr);
}

/*
 * Address Guest Virtual to Guest Physical
 *
 * Input Args:
 *   vm - Virtual Machine
 *   gva - VM virtual address
 *
 * Output Args: None
 *
 * Return:
 *   Equivalent VM physical address
 *
 * Returns the VM physical address of the translated VM virtual
 * address given by @gva.
 */
vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva);

static inline vm_paddr_t addr_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
{
	return addr_arch_gva2gpa(vm, gva);
}

/*
 * Virtual Translation Tables Dump
 *
 * Input Args:
 *   stream - Output FILE stream
 *   vm - Virtual Machine
 *   indent - Left margin indent amount
 *
 * Output Args: None
 *
 * Return: None
 *
 * Dumps to the FILE stream given by @stream, the contents of all the
 * virtual translation tables for the VM given by @vm.
 */
void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent);

static inline void virt_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
{
	virt_arch_dump(stream, vm, indent);
}

static inline int __vm_disable_nx_huge_pages(struct kvm_vm *vm)
{
	return __vm_enable_cap(vm, KVM_CAP_VM_DISABLE_NX_HUGE_PAGES, 0);
}

#endif /* SELFTEST_KVM_UTIL_BASE_H */