// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2022 Oracle and/or its affiliates.
 *
 * Based on:
 *   svm_int_ctl_test
 *
 *   Copyright (C) 2021, Red Hat, Inc.
 *
 */
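/*
 * Test that KVM correctly injects soft interrupts, a #BP exception and NMIs
 * into a nested (L2) guest, relying on nRIP save support so that the RIP
 * seen by the guest handlers and the post-event resume point are the
 * expected ones.
 */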

#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>
#include "apic.h"
#include "kvm_util.h"
#include "processor.h"
#include "svm_util.h"
#include "test_util.h"

#define INT_NR			0x20

static_assert(ATOMIC_INT_LOCK_FREE == 2, "atomic int is not lockless");

static unsigned int bp_fired;
static void guest_bp_handler(struct ex_regs *regs)
{
	bp_fired++;
}

static unsigned int int_fired;
static void l2_guest_code_int(void);

static void guest_int_handler(struct ex_regs *regs)
{
	int_fired++;
	GUEST_ASSERT_2(regs->rip == (unsigned long)l2_guest_code_int,
		       regs->rip, (unsigned long)l2_guest_code_int);
}

static void l2_guest_code_int(void)
{
	GUEST_ASSERT_1(int_fired == 1, int_fired);
	vmmcall();
	ud2();

	GUEST_ASSERT_1(bp_fired == 1, bp_fired);
	hlt();
}

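/*
 * NMI progress counter shared between L1 code and the guest NMI handler;
 * it is updated from NMI context, hence the lock-free atomic (see the
 * static_assert above).
 */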
static atomic_int nmi_stage;
#define nmi_stage_get() atomic_load_explicit(&nmi_stage, memory_order_acquire)
#define nmi_stage_inc() atomic_fetch_add_explicit(&nmi_stage, 1, memory_order_acq_rel)
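/*
 * Stage 1: the NMI injected into L2 by L1; VMMCALL back to L1.
 * Stage 2: set by L1 after queueing the self-NMI while GIF is clear.
 * Stage 3: the self-NMI delivered to L1 right after STGI; test is done.
 */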
static void guest_nmi_handler(struct ex_regs *regs)
{
	nmi_stage_inc();

	if (nmi_stage_get() == 1) {
		vmmcall();
		GUEST_ASSERT(false);
	} else {
		GUEST_ASSERT_1(nmi_stage_get() == 3, nmi_stage_get());
		GUEST_DONE();
	}
}

static void l2_guest_code_nmi(void)
{
	ud2();
}

static void l1_guest_code(struct svm_test_data *svm, uint64_t is_nmi, uint64_t idt_alt)
{
	#define L2_GUEST_STACK_SIZE 64
	unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
	struct vmcb *vmcb = svm->vmcb;

	if (is_nmi)
		x2apic_enable();

	/* Prepare for L2 execution. */
	generic_svm_setup(svm,
			  is_nmi ? l2_guest_code_nmi : l2_guest_code_int,
			  &l2_guest_stack[L2_GUEST_STACK_SIZE]);

	vmcb->control.intercept_exceptions |= BIT(PF_VECTOR) | BIT(UD_VECTOR);
	vmcb->control.intercept |= BIT(INTERCEPT_NMI) | BIT(INTERCEPT_HLT);

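	/*
	 * Queue the event for direct injection into L2 on the next VMRUN:
	 * either an NMI or a soft interrupt through vector INT_NR.
	 */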
	if (is_nmi) {
		vmcb->control.event_inj = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI;
	} else {
		vmcb->control.event_inj = INT_NR | SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_SOFT;
		/* The return address pushed on stack */
		vmcb->control.next_rip = vmcb->save.rip;
	}

	run_guest(vmcb, svm->vmcb_gpa);
	GUEST_ASSERT_3(vmcb->control.exit_code == SVM_EXIT_VMMCALL,
		       vmcb->control.exit_code,
		       vmcb->control.exit_info_1, vmcb->control.exit_info_2);

	if (is_nmi) {
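		/*
		 * With GIF cleared, the self-NMI sent below stays pending
		 * and is only delivered after the STGI further down.
		 */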
		clgi();
		x2apic_write_reg(APIC_ICR, APIC_DEST_SELF | APIC_INT_ASSERT | APIC_DM_NMI);

		GUEST_ASSERT_1(nmi_stage_get() == 1, nmi_stage_get());
		nmi_stage_inc();

		stgi();
		/* self-NMI happens here */
		while (true)
			cpu_relax();
	}

	/* Skip over VMMCALL */
	vmcb->save.rip += 3;

	/* Switch to alternate IDT to cause intervening NPF again */
	vmcb->save.idtr.base = idt_alt;
	vmcb->control.clean = 0; /* &= ~BIT(VMCB_DT) would be enough */

	vmcb->control.event_inj = BP_VECTOR | SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_EXEPT;
	/* The return address pushed on stack, skip over UD2 */
	vmcb->control.next_rip = vmcb->save.rip + 2;

	run_guest(vmcb, svm->vmcb_gpa);
	GUEST_ASSERT_3(vmcb->control.exit_code == SVM_EXIT_HLT,
		       vmcb->control.exit_code,
		       vmcb->control.exit_info_1, vmcb->control.exit_info_2);

	GUEST_DONE();
}

static void run_test(bool is_nmi)
{
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm;
	vm_vaddr_t svm_gva;
	vm_vaddr_t idt_alt_vm;
	struct kvm_guest_debug debug;

	pr_info("Running %s test\n", is_nmi ? "NMI" : "soft int");

	vm = vm_create_with_one_vcpu(&vcpu, l1_guest_code);

	vm_init_descriptor_tables(vm);
	vcpu_init_descriptor_tables(vcpu);

	vm_install_exception_handler(vm, NMI_VECTOR, guest_nmi_handler);
	vm_install_exception_handler(vm, BP_VECTOR, guest_bp_handler);
	vm_install_exception_handler(vm, INT_NR, guest_int_handler);

	vcpu_alloc_svm(vm, &svm_gva);

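	/*
	 * For the soft interrupt case, prepare a copy of the IDT on a fresh
	 * guest page; L1 points L2 at it so that delivering the injected #BP
	 * walks a page KVM has not faulted in yet, forcing an intervening
	 * NPF mid-injection (see l1_guest_code).
	 */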
	if (!is_nmi) {
		void *idt, *idt_alt;

		idt_alt_vm = vm_vaddr_alloc_page(vm);
		idt_alt = addr_gva2hva(vm, idt_alt_vm);
		idt = addr_gva2hva(vm, vm->idt);
		memcpy(idt_alt, idt, getpagesize());
	} else {
		idt_alt_vm = 0;
	}
	vcpu_args_set(vcpu, 3, svm_gva, (uint64_t)is_nmi, (uint64_t)idt_alt_vm);

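	/* Set (empty) guest debug state to also cover KVM_SET_GUEST_DEBUG. */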
	memset(&debug, 0, sizeof(debug));
	vcpu_guest_debug_set(vcpu, &debug);

	struct kvm_run *run = vcpu->run;
	struct ucall uc;

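	/* Guard against the test hanging: SIGALRM aborts it after 2 seconds. */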
	alarm(2);
	vcpu_run(vcpu);
	alarm(0);
	TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
		    "Got exit_reason other than KVM_EXIT_IO: %u (%s)\n",
		    run->exit_reason,
		    exit_reason_str(run->exit_reason));

	switch (get_ucall(vcpu, &uc)) {
	case UCALL_ABORT:
		REPORT_GUEST_ASSERT_3(uc, "vals = 0x%lx 0x%lx 0x%lx");
		break;
		/* NOT REACHED */
	case UCALL_DONE:
		goto done;
	default:
		TEST_FAIL("Unknown ucall 0x%lx.", uc.cmd);
	}
done:
	kvm_vm_free(vm);
}

int main(int argc, char *argv[])
{
	/* Tell stdout not to buffer its content */
	setbuf(stdout, NULL);

	TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_SVM));

	TEST_ASSERT(kvm_cpu_has(X86_FEATURE_NRIPS),
		    "KVM with nSVM is supposed to unconditionally advertise nRIP Save");

	atomic_init(&nmi_stage, 0);

	run_test(false);
	run_test(true);

	return 0;
}