1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Kernel-based Virtual Machine driver for Linux
4 *
5 * AMD SVM support
6 *
7 * Copyright (C) 2006 Qumranet, Inc.
8 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
9 *
10 * Authors:
11 * Yaniv Kamay <yaniv@qumranet.com>
12 * Avi Kivity <avi@qumranet.com>
13 */
14
15 #define pr_fmt(fmt) "SVM: " fmt
16
17 #include <linux/kvm_types.h>
18 #include <linux/kvm_host.h>
19 #include <linux/kernel.h>
20
21 #include <asm/msr-index.h>
22 #include <asm/debugreg.h>
23
24 #include "kvm_emulate.h"
25 #include "trace.h"
26 #include "mmu.h"
27 #include "x86.h"
28 #include "cpuid.h"
29 #include "lapic.h"
30 #include "svm.h"
31 #include "hyperv.h"
32
33 #define CC KVM_NESTED_VMENTER_CONSISTENCY_CHECK
34
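/*
 * Installed as the nested-NPT MMU's inject_page_fault callback (see
 * nested_svm_init_mmu_context() below): a fault hit while walking L1's
 * nested page tables is reflected into L1 as an SVM_EXIT_NPF VMEXIT, with
 * the low 32 bits of exit_info_1 carrying the fault's error code.
 */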
static void nested_svm_inject_npf_exit(struct kvm_vcpu *vcpu,
36 struct x86_exception *fault)
37 {
38 struct vcpu_svm *svm = to_svm(vcpu);
39 struct vmcb *vmcb = svm->vmcb;
40
41 if (vmcb->control.exit_code != SVM_EXIT_NPF) {
42 /*
43 * TODO: track the cause of the nested page fault, and
44 * correctly fill in the high bits of exit_info_1.
45 */
46 vmcb->control.exit_code = SVM_EXIT_NPF;
47 vmcb->control.exit_code_hi = 0;
48 vmcb->control.exit_info_1 = (1ULL << 32);
49 vmcb->control.exit_info_2 = fault->address;
50 }
51
52 vmcb->control.exit_info_1 &= ~0xffffffffULL;
53 vmcb->control.exit_info_1 |= fault->error_code;
54
55 nested_svm_vmexit(svm);
56 }
57
static bool nested_svm_handle_page_fault_workaround(struct kvm_vcpu *vcpu,
59 struct x86_exception *fault)
60 {
61 struct vcpu_svm *svm = to_svm(vcpu);
62 struct vmcb *vmcb = svm->vmcb;
63
64 WARN_ON(!is_guest_mode(vcpu));
65
66 if (vmcb12_is_intercept(&svm->nested.ctl,
67 INTERCEPT_EXCEPTION_OFFSET + PF_VECTOR) &&
68 !WARN_ON_ONCE(svm->nested.nested_run_pending)) {
69 vmcb->control.exit_code = SVM_EXIT_EXCP_BASE + PF_VECTOR;
70 vmcb->control.exit_code_hi = 0;
71 vmcb->control.exit_info_1 = fault->error_code;
72 vmcb->control.exit_info_2 = fault->address;
73 nested_svm_vmexit(svm);
74 return true;
75 }
76
77 return false;
78 }
79
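/*
 * Read a single 8-byte PDPTE at @index from the PDPT that vmcb12's
 * nested_cr3 points at; returns 0 if the guest page cannot be read.
 * Used as the get_pdptr callback of the nested NPT MMU.
 */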
static u64 nested_svm_get_tdp_pdptr(struct kvm_vcpu *vcpu, int index)
81 {
82 struct vcpu_svm *svm = to_svm(vcpu);
83 u64 cr3 = svm->nested.ctl.nested_cr3;
84 u64 pdpte;
85 int ret;
86
87 ret = kvm_vcpu_read_guest_page(vcpu, gpa_to_gfn(cr3), &pdpte,
88 offset_in_page(cr3) + index * 8, 8);
89 if (ret)
90 return 0;
91 return pdpte;
92 }
93
static unsigned long nested_svm_get_tdp_cr3(struct kvm_vcpu *vcpu)
95 {
96 struct vcpu_svm *svm = to_svm(vcpu);
97
98 return svm->nested.ctl.nested_cr3;
99 }
100
static void nested_svm_init_mmu_context(struct kvm_vcpu *vcpu)
102 {
103 struct vcpu_svm *svm = to_svm(vcpu);
104
105 WARN_ON(mmu_is_nested(vcpu));
106
107 vcpu->arch.mmu = &vcpu->arch.guest_mmu;
108
109 /*
 * The NPT format depends on L1's CR4 and EFER, which are in vmcb01. Note,
111 * when called via KVM_SET_NESTED_STATE, that state may _not_ match current
112 * vCPU state. CR0.WP is explicitly ignored, while CR0.PG is required.
113 */
114 kvm_init_shadow_npt_mmu(vcpu, X86_CR0_PG, svm->vmcb01.ptr->save.cr4,
115 svm->vmcb01.ptr->save.efer,
116 svm->nested.ctl.nested_cr3);
117 vcpu->arch.mmu->get_guest_pgd = nested_svm_get_tdp_cr3;
118 vcpu->arch.mmu->get_pdptr = nested_svm_get_tdp_pdptr;
119 vcpu->arch.mmu->inject_page_fault = nested_svm_inject_npf_exit;
120 vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu;
121 }
122
static void nested_svm_uninit_mmu_context(struct kvm_vcpu *vcpu)
124 {
125 vcpu->arch.mmu = &vcpu->arch.root_mmu;
126 vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
127 }
128
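/*
 * Virtual VMLOAD/VMSAVE can only be left enabled for L2 when the feature is
 * available to this vCPU, L2 runs with nested paging, and L1 enabled it in
 * vmcb12; otherwise the instructions must be intercepted and emulated.
 */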
static bool nested_vmcb_needs_vls_intercept(struct vcpu_svm *svm)
130 {
131 if (!svm->v_vmload_vmsave_enabled)
132 return true;
133
134 if (!nested_npt_enabled(svm))
135 return true;
136
137 if (!(svm->nested.ctl.virt_ext & VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK))
138 return true;
139
140 return false;
141 }
142
void recalc_intercepts(struct vcpu_svm *svm)
144 {
145 struct vmcb_control_area *c, *h;
146 struct vmcb_ctrl_area_cached *g;
147 unsigned int i;
148
149 vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
150
151 if (!is_guest_mode(&svm->vcpu))
152 return;
153
154 c = &svm->vmcb->control;
155 h = &svm->vmcb01.ptr->control;
156 g = &svm->nested.ctl;
157
158 for (i = 0; i < MAX_INTERCEPT; i++)
159 c->intercepts[i] = h->intercepts[i];
160
161 if (g->int_ctl & V_INTR_MASKING_MASK) {
162 /* We only want the cr8 intercept bits of L1 */
163 vmcb_clr_intercept(c, INTERCEPT_CR8_READ);
164 vmcb_clr_intercept(c, INTERCEPT_CR8_WRITE);
165
166 /*
167 * Once running L2 with HF_VINTR_MASK, EFLAGS.IF does not
168 * affect any interrupt we may want to inject; therefore,
169 * interrupt window vmexits are irrelevant to L0.
170 */
171 vmcb_clr_intercept(c, INTERCEPT_VINTR);
172 }
173
174 /* We don't want to see VMMCALLs from a nested guest */
175 vmcb_clr_intercept(c, INTERCEPT_VMMCALL);
176
177 for (i = 0; i < MAX_INTERCEPT; i++)
178 c->intercepts[i] |= g->intercepts[i];
179
180 /* If SMI is not intercepted, ignore guest SMI intercept as well */
181 if (!intercept_smi)
182 vmcb_clr_intercept(c, INTERCEPT_SMI);
183
184 if (nested_vmcb_needs_vls_intercept(svm)) {
185 /*
186 * If the virtual VMLOAD/VMSAVE is not enabled for the L2,
187 * we must intercept these instructions to correctly
188 * emulate them in case L1 doesn't intercept them.
189 */
190 vmcb_set_intercept(c, INTERCEPT_VMLOAD);
191 vmcb_set_intercept(c, INTERCEPT_VMSAVE);
192 } else {
193 WARN_ON(!(c->virt_ext & VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK));
194 }
195 }
196
197 /*
198 * Merge L0's (KVM) and L1's (Nested VMCB) MSR permission bitmaps. The function
 * is optimized in that it only merges the parts where the KVM MSR permission bitmap
200 * may contain zero bits.
201 */
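/*
 * Note that each chunk below is merged as "L0 bitmap | L1 bitmap", so an MSR
 * access is intercepted whenever either KVM or the nested hypervisor wants
 * to intercept it.
 */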
static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm)
203 {
204 struct hv_enlightenments *hve =
205 (struct hv_enlightenments *)svm->nested.ctl.reserved_sw;
206 int i;
207
208 /*
209 * MSR bitmap update can be skipped when:
210 * - MSR bitmap for L1 hasn't changed.
211 * - Nested hypervisor (L1) is attempting to launch the same L2 as
212 * before.
213 * - Nested hypervisor (L1) is using Hyper-V emulation interface and
214 * tells KVM (L0) there were no changes in MSR bitmap for L2.
215 */
216 if (!svm->nested.force_msr_bitmap_recalc &&
217 kvm_hv_hypercall_enabled(&svm->vcpu) &&
218 hve->hv_enlightenments_control.msr_bitmap &&
219 (svm->nested.ctl.clean & BIT(VMCB_HV_NESTED_ENLIGHTENMENTS)))
220 goto set_msrpm_base_pa;
221
222 if (!(vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_MSR_PROT)))
223 return true;
224
225 for (i = 0; i < MSRPM_OFFSETS; i++) {
226 u32 value, p;
227 u64 offset;
228
229 if (msrpm_offsets[i] == 0xffffffff)
230 break;
231
232 p = msrpm_offsets[i];
233 offset = svm->nested.ctl.msrpm_base_pa + (p * 4);
234
235 if (kvm_vcpu_read_guest(&svm->vcpu, offset, &value, 4))
236 return false;
237
238 svm->nested.msrpm[p] = svm->msrpm[p] | value;
239 }
240
241 svm->nested.force_msr_bitmap_recalc = false;
242
243 set_msrpm_base_pa:
244 svm->vmcb->control.msrpm_base_pa = __sme_set(__pa(svm->nested.msrpm));
245
246 return true;
247 }
248
249 /*
250 * Bits 11:0 of bitmap address are ignored by hardware
251 */
static bool nested_svm_check_bitmap_pa(struct kvm_vcpu *vcpu, u64 pa, u32 size)
253 {
254 u64 addr = PAGE_ALIGN(pa);
255
256 return kvm_vcpu_is_legal_gpa(vcpu, addr) &&
257 kvm_vcpu_is_legal_gpa(vcpu, addr + size - 1);
258 }
259
static bool nested_svm_check_tlb_ctl(struct kvm_vcpu *vcpu, u8 tlb_ctl)
261 {
262 /* Nested FLUSHBYASID is not supported yet. */
	switch (tlb_ctl) {
264 case TLB_CONTROL_DO_NOTHING:
265 case TLB_CONTROL_FLUSH_ALL_ASID:
266 return true;
267 default:
268 return false;
269 }
270 }
271
static bool __nested_vmcb_check_controls(struct kvm_vcpu *vcpu,
273 struct vmcb_ctrl_area_cached *control)
274 {
275 if (CC(!vmcb12_is_intercept(control, INTERCEPT_VMRUN)))
276 return false;
277
278 if (CC(control->asid == 0))
279 return false;
280
281 if (CC((control->nested_ctl & SVM_NESTED_CTL_NP_ENABLE) && !npt_enabled))
282 return false;
283
284 if (CC(!nested_svm_check_bitmap_pa(vcpu, control->msrpm_base_pa,
285 MSRPM_SIZE)))
286 return false;
287 if (CC(!nested_svm_check_bitmap_pa(vcpu, control->iopm_base_pa,
288 IOPM_SIZE)))
289 return false;
290
291 if (CC(!nested_svm_check_tlb_ctl(vcpu, control->tlb_ctl)))
292 return false;
293
294 return true;
295 }
296
297 /* Common checks that apply to both L1 and L2 state. */
static bool __nested_vmcb_check_save(struct kvm_vcpu *vcpu,
299 struct vmcb_save_area_cached *save)
300 {
301 if (CC(!(save->efer & EFER_SVME)))
302 return false;
303
304 if (CC((save->cr0 & X86_CR0_CD) == 0 && (save->cr0 & X86_CR0_NW)) ||
305 CC(save->cr0 & ~0xffffffffULL))
306 return false;
307
308 if (CC(!kvm_dr6_valid(save->dr6)) || CC(!kvm_dr7_valid(save->dr7)))
309 return false;
310
311 /*
312 * These checks are also performed by KVM_SET_SREGS,
313 * except that EFER.LMA is not checked by SVM against
314 * CR0.PG && EFER.LME.
315 */
316 if ((save->efer & EFER_LME) && (save->cr0 & X86_CR0_PG)) {
317 if (CC(!(save->cr4 & X86_CR4_PAE)) ||
318 CC(!(save->cr0 & X86_CR0_PE)) ||
319 CC(kvm_vcpu_is_illegal_gpa(vcpu, save->cr3)))
320 return false;
321 }
322
323 /* Note, SVM doesn't have any additional restrictions on CR4. */
324 if (CC(!__kvm_is_valid_cr4(vcpu, save->cr4)))
325 return false;
326
327 if (CC(!kvm_valid_efer(vcpu, save->efer)))
328 return false;
329
330 return true;
331 }
332
static bool nested_vmcb_check_save(struct kvm_vcpu *vcpu)
334 {
335 struct vcpu_svm *svm = to_svm(vcpu);
336 struct vmcb_save_area_cached *save = &svm->nested.save;
337
338 return __nested_vmcb_check_save(vcpu, save);
339 }
340
static bool nested_vmcb_check_controls(struct kvm_vcpu *vcpu)
342 {
343 struct vcpu_svm *svm = to_svm(vcpu);
344 struct vmcb_ctrl_area_cached *ctl = &svm->nested.ctl;
345
346 return __nested_vmcb_check_controls(vcpu, ctl);
347 }
348
349 static
void __nested_copy_vmcb_control_to_cache(struct kvm_vcpu *vcpu,
351 struct vmcb_ctrl_area_cached *to,
352 struct vmcb_control_area *from)
353 {
354 unsigned int i;
355
356 for (i = 0; i < MAX_INTERCEPT; i++)
357 to->intercepts[i] = from->intercepts[i];
358
359 to->iopm_base_pa = from->iopm_base_pa;
360 to->msrpm_base_pa = from->msrpm_base_pa;
361 to->tsc_offset = from->tsc_offset;
362 to->tlb_ctl = from->tlb_ctl;
363 to->int_ctl = from->int_ctl;
364 to->int_vector = from->int_vector;
365 to->int_state = from->int_state;
366 to->exit_code = from->exit_code;
367 to->exit_code_hi = from->exit_code_hi;
368 to->exit_info_1 = from->exit_info_1;
369 to->exit_info_2 = from->exit_info_2;
370 to->exit_int_info = from->exit_int_info;
371 to->exit_int_info_err = from->exit_int_info_err;
372 to->nested_ctl = from->nested_ctl;
373 to->event_inj = from->event_inj;
374 to->event_inj_err = from->event_inj_err;
375 to->nested_cr3 = from->nested_cr3;
376 to->virt_ext = from->virt_ext;
377 to->pause_filter_count = from->pause_filter_count;
378 to->pause_filter_thresh = from->pause_filter_thresh;
379
380 /* Copy asid here because nested_vmcb_check_controls will check it. */
381 to->asid = from->asid;
382 to->msrpm_base_pa &= ~0x0fffULL;
383 to->iopm_base_pa &= ~0x0fffULL;
384
385 /* Hyper-V extensions (Enlightened VMCB) */
386 if (kvm_hv_hypercall_enabled(vcpu)) {
387 to->clean = from->clean;
388 memcpy(to->reserved_sw, from->reserved_sw,
389 sizeof(struct hv_enlightenments));
390 }
391 }
392
void nested_copy_vmcb_control_to_cache(struct vcpu_svm *svm,
394 struct vmcb_control_area *control)
395 {
396 __nested_copy_vmcb_control_to_cache(&svm->vcpu, &svm->nested.ctl, control);
397 }
398
static void __nested_copy_vmcb_save_to_cache(struct vmcb_save_area_cached *to,
400 struct vmcb_save_area *from)
401 {
402 /*
403 * Copy only fields that are validated, as we need them
 * to avoid TOCTOU (time-of-check/time-of-use) races.
405 */
406 to->efer = from->efer;
407 to->cr0 = from->cr0;
408 to->cr3 = from->cr3;
409 to->cr4 = from->cr4;
410
411 to->dr6 = from->dr6;
412 to->dr7 = from->dr7;
413 }
414
void nested_copy_vmcb_save_to_cache(struct vcpu_svm *svm,
416 struct vmcb_save_area *save)
417 {
418 __nested_copy_vmcb_save_to_cache(&svm->nested.save, save);
419 }
420
421 /*
422 * Synchronize fields that are written by the processor, so that
423 * they can be copied back into the vmcb12.
424 */
void nested_sync_control_from_vmcb02(struct vcpu_svm *svm)
426 {
427 u32 mask;
428 svm->nested.ctl.event_inj = svm->vmcb->control.event_inj;
429 svm->nested.ctl.event_inj_err = svm->vmcb->control.event_inj_err;
430
431 /* Only a few fields of int_ctl are written by the processor. */
432 mask = V_IRQ_MASK | V_TPR_MASK;
433 if (!(svm->nested.ctl.int_ctl & V_INTR_MASKING_MASK) &&
434 svm_is_intercept(svm, INTERCEPT_VINTR)) {
435 /*
436 * In order to request an interrupt window, L0 is usurping
437 * svm->vmcb->control.int_ctl and possibly setting V_IRQ
438 * even if it was clear in L1's VMCB. Restoring it would be
439 * wrong. However, in this case V_IRQ will remain true until
440 * interrupt_window_interception calls svm_clear_vintr and
441 * restores int_ctl. We can just leave it aside.
442 */
443 mask &= ~V_IRQ_MASK;
444 }
445
446 if (nested_vgif_enabled(svm))
447 mask |= V_GIF_MASK;
448
449 svm->nested.ctl.int_ctl &= ~mask;
450 svm->nested.ctl.int_ctl |= svm->vmcb->control.int_ctl & mask;
451 }
452
453 /*
454 * Transfer any event that L0 or L1 wanted to inject into L2 to
455 * EXIT_INT_INFO.
456 */
static void nested_save_pending_event_to_vmcb12(struct vcpu_svm *svm,
458 struct vmcb *vmcb12)
459 {
460 struct kvm_vcpu *vcpu = &svm->vcpu;
461 u32 exit_int_info = 0;
462 unsigned int nr;
463
464 if (vcpu->arch.exception.injected) {
465 nr = vcpu->arch.exception.nr;
466 exit_int_info = nr | SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_EXEPT;
467
468 if (vcpu->arch.exception.has_error_code) {
469 exit_int_info |= SVM_EVTINJ_VALID_ERR;
470 vmcb12->control.exit_int_info_err =
471 vcpu->arch.exception.error_code;
472 }
473
474 } else if (vcpu->arch.nmi_injected) {
475 exit_int_info = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI;
476
477 } else if (vcpu->arch.interrupt.injected) {
478 nr = vcpu->arch.interrupt.nr;
479 exit_int_info = nr | SVM_EVTINJ_VALID;
480
481 if (vcpu->arch.interrupt.soft)
482 exit_int_info |= SVM_EVTINJ_TYPE_SOFT;
483 else
484 exit_int_info |= SVM_EVTINJ_TYPE_INTR;
485 }
486
487 vmcb12->control.exit_int_info = exit_int_info;
488 }
489
static void nested_svm_transition_tlb_flush(struct kvm_vcpu *vcpu)
491 {
492 /*
493 * TODO: optimize unconditional TLB flush/MMU sync. A partial list of
494 * things to fix before this can be conditional:
495 *
496 * - Flush TLBs for both L1 and L2 remote TLB flush
497 * - Honor L1's request to flush an ASID on nested VMRUN
498 * - Sync nested NPT MMU on VMRUN that flushes L2's ASID[*]
499 * - Don't crush a pending TLB flush in vmcb02 on nested VMRUN
500 * - Flush L1's ASID on KVM_REQ_TLB_FLUSH_GUEST
501 *
502 * [*] Unlike nested EPT, SVM's ASID management can invalidate nested
503 * NPT guest-physical mappings on VMRUN.
504 */
505 kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
506 kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
507 }
508
509 /*
510 * Load guest's/host's cr3 on nested vmentry or vmexit. @nested_npt is true
511 * if we are emulating VM-Entry into a guest with NPT enabled.
512 */
static int nested_svm_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3,
514 bool nested_npt, bool reload_pdptrs)
515 {
516 if (CC(kvm_vcpu_is_illegal_gpa(vcpu, cr3)))
517 return -EINVAL;
518
519 if (reload_pdptrs && !nested_npt && is_pae_paging(vcpu) &&
520 CC(!load_pdptrs(vcpu, cr3)))
521 return -EINVAL;
522
523 vcpu->arch.cr3 = cr3;
524
525 /* Re-initialize the MMU, e.g. to pick up CR4 MMU role changes. */
526 kvm_init_mmu(vcpu);
527
528 if (!nested_npt)
529 kvm_mmu_new_pgd(vcpu, cr3);
530
531 return 0;
532 }
533
void nested_vmcb02_compute_g_pat(struct vcpu_svm *svm)
535 {
536 if (!svm->nested.vmcb02.ptr)
537 return;
538
539 /* FIXME: merge g_pat from vmcb01 and vmcb12. */
540 svm->nested.vmcb02.ptr->save.g_pat = svm->vmcb01.ptr->save.g_pat;
541 }
542
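/*
 * Load the L2 guest state from vmcb12 (and from the validated cache in
 * svm->nested.save) into vmcb02 and the vCPU.  Segment, descriptor-table
 * and debug register state is skipped when vmcb12 marks it clean and the
 * same vmcb12 is being re-entered.
 */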
static void nested_vmcb02_prepare_save(struct vcpu_svm *svm, struct vmcb *vmcb12)
544 {
545 bool new_vmcb12 = false;
546 struct vmcb *vmcb01 = svm->vmcb01.ptr;
547 struct vmcb *vmcb02 = svm->nested.vmcb02.ptr;
548
549 nested_vmcb02_compute_g_pat(svm);
550
551 /* Load the nested guest state */
552 if (svm->nested.vmcb12_gpa != svm->nested.last_vmcb12_gpa) {
553 new_vmcb12 = true;
554 svm->nested.last_vmcb12_gpa = svm->nested.vmcb12_gpa;
555 svm->nested.force_msr_bitmap_recalc = true;
556 }
557
558 if (unlikely(new_vmcb12 || vmcb_is_dirty(vmcb12, VMCB_SEG))) {
559 vmcb02->save.es = vmcb12->save.es;
560 vmcb02->save.cs = vmcb12->save.cs;
561 vmcb02->save.ss = vmcb12->save.ss;
562 vmcb02->save.ds = vmcb12->save.ds;
563 vmcb02->save.cpl = vmcb12->save.cpl;
564 vmcb_mark_dirty(vmcb02, VMCB_SEG);
565 }
566
567 if (unlikely(new_vmcb12 || vmcb_is_dirty(vmcb12, VMCB_DT))) {
568 vmcb02->save.gdtr = vmcb12->save.gdtr;
569 vmcb02->save.idtr = vmcb12->save.idtr;
570 vmcb_mark_dirty(vmcb02, VMCB_DT);
571 }
572
573 kvm_set_rflags(&svm->vcpu, vmcb12->save.rflags | X86_EFLAGS_FIXED);
574
575 svm_set_efer(&svm->vcpu, svm->nested.save.efer);
576
577 svm_set_cr0(&svm->vcpu, svm->nested.save.cr0);
578 svm_set_cr4(&svm->vcpu, svm->nested.save.cr4);
579
580 svm->vcpu.arch.cr2 = vmcb12->save.cr2;
581
582 kvm_rax_write(&svm->vcpu, vmcb12->save.rax);
583 kvm_rsp_write(&svm->vcpu, vmcb12->save.rsp);
584 kvm_rip_write(&svm->vcpu, vmcb12->save.rip);
585
586 /* In case we don't even reach vcpu_run, the fields are not updated */
587 vmcb02->save.rax = vmcb12->save.rax;
588 vmcb02->save.rsp = vmcb12->save.rsp;
589 vmcb02->save.rip = vmcb12->save.rip;
590
	/* These bits will be set properly on the first execution when new_vmcb12 is true */
592 if (unlikely(new_vmcb12 || vmcb_is_dirty(vmcb12, VMCB_DR))) {
593 vmcb02->save.dr7 = svm->nested.save.dr7 | DR7_FIXED_1;
594 svm->vcpu.arch.dr6 = svm->nested.save.dr6 | DR6_ACTIVE_LOW;
595 vmcb_mark_dirty(vmcb02, VMCB_DR);
596 }
597
598 if (unlikely(svm->lbrv_enabled && (svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK))) {
599 /*
600 * Reserved bits of DEBUGCTL are ignored. Be consistent with
601 * svm_set_msr's definition of reserved bits.
602 */
603 svm_copy_lbrs(vmcb02, vmcb12);
604 vmcb02->save.dbgctl &= ~DEBUGCTL_RESERVED_BITS;
605 svm_update_lbrv(&svm->vcpu);
606
607 } else if (unlikely(vmcb01->control.virt_ext & LBR_CTL_ENABLE_MASK)) {
608 svm_copy_lbrs(vmcb02, vmcb01);
609 }
610 }
611
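/*
 * Build the vmcb02 control area for the upcoming VMRUN: most fields come
 * from the cached vmcb12 controls, host-owned bits (e.g. V_INTR_MASKING and
 * the LBR virtualization setting) are inherited from vmcb01, and the L0 and
 * L1 intercepts are merged at the end via recalc_intercepts().
 */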
static void nested_vmcb02_prepare_control(struct vcpu_svm *svm)
613 {
614 u32 int_ctl_vmcb01_bits = V_INTR_MASKING_MASK;
615 u32 int_ctl_vmcb12_bits = V_TPR_MASK | V_IRQ_INJECTION_BITS_MASK;
616
617 struct kvm_vcpu *vcpu = &svm->vcpu;
618 struct vmcb *vmcb01 = svm->vmcb01.ptr;
619 struct vmcb *vmcb02 = svm->nested.vmcb02.ptr;
620 u32 pause_count12;
621 u32 pause_thresh12;
622
623 /*
624 * Filled at exit: exit_code, exit_code_hi, exit_info_1, exit_info_2,
625 * exit_int_info, exit_int_info_err, next_rip, insn_len, insn_bytes.
626 */
627
628 if (svm->vgif_enabled && (svm->nested.ctl.int_ctl & V_GIF_ENABLE_MASK))
629 int_ctl_vmcb12_bits |= (V_GIF_MASK | V_GIF_ENABLE_MASK);
630 else
631 int_ctl_vmcb01_bits |= (V_GIF_MASK | V_GIF_ENABLE_MASK);
632
633 /* Copied from vmcb01. msrpm_base can be overwritten later. */
634 vmcb02->control.nested_ctl = vmcb01->control.nested_ctl;
635 vmcb02->control.iopm_base_pa = vmcb01->control.iopm_base_pa;
636 vmcb02->control.msrpm_base_pa = vmcb01->control.msrpm_base_pa;
637
638 /* Done at vmrun: asid. */
639
640 /* Also overwritten later if necessary. */
641 vmcb02->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;
642
643 /* nested_cr3. */
644 if (nested_npt_enabled(svm))
645 nested_svm_init_mmu_context(vcpu);
646
647 vcpu->arch.tsc_offset = kvm_calc_nested_tsc_offset(
648 vcpu->arch.l1_tsc_offset,
649 svm->nested.ctl.tsc_offset,
650 svm->tsc_ratio_msr);
651
652 vmcb02->control.tsc_offset = vcpu->arch.tsc_offset;
653
654 if (svm->tsc_ratio_msr != kvm_default_tsc_scaling_ratio) {
655 WARN_ON(!svm->tsc_scaling_enabled);
656 nested_svm_update_tsc_ratio_msr(vcpu);
657 }
658
659 vmcb02->control.int_ctl =
660 (svm->nested.ctl.int_ctl & int_ctl_vmcb12_bits) |
661 (vmcb01->control.int_ctl & int_ctl_vmcb01_bits);
662
663 vmcb02->control.int_vector = svm->nested.ctl.int_vector;
664 vmcb02->control.int_state = svm->nested.ctl.int_state;
665 vmcb02->control.event_inj = svm->nested.ctl.event_inj;
666 vmcb02->control.event_inj_err = svm->nested.ctl.event_inj_err;
667
668 vmcb02->control.virt_ext = vmcb01->control.virt_ext &
669 LBR_CTL_ENABLE_MASK;
670 if (svm->lbrv_enabled)
671 vmcb02->control.virt_ext |=
672 (svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK);
673
674 if (!nested_vmcb_needs_vls_intercept(svm))
675 vmcb02->control.virt_ext |= VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK;
676
677 pause_count12 = svm->pause_filter_enabled ? svm->nested.ctl.pause_filter_count : 0;
678 pause_thresh12 = svm->pause_threshold_enabled ? svm->nested.ctl.pause_filter_thresh : 0;
679 if (kvm_pause_in_guest(svm->vcpu.kvm)) {
680 /* use guest values since host doesn't intercept PAUSE */
681 vmcb02->control.pause_filter_count = pause_count12;
682 vmcb02->control.pause_filter_thresh = pause_thresh12;
683
684 } else {
685 /* start from host values otherwise */
686 vmcb02->control.pause_filter_count = vmcb01->control.pause_filter_count;
687 vmcb02->control.pause_filter_thresh = vmcb01->control.pause_filter_thresh;
688
689 /* ... but ensure filtering is disabled if so requested. */
690 if (vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_PAUSE)) {
691 if (!pause_count12)
692 vmcb02->control.pause_filter_count = 0;
693 if (!pause_thresh12)
694 vmcb02->control.pause_filter_thresh = 0;
695 }
696 }
697
698 nested_svm_transition_tlb_flush(vcpu);
699
700 /* Enter Guest-Mode */
701 enter_guest_mode(vcpu);
702
703 /*
704 * Merge guest and host intercepts - must be called with vcpu in
705 * guest-mode to take effect.
706 */
707 recalc_intercepts(svm);
708 }
709
static void nested_svm_copy_common_state(struct vmcb *from_vmcb, struct vmcb *to_vmcb)
711 {
712 /*
713 * Some VMCB state is shared between L1 and L2 and thus has to be
714 * moved at the time of nested vmrun and vmexit.
715 *
716 * VMLOAD/VMSAVE state would also belong in this category, but KVM
717 * always performs VMLOAD and VMSAVE from the VMCB01.
718 */
719 to_vmcb->save.spec_ctrl = from_vmcb->save.spec_ctrl;
720 }
721
int enter_svm_guest_mode(struct kvm_vcpu *vcpu, u64 vmcb12_gpa,
723 struct vmcb *vmcb12, bool from_vmrun)
724 {
725 struct vcpu_svm *svm = to_svm(vcpu);
726 int ret;
727
728 trace_kvm_nested_vmrun(svm->vmcb->save.rip, vmcb12_gpa,
729 vmcb12->save.rip,
730 vmcb12->control.int_ctl,
731 vmcb12->control.event_inj,
732 vmcb12->control.nested_ctl);
733
734 trace_kvm_nested_intercepts(vmcb12->control.intercepts[INTERCEPT_CR] & 0xffff,
735 vmcb12->control.intercepts[INTERCEPT_CR] >> 16,
736 vmcb12->control.intercepts[INTERCEPT_EXCEPTION],
737 vmcb12->control.intercepts[INTERCEPT_WORD3],
738 vmcb12->control.intercepts[INTERCEPT_WORD4],
739 vmcb12->control.intercepts[INTERCEPT_WORD5]);
740
741
742 svm->nested.vmcb12_gpa = vmcb12_gpa;
743
744 WARN_ON(svm->vmcb == svm->nested.vmcb02.ptr);
745
746 nested_svm_copy_common_state(svm->vmcb01.ptr, svm->nested.vmcb02.ptr);
747
748 svm_switch_vmcb(svm, &svm->nested.vmcb02);
749 nested_vmcb02_prepare_control(svm);
750 nested_vmcb02_prepare_save(svm, vmcb12);
751
752 ret = nested_svm_load_cr3(&svm->vcpu, svm->nested.save.cr3,
753 nested_npt_enabled(svm), from_vmrun);
754 if (ret)
755 return ret;
756
757 if (!from_vmrun)
758 kvm_make_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
759
760 svm_set_gif(svm, true);
761
762 if (kvm_vcpu_apicv_active(vcpu))
763 kvm_make_request(KVM_REQ_APICV_UPDATE, vcpu);
764
765 return 0;
766 }
767
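/*
 * Emulate VMRUN for L1: map vmcb12 from the GPA in RAX, cache and validate
 * its control and save areas, stash L1 state in vmcb01, switch to vmcb02
 * and enter guest mode.  Consistency-check failures are reported to L1 as
 * SVM_EXIT_ERR rather than failing the instruction in L0.
 */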
int nested_svm_vmrun(struct kvm_vcpu *vcpu)
769 {
770 struct vcpu_svm *svm = to_svm(vcpu);
771 int ret;
772 struct vmcb *vmcb12;
773 struct kvm_host_map map;
774 u64 vmcb12_gpa;
775 struct vmcb *vmcb01 = svm->vmcb01.ptr;
776
777 if (!svm->nested.hsave_msr) {
778 kvm_inject_gp(vcpu, 0);
779 return 1;
780 }
781
782 if (is_smm(vcpu)) {
783 kvm_queue_exception(vcpu, UD_VECTOR);
784 return 1;
785 }
786
787 vmcb12_gpa = svm->vmcb->save.rax;
788 ret = kvm_vcpu_map(vcpu, gpa_to_gfn(vmcb12_gpa), &map);
789 if (ret == -EINVAL) {
790 kvm_inject_gp(vcpu, 0);
791 return 1;
792 } else if (ret) {
793 return kvm_skip_emulated_instruction(vcpu);
794 }
795
796 ret = kvm_skip_emulated_instruction(vcpu);
797
798 vmcb12 = map.hva;
799
800 if (WARN_ON_ONCE(!svm->nested.initialized))
801 return -EINVAL;
802
803 nested_copy_vmcb_control_to_cache(svm, &vmcb12->control);
804 nested_copy_vmcb_save_to_cache(svm, &vmcb12->save);
805
806 if (!nested_vmcb_check_save(vcpu) ||
807 !nested_vmcb_check_controls(vcpu)) {
808 vmcb12->control.exit_code = SVM_EXIT_ERR;
809 vmcb12->control.exit_code_hi = 0;
810 vmcb12->control.exit_info_1 = 0;
811 vmcb12->control.exit_info_2 = 0;
812 goto out;
813 }
814
815 /*
816 * Since vmcb01 is not in use, we can use it to store some of the L1
817 * state.
818 */
819 vmcb01->save.efer = vcpu->arch.efer;
820 vmcb01->save.cr0 = kvm_read_cr0(vcpu);
821 vmcb01->save.cr4 = vcpu->arch.cr4;
822 vmcb01->save.rflags = kvm_get_rflags(vcpu);
823 vmcb01->save.rip = kvm_rip_read(vcpu);
824
825 if (!npt_enabled)
826 vmcb01->save.cr3 = kvm_read_cr3(vcpu);
827
828 svm->nested.nested_run_pending = 1;
829
830 if (enter_svm_guest_mode(vcpu, vmcb12_gpa, vmcb12, true))
831 goto out_exit_err;
832
833 if (nested_svm_vmrun_msrpm(svm))
834 goto out;
835
836 out_exit_err:
837 svm->nested.nested_run_pending = 0;
838
839 svm->vmcb->control.exit_code = SVM_EXIT_ERR;
840 svm->vmcb->control.exit_code_hi = 0;
841 svm->vmcb->control.exit_info_1 = 0;
842 svm->vmcb->control.exit_info_2 = 0;
843
844 nested_svm_vmexit(svm);
845
846 out:
847 kvm_vcpu_unmap(vcpu, &map, true);
848
849 return ret;
850 }
851
852 /* Copy state save area fields which are handled by VMRUN */
void svm_copy_vmrun_state(struct vmcb_save_area *to_save,
854 struct vmcb_save_area *from_save)
855 {
856 to_save->es = from_save->es;
857 to_save->cs = from_save->cs;
858 to_save->ss = from_save->ss;
859 to_save->ds = from_save->ds;
860 to_save->gdtr = from_save->gdtr;
861 to_save->idtr = from_save->idtr;
862 to_save->rflags = from_save->rflags | X86_EFLAGS_FIXED;
863 to_save->efer = from_save->efer;
864 to_save->cr0 = from_save->cr0;
865 to_save->cr3 = from_save->cr3;
866 to_save->cr4 = from_save->cr4;
867 to_save->rax = from_save->rax;
868 to_save->rsp = from_save->rsp;
869 to_save->rip = from_save->rip;
870 to_save->cpl = 0;
871 }
872
void svm_copy_vmloadsave_state(struct vmcb *to_vmcb, struct vmcb *from_vmcb)
874 {
875 to_vmcb->save.fs = from_vmcb->save.fs;
876 to_vmcb->save.gs = from_vmcb->save.gs;
877 to_vmcb->save.tr = from_vmcb->save.tr;
878 to_vmcb->save.ldtr = from_vmcb->save.ldtr;
879 to_vmcb->save.kernel_gs_base = from_vmcb->save.kernel_gs_base;
880 to_vmcb->save.star = from_vmcb->save.star;
881 to_vmcb->save.lstar = from_vmcb->save.lstar;
882 to_vmcb->save.cstar = from_vmcb->save.cstar;
883 to_vmcb->save.sfmask = from_vmcb->save.sfmask;
884 to_vmcb->save.sysenter_cs = from_vmcb->save.sysenter_cs;
885 to_vmcb->save.sysenter_esp = from_vmcb->save.sysenter_esp;
886 to_vmcb->save.sysenter_eip = from_vmcb->save.sysenter_eip;
887 }
888
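/*
 * Emulate #VMEXIT from L2 to L1: copy the L2 state and exit information
 * from vmcb02 into vmcb12, switch back to vmcb01, restore the L1 register
 * state that was stashed there by nested_svm_vmrun(), and clear GIF as the
 * architecture requires.
 */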
int nested_svm_vmexit(struct vcpu_svm *svm)
890 {
891 struct kvm_vcpu *vcpu = &svm->vcpu;
892 struct vmcb *vmcb01 = svm->vmcb01.ptr;
893 struct vmcb *vmcb02 = svm->nested.vmcb02.ptr;
894 struct vmcb *vmcb12;
895 struct kvm_host_map map;
896 int rc;
897
898 rc = kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.vmcb12_gpa), &map);
899 if (rc) {
900 if (rc == -EINVAL)
901 kvm_inject_gp(vcpu, 0);
902 return 1;
903 }
904
905 vmcb12 = map.hva;
906
907 /* Exit Guest-Mode */
908 leave_guest_mode(vcpu);
909 svm->nested.vmcb12_gpa = 0;
910 WARN_ON_ONCE(svm->nested.nested_run_pending);
911
912 kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
913
914 /* in case we halted in L2 */
915 svm->vcpu.arch.mp_state = KVM_MP_STATE_RUNNABLE;
916
917 /* Give the current vmcb to the guest */
918
919 vmcb12->save.es = vmcb02->save.es;
920 vmcb12->save.cs = vmcb02->save.cs;
921 vmcb12->save.ss = vmcb02->save.ss;
922 vmcb12->save.ds = vmcb02->save.ds;
923 vmcb12->save.gdtr = vmcb02->save.gdtr;
924 vmcb12->save.idtr = vmcb02->save.idtr;
925 vmcb12->save.efer = svm->vcpu.arch.efer;
926 vmcb12->save.cr0 = kvm_read_cr0(vcpu);
927 vmcb12->save.cr3 = kvm_read_cr3(vcpu);
928 vmcb12->save.cr2 = vmcb02->save.cr2;
929 vmcb12->save.cr4 = svm->vcpu.arch.cr4;
930 vmcb12->save.rflags = kvm_get_rflags(vcpu);
931 vmcb12->save.rip = kvm_rip_read(vcpu);
932 vmcb12->save.rsp = kvm_rsp_read(vcpu);
933 vmcb12->save.rax = kvm_rax_read(vcpu);
934 vmcb12->save.dr7 = vmcb02->save.dr7;
935 vmcb12->save.dr6 = svm->vcpu.arch.dr6;
936 vmcb12->save.cpl = vmcb02->save.cpl;
937
938 vmcb12->control.int_state = vmcb02->control.int_state;
939 vmcb12->control.exit_code = vmcb02->control.exit_code;
940 vmcb12->control.exit_code_hi = vmcb02->control.exit_code_hi;
941 vmcb12->control.exit_info_1 = vmcb02->control.exit_info_1;
942 vmcb12->control.exit_info_2 = vmcb02->control.exit_info_2;
943
944 if (vmcb12->control.exit_code != SVM_EXIT_ERR)
945 nested_save_pending_event_to_vmcb12(svm, vmcb12);
946
947 if (svm->nrips_enabled)
948 vmcb12->control.next_rip = vmcb02->control.next_rip;
949
950 vmcb12->control.int_ctl = svm->nested.ctl.int_ctl;
951 vmcb12->control.tlb_ctl = svm->nested.ctl.tlb_ctl;
952 vmcb12->control.event_inj = svm->nested.ctl.event_inj;
953 vmcb12->control.event_inj_err = svm->nested.ctl.event_inj_err;
954
955 if (!kvm_pause_in_guest(vcpu->kvm)) {
956 vmcb01->control.pause_filter_count = vmcb02->control.pause_filter_count;
957 vmcb_mark_dirty(vmcb01, VMCB_INTERCEPTS);
958
959 }
960
961 nested_svm_copy_common_state(svm->nested.vmcb02.ptr, svm->vmcb01.ptr);
962
963 svm_switch_vmcb(svm, &svm->vmcb01);
964
965 if (unlikely(svm->lbrv_enabled && (svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK))) {
966 svm_copy_lbrs(vmcb12, vmcb02);
967 svm_update_lbrv(vcpu);
968 } else if (unlikely(vmcb01->control.virt_ext & LBR_CTL_ENABLE_MASK)) {
969 svm_copy_lbrs(vmcb01, vmcb02);
970 svm_update_lbrv(vcpu);
971 }
972
973 /*
974 * On vmexit the GIF is set to false and
975 * no event can be injected in L1.
976 */
977 svm_set_gif(svm, false);
978 vmcb01->control.exit_int_info = 0;
979
980 svm->vcpu.arch.tsc_offset = svm->vcpu.arch.l1_tsc_offset;
981 if (vmcb01->control.tsc_offset != svm->vcpu.arch.tsc_offset) {
982 vmcb01->control.tsc_offset = svm->vcpu.arch.tsc_offset;
983 vmcb_mark_dirty(vmcb01, VMCB_INTERCEPTS);
984 }
985
986 if (svm->tsc_ratio_msr != kvm_default_tsc_scaling_ratio) {
987 WARN_ON(!svm->tsc_scaling_enabled);
988 vcpu->arch.tsc_scaling_ratio = vcpu->arch.l1_tsc_scaling_ratio;
989 __svm_write_tsc_multiplier(vcpu->arch.tsc_scaling_ratio);
990 }
991
992 svm->nested.ctl.nested_cr3 = 0;
993
994 /*
995 * Restore processor state that had been saved in vmcb01
996 */
997 kvm_set_rflags(vcpu, vmcb01->save.rflags);
998 svm_set_efer(vcpu, vmcb01->save.efer);
999 svm_set_cr0(vcpu, vmcb01->save.cr0 | X86_CR0_PE);
1000 svm_set_cr4(vcpu, vmcb01->save.cr4);
1001 kvm_rax_write(vcpu, vmcb01->save.rax);
1002 kvm_rsp_write(vcpu, vmcb01->save.rsp);
1003 kvm_rip_write(vcpu, vmcb01->save.rip);
1004
1005 svm->vcpu.arch.dr7 = DR7_FIXED_1;
1006 kvm_update_dr7(&svm->vcpu);
1007
1008 trace_kvm_nested_vmexit_inject(vmcb12->control.exit_code,
1009 vmcb12->control.exit_info_1,
1010 vmcb12->control.exit_info_2,
1011 vmcb12->control.exit_int_info,
1012 vmcb12->control.exit_int_info_err,
1013 KVM_ISA_SVM);
1014
1015 kvm_vcpu_unmap(vcpu, &map, true);
1016
1017 nested_svm_transition_tlb_flush(vcpu);
1018
1019 nested_svm_uninit_mmu_context(vcpu);
1020
1021 rc = nested_svm_load_cr3(vcpu, vmcb01->save.cr3, false, true);
1022 if (rc)
1023 return 1;
1024
1025 /*
1026 * Drop what we picked up for L2 via svm_complete_interrupts() so it
1027 * doesn't end up in L1.
1028 */
1029 svm->vcpu.arch.nmi_injected = false;
1030 kvm_clear_exception_queue(vcpu);
1031 kvm_clear_interrupt_queue(vcpu);
1032
1033 /*
1034 * If we are here following the completion of a VMRUN that
1035 * is being single-stepped, queue the pending #DB intercept
 * right now so that it can be accounted for before we execute
1037 * L1's next instruction.
1038 */
1039 if (unlikely(vmcb01->save.rflags & X86_EFLAGS_TF))
1040 kvm_queue_exception(&(svm->vcpu), DB_VECTOR);
1041
1042 /*
1043 * Un-inhibit the AVIC right away, so that other vCPUs can start
 * to benefit from it immediately.
1045 */
1046 if (kvm_apicv_activated(vcpu->kvm))
1047 kvm_vcpu_update_apicv(vcpu);
1048
1049 return 0;
1050 }
1051
static void nested_svm_triple_fault(struct kvm_vcpu *vcpu)
1053 {
1054 nested_svm_simple_vmexit(to_svm(vcpu), SVM_EXIT_SHUTDOWN);
1055 }
1056
int svm_allocate_nested(struct vcpu_svm *svm)
1058 {
1059 struct page *vmcb02_page;
1060
1061 if (svm->nested.initialized)
1062 return 0;
1063
1064 vmcb02_page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
1065 if (!vmcb02_page)
1066 return -ENOMEM;
1067 svm->nested.vmcb02.ptr = page_address(vmcb02_page);
1068 svm->nested.vmcb02.pa = __sme_set(page_to_pfn(vmcb02_page) << PAGE_SHIFT);
1069
1070 svm->nested.msrpm = svm_vcpu_alloc_msrpm();
1071 if (!svm->nested.msrpm)
1072 goto err_free_vmcb02;
1073 svm_vcpu_init_msrpm(&svm->vcpu, svm->nested.msrpm);
1074
1075 svm->nested.initialized = true;
1076 return 0;
1077
1078 err_free_vmcb02:
1079 __free_page(vmcb02_page);
1080 return -ENOMEM;
1081 }
1082
void svm_free_nested(struct vcpu_svm *svm)
1084 {
1085 if (!svm->nested.initialized)
1086 return;
1087
1088 svm_vcpu_free_msrpm(svm->nested.msrpm);
1089 svm->nested.msrpm = NULL;
1090
1091 __free_page(virt_to_page(svm->nested.vmcb02.ptr));
1092 svm->nested.vmcb02.ptr = NULL;
1093
1094 /*
1095 * When last_vmcb12_gpa matches the current vmcb12 gpa,
1096 * some vmcb12 fields are not loaded if they are marked clean
1097 * in the vmcb12, since in this case they are up to date already.
1098 *
1099 * When the vmcb02 is freed, this optimization becomes invalid.
1100 */
1101 svm->nested.last_vmcb12_gpa = INVALID_GPA;
1102
1103 svm->nested.initialized = false;
1104 }
1105
1106 /*
1107 * Forcibly leave nested mode in order to be able to reset the VCPU later on.
1108 */
void svm_leave_nested(struct kvm_vcpu *vcpu)
1110 {
1111 struct vcpu_svm *svm = to_svm(vcpu);
1112
1113 if (is_guest_mode(vcpu)) {
1114 svm->nested.nested_run_pending = 0;
1115 svm->nested.vmcb12_gpa = INVALID_GPA;
1116
1117 leave_guest_mode(vcpu);
1118
1119 svm_switch_vmcb(svm, &svm->vmcb01);
1120
1121 nested_svm_uninit_mmu_context(vcpu);
1122 vmcb_mark_all_dirty(svm->vmcb);
1123 }
1124
1125 kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
1126 }
1127
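/*
 * Decide whether an MSR intercept taken in L2 should be forwarded to L1 by
 * consulting L1's MSR permission bitmap: each MSR has a read and a write
 * bit, and a set bit means L1 wants the access intercepted.
 */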
static int nested_svm_exit_handled_msr(struct vcpu_svm *svm)
1129 {
1130 u32 offset, msr, value;
1131 int write, mask;
1132
1133 if (!(vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_MSR_PROT)))
1134 return NESTED_EXIT_HOST;
1135
1136 msr = svm->vcpu.arch.regs[VCPU_REGS_RCX];
1137 offset = svm_msrpm_offset(msr);
1138 write = svm->vmcb->control.exit_info_1 & 1;
1139 mask = 1 << ((2 * (msr & 0xf)) + write);
1140
1141 if (offset == MSR_INVALID)
1142 return NESTED_EXIT_DONE;
1143
	/* Offset is in 32-bit units but is needed in 8-bit units */
1145 offset *= 4;
1146
1147 if (kvm_vcpu_read_guest(&svm->vcpu, svm->nested.ctl.msrpm_base_pa + offset, &value, 4))
1148 return NESTED_EXIT_DONE;
1149
1150 return (value & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
1151 }
1152
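/*
 * Decide whether an IOIO intercept taken in L2 should be forwarded to L1 by
 * checking the port's bits in L1's I/O permission bitmap.  Accesses wider
 * than one byte may straddle a byte boundary in the bitmap, hence the one-
 * or two-byte read.
 */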
static int nested_svm_intercept_ioio(struct vcpu_svm *svm)
1154 {
1155 unsigned port, size, iopm_len;
1156 u16 val, mask;
1157 u8 start_bit;
1158 u64 gpa;
1159
1160 if (!(vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_IOIO_PROT)))
1161 return NESTED_EXIT_HOST;
1162
1163 port = svm->vmcb->control.exit_info_1 >> 16;
1164 size = (svm->vmcb->control.exit_info_1 & SVM_IOIO_SIZE_MASK) >>
1165 SVM_IOIO_SIZE_SHIFT;
1166 gpa = svm->nested.ctl.iopm_base_pa + (port / 8);
1167 start_bit = port % 8;
1168 iopm_len = (start_bit + size > 8) ? 2 : 1;
1169 mask = (0xf >> (4 - size)) << start_bit;
1170 val = 0;
1171
1172 if (kvm_vcpu_read_guest(&svm->vcpu, gpa, &val, iopm_len))
1173 return NESTED_EXIT_DONE;
1174
1175 return (val & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
1176 }
1177
static int nested_svm_intercept(struct vcpu_svm *svm)
1179 {
1180 u32 exit_code = svm->vmcb->control.exit_code;
1181 int vmexit = NESTED_EXIT_HOST;
1182
1183 switch (exit_code) {
1184 case SVM_EXIT_MSR:
1185 vmexit = nested_svm_exit_handled_msr(svm);
1186 break;
1187 case SVM_EXIT_IOIO:
1188 vmexit = nested_svm_intercept_ioio(svm);
1189 break;
1190 case SVM_EXIT_READ_CR0 ... SVM_EXIT_WRITE_CR8: {
1191 if (vmcb12_is_intercept(&svm->nested.ctl, exit_code))
1192 vmexit = NESTED_EXIT_DONE;
1193 break;
1194 }
1195 case SVM_EXIT_READ_DR0 ... SVM_EXIT_WRITE_DR7: {
1196 if (vmcb12_is_intercept(&svm->nested.ctl, exit_code))
1197 vmexit = NESTED_EXIT_DONE;
1198 break;
1199 }
1200 case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: {
1201 /*
1202 * Host-intercepted exceptions have been checked already in
1203 * nested_svm_exit_special. There is nothing to do here,
1204 * the vmexit is injected by svm_check_nested_events.
1205 */
1206 vmexit = NESTED_EXIT_DONE;
1207 break;
1208 }
1209 case SVM_EXIT_ERR: {
1210 vmexit = NESTED_EXIT_DONE;
1211 break;
1212 }
1213 default: {
1214 if (vmcb12_is_intercept(&svm->nested.ctl, exit_code))
1215 vmexit = NESTED_EXIT_DONE;
1216 }
1217 }
1218
1219 return vmexit;
1220 }
1221
int nested_svm_exit_handled(struct vcpu_svm *svm)
1223 {
1224 int vmexit;
1225
1226 vmexit = nested_svm_intercept(svm);
1227
1228 if (vmexit == NESTED_EXIT_DONE)
1229 nested_svm_vmexit(svm);
1230
1231 return vmexit;
1232 }
1233
int nested_svm_check_permissions(struct kvm_vcpu *vcpu)
1235 {
1236 if (!(vcpu->arch.efer & EFER_SVME) || !is_paging(vcpu)) {
1237 kvm_queue_exception(vcpu, UD_VECTOR);
1238 return 1;
1239 }
1240
1241 if (to_svm(vcpu)->vmcb->save.cpl) {
1242 kvm_inject_gp(vcpu, 0);
1243 return 1;
1244 }
1245
1246 return 0;
1247 }
1248
static bool nested_exit_on_exception(struct vcpu_svm *svm)
1250 {
1251 unsigned int nr = svm->vcpu.arch.exception.nr;
1252
1253 return (svm->nested.ctl.intercepts[INTERCEPT_EXCEPTION] & BIT(nr));
1254 }
1255
static void nested_svm_inject_exception_vmexit(struct vcpu_svm *svm)
1257 {
1258 unsigned int nr = svm->vcpu.arch.exception.nr;
1259 struct vmcb *vmcb = svm->vmcb;
1260
1261 vmcb->control.exit_code = SVM_EXIT_EXCP_BASE + nr;
1262 vmcb->control.exit_code_hi = 0;
1263
1264 if (svm->vcpu.arch.exception.has_error_code)
1265 vmcb->control.exit_info_1 = svm->vcpu.arch.exception.error_code;
1266
1267 /*
1268 * EXITINFO2 is undefined for all exception intercepts other
1269 * than #PF.
1270 */
1271 if (nr == PF_VECTOR) {
1272 if (svm->vcpu.arch.exception.nested_apf)
1273 vmcb->control.exit_info_2 = svm->vcpu.arch.apf.nested_apf_token;
1274 else if (svm->vcpu.arch.exception.has_payload)
1275 vmcb->control.exit_info_2 = svm->vcpu.arch.exception.payload;
1276 else
1277 vmcb->control.exit_info_2 = svm->vcpu.arch.cr2;
1278 } else if (nr == DB_VECTOR) {
1279 /* See inject_pending_event. */
1280 kvm_deliver_exception_payload(&svm->vcpu);
1281 if (svm->vcpu.arch.dr7 & DR7_GD) {
1282 svm->vcpu.arch.dr7 &= ~DR7_GD;
1283 kvm_update_dr7(&svm->vcpu);
1284 }
1285 } else
1286 WARN_ON(svm->vcpu.arch.exception.has_payload);
1287
1288 nested_svm_vmexit(svm);
1289 }
1290
static inline bool nested_exit_on_init(struct vcpu_svm *svm)
1292 {
1293 return vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_INIT);
1294 }
1295
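/*
 * Check for events (INIT, exceptions, SMI, NMI, interrupts) that should
 * cause a synthesized VMEXIT to L1 while running L2.  Returns -EBUSY when
 * the event must wait, e.g. because a nested VMRUN is still pending.
 */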
static int svm_check_nested_events(struct kvm_vcpu *vcpu)
1297 {
1298 struct vcpu_svm *svm = to_svm(vcpu);
1299 bool block_nested_events =
1300 kvm_event_needs_reinjection(vcpu) || svm->nested.nested_run_pending;
1301 struct kvm_lapic *apic = vcpu->arch.apic;
1302
1303 if (lapic_in_kernel(vcpu) &&
1304 test_bit(KVM_APIC_INIT, &apic->pending_events)) {
1305 if (block_nested_events)
1306 return -EBUSY;
1307 if (!nested_exit_on_init(svm))
1308 return 0;
1309 nested_svm_simple_vmexit(svm, SVM_EXIT_INIT);
1310 return 0;
1311 }
1312
1313 if (vcpu->arch.exception.pending) {
1314 /*
1315 * Only a pending nested run can block a pending exception.
1316 * Otherwise an injected NMI/interrupt should either be
1317 * lost or delivered to the nested hypervisor in the EXITINTINFO
1318 * vmcb field, while delivering the pending exception.
1319 */
1320 if (svm->nested.nested_run_pending)
1321 return -EBUSY;
1322 if (!nested_exit_on_exception(svm))
1323 return 0;
1324 nested_svm_inject_exception_vmexit(svm);
1325 return 0;
1326 }
1327
1328 if (vcpu->arch.smi_pending && !svm_smi_blocked(vcpu)) {
1329 if (block_nested_events)
1330 return -EBUSY;
1331 if (!nested_exit_on_smi(svm))
1332 return 0;
1333 nested_svm_simple_vmexit(svm, SVM_EXIT_SMI);
1334 return 0;
1335 }
1336
1337 if (vcpu->arch.nmi_pending && !svm_nmi_blocked(vcpu)) {
1338 if (block_nested_events)
1339 return -EBUSY;
1340 if (!nested_exit_on_nmi(svm))
1341 return 0;
1342 nested_svm_simple_vmexit(svm, SVM_EXIT_NMI);
1343 return 0;
1344 }
1345
1346 if (kvm_cpu_has_interrupt(vcpu) && !svm_interrupt_blocked(vcpu)) {
1347 if (block_nested_events)
1348 return -EBUSY;
1349 if (!nested_exit_on_intr(svm))
1350 return 0;
1351 trace_kvm_nested_intr_vmexit(svm->vmcb->save.rip);
1352 nested_svm_simple_vmexit(svm, SVM_EXIT_INTR);
1353 return 0;
1354 }
1355
1356 return 0;
1357 }
1358
int nested_svm_exit_special(struct vcpu_svm *svm)
1360 {
1361 u32 exit_code = svm->vmcb->control.exit_code;
1362
1363 switch (exit_code) {
1364 case SVM_EXIT_INTR:
1365 case SVM_EXIT_NMI:
1366 case SVM_EXIT_NPF:
1367 return NESTED_EXIT_HOST;
1368 case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: {
1369 u32 excp_bits = 1 << (exit_code - SVM_EXIT_EXCP_BASE);
1370
1371 if (svm->vmcb01.ptr->control.intercepts[INTERCEPT_EXCEPTION] &
1372 excp_bits)
1373 return NESTED_EXIT_HOST;
1374 else if (exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR &&
1375 svm->vcpu.arch.apf.host_apf_flags)
1376 /* Trap async PF even if not shadowing */
1377 return NESTED_EXIT_HOST;
1378 break;
1379 }
1380 default:
1381 break;
1382 }
1383
1384 return NESTED_EXIT_CONTINUE;
1385 }
1386
void nested_svm_update_tsc_ratio_msr(struct kvm_vcpu *vcpu)
1388 {
1389 struct vcpu_svm *svm = to_svm(vcpu);
1390
1391 vcpu->arch.tsc_scaling_ratio =
1392 kvm_calc_nested_tsc_multiplier(vcpu->arch.l1_tsc_scaling_ratio,
1393 svm->tsc_ratio_msr);
1394 __svm_write_tsc_multiplier(vcpu->arch.tsc_scaling_ratio);
1395 }
1396
1397 /* Inverse operation of nested_copy_vmcb_control_to_cache(). asid is copied too. */
static void nested_copy_vmcb_cache_to_control(struct vmcb_control_area *dst,
1399 struct vmcb_ctrl_area_cached *from)
1400 {
1401 unsigned int i;
1402
1403 memset(dst, 0, sizeof(struct vmcb_control_area));
1404
1405 for (i = 0; i < MAX_INTERCEPT; i++)
1406 dst->intercepts[i] = from->intercepts[i];
1407
1408 dst->iopm_base_pa = from->iopm_base_pa;
1409 dst->msrpm_base_pa = from->msrpm_base_pa;
1410 dst->tsc_offset = from->tsc_offset;
1411 dst->asid = from->asid;
1412 dst->tlb_ctl = from->tlb_ctl;
1413 dst->int_ctl = from->int_ctl;
1414 dst->int_vector = from->int_vector;
1415 dst->int_state = from->int_state;
1416 dst->exit_code = from->exit_code;
1417 dst->exit_code_hi = from->exit_code_hi;
1418 dst->exit_info_1 = from->exit_info_1;
1419 dst->exit_info_2 = from->exit_info_2;
1420 dst->exit_int_info = from->exit_int_info;
1421 dst->exit_int_info_err = from->exit_int_info_err;
1422 dst->nested_ctl = from->nested_ctl;
1423 dst->event_inj = from->event_inj;
1424 dst->event_inj_err = from->event_inj_err;
1425 dst->nested_cr3 = from->nested_cr3;
1426 dst->virt_ext = from->virt_ext;
1427 dst->pause_filter_count = from->pause_filter_count;
1428 dst->pause_filter_thresh = from->pause_filter_thresh;
1429 /* 'clean' and 'reserved_sw' are not changed by KVM */
1430 }
1431
static int svm_get_nested_state(struct kvm_vcpu *vcpu,
1433 struct kvm_nested_state __user *user_kvm_nested_state,
1434 u32 user_data_size)
1435 {
1436 struct vcpu_svm *svm;
1437 struct vmcb_control_area *ctl;
1438 unsigned long r;
1439 struct kvm_nested_state kvm_state = {
1440 .flags = 0,
1441 .format = KVM_STATE_NESTED_FORMAT_SVM,
1442 .size = sizeof(kvm_state),
1443 };
1444 struct vmcb __user *user_vmcb = (struct vmcb __user *)
1445 &user_kvm_nested_state->data.svm[0];
1446
1447 if (!vcpu)
1448 return kvm_state.size + KVM_STATE_NESTED_SVM_VMCB_SIZE;
1449
1450 svm = to_svm(vcpu);
1451
1452 if (user_data_size < kvm_state.size)
1453 goto out;
1454
1455 /* First fill in the header and copy it out. */
1456 if (is_guest_mode(vcpu)) {
1457 kvm_state.hdr.svm.vmcb_pa = svm->nested.vmcb12_gpa;
1458 kvm_state.size += KVM_STATE_NESTED_SVM_VMCB_SIZE;
1459 kvm_state.flags |= KVM_STATE_NESTED_GUEST_MODE;
1460
1461 if (svm->nested.nested_run_pending)
1462 kvm_state.flags |= KVM_STATE_NESTED_RUN_PENDING;
1463 }
1464
1465 if (gif_set(svm))
1466 kvm_state.flags |= KVM_STATE_NESTED_GIF_SET;
1467
1468 if (copy_to_user(user_kvm_nested_state, &kvm_state, sizeof(kvm_state)))
1469 return -EFAULT;
1470
1471 if (!is_guest_mode(vcpu))
1472 goto out;
1473
1474 /*
1475 * Copy over the full size of the VMCB rather than just the size
1476 * of the structs.
1477 */
1478 if (clear_user(user_vmcb, KVM_STATE_NESTED_SVM_VMCB_SIZE))
1479 return -EFAULT;
1480
1481 ctl = kzalloc(sizeof(*ctl), GFP_KERNEL);
1482 if (!ctl)
1483 return -ENOMEM;
1484
1485 nested_copy_vmcb_cache_to_control(ctl, &svm->nested.ctl);
1486 r = copy_to_user(&user_vmcb->control, ctl,
1487 sizeof(user_vmcb->control));
1488 kfree(ctl);
1489 if (r)
1490 return -EFAULT;
1491
1492 if (copy_to_user(&user_vmcb->save, &svm->vmcb01.ptr->save,
1493 sizeof(user_vmcb->save)))
1494 return -EFAULT;
1495 out:
1496 return kvm_state.size;
1497 }
1498
static int svm_set_nested_state(struct kvm_vcpu *vcpu,
1500 struct kvm_nested_state __user *user_kvm_nested_state,
1501 struct kvm_nested_state *kvm_state)
1502 {
1503 struct vcpu_svm *svm = to_svm(vcpu);
1504 struct vmcb __user *user_vmcb = (struct vmcb __user *)
1505 &user_kvm_nested_state->data.svm[0];
1506 struct vmcb_control_area *ctl;
1507 struct vmcb_save_area *save;
1508 struct vmcb_save_area_cached save_cached;
1509 struct vmcb_ctrl_area_cached ctl_cached;
1510 unsigned long cr0;
1511 int ret;
1512
1513 BUILD_BUG_ON(sizeof(struct vmcb_control_area) + sizeof(struct vmcb_save_area) >
1514 KVM_STATE_NESTED_SVM_VMCB_SIZE);
1515
1516 if (kvm_state->format != KVM_STATE_NESTED_FORMAT_SVM)
1517 return -EINVAL;
1518
1519 if (kvm_state->flags & ~(KVM_STATE_NESTED_GUEST_MODE |
1520 KVM_STATE_NESTED_RUN_PENDING |
1521 KVM_STATE_NESTED_GIF_SET))
1522 return -EINVAL;
1523
1524 /*
1525 * If in guest mode, vcpu->arch.efer actually refers to the L2 guest's
1526 * EFER.SVME, but EFER.SVME still has to be 1 for VMRUN to succeed.
1527 */
1528 if (!(vcpu->arch.efer & EFER_SVME)) {
1529 /* GIF=1 and no guest mode are required if SVME=0. */
1530 if (kvm_state->flags != KVM_STATE_NESTED_GIF_SET)
1531 return -EINVAL;
1532 }
1533
1534 /* SMM temporarily disables SVM, so we cannot be in guest mode. */
1535 if (is_smm(vcpu) && (kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE))
1536 return -EINVAL;
1537
1538 if (!(kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE)) {
1539 svm_leave_nested(vcpu);
1540 svm_set_gif(svm, !!(kvm_state->flags & KVM_STATE_NESTED_GIF_SET));
1541 return 0;
1542 }
1543
1544 if (!page_address_valid(vcpu, kvm_state->hdr.svm.vmcb_pa))
1545 return -EINVAL;
1546 if (kvm_state->size < sizeof(*kvm_state) + KVM_STATE_NESTED_SVM_VMCB_SIZE)
1547 return -EINVAL;
1548
1549 ret = -ENOMEM;
1550 ctl = kzalloc(sizeof(*ctl), GFP_KERNEL_ACCOUNT);
1551 save = kzalloc(sizeof(*save), GFP_KERNEL_ACCOUNT);
1552 if (!ctl || !save)
1553 goto out_free;
1554
1555 ret = -EFAULT;
1556 if (copy_from_user(ctl, &user_vmcb->control, sizeof(*ctl)))
1557 goto out_free;
1558 if (copy_from_user(save, &user_vmcb->save, sizeof(*save)))
1559 goto out_free;
1560
1561 ret = -EINVAL;
1562 __nested_copy_vmcb_control_to_cache(vcpu, &ctl_cached, ctl);
1563 if (!__nested_vmcb_check_controls(vcpu, &ctl_cached))
1564 goto out_free;
1565
1566 /*
1567 * Processor state contains L2 state. Check that it is
1568 * valid for guest mode (see nested_vmcb_check_save).
1569 */
1570 cr0 = kvm_read_cr0(vcpu);
1571 if (((cr0 & X86_CR0_CD) == 0) && (cr0 & X86_CR0_NW))
1572 goto out_free;
1573
1574 /*
1575 * Validate host state saved from before VMRUN (see
1576 * nested_svm_check_permissions).
1577 */
1578 __nested_copy_vmcb_save_to_cache(&save_cached, save);
1579 if (!(save->cr0 & X86_CR0_PG) ||
1580 !(save->cr0 & X86_CR0_PE) ||
1581 (save->rflags & X86_EFLAGS_VM) ||
1582 !__nested_vmcb_check_save(vcpu, &save_cached))
1583 goto out_free;
1584
1585
1586 /*
1587 * All checks done, we can enter guest mode. Userspace provides
1588 * vmcb12.control, which will be combined with L1 and stored into
1589 * vmcb02, and the L1 save state which we store in vmcb01.
1590 * L2 registers if needed are moved from the current VMCB to VMCB02.
1591 */
1592
1593 if (is_guest_mode(vcpu))
1594 svm_leave_nested(vcpu);
1595 else
1596 svm->nested.vmcb02.ptr->save = svm->vmcb01.ptr->save;
1597
1598 svm_set_gif(svm, !!(kvm_state->flags & KVM_STATE_NESTED_GIF_SET));
1599
1600 svm->nested.nested_run_pending =
1601 !!(kvm_state->flags & KVM_STATE_NESTED_RUN_PENDING);
1602
1603 svm->nested.vmcb12_gpa = kvm_state->hdr.svm.vmcb_pa;
1604
1605 svm_copy_vmrun_state(&svm->vmcb01.ptr->save, save);
1606 nested_copy_vmcb_control_to_cache(svm, ctl);
1607
1608 svm_switch_vmcb(svm, &svm->nested.vmcb02);
1609 nested_vmcb02_prepare_control(svm);
1610
1611 /*
1612 * While the nested guest CR3 is already checked and set by
 * KVM_SET_SREGS, it was set before the nested state was loaded, so
 * the MMU might not be initialized correctly.
1615 * Set it again to fix this.
1616 */
1617
1618 ret = nested_svm_load_cr3(&svm->vcpu, vcpu->arch.cr3,
1619 nested_npt_enabled(svm), false);
1620 if (WARN_ON_ONCE(ret))
1621 goto out_free;
1622
1623 svm->nested.force_msr_bitmap_recalc = true;
1624
1625 kvm_make_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
1626 ret = 0;
1627 out_free:
1628 kfree(save);
1629 kfree(ctl);
1630
1631 return ret;
1632 }
1633
static bool svm_get_nested_state_pages(struct kvm_vcpu *vcpu)
1635 {
1636 struct vcpu_svm *svm = to_svm(vcpu);
1637
1638 if (WARN_ON(!is_guest_mode(vcpu)))
1639 return true;
1640
1641 if (!vcpu->arch.pdptrs_from_userspace &&
1642 !nested_npt_enabled(svm) && is_pae_paging(vcpu))
1643 /*
1644 * Reload the guest's PDPTRs since after a migration
1645 * the guest CR3 might be restored prior to setting the nested
1646 * state which can lead to a load of wrong PDPTRs.
1647 */
1648 if (CC(!load_pdptrs(vcpu, vcpu->arch.cr3)))
1649 return false;
1650
1651 if (!nested_svm_vmrun_msrpm(svm)) {
1652 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
1653 vcpu->run->internal.suberror =
1654 KVM_INTERNAL_ERROR_EMULATION;
1655 vcpu->run->internal.ndata = 0;
1656 return false;
1657 }
1658
1659 return true;
1660 }
1661
1662 struct kvm_x86_nested_ops svm_nested_ops = {
1663 .leave_nested = svm_leave_nested,
1664 .check_events = svm_check_nested_events,
1665 .handle_page_fault_workaround = nested_svm_handle_page_fault_workaround,
1666 .triple_fault = nested_svm_triple_fault,
1667 .get_nested_state_pages = svm_get_nested_state_pages,
1668 .get_state = svm_get_nested_state,
1669 .set_state = svm_set_nested_state,
1670 };
1671