1 // SPDX-License-Identifier: GPL-2.0
2
3 #include <linux/objtool.h>
4 #include <linux/percpu.h>
5
6 #include <asm/debugreg.h>
7 #include <asm/mmu_context.h>
8
9 #include "cpuid.h"
10 #include "evmcs.h"
11 #include "hyperv.h"
12 #include "mmu.h"
13 #include "nested.h"
14 #include "pmu.h"
15 #include "sgx.h"
16 #include "trace.h"
17 #include "vmx.h"
18 #include "x86.h"
19
20 static bool __read_mostly enable_shadow_vmcs = 1;
21 module_param_named(enable_shadow_vmcs, enable_shadow_vmcs, bool, S_IRUGO);
22
23 static bool __read_mostly nested_early_check = 0;
24 module_param(nested_early_check, bool, S_IRUGO);
25
26 #define CC KVM_NESTED_VMENTER_CONSISTENCY_CHECK
27
28 /*
29 * Hyper-V requires all of these, so mark them as supported even though
30 * they are just treated the same as all-context.
31 */
32 #define VMX_VPID_EXTENT_SUPPORTED_MASK \
33 (VMX_VPID_EXTENT_INDIVIDUAL_ADDR_BIT | \
34 VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT | \
35 VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT | \
36 VMX_VPID_EXTENT_SINGLE_NON_GLOBAL_BIT)
37
38 #define VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE 5
39
40 enum {
41 VMX_VMREAD_BITMAP,
42 VMX_VMWRITE_BITMAP,
43 VMX_BITMAP_NR
44 };
45 static unsigned long *vmx_bitmap[VMX_BITMAP_NR];
46
47 #define vmx_vmread_bitmap (vmx_bitmap[VMX_VMREAD_BITMAP])
48 #define vmx_vmwrite_bitmap (vmx_bitmap[VMX_VMWRITE_BITMAP])
49
50 struct shadow_vmcs_field {
51 u16 encoding;
52 u16 offset;
53 };
54 static struct shadow_vmcs_field shadow_read_only_fields[] = {
55 #define SHADOW_FIELD_RO(x, y) { x, offsetof(struct vmcs12, y) },
56 #include "vmcs_shadow_fields.h"
57 };
58 static int max_shadow_read_only_fields =
59 ARRAY_SIZE(shadow_read_only_fields);
60
61 static struct shadow_vmcs_field shadow_read_write_fields[] = {
62 #define SHADOW_FIELD_RW(x, y) { x, offsetof(struct vmcs12, y) },
63 #include "vmcs_shadow_fields.h"
64 };
65 static int max_shadow_read_write_fields =
66 ARRAY_SIZE(shadow_read_write_fields);
67
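/*
 * Initialize the VMREAD/VMWRITE bitmaps and compact the shadow field
 * tables: clear the intercept bit for every field that can be shadowed,
 * and drop fields the underlying hardware cannot VMWRITE (e.g. the PML
 * index or the preemption timer value when those features are unsupported).
 */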
68 static void init_vmcs_shadow_fields(void)
69 {
70 int i, j;
71
72 memset(vmx_vmread_bitmap, 0xff, PAGE_SIZE);
73 memset(vmx_vmwrite_bitmap, 0xff, PAGE_SIZE);
74
75 for (i = j = 0; i < max_shadow_read_only_fields; i++) {
76 struct shadow_vmcs_field entry = shadow_read_only_fields[i];
77 u16 field = entry.encoding;
78
79 if (vmcs_field_width(field) == VMCS_FIELD_WIDTH_U64 &&
80 (i + 1 == max_shadow_read_only_fields ||
81 shadow_read_only_fields[i + 1].encoding != field + 1))
82 pr_err("Missing field from shadow_read_only_field %x\n",
83 field + 1);
84
85 clear_bit(field, vmx_vmread_bitmap);
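/*
 * An odd encoding addresses the high 32 bits of a 64-bit field.  On
 * 64-bit hosts the full field is accessed via the even encoding, so skip
 * the entry; on 32-bit hosts keep it but point the offset at the upper
 * half of the vmcs12 field.
 */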
86 if (field & 1)
87 #ifdef CONFIG_X86_64
88 continue;
89 #else
90 entry.offset += sizeof(u32);
91 #endif
92 shadow_read_only_fields[j++] = entry;
93 }
94 max_shadow_read_only_fields = j;
95
96 for (i = j = 0; i < max_shadow_read_write_fields; i++) {
97 struct shadow_vmcs_field entry = shadow_read_write_fields[i];
98 u16 field = entry.encoding;
99
100 if (vmcs_field_width(field) == VMCS_FIELD_WIDTH_U64 &&
101 (i + 1 == max_shadow_read_write_fields ||
102 shadow_read_write_fields[i + 1].encoding != field + 1))
103 pr_err("Missing field from shadow_read_write_field %x\n",
104 field + 1);
105
106 WARN_ONCE(field >= GUEST_ES_AR_BYTES &&
107 field <= GUEST_TR_AR_BYTES,
108 "Update vmcs12_write_any() to drop reserved bits from AR_BYTES");
109
110 /*
111 * PML and the preemption timer can be emulated, but the
112 * processor cannot vmwrite to fields that don't exist
113 * on bare metal.
114 */
115 switch (field) {
116 case GUEST_PML_INDEX:
117 if (!cpu_has_vmx_pml())
118 continue;
119 break;
120 case VMX_PREEMPTION_TIMER_VALUE:
121 if (!cpu_has_vmx_preemption_timer())
122 continue;
123 break;
124 case GUEST_INTR_STATUS:
125 if (!cpu_has_vmx_apicv())
126 continue;
127 break;
128 default:
129 break;
130 }
131
132 clear_bit(field, vmx_vmwrite_bitmap);
133 clear_bit(field, vmx_vmread_bitmap);
134 if (field & 1)
135 #ifdef CONFIG_X86_64
136 continue;
137 #else
138 entry.offset += sizeof(u32);
139 #endif
140 shadow_read_write_fields[j++] = entry;
141 }
142 max_shadow_read_write_fields = j;
143 }
144
145 /*
146 * The following 3 functions, nested_vmx_succeed()/failValid()/failInvalid(),
147 * set the success or error code of an emulated VMX instruction (as specified
148 * by Vol 2B, VMX Instruction Reference, "Conventions"), and skip the emulated
149 * instruction.
150 */
151 static int nested_vmx_succeed(struct kvm_vcpu *vcpu)
152 {
153 vmx_set_rflags(vcpu, vmx_get_rflags(vcpu)
154 & ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
155 X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF));
156 return kvm_skip_emulated_instruction(vcpu);
157 }
158
159 static int nested_vmx_failInvalid(struct kvm_vcpu *vcpu)
160 {
161 vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu)
162 & ~(X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_ZF |
163 X86_EFLAGS_SF | X86_EFLAGS_OF))
164 | X86_EFLAGS_CF);
165 return kvm_skip_emulated_instruction(vcpu);
166 }
167
168 static int nested_vmx_failValid(struct kvm_vcpu *vcpu,
169 u32 vm_instruction_error)
170 {
171 vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu)
172 & ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
173 X86_EFLAGS_SF | X86_EFLAGS_OF))
174 | X86_EFLAGS_ZF);
175 get_vmcs12(vcpu)->vm_instruction_error = vm_instruction_error;
176 /*
177 * We don't need to force sync to shadow VMCS because
178 * VM_INSTRUCTION_ERROR is not shadowed. Enlightened VMCS 'shadows' all
179 * fields and thus must be synced.
180 */
181 if (to_vmx(vcpu)->nested.hv_evmcs_vmptr != EVMPTR_INVALID)
182 to_vmx(vcpu)->nested.need_vmcs12_to_shadow_sync = true;
183
184 return kvm_skip_emulated_instruction(vcpu);
185 }
186
187 static int nested_vmx_fail(struct kvm_vcpu *vcpu, u32 vm_instruction_error)
188 {
189 struct vcpu_vmx *vmx = to_vmx(vcpu);
190
191 /*
192 * failValid writes the error number to the current VMCS, which
193 * can't be done if there isn't a current VMCS.
194 */
195 if (vmx->nested.current_vmptr == INVALID_GPA &&
196 !evmptr_is_valid(vmx->nested.hv_evmcs_vmptr))
197 return nested_vmx_failInvalid(vcpu);
198
199 return nested_vmx_failValid(vcpu, vm_instruction_error);
200 }
201
202 static void nested_vmx_abort(struct kvm_vcpu *vcpu, u32 indicator)
203 {
204 /* TODO: don't simply reset the guest here. */
205 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
206 pr_debug_ratelimited("kvm: nested vmx abort, indicator %d\n", indicator);
207 }
208
209 static inline bool vmx_control_verify(u32 control, u32 low, u32 high)
210 {
211 return fixed_bits_valid(control, low, high);
212 }
213
214 static inline u64 vmx_control_msr(u32 low, u32 high)
215 {
216 return low | ((u64)high << 32);
217 }
218
219 static void vmx_disable_shadow_vmcs(struct vcpu_vmx *vmx)
220 {
221 secondary_exec_controls_clearbit(vmx, SECONDARY_EXEC_SHADOW_VMCS);
222 vmcs_write64(VMCS_LINK_POINTER, INVALID_GPA);
223 vmx->nested.need_vmcs12_to_shadow_sync = false;
224 }
225
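/*
 * Unmap the enlightened VMCS page (if one is mapped) and invalidate the
 * cached eVMCS pointer.
 */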
226 static inline void nested_release_evmcs(struct kvm_vcpu *vcpu)
227 {
228 struct vcpu_vmx *vmx = to_vmx(vcpu);
229
230 if (evmptr_is_valid(vmx->nested.hv_evmcs_vmptr)) {
231 kvm_vcpu_unmap(vcpu, &vmx->nested.hv_evmcs_map, true);
232 vmx->nested.hv_evmcs = NULL;
233 }
234
235 vmx->nested.hv_evmcs_vmptr = EVMPTR_INVALID;
236 }
237
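/*
 * Propagate the host segment state that was saved while @prev was the
 * active VMCS into the newly loaded VMCS, but only if host state has
 * already been loaded for this vCPU.
 */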
238 static void vmx_sync_vmcs_host_state(struct vcpu_vmx *vmx,
239 struct loaded_vmcs *prev)
240 {
241 struct vmcs_host_state *dest, *src;
242
243 if (unlikely(!vmx->guest_state_loaded))
244 return;
245
246 src = &prev->host_state;
247 dest = &vmx->loaded_vmcs->host_state;
248
249 vmx_set_host_fs_gs(dest, src->fs_sel, src->gs_sel, src->fs_base, src->gs_base);
250 dest->ldt_sel = src->ldt_sel;
251 #ifdef CONFIG_X86_64
252 dest->ds_sel = src->ds_sel;
253 dest->es_sel = src->es_sel;
254 #endif
255 }
256
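/*
 * Switch the vCPU between vmcs01 and vmcs02 as the active VMCS, keeping
 * per-VMCS host state in sync and invalidating all lazily loaded/updated
 * guest registers.
 */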
257 static void vmx_switch_vmcs(struct kvm_vcpu *vcpu, struct loaded_vmcs *vmcs)
258 {
259 struct vcpu_vmx *vmx = to_vmx(vcpu);
260 struct loaded_vmcs *prev;
261 int cpu;
262
263 if (WARN_ON_ONCE(vmx->loaded_vmcs == vmcs))
264 return;
265
266 cpu = get_cpu();
267 prev = vmx->loaded_vmcs;
268 vmx->loaded_vmcs = vmcs;
269 vmx_vcpu_load_vmcs(vcpu, cpu, prev);
270 vmx_sync_vmcs_host_state(vmx, prev);
271 put_cpu();
272
273 vcpu->arch.regs_avail = ~VMX_REGS_LAZY_LOAD_SET;
274
275 /*
276 * All lazily updated registers will be reloaded from VMCS12 on both
277 * vmentry and vmexit.
278 */
279 vcpu->arch.regs_dirty = 0;
280 }
281
282 /*
283 * Free whatever needs to be freed from vmx->nested when L1 goes down, or
284 * just stops using VMX.
285 */
286 static void free_nested(struct kvm_vcpu *vcpu)
287 {
288 struct vcpu_vmx *vmx = to_vmx(vcpu);
289
290 if (WARN_ON_ONCE(vmx->loaded_vmcs != &vmx->vmcs01))
291 vmx_switch_vmcs(vcpu, &vmx->vmcs01);
292
293 if (!vmx->nested.vmxon && !vmx->nested.smm.vmxon)
294 return;
295
296 kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
297
298 vmx->nested.vmxon = false;
299 vmx->nested.smm.vmxon = false;
300 vmx->nested.vmxon_ptr = INVALID_GPA;
301 free_vpid(vmx->nested.vpid02);
302 vmx->nested.posted_intr_nv = -1;
303 vmx->nested.current_vmptr = INVALID_GPA;
304 if (enable_shadow_vmcs) {
305 vmx_disable_shadow_vmcs(vmx);
306 vmcs_clear(vmx->vmcs01.shadow_vmcs);
307 free_vmcs(vmx->vmcs01.shadow_vmcs);
308 vmx->vmcs01.shadow_vmcs = NULL;
309 }
310 kfree(vmx->nested.cached_vmcs12);
311 vmx->nested.cached_vmcs12 = NULL;
312 kfree(vmx->nested.cached_shadow_vmcs12);
313 vmx->nested.cached_shadow_vmcs12 = NULL;
314 /* Unpin physical memory we referred to in the vmcs02 */
315 if (vmx->nested.apic_access_page) {
316 kvm_release_page_clean(vmx->nested.apic_access_page);
317 vmx->nested.apic_access_page = NULL;
318 }
319 kvm_vcpu_unmap(vcpu, &vmx->nested.virtual_apic_map, true);
320 kvm_vcpu_unmap(vcpu, &vmx->nested.pi_desc_map, true);
321 vmx->nested.pi_desc = NULL;
322
323 kvm_mmu_free_roots(vcpu->kvm, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL);
324
325 nested_release_evmcs(vcpu);
326
327 free_loaded_vmcs(&vmx->nested.vmcs02);
328 }
329
330 /*
331 * Ensure that the current vmcs of the logical processor is the
332 * vmcs01 of the vcpu before calling free_nested().
333 */
334 void nested_vmx_free_vcpu(struct kvm_vcpu *vcpu)
335 {
336 vcpu_load(vcpu);
337 vmx_leave_nested(vcpu);
338 vcpu_put(vcpu);
339 }
340
341 #define EPTP_PA_MASK GENMASK_ULL(51, 12)
342
343 static bool nested_ept_root_matches(hpa_t root_hpa, u64 root_eptp, u64 eptp)
344 {
345 return VALID_PAGE(root_hpa) &&
346 ((root_eptp & EPTP_PA_MASK) == (eptp & EPTP_PA_MASK));
347 }
348
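/*
 * Invalidate @addr in every cached prev_root whose EPTP references the
 * same EP4TA as @eptp (the current root is handled by the caller).
 */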
349 static void nested_ept_invalidate_addr(struct kvm_vcpu *vcpu, gpa_t eptp,
350 gpa_t addr)
351 {
352 uint i;
353 struct kvm_mmu_root_info *cached_root;
354
355 WARN_ON_ONCE(!mmu_is_nested(vcpu));
356
357 for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
358 cached_root = &vcpu->arch.mmu->prev_roots[i];
359
360 if (nested_ept_root_matches(cached_root->hpa, cached_root->pgd,
361 eptp))
362 vcpu->arch.mmu->invlpg(vcpu, addr, cached_root->hpa);
363 }
364 }
365
366 static void nested_ept_inject_page_fault(struct kvm_vcpu *vcpu,
367 struct x86_exception *fault)
368 {
369 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
370 struct vcpu_vmx *vmx = to_vmx(vcpu);
371 u32 vm_exit_reason;
372 unsigned long exit_qualification = vcpu->arch.exit_qualification;
373
374 if (vmx->nested.pml_full) {
375 vm_exit_reason = EXIT_REASON_PML_FULL;
376 vmx->nested.pml_full = false;
377 exit_qualification &= INTR_INFO_UNBLOCK_NMI;
378 } else {
379 if (fault->error_code & PFERR_RSVD_MASK)
380 vm_exit_reason = EXIT_REASON_EPT_MISCONFIG;
381 else
382 vm_exit_reason = EXIT_REASON_EPT_VIOLATION;
383
384 /*
385 * Although the caller (kvm_inject_emulated_page_fault) would
386 * have already synced the faulting address in the shadow EPT
387 * tables for the current EPTP12, we also need to sync it for
388 * any other cached EPTP02s based on the same EP4TA, since the
389 * TLB associates mappings to the EP4TA rather than the full EPTP.
390 */
391 nested_ept_invalidate_addr(vcpu, vmcs12->ept_pointer,
392 fault->address);
393 }
394
395 nested_vmx_vmexit(vcpu, vm_exit_reason, 0, exit_qualification);
396 vmcs12->guest_physical_address = fault->address;
397 }
398
399 static void nested_ept_new_eptp(struct kvm_vcpu *vcpu)
400 {
401 struct vcpu_vmx *vmx = to_vmx(vcpu);
402 bool execonly = vmx->nested.msrs.ept_caps & VMX_EPT_EXECUTE_ONLY_BIT;
403 int ept_lpage_level = ept_caps_to_lpage_level(vmx->nested.msrs.ept_caps);
404
405 kvm_init_shadow_ept_mmu(vcpu, execonly, ept_lpage_level,
406 nested_ept_ad_enabled(vcpu),
407 nested_ept_get_eptp(vcpu));
408 }
409
410 static void nested_ept_init_mmu_context(struct kvm_vcpu *vcpu)
411 {
412 WARN_ON(mmu_is_nested(vcpu));
413
414 vcpu->arch.mmu = &vcpu->arch.guest_mmu;
415 nested_ept_new_eptp(vcpu);
416 vcpu->arch.mmu->get_guest_pgd = nested_ept_get_eptp;
417 vcpu->arch.mmu->inject_page_fault = nested_ept_inject_page_fault;
418 vcpu->arch.mmu->get_pdptr = kvm_pdptr_read;
419
420 vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu;
421 }
422
423 static void nested_ept_uninit_mmu_context(struct kvm_vcpu *vcpu)
424 {
425 vcpu->arch.mmu = &vcpu->arch.root_mmu;
426 vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
427 }
428
429 static bool nested_vmx_is_page_fault_vmexit(struct vmcs12 *vmcs12,
430 u16 error_code)
431 {
432 bool inequality, bit;
433
434 bit = (vmcs12->exception_bitmap & (1u << PF_VECTOR)) != 0;
435 inequality =
436 (error_code & vmcs12->page_fault_error_code_mask) !=
437 vmcs12->page_fault_error_code_match;
438 return inequality ^ bit;
439 }
440
441
442 /*
443 * KVM received a page fault that it wants to inject into the guest. This function
444 * checks whether, for a nested guest, it should be injected into L1 or L2.
445 */
446 static int nested_vmx_check_exception(struct kvm_vcpu *vcpu, unsigned long *exit_qual)
447 {
448 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
449 unsigned int nr = vcpu->arch.exception.nr;
450 bool has_payload = vcpu->arch.exception.has_payload;
451 unsigned long payload = vcpu->arch.exception.payload;
452
453 if (nr == PF_VECTOR) {
454 if (vcpu->arch.exception.nested_apf) {
455 *exit_qual = vcpu->arch.apf.nested_apf_token;
456 return 1;
457 }
458 if (nested_vmx_is_page_fault_vmexit(vmcs12,
459 vcpu->arch.exception.error_code)) {
460 *exit_qual = has_payload ? payload : vcpu->arch.cr2;
461 return 1;
462 }
463 } else if (vmcs12->exception_bitmap & (1u << nr)) {
464 if (nr == DB_VECTOR) {
465 if (!has_payload) {
466 payload = vcpu->arch.dr6;
467 payload &= ~DR6_BT;
468 payload ^= DR6_ACTIVE_LOW;
469 }
470 *exit_qual = payload;
471 } else
472 *exit_qual = 0;
473 return 1;
474 }
475
476 return 0;
477 }
478
479 static bool nested_vmx_handle_page_fault_workaround(struct kvm_vcpu *vcpu,
480 struct x86_exception *fault)
481 {
482 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
483
484 WARN_ON(!is_guest_mode(vcpu));
485
486 if (nested_vmx_is_page_fault_vmexit(vmcs12, fault->error_code) &&
487 !WARN_ON_ONCE(to_vmx(vcpu)->nested.nested_run_pending)) {
488 vmcs12->vm_exit_intr_error_code = fault->error_code;
489 nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI,
490 PF_VECTOR | INTR_TYPE_HARD_EXCEPTION |
491 INTR_INFO_DELIVER_CODE_MASK | INTR_INFO_VALID_MASK,
492 fault->address);
493 return true;
494 }
495 return false;
496 }
497
498 static int nested_vmx_check_io_bitmap_controls(struct kvm_vcpu *vcpu,
499 struct vmcs12 *vmcs12)
500 {
501 if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS))
502 return 0;
503
504 if (CC(!page_address_valid(vcpu, vmcs12->io_bitmap_a)) ||
505 CC(!page_address_valid(vcpu, vmcs12->io_bitmap_b)))
506 return -EINVAL;
507
508 return 0;
509 }
510
511 static int nested_vmx_check_msr_bitmap_controls(struct kvm_vcpu *vcpu,
512 struct vmcs12 *vmcs12)
513 {
514 if (!nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS))
515 return 0;
516
517 if (CC(!page_address_valid(vcpu, vmcs12->msr_bitmap)))
518 return -EINVAL;
519
520 return 0;
521 }
522
523 static int nested_vmx_check_tpr_shadow_controls(struct kvm_vcpu *vcpu,
524 struct vmcs12 *vmcs12)
525 {
526 if (!nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW))
527 return 0;
528
529 if (CC(!page_address_valid(vcpu, vmcs12->virtual_apic_page_addr)))
530 return -EINVAL;
531
532 return 0;
533 }
534
535 /*
536 * For x2APIC MSRs, ignore the vmcs01 bitmap. L1 can enable x2APIC without L1
537 * itself utilizing x2APIC. All MSRs were previously set to be intercepted,
538 * only the "disable intercept" case needs to be handled.
539 */
540 static void nested_vmx_disable_intercept_for_x2apic_msr(unsigned long *msr_bitmap_l1,
541 unsigned long *msr_bitmap_l0,
542 u32 msr, int type)
543 {
544 if (type & MSR_TYPE_R && !vmx_test_msr_bitmap_read(msr_bitmap_l1, msr))
545 vmx_clear_msr_bitmap_read(msr_bitmap_l0, msr);
546
547 if (type & MSR_TYPE_W && !vmx_test_msr_bitmap_write(msr_bitmap_l1, msr))
548 vmx_clear_msr_bitmap_write(msr_bitmap_l0, msr);
549 }
550
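/*
 * Set both the read and write intercept bits for every MSR in the x2APIC
 * range (0x800 - 0x8ff).
 */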
551 static inline void enable_x2apic_msr_intercepts(unsigned long *msr_bitmap)
552 {
553 int msr;
554
555 for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) {
556 unsigned word = msr / BITS_PER_LONG;
557
558 msr_bitmap[word] = ~0;
559 msr_bitmap[word + (0x800 / sizeof(long))] = ~0;
560 }
561 }
562
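/*
 * Generate nested_vmx_set_msr_{read,write}_intercept(): intercept the MSR
 * for L2 (vmcs02) if either KVM (vmcs01) or L1 (vmcs12) wants it
 * intercepted, otherwise pass it through.
 */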
563 #define BUILD_NVMX_MSR_INTERCEPT_HELPER(rw) \
564 static inline \
565 void nested_vmx_set_msr_##rw##_intercept(struct vcpu_vmx *vmx, \
566 unsigned long *msr_bitmap_l1, \
567 unsigned long *msr_bitmap_l0, u32 msr) \
568 { \
569 if (vmx_test_msr_bitmap_##rw(vmx->vmcs01.msr_bitmap, msr) || \
570 vmx_test_msr_bitmap_##rw(msr_bitmap_l1, msr)) \
571 vmx_set_msr_bitmap_##rw(msr_bitmap_l0, msr); \
572 else \
573 vmx_clear_msr_bitmap_##rw(msr_bitmap_l0, msr); \
574 }
575 BUILD_NVMX_MSR_INTERCEPT_HELPER(read)
576 BUILD_NVMX_MSR_INTERCEPT_HELPER(write)
577
578 static inline void nested_vmx_set_intercept_for_msr(struct vcpu_vmx *vmx,
579 unsigned long *msr_bitmap_l1,
580 unsigned long *msr_bitmap_l0,
581 u32 msr, int types)
582 {
583 if (types & MSR_TYPE_R)
584 nested_vmx_set_msr_read_intercept(vmx, msr_bitmap_l1,
585 msr_bitmap_l0, msr);
586 if (types & MSR_TYPE_W)
587 nested_vmx_set_msr_write_intercept(vmx, msr_bitmap_l1,
588 msr_bitmap_l0, msr);
589 }
590
591 /*
592 * Merge L0's and L1's MSR bitmap, return false to indicate that
593 * we do not use the hardware.
594 */
595 static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
596 struct vmcs12 *vmcs12)
597 {
598 struct vcpu_vmx *vmx = to_vmx(vcpu);
599 int msr;
600 unsigned long *msr_bitmap_l1;
601 unsigned long *msr_bitmap_l0 = vmx->nested.vmcs02.msr_bitmap;
602 struct hv_enlightened_vmcs *evmcs = vmx->nested.hv_evmcs;
603 struct kvm_host_map *map = &vmx->nested.msr_bitmap_map;
604
605 /* Nothing to do if the MSR bitmap is not in use. */
606 if (!cpu_has_vmx_msr_bitmap() ||
607 !nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS))
608 return false;
609
610 /*
611 * MSR bitmap update can be skipped when:
612 * - MSR bitmap for L1 hasn't changed.
613 * - Nested hypervisor (L1) is attempting to launch the same L2 as
614 * before.
615 * - Nested hypervisor (L1) has enabled 'Enlightened MSR Bitmap' feature
616 * and tells KVM (L0) there were no changes in MSR bitmap for L2.
617 */
618 if (!vmx->nested.force_msr_bitmap_recalc && evmcs &&
619 evmcs->hv_enlightenments_control.msr_bitmap &&
620 evmcs->hv_clean_fields & HV_VMX_ENLIGHTENED_CLEAN_FIELD_MSR_BITMAP)
621 return true;
622
623 if (kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->msr_bitmap), map))
624 return false;
625
626 msr_bitmap_l1 = (unsigned long *)map->hva;
627
628 /*
629 * To keep the control flow simple, pay eight 8-byte writes (sixteen
630 * 4-byte writes on 32-bit systems) up front to enable intercepts for
631 * the x2APIC MSR range and selectively toggle those relevant to L2.
632 */
633 enable_x2apic_msr_intercepts(msr_bitmap_l0);
634
635 if (nested_cpu_has_virt_x2apic_mode(vmcs12)) {
636 if (nested_cpu_has_apic_reg_virt(vmcs12)) {
637 /*
638 * L0 need not intercept reads for MSRs between 0x800
639 * and 0x8ff, it just lets the processor take the value
640 * from the virtual-APIC page; take those 256 bits
641 * directly from the L1 bitmap.
642 */
643 for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) {
644 unsigned word = msr / BITS_PER_LONG;
645
646 msr_bitmap_l0[word] = msr_bitmap_l1[word];
647 }
648 }
649
650 nested_vmx_disable_intercept_for_x2apic_msr(
651 msr_bitmap_l1, msr_bitmap_l0,
652 X2APIC_MSR(APIC_TASKPRI),
653 MSR_TYPE_R | MSR_TYPE_W);
654
655 if (nested_cpu_has_vid(vmcs12)) {
656 nested_vmx_disable_intercept_for_x2apic_msr(
657 msr_bitmap_l1, msr_bitmap_l0,
658 X2APIC_MSR(APIC_EOI),
659 MSR_TYPE_W);
660 nested_vmx_disable_intercept_for_x2apic_msr(
661 msr_bitmap_l1, msr_bitmap_l0,
662 X2APIC_MSR(APIC_SELF_IPI),
663 MSR_TYPE_W);
664 }
665 }
666
667 /*
668 * Always check vmcs01's bitmap to honor userspace MSR filters and any
669 * other runtime changes to vmcs01's bitmap, e.g. dynamic pass-through.
670 */
671 #ifdef CONFIG_X86_64
672 nested_vmx_set_intercept_for_msr(vmx, msr_bitmap_l1, msr_bitmap_l0,
673 MSR_FS_BASE, MSR_TYPE_RW);
674
675 nested_vmx_set_intercept_for_msr(vmx, msr_bitmap_l1, msr_bitmap_l0,
676 MSR_GS_BASE, MSR_TYPE_RW);
677
678 nested_vmx_set_intercept_for_msr(vmx, msr_bitmap_l1, msr_bitmap_l0,
679 MSR_KERNEL_GS_BASE, MSR_TYPE_RW);
680 #endif
681 nested_vmx_set_intercept_for_msr(vmx, msr_bitmap_l1, msr_bitmap_l0,
682 MSR_IA32_SPEC_CTRL, MSR_TYPE_RW);
683
684 nested_vmx_set_intercept_for_msr(vmx, msr_bitmap_l1, msr_bitmap_l0,
685 MSR_IA32_PRED_CMD, MSR_TYPE_W);
686
687 kvm_vcpu_unmap(vcpu, &vmx->nested.msr_bitmap_map, false);
688
689 vmx->nested.force_msr_bitmap_recalc = false;
690
691 return true;
692 }
693
694 static void nested_cache_shadow_vmcs12(struct kvm_vcpu *vcpu,
695 struct vmcs12 *vmcs12)
696 {
697 struct vcpu_vmx *vmx = to_vmx(vcpu);
698 struct gfn_to_hva_cache *ghc = &vmx->nested.shadow_vmcs12_cache;
699
700 if (!nested_cpu_has_shadow_vmcs(vmcs12) ||
701 vmcs12->vmcs_link_pointer == INVALID_GPA)
702 return;
703
704 if (ghc->gpa != vmcs12->vmcs_link_pointer &&
705 kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc,
706 vmcs12->vmcs_link_pointer, VMCS12_SIZE))
707 return;
708
709 kvm_read_guest_cached(vmx->vcpu.kvm, ghc, get_shadow_vmcs12(vcpu),
710 VMCS12_SIZE);
711 }
712
713 static void nested_flush_cached_shadow_vmcs12(struct kvm_vcpu *vcpu,
714 struct vmcs12 *vmcs12)
715 {
716 struct vcpu_vmx *vmx = to_vmx(vcpu);
717 struct gfn_to_hva_cache *ghc = &vmx->nested.shadow_vmcs12_cache;
718
719 if (!nested_cpu_has_shadow_vmcs(vmcs12) ||
720 vmcs12->vmcs_link_pointer == INVALID_GPA)
721 return;
722
723 if (ghc->gpa != vmcs12->vmcs_link_pointer &&
724 kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc,
725 vmcs12->vmcs_link_pointer, VMCS12_SIZE))
726 return;
727
728 kvm_write_guest_cached(vmx->vcpu.kvm, ghc, get_shadow_vmcs12(vcpu),
729 VMCS12_SIZE);
730 }
731
732 /*
733 * In nested virtualization, check if L1 has set
734 * VM_EXIT_ACK_INTR_ON_EXIT.
735 */
736 static bool nested_exit_intr_ack_set(struct kvm_vcpu *vcpu)
737 {
738 return get_vmcs12(vcpu)->vm_exit_controls &
739 VM_EXIT_ACK_INTR_ON_EXIT;
740 }
741
742 static int nested_vmx_check_apic_access_controls(struct kvm_vcpu *vcpu,
743 struct vmcs12 *vmcs12)
744 {
745 if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES) &&
746 CC(!page_address_valid(vcpu, vmcs12->apic_access_addr)))
747 return -EINVAL;
748 else
749 return 0;
750 }
751
752 static int nested_vmx_check_apicv_controls(struct kvm_vcpu *vcpu,
753 struct vmcs12 *vmcs12)
754 {
755 if (!nested_cpu_has_virt_x2apic_mode(vmcs12) &&
756 !nested_cpu_has_apic_reg_virt(vmcs12) &&
757 !nested_cpu_has_vid(vmcs12) &&
758 !nested_cpu_has_posted_intr(vmcs12))
759 return 0;
760
761 /*
762 * If virtualize x2apic mode is enabled,
763 * virtualize apic access must be disabled.
764 */
765 if (CC(nested_cpu_has_virt_x2apic_mode(vmcs12) &&
766 nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)))
767 return -EINVAL;
768
769 /*
770 * If virtual interrupt delivery is enabled,
771 * we must exit on external interrupts.
772 */
773 if (CC(nested_cpu_has_vid(vmcs12) && !nested_exit_on_intr(vcpu)))
774 return -EINVAL;
775
776 /*
777 * bits 15:8 should be zero in posted_intr_nv,
778 * the descriptor address has been already checked
779 * in nested_get_vmcs12_pages.
780 *
781 * bits 5:0 of posted_intr_desc_addr should be zero.
782 */
783 if (nested_cpu_has_posted_intr(vmcs12) &&
784 (CC(!nested_cpu_has_vid(vmcs12)) ||
785 CC(!nested_exit_intr_ack_set(vcpu)) ||
786 CC((vmcs12->posted_intr_nv & 0xff00)) ||
787 CC(!kvm_vcpu_is_legal_aligned_gpa(vcpu, vmcs12->posted_intr_desc_addr, 64))))
788 return -EINVAL;
789
790 /* tpr shadow is needed by all apicv features. */
791 if (CC(!nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)))
792 return -EINVAL;
793
794 return 0;
795 }
796
797 static int nested_vmx_check_msr_switch(struct kvm_vcpu *vcpu,
798 u32 count, u64 addr)
799 {
800 if (count == 0)
801 return 0;
802
803 if (!kvm_vcpu_is_legal_aligned_gpa(vcpu, addr, 16) ||
804 !kvm_vcpu_is_legal_gpa(vcpu, (addr + count * sizeof(struct vmx_msr_entry) - 1)))
805 return -EINVAL;
806
807 return 0;
808 }
809
810 static int nested_vmx_check_exit_msr_switch_controls(struct kvm_vcpu *vcpu,
811 struct vmcs12 *vmcs12)
812 {
813 if (CC(nested_vmx_check_msr_switch(vcpu,
814 vmcs12->vm_exit_msr_load_count,
815 vmcs12->vm_exit_msr_load_addr)) ||
816 CC(nested_vmx_check_msr_switch(vcpu,
817 vmcs12->vm_exit_msr_store_count,
818 vmcs12->vm_exit_msr_store_addr)))
819 return -EINVAL;
820
821 return 0;
822 }
823
824 static int nested_vmx_check_entry_msr_switch_controls(struct kvm_vcpu *vcpu,
825 struct vmcs12 *vmcs12)
826 {
827 if (CC(nested_vmx_check_msr_switch(vcpu,
828 vmcs12->vm_entry_msr_load_count,
829 vmcs12->vm_entry_msr_load_addr)))
830 return -EINVAL;
831
832 return 0;
833 }
834
835 static int nested_vmx_check_pml_controls(struct kvm_vcpu *vcpu,
836 struct vmcs12 *vmcs12)
837 {
838 if (!nested_cpu_has_pml(vmcs12))
839 return 0;
840
841 if (CC(!nested_cpu_has_ept(vmcs12)) ||
842 CC(!page_address_valid(vcpu, vmcs12->pml_address)))
843 return -EINVAL;
844
845 return 0;
846 }
847
848 static int nested_vmx_check_unrestricted_guest_controls(struct kvm_vcpu *vcpu,
849 struct vmcs12 *vmcs12)
850 {
851 if (CC(nested_cpu_has2(vmcs12, SECONDARY_EXEC_UNRESTRICTED_GUEST) &&
852 !nested_cpu_has_ept(vmcs12)))
853 return -EINVAL;
854 return 0;
855 }
856
857 static int nested_vmx_check_mode_based_ept_exec_controls(struct kvm_vcpu *vcpu,
858 struct vmcs12 *vmcs12)
859 {
860 if (CC(nested_cpu_has2(vmcs12, SECONDARY_EXEC_MODE_BASED_EPT_EXEC) &&
861 !nested_cpu_has_ept(vmcs12)))
862 return -EINVAL;
863 return 0;
864 }
865
866 static int nested_vmx_check_shadow_vmcs_controls(struct kvm_vcpu *vcpu,
867 struct vmcs12 *vmcs12)
868 {
869 if (!nested_cpu_has_shadow_vmcs(vmcs12))
870 return 0;
871
872 if (CC(!page_address_valid(vcpu, vmcs12->vmread_bitmap)) ||
873 CC(!page_address_valid(vcpu, vmcs12->vmwrite_bitmap)))
874 return -EINVAL;
875
876 return 0;
877 }
878
879 static int nested_vmx_msr_check_common(struct kvm_vcpu *vcpu,
880 struct vmx_msr_entry *e)
881 {
882 /* x2APIC MSR accesses are not allowed */
883 if (CC(vcpu->arch.apic_base & X2APIC_ENABLE && e->index >> 8 == 0x8))
884 return -EINVAL;
885 if (CC(e->index == MSR_IA32_UCODE_WRITE) || /* SDM Table 35-2 */
886 CC(e->index == MSR_IA32_UCODE_REV))
887 return -EINVAL;
888 if (CC(e->reserved != 0))
889 return -EINVAL;
890 return 0;
891 }
892
893 static int nested_vmx_load_msr_check(struct kvm_vcpu *vcpu,
894 struct vmx_msr_entry *e)
895 {
896 if (CC(e->index == MSR_FS_BASE) ||
897 CC(e->index == MSR_GS_BASE) ||
898 CC(e->index == MSR_IA32_SMM_MONITOR_CTL) || /* SMM is not supported */
899 nested_vmx_msr_check_common(vcpu, e))
900 return -EINVAL;
901 return 0;
902 }
903
904 static int nested_vmx_store_msr_check(struct kvm_vcpu *vcpu,
905 struct vmx_msr_entry *e)
906 {
907 if (CC(e->index == MSR_IA32_SMBASE) || /* SMM is not supported */
908 nested_vmx_msr_check_common(vcpu, e))
909 return -EINVAL;
910 return 0;
911 }
912
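/*
 * Maximum number of entries in a single atomic MSR switch list, as derived
 * from the emulated IA32_VMX_MISC value advertised to L1.
 */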
913 static u32 nested_vmx_max_atomic_switch_msrs(struct kvm_vcpu *vcpu)
914 {
915 struct vcpu_vmx *vmx = to_vmx(vcpu);
916 u64 vmx_misc = vmx_control_msr(vmx->nested.msrs.misc_low,
917 vmx->nested.msrs.misc_high);
918
919 return (vmx_misc_max_msr(vmx_misc) + 1) * VMX_MISC_MSR_LIST_MULTIPLIER;
920 }
921
922 /*
923 * Load the guest's/host's MSRs at nested entry/exit.
924 * Return 0 for success, entry index for failure.
925 *
926 * One of the failure modes for MSR load/store is when a list exceeds the
927 * virtual hardware's capacity. To maintain compatibility with hardware inasmuch
928 * as possible, process all valid entries before failing rather than precheck
929 * for a capacity violation.
930 */
931 static u32 nested_vmx_load_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
932 {
933 u32 i;
934 struct vmx_msr_entry e;
935 u32 max_msr_list_size = nested_vmx_max_atomic_switch_msrs(vcpu);
936
937 for (i = 0; i < count; i++) {
938 if (unlikely(i >= max_msr_list_size))
939 goto fail;
940
941 if (kvm_vcpu_read_guest(vcpu, gpa + i * sizeof(e),
942 &e, sizeof(e))) {
943 pr_debug_ratelimited(
944 "%s cannot read MSR entry (%u, 0x%08llx)\n",
945 __func__, i, gpa + i * sizeof(e));
946 goto fail;
947 }
948 if (nested_vmx_load_msr_check(vcpu, &e)) {
949 pr_debug_ratelimited(
950 "%s check failed (%u, 0x%x, 0x%x)\n",
951 __func__, i, e.index, e.reserved);
952 goto fail;
953 }
954 if (kvm_set_msr(vcpu, e.index, e.value)) {
955 pr_debug_ratelimited(
956 "%s cannot write MSR (%u, 0x%x, 0x%llx)\n",
957 __func__, i, e.index, e.value);
958 goto fail;
959 }
960 }
961 return 0;
962 fail:
963 /* Note, max_msr_list_size is at most 4096, i.e. this can't wrap. */
964 return i + 1;
965 }
966
967 static bool nested_vmx_get_vmexit_msr_value(struct kvm_vcpu *vcpu,
968 u32 msr_index,
969 u64 *data)
970 {
971 struct vcpu_vmx *vmx = to_vmx(vcpu);
972
973 /*
974 * If the L0 hypervisor stored a more accurate value for the TSC that
975 * does not include the time taken for emulation of the L2->L1
976 * VM-exit in L0, use the more accurate value.
977 */
978 if (msr_index == MSR_IA32_TSC) {
979 int i = vmx_find_loadstore_msr_slot(&vmx->msr_autostore.guest,
980 MSR_IA32_TSC);
981
982 if (i >= 0) {
983 u64 val = vmx->msr_autostore.guest.val[i].value;
984
985 *data = kvm_read_l1_tsc(vcpu, val);
986 return true;
987 }
988 }
989
990 if (kvm_get_msr(vcpu, msr_index, data)) {
991 pr_debug_ratelimited("%s cannot read MSR (0x%x)\n", __func__,
992 msr_index);
993 return false;
994 }
995 return true;
996 }
997
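/*
 * Read and validate one entry of a VM-exit MSR-store list.  Only the
 * 'index' and 'reserved' fields (the first two u32s) are read; the
 * 'value' field is not needed by the callers.
 */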
998 static bool read_and_check_msr_entry(struct kvm_vcpu *vcpu, u64 gpa, int i,
999 struct vmx_msr_entry *e)
1000 {
1001 if (kvm_vcpu_read_guest(vcpu,
1002 gpa + i * sizeof(*e),
1003 e, 2 * sizeof(u32))) {
1004 pr_debug_ratelimited(
1005 "%s cannot read MSR entry (%u, 0x%08llx)\n",
1006 __func__, i, gpa + i * sizeof(*e));
1007 return false;
1008 }
1009 if (nested_vmx_store_msr_check(vcpu, e)) {
1010 pr_debug_ratelimited(
1011 "%s check failed (%u, 0x%x, 0x%x)\n",
1012 __func__, i, e->index, e->reserved);
1013 return false;
1014 }
1015 return true;
1016 }
1017
1018 static int nested_vmx_store_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
1019 {
1020 u64 data;
1021 u32 i;
1022 struct vmx_msr_entry e;
1023 u32 max_msr_list_size = nested_vmx_max_atomic_switch_msrs(vcpu);
1024
1025 for (i = 0; i < count; i++) {
1026 if (unlikely(i >= max_msr_list_size))
1027 return -EINVAL;
1028
1029 if (!read_and_check_msr_entry(vcpu, gpa, i, &e))
1030 return -EINVAL;
1031
1032 if (!nested_vmx_get_vmexit_msr_value(vcpu, e.index, &data))
1033 return -EINVAL;
1034
1035 if (kvm_vcpu_write_guest(vcpu,
1036 gpa + i * sizeof(e) +
1037 offsetof(struct vmx_msr_entry, value),
1038 &data, sizeof(data))) {
1039 pr_debug_ratelimited(
1040 "%s cannot write MSR (%u, 0x%x, 0x%llx)\n",
1041 __func__, i, e.index, data);
1042 return -EINVAL;
1043 }
1044 }
1045 return 0;
1046 }
1047
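/* Returns true if @msr_index appears in vmcs12's VM-exit MSR-store list. */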
1048 static bool nested_msr_store_list_has_msr(struct kvm_vcpu *vcpu, u32 msr_index)
1049 {
1050 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
1051 u32 count = vmcs12->vm_exit_msr_store_count;
1052 u64 gpa = vmcs12->vm_exit_msr_store_addr;
1053 struct vmx_msr_entry e;
1054 u32 i;
1055
1056 for (i = 0; i < count; i++) {
1057 if (!read_and_check_msr_entry(vcpu, gpa, i, &e))
1058 return false;
1059
1060 if (e.index == msr_index)
1061 return true;
1062 }
1063 return false;
1064 }
1065
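/*
 * Keep vmx->msr_autostore.guest in sync with vmcs12's VM-exit MSR-store
 * list: add @msr_index when L1 wants it stored on VM-exit, remove it when
 * L1 no longer does.
 */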
1066 static void prepare_vmx_msr_autostore_list(struct kvm_vcpu *vcpu,
1067 u32 msr_index)
1068 {
1069 struct vcpu_vmx *vmx = to_vmx(vcpu);
1070 struct vmx_msrs *autostore = &vmx->msr_autostore.guest;
1071 bool in_vmcs12_store_list;
1072 int msr_autostore_slot;
1073 bool in_autostore_list;
1074 int last;
1075
1076 msr_autostore_slot = vmx_find_loadstore_msr_slot(autostore, msr_index);
1077 in_autostore_list = msr_autostore_slot >= 0;
1078 in_vmcs12_store_list = nested_msr_store_list_has_msr(vcpu, msr_index);
1079
1080 if (in_vmcs12_store_list && !in_autostore_list) {
1081 if (autostore->nr == MAX_NR_LOADSTORE_MSRS) {
1082 /*
1083 * Emulated VMEntry does not fail here. Instead a less
1084 * accurate value will be returned by
1085 * nested_vmx_get_vmexit_msr_value() using kvm_get_msr()
1086 * instead of reading the value from the vmcs02 VMExit
1087 * MSR-store area.
1088 */
1089 pr_warn_ratelimited(
1090 "Not enough msr entries in msr_autostore. Can't add msr %x\n",
1091 msr_index);
1092 return;
1093 }
1094 last = autostore->nr++;
1095 autostore->val[last].index = msr_index;
1096 } else if (!in_vmcs12_store_list && in_autostore_list) {
1097 last = --autostore->nr;
1098 autostore->val[msr_autostore_slot] = autostore->val[last];
1099 }
1100 }
1101
1102 /*
1103 * Load guest's/host's cr3 at nested entry/exit. @nested_ept is true if we are
1104 * emulating VM-Entry into a guest with EPT enabled. On failure, the expected
1105 * Exit Qualification (for a VM-Entry consistency check VM-Exit) is assigned to
1106 * @entry_failure_code.
1107 */
1108 static int nested_vmx_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3,
1109 bool nested_ept, bool reload_pdptrs,
1110 enum vm_entry_failure_code *entry_failure_code)
1111 {
1112 if (CC(kvm_vcpu_is_illegal_gpa(vcpu, cr3))) {
1113 *entry_failure_code = ENTRY_FAIL_DEFAULT;
1114 return -EINVAL;
1115 }
1116
1117 /*
1118 * If PAE paging and EPT are both on, CR3 is not used by the CPU and
1119 * must not be dereferenced.
1120 */
1121 if (reload_pdptrs && !nested_ept && is_pae_paging(vcpu) &&
1122 CC(!load_pdptrs(vcpu, cr3))) {
1123 *entry_failure_code = ENTRY_FAIL_PDPTE;
1124 return -EINVAL;
1125 }
1126
1127 vcpu->arch.cr3 = cr3;
1128 kvm_register_mark_dirty(vcpu, VCPU_EXREG_CR3);
1129
1130 /* Re-initialize the MMU, e.g. to pick up CR4 MMU role changes. */
1131 kvm_init_mmu(vcpu);
1132
1133 if (!nested_ept)
1134 kvm_mmu_new_pgd(vcpu, cr3);
1135
1136 return 0;
1137 }
1138
1139 /*
1140 * Returns true if KVM is able to configure the CPU to tag TLB entries
1141 * populated by L2 differently than TLB entries populated
1142 * by L1.
1143 *
1144 * If L0 uses EPT, L1 and L2 run with different EPTP because
1145 * guest_mode is part of kvm_mmu_page_role. Thus, TLB entries
1146 * are tagged with different EPTP.
1147 *
1148 * If L1 uses VPID and we allocated a vpid02, TLB entries are tagged
1149 * with different VPID (L1 entries are tagged with vmx->vpid
1150 * while L2 entries are tagged with vmx->nested.vpid02).
1151 */
1152 static bool nested_has_guest_tlb_tag(struct kvm_vcpu *vcpu)
1153 {
1154 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
1155
1156 return enable_ept ||
1157 (nested_cpu_has_vpid(vmcs12) && to_vmx(vcpu)->nested.vpid02);
1158 }
1159
1160 static void nested_vmx_transition_tlb_flush(struct kvm_vcpu *vcpu,
1161 struct vmcs12 *vmcs12,
1162 bool is_vmenter)
1163 {
1164 struct vcpu_vmx *vmx = to_vmx(vcpu);
1165
1166 /*
1167 * If vmcs12 doesn't use VPID, L1 expects linear and combined mappings
1168 * for *all* contexts to be flushed on VM-Enter/VM-Exit, i.e. it's a
1169 * full TLB flush from the guest's perspective. This is required even
1170 * if VPID is disabled in the host as KVM may need to synchronize the
1171 * MMU in response to the guest TLB flush.
1172 *
1173 * Note, using TLB_FLUSH_GUEST is correct even if nested EPT is in use.
1174 * EPT is a special snowflake, as guest-physical mappings aren't
1175 * flushed on VPID invalidations, including VM-Enter or VM-Exit with
1176 * VPID disabled. As a result, KVM _never_ needs to sync nEPT
1177 * entries on VM-Enter because L1 can't rely on VM-Enter to flush
1178 * those mappings.
1179 */
1180 if (!nested_cpu_has_vpid(vmcs12)) {
1181 kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu);
1182 return;
1183 }
1184
1185 /* L2 should never have a VPID if VPID is disabled. */
1186 WARN_ON(!enable_vpid);
1187
1188 /*
1189 * VPID is enabled and in use by vmcs12. If vpid12 is changing, then
1190 * emulate a guest TLB flush as KVM does not track vpid12 history nor
1191 * is the VPID incorporated into the MMU context. I.e. KVM must assume
1192 * that the new vpid12 has never been used and thus represents a new
1193 * guest ASID that cannot have entries in the TLB.
1194 */
1195 if (is_vmenter && vmcs12->virtual_processor_id != vmx->nested.last_vpid) {
1196 vmx->nested.last_vpid = vmcs12->virtual_processor_id;
1197 kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu);
1198 return;
1199 }
1200
1201 /*
1202 * If VPID is enabled and in use by vmcs12, and vpid12 is not changing, but
1203 * L2 does not have a unique TLB tag (ASID), i.e. EPT is disabled and
1204 * KVM was unable to allocate a VPID for L2, flush the current context
1205 * as the effective ASID is common to both L1 and L2.
1206 */
1207 if (!nested_has_guest_tlb_tag(vcpu))
1208 kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
1209 }
1210
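/*
 * Returns true if, considering only the bits in @mask, every bit set in
 * @subset is also set in @superset.
 */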
1211 static bool is_bitwise_subset(u64 superset, u64 subset, u64 mask)
1212 {
1213 superset &= mask;
1214 subset &= mask;
1215
1216 return (superset | subset) == superset;
1217 }
1218
1219 static int vmx_restore_vmx_basic(struct vcpu_vmx *vmx, u64 data)
1220 {
1221 const u64 feature_and_reserved =
1222 /* feature (except bit 48; see below) */
1223 BIT_ULL(49) | BIT_ULL(54) | BIT_ULL(55) |
1224 /* reserved */
1225 BIT_ULL(31) | GENMASK_ULL(47, 45) | GENMASK_ULL(63, 56);
1226 u64 vmx_basic = vmcs_config.nested.basic;
1227
1228 if (!is_bitwise_subset(vmx_basic, data, feature_and_reserved))
1229 return -EINVAL;
1230
1231 /*
1232 * KVM does not emulate a version of VMX that constrains physical
1233 * addresses of VMX structures (e.g. VMCS) to 32-bits.
1234 */
1235 if (data & BIT_ULL(48))
1236 return -EINVAL;
1237
1238 if (vmx_basic_vmcs_revision_id(vmx_basic) !=
1239 vmx_basic_vmcs_revision_id(data))
1240 return -EINVAL;
1241
1242 if (vmx_basic_vmcs_size(vmx_basic) > vmx_basic_vmcs_size(data))
1243 return -EINVAL;
1244
1245 vmx->nested.msrs.basic = data;
1246 return 0;
1247 }
1248
1249 static void vmx_get_control_msr(struct nested_vmx_msrs *msrs, u32 msr_index,
1250 u32 **low, u32 **high)
1251 {
1252 switch (msr_index) {
1253 case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
1254 *low = &msrs->pinbased_ctls_low;
1255 *high = &msrs->pinbased_ctls_high;
1256 break;
1257 case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
1258 *low = &msrs->procbased_ctls_low;
1259 *high = &msrs->procbased_ctls_high;
1260 break;
1261 case MSR_IA32_VMX_TRUE_EXIT_CTLS:
1262 *low = &msrs->exit_ctls_low;
1263 *high = &msrs->exit_ctls_high;
1264 break;
1265 case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
1266 *low = &msrs->entry_ctls_low;
1267 *high = &msrs->entry_ctls_high;
1268 break;
1269 case MSR_IA32_VMX_PROCBASED_CTLS2:
1270 *low = &msrs->secondary_ctls_low;
1271 *high = &msrs->secondary_ctls_high;
1272 break;
1273 default:
1274 BUG();
1275 }
1276 }
1277
1278 static int
1279 vmx_restore_control_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data)
1280 {
1281 u32 *lowp, *highp;
1282 u64 supported;
1283
1284 vmx_get_control_msr(&vmcs_config.nested, msr_index, &lowp, &highp);
1285
1286 supported = vmx_control_msr(*lowp, *highp);
1287
1288 /* Check must-be-1 bits are still 1. */
1289 if (!is_bitwise_subset(data, supported, GENMASK_ULL(31, 0)))
1290 return -EINVAL;
1291
1292 /* Check must-be-0 bits are still 0. */
1293 if (!is_bitwise_subset(supported, data, GENMASK_ULL(63, 32)))
1294 return -EINVAL;
1295
1296 vmx_get_control_msr(&vmx->nested.msrs, msr_index, &lowp, &highp);
1297 *lowp = data;
1298 *highp = data >> 32;
1299 return 0;
1300 }
1301
1302 static int vmx_restore_vmx_misc(struct vcpu_vmx *vmx, u64 data)
1303 {
1304 const u64 feature_and_reserved_bits =
1305 /* feature */
1306 BIT_ULL(5) | GENMASK_ULL(8, 6) | BIT_ULL(14) | BIT_ULL(15) |
1307 BIT_ULL(28) | BIT_ULL(29) | BIT_ULL(30) |
1308 /* reserved */
1309 GENMASK_ULL(13, 9) | BIT_ULL(31);
1310 u64 vmx_misc = vmx_control_msr(vmcs_config.nested.misc_low,
1311 vmcs_config.nested.misc_high);
1312
1313 if (!is_bitwise_subset(vmx_misc, data, feature_and_reserved_bits))
1314 return -EINVAL;
1315
1316 if ((vmx->nested.msrs.pinbased_ctls_high &
1317 PIN_BASED_VMX_PREEMPTION_TIMER) &&
1318 vmx_misc_preemption_timer_rate(data) !=
1319 vmx_misc_preemption_timer_rate(vmx_misc))
1320 return -EINVAL;
1321
1322 if (vmx_misc_cr3_count(data) > vmx_misc_cr3_count(vmx_misc))
1323 return -EINVAL;
1324
1325 if (vmx_misc_max_msr(data) > vmx_misc_max_msr(vmx_misc))
1326 return -EINVAL;
1327
1328 if (vmx_misc_mseg_revid(data) != vmx_misc_mseg_revid(vmx_misc))
1329 return -EINVAL;
1330
1331 vmx->nested.msrs.misc_low = data;
1332 vmx->nested.msrs.misc_high = data >> 32;
1333
1334 return 0;
1335 }
1336
1337 static int vmx_restore_vmx_ept_vpid_cap(struct vcpu_vmx *vmx, u64 data)
1338 {
1339 u64 vmx_ept_vpid_cap = vmx_control_msr(vmcs_config.nested.ept_caps,
1340 vmcs_config.nested.vpid_caps);
1341
1342 /* Every bit is either reserved or a feature bit. */
1343 if (!is_bitwise_subset(vmx_ept_vpid_cap, data, -1ULL))
1344 return -EINVAL;
1345
1346 vmx->nested.msrs.ept_caps = data;
1347 vmx->nested.msrs.vpid_caps = data >> 32;
1348 return 0;
1349 }
1350
1351 static u64 *vmx_get_fixed0_msr(struct nested_vmx_msrs *msrs, u32 msr_index)
1352 {
1353 switch (msr_index) {
1354 case MSR_IA32_VMX_CR0_FIXED0:
1355 return &msrs->cr0_fixed0;
1356 case MSR_IA32_VMX_CR4_FIXED0:
1357 return &msrs->cr4_fixed0;
1358 default:
1359 BUG();
1360 }
1361 }
1362
1363 static int vmx_restore_fixed0_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data)
1364 {
1365 const u64 *msr = vmx_get_fixed0_msr(&vmcs_config.nested, msr_index);
1366
1367 /*
1368 * 1 bits (which indicate bits that "must-be-1" during VMX operation)
1369 * must be 1 in the restored value.
1370 */
1371 if (!is_bitwise_subset(data, *msr, -1ULL))
1372 return -EINVAL;
1373
1374 *vmx_get_fixed0_msr(&vmx->nested.msrs, msr_index) = data;
1375 return 0;
1376 }
1377
1378 /*
1379 * Called when userspace is restoring VMX MSRs.
1380 *
1381 * Returns 0 on success, non-0 otherwise.
1382 */
1383 int vmx_set_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
1384 {
1385 struct vcpu_vmx *vmx = to_vmx(vcpu);
1386
1387 /*
1388 * Don't allow changes to the VMX capability MSRs while the vCPU
1389 * is in VMX operation.
1390 */
1391 if (vmx->nested.vmxon)
1392 return -EBUSY;
1393
1394 switch (msr_index) {
1395 case MSR_IA32_VMX_BASIC:
1396 return vmx_restore_vmx_basic(vmx, data);
1397 case MSR_IA32_VMX_PINBASED_CTLS:
1398 case MSR_IA32_VMX_PROCBASED_CTLS:
1399 case MSR_IA32_VMX_EXIT_CTLS:
1400 case MSR_IA32_VMX_ENTRY_CTLS:
1401 /*
1402 * The "non-true" VMX capability MSRs are generated from the
1403 * "true" MSRs, so we do not support restoring them directly.
1404 *
1405 * If userspace wants to emulate VMX_BASIC[55]=0, userspace
1406 * should restore the "true" MSRs with the must-be-1 bits
1407 * set according to the SDM Vol 3. A.2 "RESERVED CONTROLS AND
1408 * DEFAULT SETTINGS".
1409 */
1410 return -EINVAL;
1411 case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
1412 case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
1413 case MSR_IA32_VMX_TRUE_EXIT_CTLS:
1414 case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
1415 case MSR_IA32_VMX_PROCBASED_CTLS2:
1416 return vmx_restore_control_msr(vmx, msr_index, data);
1417 case MSR_IA32_VMX_MISC:
1418 return vmx_restore_vmx_misc(vmx, data);
1419 case MSR_IA32_VMX_CR0_FIXED0:
1420 case MSR_IA32_VMX_CR4_FIXED0:
1421 return vmx_restore_fixed0_msr(vmx, msr_index, data);
1422 case MSR_IA32_VMX_CR0_FIXED1:
1423 case MSR_IA32_VMX_CR4_FIXED1:
1424 /*
1425 * These MSRs are generated based on the vCPU's CPUID, so we
1426 * do not support restoring them directly.
1427 */
1428 return -EINVAL;
1429 case MSR_IA32_VMX_EPT_VPID_CAP:
1430 return vmx_restore_vmx_ept_vpid_cap(vmx, data);
1431 case MSR_IA32_VMX_VMCS_ENUM:
1432 vmx->nested.msrs.vmcs_enum = data;
1433 return 0;
1434 case MSR_IA32_VMX_VMFUNC:
1435 if (data & ~vmcs_config.nested.vmfunc_controls)
1436 return -EINVAL;
1437 vmx->nested.msrs.vmfunc_controls = data;
1438 return 0;
1439 default:
1440 /*
1441 * The rest of the VMX capability MSRs do not support restore.
1442 */
1443 return -EINVAL;
1444 }
1445 }
1446
1447 /* Returns 0 on success, non-0 otherwise. */
1448 int vmx_get_vmx_msr(struct nested_vmx_msrs *msrs, u32 msr_index, u64 *pdata)
1449 {
1450 switch (msr_index) {
1451 case MSR_IA32_VMX_BASIC:
1452 *pdata = msrs->basic;
1453 break;
1454 case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
1455 case MSR_IA32_VMX_PINBASED_CTLS:
1456 *pdata = vmx_control_msr(
1457 msrs->pinbased_ctls_low,
1458 msrs->pinbased_ctls_high);
1459 if (msr_index == MSR_IA32_VMX_PINBASED_CTLS)
1460 *pdata |= PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
1461 break;
1462 case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
1463 case MSR_IA32_VMX_PROCBASED_CTLS:
1464 *pdata = vmx_control_msr(
1465 msrs->procbased_ctls_low,
1466 msrs->procbased_ctls_high);
1467 if (msr_index == MSR_IA32_VMX_PROCBASED_CTLS)
1468 *pdata |= CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
1469 break;
1470 case MSR_IA32_VMX_TRUE_EXIT_CTLS:
1471 case MSR_IA32_VMX_EXIT_CTLS:
1472 *pdata = vmx_control_msr(
1473 msrs->exit_ctls_low,
1474 msrs->exit_ctls_high);
1475 if (msr_index == MSR_IA32_VMX_EXIT_CTLS)
1476 *pdata |= VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR;
1477 break;
1478 case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
1479 case MSR_IA32_VMX_ENTRY_CTLS:
1480 *pdata = vmx_control_msr(
1481 msrs->entry_ctls_low,
1482 msrs->entry_ctls_high);
1483 if (msr_index == MSR_IA32_VMX_ENTRY_CTLS)
1484 *pdata |= VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR;
1485 break;
1486 case MSR_IA32_VMX_MISC:
1487 *pdata = vmx_control_msr(
1488 msrs->misc_low,
1489 msrs->misc_high);
1490 break;
1491 case MSR_IA32_VMX_CR0_FIXED0:
1492 *pdata = msrs->cr0_fixed0;
1493 break;
1494 case MSR_IA32_VMX_CR0_FIXED1:
1495 *pdata = msrs->cr0_fixed1;
1496 break;
1497 case MSR_IA32_VMX_CR4_FIXED0:
1498 *pdata = msrs->cr4_fixed0;
1499 break;
1500 case MSR_IA32_VMX_CR4_FIXED1:
1501 *pdata = msrs->cr4_fixed1;
1502 break;
1503 case MSR_IA32_VMX_VMCS_ENUM:
1504 *pdata = msrs->vmcs_enum;
1505 break;
1506 case MSR_IA32_VMX_PROCBASED_CTLS2:
1507 *pdata = vmx_control_msr(
1508 msrs->secondary_ctls_low,
1509 msrs->secondary_ctls_high);
1510 break;
1511 case MSR_IA32_VMX_EPT_VPID_CAP:
1512 *pdata = msrs->ept_caps |
1513 ((u64)msrs->vpid_caps << 32);
1514 break;
1515 case MSR_IA32_VMX_VMFUNC:
1516 *pdata = msrs->vmfunc_controls;
1517 break;
1518 default:
1519 return 1;
1520 }
1521
1522 return 0;
1523 }
1524
1525 /*
1526 * Copy the writable VMCS shadow fields back to the VMCS12, in case they have
1527 * been modified by the L1 guest. Note, "writable" in this context means
1528 * "writable by the guest", i.e. tagged SHADOW_FIELD_RW; the set of
1529 * fields tagged SHADOW_FIELD_RO may or may not align with the "read-only"
1530 * VM-exit information fields (which are actually writable if the vCPU is
1531 * configured to support "VMWRITE to any supported field in the VMCS").
1532 */
1533 static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx)
1534 {
1535 struct vmcs *shadow_vmcs = vmx->vmcs01.shadow_vmcs;
1536 struct vmcs12 *vmcs12 = get_vmcs12(&vmx->vcpu);
1537 struct shadow_vmcs_field field;
1538 unsigned long val;
1539 int i;
1540
1541 if (WARN_ON(!shadow_vmcs))
1542 return;
1543
1544 preempt_disable();
1545
1546 vmcs_load(shadow_vmcs);
1547
1548 for (i = 0; i < max_shadow_read_write_fields; i++) {
1549 field = shadow_read_write_fields[i];
1550 val = __vmcs_readl(field.encoding);
1551 vmcs12_write_any(vmcs12, field.encoding, field.offset, val);
1552 }
1553
1554 vmcs_clear(shadow_vmcs);
1555 vmcs_load(vmx->loaded_vmcs->vmcs);
1556
1557 preempt_enable();
1558 }
1559
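/*
 * Copy both the read/write and read-only shadowed fields from the cached
 * vmcs12 into the shadow VMCS so that L1's VMREADs (and VMWRITEs to
 * writable fields) are satisfied by hardware.
 */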
1560 static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx)
1561 {
1562 const struct shadow_vmcs_field *fields[] = {
1563 shadow_read_write_fields,
1564 shadow_read_only_fields
1565 };
1566 const int max_fields[] = {
1567 max_shadow_read_write_fields,
1568 max_shadow_read_only_fields
1569 };
1570 struct vmcs *shadow_vmcs = vmx->vmcs01.shadow_vmcs;
1571 struct vmcs12 *vmcs12 = get_vmcs12(&vmx->vcpu);
1572 struct shadow_vmcs_field field;
1573 unsigned long val;
1574 int i, q;
1575
1576 if (WARN_ON(!shadow_vmcs))
1577 return;
1578
1579 vmcs_load(shadow_vmcs);
1580
1581 for (q = 0; q < ARRAY_SIZE(fields); q++) {
1582 for (i = 0; i < max_fields[q]; i++) {
1583 field = fields[q][i];
1584 val = vmcs12_read_any(vmcs12, field.encoding,
1585 field.offset);
1586 __vmcs_writel(field.encoding, val);
1587 }
1588 }
1589
1590 vmcs_clear(shadow_vmcs);
1591 vmcs_load(vmx->loaded_vmcs->vmcs);
1592 }
1593
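/*
 * Copy state from the enlightened VMCS into the cached vmcs12, skipping
 * any field group whose bit is set in @hv_clean_fields, i.e. any group
 * L1 reports as unchanged since it was last copied.
 */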
1594 static void copy_enlightened_to_vmcs12(struct vcpu_vmx *vmx, u32 hv_clean_fields)
1595 {
1596 struct vmcs12 *vmcs12 = vmx->nested.cached_vmcs12;
1597 struct hv_enlightened_vmcs *evmcs = vmx->nested.hv_evmcs;
1598
1599 /* HV_VMX_ENLIGHTENED_CLEAN_FIELD_NONE */
1600 vmcs12->tpr_threshold = evmcs->tpr_threshold;
1601 vmcs12->guest_rip = evmcs->guest_rip;
1602
1603 if (unlikely(!(hv_clean_fields &
1604 HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_BASIC))) {
1605 vmcs12->guest_rsp = evmcs->guest_rsp;
1606 vmcs12->guest_rflags = evmcs->guest_rflags;
1607 vmcs12->guest_interruptibility_info =
1608 evmcs->guest_interruptibility_info;
1609 }
1610
1611 if (unlikely(!(hv_clean_fields &
1612 HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_PROC))) {
1613 vmcs12->cpu_based_vm_exec_control =
1614 evmcs->cpu_based_vm_exec_control;
1615 }
1616
1617 if (unlikely(!(hv_clean_fields &
1618 HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_EXCPN))) {
1619 vmcs12->exception_bitmap = evmcs->exception_bitmap;
1620 }
1621
1622 if (unlikely(!(hv_clean_fields &
1623 HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_ENTRY))) {
1624 vmcs12->vm_entry_controls = evmcs->vm_entry_controls;
1625 }
1626
1627 if (unlikely(!(hv_clean_fields &
1628 HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_EVENT))) {
1629 vmcs12->vm_entry_intr_info_field =
1630 evmcs->vm_entry_intr_info_field;
1631 vmcs12->vm_entry_exception_error_code =
1632 evmcs->vm_entry_exception_error_code;
1633 vmcs12->vm_entry_instruction_len =
1634 evmcs->vm_entry_instruction_len;
1635 }
1636
1637 if (unlikely(!(hv_clean_fields &
1638 HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1))) {
1639 vmcs12->host_ia32_pat = evmcs->host_ia32_pat;
1640 vmcs12->host_ia32_efer = evmcs->host_ia32_efer;
1641 vmcs12->host_cr0 = evmcs->host_cr0;
1642 vmcs12->host_cr3 = evmcs->host_cr3;
1643 vmcs12->host_cr4 = evmcs->host_cr4;
1644 vmcs12->host_ia32_sysenter_esp = evmcs->host_ia32_sysenter_esp;
1645 vmcs12->host_ia32_sysenter_eip = evmcs->host_ia32_sysenter_eip;
1646 vmcs12->host_rip = evmcs->host_rip;
1647 vmcs12->host_ia32_sysenter_cs = evmcs->host_ia32_sysenter_cs;
1648 vmcs12->host_es_selector = evmcs->host_es_selector;
1649 vmcs12->host_cs_selector = evmcs->host_cs_selector;
1650 vmcs12->host_ss_selector = evmcs->host_ss_selector;
1651 vmcs12->host_ds_selector = evmcs->host_ds_selector;
1652 vmcs12->host_fs_selector = evmcs->host_fs_selector;
1653 vmcs12->host_gs_selector = evmcs->host_gs_selector;
1654 vmcs12->host_tr_selector = evmcs->host_tr_selector;
1655 }
1656
1657 if (unlikely(!(hv_clean_fields &
1658 HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_GRP1))) {
1659 vmcs12->pin_based_vm_exec_control =
1660 evmcs->pin_based_vm_exec_control;
1661 vmcs12->vm_exit_controls = evmcs->vm_exit_controls;
1662 vmcs12->secondary_vm_exec_control =
1663 evmcs->secondary_vm_exec_control;
1664 }
1665
1666 if (unlikely(!(hv_clean_fields &
1667 HV_VMX_ENLIGHTENED_CLEAN_FIELD_IO_BITMAP))) {
1668 vmcs12->io_bitmap_a = evmcs->io_bitmap_a;
1669 vmcs12->io_bitmap_b = evmcs->io_bitmap_b;
1670 }
1671
1672 if (unlikely(!(hv_clean_fields &
1673 HV_VMX_ENLIGHTENED_CLEAN_FIELD_MSR_BITMAP))) {
1674 vmcs12->msr_bitmap = evmcs->msr_bitmap;
1675 }
1676
1677 if (unlikely(!(hv_clean_fields &
1678 HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2))) {
1679 vmcs12->guest_es_base = evmcs->guest_es_base;
1680 vmcs12->guest_cs_base = evmcs->guest_cs_base;
1681 vmcs12->guest_ss_base = evmcs->guest_ss_base;
1682 vmcs12->guest_ds_base = evmcs->guest_ds_base;
1683 vmcs12->guest_fs_base = evmcs->guest_fs_base;
1684 vmcs12->guest_gs_base = evmcs->guest_gs_base;
1685 vmcs12->guest_ldtr_base = evmcs->guest_ldtr_base;
1686 vmcs12->guest_tr_base = evmcs->guest_tr_base;
1687 vmcs12->guest_gdtr_base = evmcs->guest_gdtr_base;
1688 vmcs12->guest_idtr_base = evmcs->guest_idtr_base;
1689 vmcs12->guest_es_limit = evmcs->guest_es_limit;
1690 vmcs12->guest_cs_limit = evmcs->guest_cs_limit;
1691 vmcs12->guest_ss_limit = evmcs->guest_ss_limit;
1692 vmcs12->guest_ds_limit = evmcs->guest_ds_limit;
1693 vmcs12->guest_fs_limit = evmcs->guest_fs_limit;
1694 vmcs12->guest_gs_limit = evmcs->guest_gs_limit;
1695 vmcs12->guest_ldtr_limit = evmcs->guest_ldtr_limit;
1696 vmcs12->guest_tr_limit = evmcs->guest_tr_limit;
1697 vmcs12->guest_gdtr_limit = evmcs->guest_gdtr_limit;
1698 vmcs12->guest_idtr_limit = evmcs->guest_idtr_limit;
1699 vmcs12->guest_es_ar_bytes = evmcs->guest_es_ar_bytes;
1700 vmcs12->guest_cs_ar_bytes = evmcs->guest_cs_ar_bytes;
1701 vmcs12->guest_ss_ar_bytes = evmcs->guest_ss_ar_bytes;
1702 vmcs12->guest_ds_ar_bytes = evmcs->guest_ds_ar_bytes;
1703 vmcs12->guest_fs_ar_bytes = evmcs->guest_fs_ar_bytes;
1704 vmcs12->guest_gs_ar_bytes = evmcs->guest_gs_ar_bytes;
1705 vmcs12->guest_ldtr_ar_bytes = evmcs->guest_ldtr_ar_bytes;
1706 vmcs12->guest_tr_ar_bytes = evmcs->guest_tr_ar_bytes;
1707 vmcs12->guest_es_selector = evmcs->guest_es_selector;
1708 vmcs12->guest_cs_selector = evmcs->guest_cs_selector;
1709 vmcs12->guest_ss_selector = evmcs->guest_ss_selector;
1710 vmcs12->guest_ds_selector = evmcs->guest_ds_selector;
1711 vmcs12->guest_fs_selector = evmcs->guest_fs_selector;
1712 vmcs12->guest_gs_selector = evmcs->guest_gs_selector;
1713 vmcs12->guest_ldtr_selector = evmcs->guest_ldtr_selector;
1714 vmcs12->guest_tr_selector = evmcs->guest_tr_selector;
1715 }
1716
1717 if (unlikely(!(hv_clean_fields &
1718 HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_GRP2))) {
1719 vmcs12->tsc_offset = evmcs->tsc_offset;
1720 vmcs12->virtual_apic_page_addr = evmcs->virtual_apic_page_addr;
1721 vmcs12->xss_exit_bitmap = evmcs->xss_exit_bitmap;
1722 }
1723
1724 if (unlikely(!(hv_clean_fields &
1725 HV_VMX_ENLIGHTENED_CLEAN_FIELD_CRDR))) {
1726 vmcs12->cr0_guest_host_mask = evmcs->cr0_guest_host_mask;
1727 vmcs12->cr4_guest_host_mask = evmcs->cr4_guest_host_mask;
1728 vmcs12->cr0_read_shadow = evmcs->cr0_read_shadow;
1729 vmcs12->cr4_read_shadow = evmcs->cr4_read_shadow;
1730 vmcs12->guest_cr0 = evmcs->guest_cr0;
1731 vmcs12->guest_cr3 = evmcs->guest_cr3;
1732 vmcs12->guest_cr4 = evmcs->guest_cr4;
1733 vmcs12->guest_dr7 = evmcs->guest_dr7;
1734 }
1735
1736 if (unlikely(!(hv_clean_fields &
1737 HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_POINTER))) {
1738 vmcs12->host_fs_base = evmcs->host_fs_base;
1739 vmcs12->host_gs_base = evmcs->host_gs_base;
1740 vmcs12->host_tr_base = evmcs->host_tr_base;
1741 vmcs12->host_gdtr_base = evmcs->host_gdtr_base;
1742 vmcs12->host_idtr_base = evmcs->host_idtr_base;
1743 vmcs12->host_rsp = evmcs->host_rsp;
1744 }
1745
1746 if (unlikely(!(hv_clean_fields &
1747 HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_XLAT))) {
1748 vmcs12->ept_pointer = evmcs->ept_pointer;
1749 vmcs12->virtual_processor_id = evmcs->virtual_processor_id;
1750 }
1751
1752 if (unlikely(!(hv_clean_fields &
1753 HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1))) {
1754 vmcs12->vmcs_link_pointer = evmcs->vmcs_link_pointer;
1755 vmcs12->guest_ia32_debugctl = evmcs->guest_ia32_debugctl;
1756 vmcs12->guest_ia32_pat = evmcs->guest_ia32_pat;
1757 vmcs12->guest_ia32_efer = evmcs->guest_ia32_efer;
1758 vmcs12->guest_pdptr0 = evmcs->guest_pdptr0;
1759 vmcs12->guest_pdptr1 = evmcs->guest_pdptr1;
1760 vmcs12->guest_pdptr2 = evmcs->guest_pdptr2;
1761 vmcs12->guest_pdptr3 = evmcs->guest_pdptr3;
1762 vmcs12->guest_pending_dbg_exceptions =
1763 evmcs->guest_pending_dbg_exceptions;
1764 vmcs12->guest_sysenter_esp = evmcs->guest_sysenter_esp;
1765 vmcs12->guest_sysenter_eip = evmcs->guest_sysenter_eip;
1766 vmcs12->guest_bndcfgs = evmcs->guest_bndcfgs;
1767 vmcs12->guest_activity_state = evmcs->guest_activity_state;
1768 vmcs12->guest_sysenter_cs = evmcs->guest_sysenter_cs;
1769 }
1770
1771 /*
1772 * Not used?
1773 * vmcs12->vm_exit_msr_store_addr = evmcs->vm_exit_msr_store_addr;
1774 * vmcs12->vm_exit_msr_load_addr = evmcs->vm_exit_msr_load_addr;
1775 * vmcs12->vm_entry_msr_load_addr = evmcs->vm_entry_msr_load_addr;
1776 * vmcs12->page_fault_error_code_mask =
1777 * evmcs->page_fault_error_code_mask;
1778 * vmcs12->page_fault_error_code_match =
1779 * evmcs->page_fault_error_code_match;
1780 * vmcs12->cr3_target_count = evmcs->cr3_target_count;
1781 * vmcs12->vm_exit_msr_store_count = evmcs->vm_exit_msr_store_count;
1782 * vmcs12->vm_exit_msr_load_count = evmcs->vm_exit_msr_load_count;
1783 * vmcs12->vm_entry_msr_load_count = evmcs->vm_entry_msr_load_count;
1784 */
1785
1786 /*
1787 * Read only fields:
1788 * vmcs12->guest_physical_address = evmcs->guest_physical_address;
1789 * vmcs12->vm_instruction_error = evmcs->vm_instruction_error;
1790 * vmcs12->vm_exit_reason = evmcs->vm_exit_reason;
1791 * vmcs12->vm_exit_intr_info = evmcs->vm_exit_intr_info;
1792 * vmcs12->vm_exit_intr_error_code = evmcs->vm_exit_intr_error_code;
1793 * vmcs12->idt_vectoring_info_field = evmcs->idt_vectoring_info_field;
1794 * vmcs12->idt_vectoring_error_code = evmcs->idt_vectoring_error_code;
1795 * vmcs12->vm_exit_instruction_len = evmcs->vm_exit_instruction_len;
1796 * vmcs12->vmx_instruction_info = evmcs->vmx_instruction_info;
1797 * vmcs12->exit_qualification = evmcs->exit_qualification;
1798 * vmcs12->guest_linear_address = evmcs->guest_linear_address;
1799 *
1800 * Not present in struct vmcs12:
1801 * vmcs12->exit_io_instruction_ecx = evmcs->exit_io_instruction_ecx;
1802 * vmcs12->exit_io_instruction_esi = evmcs->exit_io_instruction_esi;
1803 * vmcs12->exit_io_instruction_edi = evmcs->exit_io_instruction_edi;
1804 * vmcs12->exit_io_instruction_eip = evmcs->exit_io_instruction_eip;
1805 */
1806
1807 return;
1808 }
1809
1810 static void copy_vmcs12_to_enlightened(struct vcpu_vmx *vmx)
1811 {
1812 struct vmcs12 *vmcs12 = vmx->nested.cached_vmcs12;
1813 struct hv_enlightened_vmcs *evmcs = vmx->nested.hv_evmcs;
1814
1815 /*
1816 * Should not be changed by KVM:
1817 *
1818 * evmcs->host_es_selector = vmcs12->host_es_selector;
1819 * evmcs->host_cs_selector = vmcs12->host_cs_selector;
1820 * evmcs->host_ss_selector = vmcs12->host_ss_selector;
1821 * evmcs->host_ds_selector = vmcs12->host_ds_selector;
1822 * evmcs->host_fs_selector = vmcs12->host_fs_selector;
1823 * evmcs->host_gs_selector = vmcs12->host_gs_selector;
1824 * evmcs->host_tr_selector = vmcs12->host_tr_selector;
1825 * evmcs->host_ia32_pat = vmcs12->host_ia32_pat;
1826 * evmcs->host_ia32_efer = vmcs12->host_ia32_efer;
1827 * evmcs->host_cr0 = vmcs12->host_cr0;
1828 * evmcs->host_cr3 = vmcs12->host_cr3;
1829 * evmcs->host_cr4 = vmcs12->host_cr4;
1830 * evmcs->host_ia32_sysenter_esp = vmcs12->host_ia32_sysenter_esp;
1831 * evmcs->host_ia32_sysenter_eip = vmcs12->host_ia32_sysenter_eip;
1832 * evmcs->host_rip = vmcs12->host_rip;
1833 * evmcs->host_ia32_sysenter_cs = vmcs12->host_ia32_sysenter_cs;
1834 * evmcs->host_fs_base = vmcs12->host_fs_base;
1835 * evmcs->host_gs_base = vmcs12->host_gs_base;
1836 * evmcs->host_tr_base = vmcs12->host_tr_base;
1837 * evmcs->host_gdtr_base = vmcs12->host_gdtr_base;
1838 * evmcs->host_idtr_base = vmcs12->host_idtr_base;
1839 * evmcs->host_rsp = vmcs12->host_rsp;
1840 * sync_vmcs02_to_vmcs12() doesn't read these:
1841 * evmcs->io_bitmap_a = vmcs12->io_bitmap_a;
1842 * evmcs->io_bitmap_b = vmcs12->io_bitmap_b;
1843 * evmcs->msr_bitmap = vmcs12->msr_bitmap;
1844 * evmcs->ept_pointer = vmcs12->ept_pointer;
1845 * evmcs->xss_exit_bitmap = vmcs12->xss_exit_bitmap;
1846 * evmcs->vm_exit_msr_store_addr = vmcs12->vm_exit_msr_store_addr;
1847 * evmcs->vm_exit_msr_load_addr = vmcs12->vm_exit_msr_load_addr;
1848 * evmcs->vm_entry_msr_load_addr = vmcs12->vm_entry_msr_load_addr;
1849 * evmcs->tpr_threshold = vmcs12->tpr_threshold;
1850 * evmcs->virtual_processor_id = vmcs12->virtual_processor_id;
1851 * evmcs->exception_bitmap = vmcs12->exception_bitmap;
1852 * evmcs->vmcs_link_pointer = vmcs12->vmcs_link_pointer;
1853 * evmcs->pin_based_vm_exec_control = vmcs12->pin_based_vm_exec_control;
1854 * evmcs->vm_exit_controls = vmcs12->vm_exit_controls;
1855 * evmcs->secondary_vm_exec_control = vmcs12->secondary_vm_exec_control;
1856 * evmcs->page_fault_error_code_mask =
1857 * vmcs12->page_fault_error_code_mask;
1858 * evmcs->page_fault_error_code_match =
1859 * vmcs12->page_fault_error_code_match;
1860 * evmcs->cr3_target_count = vmcs12->cr3_target_count;
1861 * evmcs->virtual_apic_page_addr = vmcs12->virtual_apic_page_addr;
1862 * evmcs->tsc_offset = vmcs12->tsc_offset;
1863 * evmcs->guest_ia32_debugctl = vmcs12->guest_ia32_debugctl;
1864 * evmcs->cr0_guest_host_mask = vmcs12->cr0_guest_host_mask;
1865 * evmcs->cr4_guest_host_mask = vmcs12->cr4_guest_host_mask;
1866 * evmcs->cr0_read_shadow = vmcs12->cr0_read_shadow;
1867 * evmcs->cr4_read_shadow = vmcs12->cr4_read_shadow;
1868 * evmcs->vm_exit_msr_store_count = vmcs12->vm_exit_msr_store_count;
1869 * evmcs->vm_exit_msr_load_count = vmcs12->vm_exit_msr_load_count;
1870 * evmcs->vm_entry_msr_load_count = vmcs12->vm_entry_msr_load_count;
1871 *
1872 * Not present in struct vmcs12:
1873 * evmcs->exit_io_instruction_ecx = vmcs12->exit_io_instruction_ecx;
1874 * evmcs->exit_io_instruction_esi = vmcs12->exit_io_instruction_esi;
1875 * evmcs->exit_io_instruction_edi = vmcs12->exit_io_instruction_edi;
1876 * evmcs->exit_io_instruction_eip = vmcs12->exit_io_instruction_eip;
1877 */
1878
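	/*
	 * Everything below is copied back unconditionally: the clean-fields
	 * protocol only optimizes the L1 -> KVM direction, so KVM syncs every
	 * field it may have modified on behalf of L2.
	 */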
1879 evmcs->guest_es_selector = vmcs12->guest_es_selector;
1880 evmcs->guest_cs_selector = vmcs12->guest_cs_selector;
1881 evmcs->guest_ss_selector = vmcs12->guest_ss_selector;
1882 evmcs->guest_ds_selector = vmcs12->guest_ds_selector;
1883 evmcs->guest_fs_selector = vmcs12->guest_fs_selector;
1884 evmcs->guest_gs_selector = vmcs12->guest_gs_selector;
1885 evmcs->guest_ldtr_selector = vmcs12->guest_ldtr_selector;
1886 evmcs->guest_tr_selector = vmcs12->guest_tr_selector;
1887
1888 evmcs->guest_es_limit = vmcs12->guest_es_limit;
1889 evmcs->guest_cs_limit = vmcs12->guest_cs_limit;
1890 evmcs->guest_ss_limit = vmcs12->guest_ss_limit;
1891 evmcs->guest_ds_limit = vmcs12->guest_ds_limit;
1892 evmcs->guest_fs_limit = vmcs12->guest_fs_limit;
1893 evmcs->guest_gs_limit = vmcs12->guest_gs_limit;
1894 evmcs->guest_ldtr_limit = vmcs12->guest_ldtr_limit;
1895 evmcs->guest_tr_limit = vmcs12->guest_tr_limit;
1896 evmcs->guest_gdtr_limit = vmcs12->guest_gdtr_limit;
1897 evmcs->guest_idtr_limit = vmcs12->guest_idtr_limit;
1898
1899 evmcs->guest_es_ar_bytes = vmcs12->guest_es_ar_bytes;
1900 evmcs->guest_cs_ar_bytes = vmcs12->guest_cs_ar_bytes;
1901 evmcs->guest_ss_ar_bytes = vmcs12->guest_ss_ar_bytes;
1902 evmcs->guest_ds_ar_bytes = vmcs12->guest_ds_ar_bytes;
1903 evmcs->guest_fs_ar_bytes = vmcs12->guest_fs_ar_bytes;
1904 evmcs->guest_gs_ar_bytes = vmcs12->guest_gs_ar_bytes;
1905 evmcs->guest_ldtr_ar_bytes = vmcs12->guest_ldtr_ar_bytes;
1906 evmcs->guest_tr_ar_bytes = vmcs12->guest_tr_ar_bytes;
1907
1908 evmcs->guest_es_base = vmcs12->guest_es_base;
1909 evmcs->guest_cs_base = vmcs12->guest_cs_base;
1910 evmcs->guest_ss_base = vmcs12->guest_ss_base;
1911 evmcs->guest_ds_base = vmcs12->guest_ds_base;
1912 evmcs->guest_fs_base = vmcs12->guest_fs_base;
1913 evmcs->guest_gs_base = vmcs12->guest_gs_base;
1914 evmcs->guest_ldtr_base = vmcs12->guest_ldtr_base;
1915 evmcs->guest_tr_base = vmcs12->guest_tr_base;
1916 evmcs->guest_gdtr_base = vmcs12->guest_gdtr_base;
1917 evmcs->guest_idtr_base = vmcs12->guest_idtr_base;
1918
1919 evmcs->guest_ia32_pat = vmcs12->guest_ia32_pat;
1920 evmcs->guest_ia32_efer = vmcs12->guest_ia32_efer;
1921
1922 evmcs->guest_pdptr0 = vmcs12->guest_pdptr0;
1923 evmcs->guest_pdptr1 = vmcs12->guest_pdptr1;
1924 evmcs->guest_pdptr2 = vmcs12->guest_pdptr2;
1925 evmcs->guest_pdptr3 = vmcs12->guest_pdptr3;
1926
1927 evmcs->guest_pending_dbg_exceptions =
1928 vmcs12->guest_pending_dbg_exceptions;
1929 evmcs->guest_sysenter_esp = vmcs12->guest_sysenter_esp;
1930 evmcs->guest_sysenter_eip = vmcs12->guest_sysenter_eip;
1931
1932 evmcs->guest_activity_state = vmcs12->guest_activity_state;
1933 evmcs->guest_sysenter_cs = vmcs12->guest_sysenter_cs;
1934
1935 evmcs->guest_cr0 = vmcs12->guest_cr0;
1936 evmcs->guest_cr3 = vmcs12->guest_cr3;
1937 evmcs->guest_cr4 = vmcs12->guest_cr4;
1938 evmcs->guest_dr7 = vmcs12->guest_dr7;
1939
1940 evmcs->guest_physical_address = vmcs12->guest_physical_address;
1941
1942 evmcs->vm_instruction_error = vmcs12->vm_instruction_error;
1943 evmcs->vm_exit_reason = vmcs12->vm_exit_reason;
1944 evmcs->vm_exit_intr_info = vmcs12->vm_exit_intr_info;
1945 evmcs->vm_exit_intr_error_code = vmcs12->vm_exit_intr_error_code;
1946 evmcs->idt_vectoring_info_field = vmcs12->idt_vectoring_info_field;
1947 evmcs->idt_vectoring_error_code = vmcs12->idt_vectoring_error_code;
1948 evmcs->vm_exit_instruction_len = vmcs12->vm_exit_instruction_len;
1949 evmcs->vmx_instruction_info = vmcs12->vmx_instruction_info;
1950
1951 evmcs->exit_qualification = vmcs12->exit_qualification;
1952
1953 evmcs->guest_linear_address = vmcs12->guest_linear_address;
1954 evmcs->guest_rsp = vmcs12->guest_rsp;
1955 evmcs->guest_rflags = vmcs12->guest_rflags;
1956
1957 evmcs->guest_interruptibility_info =
1958 vmcs12->guest_interruptibility_info;
1959 evmcs->cpu_based_vm_exec_control = vmcs12->cpu_based_vm_exec_control;
1960 evmcs->vm_entry_controls = vmcs12->vm_entry_controls;
1961 evmcs->vm_entry_intr_info_field = vmcs12->vm_entry_intr_info_field;
1962 evmcs->vm_entry_exception_error_code =
1963 vmcs12->vm_entry_exception_error_code;
1964 evmcs->vm_entry_instruction_len = vmcs12->vm_entry_instruction_len;
1965
1966 evmcs->guest_rip = vmcs12->guest_rip;
1967
1968 evmcs->guest_bndcfgs = vmcs12->guest_bndcfgs;
1969
1970 return;
1971 }
1972
1973 /*
1974 * This is an equivalent of the nested hypervisor executing the vmptrld
1975 * instruction.
1976 */
1977 static enum nested_evmptrld_status nested_vmx_handle_enlightened_vmptrld(
1978 struct kvm_vcpu *vcpu, bool from_launch)
1979 {
1980 struct vcpu_vmx *vmx = to_vmx(vcpu);
1981 bool evmcs_gpa_changed = false;
1982 u64 evmcs_gpa;
1983
1984 if (likely(!vmx->nested.enlightened_vmcs_enabled))
1985 return EVMPTRLD_DISABLED;
1986
1987 if (!nested_enlightened_vmentry(vcpu, &evmcs_gpa)) {
1988 nested_release_evmcs(vcpu);
1989 return EVMPTRLD_DISABLED;
1990 }
1991
1992 if (unlikely(evmcs_gpa != vmx->nested.hv_evmcs_vmptr)) {
1993 vmx->nested.current_vmptr = INVALID_GPA;
1994
1995 nested_release_evmcs(vcpu);
1996
1997 if (kvm_vcpu_map(vcpu, gpa_to_gfn(evmcs_gpa),
1998 &vmx->nested.hv_evmcs_map))
1999 return EVMPTRLD_ERROR;
2000
2001 vmx->nested.hv_evmcs = vmx->nested.hv_evmcs_map.hva;
2002
2003 /*
2004		 * Currently, KVM only supports eVMCS version 1
2005		 * (== KVM_EVMCS_VERSION) and thus we expect the guest to set this
2006		 * value in the first u32 field of the eVMCS, which specifies the
2007		 * eVMCS VersionNumber.
2008		 *
2009		 * The guest learns the eVMCS versions supported by the host by
2010		 * examining CPUID.0x4000000A.EAX[0:15]. The host userspace VMM is
2011		 * expected to set this CPUID leaf according to the value
2012		 * returned in vmcs_version from nested_enable_evmcs().
2013		 *
2014		 * However, it turns out that Microsoft Hyper-V fails to comply
2015		 * with its own invented interface: when Hyper-V uses eVMCS, it
2016		 * just sets the first u32 field of the eVMCS to the revision_id
2017		 * specified in MSR_IA32_VMX_BASIC, instead of an eVMCS version
2018		 * number, i.e. one of the supported versions specified in
2019		 * CPUID.0x4000000A.EAX[0:15].
2020		 *
2021		 * To work around this Hyper-V bug, accept either a supported
2022		 * eVMCS version or the VMCS12 revision_id as valid values for
2023		 * the first u32 field of the eVMCS.
2024 */
2025 if ((vmx->nested.hv_evmcs->revision_id != KVM_EVMCS_VERSION) &&
2026 (vmx->nested.hv_evmcs->revision_id != VMCS12_REVISION)) {
2027 nested_release_evmcs(vcpu);
2028 return EVMPTRLD_VMFAIL;
2029 }
2030
2031 vmx->nested.hv_evmcs_vmptr = evmcs_gpa;
2032
2033 evmcs_gpa_changed = true;
2034 /*
2035 * Unlike normal vmcs12, enlightened vmcs12 is not fully
2036 * reloaded from guest's memory (read only fields, fields not
2037 * present in struct hv_enlightened_vmcs, ...). Make sure there
2038 * are no leftovers.
2039 */
2040 if (from_launch) {
2041 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
2042 memset(vmcs12, 0, sizeof(*vmcs12));
2043 vmcs12->hdr.revision_id = VMCS12_REVISION;
2044 }
2045
2046 }
2047
2048 /*
2049 * Clean fields data can't be used on VMLAUNCH and when we switch
2050 * between different L2 guests as KVM keeps a single VMCS12 per L1.
2051 */
2052 if (from_launch || evmcs_gpa_changed) {
2053 vmx->nested.hv_evmcs->hv_clean_fields &=
2054 ~HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL;
2055
2056 vmx->nested.force_msr_bitmap_recalc = true;
2057 }
2058
2059 return EVMPTRLD_SUCCEEDED;
2060 }
2061
2062 void nested_sync_vmcs12_to_shadow(struct kvm_vcpu *vcpu)
2063 {
2064 struct vcpu_vmx *vmx = to_vmx(vcpu);
2065
2066 if (evmptr_is_valid(vmx->nested.hv_evmcs_vmptr))
2067 copy_vmcs12_to_enlightened(vmx);
2068 else
2069 copy_vmcs12_to_shadow(vmx);
2070
2071 vmx->nested.need_vmcs12_to_shadow_sync = false;
2072 }
2073
2074 static enum hrtimer_restart vmx_preemption_timer_fn(struct hrtimer *timer)
2075 {
2076 struct vcpu_vmx *vmx =
2077 container_of(timer, struct vcpu_vmx, nested.preemption_timer);
2078
2079 vmx->nested.preemption_timer_expired = true;
2080 kvm_make_request(KVM_REQ_EVENT, &vmx->vcpu);
2081 kvm_vcpu_kick(&vmx->vcpu);
2082
2083 return HRTIMER_NORESTART;
2084 }
2085
2086 static u64 vmx_calc_preemption_timer_value(struct kvm_vcpu *vcpu)
2087 {
2088 struct vcpu_vmx *vmx = to_vmx(vcpu);
2089 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
2090
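	/*
	 * The emulated VMX preemption timer counts down at the L1 TSC rate
	 * divided by 2^VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE (i.e. once
	 * every 32 TSC cycles), so scale the current L1 TSC accordingly.
	 */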
2091 u64 l1_scaled_tsc = kvm_read_l1_tsc(vcpu, rdtsc()) >>
2092 VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE;
2093
2094 if (!vmx->nested.has_preemption_timer_deadline) {
2095 vmx->nested.preemption_timer_deadline =
2096 vmcs12->vmx_preemption_timer_value + l1_scaled_tsc;
2097 vmx->nested.has_preemption_timer_deadline = true;
2098 }
2099 return vmx->nested.preemption_timer_deadline - l1_scaled_tsc;
2100 }
2101
2102 static void vmx_start_preemption_timer(struct kvm_vcpu *vcpu,
2103 u64 preemption_timeout)
2104 {
2105 struct vcpu_vmx *vmx = to_vmx(vcpu);
2106
2107 /*
2108 * A timer value of zero is architecturally guaranteed to cause
2109 * a VMExit prior to executing any instructions in the guest.
2110 */
2111 if (preemption_timeout == 0) {
2112 vmx_preemption_timer_fn(&vmx->nested.preemption_timer);
2113 return;
2114 }
2115
2116 if (vcpu->arch.virtual_tsc_khz == 0)
2117 return;
2118
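	/*
	 * Convert the timer value from preemption-timer ticks (each tick is
	 * 2^VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE = 32 TSC cycles) into
	 * nanoseconds: ns = cycles * 10^6 / tsc_khz. E.g. with a 2 GHz
	 * virtual TSC, 1000 ticks = 32000 cycles = 16000 ns.
	 */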
2119 preemption_timeout <<= VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE;
2120 preemption_timeout *= 1000000;
2121 do_div(preemption_timeout, vcpu->arch.virtual_tsc_khz);
2122 hrtimer_start(&vmx->nested.preemption_timer,
2123 ktime_add_ns(ktime_get(), preemption_timeout),
2124 HRTIMER_MODE_ABS_PINNED);
2125 }
2126
2127 static u64 nested_vmx_calc_efer(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
2128 {
2129 if (vmx->nested.nested_run_pending &&
2130 (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER))
2131 return vmcs12->guest_ia32_efer;
2132 else if (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE)
2133 return vmx->vcpu.arch.efer | (EFER_LMA | EFER_LME);
2134 else
2135 return vmx->vcpu.arch.efer & ~(EFER_LMA | EFER_LME);
2136 }
2137
2138 static void prepare_vmcs02_constant_state(struct vcpu_vmx *vmx)
2139 {
2140 /*
2141 * If vmcs02 hasn't been initialized, set the constant vmcs02 state
2142 * according to L0's settings (vmcs12 is irrelevant here). Host
2143 * fields that come from L0 and are not constant, e.g. HOST_CR3,
2144 * will be set as needed prior to VMLAUNCH/VMRESUME.
2145 */
2146 if (vmx->nested.vmcs02_initialized)
2147 return;
2148 vmx->nested.vmcs02_initialized = true;
2149
2150 /*
2151	 * We don't care what the EPTP value is; we just need to guarantee
2152 * it's valid so we don't get a false positive when doing early
2153 * consistency checks.
2154 */
2155 if (enable_ept && nested_early_check)
2156 vmcs_write64(EPT_POINTER,
2157 construct_eptp(&vmx->vcpu, 0, PT64_ROOT_4LEVEL));
2158
2159 /* All VMFUNCs are currently emulated through L0 vmexits. */
2160 if (cpu_has_vmx_vmfunc())
2161 vmcs_write64(VM_FUNCTION_CONTROL, 0);
2162
2163 if (cpu_has_vmx_posted_intr())
2164 vmcs_write16(POSTED_INTR_NV, POSTED_INTR_NESTED_VECTOR);
2165
2166 if (cpu_has_vmx_msr_bitmap())
2167 vmcs_write64(MSR_BITMAP, __pa(vmx->nested.vmcs02.msr_bitmap));
2168
2169 /*
2170 * PML is emulated for L2, but never enabled in hardware as the MMU
2171 * handles A/D emulation. Disabling PML for L2 also avoids having to
2172 * deal with filtering out L2 GPAs from the buffer.
2173 */
2174 if (enable_pml) {
2175 vmcs_write64(PML_ADDRESS, 0);
2176 vmcs_write16(GUEST_PML_INDEX, -1);
2177 }
2178
2179 if (cpu_has_vmx_encls_vmexit())
2180 vmcs_write64(ENCLS_EXITING_BITMAP, INVALID_GPA);
2181
2182 /*
2183 * Set the MSR load/store lists to match L0's settings. Only the
2184	 * addresses are constant (for vmcs02); the counts can change based
2185 * on L2's behavior, e.g. switching to/from long mode.
2186 */
2187 vmcs_write64(VM_EXIT_MSR_STORE_ADDR, __pa(vmx->msr_autostore.guest.val));
2188 vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host.val));
2189 vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest.val));
2190
2191 vmx_set_constant_host_state(vmx);
2192 }
2193
2194 static void prepare_vmcs02_early_rare(struct vcpu_vmx *vmx,
2195 struct vmcs12 *vmcs12)
2196 {
2197 prepare_vmcs02_constant_state(vmx);
2198
2199 vmcs_write64(VMCS_LINK_POINTER, INVALID_GPA);
2200
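	/*
	 * vpid02 is a separate VPID allocated for L2 so that L1's and L2's
	 * TLB entries are tagged differently. If L1 doesn't use VPID, or if
	 * vpid02 allocation failed, fall back to tagging L2 with L1's VPID.
	 */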
2201 if (enable_vpid) {
2202 if (nested_cpu_has_vpid(vmcs12) && vmx->nested.vpid02)
2203 vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->nested.vpid02);
2204 else
2205 vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid);
2206 }
2207 }
2208
2209 static void prepare_vmcs02_early(struct vcpu_vmx *vmx, struct loaded_vmcs *vmcs01,
2210 struct vmcs12 *vmcs12)
2211 {
2212 u32 exec_control;
2213 u64 guest_efer = nested_vmx_calc_efer(vmx, vmcs12);
2214
2215 if (vmx->nested.dirty_vmcs12 || evmptr_is_valid(vmx->nested.hv_evmcs_vmptr))
2216 prepare_vmcs02_early_rare(vmx, vmcs12);
2217
2218 /*
2219 * PIN CONTROLS
2220 */
2221 exec_control = __pin_controls_get(vmcs01);
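	/*
	 * Merge L1's pin-based controls on top of L0's, except the preemption
	 * timer: it is emulated with an hrtimer (see
	 * vmx_start_preemption_timer()) instead of being enabled in vmcs02.
	 */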
2222 exec_control |= (vmcs12->pin_based_vm_exec_control &
2223 ~PIN_BASED_VMX_PREEMPTION_TIMER);
2224
2225 /* Posted interrupts setting is only taken from vmcs12. */
2226 vmx->nested.pi_pending = false;
2227 if (nested_cpu_has_posted_intr(vmcs12))
2228 vmx->nested.posted_intr_nv = vmcs12->posted_intr_nv;
2229 else
2230 exec_control &= ~PIN_BASED_POSTED_INTR;
2231 pin_controls_set(vmx, exec_control);
2232
2233 /*
2234 * EXEC CONTROLS
2235 */
2236 exec_control = __exec_controls_get(vmcs01); /* L0's desires */
2237 exec_control &= ~CPU_BASED_INTR_WINDOW_EXITING;
2238 exec_control &= ~CPU_BASED_NMI_WINDOW_EXITING;
2239 exec_control &= ~CPU_BASED_TPR_SHADOW;
2240 exec_control |= vmcs12->cpu_based_vm_exec_control;
2241
2242 vmx->nested.l1_tpr_threshold = -1;
2243 if (exec_control & CPU_BASED_TPR_SHADOW)
2244 vmcs_write32(TPR_THRESHOLD, vmcs12->tpr_threshold);
2245 #ifdef CONFIG_X86_64
2246 else
2247 exec_control |= CPU_BASED_CR8_LOAD_EXITING |
2248 CPU_BASED_CR8_STORE_EXITING;
2249 #endif
2250
2251 /*
2252 * A vmexit (to either L1 hypervisor or L0 userspace) is always needed
2253 * for I/O port accesses.
2254 */
2255 exec_control |= CPU_BASED_UNCOND_IO_EXITING;
2256 exec_control &= ~CPU_BASED_USE_IO_BITMAPS;
2257
2258 /*
2259 * This bit will be computed in nested_get_vmcs12_pages, because
2260 * we do not have access to L1's MSR bitmap yet. For now, keep
2261 * the same bit as before, hoping to avoid multiple VMWRITEs that
2262 * only set/clear this bit.
2263 */
2264 exec_control &= ~CPU_BASED_USE_MSR_BITMAPS;
2265 exec_control |= exec_controls_get(vmx) & CPU_BASED_USE_MSR_BITMAPS;
2266
2267 exec_controls_set(vmx, exec_control);
2268
2269 /*
2270 * SECONDARY EXEC CONTROLS
2271 */
2272 if (cpu_has_secondary_exec_ctrls()) {
2273 exec_control = __secondary_exec_controls_get(vmcs01);
2274
2275 /* Take the following fields only from vmcs12 */
2276 exec_control &= ~(SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
2277 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
2278 SECONDARY_EXEC_ENABLE_INVPCID |
2279 SECONDARY_EXEC_ENABLE_RDTSCP |
2280 SECONDARY_EXEC_XSAVES |
2281 SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE |
2282 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
2283 SECONDARY_EXEC_APIC_REGISTER_VIRT |
2284 SECONDARY_EXEC_ENABLE_VMFUNC |
2285 SECONDARY_EXEC_DESC);
2286
2287 if (nested_cpu_has(vmcs12,
2288 CPU_BASED_ACTIVATE_SECONDARY_CONTROLS))
2289 exec_control |= vmcs12->secondary_vm_exec_control;
2290
2291 /* PML is emulated and never enabled in hardware for L2. */
2292 exec_control &= ~SECONDARY_EXEC_ENABLE_PML;
2293
2294 /* VMCS shadowing for L2 is emulated for now */
2295 exec_control &= ~SECONDARY_EXEC_SHADOW_VMCS;
2296
2297 /*
2298 * Preset *DT exiting when emulating UMIP, so that vmx_set_cr4()
2299 * will not have to rewrite the controls just for this bit.
2300 */
2301 if (!boot_cpu_has(X86_FEATURE_UMIP) && vmx_umip_emulated() &&
2302 (vmcs12->guest_cr4 & X86_CR4_UMIP))
2303 exec_control |= SECONDARY_EXEC_DESC;
2304
2305 if (exec_control & SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY)
2306 vmcs_write16(GUEST_INTR_STATUS,
2307 vmcs12->guest_intr_status);
2308
2309 if (!nested_cpu_has2(vmcs12, SECONDARY_EXEC_UNRESTRICTED_GUEST))
2310 exec_control &= ~SECONDARY_EXEC_UNRESTRICTED_GUEST;
2311
2312 if (exec_control & SECONDARY_EXEC_ENCLS_EXITING)
2313 vmx_write_encls_bitmap(&vmx->vcpu, vmcs12);
2314
2315 secondary_exec_controls_set(vmx, exec_control);
2316 }
2317
2318 /*
2319 * ENTRY CONTROLS
2320 *
2321 * vmcs12's VM_{ENTRY,EXIT}_LOAD_IA32_EFER and VM_ENTRY_IA32E_MODE
2322 * are emulated by vmx_set_efer() in prepare_vmcs02(), but speculate
2323 * on the related bits (if supported by the CPU) in the hope that
2324 * we can avoid VMWrites during vmx_set_efer().
2325 */
2326 exec_control = __vm_entry_controls_get(vmcs01);
2327 exec_control |= vmcs12->vm_entry_controls;
2328 exec_control &= ~(VM_ENTRY_IA32E_MODE | VM_ENTRY_LOAD_IA32_EFER);
2329 if (cpu_has_load_ia32_efer()) {
2330 if (guest_efer & EFER_LMA)
2331 exec_control |= VM_ENTRY_IA32E_MODE;
2332 if (guest_efer != host_efer)
2333 exec_control |= VM_ENTRY_LOAD_IA32_EFER;
2334 }
2335 vm_entry_controls_set(vmx, exec_control);
2336
2337 /*
2338 * EXIT CONTROLS
2339 *
2340 * L2->L1 exit controls are emulated - the hardware exit is to L0 so
2341 * we should use its exit controls. Note that VM_EXIT_LOAD_IA32_EFER
2342 * bits may be modified by vmx_set_efer() in prepare_vmcs02().
2343 */
2344 exec_control = __vm_exit_controls_get(vmcs01);
2345 if (cpu_has_load_ia32_efer() && guest_efer != host_efer)
2346 exec_control |= VM_EXIT_LOAD_IA32_EFER;
2347 else
2348 exec_control &= ~VM_EXIT_LOAD_IA32_EFER;
2349 vm_exit_controls_set(vmx, exec_control);
2350
2351 /*
2352 * Interrupt/Exception Fields
2353 */
2354 if (vmx->nested.nested_run_pending) {
2355 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
2356 vmcs12->vm_entry_intr_info_field);
2357 vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE,
2358 vmcs12->vm_entry_exception_error_code);
2359 vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
2360 vmcs12->vm_entry_instruction_len);
2361 vmcs_write32(GUEST_INTERRUPTIBILITY_INFO,
2362 vmcs12->guest_interruptibility_info);
2363 vmx->loaded_vmcs->nmi_known_unmasked =
2364 !(vmcs12->guest_interruptibility_info & GUEST_INTR_STATE_NMI);
2365 } else {
2366 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0);
2367 }
2368 }
2369
2370 static void prepare_vmcs02_rare(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
2371 {
2372 struct hv_enlightened_vmcs *hv_evmcs = vmx->nested.hv_evmcs;
2373
2374 if (!hv_evmcs || !(hv_evmcs->hv_clean_fields &
2375 HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2)) {
2376 vmcs_write16(GUEST_ES_SELECTOR, vmcs12->guest_es_selector);
2377 vmcs_write16(GUEST_CS_SELECTOR, vmcs12->guest_cs_selector);
2378 vmcs_write16(GUEST_SS_SELECTOR, vmcs12->guest_ss_selector);
2379 vmcs_write16(GUEST_DS_SELECTOR, vmcs12->guest_ds_selector);
2380 vmcs_write16(GUEST_FS_SELECTOR, vmcs12->guest_fs_selector);
2381 vmcs_write16(GUEST_GS_SELECTOR, vmcs12->guest_gs_selector);
2382 vmcs_write16(GUEST_LDTR_SELECTOR, vmcs12->guest_ldtr_selector);
2383 vmcs_write16(GUEST_TR_SELECTOR, vmcs12->guest_tr_selector);
2384 vmcs_write32(GUEST_ES_LIMIT, vmcs12->guest_es_limit);
2385 vmcs_write32(GUEST_CS_LIMIT, vmcs12->guest_cs_limit);
2386 vmcs_write32(GUEST_SS_LIMIT, vmcs12->guest_ss_limit);
2387 vmcs_write32(GUEST_DS_LIMIT, vmcs12->guest_ds_limit);
2388 vmcs_write32(GUEST_FS_LIMIT, vmcs12->guest_fs_limit);
2389 vmcs_write32(GUEST_GS_LIMIT, vmcs12->guest_gs_limit);
2390 vmcs_write32(GUEST_LDTR_LIMIT, vmcs12->guest_ldtr_limit);
2391 vmcs_write32(GUEST_TR_LIMIT, vmcs12->guest_tr_limit);
2392 vmcs_write32(GUEST_GDTR_LIMIT, vmcs12->guest_gdtr_limit);
2393 vmcs_write32(GUEST_IDTR_LIMIT, vmcs12->guest_idtr_limit);
2394 vmcs_write32(GUEST_CS_AR_BYTES, vmcs12->guest_cs_ar_bytes);
2395 vmcs_write32(GUEST_SS_AR_BYTES, vmcs12->guest_ss_ar_bytes);
2396 vmcs_write32(GUEST_ES_AR_BYTES, vmcs12->guest_es_ar_bytes);
2397 vmcs_write32(GUEST_DS_AR_BYTES, vmcs12->guest_ds_ar_bytes);
2398 vmcs_write32(GUEST_FS_AR_BYTES, vmcs12->guest_fs_ar_bytes);
2399 vmcs_write32(GUEST_GS_AR_BYTES, vmcs12->guest_gs_ar_bytes);
2400 vmcs_write32(GUEST_LDTR_AR_BYTES, vmcs12->guest_ldtr_ar_bytes);
2401 vmcs_write32(GUEST_TR_AR_BYTES, vmcs12->guest_tr_ar_bytes);
2402 vmcs_writel(GUEST_ES_BASE, vmcs12->guest_es_base);
2403 vmcs_writel(GUEST_CS_BASE, vmcs12->guest_cs_base);
2404 vmcs_writel(GUEST_SS_BASE, vmcs12->guest_ss_base);
2405 vmcs_writel(GUEST_DS_BASE, vmcs12->guest_ds_base);
2406 vmcs_writel(GUEST_FS_BASE, vmcs12->guest_fs_base);
2407 vmcs_writel(GUEST_GS_BASE, vmcs12->guest_gs_base);
2408 vmcs_writel(GUEST_LDTR_BASE, vmcs12->guest_ldtr_base);
2409 vmcs_writel(GUEST_TR_BASE, vmcs12->guest_tr_base);
2410 vmcs_writel(GUEST_GDTR_BASE, vmcs12->guest_gdtr_base);
2411 vmcs_writel(GUEST_IDTR_BASE, vmcs12->guest_idtr_base);
2412
2413 vmx->segment_cache.bitmask = 0;
2414 }
2415
2416 if (!hv_evmcs || !(hv_evmcs->hv_clean_fields &
2417 HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1)) {
2418 vmcs_write32(GUEST_SYSENTER_CS, vmcs12->guest_sysenter_cs);
2419 vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS,
2420 vmcs12->guest_pending_dbg_exceptions);
2421 vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->guest_sysenter_esp);
2422 vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->guest_sysenter_eip);
2423
2424 /*
2425		 * L1 may access L2's PDPTRs, so save them to construct
2426		 * vmcs12.
2427 */
2428 if (enable_ept) {
2429 vmcs_write64(GUEST_PDPTR0, vmcs12->guest_pdptr0);
2430 vmcs_write64(GUEST_PDPTR1, vmcs12->guest_pdptr1);
2431 vmcs_write64(GUEST_PDPTR2, vmcs12->guest_pdptr2);
2432 vmcs_write64(GUEST_PDPTR3, vmcs12->guest_pdptr3);
2433 }
2434
2435 if (kvm_mpx_supported() && vmx->nested.nested_run_pending &&
2436 (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS))
2437 vmcs_write64(GUEST_BNDCFGS, vmcs12->guest_bndcfgs);
2438 }
2439
2440 if (nested_cpu_has_xsaves(vmcs12))
2441 vmcs_write64(XSS_EXIT_BITMAP, vmcs12->xss_exit_bitmap);
2442
2443 /*
2444 * Whether page-faults are trapped is determined by a combination of
2445 * 3 settings: PFEC_MASK, PFEC_MATCH and EXCEPTION_BITMAP.PF. If L0
2446 * doesn't care about page faults then we should set all of these to
2447 * L1's desires. However, if L0 does care about (some) page faults, it
2448	 * is not easy (if at all possible?) to merge L0 and L1's desires, so we
2449 * simply ask to exit on each and every L2 page fault. This is done by
2450 * setting MASK=MATCH=0 and (see below) EB.PF=1.
2451 * Note that below we don't need special code to set EB.PF beyond the
2452 * "or"ing of the EB of vmcs01 and vmcs12, because when enable_ept,
2453 * vmcs01's EB.PF is 0 so the "or" will take vmcs12's value, and when
2454 * !enable_ept, EB.PF is 1, so the "or" will always be 1.
2455 */
2456 if (vmx_need_pf_intercept(&vmx->vcpu)) {
2457 /*
2458 * TODO: if both L0 and L1 need the same MASK and MATCH,
2459 * go ahead and use it?
2460 */
2461 vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, 0);
2462 vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, 0);
2463 } else {
2464 vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, vmcs12->page_fault_error_code_mask);
2465 vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, vmcs12->page_fault_error_code_match);
2466 }
2467
2468 if (cpu_has_vmx_apicv()) {
2469 vmcs_write64(EOI_EXIT_BITMAP0, vmcs12->eoi_exit_bitmap0);
2470 vmcs_write64(EOI_EXIT_BITMAP1, vmcs12->eoi_exit_bitmap1);
2471 vmcs_write64(EOI_EXIT_BITMAP2, vmcs12->eoi_exit_bitmap2);
2472 vmcs_write64(EOI_EXIT_BITMAP3, vmcs12->eoi_exit_bitmap3);
2473 }
2474
2475 /*
2476 * Make sure the msr_autostore list is up to date before we set the
2477 * count in the vmcs02.
2478 */
2479 prepare_vmx_msr_autostore_list(&vmx->vcpu, MSR_IA32_TSC);
2480
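	/*
	 * The list addresses were written once in
	 * prepare_vmcs02_constant_state(); only the counts need refreshing
	 * here, as they track the current autoload/autostore list sizes.
	 */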
2481 vmcs_write32(VM_EXIT_MSR_STORE_COUNT, vmx->msr_autostore.guest.nr);
2482 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
2483 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);
2484
2485 set_cr4_guest_host_mask(vmx);
2486 }
2487
2488 /*
2489 * prepare_vmcs02 is called when the L1 guest hypervisor runs its nested
2490 * L2 guest. L1 has a vmcs for L2 (vmcs12), and this function "merges" it
2491 * with L0's requirements for its guest (a.k.a. vmcs01), so we can run the L2
2492 * guest in a way that will both be appropriate to L1's requests, and our
2493	 * needs. In addition to modifying the active vmcs (which is vmcs02), this
2494	 * function has other necessary side effects, such as setting various
2495	 * vcpu->arch fields.
2496	 * Returns 0 on success, -EINVAL on failure. The entry failure reason is
2497	 * assigned to entry_failure_code on failure.
2498 */
2499 static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
2500 bool from_vmentry,
2501 enum vm_entry_failure_code *entry_failure_code)
2502 {
2503 struct vcpu_vmx *vmx = to_vmx(vcpu);
2504 bool load_guest_pdptrs_vmcs12 = false;
2505
2506 if (vmx->nested.dirty_vmcs12 || evmptr_is_valid(vmx->nested.hv_evmcs_vmptr)) {
2507 prepare_vmcs02_rare(vmx, vmcs12);
2508 vmx->nested.dirty_vmcs12 = false;
2509
2510 load_guest_pdptrs_vmcs12 = !evmptr_is_valid(vmx->nested.hv_evmcs_vmptr) ||
2511 !(vmx->nested.hv_evmcs->hv_clean_fields &
2512 HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1);
2513 }
2514
2515 if (vmx->nested.nested_run_pending &&
2516 (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS)) {
2517 kvm_set_dr(vcpu, 7, vmcs12->guest_dr7);
2518 vmcs_write64(GUEST_IA32_DEBUGCTL, vmcs12->guest_ia32_debugctl);
2519 } else {
2520 kvm_set_dr(vcpu, 7, vcpu->arch.dr7);
2521 vmcs_write64(GUEST_IA32_DEBUGCTL, vmx->nested.vmcs01_debugctl);
2522 }
2523 if (kvm_mpx_supported() && (!vmx->nested.nested_run_pending ||
2524 !(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS)))
2525 vmcs_write64(GUEST_BNDCFGS, vmx->nested.vmcs01_guest_bndcfgs);
2526 vmx_set_rflags(vcpu, vmcs12->guest_rflags);
2527
2528 /* EXCEPTION_BITMAP and CR0_GUEST_HOST_MASK should basically be the
2529 * bitwise-or of what L1 wants to trap for L2, and what we want to
2530 * trap. Note that CR0.TS also needs updating - we do this later.
2531 */
2532 vmx_update_exception_bitmap(vcpu);
2533 vcpu->arch.cr0_guest_owned_bits &= ~vmcs12->cr0_guest_host_mask;
2534 vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits);
2535
2536 if (vmx->nested.nested_run_pending &&
2537 (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PAT)) {
2538 vmcs_write64(GUEST_IA32_PAT, vmcs12->guest_ia32_pat);
2539 vcpu->arch.pat = vmcs12->guest_ia32_pat;
2540 } else if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {
2541 vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat);
2542 }
2543
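	/*
	 * Derive the effective TSC offset and multiplier seen by L2 by
	 * composing L2's values from vmcs12 with L1's, so that RDTSC in L2
	 * observes both levels of adjustment.
	 */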
2544 vcpu->arch.tsc_offset = kvm_calc_nested_tsc_offset(
2545 vcpu->arch.l1_tsc_offset,
2546 vmx_get_l2_tsc_offset(vcpu),
2547 vmx_get_l2_tsc_multiplier(vcpu));
2548
2549 vcpu->arch.tsc_scaling_ratio = kvm_calc_nested_tsc_multiplier(
2550 vcpu->arch.l1_tsc_scaling_ratio,
2551 vmx_get_l2_tsc_multiplier(vcpu));
2552
2553 vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset);
2554 if (kvm_has_tsc_control)
2555 vmcs_write64(TSC_MULTIPLIER, vcpu->arch.tsc_scaling_ratio);
2556
2557 nested_vmx_transition_tlb_flush(vcpu, vmcs12, true);
2558
2559 if (nested_cpu_has_ept(vmcs12))
2560 nested_ept_init_mmu_context(vcpu);
2561
2562 /*
2563	 * This sets GUEST_CR0 to vmcs12->guest_cr0, possibly modifying bits
2564	 * that we consider mandatory to be enabled.
2565	 * The CR0_READ_SHADOW is what L2 should have expected to read given
2566	 * the specifications by L1; it's not enough to take
2567	 * vmcs12->cr0_read_shadow because our cr0_guest_host_mask may
2568	 * have more bits set than L1 expected.
2569 */
2570 vmx_set_cr0(vcpu, vmcs12->guest_cr0);
2571 vmcs_writel(CR0_READ_SHADOW, nested_read_cr0(vmcs12));
2572
2573 vmx_set_cr4(vcpu, vmcs12->guest_cr4);
2574 vmcs_writel(CR4_READ_SHADOW, nested_read_cr4(vmcs12));
2575
2576 vcpu->arch.efer = nested_vmx_calc_efer(vmx, vmcs12);
2577 /* Note: may modify VM_ENTRY/EXIT_CONTROLS and GUEST/HOST_IA32_EFER */
2578 vmx_set_efer(vcpu, vcpu->arch.efer);
2579
2580 /*
2581 * Guest state is invalid and unrestricted guest is disabled,
2582 * which means L1 attempted VMEntry to L2 with invalid state.
2583 * Fail the VMEntry.
2584 *
2585	 * However, when force loading the guest state (on SMM exit or when
2586	 * loading nested state after migration), it is possible to have
2587	 * invalid guest state now, which will be fixed later by
2588	 * restoring the L2 register state.
2589 */
2590 if (CC(from_vmentry && !vmx_guest_state_valid(vcpu))) {
2591 *entry_failure_code = ENTRY_FAIL_DEFAULT;
2592 return -EINVAL;
2593 }
2594
2595	/* Load vmcs12->guest_cr3, shadowed by either EPT or shadow page tables. */
2596 if (nested_vmx_load_cr3(vcpu, vmcs12->guest_cr3, nested_cpu_has_ept(vmcs12),
2597 from_vmentry, entry_failure_code))
2598 return -EINVAL;
2599
2600 /*
2601 * Immediately write vmcs02.GUEST_CR3. It will be propagated to vmcs12
2602 * on nested VM-Exit, which can occur without actually running L2 and
2603 * thus without hitting vmx_load_mmu_pgd(), e.g. if L1 is entering L2 with
2604 * vmcs12.GUEST_ACTIVITYSTATE=HLT, in which case KVM will intercept the
2605 * transition to HLT instead of running L2.
2606 */
2607 if (enable_ept)
2608 vmcs_writel(GUEST_CR3, vmcs12->guest_cr3);
2609
2610 /* Late preparation of GUEST_PDPTRs now that EFER and CRs are set. */
2611 if (load_guest_pdptrs_vmcs12 && nested_cpu_has_ept(vmcs12) &&
2612 is_pae_paging(vcpu)) {
2613 vmcs_write64(GUEST_PDPTR0, vmcs12->guest_pdptr0);
2614 vmcs_write64(GUEST_PDPTR1, vmcs12->guest_pdptr1);
2615 vmcs_write64(GUEST_PDPTR2, vmcs12->guest_pdptr2);
2616 vmcs_write64(GUEST_PDPTR3, vmcs12->guest_pdptr3);
2617 }
2618
2619 if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL) &&
2620 intel_pmu_has_perf_global_ctrl(vcpu_to_pmu(vcpu)) &&
2621 WARN_ON_ONCE(kvm_set_msr(vcpu, MSR_CORE_PERF_GLOBAL_CTRL,
2622 vmcs12->guest_ia32_perf_global_ctrl))) {
2623 *entry_failure_code = ENTRY_FAIL_DEFAULT;
2624 return -EINVAL;
2625 }
2626
2627 kvm_rsp_write(vcpu, vmcs12->guest_rsp);
2628 kvm_rip_write(vcpu, vmcs12->guest_rip);
2629
2630 /*
2631 * It was observed that genuine Hyper-V running in L1 doesn't reset
2632	 * 'hv_clean_fields' by itself; it only clears the corresponding clean
2633	 * bits when it changes a field in the eVMCS. Mark all fields as clean
2634 * here.
2635 */
2636 if (evmptr_is_valid(vmx->nested.hv_evmcs_vmptr))
2637 vmx->nested.hv_evmcs->hv_clean_fields |=
2638 HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL;
2639
2640 return 0;
2641 }
2642
2643 static int nested_vmx_check_nmi_controls(struct vmcs12 *vmcs12)
2644 {
2645 if (CC(!nested_cpu_has_nmi_exiting(vmcs12) &&
2646 nested_cpu_has_virtual_nmis(vmcs12)))
2647 return -EINVAL;
2648
2649 if (CC(!nested_cpu_has_virtual_nmis(vmcs12) &&
2650 nested_cpu_has(vmcs12, CPU_BASED_NMI_WINDOW_EXITING)))
2651 return -EINVAL;
2652
2653 return 0;
2654 }
2655
2656 static bool nested_vmx_check_eptp(struct kvm_vcpu *vcpu, u64 new_eptp)
2657 {
2658 struct vcpu_vmx *vmx = to_vmx(vcpu);
2659
2660 /* Check for memory type validity */
2661 switch (new_eptp & VMX_EPTP_MT_MASK) {
2662 case VMX_EPTP_MT_UC:
2663 if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPTP_UC_BIT)))
2664 return false;
2665 break;
2666 case VMX_EPTP_MT_WB:
2667 if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPTP_WB_BIT)))
2668 return false;
2669 break;
2670 default:
2671 return false;
2672 }
2673
2674 /* Page-walk levels validity. */
2675 switch (new_eptp & VMX_EPTP_PWL_MASK) {
2676 case VMX_EPTP_PWL_5:
2677 if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPT_PAGE_WALK_5_BIT)))
2678 return false;
2679 break;
2680 case VMX_EPTP_PWL_4:
2681 if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPT_PAGE_WALK_4_BIT)))
2682 return false;
2683 break;
2684 default:
2685 return false;
2686 }
2687
2688	/* Reserved bits (11:7 and bits beyond the max physical address) must not be set */
2689 if (CC(kvm_vcpu_is_illegal_gpa(vcpu, new_eptp) || ((new_eptp >> 7) & 0x1f)))
2690 return false;
2691
2692 /* AD, if set, should be supported */
2693 if (new_eptp & VMX_EPTP_AD_ENABLE_BIT) {
2694 if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPT_AD_BIT)))
2695 return false;
2696 }
2697
2698 return true;
2699 }
2700
2701 /*
2702 * Checks related to VM-Execution Control Fields
2703 */
2704 static int nested_check_vm_execution_controls(struct kvm_vcpu *vcpu,
2705 struct vmcs12 *vmcs12)
2706 {
2707 struct vcpu_vmx *vmx = to_vmx(vcpu);
2708
2709 if (CC(!vmx_control_verify(vmcs12->pin_based_vm_exec_control,
2710 vmx->nested.msrs.pinbased_ctls_low,
2711 vmx->nested.msrs.pinbased_ctls_high)) ||
2712 CC(!vmx_control_verify(vmcs12->cpu_based_vm_exec_control,
2713 vmx->nested.msrs.procbased_ctls_low,
2714 vmx->nested.msrs.procbased_ctls_high)))
2715 return -EINVAL;
2716
2717 if (nested_cpu_has(vmcs12, CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) &&
2718 CC(!vmx_control_verify(vmcs12->secondary_vm_exec_control,
2719 vmx->nested.msrs.secondary_ctls_low,
2720 vmx->nested.msrs.secondary_ctls_high)))
2721 return -EINVAL;
2722
2723 if (CC(vmcs12->cr3_target_count > nested_cpu_vmx_misc_cr3_count(vcpu)) ||
2724 nested_vmx_check_io_bitmap_controls(vcpu, vmcs12) ||
2725 nested_vmx_check_msr_bitmap_controls(vcpu, vmcs12) ||
2726 nested_vmx_check_tpr_shadow_controls(vcpu, vmcs12) ||
2727 nested_vmx_check_apic_access_controls(vcpu, vmcs12) ||
2728 nested_vmx_check_apicv_controls(vcpu, vmcs12) ||
2729 nested_vmx_check_nmi_controls(vmcs12) ||
2730 nested_vmx_check_pml_controls(vcpu, vmcs12) ||
2731 nested_vmx_check_unrestricted_guest_controls(vcpu, vmcs12) ||
2732 nested_vmx_check_mode_based_ept_exec_controls(vcpu, vmcs12) ||
2733 nested_vmx_check_shadow_vmcs_controls(vcpu, vmcs12) ||
2734 CC(nested_cpu_has_vpid(vmcs12) && !vmcs12->virtual_processor_id))
2735 return -EINVAL;
2736
2737 if (!nested_cpu_has_preemption_timer(vmcs12) &&
2738 nested_cpu_has_save_preemption_timer(vmcs12))
2739 return -EINVAL;
2740
2741 if (nested_cpu_has_ept(vmcs12) &&
2742 CC(!nested_vmx_check_eptp(vcpu, vmcs12->ept_pointer)))
2743 return -EINVAL;
2744
2745 if (nested_cpu_has_vmfunc(vmcs12)) {
2746 if (CC(vmcs12->vm_function_control &
2747 ~vmx->nested.msrs.vmfunc_controls))
2748 return -EINVAL;
2749
2750 if (nested_cpu_has_eptp_switching(vmcs12)) {
2751 if (CC(!nested_cpu_has_ept(vmcs12)) ||
2752 CC(!page_address_valid(vcpu, vmcs12->eptp_list_address)))
2753 return -EINVAL;
2754 }
2755 }
2756
2757 return 0;
2758 }
2759
2760 /*
2761 * Checks related to VM-Exit Control Fields
2762 */
2763 static int nested_check_vm_exit_controls(struct kvm_vcpu *vcpu,
2764 struct vmcs12 *vmcs12)
2765 {
2766 struct vcpu_vmx *vmx = to_vmx(vcpu);
2767
2768 if (CC(!vmx_control_verify(vmcs12->vm_exit_controls,
2769 vmx->nested.msrs.exit_ctls_low,
2770 vmx->nested.msrs.exit_ctls_high)) ||
2771 CC(nested_vmx_check_exit_msr_switch_controls(vcpu, vmcs12)))
2772 return -EINVAL;
2773
2774 return 0;
2775 }
2776
2777 /*
2778 * Checks related to VM-Entry Control Fields
2779 */
2780 static int nested_check_vm_entry_controls(struct kvm_vcpu *vcpu,
2781 struct vmcs12 *vmcs12)
2782 {
2783 struct vcpu_vmx *vmx = to_vmx(vcpu);
2784
2785 if (CC(!vmx_control_verify(vmcs12->vm_entry_controls,
2786 vmx->nested.msrs.entry_ctls_low,
2787 vmx->nested.msrs.entry_ctls_high)))
2788 return -EINVAL;
2789
2790 /*
2791 * From the Intel SDM, volume 3:
2792 * Fields relevant to VM-entry event injection must be set properly.
2793 * These fields are the VM-entry interruption-information field, the
2794 * VM-entry exception error code, and the VM-entry instruction length.
2795 */
2796 if (vmcs12->vm_entry_intr_info_field & INTR_INFO_VALID_MASK) {
2797 u32 intr_info = vmcs12->vm_entry_intr_info_field;
2798 u8 vector = intr_info & INTR_INFO_VECTOR_MASK;
2799 u32 intr_type = intr_info & INTR_INFO_INTR_TYPE_MASK;
2800 bool has_error_code = intr_info & INTR_INFO_DELIVER_CODE_MASK;
2801 bool should_have_error_code;
2802 bool urg = nested_cpu_has2(vmcs12,
2803 SECONDARY_EXEC_UNRESTRICTED_GUEST);
2804 bool prot_mode = !urg || vmcs12->guest_cr0 & X86_CR0_PE;
2805
2806 /* VM-entry interruption-info field: interruption type */
2807 if (CC(intr_type == INTR_TYPE_RESERVED) ||
2808 CC(intr_type == INTR_TYPE_OTHER_EVENT &&
2809 !nested_cpu_supports_monitor_trap_flag(vcpu)))
2810 return -EINVAL;
2811
2812 /* VM-entry interruption-info field: vector */
2813 if (CC(intr_type == INTR_TYPE_NMI_INTR && vector != NMI_VECTOR) ||
2814 CC(intr_type == INTR_TYPE_HARD_EXCEPTION && vector > 31) ||
2815 CC(intr_type == INTR_TYPE_OTHER_EVENT && vector != 0))
2816 return -EINVAL;
2817
2818 /* VM-entry interruption-info field: deliver error code */
2819 should_have_error_code =
2820 intr_type == INTR_TYPE_HARD_EXCEPTION && prot_mode &&
2821 x86_exception_has_error_code(vector);
2822 if (CC(has_error_code != should_have_error_code))
2823 return -EINVAL;
2824
2825 /* VM-entry exception error code */
2826 if (CC(has_error_code &&
2827 vmcs12->vm_entry_exception_error_code & GENMASK(31, 16)))
2828 return -EINVAL;
2829
2830 /* VM-entry interruption-info field: reserved bits */
2831 if (CC(intr_info & INTR_INFO_RESVD_BITS_MASK))
2832 return -EINVAL;
2833
2834 /* VM-entry instruction length */
2835 switch (intr_type) {
2836 case INTR_TYPE_SOFT_EXCEPTION:
2837 case INTR_TYPE_SOFT_INTR:
2838 case INTR_TYPE_PRIV_SW_EXCEPTION:
2839 if (CC(vmcs12->vm_entry_instruction_len > 15) ||
2840 CC(vmcs12->vm_entry_instruction_len == 0 &&
2841 CC(!nested_cpu_has_zero_length_injection(vcpu))))
2842 return -EINVAL;
2843 }
2844 }
2845
2846 if (nested_vmx_check_entry_msr_switch_controls(vcpu, vmcs12))
2847 return -EINVAL;
2848
2849 return 0;
2850 }
2851
2852 static int nested_vmx_check_controls(struct kvm_vcpu *vcpu,
2853 struct vmcs12 *vmcs12)
2854 {
2855 if (nested_check_vm_execution_controls(vcpu, vmcs12) ||
2856 nested_check_vm_exit_controls(vcpu, vmcs12) ||
2857 nested_check_vm_entry_controls(vcpu, vmcs12))
2858 return -EINVAL;
2859
2860 if (to_vmx(vcpu)->nested.enlightened_vmcs_enabled)
2861 return nested_evmcs_check_controls(vmcs12);
2862
2863 return 0;
2864 }
2865
2866 static int nested_vmx_check_address_space_size(struct kvm_vcpu *vcpu,
2867 struct vmcs12 *vmcs12)
2868 {
2869 #ifdef CONFIG_X86_64
2870 if (CC(!!(vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE) !=
2871 !!(vcpu->arch.efer & EFER_LMA)))
2872 return -EINVAL;
2873 #endif
2874 return 0;
2875 }
2876
2877 static int nested_vmx_check_host_state(struct kvm_vcpu *vcpu,
2878 struct vmcs12 *vmcs12)
2879 {
2880 bool ia32e;
2881
2882 if (CC(!nested_host_cr0_valid(vcpu, vmcs12->host_cr0)) ||
2883 CC(!nested_host_cr4_valid(vcpu, vmcs12->host_cr4)) ||
2884 CC(kvm_vcpu_is_illegal_gpa(vcpu, vmcs12->host_cr3)))
2885 return -EINVAL;
2886
2887 if (CC(is_noncanonical_address(vmcs12->host_ia32_sysenter_esp, vcpu)) ||
2888 CC(is_noncanonical_address(vmcs12->host_ia32_sysenter_eip, vcpu)))
2889 return -EINVAL;
2890
2891 if ((vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PAT) &&
2892 CC(!kvm_pat_valid(vmcs12->host_ia32_pat)))
2893 return -EINVAL;
2894
2895 if ((vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL) &&
2896 CC(!kvm_valid_perf_global_ctrl(vcpu_to_pmu(vcpu),
2897 vmcs12->host_ia32_perf_global_ctrl)))
2898 return -EINVAL;
2899
2900 #ifdef CONFIG_X86_64
2901 ia32e = !!(vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE);
2902 #else
2903 ia32e = false;
2904 #endif
2905
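	/*
	 * If the host will run in 64-bit mode after the VM-exit, CR4.PAE must
	 * be set; otherwise the IA-32e-mode VM-entry control, CR4.PCIDE and a
	 * host RIP above 4GiB are all illegal.
	 */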
2906 if (ia32e) {
2907 if (CC(!(vmcs12->host_cr4 & X86_CR4_PAE)))
2908 return -EINVAL;
2909 } else {
2910 if (CC(vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE) ||
2911 CC(vmcs12->host_cr4 & X86_CR4_PCIDE) ||
2912 CC((vmcs12->host_rip) >> 32))
2913 return -EINVAL;
2914 }
2915
2916 if (CC(vmcs12->host_cs_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) ||
2917 CC(vmcs12->host_ss_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) ||
2918 CC(vmcs12->host_ds_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) ||
2919 CC(vmcs12->host_es_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) ||
2920 CC(vmcs12->host_fs_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) ||
2921 CC(vmcs12->host_gs_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) ||
2922 CC(vmcs12->host_tr_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) ||
2923 CC(vmcs12->host_cs_selector == 0) ||
2924 CC(vmcs12->host_tr_selector == 0) ||
2925 CC(vmcs12->host_ss_selector == 0 && !ia32e))
2926 return -EINVAL;
2927
2928 if (CC(is_noncanonical_address(vmcs12->host_fs_base, vcpu)) ||
2929 CC(is_noncanonical_address(vmcs12->host_gs_base, vcpu)) ||
2930 CC(is_noncanonical_address(vmcs12->host_gdtr_base, vcpu)) ||
2931 CC(is_noncanonical_address(vmcs12->host_idtr_base, vcpu)) ||
2932 CC(is_noncanonical_address(vmcs12->host_tr_base, vcpu)) ||
2933 CC(is_noncanonical_address(vmcs12->host_rip, vcpu)))
2934 return -EINVAL;
2935
2936 /*
2937 * If the load IA32_EFER VM-exit control is 1, bits reserved in the
2938 * IA32_EFER MSR must be 0 in the field for that register. In addition,
2939 * the values of the LMA and LME bits in the field must each be that of
2940 * the host address-space size VM-exit control.
2941 */
2942 if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER) {
2943 if (CC(!kvm_valid_efer(vcpu, vmcs12->host_ia32_efer)) ||
2944 CC(ia32e != !!(vmcs12->host_ia32_efer & EFER_LMA)) ||
2945 CC(ia32e != !!(vmcs12->host_ia32_efer & EFER_LME)))
2946 return -EINVAL;
2947 }
2948
2949 return 0;
2950 }
2951
2952 static int nested_vmx_check_vmcs_link_ptr(struct kvm_vcpu *vcpu,
2953 struct vmcs12 *vmcs12)
2954 {
2955 struct vcpu_vmx *vmx = to_vmx(vcpu);
2956 struct gfn_to_hva_cache *ghc = &vmx->nested.shadow_vmcs12_cache;
2957 struct vmcs_hdr hdr;
2958
2959 if (vmcs12->vmcs_link_pointer == INVALID_GPA)
2960 return 0;
2961
2962 if (CC(!page_address_valid(vcpu, vmcs12->vmcs_link_pointer)))
2963 return -EINVAL;
2964
2965 if (ghc->gpa != vmcs12->vmcs_link_pointer &&
2966 CC(kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc,
2967 vmcs12->vmcs_link_pointer, VMCS12_SIZE)))
2968 return -EINVAL;
2969
2970 if (CC(kvm_read_guest_offset_cached(vcpu->kvm, ghc, &hdr,
2971 offsetof(struct vmcs12, hdr),
2972 sizeof(hdr))))
2973 return -EINVAL;
2974
2975 if (CC(hdr.revision_id != VMCS12_REVISION) ||
2976 CC(hdr.shadow_vmcs != nested_cpu_has_shadow_vmcs(vmcs12)))
2977 return -EINVAL;
2978
2979 return 0;
2980 }
2981
2982 /*
2983 * Checks related to Guest Non-register State
2984 */
2985 static int nested_check_guest_non_reg_state(struct vmcs12 *vmcs12)
2986 {
2987 if (CC(vmcs12->guest_activity_state != GUEST_ACTIVITY_ACTIVE &&
2988 vmcs12->guest_activity_state != GUEST_ACTIVITY_HLT &&
2989 vmcs12->guest_activity_state != GUEST_ACTIVITY_WAIT_SIPI))
2990 return -EINVAL;
2991
2992 return 0;
2993 }
2994
2995 static int nested_vmx_check_guest_state(struct kvm_vcpu *vcpu,
2996 struct vmcs12 *vmcs12,
2997 enum vm_entry_failure_code *entry_failure_code)
2998 {
2999 bool ia32e;
3000
3001 *entry_failure_code = ENTRY_FAIL_DEFAULT;
3002
3003 if (CC(!nested_guest_cr0_valid(vcpu, vmcs12->guest_cr0)) ||
3004 CC(!nested_guest_cr4_valid(vcpu, vmcs12->guest_cr4)))
3005 return -EINVAL;
3006
3007 if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS) &&
3008 CC(!kvm_dr7_valid(vmcs12->guest_dr7)))
3009 return -EINVAL;
3010
3011 if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PAT) &&
3012 CC(!kvm_pat_valid(vmcs12->guest_ia32_pat)))
3013 return -EINVAL;
3014
3015 if (nested_vmx_check_vmcs_link_ptr(vcpu, vmcs12)) {
3016 *entry_failure_code = ENTRY_FAIL_VMCS_LINK_PTR;
3017 return -EINVAL;
3018 }
3019
3020 if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL) &&
3021 CC(!kvm_valid_perf_global_ctrl(vcpu_to_pmu(vcpu),
3022 vmcs12->guest_ia32_perf_global_ctrl)))
3023 return -EINVAL;
3024
3025 /*
3026 * If the load IA32_EFER VM-entry control is 1, the following checks
3027 * are performed on the field for the IA32_EFER MSR:
3028 * - Bits reserved in the IA32_EFER MSR must be 0.
3029 * - Bit 10 (corresponding to IA32_EFER.LMA) must equal the value of
3030 * the IA-32e mode guest VM-exit control. It must also be identical
3031 * to bit 8 (LME) if bit 31 in the CR0 field (corresponding to
3032 * CR0.PG) is 1.
3033 */
3034 if (to_vmx(vcpu)->nested.nested_run_pending &&
3035 (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER)) {
3036 ia32e = (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE) != 0;
3037 if (CC(!kvm_valid_efer(vcpu, vmcs12->guest_ia32_efer)) ||
3038 CC(ia32e != !!(vmcs12->guest_ia32_efer & EFER_LMA)) ||
3039 CC(((vmcs12->guest_cr0 & X86_CR0_PG) &&
3040 ia32e != !!(vmcs12->guest_ia32_efer & EFER_LME))))
3041 return -EINVAL;
3042 }
3043
3044 if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS) &&
3045 (CC(is_noncanonical_address(vmcs12->guest_bndcfgs & PAGE_MASK, vcpu)) ||
3046 CC((vmcs12->guest_bndcfgs & MSR_IA32_BNDCFGS_RSVD))))
3047 return -EINVAL;
3048
3049 if (nested_check_guest_non_reg_state(vmcs12))
3050 return -EINVAL;
3051
3052 return 0;
3053 }
3054
3055 static int nested_vmx_check_vmentry_hw(struct kvm_vcpu *vcpu)
3056 {
3057 struct vcpu_vmx *vmx = to_vmx(vcpu);
3058 unsigned long cr3, cr4;
3059 bool vm_fail;
3060
3061 if (!nested_early_check)
3062 return 0;
3063
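	/*
	 * Temporarily zero the VM-entry/VM-exit MSR load counts so the probe
	 * VMEnter below doesn't process the MSR autoload lists; the counts
	 * are restored once the hardware check completes.
	 */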
3064 if (vmx->msr_autoload.host.nr)
3065 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
3066 if (vmx->msr_autoload.guest.nr)
3067 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);
3068
3069 preempt_disable();
3070
3071 vmx_prepare_switch_to_guest(vcpu);
3072
3073 /*
3074 * Induce a consistency check VMExit by clearing bit 1 in GUEST_RFLAGS,
3075 * which is reserved to '1' by hardware. GUEST_RFLAGS is guaranteed to
3076 * be written (by prepare_vmcs02()) before the "real" VMEnter, i.e.
3077 * there is no need to preserve other bits or save/restore the field.
3078 */
3079 vmcs_writel(GUEST_RFLAGS, 0);
3080
3081 cr3 = __get_current_cr3_fast();
3082 if (unlikely(cr3 != vmx->loaded_vmcs->host_state.cr3)) {
3083 vmcs_writel(HOST_CR3, cr3);
3084 vmx->loaded_vmcs->host_state.cr3 = cr3;
3085 }
3086
3087 cr4 = cr4_read_shadow();
3088 if (unlikely(cr4 != vmx->loaded_vmcs->host_state.cr4)) {
3089 vmcs_writel(HOST_CR4, cr4);
3090 vmx->loaded_vmcs->host_state.cr4 = cr4;
3091 }
3092
3093 vm_fail = __vmx_vcpu_run(vmx, (unsigned long *)&vcpu->arch.regs,
3094 __vmx_vcpu_run_flags(vmx));
3095
3096 if (vmx->msr_autoload.host.nr)
3097 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
3098 if (vmx->msr_autoload.guest.nr)
3099 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);
3100
3101 if (vm_fail) {
3102 u32 error = vmcs_read32(VM_INSTRUCTION_ERROR);
3103
3104 preempt_enable();
3105
3106 trace_kvm_nested_vmenter_failed(
3107 "early hardware check VM-instruction error: ", error);
3108 WARN_ON_ONCE(error != VMXERR_ENTRY_INVALID_CONTROL_FIELD);
3109 return 1;
3110 }
3111
3112 /*
3113 * VMExit clears RFLAGS.IF and DR7, even on a consistency check.
3114 */
3115 if (hw_breakpoint_active())
3116 set_debugreg(__this_cpu_read(cpu_dr7), 7);
3117 local_irq_enable();
3118 preempt_enable();
3119
3120 /*
3121 * A non-failing VMEntry means we somehow entered guest mode with
3122 * an illegal RIP, and that's just the tip of the iceberg. There
3123 * is no telling what memory has been modified or what state has
3124 * been exposed to unknown code. Hitting this all but guarantees
3125 * a (very critical) hardware issue.
3126 */
3127 WARN_ON(!(vmcs_read32(VM_EXIT_REASON) &
3128 VMX_EXIT_REASONS_FAILED_VMENTRY));
3129
3130 return 0;
3131 }
3132
3133 static bool nested_get_evmcs_page(struct kvm_vcpu *vcpu)
3134 {
3135 struct vcpu_vmx *vmx = to_vmx(vcpu);
3136
3137 /*
3138 * hv_evmcs may end up being not mapped after migration (when
3139 * L2 was running), map it here to make sure vmcs12 changes are
3140 * properly reflected.
3141 */
3142 if (vmx->nested.enlightened_vmcs_enabled &&
3143 vmx->nested.hv_evmcs_vmptr == EVMPTR_MAP_PENDING) {
3144 enum nested_evmptrld_status evmptrld_status =
3145 nested_vmx_handle_enlightened_vmptrld(vcpu, false);
3146
3147 if (evmptrld_status == EVMPTRLD_VMFAIL ||
3148 evmptrld_status == EVMPTRLD_ERROR)
3149 return false;
3150
3151 /*
3152 		 * Post migration, vmcs12 always provides the most up-to-date
3153 		 * information; copy it to the eVMCS upon entry.
3154 */
3155 vmx->nested.need_vmcs12_to_shadow_sync = true;
3156 }
3157
3158 return true;
3159 }
3160
3161 static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
3162 {
3163 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
3164 struct vcpu_vmx *vmx = to_vmx(vcpu);
3165 struct kvm_host_map *map;
3166 struct page *page;
3167 u64 hpa;
3168
3169 if (!vcpu->arch.pdptrs_from_userspace &&
3170 !nested_cpu_has_ept(vmcs12) && is_pae_paging(vcpu)) {
3171 /*
3172 * Reload the guest's PDPTRs since after a migration
3173 * the guest CR3 might be restored prior to setting the nested
3174 * state which can lead to a load of wrong PDPTRs.
3175 */
3176 if (CC(!load_pdptrs(vcpu, vcpu->arch.cr3)))
3177 return false;
3178 }
3179
3180
3181 if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
3182 /*
3183 * Translate L1 physical address to host physical
3184 * address for vmcs02. Keep the page pinned, so this
3185 * physical address remains valid. We keep a reference
3186 * to it so we can release it later.
3187 */
3188 if (vmx->nested.apic_access_page) { /* shouldn't happen */
3189 kvm_release_page_clean(vmx->nested.apic_access_page);
3190 vmx->nested.apic_access_page = NULL;
3191 }
3192 page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->apic_access_addr);
3193 if (!is_error_page(page)) {
3194 vmx->nested.apic_access_page = page;
3195 hpa = page_to_phys(vmx->nested.apic_access_page);
3196 vmcs_write64(APIC_ACCESS_ADDR, hpa);
3197 } else {
3198 pr_debug_ratelimited("%s: no backing 'struct page' for APIC-access address in vmcs12\n",
3199 __func__);
3200 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
3201 vcpu->run->internal.suberror =
3202 KVM_INTERNAL_ERROR_EMULATION;
3203 vcpu->run->internal.ndata = 0;
3204 return false;
3205 }
3206 }
3207
3208 if (nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)) {
3209 map = &vmx->nested.virtual_apic_map;
3210
3211 if (!kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->virtual_apic_page_addr), map)) {
3212 vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, pfn_to_hpa(map->pfn));
3213 } else if (nested_cpu_has(vmcs12, CPU_BASED_CR8_LOAD_EXITING) &&
3214 nested_cpu_has(vmcs12, CPU_BASED_CR8_STORE_EXITING) &&
3215 !nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
3216 /*
3217 * The processor will never use the TPR shadow, simply
3218 * clear the bit from the execution control. Such a
3219 * configuration is useless, but it happens in tests.
3220 * For any other configuration, failing the vm entry is
3221 * _not_ what the processor does but it's basically the
3222 * only possibility we have.
3223 */
3224 exec_controls_clearbit(vmx, CPU_BASED_TPR_SHADOW);
3225 } else {
3226 /*
3227 * Write an illegal value to VIRTUAL_APIC_PAGE_ADDR to
3228 * force VM-Entry to fail.
3229 */
3230 vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, INVALID_GPA);
3231 }
3232 }
3233
3234 if (nested_cpu_has_posted_intr(vmcs12)) {
3235 map = &vmx->nested.pi_desc_map;
3236
3237 if (!kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->posted_intr_desc_addr), map)) {
3238 vmx->nested.pi_desc =
3239 (struct pi_desc *)(((void *)map->hva) +
3240 offset_in_page(vmcs12->posted_intr_desc_addr));
3241 vmcs_write64(POSTED_INTR_DESC_ADDR,
3242 pfn_to_hpa(map->pfn) + offset_in_page(vmcs12->posted_intr_desc_addr));
3243 } else {
3244 /*
3245 * Defer the KVM_INTERNAL_EXIT until KVM tries to
3246 * access the contents of the VMCS12 posted interrupt
3247 * descriptor. (Note that KVM may do this when it
3248 * should not, per the architectural specification.)
3249 */
3250 vmx->nested.pi_desc = NULL;
3251 pin_controls_clearbit(vmx, PIN_BASED_POSTED_INTR);
3252 }
3253 }
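	/*
	 * Use the merged L0/L1 MSR bitmap only if it could be built;
	 * otherwise run L2 with MSR bitmaps disabled so that every RDMSR and
	 * WRMSR from L2 exits and is handled by KVM.
	 */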
3254 if (nested_vmx_prepare_msr_bitmap(vcpu, vmcs12))
3255 exec_controls_setbit(vmx, CPU_BASED_USE_MSR_BITMAPS);
3256 else
3257 exec_controls_clearbit(vmx, CPU_BASED_USE_MSR_BITMAPS);
3258
3259 return true;
3260 }
3261
3262 static bool vmx_get_nested_state_pages(struct kvm_vcpu *vcpu)
3263 {
3264 if (!nested_get_evmcs_page(vcpu)) {
3265 pr_debug_ratelimited("%s: enlightened vmptrld failed\n",
3266 __func__);
3267 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
3268 vcpu->run->internal.suberror =
3269 KVM_INTERNAL_ERROR_EMULATION;
3270 vcpu->run->internal.ndata = 0;
3271
3272 return false;
3273 }
3274
3275 if (is_guest_mode(vcpu) && !nested_get_vmcs12_pages(vcpu))
3276 return false;
3277
3278 return true;
3279 }
3280
3281 static int nested_vmx_write_pml_buffer(struct kvm_vcpu *vcpu, gpa_t gpa)
3282 {
3283 struct vmcs12 *vmcs12;
3284 struct vcpu_vmx *vmx = to_vmx(vcpu);
3285 gpa_t dst;
3286
3287 if (WARN_ON_ONCE(!is_guest_mode(vcpu)))
3288 return 0;
3289
3290 if (WARN_ON_ONCE(vmx->nested.pml_full))
3291 return 1;
3292
3293 /*
3294 * Check if PML is enabled for the nested guest. Whether eptp bit 6 is
3295 * set is already checked as part of A/D emulation.
3296 */
3297 vmcs12 = get_vmcs12(vcpu);
3298 if (!nested_cpu_has_pml(vmcs12))
3299 return 0;
3300
3301 if (vmcs12->guest_pml_index >= PML_ENTITY_NUM) {
3302 vmx->nested.pml_full = true;
3303 return 1;
3304 }
3305
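	/*
	 * Emulate the PML hardware on behalf of L1: write the 4KiB-aligned
	 * GPA of the dirtied page into the slot of L1's PML buffer selected
	 * by guest_pml_index, then decrement the index, mirroring what the
	 * CPU would do with PML enabled in vmcs12.
	 */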
3306 gpa &= ~0xFFFull;
3307 dst = vmcs12->pml_address + sizeof(u64) * vmcs12->guest_pml_index;
3308
3309 if (kvm_write_guest_page(vcpu->kvm, gpa_to_gfn(dst), &gpa,
3310 offset_in_page(dst), sizeof(gpa)))
3311 return 0;
3312
3313 vmcs12->guest_pml_index--;
3314
3315 return 0;
3316 }
3317
3318 /*
3319 * Intel's VMX Instruction Reference specifies a common set of prerequisites
3320 * for running VMX instructions (except VMXON, whose prerequisites are
3321 * slightly different). It also specifies what exception to inject otherwise.
3322 * Note that many of these exceptions have priority over VM exits, so they
3323 * don't have to be checked again here.
3324 */
3325 static int nested_vmx_check_permission(struct kvm_vcpu *vcpu)
3326 {
3327 if (!to_vmx(vcpu)->nested.vmxon) {
3328 kvm_queue_exception(vcpu, UD_VECTOR);
3329 return 0;
3330 }
3331
3332 if (vmx_get_cpl(vcpu)) {
3333 kvm_inject_gp(vcpu, 0);
3334 return 0;
3335 }
3336
3337 return 1;
3338 }
3339
3340 static u8 vmx_has_apicv_interrupt(struct kvm_vcpu *vcpu)
3341 {
3342 u8 rvi = vmx_get_rvi();
3343 u8 vppr = kvm_lapic_get_reg(vcpu->arch.apic, APIC_PROCPRI);
3344
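	/*
	 * Only the upper nibble matters: it encodes the interrupt priority
	 * class, and a pending virtual interrupt is recognized only when
	 * RVI's class is strictly higher than the processor priority (PPR)
	 * class, per the SDM's rules for evaluating pending virtual
	 * interrupts.
	 */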
3345 return ((rvi & 0xf0) > (vppr & 0xf0));
3346 }
3347
3348 static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
3349 struct vmcs12 *vmcs12);
3350
3351 /*
3352 * If from_vmentry is false, this is being called from state restore (either RSM
3353 * or KVM_SET_NESTED_STATE). Otherwise it's called from vmlaunch/vmresume.
3354 *
3355 * Returns:
3356 * NVMX_VMENTRY_SUCCESS: Entered VMX non-root mode
3357 * NVMX_VMENTRY_VMFAIL: Consistency check VMFail
3358 * NVMX_VMENTRY_VMEXIT: Consistency check VMExit
3359 * NVMX_VMENTRY_KVM_INTERNAL_ERROR: KVM internal error
3360 */
3361 enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu,
3362 bool from_vmentry)
3363 {
3364 struct vcpu_vmx *vmx = to_vmx(vcpu);
3365 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
3366 enum vm_entry_failure_code entry_failure_code;
3367 bool evaluate_pending_interrupts;
3368 union vmx_exit_reason exit_reason = {
3369 .basic = EXIT_REASON_INVALID_STATE,
3370 .failed_vmentry = 1,
3371 };
3372 u32 failed_index;
3373
3374 kvm_service_local_tlb_flush_requests(vcpu);
3375
3376 evaluate_pending_interrupts = exec_controls_get(vmx) &
3377 (CPU_BASED_INTR_WINDOW_EXITING | CPU_BASED_NMI_WINDOW_EXITING);
3378 if (likely(!evaluate_pending_interrupts) && kvm_vcpu_apicv_active(vcpu))
3379 evaluate_pending_interrupts |= vmx_has_apicv_interrupt(vcpu);
3380
3381 if (!vmx->nested.nested_run_pending ||
3382 !(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS))
3383 vmx->nested.vmcs01_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL);
3384 if (kvm_mpx_supported() &&
3385 (!vmx->nested.nested_run_pending ||
3386 !(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS)))
3387 vmx->nested.vmcs01_guest_bndcfgs = vmcs_read64(GUEST_BNDCFGS);
3388
3389 /*
3390 * Overwrite vmcs01.GUEST_CR3 with L1's CR3 if EPT is disabled *and*
3391 * nested early checks are disabled. In the event of a "late" VM-Fail,
3392 * i.e. a VM-Fail detected by hardware but not KVM, KVM must unwind its
3393 * software model to the pre-VMEntry host state. When EPT is disabled,
3394 * GUEST_CR3 holds KVM's shadow CR3, not L1's "real" CR3, which causes
3395 * nested_vmx_restore_host_state() to corrupt vcpu->arch.cr3. Stuffing
3396 * vmcs01.GUEST_CR3 results in the unwind naturally setting arch.cr3 to
3397 * the correct value. Smashing vmcs01.GUEST_CR3 is safe because nested
3398 * VM-Exits, and the unwind, reset KVM's MMU, i.e. vmcs01.GUEST_CR3 is
3399 * guaranteed to be overwritten with a shadow CR3 prior to re-entering
3400 * L1. Don't stuff vmcs01.GUEST_CR3 when using nested early checks as
3401 * KVM modifies vcpu->arch.cr3 if and only if the early hardware checks
3402 * pass, and early VM-Fails do not reset KVM's MMU, i.e. the VM-Fail
3403 * path would need to manually save/restore vmcs01.GUEST_CR3.
3404 */
3405 if (!enable_ept && !nested_early_check)
3406 vmcs_writel(GUEST_CR3, vcpu->arch.cr3);
3407
3408 vmx_switch_vmcs(vcpu, &vmx->nested.vmcs02);
3409
3410 prepare_vmcs02_early(vmx, &vmx->vmcs01, vmcs12);
3411
3412 if (from_vmentry) {
3413 if (unlikely(!nested_get_vmcs12_pages(vcpu))) {
3414 vmx_switch_vmcs(vcpu, &vmx->vmcs01);
3415 return NVMX_VMENTRY_KVM_INTERNAL_ERROR;
3416 }
3417
3418 if (nested_vmx_check_vmentry_hw(vcpu)) {
3419 vmx_switch_vmcs(vcpu, &vmx->vmcs01);
3420 return NVMX_VMENTRY_VMFAIL;
3421 }
3422
3423 if (nested_vmx_check_guest_state(vcpu, vmcs12,
3424 &entry_failure_code)) {
3425 exit_reason.basic = EXIT_REASON_INVALID_STATE;
3426 vmcs12->exit_qualification = entry_failure_code;
3427 goto vmentry_fail_vmexit;
3428 }
3429 }
3430
3431 enter_guest_mode(vcpu);
3432
3433 if (prepare_vmcs02(vcpu, vmcs12, from_vmentry, &entry_failure_code)) {
3434 exit_reason.basic = EXIT_REASON_INVALID_STATE;
3435 vmcs12->exit_qualification = entry_failure_code;
3436 goto vmentry_fail_vmexit_guest_mode;
3437 }
3438
3439 if (from_vmentry) {
3440 failed_index = nested_vmx_load_msr(vcpu,
3441 vmcs12->vm_entry_msr_load_addr,
3442 vmcs12->vm_entry_msr_load_count);
3443 if (failed_index) {
3444 exit_reason.basic = EXIT_REASON_MSR_LOAD_FAIL;
3445 vmcs12->exit_qualification = failed_index;
3446 goto vmentry_fail_vmexit_guest_mode;
3447 }
3448 } else {
3449 /*
3450 * The MMU is not initialized to point at the right entities yet and
3451 * "get pages" would need to read data from the guest (i.e. we will
3452 * need to perform gpa to hpa translation). Request a call
3453 * to nested_get_vmcs12_pages before the next VM-entry. The MSRs
3454 * have already been set at vmentry time and should not be reset.
3455 */
3456 kvm_make_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
3457 }
3458
3459 /*
3460 * If L1 had a pending IRQ/NMI until it executed
3461 * VMLAUNCH/VMRESUME which wasn't delivered because it was
3462 * disallowed (e.g. interrupts disabled), L0 needs to
3463 	 * evaluate whether this pending event should cause an exit from L2
3464 	 * to L1 or be delivered directly to L2 (e.g. in case L1 doesn't
3465 	 * intercept EXTERNAL_INTERRUPT).
3466 *
3467 * Usually this would be handled by the processor noticing an
3468 * IRQ/NMI window request, or checking RVI during evaluation of
3469 * pending virtual interrupts. However, this setting was done
3470 * on VMCS01 and now VMCS02 is active instead. Thus, we force L0
3471 * to perform pending event evaluation by requesting a KVM_REQ_EVENT.
3472 */
3473 if (unlikely(evaluate_pending_interrupts))
3474 kvm_make_request(KVM_REQ_EVENT, vcpu);
3475
3476 /*
3477 * Do not start the preemption timer hrtimer until after we know
3478 * we are successful, so that only nested_vmx_vmexit needs to cancel
3479 * the timer.
3480 */
3481 vmx->nested.preemption_timer_expired = false;
3482 if (nested_cpu_has_preemption_timer(vmcs12)) {
3483 u64 timer_value = vmx_calc_preemption_timer_value(vcpu);
3484 vmx_start_preemption_timer(vcpu, timer_value);
3485 }
3486
3487 /*
3488 * Note no nested_vmx_succeed or nested_vmx_fail here. At this point
3489 * we are no longer running L1, and VMLAUNCH/VMRESUME has not yet
3490 * returned as far as L1 is concerned. It will only return (and set
3491 * the success flag) when L2 exits (see nested_vmx_vmexit()).
3492 */
3493 return NVMX_VMENTRY_SUCCESS;
3494
3495 /*
3496 * A failed consistency check that leads to a VMExit during L1's
3497 * VMEnter to L2 is a variation of a normal VMexit, as explained in
3498 * 26.7 "VM-entry failures during or after loading guest state".
3499 */
3500 vmentry_fail_vmexit_guest_mode:
3501 if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETTING)
3502 vcpu->arch.tsc_offset -= vmcs12->tsc_offset;
3503 leave_guest_mode(vcpu);
3504
3505 vmentry_fail_vmexit:
3506 vmx_switch_vmcs(vcpu, &vmx->vmcs01);
3507
3508 if (!from_vmentry)
3509 return NVMX_VMENTRY_VMEXIT;
3510
3511 load_vmcs12_host_state(vcpu, vmcs12);
3512 vmcs12->vm_exit_reason = exit_reason.full;
3513 if (enable_shadow_vmcs || evmptr_is_valid(vmx->nested.hv_evmcs_vmptr))
3514 vmx->nested.need_vmcs12_to_shadow_sync = true;
3515 return NVMX_VMENTRY_VMEXIT;
3516 }
3517
3518 /*
3519 * nested_vmx_run() handles a nested entry, i.e., a VMLAUNCH or VMRESUME on L1
3520 * for running an L2 nested guest.
3521 */
3522 static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
3523 {
3524 struct vmcs12 *vmcs12;
3525 enum nvmx_vmentry_status status;
3526 struct vcpu_vmx *vmx = to_vmx(vcpu);
3527 u32 interrupt_shadow = vmx_get_interrupt_shadow(vcpu);
3528 enum nested_evmptrld_status evmptrld_status;
3529
3530 if (!nested_vmx_check_permission(vcpu))
3531 return 1;
3532
3533 evmptrld_status = nested_vmx_handle_enlightened_vmptrld(vcpu, launch);
3534 if (evmptrld_status == EVMPTRLD_ERROR) {
3535 kvm_queue_exception(vcpu, UD_VECTOR);
3536 return 1;
3537 }
3538
3539 kvm_pmu_trigger_event(vcpu, PERF_COUNT_HW_BRANCH_INSTRUCTIONS);
3540
3541 if (CC(evmptrld_status == EVMPTRLD_VMFAIL))
3542 return nested_vmx_failInvalid(vcpu);
3543
3544 if (CC(!evmptr_is_valid(vmx->nested.hv_evmcs_vmptr) &&
3545 vmx->nested.current_vmptr == INVALID_GPA))
3546 return nested_vmx_failInvalid(vcpu);
3547
3548 vmcs12 = get_vmcs12(vcpu);
3549
3550 /*
3551 * Can't VMLAUNCH or VMRESUME a shadow VMCS. Despite the fact
3552 * that there *is* a valid VMCS pointer, RFLAGS.CF is set
3553 * rather than RFLAGS.ZF, and no error number is stored to the
3554 * VM-instruction error field.
3555 */
3556 if (CC(vmcs12->hdr.shadow_vmcs))
3557 return nested_vmx_failInvalid(vcpu);
3558
3559 if (evmptr_is_valid(vmx->nested.hv_evmcs_vmptr)) {
3560 copy_enlightened_to_vmcs12(vmx, vmx->nested.hv_evmcs->hv_clean_fields);
3561 /* Enlightened VMCS doesn't have launch state */
3562 vmcs12->launch_state = !launch;
3563 } else if (enable_shadow_vmcs) {
3564 copy_shadow_to_vmcs12(vmx);
3565 }
3566
3567 /*
3568 * The nested entry process starts with enforcing various prerequisites
3569 	 * on vmcs12 as required by the Intel SDM, and acts appropriately when
3570 	 * they fail: as the SDM explains, some conditions should cause the
3571 * instruction to fail, while others will cause the instruction to seem
3572 * to succeed, but return an EXIT_REASON_INVALID_STATE.
3573 * To speed up the normal (success) code path, we should avoid checking
3574 * for misconfigurations which will anyway be caught by the processor
3575 * when using the merged vmcs02.
3576 */
3577 if (CC(interrupt_shadow & KVM_X86_SHADOW_INT_MOV_SS))
3578 return nested_vmx_fail(vcpu, VMXERR_ENTRY_EVENTS_BLOCKED_BY_MOV_SS);
3579
3580 if (CC(vmcs12->launch_state == launch))
3581 return nested_vmx_fail(vcpu,
3582 launch ? VMXERR_VMLAUNCH_NONCLEAR_VMCS
3583 : VMXERR_VMRESUME_NONLAUNCHED_VMCS);
3584
3585 if (nested_vmx_check_controls(vcpu, vmcs12))
3586 return nested_vmx_fail(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
3587
3588 if (nested_vmx_check_address_space_size(vcpu, vmcs12))
3589 return nested_vmx_fail(vcpu, VMXERR_ENTRY_INVALID_HOST_STATE_FIELD);
3590
3591 if (nested_vmx_check_host_state(vcpu, vmcs12))
3592 return nested_vmx_fail(vcpu, VMXERR_ENTRY_INVALID_HOST_STATE_FIELD);
3593
3594 /*
3595 * We're finally done with prerequisite checking, and can start with
3596 * the nested entry.
3597 */
3598 vmx->nested.nested_run_pending = 1;
3599 vmx->nested.has_preemption_timer_deadline = false;
3600 status = nested_vmx_enter_non_root_mode(vcpu, true);
3601 if (unlikely(status != NVMX_VMENTRY_SUCCESS))
3602 goto vmentry_failed;
3603
3604 /* Emulate processing of posted interrupts on VM-Enter. */
3605 if (nested_cpu_has_posted_intr(vmcs12) &&
3606 kvm_apic_has_interrupt(vcpu) == vmx->nested.posted_intr_nv) {
3607 vmx->nested.pi_pending = true;
3608 kvm_make_request(KVM_REQ_EVENT, vcpu);
3609 kvm_apic_clear_irr(vcpu, vmx->nested.posted_intr_nv);
3610 }
3611
3612 /* Hide L1D cache contents from the nested guest. */
3613 vmx->vcpu.arch.l1tf_flush_l1d = true;
3614
3615 /*
3616 * Must happen outside of nested_vmx_enter_non_root_mode() as it will
3617 * also be used as part of restoring nVMX state for
3618 * snapshot restore (migration).
3619 *
3620 	 * In this flow, it is assumed that the vmcs12 cache was
3621 	 * transferred as part of the captured nVMX state and should
3622 	 * therefore not be read from guest memory (which may not
3623 	 * exist on the destination host yet).
3624 */
3625 nested_cache_shadow_vmcs12(vcpu, vmcs12);
3626
3627 switch (vmcs12->guest_activity_state) {
3628 case GUEST_ACTIVITY_HLT:
3629 /*
3630 * If we're entering a halted L2 vcpu and the L2 vcpu won't be
3631 * awakened by event injection or by an NMI-window VM-exit or
3632 * by an interrupt-window VM-exit, halt the vcpu.
3633 */
3634 if (!(vmcs12->vm_entry_intr_info_field & INTR_INFO_VALID_MASK) &&
3635 !nested_cpu_has(vmcs12, CPU_BASED_NMI_WINDOW_EXITING) &&
3636 !(nested_cpu_has(vmcs12, CPU_BASED_INTR_WINDOW_EXITING) &&
3637 (vmcs12->guest_rflags & X86_EFLAGS_IF))) {
3638 vmx->nested.nested_run_pending = 0;
3639 return kvm_emulate_halt_noskip(vcpu);
3640 }
3641 break;
3642 case GUEST_ACTIVITY_WAIT_SIPI:
3643 vmx->nested.nested_run_pending = 0;
3644 vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED;
3645 break;
3646 default:
3647 break;
3648 }
3649
3650 return 1;
3651
3652 vmentry_failed:
3653 vmx->nested.nested_run_pending = 0;
3654 if (status == NVMX_VMENTRY_KVM_INTERNAL_ERROR)
3655 return 0;
3656 if (status == NVMX_VMENTRY_VMEXIT)
3657 return 1;
3658 WARN_ON_ONCE(status != NVMX_VMENTRY_VMFAIL);
3659 return nested_vmx_fail(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
3660 }
3661
3662 /*
3663 * On a nested exit from L2 to L1, vmcs12.guest_cr0 might not be up-to-date
3664 * because L2 may have changed some cr0 bits directly (CR0_GUEST_HOST_MASK).
3665 * This function returns the new value we should put in vmcs12.guest_cr0.
3666 * It's not enough to just return the vmcs02 GUEST_CR0. Rather,
3667 * 1. Bits that neither L0 nor L1 trapped, were set directly by L2 and are now
3668 * available in vmcs02 GUEST_CR0. (Note: It's enough to check that L0
3669 * didn't trap the bit, because if L1 did, so would L0).
3670 * 2. Bits that L1 asked to trap (and therefore L0 also did) could not have
3671 * been modified by L2, and L1 knows it. So just leave the old value of
3672 * the bit from vmcs12.guest_cr0. Note that the bit from vmcs02 GUEST_CR0
3673 * isn't relevant, because if L0 traps this bit it can set it to anything.
3674 * 3. Bits that L1 didn't trap, but L0 did. L1 believes the guest could have
3675 * changed these bits, and therefore they need to be updated, but L0
3676 * didn't necessarily allow them to be changed in GUEST_CR0 - and rather
3677 * put them in vmcs02 CR0_READ_SHADOW. So take these bits from there.
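 *
 * Illustrative example (not tied to any specific caller): suppose L2
 * flipped CR0.TS.  If neither L0 nor L1 trap TS, the new value is taken
 * straight from vmcs02 GUEST_CR0 (case 1).  If L1 traps TS, the old
 * vmcs12 value is kept (case 2).  If only L0 traps TS, the value L2
 * attempted to write is reflected in vmcs02 CR0_READ_SHADOW and is taken
 * from there (case 3).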
3678 */
3679 static inline unsigned long
3680 vmcs12_guest_cr0(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
3681 {
3682 return
3683 /*1*/ (vmcs_readl(GUEST_CR0) & vcpu->arch.cr0_guest_owned_bits) |
3684 /*2*/ (vmcs12->guest_cr0 & vmcs12->cr0_guest_host_mask) |
3685 /*3*/ (vmcs_readl(CR0_READ_SHADOW) & ~(vmcs12->cr0_guest_host_mask |
3686 vcpu->arch.cr0_guest_owned_bits));
3687 }
3688
3689 static inline unsigned long
3690 vmcs12_guest_cr4(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
3691 {
3692 return
3693 /*1*/ (vmcs_readl(GUEST_CR4) & vcpu->arch.cr4_guest_owned_bits) |
3694 /*2*/ (vmcs12->guest_cr4 & vmcs12->cr4_guest_host_mask) |
3695 /*3*/ (vmcs_readl(CR4_READ_SHADOW) & ~(vmcs12->cr4_guest_host_mask |
3696 vcpu->arch.cr4_guest_owned_bits));
3697 }
3698
3699 static void vmcs12_save_pending_event(struct kvm_vcpu *vcpu,
3700 struct vmcs12 *vmcs12,
3701 u32 vm_exit_reason, u32 exit_intr_info)
3702 {
3703 u32 idt_vectoring;
3704 unsigned int nr;
3705
3706 /*
3707 * Per the SDM, VM-Exits due to double and triple faults are never
3708 * considered to occur during event delivery, even if the double/triple
3709 * fault is the result of an escalating vectoring issue.
3710 *
3711 * Note, the SDM qualifies the double fault behavior with "The original
3712 * event results in a double-fault exception". It's unclear why the
3713 * qualification exists since exits due to double fault can occur only
3714 * while vectoring a different exception (injected events are never
3715 * subject to interception), i.e. there's _always_ an original event.
3716 *
3717 * The SDM also uses NMI as a confusing example for the "original event
3718 * causes the VM exit directly" clause. NMI isn't special in any way,
3719 * the same rule applies to all events that cause an exit directly.
3720 * NMI is an odd choice for the example because NMIs can only occur on
3721 * instruction boundaries, i.e. they _can't_ occur during vectoring.
3722 */
3723 if ((u16)vm_exit_reason == EXIT_REASON_TRIPLE_FAULT ||
3724 ((u16)vm_exit_reason == EXIT_REASON_EXCEPTION_NMI &&
3725 is_double_fault(exit_intr_info))) {
3726 vmcs12->idt_vectoring_info_field = 0;
3727 } else if (vcpu->arch.exception.injected) {
3728 nr = vcpu->arch.exception.nr;
3729 idt_vectoring = nr | VECTORING_INFO_VALID_MASK;
3730
3731 if (kvm_exception_is_soft(nr)) {
3732 vmcs12->vm_exit_instruction_len =
3733 vcpu->arch.event_exit_inst_len;
3734 idt_vectoring |= INTR_TYPE_SOFT_EXCEPTION;
3735 } else
3736 idt_vectoring |= INTR_TYPE_HARD_EXCEPTION;
3737
3738 if (vcpu->arch.exception.has_error_code) {
3739 idt_vectoring |= VECTORING_INFO_DELIVER_CODE_MASK;
3740 vmcs12->idt_vectoring_error_code =
3741 vcpu->arch.exception.error_code;
3742 }
3743
3744 vmcs12->idt_vectoring_info_field = idt_vectoring;
3745 } else if (vcpu->arch.nmi_injected) {
3746 vmcs12->idt_vectoring_info_field =
3747 INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK | NMI_VECTOR;
3748 } else if (vcpu->arch.interrupt.injected) {
3749 nr = vcpu->arch.interrupt.nr;
3750 idt_vectoring = nr | VECTORING_INFO_VALID_MASK;
3751
3752 if (vcpu->arch.interrupt.soft) {
3753 idt_vectoring |= INTR_TYPE_SOFT_INTR;
3754 vmcs12->vm_entry_instruction_len =
3755 vcpu->arch.event_exit_inst_len;
3756 } else
3757 idt_vectoring |= INTR_TYPE_EXT_INTR;
3758
3759 vmcs12->idt_vectoring_info_field = idt_vectoring;
3760 } else {
3761 vmcs12->idt_vectoring_info_field = 0;
3762 }
3763 }
3764
3765
3766 void nested_mark_vmcs12_pages_dirty(struct kvm_vcpu *vcpu)
3767 {
3768 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
3769 gfn_t gfn;
3770
3771 /*
3772 * Don't need to mark the APIC access page dirty; it is never
3773 * written to by the CPU during APIC virtualization.
3774 */
3775
3776 if (nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)) {
3777 gfn = vmcs12->virtual_apic_page_addr >> PAGE_SHIFT;
3778 kvm_vcpu_mark_page_dirty(vcpu, gfn);
3779 }
3780
3781 if (nested_cpu_has_posted_intr(vmcs12)) {
3782 gfn = vmcs12->posted_intr_desc_addr >> PAGE_SHIFT;
3783 kvm_vcpu_mark_page_dirty(vcpu, gfn);
3784 }
3785 }
3786
3787 static int vmx_complete_nested_posted_interrupt(struct kvm_vcpu *vcpu)
3788 {
3789 struct vcpu_vmx *vmx = to_vmx(vcpu);
3790 int max_irr;
3791 void *vapic_page;
3792 u16 status;
3793
3794 if (!vmx->nested.pi_pending)
3795 return 0;
3796
3797 if (!vmx->nested.pi_desc)
3798 goto mmio_needed;
3799
3800 vmx->nested.pi_pending = false;
3801
3802 if (!pi_test_and_clear_on(vmx->nested.pi_desc))
3803 return 0;
3804
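	/*
	 * Scan the 256-bit posted-interrupt request (PIR) bitmap for the
	 * highest pending vector (find_last_bit() returns 256 if none is
	 * set).  If one is found, merge the PIR into the virtual-APIC page's
	 * IRR and, if needed, raise RVI (the low byte of GUEST_INTR_STATUS)
	 * so the pending vector is evaluated on the next VM-Enter.
	 */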
3805 max_irr = find_last_bit((unsigned long *)vmx->nested.pi_desc->pir, 256);
3806 if (max_irr != 256) {
3807 vapic_page = vmx->nested.virtual_apic_map.hva;
3808 if (!vapic_page)
3809 goto mmio_needed;
3810
3811 __kvm_apic_update_irr(vmx->nested.pi_desc->pir,
3812 vapic_page, &max_irr);
3813 status = vmcs_read16(GUEST_INTR_STATUS);
3814 if ((u8)max_irr > ((u8)status & 0xff)) {
3815 status &= ~0xff;
3816 status |= (u8)max_irr;
3817 vmcs_write16(GUEST_INTR_STATUS, status);
3818 }
3819 }
3820
3821 nested_mark_vmcs12_pages_dirty(vcpu);
3822 return 0;
3823
3824 mmio_needed:
3825 kvm_handle_memory_failure(vcpu, X86EMUL_IO_NEEDED, NULL);
3826 return -ENXIO;
3827 }
3828
3829 static void nested_vmx_inject_exception_vmexit(struct kvm_vcpu *vcpu,
3830 unsigned long exit_qual)
3831 {
3832 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
3833 unsigned int nr = vcpu->arch.exception.nr;
3834 u32 intr_info = nr | INTR_INFO_VALID_MASK;
3835
3836 if (vcpu->arch.exception.has_error_code) {
3837 vmcs12->vm_exit_intr_error_code = vcpu->arch.exception.error_code;
3838 intr_info |= INTR_INFO_DELIVER_CODE_MASK;
3839 }
3840
3841 if (kvm_exception_is_soft(nr))
3842 intr_info |= INTR_TYPE_SOFT_EXCEPTION;
3843 else
3844 intr_info |= INTR_TYPE_HARD_EXCEPTION;
3845
3846 if (!(vmcs12->idt_vectoring_info_field & VECTORING_INFO_VALID_MASK) &&
3847 vmx_get_nmi_mask(vcpu))
3848 intr_info |= INTR_INFO_UNBLOCK_NMI;
3849
3850 nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI, intr_info, exit_qual);
3851 }
3852
3853 /*
3854 * Returns true if a debug trap is pending delivery.
3855 *
3856 * In KVM, debug traps bear an exception payload. As such, the class of a #DB
3857 * exception may be inferred from the presence of an exception payload.
3858 */
3859 static inline bool vmx_pending_dbg_trap(struct kvm_vcpu *vcpu)
3860 {
3861 return vcpu->arch.exception.pending &&
3862 vcpu->arch.exception.nr == DB_VECTOR &&
3863 vcpu->arch.exception.payload;
3864 }
3865
3866 /*
3867 * Certain VM-exits set the 'pending debug exceptions' field to indicate a
3868 * recognized #DB (data or single-step) that has yet to be delivered. Since KVM
3869 * represents these debug traps with a payload that is said to be compatible
3870 * with the 'pending debug exceptions' field, write the payload to the VMCS
3871 * field if a VM-exit is delivered before the debug trap.
3872 */
3873 static void nested_vmx_update_pending_dbg(struct kvm_vcpu *vcpu)
3874 {
3875 if (vmx_pending_dbg_trap(vcpu))
3876 vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS,
3877 vcpu->arch.exception.payload);
3878 }
3879
3880 static bool nested_vmx_preemption_timer_pending(struct kvm_vcpu *vcpu)
3881 {
3882 return nested_cpu_has_preemption_timer(get_vmcs12(vcpu)) &&
3883 to_vmx(vcpu)->nested.preemption_timer_expired;
3884 }
3885
3886 static int vmx_check_nested_events(struct kvm_vcpu *vcpu)
3887 {
3888 struct vcpu_vmx *vmx = to_vmx(vcpu);
3889 unsigned long exit_qual;
3890 bool block_nested_events =
3891 vmx->nested.nested_run_pending || kvm_event_needs_reinjection(vcpu);
3892 bool mtf_pending = vmx->nested.mtf_pending;
3893 struct kvm_lapic *apic = vcpu->arch.apic;
3894
3895 /*
3896 * Clear the MTF state. If a higher priority VM-exit is delivered first,
3897 * this state is discarded.
3898 */
3899 if (!block_nested_events)
3900 vmx->nested.mtf_pending = false;
3901
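	/*
	 * The checks below roughly follow architectural event priority:
	 * INIT and SIPI first, then exceptions other than debug traps, MTF,
	 * the remaining (debug trap) exceptions, the VMX preemption timer,
	 * SMI, NMI, and finally external interrupts.
	 */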
3902 if (lapic_in_kernel(vcpu) &&
3903 test_bit(KVM_APIC_INIT, &apic->pending_events)) {
3904 if (block_nested_events)
3905 return -EBUSY;
3906 nested_vmx_update_pending_dbg(vcpu);
3907 clear_bit(KVM_APIC_INIT, &apic->pending_events);
3908 if (vcpu->arch.mp_state != KVM_MP_STATE_INIT_RECEIVED)
3909 nested_vmx_vmexit(vcpu, EXIT_REASON_INIT_SIGNAL, 0, 0);
3910 return 0;
3911 }
3912
3913 if (lapic_in_kernel(vcpu) &&
3914 test_bit(KVM_APIC_SIPI, &apic->pending_events)) {
3915 if (block_nested_events)
3916 return -EBUSY;
3917
3918 clear_bit(KVM_APIC_SIPI, &apic->pending_events);
3919 if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED)
3920 nested_vmx_vmexit(vcpu, EXIT_REASON_SIPI_SIGNAL, 0,
3921 apic->sipi_vector & 0xFFUL);
3922 return 0;
3923 }
3924
3925 /*
3926 * Process any exceptions that are not debug traps before MTF.
3927 *
3928 * Note that only a pending nested run can block a pending exception.
3929 * Otherwise an injected NMI/interrupt should either be
3930 * lost or delivered to the nested hypervisor in the IDT_VECTORING_INFO,
3931 * while delivering the pending exception.
3932 */
3933
3934 if (vcpu->arch.exception.pending && !vmx_pending_dbg_trap(vcpu)) {
3935 if (vmx->nested.nested_run_pending)
3936 return -EBUSY;
3937 if (!nested_vmx_check_exception(vcpu, &exit_qual))
3938 goto no_vmexit;
3939 nested_vmx_inject_exception_vmexit(vcpu, exit_qual);
3940 return 0;
3941 }
3942
3943 if (mtf_pending) {
3944 if (block_nested_events)
3945 return -EBUSY;
3946 nested_vmx_update_pending_dbg(vcpu);
3947 nested_vmx_vmexit(vcpu, EXIT_REASON_MONITOR_TRAP_FLAG, 0, 0);
3948 return 0;
3949 }
3950
3951 if (vcpu->arch.exception.pending) {
3952 if (vmx->nested.nested_run_pending)
3953 return -EBUSY;
3954 if (!nested_vmx_check_exception(vcpu, &exit_qual))
3955 goto no_vmexit;
3956 nested_vmx_inject_exception_vmexit(vcpu, exit_qual);
3957 return 0;
3958 }
3959
3960 if (nested_vmx_preemption_timer_pending(vcpu)) {
3961 if (block_nested_events)
3962 return -EBUSY;
3963 nested_vmx_vmexit(vcpu, EXIT_REASON_PREEMPTION_TIMER, 0, 0);
3964 return 0;
3965 }
3966
3967 if (vcpu->arch.smi_pending && !is_smm(vcpu)) {
3968 if (block_nested_events)
3969 return -EBUSY;
3970 goto no_vmexit;
3971 }
3972
3973 if (vcpu->arch.nmi_pending && !vmx_nmi_blocked(vcpu)) {
3974 if (block_nested_events)
3975 return -EBUSY;
3976 if (!nested_exit_on_nmi(vcpu))
3977 goto no_vmexit;
3978
3979 nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI,
3980 NMI_VECTOR | INTR_TYPE_NMI_INTR |
3981 INTR_INFO_VALID_MASK, 0);
3982 /*
3983 * The NMI-triggered VM exit counts as injection:
3984 * clear this one and block further NMIs.
3985 */
3986 vcpu->arch.nmi_pending = 0;
3987 vmx_set_nmi_mask(vcpu, true);
3988 return 0;
3989 }
3990
3991 if (kvm_cpu_has_interrupt(vcpu) && !vmx_interrupt_blocked(vcpu)) {
3992 if (block_nested_events)
3993 return -EBUSY;
3994 if (!nested_exit_on_intr(vcpu))
3995 goto no_vmexit;
3996 nested_vmx_vmexit(vcpu, EXIT_REASON_EXTERNAL_INTERRUPT, 0, 0);
3997 return 0;
3998 }
3999
4000 no_vmexit:
4001 return vmx_complete_nested_posted_interrupt(vcpu);
4002 }
4003
4004 static u32 vmx_get_preemption_timer_value(struct kvm_vcpu *vcpu)
4005 {
4006 ktime_t remaining =
4007 hrtimer_get_remaining(&to_vmx(vcpu)->nested.preemption_timer);
4008 u64 value;
4009
4010 if (ktime_to_ns(remaining) <= 0)
4011 return 0;
4012
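	/*
	 * Convert the remaining wall-clock time to guest TSC cycles
	 * (ns * tsc_khz / 10^6), then scale down to preemption-timer ticks;
	 * KVM advertises a timer rate of TSC >> 5 via IA32_VMX_MISC
	 * (VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE).
	 */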
4013 value = ktime_to_ns(remaining) * vcpu->arch.virtual_tsc_khz;
4014 do_div(value, 1000000);
4015 return value >> VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE;
4016 }
4017
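/*
 * Guest-state fields handled by sync_vmcs02_to_vmcs12_rare(); syncing them
 * from vmcs02 to vmcs12 is deferred when possible because L1 rarely reads
 * them.
 */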
4018 static bool is_vmcs12_ext_field(unsigned long field)
4019 {
4020 switch (field) {
4021 case GUEST_ES_SELECTOR:
4022 case GUEST_CS_SELECTOR:
4023 case GUEST_SS_SELECTOR:
4024 case GUEST_DS_SELECTOR:
4025 case GUEST_FS_SELECTOR:
4026 case GUEST_GS_SELECTOR:
4027 case GUEST_LDTR_SELECTOR:
4028 case GUEST_TR_SELECTOR:
4029 case GUEST_ES_LIMIT:
4030 case GUEST_CS_LIMIT:
4031 case GUEST_SS_LIMIT:
4032 case GUEST_DS_LIMIT:
4033 case GUEST_FS_LIMIT:
4034 case GUEST_GS_LIMIT:
4035 case GUEST_LDTR_LIMIT:
4036 case GUEST_TR_LIMIT:
4037 case GUEST_GDTR_LIMIT:
4038 case GUEST_IDTR_LIMIT:
4039 case GUEST_ES_AR_BYTES:
4040 case GUEST_DS_AR_BYTES:
4041 case GUEST_FS_AR_BYTES:
4042 case GUEST_GS_AR_BYTES:
4043 case GUEST_LDTR_AR_BYTES:
4044 case GUEST_TR_AR_BYTES:
4045 case GUEST_ES_BASE:
4046 case GUEST_CS_BASE:
4047 case GUEST_SS_BASE:
4048 case GUEST_DS_BASE:
4049 case GUEST_FS_BASE:
4050 case GUEST_GS_BASE:
4051 case GUEST_LDTR_BASE:
4052 case GUEST_TR_BASE:
4053 case GUEST_GDTR_BASE:
4054 case GUEST_IDTR_BASE:
4055 case GUEST_PENDING_DBG_EXCEPTIONS:
4056 case GUEST_BNDCFGS:
4057 return true;
4058 default:
4059 break;
4060 }
4061
4062 return false;
4063 }
4064
4065 static void sync_vmcs02_to_vmcs12_rare(struct kvm_vcpu *vcpu,
4066 struct vmcs12 *vmcs12)
4067 {
4068 struct vcpu_vmx *vmx = to_vmx(vcpu);
4069
4070 vmcs12->guest_es_selector = vmcs_read16(GUEST_ES_SELECTOR);
4071 vmcs12->guest_cs_selector = vmcs_read16(GUEST_CS_SELECTOR);
4072 vmcs12->guest_ss_selector = vmcs_read16(GUEST_SS_SELECTOR);
4073 vmcs12->guest_ds_selector = vmcs_read16(GUEST_DS_SELECTOR);
4074 vmcs12->guest_fs_selector = vmcs_read16(GUEST_FS_SELECTOR);
4075 vmcs12->guest_gs_selector = vmcs_read16(GUEST_GS_SELECTOR);
4076 vmcs12->guest_ldtr_selector = vmcs_read16(GUEST_LDTR_SELECTOR);
4077 vmcs12->guest_tr_selector = vmcs_read16(GUEST_TR_SELECTOR);
4078 vmcs12->guest_es_limit = vmcs_read32(GUEST_ES_LIMIT);
4079 vmcs12->guest_cs_limit = vmcs_read32(GUEST_CS_LIMIT);
4080 vmcs12->guest_ss_limit = vmcs_read32(GUEST_SS_LIMIT);
4081 vmcs12->guest_ds_limit = vmcs_read32(GUEST_DS_LIMIT);
4082 vmcs12->guest_fs_limit = vmcs_read32(GUEST_FS_LIMIT);
4083 vmcs12->guest_gs_limit = vmcs_read32(GUEST_GS_LIMIT);
4084 vmcs12->guest_ldtr_limit = vmcs_read32(GUEST_LDTR_LIMIT);
4085 vmcs12->guest_tr_limit = vmcs_read32(GUEST_TR_LIMIT);
4086 vmcs12->guest_gdtr_limit = vmcs_read32(GUEST_GDTR_LIMIT);
4087 vmcs12->guest_idtr_limit = vmcs_read32(GUEST_IDTR_LIMIT);
4088 vmcs12->guest_es_ar_bytes = vmcs_read32(GUEST_ES_AR_BYTES);
4089 vmcs12->guest_ds_ar_bytes = vmcs_read32(GUEST_DS_AR_BYTES);
4090 vmcs12->guest_fs_ar_bytes = vmcs_read32(GUEST_FS_AR_BYTES);
4091 vmcs12->guest_gs_ar_bytes = vmcs_read32(GUEST_GS_AR_BYTES);
4092 vmcs12->guest_ldtr_ar_bytes = vmcs_read32(GUEST_LDTR_AR_BYTES);
4093 vmcs12->guest_tr_ar_bytes = vmcs_read32(GUEST_TR_AR_BYTES);
4094 vmcs12->guest_es_base = vmcs_readl(GUEST_ES_BASE);
4095 vmcs12->guest_cs_base = vmcs_readl(GUEST_CS_BASE);
4096 vmcs12->guest_ss_base = vmcs_readl(GUEST_SS_BASE);
4097 vmcs12->guest_ds_base = vmcs_readl(GUEST_DS_BASE);
4098 vmcs12->guest_fs_base = vmcs_readl(GUEST_FS_BASE);
4099 vmcs12->guest_gs_base = vmcs_readl(GUEST_GS_BASE);
4100 vmcs12->guest_ldtr_base = vmcs_readl(GUEST_LDTR_BASE);
4101 vmcs12->guest_tr_base = vmcs_readl(GUEST_TR_BASE);
4102 vmcs12->guest_gdtr_base = vmcs_readl(GUEST_GDTR_BASE);
4103 vmcs12->guest_idtr_base = vmcs_readl(GUEST_IDTR_BASE);
4104 vmcs12->guest_pending_dbg_exceptions =
4105 vmcs_readl(GUEST_PENDING_DBG_EXCEPTIONS);
4106 if (kvm_mpx_supported())
4107 vmcs12->guest_bndcfgs = vmcs_read64(GUEST_BNDCFGS);
4108
4109 vmx->nested.need_sync_vmcs02_to_vmcs12_rare = false;
4110 }
4111
4112 static void copy_vmcs02_to_vmcs12_rare(struct kvm_vcpu *vcpu,
4113 struct vmcs12 *vmcs12)
4114 {
4115 struct vcpu_vmx *vmx = to_vmx(vcpu);
4116 int cpu;
4117
4118 if (!vmx->nested.need_sync_vmcs02_to_vmcs12_rare)
4119 return;
4120
4121
4122 WARN_ON_ONCE(vmx->loaded_vmcs != &vmx->vmcs01);
4123
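	/*
	 * vmcs_read*() always targets the current VMCS, so temporarily make
	 * vmcs02 the loaded VMCS on this CPU, copy out the rarely-synced
	 * guest fields, and then switch back to vmcs01.
	 */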
4124 cpu = get_cpu();
4125 vmx->loaded_vmcs = &vmx->nested.vmcs02;
4126 vmx_vcpu_load_vmcs(vcpu, cpu, &vmx->vmcs01);
4127
4128 sync_vmcs02_to_vmcs12_rare(vcpu, vmcs12);
4129
4130 vmx->loaded_vmcs = &vmx->vmcs01;
4131 vmx_vcpu_load_vmcs(vcpu, cpu, &vmx->nested.vmcs02);
4132 put_cpu();
4133 }
4134
4135 /*
4136 * Update the guest state fields of vmcs12 to reflect changes that
4137 * occurred while L2 was running. (The "IA-32e mode guest" bit of the
4138 * VM-entry controls is also updated, since this is really a guest
4139 * state bit.)
4140 */
4141 static void sync_vmcs02_to_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
4142 {
4143 struct vcpu_vmx *vmx = to_vmx(vcpu);
4144
4145 if (evmptr_is_valid(vmx->nested.hv_evmcs_vmptr))
4146 sync_vmcs02_to_vmcs12_rare(vcpu, vmcs12);
4147
4148 vmx->nested.need_sync_vmcs02_to_vmcs12_rare =
4149 !evmptr_is_valid(vmx->nested.hv_evmcs_vmptr);
4150
4151 vmcs12->guest_cr0 = vmcs12_guest_cr0(vcpu, vmcs12);
4152 vmcs12->guest_cr4 = vmcs12_guest_cr4(vcpu, vmcs12);
4153
4154 vmcs12->guest_rsp = kvm_rsp_read(vcpu);
4155 vmcs12->guest_rip = kvm_rip_read(vcpu);
4156 vmcs12->guest_rflags = vmcs_readl(GUEST_RFLAGS);
4157
4158 vmcs12->guest_cs_ar_bytes = vmcs_read32(GUEST_CS_AR_BYTES);
4159 vmcs12->guest_ss_ar_bytes = vmcs_read32(GUEST_SS_AR_BYTES);
4160
4161 vmcs12->guest_interruptibility_info =
4162 vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
4163
4164 if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED)
4165 vmcs12->guest_activity_state = GUEST_ACTIVITY_HLT;
4166 else if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED)
4167 vmcs12->guest_activity_state = GUEST_ACTIVITY_WAIT_SIPI;
4168 else
4169 vmcs12->guest_activity_state = GUEST_ACTIVITY_ACTIVE;
4170
4171 if (nested_cpu_has_preemption_timer(vmcs12) &&
4172 vmcs12->vm_exit_controls & VM_EXIT_SAVE_VMX_PREEMPTION_TIMER &&
4173 !vmx->nested.nested_run_pending)
4174 vmcs12->vmx_preemption_timer_value =
4175 vmx_get_preemption_timer_value(vcpu);
4176
4177 /*
4178 * In some cases (usually, nested EPT), L2 is allowed to change its
4179 * own CR3 without exiting. If it has changed it, we must keep it.
4180 * Of course, if L0 is using shadow page tables, GUEST_CR3 was defined
4181 * by L0, not L1 or L2, so we mustn't unconditionally copy it to vmcs12.
4182 *
4183 * Additionally, restore L2's PDPTR to vmcs12.
4184 */
4185 if (enable_ept) {
4186 vmcs12->guest_cr3 = vmcs_readl(GUEST_CR3);
4187 if (nested_cpu_has_ept(vmcs12) && is_pae_paging(vcpu)) {
4188 vmcs12->guest_pdptr0 = vmcs_read64(GUEST_PDPTR0);
4189 vmcs12->guest_pdptr1 = vmcs_read64(GUEST_PDPTR1);
4190 vmcs12->guest_pdptr2 = vmcs_read64(GUEST_PDPTR2);
4191 vmcs12->guest_pdptr3 = vmcs_read64(GUEST_PDPTR3);
4192 }
4193 }
4194
4195 vmcs12->guest_linear_address = vmcs_readl(GUEST_LINEAR_ADDRESS);
4196
4197 if (nested_cpu_has_vid(vmcs12))
4198 vmcs12->guest_intr_status = vmcs_read16(GUEST_INTR_STATUS);
4199
4200 vmcs12->vm_entry_controls =
4201 (vmcs12->vm_entry_controls & ~VM_ENTRY_IA32E_MODE) |
4202 (vm_entry_controls_get(to_vmx(vcpu)) & VM_ENTRY_IA32E_MODE);
4203
4204 if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_DEBUG_CONTROLS)
4205 kvm_get_dr(vcpu, 7, (unsigned long *)&vmcs12->guest_dr7);
4206
4207 if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_IA32_EFER)
4208 vmcs12->guest_ia32_efer = vcpu->arch.efer;
4209 }
4210
4211 /*
4212 * prepare_vmcs12 is part of what we need to do when the nested L2 guest exits
4213 * and we want to prepare to run its L1 parent. L1 keeps a vmcs for L2 (vmcs12),
4214 * and this function updates it to reflect the changes to the guest state while
4215 * L2 was running (and perhaps made some exits which were handled directly by L0
4216 * without going back to L1), and to reflect the exit reason.
4217 * Note that we do not have to copy here all VMCS fields, just those that
4218 * could have changed by the L2 guest or the exit - i.e., the guest-state and
4219 * exit-information fields only. Other fields are modified by L1 with VMWRITE,
4220 * which already writes to vmcs12 directly.
4221 */
4222 static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
4223 u32 vm_exit_reason, u32 exit_intr_info,
4224 unsigned long exit_qualification)
4225 {
4226 /* update exit information fields: */
4227 vmcs12->vm_exit_reason = vm_exit_reason;
4228 if (to_vmx(vcpu)->exit_reason.enclave_mode)
4229 vmcs12->vm_exit_reason |= VMX_EXIT_REASONS_SGX_ENCLAVE_MODE;
4230 vmcs12->exit_qualification = exit_qualification;
4231
4232 /*
4233 * On VM-Exit due to a failed VM-Entry, the VMCS isn't marked launched
4234 * and only EXIT_REASON and EXIT_QUALIFICATION are updated, all other
4235 * exit info fields are unmodified.
4236 */
4237 if (!(vmcs12->vm_exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY)) {
4238 vmcs12->launch_state = 1;
4239
4240 /* vm_entry_intr_info_field is cleared on exit. Emulate this
4241 * instead of reading the real value. */
4242 vmcs12->vm_entry_intr_info_field &= ~INTR_INFO_VALID_MASK;
4243
4244 /*
4245 		 * Transfer the event that L0 or L1 may have wanted to inject into
4246 * L2 to IDT_VECTORING_INFO_FIELD.
4247 */
4248 vmcs12_save_pending_event(vcpu, vmcs12,
4249 vm_exit_reason, exit_intr_info);
4250
4251 vmcs12->vm_exit_intr_info = exit_intr_info;
4252 vmcs12->vm_exit_instruction_len = vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
4253 vmcs12->vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
4254
4255 /*
4256 * According to spec, there's no need to store the guest's
4257 * MSRs if the exit is due to a VM-entry failure that occurs
4258 * during or after loading the guest state. Since this exit
4259 * does not fall in that category, we need to save the MSRs.
4260 */
4261 if (nested_vmx_store_msr(vcpu,
4262 vmcs12->vm_exit_msr_store_addr,
4263 vmcs12->vm_exit_msr_store_count))
4264 nested_vmx_abort(vcpu,
4265 VMX_ABORT_SAVE_GUEST_MSR_FAIL);
4266 }
4267
4268 /*
4269 * Drop what we picked up for L2 via vmx_complete_interrupts. It is
4270 * preserved above and would only end up incorrectly in L1.
4271 */
4272 vcpu->arch.nmi_injected = false;
4273 kvm_clear_exception_queue(vcpu);
4274 kvm_clear_interrupt_queue(vcpu);
4275 }
4276
4277 /*
4278  * A part of what we need to do when the nested L2 guest exits and we want
4279  * to run its L1 parent is to reset L1's guest state to the host state specified
4280 * in vmcs12.
4281 * This function is to be called not only on normal nested exit, but also on
4282 * a nested entry failure, as explained in Intel's spec, 3B.23.7 ("VM-Entry
4283 * Failures During or After Loading Guest State").
4284 * This function should be called when the active VMCS is L1's (vmcs01).
4285 */
4286 static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
4287 struct vmcs12 *vmcs12)
4288 {
4289 enum vm_entry_failure_code ignored;
4290 struct kvm_segment seg;
4291
4292 if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER)
4293 vcpu->arch.efer = vmcs12->host_ia32_efer;
4294 else if (vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE)
4295 vcpu->arch.efer |= (EFER_LMA | EFER_LME);
4296 else
4297 vcpu->arch.efer &= ~(EFER_LMA | EFER_LME);
4298 vmx_set_efer(vcpu, vcpu->arch.efer);
4299
4300 kvm_rsp_write(vcpu, vmcs12->host_rsp);
4301 kvm_rip_write(vcpu, vmcs12->host_rip);
4302 vmx_set_rflags(vcpu, X86_EFLAGS_FIXED);
4303 vmx_set_interrupt_shadow(vcpu, 0);
4304
4305 /*
4306 * Note that calling vmx_set_cr0 is important, even if cr0 hasn't
4307 * actually changed, because vmx_set_cr0 refers to efer set above.
4308 *
4309 * CR0_GUEST_HOST_MASK is already set in the original vmcs01
4310 * (KVM doesn't change it);
4311 */
4312 vcpu->arch.cr0_guest_owned_bits = KVM_POSSIBLE_CR0_GUEST_BITS;
4313 vmx_set_cr0(vcpu, vmcs12->host_cr0);
4314
4315 /* Same as above - no reason to call set_cr4_guest_host_mask(). */
4316 vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK);
4317 vmx_set_cr4(vcpu, vmcs12->host_cr4);
4318
4319 nested_ept_uninit_mmu_context(vcpu);
4320
4321 /*
4322 * Only PDPTE load can fail as the value of cr3 was checked on entry and
4323 * couldn't have changed.
4324 */
4325 if (nested_vmx_load_cr3(vcpu, vmcs12->host_cr3, false, true, &ignored))
4326 nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_PDPTE_FAIL);
4327
4328 nested_vmx_transition_tlb_flush(vcpu, vmcs12, false);
4329
4330 vmcs_write32(GUEST_SYSENTER_CS, vmcs12->host_ia32_sysenter_cs);
4331 vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->host_ia32_sysenter_esp);
4332 vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->host_ia32_sysenter_eip);
4333 vmcs_writel(GUEST_IDTR_BASE, vmcs12->host_idtr_base);
4334 vmcs_writel(GUEST_GDTR_BASE, vmcs12->host_gdtr_base);
4335 vmcs_write32(GUEST_IDTR_LIMIT, 0xFFFF);
4336 vmcs_write32(GUEST_GDTR_LIMIT, 0xFFFF);
4337
4338 /* If not VM_EXIT_CLEAR_BNDCFGS, the L2 value propagates to L1. */
4339 if (vmcs12->vm_exit_controls & VM_EXIT_CLEAR_BNDCFGS)
4340 vmcs_write64(GUEST_BNDCFGS, 0);
4341
4342 if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PAT) {
4343 vmcs_write64(GUEST_IA32_PAT, vmcs12->host_ia32_pat);
4344 vcpu->arch.pat = vmcs12->host_ia32_pat;
4345 }
4346 if ((vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL) &&
4347 intel_pmu_has_perf_global_ctrl(vcpu_to_pmu(vcpu)))
4348 WARN_ON_ONCE(kvm_set_msr(vcpu, MSR_CORE_PERF_GLOBAL_CTRL,
4349 vmcs12->host_ia32_perf_global_ctrl));
4350
4351 /* Set L1 segment info according to Intel SDM
4352 27.5.2 Loading Host Segment and Descriptor-Table Registers */
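	/*
	 * Type 11 below is an execute/read, accessed code segment (and, for
	 * TR further down, a busy TSS); type 3 is a read/write, accessed
	 * data segment.  Together with the base, limit and granularity bits
	 * these reproduce the fixed host segment attributes the SDM
	 * prescribes on VM-exit.
	 */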
4353 seg = (struct kvm_segment) {
4354 .base = 0,
4355 .limit = 0xFFFFFFFF,
4356 .selector = vmcs12->host_cs_selector,
4357 .type = 11,
4358 .present = 1,
4359 .s = 1,
4360 .g = 1
4361 };
4362 if (vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE)
4363 seg.l = 1;
4364 else
4365 seg.db = 1;
4366 __vmx_set_segment(vcpu, &seg, VCPU_SREG_CS);
4367 seg = (struct kvm_segment) {
4368 .base = 0,
4369 .limit = 0xFFFFFFFF,
4370 .type = 3,
4371 .present = 1,
4372 .s = 1,
4373 .db = 1,
4374 .g = 1
4375 };
4376 seg.selector = vmcs12->host_ds_selector;
4377 __vmx_set_segment(vcpu, &seg, VCPU_SREG_DS);
4378 seg.selector = vmcs12->host_es_selector;
4379 __vmx_set_segment(vcpu, &seg, VCPU_SREG_ES);
4380 seg.selector = vmcs12->host_ss_selector;
4381 __vmx_set_segment(vcpu, &seg, VCPU_SREG_SS);
4382 seg.selector = vmcs12->host_fs_selector;
4383 seg.base = vmcs12->host_fs_base;
4384 __vmx_set_segment(vcpu, &seg, VCPU_SREG_FS);
4385 seg.selector = vmcs12->host_gs_selector;
4386 seg.base = vmcs12->host_gs_base;
4387 __vmx_set_segment(vcpu, &seg, VCPU_SREG_GS);
4388 seg = (struct kvm_segment) {
4389 .base = vmcs12->host_tr_base,
4390 .limit = 0x67,
4391 .selector = vmcs12->host_tr_selector,
4392 .type = 11,
4393 .present = 1
4394 };
4395 __vmx_set_segment(vcpu, &seg, VCPU_SREG_TR);
4396
4397 memset(&seg, 0, sizeof(seg));
4398 seg.unusable = 1;
4399 __vmx_set_segment(vcpu, &seg, VCPU_SREG_LDTR);
4400
4401 kvm_set_dr(vcpu, 7, 0x400);
4402 vmcs_write64(GUEST_IA32_DEBUGCTL, 0);
4403
4404 if (nested_vmx_load_msr(vcpu, vmcs12->vm_exit_msr_load_addr,
4405 vmcs12->vm_exit_msr_load_count))
4406 nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_MSR_FAIL);
4407
4408 to_vmx(vcpu)->emulation_required = vmx_emulation_required(vcpu);
4409 }
4410
4411 static inline u64 nested_vmx_get_vmcs01_guest_efer(struct vcpu_vmx *vmx)
4412 {
4413 struct vmx_uret_msr *efer_msr;
4414 unsigned int i;
4415
4416 if (vm_entry_controls_get(vmx) & VM_ENTRY_LOAD_IA32_EFER)
4417 return vmcs_read64(GUEST_IA32_EFER);
4418
4419 if (cpu_has_load_ia32_efer())
4420 return host_efer;
4421
4422 for (i = 0; i < vmx->msr_autoload.guest.nr; ++i) {
4423 if (vmx->msr_autoload.guest.val[i].index == MSR_EFER)
4424 return vmx->msr_autoload.guest.val[i].value;
4425 }
4426
4427 efer_msr = vmx_find_uret_msr(vmx, MSR_EFER);
4428 if (efer_msr)
4429 return efer_msr->data;
4430
4431 return host_efer;
4432 }
4433
4434 static void nested_vmx_restore_host_state(struct kvm_vcpu *vcpu)
4435 {
4436 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
4437 struct vcpu_vmx *vmx = to_vmx(vcpu);
4438 struct vmx_msr_entry g, h;
4439 gpa_t gpa;
4440 u32 i, j;
4441
4442 vcpu->arch.pat = vmcs_read64(GUEST_IA32_PAT);
4443
4444 if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS) {
4445 /*
4446 * L1's host DR7 is lost if KVM_GUESTDBG_USE_HW_BP is set
4447 * as vmcs01.GUEST_DR7 contains a userspace defined value
4448 * and vcpu->arch.dr7 is not squirreled away before the
4449 * nested VMENTER (not worth adding a variable in nested_vmx).
4450 */
4451 if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
4452 kvm_set_dr(vcpu, 7, DR7_FIXED_1);
4453 else
4454 WARN_ON(kvm_set_dr(vcpu, 7, vmcs_readl(GUEST_DR7)));
4455 }
4456
4457 /*
4458 * Note that calling vmx_set_{efer,cr0,cr4} is important as they
4459 * handle a variety of side effects to KVM's software model.
4460 */
4461 vmx_set_efer(vcpu, nested_vmx_get_vmcs01_guest_efer(vmx));
4462
4463 vcpu->arch.cr0_guest_owned_bits = KVM_POSSIBLE_CR0_GUEST_BITS;
4464 vmx_set_cr0(vcpu, vmcs_readl(CR0_READ_SHADOW));
4465
4466 vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK);
4467 vmx_set_cr4(vcpu, vmcs_readl(CR4_READ_SHADOW));
4468
4469 nested_ept_uninit_mmu_context(vcpu);
4470 vcpu->arch.cr3 = vmcs_readl(GUEST_CR3);
4471 kvm_register_mark_available(vcpu, VCPU_EXREG_CR3);
4472
4473 /*
4474 * Use ept_save_pdptrs(vcpu) to load the MMU's cached PDPTRs
4475 * from vmcs01 (if necessary). The PDPTRs are not loaded on
4476 * VMFail, like everything else we just need to ensure our
4477 * software model is up-to-date.
4478 */
4479 if (enable_ept && is_pae_paging(vcpu))
4480 ept_save_pdptrs(vcpu);
4481
4482 kvm_mmu_reset_context(vcpu);
4483
4484 /*
4485 * This nasty bit of open coding is a compromise between blindly
4486 * loading L1's MSRs using the exit load lists (incorrect emulation
4487 * of VMFail), leaving the nested VM's MSRs in the software model
4488 * (incorrect behavior) and snapshotting the modified MSRs (too
4489 * expensive since the lists are unbound by hardware). For each
4490 * MSR that was (prematurely) loaded from the nested VMEntry load
4491 * list, reload it from the exit load list if it exists and differs
4492 * from the guest value. The intent is to stuff host state as
4493 * silently as possible, not to fully process the exit load list.
4494 */
4495 for (i = 0; i < vmcs12->vm_entry_msr_load_count; i++) {
4496 gpa = vmcs12->vm_entry_msr_load_addr + (i * sizeof(g));
4497 if (kvm_vcpu_read_guest(vcpu, gpa, &g, sizeof(g))) {
4498 pr_debug_ratelimited(
4499 "%s read MSR index failed (%u, 0x%08llx)\n",
4500 __func__, i, gpa);
4501 goto vmabort;
4502 }
4503
4504 for (j = 0; j < vmcs12->vm_exit_msr_load_count; j++) {
4505 gpa = vmcs12->vm_exit_msr_load_addr + (j * sizeof(h));
4506 if (kvm_vcpu_read_guest(vcpu, gpa, &h, sizeof(h))) {
4507 pr_debug_ratelimited(
4508 "%s read MSR failed (%u, 0x%08llx)\n",
4509 __func__, j, gpa);
4510 goto vmabort;
4511 }
4512 if (h.index != g.index)
4513 continue;
4514 if (h.value == g.value)
4515 break;
4516
4517 if (nested_vmx_load_msr_check(vcpu, &h)) {
4518 pr_debug_ratelimited(
4519 "%s check failed (%u, 0x%x, 0x%x)\n",
4520 __func__, j, h.index, h.reserved);
4521 goto vmabort;
4522 }
4523
4524 if (kvm_set_msr(vcpu, h.index, h.value)) {
4525 pr_debug_ratelimited(
4526 "%s WRMSR failed (%u, 0x%x, 0x%llx)\n",
4527 __func__, j, h.index, h.value);
4528 goto vmabort;
4529 }
4530 }
4531 }
4532
4533 return;
4534
4535 vmabort:
4536 nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_MSR_FAIL);
4537 }
4538
4539 /*
4540 * Emulate an exit from nested guest (L2) to L1, i.e., prepare to run L1
4541 * and modify vmcs12 to make it see what it would expect to see there if
4542 * L2 was its real guest. Must only be called when in L2 (is_guest_mode())
4543 */
4544 void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason,
4545 u32 exit_intr_info, unsigned long exit_qualification)
4546 {
4547 struct vcpu_vmx *vmx = to_vmx(vcpu);
4548 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
4549
4550 /* trying to cancel vmlaunch/vmresume is a bug */
4551 WARN_ON_ONCE(vmx->nested.nested_run_pending);
4552
4553 if (kvm_check_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu)) {
4554 /*
4555 * KVM_REQ_GET_NESTED_STATE_PAGES is also used to map
4556 * Enlightened VMCS after migration and we still need to
4557 * do that when something is forcing L2->L1 exit prior to
4558 * the first L2 run.
4559 */
4560 (void)nested_get_evmcs_page(vcpu);
4561 }
4562
4563 /* Service pending TLB flush requests for L2 before switching to L1. */
4564 kvm_service_local_tlb_flush_requests(vcpu);
4565
4566 /*
4567 * VCPU_EXREG_PDPTR will be clobbered in arch/x86/kvm/vmx/vmx.h between
4568 * now and the new vmentry. Ensure that the VMCS02 PDPTR fields are
4569 * up-to-date before switching to L1.
4570 */
4571 if (enable_ept && is_pae_paging(vcpu))
4572 vmx_ept_load_pdptrs(vcpu);
4573
4574 leave_guest_mode(vcpu);
4575
4576 if (nested_cpu_has_preemption_timer(vmcs12))
4577 hrtimer_cancel(&to_vmx(vcpu)->nested.preemption_timer);
4578
4579 if (nested_cpu_has(vmcs12, CPU_BASED_USE_TSC_OFFSETTING)) {
4580 vcpu->arch.tsc_offset = vcpu->arch.l1_tsc_offset;
4581 if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_TSC_SCALING))
4582 vcpu->arch.tsc_scaling_ratio = vcpu->arch.l1_tsc_scaling_ratio;
4583 }
4584
4585 if (likely(!vmx->fail)) {
4586 sync_vmcs02_to_vmcs12(vcpu, vmcs12);
4587
4588 if (vm_exit_reason != -1)
4589 prepare_vmcs12(vcpu, vmcs12, vm_exit_reason,
4590 exit_intr_info, exit_qualification);
4591
4592 /*
4593 * Must happen outside of sync_vmcs02_to_vmcs12() as it will
4594 * also be used to capture vmcs12 cache as part of
4595 * capturing nVMX state for snapshot (migration).
4596 *
4597 * Otherwise, this flush will dirty guest memory at a
4598 * point it is already assumed by user-space to be
4599 * immutable.
4600 */
4601 nested_flush_cached_shadow_vmcs12(vcpu, vmcs12);
4602 } else {
4603 /*
4604 * The only expected VM-instruction error is "VM entry with
4605 * invalid control field(s)." Anything else indicates a
4606 * problem with L0. And we should never get here with a
4607 * VMFail of any type if early consistency checks are enabled.
4608 */
4609 WARN_ON_ONCE(vmcs_read32(VM_INSTRUCTION_ERROR) !=
4610 VMXERR_ENTRY_INVALID_CONTROL_FIELD);
4611 WARN_ON_ONCE(nested_early_check);
4612 }
4613
4614 vmx_switch_vmcs(vcpu, &vmx->vmcs01);
4615
4616 /* Update any VMCS fields that might have changed while L2 ran */
4617 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
4618 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);
4619 vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset);
4620 if (kvm_has_tsc_control)
4621 vmcs_write64(TSC_MULTIPLIER, vcpu->arch.tsc_scaling_ratio);
4622
4623 if (vmx->nested.l1_tpr_threshold != -1)
4624 vmcs_write32(TPR_THRESHOLD, vmx->nested.l1_tpr_threshold);
4625
4626 if (vmx->nested.change_vmcs01_virtual_apic_mode) {
4627 vmx->nested.change_vmcs01_virtual_apic_mode = false;
4628 vmx_set_virtual_apic_mode(vcpu);
4629 }
4630
4631 if (vmx->nested.update_vmcs01_cpu_dirty_logging) {
4632 vmx->nested.update_vmcs01_cpu_dirty_logging = false;
4633 vmx_update_cpu_dirty_logging(vcpu);
4634 }
4635
4636 /* Unpin physical memory we referred to in vmcs02 */
4637 if (vmx->nested.apic_access_page) {
4638 kvm_release_page_clean(vmx->nested.apic_access_page);
4639 vmx->nested.apic_access_page = NULL;
4640 }
4641 kvm_vcpu_unmap(vcpu, &vmx->nested.virtual_apic_map, true);
4642 kvm_vcpu_unmap(vcpu, &vmx->nested.pi_desc_map, true);
4643 vmx->nested.pi_desc = NULL;
4644
4645 if (vmx->nested.reload_vmcs01_apic_access_page) {
4646 vmx->nested.reload_vmcs01_apic_access_page = false;
4647 kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu);
4648 }
4649
4650 if (vmx->nested.update_vmcs01_apicv_status) {
4651 vmx->nested.update_vmcs01_apicv_status = false;
4652 kvm_make_request(KVM_REQ_APICV_UPDATE, vcpu);
4653 }
4654
4655 if ((vm_exit_reason != -1) &&
4656 (enable_shadow_vmcs || evmptr_is_valid(vmx->nested.hv_evmcs_vmptr)))
4657 vmx->nested.need_vmcs12_to_shadow_sync = true;
4658
4659 /* in case we halted in L2 */
4660 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
4661
4662 if (likely(!vmx->fail)) {
4663 if ((u16)vm_exit_reason == EXIT_REASON_EXTERNAL_INTERRUPT &&
4664 nested_exit_intr_ack_set(vcpu)) {
4665 int irq = kvm_cpu_get_interrupt(vcpu);
4666 WARN_ON(irq < 0);
4667 vmcs12->vm_exit_intr_info = irq |
4668 INTR_INFO_VALID_MASK | INTR_TYPE_EXT_INTR;
4669 }
4670
4671 if (vm_exit_reason != -1)
4672 trace_kvm_nested_vmexit_inject(vmcs12->vm_exit_reason,
4673 vmcs12->exit_qualification,
4674 vmcs12->idt_vectoring_info_field,
4675 vmcs12->vm_exit_intr_info,
4676 vmcs12->vm_exit_intr_error_code,
4677 KVM_ISA_VMX);
4678
4679 load_vmcs12_host_state(vcpu, vmcs12);
4680
4681 return;
4682 }
4683
4684 /*
4685 * After an early L2 VM-entry failure, we're now back
4686 * in L1 which thinks it just finished a VMLAUNCH or
4687 * VMRESUME instruction, so we need to set the failure
4688 * flag and the VM-instruction error field of the VMCS
4689 * accordingly, and skip the emulated instruction.
4690 */
4691 (void)nested_vmx_fail(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
4692
4693 /*
4694 * Restore L1's host state to KVM's software model. We're here
4695 * because a consistency check was caught by hardware, which
4696 * means some amount of guest state has been propagated to KVM's
4697 * model and needs to be unwound to the host's state.
4698 */
4699 nested_vmx_restore_host_state(vcpu);
4700
4701 vmx->fail = 0;
4702 }
4703
4704 static void nested_vmx_triple_fault(struct kvm_vcpu *vcpu)
4705 {
4706 nested_vmx_vmexit(vcpu, EXIT_REASON_TRIPLE_FAULT, 0, 0);
4707 }
4708
4709 /*
4710 * Decode the memory-address operand of a vmx instruction, as recorded on an
4711 * exit caused by such an instruction (run by a guest hypervisor).
4712 * On success, returns 0. When the operand is invalid, returns 1 and throws
4713 * #UD, #GP, or #SS.
4714 */
4715 int get_vmx_mem_address(struct kvm_vcpu *vcpu, unsigned long exit_qualification,
4716 u32 vmx_instruction_info, bool wr, int len, gva_t *ret)
4717 {
4718 gva_t off;
4719 bool exn;
4720 struct kvm_segment s;
4721
4722 /*
4723 * According to Vol. 3B, "Information for VM Exits Due to Instruction
4724 * Execution", on an exit, vmx_instruction_info holds most of the
4725 * addressing components of the operand. Only the displacement part
4726 * is put in exit_qualification (see 3B, "Basic VM-Exit Information").
4727 * For how an actual address is calculated from all these components,
4728 * refer to Vol. 1, "Operand Addressing".
4729 */
4730 int scaling = vmx_instruction_info & 3;
4731 int addr_size = (vmx_instruction_info >> 7) & 7;
4732 bool is_reg = vmx_instruction_info & (1u << 10);
4733 int seg_reg = (vmx_instruction_info >> 15) & 7;
4734 int index_reg = (vmx_instruction_info >> 18) & 0xf;
4735 bool index_is_valid = !(vmx_instruction_info & (1u << 22));
4736 int base_reg = (vmx_instruction_info >> 23) & 0xf;
4737 bool base_is_valid = !(vmx_instruction_info & (1u << 27));
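	/*
	 * For example, per the SDM's VM-exit instruction-information format, a
	 * 64-bit guest touching a memory operand like 8(%rax,%rbx,4) with the
	 * default DS segment reports scaling = 2 (shift by 2), addr_size = 2
	 * (64-bit), seg_reg = 3 (DS), index_reg = 3 (RBX) and base_reg = 0
	 * (RAX) both valid, with the displacement 8 in the exit qualification.
	 */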
4738
4739 if (is_reg) {
4740 kvm_queue_exception(vcpu, UD_VECTOR);
4741 return 1;
4742 }
4743
4744 /* Addr = segment_base + offset */
4745 /* offset = base + [index * scale] + displacement */
4746 off = exit_qualification; /* holds the displacement */
4747 if (addr_size == 1)
4748 off = (gva_t)sign_extend64(off, 31);
4749 else if (addr_size == 0)
4750 off = (gva_t)sign_extend64(off, 15);
4751 if (base_is_valid)
4752 off += kvm_register_read(vcpu, base_reg);
4753 if (index_is_valid)
4754 off += kvm_register_read(vcpu, index_reg) << scaling;
4755 vmx_get_segment(vcpu, &s, seg_reg);
4756
4757 /*
4758 * The effective address, i.e. @off, of a memory operand is truncated
4759 * based on the address size of the instruction. Note that this is
4760 * the *effective address*, i.e. the address prior to accounting for
4761 * the segment's base.
4762 */
4763 if (addr_size == 1) /* 32 bit */
4764 off &= 0xffffffff;
4765 else if (addr_size == 0) /* 16 bit */
4766 off &= 0xffff;
4767
4768 /* Checks for #GP/#SS exceptions. */
4769 exn = false;
4770 if (is_long_mode(vcpu)) {
4771 /*
4772 * The virtual/linear address is never truncated in 64-bit
4773 * mode, e.g. a 32-bit address size can yield a 64-bit virtual
4774 * address when using FS/GS with a non-zero base.
4775 */
4776 if (seg_reg == VCPU_SREG_FS || seg_reg == VCPU_SREG_GS)
4777 *ret = s.base + off;
4778 else
4779 *ret = off;
4780
4781 /* Long mode: #GP(0)/#SS(0) if the memory address is in a
4782 * non-canonical form. This is the only check on the memory
4783 * destination for long mode!
4784 */
4785 exn = is_noncanonical_address(*ret, vcpu);
4786 } else {
4787 /*
4788 * When not in long mode, the virtual/linear address is
4789 * unconditionally truncated to 32 bits regardless of the
4790 * address size.
4791 */
4792 *ret = (s.base + off) & 0xffffffff;
4793
4794 /* Protected mode: apply checks for segment validity in the
4795 * following order:
4796 * - segment type check (#GP(0) may be thrown)
4797 * - usability check (#GP(0)/#SS(0))
4798 * - limit check (#GP(0)/#SS(0))
4799 */
4800 if (wr)
4801 /* #GP(0) if the destination operand is located in a
4802 * read-only data segment or any code segment.
4803 */
4804 exn = ((s.type & 0xa) == 0 || (s.type & 8));
4805 else
4806 /* #GP(0) if the source operand is located in an
4807 * execute-only code segment
4808 */
4809 exn = ((s.type & 0xa) == 8);
4810 if (exn) {
4811 kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
4812 return 1;
4813 }
4814 /* Protected mode: #GP(0)/#SS(0) if the segment is unusable.
4815 */
4816 exn = (s.unusable != 0);
4817
4818 /*
4819 * Protected mode: #GP(0)/#SS(0) if the memory operand is
4820 * outside the segment limit. All CPUs that support VMX ignore
4821 * limit checks for flat segments, i.e. segments with base==0,
4822 * limit==0xffffffff and of type expand-up data or code.
4823 */
4824 if (!(s.base == 0 && s.limit == 0xffffffff &&
4825 ((s.type & 8) || !(s.type & 4))))
4826 exn = exn || ((u64)off + len - 1 > s.limit);
4827 }
4828 if (exn) {
4829 kvm_queue_exception_e(vcpu,
4830 seg_reg == VCPU_SREG_SS ?
4831 SS_VECTOR : GP_VECTOR,
4832 0);
4833 return 1;
4834 }
4835
4836 return 0;
4837 }
4838
4839 void nested_vmx_pmu_refresh(struct kvm_vcpu *vcpu,
4840 bool vcpu_has_perf_global_ctrl)
4841 {
4842 struct vcpu_vmx *vmx;
4843
4844 if (!nested_vmx_allowed(vcpu))
4845 return;
4846
4847 vmx = to_vmx(vcpu);
4848 if (vcpu_has_perf_global_ctrl) {
4849 vmx->nested.msrs.entry_ctls_high |=
4850 VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL;
4851 vmx->nested.msrs.exit_ctls_high |=
4852 VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL;
4853 } else {
4854 vmx->nested.msrs.entry_ctls_high &=
4855 ~VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL;
4856 vmx->nested.msrs.exit_ctls_high &=
4857 ~VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL;
4858 }
4859 }
4860
4861 static int nested_vmx_get_vmptr(struct kvm_vcpu *vcpu, gpa_t *vmpointer,
4862 int *ret)
4863 {
4864 gva_t gva;
4865 struct x86_exception e;
4866 int r;
4867
4868 if (get_vmx_mem_address(vcpu, vmx_get_exit_qual(vcpu),
4869 vmcs_read32(VMX_INSTRUCTION_INFO), false,
4870 sizeof(*vmpointer), &gva)) {
4871 *ret = 1;
4872 return -EINVAL;
4873 }
4874
4875 r = kvm_read_guest_virt(vcpu, gva, vmpointer, sizeof(*vmpointer), &e);
4876 if (r != X86EMUL_CONTINUE) {
4877 *ret = kvm_handle_memory_failure(vcpu, r, &e);
4878 return -EINVAL;
4879 }
4880
4881 return 0;
4882 }
4883
4884 /*
4885 * Allocate a shadow VMCS and associate it with the currently loaded
4886 * VMCS, unless such a shadow VMCS already exists. The newly allocated
4887 * VMCS is also VMCLEARed, so that it is ready for use.
4888 */
4889 static struct vmcs *alloc_shadow_vmcs(struct kvm_vcpu *vcpu)
4890 {
4891 struct vcpu_vmx *vmx = to_vmx(vcpu);
4892 struct loaded_vmcs *loaded_vmcs = vmx->loaded_vmcs;
4893
4894 /*
4895 * KVM allocates a shadow VMCS only when L1 executes VMXON and frees it
4896 * when L1 executes VMXOFF or the vCPU is forced out of nested
4897 * operation. VMXON faults if the CPU is already post-VMXON, so it
4898 * should be impossible to already have an allocated shadow VMCS. KVM
4899 * doesn't support virtualization of VMCS shadowing, so vmcs01 should
4900 * always be the loaded VMCS.
4901 */
4902 if (WARN_ON(loaded_vmcs != &vmx->vmcs01 || loaded_vmcs->shadow_vmcs))
4903 return loaded_vmcs->shadow_vmcs;
4904
4905 loaded_vmcs->shadow_vmcs = alloc_vmcs(true);
4906 if (loaded_vmcs->shadow_vmcs)
4907 vmcs_clear(loaded_vmcs->shadow_vmcs);
4908
4909 return loaded_vmcs->shadow_vmcs;
4910 }
4911
4912 static int enter_vmx_operation(struct kvm_vcpu *vcpu)
4913 {
4914 struct vcpu_vmx *vmx = to_vmx(vcpu);
4915 int r;
4916
4917 r = alloc_loaded_vmcs(&vmx->nested.vmcs02);
4918 if (r < 0)
4919 goto out_vmcs02;
4920
4921 vmx->nested.cached_vmcs12 = kzalloc(VMCS12_SIZE, GFP_KERNEL_ACCOUNT);
4922 if (!vmx->nested.cached_vmcs12)
4923 goto out_cached_vmcs12;
4924
4925 vmx->nested.shadow_vmcs12_cache.gpa = INVALID_GPA;
4926 vmx->nested.cached_shadow_vmcs12 = kzalloc(VMCS12_SIZE, GFP_KERNEL_ACCOUNT);
4927 if (!vmx->nested.cached_shadow_vmcs12)
4928 goto out_cached_shadow_vmcs12;
4929
4930 if (enable_shadow_vmcs && !alloc_shadow_vmcs(vcpu))
4931 goto out_shadow_vmcs;
4932
4933 hrtimer_init(&vmx->nested.preemption_timer, CLOCK_MONOTONIC,
4934 HRTIMER_MODE_ABS_PINNED);
4935 vmx->nested.preemption_timer.function = vmx_preemption_timer_fn;
4936
4937 vmx->nested.vpid02 = allocate_vpid();
4938
4939 vmx->nested.vmcs02_initialized = false;
4940 vmx->nested.vmxon = true;
4941
4942 if (vmx_pt_mode_is_host_guest()) {
4943 vmx->pt_desc.guest.ctl = 0;
4944 pt_update_intercept_for_msr(vcpu);
4945 }
4946
4947 return 0;
4948
4949 out_shadow_vmcs:
4950 kfree(vmx->nested.cached_shadow_vmcs12);
4951
4952 out_cached_shadow_vmcs12:
4953 kfree(vmx->nested.cached_vmcs12);
4954
4955 out_cached_vmcs12:
4956 free_loaded_vmcs(&vmx->nested.vmcs02);
4957
4958 out_vmcs02:
4959 return -ENOMEM;
4960 }
4961
4962 /* Emulate the VMXON instruction. */
4963 static int handle_vmon(struct kvm_vcpu *vcpu)
4964 {
4965 int ret;
4966 gpa_t vmptr;
4967 uint32_t revision;
4968 struct vcpu_vmx *vmx = to_vmx(vcpu);
4969 const u64 VMXON_NEEDED_FEATURES = FEAT_CTL_LOCKED
4970 | FEAT_CTL_VMX_ENABLED_OUTSIDE_SMX;
4971
4972 /*
4973 * Note, KVM cannot rely on hardware to perform the CR0/CR4 #UD checks
4974 * that have higher priority than VM-Exit (see Intel SDM's pseudocode
4975 * for VMXON), as KVM must load valid CR0/CR4 values into hardware while
4976 * running the guest, i.e. KVM needs to check the _guest_ values.
4977 *
4978 * Rely on hardware for the other two pre-VM-Exit checks, !VM86 and
4979 * !COMPATIBILITY modes. KVM may run the guest in VM86 to emulate Real
4980 * Mode, but KVM will never take the guest out of those modes.
4981 */
4982 if (!nested_host_cr0_valid(vcpu, kvm_read_cr0(vcpu)) ||
4983 !nested_host_cr4_valid(vcpu, kvm_read_cr4(vcpu))) {
4984 kvm_queue_exception(vcpu, UD_VECTOR);
4985 return 1;
4986 }
4987
4988 /*
4989 * CPL=0 and all other checks that are lower priority than VM-Exit must
4990 * be checked manually.
4991 */
4992 if (vmx_get_cpl(vcpu)) {
4993 kvm_inject_gp(vcpu, 0);
4994 return 1;
4995 }
4996
4997 if (vmx->nested.vmxon)
4998 return nested_vmx_fail(vcpu, VMXERR_VMXON_IN_VMX_ROOT_OPERATION);
4999
5000 if ((vmx->msr_ia32_feature_control & VMXON_NEEDED_FEATURES)
5001 != VMXON_NEEDED_FEATURES) {
5002 kvm_inject_gp(vcpu, 0);
5003 return 1;
5004 }
5005
5006 if (nested_vmx_get_vmptr(vcpu, &vmptr, &ret))
5007 return ret;
5008
5009 /*
5010 * SDM 3: 24.11.5
5011 * The first 4 bytes of VMXON region contain the supported
5012 * VMCS revision identifier
5013 *
5014 * Note - IA32_VMX_BASIC[48] will never be 1 for the nested case,
5015 * which would replace the physical address width with 32.
5016 */
5017 if (!page_address_valid(vcpu, vmptr))
5018 return nested_vmx_failInvalid(vcpu);
5019
5020 if (kvm_read_guest(vcpu->kvm, vmptr, &revision, sizeof(revision)) ||
5021 revision != VMCS12_REVISION)
5022 return nested_vmx_failInvalid(vcpu);
5023
5024 vmx->nested.vmxon_ptr = vmptr;
5025 ret = enter_vmx_operation(vcpu);
5026 if (ret)
5027 return ret;
5028
5029 return nested_vmx_succeed(vcpu);
5030 }
5031
5032 static inline void nested_release_vmcs12(struct kvm_vcpu *vcpu)
5033 {
5034 struct vcpu_vmx *vmx = to_vmx(vcpu);
5035
5036 if (vmx->nested.current_vmptr == INVALID_GPA)
5037 return;
5038
5039 copy_vmcs02_to_vmcs12_rare(vcpu, get_vmcs12(vcpu));
5040
5041 if (enable_shadow_vmcs) {
5042 /* copy to memory all shadowed fields in case
5043 they were modified */
5044 copy_shadow_to_vmcs12(vmx);
5045 vmx_disable_shadow_vmcs(vmx);
5046 }
5047 vmx->nested.posted_intr_nv = -1;
5048
5049 /* Flush VMCS12 to guest memory */
5050 kvm_vcpu_write_guest_page(vcpu,
5051 vmx->nested.current_vmptr >> PAGE_SHIFT,
5052 vmx->nested.cached_vmcs12, 0, VMCS12_SIZE);
5053
5054 kvm_mmu_free_roots(vcpu->kvm, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL);
5055
5056 vmx->nested.current_vmptr = INVALID_GPA;
5057 }
5058
5059 /* Emulate the VMXOFF instruction */
5060 static int handle_vmoff(struct kvm_vcpu *vcpu)
5061 {
5062 if (!nested_vmx_check_permission(vcpu))
5063 return 1;
5064
5065 free_nested(vcpu);
5066
5067 /* Process a latched INIT during time CPU was in VMX operation */
5068 kvm_make_request(KVM_REQ_EVENT, vcpu);
5069
5070 return nested_vmx_succeed(vcpu);
5071 }
5072
5073 /* Emulate the VMCLEAR instruction */
5074 static int handle_vmclear(struct kvm_vcpu *vcpu)
5075 {
5076 struct vcpu_vmx *vmx = to_vmx(vcpu);
5077 u32 zero = 0;
5078 gpa_t vmptr;
5079 u64 evmcs_gpa;
5080 int r;
5081
5082 if (!nested_vmx_check_permission(vcpu))
5083 return 1;
5084
5085 if (nested_vmx_get_vmptr(vcpu, &vmptr, &r))
5086 return r;
5087
5088 if (!page_address_valid(vcpu, vmptr))
5089 return nested_vmx_fail(vcpu, VMXERR_VMCLEAR_INVALID_ADDRESS);
5090
5091 if (vmptr == vmx->nested.vmxon_ptr)
5092 return nested_vmx_fail(vcpu, VMXERR_VMCLEAR_VMXON_POINTER);
5093
5094 /*
5095 * When Enlightened VMEntry is enabled on the calling CPU we treat
5096 * the memory area pointed to by vmptr as an Enlightened VMCS (as there's no good
5097 * way to distinguish it from VMCS12) and we must not corrupt it by
5098 * writing to the non-existent 'launch_state' field. The area doesn't
5099 * have to be the currently active EVMCS on the calling CPU and there's
5100 * nothing KVM has to do to transition it from 'active' to 'non-active'
5101 * state. It is possible that the area will stay mapped as
5102 * vmx->nested.hv_evmcs but this shouldn't be a problem.
5103 */
5104 if (likely(!vmx->nested.enlightened_vmcs_enabled ||
5105 !nested_enlightened_vmentry(vcpu, &evmcs_gpa))) {
5106 if (vmptr == vmx->nested.current_vmptr)
5107 nested_release_vmcs12(vcpu);
5108
5109 kvm_vcpu_write_guest(vcpu,
5110 vmptr + offsetof(struct vmcs12,
5111 launch_state),
5112 &zero, sizeof(zero));
5113 } else if (vmx->nested.hv_evmcs && vmptr == vmx->nested.hv_evmcs_vmptr) {
5114 nested_release_evmcs(vcpu);
5115 }
5116
5117 return nested_vmx_succeed(vcpu);
5118 }
5119
5120 /* Emulate the VMLAUNCH instruction */
5121 static int handle_vmlaunch(struct kvm_vcpu *vcpu)
5122 {
5123 return nested_vmx_run(vcpu, true);
5124 }
5125
5126 /* Emulate the VMRESUME instruction */
5127 static int handle_vmresume(struct kvm_vcpu *vcpu)
5128 {
5129
5130 return nested_vmx_run(vcpu, false);
5131 }
5132
5133 static int handle_vmread(struct kvm_vcpu *vcpu)
5134 {
5135 struct vmcs12 *vmcs12 = is_guest_mode(vcpu) ? get_shadow_vmcs12(vcpu)
5136 : get_vmcs12(vcpu);
5137 unsigned long exit_qualification = vmx_get_exit_qual(vcpu);
5138 u32 instr_info = vmcs_read32(VMX_INSTRUCTION_INFO);
5139 struct vcpu_vmx *vmx = to_vmx(vcpu);
5140 struct x86_exception e;
5141 unsigned long field;
5142 u64 value;
5143 gva_t gva = 0;
5144 short offset;
5145 int len, r;
5146
5147 if (!nested_vmx_check_permission(vcpu))
5148 return 1;
5149
5150 /* Decode instruction info and find the field to read */
5151 field = kvm_register_read(vcpu, (((instr_info) >> 28) & 0xf));
5152
5153 if (!evmptr_is_valid(vmx->nested.hv_evmcs_vmptr)) {
5154 /*
5155 * In VMX non-root operation, when the VMCS-link pointer is INVALID_GPA,
5156 * any VMREAD sets the ALU flags for VMfailInvalid.
5157 */
5158 if (vmx->nested.current_vmptr == INVALID_GPA ||
5159 (is_guest_mode(vcpu) &&
5160 get_vmcs12(vcpu)->vmcs_link_pointer == INVALID_GPA))
5161 return nested_vmx_failInvalid(vcpu);
5162
5163 offset = get_vmcs12_field_offset(field);
5164 if (offset < 0)
5165 return nested_vmx_fail(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
5166
5167 if (!is_guest_mode(vcpu) && is_vmcs12_ext_field(field))
5168 copy_vmcs02_to_vmcs12_rare(vcpu, vmcs12);
5169
5170 /* Read the field, zero-extended to a u64 value */
5171 value = vmcs12_read_any(vmcs12, field, offset);
5172 } else {
5173 /*
5174 * Hyper-V TLFS (as of 6.0b) explicitly states, that while an
5175 * enlightened VMCS is active VMREAD/VMWRITE instructions are
5176 * unsupported. Unfortunately, certain versions of Windows 11
5177 * don't comply with this requirement which is not enforced in
5178 * genuine Hyper-V. Allow VMREAD from an enlightened VMCS as a
5179 * workaround, as misbehaving guests will panic on VM-Fail.
5180 * Note, enlightened VMCS is incompatible with shadow VMCS so
5181 * all VMREADs from L2 should go to L1.
5182 */
5183 if (WARN_ON_ONCE(is_guest_mode(vcpu)))
5184 return nested_vmx_failInvalid(vcpu);
5185
5186 offset = evmcs_field_offset(field, NULL);
5187 if (offset < 0)
5188 return nested_vmx_fail(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
5189
5190 /* Read the field, zero-extended to a u64 value */
5191 value = evmcs_read_any(vmx->nested.hv_evmcs, field, offset);
5192 }
5193
5194 /*
5195 * Now copy part of this value to register or memory, as requested.
5196 * Note that the number of bits actually copied is 32 or 64 depending
5197 * on the guest's mode (32 or 64 bit), not on the given field's length.
5198 */
5199 if (instr_info & BIT(10)) {
5200 kvm_register_write(vcpu, (((instr_info) >> 3) & 0xf), value);
5201 } else {
5202 len = is_64_bit_mode(vcpu) ? 8 : 4;
5203 if (get_vmx_mem_address(vcpu, exit_qualification,
5204 instr_info, true, len, &gva))
5205 return 1;
5206 /* _system ok, nested_vmx_check_permission has verified cpl=0 */
5207 r = kvm_write_guest_virt_system(vcpu, gva, &value, len, &e);
5208 if (r != X86EMUL_CONTINUE)
5209 return kvm_handle_memory_failure(vcpu, r, &e);
5210 }
5211
5212 return nested_vmx_succeed(vcpu);
5213 }
5214
5215 static bool is_shadow_field_rw(unsigned long field)
5216 {
5217 switch (field) {
5218 #define SHADOW_FIELD_RW(x, y) case x:
5219 #include "vmcs_shadow_fields.h"
5220 return true;
5221 default:
5222 break;
5223 }
5224 return false;
5225 }
5226
5227 static bool is_shadow_field_ro(unsigned long field)
5228 {
5229 switch (field) {
5230 #define SHADOW_FIELD_RO(x, y) case x:
5231 #include "vmcs_shadow_fields.h"
5232 return true;
5233 default:
5234 break;
5235 }
5236 return false;
5237 }
5238
5239 static int handle_vmwrite(struct kvm_vcpu *vcpu)
5240 {
5241 struct vmcs12 *vmcs12 = is_guest_mode(vcpu) ? get_shadow_vmcs12(vcpu)
5242 : get_vmcs12(vcpu);
5243 unsigned long exit_qualification = vmx_get_exit_qual(vcpu);
5244 u32 instr_info = vmcs_read32(VMX_INSTRUCTION_INFO);
5245 struct vcpu_vmx *vmx = to_vmx(vcpu);
5246 struct x86_exception e;
5247 unsigned long field;
5248 short offset;
5249 gva_t gva;
5250 int len, r;
5251
5252 /*
5253 * The value to write might be 32 or 64 bits, depending on L1's long
5254 * mode, and eventually we need to write that into a field of several
5255 * possible lengths. The code below first zero-extends the value to 64
5256 * bit (value), and then copies only the appropriate number of
5257 * bits into the vmcs12 field.
5258 */
5259 u64 value = 0;
5260
5261 if (!nested_vmx_check_permission(vcpu))
5262 return 1;
5263
5264 /*
5265 * In VMX non-root operation, when the VMCS-link pointer is INVALID_GPA,
5266 * any VMWRITE sets the ALU flags for VMfailInvalid.
5267 */
5268 if (vmx->nested.current_vmptr == INVALID_GPA ||
5269 (is_guest_mode(vcpu) &&
5270 get_vmcs12(vcpu)->vmcs_link_pointer == INVALID_GPA))
5271 return nested_vmx_failInvalid(vcpu);
5272
5273 if (instr_info & BIT(10))
5274 value = kvm_register_read(vcpu, (((instr_info) >> 3) & 0xf));
5275 else {
5276 len = is_64_bit_mode(vcpu) ? 8 : 4;
5277 if (get_vmx_mem_address(vcpu, exit_qualification,
5278 instr_info, false, len, &gva))
5279 return 1;
5280 r = kvm_read_guest_virt(vcpu, gva, &value, len, &e);
5281 if (r != X86EMUL_CONTINUE)
5282 return kvm_handle_memory_failure(vcpu, r, &e);
5283 }
5284
5285 field = kvm_register_read(vcpu, (((instr_info) >> 28) & 0xf));
5286
5287 offset = get_vmcs12_field_offset(field);
5288 if (offset < 0)
5289 return nested_vmx_fail(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
5290
5291 /*
5292 * If the vCPU supports "VMWRITE to any supported field in the
5293 * VMCS," then the "read-only" fields are actually read/write.
5294 */
5295 if (vmcs_field_readonly(field) &&
5296 !nested_cpu_has_vmwrite_any_field(vcpu))
5297 return nested_vmx_fail(vcpu, VMXERR_VMWRITE_READ_ONLY_VMCS_COMPONENT);
5298
5299 /*
5300 * Ensure vmcs12 is up-to-date before any VMWRITE that dirties
5301 * vmcs12, else we may clobber a field or consume a stale value.
5302 */
5303 if (!is_guest_mode(vcpu) && !is_shadow_field_rw(field))
5304 copy_vmcs02_to_vmcs12_rare(vcpu, vmcs12);
5305
5306 /*
5307 * Some Intel CPUs intentionally drop the reserved bits of the AR byte
5308 * fields on VMWRITE. Emulate this behavior to ensure consistent KVM
5309 * behavior regardless of the underlying hardware, e.g. if an AR_BYTE
5310 * field is intercepted for VMWRITE but not VMREAD (in L1), then VMREAD
5311 * from L1 will return a different value than VMREAD from L2 (L1 sees
5312 * the stripped down value, L2 sees the full value as stored by KVM).
5313 */
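	/*
	 * The 0x1f0ff mask below keeps the type/S/DPL/P bits (7:0) and the
	 * AVL/L/D-B/G/unusable bits (16:12) of the access-rights field while
	 * clearing the reserved bits (11:8).
	 */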
5314 if (field >= GUEST_ES_AR_BYTES && field <= GUEST_TR_AR_BYTES)
5315 value &= 0x1f0ff;
5316
5317 vmcs12_write_any(vmcs12, field, offset, value);
5318
5319 /*
5320 * Do not track vmcs12 dirty-state if in guest-mode as we actually
5321 * dirty shadow vmcs12 instead of vmcs12. Fields that can be updated
5322 * by L1 without a vmexit are always updated in the vmcs02, i.e. don't
5323 * "dirty" vmcs12, all others go down the prepare_vmcs02() slow path.
5324 */
5325 if (!is_guest_mode(vcpu) && !is_shadow_field_rw(field)) {
5326 /*
5327 * L1 can read these fields without exiting, ensure the
5328 * shadow VMCS is up-to-date.
5329 */
5330 if (enable_shadow_vmcs && is_shadow_field_ro(field)) {
5331 preempt_disable();
5332 vmcs_load(vmx->vmcs01.shadow_vmcs);
5333
5334 __vmcs_writel(field, value);
5335
5336 vmcs_clear(vmx->vmcs01.shadow_vmcs);
5337 vmcs_load(vmx->loaded_vmcs->vmcs);
5338 preempt_enable();
5339 }
5340 vmx->nested.dirty_vmcs12 = true;
5341 }
5342
5343 return nested_vmx_succeed(vcpu);
5344 }
5345
5346 static void set_current_vmptr(struct vcpu_vmx *vmx, gpa_t vmptr)
5347 {
5348 vmx->nested.current_vmptr = vmptr;
5349 if (enable_shadow_vmcs) {
5350 secondary_exec_controls_setbit(vmx, SECONDARY_EXEC_SHADOW_VMCS);
5351 vmcs_write64(VMCS_LINK_POINTER,
5352 __pa(vmx->vmcs01.shadow_vmcs));
5353 vmx->nested.need_vmcs12_to_shadow_sync = true;
5354 }
5355 vmx->nested.dirty_vmcs12 = true;
5356 vmx->nested.force_msr_bitmap_recalc = true;
5357 }
5358
5359 /* Emulate the VMPTRLD instruction */
5360 static int handle_vmptrld(struct kvm_vcpu *vcpu)
5361 {
5362 struct vcpu_vmx *vmx = to_vmx(vcpu);
5363 gpa_t vmptr;
5364 int r;
5365
5366 if (!nested_vmx_check_permission(vcpu))
5367 return 1;
5368
5369 if (nested_vmx_get_vmptr(vcpu, &vmptr, &r))
5370 return r;
5371
5372 if (!page_address_valid(vcpu, vmptr))
5373 return nested_vmx_fail(vcpu, VMXERR_VMPTRLD_INVALID_ADDRESS);
5374
5375 if (vmptr == vmx->nested.vmxon_ptr)
5376 return nested_vmx_fail(vcpu, VMXERR_VMPTRLD_VMXON_POINTER);
5377
5378 /* Forbid normal VMPTRLD if Enlightened version was used */
5379 if (evmptr_is_valid(vmx->nested.hv_evmcs_vmptr))
5380 return 1;
5381
5382 if (vmx->nested.current_vmptr != vmptr) {
5383 struct gfn_to_hva_cache *ghc = &vmx->nested.vmcs12_cache;
5384 struct vmcs_hdr hdr;
5385
5386 if (kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, vmptr, VMCS12_SIZE)) {
5387 /*
5388 * Reads from an unbacked page return all 1s,
5389 * which means that the 32 bits located at the
5390 * given physical address won't match the required
5391 * VMCS12_REVISION identifier.
5392 */
5393 return nested_vmx_fail(vcpu,
5394 VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID);
5395 }
5396
5397 if (kvm_read_guest_offset_cached(vcpu->kvm, ghc, &hdr,
5398 offsetof(struct vmcs12, hdr),
5399 sizeof(hdr))) {
5400 return nested_vmx_fail(vcpu,
5401 VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID);
5402 }
5403
5404 if (hdr.revision_id != VMCS12_REVISION ||
5405 (hdr.shadow_vmcs &&
5406 !nested_cpu_has_vmx_shadow_vmcs(vcpu))) {
5407 return nested_vmx_fail(vcpu,
5408 VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID);
5409 }
5410
5411 nested_release_vmcs12(vcpu);
5412
5413 /*
5414 * Load VMCS12 from guest memory since it is not already
5415 * cached.
5416 */
5417 if (kvm_read_guest_cached(vcpu->kvm, ghc, vmx->nested.cached_vmcs12,
5418 VMCS12_SIZE)) {
5419 return nested_vmx_fail(vcpu,
5420 VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID);
5421 }
5422
5423 set_current_vmptr(vmx, vmptr);
5424 }
5425
5426 return nested_vmx_succeed(vcpu);
5427 }
5428
5429 /* Emulate the VMPTRST instruction */
5430 static int handle_vmptrst(struct kvm_vcpu *vcpu)
5431 {
5432 unsigned long exit_qual = vmx_get_exit_qual(vcpu);
5433 u32 instr_info = vmcs_read32(VMX_INSTRUCTION_INFO);
5434 gpa_t current_vmptr = to_vmx(vcpu)->nested.current_vmptr;
5435 struct x86_exception e;
5436 gva_t gva;
5437 int r;
5438
5439 if (!nested_vmx_check_permission(vcpu))
5440 return 1;
5441
5442 if (unlikely(evmptr_is_valid(to_vmx(vcpu)->nested.hv_evmcs_vmptr)))
5443 return 1;
5444
5445 if (get_vmx_mem_address(vcpu, exit_qual, instr_info,
5446 true, sizeof(gpa_t), &gva))
5447 return 1;
5448 /* *_system ok, nested_vmx_check_permission has verified cpl=0 */
5449 r = kvm_write_guest_virt_system(vcpu, gva, (void *)&current_vmptr,
5450 sizeof(gpa_t), &e);
5451 if (r != X86EMUL_CONTINUE)
5452 return kvm_handle_memory_failure(vcpu, r, &e);
5453
5454 return nested_vmx_succeed(vcpu);
5455 }
5456
5457 /* Emulate the INVEPT instruction */
5458 static int handle_invept(struct kvm_vcpu *vcpu)
5459 {
5460 struct vcpu_vmx *vmx = to_vmx(vcpu);
5461 u32 vmx_instruction_info, types;
5462 unsigned long type, roots_to_free;
5463 struct kvm_mmu *mmu;
5464 gva_t gva;
5465 struct x86_exception e;
5466 struct {
5467 u64 eptp, gpa;
5468 } operand;
5469 int i, r, gpr_index;
5470
5471 if (!(vmx->nested.msrs.secondary_ctls_high &
5472 SECONDARY_EXEC_ENABLE_EPT) ||
5473 !(vmx->nested.msrs.ept_caps & VMX_EPT_INVEPT_BIT)) {
5474 kvm_queue_exception(vcpu, UD_VECTOR);
5475 return 1;
5476 }
5477
5478 if (!nested_vmx_check_permission(vcpu))
5479 return 1;
5480
5481 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
5482 gpr_index = vmx_get_instr_info_reg2(vmx_instruction_info);
5483 type = kvm_register_read(vcpu, gpr_index);
5484
5485 types = (vmx->nested.msrs.ept_caps >> VMX_EPT_EXTENT_SHIFT) & 6;
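	/*
	 * Only the single-context (1) and global (2) INVEPT types are defined,
	 * hence the mask of 6 (bits 1 and 2) applied to the shifted extent
	 * capabilities.
	 */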
5486
5487 if (type >= 32 || !(types & (1 << type)))
5488 return nested_vmx_fail(vcpu, VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
5489
5490 /* According to the Intel VMX instruction reference, the memory
5491 * operand is read even if it isn't needed (e.g., for type==global)
5492 */
5493 if (get_vmx_mem_address(vcpu, vmx_get_exit_qual(vcpu),
5494 vmx_instruction_info, false, sizeof(operand), &gva))
5495 return 1;
5496 r = kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e);
5497 if (r != X86EMUL_CONTINUE)
5498 return kvm_handle_memory_failure(vcpu, r, &e);
5499
5500 /*
5501 * Nested EPT roots are always held through guest_mmu,
5502 * not root_mmu.
5503 */
5504 mmu = &vcpu->arch.guest_mmu;
5505
5506 switch (type) {
5507 case VMX_EPT_EXTENT_CONTEXT:
5508 if (!nested_vmx_check_eptp(vcpu, operand.eptp))
5509 return nested_vmx_fail(vcpu,
5510 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
5511
5512 roots_to_free = 0;
5513 if (nested_ept_root_matches(mmu->root.hpa, mmu->root.pgd,
5514 operand.eptp))
5515 roots_to_free |= KVM_MMU_ROOT_CURRENT;
5516
5517 for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
5518 if (nested_ept_root_matches(mmu->prev_roots[i].hpa,
5519 mmu->prev_roots[i].pgd,
5520 operand.eptp))
5521 roots_to_free |= KVM_MMU_ROOT_PREVIOUS(i);
5522 }
5523 break;
5524 case VMX_EPT_EXTENT_GLOBAL:
5525 roots_to_free = KVM_MMU_ROOTS_ALL;
5526 break;
5527 default:
5528 BUG();
5529 break;
5530 }
5531
5532 if (roots_to_free)
5533 kvm_mmu_free_roots(vcpu->kvm, mmu, roots_to_free);
5534
5535 return nested_vmx_succeed(vcpu);
5536 }
5537
5538 static int handle_invvpid(struct kvm_vcpu *vcpu)
5539 {
5540 struct vcpu_vmx *vmx = to_vmx(vcpu);
5541 u32 vmx_instruction_info;
5542 unsigned long type, types;
5543 gva_t gva;
5544 struct x86_exception e;
5545 struct {
5546 u64 vpid;
5547 u64 gla;
5548 } operand;
5549 u16 vpid02;
5550 int r, gpr_index;
5551
5552 if (!(vmx->nested.msrs.secondary_ctls_high &
5553 SECONDARY_EXEC_ENABLE_VPID) ||
5554 !(vmx->nested.msrs.vpid_caps & VMX_VPID_INVVPID_BIT)) {
5555 kvm_queue_exception(vcpu, UD_VECTOR);
5556 return 1;
5557 }
5558
5559 if (!nested_vmx_check_permission(vcpu))
5560 return 1;
5561
5562 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
5563 gpr_index = vmx_get_instr_info_reg2(vmx_instruction_info);
5564 type = kvm_register_read(vcpu, gpr_index);
5565
5566 types = (vmx->nested.msrs.vpid_caps &
5567 VMX_VPID_EXTENT_SUPPORTED_MASK) >> 8;
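	/*
	 * The supported-extent bits live at bits 8-11 of vpid_caps; shifting
	 * them down by 8 lines them up with the INVVPID type values 0-3
	 * checked below.
	 */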
5568
5569 if (type >= 32 || !(types & (1 << type)))
5570 return nested_vmx_fail(vcpu,
5571 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
5572
5573 /* According to the Intel VMX instruction reference, the memory
5574 * operand is read even if it isn't needed (e.g., for type==global)
5575 */
5576 if (get_vmx_mem_address(vcpu, vmx_get_exit_qual(vcpu),
5577 vmx_instruction_info, false, sizeof(operand), &gva))
5578 return 1;
5579 r = kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e);
5580 if (r != X86EMUL_CONTINUE)
5581 return kvm_handle_memory_failure(vcpu, r, &e);
5582
5583 if (operand.vpid >> 16)
5584 return nested_vmx_fail(vcpu,
5585 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
5586
5587 vpid02 = nested_get_vpid02(vcpu);
5588 switch (type) {
5589 case VMX_VPID_EXTENT_INDIVIDUAL_ADDR:
5590 if (!operand.vpid ||
5591 is_noncanonical_address(operand.gla, vcpu))
5592 return nested_vmx_fail(vcpu,
5593 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
5594 vpid_sync_vcpu_addr(vpid02, operand.gla);
5595 break;
5596 case VMX_VPID_EXTENT_SINGLE_CONTEXT:
5597 case VMX_VPID_EXTENT_SINGLE_NON_GLOBAL:
5598 if (!operand.vpid)
5599 return nested_vmx_fail(vcpu,
5600 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
5601 vpid_sync_context(vpid02);
5602 break;
5603 case VMX_VPID_EXTENT_ALL_CONTEXT:
5604 vpid_sync_context(vpid02);
5605 break;
5606 default:
5607 WARN_ON_ONCE(1);
5608 return kvm_skip_emulated_instruction(vcpu);
5609 }
5610
5611 /*
5612 * Sync the shadow page tables if EPT is disabled, L1 is invalidating
5613 * linear mappings for L2 (tagged with L2's VPID). Free all guest
5614 * roots as VPIDs are not tracked in the MMU role.
5615 *
5616 * Note, this operates on root_mmu, not guest_mmu, as L1 and L2 share
5617 * an MMU when EPT is disabled.
5618 *
5619 * TODO: sync only the affected SPTEs for INDIVIDUAL_ADDR.
5620 */
5621 if (!enable_ept)
5622 kvm_mmu_free_guest_mode_roots(vcpu->kvm, &vcpu->arch.root_mmu);
5623
5624 return nested_vmx_succeed(vcpu);
5625 }
5626
5627 static int nested_vmx_eptp_switching(struct kvm_vcpu *vcpu,
5628 struct vmcs12 *vmcs12)
5629 {
5630 u32 index = kvm_rcx_read(vcpu);
5631 u64 new_eptp;
5632
5633 if (WARN_ON_ONCE(!nested_cpu_has_ept(vmcs12)))
5634 return 1;
5635 if (index >= VMFUNC_EPTP_ENTRIES)
5636 return 1;
5637
5638 if (kvm_vcpu_read_guest_page(vcpu, vmcs12->eptp_list_address >> PAGE_SHIFT,
5639 &new_eptp, index * 8, 8))
5640 return 1;
5641
5642 /*
5643 * If the (L2) guest does a vmfunc to the currently
5644 * active ept pointer, we don't have to do anything else
5645 */
5646 if (vmcs12->ept_pointer != new_eptp) {
5647 if (!nested_vmx_check_eptp(vcpu, new_eptp))
5648 return 1;
5649
5650 vmcs12->ept_pointer = new_eptp;
5651 nested_ept_new_eptp(vcpu);
5652
5653 if (!nested_cpu_has_vpid(vmcs12))
5654 kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu);
5655 }
5656
5657 return 0;
5658 }
5659
5660 static int handle_vmfunc(struct kvm_vcpu *vcpu)
5661 {
5662 struct vcpu_vmx *vmx = to_vmx(vcpu);
5663 struct vmcs12 *vmcs12;
5664 u32 function = kvm_rax_read(vcpu);
5665
5666 /*
5667 * VMFUNC is only supported for nested guests, but we always enable the
5668 * secondary control for simplicity; for non-nested mode, fake that we
5669 * didn't by injecting #UD.
5670 */
5671 if (!is_guest_mode(vcpu)) {
5672 kvm_queue_exception(vcpu, UD_VECTOR);
5673 return 1;
5674 }
5675
5676 vmcs12 = get_vmcs12(vcpu);
5677
5678 /*
5679 * #UD on out-of-bounds function has priority over VM-Exit, and VMFUNC
5680 * is enabled in vmcs02 if and only if it's enabled in vmcs12.
5681 */
5682 if (WARN_ON_ONCE((function > 63) || !nested_cpu_has_vmfunc(vmcs12))) {
5683 kvm_queue_exception(vcpu, UD_VECTOR);
5684 return 1;
5685 }
5686
5687 if (!(vmcs12->vm_function_control & BIT_ULL(function)))
5688 goto fail;
5689
5690 switch (function) {
5691 case 0:
5692 if (nested_vmx_eptp_switching(vcpu, vmcs12))
5693 goto fail;
5694 break;
5695 default:
5696 goto fail;
5697 }
5698 return kvm_skip_emulated_instruction(vcpu);
5699
5700 fail:
5701 /*
5702 * This is effectively a reflected VM-Exit, as opposed to a synthesized
5703 * nested VM-Exit. Pass the original exit reason, i.e. don't hardcode
5704 * EXIT_REASON_VMFUNC as the exit reason.
5705 */
5706 nested_vmx_vmexit(vcpu, vmx->exit_reason.full,
5707 vmx_get_intr_info(vcpu),
5708 vmx_get_exit_qual(vcpu));
5709 return 1;
5710 }
5711
5712 /*
5713 * Return true if an IO instruction with the specified port and size should cause
5714 * a VM-exit into L1.
5715 */
5716 bool nested_vmx_check_io_bitmaps(struct kvm_vcpu *vcpu, unsigned int port,
5717 int size)
5718 {
5719 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
5720 gpa_t bitmap, last_bitmap;
5721 u8 b;
5722
5723 last_bitmap = INVALID_GPA;
5724 b = -1;
5725
5726 while (size > 0) {
5727 if (port < 0x8000)
5728 bitmap = vmcs12->io_bitmap_a;
5729 else if (port < 0x10000)
5730 bitmap = vmcs12->io_bitmap_b;
5731 else
5732 return true;
5733 bitmap += (port & 0x7fff) / 8;
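		/*
		 * e.g. a one-byte access to port 0x3f8 maps to io_bitmap_a,
		 * byte offset 0x7f, bit 0; port 0x8080 maps to io_bitmap_b,
		 * byte offset 0x10, bit 0.
		 */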
5734
5735 if (last_bitmap != bitmap)
5736 if (kvm_vcpu_read_guest(vcpu, bitmap, &b, 1))
5737 return true;
5738 if (b & (1 << (port & 7)))
5739 return true;
5740
5741 port++;
5742 size--;
5743 last_bitmap = bitmap;
5744 }
5745
5746 return false;
5747 }
5748
5749 static bool nested_vmx_exit_handled_io(struct kvm_vcpu *vcpu,
5750 struct vmcs12 *vmcs12)
5751 {
5752 unsigned long exit_qualification;
5753 unsigned short port;
5754 int size;
5755
5756 if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS))
5757 return nested_cpu_has(vmcs12, CPU_BASED_UNCOND_IO_EXITING);
5758
5759 exit_qualification = vmx_get_exit_qual(vcpu);
5760
5761 port = exit_qualification >> 16;
5762 size = (exit_qualification & 7) + 1;
5763
5764 return nested_vmx_check_io_bitmaps(vcpu, port, size);
5765 }
5766
5767 /*
5768 * Return 1 if we should exit from L2 to L1 to handle an MSR access,
5769 * rather than handle it ourselves in L0. I.e., check whether L1 expressed
5770 * disinterest in the current event (read or write a specific MSR) by using an
5771 * MSR bitmap. This may be the case even when L0 doesn't use MSR bitmaps.
5772 */
5773 static bool nested_vmx_exit_handled_msr(struct kvm_vcpu *vcpu,
5774 struct vmcs12 *vmcs12,
5775 union vmx_exit_reason exit_reason)
5776 {
5777 u32 msr_index = kvm_rcx_read(vcpu);
5778 gpa_t bitmap;
5779
5780 if (!nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS))
5781 return true;
5782
5783 /*
5784 * The MSR_BITMAP page is divided into four 1024-byte bitmaps,
5785 * for the four combinations of read/write and low/high MSR numbers.
5786 * First we need to figure out which of the four to use:
5787 */
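	/*
	 * For example, a WRMSR to MSR_EFER (0xc0000080) is looked up in the
	 * high-MSR write bitmap: 2048 (write) + 1024 (high) + 0x80 / 8 yields
	 * byte offset 3088, bit 0.
	 */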
5788 bitmap = vmcs12->msr_bitmap;
5789 if (exit_reason.basic == EXIT_REASON_MSR_WRITE)
5790 bitmap += 2048;
5791 if (msr_index >= 0xc0000000) {
5792 msr_index -= 0xc0000000;
5793 bitmap += 1024;
5794 }
5795
5796 /* Then read the msr_index'th bit from this bitmap: */
5797 if (msr_index < 1024*8) {
5798 unsigned char b;
5799 if (kvm_vcpu_read_guest(vcpu, bitmap + msr_index/8, &b, 1))
5800 return true;
5801 return 1 & (b >> (msr_index & 7));
5802 } else
5803 return true; /* let L1 handle the wrong parameter */
5804 }
5805
5806 /*
5807 * Return 1 if we should exit from L2 to L1 to handle a CR access exit,
5808 * rather than handle it ourselves in L0. I.e., check if L1 wanted to
5809 * intercept (via guest_host_mask etc.) the current event.
5810 */
5811 static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu,
5812 struct vmcs12 *vmcs12)
5813 {
5814 unsigned long exit_qualification = vmx_get_exit_qual(vcpu);
5815 int cr = exit_qualification & 15;
5816 int reg;
5817 unsigned long val;
5818
5819 switch ((exit_qualification >> 4) & 3) {
5820 case 0: /* mov to cr */
5821 reg = (exit_qualification >> 8) & 15;
5822 val = kvm_register_read(vcpu, reg);
5823 switch (cr) {
5824 case 0:
5825 if (vmcs12->cr0_guest_host_mask &
5826 (val ^ vmcs12->cr0_read_shadow))
5827 return true;
5828 break;
5829 case 3:
5830 if (nested_cpu_has(vmcs12, CPU_BASED_CR3_LOAD_EXITING))
5831 return true;
5832 break;
5833 case 4:
5834 if (vmcs12->cr4_guest_host_mask &
5835 (vmcs12->cr4_read_shadow ^ val))
5836 return true;
5837 break;
5838 case 8:
5839 if (nested_cpu_has(vmcs12, CPU_BASED_CR8_LOAD_EXITING))
5840 return true;
5841 break;
5842 }
5843 break;
5844 case 2: /* clts */
5845 if ((vmcs12->cr0_guest_host_mask & X86_CR0_TS) &&
5846 (vmcs12->cr0_read_shadow & X86_CR0_TS))
5847 return true;
5848 break;
5849 case 1: /* mov from cr */
5850 switch (cr) {
5851 case 3:
5852 if (vmcs12->cpu_based_vm_exec_control &
5853 CPU_BASED_CR3_STORE_EXITING)
5854 return true;
5855 break;
5856 case 8:
5857 if (vmcs12->cpu_based_vm_exec_control &
5858 CPU_BASED_CR8_STORE_EXITING)
5859 return true;
5860 break;
5861 }
5862 break;
5863 case 3: /* lmsw */
5864 /*
5865 * lmsw can change bits 1..3 of cr0, and only set bit 0 of
5866 * cr0. Other attempted changes are ignored, with no exit.
5867 */
5868 val = (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f;
5869 if (vmcs12->cr0_guest_host_mask & 0xe &
5870 (val ^ vmcs12->cr0_read_shadow))
5871 return true;
5872 if ((vmcs12->cr0_guest_host_mask & 0x1) &&
5873 !(vmcs12->cr0_read_shadow & 0x1) &&
5874 (val & 0x1))
5875 return true;
5876 break;
5877 }
5878 return false;
5879 }
5880
5881 static bool nested_vmx_exit_handled_encls(struct kvm_vcpu *vcpu,
5882 struct vmcs12 *vmcs12)
5883 {
5884 u32 encls_leaf;
5885
5886 if (!guest_cpuid_has(vcpu, X86_FEATURE_SGX) ||
5887 !nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENCLS_EXITING))
5888 return false;
5889
5890 encls_leaf = kvm_rax_read(vcpu);
5891 if (encls_leaf > 62)
5892 encls_leaf = 63;
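	/*
	 * Leaves 63 and above all share bit 63 of the ENCLS-exiting bitmap,
	 * hence the clamp above.
	 */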
5893 return vmcs12->encls_exiting_bitmap & BIT_ULL(encls_leaf);
5894 }
5895
5896 static bool nested_vmx_exit_handled_vmcs_access(struct kvm_vcpu *vcpu,
5897 struct vmcs12 *vmcs12, gpa_t bitmap)
5898 {
5899 u32 vmx_instruction_info;
5900 unsigned long field;
5901 u8 b;
5902
5903 if (!nested_cpu_has_shadow_vmcs(vmcs12))
5904 return true;
5905
5906 /* Decode instruction info and find the field to access */
5907 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
5908 field = kvm_register_read(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
5909
5910 /* Out-of-range fields always cause a VM exit from L2 to L1 */
5911 if (field >> 15)
5912 return true;
5913
5914 if (kvm_vcpu_read_guest(vcpu, bitmap + field/8, &b, 1))
5915 return true;
5916
5917 return 1 & (b >> (field & 7));
5918 }
5919
5920 static bool nested_vmx_exit_handled_mtf(struct vmcs12 *vmcs12)
5921 {
5922 u32 entry_intr_info = vmcs12->vm_entry_intr_info_field;
5923
5924 if (nested_cpu_has_mtf(vmcs12))
5925 return true;
5926
5927 /*
5928 * An MTF VM-exit may be injected into the guest by setting the
5929 * interruption-type to 7 (other event) and the vector field to 0. Such
5930 * is the case regardless of the 'monitor trap flag' VM-execution
5931 * control.
5932 */
5933 return entry_intr_info == (INTR_INFO_VALID_MASK
5934 | INTR_TYPE_OTHER_EVENT);
5935 }
5936
5937 /*
5938 * Return true if L0 wants to handle an exit from L2 regardless of whether or not
5939 * L1 wants the exit. Only call this when in is_guest_mode (L2).
5940 */
5941 static bool nested_vmx_l0_wants_exit(struct kvm_vcpu *vcpu,
5942 union vmx_exit_reason exit_reason)
5943 {
5944 u32 intr_info;
5945
5946 switch ((u16)exit_reason.basic) {
5947 case EXIT_REASON_EXCEPTION_NMI:
5948 intr_info = vmx_get_intr_info(vcpu);
5949 if (is_nmi(intr_info))
5950 return true;
5951 else if (is_page_fault(intr_info))
5952 return vcpu->arch.apf.host_apf_flags ||
5953 vmx_need_pf_intercept(vcpu);
5954 else if (is_debug(intr_info) &&
5955 vcpu->guest_debug &
5956 (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))
5957 return true;
5958 else if (is_breakpoint(intr_info) &&
5959 vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
5960 return true;
5961 else if (is_alignment_check(intr_info) &&
5962 !vmx_guest_inject_ac(vcpu))
5963 return true;
5964 return false;
5965 case EXIT_REASON_EXTERNAL_INTERRUPT:
5966 return true;
5967 case EXIT_REASON_MCE_DURING_VMENTRY:
5968 return true;
5969 case EXIT_REASON_EPT_VIOLATION:
5970 /*
5971 * L0 always deals with the EPT violation. If nested EPT is
5972 * used, and the nested mmu code discovers that the address is
5973 * missing in the guest EPT table (EPT12), the EPT violation
5974 * will be injected with nested_ept_inject_page_fault()
5975 */
5976 return true;
5977 case EXIT_REASON_EPT_MISCONFIG:
5978 /*
5979 * L2 never uses directly L1's EPT, but rather L0's own EPT
5980 * table (shadow on EPT) or a merged EPT table that L0 built
5981 * (EPT on EPT). So any problems with the structure of the
5982 * table is L0's fault.
5983 */
5984 return true;
5985 case EXIT_REASON_PREEMPTION_TIMER:
5986 return true;
5987 case EXIT_REASON_PML_FULL:
5988 /*
5989 * PML is emulated for an L1 VMM and should never be enabled in
5990 * vmcs02, always "handle" PML_FULL by exiting to userspace.
5991 */
5992 return true;
5993 case EXIT_REASON_VMFUNC:
5994 /* VM functions are emulated through L2->L0 vmexits. */
5995 return true;
5996 case EXIT_REASON_BUS_LOCK:
5997 /*
5998 * At present, bus lock VM exit is never exposed to L1.
5999 * Handle L2's bus locks in L0 directly.
6000 */
6001 return true;
6002 default:
6003 break;
6004 }
6005 return false;
6006 }
6007
6008 /*
6009 * Return 1 if L1 wants to intercept an exit from L2. Only call this when in
6010 * is_guest_mode (L2).
6011 */
6012 static bool nested_vmx_l1_wants_exit(struct kvm_vcpu *vcpu,
6013 union vmx_exit_reason exit_reason)
6014 {
6015 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
6016 u32 intr_info;
6017
6018 switch ((u16)exit_reason.basic) {
6019 case EXIT_REASON_EXCEPTION_NMI:
6020 intr_info = vmx_get_intr_info(vcpu);
6021 if (is_nmi(intr_info))
6022 return true;
6023 else if (is_page_fault(intr_info))
6024 return true;
6025 return vmcs12->exception_bitmap &
6026 (1u << (intr_info & INTR_INFO_VECTOR_MASK));
6027 case EXIT_REASON_EXTERNAL_INTERRUPT:
6028 return nested_exit_on_intr(vcpu);
6029 case EXIT_REASON_TRIPLE_FAULT:
6030 return true;
6031 case EXIT_REASON_INTERRUPT_WINDOW:
6032 return nested_cpu_has(vmcs12, CPU_BASED_INTR_WINDOW_EXITING);
6033 case EXIT_REASON_NMI_WINDOW:
6034 return nested_cpu_has(vmcs12, CPU_BASED_NMI_WINDOW_EXITING);
6035 case EXIT_REASON_TASK_SWITCH:
6036 return true;
6037 case EXIT_REASON_CPUID:
6038 return true;
6039 case EXIT_REASON_HLT:
6040 return nested_cpu_has(vmcs12, CPU_BASED_HLT_EXITING);
6041 case EXIT_REASON_INVD:
6042 return true;
6043 case EXIT_REASON_INVLPG:
6044 return nested_cpu_has(vmcs12, CPU_BASED_INVLPG_EXITING);
6045 case EXIT_REASON_RDPMC:
6046 return nested_cpu_has(vmcs12, CPU_BASED_RDPMC_EXITING);
6047 case EXIT_REASON_RDRAND:
6048 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_RDRAND_EXITING);
6049 case EXIT_REASON_RDSEED:
6050 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_RDSEED_EXITING);
6051 case EXIT_REASON_RDTSC: case EXIT_REASON_RDTSCP:
6052 return nested_cpu_has(vmcs12, CPU_BASED_RDTSC_EXITING);
6053 case EXIT_REASON_VMREAD:
6054 return nested_vmx_exit_handled_vmcs_access(vcpu, vmcs12,
6055 vmcs12->vmread_bitmap);
6056 case EXIT_REASON_VMWRITE:
6057 return nested_vmx_exit_handled_vmcs_access(vcpu, vmcs12,
6058 vmcs12->vmwrite_bitmap);
6059 case EXIT_REASON_VMCALL: case EXIT_REASON_VMCLEAR:
6060 case EXIT_REASON_VMLAUNCH: case EXIT_REASON_VMPTRLD:
6061 case EXIT_REASON_VMPTRST: case EXIT_REASON_VMRESUME:
6062 case EXIT_REASON_VMOFF: case EXIT_REASON_VMON:
6063 case EXIT_REASON_INVEPT: case EXIT_REASON_INVVPID:
6064 /*
6065 * VMX instructions trap unconditionally. This allows L1 to
6066 * emulate them for its L2 guest, i.e., allows 3-level nesting!
6067 */
6068 return true;
6069 case EXIT_REASON_CR_ACCESS:
6070 return nested_vmx_exit_handled_cr(vcpu, vmcs12);
6071 case EXIT_REASON_DR_ACCESS:
6072 return nested_cpu_has(vmcs12, CPU_BASED_MOV_DR_EXITING);
6073 case EXIT_REASON_IO_INSTRUCTION:
6074 return nested_vmx_exit_handled_io(vcpu, vmcs12);
6075 case EXIT_REASON_GDTR_IDTR: case EXIT_REASON_LDTR_TR:
6076 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_DESC);
6077 case EXIT_REASON_MSR_READ:
6078 case EXIT_REASON_MSR_WRITE:
6079 return nested_vmx_exit_handled_msr(vcpu, vmcs12, exit_reason);
6080 case EXIT_REASON_INVALID_STATE:
6081 return true;
6082 case EXIT_REASON_MWAIT_INSTRUCTION:
6083 return nested_cpu_has(vmcs12, CPU_BASED_MWAIT_EXITING);
6084 case EXIT_REASON_MONITOR_TRAP_FLAG:
6085 return nested_vmx_exit_handled_mtf(vmcs12);
6086 case EXIT_REASON_MONITOR_INSTRUCTION:
6087 return nested_cpu_has(vmcs12, CPU_BASED_MONITOR_EXITING);
6088 case EXIT_REASON_PAUSE_INSTRUCTION:
6089 return nested_cpu_has(vmcs12, CPU_BASED_PAUSE_EXITING) ||
6090 nested_cpu_has2(vmcs12,
6091 SECONDARY_EXEC_PAUSE_LOOP_EXITING);
6092 case EXIT_REASON_MCE_DURING_VMENTRY:
6093 return true;
6094 case EXIT_REASON_TPR_BELOW_THRESHOLD:
6095 return nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW);
6096 case EXIT_REASON_APIC_ACCESS:
6097 case EXIT_REASON_APIC_WRITE:
6098 case EXIT_REASON_EOI_INDUCED:
6099 /*
6100 * The controls for "virtualize APIC accesses," "APIC-
6101 * register virtualization," and "virtual-interrupt
6102 * delivery" only come from vmcs12.
6103 */
6104 return true;
6105 case EXIT_REASON_INVPCID:
6106 return
6107 nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_INVPCID) &&
6108 nested_cpu_has(vmcs12, CPU_BASED_INVLPG_EXITING);
6109 case EXIT_REASON_WBINVD:
6110 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_WBINVD_EXITING);
6111 case EXIT_REASON_XSETBV:
6112 return true;
6113 case EXIT_REASON_XSAVES: case EXIT_REASON_XRSTORS:
6114 /*
6115 * This should never happen, since it is not possible to
6116 * set XSS to a non-zero value---neither in L1 nor in L2.
6117 * If it were, XSS would have to be checked against
6118 * the XSS exit bitmap in vmcs12.
6119 */
6120 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_XSAVES);
6121 case EXIT_REASON_UMWAIT:
6122 case EXIT_REASON_TPAUSE:
6123 return nested_cpu_has2(vmcs12,
6124 SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE);
6125 case EXIT_REASON_ENCLS:
6126 return nested_vmx_exit_handled_encls(vcpu, vmcs12);
6127 default:
6128 return true;
6129 }
6130 }
6131
6132 /*
6133 * Conditionally reflect a VM-Exit into L1. Returns %true if the VM-Exit was
6134 * reflected into L1.
6135 */
6136 bool nested_vmx_reflect_vmexit(struct kvm_vcpu *vcpu)
6137 {
6138 struct vcpu_vmx *vmx = to_vmx(vcpu);
6139 union vmx_exit_reason exit_reason = vmx->exit_reason;
6140 unsigned long exit_qual;
6141 u32 exit_intr_info;
6142
6143 WARN_ON_ONCE(vmx->nested.nested_run_pending);
6144
6145 /*
6146 * Late nested VM-Fail shares the same flow as nested VM-Exit since KVM
6147 * has already loaded L2's state.
6148 */
6149 if (unlikely(vmx->fail)) {
6150 trace_kvm_nested_vmenter_failed(
6151 "hardware VM-instruction error: ",
6152 vmcs_read32(VM_INSTRUCTION_ERROR));
6153 exit_intr_info = 0;
6154 exit_qual = 0;
6155 goto reflect_vmexit;
6156 }
6157
6158 trace_kvm_nested_vmexit(vcpu, KVM_ISA_VMX);
6159
6160 /* If L0 (KVM) wants the exit, it trumps L1's desires. */
6161 if (nested_vmx_l0_wants_exit(vcpu, exit_reason))
6162 return false;
6163
6164 /* If L1 doesn't want the exit, handle it in L0. */
6165 if (!nested_vmx_l1_wants_exit(vcpu, exit_reason))
6166 return false;
6167
6168 /*
6169 * vmcs.VM_EXIT_INTR_INFO is only valid for EXCEPTION_NMI exits. For
6170 * EXTERNAL_INTERRUPT, the value for vmcs12->vm_exit_intr_info would
6171 * need to be synthesized by querying the in-kernel LAPIC, but external
6172 * interrupts are never reflected to L1 so it's a non-issue.
6173 */
6174 exit_intr_info = vmx_get_intr_info(vcpu);
6175 if (is_exception_with_error_code(exit_intr_info)) {
6176 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
6177
6178 vmcs12->vm_exit_intr_error_code =
6179 vmcs_read32(VM_EXIT_INTR_ERROR_CODE);
6180 }
6181 exit_qual = vmx_get_exit_qual(vcpu);
6182
6183 reflect_vmexit:
6184 nested_vmx_vmexit(vcpu, exit_reason.full, exit_intr_info, exit_qual);
6185 return true;
6186 }
6187
6188 static int vmx_get_nested_state(struct kvm_vcpu *vcpu,
6189 struct kvm_nested_state __user *user_kvm_nested_state,
6190 u32 user_data_size)
6191 {
6192 struct vcpu_vmx *vmx;
6193 struct vmcs12 *vmcs12;
6194 struct kvm_nested_state kvm_state = {
6195 .flags = 0,
6196 .format = KVM_STATE_NESTED_FORMAT_VMX,
6197 .size = sizeof(kvm_state),
6198 .hdr.vmx.flags = 0,
6199 .hdr.vmx.vmxon_pa = INVALID_GPA,
6200 .hdr.vmx.vmcs12_pa = INVALID_GPA,
6201 .hdr.vmx.preemption_timer_deadline = 0,
6202 };
6203 struct kvm_vmx_nested_state_data __user *user_vmx_nested_state =
6204 &user_kvm_nested_state->data.vmx[0];
6205
6206 if (!vcpu)
6207 return kvm_state.size + sizeof(*user_vmx_nested_state);
6208
6209 vmx = to_vmx(vcpu);
6210 vmcs12 = get_vmcs12(vcpu);
6211
6212 if (nested_vmx_allowed(vcpu) &&
6213 (vmx->nested.vmxon || vmx->nested.smm.vmxon)) {
6214 kvm_state.hdr.vmx.vmxon_pa = vmx->nested.vmxon_ptr;
6215 kvm_state.hdr.vmx.vmcs12_pa = vmx->nested.current_vmptr;
6216
6217 if (vmx_has_valid_vmcs12(vcpu)) {
6218 kvm_state.size += sizeof(user_vmx_nested_state->vmcs12);
6219
6220 /* 'hv_evmcs_vmptr' can also be EVMPTR_MAP_PENDING here */
6221 if (vmx->nested.hv_evmcs_vmptr != EVMPTR_INVALID)
6222 kvm_state.flags |= KVM_STATE_NESTED_EVMCS;
6223
6224 if (is_guest_mode(vcpu) &&
6225 nested_cpu_has_shadow_vmcs(vmcs12) &&
6226 vmcs12->vmcs_link_pointer != INVALID_GPA)
6227 kvm_state.size += sizeof(user_vmx_nested_state->shadow_vmcs12);
6228 }
6229
6230 if (vmx->nested.smm.vmxon)
6231 kvm_state.hdr.vmx.smm.flags |= KVM_STATE_NESTED_SMM_VMXON;
6232
6233 if (vmx->nested.smm.guest_mode)
6234 kvm_state.hdr.vmx.smm.flags |= KVM_STATE_NESTED_SMM_GUEST_MODE;
6235
6236 if (is_guest_mode(vcpu)) {
6237 kvm_state.flags |= KVM_STATE_NESTED_GUEST_MODE;
6238
6239 if (vmx->nested.nested_run_pending)
6240 kvm_state.flags |= KVM_STATE_NESTED_RUN_PENDING;
6241
6242 if (vmx->nested.mtf_pending)
6243 kvm_state.flags |= KVM_STATE_NESTED_MTF_PENDING;
6244
6245 if (nested_cpu_has_preemption_timer(vmcs12) &&
6246 vmx->nested.has_preemption_timer_deadline) {
6247 kvm_state.hdr.vmx.flags |=
6248 KVM_STATE_VMX_PREEMPTION_TIMER_DEADLINE;
6249 kvm_state.hdr.vmx.preemption_timer_deadline =
6250 vmx->nested.preemption_timer_deadline;
6251 }
6252 }
6253 }
6254
6255 if (user_data_size < kvm_state.size)
6256 goto out;
6257
6258 if (copy_to_user(user_kvm_nested_state, &kvm_state, sizeof(kvm_state)))
6259 return -EFAULT;
6260
6261 if (!vmx_has_valid_vmcs12(vcpu))
6262 goto out;
6263
6264 /*
6265 * When running L2, the authoritative vmcs12 state is in the
6266 * vmcs02. When running L1, the authoritative vmcs12 state is
6267 * in the shadow or enlightened vmcs linked to vmcs01, unless
6268 * need_vmcs12_to_shadow_sync is set, in which case, the authoritative
6269 * vmcs12 state is in the vmcs12 already.
6270 */
6271 if (is_guest_mode(vcpu)) {
6272 sync_vmcs02_to_vmcs12(vcpu, vmcs12);
6273 sync_vmcs02_to_vmcs12_rare(vcpu, vmcs12);
6274 } else {
6275 copy_vmcs02_to_vmcs12_rare(vcpu, get_vmcs12(vcpu));
6276 if (!vmx->nested.need_vmcs12_to_shadow_sync) {
6277 if (evmptr_is_valid(vmx->nested.hv_evmcs_vmptr))
6278 /*
6279 * L1 hypervisor is not obliged to keep eVMCS
6280 * clean fields data always up-to-date while
6281 * not in guest mode, 'hv_clean_fields' is only
6282 * supposed to be accurate upon vmentry, so we need
6283 * to ignore it here and do a full copy.
6284 */
6285 copy_enlightened_to_vmcs12(vmx, 0);
6286 else if (enable_shadow_vmcs)
6287 copy_shadow_to_vmcs12(vmx);
6288 }
6289 }
6290
6291 BUILD_BUG_ON(sizeof(user_vmx_nested_state->vmcs12) < VMCS12_SIZE);
6292 BUILD_BUG_ON(sizeof(user_vmx_nested_state->shadow_vmcs12) < VMCS12_SIZE);
6293
6294 /*
6295 * Copy over the full allocated size of vmcs12 rather than just the size
6296 * of the struct.
6297 */
6298 if (copy_to_user(user_vmx_nested_state->vmcs12, vmcs12, VMCS12_SIZE))
6299 return -EFAULT;
6300
6301 if (nested_cpu_has_shadow_vmcs(vmcs12) &&
6302 vmcs12->vmcs_link_pointer != INVALID_GPA) {
6303 if (copy_to_user(user_vmx_nested_state->shadow_vmcs12,
6304 get_shadow_vmcs12(vcpu), VMCS12_SIZE))
6305 return -EFAULT;
6306 }
6307 out:
6308 return kvm_state.size;
6309 }
6310
6311 /*
6312 * Forcibly leave nested mode in order to be able to reset the VCPU later on.
6313 */
6314 void vmx_leave_nested(struct kvm_vcpu *vcpu)
6315 {
6316 if (is_guest_mode(vcpu)) {
6317 to_vmx(vcpu)->nested.nested_run_pending = 0;
6318 nested_vmx_vmexit(vcpu, -1, 0, 0);
6319 }
6320 free_nested(vcpu);
6321 }
6322
6323 static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
6324 struct kvm_nested_state __user *user_kvm_nested_state,
6325 struct kvm_nested_state *kvm_state)
6326 {
6327 struct vcpu_vmx *vmx = to_vmx(vcpu);
6328 struct vmcs12 *vmcs12;
6329 enum vm_entry_failure_code ignored;
6330 struct kvm_vmx_nested_state_data __user *user_vmx_nested_state =
6331 &user_kvm_nested_state->data.vmx[0];
6332 int ret;
6333
6334 if (kvm_state->format != KVM_STATE_NESTED_FORMAT_VMX)
6335 return -EINVAL;
6336
6337 if (kvm_state->hdr.vmx.vmxon_pa == INVALID_GPA) {
6338 if (kvm_state->hdr.vmx.smm.flags)
6339 return -EINVAL;
6340
6341 if (kvm_state->hdr.vmx.vmcs12_pa != INVALID_GPA)
6342 return -EINVAL;
6343
6344 /*
6345 * KVM_STATE_NESTED_EVMCS used to signal that KVM should
6346 * enable the eVMCS capability on the vCPU. However, the
6347 * code has since changed so that the flag signals that the
6348 * vmcs12 should be copied into the eVMCS in guest memory.
6349 *
6350 * To preserve backward compatibility, allow userspace
6351 * to set this flag even when there is no VMXON region.
6352 */
6353 if (kvm_state->flags & ~KVM_STATE_NESTED_EVMCS)
6354 return -EINVAL;
6355 } else {
6356 if (!nested_vmx_allowed(vcpu))
6357 return -EINVAL;
6358
6359 if (!page_address_valid(vcpu, kvm_state->hdr.vmx.vmxon_pa))
6360 return -EINVAL;
6361 }
6362
6363 if ((kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) &&
6364 (kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE))
6365 return -EINVAL;
6366
6367 if (kvm_state->hdr.vmx.smm.flags &
6368 ~(KVM_STATE_NESTED_SMM_GUEST_MODE | KVM_STATE_NESTED_SMM_VMXON))
6369 return -EINVAL;
6370
6371 if (kvm_state->hdr.vmx.flags & ~KVM_STATE_VMX_PREEMPTION_TIMER_DEADLINE)
6372 return -EINVAL;
6373
6374 /*
6375 * SMM temporarily disables VMX, so we cannot be in guest mode,
6376 * nor can VMLAUNCH/VMRESUME be pending. Outside SMM, SMM flags
6377 * must be zero.
6378 */
6379 if (is_smm(vcpu) ?
6380 (kvm_state->flags &
6381 (KVM_STATE_NESTED_GUEST_MODE | KVM_STATE_NESTED_RUN_PENDING))
6382 : kvm_state->hdr.vmx.smm.flags)
6383 return -EINVAL;
6384
6385 if ((kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) &&
6386 !(kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_VMXON))
6387 return -EINVAL;
6388
6389 if ((kvm_state->flags & KVM_STATE_NESTED_EVMCS) &&
6390 (!nested_vmx_allowed(vcpu) || !vmx->nested.enlightened_vmcs_enabled))
6391 return -EINVAL;
6392
6393 vmx_leave_nested(vcpu);
6394
6395 if (kvm_state->hdr.vmx.vmxon_pa == INVALID_GPA)
6396 return 0;
6397
6398 vmx->nested.vmxon_ptr = kvm_state->hdr.vmx.vmxon_pa;
6399 ret = enter_vmx_operation(vcpu);
6400 if (ret)
6401 return ret;
6402
6403 /* Empty 'VMXON' state is permitted if no VMCS loaded */
6404 if (kvm_state->size < sizeof(*kvm_state) + sizeof(*vmcs12)) {
6405 /* See vmx_has_valid_vmcs12. */
6406 if ((kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE) ||
6407 (kvm_state->flags & KVM_STATE_NESTED_EVMCS) ||
6408 (kvm_state->hdr.vmx.vmcs12_pa != INVALID_GPA))
6409 return -EINVAL;
6410 else
6411 return 0;
6412 }
6413
6414 if (kvm_state->hdr.vmx.vmcs12_pa != INVALID_GPA) {
6415 if (kvm_state->hdr.vmx.vmcs12_pa == kvm_state->hdr.vmx.vmxon_pa ||
6416 !page_address_valid(vcpu, kvm_state->hdr.vmx.vmcs12_pa))
6417 return -EINVAL;
6418
6419 set_current_vmptr(vmx, kvm_state->hdr.vmx.vmcs12_pa);
6420 } else if (kvm_state->flags & KVM_STATE_NESTED_EVMCS) {
6421 /*
6422 * nested_vmx_handle_enlightened_vmptrld() cannot be called
6423 * directly from here as HV_X64_MSR_VP_ASSIST_PAGE may not be
6424 * restored yet. EVMCS will be mapped from
6425 * nested_get_vmcs12_pages().
6426 */
6427 vmx->nested.hv_evmcs_vmptr = EVMPTR_MAP_PENDING;
6428 kvm_make_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
6429 } else {
6430 return -EINVAL;
6431 }
6432
6433 if (kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_VMXON) {
6434 vmx->nested.smm.vmxon = true;
6435 vmx->nested.vmxon = false;
6436
6437 if (kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE)
6438 vmx->nested.smm.guest_mode = true;
6439 }
6440
6441 vmcs12 = get_vmcs12(vcpu);
6442 if (copy_from_user(vmcs12, user_vmx_nested_state->vmcs12, sizeof(*vmcs12)))
6443 return -EFAULT;
6444
6445 if (vmcs12->hdr.revision_id != VMCS12_REVISION)
6446 return -EINVAL;
6447
6448 if (!(kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE))
6449 return 0;
6450
6451 vmx->nested.nested_run_pending =
6452 !!(kvm_state->flags & KVM_STATE_NESTED_RUN_PENDING);
6453
6454 vmx->nested.mtf_pending =
6455 !!(kvm_state->flags & KVM_STATE_NESTED_MTF_PENDING);
6456
6457 ret = -EINVAL;
6458 if (nested_cpu_has_shadow_vmcs(vmcs12) &&
6459 vmcs12->vmcs_link_pointer != INVALID_GPA) {
6460 struct vmcs12 *shadow_vmcs12 = get_shadow_vmcs12(vcpu);
6461
6462 if (kvm_state->size <
6463 sizeof(*kvm_state) +
6464 sizeof(user_vmx_nested_state->vmcs12) + sizeof(*shadow_vmcs12))
6465 goto error_guest_mode;
6466
6467 if (copy_from_user(shadow_vmcs12,
6468 user_vmx_nested_state->shadow_vmcs12,
6469 sizeof(*shadow_vmcs12))) {
6470 ret = -EFAULT;
6471 goto error_guest_mode;
6472 }
6473
6474 if (shadow_vmcs12->hdr.revision_id != VMCS12_REVISION ||
6475 !shadow_vmcs12->hdr.shadow_vmcs)
6476 goto error_guest_mode;
6477 }
6478
6479 vmx->nested.has_preemption_timer_deadline = false;
6480 if (kvm_state->hdr.vmx.flags & KVM_STATE_VMX_PREEMPTION_TIMER_DEADLINE) {
6481 vmx->nested.has_preemption_timer_deadline = true;
6482 vmx->nested.preemption_timer_deadline =
6483 kvm_state->hdr.vmx.preemption_timer_deadline;
6484 }
6485
6486 if (nested_vmx_check_controls(vcpu, vmcs12) ||
6487 nested_vmx_check_host_state(vcpu, vmcs12) ||
6488 nested_vmx_check_guest_state(vcpu, vmcs12, &ignored))
6489 goto error_guest_mode;
6490
6491 vmx->nested.dirty_vmcs12 = true;
6492 vmx->nested.force_msr_bitmap_recalc = true;
6493 ret = nested_vmx_enter_non_root_mode(vcpu, false);
6494 if (ret)
6495 goto error_guest_mode;
6496
6497 return 0;
6498
6499 error_guest_mode:
6500 vmx->nested.nested_run_pending = 0;
6501 return ret;
6502 }
6503
6504 void nested_vmx_set_vmcs_shadowing_bitmap(void)
6505 {
6506 if (enable_shadow_vmcs) {
6507 vmcs_write64(VMREAD_BITMAP, __pa(vmx_vmread_bitmap));
6508 vmcs_write64(VMWRITE_BITMAP, __pa(vmx_vmwrite_bitmap));
6509 }
6510 }
6511
6512 /*
6513 * Indexing into the vmcs12 uses the VMCS encoding rotated left by 6. Undo
6514 * that madness to get the encoding for comparison.
6515 */
6516 #define VMCS12_IDX_TO_ENC(idx) ((u16)(((u16)(idx) >> 6) | ((u16)(idx) << 10)))
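/*
 * Worked example (illustrative, not from the original source): IO_BITMAP_A
 * has encoding 0x2000; rotating it left by 6 on 16 bits gives index 0x8
 * into the vmcs12 offset table, and VMCS12_IDX_TO_ENC(0x8) =
 * (0x8 >> 6) | (0x8 << 10) = 0x2000 recovers the original encoding.
 */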
6517
6518 static u64 nested_vmx_calc_vmcs_enum_msr(void)
6519 {
6520 /*
6521 * Note these are the so called "index" of the VMCS field encoding, not
6522 * the index into vmcs12.
6523 */
6524 unsigned int max_idx, idx;
6525 int i;
6526
6527 /*
6528 * For better or worse, KVM allows VMREAD/VMWRITE to all fields in
6529 * vmcs12, regardless of whether or not the associated feature is
6530 * exposed to L1. Simply find the field with the highest index.
6531 */
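/*
 * For illustration (per the SDM field-encoding layout, an assumption not
 * spelled out in this file): bits 9:1 of an encoding are its "index", so
 * e.g. VM_EXIT_REASON (0x4402) has index 1 and GUEST_PHYSICAL_ADDRESS
 * (0x2400) has index 0.  The maximum found here is reported to L1 in
 * bits 9:1 of MSR_IA32_VMX_VMCS_ENUM via the shift below.
 */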
6532 max_idx = 0;
6533 for (i = 0; i < nr_vmcs12_fields; i++) {
6534 /* The vmcs12 table is very, very sparsely populated. */
6535 if (!vmcs12_field_offsets[i])
6536 continue;
6537
6538 idx = vmcs_field_index(VMCS12_IDX_TO_ENC(i));
6539 if (idx > max_idx)
6540 max_idx = idx;
6541 }
6542
6543 return (u64)max_idx << VMCS_FIELD_INDEX_SHIFT;
6544 }
6545
6546 /*
6547 * nested_vmx_setup_ctls_msrs() sets up variables containing the values to be
6548 * returned for the various VMX controls MSRs when nested VMX is enabled.
6549 * The same values should also be used to verify that vmcs12 control fields are
6550 * valid during nested entry from L1 to L2.
6551 * Each of these control msrs has a low and high 32-bit half: A low bit is on
6552 * if the corresponding bit in the (32-bit) control field *must* be on, and a
6553 * bit in the high half is on if the corresponding bit in the control field
6554 * may be on. See also vmx_control_verify().
6555 */
6556 void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps)
6557 {
6558 /*
6559 * Note that as a general rule, the high half of the MSRs (bits in
6560 * the control fields which may be 1) should be initialized by the
6561 * intersection of the underlying hardware's MSR (i.e., features which
6562 * can be supported) and the list of features we want to expose -
6563 * because they are known to be properly supported in our code.
6564 * Also, usually, the low half of the MSRs (bits which must be 1) can
6565 * be set to 0, meaning that L1 may turn off any of these bits. The
6566 * reason is that if one of these bits is necessary, it will appear
6567 * in vmcs01, and prepare_vmcs02, which bitwise-OR's the control
6568 * fields of vmcs01 and vmcs12, will keep the bit set in vmcs02 - and
6569 * nested_vmx_l1_wants_exit() will not pass the related exits to L1.
6570 * These rules have exceptions below.
6571 */
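/*
 * A minimal sketch of the resulting check (illustrative; the exact helper
 * is not shown here): a 32-bit control value 'val' requested by L1 is
 * acceptable iff
 *
 *     (val & low) == low  &&  (val & ~high) == 0
 *
 * i.e. every must-be-1 bit is set and nothing outside the may-be-1 mask
 * is set.
 */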
6572
6573 /* pin-based controls */
6574 rdmsr(MSR_IA32_VMX_PINBASED_CTLS,
6575 msrs->pinbased_ctls_low,
6576 msrs->pinbased_ctls_high);
6577 msrs->pinbased_ctls_low |=
6578 PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
6579 msrs->pinbased_ctls_high &=
6580 PIN_BASED_EXT_INTR_MASK |
6581 PIN_BASED_NMI_EXITING |
6582 PIN_BASED_VIRTUAL_NMIS |
6583 (enable_apicv ? PIN_BASED_POSTED_INTR : 0);
6584 msrs->pinbased_ctls_high |=
6585 PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR |
6586 PIN_BASED_VMX_PREEMPTION_TIMER;
6587
6588 /* exit controls */
6589 rdmsr(MSR_IA32_VMX_EXIT_CTLS,
6590 msrs->exit_ctls_low,
6591 msrs->exit_ctls_high);
6592 msrs->exit_ctls_low =
6593 VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR;
6594
6595 msrs->exit_ctls_high &=
6596 #ifdef CONFIG_X86_64
6597 VM_EXIT_HOST_ADDR_SPACE_SIZE |
6598 #endif
6599 VM_EXIT_LOAD_IA32_PAT | VM_EXIT_SAVE_IA32_PAT |
6600 VM_EXIT_CLEAR_BNDCFGS | VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL;
6601 msrs->exit_ctls_high |=
6602 VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR |
6603 VM_EXIT_LOAD_IA32_EFER | VM_EXIT_SAVE_IA32_EFER |
6604 VM_EXIT_SAVE_VMX_PREEMPTION_TIMER | VM_EXIT_ACK_INTR_ON_EXIT;
6605
6606 /* We support free control of debug control saving. */
6607 msrs->exit_ctls_low &= ~VM_EXIT_SAVE_DEBUG_CONTROLS;
6608
6609 /* entry controls */
6610 rdmsr(MSR_IA32_VMX_ENTRY_CTLS,
6611 msrs->entry_ctls_low,
6612 msrs->entry_ctls_high);
6613 msrs->entry_ctls_low =
6614 VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR;
6615 msrs->entry_ctls_high &=
6616 #ifdef CONFIG_X86_64
6617 VM_ENTRY_IA32E_MODE |
6618 #endif
6619 VM_ENTRY_LOAD_IA32_PAT | VM_ENTRY_LOAD_BNDCFGS |
6620 VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL;
6621 msrs->entry_ctls_high |=
6622 (VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR | VM_ENTRY_LOAD_IA32_EFER);
6623
6624 /* We support free control of debug control loading. */
6625 msrs->entry_ctls_low &= ~VM_ENTRY_LOAD_DEBUG_CONTROLS;
6626
6627 /* cpu-based controls */
6628 rdmsr(MSR_IA32_VMX_PROCBASED_CTLS,
6629 msrs->procbased_ctls_low,
6630 msrs->procbased_ctls_high);
6631 msrs->procbased_ctls_low =
6632 CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
6633 msrs->procbased_ctls_high &=
6634 CPU_BASED_INTR_WINDOW_EXITING |
6635 CPU_BASED_NMI_WINDOW_EXITING | CPU_BASED_USE_TSC_OFFSETTING |
6636 CPU_BASED_HLT_EXITING | CPU_BASED_INVLPG_EXITING |
6637 CPU_BASED_MWAIT_EXITING | CPU_BASED_CR3_LOAD_EXITING |
6638 CPU_BASED_CR3_STORE_EXITING |
6639 #ifdef CONFIG_X86_64
6640 CPU_BASED_CR8_LOAD_EXITING | CPU_BASED_CR8_STORE_EXITING |
6641 #endif
6642 CPU_BASED_MOV_DR_EXITING | CPU_BASED_UNCOND_IO_EXITING |
6643 CPU_BASED_USE_IO_BITMAPS | CPU_BASED_MONITOR_TRAP_FLAG |
6644 CPU_BASED_MONITOR_EXITING | CPU_BASED_RDPMC_EXITING |
6645 CPU_BASED_RDTSC_EXITING | CPU_BASED_PAUSE_EXITING |
6646 CPU_BASED_TPR_SHADOW | CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
6647 /*
6648 * We can allow some features even when not supported by the
6649 * hardware. For example, L1 can specify an MSR bitmap - and we
6650 * can use it to avoid exits to L1 - even when L0 runs L2
6651 * without MSR bitmaps.
6652 */
6653 msrs->procbased_ctls_high |=
6654 CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR |
6655 CPU_BASED_USE_MSR_BITMAPS;
6656
6657 /* We support free control of CR3 access interception. */
6658 msrs->procbased_ctls_low &=
6659 ~(CPU_BASED_CR3_LOAD_EXITING | CPU_BASED_CR3_STORE_EXITING);
6660
6661 /*
6662 * secondary cpu-based controls. Do not include those that
6663 * depend on CPUID bits; they are added later by
6664 * vmx_vcpu_after_set_cpuid().
6665 */
6666 if (msrs->procbased_ctls_high & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS)
6667 rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2,
6668 msrs->secondary_ctls_low,
6669 msrs->secondary_ctls_high);
6670
6671 msrs->secondary_ctls_low = 0;
6672 msrs->secondary_ctls_high &=
6673 SECONDARY_EXEC_DESC |
6674 SECONDARY_EXEC_ENABLE_RDTSCP |
6675 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
6676 SECONDARY_EXEC_WBINVD_EXITING |
6677 SECONDARY_EXEC_APIC_REGISTER_VIRT |
6678 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
6679 SECONDARY_EXEC_RDRAND_EXITING |
6680 SECONDARY_EXEC_ENABLE_INVPCID |
6681 SECONDARY_EXEC_RDSEED_EXITING |
6682 SECONDARY_EXEC_XSAVES |
6683 SECONDARY_EXEC_TSC_SCALING;
6684
6685 /*
6686 * We can emulate "VMCS shadowing," even if the hardware
6687 * doesn't support it.
6688 */
6689 msrs->secondary_ctls_high |=
6690 SECONDARY_EXEC_SHADOW_VMCS;
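/*
 * Rough rationale (a sketch, not part of the original comment): VMREAD and
 * VMWRITE executed in VMX non-root mode exit to L0 unless hardware
 * shadowing permits them, and the handle_vmread()/handle_vmwrite() exit
 * handlers installed below emulate those accesses against the in-memory
 * vmcs12 / shadow vmcs12 images, so L1 still sees correct shadowing
 * behavior.
 */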
6691
6692 if (enable_ept) {
6693 /* nested EPT: emulate EPT also to L1 */
6694 msrs->secondary_ctls_high |=
6695 SECONDARY_EXEC_ENABLE_EPT;
6696 msrs->ept_caps =
6697 VMX_EPT_PAGE_WALK_4_BIT |
6698 VMX_EPT_PAGE_WALK_5_BIT |
6699 VMX_EPTP_WB_BIT |
6700 VMX_EPT_INVEPT_BIT |
6701 VMX_EPT_EXECUTE_ONLY_BIT;
6702
6703 msrs->ept_caps &= ept_caps;
6704 msrs->ept_caps |= VMX_EPT_EXTENT_GLOBAL_BIT |
6705 VMX_EPT_EXTENT_CONTEXT_BIT | VMX_EPT_2MB_PAGE_BIT |
6706 VMX_EPT_1GB_PAGE_BIT;
6707 if (enable_ept_ad_bits) {
6708 msrs->secondary_ctls_high |=
6709 SECONDARY_EXEC_ENABLE_PML;
6710 msrs->ept_caps |= VMX_EPT_AD_BIT;
6711 }
6712 }
6713
6714 if (cpu_has_vmx_vmfunc()) {
6715 msrs->secondary_ctls_high |=
6716 SECONDARY_EXEC_ENABLE_VMFUNC;
6717 /*
6718 * Advertise EPTP switching unconditionally
6719 * since we emulate it
6720 */
6721 if (enable_ept)
6722 msrs->vmfunc_controls =
6723 VMX_VMFUNC_EPTP_SWITCHING;
6724 }
6725
6726 /*
6727 * Old versions of KVM use the single-context version without
6728 * checking for support, so declare that it is supported even
6729 * though it is treated as global context. The alternative is
6730 * not failing the single-context invvpid, and it is worse.
6731 */
6732 if (enable_vpid) {
6733 msrs->secondary_ctls_high |=
6734 SECONDARY_EXEC_ENABLE_VPID;
6735 msrs->vpid_caps = VMX_VPID_INVVPID_BIT |
6736 VMX_VPID_EXTENT_SUPPORTED_MASK;
6737 }
6738
6739 if (enable_unrestricted_guest)
6740 msrs->secondary_ctls_high |=
6741 SECONDARY_EXEC_UNRESTRICTED_GUEST;
6742
6743 if (flexpriority_enabled)
6744 msrs->secondary_ctls_high |=
6745 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
6746
6747 if (enable_sgx)
6748 msrs->secondary_ctls_high |= SECONDARY_EXEC_ENCLS_EXITING;
6749
6750 /* miscellaneous data */
6751 rdmsr(MSR_IA32_VMX_MISC,
6752 msrs->misc_low,
6753 msrs->misc_high);
6754 msrs->misc_low &= VMX_MISC_SAVE_EFER_LMA;
6755 msrs->misc_low |=
6756 MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS |
6757 VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE |
6758 VMX_MISC_ACTIVITY_HLT |
6759 VMX_MISC_ACTIVITY_WAIT_SIPI;
6760 msrs->misc_high = 0;
6761
6762 /*
6763 * This MSR reports some information about VMX support. We
6764 * should return information about the VMX we emulate for the
6765 * guest, and the VMCS structure we give it - not about the
6766 * VMX support of the underlying hardware.
6767 */
6768 msrs->basic =
6769 VMCS12_REVISION |
6770 VMX_BASIC_TRUE_CTLS |
6771 ((u64)VMCS12_SIZE << VMX_BASIC_VMCS_SIZE_SHIFT) |
6772 (VMX_BASIC_MEM_TYPE_WB << VMX_BASIC_MEM_TYPE_SHIFT);
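/*
 * Breakdown of the value built above (illustrative, following the SDM
 * layout of IA32_VMX_BASIC): bits 30:0 hold the VMCS revision identifier
 * (VMCS12_REVISION), bits 44:32 the VMCS region size (VMCS12_SIZE),
 * bits 53:50 the memory type (write-back), bit 54 the INS/OUTS
 * instruction-information reporting (added just below when supported),
 * and bit 55 advertises the TRUE control MSRs.
 */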
6773
6774 if (cpu_has_vmx_basic_inout())
6775 msrs->basic |= VMX_BASIC_INOUT;
6776
6777 /*
6778 * These MSRs specify bits which the guest must keep fixed on
6779 * while L1 is in VMXON mode (in L1's root mode, or running an L2).
6780 * We picked the standard core2 setting.
6781 */
6782 #define VMXON_CR0_ALWAYSON (X86_CR0_PE | X86_CR0_PG | X86_CR0_NE)
6783 #define VMXON_CR4_ALWAYSON X86_CR4_VMXE
6784 msrs->cr0_fixed0 = VMXON_CR0_ALWAYSON;
6785 msrs->cr4_fixed0 = VMXON_CR4_ALWAYSON;
6786
6787 /* These MSRs specify bits which the guest must keep fixed off. */
6788 rdmsrl(MSR_IA32_VMX_CR0_FIXED1, msrs->cr0_fixed1);
6789 rdmsrl(MSR_IA32_VMX_CR4_FIXED1, msrs->cr4_fixed1);
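/*
 * Sketch of the fixed-bit semantics (illustrative only): a CR0/CR4 value
 * 'cr' is legal while in VMX operation iff
 *
 *     (cr & fixed0) == fixed0  &&  (cr & ~fixed1) == 0
 *
 * i.e. every bit set in FIXED0 must be 1 and every bit clear in FIXED1
 * must remain 0.
 */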
6790
6791 if (vmx_umip_emulated())
6792 msrs->cr4_fixed1 |= X86_CR4_UMIP;
6793
6794 msrs->vmcs_enum = nested_vmx_calc_vmcs_enum_msr();
6795 }
6796
6797 void nested_vmx_hardware_unsetup(void)
6798 {
6799 int i;
6800
6801 if (enable_shadow_vmcs) {
6802 for (i = 0; i < VMX_BITMAP_NR; i++)
6803 free_page((unsigned long)vmx_bitmap[i]);
6804 }
6805 }
6806
6807 __init int nested_vmx_hardware_setup(int (*exit_handlers[])(struct kvm_vcpu *))
6808 {
6809 int i;
6810
6811 if (!cpu_has_vmx_shadow_vmcs())
6812 enable_shadow_vmcs = 0;
6813 if (enable_shadow_vmcs) {
6814 for (i = 0; i < VMX_BITMAP_NR; i++) {
6815 /*
6816 * The vmx_bitmap is not tied to a VM and so should
6817 * not be charged to a memcg.
6818 */
6819 vmx_bitmap[i] = (unsigned long *)
6820 __get_free_page(GFP_KERNEL);
6821 if (!vmx_bitmap[i]) {
6822 nested_vmx_hardware_unsetup();
6823 return -ENOMEM;
6824 }
6825 }
6826
6827 init_vmcs_shadow_fields();
6828 }
6829
6830 exit_handlers[EXIT_REASON_VMCLEAR] = handle_vmclear;
6831 exit_handlers[EXIT_REASON_VMLAUNCH] = handle_vmlaunch;
6832 exit_handlers[EXIT_REASON_VMPTRLD] = handle_vmptrld;
6833 exit_handlers[EXIT_REASON_VMPTRST] = handle_vmptrst;
6834 exit_handlers[EXIT_REASON_VMREAD] = handle_vmread;
6835 exit_handlers[EXIT_REASON_VMRESUME] = handle_vmresume;
6836 exit_handlers[EXIT_REASON_VMWRITE] = handle_vmwrite;
6837 exit_handlers[EXIT_REASON_VMOFF] = handle_vmoff;
6838 exit_handlers[EXIT_REASON_VMON] = handle_vmon;
6839 exit_handlers[EXIT_REASON_INVEPT] = handle_invept;
6840 exit_handlers[EXIT_REASON_INVVPID] = handle_invvpid;
6841 exit_handlers[EXIT_REASON_VMFUNC] = handle_vmfunc;
6842
6843 return 0;
6844 }
6845
6846 struct kvm_x86_nested_ops vmx_nested_ops = {
6847 .leave_nested = vmx_leave_nested,
6848 .check_events = vmx_check_nested_events,
6849 .handle_page_fault_workaround = nested_vmx_handle_page_fault_workaround,
6850 .hv_timer_pending = nested_vmx_preemption_timer_pending,
6851 .triple_fault = nested_vmx_triple_fault,
6852 .get_state = vmx_get_nested_state,
6853 .set_state = vmx_set_nested_state,
6854 .get_nested_state_pages = vmx_get_nested_state_pages,
6855 .write_log_dirty = nested_vmx_write_pml_buffer,
6856 .enable_evmcs = nested_enable_evmcs,
6857 .get_evmcs_version = nested_get_evmcs_version,
6858 };
6859