1 /*
2  * Kernel-based Virtual Machine driver for Linux
3  *
4  * derived from drivers/kvm/kvm_main.c
5  *
6  * Copyright (C) 2006 Qumranet, Inc.
7  * Copyright (C) 2008 Qumranet, Inc.
8  * Copyright IBM Corporation, 2008
9  * Copyright 2010 Red Hat, Inc. and/or its affiliates.
10  *
11  * Authors:
12  *   Avi Kivity   <avi@qumranet.com>
13  *   Yaniv Kamay  <yaniv@qumranet.com>
14  *   Amit Shah    <amit.shah@qumranet.com>
15  *   Ben-Ami Yassour <benami@il.ibm.com>
16  *
17  * This work is licensed under the terms of the GNU GPL, version 2.  See
18  * the COPYING file in the top-level directory.
19  *
20  */
21 
22 #include <linux/kvm_host.h>
23 #include "irq.h"
24 #include "mmu.h"
25 #include "i8254.h"
26 #include "tss.h"
27 #include "kvm_cache_regs.h"
28 #include "x86.h"
29 #include "cpuid.h"
30 
31 #include <linux/clocksource.h>
32 #include <linux/interrupt.h>
33 #include <linux/kvm.h>
34 #include <linux/fs.h>
35 #include <linux/vmalloc.h>
36 #include <linux/module.h>
37 #include <linux/mman.h>
38 #include <linux/highmem.h>
39 #include <linux/iommu.h>
40 #include <linux/intel-iommu.h>
41 #include <linux/cpufreq.h>
42 #include <linux/user-return-notifier.h>
43 #include <linux/srcu.h>
44 #include <linux/slab.h>
45 #include <linux/perf_event.h>
46 #include <linux/uaccess.h>
47 #include <linux/hash.h>
48 #include <linux/pci.h>
49 #include <trace/events/kvm.h>
50 
51 #define CREATE_TRACE_POINTS
52 #include "trace.h"
53 
54 #include <asm/debugreg.h>
55 #include <asm/msr.h>
56 #include <asm/desc.h>
57 #include <asm/mtrr.h>
58 #include <asm/mce.h>
59 #include <asm/i387.h>
60 #include <asm/fpu-internal.h> /* Ugh! */
61 #include <asm/xcr.h>
62 #include <asm/pvclock.h>
63 #include <asm/div64.h>
64 
65 #define MAX_IO_MSRS 256
66 #define KVM_MAX_MCE_BANKS 32
67 #define KVM_MCE_CAP_SUPPORTED (MCG_CTL_P | MCG_SER_P)
68 
69 #define emul_to_vcpu(ctxt) \
70 	container_of(ctxt, struct kvm_vcpu, arch.emulate_ctxt)
71 
72 /* EFER defaults:
73  * - enable syscall by default because it is emulated by KVM
74  * - enable LME and LMA by default on 64-bit KVM
75  */
76 #ifdef CONFIG_X86_64
77 static
78 u64 __read_mostly efer_reserved_bits = ~((u64)(EFER_SCE | EFER_LME | EFER_LMA));
79 #else
80 static u64 __read_mostly efer_reserved_bits = ~((u64)EFER_SCE);
81 #endif
82 
83 #define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
84 #define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
85 
86 static void update_cr8_intercept(struct kvm_vcpu *vcpu);
87 static void process_nmi(struct kvm_vcpu *vcpu);
88 
89 struct kvm_x86_ops *kvm_x86_ops;
90 EXPORT_SYMBOL_GPL(kvm_x86_ops);
91 
92 static bool ignore_msrs = 0;
93 module_param(ignore_msrs, bool, S_IRUGO | S_IWUSR);
94 
95 bool kvm_has_tsc_control;
96 EXPORT_SYMBOL_GPL(kvm_has_tsc_control);
97 u32  kvm_max_guest_tsc_khz;
98 EXPORT_SYMBOL_GPL(kvm_max_guest_tsc_khz);
99 
100 /* tsc tolerance in parts per million - default to 1/2 of the NTP threshold */
101 static u32 tsc_tolerance_ppm = 250;
102 module_param(tsc_tolerance_ppm, uint, S_IRUGO | S_IWUSR);
103 
104 #define KVM_NR_SHARED_MSRS 16
105 
106 struct kvm_shared_msrs_global {
107 	int nr;
108 	u32 msrs[KVM_NR_SHARED_MSRS];
109 };
110 
111 struct kvm_shared_msrs {
112 	struct user_return_notifier urn;
113 	bool registered;
114 	struct kvm_shared_msr_values {
115 		u64 host;
116 		u64 curr;
117 	} values[KVM_NR_SHARED_MSRS];
118 };
119 
120 static struct kvm_shared_msrs_global __read_mostly shared_msrs_global;
121 static DEFINE_PER_CPU(struct kvm_shared_msrs, shared_msrs);
122 
123 struct kvm_stats_debugfs_item debugfs_entries[] = {
124 	{ "pf_fixed", VCPU_STAT(pf_fixed) },
125 	{ "pf_guest", VCPU_STAT(pf_guest) },
126 	{ "tlb_flush", VCPU_STAT(tlb_flush) },
127 	{ "invlpg", VCPU_STAT(invlpg) },
128 	{ "exits", VCPU_STAT(exits) },
129 	{ "io_exits", VCPU_STAT(io_exits) },
130 	{ "mmio_exits", VCPU_STAT(mmio_exits) },
131 	{ "signal_exits", VCPU_STAT(signal_exits) },
132 	{ "irq_window", VCPU_STAT(irq_window_exits) },
133 	{ "nmi_window", VCPU_STAT(nmi_window_exits) },
134 	{ "halt_exits", VCPU_STAT(halt_exits) },
135 	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
136 	{ "hypercalls", VCPU_STAT(hypercalls) },
137 	{ "request_irq", VCPU_STAT(request_irq_exits) },
138 	{ "irq_exits", VCPU_STAT(irq_exits) },
139 	{ "host_state_reload", VCPU_STAT(host_state_reload) },
140 	{ "efer_reload", VCPU_STAT(efer_reload) },
141 	{ "fpu_reload", VCPU_STAT(fpu_reload) },
142 	{ "insn_emulation", VCPU_STAT(insn_emulation) },
143 	{ "insn_emulation_fail", VCPU_STAT(insn_emulation_fail) },
144 	{ "irq_injections", VCPU_STAT(irq_injections) },
145 	{ "nmi_injections", VCPU_STAT(nmi_injections) },
146 	{ "mmu_shadow_zapped", VM_STAT(mmu_shadow_zapped) },
147 	{ "mmu_pte_write", VM_STAT(mmu_pte_write) },
148 	{ "mmu_pte_updated", VM_STAT(mmu_pte_updated) },
149 	{ "mmu_pde_zapped", VM_STAT(mmu_pde_zapped) },
150 	{ "mmu_flooded", VM_STAT(mmu_flooded) },
151 	{ "mmu_recycled", VM_STAT(mmu_recycled) },
152 	{ "mmu_cache_miss", VM_STAT(mmu_cache_miss) },
153 	{ "mmu_unsync", VM_STAT(mmu_unsync) },
154 	{ "remote_tlb_flush", VM_STAT(remote_tlb_flush) },
155 	{ "largepages", VM_STAT(lpages) },
156 	{ NULL }
157 };
158 
159 u64 __read_mostly host_xcr0;
160 
161 int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt);
162 
163 static inline void kvm_async_pf_hash_reset(struct kvm_vcpu *vcpu)
164 {
165 	int i;
166 	for (i = 0; i < roundup_pow_of_two(ASYNC_PF_PER_VCPU); i++)
167 		vcpu->arch.apf.gfns[i] = ~0;
168 }
169 
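/*
 * Shared ("user return") MSRs are MSRs whose guest values can safely stay
 * loaded while the host kernel runs; they only have to be restored to their
 * host values before returning to host userspace.  This callback runs from
 * the user-return notifier and performs that restore.
 */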
170 static void kvm_on_user_return(struct user_return_notifier *urn)
171 {
172 	unsigned slot;
173 	struct kvm_shared_msrs *locals
174 		= container_of(urn, struct kvm_shared_msrs, urn);
175 	struct kvm_shared_msr_values *values;
176 
177 	for (slot = 0; slot < shared_msrs_global.nr; ++slot) {
178 		values = &locals->values[slot];
179 		if (values->host != values->curr) {
180 			wrmsrl(shared_msrs_global.msrs[slot], values->host);
181 			values->curr = values->host;
182 		}
183 	}
184 	locals->registered = false;
185 	user_return_notifier_unregister(urn);
186 }
187 
188 static void shared_msr_update(unsigned slot, u32 msr)
189 {
190 	struct kvm_shared_msrs *smsr;
191 	u64 value;
192 
193 	smsr = &__get_cpu_var(shared_msrs);
194 	/* Only reads here; nobody should modify it at this time,
195 	 * so no lock is needed. */
196 	if (slot >= shared_msrs_global.nr) {
197 		printk(KERN_ERR "kvm: invalid MSR slot!");
198 		return;
199 	}
200 	rdmsrl_safe(msr, &value);
201 	smsr->values[slot].host = value;
202 	smsr->values[slot].curr = value;
203 }
204 
205 void kvm_define_shared_msr(unsigned slot, u32 msr)
206 {
207 	if (slot >= shared_msrs_global.nr)
208 		shared_msrs_global.nr = slot + 1;
209 	shared_msrs_global.msrs[slot] = msr;
210 	/* make sure shared_msrs_global has been updated before it is read */
211 	smp_wmb();
212 }
213 EXPORT_SYMBOL_GPL(kvm_define_shared_msr);
214 
215 static void kvm_shared_msr_cpu_online(void)
216 {
217 	unsigned i;
218 
219 	for (i = 0; i < shared_msrs_global.nr; ++i)
220 		shared_msr_update(i, shared_msrs_global.msrs[i]);
221 }
222 
223 void kvm_set_shared_msr(unsigned slot, u64 value, u64 mask)
224 {
225 	struct kvm_shared_msrs *smsr = &__get_cpu_var(shared_msrs);
226 
227 	if (((value ^ smsr->values[slot].curr) & mask) == 0)
228 		return;
229 	smsr->values[slot].curr = value;
230 	wrmsrl(shared_msrs_global.msrs[slot], value);
231 	if (!smsr->registered) {
232 		smsr->urn.on_user_return = kvm_on_user_return;
233 		user_return_notifier_register(&smsr->urn);
234 		smsr->registered = true;
235 	}
236 }
237 EXPORT_SYMBOL_GPL(kvm_set_shared_msr);
238 
239 static void drop_user_return_notifiers(void *ignore)
240 {
241 	struct kvm_shared_msrs *smsr = &__get_cpu_var(shared_msrs);
242 
243 	if (smsr->registered)
244 		kvm_on_user_return(&smsr->urn);
245 }
246 
247 u64 kvm_get_apic_base(struct kvm_vcpu *vcpu)
248 {
249 	if (irqchip_in_kernel(vcpu->kvm))
250 		return vcpu->arch.apic_base;
251 	else
252 		return vcpu->arch.apic_base;
253 }
254 EXPORT_SYMBOL_GPL(kvm_get_apic_base);
255 
256 void kvm_set_apic_base(struct kvm_vcpu *vcpu, u64 data)
257 {
258 	/* TODO: reserve bits check */
259 	if (irqchip_in_kernel(vcpu->kvm))
260 		kvm_lapic_set_base(vcpu, data);
261 	else
262 		vcpu->arch.apic_base = data;
263 }
264 EXPORT_SYMBOL_GPL(kvm_set_apic_base);
265 
266 #define EXCPT_BENIGN		0
267 #define EXCPT_CONTRIBUTORY	1
268 #define EXCPT_PF		2
269 
270 static int exception_class(int vector)
271 {
272 	switch (vector) {
273 	case PF_VECTOR:
274 		return EXCPT_PF;
275 	case DE_VECTOR:
276 	case TS_VECTOR:
277 	case NP_VECTOR:
278 	case SS_VECTOR:
279 	case GP_VECTOR:
280 		return EXCPT_CONTRIBUTORY;
281 	default:
282 		break;
283 	}
284 	return EXCPT_BENIGN;
285 }
286 
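/*
 * Queue an exception for injection into the guest.  If another exception is
 * already pending, the two are merged: two contributory exceptions, or any
 * non-benign exception on top of a page fault, escalate to #DF (per SDM
 * Table 5-5); a fault on top of #DF becomes a triple fault; in all other
 * cases the new exception simply replaces the pending one.
 */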
287 static void kvm_multiple_exception(struct kvm_vcpu *vcpu,
288 		unsigned nr, bool has_error, u32 error_code,
289 		bool reinject)
290 {
291 	u32 prev_nr;
292 	int class1, class2;
293 
294 	kvm_make_request(KVM_REQ_EVENT, vcpu);
295 
296 	if (!vcpu->arch.exception.pending) {
297 	queue:
298 		vcpu->arch.exception.pending = true;
299 		vcpu->arch.exception.has_error_code = has_error;
300 		vcpu->arch.exception.nr = nr;
301 		vcpu->arch.exception.error_code = error_code;
302 		vcpu->arch.exception.reinject = reinject;
303 		return;
304 	}
305 
306 	/* an exception is already pending: see how the two combine */
307 	prev_nr = vcpu->arch.exception.nr;
308 	if (prev_nr == DF_VECTOR) {
309 		/* triple fault -> shutdown */
310 		kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
311 		return;
312 	}
313 	class1 = exception_class(prev_nr);
314 	class2 = exception_class(nr);
315 	if ((class1 == EXCPT_CONTRIBUTORY && class2 == EXCPT_CONTRIBUTORY)
316 		|| (class1 == EXCPT_PF && class2 != EXCPT_BENIGN)) {
317 		/* generate double fault per SDM Table 5-5 */
318 		vcpu->arch.exception.pending = true;
319 		vcpu->arch.exception.has_error_code = true;
320 		vcpu->arch.exception.nr = DF_VECTOR;
321 		vcpu->arch.exception.error_code = 0;
322 	} else
323 		/* replace previous exception with a new one in the hope
324 		   that instruction re-execution will regenerate the lost
325 		   exception */
326 		goto queue;
327 }
328 
329 void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr)
330 {
331 	kvm_multiple_exception(vcpu, nr, false, 0, false);
332 }
333 EXPORT_SYMBOL_GPL(kvm_queue_exception);
334 
335 void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr)
336 {
337 	kvm_multiple_exception(vcpu, nr, false, 0, true);
338 }
339 EXPORT_SYMBOL_GPL(kvm_requeue_exception);
340 
341 void kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err)
342 {
343 	if (err)
344 		kvm_inject_gp(vcpu, 0);
345 	else
346 		kvm_x86_ops->skip_emulated_instruction(vcpu);
347 }
348 EXPORT_SYMBOL_GPL(kvm_complete_insn_gp);
349 
350 void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault)
351 {
352 	++vcpu->stat.pf_guest;
353 	vcpu->arch.cr2 = fault->address;
354 	kvm_queue_exception_e(vcpu, PF_VECTOR, fault->error_code);
355 }
356 EXPORT_SYMBOL_GPL(kvm_inject_page_fault);
357 
358 void kvm_propagate_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault)
359 {
360 	if (mmu_is_nested(vcpu) && !fault->nested_page_fault)
361 		vcpu->arch.nested_mmu.inject_page_fault(vcpu, fault);
362 	else
363 		vcpu->arch.mmu.inject_page_fault(vcpu, fault);
364 }
365 
366 void kvm_inject_nmi(struct kvm_vcpu *vcpu)
367 {
368 	atomic_inc(&vcpu->arch.nmi_queued);
369 	kvm_make_request(KVM_REQ_NMI, vcpu);
370 }
371 EXPORT_SYMBOL_GPL(kvm_inject_nmi);
372 
373 void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
374 {
375 	kvm_multiple_exception(vcpu, nr, true, error_code, false);
376 }
377 EXPORT_SYMBOL_GPL(kvm_queue_exception_e);
378 
379 void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
380 {
381 	kvm_multiple_exception(vcpu, nr, true, error_code, true);
382 }
383 EXPORT_SYMBOL_GPL(kvm_requeue_exception_e);
384 
385 /*
386  * Checks if cpl <= required_cpl; if true, return true.  Otherwise queue
387  * a #GP and return false.
388  */
389 bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl)
390 {
391 	if (kvm_x86_ops->get_cpl(vcpu) <= required_cpl)
392 		return true;
393 	kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
394 	return false;
395 }
396 EXPORT_SYMBOL_GPL(kvm_require_cpl);
397 
398 /*
399  * This function is used to read from the physical memory of the currently
400  * running guest. The difference from kvm_read_guest_page is that this function
401  * can read from guest physical memory or from the guest's guest physical memory.
402  */
403 int kvm_read_guest_page_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
404 			    gfn_t ngfn, void *data, int offset, int len,
405 			    u32 access)
406 {
407 	gfn_t real_gfn;
408 	gpa_t ngpa;
409 
410 	ngpa     = gfn_to_gpa(ngfn);
411 	real_gfn = mmu->translate_gpa(vcpu, ngpa, access);
412 	if (real_gfn == UNMAPPED_GVA)
413 		return -EFAULT;
414 
415 	real_gfn = gpa_to_gfn(real_gfn);
416 
417 	return kvm_read_guest_page(vcpu->kvm, real_gfn, data, offset, len);
418 }
419 EXPORT_SYMBOL_GPL(kvm_read_guest_page_mmu);
420 
421 int kvm_read_nested_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn,
422 			       void *data, int offset, int len, u32 access)
423 {
424 	return kvm_read_guest_page_mmu(vcpu, vcpu->arch.walk_mmu, gfn,
425 				       data, offset, len, access);
426 }
427 
428 /*
429  * Load the PAE PDPTRs.  Return true if they are all valid.
430  */
431 int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3)
432 {
433 	gfn_t pdpt_gfn = cr3 >> PAGE_SHIFT;
434 	unsigned offset = ((cr3 & (PAGE_SIZE-1)) >> 5) << 2;
435 	int i;
436 	int ret;
437 	u64 pdpte[ARRAY_SIZE(mmu->pdptrs)];
438 
439 	ret = kvm_read_guest_page_mmu(vcpu, mmu, pdpt_gfn, pdpte,
440 				      offset * sizeof(u64), sizeof(pdpte),
441 				      PFERR_USER_MASK|PFERR_WRITE_MASK);
442 	if (ret < 0) {
443 		ret = 0;
444 		goto out;
445 	}
446 	for (i = 0; i < ARRAY_SIZE(pdpte); ++i) {
447 		if (is_present_gpte(pdpte[i]) &&
448 		    (pdpte[i] & vcpu->arch.mmu.rsvd_bits_mask[0][2])) {
449 			ret = 0;
450 			goto out;
451 		}
452 	}
453 	ret = 1;
454 
455 	memcpy(mmu->pdptrs, pdpte, sizeof(mmu->pdptrs));
456 	__set_bit(VCPU_EXREG_PDPTR,
457 		  (unsigned long *)&vcpu->arch.regs_avail);
458 	__set_bit(VCPU_EXREG_PDPTR,
459 		  (unsigned long *)&vcpu->arch.regs_dirty);
460 out:
461 
462 	return ret;
463 }
464 EXPORT_SYMBOL_GPL(load_pdptrs);
465 
466 static bool pdptrs_changed(struct kvm_vcpu *vcpu)
467 {
468 	u64 pdpte[ARRAY_SIZE(vcpu->arch.walk_mmu->pdptrs)];
469 	bool changed = true;
470 	int offset;
471 	gfn_t gfn;
472 	int r;
473 
474 	if (is_long_mode(vcpu) || !is_pae(vcpu))
475 		return false;
476 
477 	if (!test_bit(VCPU_EXREG_PDPTR,
478 		      (unsigned long *)&vcpu->arch.regs_avail))
479 		return true;
480 
481 	gfn = (kvm_read_cr3(vcpu) & ~31u) >> PAGE_SHIFT;
482 	offset = (kvm_read_cr3(vcpu) & ~31u) & (PAGE_SIZE - 1);
483 	r = kvm_read_nested_guest_page(vcpu, gfn, pdpte, offset, sizeof(pdpte),
484 				       PFERR_USER_MASK | PFERR_WRITE_MASK);
485 	if (r < 0)
486 		goto out;
487 	changed = memcmp(pdpte, vcpu->arch.walk_mmu->pdptrs, sizeof(pdpte)) != 0;
488 out:
489 
490 	return changed;
491 }
492 
493 int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
494 {
495 	unsigned long old_cr0 = kvm_read_cr0(vcpu);
496 	unsigned long update_bits = X86_CR0_PG | X86_CR0_WP |
497 				    X86_CR0_CD | X86_CR0_NW;
498 
499 	cr0 |= X86_CR0_ET;
500 
501 #ifdef CONFIG_X86_64
502 	if (cr0 & 0xffffffff00000000UL)
503 		return 1;
504 #endif
505 
506 	cr0 &= ~CR0_RESERVED_BITS;
507 
508 	if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD))
509 		return 1;
510 
511 	if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE))
512 		return 1;
513 
514 	if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
515 #ifdef CONFIG_X86_64
516 		if ((vcpu->arch.efer & EFER_LME)) {
517 			int cs_db, cs_l;
518 
519 			if (!is_pae(vcpu))
520 				return 1;
521 			kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
522 			if (cs_l)
523 				return 1;
524 		} else
525 #endif
526 		if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->arch.walk_mmu,
527 						 kvm_read_cr3(vcpu)))
528 			return 1;
529 	}
530 
531 	kvm_x86_ops->set_cr0(vcpu, cr0);
532 
533 	if ((cr0 ^ old_cr0) & X86_CR0_PG) {
534 		kvm_clear_async_pf_completion_queue(vcpu);
535 		kvm_async_pf_hash_reset(vcpu);
536 	}
537 
538 	if ((cr0 ^ old_cr0) & update_bits)
539 		kvm_mmu_reset_context(vcpu);
540 	return 0;
541 }
542 EXPORT_SYMBOL_GPL(kvm_set_cr0);
543 
544 void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
545 {
546 	(void)kvm_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~0x0eul) | (msw & 0x0f));
547 }
548 EXPORT_SYMBOL_GPL(kvm_lmsw);
549 
550 int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
551 {
552 	u64 xcr0;
553 
554 	/* Only support XCR_XFEATURE_ENABLED_MASK(xcr0) now  */
555 	if (index != XCR_XFEATURE_ENABLED_MASK)
556 		return 1;
557 	xcr0 = xcr;
558 	if (!(xcr0 & XSTATE_FP))
559 		return 1;
560 	if ((xcr0 & XSTATE_YMM) && !(xcr0 & XSTATE_SSE))
561 		return 1;
562 	if (xcr0 & ~host_xcr0)
563 		return 1;
564 	vcpu->arch.xcr0 = xcr0;
565 	vcpu->guest_xcr0_loaded = 0;
566 	return 0;
567 }
568 
569 int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
570 {
571 	if (kvm_x86_ops->get_cpl(vcpu) != 0 ||
572 	    __kvm_set_xcr(vcpu, index, xcr)) {
573 		kvm_inject_gp(vcpu, 0);
574 		return 1;
575 	}
576 	return 0;
577 }
578 EXPORT_SYMBOL_GPL(kvm_set_xcr);
579 
580 int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
581 {
582 	unsigned long old_cr4 = kvm_read_cr4(vcpu);
583 	unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE |
584 				   X86_CR4_PAE | X86_CR4_SMEP;
585 	if (cr4 & CR4_RESERVED_BITS)
586 		return 1;
587 
588 	if (!guest_cpuid_has_xsave(vcpu) && (cr4 & X86_CR4_OSXSAVE))
589 		return 1;
590 
591 	if (!guest_cpuid_has_smep(vcpu) && (cr4 & X86_CR4_SMEP))
592 		return 1;
593 
594 	if (!guest_cpuid_has_fsgsbase(vcpu) && (cr4 & X86_CR4_RDWRGSFS))
595 		return 1;
596 
597 	if (is_long_mode(vcpu)) {
598 		if (!(cr4 & X86_CR4_PAE))
599 			return 1;
600 	} else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE)
601 		   && ((cr4 ^ old_cr4) & pdptr_bits)
602 		   && !load_pdptrs(vcpu, vcpu->arch.walk_mmu,
603 				   kvm_read_cr3(vcpu)))
604 		return 1;
605 
606 	if (kvm_x86_ops->set_cr4(vcpu, cr4))
607 		return 1;
608 
609 	if ((cr4 ^ old_cr4) & pdptr_bits)
610 		kvm_mmu_reset_context(vcpu);
611 
612 	if ((cr4 ^ old_cr4) & X86_CR4_OSXSAVE)
613 		kvm_update_cpuid(vcpu);
614 
615 	return 0;
616 }
617 EXPORT_SYMBOL_GPL(kvm_set_cr4);
618 
619 int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
620 {
621 	if (cr3 == kvm_read_cr3(vcpu) && !pdptrs_changed(vcpu)) {
622 		kvm_mmu_sync_roots(vcpu);
623 		kvm_mmu_flush_tlb(vcpu);
624 		return 0;
625 	}
626 
627 	if (is_long_mode(vcpu)) {
628 		if (cr3 & CR3_L_MODE_RESERVED_BITS)
629 			return 1;
630 	} else {
631 		if (is_pae(vcpu)) {
632 			if (cr3 & CR3_PAE_RESERVED_BITS)
633 				return 1;
634 			if (is_paging(vcpu) &&
635 			    !load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3))
636 				return 1;
637 		}
638 		/*
639 		 * We don't check reserved bits in nonpae mode, because
640 		 * this isn't enforced, and VMware depends on this.
641 		 */
642 	}
643 
644 	/*
645 	 * Does the new cr3 value map to physical memory? (Note, we
646 	 * catch an invalid cr3 even in real-mode, because it would
647 	 * cause trouble later on when we turn on paging anyway.)
648 	 *
649 	 * A real CPU would silently accept an invalid cr3 and would
650 	 * attempt to use it - with largely undefined (and often hard
651 	 * to debug) behavior on the guest side.
652 	 */
653 	if (unlikely(!gfn_to_memslot(vcpu->kvm, cr3 >> PAGE_SHIFT)))
654 		return 1;
655 	vcpu->arch.cr3 = cr3;
656 	__set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
657 	vcpu->arch.mmu.new_cr3(vcpu);
658 	return 0;
659 }
660 EXPORT_SYMBOL_GPL(kvm_set_cr3);
661 
662 int kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
663 {
664 	if (cr8 & CR8_RESERVED_BITS)
665 		return 1;
666 	if (irqchip_in_kernel(vcpu->kvm))
667 		kvm_lapic_set_tpr(vcpu, cr8);
668 	else
669 		vcpu->arch.cr8 = cr8;
670 	return 0;
671 }
672 EXPORT_SYMBOL_GPL(kvm_set_cr8);
673 
674 unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu)
675 {
676 	if (irqchip_in_kernel(vcpu->kvm))
677 		return kvm_lapic_get_cr8(vcpu);
678 	else
679 		return vcpu->arch.cr8;
680 }
681 EXPORT_SYMBOL_GPL(kvm_get_cr8);
682 
683 static int __kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
684 {
685 	switch (dr) {
686 	case 0 ... 3:
687 		vcpu->arch.db[dr] = val;
688 		if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
689 			vcpu->arch.eff_db[dr] = val;
690 		break;
691 	case 4:
692 		if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
693 			return 1; /* #UD */
694 		/* fall through */
695 	case 6:
696 		if (val & 0xffffffff00000000ULL)
697 			return -1; /* #GP */
698 		vcpu->arch.dr6 = (val & DR6_VOLATILE) | DR6_FIXED_1;
699 		break;
700 	case 5:
701 		if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
702 			return 1; /* #UD */
703 		/* fall through */
704 	default: /* 7 */
705 		if (val & 0xffffffff00000000ULL)
706 			return -1; /* #GP */
707 		vcpu->arch.dr7 = (val & DR7_VOLATILE) | DR7_FIXED_1;
708 		if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) {
709 			kvm_x86_ops->set_dr7(vcpu, vcpu->arch.dr7);
710 			vcpu->arch.switch_db_regs = (val & DR7_BP_EN_MASK);
711 		}
712 		break;
713 	}
714 
715 	return 0;
716 }
717 
718 int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
719 {
720 	int res;
721 
722 	res = __kvm_set_dr(vcpu, dr, val);
723 	if (res > 0)
724 		kvm_queue_exception(vcpu, UD_VECTOR);
725 	else if (res < 0)
726 		kvm_inject_gp(vcpu, 0);
727 
728 	return res;
729 }
730 EXPORT_SYMBOL_GPL(kvm_set_dr);
731 
732 static int _kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val)
733 {
734 	switch (dr) {
735 	case 0 ... 3:
736 		*val = vcpu->arch.db[dr];
737 		break;
738 	case 4:
739 		if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
740 			return 1;
741 		/* fall through */
742 	case 6:
743 		*val = vcpu->arch.dr6;
744 		break;
745 	case 5:
746 		if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
747 			return 1;
748 		/* fall through */
749 	default: /* 7 */
750 		*val = vcpu->arch.dr7;
751 		break;
752 	}
753 
754 	return 0;
755 }
756 
757 int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val)
758 {
759 	if (_kvm_get_dr(vcpu, dr, val)) {
760 		kvm_queue_exception(vcpu, UD_VECTOR);
761 		return 1;
762 	}
763 	return 0;
764 }
765 EXPORT_SYMBOL_GPL(kvm_get_dr);
766 
767 bool kvm_rdpmc(struct kvm_vcpu *vcpu)
768 {
769 	u32 ecx = kvm_register_read(vcpu, VCPU_REGS_RCX);
770 	u64 data;
771 	int err;
772 
773 	err = kvm_pmu_read_pmc(vcpu, ecx, &data);
774 	if (err)
775 		return err;
776 	kvm_register_write(vcpu, VCPU_REGS_RAX, (u32)data);
777 	kvm_register_write(vcpu, VCPU_REGS_RDX, data >> 32);
778 	return err;
779 }
780 EXPORT_SYMBOL_GPL(kvm_rdpmc);
781 
782 /*
783  * List of msr numbers which we expose to userspace through KVM_GET_MSRS
784  * and KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST.
785  *
786  * This list is modified at module load time to reflect the
787  * capabilities of the host cpu. This capabilities test skips MSRs that are
788  * kvm-specific. Those are put at the beginning of the list.
789  */
790 
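/* The first KVM_SAVE_MSRS_BEGIN entries of msrs_to_save[] are the kvm/Hyper-V
 * specific MSRs that the host capability test skips. */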
791 #define KVM_SAVE_MSRS_BEGIN	9
792 static u32 msrs_to_save[] = {
793 	MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK,
794 	MSR_KVM_SYSTEM_TIME_NEW, MSR_KVM_WALL_CLOCK_NEW,
795 	HV_X64_MSR_GUEST_OS_ID, HV_X64_MSR_HYPERCALL,
796 	HV_X64_MSR_APIC_ASSIST_PAGE, MSR_KVM_ASYNC_PF_EN, MSR_KVM_STEAL_TIME,
797 	MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
798 	MSR_STAR,
799 #ifdef CONFIG_X86_64
800 	MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR,
801 #endif
802 	MSR_IA32_TSC, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA
803 };
804 
805 static unsigned num_msrs_to_save;
806 
807 static u32 emulated_msrs[] = {
808 	MSR_IA32_TSCDEADLINE,
809 	MSR_IA32_MISC_ENABLE,
810 	MSR_IA32_MCG_STATUS,
811 	MSR_IA32_MCG_CTL,
812 };
813 
814 static int set_efer(struct kvm_vcpu *vcpu, u64 efer)
815 {
816 	u64 old_efer = vcpu->arch.efer;
817 
818 	if (efer & efer_reserved_bits)
819 		return 1;
820 
821 	if (is_paging(vcpu)
822 	    && (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME))
823 		return 1;
824 
825 	if (efer & EFER_FFXSR) {
826 		struct kvm_cpuid_entry2 *feat;
827 
828 		feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
829 		if (!feat || !(feat->edx & bit(X86_FEATURE_FXSR_OPT)))
830 			return 1;
831 	}
832 
833 	if (efer & EFER_SVME) {
834 		struct kvm_cpuid_entry2 *feat;
835 
836 		feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
837 		if (!feat || !(feat->ecx & bit(X86_FEATURE_SVM)))
838 			return 1;
839 	}
840 
841 	efer &= ~EFER_LMA;
842 	efer |= vcpu->arch.efer & EFER_LMA;
843 
844 	kvm_x86_ops->set_efer(vcpu, efer);
845 
846 	vcpu->arch.mmu.base_role.nxe = (efer & EFER_NX) && !tdp_enabled;
847 
848 	/* Update reserved bits */
849 	if ((efer ^ old_efer) & EFER_NX)
850 		kvm_mmu_reset_context(vcpu);
851 
852 	return 0;
853 }
854 
855 void kvm_enable_efer_bits(u64 mask)
856 {
857        efer_reserved_bits &= ~mask;
858 }
859 EXPORT_SYMBOL_GPL(kvm_enable_efer_bits);
860 
861 
862 /*
863  * Writes msr value into the appropriate "register".
864  * Returns 0 on success, non-0 otherwise.
865  * Assumes vcpu_load() was already called.
866  */
867 int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
868 {
869 	return kvm_x86_ops->set_msr(vcpu, msr_index, data);
870 }
871 
872 /*
873  * Adapt set_msr() to msr_io()'s calling convention
874  */
875 static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
876 {
877 	return kvm_set_msr(vcpu, index, *data);
878 }
879 
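/*
 * Publish the wall-clock structure to the guest.  The version field is left
 * odd while the contents are being rewritten and bumped to an even value
 * afterwards, so the guest can detect and retry a torn read.
 */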
880 static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock)
881 {
882 	int version;
883 	int r;
884 	struct pvclock_wall_clock wc;
885 	struct timespec boot;
886 
887 	if (!wall_clock)
888 		return;
889 
890 	r = kvm_read_guest(kvm, wall_clock, &version, sizeof(version));
891 	if (r)
892 		return;
893 
894 	if (version & 1)
895 		++version;  /* first time write, random junk */
896 
897 	++version;
898 
899 	kvm_write_guest(kvm, wall_clock, &version, sizeof(version));
900 
901 	/*
902 	 * The guest calculates current wall clock time by adding
903 	 * system time (updated by kvm_guest_time_update below) to the
904 	 * wall clock specified here.  guest system time equals host
905 	 * system time for us, thus we must fill in host boot time here.
906 	 */
907 	getboottime(&boot);
908 
909 	wc.sec = boot.tv_sec;
910 	wc.nsec = boot.tv_nsec;
911 	wc.version = version;
912 
913 	kvm_write_guest(kvm, wall_clock, &wc, sizeof(wc));
914 
915 	version++;
916 	kvm_write_guest(kvm, wall_clock, &version, sizeof(version));
917 }
918 
919 static uint32_t div_frac(uint32_t dividend, uint32_t divisor)
920 {
921 	uint32_t quotient, remainder;
922 
923 	/* Don't try to replace with do_div(), this one calculates
924 	 * "(dividend << 32) / divisor" */
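	/* Example: div_frac(1, 3) == 0x55555555, i.e. 1/3 as a 32-bit binary fraction. */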
925 	__asm__ ( "divl %4"
926 		  : "=a" (quotient), "=d" (remainder)
927 		  : "0" (0), "1" (dividend), "r" (divisor) );
928 	return quotient;
929 }
930 
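/*
 * Find a shift and a 32-bit fractional multiplier such that a tick count
 * taken at base_khz can be rescaled to scaled_khz with pvclock_scale_delta(),
 * i.e. roughly scaled = ((base << shift) * mul) >> 32, where a negative shift
 * means a right shift.
 */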
931 static void kvm_get_time_scale(uint32_t scaled_khz, uint32_t base_khz,
932 			       s8 *pshift, u32 *pmultiplier)
933 {
934 	uint64_t scaled64;
935 	int32_t  shift = 0;
936 	uint64_t tps64;
937 	uint32_t tps32;
938 
939 	tps64 = base_khz * 1000LL;
940 	scaled64 = scaled_khz * 1000LL;
941 	while (tps64 > scaled64*2 || tps64 & 0xffffffff00000000ULL) {
942 		tps64 >>= 1;
943 		shift--;
944 	}
945 
946 	tps32 = (uint32_t)tps64;
947 	while (tps32 <= scaled64 || scaled64 & 0xffffffff00000000ULL) {
948 		if (scaled64 & 0xffffffff00000000ULL || tps32 & 0x80000000)
949 			scaled64 >>= 1;
950 		else
951 			tps32 <<= 1;
952 		shift++;
953 	}
954 
955 	*pshift = shift;
956 	*pmultiplier = div_frac(scaled64, tps32);
957 
958 	pr_debug("%s: base_khz %u => %u, shift %d, mul %u\n",
959 		 __func__, base_khz, scaled_khz, shift, *pmultiplier);
960 }
961 
962 static inline u64 get_kernel_ns(void)
963 {
964 	struct timespec ts;
965 
966 	WARN_ON(preemptible());
967 	ktime_get_ts(&ts);
968 	monotonic_to_bootbased(&ts);
969 	return timespec_to_ns(&ts);
970 }
971 
972 static DEFINE_PER_CPU(unsigned long, cpu_tsc_khz);
973 unsigned long max_tsc_khz;
974 
975 static inline u64 nsec_to_cycles(struct kvm_vcpu *vcpu, u64 nsec)
976 {
977 	return pvclock_scale_delta(nsec, vcpu->arch.virtual_tsc_mult,
978 				   vcpu->arch.virtual_tsc_shift);
979 }
980 
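/* Scale khz by (1 + ppm/1,000,000); used to turn the ppm tolerance into
 * absolute kHz bounds around the host TSC rate. */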
981 static u32 adjust_tsc_khz(u32 khz, s32 ppm)
982 {
983 	u64 v = (u64)khz * (1000000 + ppm);
984 	do_div(v, 1000000);
985 	return v;
986 }
987 
988 static void kvm_set_tsc_khz(struct kvm_vcpu *vcpu, u32 this_tsc_khz)
989 {
990 	u32 thresh_lo, thresh_hi;
991 	int use_scaling = 0;
992 
993 	/* Compute a scale to convert nanoseconds in TSC cycles */
994 	kvm_get_time_scale(this_tsc_khz, NSEC_PER_SEC / 1000,
995 			   &vcpu->arch.virtual_tsc_shift,
996 			   &vcpu->arch.virtual_tsc_mult);
997 	vcpu->arch.virtual_tsc_khz = this_tsc_khz;
998 
999 	/*
1000 	 * Compute the variation in TSC rate which is acceptable
1001 	 * within the range of tolerance, and decide whether the
1002 	 * rate being applied is within those bounds of the hardware
1003 	 * rate.  If so, no scaling or compensation need be done.
1004 	 */
1005 	thresh_lo = adjust_tsc_khz(tsc_khz, -tsc_tolerance_ppm);
1006 	thresh_hi = adjust_tsc_khz(tsc_khz, tsc_tolerance_ppm);
1007 	if (this_tsc_khz < thresh_lo || this_tsc_khz > thresh_hi) {
1008 		pr_debug("kvm: requested TSC rate %u falls outside tolerance [%u,%u]\n", this_tsc_khz, thresh_lo, thresh_hi);
1009 		use_scaling = 1;
1010 	}
1011 	kvm_x86_ops->set_tsc_khz(vcpu, this_tsc_khz, use_scaling);
1012 }
1013 
1014 static u64 compute_guest_tsc(struct kvm_vcpu *vcpu, s64 kernel_ns)
1015 {
1016 	u64 tsc = pvclock_scale_delta(kernel_ns-vcpu->arch.this_tsc_nsec,
1017 				      vcpu->arch.virtual_tsc_mult,
1018 				      vcpu->arch.virtual_tsc_shift);
1019 	tsc += vcpu->arch.this_tsc_write;
1020 	return tsc;
1021 }
1022 
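/*
 * Handle a guest TSC write: compute the TSC offset the hardware should apply.
 * Writes whose virtual cycle time differs from elapsed real time by less than
 * one second are treated as an attempt to synchronize TSCs across VCPUs and
 * are matched to the same generation; see the comments below.
 */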
1023 void kvm_write_tsc(struct kvm_vcpu *vcpu, u64 data)
1024 {
1025 	struct kvm *kvm = vcpu->kvm;
1026 	u64 offset, ns, elapsed;
1027 	unsigned long flags;
1028 	s64 usdiff;
1029 
1030 	raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags);
1031 	offset = kvm_x86_ops->compute_tsc_offset(vcpu, data);
1032 	ns = get_kernel_ns();
1033 	elapsed = ns - kvm->arch.last_tsc_nsec;
1034 
1035 	/* n.b - signed multiplication and division required */
1036 	usdiff = data - kvm->arch.last_tsc_write;
1037 #ifdef CONFIG_X86_64
1038 	usdiff = (usdiff * 1000) / vcpu->arch.virtual_tsc_khz;
1039 #else
1040 	/* do_div() only does unsigned */
1041 	asm("idivl %2; xor %%edx, %%edx"
1042 	    : "=A"(usdiff)
1043 	    : "A"(usdiff * 1000), "rm"(vcpu->arch.virtual_tsc_khz));
1044 #endif
1045 	do_div(elapsed, 1000);
1046 	usdiff -= elapsed;
1047 	if (usdiff < 0)
1048 		usdiff = -usdiff;
1049 
1050 	/*
1051 	 * Special case: TSC write with a small delta (1 second) of virtual
1052 	 * cycle time against real time is interpreted as an attempt to
1053 	 * synchronize the CPU.
1054          *
1055 	 * For a reliable TSC, we can match TSC offsets, and for an unstable
1056 	 * TSC, we add elapsed time in this computation.  We could let the
1057 	 * compensation code attempt to catch up if we fall behind, but
1058 	 * it's better to try to match offsets from the beginning.
1059          */
1060 	if (usdiff < USEC_PER_SEC &&
1061 	    vcpu->arch.virtual_tsc_khz == kvm->arch.last_tsc_khz) {
1062 		if (!check_tsc_unstable()) {
1063 			offset = kvm->arch.cur_tsc_offset;
1064 			pr_debug("kvm: matched tsc offset for %llu\n", data);
1065 		} else {
1066 			u64 delta = nsec_to_cycles(vcpu, elapsed);
1067 			data += delta;
1068 			offset = kvm_x86_ops->compute_tsc_offset(vcpu, data);
1069 			pr_debug("kvm: adjusted tsc offset by %llu\n", delta);
1070 		}
1071 	} else {
1072 		/*
1073 		 * We split periods of matched TSC writes into generations.
1074 		 * For each generation, we track the original measured
1075 		 * nanosecond time, offset, and write, so if TSCs are in
1076 		 * sync, we can match exact offset, and if not, we can match
1077 		 * exact software computation in compute_guest_tsc()
1078 		 *
1079 		 * These values are tracked in kvm->arch.cur_xxx variables.
1080 		 */
1081 		kvm->arch.cur_tsc_generation++;
1082 		kvm->arch.cur_tsc_nsec = ns;
1083 		kvm->arch.cur_tsc_write = data;
1084 		kvm->arch.cur_tsc_offset = offset;
1085 		pr_debug("kvm: new tsc generation %u, clock %llu\n",
1086 			 kvm->arch.cur_tsc_generation, data);
1087 	}
1088 
1089 	/*
1090 	 * We also track the most recent recorded KHZ, write and time to
1091 	 * allow the matching interval to be extended at each write.
1092 	 */
1093 	kvm->arch.last_tsc_nsec = ns;
1094 	kvm->arch.last_tsc_write = data;
1095 	kvm->arch.last_tsc_khz = vcpu->arch.virtual_tsc_khz;
1096 
1097 	/* Reset of TSC must disable overshoot protection below */
1098 	vcpu->arch.hv_clock.tsc_timestamp = 0;
1099 	vcpu->arch.last_guest_tsc = data;
1100 
1101 	/* Keep track of which generation this VCPU has synchronized to */
1102 	vcpu->arch.this_tsc_generation = kvm->arch.cur_tsc_generation;
1103 	vcpu->arch.this_tsc_nsec = kvm->arch.cur_tsc_nsec;
1104 	vcpu->arch.this_tsc_write = kvm->arch.cur_tsc_write;
1105 
1106 	kvm_x86_ops->write_tsc_offset(vcpu, offset);
1107 	raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags);
1108 }
1109 
1110 EXPORT_SYMBOL_GPL(kvm_write_tsc);
1111 
1112 static int kvm_guest_time_update(struct kvm_vcpu *v)
1113 {
1114 	unsigned long flags;
1115 	struct kvm_vcpu_arch *vcpu = &v->arch;
1116 	unsigned long this_tsc_khz;
1117 	s64 kernel_ns, max_kernel_ns;
1118 	u64 tsc_timestamp;
1119 
1120 	/* Keep irq disabled to prevent changes to the clock */
1121 	local_irq_save(flags);
1122 	tsc_timestamp = kvm_x86_ops->read_l1_tsc(v);
1123 	kernel_ns = get_kernel_ns();
1124 	this_tsc_khz = __get_cpu_var(cpu_tsc_khz);
1125 	if (unlikely(this_tsc_khz == 0)) {
1126 		local_irq_restore(flags);
1127 		kvm_make_request(KVM_REQ_CLOCK_UPDATE, v);
1128 		return 1;
1129 	}
1130 
1131 	/*
1132 	 * We may have to catch up the TSC to match elapsed wall clock
1133 	 * time for two reasons, even if kvmclock is used.
1134 	 *   1) CPU could have been running below the maximum TSC rate
1135 	 *   2) Broken TSC compensation resets the base at each VCPU
1136 	 *      entry to avoid unknown leaps of TSC even when running
1137 	 *      again on the same CPU.  This may cause apparent elapsed
1138 	 *      time to disappear, and the guest to stand still or run
1139 	 *	very slowly.
1140 	 */
1141 	if (vcpu->tsc_catchup) {
1142 		u64 tsc = compute_guest_tsc(v, kernel_ns);
1143 		if (tsc > tsc_timestamp) {
1144 			adjust_tsc_offset_guest(v, tsc - tsc_timestamp);
1145 			tsc_timestamp = tsc;
1146 		}
1147 	}
1148 
1149 	local_irq_restore(flags);
1150 
1151 	if (!vcpu->pv_time_enabled)
1152 		return 0;
1153 
1154 	/*
1155 	 * Time as measured by the TSC may go backwards when resetting the base
1156 	 * tsc_timestamp.  The reason for this is that the TSC resolution is
1157 	 * higher than the resolution of the other clock scales.  Thus, many
1158 	 * possible measurements of the TSC correspond to one measurement of any
1159 	 * other clock, and so a spread of values is possible.  This is not a
1160 	 * problem for the computation of the nanosecond clock; with TSC rates
1161 	 * around 1GHZ, there can only be a few cycles which correspond to one
1162 	 * nanosecond value, and any path through this code will inevitably
1163 	 * take longer than that.  However, with the kernel_ns value itself,
1164 	 * the precision may be much lower, down to HZ granularity.  If the
1165 	 * first sampling of TSC against kernel_ns ends in the low part of the
1166 	 * range, and the second in the high end of the range, we can get:
1167 	 *
1168 	 * (TSC - offset_low) * S + kns_old > (TSC - offset_high) * S + kns_new
1169 	 *
1170 	 * As the sampling errors potentially range in the thousands of cycles,
1171 	 * it is possible such a time value has already been observed by the
1172 	 * guest.  To protect against this, we must compute the system time as
1173 	 * observed by the guest and ensure the new system time is greater.
1174 	 */
1175 	max_kernel_ns = 0;
1176 	if (vcpu->hv_clock.tsc_timestamp) {
1177 		max_kernel_ns = vcpu->last_guest_tsc -
1178 				vcpu->hv_clock.tsc_timestamp;
1179 		max_kernel_ns = pvclock_scale_delta(max_kernel_ns,
1180 				    vcpu->hv_clock.tsc_to_system_mul,
1181 				    vcpu->hv_clock.tsc_shift);
1182 		max_kernel_ns += vcpu->last_kernel_ns;
1183 	}
1184 
1185 	if (unlikely(vcpu->hw_tsc_khz != this_tsc_khz)) {
1186 		kvm_get_time_scale(NSEC_PER_SEC / 1000, this_tsc_khz,
1187 				   &vcpu->hv_clock.tsc_shift,
1188 				   &vcpu->hv_clock.tsc_to_system_mul);
1189 		vcpu->hw_tsc_khz = this_tsc_khz;
1190 	}
1191 
1192 	if (max_kernel_ns > kernel_ns)
1193 		kernel_ns = max_kernel_ns;
1194 
1195 	/* With all the info we got, fill in the values */
1196 	vcpu->hv_clock.tsc_timestamp = tsc_timestamp;
1197 	vcpu->hv_clock.system_time = kernel_ns + v->kvm->arch.kvmclock_offset;
1198 	vcpu->last_kernel_ns = kernel_ns;
1199 	vcpu->last_guest_tsc = tsc_timestamp;
1200 	vcpu->hv_clock.flags = 0;
1201 
1202 	/*
1203 	 * The interface expects us to write an even number signaling that the
1204 	 * update is finished. Since the guest won't see the intermediate
1205 	 * state, we just increase by 2 at the end.
1206 	 */
1207 	vcpu->hv_clock.version += 2;
1208 
1209 	kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
1210 				&vcpu->hv_clock,
1211 				sizeof(vcpu->hv_clock));
1212 	return 0;
1213 }
1214 
1215 static bool msr_mtrr_valid(unsigned msr)
1216 {
1217 	switch (msr) {
1218 	case 0x200 ... 0x200 + 2 * KVM_NR_VAR_MTRR - 1:
1219 	case MSR_MTRRfix64K_00000:
1220 	case MSR_MTRRfix16K_80000:
1221 	case MSR_MTRRfix16K_A0000:
1222 	case MSR_MTRRfix4K_C0000:
1223 	case MSR_MTRRfix4K_C8000:
1224 	case MSR_MTRRfix4K_D0000:
1225 	case MSR_MTRRfix4K_D8000:
1226 	case MSR_MTRRfix4K_E0000:
1227 	case MSR_MTRRfix4K_E8000:
1228 	case MSR_MTRRfix4K_F0000:
1229 	case MSR_MTRRfix4K_F8000:
1230 	case MSR_MTRRdefType:
1231 	case MSR_IA32_CR_PAT:
1232 		return true;
1233 	case 0x2f8:
1234 		return true;
1235 	}
1236 	return false;
1237 }
1238 
1239 static bool valid_pat_type(unsigned t)
1240 {
1241 	return t < 8 && (1 << t) & 0xf3; /* 0, 1, 4, 5, 6, 7 */
1242 }
1243 
1244 static bool valid_mtrr_type(unsigned t)
1245 {
1246 	return t < 8 && (1 << t) & 0x73; /* 0, 1, 4, 5, 6 */
1247 }
1248 
1249 static bool mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data)
1250 {
1251 	int i;
1252 
1253 	if (!msr_mtrr_valid(msr))
1254 		return false;
1255 
1256 	if (msr == MSR_IA32_CR_PAT) {
1257 		for (i = 0; i < 8; i++)
1258 			if (!valid_pat_type((data >> (i * 8)) & 0xff))
1259 				return false;
1260 		return true;
1261 	} else if (msr == MSR_MTRRdefType) {
1262 		if (data & ~0xcff)
1263 			return false;
1264 		return valid_mtrr_type(data & 0xff);
1265 	} else if (msr >= MSR_MTRRfix64K_00000 && msr <= MSR_MTRRfix4K_F8000) {
1266 		for (i = 0; i < 8 ; i++)
1267 			if (!valid_mtrr_type((data >> (i * 8)) & 0xff))
1268 				return false;
1269 		return true;
1270 	}
1271 
1272 	/* variable MTRRs */
1273 	return valid_mtrr_type(data & 0xff);
1274 }
1275 
1276 static int set_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
1277 {
1278 	u64 *p = (u64 *)&vcpu->arch.mtrr_state.fixed_ranges;
1279 
1280 	if (!mtrr_valid(vcpu, msr, data))
1281 		return 1;
1282 
1283 	if (msr == MSR_MTRRdefType) {
1284 		vcpu->arch.mtrr_state.def_type = data;
1285 		vcpu->arch.mtrr_state.enabled = (data & 0xc00) >> 10;
1286 	} else if (msr == MSR_MTRRfix64K_00000)
1287 		p[0] = data;
1288 	else if (msr == MSR_MTRRfix16K_80000 || msr == MSR_MTRRfix16K_A0000)
1289 		p[1 + msr - MSR_MTRRfix16K_80000] = data;
1290 	else if (msr >= MSR_MTRRfix4K_C0000 && msr <= MSR_MTRRfix4K_F8000)
1291 		p[3 + msr - MSR_MTRRfix4K_C0000] = data;
1292 	else if (msr == MSR_IA32_CR_PAT)
1293 		vcpu->arch.pat = data;
1294 	else {	/* Variable MTRRs */
1295 		int idx, is_mtrr_mask;
1296 		u64 *pt;
1297 
1298 		idx = (msr - 0x200) / 2;
1299 		is_mtrr_mask = msr - 0x200 - 2 * idx;
1300 		if (!is_mtrr_mask)
1301 			pt =
1302 			  (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].base_lo;
1303 		else
1304 			pt =
1305 			  (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].mask_lo;
1306 		*pt = data;
1307 	}
1308 
1309 	kvm_mmu_reset_context(vcpu);
1310 	return 0;
1311 }
1312 
1313 static int set_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 data)
1314 {
1315 	u64 mcg_cap = vcpu->arch.mcg_cap;
1316 	unsigned bank_num = mcg_cap & 0xff;
1317 
1318 	switch (msr) {
1319 	case MSR_IA32_MCG_STATUS:
1320 		vcpu->arch.mcg_status = data;
1321 		break;
1322 	case MSR_IA32_MCG_CTL:
1323 		if (!(mcg_cap & MCG_CTL_P))
1324 			return 1;
1325 		if (data != 0 && data != ~(u64)0)
1326 			return -1;
1327 		vcpu->arch.mcg_ctl = data;
1328 		break;
1329 	default:
1330 		if (msr >= MSR_IA32_MC0_CTL &&
1331 		    msr < MSR_IA32_MC0_CTL + 4 * bank_num) {
1332 			u32 offset = msr - MSR_IA32_MC0_CTL;
1333 			/* Only 0 or all 1s can be written to IA32_MCi_CTL;
1334 			 * some Linux kernels, though, clear bit 10 in bank 4 to
1335 			 * work around a BIOS/GART TLB issue on AMD K8s.  Ignore
1336 			 * this to avoid an uncaught #GP in the guest.
1337 			 */
1338 			if ((offset & 0x3) == 0 &&
1339 			    data != 0 && (data | (1 << 10)) != ~(u64)0)
1340 				return -1;
1341 			vcpu->arch.mce_banks[offset] = data;
1342 			break;
1343 		}
1344 		return 1;
1345 	}
1346 	return 0;
1347 }
1348 
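/*
 * A write to the Xen HVM config MSR asks us to copy one page of the
 * userspace-supplied hypercall blob into the guest: the low bits of the
 * written value select the blob page, the page-aligned part is the
 * destination guest physical address.
 */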
1349 static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
1350 {
1351 	struct kvm *kvm = vcpu->kvm;
1352 	int lm = is_long_mode(vcpu);
1353 	u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
1354 		: (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
1355 	u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
1356 		: kvm->arch.xen_hvm_config.blob_size_32;
1357 	u32 page_num = data & ~PAGE_MASK;
1358 	u64 page_addr = data & PAGE_MASK;
1359 	u8 *page;
1360 	int r;
1361 
1362 	r = -E2BIG;
1363 	if (page_num >= blob_size)
1364 		goto out;
1365 	r = -ENOMEM;
1366 	page = memdup_user(blob_addr + (page_num * PAGE_SIZE), PAGE_SIZE);
1367 	if (IS_ERR(page)) {
1368 		r = PTR_ERR(page);
1369 		goto out;
1370 	}
1371 	if (kvm_write_guest(kvm, page_addr, page, PAGE_SIZE))
1372 		goto out_free;
1373 	r = 0;
1374 out_free:
1375 	kfree(page);
1376 out:
1377 	return r;
1378 }
1379 
1380 static bool kvm_hv_hypercall_enabled(struct kvm *kvm)
1381 {
1382 	return kvm->arch.hv_hypercall & HV_X64_MSR_HYPERCALL_ENABLE;
1383 }
1384 
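/*
 * Hyper-V MSRs are either partition-wide (stored in struct kvm and written
 * under kvm->lock) or per-VCPU; this helper tells the two apart.
 */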
1385 static bool kvm_hv_msr_partition_wide(u32 msr)
1386 {
1387 	bool r = false;
1388 	switch (msr) {
1389 	case HV_X64_MSR_GUEST_OS_ID:
1390 	case HV_X64_MSR_HYPERCALL:
1391 		r = true;
1392 		break;
1393 	}
1394 
1395 	return r;
1396 }
1397 
1398 static int set_msr_hyperv_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data)
1399 {
1400 	struct kvm *kvm = vcpu->kvm;
1401 
1402 	switch (msr) {
1403 	case HV_X64_MSR_GUEST_OS_ID:
1404 		kvm->arch.hv_guest_os_id = data;
1405 		/* setting guest os id to zero disables hypercall page */
1406 		if (!kvm->arch.hv_guest_os_id)
1407 			kvm->arch.hv_hypercall &= ~HV_X64_MSR_HYPERCALL_ENABLE;
1408 		break;
1409 	case HV_X64_MSR_HYPERCALL: {
1410 		u64 gfn;
1411 		unsigned long addr;
1412 		u8 instructions[4];
1413 
1414 		/* if guest os id is not set hypercall should remain disabled */
1415 		if (!kvm->arch.hv_guest_os_id)
1416 			break;
1417 		if (!(data & HV_X64_MSR_HYPERCALL_ENABLE)) {
1418 			kvm->arch.hv_hypercall = data;
1419 			break;
1420 		}
1421 		gfn = data >> HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT;
1422 		addr = gfn_to_hva(kvm, gfn);
1423 		if (kvm_is_error_hva(addr))
1424 			return 1;
1425 		kvm_x86_ops->patch_hypercall(vcpu, instructions);
1426 		((unsigned char *)instructions)[3] = 0xc3; /* ret */
1427 		if (__copy_to_user((void __user *)addr, instructions, 4))
1428 			return 1;
1429 		kvm->arch.hv_hypercall = data;
1430 		break;
1431 	}
1432 	default:
1433 		pr_unimpl(vcpu, "HYPER-V unimplemented wrmsr: 0x%x "
1434 			  "data 0x%llx\n", msr, data);
1435 		return 1;
1436 	}
1437 	return 0;
1438 }
1439 
1440 static int set_msr_hyperv(struct kvm_vcpu *vcpu, u32 msr, u64 data)
1441 {
1442 	switch (msr) {
1443 	case HV_X64_MSR_APIC_ASSIST_PAGE: {
1444 		unsigned long addr;
1445 
1446 		if (!(data & HV_X64_MSR_APIC_ASSIST_PAGE_ENABLE)) {
1447 			vcpu->arch.hv_vapic = data;
1448 			break;
1449 		}
1450 		addr = gfn_to_hva(vcpu->kvm, data >>
1451 				  HV_X64_MSR_APIC_ASSIST_PAGE_ADDRESS_SHIFT);
1452 		if (kvm_is_error_hva(addr))
1453 			return 1;
1454 		if (__clear_user((void __user *)addr, PAGE_SIZE))
1455 			return 1;
1456 		vcpu->arch.hv_vapic = data;
1457 		break;
1458 	}
1459 	case HV_X64_MSR_EOI:
1460 		return kvm_hv_vapic_msr_write(vcpu, APIC_EOI, data);
1461 	case HV_X64_MSR_ICR:
1462 		return kvm_hv_vapic_msr_write(vcpu, APIC_ICR, data);
1463 	case HV_X64_MSR_TPR:
1464 		return kvm_hv_vapic_msr_write(vcpu, APIC_TASKPRI, data);
1465 	default:
1466 		pr_unimpl(vcpu, "HYPER-V unimplemented wrmsr: 0x%x "
1467 			  "data 0x%llx\n", msr, data);
1468 		return 1;
1469 	}
1470 
1471 	return 0;
1472 }
1473 
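/*
 * MSR_KVM_ASYNC_PF_EN layout: bit 0 enables async page faults, bit 1 asks
 * for delivery even while the guest runs in kernel mode, bits 2:5 are
 * reserved (must be zero), and the remaining bits give the guest physical
 * address of the 32-bit word used to report async page fault tokens.
 */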
1474 static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
1475 {
1476 	gpa_t gpa = data & ~0x3f;
1477 
1478 	/* Bits 2:5 are reserved, should be zero */
1479 	if (data & 0x3c)
1480 		return 1;
1481 
1482 	vcpu->arch.apf.msr_val = data;
1483 
1484 	if (!(data & KVM_ASYNC_PF_ENABLED)) {
1485 		kvm_clear_async_pf_completion_queue(vcpu);
1486 		kvm_async_pf_hash_reset(vcpu);
1487 		return 0;
1488 	}
1489 
1490 	if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa,
1491 					sizeof(u32)))
1492 		return 1;
1493 
1494 	vcpu->arch.apf.send_user_only = !(data & KVM_ASYNC_PF_SEND_ALWAYS);
1495 	kvm_async_pf_wakeup_all(vcpu);
1496 	return 0;
1497 }
1498 
1499 static void kvmclock_reset(struct kvm_vcpu *vcpu)
1500 {
1501 	vcpu->arch.pv_time_enabled = false;
1502 }
1503 
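/*
 * Steal time is the time a VCPU spent runnable but not running, taken from
 * current->sched_info.run_delay.  accumulate_steal_time() snapshots the delta
 * since the last update; record_steal_time() publishes the accumulated value
 * to the guest's kvm_steal_time area.
 */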
1504 static void accumulate_steal_time(struct kvm_vcpu *vcpu)
1505 {
1506 	u64 delta;
1507 
1508 	if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
1509 		return;
1510 
1511 	delta = current->sched_info.run_delay - vcpu->arch.st.last_steal;
1512 	vcpu->arch.st.last_steal = current->sched_info.run_delay;
1513 	vcpu->arch.st.accum_steal = delta;
1514 }
1515 
1516 static void record_steal_time(struct kvm_vcpu *vcpu)
1517 {
1518 	if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
1519 		return;
1520 
1521 	if (unlikely(kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
1522 		&vcpu->arch.st.steal, sizeof(struct kvm_steal_time))))
1523 		return;
1524 
1525 	vcpu->arch.st.steal.steal += vcpu->arch.st.accum_steal;
1526 	vcpu->arch.st.steal.version += 2;
1527 	vcpu->arch.st.accum_steal = 0;
1528 
1529 	kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
1530 		&vcpu->arch.st.steal, sizeof(struct kvm_steal_time));
1531 }
1532 
1533 int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
1534 {
1535 	bool pr = false;
1536 
1537 	switch (msr) {
1538 	case MSR_EFER:
1539 		return set_efer(vcpu, data);
1540 	case MSR_K7_HWCR:
1541 		data &= ~(u64)0x40;	/* ignore flush filter disable */
1542 		data &= ~(u64)0x100;	/* ignore ignne emulation enable */
1543 		data &= ~(u64)0x8;	/* ignore TLB cache disable */
1544 		if (data != 0) {
1545 			pr_unimpl(vcpu, "unimplemented HWCR wrmsr: 0x%llx\n",
1546 				data);
1547 			return 1;
1548 		}
1549 		break;
1550 	case MSR_FAM10H_MMIO_CONF_BASE:
1551 		if (data != 0) {
1552 			pr_unimpl(vcpu, "unimplemented MMIO_CONF_BASE wrmsr: "
1553 				"0x%llx\n", data);
1554 			return 1;
1555 		}
1556 		break;
1557 	case MSR_AMD64_NB_CFG:
1558 		break;
1559 	case MSR_IA32_DEBUGCTLMSR:
1560 		if (!data) {
1561 			/* We support the non-activated case already */
1562 			break;
1563 		} else if (data & ~(DEBUGCTLMSR_LBR | DEBUGCTLMSR_BTF)) {
1564 			/* Values other than LBR and BTF are vendor-specific,
1565 			   thus reserved and should throw a #GP */
1566 			return 1;
1567 		}
1568 		pr_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTLMSR 0x%llx, nop\n",
1569 			__func__, data);
1570 		break;
1571 	case MSR_IA32_UCODE_REV:
1572 	case MSR_IA32_UCODE_WRITE:
1573 	case MSR_VM_HSAVE_PA:
1574 	case MSR_AMD64_PATCH_LOADER:
1575 		break;
1576 	case 0x200 ... 0x2ff:
1577 		return set_msr_mtrr(vcpu, msr, data);
1578 	case MSR_IA32_APICBASE:
1579 		kvm_set_apic_base(vcpu, data);
1580 		break;
1581 	case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff:
1582 		return kvm_x2apic_msr_write(vcpu, msr, data);
1583 	case MSR_IA32_TSCDEADLINE:
1584 		kvm_set_lapic_tscdeadline_msr(vcpu, data);
1585 		break;
1586 	case MSR_IA32_MISC_ENABLE:
1587 		vcpu->arch.ia32_misc_enable_msr = data;
1588 		break;
1589 	case MSR_KVM_WALL_CLOCK_NEW:
1590 	case MSR_KVM_WALL_CLOCK:
1591 		vcpu->kvm->arch.wall_clock = data;
1592 		kvm_write_wall_clock(vcpu->kvm, data);
1593 		break;
1594 	case MSR_KVM_SYSTEM_TIME_NEW:
1595 	case MSR_KVM_SYSTEM_TIME: {
1596 		u64 gpa_offset;
1597 		kvmclock_reset(vcpu);
1598 
1599 		vcpu->arch.time = data;
1600 		kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
1601 
1602 		/* we verify if the enable bit is set... */
1603 		if (!(data & 1))
1604 			break;
1605 
1606 		gpa_offset = data & ~(PAGE_MASK | 1);
1607 
1608 		if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
1609 		     &vcpu->arch.pv_time, data & ~1ULL,
1610 		     sizeof(struct pvclock_vcpu_time_info)))
1611 			vcpu->arch.pv_time_enabled = false;
1612 		else
1613 			vcpu->arch.pv_time_enabled = true;
1614 		break;
1615 	}
1616 	case MSR_KVM_ASYNC_PF_EN:
1617 		if (kvm_pv_enable_async_pf(vcpu, data))
1618 			return 1;
1619 		break;
1620 	case MSR_KVM_STEAL_TIME:
1621 
1622 		if (unlikely(!sched_info_on()))
1623 			return 1;
1624 
1625 		if (data & KVM_STEAL_RESERVED_MASK)
1626 			return 1;
1627 
1628 		if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.st.stime,
1629 						data & KVM_STEAL_VALID_BITS,
1630 						sizeof(struct kvm_steal_time)))
1631 			return 1;
1632 
1633 		vcpu->arch.st.msr_val = data;
1634 
1635 		if (!(data & KVM_MSR_ENABLED))
1636 			break;
1637 
1638 		vcpu->arch.st.last_steal = current->sched_info.run_delay;
1639 
1640 		preempt_disable();
1641 		accumulate_steal_time(vcpu);
1642 		preempt_enable();
1643 
1644 		kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
1645 
1646 		break;
1647 
1648 	case MSR_IA32_MCG_CTL:
1649 	case MSR_IA32_MCG_STATUS:
1650 	case MSR_IA32_MC0_CTL ... MSR_IA32_MC0_CTL + 4 * KVM_MAX_MCE_BANKS - 1:
1651 		return set_msr_mce(vcpu, msr, data);
1652 
1653 	/* Performance counters are not protected by a CPUID bit,
1654 	 * so we should check all of them in the generic path for the sake of
1655 	 * cross vendor migration.
1656 	 * Writing a zero into the event select MSRs disables them,
1657 	 * which we perfectly emulate ;-). Any other value should be at least
1658 	 * reported, some guests depend on them.
1659 	 */
1660 	case MSR_K7_EVNTSEL0:
1661 	case MSR_K7_EVNTSEL1:
1662 	case MSR_K7_EVNTSEL2:
1663 	case MSR_K7_EVNTSEL3:
1664 		if (data != 0)
1665 			pr_unimpl(vcpu, "unimplemented perfctr wrmsr: "
1666 				"0x%x data 0x%llx\n", msr, data);
1667 		break;
1668 	/* at least RHEL 4 unconditionally writes to the perfctr registers,
1669 	 * so we ignore writes to make it happy.
1670 	 */
1671 	case MSR_K7_PERFCTR0:
1672 	case MSR_K7_PERFCTR1:
1673 	case MSR_K7_PERFCTR2:
1674 	case MSR_K7_PERFCTR3:
1675 		pr_unimpl(vcpu, "unimplemented perfctr wrmsr: "
1676 			"0x%x data 0x%llx\n", msr, data);
1677 		break;
1678 	case MSR_P6_PERFCTR0:
1679 	case MSR_P6_PERFCTR1:
1680 		pr = true;
1681 	case MSR_P6_EVNTSEL0:
1682 	case MSR_P6_EVNTSEL1:
1683 		if (kvm_pmu_msr(vcpu, msr))
1684 			return kvm_pmu_set_msr(vcpu, msr, data);
1685 
1686 		if (pr || data != 0)
1687 			pr_unimpl(vcpu, "disabled perfctr wrmsr: "
1688 				"0x%x data 0x%llx\n", msr, data);
1689 		break;
1690 	case MSR_K7_CLK_CTL:
1691 		/*
1692 		 * Ignore all writes to this no longer documented MSR.
1693 		 * Writes are only relevant for old K7 processors,
1694 		 * all pre-dating SVM, but a recommended workaround from
1695 		 * AMD for these chips. It is possible to specify the
1696 		 * affected processor models on the command line, hence
1697 		 * the need to ignore the workaround.
1698 		 */
1699 		break;
1700 	case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15:
1701 		if (kvm_hv_msr_partition_wide(msr)) {
1702 			int r;
1703 			mutex_lock(&vcpu->kvm->lock);
1704 			r = set_msr_hyperv_pw(vcpu, msr, data);
1705 			mutex_unlock(&vcpu->kvm->lock);
1706 			return r;
1707 		} else
1708 			return set_msr_hyperv(vcpu, msr, data);
1709 		break;
1710 	case MSR_IA32_BBL_CR_CTL3:
1711 		/* Drop writes to this legacy MSR -- see rdmsr
1712 		 * counterpart for further detail.
1713 		 */
1714 		pr_unimpl(vcpu, "ignored wrmsr: 0x%x data %llx\n", msr, data);
1715 		break;
1716 	case MSR_AMD64_OSVW_ID_LENGTH:
1717 		if (!guest_cpuid_has_osvw(vcpu))
1718 			return 1;
1719 		vcpu->arch.osvw.length = data;
1720 		break;
1721 	case MSR_AMD64_OSVW_STATUS:
1722 		if (!guest_cpuid_has_osvw(vcpu))
1723 			return 1;
1724 		vcpu->arch.osvw.status = data;
1725 		break;
1726 	default:
1727 		if (msr && (msr == vcpu->kvm->arch.xen_hvm_config.msr))
1728 			return xen_hvm_config(vcpu, data);
1729 		if (kvm_pmu_msr(vcpu, msr))
1730 			return kvm_pmu_set_msr(vcpu, msr, data);
1731 		if (!ignore_msrs) {
1732 			pr_unimpl(vcpu, "unhandled wrmsr: 0x%x data %llx\n",
1733 				msr, data);
1734 			return 1;
1735 		} else {
1736 			pr_unimpl(vcpu, "ignored wrmsr: 0x%x data %llx\n",
1737 				msr, data);
1738 			break;
1739 		}
1740 	}
1741 	return 0;
1742 }
1743 EXPORT_SYMBOL_GPL(kvm_set_msr_common);
1744 
1745 
1746 /*
1747  * Reads an msr value (of 'msr_index') into 'pdata'.
1748  * Returns 0 on success, non-0 otherwise.
1749  * Assumes vcpu_load() was already called.
1750  */
1751 int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
1752 {
1753 	return kvm_x86_ops->get_msr(vcpu, msr_index, pdata);
1754 }
1755 
1756 static int get_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
1757 {
1758 	u64 *p = (u64 *)&vcpu->arch.mtrr_state.fixed_ranges;
1759 
1760 	if (!msr_mtrr_valid(msr))
1761 		return 1;
1762 
1763 	if (msr == MSR_MTRRdefType)
1764 		*pdata = vcpu->arch.mtrr_state.def_type +
1765 			 (vcpu->arch.mtrr_state.enabled << 10);
1766 	else if (msr == MSR_MTRRfix64K_00000)
1767 		*pdata = p[0];
1768 	else if (msr == MSR_MTRRfix16K_80000 || msr == MSR_MTRRfix16K_A0000)
1769 		*pdata = p[1 + msr - MSR_MTRRfix16K_80000];
1770 	else if (msr >= MSR_MTRRfix4K_C0000 && msr <= MSR_MTRRfix4K_F8000)
1771 		*pdata = p[3 + msr - MSR_MTRRfix4K_C0000];
1772 	else if (msr == MSR_IA32_CR_PAT)
1773 		*pdata = vcpu->arch.pat;
1774 	else {	/* Variable MTRRs */
1775 		int idx, is_mtrr_mask;
1776 		u64 *pt;
1777 
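		/*
		 * Variable-range MTRRs come in base/mask pairs starting at
		 * MSR 0x200: 0x200 + 2n is MTRRphysBasen and 0x201 + 2n is
		 * MTRRphysMaskn, hence the index and mask derivation below.
		 */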
1778 		idx = (msr - 0x200) / 2;
1779 		is_mtrr_mask = msr - 0x200 - 2 * idx;
1780 		if (!is_mtrr_mask)
1781 			pt =
1782 			  (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].base_lo;
1783 		else
1784 			pt =
1785 			  (u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].mask_lo;
1786 		*pdata = *pt;
1787 	}
1788 
1789 	return 0;
1790 }
1791 
1792 static int get_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
1793 {
1794 	u64 data;
1795 	u64 mcg_cap = vcpu->arch.mcg_cap;
1796 	unsigned bank_num = mcg_cap & 0xff;
1797 
1798 	switch (msr) {
1799 	case MSR_IA32_P5_MC_ADDR:
1800 	case MSR_IA32_P5_MC_TYPE:
1801 		data = 0;
1802 		break;
1803 	case MSR_IA32_MCG_CAP:
1804 		data = vcpu->arch.mcg_cap;
1805 		break;
1806 	case MSR_IA32_MCG_CTL:
1807 		if (!(mcg_cap & MCG_CTL_P))
1808 			return 1;
1809 		data = vcpu->arch.mcg_ctl;
1810 		break;
1811 	case MSR_IA32_MCG_STATUS:
1812 		data = vcpu->arch.mcg_status;
1813 		break;
1814 	default:
1815 		if (msr >= MSR_IA32_MC0_CTL &&
1816 		    msr < MSR_IA32_MC0_CTL + 4 * bank_num) {
1817 			u32 offset = msr - MSR_IA32_MC0_CTL;
1818 			data = vcpu->arch.mce_banks[offset];
1819 			break;
1820 		}
1821 		return 1;
1822 	}
1823 	*pdata = data;
1824 	return 0;
1825 }
1826 
1827 static int get_msr_hyperv_pw(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
1828 {
1829 	u64 data = 0;
1830 	struct kvm *kvm = vcpu->kvm;
1831 
1832 	switch (msr) {
1833 	case HV_X64_MSR_GUEST_OS_ID:
1834 		data = kvm->arch.hv_guest_os_id;
1835 		break;
1836 	case HV_X64_MSR_HYPERCALL:
1837 		data = kvm->arch.hv_hypercall;
1838 		break;
1839 	default:
1840 		pr_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
1841 		return 1;
1842 	}
1843 
1844 	*pdata = data;
1845 	return 0;
1846 }
1847 
1848 static int get_msr_hyperv(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
1849 {
1850 	u64 data = 0;
1851 
1852 	switch (msr) {
1853 	case HV_X64_MSR_VP_INDEX: {
1854 		int r;
1855 		struct kvm_vcpu *v;
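		/* Report this vcpu's position in the VM's vcpu array. */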
1856 		kvm_for_each_vcpu(r, v, vcpu->kvm)
1857 			if (v == vcpu)
1858 				data = r;
1859 		break;
1860 	}
1861 	case HV_X64_MSR_EOI:
1862 		return kvm_hv_vapic_msr_read(vcpu, APIC_EOI, pdata);
1863 	case HV_X64_MSR_ICR:
1864 		return kvm_hv_vapic_msr_read(vcpu, APIC_ICR, pdata);
1865 	case HV_X64_MSR_TPR:
1866 		return kvm_hv_vapic_msr_read(vcpu, APIC_TASKPRI, pdata);
1867 	case HV_X64_MSR_APIC_ASSIST_PAGE:
1868 		data = vcpu->arch.hv_vapic;
1869 		break;
1870 	default:
1871 		pr_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
1872 		return 1;
1873 	}
1874 	*pdata = data;
1875 	return 0;
1876 }
1877 
1878 int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
1879 {
1880 	u64 data;
1881 
1882 	switch (msr) {
1883 	case MSR_IA32_PLATFORM_ID:
1884 	case MSR_IA32_EBL_CR_POWERON:
1885 	case MSR_IA32_DEBUGCTLMSR:
1886 	case MSR_IA32_LASTBRANCHFROMIP:
1887 	case MSR_IA32_LASTBRANCHTOIP:
1888 	case MSR_IA32_LASTINTFROMIP:
1889 	case MSR_IA32_LASTINTTOIP:
1890 	case MSR_K8_SYSCFG:
1891 	case MSR_K7_HWCR:
1892 	case MSR_VM_HSAVE_PA:
1893 	case MSR_K7_EVNTSEL0:
1894 	case MSR_K7_PERFCTR0:
1895 	case MSR_K8_INT_PENDING_MSG:
1896 	case MSR_AMD64_NB_CFG:
1897 	case MSR_FAM10H_MMIO_CONF_BASE:
1898 		data = 0;
1899 		break;
1900 	case MSR_P6_PERFCTR0:
1901 	case MSR_P6_PERFCTR1:
1902 	case MSR_P6_EVNTSEL0:
1903 	case MSR_P6_EVNTSEL1:
1904 		if (kvm_pmu_msr(vcpu, msr))
1905 			return kvm_pmu_get_msr(vcpu, msr, pdata);
1906 		data = 0;
1907 		break;
1908 	case MSR_IA32_UCODE_REV:
1909 		data = 0x100000000ULL;
1910 		break;
1911 	case MSR_MTRRcap:
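		/* 0x500: fixed-range and WC types supported; low bits hold VCNT. */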
1912 		data = 0x500 | KVM_NR_VAR_MTRR;
1913 		break;
1914 	case 0x200 ... 0x2ff:
1915 		return get_msr_mtrr(vcpu, msr, pdata);
1916 	case 0xcd: /* fsb frequency */
1917 		data = 3;
1918 		break;
1919 		/*
1920 		 * MSR_EBC_FREQUENCY_ID
1921 		 * Conservative value valid for even the basic CPU models.
1922 		 * Models 0,1: 000 in bits 23:21 indicating a bus speed of
1923 		 * 100MHz, model 2 000 in bits 18:16 indicating 100MHz,
1924 		 * and 266MHz for model 3, or 4. Set Core Clock
1925 		 * Frequency to System Bus Frequency Ratio to 1 (bits
1926 		 * 31:24) even though these are only valid for CPU
1927 		 * models > 2, however guests may end up dividing or
1928 		 * multiplying by zero otherwise.
1929 		 */
1930 	case MSR_EBC_FREQUENCY_ID:
1931 		data = 1 << 24;
1932 		break;
1933 	case MSR_IA32_APICBASE:
1934 		data = kvm_get_apic_base(vcpu);
1935 		break;
1936 	case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff:
1937 		return kvm_x2apic_msr_read(vcpu, msr, pdata);
1939 	case MSR_IA32_TSCDEADLINE:
1940 		data = kvm_get_lapic_tscdeadline_msr(vcpu);
1941 		break;
1942 	case MSR_IA32_MISC_ENABLE:
1943 		data = vcpu->arch.ia32_misc_enable_msr;
1944 		break;
1945 	case MSR_IA32_PERF_STATUS:
1946 		/* TSC increment by tick */
1947 		data = 1000ULL;
1948 		/* CPU multiplier */
1949 		data |= (((uint64_t)4ULL) << 40);
1950 		break;
1951 	case MSR_EFER:
1952 		data = vcpu->arch.efer;
1953 		break;
1954 	case MSR_KVM_WALL_CLOCK:
1955 	case MSR_KVM_WALL_CLOCK_NEW:
1956 		data = vcpu->kvm->arch.wall_clock;
1957 		break;
1958 	case MSR_KVM_SYSTEM_TIME:
1959 	case MSR_KVM_SYSTEM_TIME_NEW:
1960 		data = vcpu->arch.time;
1961 		break;
1962 	case MSR_KVM_ASYNC_PF_EN:
1963 		data = vcpu->arch.apf.msr_val;
1964 		break;
1965 	case MSR_KVM_STEAL_TIME:
1966 		data = vcpu->arch.st.msr_val;
1967 		break;
1968 	case MSR_IA32_P5_MC_ADDR:
1969 	case MSR_IA32_P5_MC_TYPE:
1970 	case MSR_IA32_MCG_CAP:
1971 	case MSR_IA32_MCG_CTL:
1972 	case MSR_IA32_MCG_STATUS:
1973 	case MSR_IA32_MC0_CTL ... MSR_IA32_MC0_CTL + 4 * KVM_MAX_MCE_BANKS - 1:
1974 		return get_msr_mce(vcpu, msr, pdata);
1975 	case MSR_K7_CLK_CTL:
1976 		/*
1977 		 * Provide expected ramp-up count for K7. All other
1978 		 * fields are set to zero, indicating minimum divisors for
1979 		 * every field.
1980 		 *
1981 		 * This prevents guest kernels on AMD host with CPU
1982 		 * type 6, model 8 and higher from exploding due to
1983 		 * the rdmsr failing.
1984 		 */
1985 		data = 0x20000000;
1986 		break;
1987 	case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15:
1988 		if (kvm_hv_msr_partition_wide(msr)) {
1989 			int r;
1990 			mutex_lock(&vcpu->kvm->lock);
1991 			r = get_msr_hyperv_pw(vcpu, msr, pdata);
1992 			mutex_unlock(&vcpu->kvm->lock);
1993 			return r;
1994 		} else
1995 			return get_msr_hyperv(vcpu, msr, pdata);
1996 		break;
1997 	case MSR_IA32_BBL_CR_CTL3:
1998 		/* This legacy MSR exists but isn't fully documented in current
1999 		 * silicon.  It is however accessed by winxp in very narrow
2000 		 * scenarios where it sets bit #19, itself documented as
2001 		 * a "reserved" bit.  Best effort attempt to source coherent
2002 		 * read data here should the balance of the register be
2003 		 * interpreted by the guest:
2004 		 *
2005 		 * L2 cache control register 3: 64GB range, 256KB size,
2006 		 * enabled, latency 0x1, configured
2007 		 */
2008 		data = 0xbe702111;
2009 		break;
2010 	case MSR_AMD64_OSVW_ID_LENGTH:
2011 		if (!guest_cpuid_has_osvw(vcpu))
2012 			return 1;
2013 		data = vcpu->arch.osvw.length;
2014 		break;
2015 	case MSR_AMD64_OSVW_STATUS:
2016 		if (!guest_cpuid_has_osvw(vcpu))
2017 			return 1;
2018 		data = vcpu->arch.osvw.status;
2019 		break;
2020 	default:
2021 		if (kvm_pmu_msr(vcpu, msr))
2022 			return kvm_pmu_get_msr(vcpu, msr, pdata);
2023 		if (!ignore_msrs) {
2024 			pr_unimpl(vcpu, "unhandled rdmsr: 0x%x\n", msr);
2025 			return 1;
2026 		} else {
2027 			pr_unimpl(vcpu, "ignored rdmsr: 0x%x\n", msr);
2028 			data = 0;
2029 		}
2030 		break;
2031 	}
2032 	*pdata = data;
2033 	return 0;
2034 }
2035 EXPORT_SYMBOL_GPL(kvm_get_msr_common);
2036 
2037 /*
2038  * Read or write a bunch of msrs. All parameters are kernel addresses.
2039  *
2040  * @return number of msrs set successfully.
2041  */
2042 static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs,
2043 		    struct kvm_msr_entry *entries,
2044 		    int (*do_msr)(struct kvm_vcpu *vcpu,
2045 				  unsigned index, u64 *data))
2046 {
2047 	int i, idx;
2048 
2049 	idx = srcu_read_lock(&vcpu->kvm->srcu);
2050 	for (i = 0; i < msrs->nmsrs; ++i)
2051 		if (do_msr(vcpu, entries[i].index, &entries[i].data))
2052 			break;
2053 	srcu_read_unlock(&vcpu->kvm->srcu, idx);
2054 
2055 	return i;
2056 }
2057 
2058 /*
2059  * Read or write a bunch of msrs. Parameters are user addresses.
2060  *
2061  * @return number of msrs set successfully.
2062  */
2063 static int msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs __user *user_msrs,
2064 		  int (*do_msr)(struct kvm_vcpu *vcpu,
2065 				unsigned index, u64 *data),
2066 		  int writeback)
2067 {
2068 	struct kvm_msrs msrs;
2069 	struct kvm_msr_entry *entries;
2070 	int r, n;
2071 	unsigned size;
2072 
2073 	r = -EFAULT;
2074 	if (copy_from_user(&msrs, user_msrs, sizeof msrs))
2075 		goto out;
2076 
2077 	r = -E2BIG;
2078 	if (msrs.nmsrs >= MAX_IO_MSRS)
2079 		goto out;
2080 
2081 	size = sizeof(struct kvm_msr_entry) * msrs.nmsrs;
2082 	entries = memdup_user(user_msrs->entries, size);
2083 	if (IS_ERR(entries)) {
2084 		r = PTR_ERR(entries);
2085 		goto out;
2086 	}
2087 
2088 	r = n = __msr_io(vcpu, &msrs, entries, do_msr);
2089 	if (r < 0)
2090 		goto out_free;
2091 
2092 	r = -EFAULT;
2093 	if (writeback && copy_to_user(user_msrs->entries, entries, size))
2094 		goto out_free;
2095 
2096 	r = n;
2097 
2098 out_free:
2099 	kfree(entries);
2100 out:
2101 	return r;
2102 }
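
/*
 * Illustrative userspace sketch (not part of the kernel): reading a single
 * MSR through the KVM_GET_MSRS vcpu ioctl, which lands in msr_io() above.
 * The vcpu fd and the chosen MSR index are assumptions for the example.
 *
 *	struct {
 *		struct kvm_msrs hdr;
 *		struct kvm_msr_entry entries[1];
 *	} req = { .hdr.nmsrs = 1 };
 *
 *	req.entries[0].index = 0x1b;	(IA32_APICBASE)
 *	if (ioctl(vcpu_fd, KVM_GET_MSRS, &req) != 1)
 *		handle_error();
 *
 * The ioctl returns the number of MSRs processed, as computed by __msr_io().
 */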
2103 
2104 int kvm_dev_ioctl_check_extension(long ext)
2105 {
2106 	int r;
2107 
2108 	switch (ext) {
2109 	case KVM_CAP_IRQCHIP:
2110 	case KVM_CAP_HLT:
2111 	case KVM_CAP_MMU_SHADOW_CACHE_CONTROL:
2112 	case KVM_CAP_SET_TSS_ADDR:
2113 	case KVM_CAP_EXT_CPUID:
2114 	case KVM_CAP_CLOCKSOURCE:
2115 	case KVM_CAP_PIT:
2116 	case KVM_CAP_NOP_IO_DELAY:
2117 	case KVM_CAP_MP_STATE:
2118 	case KVM_CAP_SYNC_MMU:
2119 	case KVM_CAP_USER_NMI:
2120 	case KVM_CAP_REINJECT_CONTROL:
2121 	case KVM_CAP_IRQ_INJECT_STATUS:
2122 	case KVM_CAP_ASSIGN_DEV_IRQ:
2123 	case KVM_CAP_IRQFD:
2124 	case KVM_CAP_IOEVENTFD:
2125 	case KVM_CAP_PIT2:
2126 	case KVM_CAP_PIT_STATE2:
2127 	case KVM_CAP_SET_IDENTITY_MAP_ADDR:
2128 	case KVM_CAP_XEN_HVM:
2129 	case KVM_CAP_ADJUST_CLOCK:
2130 	case KVM_CAP_VCPU_EVENTS:
2131 	case KVM_CAP_HYPERV:
2132 	case KVM_CAP_HYPERV_VAPIC:
2133 	case KVM_CAP_HYPERV_SPIN:
2134 	case KVM_CAP_PCI_SEGMENT:
2135 	case KVM_CAP_DEBUGREGS:
2136 	case KVM_CAP_X86_ROBUST_SINGLESTEP:
2137 	case KVM_CAP_XSAVE:
2138 	case KVM_CAP_ASYNC_PF:
2139 	case KVM_CAP_GET_TSC_KHZ:
2140 	case KVM_CAP_PCI_2_3:
2141 		r = 1;
2142 		break;
2143 	case KVM_CAP_COALESCED_MMIO:
2144 		r = KVM_COALESCED_MMIO_PAGE_OFFSET;
2145 		break;
2146 	case KVM_CAP_VAPIC:
2147 		r = !kvm_x86_ops->cpu_has_accelerated_tpr();
2148 		break;
2149 	case KVM_CAP_NR_VCPUS:
2150 		r = KVM_SOFT_MAX_VCPUS;
2151 		break;
2152 	case KVM_CAP_MAX_VCPUS:
2153 		r = KVM_MAX_VCPUS;
2154 		break;
2155 	case KVM_CAP_NR_MEMSLOTS:
2156 		r = KVM_MEMORY_SLOTS;
2157 		break;
2158 	case KVM_CAP_PV_MMU:	/* obsolete */
2159 		r = 0;
2160 		break;
2161 	case KVM_CAP_IOMMU:
2162 		r = iommu_present(&pci_bus_type);
2163 		break;
2164 	case KVM_CAP_MCE:
2165 		r = KVM_MAX_MCE_BANKS;
2166 		break;
2167 	case KVM_CAP_XCRS:
2168 		r = cpu_has_xsave;
2169 		break;
2170 	case KVM_CAP_TSC_CONTROL:
2171 		r = kvm_has_tsc_control;
2172 		break;
2173 	case KVM_CAP_TSC_DEADLINE_TIMER:
2174 		r = boot_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER);
2175 		break;
2176 	default:
2177 		r = 0;
2178 		break;
2179 	}
2180 	return r;
2181 
2182 }
2183 
2184 long kvm_arch_dev_ioctl(struct file *filp,
2185 			unsigned int ioctl, unsigned long arg)
2186 {
2187 	void __user *argp = (void __user *)arg;
2188 	long r;
2189 
2190 	switch (ioctl) {
2191 	case KVM_GET_MSR_INDEX_LIST: {
2192 		struct kvm_msr_list __user *user_msr_list = argp;
2193 		struct kvm_msr_list msr_list;
2194 		unsigned n;
2195 
2196 		r = -EFAULT;
2197 		if (copy_from_user(&msr_list, user_msr_list, sizeof msr_list))
2198 			goto out;
2199 		n = msr_list.nmsrs;
2200 		msr_list.nmsrs = num_msrs_to_save + ARRAY_SIZE(emulated_msrs);
2201 		if (copy_to_user(user_msr_list, &msr_list, sizeof msr_list))
2202 			goto out;
2203 		r = -E2BIG;
2204 		if (n < msr_list.nmsrs)
2205 			goto out;
2206 		r = -EFAULT;
2207 		if (copy_to_user(user_msr_list->indices, &msrs_to_save,
2208 				 num_msrs_to_save * sizeof(u32)))
2209 			goto out;
2210 		if (copy_to_user(user_msr_list->indices + num_msrs_to_save,
2211 				 &emulated_msrs,
2212 				 ARRAY_SIZE(emulated_msrs) * sizeof(u32)))
2213 			goto out;
2214 		r = 0;
2215 		break;
2216 	}
2217 	case KVM_GET_SUPPORTED_CPUID: {
2218 		struct kvm_cpuid2 __user *cpuid_arg = argp;
2219 		struct kvm_cpuid2 cpuid;
2220 
2221 		r = -EFAULT;
2222 		if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
2223 			goto out;
2224 		r = kvm_dev_ioctl_get_supported_cpuid(&cpuid,
2225 						      cpuid_arg->entries);
2226 		if (r)
2227 			goto out;
2228 
2229 		r = -EFAULT;
2230 		if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid))
2231 			goto out;
2232 		r = 0;
2233 		break;
2234 	}
2235 	case KVM_X86_GET_MCE_CAP_SUPPORTED: {
2236 		u64 mce_cap;
2237 
2238 		mce_cap = KVM_MCE_CAP_SUPPORTED;
2239 		r = -EFAULT;
2240 		if (copy_to_user(argp, &mce_cap, sizeof mce_cap))
2241 			goto out;
2242 		r = 0;
2243 		break;
2244 	}
2245 	default:
2246 		r = -EINVAL;
2247 	}
2248 out:
2249 	return r;
2250 }
2251 
2252 static void wbinvd_ipi(void *garbage)
2253 {
2254 	wbinvd();
2255 }
2256 
2257 static bool need_emulate_wbinvd(struct kvm_vcpu *vcpu)
2258 {
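	/*
	 * WBINVD must reach real hardware when the guest has an assigned
	 * device behind a non-coherent IOMMU; otherwise device DMA may see
	 * stale data still sitting in the CPU caches.
	 */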
2259 	return vcpu->kvm->arch.iommu_domain &&
2260 		!(vcpu->kvm->arch.iommu_flags & KVM_IOMMU_CACHE_COHERENCY);
2261 }
2262 
2263 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
2264 {
2265 	/* Handle the case where the guest may execute WBINVD */
2266 	if (need_emulate_wbinvd(vcpu)) {
2267 		if (kvm_x86_ops->has_wbinvd_exit())
2268 			cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask);
2269 		else if (vcpu->cpu != -1 && vcpu->cpu != cpu)
2270 			smp_call_function_single(vcpu->cpu,
2271 					wbinvd_ipi, NULL, 1);
2272 	}
2273 
2274 	kvm_x86_ops->vcpu_load(vcpu, cpu);
2275 
2276 	/* Apply any externally detected TSC adjustments (due to suspend) */
2277 	if (unlikely(vcpu->arch.tsc_offset_adjustment)) {
2278 		adjust_tsc_offset_host(vcpu, vcpu->arch.tsc_offset_adjustment);
2279 		vcpu->arch.tsc_offset_adjustment = 0;
2280 		set_bit(KVM_REQ_CLOCK_UPDATE, &vcpu->requests);
2281 	}
2282 
2283 	if (unlikely(vcpu->cpu != cpu) || check_tsc_unstable()) {
2284 		s64 tsc_delta = !vcpu->arch.last_host_tsc ? 0 :
2285 				native_read_tsc() - vcpu->arch.last_host_tsc;
2286 		if (tsc_delta < 0)
2287 			mark_tsc_unstable("KVM discovered backwards TSC");
2288 		if (check_tsc_unstable()) {
2289 			u64 offset = kvm_x86_ops->compute_tsc_offset(vcpu,
2290 						vcpu->arch.last_guest_tsc);
2291 			kvm_x86_ops->write_tsc_offset(vcpu, offset);
2292 			vcpu->arch.tsc_catchup = 1;
2293 		}
2294 		kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
2295 		if (vcpu->cpu != cpu)
2296 			kvm_migrate_timers(vcpu);
2297 		vcpu->cpu = cpu;
2298 	}
2299 
2300 	accumulate_steal_time(vcpu);
2301 	kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
2302 }
2303 
2304 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
2305 {
2306 	kvm_x86_ops->vcpu_put(vcpu);
2307 	kvm_put_guest_fpu(vcpu);
2308 	vcpu->arch.last_host_tsc = native_read_tsc();
2309 }
2310 
2311 static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu,
2312 				    struct kvm_lapic_state *s)
2313 {
2314 	memcpy(s->regs, vcpu->arch.apic->regs, sizeof *s);
2315 
2316 	return 0;
2317 }
2318 
2319 static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
2320 				    struct kvm_lapic_state *s)
2321 {
2322 	memcpy(vcpu->arch.apic->regs, s->regs, sizeof *s);
2323 	kvm_apic_post_state_restore(vcpu);
2324 	update_cr8_intercept(vcpu);
2325 
2326 	return 0;
2327 }
2328 
2329 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
2330 				    struct kvm_interrupt *irq)
2331 {
2332 	if (irq->irq < 0 || irq->irq >= 256)
2333 		return -EINVAL;
2334 	if (irqchip_in_kernel(vcpu->kvm))
2335 		return -ENXIO;
2336 
2337 	kvm_queue_interrupt(vcpu, irq->irq, false);
2338 	kvm_make_request(KVM_REQ_EVENT, vcpu);
2339 
2340 	return 0;
2341 }
2342 
2343 static int kvm_vcpu_ioctl_nmi(struct kvm_vcpu *vcpu)
2344 {
2345 	kvm_inject_nmi(vcpu);
2346 
2347 	return 0;
2348 }
2349 
2350 static int vcpu_ioctl_tpr_access_reporting(struct kvm_vcpu *vcpu,
2351 					   struct kvm_tpr_access_ctl *tac)
2352 {
2353 	if (tac->flags)
2354 		return -EINVAL;
2355 	vcpu->arch.tpr_access_reporting = !!tac->enabled;
2356 	return 0;
2357 }
2358 
2359 static int kvm_vcpu_ioctl_x86_setup_mce(struct kvm_vcpu *vcpu,
2360 					u64 mcg_cap)
2361 {
2362 	int r;
2363 	unsigned bank_num = mcg_cap & 0xff, bank;
2364 
2365 	r = -EINVAL;
2366 	if (!bank_num || bank_num >= KVM_MAX_MCE_BANKS)
2367 		goto out;
2368 	if (mcg_cap & ~(KVM_MCE_CAP_SUPPORTED | 0xff | 0xff0000))
2369 		goto out;
2370 	r = 0;
2371 	vcpu->arch.mcg_cap = mcg_cap;
2372 	/* Init IA32_MCG_CTL to all 1s */
2373 	if (mcg_cap & MCG_CTL_P)
2374 		vcpu->arch.mcg_ctl = ~(u64)0;
2375 	/* Init IA32_MCi_CTL to all 1s */
2376 	for (bank = 0; bank < bank_num; bank++)
2377 		vcpu->arch.mce_banks[bank*4] = ~(u64)0;
2378 out:
2379 	return r;
2380 }
2381 
2382 static int kvm_vcpu_ioctl_x86_set_mce(struct kvm_vcpu *vcpu,
2383 				      struct kvm_x86_mce *mce)
2384 {
2385 	u64 mcg_cap = vcpu->arch.mcg_cap;
2386 	unsigned bank_num = mcg_cap & 0xff;
2387 	u64 *banks = vcpu->arch.mce_banks;
2388 
2389 	if (mce->bank >= bank_num || !(mce->status & MCI_STATUS_VAL))
2390 		return -EINVAL;
2391 	/*
2392 	 * if IA32_MCG_CTL is not all 1s, the uncorrected error
2393 	 * reporting is disabled
2394 	 */
2395 	if ((mce->status & MCI_STATUS_UC) && (mcg_cap & MCG_CTL_P) &&
2396 	    vcpu->arch.mcg_ctl != ~(u64)0)
2397 		return 0;
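	/* Each bank occupies four slots: CTL, STATUS, ADDR, MISC. */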
2398 	banks += 4 * mce->bank;
2399 	/*
2400 	 * if IA32_MCi_CTL is not all 1s, the uncorrected error
2401 	 * reporting is disabled for the bank
2402 	 */
2403 	if ((mce->status & MCI_STATUS_UC) && banks[0] != ~(u64)0)
2404 		return 0;
2405 	if (mce->status & MCI_STATUS_UC) {
2406 		if ((vcpu->arch.mcg_status & MCG_STATUS_MCIP) ||
2407 		    !kvm_read_cr4_bits(vcpu, X86_CR4_MCE)) {
2408 			kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
2409 			return 0;
2410 		}
2411 		if (banks[1] & MCI_STATUS_VAL)
2412 			mce->status |= MCI_STATUS_OVER;
2413 		banks[2] = mce->addr;
2414 		banks[3] = mce->misc;
2415 		vcpu->arch.mcg_status = mce->mcg_status;
2416 		banks[1] = mce->status;
2417 		kvm_queue_exception(vcpu, MC_VECTOR);
2418 	} else if (!(banks[1] & MCI_STATUS_VAL)
2419 		   || !(banks[1] & MCI_STATUS_UC)) {
2420 		if (banks[1] & MCI_STATUS_VAL)
2421 			mce->status |= MCI_STATUS_OVER;
2422 		banks[2] = mce->addr;
2423 		banks[3] = mce->misc;
2424 		banks[1] = mce->status;
2425 	} else
2426 		banks[1] |= MCI_STATUS_OVER;
2427 	return 0;
2428 }
2429 
2430 static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
2431 					       struct kvm_vcpu_events *events)
2432 {
2433 	process_nmi(vcpu);
2434 	events->exception.injected =
2435 		vcpu->arch.exception.pending &&
2436 		!kvm_exception_is_soft(vcpu->arch.exception.nr);
2437 	events->exception.nr = vcpu->arch.exception.nr;
2438 	events->exception.has_error_code = vcpu->arch.exception.has_error_code;
2439 	events->exception.pad = 0;
2440 	events->exception.error_code = vcpu->arch.exception.error_code;
2441 
2442 	events->interrupt.injected =
2443 		vcpu->arch.interrupt.pending && !vcpu->arch.interrupt.soft;
2444 	events->interrupt.nr = vcpu->arch.interrupt.nr;
2445 	events->interrupt.soft = 0;
2446 	events->interrupt.shadow =
2447 		kvm_x86_ops->get_interrupt_shadow(vcpu,
2448 			KVM_X86_SHADOW_INT_MOV_SS | KVM_X86_SHADOW_INT_STI);
2449 
2450 	events->nmi.injected = vcpu->arch.nmi_injected;
2451 	events->nmi.pending = vcpu->arch.nmi_pending != 0;
2452 	events->nmi.masked = kvm_x86_ops->get_nmi_mask(vcpu);
2453 	events->nmi.pad = 0;
2454 
2455 	events->sipi_vector = vcpu->arch.sipi_vector;
2456 
2457 	events->flags = (KVM_VCPUEVENT_VALID_NMI_PENDING
2458 			 | KVM_VCPUEVENT_VALID_SIPI_VECTOR
2459 			 | KVM_VCPUEVENT_VALID_SHADOW);
2460 	memset(&events->reserved, 0, sizeof(events->reserved));
2461 }
2462 
2463 static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
2464 					      struct kvm_vcpu_events *events)
2465 {
2466 	if (events->flags & ~(KVM_VCPUEVENT_VALID_NMI_PENDING
2467 			      | KVM_VCPUEVENT_VALID_SIPI_VECTOR
2468 			      | KVM_VCPUEVENT_VALID_SHADOW))
2469 		return -EINVAL;
2470 
2471 	process_nmi(vcpu);
2472 	vcpu->arch.exception.pending = events->exception.injected;
2473 	vcpu->arch.exception.nr = events->exception.nr;
2474 	vcpu->arch.exception.has_error_code = events->exception.has_error_code;
2475 	vcpu->arch.exception.error_code = events->exception.error_code;
2476 
2477 	vcpu->arch.interrupt.pending = events->interrupt.injected;
2478 	vcpu->arch.interrupt.nr = events->interrupt.nr;
2479 	vcpu->arch.interrupt.soft = events->interrupt.soft;
2480 	if (events->flags & KVM_VCPUEVENT_VALID_SHADOW)
2481 		kvm_x86_ops->set_interrupt_shadow(vcpu,
2482 						  events->interrupt.shadow);
2483 
2484 	vcpu->arch.nmi_injected = events->nmi.injected;
2485 	if (events->flags & KVM_VCPUEVENT_VALID_NMI_PENDING)
2486 		vcpu->arch.nmi_pending = events->nmi.pending;
2487 	kvm_x86_ops->set_nmi_mask(vcpu, events->nmi.masked);
2488 
2489 	if (events->flags & KVM_VCPUEVENT_VALID_SIPI_VECTOR)
2490 		vcpu->arch.sipi_vector = events->sipi_vector;
2491 
2492 	kvm_make_request(KVM_REQ_EVENT, vcpu);
2493 
2494 	return 0;
2495 }
2496 
2497 static void kvm_vcpu_ioctl_x86_get_debugregs(struct kvm_vcpu *vcpu,
2498 					     struct kvm_debugregs *dbgregs)
2499 {
2500 	memcpy(dbgregs->db, vcpu->arch.db, sizeof(vcpu->arch.db));
2501 	dbgregs->dr6 = vcpu->arch.dr6;
2502 	dbgregs->dr7 = vcpu->arch.dr7;
2503 	dbgregs->flags = 0;
2504 	memset(&dbgregs->reserved, 0, sizeof(dbgregs->reserved));
2505 }
2506 
2507 static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,
2508 					    struct kvm_debugregs *dbgregs)
2509 {
2510 	if (dbgregs->flags)
2511 		return -EINVAL;
2512 
2513 	memcpy(vcpu->arch.db, dbgregs->db, sizeof(vcpu->arch.db));
2514 	vcpu->arch.dr6 = dbgregs->dr6;
2515 	vcpu->arch.dr7 = dbgregs->dr7;
2516 
2517 	return 0;
2518 }
2519 
2520 static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu,
2521 					 struct kvm_xsave *guest_xsave)
2522 {
2523 	if (cpu_has_xsave)
2524 		memcpy(guest_xsave->region,
2525 			&vcpu->arch.guest_fpu.state->xsave,
2526 			xstate_size);
2527 	else {
2528 		memcpy(guest_xsave->region,
2529 			&vcpu->arch.guest_fpu.state->fxsave,
2530 			sizeof(struct i387_fxsave_struct));
2531 		*(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)] =
2532 			XSTATE_FPSSE;
2533 	}
2534 }
2535 
2536 static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu,
2537 					struct kvm_xsave *guest_xsave)
2538 {
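	/* XSTATE_BV is the first u64 of the XSAVE header at XSAVE_HDR_OFFSET. */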
2539 	u64 xstate_bv =
2540 		*(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)];
2541 
2542 	if (cpu_has_xsave)
2543 		memcpy(&vcpu->arch.guest_fpu.state->xsave,
2544 			guest_xsave->region, xstate_size);
2545 	else {
2546 		if (xstate_bv & ~XSTATE_FPSSE)
2547 			return -EINVAL;
2548 		memcpy(&vcpu->arch.guest_fpu.state->fxsave,
2549 			guest_xsave->region, sizeof(struct i387_fxsave_struct));
2550 	}
2551 	return 0;
2552 }
2553 
2554 static void kvm_vcpu_ioctl_x86_get_xcrs(struct kvm_vcpu *vcpu,
2555 					struct kvm_xcrs *guest_xcrs)
2556 {
2557 	if (!cpu_has_xsave) {
2558 		guest_xcrs->nr_xcrs = 0;
2559 		return;
2560 	}
2561 
2562 	guest_xcrs->nr_xcrs = 1;
2563 	guest_xcrs->flags = 0;
2564 	guest_xcrs->xcrs[0].xcr = XCR_XFEATURE_ENABLED_MASK;
2565 	guest_xcrs->xcrs[0].value = vcpu->arch.xcr0;
2566 }
2567 
2568 static int kvm_vcpu_ioctl_x86_set_xcrs(struct kvm_vcpu *vcpu,
2569 				       struct kvm_xcrs *guest_xcrs)
2570 {
2571 	int i, r = 0;
2572 
2573 	if (!cpu_has_xsave)
2574 		return -EINVAL;
2575 
2576 	if (guest_xcrs->nr_xcrs > KVM_MAX_XCRS || guest_xcrs->flags)
2577 		return -EINVAL;
2578 
2579 	for (i = 0; i < guest_xcrs->nr_xcrs; i++)
2580 		/* Only support XCR0 currently */
2581 		if (guest_xcrs->xcrs[0].xcr == XCR_XFEATURE_ENABLED_MASK) {
2582 			r = __kvm_set_xcr(vcpu, XCR_XFEATURE_ENABLED_MASK,
2583 				guest_xcrs->xcrs[0].value);
2584 			break;
2585 		}
2586 	if (r)
2587 		r = -EINVAL;
2588 	return r;
2589 }
2590 
2591 long kvm_arch_vcpu_ioctl(struct file *filp,
2592 			 unsigned int ioctl, unsigned long arg)
2593 {
2594 	struct kvm_vcpu *vcpu = filp->private_data;
2595 	void __user *argp = (void __user *)arg;
2596 	int r;
2597 	union {
2598 		struct kvm_lapic_state *lapic;
2599 		struct kvm_xsave *xsave;
2600 		struct kvm_xcrs *xcrs;
2601 		void *buffer;
2602 	} u;
2603 
2604 	u.buffer = NULL;
2605 	switch (ioctl) {
2606 	case KVM_GET_LAPIC: {
2607 		r = -EINVAL;
2608 		if (!vcpu->arch.apic)
2609 			goto out;
2610 		u.lapic = kzalloc(sizeof(struct kvm_lapic_state), GFP_KERNEL);
2611 
2612 		r = -ENOMEM;
2613 		if (!u.lapic)
2614 			goto out;
2615 		r = kvm_vcpu_ioctl_get_lapic(vcpu, u.lapic);
2616 		if (r)
2617 			goto out;
2618 		r = -EFAULT;
2619 		if (copy_to_user(argp, u.lapic, sizeof(struct kvm_lapic_state)))
2620 			goto out;
2621 		r = 0;
2622 		break;
2623 	}
2624 	case KVM_SET_LAPIC: {
2625 		r = -EINVAL;
2626 		if (!vcpu->arch.apic)
2627 			goto out;
2628 		u.lapic = memdup_user(argp, sizeof(*u.lapic));
2629 		if (IS_ERR(u.lapic)) {
2630 			r = PTR_ERR(u.lapic);
2631 			goto out;
2632 		}
2633 
2634 		r = kvm_vcpu_ioctl_set_lapic(vcpu, u.lapic);
2635 		if (r)
2636 			goto out;
2637 		r = 0;
2638 		break;
2639 	}
2640 	case KVM_INTERRUPT: {
2641 		struct kvm_interrupt irq;
2642 
2643 		r = -EFAULT;
2644 		if (copy_from_user(&irq, argp, sizeof irq))
2645 			goto out;
2646 		r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
2647 		if (r)
2648 			goto out;
2649 		r = 0;
2650 		break;
2651 	}
2652 	case KVM_NMI: {
2653 		r = kvm_vcpu_ioctl_nmi(vcpu);
2654 		if (r)
2655 			goto out;
2656 		r = 0;
2657 		break;
2658 	}
2659 	case KVM_SET_CPUID: {
2660 		struct kvm_cpuid __user *cpuid_arg = argp;
2661 		struct kvm_cpuid cpuid;
2662 
2663 		r = -EFAULT;
2664 		if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
2665 			goto out;
2666 		r = kvm_vcpu_ioctl_set_cpuid(vcpu, &cpuid, cpuid_arg->entries);
2667 		if (r)
2668 			goto out;
2669 		break;
2670 	}
2671 	case KVM_SET_CPUID2: {
2672 		struct kvm_cpuid2 __user *cpuid_arg = argp;
2673 		struct kvm_cpuid2 cpuid;
2674 
2675 		r = -EFAULT;
2676 		if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
2677 			goto out;
2678 		r = kvm_vcpu_ioctl_set_cpuid2(vcpu, &cpuid,
2679 					      cpuid_arg->entries);
2680 		if (r)
2681 			goto out;
2682 		break;
2683 	}
2684 	case KVM_GET_CPUID2: {
2685 		struct kvm_cpuid2 __user *cpuid_arg = argp;
2686 		struct kvm_cpuid2 cpuid;
2687 
2688 		r = -EFAULT;
2689 		if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
2690 			goto out;
2691 		r = kvm_vcpu_ioctl_get_cpuid2(vcpu, &cpuid,
2692 					      cpuid_arg->entries);
2693 		if (r)
2694 			goto out;
2695 		r = -EFAULT;
2696 		if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid))
2697 			goto out;
2698 		r = 0;
2699 		break;
2700 	}
2701 	case KVM_GET_MSRS:
2702 		r = msr_io(vcpu, argp, kvm_get_msr, 1);
2703 		break;
2704 	case KVM_SET_MSRS:
2705 		r = msr_io(vcpu, argp, do_set_msr, 0);
2706 		break;
2707 	case KVM_TPR_ACCESS_REPORTING: {
2708 		struct kvm_tpr_access_ctl tac;
2709 
2710 		r = -EFAULT;
2711 		if (copy_from_user(&tac, argp, sizeof tac))
2712 			goto out;
2713 		r = vcpu_ioctl_tpr_access_reporting(vcpu, &tac);
2714 		if (r)
2715 			goto out;
2716 		r = -EFAULT;
2717 		if (copy_to_user(argp, &tac, sizeof tac))
2718 			goto out;
2719 		r = 0;
2720 		break;
2721 	}
2722 	case KVM_SET_VAPIC_ADDR: {
2723 		struct kvm_vapic_addr va;
2724 
2725 		r = -EINVAL;
2726 		if (!irqchip_in_kernel(vcpu->kvm))
2727 			goto out;
2728 		r = -EFAULT;
2729 		if (copy_from_user(&va, argp, sizeof va))
2730 			goto out;
2731 		r = kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr);
2732 		break;
2733 	}
2734 	case KVM_X86_SETUP_MCE: {
2735 		u64 mcg_cap;
2736 
2737 		r = -EFAULT;
2738 		if (copy_from_user(&mcg_cap, argp, sizeof mcg_cap))
2739 			goto out;
2740 		r = kvm_vcpu_ioctl_x86_setup_mce(vcpu, mcg_cap);
2741 		break;
2742 	}
2743 	case KVM_X86_SET_MCE: {
2744 		struct kvm_x86_mce mce;
2745 
2746 		r = -EFAULT;
2747 		if (copy_from_user(&mce, argp, sizeof mce))
2748 			goto out;
2749 		r = kvm_vcpu_ioctl_x86_set_mce(vcpu, &mce);
2750 		break;
2751 	}
2752 	case KVM_GET_VCPU_EVENTS: {
2753 		struct kvm_vcpu_events events;
2754 
2755 		kvm_vcpu_ioctl_x86_get_vcpu_events(vcpu, &events);
2756 
2757 		r = -EFAULT;
2758 		if (copy_to_user(argp, &events, sizeof(struct kvm_vcpu_events)))
2759 			break;
2760 		r = 0;
2761 		break;
2762 	}
2763 	case KVM_SET_VCPU_EVENTS: {
2764 		struct kvm_vcpu_events events;
2765 
2766 		r = -EFAULT;
2767 		if (copy_from_user(&events, argp, sizeof(struct kvm_vcpu_events)))
2768 			break;
2769 
2770 		r = kvm_vcpu_ioctl_x86_set_vcpu_events(vcpu, &events);
2771 		break;
2772 	}
2773 	case KVM_GET_DEBUGREGS: {
2774 		struct kvm_debugregs dbgregs;
2775 
2776 		kvm_vcpu_ioctl_x86_get_debugregs(vcpu, &dbgregs);
2777 
2778 		r = -EFAULT;
2779 		if (copy_to_user(argp, &dbgregs,
2780 				 sizeof(struct kvm_debugregs)))
2781 			break;
2782 		r = 0;
2783 		break;
2784 	}
2785 	case KVM_SET_DEBUGREGS: {
2786 		struct kvm_debugregs dbgregs;
2787 
2788 		r = -EFAULT;
2789 		if (copy_from_user(&dbgregs, argp,
2790 				   sizeof(struct kvm_debugregs)))
2791 			break;
2792 
2793 		r = kvm_vcpu_ioctl_x86_set_debugregs(vcpu, &dbgregs);
2794 		break;
2795 	}
2796 	case KVM_GET_XSAVE: {
2797 		u.xsave = kzalloc(sizeof(struct kvm_xsave), GFP_KERNEL);
2798 		r = -ENOMEM;
2799 		if (!u.xsave)
2800 			break;
2801 
2802 		kvm_vcpu_ioctl_x86_get_xsave(vcpu, u.xsave);
2803 
2804 		r = -EFAULT;
2805 		if (copy_to_user(argp, u.xsave, sizeof(struct kvm_xsave)))
2806 			break;
2807 		r = 0;
2808 		break;
2809 	}
2810 	case KVM_SET_XSAVE: {
2811 		u.xsave = memdup_user(argp, sizeof(*u.xsave));
2812 		if (IS_ERR(u.xsave)) {
2813 			r = PTR_ERR(u.xsave);
2814 			goto out;
2815 		}
2816 
2817 		r = kvm_vcpu_ioctl_x86_set_xsave(vcpu, u.xsave);
2818 		break;
2819 	}
2820 	case KVM_GET_XCRS: {
2821 		u.xcrs = kzalloc(sizeof(struct kvm_xcrs), GFP_KERNEL);
2822 		r = -ENOMEM;
2823 		if (!u.xcrs)
2824 			break;
2825 
2826 		kvm_vcpu_ioctl_x86_get_xcrs(vcpu, u.xcrs);
2827 
2828 		r = -EFAULT;
2829 		if (copy_to_user(argp, u.xcrs,
2830 				 sizeof(struct kvm_xcrs)))
2831 			break;
2832 		r = 0;
2833 		break;
2834 	}
2835 	case KVM_SET_XCRS: {
2836 		u.xcrs = memdup_user(argp, sizeof(*u.xcrs));
2837 		if (IS_ERR(u.xcrs)) {
2838 			r = PTR_ERR(u.xcrs);
2839 			goto out;
2840 		}
2841 
2842 		r = kvm_vcpu_ioctl_x86_set_xcrs(vcpu, u.xcrs);
2843 		break;
2844 	}
2845 	case KVM_SET_TSC_KHZ: {
2846 		u32 user_tsc_khz;
2847 
2848 		r = -EINVAL;
2849 		user_tsc_khz = (u32)arg;
2850 
2851 		if (user_tsc_khz >= kvm_max_guest_tsc_khz)
2852 			goto out;
2853 
2854 		if (user_tsc_khz == 0)
2855 			user_tsc_khz = tsc_khz;
2856 
2857 		kvm_set_tsc_khz(vcpu, user_tsc_khz);
2858 
2859 		r = 0;
2860 		goto out;
2861 	}
2862 	case KVM_GET_TSC_KHZ: {
2863 		r = vcpu->arch.virtual_tsc_khz;
2864 		goto out;
2865 	}
2866 	default:
2867 		r = -EINVAL;
2868 	}
2869 out:
2870 	kfree(u.buffer);
2871 	return r;
2872 }
2873 
2874 int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
2875 {
2876 	return VM_FAULT_SIGBUS;
2877 }
2878 
2879 static int kvm_vm_ioctl_set_tss_addr(struct kvm *kvm, unsigned long addr)
2880 {
2881 	int ret;
2882 
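	/* The TSS spans three pages; reject addresses that would wrap past 4GB. */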
2883 	if (addr > (unsigned int)(-3 * PAGE_SIZE))
2884 		return -1;
2885 	ret = kvm_x86_ops->set_tss_addr(kvm, addr);
2886 	return ret;
2887 }
2888 
2889 static int kvm_vm_ioctl_set_identity_map_addr(struct kvm *kvm,
2890 					      u64 ident_addr)
2891 {
2892 	kvm->arch.ept_identity_map_addr = ident_addr;
2893 	return 0;
2894 }
2895 
2896 static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
2897 					  u32 kvm_nr_mmu_pages)
2898 {
2899 	if (kvm_nr_mmu_pages < KVM_MIN_ALLOC_MMU_PAGES)
2900 		return -EINVAL;
2901 
2902 	mutex_lock(&kvm->slots_lock);
2903 	spin_lock(&kvm->mmu_lock);
2904 
2905 	kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages);
2906 	kvm->arch.n_requested_mmu_pages = kvm_nr_mmu_pages;
2907 
2908 	spin_unlock(&kvm->mmu_lock);
2909 	mutex_unlock(&kvm->slots_lock);
2910 	return 0;
2911 }
2912 
2913 static int kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm)
2914 {
2915 	return kvm->arch.n_max_mmu_pages;
2916 }
2917 
2918 static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
2919 {
2920 	int r;
2921 
2922 	r = 0;
2923 	switch (chip->chip_id) {
2924 	case KVM_IRQCHIP_PIC_MASTER:
2925 		memcpy(&chip->chip.pic,
2926 			&pic_irqchip(kvm)->pics[0],
2927 			sizeof(struct kvm_pic_state));
2928 		break;
2929 	case KVM_IRQCHIP_PIC_SLAVE:
2930 		memcpy(&chip->chip.pic,
2931 			&pic_irqchip(kvm)->pics[1],
2932 			sizeof(struct kvm_pic_state));
2933 		break;
2934 	case KVM_IRQCHIP_IOAPIC:
2935 		r = kvm_get_ioapic(kvm, &chip->chip.ioapic);
2936 		break;
2937 	default:
2938 		r = -EINVAL;
2939 		break;
2940 	}
2941 	return r;
2942 }
2943 
2944 static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
2945 {
2946 	int r;
2947 
2948 	r = 0;
2949 	switch (chip->chip_id) {
2950 	case KVM_IRQCHIP_PIC_MASTER:
2951 		spin_lock(&pic_irqchip(kvm)->lock);
2952 		memcpy(&pic_irqchip(kvm)->pics[0],
2953 			&chip->chip.pic,
2954 			sizeof(struct kvm_pic_state));
2955 		spin_unlock(&pic_irqchip(kvm)->lock);
2956 		break;
2957 	case KVM_IRQCHIP_PIC_SLAVE:
2958 		spin_lock(&pic_irqchip(kvm)->lock);
2959 		memcpy(&pic_irqchip(kvm)->pics[1],
2960 			&chip->chip.pic,
2961 			sizeof(struct kvm_pic_state));
2962 		spin_unlock(&pic_irqchip(kvm)->lock);
2963 		break;
2964 	case KVM_IRQCHIP_IOAPIC:
2965 		r = kvm_set_ioapic(kvm, &chip->chip.ioapic);
2966 		break;
2967 	default:
2968 		r = -EINVAL;
2969 		break;
2970 	}
2971 	kvm_pic_update_irq(pic_irqchip(kvm));
2972 	return r;
2973 }
2974 
2975 static int kvm_vm_ioctl_get_pit(struct kvm *kvm, struct kvm_pit_state *ps)
2976 {
2977 	int r = 0;
2978 
2979 	mutex_lock(&kvm->arch.vpit->pit_state.lock);
2980 	memcpy(ps, &kvm->arch.vpit->pit_state, sizeof(struct kvm_pit_state));
2981 	mutex_unlock(&kvm->arch.vpit->pit_state.lock);
2982 	return r;
2983 }
2984 
2985 static int kvm_vm_ioctl_set_pit(struct kvm *kvm, struct kvm_pit_state *ps)
2986 {
2987 	int r = 0;
2988 
2989 	mutex_lock(&kvm->arch.vpit->pit_state.lock);
2990 	memcpy(&kvm->arch.vpit->pit_state, ps, sizeof(struct kvm_pit_state));
2991 	kvm_pit_load_count(kvm, 0, ps->channels[0].count, 0);
2992 	mutex_unlock(&kvm->arch.vpit->pit_state.lock);
2993 	return r;
2994 }
2995 
2996 static int kvm_vm_ioctl_get_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
2997 {
2998 	int r = 0;
2999 
3000 	mutex_lock(&kvm->arch.vpit->pit_state.lock);
3001 	memcpy(ps->channels, &kvm->arch.vpit->pit_state.channels,
3002 		sizeof(ps->channels));
3003 	ps->flags = kvm->arch.vpit->pit_state.flags;
3004 	mutex_unlock(&kvm->arch.vpit->pit_state.lock);
3005 	memset(&ps->reserved, 0, sizeof(ps->reserved));
3006 	return r;
3007 }
3008 
3009 static int kvm_vm_ioctl_set_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
3010 {
3011 	int r = 0, start = 0;
3012 	u32 prev_legacy, cur_legacy;
3013 	mutex_lock(&kvm->arch.vpit->pit_state.lock);
3014 	prev_legacy = kvm->arch.vpit->pit_state.flags & KVM_PIT_FLAGS_HPET_LEGACY;
3015 	cur_legacy = ps->flags & KVM_PIT_FLAGS_HPET_LEGACY;
3016 	if (!prev_legacy && cur_legacy)
3017 		start = 1;
3018 	memcpy(&kvm->arch.vpit->pit_state.channels, &ps->channels,
3019 	       sizeof(kvm->arch.vpit->pit_state.channels));
3020 	kvm->arch.vpit->pit_state.flags = ps->flags;
3021 	kvm_pit_load_count(kvm, 0, kvm->arch.vpit->pit_state.channels[0].count, start);
3022 	mutex_unlock(&kvm->arch.vpit->pit_state.lock);
3023 	return r;
3024 }
3025 
3026 static int kvm_vm_ioctl_reinject(struct kvm *kvm,
3027 				 struct kvm_reinject_control *control)
3028 {
3029 	if (!kvm->arch.vpit)
3030 		return -ENXIO;
3031 	mutex_lock(&kvm->arch.vpit->pit_state.lock);
3032 	kvm->arch.vpit->pit_state.pit_timer.reinject = control->pit_reinject;
3033 	mutex_unlock(&kvm->arch.vpit->pit_state.lock);
3034 	return 0;
3035 }
3036 
3037 /**
3038  * write_protect_slot - write protect a slot for dirty logging
3039  * @kvm: the kvm instance
3040  * @memslot: the slot we protect
3041  * @dirty_bitmap: the bitmap indicating which pages are dirty
3042  * @nr_dirty_pages: the number of dirty pages
3043  *
3044  * We have two ways to find all sptes to protect:
3045  * 1. Use kvm_mmu_slot_remove_write_access() which walks all shadow pages and
3046  *    checks ones that have a spte mapping a page in the slot.
3047  * 2. Use kvm_mmu_rmap_write_protect() for each gfn found in the bitmap.
3048  *
3049  * Generally speaking, if there are not so many dirty pages compared to the
3050  * number of shadow pages, we should use the latter.
3051  *
3052  * Note that letting others write into a page marked dirty in the old bitmap
3053  * by using the remaining tlb entry is not a problem.  That page will become
3054  * write protected again when we flush the tlb and then be reported dirty to
3055  * the user space by copying the old bitmap.
3056  */
3057 static void write_protect_slot(struct kvm *kvm,
3058 			       struct kvm_memory_slot *memslot,
3059 			       unsigned long *dirty_bitmap,
3060 			       unsigned long nr_dirty_pages)
3061 {
3062 	spin_lock(&kvm->mmu_lock);
3063 
3064 	/* Not many dirty pages compared to # of shadow pages. */
3065 	if (nr_dirty_pages < kvm->arch.n_used_mmu_pages) {
3066 		unsigned long gfn_offset;
3067 
3068 		for_each_set_bit(gfn_offset, dirty_bitmap, memslot->npages) {
3069 			unsigned long gfn = memslot->base_gfn + gfn_offset;
3070 
3071 			kvm_mmu_rmap_write_protect(kvm, gfn, memslot);
3072 		}
3073 		kvm_flush_remote_tlbs(kvm);
3074 	} else
3075 		kvm_mmu_slot_remove_write_access(kvm, memslot->id);
3076 
3077 	spin_unlock(&kvm->mmu_lock);
3078 }
3079 
3080 /*
3081  * Get (and clear) the dirty memory log for a memory slot.
3082  */
3083 int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
3084 				      struct kvm_dirty_log *log)
3085 {
3086 	int r;
3087 	struct kvm_memory_slot *memslot;
3088 	unsigned long n, nr_dirty_pages;
3089 
3090 	mutex_lock(&kvm->slots_lock);
3091 
3092 	r = -EINVAL;
3093 	if (log->slot >= KVM_MEMORY_SLOTS)
3094 		goto out;
3095 
3096 	memslot = id_to_memslot(kvm->memslots, log->slot);
3097 	r = -ENOENT;
3098 	if (!memslot->dirty_bitmap)
3099 		goto out;
3100 
3101 	n = kvm_dirty_bitmap_bytes(memslot);
3102 	nr_dirty_pages = memslot->nr_dirty_pages;
3103 
3104 	/* If nothing is dirty, don't bother messing with page tables. */
3105 	if (nr_dirty_pages) {
3106 		struct kvm_memslots *slots, *old_slots;
3107 		unsigned long *dirty_bitmap, *dirty_bitmap_head;
3108 
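		/*
		 * The slot keeps two bitmaps back to back; switch to the
		 * currently unused half so the guest can keep dirtying pages
		 * while the old bitmap is published to userspace.
		 */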
3109 		dirty_bitmap = memslot->dirty_bitmap;
3110 		dirty_bitmap_head = memslot->dirty_bitmap_head;
3111 		if (dirty_bitmap == dirty_bitmap_head)
3112 			dirty_bitmap_head += n / sizeof(long);
3113 		memset(dirty_bitmap_head, 0, n);
3114 
3115 		r = -ENOMEM;
3116 		slots = kmemdup(kvm->memslots, sizeof(*kvm->memslots), GFP_KERNEL);
3117 		if (!slots)
3118 			goto out;
3119 
3120 		memslot = id_to_memslot(slots, log->slot);
3121 		memslot->nr_dirty_pages = 0;
3122 		memslot->dirty_bitmap = dirty_bitmap_head;
3123 		update_memslots(slots, NULL);
3124 
3125 		old_slots = kvm->memslots;
3126 		rcu_assign_pointer(kvm->memslots, slots);
3127 		synchronize_srcu_expedited(&kvm->srcu);
3128 		kfree(old_slots);
3129 
3130 		write_protect_slot(kvm, memslot, dirty_bitmap, nr_dirty_pages);
3131 
3132 		r = -EFAULT;
3133 		if (copy_to_user(log->dirty_bitmap, dirty_bitmap, n))
3134 			goto out;
3135 	} else {
3136 		r = -EFAULT;
3137 		if (clear_user(log->dirty_bitmap, n))
3138 			goto out;
3139 	}
3140 
3141 	r = 0;
3142 out:
3143 	mutex_unlock(&kvm->slots_lock);
3144 	return r;
3145 }
3146 
3147 long kvm_arch_vm_ioctl(struct file *filp,
3148 		       unsigned int ioctl, unsigned long arg)
3149 {
3150 	struct kvm *kvm = filp->private_data;
3151 	void __user *argp = (void __user *)arg;
3152 	int r = -ENOTTY;
3153 	/*
3154 	 * This union makes it completely explicit to gcc-3.x
3155 	 * that these variables' stack usage should be
3156 	 * combined, not added together.
3157 	 */
3158 	union {
3159 		struct kvm_pit_state ps;
3160 		struct kvm_pit_state2 ps2;
3161 		struct kvm_pit_config pit_config;
3162 	} u;
3163 
3164 	switch (ioctl) {
3165 	case KVM_SET_TSS_ADDR:
3166 		r = kvm_vm_ioctl_set_tss_addr(kvm, arg);
3167 		if (r < 0)
3168 			goto out;
3169 		break;
3170 	case KVM_SET_IDENTITY_MAP_ADDR: {
3171 		u64 ident_addr;
3172 
3173 		r = -EFAULT;
3174 		if (copy_from_user(&ident_addr, argp, sizeof ident_addr))
3175 			goto out;
3176 		r = kvm_vm_ioctl_set_identity_map_addr(kvm, ident_addr);
3177 		if (r < 0)
3178 			goto out;
3179 		break;
3180 	}
3181 	case KVM_SET_NR_MMU_PAGES:
3182 		r = kvm_vm_ioctl_set_nr_mmu_pages(kvm, arg);
3183 		if (r)
3184 			goto out;
3185 		break;
3186 	case KVM_GET_NR_MMU_PAGES:
3187 		r = kvm_vm_ioctl_get_nr_mmu_pages(kvm);
3188 		break;
3189 	case KVM_CREATE_IRQCHIP: {
3190 		struct kvm_pic *vpic;
3191 
3192 		mutex_lock(&kvm->lock);
3193 		r = -EEXIST;
3194 		if (kvm->arch.vpic)
3195 			goto create_irqchip_unlock;
3196 		r = -EINVAL;
3197 		if (atomic_read(&kvm->online_vcpus))
3198 			goto create_irqchip_unlock;
3199 		r = -ENOMEM;
3200 		vpic = kvm_create_pic(kvm);
3201 		if (vpic) {
3202 			r = kvm_ioapic_init(kvm);
3203 			if (r) {
3204 				mutex_lock(&kvm->slots_lock);
3205 				kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS,
3206 							  &vpic->dev_master);
3207 				kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS,
3208 							  &vpic->dev_slave);
3209 				kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS,
3210 							  &vpic->dev_eclr);
3211 				mutex_unlock(&kvm->slots_lock);
3212 				kfree(vpic);
3213 				goto create_irqchip_unlock;
3214 			}
3215 		} else
3216 			goto create_irqchip_unlock;
3217 		smp_wmb();
3218 		kvm->arch.vpic = vpic;
3219 		smp_wmb();
3220 		r = kvm_setup_default_irq_routing(kvm);
3221 		if (r) {
3222 			mutex_lock(&kvm->slots_lock);
3223 			mutex_lock(&kvm->irq_lock);
3224 			kvm_ioapic_destroy(kvm);
3225 			kvm_destroy_pic(kvm);
3226 			mutex_unlock(&kvm->irq_lock);
3227 			mutex_unlock(&kvm->slots_lock);
3228 		}
3229 	create_irqchip_unlock:
3230 		mutex_unlock(&kvm->lock);
3231 		break;
3232 	}
3233 	case KVM_CREATE_PIT:
3234 		u.pit_config.flags = KVM_PIT_SPEAKER_DUMMY;
3235 		goto create_pit;
3236 	case KVM_CREATE_PIT2:
3237 		r = -EFAULT;
3238 		if (copy_from_user(&u.pit_config, argp,
3239 				   sizeof(struct kvm_pit_config)))
3240 			goto out;
3241 	create_pit:
3242 		mutex_lock(&kvm->slots_lock);
3243 		r = -EEXIST;
3244 		if (kvm->arch.vpit)
3245 			goto create_pit_unlock;
3246 		r = -ENOMEM;
3247 		kvm->arch.vpit = kvm_create_pit(kvm, u.pit_config.flags);
3248 		if (kvm->arch.vpit)
3249 			r = 0;
3250 	create_pit_unlock:
3251 		mutex_unlock(&kvm->slots_lock);
3252 		break;
3253 	case KVM_IRQ_LINE_STATUS:
3254 	case KVM_IRQ_LINE: {
3255 		struct kvm_irq_level irq_event;
3256 
3257 		r = -EFAULT;
3258 		if (copy_from_user(&irq_event, argp, sizeof irq_event))
3259 			goto out;
3260 		r = -ENXIO;
3261 		if (irqchip_in_kernel(kvm)) {
3262 			__s32 status;
3263 			status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
3264 					irq_event.irq, irq_event.level);
3265 			if (ioctl == KVM_IRQ_LINE_STATUS) {
3266 				r = -EFAULT;
3267 				irq_event.status = status;
3268 				if (copy_to_user(argp, &irq_event,
3269 							sizeof irq_event))
3270 					goto out;
3271 			}
3272 			r = 0;
3273 		}
3274 		break;
3275 	}
3276 	case KVM_GET_IRQCHIP: {
3277 		/* 0: PIC master, 1: PIC slave, 2: IOAPIC */
3278 		struct kvm_irqchip *chip;
3279 
3280 		chip = memdup_user(argp, sizeof(*chip));
3281 		if (IS_ERR(chip)) {
3282 			r = PTR_ERR(chip);
3283 			goto out;
3284 		}
3285 
3286 		r = -ENXIO;
3287 		if (!irqchip_in_kernel(kvm))
3288 			goto get_irqchip_out;
3289 		r = kvm_vm_ioctl_get_irqchip(kvm, chip);
3290 		if (r)
3291 			goto get_irqchip_out;
3292 		r = -EFAULT;
3293 		if (copy_to_user(argp, chip, sizeof *chip))
3294 			goto get_irqchip_out;
3295 		r = 0;
3296 	get_irqchip_out:
3297 		kfree(chip);
3298 		if (r)
3299 			goto out;
3300 		break;
3301 	}
3302 	case KVM_SET_IRQCHIP: {
3303 		/* 0: PIC master, 1: PIC slave, 2: IOAPIC */
3304 		struct kvm_irqchip *chip;
3305 
3306 		chip = memdup_user(argp, sizeof(*chip));
3307 		if (IS_ERR(chip)) {
3308 			r = PTR_ERR(chip);
3309 			goto out;
3310 		}
3311 
3312 		r = -ENXIO;
3313 		if (!irqchip_in_kernel(kvm))
3314 			goto set_irqchip_out;
3315 		r = kvm_vm_ioctl_set_irqchip(kvm, chip);
3316 		if (r)
3317 			goto set_irqchip_out;
3318 		r = 0;
3319 	set_irqchip_out:
3320 		kfree(chip);
3321 		if (r)
3322 			goto out;
3323 		break;
3324 	}
3325 	case KVM_GET_PIT: {
3326 		r = -EFAULT;
3327 		if (copy_from_user(&u.ps, argp, sizeof(struct kvm_pit_state)))
3328 			goto out;
3329 		r = -ENXIO;
3330 		if (!kvm->arch.vpit)
3331 			goto out;
3332 		r = kvm_vm_ioctl_get_pit(kvm, &u.ps);
3333 		if (r)
3334 			goto out;
3335 		r = -EFAULT;
3336 		if (copy_to_user(argp, &u.ps, sizeof(struct kvm_pit_state)))
3337 			goto out;
3338 		r = 0;
3339 		break;
3340 	}
3341 	case KVM_SET_PIT: {
3342 		r = -EFAULT;
3343 		if (copy_from_user(&u.ps, argp, sizeof u.ps))
3344 			goto out;
3345 		r = -ENXIO;
3346 		if (!kvm->arch.vpit)
3347 			goto out;
3348 		r = kvm_vm_ioctl_set_pit(kvm, &u.ps);
3349 		if (r)
3350 			goto out;
3351 		r = 0;
3352 		break;
3353 	}
3354 	case KVM_GET_PIT2: {
3355 		r = -ENXIO;
3356 		if (!kvm->arch.vpit)
3357 			goto out;
3358 		r = kvm_vm_ioctl_get_pit2(kvm, &u.ps2);
3359 		if (r)
3360 			goto out;
3361 		r = -EFAULT;
3362 		if (copy_to_user(argp, &u.ps2, sizeof(u.ps2)))
3363 			goto out;
3364 		r = 0;
3365 		break;
3366 	}
3367 	case KVM_SET_PIT2: {
3368 		r = -EFAULT;
3369 		if (copy_from_user(&u.ps2, argp, sizeof(u.ps2)))
3370 			goto out;
3371 		r = -ENXIO;
3372 		if (!kvm->arch.vpit)
3373 			goto out;
3374 		r = kvm_vm_ioctl_set_pit2(kvm, &u.ps2);
3375 		if (r)
3376 			goto out;
3377 		r = 0;
3378 		break;
3379 	}
3380 	case KVM_REINJECT_CONTROL: {
3381 		struct kvm_reinject_control control;
3382 		r =  -EFAULT;
3383 		if (copy_from_user(&control, argp, sizeof(control)))
3384 			goto out;
3385 		r = kvm_vm_ioctl_reinject(kvm, &control);
3386 		if (r)
3387 			goto out;
3388 		r = 0;
3389 		break;
3390 	}
3391 	case KVM_XEN_HVM_CONFIG: {
3392 		r = -EFAULT;
3393 		if (copy_from_user(&kvm->arch.xen_hvm_config, argp,
3394 				   sizeof(struct kvm_xen_hvm_config)))
3395 			goto out;
3396 		r = -EINVAL;
3397 		if (kvm->arch.xen_hvm_config.flags)
3398 			goto out;
3399 		r = 0;
3400 		break;
3401 	}
3402 	case KVM_SET_CLOCK: {
3403 		struct kvm_clock_data user_ns;
3404 		u64 now_ns;
3405 		s64 delta;
3406 
3407 		r = -EFAULT;
3408 		if (copy_from_user(&user_ns, argp, sizeof(user_ns)))
3409 			goto out;
3410 
3411 		r = -EINVAL;
3412 		if (user_ns.flags)
3413 			goto out;
3414 
3415 		r = 0;
3416 		local_irq_disable();
3417 		now_ns = get_kernel_ns();
3418 		delta = user_ns.clock - now_ns;
3419 		local_irq_enable();
3420 		kvm->arch.kvmclock_offset = delta;
3421 		break;
3422 	}
3423 	case KVM_GET_CLOCK: {
3424 		struct kvm_clock_data user_ns;
3425 		u64 now_ns;
3426 
3427 		local_irq_disable();
3428 		now_ns = get_kernel_ns();
3429 		user_ns.clock = kvm->arch.kvmclock_offset + now_ns;
3430 		local_irq_enable();
3431 		user_ns.flags = 0;
3432 		memset(&user_ns.pad, 0, sizeof(user_ns.pad));
3433 
3434 		r = -EFAULT;
3435 		if (copy_to_user(argp, &user_ns, sizeof(user_ns)))
3436 			goto out;
3437 		r = 0;
3438 		break;
3439 	}
3440 
3441 	default:
3442 		;
3443 	}
3444 out:
3445 	return r;
3446 }
3447 
3448 static void kvm_init_msr_list(void)
3449 {
3450 	u32 dummy[2];
3451 	unsigned i, j;
3452 
3453 	/* skip the first msrs in the list. KVM-specific */
3454 	for (i = j = KVM_SAVE_MSRS_BEGIN; i < ARRAY_SIZE(msrs_to_save); i++) {
3455 		if (rdmsr_safe(msrs_to_save[i], &dummy[0], &dummy[1]) < 0)
3456 			continue;
3457 		if (j < i)
3458 			msrs_to_save[j] = msrs_to_save[i];
3459 		j++;
3460 	}
3461 	num_msrs_to_save = j;
3462 }
3463 
3464 static int vcpu_mmio_write(struct kvm_vcpu *vcpu, gpa_t addr, int len,
3465 			   const void *v)
3466 {
3467 	int handled = 0;
3468 	int n;
3469 
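	/* Dispatch in chunks of at most 8 bytes, trying the local APIC first. */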
3470 	do {
3471 		n = min(len, 8);
3472 		if (!(vcpu->arch.apic &&
3473 		      !kvm_iodevice_write(&vcpu->arch.apic->dev, addr, n, v))
3474 		    && kvm_io_bus_write(vcpu->kvm, KVM_MMIO_BUS, addr, n, v))
3475 			break;
3476 		handled += n;
3477 		addr += n;
3478 		len -= n;
3479 		v += n;
3480 	} while (len);
3481 
3482 	return handled;
3483 }
3484 
3485 static int vcpu_mmio_read(struct kvm_vcpu *vcpu, gpa_t addr, int len, void *v)
3486 {
3487 	int handled = 0;
3488 	int n;
3489 
3490 	do {
3491 		n = min(len, 8);
3492 		if (!(vcpu->arch.apic &&
3493 		      !kvm_iodevice_read(&vcpu->arch.apic->dev, addr, n, v))
3494 		    && kvm_io_bus_read(vcpu->kvm, KVM_MMIO_BUS, addr, n, v))
3495 			break;
3496 		trace_kvm_mmio(KVM_TRACE_MMIO_READ, n, addr, *(u64 *)v);
3497 		handled += n;
3498 		addr += n;
3499 		len -= n;
3500 		v += n;
3501 	} while (len);
3502 
3503 	return handled;
3504 }
3505 
3506 static void kvm_set_segment(struct kvm_vcpu *vcpu,
3507 			struct kvm_segment *var, int seg)
3508 {
3509 	kvm_x86_ops->set_segment(vcpu, var, seg);
3510 }
3511 
3512 void kvm_get_segment(struct kvm_vcpu *vcpu,
3513 		     struct kvm_segment *var, int seg)
3514 {
3515 	kvm_x86_ops->get_segment(vcpu, var, seg);
3516 }
3517 
3518 gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access)
3519 {
3520 	gpa_t t_gpa;
3521 	struct x86_exception exception;
3522 
3523 	BUG_ON(!mmu_is_nested(vcpu));
3524 
3525 	/* NPT walks are always user-walks */
3526 	access |= PFERR_USER_MASK;
3527 	t_gpa  = vcpu->arch.mmu.gva_to_gpa(vcpu, gpa, access, &exception);
3528 
3529 	return t_gpa;
3530 }
3531 
3532 gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva,
3533 			      struct x86_exception *exception)
3534 {
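	/* Accesses from CPL 3 are checked as user-mode walks; otherwise supervisor. */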
3535 	u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
3536 	return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);
3537 }
3538 
3539 gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva,
3540 				struct x86_exception *exception)
3541 {
3542 	u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
3543 	access |= PFERR_FETCH_MASK;
3544 	return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);
3545 }
3546 
3547 gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva,
3548 			       struct x86_exception *exception)
3549 {
3550 	u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
3551 	access |= PFERR_WRITE_MASK;
3552 	return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);
3553 }
3554 
3555 /* Used to access any guest's mapped memory without checking CPL */
3556 gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva,
3557 				struct x86_exception *exception)
3558 {
3559 	return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, 0, exception);
3560 }
3561 
3562 static int kvm_read_guest_virt_helper(gva_t addr, void *val, unsigned int bytes,
3563 				      struct kvm_vcpu *vcpu, u32 access,
3564 				      struct x86_exception *exception)
3565 {
3566 	void *data = val;
3567 	int r = X86EMUL_CONTINUE;
3568 
3569 	while (bytes) {
3570 		gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr, access,
3571 							    exception);
3572 		unsigned offset = addr & (PAGE_SIZE-1);
3573 		unsigned toread = min(bytes, (unsigned)PAGE_SIZE - offset);
3574 		int ret;
3575 
3576 		if (gpa == UNMAPPED_GVA)
3577 			return X86EMUL_PROPAGATE_FAULT;
3578 		ret = kvm_read_guest(vcpu->kvm, gpa, data, toread);
3579 		if (ret < 0) {
3580 			r = X86EMUL_IO_NEEDED;
3581 			goto out;
3582 		}
3583 
3584 		bytes -= toread;
3585 		data += toread;
3586 		addr += toread;
3587 	}
3588 out:
3589 	return r;
3590 }
3591 
3592 /* used for instruction fetching */
3593 static int kvm_fetch_guest_virt(struct x86_emulate_ctxt *ctxt,
3594 				gva_t addr, void *val, unsigned int bytes,
3595 				struct x86_exception *exception)
3596 {
3597 	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
3598 	u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
3599 
3600 	return kvm_read_guest_virt_helper(addr, val, bytes, vcpu,
3601 					  access | PFERR_FETCH_MASK,
3602 					  exception);
3603 }
3604 
3605 int kvm_read_guest_virt(struct x86_emulate_ctxt *ctxt,
3606 			       gva_t addr, void *val, unsigned int bytes,
3607 			       struct x86_exception *exception)
3608 {
3609 	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
3610 	u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
3611 
3612 	return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access,
3613 					  exception);
3614 }
3615 EXPORT_SYMBOL_GPL(kvm_read_guest_virt);
3616 
3617 static int kvm_read_guest_virt_system(struct x86_emulate_ctxt *ctxt,
3618 				      gva_t addr, void *val, unsigned int bytes,
3619 				      struct x86_exception *exception)
3620 {
3621 	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
3622 	return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, 0, exception);
3623 }
3624 
3625 int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt,
3626 				       gva_t addr, void *val,
3627 				       unsigned int bytes,
3628 				       struct x86_exception *exception)
3629 {
3630 	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
3631 	void *data = val;
3632 	int r = X86EMUL_CONTINUE;
3633 
3634 	while (bytes) {
3635 		gpa_t gpa =  vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr,
3636 							     PFERR_WRITE_MASK,
3637 							     exception);
3638 		unsigned offset = addr & (PAGE_SIZE-1);
3639 		unsigned towrite = min(bytes, (unsigned)PAGE_SIZE - offset);
3640 		int ret;
3641 
3642 		if (gpa == UNMAPPED_GVA)
3643 			return X86EMUL_PROPAGATE_FAULT;
3644 		ret = kvm_write_guest(vcpu->kvm, gpa, data, towrite);
3645 		if (ret < 0) {
3646 			r = X86EMUL_IO_NEEDED;
3647 			goto out;
3648 		}
3649 
3650 		bytes -= towrite;
3651 		data += towrite;
3652 		addr += towrite;
3653 	}
3654 out:
3655 	return r;
3656 }
3657 EXPORT_SYMBOL_GPL(kvm_write_guest_virt_system);
3658 
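/*
 * Translate a guest virtual address for an MMIO-capable access.
 * Returns 1 if the access should be handled as MMIO (cached gva/gpa
 * match or the APIC base page), 0 if it maps ordinary guest memory,
 * and -1 if the translation faulted (the fault details are filled
 * into @exception).
 */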
3659 static int vcpu_mmio_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva,
3660 				gpa_t *gpa, struct x86_exception *exception,
3661 				bool write)
3662 {
3663 	u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
3664 
3665 	if (vcpu_match_mmio_gva(vcpu, gva) &&
3666 		  check_write_user_access(vcpu, write, access,
3667 		  vcpu->arch.access)) {
3668 		*gpa = vcpu->arch.mmio_gfn << PAGE_SHIFT |
3669 					(gva & (PAGE_SIZE - 1));
3670 		trace_vcpu_match_mmio(gva, *gpa, write, false);
3671 		return 1;
3672 	}
3673 
3674 	if (write)
3675 		access |= PFERR_WRITE_MASK;
3676 
3677 	*gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);
3678 
3679 	if (*gpa == UNMAPPED_GVA)
3680 		return -1;
3681 
3682 	/* For APIC access vmexit */
3683 	if ((*gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
3684 		return 1;
3685 
3686 	if (vcpu_match_mmio_gpa(vcpu, *gpa)) {
3687 		trace_vcpu_match_mmio(gva, *gpa, write, true);
3688 		return 1;
3689 	}
3690 
3691 	return 0;
3692 }
3693 
3694 int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
3695 			const void *val, int bytes)
3696 {
3697 	int ret;
3698 
3699 	ret = kvm_write_guest(vcpu->kvm, gpa, val, bytes);
3700 	if (ret < 0)
3701 		return 0;
3702 	kvm_mmu_pte_write(vcpu, gpa, val, bytes);
3703 	return 1;
3704 }
3705 
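/*
 * Callback bundle that lets emulator_read_write_onepage() share one
 * code path for emulated reads and writes: try guest RAM first via
 * read_write_emulate(), fall back to in-kernel MMIO devices via
 * read_write_mmio(), and hand anything unhandled to userspace via
 * read_write_exit_mmio().
 */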
3706 struct read_write_emulator_ops {
3707 	int (*read_write_prepare)(struct kvm_vcpu *vcpu, void *val,
3708 				  int bytes);
3709 	int (*read_write_emulate)(struct kvm_vcpu *vcpu, gpa_t gpa,
3710 				  void *val, int bytes);
3711 	int (*read_write_mmio)(struct kvm_vcpu *vcpu, gpa_t gpa,
3712 			       int bytes, void *val);
3713 	int (*read_write_exit_mmio)(struct kvm_vcpu *vcpu, gpa_t gpa,
3714 				    void *val, int bytes);
3715 	bool write;
3716 };
3717 
3718 static int read_prepare(struct kvm_vcpu *vcpu, void *val, int bytes)
3719 {
3720 	if (vcpu->mmio_read_completed) {
3721 		memcpy(val, vcpu->mmio_data, bytes);
3722 		trace_kvm_mmio(KVM_TRACE_MMIO_READ, bytes,
3723 			       vcpu->mmio_phys_addr, *(u64 *)val);
3724 		vcpu->mmio_read_completed = 0;
3725 		return 1;
3726 	}
3727 
3728 	return 0;
3729 }
3730 
3731 static int read_emulate(struct kvm_vcpu *vcpu, gpa_t gpa,
3732 			void *val, int bytes)
3733 {
3734 	return !kvm_read_guest(vcpu->kvm, gpa, val, bytes);
3735 }
3736 
3737 static int write_emulate(struct kvm_vcpu *vcpu, gpa_t gpa,
3738 			 void *val, int bytes)
3739 {
3740 	return emulator_write_phys(vcpu, gpa, val, bytes);
3741 }
3742 
3743 static int write_mmio(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes, void *val)
3744 {
3745 	trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, bytes, gpa, *(u64 *)val);
3746 	return vcpu_mmio_write(vcpu, gpa, bytes, val);
3747 }
3748 
3749 static int read_exit_mmio(struct kvm_vcpu *vcpu, gpa_t gpa,
3750 			  void *val, int bytes)
3751 {
3752 	trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, bytes, gpa, 0);
3753 	return X86EMUL_IO_NEEDED;
3754 }
3755 
3756 static int write_exit_mmio(struct kvm_vcpu *vcpu, gpa_t gpa,
3757 			   void *val, int bytes)
3758 {
3759 	memcpy(vcpu->mmio_data, val, bytes);
3760 	memcpy(vcpu->run->mmio.data, vcpu->mmio_data, 8);
3761 	return X86EMUL_CONTINUE;
3762 }
3763 
3764 static struct read_write_emulator_ops read_emultor = {
3765 	.read_write_prepare = read_prepare,
3766 	.read_write_emulate = read_emulate,
3767 	.read_write_mmio = vcpu_mmio_read,
3768 	.read_write_exit_mmio = read_exit_mmio,
3769 };
3770 
3771 static struct read_write_emulator_ops write_emultor = {
3772 	.read_write_emulate = write_emulate,
3773 	.read_write_mmio = write_mmio,
3774 	.read_write_exit_mmio = write_exit_mmio,
3775 	.write = true,
3776 };
3777 
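/*
 * Handle an emulated memory access that is contained within a single
 * page: resolve the gpa, try ordinary guest memory, then in-kernel
 * MMIO devices; whatever remains unhandled is recorded in vcpu->run
 * as a KVM_EXIT_MMIO request for userspace to complete.
 */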
3778 static int emulator_read_write_onepage(unsigned long addr, void *val,
3779 				       unsigned int bytes,
3780 				       struct x86_exception *exception,
3781 				       struct kvm_vcpu *vcpu,
3782 				       struct read_write_emulator_ops *ops)
3783 {
3784 	gpa_t gpa;
3785 	int handled, ret;
3786 	bool write = ops->write;
3787 
3788 	if (ops->read_write_prepare &&
3789 		  ops->read_write_prepare(vcpu, val, bytes))
3790 		return X86EMUL_CONTINUE;
3791 
3792 	ret = vcpu_mmio_gva_to_gpa(vcpu, addr, &gpa, exception, write);
3793 
3794 	if (ret < 0)
3795 		return X86EMUL_PROPAGATE_FAULT;
3796 
3797 	/* For APIC access vmexit */
3798 	if (ret)
3799 		goto mmio;
3800 
3801 	if (ops->read_write_emulate(vcpu, gpa, val, bytes))
3802 		return X86EMUL_CONTINUE;
3803 
3804 mmio:
3805 	/*
3806 	 * Is this MMIO handled locally?
3807 	 */
3808 	handled = ops->read_write_mmio(vcpu, gpa, bytes, val);
3809 	if (handled == bytes)
3810 		return X86EMUL_CONTINUE;
3811 
3812 	gpa += handled;
3813 	bytes -= handled;
3814 	val += handled;
3815 
3816 	vcpu->mmio_needed = 1;
3817 	vcpu->run->exit_reason = KVM_EXIT_MMIO;
3818 	vcpu->run->mmio.phys_addr = vcpu->mmio_phys_addr = gpa;
3819 	vcpu->mmio_size = bytes;
3820 	vcpu->run->mmio.len = min(vcpu->mmio_size, 8);
3821 	vcpu->run->mmio.is_write = vcpu->mmio_is_write = write;
3822 	vcpu->mmio_index = 0;
3823 
3824 	return ops->read_write_exit_mmio(vcpu, gpa, val, bytes);
3825 }
3826 
3827 int emulator_read_write(struct x86_emulate_ctxt *ctxt, unsigned long addr,
3828 			void *val, unsigned int bytes,
3829 			struct x86_exception *exception,
3830 			struct read_write_emulator_ops *ops)
3831 {
3832 	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
3833 
3834 	/* Crossing a page boundary? */
3835 	if (((addr + bytes - 1) ^ addr) & PAGE_MASK) {
3836 		int rc, now;
3837 
3838 		now = -addr & ~PAGE_MASK;
3839 		rc = emulator_read_write_onepage(addr, val, now, exception,
3840 						 vcpu, ops);
3841 
3842 		if (rc != X86EMUL_CONTINUE)
3843 			return rc;
3844 		addr += now;
3845 		val += now;
3846 		bytes -= now;
3847 	}
3848 
3849 	return emulator_read_write_onepage(addr, val, bytes, exception,
3850 					   vcpu, ops);
3851 }
3852 
3853 static int emulator_read_emulated(struct x86_emulate_ctxt *ctxt,
3854 				  unsigned long addr,
3855 				  void *val,
3856 				  unsigned int bytes,
3857 				  struct x86_exception *exception)
3858 {
3859 	return emulator_read_write(ctxt, addr, val, bytes,
3860 				   exception, &read_emultor);
3861 }
3862 
3863 int emulator_write_emulated(struct x86_emulate_ctxt *ctxt,
3864 			    unsigned long addr,
3865 			    const void *val,
3866 			    unsigned int bytes,
3867 			    struct x86_exception *exception)
3868 {
3869 	return emulator_read_write(ctxt, addr, (void *)val, bytes,
3870 				   exception, &write_emultor);
3871 }
3872 
3873 #define CMPXCHG_TYPE(t, ptr, old, new) \
3874 	(cmpxchg((t *)(ptr), *(t *)(old), *(t *)(new)) == *(t *)(old))
3875 
3876 #ifdef CONFIG_X86_64
3877 #  define CMPXCHG64(ptr, old, new) CMPXCHG_TYPE(u64, ptr, old, new)
3878 #else
3879 #  define CMPXCHG64(ptr, old, new) \
3880 	(cmpxchg64((u64 *)(ptr), *(u64 *)(old), *(u64 *)(new)) == *(u64 *)(old))
3881 #endif
3882 
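/*
 * Emulate cmpxchg against guest memory by mapping the backing page and
 * performing a real (atomic) cmpxchg on it.  Operands that are larger
 * than 8 bytes, not a power of two in size, page-crossing, or backed
 * by MMIO fall back to a plain emulated write, which is not atomic.
 */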
3883 static int emulator_cmpxchg_emulated(struct x86_emulate_ctxt *ctxt,
3884 				     unsigned long addr,
3885 				     const void *old,
3886 				     const void *new,
3887 				     unsigned int bytes,
3888 				     struct x86_exception *exception)
3889 {
3890 	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
3891 	gpa_t gpa;
3892 	struct page *page;
3893 	char *kaddr;
3894 	bool exchanged;
3895 
3896 	/* a guest cmpxchg8b has to be emulated atomically */
3897 	if (bytes > 8 || (bytes & (bytes - 1)))
3898 		goto emul_write;
3899 
3900 	gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, NULL);
3901 
3902 	if (gpa == UNMAPPED_GVA ||
3903 	    (gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
3904 		goto emul_write;
3905 
3906 	if (((gpa + bytes - 1) & PAGE_MASK) != (gpa & PAGE_MASK))
3907 		goto emul_write;
3908 
3909 	page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
3910 	if (is_error_page(page)) {
3911 		kvm_release_page_clean(page);
3912 		goto emul_write;
3913 	}
3914 
3915 	kaddr = kmap_atomic(page);
3916 	kaddr += offset_in_page(gpa);
3917 	switch (bytes) {
3918 	case 1:
3919 		exchanged = CMPXCHG_TYPE(u8, kaddr, old, new);
3920 		break;
3921 	case 2:
3922 		exchanged = CMPXCHG_TYPE(u16, kaddr, old, new);
3923 		break;
3924 	case 4:
3925 		exchanged = CMPXCHG_TYPE(u32, kaddr, old, new);
3926 		break;
3927 	case 8:
3928 		exchanged = CMPXCHG64(kaddr, old, new);
3929 		break;
3930 	default:
3931 		BUG();
3932 	}
3933 	kunmap_atomic(kaddr);
3934 	kvm_release_page_dirty(page);
3935 
3936 	if (!exchanged)
3937 		return X86EMUL_CMPXCHG_FAILED;
3938 
3939 	kvm_mmu_pte_write(vcpu, gpa, new, bytes);
3940 
3941 	return X86EMUL_CONTINUE;
3942 
3943 emul_write:
3944 	printk_once(KERN_WARNING "kvm: emulating exchange as write\n");
3945 
3946 	return emulator_write_emulated(ctxt, addr, new, bytes, exception);
3947 }
3948 
3949 static int kernel_pio(struct kvm_vcpu *vcpu, void *pd)
3950 {
3951 	/* TODO: String I/O for in kernel device */
3952 	int r;
3953 
3954 	if (vcpu->arch.pio.in)
3955 		r = kvm_io_bus_read(vcpu->kvm, KVM_PIO_BUS, vcpu->arch.pio.port,
3956 				    vcpu->arch.pio.size, pd);
3957 	else
3958 		r = kvm_io_bus_write(vcpu->kvm, KVM_PIO_BUS,
3959 				     vcpu->arch.pio.port, vcpu->arch.pio.size,
3960 				     pd);
3961 	return r;
3962 }
3963 
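/*
 * Common helper for emulated port I/O.  Returns 1 if the access was
 * satisfied by an in-kernel device on KVM_PIO_BUS; otherwise it fills
 * vcpu->run for a KVM_EXIT_IO exit and returns 0 so userspace can
 * complete the access.
 */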
3964 static int emulator_pio_in_out(struct kvm_vcpu *vcpu, int size,
3965 			       unsigned short port, void *val,
3966 			       unsigned int count, bool in)
3967 {
3968 	trace_kvm_pio(!in, port, size, count);
3969 
3970 	vcpu->arch.pio.port = port;
3971 	vcpu->arch.pio.in = in;
3972 	vcpu->arch.pio.count  = count;
3973 	vcpu->arch.pio.size = size;
3974 
3975 	if (!kernel_pio(vcpu, vcpu->arch.pio_data)) {
3976 		vcpu->arch.pio.count = 0;
3977 		return 1;
3978 	}
3979 
3980 	vcpu->run->exit_reason = KVM_EXIT_IO;
3981 	vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
3982 	vcpu->run->io.size = size;
3983 	vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
3984 	vcpu->run->io.count = count;
3985 	vcpu->run->io.port = port;
3986 
3987 	return 0;
3988 }
3989 
3990 static int emulator_pio_in_emulated(struct x86_emulate_ctxt *ctxt,
3991 				    int size, unsigned short port, void *val,
3992 				    unsigned int count)
3993 {
3994 	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
3995 	int ret;
3996 
3997 	if (vcpu->arch.pio.count)
3998 		goto data_avail;
3999 
4000 	ret = emulator_pio_in_out(vcpu, size, port, val, count, true);
4001 	if (ret) {
4002 data_avail:
4003 		memcpy(val, vcpu->arch.pio_data, size * count);
4004 		vcpu->arch.pio.count = 0;
4005 		return 1;
4006 	}
4007 
4008 	return 0;
4009 }
4010 
4011 static int emulator_pio_out_emulated(struct x86_emulate_ctxt *ctxt,
4012 				     int size, unsigned short port,
4013 				     const void *val, unsigned int count)
4014 {
4015 	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
4016 
4017 	memcpy(vcpu->arch.pio_data, val, size * count);
4018 	return emulator_pio_in_out(vcpu, size, port, (void *)val, count, false);
4019 }
4020 
4021 static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg)
4022 {
4023 	return kvm_x86_ops->get_segment_base(vcpu, seg);
4024 }
4025 
4026 static void emulator_invlpg(struct x86_emulate_ctxt *ctxt, ulong address)
4027 {
4028 	kvm_mmu_invlpg(emul_to_vcpu(ctxt), address);
4029 }
4030 
4031 int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu)
4032 {
4033 	if (!need_emulate_wbinvd(vcpu))
4034 		return X86EMUL_CONTINUE;
4035 
4036 	if (kvm_x86_ops->has_wbinvd_exit()) {
4037 		int cpu = get_cpu();
4038 
4039 		cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask);
4040 		smp_call_function_many(vcpu->arch.wbinvd_dirty_mask,
4041 				wbinvd_ipi, NULL, 1);
4042 		put_cpu();
4043 		cpumask_clear(vcpu->arch.wbinvd_dirty_mask);
4044 	} else
4045 		wbinvd();
4046 	return X86EMUL_CONTINUE;
4047 }
4048 EXPORT_SYMBOL_GPL(kvm_emulate_wbinvd);
4049 
4050 static void emulator_wbinvd(struct x86_emulate_ctxt *ctxt)
4051 {
4052 	kvm_emulate_wbinvd(emul_to_vcpu(ctxt));
4053 }
4054 
4055 int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long *dest)
4056 {
4057 	return _kvm_get_dr(emul_to_vcpu(ctxt), dr, dest);
4058 }
4059 
4060 int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long value)
4061 {
4062 
4063 	return __kvm_set_dr(emul_to_vcpu(ctxt), dr, value);
4064 }
4065 
4066 static u64 mk_cr_64(u64 curr_cr, u32 new_val)
4067 {
4068 	return (curr_cr & ~((1ULL << 32) - 1)) | new_val;
4069 }
4070 
4071 static unsigned long emulator_get_cr(struct x86_emulate_ctxt *ctxt, int cr)
4072 {
4073 	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
4074 	unsigned long value;
4075 
4076 	switch (cr) {
4077 	case 0:
4078 		value = kvm_read_cr0(vcpu);
4079 		break;
4080 	case 2:
4081 		value = vcpu->arch.cr2;
4082 		break;
4083 	case 3:
4084 		value = kvm_read_cr3(vcpu);
4085 		break;
4086 	case 4:
4087 		value = kvm_read_cr4(vcpu);
4088 		break;
4089 	case 8:
4090 		value = kvm_get_cr8(vcpu);
4091 		break;
4092 	default:
4093 		vcpu_printf(vcpu, "%s: unexpected cr %u\n", __func__, cr);
4094 		return 0;
4095 	}
4096 
4097 	return value;
4098 }
4099 
4100 static int emulator_set_cr(struct x86_emulate_ctxt *ctxt, int cr, ulong val)
4101 {
4102 	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
4103 	int res = 0;
4104 
4105 	switch (cr) {
4106 	case 0:
4107 		res = kvm_set_cr0(vcpu, mk_cr_64(kvm_read_cr0(vcpu), val));
4108 		break;
4109 	case 2:
4110 		vcpu->arch.cr2 = val;
4111 		break;
4112 	case 3:
4113 		res = kvm_set_cr3(vcpu, val);
4114 		break;
4115 	case 4:
4116 		res = kvm_set_cr4(vcpu, mk_cr_64(kvm_read_cr4(vcpu), val));
4117 		break;
4118 	case 8:
4119 		res = kvm_set_cr8(vcpu, val);
4120 		break;
4121 	default:
4122 		vcpu_printf(vcpu, "%s: unexpected cr %u\n", __func__, cr);
4123 		res = -1;
4124 	}
4125 
4126 	return res;
4127 }
4128 
4129 static void emulator_set_rflags(struct x86_emulate_ctxt *ctxt, ulong val)
4130 {
4131 	kvm_set_rflags(emul_to_vcpu(ctxt), val);
4132 }
4133 
4134 static int emulator_get_cpl(struct x86_emulate_ctxt *ctxt)
4135 {
4136 	return kvm_x86_ops->get_cpl(emul_to_vcpu(ctxt));
4137 }
4138 
4139 static void emulator_get_gdt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt)
4140 {
4141 	kvm_x86_ops->get_gdt(emul_to_vcpu(ctxt), dt);
4142 }
4143 
4144 static void emulator_get_idt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt)
4145 {
4146 	kvm_x86_ops->get_idt(emul_to_vcpu(ctxt), dt);
4147 }
4148 
4149 static void emulator_set_gdt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt)
4150 {
4151 	kvm_x86_ops->set_gdt(emul_to_vcpu(ctxt), dt);
4152 }
4153 
4154 static void emulator_set_idt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt)
4155 {
4156 	kvm_x86_ops->set_idt(emul_to_vcpu(ctxt), dt);
4157 }
4158 
4159 static unsigned long emulator_get_cached_segment_base(
4160 	struct x86_emulate_ctxt *ctxt, int seg)
4161 {
4162 	return get_segment_base(emul_to_vcpu(ctxt), seg);
4163 }
4164 
4165 static bool emulator_get_segment(struct x86_emulate_ctxt *ctxt, u16 *selector,
4166 				 struct desc_struct *desc, u32 *base3,
4167 				 int seg)
4168 {
4169 	struct kvm_segment var;
4170 
4171 	kvm_get_segment(emul_to_vcpu(ctxt), &var, seg);
4172 	*selector = var.selector;
4173 
4174 	if (var.unusable)
4175 		return false;
4176 
4177 	if (var.g)
4178 		var.limit >>= 12;
4179 	set_desc_limit(desc, var.limit);
4180 	set_desc_base(desc, (unsigned long)var.base);
4181 #ifdef CONFIG_X86_64
4182 	if (base3)
4183 		*base3 = var.base >> 32;
4184 #endif
4185 	desc->type = var.type;
4186 	desc->s = var.s;
4187 	desc->dpl = var.dpl;
4188 	desc->p = var.present;
4189 	desc->avl = var.avl;
4190 	desc->l = var.l;
4191 	desc->d = var.db;
4192 	desc->g = var.g;
4193 
4194 	return true;
4195 }
4196 
4197 static void emulator_set_segment(struct x86_emulate_ctxt *ctxt, u16 selector,
4198 				 struct desc_struct *desc, u32 base3,
4199 				 int seg)
4200 {
4201 	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
4202 	struct kvm_segment var;
4203 
4204 	var.selector = selector;
4205 	var.base = get_desc_base(desc);
4206 #ifdef CONFIG_X86_64
4207 	var.base |= ((u64)base3) << 32;
4208 #endif
4209 	var.limit = get_desc_limit(desc);
4210 	if (desc->g)
4211 		var.limit = (var.limit << 12) | 0xfff;
4212 	var.type = desc->type;
4213 	var.present = desc->p;
4214 	var.dpl = desc->dpl;
4215 	var.db = desc->d;
4216 	var.s = desc->s;
4217 	var.l = desc->l;
4218 	var.g = desc->g;
4219 	var.avl = desc->avl;
4220 	var.present = desc->p;
4221 	var.unusable = !var.present;
4222 	var.padding = 0;
4223 
4224 	kvm_set_segment(vcpu, &var, seg);
4225 	return;
4226 }
4227 
4228 static int emulator_get_msr(struct x86_emulate_ctxt *ctxt,
4229 			    u32 msr_index, u64 *pdata)
4230 {
4231 	return kvm_get_msr(emul_to_vcpu(ctxt), msr_index, pdata);
4232 }
4233 
4234 static int emulator_set_msr(struct x86_emulate_ctxt *ctxt,
4235 			    u32 msr_index, u64 data)
4236 {
4237 	return kvm_set_msr(emul_to_vcpu(ctxt), msr_index, data);
4238 }
4239 
4240 static int emulator_read_pmc(struct x86_emulate_ctxt *ctxt,
4241 			     u32 pmc, u64 *pdata)
4242 {
4243 	return kvm_pmu_read_pmc(emul_to_vcpu(ctxt), pmc, pdata);
4244 }
4245 
4246 static void emulator_halt(struct x86_emulate_ctxt *ctxt)
4247 {
4248 	emul_to_vcpu(ctxt)->arch.halt_request = 1;
4249 }
4250 
4251 static void emulator_get_fpu(struct x86_emulate_ctxt *ctxt)
4252 {
4253 	preempt_disable();
4254 	kvm_load_guest_fpu(emul_to_vcpu(ctxt));
4255 	/*
4256 	 * CR0.TS may reference the host fpu state, not the guest fpu state,
4257 	 * so it may be clear at this point.
4258 	 */
4259 	clts();
4260 }
4261 
4262 static void emulator_put_fpu(struct x86_emulate_ctxt *ctxt)
4263 {
4264 	preempt_enable();
4265 }
4266 
4267 static int emulator_intercept(struct x86_emulate_ctxt *ctxt,
4268 			      struct x86_instruction_info *info,
4269 			      enum x86_intercept_stage stage)
4270 {
4271 	return kvm_x86_ops->check_intercept(emul_to_vcpu(ctxt), info, stage);
4272 }
4273 
4274 static bool emulator_get_cpuid(struct x86_emulate_ctxt *ctxt,
4275 			       u32 *eax, u32 *ebx, u32 *ecx, u32 *edx)
4276 {
4277 	struct kvm_cpuid_entry2 *cpuid = NULL;
4278 
4279 	if (eax && ecx)
4280 		cpuid = kvm_find_cpuid_entry(emul_to_vcpu(ctxt),
4281 					    *eax, *ecx);
4282 
4283 	if (cpuid) {
4284 		*eax = cpuid->eax;
4285 		*ecx = cpuid->ecx;
4286 		if (ebx)
4287 			*ebx = cpuid->ebx;
4288 		if (edx)
4289 			*edx = cpuid->edx;
4290 		return true;
4291 	}
4292 
4293 	return false;
4294 }
4295 
4296 static struct x86_emulate_ops emulate_ops = {
4297 	.read_std            = kvm_read_guest_virt_system,
4298 	.write_std           = kvm_write_guest_virt_system,
4299 	.fetch               = kvm_fetch_guest_virt,
4300 	.read_emulated       = emulator_read_emulated,
4301 	.write_emulated      = emulator_write_emulated,
4302 	.cmpxchg_emulated    = emulator_cmpxchg_emulated,
4303 	.invlpg              = emulator_invlpg,
4304 	.pio_in_emulated     = emulator_pio_in_emulated,
4305 	.pio_out_emulated    = emulator_pio_out_emulated,
4306 	.get_segment         = emulator_get_segment,
4307 	.set_segment         = emulator_set_segment,
4308 	.get_cached_segment_base = emulator_get_cached_segment_base,
4309 	.get_gdt             = emulator_get_gdt,
4310 	.get_idt	     = emulator_get_idt,
4311 	.set_gdt             = emulator_set_gdt,
4312 	.set_idt	     = emulator_set_idt,
4313 	.get_cr              = emulator_get_cr,
4314 	.set_cr              = emulator_set_cr,
4315 	.set_rflags          = emulator_set_rflags,
4316 	.cpl                 = emulator_get_cpl,
4317 	.get_dr              = emulator_get_dr,
4318 	.set_dr              = emulator_set_dr,
4319 	.set_msr             = emulator_set_msr,
4320 	.get_msr             = emulator_get_msr,
4321 	.read_pmc            = emulator_read_pmc,
4322 	.halt                = emulator_halt,
4323 	.wbinvd              = emulator_wbinvd,
4324 	.fix_hypercall       = emulator_fix_hypercall,
4325 	.get_fpu             = emulator_get_fpu,
4326 	.put_fpu             = emulator_put_fpu,
4327 	.intercept           = emulator_intercept,
4328 	.get_cpuid           = emulator_get_cpuid,
4329 };
4330 
4331 static void cache_all_regs(struct kvm_vcpu *vcpu)
4332 {
4333 	kvm_register_read(vcpu, VCPU_REGS_RAX);
4334 	kvm_register_read(vcpu, VCPU_REGS_RSP);
4335 	kvm_register_read(vcpu, VCPU_REGS_RIP);
4336 	vcpu->arch.regs_dirty = ~0;
4337 }
4338 
4339 static void toggle_interruptibility(struct kvm_vcpu *vcpu, u32 mask)
4340 {
4341 	u32 int_shadow = kvm_x86_ops->get_interrupt_shadow(vcpu, mask);
4342 	/*
4343 	 * An sti; sti sequence only disables interrupts for the first
4344 	 * instruction. So, if the last instruction, be it emulated or
4345 	 * not, left the system with the INT_STI flag enabled, it
4346 	 * means that the last instruction was an sti. We should not
4347 	 * leave the flag on in this case. The same goes for mov ss.
4348 	 */
4349 	if (!(int_shadow & mask))
4350 		kvm_x86_ops->set_interrupt_shadow(vcpu, mask);
4351 }
4352 
4353 static void inject_emulated_exception(struct kvm_vcpu *vcpu)
4354 {
4355 	struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
4356 	if (ctxt->exception.vector == PF_VECTOR)
4357 		kvm_propagate_fault(vcpu, &ctxt->exception);
4358 	else if (ctxt->exception.error_code_valid)
4359 		kvm_queue_exception_e(vcpu, ctxt->exception.vector,
4360 				      ctxt->exception.error_code);
4361 	else
4362 		kvm_queue_exception(vcpu, ctxt->exception.vector);
4363 }
4364 
4365 static void init_decode_cache(struct x86_emulate_ctxt *ctxt,
4366 			      const unsigned long *regs)
4367 {
4368 	memset(&ctxt->twobyte, 0,
4369 	       (void *)&ctxt->regs - (void *)&ctxt->twobyte);
4370 	memcpy(ctxt->regs, regs, sizeof(ctxt->regs));
4371 
4372 	ctxt->fetch.start = 0;
4373 	ctxt->fetch.end = 0;
4374 	ctxt->io_read.pos = 0;
4375 	ctxt->io_read.end = 0;
4376 	ctxt->mem_read.pos = 0;
4377 	ctxt->mem_read.end = 0;
4378 }
4379 
4380 static void init_emulate_ctxt(struct kvm_vcpu *vcpu)
4381 {
4382 	struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
4383 	int cs_db, cs_l;
4384 
4385 	/*
4386 	 * TODO: fix emulate.c to use guest_read/write_register
4387 	 * instead of direct ->regs accesses; this can save a few hundred
4388 	 * cycles on Intel for instructions that don't read/change RSP,
4389 	 * for example.
4390 	 */
4391 	cache_all_regs(vcpu);
4392 
4393 	kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
4394 
4395 	ctxt->eflags = kvm_get_rflags(vcpu);
4396 	ctxt->eip = kvm_rip_read(vcpu);
4397 	ctxt->mode = (!is_protmode(vcpu))		? X86EMUL_MODE_REAL :
4398 		     (ctxt->eflags & X86_EFLAGS_VM)	? X86EMUL_MODE_VM86 :
4399 		     cs_l				? X86EMUL_MODE_PROT64 :
4400 		     cs_db				? X86EMUL_MODE_PROT32 :
4401 							  X86EMUL_MODE_PROT16;
4402 	ctxt->guest_mode = is_guest_mode(vcpu);
4403 
4404 	init_decode_cache(ctxt, vcpu->arch.regs);
4405 	vcpu->arch.emulate_regs_need_sync_from_vcpu = false;
4406 }
4407 
4408 int kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip)
4409 {
4410 	struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
4411 	int ret;
4412 
4413 	init_emulate_ctxt(vcpu);
4414 
4415 	ctxt->op_bytes = 2;
4416 	ctxt->ad_bytes = 2;
4417 	ctxt->_eip = ctxt->eip + inc_eip;
4418 	ret = emulate_int_real(ctxt, irq);
4419 
4420 	if (ret != X86EMUL_CONTINUE)
4421 		return EMULATE_FAIL;
4422 
4423 	ctxt->eip = ctxt->_eip;
4424 	memcpy(vcpu->arch.regs, ctxt->regs, sizeof ctxt->regs);
4425 	kvm_rip_write(vcpu, ctxt->eip);
4426 	kvm_set_rflags(vcpu, ctxt->eflags);
4427 
4428 	if (irq == NMI_VECTOR)
4429 		vcpu->arch.nmi_pending = 0;
4430 	else
4431 		vcpu->arch.interrupt.pending = false;
4432 
4433 	return EMULATE_DONE;
4434 }
4435 EXPORT_SYMBOL_GPL(kvm_inject_realmode_interrupt);
4436 
4437 static int handle_emulation_failure(struct kvm_vcpu *vcpu)
4438 {
4439 	int r = EMULATE_DONE;
4440 
4441 	++vcpu->stat.insn_emulation_fail;
4442 	trace_kvm_emulate_insn_failed(vcpu);
4443 	if (!is_guest_mode(vcpu)) {
4444 		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
4445 		vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
4446 		vcpu->run->internal.ndata = 0;
4447 		r = EMULATE_FAIL;
4448 	}
4449 	kvm_queue_exception(vcpu, UD_VECTOR);
4450 
4451 	return r;
4452 }
4453 
4454 static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t gva)
4455 {
4456 	gpa_t gpa;
4457 
4458 	if (tdp_enabled)
4459 		return false;
4460 
4461 	/*
4462 	 * If emulation was due to an access to a shadowed page table
4463 	 * and it failed, try to unshadow the page and re-enter the
4464 	 * guest to let the CPU execute the instruction.
4465 	 */
4466 	if (kvm_mmu_unprotect_page_virt(vcpu, gva))
4467 		return true;
4468 
4469 	gpa = kvm_mmu_gva_to_gpa_system(vcpu, gva, NULL);
4470 
4471 	if (gpa == UNMAPPED_GVA)
4472 		return true; /* let cpu generate fault */
4473 
4474 	if (!kvm_is_error_hva(gfn_to_hva(vcpu->kvm, gpa >> PAGE_SHIFT)))
4475 		return true;
4476 
4477 	return false;
4478 }
4479 
4480 static bool retry_instruction(struct x86_emulate_ctxt *ctxt,
4481 			      unsigned long cr2,  int emulation_type)
4482 {
4483 	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
4484 	unsigned long last_retry_eip, last_retry_addr, gpa = cr2;
4485 
4486 	last_retry_eip = vcpu->arch.last_retry_eip;
4487 	last_retry_addr = vcpu->arch.last_retry_addr;
4488 
4489 	/*
4490 	 * If the emulation is caused by #PF and it is a non-page-table-
4491 	 * writing instruction, it means the VM-EXIT was caused by the
4492 	 * shadow page being write-protected; we can zap the shadow page
4493 	 * and retry the instruction directly.
4494 	 *
4495 	 * Note: if the guest uses a non-page-table modifying instruction
4496 	 * on the PDE that points to the instruction, then we will unmap
4497 	 * the instruction and go to an infinite loop. So, we cache the
4498 	 * last retried eip and the last fault address, if we meet the eip
4499 	 * and the address again, we can break out of the potential infinite
4500 	 * loop.
4501 	 */
4502 	vcpu->arch.last_retry_eip = vcpu->arch.last_retry_addr = 0;
4503 
4504 	if (!(emulation_type & EMULTYPE_RETRY))
4505 		return false;
4506 
4507 	if (x86_page_table_writing_insn(ctxt))
4508 		return false;
4509 
4510 	if (ctxt->eip == last_retry_eip && last_retry_addr == cr2)
4511 		return false;
4512 
4513 	vcpu->arch.last_retry_eip = ctxt->eip;
4514 	vcpu->arch.last_retry_addr = cr2;
4515 
4516 	if (!vcpu->arch.mmu.direct_map)
4517 		gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2, NULL);
4518 
4519 	kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
4520 
4521 	return true;
4522 }
4523 
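/*
 * Top-level entry point of the instruction emulator.  Returns
 * EMULATE_DONE when emulation finished (or the instruction should
 * simply be re-executed by the guest), EMULATE_DO_MMIO when an MMIO
 * or PIO exit to userspace is still outstanding, and EMULATE_FAIL on
 * unrecoverable failure.  Register state is written back to the vcpu
 * only once emulation has fully completed.
 */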
4524 int x86_emulate_instruction(struct kvm_vcpu *vcpu,
4525 			    unsigned long cr2,
4526 			    int emulation_type,
4527 			    void *insn,
4528 			    int insn_len)
4529 {
4530 	int r;
4531 	struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
4532 	bool writeback = true;
4533 
4534 	kvm_clear_exception_queue(vcpu);
4535 
4536 	if (!(emulation_type & EMULTYPE_NO_DECODE)) {
4537 		init_emulate_ctxt(vcpu);
4538 		ctxt->interruptibility = 0;
4539 		ctxt->have_exception = false;
4540 		ctxt->perm_ok = false;
4541 
4542 		ctxt->only_vendor_specific_insn
4543 			= emulation_type & EMULTYPE_TRAP_UD;
4544 
4545 		r = x86_decode_insn(ctxt, insn, insn_len);
4546 
4547 		trace_kvm_emulate_insn_start(vcpu);
4548 		++vcpu->stat.insn_emulation;
4549 		if (r != EMULATION_OK)  {
4550 			if (emulation_type & EMULTYPE_TRAP_UD)
4551 				return EMULATE_FAIL;
4552 			if (reexecute_instruction(vcpu, cr2))
4553 				return EMULATE_DONE;
4554 			if (emulation_type & EMULTYPE_SKIP)
4555 				return EMULATE_FAIL;
4556 			return handle_emulation_failure(vcpu);
4557 		}
4558 	}
4559 
4560 	if (emulation_type & EMULTYPE_SKIP) {
4561 		kvm_rip_write(vcpu, ctxt->_eip);
4562 		return EMULATE_DONE;
4563 	}
4564 
4565 	if (retry_instruction(ctxt, cr2, emulation_type))
4566 		return EMULATE_DONE;
4567 
4568 	/* this is needed for the vmware backdoor interface to work since it
4569 	   changes register values during the IO operation */
4570 	if (vcpu->arch.emulate_regs_need_sync_from_vcpu) {
4571 		vcpu->arch.emulate_regs_need_sync_from_vcpu = false;
4572 		memcpy(ctxt->regs, vcpu->arch.regs, sizeof ctxt->regs);
4573 	}
4574 
4575 restart:
4576 	r = x86_emulate_insn(ctxt);
4577 
4578 	if (r == EMULATION_INTERCEPTED)
4579 		return EMULATE_DONE;
4580 
4581 	if (r == EMULATION_FAILED) {
4582 		if (reexecute_instruction(vcpu, cr2))
4583 			return EMULATE_DONE;
4584 
4585 		return handle_emulation_failure(vcpu);
4586 	}
4587 
4588 	if (ctxt->have_exception) {
4589 		inject_emulated_exception(vcpu);
4590 		r = EMULATE_DONE;
4591 	} else if (vcpu->arch.pio.count) {
4592 		if (!vcpu->arch.pio.in)
4593 			vcpu->arch.pio.count = 0;
4594 		else
4595 			writeback = false;
4596 		r = EMULATE_DO_MMIO;
4597 	} else if (vcpu->mmio_needed) {
4598 		if (!vcpu->mmio_is_write)
4599 			writeback = false;
4600 		r = EMULATE_DO_MMIO;
4601 	} else if (r == EMULATION_RESTART)
4602 		goto restart;
4603 	else
4604 		r = EMULATE_DONE;
4605 
4606 	if (writeback) {
4607 		toggle_interruptibility(vcpu, ctxt->interruptibility);
4608 		kvm_set_rflags(vcpu, ctxt->eflags);
4609 		kvm_make_request(KVM_REQ_EVENT, vcpu);
4610 		memcpy(vcpu->arch.regs, ctxt->regs, sizeof ctxt->regs);
4611 		vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
4612 		kvm_rip_write(vcpu, ctxt->eip);
4613 	} else
4614 		vcpu->arch.emulate_regs_need_sync_to_vcpu = true;
4615 
4616 	return r;
4617 }
4618 EXPORT_SYMBOL_GPL(x86_emulate_instruction);
4619 
4620 int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size, unsigned short port)
4621 {
4622 	unsigned long val = kvm_register_read(vcpu, VCPU_REGS_RAX);
4623 	int ret = emulator_pio_out_emulated(&vcpu->arch.emulate_ctxt,
4624 					    size, port, &val, 1);
4625 	/* do not return to emulator after return from userspace */
4626 	vcpu->arch.pio.count = 0;
4627 	return ret;
4628 }
4629 EXPORT_SYMBOL_GPL(kvm_fast_pio_out);
4630 
4631 static void tsc_bad(void *info)
4632 {
4633 	__this_cpu_write(cpu_tsc_khz, 0);
4634 }
4635 
4636 static void tsc_khz_changed(void *data)
4637 {
4638 	struct cpufreq_freqs *freq = data;
4639 	unsigned long khz = 0;
4640 
4641 	if (data)
4642 		khz = freq->new;
4643 	else if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
4644 		khz = cpufreq_quick_get(raw_smp_processor_id());
4645 	if (!khz)
4646 		khz = tsc_khz;
4647 	__this_cpu_write(cpu_tsc_khz, khz);
4648 }
4649 
4650 static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
4651 				     void *data)
4652 {
4653 	struct cpufreq_freqs *freq = data;
4654 	struct kvm *kvm;
4655 	struct kvm_vcpu *vcpu;
4656 	int i, send_ipi = 0;
4657 
4658 	/*
4659 	 * We allow guests to temporarily run on slowing clocks,
4660 	 * provided we notify them after, or to run on accelerating
4661 	 * clocks, provided we notify them before.  Thus time never
4662 	 * goes backwards.
4663 	 *
4664 	 * However, we have a problem.  We can't atomically update
4665 	 * the frequency of a given CPU from this function; it is
4666 	 * merely a notifier, which can be called from any CPU.
4667 	 * Changing the TSC frequency at arbitrary points in time
4668 	 * requires a recomputation of local variables related to
4669 	 * the TSC for each VCPU.  We must flag these local variables
4670 	 * to be updated and be sure the update takes place with the
4671 	 * new frequency before any guests proceed.
4672 	 *
4673 	 * Unfortunately, the combination of hotplug CPU and frequency
4674 	 * change creates an intractable locking scenario; the order
4675 	 * of when these callouts happen is undefined with respect to
4676 	 * CPU hotplug, and they can race with each other.  As such,
4677 	 * merely setting per_cpu(cpu_tsc_khz) = X during a hotadd is
4678 	 * undefined; you can actually have a CPU frequency change take
4679 	 * place in between the computation of X and the setting of the
4680 	 * variable.  To protect against this problem, all updates of
4681 	 * the per_cpu tsc_khz variable are done in an interrupt
4682 	 * protected IPI, and all callers wishing to update the value
4683 	 * must wait for a synchronous IPI to complete (which is trivial
4684 	 * if the caller is on the CPU already).  This establishes the
4685 	 * necessary total order on variable updates.
4686 	 *
4687 	 * Note that because a guest time update may take place
4688 	 * anytime after the setting of the VCPU's request bit, the
4689 	 * correct TSC value must be set before the request.  However,
4690 	 * to ensure the update actually makes it to any guest which
4691 	 * starts running in hardware virtualization between the set
4692 	 * and the acquisition of the spinlock, we must also ping the
4693 	 * CPU after setting the request bit.
4694 	 *
4695 	 */
4696 
4697 	if (val == CPUFREQ_PRECHANGE && freq->old > freq->new)
4698 		return 0;
4699 	if (val == CPUFREQ_POSTCHANGE && freq->old < freq->new)
4700 		return 0;
4701 
4702 	smp_call_function_single(freq->cpu, tsc_khz_changed, freq, 1);
4703 
4704 	raw_spin_lock(&kvm_lock);
4705 	list_for_each_entry(kvm, &vm_list, vm_list) {
4706 		kvm_for_each_vcpu(i, vcpu, kvm) {
4707 			if (vcpu->cpu != freq->cpu)
4708 				continue;
4709 			kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
4710 			if (vcpu->cpu != smp_processor_id())
4711 				send_ipi = 1;
4712 		}
4713 	}
4714 	raw_spin_unlock(&kvm_lock);
4715 
4716 	if (freq->old < freq->new && send_ipi) {
4717 		/*
4718 		 * We are scaling the frequency up.  We must make sure the
4719 		 * guest doesn't see old kvmclock values while running with
4720 		 * the new frequency; otherwise we risk the guest seeing
4721 		 * time go backwards.
4722 		 *
4723 		 * In case we update the frequency for another cpu
4724 		 * (which might be in guest context) send an interrupt
4725 		 * to kick the cpu out of guest context.  Next time
4726 		 * guest context is entered kvmclock will be updated,
4727 		 * so the guest will not see stale values.
4728 		 */
4729 		smp_call_function_single(freq->cpu, tsc_khz_changed, freq, 1);
4730 	}
4731 	return 0;
4732 }
4733 
4734 static struct notifier_block kvmclock_cpufreq_notifier_block = {
4735 	.notifier_call  = kvmclock_cpufreq_notifier
4736 };
4737 
4738 static int kvmclock_cpu_notifier(struct notifier_block *nfb,
4739 					unsigned long action, void *hcpu)
4740 {
4741 	unsigned int cpu = (unsigned long)hcpu;
4742 
4743 	switch (action) {
4744 		case CPU_ONLINE:
4745 		case CPU_DOWN_FAILED:
4746 			smp_call_function_single(cpu, tsc_khz_changed, NULL, 1);
4747 			break;
4748 		case CPU_DOWN_PREPARE:
4749 			smp_call_function_single(cpu, tsc_bad, NULL, 1);
4750 			break;
4751 	}
4752 	return NOTIFY_OK;
4753 }
4754 
4755 static struct notifier_block kvmclock_cpu_notifier_block = {
4756 	.notifier_call  = kvmclock_cpu_notifier,
4757 	.priority = -INT_MAX
4758 };
4759 
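/*
 * Set up TSC/kvmclock bookkeeping at module load: register the CPU
 * hotplug notifier, register the cpufreq transition notifier when the
 * host TSC is not constant, and seed the per-cpu cpu_tsc_khz values
 * with an IPI to every online CPU.
 */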
4760 static void kvm_timer_init(void)
4761 {
4762 	int cpu;
4763 
4764 	max_tsc_khz = tsc_khz;
4765 	register_hotcpu_notifier(&kvmclock_cpu_notifier_block);
4766 	if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) {
4767 #ifdef CONFIG_CPU_FREQ
4768 		struct cpufreq_policy policy;
4769 		memset(&policy, 0, sizeof(policy));
4770 		cpu = get_cpu();
4771 		cpufreq_get_policy(&policy, cpu);
4772 		if (policy.cpuinfo.max_freq)
4773 			max_tsc_khz = policy.cpuinfo.max_freq;
4774 		put_cpu();
4775 #endif
4776 		cpufreq_register_notifier(&kvmclock_cpufreq_notifier_block,
4777 					  CPUFREQ_TRANSITION_NOTIFIER);
4778 	}
4779 	pr_debug("kvm: max_tsc_khz = %ld\n", max_tsc_khz);
4780 	for_each_online_cpu(cpu)
4781 		smp_call_function_single(cpu, tsc_khz_changed, NULL, 1);
4782 }
4783 
4784 static DEFINE_PER_CPU(struct kvm_vcpu *, current_vcpu);
4785 
4786 int kvm_is_in_guest(void)
4787 {
4788 	return __this_cpu_read(current_vcpu) != NULL;
4789 }
4790 
4791 static int kvm_is_user_mode(void)
4792 {
4793 	int user_mode = 3;
4794 
4795 	if (__this_cpu_read(current_vcpu))
4796 		user_mode = kvm_x86_ops->get_cpl(__this_cpu_read(current_vcpu));
4797 
4798 	return user_mode != 0;
4799 }
4800 
4801 static unsigned long kvm_get_guest_ip(void)
4802 {
4803 	unsigned long ip = 0;
4804 
4805 	if (__this_cpu_read(current_vcpu))
4806 		ip = kvm_rip_read(__this_cpu_read(current_vcpu));
4807 
4808 	return ip;
4809 }
4810 
4811 static struct perf_guest_info_callbacks kvm_guest_cbs = {
4812 	.is_in_guest		= kvm_is_in_guest,
4813 	.is_user_mode		= kvm_is_user_mode,
4814 	.get_guest_ip		= kvm_get_guest_ip,
4815 };
4816 
4817 void kvm_before_handle_nmi(struct kvm_vcpu *vcpu)
4818 {
4819 	__this_cpu_write(current_vcpu, vcpu);
4820 }
4821 EXPORT_SYMBOL_GPL(kvm_before_handle_nmi);
4822 
4823 void kvm_after_handle_nmi(struct kvm_vcpu *vcpu)
4824 {
4825 	__this_cpu_write(current_vcpu, NULL);
4826 }
4827 EXPORT_SYMBOL_GPL(kvm_after_handle_nmi);
4828 
4829 static void kvm_set_mmio_spte_mask(void)
4830 {
4831 	u64 mask;
4832 	int maxphyaddr = boot_cpu_data.x86_phys_bits;
4833 
4834 	/*
4835 	 * Set the reserved bits and the present bit of a paging-structure
4836 	 * entry to generate a page fault with PFERR.RSVD = 1.
4837 	 */
4838 	mask = ((1ull << (62 - maxphyaddr + 1)) - 1) << maxphyaddr;
4839 	mask |= 1ull;
4840 
4841 #ifdef CONFIG_X86_64
4842 	/*
4843 	 * If the reserved bit is not supported, clear the present bit to
4844 	 * disable mmio page faults.
4845 	 */
4846 	if (maxphyaddr == 52)
4847 		mask &= ~1ull;
4848 #endif
4849 
4850 	kvm_mmu_set_mmio_spte_mask(mask);
4851 }
4852 
4853 int kvm_arch_init(void *opaque)
4854 {
4855 	int r;
4856 	struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
4857 
4858 	if (kvm_x86_ops) {
4859 		printk(KERN_ERR "kvm: already loaded the other module\n");
4860 		r = -EEXIST;
4861 		goto out;
4862 	}
4863 
4864 	if (!ops->cpu_has_kvm_support()) {
4865 		printk(KERN_ERR "kvm: no hardware support\n");
4866 		r = -EOPNOTSUPP;
4867 		goto out;
4868 	}
4869 	if (ops->disabled_by_bios()) {
4870 		printk(KERN_ERR "kvm: disabled by bios\n");
4871 		r = -EOPNOTSUPP;
4872 		goto out;
4873 	}
4874 
4875 	r = kvm_mmu_module_init();
4876 	if (r)
4877 		goto out;
4878 
4879 	kvm_set_mmio_spte_mask();
4880 	kvm_init_msr_list();
4881 
4882 	kvm_x86_ops = ops;
4883 	kvm_mmu_set_mask_ptes(PT_USER_MASK, PT_ACCESSED_MASK,
4884 			PT_DIRTY_MASK, PT64_NX_MASK, 0);
4885 
4886 	kvm_timer_init();
4887 
4888 	perf_register_guest_info_callbacks(&kvm_guest_cbs);
4889 
4890 	if (cpu_has_xsave)
4891 		host_xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);
4892 
4893 	return 0;
4894 
4895 out:
4896 	return r;
4897 }
4898 
4899 void kvm_arch_exit(void)
4900 {
4901 	perf_unregister_guest_info_callbacks(&kvm_guest_cbs);
4902 
4903 	if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
4904 		cpufreq_unregister_notifier(&kvmclock_cpufreq_notifier_block,
4905 					    CPUFREQ_TRANSITION_NOTIFIER);
4906 	unregister_hotcpu_notifier(&kvmclock_cpu_notifier_block);
4907 	kvm_x86_ops = NULL;
4908 	kvm_mmu_module_exit();
4909 }
4910 
4911 int kvm_emulate_halt(struct kvm_vcpu *vcpu)
4912 {
4913 	++vcpu->stat.halt_exits;
4914 	if (irqchip_in_kernel(vcpu->kvm)) {
4915 		vcpu->arch.mp_state = KVM_MP_STATE_HALTED;
4916 		return 1;
4917 	} else {
4918 		vcpu->run->exit_reason = KVM_EXIT_HLT;
4919 		return 0;
4920 	}
4921 }
4922 EXPORT_SYMBOL_GPL(kvm_emulate_halt);
4923 
4924 int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
4925 {
4926 	u64 param, ingpa, outgpa, ret;
4927 	uint16_t code, rep_idx, rep_cnt, res = HV_STATUS_SUCCESS, rep_done = 0;
4928 	bool fast, longmode;
4929 	int cs_db, cs_l;
4930 
4931 	/*
4932 	 * a hypercall generates #UD from non-zero CPL or from real mode,
4933 	 * per the Hyper-V spec
4934 	 */
4935 	if (kvm_x86_ops->get_cpl(vcpu) != 0 || !is_protmode(vcpu)) {
4936 		kvm_queue_exception(vcpu, UD_VECTOR);
4937 		return 0;
4938 	}
4939 
4940 	kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
4941 	longmode = is_long_mode(vcpu) && cs_l == 1;
4942 
4943 	if (!longmode) {
4944 		param = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDX) << 32) |
4945 			(kvm_register_read(vcpu, VCPU_REGS_RAX) & 0xffffffff);
4946 		ingpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RBX) << 32) |
4947 			(kvm_register_read(vcpu, VCPU_REGS_RCX) & 0xffffffff);
4948 		outgpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDI) << 32) |
4949 			(kvm_register_read(vcpu, VCPU_REGS_RSI) & 0xffffffff);
4950 	}
4951 #ifdef CONFIG_X86_64
4952 	else {
4953 		param = kvm_register_read(vcpu, VCPU_REGS_RCX);
4954 		ingpa = kvm_register_read(vcpu, VCPU_REGS_RDX);
4955 		outgpa = kvm_register_read(vcpu, VCPU_REGS_R8);
4956 	}
4957 #endif
4958 
4959 	code = param & 0xffff;
4960 	fast = (param >> 16) & 0x1;
4961 	rep_cnt = (param >> 32) & 0xfff;
4962 	rep_idx = (param >> 48) & 0xfff;
4963 
4964 	trace_kvm_hv_hypercall(code, fast, rep_cnt, rep_idx, ingpa, outgpa);
4965 
4966 	switch (code) {
4967 	case HV_X64_HV_NOTIFY_LONG_SPIN_WAIT:
4968 		kvm_vcpu_on_spin(vcpu);
4969 		break;
4970 	default:
4971 		res = HV_STATUS_INVALID_HYPERCALL_CODE;
4972 		break;
4973 	}
4974 
4975 	ret = res | (((u64)rep_done & 0xfff) << 32);
4976 	if (longmode) {
4977 		kvm_register_write(vcpu, VCPU_REGS_RAX, ret);
4978 	} else {
4979 		kvm_register_write(vcpu, VCPU_REGS_RDX, ret >> 32);
4980 		kvm_register_write(vcpu, VCPU_REGS_RAX, ret & 0xffffffff);
4981 	}
4982 
4983 	return 1;
4984 }
4985 
4986 int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
4987 {
4988 	unsigned long nr, a0, a1, a2, a3, ret;
4989 	int r = 1;
4990 
4991 	if (kvm_hv_hypercall_enabled(vcpu->kvm))
4992 		return kvm_hv_hypercall(vcpu);
4993 
4994 	nr = kvm_register_read(vcpu, VCPU_REGS_RAX);
4995 	a0 = kvm_register_read(vcpu, VCPU_REGS_RBX);
4996 	a1 = kvm_register_read(vcpu, VCPU_REGS_RCX);
4997 	a2 = kvm_register_read(vcpu, VCPU_REGS_RDX);
4998 	a3 = kvm_register_read(vcpu, VCPU_REGS_RSI);
4999 
5000 	trace_kvm_hypercall(nr, a0, a1, a2, a3);
5001 
5002 	if (!is_long_mode(vcpu)) {
5003 		nr &= 0xFFFFFFFF;
5004 		a0 &= 0xFFFFFFFF;
5005 		a1 &= 0xFFFFFFFF;
5006 		a2 &= 0xFFFFFFFF;
5007 		a3 &= 0xFFFFFFFF;
5008 	}
5009 
5010 	if (kvm_x86_ops->get_cpl(vcpu) != 0) {
5011 		ret = -KVM_EPERM;
5012 		goto out;
5013 	}
5014 
5015 	switch (nr) {
5016 	case KVM_HC_VAPIC_POLL_IRQ:
5017 		ret = 0;
5018 		break;
5019 	default:
5020 		ret = -KVM_ENOSYS;
5021 		break;
5022 	}
5023 out:
5024 	kvm_register_write(vcpu, VCPU_REGS_RAX, ret);
5025 	++vcpu->stat.hypercalls;
5026 	return r;
5027 }
5028 EXPORT_SYMBOL_GPL(kvm_emulate_hypercall);
5029 
5030 int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt)
5031 {
5032 	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
5033 	char instruction[3];
5034 	unsigned long rip = kvm_rip_read(vcpu);
5035 
5036 	/*
5037 	 * Blow out the MMU so that no other VCPU has an active mapping,
5038 	 * ensuring that the updated hypercall appears atomically across all
5039 	 * VCPUs.
5040 	 */
5041 	kvm_mmu_zap_all(vcpu->kvm);
5042 
5043 	kvm_x86_ops->patch_hypercall(vcpu, instruction);
5044 
5045 	return emulator_write_emulated(ctxt, rip, instruction, 3, NULL);
5046 }
5047 
5048 /*
5049  * Check if userspace requested an interrupt window, and that the
5050  * interrupt window is open.
5051  *
5052  * No need to exit to userspace if we already have an interrupt queued.
5053  */
5054 static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu)
5055 {
5056 	return (!irqchip_in_kernel(vcpu->kvm) && !kvm_cpu_has_interrupt(vcpu) &&
5057 		vcpu->run->request_interrupt_window &&
5058 		kvm_arch_interrupt_allowed(vcpu));
5059 }
5060 
5061 static void post_kvm_run_save(struct kvm_vcpu *vcpu)
5062 {
5063 	struct kvm_run *kvm_run = vcpu->run;
5064 
5065 	kvm_run->if_flag = (kvm_get_rflags(vcpu) & X86_EFLAGS_IF) != 0;
5066 	kvm_run->cr8 = kvm_get_cr8(vcpu);
5067 	kvm_run->apic_base = kvm_get_apic_base(vcpu);
5068 	if (irqchip_in_kernel(vcpu->kvm))
5069 		kvm_run->ready_for_interrupt_injection = 1;
5070 	else
5071 		kvm_run->ready_for_interrupt_injection =
5072 			kvm_arch_interrupt_allowed(vcpu) &&
5073 			!kvm_cpu_has_interrupt(vcpu) &&
5074 			!kvm_event_needs_reinjection(vcpu);
5075 }
5076 
5077 static void update_cr8_intercept(struct kvm_vcpu *vcpu)
5078 {
5079 	int max_irr, tpr;
5080 
5081 	if (!kvm_x86_ops->update_cr8_intercept)
5082 		return;
5083 
5084 	if (!vcpu->arch.apic)
5085 		return;
5086 
5087 	if (!vcpu->arch.apic->vapic_addr)
5088 		max_irr = kvm_lapic_find_highest_irr(vcpu);
5089 	else
5090 		max_irr = -1;
5091 
5092 	if (max_irr != -1)
5093 		max_irr >>= 4;
5094 
5095 	tpr = kvm_lapic_get_cr8(vcpu);
5096 
5097 	kvm_x86_ops->update_cr8_intercept(vcpu, tpr, max_irr);
5098 }
5099 
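/*
 * (Re)inject events into the guest.  Exceptions, NMIs and interrupts
 * that were already being delivered are reinjected first; only then is
 * a new NMI or external interrupt injected, and only if the vcpu can
 * currently accept it.
 */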
5100 static void inject_pending_event(struct kvm_vcpu *vcpu)
5101 {
5102 	/* try to reinject previous events if any */
5103 	if (vcpu->arch.exception.pending) {
5104 		trace_kvm_inj_exception(vcpu->arch.exception.nr,
5105 					vcpu->arch.exception.has_error_code,
5106 					vcpu->arch.exception.error_code);
5107 		kvm_x86_ops->queue_exception(vcpu, vcpu->arch.exception.nr,
5108 					  vcpu->arch.exception.has_error_code,
5109 					  vcpu->arch.exception.error_code,
5110 					  vcpu->arch.exception.reinject);
5111 		return;
5112 	}
5113 
5114 	if (vcpu->arch.nmi_injected) {
5115 		kvm_x86_ops->set_nmi(vcpu);
5116 		return;
5117 	}
5118 
5119 	if (vcpu->arch.interrupt.pending) {
5120 		kvm_x86_ops->set_irq(vcpu);
5121 		return;
5122 	}
5123 
5124 	/* try to inject new event if pending */
5125 	if (vcpu->arch.nmi_pending) {
5126 		if (kvm_x86_ops->nmi_allowed(vcpu)) {
5127 			--vcpu->arch.nmi_pending;
5128 			vcpu->arch.nmi_injected = true;
5129 			kvm_x86_ops->set_nmi(vcpu);
5130 		}
5131 	} else if (kvm_cpu_has_interrupt(vcpu)) {
5132 		if (kvm_x86_ops->interrupt_allowed(vcpu)) {
5133 			kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu),
5134 					    false);
5135 			kvm_x86_ops->set_irq(vcpu);
5136 		}
5137 	}
5138 }
5139 
5140 static void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu)
5141 {
5142 	if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE) &&
5143 			!vcpu->guest_xcr0_loaded) {
5144 		/* kvm_set_xcr() also depends on this */
5145 		xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0);
5146 		vcpu->guest_xcr0_loaded = 1;
5147 	}
5148 }
5149 
5150 static void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu)
5151 {
5152 	if (vcpu->guest_xcr0_loaded) {
5153 		if (vcpu->arch.xcr0 != host_xcr0)
5154 			xsetbv(XCR_XFEATURE_ENABLED_MASK, host_xcr0);
5155 		vcpu->guest_xcr0_loaded = 0;
5156 	}
5157 }
5158 
5159 static void process_nmi(struct kvm_vcpu *vcpu)
5160 {
5161 	unsigned limit = 2;
5162 
5163 	/*
5164 	 * x86 is limited to one NMI running, and one NMI pending after it.
5165 	 * If an NMI is already in progress, limit further NMIs to just one.
5166 	 * Otherwise, allow two (and we'll inject the first one immediately).
5167 	 */
5168 	if (kvm_x86_ops->get_nmi_mask(vcpu) || vcpu->arch.nmi_injected)
5169 		limit = 1;
5170 
5171 	vcpu->arch.nmi_pending += atomic_xchg(&vcpu->arch.nmi_queued, 0);
5172 	vcpu->arch.nmi_pending = min(vcpu->arch.nmi_pending, limit);
5173 	kvm_make_request(KVM_REQ_EVENT, vcpu);
5174 }
5175 
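/*
 * Run one iteration of the vcpu loop: service pending requests, inject
 * events, enter the guest with interrupts disabled, and handle the
 * resulting exit.  Broadly, a positive return value tells the caller
 * (__vcpu_run) to keep looping, while zero or negative values
 * propagate back toward userspace.
 */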
5176 static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
5177 {
5178 	int r;
5179 	bool req_int_win = !irqchip_in_kernel(vcpu->kvm) &&
5180 		vcpu->run->request_interrupt_window;
5181 	bool req_immediate_exit = 0;
5182 
5183 	if (vcpu->requests) {
5184 		if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu))
5185 			kvm_mmu_unload(vcpu);
5186 		if (kvm_check_request(KVM_REQ_MIGRATE_TIMER, vcpu))
5187 			__kvm_migrate_timers(vcpu);
5188 		if (kvm_check_request(KVM_REQ_CLOCK_UPDATE, vcpu)) {
5189 			r = kvm_guest_time_update(vcpu);
5190 			if (unlikely(r))
5191 				goto out;
5192 		}
5193 		if (kvm_check_request(KVM_REQ_MMU_SYNC, vcpu))
5194 			kvm_mmu_sync_roots(vcpu);
5195 		if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
5196 			kvm_x86_ops->tlb_flush(vcpu);
5197 		if (kvm_check_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu)) {
5198 			vcpu->run->exit_reason = KVM_EXIT_TPR_ACCESS;
5199 			r = 0;
5200 			goto out;
5201 		}
5202 		if (kvm_check_request(KVM_REQ_TRIPLE_FAULT, vcpu)) {
5203 			vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN;
5204 			r = 0;
5205 			goto out;
5206 		}
5207 		if (kvm_check_request(KVM_REQ_DEACTIVATE_FPU, vcpu)) {
5208 			vcpu->fpu_active = 0;
5209 			kvm_x86_ops->fpu_deactivate(vcpu);
5210 		}
5211 		if (kvm_check_request(KVM_REQ_APF_HALT, vcpu)) {
5212 			/* Page is swapped out. Do synthetic halt */
5213 			vcpu->arch.apf.halted = true;
5214 			r = 1;
5215 			goto out;
5216 		}
5217 		if (kvm_check_request(KVM_REQ_STEAL_UPDATE, vcpu))
5218 			record_steal_time(vcpu);
5219 		if (kvm_check_request(KVM_REQ_NMI, vcpu))
5220 			process_nmi(vcpu);
5221 		req_immediate_exit =
5222 			kvm_check_request(KVM_REQ_IMMEDIATE_EXIT, vcpu);
5223 		if (kvm_check_request(KVM_REQ_PMU, vcpu))
5224 			kvm_handle_pmu_event(vcpu);
5225 		if (kvm_check_request(KVM_REQ_PMI, vcpu))
5226 			kvm_deliver_pmi(vcpu);
5227 	}
5228 
5229 	r = kvm_mmu_reload(vcpu);
5230 	if (unlikely(r))
5231 		goto out;
5232 
5233 	if (kvm_check_request(KVM_REQ_EVENT, vcpu) || req_int_win) {
5234 		inject_pending_event(vcpu);
5235 
5236 		/* enable NMI/IRQ window open exits if needed */
5237 		if (vcpu->arch.nmi_pending)
5238 			kvm_x86_ops->enable_nmi_window(vcpu);
5239 		else if (kvm_cpu_has_interrupt(vcpu) || req_int_win)
5240 			kvm_x86_ops->enable_irq_window(vcpu);
5241 
5242 		if (kvm_lapic_enabled(vcpu)) {
5243 			update_cr8_intercept(vcpu);
5244 			kvm_lapic_sync_to_vapic(vcpu);
5245 		}
5246 	}
5247 
5248 	preempt_disable();
5249 
5250 	kvm_x86_ops->prepare_guest_switch(vcpu);
5251 	if (vcpu->fpu_active)
5252 		kvm_load_guest_fpu(vcpu);
5253 	kvm_load_guest_xcr0(vcpu);
5254 
5255 	vcpu->mode = IN_GUEST_MODE;
5256 
5257 	/* We should set ->mode before checking ->requests,
5258 	 * see the comment in make_all_cpus_request.
5259 	 */
5260 	smp_mb();
5261 
5262 	local_irq_disable();
5263 
5264 	if (vcpu->mode == EXITING_GUEST_MODE || vcpu->requests
5265 	    || need_resched() || signal_pending(current)) {
5266 		vcpu->mode = OUTSIDE_GUEST_MODE;
5267 		smp_wmb();
5268 		local_irq_enable();
5269 		preempt_enable();
5270 		kvm_x86_ops->cancel_injection(vcpu);
5271 		r = 1;
5272 		goto out;
5273 	}
5274 
5275 	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
5276 
5277 	if (req_immediate_exit)
5278 		smp_send_reschedule(vcpu->cpu);
5279 
5280 	kvm_guest_enter();
5281 
5282 	if (unlikely(vcpu->arch.switch_db_regs)) {
5283 		set_debugreg(0, 7);
5284 		set_debugreg(vcpu->arch.eff_db[0], 0);
5285 		set_debugreg(vcpu->arch.eff_db[1], 1);
5286 		set_debugreg(vcpu->arch.eff_db[2], 2);
5287 		set_debugreg(vcpu->arch.eff_db[3], 3);
5288 	}
5289 
5290 	trace_kvm_entry(vcpu->vcpu_id);
5291 	kvm_x86_ops->run(vcpu);
5292 
5293 	/*
5294 	 * If the guest has used debug registers, at least dr7
5295 	 * will be disabled while returning to the host.
5296 	 * If we don't have active breakpoints in the host, we don't
5297 	 * care about the messed up debug address registers. But if
5298 	 * we have some of them active, restore the old state.
5299 	 */
5300 	if (hw_breakpoint_active())
5301 		hw_breakpoint_restore();
5302 
5303 	vcpu->arch.last_guest_tsc = kvm_x86_ops->read_l1_tsc(vcpu);
5304 
5305 	vcpu->mode = OUTSIDE_GUEST_MODE;
5306 	smp_wmb();
5307 	local_irq_enable();
5308 
5309 	++vcpu->stat.exits;
5310 
5311 	/*
5312 	 * We must have an instruction between local_irq_enable() and
5313 	 * kvm_guest_exit(), so the timer interrupt isn't delayed by
5314 	 * the interrupt shadow.  The stat.exits increment will do nicely.
5315 	 * But we need to prevent reordering, hence this barrier():
5316 	 */
5317 	barrier();
5318 
5319 	kvm_guest_exit();
5320 
5321 	preempt_enable();
5322 
5323 	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
5324 
5325 	/*
5326 	 * Profile KVM exit RIPs:
5327 	 */
5328 	if (unlikely(prof_on == KVM_PROFILING)) {
5329 		unsigned long rip = kvm_rip_read(vcpu);
5330 		profile_hit(KVM_PROFILING, (void *)rip);
5331 	}
5332 
5333 	if (unlikely(vcpu->arch.tsc_always_catchup))
5334 		kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
5335 
5336 	kvm_lapic_sync_from_vapic(vcpu);
5337 
5338 	r = kvm_x86_ops->handle_exit(vcpu);
5339 out:
5340 	return r;
5341 }
5342 
5343 
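/*
 * Outer run loop: keep calling vcpu_enter_guest() while the vcpu is
 * runnable, block while it is halted, and pick up signals, pending timer
 * interrupts and reschedule requests between iterations.
 */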
5344 static int __vcpu_run(struct kvm_vcpu *vcpu)
5345 {
5346 	int r;
5347 	struct kvm *kvm = vcpu->kvm;
5348 
5349 	if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED)) {
5350 		pr_debug("vcpu %d received sipi with vector # %x\n",
5351 			 vcpu->vcpu_id, vcpu->arch.sipi_vector);
5352 		kvm_lapic_reset(vcpu);
5353 		r = kvm_arch_vcpu_reset(vcpu);
5354 		if (r)
5355 			return r;
5356 		vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
5357 	}
5358 
5359 	vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
5360 
5361 	r = 1;
5362 	while (r > 0) {
5363 		if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&
5364 		    !vcpu->arch.apf.halted)
5365 			r = vcpu_enter_guest(vcpu);
5366 		else {
5367 			srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
5368 			kvm_vcpu_block(vcpu);
5369 			vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
5370 			if (kvm_check_request(KVM_REQ_UNHALT, vcpu)) {
5372 				switch (vcpu->arch.mp_state) {
5373 				case KVM_MP_STATE_HALTED:
5374 					vcpu->arch.mp_state =
5375 						KVM_MP_STATE_RUNNABLE;
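					/* fall through: an unhalted vcpu is runnable */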
5376 				case KVM_MP_STATE_RUNNABLE:
5377 					vcpu->arch.apf.halted = false;
5378 					break;
5379 				case KVM_MP_STATE_SIPI_RECEIVED:
5380 				default:
5381 					r = -EINTR;
5382 					break;
5383 				}
5384 			}
5385 		}
5386 
5387 		if (r <= 0)
5388 			break;
5389 
5390 		clear_bit(KVM_REQ_PENDING_TIMER, &vcpu->requests);
5391 		if (kvm_cpu_has_pending_timer(vcpu))
5392 			kvm_inject_pending_timer_irqs(vcpu);
5393 
5394 		if (dm_request_for_irq_injection(vcpu)) {
5395 			r = -EINTR;
5396 			vcpu->run->exit_reason = KVM_EXIT_INTR;
5397 			++vcpu->stat.request_irq_exits;
5398 		}
5399 
5400 		kvm_check_async_pf_completion(vcpu);
5401 
5402 		if (signal_pending(current)) {
5403 			r = -EINTR;
5404 			vcpu->run->exit_reason = KVM_EXIT_INTR;
5405 			++vcpu->stat.signal_exits;
5406 		}
5407 		if (need_resched()) {
5408 			srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
5409 			kvm_resched(vcpu);
5410 			vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
5411 		}
5412 	}
5413 
5414 	srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
5415 
5416 	return r;
5417 }
5418 
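/*
 * Complete an MMIO or PIO access that was handled in userspace.  Accesses
 * wider than 8 bytes are split into 8-byte chunks: returning 0 bounces
 * the next chunk back to userspace, returning 1 resumes execution.
 */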
5419 static int complete_mmio(struct kvm_vcpu *vcpu)
5420 {
5421 	struct kvm_run *run = vcpu->run;
5422 	int r;
5423 
5424 	if (!(vcpu->arch.pio.count || vcpu->mmio_needed))
5425 		return 1;
5426 
5427 	if (vcpu->mmio_needed) {
5428 		vcpu->mmio_needed = 0;
5429 		if (!vcpu->mmio_is_write)
5430 			memcpy(vcpu->mmio_data + vcpu->mmio_index,
5431 			       run->mmio.data, 8);
5432 		vcpu->mmio_index += 8;
5433 		if (vcpu->mmio_index < vcpu->mmio_size) {
5434 			run->exit_reason = KVM_EXIT_MMIO;
5435 			run->mmio.phys_addr = vcpu->mmio_phys_addr + vcpu->mmio_index;
5436 			memcpy(run->mmio.data, vcpu->mmio_data + vcpu->mmio_index, 8);
5437 			run->mmio.len = min(vcpu->mmio_size - vcpu->mmio_index, 8);
5438 			run->mmio.is_write = vcpu->mmio_is_write;
5439 			vcpu->mmio_needed = 1;
5440 			return 0;
5441 		}
5442 		if (vcpu->mmio_is_write)
5443 			return 1;
5444 		vcpu->mmio_read_completed = 1;
5445 	}
5446 	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
5447 	r = emulate_instruction(vcpu, EMULTYPE_NO_DECODE);
5448 	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
5449 	if (r != EMULATE_DONE)
5450 		return 0;
5451 	return 1;
5452 }
5453 
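/*
 * Top-level handler for the KVM_RUN ioctl: finish any pending MMIO/PIO,
 * run the vcpu and save run-state back to the shared kvm_run structure.
 */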
5454 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
5455 {
5456 	int r;
5457 	sigset_t sigsaved;
5458 
5459 	if (!tsk_used_math(current) && init_fpu(current))
5460 		return -ENOMEM;
5461 
5462 	if (vcpu->sigset_active)
5463 		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
5464 
5465 	if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
5466 		kvm_vcpu_block(vcpu);
5467 		clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
5468 		r = -EAGAIN;
5469 		goto out;
5470 	}
5471 
5472 	/* re-sync apic's tpr */
5473 	if (!irqchip_in_kernel(vcpu->kvm)) {
5474 		if (kvm_set_cr8(vcpu, kvm_run->cr8) != 0) {
5475 			r = -EINVAL;
5476 			goto out;
5477 		}
5478 	}
5479 
5480 	r = complete_mmio(vcpu);
5481 	if (r <= 0)
5482 		goto out;
5483 
5484 	r = __vcpu_run(vcpu);
5485 
5486 out:
5487 	post_kvm_run_save(vcpu);
5488 	if (vcpu->sigset_active)
5489 		sigprocmask(SIG_SETMASK, &sigsaved, NULL);
5490 
5491 	return r;
5492 }
5493 
5494 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
5495 {
5496 	if (vcpu->arch.emulate_regs_need_sync_to_vcpu) {
5497 		/*
5498 		 * We are here if userspace calls get_regs() in the middle of
5499 		 * instruction emulation. Register state needs to be copied
5500 		 * back from the emulation context to the vcpu. Userspace
5501 		 * shouldn't usually do that, but some badly designed PV devices
5502 		 * (vmware backdoor interface) need this to work.
5503 		 */
5504 		struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
5505 		memcpy(vcpu->arch.regs, ctxt->regs, sizeof ctxt->regs);
5506 		vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
5507 	}
5508 	regs->rax = kvm_register_read(vcpu, VCPU_REGS_RAX);
5509 	regs->rbx = kvm_register_read(vcpu, VCPU_REGS_RBX);
5510 	regs->rcx = kvm_register_read(vcpu, VCPU_REGS_RCX);
5511 	regs->rdx = kvm_register_read(vcpu, VCPU_REGS_RDX);
5512 	regs->rsi = kvm_register_read(vcpu, VCPU_REGS_RSI);
5513 	regs->rdi = kvm_register_read(vcpu, VCPU_REGS_RDI);
5514 	regs->rsp = kvm_register_read(vcpu, VCPU_REGS_RSP);
5515 	regs->rbp = kvm_register_read(vcpu, VCPU_REGS_RBP);
5516 #ifdef CONFIG_X86_64
5517 	regs->r8 = kvm_register_read(vcpu, VCPU_REGS_R8);
5518 	regs->r9 = kvm_register_read(vcpu, VCPU_REGS_R9);
5519 	regs->r10 = kvm_register_read(vcpu, VCPU_REGS_R10);
5520 	regs->r11 = kvm_register_read(vcpu, VCPU_REGS_R11);
5521 	regs->r12 = kvm_register_read(vcpu, VCPU_REGS_R12);
5522 	regs->r13 = kvm_register_read(vcpu, VCPU_REGS_R13);
5523 	regs->r14 = kvm_register_read(vcpu, VCPU_REGS_R14);
5524 	regs->r15 = kvm_register_read(vcpu, VCPU_REGS_R15);
5525 #endif
5526 
5527 	regs->rip = kvm_rip_read(vcpu);
5528 	regs->rflags = kvm_get_rflags(vcpu);
5529 
5530 	return 0;
5531 }
5532 
5533 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
5534 {
5535 	vcpu->arch.emulate_regs_need_sync_from_vcpu = true;
5536 	vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
5537 
5538 	kvm_register_write(vcpu, VCPU_REGS_RAX, regs->rax);
5539 	kvm_register_write(vcpu, VCPU_REGS_RBX, regs->rbx);
5540 	kvm_register_write(vcpu, VCPU_REGS_RCX, regs->rcx);
5541 	kvm_register_write(vcpu, VCPU_REGS_RDX, regs->rdx);
5542 	kvm_register_write(vcpu, VCPU_REGS_RSI, regs->rsi);
5543 	kvm_register_write(vcpu, VCPU_REGS_RDI, regs->rdi);
5544 	kvm_register_write(vcpu, VCPU_REGS_RSP, regs->rsp);
5545 	kvm_register_write(vcpu, VCPU_REGS_RBP, regs->rbp);
5546 #ifdef CONFIG_X86_64
5547 	kvm_register_write(vcpu, VCPU_REGS_R8, regs->r8);
5548 	kvm_register_write(vcpu, VCPU_REGS_R9, regs->r9);
5549 	kvm_register_write(vcpu, VCPU_REGS_R10, regs->r10);
5550 	kvm_register_write(vcpu, VCPU_REGS_R11, regs->r11);
5551 	kvm_register_write(vcpu, VCPU_REGS_R12, regs->r12);
5552 	kvm_register_write(vcpu, VCPU_REGS_R13, regs->r13);
5553 	kvm_register_write(vcpu, VCPU_REGS_R14, regs->r14);
5554 	kvm_register_write(vcpu, VCPU_REGS_R15, regs->r15);
5555 #endif
5556 
5557 	kvm_rip_write(vcpu, regs->rip);
5558 	kvm_set_rflags(vcpu, regs->rflags);
5559 
5560 	vcpu->arch.exception.pending = false;
5561 
5562 	kvm_make_request(KVM_REQ_EVENT, vcpu);
5563 
5564 	return 0;
5565 }
5566 
5567 void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
5568 {
5569 	struct kvm_segment cs;
5570 
5571 	kvm_get_segment(vcpu, &cs, VCPU_SREG_CS);
5572 	*db = cs.db;
5573 	*l = cs.l;
5574 }
5575 EXPORT_SYMBOL_GPL(kvm_get_cs_db_l_bits);
5576 
5577 int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
5578 				  struct kvm_sregs *sregs)
5579 {
5580 	struct desc_ptr dt;
5581 
5582 	kvm_get_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
5583 	kvm_get_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
5584 	kvm_get_segment(vcpu, &sregs->es, VCPU_SREG_ES);
5585 	kvm_get_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
5586 	kvm_get_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
5587 	kvm_get_segment(vcpu, &sregs->ss, VCPU_SREG_SS);
5588 
5589 	kvm_get_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
5590 	kvm_get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
5591 
5592 	kvm_x86_ops->get_idt(vcpu, &dt);
5593 	sregs->idt.limit = dt.size;
5594 	sregs->idt.base = dt.address;
5595 	kvm_x86_ops->get_gdt(vcpu, &dt);
5596 	sregs->gdt.limit = dt.size;
5597 	sregs->gdt.base = dt.address;
5598 
5599 	sregs->cr0 = kvm_read_cr0(vcpu);
5600 	sregs->cr2 = vcpu->arch.cr2;
5601 	sregs->cr3 = kvm_read_cr3(vcpu);
5602 	sregs->cr4 = kvm_read_cr4(vcpu);
5603 	sregs->cr8 = kvm_get_cr8(vcpu);
5604 	sregs->efer = vcpu->arch.efer;
5605 	sregs->apic_base = kvm_get_apic_base(vcpu);
5606 
5607 	memset(sregs->interrupt_bitmap, 0, sizeof sregs->interrupt_bitmap);
5608 
5609 	if (vcpu->arch.interrupt.pending && !vcpu->arch.interrupt.soft)
5610 		set_bit(vcpu->arch.interrupt.nr,
5611 			(unsigned long *)sregs->interrupt_bitmap);
5612 
5613 	return 0;
5614 }
5615 
5616 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
5617 				    struct kvm_mp_state *mp_state)
5618 {
5619 	mp_state->mp_state = vcpu->arch.mp_state;
5620 	return 0;
5621 }
5622 
5623 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
5624 				    struct kvm_mp_state *mp_state)
5625 {
5626 	vcpu->arch.mp_state = mp_state->mp_state;
5627 	kvm_make_request(KVM_REQ_EVENT, vcpu);
5628 	return 0;
5629 }
5630 
5631 int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int idt_index,
5632 		    int reason, bool has_error_code, u32 error_code)
5633 {
5634 	struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
5635 	int ret;
5636 
5637 	init_emulate_ctxt(vcpu);
5638 
5639 	ret = emulator_task_switch(ctxt, tss_selector, idt_index, reason,
5640 				   has_error_code, error_code);
5641 
5642 	if (ret)
5643 		return EMULATE_FAIL;
5644 
5645 	memcpy(vcpu->arch.regs, ctxt->regs, sizeof ctxt->regs);
5646 	kvm_rip_write(vcpu, ctxt->eip);
5647 	kvm_set_rflags(vcpu, ctxt->eflags);
5648 	kvm_make_request(KVM_REQ_EVENT, vcpu);
5649 	return EMULATE_DONE;
5650 }
5651 EXPORT_SYMBOL_GPL(kvm_task_switch);
5652 
5653 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
5654 				  struct kvm_sregs *sregs)
5655 {
5656 	int mmu_reset_needed = 0;
5657 	int pending_vec, max_bits, idx;
5658 	struct desc_ptr dt;
5659 
5660 	if (!guest_cpuid_has_xsave(vcpu) && (sregs->cr4 & X86_CR4_OSXSAVE))
5661 		return -EINVAL;
5662 
5663 	dt.size = sregs->idt.limit;
5664 	dt.address = sregs->idt.base;
5665 	kvm_x86_ops->set_idt(vcpu, &dt);
5666 	dt.size = sregs->gdt.limit;
5667 	dt.address = sregs->gdt.base;
5668 	kvm_x86_ops->set_gdt(vcpu, &dt);
5669 
5670 	vcpu->arch.cr2 = sregs->cr2;
5671 	mmu_reset_needed |= kvm_read_cr3(vcpu) != sregs->cr3;
5672 	vcpu->arch.cr3 = sregs->cr3;
5673 	__set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
5674 
5675 	kvm_set_cr8(vcpu, sregs->cr8);
5676 
5677 	mmu_reset_needed |= vcpu->arch.efer != sregs->efer;
5678 	kvm_x86_ops->set_efer(vcpu, sregs->efer);
5679 	kvm_set_apic_base(vcpu, sregs->apic_base);
5680 
5681 	mmu_reset_needed |= kvm_read_cr0(vcpu) != sregs->cr0;
5682 	kvm_x86_ops->set_cr0(vcpu, sregs->cr0);
5683 	vcpu->arch.cr0 = sregs->cr0;
5684 
5685 	mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4;
5686 	kvm_x86_ops->set_cr4(vcpu, sregs->cr4);
5687 	if (sregs->cr4 & X86_CR4_OSXSAVE)
5688 		kvm_update_cpuid(vcpu);
5689 
5690 	idx = srcu_read_lock(&vcpu->kvm->srcu);
5691 	if (!is_long_mode(vcpu) && is_pae(vcpu)) {
5692 		load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu));
5693 		mmu_reset_needed = 1;
5694 	}
5695 	srcu_read_unlock(&vcpu->kvm->srcu, idx);
5696 
5697 	if (mmu_reset_needed)
5698 		kvm_mmu_reset_context(vcpu);
5699 
5700 	max_bits = (sizeof sregs->interrupt_bitmap) << 3;
5701 	pending_vec = find_first_bit(
5702 		(const unsigned long *)sregs->interrupt_bitmap, max_bits);
5703 	if (pending_vec < max_bits) {
5704 		kvm_queue_interrupt(vcpu, pending_vec, false);
5705 		pr_debug("Set back pending irq %d\n", pending_vec);
5706 	}
5707 
5708 	kvm_set_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
5709 	kvm_set_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
5710 	kvm_set_segment(vcpu, &sregs->es, VCPU_SREG_ES);
5711 	kvm_set_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
5712 	kvm_set_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
5713 	kvm_set_segment(vcpu, &sregs->ss, VCPU_SREG_SS);
5714 
5715 	kvm_set_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
5716 	kvm_set_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
5717 
5718 	update_cr8_intercept(vcpu);
5719 
5720 	/* Older userspace won't unhalt the vcpu on reset. */
5721 	if (kvm_vcpu_is_bsp(vcpu) && kvm_rip_read(vcpu) == 0xfff0 &&
5722 	    sregs->cs.selector == 0xf000 && sregs->cs.base == 0xffff0000 &&
5723 	    !is_protmode(vcpu))
5724 		vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
5725 
5726 	kvm_make_request(KVM_REQ_EVENT, vcpu);
5727 
5728 	return 0;
5729 }
5730 
5731 int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
5732 					struct kvm_guest_debug *dbg)
5733 {
5734 	unsigned long rflags;
5735 	int i, r;
5736 
5737 	if (dbg->control & (KVM_GUESTDBG_INJECT_DB | KVM_GUESTDBG_INJECT_BP)) {
5738 		r = -EBUSY;
5739 		if (vcpu->arch.exception.pending)
5740 			goto out;
5741 		if (dbg->control & KVM_GUESTDBG_INJECT_DB)
5742 			kvm_queue_exception(vcpu, DB_VECTOR);
5743 		else
5744 			kvm_queue_exception(vcpu, BP_VECTOR);
5745 	}
5746 
5747 	/*
5748 	 * Read rflags as long as potentially injected trace flags are still
5749 	 * filtered out.
5750 	 */
5751 	rflags = kvm_get_rflags(vcpu);
5752 
5753 	vcpu->guest_debug = dbg->control;
5754 	if (!(vcpu->guest_debug & KVM_GUESTDBG_ENABLE))
5755 		vcpu->guest_debug = 0;
5756 
5757 	if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) {
5758 		for (i = 0; i < KVM_NR_DB_REGS; ++i)
5759 			vcpu->arch.eff_db[i] = dbg->arch.debugreg[i];
5760 		vcpu->arch.switch_db_regs =
5761 			(dbg->arch.debugreg[7] & DR7_BP_EN_MASK);
5762 	} else {
5763 		for (i = 0; i < KVM_NR_DB_REGS; i++)
5764 			vcpu->arch.eff_db[i] = vcpu->arch.db[i];
5765 		vcpu->arch.switch_db_regs = (vcpu->arch.dr7 & DR7_BP_EN_MASK);
5766 	}
5767 
5768 	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
5769 		vcpu->arch.singlestep_rip = kvm_rip_read(vcpu) +
5770 			get_segment_base(vcpu, VCPU_SREG_CS);
5771 
5772 	/*
5773 	 * Trigger an rflags update that will inject or remove the trace
5774 	 * flags.
5775 	 */
5776 	kvm_set_rflags(vcpu, rflags);
5777 
5778 	kvm_x86_ops->set_guest_debug(vcpu, dbg);
5779 
5780 	r = 0;
5781 
5782 out:
5783 
5784 	return r;
5785 }
5786 
5787 /*
5788  * Translate a guest virtual address to a guest physical address.
5789  */
5790 int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
5791 				    struct kvm_translation *tr)
5792 {
5793 	unsigned long vaddr = tr->linear_address;
5794 	gpa_t gpa;
5795 	int idx;
5796 
5797 	idx = srcu_read_lock(&vcpu->kvm->srcu);
5798 	gpa = kvm_mmu_gva_to_gpa_system(vcpu, vaddr, NULL);
5799 	srcu_read_unlock(&vcpu->kvm->srcu, idx);
5800 	tr->physical_address = gpa;
5801 	tr->valid = gpa != UNMAPPED_GVA;
5802 	tr->writeable = 1;
5803 	tr->usermode = 0;
5804 
5805 	return 0;
5806 }
5807 
5808 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
5809 {
5810 	struct i387_fxsave_struct *fxsave =
5811 			&vcpu->arch.guest_fpu.state->fxsave;
5812 
5813 	memcpy(fpu->fpr, fxsave->st_space, 128);
5814 	fpu->fcw = fxsave->cwd;
5815 	fpu->fsw = fxsave->swd;
5816 	fpu->ftwx = fxsave->twd;
5817 	fpu->last_opcode = fxsave->fop;
5818 	fpu->last_ip = fxsave->rip;
5819 	fpu->last_dp = fxsave->rdp;
5820 	memcpy(fpu->xmm, fxsave->xmm_space, sizeof fxsave->xmm_space);
5821 
5822 	return 0;
5823 }
5824 
5825 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
5826 {
5827 	struct i387_fxsave_struct *fxsave =
5828 			&vcpu->arch.guest_fpu.state->fxsave;
5829 
5830 	memcpy(fxsave->st_space, fpu->fpr, 128);
5831 	fxsave->cwd = fpu->fcw;
5832 	fxsave->swd = fpu->fsw;
5833 	fxsave->twd = fpu->ftwx;
5834 	fxsave->fop = fpu->last_opcode;
5835 	fxsave->rip = fpu->last_ip;
5836 	fxsave->rdp = fpu->last_dp;
5837 	memcpy(fxsave->xmm_space, fpu->xmm, sizeof fxsave->xmm_space);
5838 
5839 	return 0;
5840 }
5841 
5842 int fx_init(struct kvm_vcpu *vcpu)
5843 {
5844 	int err;
5845 
5846 	err = fpu_alloc(&vcpu->arch.guest_fpu);
5847 	if (err)
5848 		return err;
5849 
5850 	fpu_finit(&vcpu->arch.guest_fpu);
5851 
5852 	/*
5853 	 * Ensure guest xcr0 is valid for loading
5854 	 */
5855 	vcpu->arch.xcr0 = XSTATE_FP;
5856 
5857 	vcpu->arch.cr0 |= X86_CR0_ET;
5858 
5859 	return 0;
5860 }
5861 EXPORT_SYMBOL_GPL(fx_init);
5862 
5863 static void fx_free(struct kvm_vcpu *vcpu)
5864 {
5865 	fpu_free(&vcpu->arch.guest_fpu);
5866 }
5867 
5868 void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
5869 {
5870 	if (vcpu->guest_fpu_loaded)
5871 		return;
5872 
5873 	/*
5874 	 * Restore all possible states in the guest,
5875 	 * and assume the host would use all available bits.
5876 	 * Guest xcr0 will be loaded later.
5877 	 */
5878 	kvm_put_guest_xcr0(vcpu);
5879 	vcpu->guest_fpu_loaded = 1;
5880 	unlazy_fpu(current);
5881 	fpu_restore_checking(&vcpu->arch.guest_fpu);
5882 	trace_kvm_fpu(1);
5883 }
5884 
5885 void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
5886 {
5887 	kvm_put_guest_xcr0(vcpu);
5888 
5889 	if (!vcpu->guest_fpu_loaded)
5890 		return;
5891 
5892 	vcpu->guest_fpu_loaded = 0;
5893 	fpu_save_init(&vcpu->arch.guest_fpu);
5894 	++vcpu->stat.fpu_reload;
5895 	kvm_make_request(KVM_REQ_DEACTIVATE_FPU, vcpu);
5896 	trace_kvm_fpu(0);
5897 }
5898 
5899 void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
5900 {
5901 	kvmclock_reset(vcpu);
5902 
5903 	free_cpumask_var(vcpu->arch.wbinvd_dirty_mask);
5904 	fx_free(vcpu);
5905 	kvm_x86_ops->vcpu_free(vcpu);
5906 }
5907 
5908 struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
5909 						unsigned int id)
5910 {
5911 	if (check_tsc_unstable() && atomic_read(&kvm->online_vcpus) != 0)
5912 		printk_once(KERN_WARNING
5913 		"kvm: SMP vm created on host with unstable TSC; "
5914 		"guest TSC will not be reliable\n");
5915 	return kvm_x86_ops->vcpu_create(kvm, id);
5916 }
5917 
5918 int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
5919 {
5920 	int r;
5921 
5922 	vcpu->arch.mtrr_state.have_fixed = 1;
5923 	vcpu_load(vcpu);
5924 	r = kvm_arch_vcpu_reset(vcpu);
5925 	if (r == 0)
5926 		r = kvm_mmu_setup(vcpu);
5927 	vcpu_put(vcpu);
5928 
5929 	return r;
5930 }
5931 
5932 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
5933 {
5934 	vcpu->arch.apf.msr_val = 0;
5935 
5936 	vcpu_load(vcpu);
5937 	kvm_mmu_unload(vcpu);
5938 	vcpu_put(vcpu);
5939 
5940 	fx_free(vcpu);
5941 	kvm_x86_ops->vcpu_free(vcpu);
5942 }
5943 
5944 int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu)
5945 {
5946 	atomic_set(&vcpu->arch.nmi_queued, 0);
5947 	vcpu->arch.nmi_pending = 0;
5948 	vcpu->arch.nmi_injected = false;
5949 
5950 	vcpu->arch.switch_db_regs = 0;
5951 	memset(vcpu->arch.db, 0, sizeof(vcpu->arch.db));
5952 	vcpu->arch.dr6 = DR6_FIXED_1;
5953 	vcpu->arch.dr7 = DR7_FIXED_1;
5954 
5955 	kvm_make_request(KVM_REQ_EVENT, vcpu);
5956 	vcpu->arch.apf.msr_val = 0;
5957 	vcpu->arch.st.msr_val = 0;
5958 
5959 	kvmclock_reset(vcpu);
5960 
5961 	kvm_clear_async_pf_completion_queue(vcpu);
5962 	kvm_async_pf_hash_reset(vcpu);
5963 	vcpu->arch.apf.halted = false;
5964 
5965 	kvm_pmu_reset(vcpu);
5966 
5967 	return kvm_x86_ops->vcpu_reset(vcpu);
5968 }
5969 
5970 int kvm_arch_hardware_enable(void *garbage)
5971 {
5972 	struct kvm *kvm;
5973 	struct kvm_vcpu *vcpu;
5974 	int i;
5975 	int ret;
5976 	u64 local_tsc;
5977 	u64 max_tsc = 0;
5978 	bool stable, backwards_tsc = false;
5979 
5980 	kvm_shared_msr_cpu_online();
5981 	ret = kvm_x86_ops->hardware_enable(garbage);
5982 	if (ret != 0)
5983 		return ret;
5984 
5985 	local_tsc = native_read_tsc();
5986 	stable = !check_tsc_unstable();
5987 	list_for_each_entry(kvm, &vm_list, vm_list) {
5988 		kvm_for_each_vcpu(i, vcpu, kvm) {
5989 			if (!stable && vcpu->cpu == smp_processor_id())
5990 				set_bit(KVM_REQ_CLOCK_UPDATE, &vcpu->requests);
5991 			if (stable && vcpu->arch.last_host_tsc > local_tsc) {
5992 				backwards_tsc = true;
5993 				if (vcpu->arch.last_host_tsc > max_tsc)
5994 					max_tsc = vcpu->arch.last_host_tsc;
5995 			}
5996 		}
5997 	}
5998 
5999 	/*
6000 	 * Sometimes, even reliable TSCs go backwards.  This happens on
6001 	 * platforms that reset TSC during suspend or hibernate actions, but
6002 	 * maintain synchronization.  We must compensate.  Fortunately, we can
6003 	 * detect that condition here, which happens early in CPU bringup,
6004 	 * before any KVM threads can be running.  Unfortunately, we can't
6005 	 * bring the TSCs fully up to date with real time, as we aren't yet far
6006 	 * enough into CPU bringup that we know how much real time has actually
6007 	 * elapsed; our helper function, get_kernel_ns() will be using boot
6008 	 * variables that haven't been updated yet.
6009 	 *
6010 	 * So we simply find the maximum observed TSC above, then record the
6011 	 * adjustment to TSC in each VCPU.  When the VCPU later gets loaded,
6012 	 * the adjustment will be applied.  Note that we accumulate
6013 	 * adjustments, in case multiple suspend cycles happen before some VCPU
6014 	 * gets a chance to run again.  In the event that no KVM threads get a
6015 	 * chance to run, we will miss the entire elapsed period, as we'll have
6016 	 * reset last_host_tsc, so VCPUs will not have the TSC adjusted and may
6017 	 * lose cycle time.  This isn't too big a deal, since the loss will be
6018 	 * uniform across all VCPUs (not to mention the scenario is extremely
6019 	 * unlikely). It is possible that a second hibernate recovery happens
6020 	 * much faster than a first, causing the observed TSC here to be
6021 	 * smaller; this would require additional padding adjustment, which is
6022 	 * why we set last_host_tsc to the local tsc observed here.
6023 	 *
6024 	 * N.B. - this code below runs only on platforms with reliable TSC,
6025 	 * as that is the only way backwards_tsc is set above.  Also note
6026 	 * that this runs for ALL vcpus, which is not a bug; all VCPUs should
6027 	 * have the same delta_cyc adjustment applied if backwards_tsc
6028 	 * is detected.  Note further, this adjustment is only done once,
6029 	 * as we reset last_host_tsc on all VCPUs to stop this from being
6030 	 * called multiple times (one for each physical CPU bringup).
6031 	 *
6032 	 * Platforms with unreliable TSCs don't have to deal with this, they
6033 	 * will be compensated by the logic in vcpu_load, which sets the TSC to
6034 	 * catchup mode.  This will catchup all VCPUs to real time, but cannot
6035 	 * guarantee that they stay in perfect synchronization.
6036 	 */
6037 	if (backwards_tsc) {
6038 		u64 delta_cyc = max_tsc - local_tsc;
6039 		list_for_each_entry(kvm, &vm_list, vm_list) {
6040 			kvm_for_each_vcpu(i, vcpu, kvm) {
6041 				vcpu->arch.tsc_offset_adjustment += delta_cyc;
6042 				vcpu->arch.last_host_tsc = local_tsc;
6043 			}
6044 
6045 			/*
6046 			 * We have to disable TSC offset matching - if you were
6047 			 * booting a VM while issuing an S4 host suspend...
6048 			 * you may have a problem.  Solving this issue is
6049 			 * left as an exercise for the reader.
6050 			 */
6051 			kvm->arch.last_tsc_nsec = 0;
6052 			kvm->arch.last_tsc_write = 0;
6053 		}
6054 
6055 	}
6056 	return 0;
6057 }
6058 
6059 void kvm_arch_hardware_disable(void *garbage)
6060 {
6061 	kvm_x86_ops->hardware_disable(garbage);
6062 	drop_user_return_notifiers(garbage);
6063 }
6064 
6065 int kvm_arch_hardware_setup(void)
6066 {
6067 	return kvm_x86_ops->hardware_setup();
6068 }
6069 
6070 void kvm_arch_hardware_unsetup(void)
6071 {
6072 	kvm_x86_ops->hardware_unsetup();
6073 }
6074 
6075 void kvm_arch_check_processor_compat(void *rtn)
6076 {
6077 	kvm_x86_ops->check_processor_compatibility(rtn);
6078 }
6079 
6080 bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu)
6081 {
6082 	return irqchip_in_kernel(vcpu->kvm) == (vcpu->arch.apic != NULL);
6083 }
6084 
6085 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
6086 {
6087 	struct page *page;
6088 	struct kvm *kvm;
6089 	int r;
6090 
6091 	BUG_ON(vcpu->kvm == NULL);
6092 	kvm = vcpu->kvm;
6093 
6094 	vcpu->arch.emulate_ctxt.ops = &emulate_ops;
6095 	if (!irqchip_in_kernel(kvm) || kvm_vcpu_is_bsp(vcpu))
6096 		vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
6097 	else
6098 		vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED;
6099 
6100 	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
6101 	if (!page) {
6102 		r = -ENOMEM;
6103 		goto fail;
6104 	}
6105 	vcpu->arch.pio_data = page_address(page);
6106 
6107 	kvm_set_tsc_khz(vcpu, max_tsc_khz);
6108 
6109 	r = kvm_mmu_create(vcpu);
6110 	if (r < 0)
6111 		goto fail_free_pio_data;
6112 
6113 	if (irqchip_in_kernel(kvm)) {
6114 		r = kvm_create_lapic(vcpu);
6115 		if (r < 0)
6116 			goto fail_mmu_destroy;
6117 	}
6118 
6119 	vcpu->arch.mce_banks = kzalloc(KVM_MAX_MCE_BANKS * sizeof(u64) * 4,
6120 				       GFP_KERNEL);
6121 	if (!vcpu->arch.mce_banks) {
6122 		r = -ENOMEM;
6123 		goto fail_free_lapic;
6124 	}
6125 	vcpu->arch.mcg_cap = KVM_MAX_MCE_BANKS;
6126 
6127 	if (!zalloc_cpumask_var(&vcpu->arch.wbinvd_dirty_mask, GFP_KERNEL))
6128 		goto fail_free_mce_banks;
6129 
6130 	vcpu->arch.pv_time_enabled = false;
6131 	kvm_async_pf_hash_reset(vcpu);
6132 	kvm_pmu_init(vcpu);
6133 
6134 	return 0;
6135 fail_free_mce_banks:
6136 	kfree(vcpu->arch.mce_banks);
6137 fail_free_lapic:
6138 	kvm_free_lapic(vcpu);
6139 fail_mmu_destroy:
6140 	kvm_mmu_destroy(vcpu);
6141 fail_free_pio_data:
6142 	free_page((unsigned long)vcpu->arch.pio_data);
6143 fail:
6144 	return r;
6145 }
6146 
6147 void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
6148 {
6149 	int idx;
6150 
6151 	kvm_pmu_destroy(vcpu);
6152 	kfree(vcpu->arch.mce_banks);
6153 	kvm_free_lapic(vcpu);
6154 	idx = srcu_read_lock(&vcpu->kvm->srcu);
6155 	kvm_mmu_destroy(vcpu);
6156 	srcu_read_unlock(&vcpu->kvm->srcu, idx);
6157 	free_page((unsigned long)vcpu->arch.pio_data);
6158 }
6159 
6160 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
6161 {
6162 	if (type)
6163 		return -EINVAL;
6164 
6165 	INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
6166 	INIT_LIST_HEAD(&kvm->arch.assigned_dev_head);
6167 
6168 	/* Reserve bit 0 of irq_sources_bitmap for userspace irq source */
6169 	set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap);
6170 
6171 	raw_spin_lock_init(&kvm->arch.tsc_write_lock);
6172 
6173 	return 0;
6174 }
6175 
6176 static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu)
6177 {
6178 	vcpu_load(vcpu);
6179 	kvm_mmu_unload(vcpu);
6180 	vcpu_put(vcpu);
6181 }
6182 
6183 static void kvm_free_vcpus(struct kvm *kvm)
6184 {
6185 	unsigned int i;
6186 	struct kvm_vcpu *vcpu;
6187 
6188 	/*
6189 	 * Unpin any mmu pages first.
6190 	 */
6191 	kvm_for_each_vcpu(i, vcpu, kvm) {
6192 		kvm_clear_async_pf_completion_queue(vcpu);
6193 		kvm_unload_vcpu_mmu(vcpu);
6194 	}
6195 	kvm_for_each_vcpu(i, vcpu, kvm)
6196 		kvm_arch_vcpu_free(vcpu);
6197 
6198 	mutex_lock(&kvm->lock);
6199 	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
6200 		kvm->vcpus[i] = NULL;
6201 
6202 	atomic_set(&kvm->online_vcpus, 0);
6203 	mutex_unlock(&kvm->lock);
6204 }
6205 
6206 void kvm_arch_sync_events(struct kvm *kvm)
6207 {
6208 	kvm_free_all_assigned_devices(kvm);
6209 	kvm_free_pit(kvm);
6210 }
6211 
6212 void kvm_arch_destroy_vm(struct kvm *kvm)
6213 {
6214 	kvm_iommu_unmap_guest(kvm);
6215 	kfree(kvm->arch.vpic);
6216 	kfree(kvm->arch.vioapic);
6217 	kvm_free_vcpus(kvm);
6218 	if (kvm->arch.apic_access_page)
6219 		put_page(kvm->arch.apic_access_page);
6220 	if (kvm->arch.ept_identity_pagetable)
6221 		put_page(kvm->arch.ept_identity_pagetable);
6222 }
6223 
6224 void kvm_arch_free_memslot(struct kvm_memory_slot *free,
6225 			   struct kvm_memory_slot *dont)
6226 {
6227 	int i;
6228 
6229 	for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i) {
6230 		if (!dont || free->arch.lpage_info[i] != dont->arch.lpage_info[i]) {
6231 			vfree(free->arch.lpage_info[i]);
6232 			free->arch.lpage_info[i] = NULL;
6233 		}
6234 	}
6235 }
6236 
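/*
 * Allocate the per-slot large-page tracking arrays (one entry per
 * potential 2M/1G page); a non-zero write_count disables large pages
 * for that range.
 */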
6237 int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
6238 {
6239 	int i;
6240 
6241 	for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i) {
6242 		unsigned long ugfn;
6243 		int lpages;
6244 		int level = i + 2;
6245 
6246 		lpages = gfn_to_index(slot->base_gfn + npages - 1,
6247 				      slot->base_gfn, level) + 1;
6248 
6249 		slot->arch.lpage_info[i] =
6250 			vzalloc(lpages * sizeof(*slot->arch.lpage_info[i]));
6251 		if (!slot->arch.lpage_info[i])
6252 			goto out_free;
6253 
6254 		if (slot->base_gfn & (KVM_PAGES_PER_HPAGE(level) - 1))
6255 			slot->arch.lpage_info[i][0].write_count = 1;
6256 		if ((slot->base_gfn + npages) & (KVM_PAGES_PER_HPAGE(level) - 1))
6257 			slot->arch.lpage_info[i][lpages - 1].write_count = 1;
6258 		ugfn = slot->userspace_addr >> PAGE_SHIFT;
6259 		/*
6260 		 * If the gfn and userspace address are not aligned wrt each
6261 		 * other, or if explicitly asked to, disable large page
6262 		 * support for this slot
6263 		 */
6264 		if ((slot->base_gfn ^ ugfn) & (KVM_PAGES_PER_HPAGE(level) - 1) ||
6265 		    !kvm_largepages_enabled()) {
6266 			unsigned long j;
6267 
6268 			for (j = 0; j < lpages; ++j)
6269 				slot->arch.lpage_info[i][j].write_count = 1;
6270 		}
6271 	}
6272 
6273 	return 0;
6274 
6275 out_free:
6276 	for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i) {
6277 		vfree(slot->arch.lpage_info[i]);
6278 		slot->arch.lpage_info[i] = NULL;
6279 	}
6280 	return -ENOMEM;
6281 }
6282 
6283 int kvm_arch_prepare_memory_region(struct kvm *kvm,
6284 				struct kvm_memory_slot *memslot,
6285 				struct kvm_memory_slot old,
6286 				struct kvm_userspace_memory_region *mem,
6287 				int user_alloc)
6288 {
6289 	int npages = memslot->npages;
6290 	int map_flags = MAP_PRIVATE | MAP_ANONYMOUS;
6291 
6292 	/* Prevent internal slot pages from being moved by fork()/COW. */
6293 	if (memslot->id >= KVM_MEMORY_SLOTS)
6294 		map_flags = MAP_SHARED | MAP_ANONYMOUS;
6295 
6296 	/* To keep backward compatibility with older userspace,
6297 	 * x86 needs to handle the !user_alloc case.
6298 	 */
6299 	if (!user_alloc) {
6300 		if (npages && !old.rmap) {
6301 			unsigned long userspace_addr;
6302 
6303 			userspace_addr = vm_mmap(NULL, 0,
6304 						 npages * PAGE_SIZE,
6305 						 PROT_READ | PROT_WRITE,
6306 						 map_flags,
6307 						 0);
6308 
6309 			if (IS_ERR((void *)userspace_addr))
6310 				return PTR_ERR((void *)userspace_addr);
6311 
6312 			memslot->userspace_addr = userspace_addr;
6313 		}
6314 	}
6315 
6316 
6317 	return 0;
6318 }
6319 
6320 void kvm_arch_commit_memory_region(struct kvm *kvm,
6321 				struct kvm_userspace_memory_region *mem,
6322 				struct kvm_memory_slot old,
6323 				int user_alloc)
6324 {
6325 
6326 	int nr_mmu_pages = 0, npages = mem->memory_size >> PAGE_SHIFT;
6327 
6328 	if (!user_alloc && !old.user_alloc && old.rmap && !npages) {
6329 		int ret;
6330 
6331 		ret = vm_munmap(old.userspace_addr,
6332 				old.npages * PAGE_SIZE);
6333 		if (ret < 0)
6334 			printk(KERN_WARNING
6335 			       "kvm_vm_ioctl_set_memory_region: "
6336 			       "failed to munmap memory\n");
6337 	}
6338 
6339 	if (!kvm->arch.n_requested_mmu_pages)
6340 		nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm);
6341 
6342 	spin_lock(&kvm->mmu_lock);
6343 	if (nr_mmu_pages)
6344 		kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);
6345 	kvm_mmu_slot_remove_write_access(kvm, mem->slot);
6346 	spin_unlock(&kvm->mmu_lock);
6347 }
6348 
6349 void kvm_arch_flush_shadow(struct kvm *kvm)
6350 {
6351 	kvm_mmu_zap_all(kvm);
6352 	kvm_reload_remote_mmus(kvm);
6353 }
6354 
6355 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
6356 {
6357 	return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&
6358 		!vcpu->arch.apf.halted)
6359 		|| !list_empty_careful(&vcpu->async_pf.done)
6360 		|| vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED
6361 		|| atomic_read(&vcpu->arch.nmi_queued) ||
6362 		(kvm_arch_interrupt_allowed(vcpu) &&
6363 		 kvm_cpu_has_interrupt(vcpu));
6364 }
6365 
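/*
 * Wake up a sleeping vcpu and, if it is currently executing guest code on
 * another physical CPU, send a reschedule IPI to force a VM exit.
 */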
6366 void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
6367 {
6368 	int me;
6369 	int cpu = vcpu->cpu;
6370 
6371 	if (waitqueue_active(&vcpu->wq)) {
6372 		wake_up_interruptible(&vcpu->wq);
6373 		++vcpu->stat.halt_wakeup;
6374 	}
6375 
6376 	me = get_cpu();
6377 	if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_online(cpu))
6378 		if (kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE)
6379 			smp_send_reschedule(cpu);
6380 	put_cpu();
6381 }
6382 
6383 int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu)
6384 {
6385 	return kvm_x86_ops->interrupt_allowed(vcpu);
6386 }
6387 
6388 bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip)
6389 {
6390 	unsigned long current_rip = kvm_rip_read(vcpu) +
6391 		get_segment_base(vcpu, VCPU_SREG_CS);
6392 
6393 	return current_rip == linear_rip;
6394 }
6395 EXPORT_SYMBOL_GPL(kvm_is_linear_rip);
6396 
6397 unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu)
6398 {
6399 	unsigned long rflags;
6400 
6401 	rflags = kvm_x86_ops->get_rflags(vcpu);
6402 	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
6403 		rflags &= ~X86_EFLAGS_TF;
6404 	return rflags;
6405 }
6406 EXPORT_SYMBOL_GPL(kvm_get_rflags);
6407 
6408 void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
6409 {
6410 	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP &&
6411 	    kvm_is_linear_rip(vcpu, vcpu->arch.singlestep_rip))
6412 		rflags |= X86_EFLAGS_TF;
6413 	kvm_x86_ops->set_rflags(vcpu, rflags);
6414 	kvm_make_request(KVM_REQ_EVENT, vcpu);
6415 }
6416 EXPORT_SYMBOL_GPL(kvm_set_rflags);
6417 
6418 void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, struct kvm_async_pf *work)
6419 {
6420 	int r;
6421 
6422 	if ((vcpu->arch.mmu.direct_map != work->arch.direct_map) ||
6423 	      is_error_page(work->page))
6424 		return;
6425 
6426 	r = kvm_mmu_reload(vcpu);
6427 	if (unlikely(r))
6428 		return;
6429 
6430 	if (!vcpu->arch.mmu.direct_map &&
6431 	      work->arch.cr3 != vcpu->arch.mmu.get_cr3(vcpu))
6432 		return;
6433 
6434 	vcpu->arch.mmu.page_fault(vcpu, work->gva, 0, true);
6435 }
6436 
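/*
 * vcpu->arch.apf.gfns[] is a small open-addressed hash table of gfns with
 * outstanding async page faults.  Empty slots hold ~0, collisions are
 * resolved by linear probing, and deletion re-inserts displaced entries
 * so that probe chains stay intact.
 */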
6437 static inline u32 kvm_async_pf_hash_fn(gfn_t gfn)
6438 {
6439 	return hash_32(gfn & 0xffffffff, order_base_2(ASYNC_PF_PER_VCPU));
6440 }
6441 
6442 static inline u32 kvm_async_pf_next_probe(u32 key)
6443 {
6444 	return (key + 1) & (roundup_pow_of_two(ASYNC_PF_PER_VCPU) - 1);
6445 }
6446 
6447 static void kvm_add_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
6448 {
6449 	u32 key = kvm_async_pf_hash_fn(gfn);
6450 
6451 	while (vcpu->arch.apf.gfns[key] != ~0)
6452 		key = kvm_async_pf_next_probe(key);
6453 
6454 	vcpu->arch.apf.gfns[key] = gfn;
6455 }
6456 
6457 static u32 kvm_async_pf_gfn_slot(struct kvm_vcpu *vcpu, gfn_t gfn)
6458 {
6459 	int i;
6460 	u32 key = kvm_async_pf_hash_fn(gfn);
6461 
6462 	for (i = 0; i < roundup_pow_of_two(ASYNC_PF_PER_VCPU) &&
6463 		     (vcpu->arch.apf.gfns[key] != gfn &&
6464 		      vcpu->arch.apf.gfns[key] != ~0); i++)
6465 		key = kvm_async_pf_next_probe(key);
6466 
6467 	return key;
6468 }
6469 
6470 bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
6471 {
6472 	return vcpu->arch.apf.gfns[kvm_async_pf_gfn_slot(vcpu, gfn)] == gfn;
6473 }
6474 
6475 static void kvm_del_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
6476 {
6477 	u32 i, j, k;
6478 
6479 	i = j = kvm_async_pf_gfn_slot(vcpu, gfn);
6480 	while (true) {
6481 		vcpu->arch.apf.gfns[i] = ~0;
6482 		do {
6483 			j = kvm_async_pf_next_probe(j);
6484 			if (vcpu->arch.apf.gfns[j] == ~0)
6485 				return;
6486 			k = kvm_async_pf_hash_fn(vcpu->arch.apf.gfns[j]);
6487 			/*
6488 			 * k lies cyclically in ]i,j]
6489 			 * |    i.k.j |
6490 			 * |....j i.k.| or  |.k..j i...|
6491 			 */
6492 		} while ((i <= j) ? (i < k && k <= j) : (i < k || k <= j));
6493 		vcpu->arch.apf.gfns[i] = vcpu->arch.apf.gfns[j];
6494 		i = j;
6495 	}
6496 }
6497 
6498 static int apf_put_user(struct kvm_vcpu *vcpu, u32 val)
6499 {
6500 
6501 	return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apf.data, &val,
6502 				      sizeof(val));
6503 }
6504 
6505 void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
6506 				     struct kvm_async_pf *work)
6507 {
6508 	struct x86_exception fault;
6509 
6510 	trace_kvm_async_pf_not_present(work->arch.token, work->gva);
6511 	kvm_add_async_pf_gfn(vcpu, work->arch.gfn);
6512 
6513 	if (!(vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED) ||
6514 	    (vcpu->arch.apf.send_user_only &&
6515 	     kvm_x86_ops->get_cpl(vcpu) == 0))
6516 		kvm_make_request(KVM_REQ_APF_HALT, vcpu);
6517 	else if (!apf_put_user(vcpu, KVM_PV_REASON_PAGE_NOT_PRESENT)) {
6518 		fault.vector = PF_VECTOR;
6519 		fault.error_code_valid = true;
6520 		fault.error_code = 0;
6521 		fault.nested_page_fault = false;
6522 		fault.address = work->arch.token;
6523 		kvm_inject_page_fault(vcpu, &fault);
6524 	}
6525 }
6526 
6527 void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
6528 				 struct kvm_async_pf *work)
6529 {
6530 	struct x86_exception fault;
6531 
6532 	trace_kvm_async_pf_ready(work->arch.token, work->gva);
6533 	if (is_error_page(work->page))
6534 		work->arch.token = ~0; /* broadcast wakeup */
6535 	else
6536 		kvm_del_async_pf_gfn(vcpu, work->arch.gfn);
6537 
6538 	if ((vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED) &&
6539 	    !apf_put_user(vcpu, KVM_PV_REASON_PAGE_READY)) {
6540 		fault.vector = PF_VECTOR;
6541 		fault.error_code_valid = true;
6542 		fault.error_code = 0;
6543 		fault.nested_page_fault = false;
6544 		fault.address = work->arch.token;
6545 		kvm_inject_page_fault(vcpu, &fault);
6546 	}
6547 	vcpu->arch.apf.halted = false;
6548 	vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
6549 }
6550 
6551 bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
6552 {
6553 	if (!(vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED))
6554 		return true;
6555 	else
6556 		return !kvm_event_needs_reinjection(vcpu) &&
6557 			kvm_x86_ops->interrupt_allowed(vcpu);
6558 }
6559 
6560 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_exit);
6561 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_inj_virq);
6562 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_page_fault);
6563 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_msr);
6564 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_cr);
6565 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmrun);
6566 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmexit);
6567 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmexit_inject);
6568 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_intr_vmexit);
6569 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_invlpga);
6570 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_skinit);
6571 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_intercepts);
6572