1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * KVM Microsoft Hyper-V emulation
4 *
5 * derived from arch/x86/kvm/x86.c
6 *
7 * Copyright (C) 2006 Qumranet, Inc.
8 * Copyright (C) 2008 Qumranet, Inc.
9 * Copyright IBM Corporation, 2008
10 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
11 * Copyright (C) 2015 Andrey Smetanin <asmetanin@virtuozzo.com>
12 *
13 * Authors:
14 * Avi Kivity <avi@qumranet.com>
15 * Yaniv Kamay <yaniv@qumranet.com>
16 * Amit Shah <amit.shah@qumranet.com>
17 * Ben-Ami Yassour <benami@il.ibm.com>
18 * Andrey Smetanin <asmetanin@virtuozzo.com>
19 */
20
21 #include "x86.h"
22 #include "lapic.h"
23 #include "ioapic.h"
24 #include "cpuid.h"
25 #include "hyperv.h"
26 #include "xen.h"
27
28 #include <linux/cpu.h>
29 #include <linux/kvm_host.h>
30 #include <linux/highmem.h>
31 #include <linux/sched/cputime.h>
32 #include <linux/eventfd.h>
33
34 #include <asm/apicdef.h>
35 #include <trace/events/kvm.h>
36
37 #include "trace.h"
38 #include "irq.h"
39 #include "fpu.h"
40
41 /* "Hv#1" signature */
42 #define HYPERV_CPUID_SIGNATURE_EAX 0x31237648
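/*
 * The signature is the ASCII string "Hv#1" stored as a little-endian dword:
 * 'H' (0x48) is the low byte, then 'v' (0x76), '#' (0x23), '1' (0x31),
 * which yields 0x31237648 in EAX.
 */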
43
44 #define KVM_HV_MAX_SPARSE_VCPU_SET_BITS DIV_ROUND_UP(KVM_MAX_VCPUS, 64)
45
46 static void stimer_mark_pending(struct kvm_vcpu_hv_stimer *stimer,
47 bool vcpu_kick);
48
49 static inline u64 synic_read_sint(struct kvm_vcpu_hv_synic *synic, int sint)
50 {
51 return atomic64_read(&synic->sint[sint]);
52 }
53
54 static inline int synic_get_sint_vector(u64 sint_value)
55 {
56 if (sint_value & HV_SYNIC_SINT_MASKED)
57 return -1;
58 return sint_value & HV_SYNIC_SINT_VECTOR_MASK;
59 }
60
61 static bool synic_has_vector_connected(struct kvm_vcpu_hv_synic *synic,
62 int vector)
63 {
64 int i;
65
66 for (i = 0; i < ARRAY_SIZE(synic->sint); i++) {
67 if (synic_get_sint_vector(synic_read_sint(synic, i)) == vector)
68 return true;
69 }
70 return false;
71 }
72
73 static bool synic_has_vector_auto_eoi(struct kvm_vcpu_hv_synic *synic,
74 int vector)
75 {
76 int i;
77 u64 sint_value;
78
79 for (i = 0; i < ARRAY_SIZE(synic->sint); i++) {
80 sint_value = synic_read_sint(synic, i);
81 if (synic_get_sint_vector(sint_value) == vector &&
82 sint_value & HV_SYNIC_SINT_AUTO_EOI)
83 return true;
84 }
85 return false;
86 }
87
88 static void synic_update_vector(struct kvm_vcpu_hv_synic *synic,
89 int vector)
90 {
91 struct kvm_vcpu *vcpu = hv_synic_to_vcpu(synic);
92 struct kvm_hv *hv = to_kvm_hv(vcpu->kvm);
93 bool auto_eoi_old, auto_eoi_new;
94
95 if (vector < HV_SYNIC_FIRST_VALID_VECTOR)
96 return;
97
98 if (synic_has_vector_connected(synic, vector))
99 __set_bit(vector, synic->vec_bitmap);
100 else
101 __clear_bit(vector, synic->vec_bitmap);
102
103 auto_eoi_old = !bitmap_empty(synic->auto_eoi_bitmap, 256);
104
105 if (synic_has_vector_auto_eoi(synic, vector))
106 __set_bit(vector, synic->auto_eoi_bitmap);
107 else
108 __clear_bit(vector, synic->auto_eoi_bitmap);
109
110 auto_eoi_new = !bitmap_empty(synic->auto_eoi_bitmap, 256);
111
112 if (auto_eoi_old == auto_eoi_new)
113 return;
114
115 if (!enable_apicv)
116 return;
117
118 down_write(&vcpu->kvm->arch.apicv_update_lock);
119
120 if (auto_eoi_new)
121 hv->synic_auto_eoi_used++;
122 else
123 hv->synic_auto_eoi_used--;
124
125 /*
126 * Inhibit APICv if any vCPU is using SynIC's AutoEOI, which relies on
127 * the hypervisor to manually inject IRQs.
128 */
129 __kvm_set_or_clear_apicv_inhibit(vcpu->kvm,
130 APICV_INHIBIT_REASON_HYPERV,
131 !!hv->synic_auto_eoi_used);
132
133 up_write(&vcpu->kvm->arch.apicv_update_lock);
134 }
135
136 static int synic_set_sint(struct kvm_vcpu_hv_synic *synic, int sint,
137 u64 data, bool host)
138 {
139 int vector, old_vector;
140 bool masked;
141
142 vector = data & HV_SYNIC_SINT_VECTOR_MASK;
143 masked = data & HV_SYNIC_SINT_MASKED;
144
145 /*
146 * Valid vectors are 16-255; however, nested Hyper-V attempts to write
147 * the default '0x10000' value on boot and this should not #GP. We also
148 * need to allow zero-initializing the register from the host.
149 */
150 if (vector < HV_SYNIC_FIRST_VALID_VECTOR && !host && !masked)
151 return 1;
152 /*
153 * Guest may configure multiple SINTs to use the same vector, so
154 * we maintain a bitmap of vectors handled by synic, and a
155 * bitmap of vectors with auto-eoi behavior. The bitmaps are
156 * updated here, and atomically queried on fast paths.
157 */
158 old_vector = synic_read_sint(synic, sint) & HV_SYNIC_SINT_VECTOR_MASK;
159
160 atomic64_set(&synic->sint[sint], data);
161
162 synic_update_vector(synic, old_vector);
163
164 synic_update_vector(synic, vector);
165
166 /* Load SynIC vectors into EOI exit bitmap */
167 kvm_make_request(KVM_REQ_SCAN_IOAPIC, hv_synic_to_vcpu(synic));
168 return 0;
169 }
170
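/*
 * Look up a vCPU by its Hyper-V VP index.  In the common case the VP index
 * equals the KVM vCPU index, so try that first and only fall back to a
 * linear scan when the guest (or userspace) has remapped VP indices.
 */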
171 static struct kvm_vcpu *get_vcpu_by_vpidx(struct kvm *kvm, u32 vpidx)
172 {
173 struct kvm_vcpu *vcpu = NULL;
174 unsigned long i;
175
176 if (vpidx >= KVM_MAX_VCPUS)
177 return NULL;
178
179 vcpu = kvm_get_vcpu(kvm, vpidx);
180 if (vcpu && kvm_hv_get_vpindex(vcpu) == vpidx)
181 return vcpu;
182 kvm_for_each_vcpu(i, vcpu, kvm)
183 if (kvm_hv_get_vpindex(vcpu) == vpidx)
184 return vcpu;
185 return NULL;
186 }
187
188 static struct kvm_vcpu_hv_synic *synic_get(struct kvm *kvm, u32 vpidx)
189 {
190 struct kvm_vcpu *vcpu;
191 struct kvm_vcpu_hv_synic *synic;
192
193 vcpu = get_vcpu_by_vpidx(kvm, vpidx);
194 if (!vcpu || !to_hv_vcpu(vcpu))
195 return NULL;
196 synic = to_hv_synic(vcpu);
197 return (synic->active) ? synic : NULL;
198 }
199
200 static void kvm_hv_notify_acked_sint(struct kvm_vcpu *vcpu, u32 sint)
201 {
202 struct kvm *kvm = vcpu->kvm;
203 struct kvm_vcpu_hv_synic *synic = to_hv_synic(vcpu);
204 struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
205 struct kvm_vcpu_hv_stimer *stimer;
206 int gsi, idx;
207
208 trace_kvm_hv_notify_acked_sint(vcpu->vcpu_id, sint);
209
210 /* Try to deliver pending Hyper-V SynIC timer messages */
211 for (idx = 0; idx < ARRAY_SIZE(hv_vcpu->stimer); idx++) {
212 stimer = &hv_vcpu->stimer[idx];
213 if (stimer->msg_pending && stimer->config.enable &&
214 !stimer->config.direct_mode &&
215 stimer->config.sintx == sint)
216 stimer_mark_pending(stimer, false);
217 }
218
219 idx = srcu_read_lock(&kvm->irq_srcu);
220 gsi = atomic_read(&synic->sint_to_gsi[sint]);
221 if (gsi != -1)
222 kvm_notify_acked_gsi(kvm, gsi);
223 srcu_read_unlock(&kvm->irq_srcu, idx);
224 }
225
226 static void synic_exit(struct kvm_vcpu_hv_synic *synic, u32 msr)
227 {
228 struct kvm_vcpu *vcpu = hv_synic_to_vcpu(synic);
229 struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
230
231 hv_vcpu->exit.type = KVM_EXIT_HYPERV_SYNIC;
232 hv_vcpu->exit.u.synic.msr = msr;
233 hv_vcpu->exit.u.synic.control = synic->control;
234 hv_vcpu->exit.u.synic.evt_page = synic->evt_page;
235 hv_vcpu->exit.u.synic.msg_page = synic->msg_page;
236
237 kvm_make_request(KVM_REQ_HV_EXIT, vcpu);
238 }
239
240 static int synic_set_msr(struct kvm_vcpu_hv_synic *synic,
241 u32 msr, u64 data, bool host)
242 {
243 struct kvm_vcpu *vcpu = hv_synic_to_vcpu(synic);
244 int ret;
245
246 if (!synic->active && (!host || data))
247 return 1;
248
249 trace_kvm_hv_synic_set_msr(vcpu->vcpu_id, msr, data, host);
250
251 ret = 0;
252 switch (msr) {
253 case HV_X64_MSR_SCONTROL:
254 synic->control = data;
255 if (!host)
256 synic_exit(synic, msr);
257 break;
258 case HV_X64_MSR_SVERSION:
259 if (!host) {
260 ret = 1;
261 break;
262 }
263 synic->version = data;
264 break;
265 case HV_X64_MSR_SIEFP:
266 if ((data & HV_SYNIC_SIEFP_ENABLE) && !host &&
267 !synic->dont_zero_synic_pages)
268 if (kvm_clear_guest(vcpu->kvm,
269 data & PAGE_MASK, PAGE_SIZE)) {
270 ret = 1;
271 break;
272 }
273 synic->evt_page = data;
274 if (!host)
275 synic_exit(synic, msr);
276 break;
277 case HV_X64_MSR_SIMP:
278 if ((data & HV_SYNIC_SIMP_ENABLE) && !host &&
279 !synic->dont_zero_synic_pages)
280 if (kvm_clear_guest(vcpu->kvm,
281 data & PAGE_MASK, PAGE_SIZE)) {
282 ret = 1;
283 break;
284 }
285 synic->msg_page = data;
286 if (!host)
287 synic_exit(synic, msr);
288 break;
289 case HV_X64_MSR_EOM: {
290 int i;
291
292 if (!synic->active)
293 break;
294
295 for (i = 0; i < ARRAY_SIZE(synic->sint); i++)
296 kvm_hv_notify_acked_sint(vcpu, i);
297 break;
298 }
299 case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
300 ret = synic_set_sint(synic, msr - HV_X64_MSR_SINT0, data, host);
301 break;
302 default:
303 ret = 1;
304 break;
305 }
306 return ret;
307 }
308
309 static bool kvm_hv_is_syndbg_enabled(struct kvm_vcpu *vcpu)
310 {
311 struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
312
313 return hv_vcpu->cpuid_cache.syndbg_cap_eax &
314 HV_X64_SYNDBG_CAP_ALLOW_KERNEL_DEBUGGING;
315 }
316
317 static int kvm_hv_syndbg_complete_userspace(struct kvm_vcpu *vcpu)
318 {
319 struct kvm_hv *hv = to_kvm_hv(vcpu->kvm);
320
321 if (vcpu->run->hyperv.u.syndbg.msr == HV_X64_MSR_SYNDBG_CONTROL)
322 hv->hv_syndbg.control.status =
323 vcpu->run->hyperv.u.syndbg.status;
324 return 1;
325 }
326
327 static void syndbg_exit(struct kvm_vcpu *vcpu, u32 msr)
328 {
329 struct kvm_hv_syndbg *syndbg = to_hv_syndbg(vcpu);
330 struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
331
332 hv_vcpu->exit.type = KVM_EXIT_HYPERV_SYNDBG;
333 hv_vcpu->exit.u.syndbg.msr = msr;
334 hv_vcpu->exit.u.syndbg.control = syndbg->control.control;
335 hv_vcpu->exit.u.syndbg.send_page = syndbg->control.send_page;
336 hv_vcpu->exit.u.syndbg.recv_page = syndbg->control.recv_page;
337 hv_vcpu->exit.u.syndbg.pending_page = syndbg->control.pending_page;
338 vcpu->arch.complete_userspace_io =
339 kvm_hv_syndbg_complete_userspace;
340
341 kvm_make_request(KVM_REQ_HV_EXIT, vcpu);
342 }
343
344 static int syndbg_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
345 {
346 struct kvm_hv_syndbg *syndbg = to_hv_syndbg(vcpu);
347
348 if (!kvm_hv_is_syndbg_enabled(vcpu) && !host)
349 return 1;
350
351 trace_kvm_hv_syndbg_set_msr(vcpu->vcpu_id,
352 to_hv_vcpu(vcpu)->vp_index, msr, data);
353 switch (msr) {
354 case HV_X64_MSR_SYNDBG_CONTROL:
355 syndbg->control.control = data;
356 if (!host)
357 syndbg_exit(vcpu, msr);
358 break;
359 case HV_X64_MSR_SYNDBG_STATUS:
360 syndbg->control.status = data;
361 break;
362 case HV_X64_MSR_SYNDBG_SEND_BUFFER:
363 syndbg->control.send_page = data;
364 break;
365 case HV_X64_MSR_SYNDBG_RECV_BUFFER:
366 syndbg->control.recv_page = data;
367 break;
368 case HV_X64_MSR_SYNDBG_PENDING_BUFFER:
369 syndbg->control.pending_page = data;
370 if (!host)
371 syndbg_exit(vcpu, msr);
372 break;
373 case HV_X64_MSR_SYNDBG_OPTIONS:
374 syndbg->options = data;
375 break;
376 default:
377 break;
378 }
379
380 return 0;
381 }
382
383 static int syndbg_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host)
384 {
385 struct kvm_hv_syndbg *syndbg = to_hv_syndbg(vcpu);
386
387 if (!kvm_hv_is_syndbg_enabled(vcpu) && !host)
388 return 1;
389
390 switch (msr) {
391 case HV_X64_MSR_SYNDBG_CONTROL:
392 *pdata = syndbg->control.control;
393 break;
394 case HV_X64_MSR_SYNDBG_STATUS:
395 *pdata = syndbg->control.status;
396 break;
397 case HV_X64_MSR_SYNDBG_SEND_BUFFER:
398 *pdata = syndbg->control.send_page;
399 break;
400 case HV_X64_MSR_SYNDBG_RECV_BUFFER:
401 *pdata = syndbg->control.recv_page;
402 break;
403 case HV_X64_MSR_SYNDBG_PENDING_BUFFER:
404 *pdata = syndbg->control.pending_page;
405 break;
406 case HV_X64_MSR_SYNDBG_OPTIONS:
407 *pdata = syndbg->options;
408 break;
409 default:
410 break;
411 }
412
413 trace_kvm_hv_syndbg_get_msr(vcpu->vcpu_id, kvm_hv_get_vpindex(vcpu), msr, *pdata);
414
415 return 0;
416 }
417
418 static int synic_get_msr(struct kvm_vcpu_hv_synic *synic, u32 msr, u64 *pdata,
419 bool host)
420 {
421 int ret;
422
423 if (!synic->active && !host)
424 return 1;
425
426 ret = 0;
427 switch (msr) {
428 case HV_X64_MSR_SCONTROL:
429 *pdata = synic->control;
430 break;
431 case HV_X64_MSR_SVERSION:
432 *pdata = synic->version;
433 break;
434 case HV_X64_MSR_SIEFP:
435 *pdata = synic->evt_page;
436 break;
437 case HV_X64_MSR_SIMP:
438 *pdata = synic->msg_page;
439 break;
440 case HV_X64_MSR_EOM:
441 *pdata = 0;
442 break;
443 case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
444 *pdata = atomic64_read(&synic->sint[msr - HV_X64_MSR_SINT0]);
445 break;
446 default:
447 ret = 1;
448 break;
449 }
450 return ret;
451 }
452
453 static int synic_set_irq(struct kvm_vcpu_hv_synic *synic, u32 sint)
454 {
455 struct kvm_vcpu *vcpu = hv_synic_to_vcpu(synic);
456 struct kvm_lapic_irq irq;
457 int ret, vector;
458
459 if (KVM_BUG_ON(!lapic_in_kernel(vcpu), vcpu->kvm))
460 return -EINVAL;
461
462 if (sint >= ARRAY_SIZE(synic->sint))
463 return -EINVAL;
464
465 vector = synic_get_sint_vector(synic_read_sint(synic, sint));
466 if (vector < 0)
467 return -ENOENT;
468
469 memset(&irq, 0, sizeof(irq));
470 irq.shorthand = APIC_DEST_SELF;
471 irq.dest_mode = APIC_DEST_PHYSICAL;
472 irq.delivery_mode = APIC_DM_FIXED;
473 irq.vector = vector;
474 irq.level = 1;
475
476 ret = kvm_irq_delivery_to_apic(vcpu->kvm, vcpu->arch.apic, &irq, NULL);
477 trace_kvm_hv_synic_set_irq(vcpu->vcpu_id, sint, irq.vector, ret);
478 return ret;
479 }
480
481 int kvm_hv_synic_set_irq(struct kvm *kvm, u32 vpidx, u32 sint)
482 {
483 struct kvm_vcpu_hv_synic *synic;
484
485 synic = synic_get(kvm, vpidx);
486 if (!synic)
487 return -EINVAL;
488
489 return synic_set_irq(synic, sint);
490 }
491
492 void kvm_hv_synic_send_eoi(struct kvm_vcpu *vcpu, int vector)
493 {
494 struct kvm_vcpu_hv_synic *synic = to_hv_synic(vcpu);
495 int i;
496
497 trace_kvm_hv_synic_send_eoi(vcpu->vcpu_id, vector);
498
499 for (i = 0; i < ARRAY_SIZE(synic->sint); i++)
500 if (synic_get_sint_vector(synic_read_sint(synic, i)) == vector)
501 kvm_hv_notify_acked_sint(vcpu, i);
502 }
503
504 static int kvm_hv_set_sint_gsi(struct kvm *kvm, u32 vpidx, u32 sint, int gsi)
505 {
506 struct kvm_vcpu_hv_synic *synic;
507
508 synic = synic_get(kvm, vpidx);
509 if (!synic)
510 return -EINVAL;
511
512 if (sint >= ARRAY_SIZE(synic->sint_to_gsi))
513 return -EINVAL;
514
515 atomic_set(&synic->sint_to_gsi[sint], gsi);
516 return 0;
517 }
518
519 void kvm_hv_irq_routing_update(struct kvm *kvm)
520 {
521 struct kvm_irq_routing_table *irq_rt;
522 struct kvm_kernel_irq_routing_entry *e;
523 u32 gsi;
524
525 irq_rt = srcu_dereference_check(kvm->irq_routing, &kvm->irq_srcu,
526 lockdep_is_held(&kvm->irq_lock));
527
528 for (gsi = 0; gsi < irq_rt->nr_rt_entries; gsi++) {
529 hlist_for_each_entry(e, &irq_rt->map[gsi], link) {
530 if (e->type == KVM_IRQ_ROUTING_HV_SINT)
531 kvm_hv_set_sint_gsi(kvm, e->hv_sint.vcpu,
532 e->hv_sint.sint, gsi);
533 }
534 }
535 }
536
537 static void synic_init(struct kvm_vcpu_hv_synic *synic)
538 {
539 int i;
540
541 memset(synic, 0, sizeof(*synic));
542 synic->version = HV_SYNIC_VERSION_1;
543 for (i = 0; i < ARRAY_SIZE(synic->sint); i++) {
544 atomic64_set(&synic->sint[i], HV_SYNIC_SINT_MASKED);
545 atomic_set(&synic->sint_to_gsi[i], -1);
546 }
547 }
548
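/*
 * Hyper-V's partition reference time is expressed in 100ns units.  When a
 * valid TSC page is present, reference time = tsc * tsc_scale / 2^64 +
 * tsc_offset (the same formula the guest applies); otherwise fall back to
 * kvmclock nanoseconds divided by 100.
 */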
549 static u64 get_time_ref_counter(struct kvm *kvm)
550 {
551 struct kvm_hv *hv = to_kvm_hv(kvm);
552 struct kvm_vcpu *vcpu;
553 u64 tsc;
554
555 /*
556 * Fall back to get_kvmclock_ns() when TSC page hasn't been set up,
557 * is broken, disabled or being updated.
558 */
559 if (hv->hv_tsc_page_status != HV_TSC_PAGE_SET)
560 return div_u64(get_kvmclock_ns(kvm), 100);
561
562 vcpu = kvm_get_vcpu(kvm, 0);
563 tsc = kvm_read_l1_tsc(vcpu, rdtsc());
564 return mul_u64_u64_shr(tsc, hv->tsc_ref.tsc_scale, 64)
565 + hv->tsc_ref.tsc_offset;
566 }
567
568 static void stimer_mark_pending(struct kvm_vcpu_hv_stimer *stimer,
569 bool vcpu_kick)
570 {
571 struct kvm_vcpu *vcpu = hv_stimer_to_vcpu(stimer);
572
573 set_bit(stimer->index,
574 to_hv_vcpu(vcpu)->stimer_pending_bitmap);
575 kvm_make_request(KVM_REQ_HV_STIMER, vcpu);
576 if (vcpu_kick)
577 kvm_vcpu_kick(vcpu);
578 }
579
580 static void stimer_cleanup(struct kvm_vcpu_hv_stimer *stimer)
581 {
582 struct kvm_vcpu *vcpu = hv_stimer_to_vcpu(stimer);
583
584 trace_kvm_hv_stimer_cleanup(hv_stimer_to_vcpu(stimer)->vcpu_id,
585 stimer->index);
586
587 hrtimer_cancel(&stimer->timer);
588 clear_bit(stimer->index,
589 to_hv_vcpu(vcpu)->stimer_pending_bitmap);
590 stimer->msg_pending = false;
591 stimer->exp_time = 0;
592 }
593
594 static enum hrtimer_restart stimer_timer_callback(struct hrtimer *timer)
595 {
596 struct kvm_vcpu_hv_stimer *stimer;
597
598 stimer = container_of(timer, struct kvm_vcpu_hv_stimer, timer);
599 trace_kvm_hv_stimer_callback(hv_stimer_to_vcpu(stimer)->vcpu_id,
600 stimer->index);
601 stimer_mark_pending(stimer, true);
602
603 return HRTIMER_NORESTART;
604 }
605
606 /*
607 * stimer_start() assumptions:
608 * a) stimer->count is not equal to 0
609 * b) stimer->config has HV_STIMER_ENABLE flag
610 */
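/*
 * Worked example for the periodic case below: with count = 1,000,000
 * (100ms in 100ns units) and time_now exactly 2.5 periods past exp_time,
 * div64_u64_rem() yields remainder = 500,000, so the new exp_time lands
 * half a period after time_now, i.e. on the next period boundary measured
 * from the original expiration.  The hrtimer deadline is the 100ns delta
 * multiplied by 100 to convert to nanoseconds.
 */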
611 static int stimer_start(struct kvm_vcpu_hv_stimer *stimer)
612 {
613 u64 time_now;
614 ktime_t ktime_now;
615
616 time_now = get_time_ref_counter(hv_stimer_to_vcpu(stimer)->kvm);
617 ktime_now = ktime_get();
618
619 if (stimer->config.periodic) {
620 if (stimer->exp_time) {
621 if (time_now >= stimer->exp_time) {
622 u64 remainder;
623
624 div64_u64_rem(time_now - stimer->exp_time,
625 stimer->count, &remainder);
626 stimer->exp_time =
627 time_now + (stimer->count - remainder);
628 }
629 } else
630 stimer->exp_time = time_now + stimer->count;
631
632 trace_kvm_hv_stimer_start_periodic(
633 hv_stimer_to_vcpu(stimer)->vcpu_id,
634 stimer->index,
635 time_now, stimer->exp_time);
636
637 hrtimer_start(&stimer->timer,
638 ktime_add_ns(ktime_now,
639 100 * (stimer->exp_time - time_now)),
640 HRTIMER_MODE_ABS);
641 return 0;
642 }
643 stimer->exp_time = stimer->count;
644 if (time_now >= stimer->count) {
645 /*
646 * Expire timer according to Hypervisor Top-Level Functional
647 * specification v4(15.3.1):
648 * "If a one shot is enabled and the specified count is in
649 * the past, it will expire immediately."
650 */
651 stimer_mark_pending(stimer, false);
652 return 0;
653 }
654
655 trace_kvm_hv_stimer_start_one_shot(hv_stimer_to_vcpu(stimer)->vcpu_id,
656 stimer->index,
657 time_now, stimer->count);
658
659 hrtimer_start(&stimer->timer,
660 ktime_add_ns(ktime_now, 100 * (stimer->count - time_now)),
661 HRTIMER_MODE_ABS);
662 return 0;
663 }
664
665 static int stimer_set_config(struct kvm_vcpu_hv_stimer *stimer, u64 config,
666 bool host)
667 {
668 union hv_stimer_config new_config = {.as_uint64 = config},
669 old_config = {.as_uint64 = stimer->config.as_uint64};
670 struct kvm_vcpu *vcpu = hv_stimer_to_vcpu(stimer);
671 struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
672 struct kvm_vcpu_hv_synic *synic = to_hv_synic(vcpu);
673
674 if (!synic->active && (!host || config))
675 return 1;
676
677 if (unlikely(!host && hv_vcpu->enforce_cpuid && new_config.direct_mode &&
678 !(hv_vcpu->cpuid_cache.features_edx &
679 HV_STIMER_DIRECT_MODE_AVAILABLE)))
680 return 1;
681
682 trace_kvm_hv_stimer_set_config(hv_stimer_to_vcpu(stimer)->vcpu_id,
683 stimer->index, config, host);
684
685 stimer_cleanup(stimer);
686 if (old_config.enable &&
687 !new_config.direct_mode && new_config.sintx == 0)
688 new_config.enable = 0;
689 stimer->config.as_uint64 = new_config.as_uint64;
690
691 if (stimer->config.enable)
692 stimer_mark_pending(stimer, false);
693
694 return 0;
695 }
696
697 static int stimer_set_count(struct kvm_vcpu_hv_stimer *stimer, u64 count,
698 bool host)
699 {
700 struct kvm_vcpu *vcpu = hv_stimer_to_vcpu(stimer);
701 struct kvm_vcpu_hv_synic *synic = to_hv_synic(vcpu);
702
703 if (!synic->active && (!host || count))
704 return 1;
705
706 trace_kvm_hv_stimer_set_count(hv_stimer_to_vcpu(stimer)->vcpu_id,
707 stimer->index, count, host);
708
709 stimer_cleanup(stimer);
710 stimer->count = count;
711 if (stimer->count == 0)
712 stimer->config.enable = 0;
713 else if (stimer->config.auto_enable)
714 stimer->config.enable = 1;
715
716 if (stimer->config.enable)
717 stimer_mark_pending(stimer, false);
718
719 return 0;
720 }
721
722 static int stimer_get_config(struct kvm_vcpu_hv_stimer *stimer, u64 *pconfig)
723 {
724 *pconfig = stimer->config.as_uint64;
725 return 0;
726 }
727
728 static int stimer_get_count(struct kvm_vcpu_hv_stimer *stimer, u64 *pcount)
729 {
730 *pcount = stimer->count;
731 return 0;
732 }
733
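/*
 * Write a message into the guest's SIMP (message) page slot for the given
 * SINT and raise the corresponding interrupt.  If the slot is still
 * occupied (message_type != HVMSG_NONE), set msg_pending so the guest
 * signals EOM once it frees the slot, and return -EAGAIN so the caller can
 * retry (unless no_retry is set, e.g. for periodic timer ticks).
 */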
734 static int synic_deliver_msg(struct kvm_vcpu_hv_synic *synic, u32 sint,
735 struct hv_message *src_msg, bool no_retry)
736 {
737 struct kvm_vcpu *vcpu = hv_synic_to_vcpu(synic);
738 int msg_off = offsetof(struct hv_message_page, sint_message[sint]);
739 gfn_t msg_page_gfn;
740 struct hv_message_header hv_hdr;
741 int r;
742
743 if (!(synic->msg_page & HV_SYNIC_SIMP_ENABLE))
744 return -ENOENT;
745
746 msg_page_gfn = synic->msg_page >> PAGE_SHIFT;
747
748 /*
749 * Strictly following the spec-mandated ordering would assume setting
750 * .msg_pending before checking .message_type. However, this function
751 * is only called in vcpu context so the entire update is atomic from
752 * guest POV and thus the exact order here doesn't matter.
753 */
754 r = kvm_vcpu_read_guest_page(vcpu, msg_page_gfn, &hv_hdr.message_type,
755 msg_off + offsetof(struct hv_message,
756 header.message_type),
757 sizeof(hv_hdr.message_type));
758 if (r < 0)
759 return r;
760
761 if (hv_hdr.message_type != HVMSG_NONE) {
762 if (no_retry)
763 return 0;
764
765 hv_hdr.message_flags.msg_pending = 1;
766 r = kvm_vcpu_write_guest_page(vcpu, msg_page_gfn,
767 &hv_hdr.message_flags,
768 msg_off +
769 offsetof(struct hv_message,
770 header.message_flags),
771 sizeof(hv_hdr.message_flags));
772 if (r < 0)
773 return r;
774 return -EAGAIN;
775 }
776
777 r = kvm_vcpu_write_guest_page(vcpu, msg_page_gfn, src_msg, msg_off,
778 sizeof(src_msg->header) +
779 src_msg->header.payload_size);
780 if (r < 0)
781 return r;
782
783 r = synic_set_irq(synic, sint);
784 if (r < 0)
785 return r;
786 if (r == 0)
787 return -EFAULT;
788 return 0;
789 }
790
791 static int stimer_send_msg(struct kvm_vcpu_hv_stimer *stimer)
792 {
793 struct kvm_vcpu *vcpu = hv_stimer_to_vcpu(stimer);
794 struct hv_message *msg = &stimer->msg;
795 struct hv_timer_message_payload *payload =
796 (struct hv_timer_message_payload *)&msg->u.payload;
797
798 /*
799 * To avoid piling up periodic ticks, don't retry message
800 * delivery for them (within "lazy" lost ticks policy).
801 */
802 bool no_retry = stimer->config.periodic;
803
804 payload->expiration_time = stimer->exp_time;
805 payload->delivery_time = get_time_ref_counter(vcpu->kvm);
806 return synic_deliver_msg(to_hv_synic(vcpu),
807 stimer->config.sintx, msg,
808 no_retry);
809 }
810
811 static int stimer_notify_direct(struct kvm_vcpu_hv_stimer *stimer)
812 {
813 struct kvm_vcpu *vcpu = hv_stimer_to_vcpu(stimer);
814 struct kvm_lapic_irq irq = {
815 .delivery_mode = APIC_DM_FIXED,
816 .vector = stimer->config.apic_vector
817 };
818
819 if (lapic_in_kernel(vcpu))
820 return !kvm_apic_set_irq(vcpu, &irq, NULL);
821 return 0;
822 }
823
824 static void stimer_expiration(struct kvm_vcpu_hv_stimer *stimer)
825 {
826 int r, direct = stimer->config.direct_mode;
827
828 stimer->msg_pending = true;
829 if (!direct)
830 r = stimer_send_msg(stimer);
831 else
832 r = stimer_notify_direct(stimer);
833 trace_kvm_hv_stimer_expiration(hv_stimer_to_vcpu(stimer)->vcpu_id,
834 stimer->index, direct, r);
835 if (!r) {
836 stimer->msg_pending = false;
837 if (!(stimer->config.periodic))
838 stimer->config.enable = 0;
839 }
840 }
841
842 void kvm_hv_process_stimers(struct kvm_vcpu *vcpu)
843 {
844 struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
845 struct kvm_vcpu_hv_stimer *stimer;
846 u64 time_now, exp_time;
847 int i;
848
849 if (!hv_vcpu)
850 return;
851
852 for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++)
853 if (test_and_clear_bit(i, hv_vcpu->stimer_pending_bitmap)) {
854 stimer = &hv_vcpu->stimer[i];
855 if (stimer->config.enable) {
856 exp_time = stimer->exp_time;
857
858 if (exp_time) {
859 time_now =
860 get_time_ref_counter(vcpu->kvm);
861 if (time_now >= exp_time)
862 stimer_expiration(stimer);
863 }
864
865 if ((stimer->config.enable) &&
866 stimer->count) {
867 if (!stimer->msg_pending)
868 stimer_start(stimer);
869 } else
870 stimer_cleanup(stimer);
871 }
872 }
873 }
874
875 void kvm_hv_vcpu_uninit(struct kvm_vcpu *vcpu)
876 {
877 struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
878 int i;
879
880 if (!hv_vcpu)
881 return;
882
883 for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++)
884 stimer_cleanup(&hv_vcpu->stimer[i]);
885
886 kfree(hv_vcpu);
887 vcpu->arch.hyperv = NULL;
888 }
889
890 bool kvm_hv_assist_page_enabled(struct kvm_vcpu *vcpu)
891 {
892 struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
893
894 if (!hv_vcpu)
895 return false;
896
897 if (!(hv_vcpu->hv_vapic & HV_X64_MSR_VP_ASSIST_PAGE_ENABLE))
898 return false;
899 return vcpu->arch.pv_eoi.msr_val & KVM_MSR_ENABLED;
900 }
901 EXPORT_SYMBOL_GPL(kvm_hv_assist_page_enabled);
902
903 bool kvm_hv_get_assist_page(struct kvm_vcpu *vcpu,
904 struct hv_vp_assist_page *assist_page)
905 {
906 if (!kvm_hv_assist_page_enabled(vcpu))
907 return false;
908 return !kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data,
909 assist_page, sizeof(*assist_page));
910 }
911 EXPORT_SYMBOL_GPL(kvm_hv_get_assist_page);
912
913 static void stimer_prepare_msg(struct kvm_vcpu_hv_stimer *stimer)
914 {
915 struct hv_message *msg = &stimer->msg;
916 struct hv_timer_message_payload *payload =
917 (struct hv_timer_message_payload *)&msg->u.payload;
918
919 memset(&msg->header, 0, sizeof(msg->header));
920 msg->header.message_type = HVMSG_TIMER_EXPIRED;
921 msg->header.payload_size = sizeof(*payload);
922
923 payload->timer_index = stimer->index;
924 payload->expiration_time = 0;
925 payload->delivery_time = 0;
926 }
927
928 static void stimer_init(struct kvm_vcpu_hv_stimer *stimer, int timer_index)
929 {
930 memset(stimer, 0, sizeof(*stimer));
931 stimer->index = timer_index;
932 hrtimer_init(&stimer->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
933 stimer->timer.function = stimer_timer_callback;
934 stimer_prepare_msg(stimer);
935 }
936
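/*
 * Per-vCPU Hyper-V context is allocated lazily, e.g. on the first Hyper-V
 * MSR access or when SynIC is activated, rather than at vCPU creation.
 * The VP index defaults to the KVM vCPU index and may later be changed by
 * the host via HV_X64_MSR_VP_INDEX.
 */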
937 static int kvm_hv_vcpu_init(struct kvm_vcpu *vcpu)
938 {
939 struct kvm_vcpu_hv *hv_vcpu;
940 int i;
941
942 hv_vcpu = kzalloc(sizeof(struct kvm_vcpu_hv), GFP_KERNEL_ACCOUNT);
943 if (!hv_vcpu)
944 return -ENOMEM;
945
946 vcpu->arch.hyperv = hv_vcpu;
947 hv_vcpu->vcpu = vcpu;
948
949 synic_init(&hv_vcpu->synic);
950
951 bitmap_zero(hv_vcpu->stimer_pending_bitmap, HV_SYNIC_STIMER_COUNT);
952 for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++)
953 stimer_init(&hv_vcpu->stimer[i], i);
954
955 hv_vcpu->vp_index = vcpu->vcpu_idx;
956
957 return 0;
958 }
959
960 int kvm_hv_activate_synic(struct kvm_vcpu *vcpu, bool dont_zero_synic_pages)
961 {
962 struct kvm_vcpu_hv_synic *synic;
963 int r;
964
965 if (!to_hv_vcpu(vcpu)) {
966 r = kvm_hv_vcpu_init(vcpu);
967 if (r)
968 return r;
969 }
970
971 synic = to_hv_synic(vcpu);
972
973 synic->active = true;
974 synic->dont_zero_synic_pages = dont_zero_synic_pages;
975 synic->control = HV_SYNIC_CONTROL_ENABLE;
976 return 0;
977 }
978
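/*
 * Partition-wide MSRs live in struct kvm_hv and are shared by all vCPUs;
 * kvm_hv_set_msr_common()/kvm_hv_get_msr_common() serialize access to them
 * with hv->hv_lock.  Everything else is per-vCPU state.
 */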
979 static bool kvm_hv_msr_partition_wide(u32 msr)
980 {
981 bool r = false;
982
983 switch (msr) {
984 case HV_X64_MSR_GUEST_OS_ID:
985 case HV_X64_MSR_HYPERCALL:
986 case HV_X64_MSR_REFERENCE_TSC:
987 case HV_X64_MSR_TIME_REF_COUNT:
988 case HV_X64_MSR_CRASH_CTL:
989 case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
990 case HV_X64_MSR_RESET:
991 case HV_X64_MSR_REENLIGHTENMENT_CONTROL:
992 case HV_X64_MSR_TSC_EMULATION_CONTROL:
993 case HV_X64_MSR_TSC_EMULATION_STATUS:
994 case HV_X64_MSR_SYNDBG_OPTIONS:
995 case HV_X64_MSR_SYNDBG_CONTROL ... HV_X64_MSR_SYNDBG_PENDING_BUFFER:
996 r = true;
997 break;
998 }
999
1000 return r;
1001 }
1002
1003 static int kvm_hv_msr_get_crash_data(struct kvm *kvm, u32 index, u64 *pdata)
1004 {
1005 struct kvm_hv *hv = to_kvm_hv(kvm);
1006 size_t size = ARRAY_SIZE(hv->hv_crash_param);
1007
1008 if (WARN_ON_ONCE(index >= size))
1009 return -EINVAL;
1010
1011 *pdata = hv->hv_crash_param[array_index_nospec(index, size)];
1012 return 0;
1013 }
1014
1015 static int kvm_hv_msr_get_crash_ctl(struct kvm *kvm, u64 *pdata)
1016 {
1017 struct kvm_hv *hv = to_kvm_hv(kvm);
1018
1019 *pdata = hv->hv_crash_ctl;
1020 return 0;
1021 }
1022
1023 static int kvm_hv_msr_set_crash_ctl(struct kvm *kvm, u64 data)
1024 {
1025 struct kvm_hv *hv = to_kvm_hv(kvm);
1026
1027 hv->hv_crash_ctl = data & HV_CRASH_CTL_CRASH_NOTIFY;
1028
1029 return 0;
1030 }
1031
1032 static int kvm_hv_msr_set_crash_data(struct kvm *kvm, u32 index, u64 data)
1033 {
1034 struct kvm_hv *hv = to_kvm_hv(kvm);
1035 size_t size = ARRAY_SIZE(hv->hv_crash_param);
1036
1037 if (WARN_ON_ONCE(index >= size))
1038 return -EINVAL;
1039
1040 hv->hv_crash_param[array_index_nospec(index, size)] = data;
1041 return 0;
1042 }
1043
1044 /*
1045 * The kvmclock and Hyper-V TSC page use similar formulas, and converting
1046 * between them is possible:
1047 *
1048 * kvmclock formula:
1049 * nsec = (ticks - tsc_timestamp) * tsc_to_system_mul * 2^(tsc_shift-32)
1050 * + system_time
1051 *
1052 * Hyper-V formula:
1053 * nsec/100 = ticks * scale / 2^64 + offset
1054 *
1055 * When tsc_timestamp = system_time = 0, offset is zero in the Hyper-V formula.
1056 * By dividing the kvmclock formula by 100 and equating what's left we get:
1057 * ticks * scale / 2^64 = ticks * tsc_to_system_mul * 2^(tsc_shift-32) / 100
1058 * scale / 2^64 = tsc_to_system_mul * 2^(tsc_shift-32) / 100
1059 * scale = tsc_to_system_mul * 2^(32+tsc_shift) / 100
1060 *
1061 * Now expand the kvmclock formula and divide by 100:
1062 * nsec = ticks * tsc_to_system_mul * 2^(tsc_shift-32)
1063 * - tsc_timestamp * tsc_to_system_mul * 2^(tsc_shift-32)
1064 * + system_time
1065 * nsec/100 = ticks * tsc_to_system_mul * 2^(tsc_shift-32) / 100
1066 * - tsc_timestamp * tsc_to_system_mul * 2^(tsc_shift-32) / 100
1067 * + system_time / 100
1068 *
1069 * Replace tsc_to_system_mul * 2^(tsc_shift-32) / 100 by scale / 2^64:
1070 * nsec/100 = ticks * scale / 2^64
1071 * - tsc_timestamp * scale / 2^64
1072 * + system_time / 100
1073 *
1074 * Equate with the Hyper-V formula so that ticks * scale / 2^64 cancels out:
1075 * offset = system_time / 100 - tsc_timestamp * scale / 2^64
1076 *
1077 * These two equivalencies are implemented in this function.
1078 */
1079 static bool compute_tsc_page_parameters(struct pvclock_vcpu_time_info *hv_clock,
1080 struct ms_hyperv_tsc_page *tsc_ref)
1081 {
1082 u64 max_mul;
1083
1084 if (!(hv_clock->flags & PVCLOCK_TSC_STABLE_BIT))
1085 return false;
1086
1087 /*
1088 * check if scale would overflow, if so we use the time ref counter
1089 * tsc_to_system_mul * 2^(tsc_shift+32) / 100 >= 2^64
1090 * tsc_to_system_mul / 100 >= 2^(32-tsc_shift)
1091 * tsc_to_system_mul >= 100 * 2^(32-tsc_shift)
1092 */
1093 max_mul = 100ull << (32 - hv_clock->tsc_shift);
1094 if (hv_clock->tsc_to_system_mul >= max_mul)
1095 return false;
1096
1097 /*
1098 * Otherwise compute the scale and offset according to the formulas
1099 * derived above.
1100 */
1101 tsc_ref->tsc_scale =
1102 mul_u64_u32_div(1ULL << (32 + hv_clock->tsc_shift),
1103 hv_clock->tsc_to_system_mul,
1104 100);
1105
1106 tsc_ref->tsc_offset = hv_clock->system_time;
1107 do_div(tsc_ref->tsc_offset, 100);
1108 tsc_ref->tsc_offset -=
1109 mul_u64_u64_shr(hv_clock->tsc_timestamp, tsc_ref->tsc_scale, 64);
1110 return true;
1111 }
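/*
 * Sanity check of the formulas above: for a kvmclock where one TSC tick
 * equals one nanosecond (tsc_to_system_mul = 1 << 31, tsc_shift = 1), the
 * scale becomes 2^31 * 2^33 / 100 = 2^64 / 100, so ticks * scale / 2^64 =
 * ticks / 100, i.e. reference time correctly advances in 100ns units.
 */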
1112
1113 /*
1114 * Don't touch TSC page values if the guest has opted for TSC emulation after
1115 * migration. KVM doesn't fully support reenlightenment notifications and TSC
1116 * access emulation and Hyper-V is known to expect the values in TSC page to
1117 * stay constant before TSC access emulation is disabled from guest side
1118 * (HV_X64_MSR_TSC_EMULATION_STATUS). KVM userspace is expected to preserve TSC
1119 * frequency and guest visible TSC value across migration (and prevent it when
1120 * TSC scaling is unsupported).
1121 */
1122 static inline bool tsc_page_update_unsafe(struct kvm_hv *hv)
1123 {
1124 return (hv->hv_tsc_page_status != HV_TSC_PAGE_GUEST_CHANGED) &&
1125 hv->hv_tsc_emulation_control;
1126 }
1127
1128 void kvm_hv_setup_tsc_page(struct kvm *kvm,
1129 struct pvclock_vcpu_time_info *hv_clock)
1130 {
1131 struct kvm_hv *hv = to_kvm_hv(kvm);
1132 u32 tsc_seq;
1133 u64 gfn;
1134
1135 BUILD_BUG_ON(sizeof(tsc_seq) != sizeof(hv->tsc_ref.tsc_sequence));
1136 BUILD_BUG_ON(offsetof(struct ms_hyperv_tsc_page, tsc_sequence) != 0);
1137
1138 mutex_lock(&hv->hv_lock);
1139
1140 if (hv->hv_tsc_page_status == HV_TSC_PAGE_BROKEN ||
1141 hv->hv_tsc_page_status == HV_TSC_PAGE_SET ||
1142 hv->hv_tsc_page_status == HV_TSC_PAGE_UNSET)
1143 goto out_unlock;
1144
1145 if (!(hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE))
1146 goto out_unlock;
1147
1148 gfn = hv->hv_tsc_page >> HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT;
1149 /*
1150 * Because the TSC parameters only vary when there is a
1151 * change in the master clock, do not bother with caching.
1152 */
1153 if (unlikely(kvm_read_guest(kvm, gfn_to_gpa(gfn),
1154 &tsc_seq, sizeof(tsc_seq))))
1155 goto out_err;
1156
1157 if (tsc_seq && tsc_page_update_unsafe(hv)) {
1158 if (kvm_read_guest(kvm, gfn_to_gpa(gfn), &hv->tsc_ref, sizeof(hv->tsc_ref)))
1159 goto out_err;
1160
1161 hv->hv_tsc_page_status = HV_TSC_PAGE_SET;
1162 goto out_unlock;
1163 }
1164
1165 /*
1166 * While we're computing and writing the parameters, force the
1167 * guest to use the time reference count MSR.
1168 */
1169 hv->tsc_ref.tsc_sequence = 0;
1170 if (kvm_write_guest(kvm, gfn_to_gpa(gfn),
1171 &hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence)))
1172 goto out_err;
1173
1174 if (!compute_tsc_page_parameters(hv_clock, &hv->tsc_ref))
1175 goto out_err;
1176
1177 /* Ensure sequence is zero before writing the rest of the struct. */
1178 smp_wmb();
1179 if (kvm_write_guest(kvm, gfn_to_gpa(gfn), &hv->tsc_ref, sizeof(hv->tsc_ref)))
1180 goto out_err;
1181
1182 /*
1183 * Now switch to the TSC page mechanism by writing the sequence.
1184 */
1185 tsc_seq++;
1186 if (tsc_seq == 0xFFFFFFFF || tsc_seq == 0)
1187 tsc_seq = 1;
1188
1189 /* Write the struct entirely before the non-zero sequence. */
1190 smp_wmb();
1191
1192 hv->tsc_ref.tsc_sequence = tsc_seq;
1193 if (kvm_write_guest(kvm, gfn_to_gpa(gfn),
1194 &hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence)))
1195 goto out_err;
1196
1197 hv->hv_tsc_page_status = HV_TSC_PAGE_SET;
1198 goto out_unlock;
1199
1200 out_err:
1201 hv->hv_tsc_page_status = HV_TSC_PAGE_BROKEN;
1202 out_unlock:
1203 mutex_unlock(&hv->hv_lock);
1204 }
1205
1206 void kvm_hv_request_tsc_page_update(struct kvm *kvm)
1207 {
1208 struct kvm_hv *hv = to_kvm_hv(kvm);
1209
1210 mutex_lock(&hv->hv_lock);
1211
1212 if (hv->hv_tsc_page_status == HV_TSC_PAGE_SET &&
1213 !tsc_page_update_unsafe(hv))
1214 hv->hv_tsc_page_status = HV_TSC_PAGE_HOST_CHANGED;
1215
1216 mutex_unlock(&hv->hv_lock);
1217 }
1218
1219 static bool hv_check_msr_access(struct kvm_vcpu_hv *hv_vcpu, u32 msr)
1220 {
1221 if (!hv_vcpu->enforce_cpuid)
1222 return true;
1223
1224 switch (msr) {
1225 case HV_X64_MSR_GUEST_OS_ID:
1226 case HV_X64_MSR_HYPERCALL:
1227 return hv_vcpu->cpuid_cache.features_eax &
1228 HV_MSR_HYPERCALL_AVAILABLE;
1229 case HV_X64_MSR_VP_RUNTIME:
1230 return hv_vcpu->cpuid_cache.features_eax &
1231 HV_MSR_VP_RUNTIME_AVAILABLE;
1232 case HV_X64_MSR_TIME_REF_COUNT:
1233 return hv_vcpu->cpuid_cache.features_eax &
1234 HV_MSR_TIME_REF_COUNT_AVAILABLE;
1235 case HV_X64_MSR_VP_INDEX:
1236 return hv_vcpu->cpuid_cache.features_eax &
1237 HV_MSR_VP_INDEX_AVAILABLE;
1238 case HV_X64_MSR_RESET:
1239 return hv_vcpu->cpuid_cache.features_eax &
1240 HV_MSR_RESET_AVAILABLE;
1241 case HV_X64_MSR_REFERENCE_TSC:
1242 return hv_vcpu->cpuid_cache.features_eax &
1243 HV_MSR_REFERENCE_TSC_AVAILABLE;
1244 case HV_X64_MSR_SCONTROL:
1245 case HV_X64_MSR_SVERSION:
1246 case HV_X64_MSR_SIEFP:
1247 case HV_X64_MSR_SIMP:
1248 case HV_X64_MSR_EOM:
1249 case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
1250 return hv_vcpu->cpuid_cache.features_eax &
1251 HV_MSR_SYNIC_AVAILABLE;
1252 case HV_X64_MSR_STIMER0_CONFIG:
1253 case HV_X64_MSR_STIMER1_CONFIG:
1254 case HV_X64_MSR_STIMER2_CONFIG:
1255 case HV_X64_MSR_STIMER3_CONFIG:
1256 case HV_X64_MSR_STIMER0_COUNT:
1257 case HV_X64_MSR_STIMER1_COUNT:
1258 case HV_X64_MSR_STIMER2_COUNT:
1259 case HV_X64_MSR_STIMER3_COUNT:
1260 return hv_vcpu->cpuid_cache.features_eax &
1261 HV_MSR_SYNTIMER_AVAILABLE;
1262 case HV_X64_MSR_EOI:
1263 case HV_X64_MSR_ICR:
1264 case HV_X64_MSR_TPR:
1265 case HV_X64_MSR_VP_ASSIST_PAGE:
1266 return hv_vcpu->cpuid_cache.features_eax &
1267 HV_MSR_APIC_ACCESS_AVAILABLE;
1268 break;
1269 case HV_X64_MSR_TSC_FREQUENCY:
1270 case HV_X64_MSR_APIC_FREQUENCY:
1271 return hv_vcpu->cpuid_cache.features_eax &
1272 HV_ACCESS_FREQUENCY_MSRS;
1273 case HV_X64_MSR_REENLIGHTENMENT_CONTROL:
1274 case HV_X64_MSR_TSC_EMULATION_CONTROL:
1275 case HV_X64_MSR_TSC_EMULATION_STATUS:
1276 return hv_vcpu->cpuid_cache.features_eax &
1277 HV_ACCESS_REENLIGHTENMENT;
1278 case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
1279 case HV_X64_MSR_CRASH_CTL:
1280 return hv_vcpu->cpuid_cache.features_edx &
1281 HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE;
1282 case HV_X64_MSR_SYNDBG_OPTIONS:
1283 case HV_X64_MSR_SYNDBG_CONTROL ... HV_X64_MSR_SYNDBG_PENDING_BUFFER:
1284 return hv_vcpu->cpuid_cache.features_edx &
1285 HV_FEATURE_DEBUG_MSRS_AVAILABLE;
1286 default:
1287 break;
1288 }
1289
1290 return false;
1291 }
1292
1293 static int kvm_hv_set_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data,
1294 bool host)
1295 {
1296 struct kvm *kvm = vcpu->kvm;
1297 struct kvm_hv *hv = to_kvm_hv(kvm);
1298
1299 if (unlikely(!host && !hv_check_msr_access(to_hv_vcpu(vcpu), msr)))
1300 return 1;
1301
1302 switch (msr) {
1303 case HV_X64_MSR_GUEST_OS_ID:
1304 hv->hv_guest_os_id = data;
1305 /* setting guest os id to zero disables hypercall page */
1306 if (!hv->hv_guest_os_id)
1307 hv->hv_hypercall &= ~HV_X64_MSR_HYPERCALL_ENABLE;
1308 break;
1309 case HV_X64_MSR_HYPERCALL: {
1310 u8 instructions[9];
1311 int i = 0;
1312 u64 addr;
1313
1314 /* if guest os id is not set hypercall should remain disabled */
1315 if (!hv->hv_guest_os_id)
1316 break;
1317 if (!(data & HV_X64_MSR_HYPERCALL_ENABLE)) {
1318 hv->hv_hypercall = data;
1319 break;
1320 }
1321
1322 /*
1323 * If Xen and Hyper-V hypercalls are both enabled, disambiguate
1324 * the same way Xen itself does, by setting bit 31 of EAX, which is
1325 * RsvdZ in the 32-bit Hyper-V hypercall ABI and is simply going to
1326 * be clobbered on 64-bit.
1327 */
1328 if (kvm_xen_hypercall_enabled(kvm)) {
1329 /* orl $0x80000000, %eax */
1330 instructions[i++] = 0x0d;
1331 instructions[i++] = 0x00;
1332 instructions[i++] = 0x00;
1333 instructions[i++] = 0x00;
1334 instructions[i++] = 0x80;
1335 }
1336
1337 /* vmcall/vmmcall */
1338 static_call(kvm_x86_patch_hypercall)(vcpu, instructions + i);
1339 i += 3;
1340
1341 /* ret */
1342 ((unsigned char *)instructions)[i++] = 0xc3;
1343
1344 addr = data & HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_MASK;
1345 if (kvm_vcpu_write_guest(vcpu, addr, instructions, i))
1346 return 1;
1347 hv->hv_hypercall = data;
1348 break;
1349 }
1350 case HV_X64_MSR_REFERENCE_TSC:
1351 hv->hv_tsc_page = data;
1352 if (hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE) {
1353 if (!host)
1354 hv->hv_tsc_page_status = HV_TSC_PAGE_GUEST_CHANGED;
1355 else
1356 hv->hv_tsc_page_status = HV_TSC_PAGE_HOST_CHANGED;
1357 kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);
1358 } else {
1359 hv->hv_tsc_page_status = HV_TSC_PAGE_UNSET;
1360 }
1361 break;
1362 case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
1363 return kvm_hv_msr_set_crash_data(kvm,
1364 msr - HV_X64_MSR_CRASH_P0,
1365 data);
1366 case HV_X64_MSR_CRASH_CTL:
1367 if (host)
1368 return kvm_hv_msr_set_crash_ctl(kvm, data);
1369
1370 if (data & HV_CRASH_CTL_CRASH_NOTIFY) {
1371 vcpu_debug(vcpu, "hv crash (0x%llx 0x%llx 0x%llx 0x%llx 0x%llx)\n",
1372 hv->hv_crash_param[0],
1373 hv->hv_crash_param[1],
1374 hv->hv_crash_param[2],
1375 hv->hv_crash_param[3],
1376 hv->hv_crash_param[4]);
1377
1378 /* Send notification about crash to user space */
1379 kvm_make_request(KVM_REQ_HV_CRASH, vcpu);
1380 }
1381 break;
1382 case HV_X64_MSR_RESET:
1383 if (data == 1) {
1384 vcpu_debug(vcpu, "hyper-v reset requested\n");
1385 kvm_make_request(KVM_REQ_HV_RESET, vcpu);
1386 }
1387 break;
1388 case HV_X64_MSR_REENLIGHTENMENT_CONTROL:
1389 hv->hv_reenlightenment_control = data;
1390 break;
1391 case HV_X64_MSR_TSC_EMULATION_CONTROL:
1392 hv->hv_tsc_emulation_control = data;
1393 break;
1394 case HV_X64_MSR_TSC_EMULATION_STATUS:
1395 if (data && !host)
1396 return 1;
1397
1398 hv->hv_tsc_emulation_status = data;
1399 break;
1400 case HV_X64_MSR_TIME_REF_COUNT:
1401 /* read-only, but still ignore it if host-initiated */
1402 if (!host)
1403 return 1;
1404 break;
1405 case HV_X64_MSR_SYNDBG_OPTIONS:
1406 case HV_X64_MSR_SYNDBG_CONTROL ... HV_X64_MSR_SYNDBG_PENDING_BUFFER:
1407 return syndbg_set_msr(vcpu, msr, data, host);
1408 default:
1409 vcpu_unimpl(vcpu, "Hyper-V unhandled wrmsr: 0x%x data 0x%llx\n",
1410 msr, data);
1411 return 1;
1412 }
1413 return 0;
1414 }
1415
1416 /* Calculate cpu time spent by current task in 100ns units */
1417 static u64 current_task_runtime_100ns(void)
1418 {
1419 u64 utime, stime;
1420
1421 task_cputime_adjusted(current, &utime, &stime);
1422
1423 return div_u64(utime + stime, 100);
1424 }
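/*
 * task_cputime_adjusted() reports utime/stime in nanoseconds, so dividing
 * by 100 yields HV_X64_MSR_VP_RUNTIME's 100ns units (e.g. 1ms of CPU time
 * becomes 10,000 units).
 */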
1425
1426 static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
1427 {
1428 struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
1429
1430 if (unlikely(!host && !hv_check_msr_access(hv_vcpu, msr)))
1431 return 1;
1432
1433 switch (msr) {
1434 case HV_X64_MSR_VP_INDEX: {
1435 struct kvm_hv *hv = to_kvm_hv(vcpu->kvm);
1436 u32 new_vp_index = (u32)data;
1437
1438 if (!host || new_vp_index >= KVM_MAX_VCPUS)
1439 return 1;
1440
1441 if (new_vp_index == hv_vcpu->vp_index)
1442 return 0;
1443
1444 /*
1445 * The VP index is initialized to vcpu_idx by kvm_hv_vcpu_init(), so
1446 * they initially match. Now that the VP index is changing, adjust
1447 * num_mismatched_vp_indexes if it now matches or no longer matches
1448 * vcpu_idx.
1449 */
1450 if (hv_vcpu->vp_index == vcpu->vcpu_idx)
1451 atomic_inc(&hv->num_mismatched_vp_indexes);
1452 else if (new_vp_index == vcpu->vcpu_idx)
1453 atomic_dec(&hv->num_mismatched_vp_indexes);
1454
1455 hv_vcpu->vp_index = new_vp_index;
1456 break;
1457 }
1458 case HV_X64_MSR_VP_ASSIST_PAGE: {
1459 u64 gfn;
1460 unsigned long addr;
1461
1462 if (!(data & HV_X64_MSR_VP_ASSIST_PAGE_ENABLE)) {
1463 hv_vcpu->hv_vapic = data;
1464 if (kvm_lapic_set_pv_eoi(vcpu, 0, 0))
1465 return 1;
1466 break;
1467 }
1468 gfn = data >> HV_X64_MSR_VP_ASSIST_PAGE_ADDRESS_SHIFT;
1469 addr = kvm_vcpu_gfn_to_hva(vcpu, gfn);
1470 if (kvm_is_error_hva(addr))
1471 return 1;
1472
1473 /*
1474 * Clear only the apic_assist portion of struct hv_vp_assist_page;
1475 * there can be valuable data in the rest which needs to be preserved,
1476 * e.g. on migration.
1477 */
1478 if (__put_user(0, (u32 __user *)addr))
1479 return 1;
1480 hv_vcpu->hv_vapic = data;
1481 kvm_vcpu_mark_page_dirty(vcpu, gfn);
1482 if (kvm_lapic_set_pv_eoi(vcpu,
1483 gfn_to_gpa(gfn) | KVM_MSR_ENABLED,
1484 sizeof(struct hv_vp_assist_page)))
1485 return 1;
1486 break;
1487 }
1488 case HV_X64_MSR_EOI:
1489 return kvm_hv_vapic_msr_write(vcpu, APIC_EOI, data);
1490 case HV_X64_MSR_ICR:
1491 return kvm_hv_vapic_msr_write(vcpu, APIC_ICR, data);
1492 case HV_X64_MSR_TPR:
1493 return kvm_hv_vapic_msr_write(vcpu, APIC_TASKPRI, data);
1494 case HV_X64_MSR_VP_RUNTIME:
1495 if (!host)
1496 return 1;
1497 hv_vcpu->runtime_offset = data - current_task_runtime_100ns();
1498 break;
1499 case HV_X64_MSR_SCONTROL:
1500 case HV_X64_MSR_SVERSION:
1501 case HV_X64_MSR_SIEFP:
1502 case HV_X64_MSR_SIMP:
1503 case HV_X64_MSR_EOM:
1504 case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
1505 return synic_set_msr(to_hv_synic(vcpu), msr, data, host);
1506 case HV_X64_MSR_STIMER0_CONFIG:
1507 case HV_X64_MSR_STIMER1_CONFIG:
1508 case HV_X64_MSR_STIMER2_CONFIG:
1509 case HV_X64_MSR_STIMER3_CONFIG: {
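/*
 * The synthetic timer MSRs interleave (STIMER0_CONFIG, STIMER0_COUNT,
 * STIMER1_CONFIG, ...), so the offset from STIMER0_CONFIG divided by
 * two gives the timer index.
 */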
1510 int timer_index = (msr - HV_X64_MSR_STIMER0_CONFIG)/2;
1511
1512 return stimer_set_config(to_hv_stimer(vcpu, timer_index),
1513 data, host);
1514 }
1515 case HV_X64_MSR_STIMER0_COUNT:
1516 case HV_X64_MSR_STIMER1_COUNT:
1517 case HV_X64_MSR_STIMER2_COUNT:
1518 case HV_X64_MSR_STIMER3_COUNT: {
1519 int timer_index = (msr - HV_X64_MSR_STIMER0_COUNT)/2;
1520
1521 return stimer_set_count(to_hv_stimer(vcpu, timer_index),
1522 data, host);
1523 }
1524 case HV_X64_MSR_TSC_FREQUENCY:
1525 case HV_X64_MSR_APIC_FREQUENCY:
1526 /* read-only, but still ignore it if host-initiated */
1527 if (!host)
1528 return 1;
1529 break;
1530 default:
1531 vcpu_unimpl(vcpu, "Hyper-V unhandled wrmsr: 0x%x data 0x%llx\n",
1532 msr, data);
1533 return 1;
1534 }
1535
1536 return 0;
1537 }
1538
1539 static int kvm_hv_get_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata,
1540 bool host)
1541 {
1542 u64 data = 0;
1543 struct kvm *kvm = vcpu->kvm;
1544 struct kvm_hv *hv = to_kvm_hv(kvm);
1545
1546 if (unlikely(!host && !hv_check_msr_access(to_hv_vcpu(vcpu), msr)))
1547 return 1;
1548
1549 switch (msr) {
1550 case HV_X64_MSR_GUEST_OS_ID:
1551 data = hv->hv_guest_os_id;
1552 break;
1553 case HV_X64_MSR_HYPERCALL:
1554 data = hv->hv_hypercall;
1555 break;
1556 case HV_X64_MSR_TIME_REF_COUNT:
1557 data = get_time_ref_counter(kvm);
1558 break;
1559 case HV_X64_MSR_REFERENCE_TSC:
1560 data = hv->hv_tsc_page;
1561 break;
1562 case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
1563 return kvm_hv_msr_get_crash_data(kvm,
1564 msr - HV_X64_MSR_CRASH_P0,
1565 pdata);
1566 case HV_X64_MSR_CRASH_CTL:
1567 return kvm_hv_msr_get_crash_ctl(kvm, pdata);
1568 case HV_X64_MSR_RESET:
1569 data = 0;
1570 break;
1571 case HV_X64_MSR_REENLIGHTENMENT_CONTROL:
1572 data = hv->hv_reenlightenment_control;
1573 break;
1574 case HV_X64_MSR_TSC_EMULATION_CONTROL:
1575 data = hv->hv_tsc_emulation_control;
1576 break;
1577 case HV_X64_MSR_TSC_EMULATION_STATUS:
1578 data = hv->hv_tsc_emulation_status;
1579 break;
1580 case HV_X64_MSR_SYNDBG_OPTIONS:
1581 case HV_X64_MSR_SYNDBG_CONTROL ... HV_X64_MSR_SYNDBG_PENDING_BUFFER:
1582 return syndbg_get_msr(vcpu, msr, pdata, host);
1583 default:
1584 vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
1585 return 1;
1586 }
1587
1588 *pdata = data;
1589 return 0;
1590 }
1591
1592 static int kvm_hv_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata,
1593 bool host)
1594 {
1595 u64 data = 0;
1596 struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
1597
1598 if (unlikely(!host && !hv_check_msr_access(hv_vcpu, msr)))
1599 return 1;
1600
1601 switch (msr) {
1602 case HV_X64_MSR_VP_INDEX:
1603 data = hv_vcpu->vp_index;
1604 break;
1605 case HV_X64_MSR_EOI:
1606 return kvm_hv_vapic_msr_read(vcpu, APIC_EOI, pdata);
1607 case HV_X64_MSR_ICR:
1608 return kvm_hv_vapic_msr_read(vcpu, APIC_ICR, pdata);
1609 case HV_X64_MSR_TPR:
1610 return kvm_hv_vapic_msr_read(vcpu, APIC_TASKPRI, pdata);
1611 case HV_X64_MSR_VP_ASSIST_PAGE:
1612 data = hv_vcpu->hv_vapic;
1613 break;
1614 case HV_X64_MSR_VP_RUNTIME:
1615 data = current_task_runtime_100ns() + hv_vcpu->runtime_offset;
1616 break;
1617 case HV_X64_MSR_SCONTROL:
1618 case HV_X64_MSR_SVERSION:
1619 case HV_X64_MSR_SIEFP:
1620 case HV_X64_MSR_SIMP:
1621 case HV_X64_MSR_EOM:
1622 case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
1623 return synic_get_msr(to_hv_synic(vcpu), msr, pdata, host);
1624 case HV_X64_MSR_STIMER0_CONFIG:
1625 case HV_X64_MSR_STIMER1_CONFIG:
1626 case HV_X64_MSR_STIMER2_CONFIG:
1627 case HV_X64_MSR_STIMER3_CONFIG: {
1628 int timer_index = (msr - HV_X64_MSR_STIMER0_CONFIG)/2;
1629
1630 return stimer_get_config(to_hv_stimer(vcpu, timer_index),
1631 pdata);
1632 }
1633 case HV_X64_MSR_STIMER0_COUNT:
1634 case HV_X64_MSR_STIMER1_COUNT:
1635 case HV_X64_MSR_STIMER2_COUNT:
1636 case HV_X64_MSR_STIMER3_COUNT: {
1637 int timer_index = (msr - HV_X64_MSR_STIMER0_COUNT)/2;
1638
1639 return stimer_get_count(to_hv_stimer(vcpu, timer_index),
1640 pdata);
1641 }
1642 case HV_X64_MSR_TSC_FREQUENCY:
1643 data = (u64)vcpu->arch.virtual_tsc_khz * 1000;
1644 break;
1645 case HV_X64_MSR_APIC_FREQUENCY:
1646 data = APIC_BUS_FREQUENCY;
1647 break;
1648 default:
1649 vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
1650 return 1;
1651 }
1652 *pdata = data;
1653 return 0;
1654 }
1655
1656 int kvm_hv_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
1657 {
1658 struct kvm_hv *hv = to_kvm_hv(vcpu->kvm);
1659
1660 if (!host && !vcpu->arch.hyperv_enabled)
1661 return 1;
1662
1663 if (!to_hv_vcpu(vcpu)) {
1664 if (kvm_hv_vcpu_init(vcpu))
1665 return 1;
1666 }
1667
1668 if (kvm_hv_msr_partition_wide(msr)) {
1669 int r;
1670
1671 mutex_lock(&hv->hv_lock);
1672 r = kvm_hv_set_msr_pw(vcpu, msr, data, host);
1673 mutex_unlock(&hv->hv_lock);
1674 return r;
1675 } else
1676 return kvm_hv_set_msr(vcpu, msr, data, host);
1677 }
1678
1679 int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host)
1680 {
1681 struct kvm_hv *hv = to_kvm_hv(vcpu->kvm);
1682
1683 if (!host && !vcpu->arch.hyperv_enabled)
1684 return 1;
1685
1686 if (!to_hv_vcpu(vcpu)) {
1687 if (kvm_hv_vcpu_init(vcpu))
1688 return 1;
1689 }
1690
1691 if (kvm_hv_msr_partition_wide(msr)) {
1692 int r;
1693
1694 mutex_lock(&hv->hv_lock);
1695 r = kvm_hv_get_msr_pw(vcpu, msr, pdata, host);
1696 mutex_unlock(&hv->hv_lock);
1697 return r;
1698 } else
1699 return kvm_hv_get_msr(vcpu, msr, pdata, host);
1700 }
1701
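/*
 * Convert a Hyper-V "sparse VP set" into a KVM vCPU mask.  VP index
 * (bank * 64 + bit) is present when bit 'bank' is set in valid_bank_mask
 * and bit 'bit' is set in the corresponding sparse_banks entry.  For
 * example, VPs {1, 65} are encoded as valid_bank_mask = 0x3 and
 * sparse_banks = {0x2, 0x2}.
 */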
1702 static void sparse_set_to_vcpu_mask(struct kvm *kvm, u64 *sparse_banks,
1703 u64 valid_bank_mask, unsigned long *vcpu_mask)
1704 {
1705 struct kvm_hv *hv = to_kvm_hv(kvm);
1706 bool has_mismatch = atomic_read(&hv->num_mismatched_vp_indexes);
1707 u64 vp_bitmap[KVM_HV_MAX_SPARSE_VCPU_SET_BITS];
1708 struct kvm_vcpu *vcpu;
1709 int bank, sbank = 0;
1710 unsigned long i;
1711 u64 *bitmap;
1712
1713 BUILD_BUG_ON(sizeof(vp_bitmap) >
1714 sizeof(*vcpu_mask) * BITS_TO_LONGS(KVM_MAX_VCPUS));
1715
1716 /*
1717 * If vp_index == vcpu_idx for all vCPUs, fill vcpu_mask directly, else
1718 * fill a temporary buffer and manually test each vCPU's VP index.
1719 */
1720 if (likely(!has_mismatch))
1721 bitmap = (u64 *)vcpu_mask;
1722 else
1723 bitmap = vp_bitmap;
1724
1725 /*
1726 * Each set of 64 VPs is packed into sparse_banks, with valid_bank_mask
1727 * having a '1' for each bank that exists in sparse_banks. Sets must
1728 * be in ascending order, i.e. bank0..bankN.
1729 */
1730 memset(bitmap, 0, sizeof(vp_bitmap));
1731 for_each_set_bit(bank, (unsigned long *)&valid_bank_mask,
1732 KVM_HV_MAX_SPARSE_VCPU_SET_BITS)
1733 bitmap[bank] = sparse_banks[sbank++];
1734
1735 if (likely(!has_mismatch))
1736 return;
1737
1738 bitmap_zero(vcpu_mask, KVM_MAX_VCPUS);
1739 kvm_for_each_vcpu(i, vcpu, kvm) {
1740 if (test_bit(kvm_hv_get_vpindex(vcpu), (unsigned long *)vp_bitmap))
1741 __set_bit(i, vcpu_mask);
1742 }
1743 }
1744
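/*
 * Decoded hypercall input, filled in by kvm_hv_hypercall() before dispatch:
 * 'code', 'fast', 'var_cnt', 'rep_cnt' and 'rep_idx' are unpacked from the
 * raw 'param' value; for fast hypercalls 'ingpa' and 'outgpa' carry
 * immediate data (with further input in 'xmm'), otherwise they are guest
 * physical addresses of the input/output buffers.
 */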
1745 struct kvm_hv_hcall {
1746 u64 param;
1747 u64 ingpa;
1748 u64 outgpa;
1749 u16 code;
1750 u16 var_cnt;
1751 u16 rep_cnt;
1752 u16 rep_idx;
1753 bool fast;
1754 bool rep;
1755 sse128_t xmm[HV_HYPERCALL_MAX_XMM_REGISTERS];
1756 };
1757
1758 static u64 kvm_get_sparse_vp_set(struct kvm *kvm, struct kvm_hv_hcall *hc,
1759 int consumed_xmm_halves,
1760 u64 *sparse_banks, gpa_t offset)
1761 {
1762 u16 var_cnt;
1763 int i;
1764
1765 if (hc->var_cnt > 64)
1766 return -EINVAL;
1767
1768 /* Ignore banks that cannot possibly contain a legal VP index. */
1769 var_cnt = min_t(u16, hc->var_cnt, KVM_HV_MAX_SPARSE_VCPU_SET_BITS);
1770
1771 if (hc->fast) {
1772 /*
1773 * Each XMM holds two sparse banks, but do not count halves that
1774 * have already been consumed for hypercall parameters.
1775 */
1776 if (hc->var_cnt > 2 * HV_HYPERCALL_MAX_XMM_REGISTERS - consumed_xmm_halves)
1777 return HV_STATUS_INVALID_HYPERCALL_INPUT;
1778 for (i = 0; i < var_cnt; i++) {
1779 int j = i + consumed_xmm_halves;
1780 if (j % 2)
1781 sparse_banks[i] = sse128_hi(hc->xmm[j / 2]);
1782 else
1783 sparse_banks[i] = sse128_lo(hc->xmm[j / 2]);
1784 }
1785 return 0;
1786 }
1787
1788 return kvm_read_guest(kvm, hc->ingpa + offset, sparse_banks,
1789 var_cnt * sizeof(*sparse_banks));
1790 }
1791
1792 static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc)
1793 {
1794 struct kvm *kvm = vcpu->kvm;
1795 struct hv_tlb_flush_ex flush_ex;
1796 struct hv_tlb_flush flush;
1797 DECLARE_BITMAP(vcpu_mask, KVM_MAX_VCPUS);
1798 u64 valid_bank_mask;
1799 u64 sparse_banks[KVM_HV_MAX_SPARSE_VCPU_SET_BITS];
1800 bool all_cpus;
1801
1802 /*
1803 * The Hyper-V TLFS doesn't allow more than 64 sparse banks, e.g. the
1804 * valid mask is a u64. Fail the build if KVM's max allowed number of
1805 * vCPUs (>4096) would exceed this limit, KVM will need additional changes
1806 * for Hyper-V support to avoid setting the guest up to fail.
1807 */
1808 BUILD_BUG_ON(KVM_HV_MAX_SPARSE_VCPU_SET_BITS > 64);
1809
1810 if (hc->code == HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST ||
1811 hc->code == HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE) {
1812 if (hc->fast) {
1813 flush.address_space = hc->ingpa;
1814 flush.flags = hc->outgpa;
1815 flush.processor_mask = sse128_lo(hc->xmm[0]);
1816 } else {
1817 if (unlikely(kvm_read_guest(kvm, hc->ingpa,
1818 &flush, sizeof(flush))))
1819 return HV_STATUS_INVALID_HYPERCALL_INPUT;
1820 }
1821
1822 trace_kvm_hv_flush_tlb(flush.processor_mask,
1823 flush.address_space, flush.flags);
1824
1825 valid_bank_mask = BIT_ULL(0);
1826 sparse_banks[0] = flush.processor_mask;
1827
1828 /*
1829 * Work around possible WS2012 bug: it sends hypercalls
1830 * with processor_mask = 0x0 and HV_FLUSH_ALL_PROCESSORS clear,
1831 * while also expecting us to flush something and crashing if
1832 * we don't. Let's treat processor_mask == 0 same as
1833 * HV_FLUSH_ALL_PROCESSORS.
1834 */
1835 all_cpus = (flush.flags & HV_FLUSH_ALL_PROCESSORS) ||
1836 flush.processor_mask == 0;
1837 } else {
1838 if (hc->fast) {
1839 flush_ex.address_space = hc->ingpa;
1840 flush_ex.flags = hc->outgpa;
1841 memcpy(&flush_ex.hv_vp_set,
1842 &hc->xmm[0], sizeof(hc->xmm[0]));
1843 } else {
1844 if (unlikely(kvm_read_guest(kvm, hc->ingpa, &flush_ex,
1845 sizeof(flush_ex))))
1846 return HV_STATUS_INVALID_HYPERCALL_INPUT;
1847 }
1848
1849 trace_kvm_hv_flush_tlb_ex(flush_ex.hv_vp_set.valid_bank_mask,
1850 flush_ex.hv_vp_set.format,
1851 flush_ex.address_space,
1852 flush_ex.flags);
1853
1854 valid_bank_mask = flush_ex.hv_vp_set.valid_bank_mask;
1855 all_cpus = flush_ex.hv_vp_set.format !=
1856 HV_GENERIC_SET_SPARSE_4K;
1857
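		/*
		 * The guest-provided variable header count must match the
		 * number of banks advertised in valid_bank_mask.
		 */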
1858 if (hc->var_cnt != hweight64(valid_bank_mask))
1859 return HV_STATUS_INVALID_HYPERCALL_INPUT;
1860
1861 if (all_cpus)
1862 goto do_flush;
1863
1864 if (!hc->var_cnt)
1865 goto ret_success;
1866
1867 if (kvm_get_sparse_vp_set(kvm, hc, 2, sparse_banks,
1868 offsetof(struct hv_tlb_flush_ex,
1869 hv_vp_set.bank_contents)))
1870 return HV_STATUS_INVALID_HYPERCALL_INPUT;
1871 }
1872
1873 do_flush:
1874 /*
1875 * vcpu->arch.cr3 may not be up-to-date for running vCPUs, so it can't
1876 * be analyzed here; flush the TLB regardless of the specified address space.
1877 */
1878 if (all_cpus) {
1879 kvm_make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH_GUEST);
1880 } else {
1881 sparse_set_to_vcpu_mask(kvm, sparse_banks, valid_bank_mask, vcpu_mask);
1882
1883 kvm_make_vcpus_request_mask(kvm, KVM_REQ_TLB_FLUSH_GUEST, vcpu_mask);
1884 }
1885
1886 ret_success:
1887 /* We always do a full TLB flush; set 'Reps completed' = 'Rep Count'. */
1888 return (u64)HV_STATUS_SUCCESS |
1889 ((u64)hc->rep_cnt << HV_HYPERCALL_REP_COMP_OFFSET);
1890 }
1891
1892 static void kvm_send_ipi_to_many(struct kvm *kvm, u32 vector,
1893 unsigned long *vcpu_bitmap)
1894 {
1895 struct kvm_lapic_irq irq = {
1896 .delivery_mode = APIC_DM_FIXED,
1897 .vector = vector
1898 };
1899 struct kvm_vcpu *vcpu;
1900 unsigned long i;
1901
1902 kvm_for_each_vcpu(i, vcpu, kvm) {
1903 if (vcpu_bitmap && !test_bit(i, vcpu_bitmap))
1904 continue;
1905
1906 /* We fail only when APIC is disabled */
1907 kvm_apic_set_irq(vcpu, &irq, NULL);
1908 }
1909 }
1910
1911 static u64 kvm_hv_send_ipi(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc)
1912 {
1913 struct kvm *kvm = vcpu->kvm;
1914 struct hv_send_ipi_ex send_ipi_ex;
1915 struct hv_send_ipi send_ipi;
1916 DECLARE_BITMAP(vcpu_mask, KVM_MAX_VCPUS);
1917 u64 valid_bank_mask;
1918 u64 sparse_banks[KVM_HV_MAX_SPARSE_VCPU_SET_BITS];
1919 u32 vector;
1920 bool all_cpus;
1921
1922 if (hc->code == HVCALL_SEND_IPI) {
1923 if (!hc->fast) {
1924 if (unlikely(kvm_read_guest(kvm, hc->ingpa, &send_ipi,
1925 sizeof(send_ipi))))
1926 return HV_STATUS_INVALID_HYPERCALL_INPUT;
1927 sparse_banks[0] = send_ipi.cpu_mask;
1928 vector = send_ipi.vector;
1929 } else {
1930 /* 'reserved' part of hv_send_ipi should be 0 */
1931 if (unlikely(hc->ingpa >> 32 != 0))
1932 return HV_STATUS_INVALID_HYPERCALL_INPUT;
1933 sparse_banks[0] = hc->outgpa;
1934 vector = (u32)hc->ingpa;
1935 }
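		/*
		 * The non-EX IPI hypercall carries a single 64-bit CPU mask,
		 * i.e. exactly one sparse bank (bank 0).
		 */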
1936 all_cpus = false;
1937 valid_bank_mask = BIT_ULL(0);
1938
1939 trace_kvm_hv_send_ipi(vector, sparse_banks[0]);
1940 } else {
1941 if (!hc->fast) {
1942 if (unlikely(kvm_read_guest(kvm, hc->ingpa, &send_ipi_ex,
1943 sizeof(send_ipi_ex))))
1944 return HV_STATUS_INVALID_HYPERCALL_INPUT;
1945 } else {
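			/*
			 * Fast HVCALL_SEND_IPI_EX: the vector and set format
			 * arrive in the fixed input registers, the valid bank
			 * mask in the low half of XMM0.
			 */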
1946 send_ipi_ex.vector = (u32)hc->ingpa;
1947 send_ipi_ex.vp_set.format = hc->outgpa;
1948 send_ipi_ex.vp_set.valid_bank_mask = sse128_lo(hc->xmm[0]);
1949 }
1950
1951 trace_kvm_hv_send_ipi_ex(send_ipi_ex.vector,
1952 send_ipi_ex.vp_set.format,
1953 send_ipi_ex.vp_set.valid_bank_mask);
1954
1955 vector = send_ipi_ex.vector;
1956 valid_bank_mask = send_ipi_ex.vp_set.valid_bank_mask;
1957 all_cpus = send_ipi_ex.vp_set.format == HV_GENERIC_SET_ALL;
1958
1959 if (hc->var_cnt != hweight64(valid_bank_mask))
1960 return HV_STATUS_INVALID_HYPERCALL_INPUT;
1961
1962 if (all_cpus)
1963 goto check_and_send_ipi;
1964
1965 if (!hc->var_cnt)
1966 goto ret_success;
1967
1968 if (kvm_get_sparse_vp_set(kvm, hc, 1, sparse_banks,
1969 offsetof(struct hv_send_ipi_ex,
1970 vp_set.bank_contents)))
1971 return HV_STATUS_INVALID_HYPERCALL_INPUT;
1972 }
1973
1974 check_and_send_ipi:
1975 if ((vector < HV_IPI_LOW_VECTOR) || (vector > HV_IPI_HIGH_VECTOR))
1976 return HV_STATUS_INVALID_HYPERCALL_INPUT;
1977
1978 if (all_cpus) {
1979 kvm_send_ipi_to_many(kvm, vector, NULL);
1980 } else {
1981 sparse_set_to_vcpu_mask(kvm, sparse_banks, valid_bank_mask, vcpu_mask);
1982
1983 kvm_send_ipi_to_many(kvm, vector, vcpu_mask);
1984 }
1985
1986 ret_success:
1987 return HV_STATUS_SUCCESS;
1988 }
1989
1990 void kvm_hv_set_cpuid(struct kvm_vcpu *vcpu)
1991 {
1992 struct kvm_cpuid_entry2 *entry;
1993 struct kvm_vcpu_hv *hv_vcpu;
1994
1995 entry = kvm_find_cpuid_entry(vcpu, HYPERV_CPUID_INTERFACE, 0);
1996 if (entry && entry->eax == HYPERV_CPUID_SIGNATURE_EAX) {
1997 vcpu->arch.hyperv_enabled = true;
1998 } else {
1999 vcpu->arch.hyperv_enabled = false;
2000 return;
2001 }
2002
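	/* Lazily allocate the Hyper-V vCPU context on first use. */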
2003 if (!to_hv_vcpu(vcpu) && kvm_hv_vcpu_init(vcpu))
2004 return;
2005
2006 hv_vcpu = to_hv_vcpu(vcpu);
2007
2008 entry = kvm_find_cpuid_entry(vcpu, HYPERV_CPUID_FEATURES, 0);
2009 if (entry) {
2010 hv_vcpu->cpuid_cache.features_eax = entry->eax;
2011 hv_vcpu->cpuid_cache.features_ebx = entry->ebx;
2012 hv_vcpu->cpuid_cache.features_edx = entry->edx;
2013 } else {
2014 hv_vcpu->cpuid_cache.features_eax = 0;
2015 hv_vcpu->cpuid_cache.features_ebx = 0;
2016 hv_vcpu->cpuid_cache.features_edx = 0;
2017 }
2018
2019 entry = kvm_find_cpuid_entry(vcpu, HYPERV_CPUID_ENLIGHTMENT_INFO, 0);
2020 if (entry) {
2021 hv_vcpu->cpuid_cache.enlightenments_eax = entry->eax;
2022 hv_vcpu->cpuid_cache.enlightenments_ebx = entry->ebx;
2023 } else {
2024 hv_vcpu->cpuid_cache.enlightenments_eax = 0;
2025 hv_vcpu->cpuid_cache.enlightenments_ebx = 0;
2026 }
2027
2028 entry = kvm_find_cpuid_entry(vcpu, HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES, 0);
2029 if (entry)
2030 hv_vcpu->cpuid_cache.syndbg_cap_eax = entry->eax;
2031 else
2032 hv_vcpu->cpuid_cache.syndbg_cap_eax = 0;
2033 }
2034
2035 int kvm_hv_set_enforce_cpuid(struct kvm_vcpu *vcpu, bool enforce)
2036 {
2037 struct kvm_vcpu_hv *hv_vcpu;
2038 int ret = 0;
2039
2040 if (!to_hv_vcpu(vcpu)) {
2041 if (enforce) {
2042 ret = kvm_hv_vcpu_init(vcpu);
2043 if (ret)
2044 return ret;
2045 } else {
2046 return 0;
2047 }
2048 }
2049
2050 hv_vcpu = to_hv_vcpu(vcpu);
2051 hv_vcpu->enforce_cpuid = enforce;
2052
2053 return ret;
2054 }
2055
2056 static void kvm_hv_hypercall_set_result(struct kvm_vcpu *vcpu, u64 result)
2057 {
2058 bool longmode;
2059
2060 longmode = is_64_bit_hypercall(vcpu);
2061 if (longmode)
2062 kvm_rax_write(vcpu, result);
2063 else {
2064 kvm_rdx_write(vcpu, result >> 32);
2065 kvm_rax_write(vcpu, result & 0xffffffff);
2066 }
2067 }
2068
2069 static int kvm_hv_hypercall_complete(struct kvm_vcpu *vcpu, u64 result)
2070 {
2071 trace_kvm_hv_hypercall_done(result);
2072 kvm_hv_hypercall_set_result(vcpu, result);
2073 ++vcpu->stat.hypercalls;
2074 return kvm_skip_emulated_instruction(vcpu);
2075 }
2076
2077 static int kvm_hv_hypercall_complete_userspace(struct kvm_vcpu *vcpu)
2078 {
2079 return kvm_hv_hypercall_complete(vcpu, vcpu->run->hyperv.u.hcall.result);
2080 }
2081
2082 static u16 kvm_hvcall_signal_event(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc)
2083 {
2084 struct kvm_hv *hv = to_kvm_hv(vcpu->kvm);
2085 struct eventfd_ctx *eventfd;
2086
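	/*
	 * Memory-based hypercalls pass a GPA pointing at the connection ID;
	 * fast hypercalls pass the connection ID directly in the first input
	 * register.
	 */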
2087 if (unlikely(!hc->fast)) {
2088 int ret;
2089 gpa_t gpa = hc->ingpa;
2090
2091 if ((gpa & (__alignof__(hc->ingpa) - 1)) ||
2092 offset_in_page(gpa) + sizeof(hc->ingpa) > PAGE_SIZE)
2093 return HV_STATUS_INVALID_ALIGNMENT;
2094
2095 ret = kvm_vcpu_read_guest(vcpu, gpa,
2096 &hc->ingpa, sizeof(hc->ingpa));
2097 if (ret < 0)
2098 return HV_STATUS_INVALID_ALIGNMENT;
2099 }
2100
2101 /*
2102 * Per spec, bits 32-47 contain the extra "flag number". However, we
2103 * have no use for it, and in all known use cases it is zero, so just
2104 * report lookup failure if it isn't.
2105 */
2106 if (hc->ingpa & 0xffff00000000ULL)
2107 return HV_STATUS_INVALID_PORT_ID;
2108 /* remaining bits are reserved-zero */
2109 if (hc->ingpa & ~KVM_HYPERV_CONN_ID_MASK)
2110 return HV_STATUS_INVALID_HYPERCALL_INPUT;
2111
2112 /* the eventfd is protected by vcpu->kvm->srcu, but conn_to_evt isn't */
2113 rcu_read_lock();
2114 eventfd = idr_find(&hv->conn_to_evt, hc->ingpa);
2115 rcu_read_unlock();
2116 if (!eventfd)
2117 return HV_STATUS_INVALID_PORT_ID;
2118
2119 eventfd_signal(eventfd, 1);
2120 return HV_STATUS_SUCCESS;
2121 }
2122
2123 static bool is_xmm_fast_hypercall(struct kvm_hv_hcall *hc)
2124 {
2125 switch (hc->code) {
2126 case HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST:
2127 case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE:
2128 case HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX:
2129 case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX:
2130 case HVCALL_SEND_IPI_EX:
2131 return true;
2132 }
2133
2134 return false;
2135 }
2136
2137 static void kvm_hv_hypercall_read_xmm(struct kvm_hv_hcall *hc)
2138 {
2139 int reg;
2140
2141 kvm_fpu_get();
2142 for (reg = 0; reg < HV_HYPERCALL_MAX_XMM_REGISTERS; reg++)
2143 _kvm_read_sse_reg(reg, &hc->xmm[reg]);
2144 kvm_fpu_put();
2145 }
2146
2147 static bool hv_check_hypercall_access(struct kvm_vcpu_hv *hv_vcpu, u16 code)
2148 {
2149 if (!hv_vcpu->enforce_cpuid)
2150 return true;
2151
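	/*
	 * With CPUID enforcement enabled, only allow hypercalls whose
	 * corresponding feature or enlightenment bits are exposed to the
	 * guest.
	 */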
2152 switch (code) {
2153 case HVCALL_NOTIFY_LONG_SPIN_WAIT:
2154 return hv_vcpu->cpuid_cache.enlightenments_ebx &&
2155 hv_vcpu->cpuid_cache.enlightenments_ebx != U32_MAX;
2156 case HVCALL_POST_MESSAGE:
2157 return hv_vcpu->cpuid_cache.features_ebx & HV_POST_MESSAGES;
2158 case HVCALL_SIGNAL_EVENT:
2159 return hv_vcpu->cpuid_cache.features_ebx & HV_SIGNAL_EVENTS;
2160 case HVCALL_POST_DEBUG_DATA:
2161 case HVCALL_RETRIEVE_DEBUG_DATA:
2162 case HVCALL_RESET_DEBUG_SESSION:
2163 /*
2164 * Return 'true' when SynDBG is disabled so the resulting code
2165 * will be HV_STATUS_INVALID_HYPERCALL_CODE.
2166 */
2167 return !kvm_hv_is_syndbg_enabled(hv_vcpu->vcpu) ||
2168 hv_vcpu->cpuid_cache.features_ebx & HV_DEBUGGING;
2169 case HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX:
2170 case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX:
2171 if (!(hv_vcpu->cpuid_cache.enlightenments_eax &
2172 HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED))
2173 return false;
2174 fallthrough;
2175 case HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST:
2176 case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE:
2177 return hv_vcpu->cpuid_cache.enlightenments_eax &
2178 HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED;
2179 case HVCALL_SEND_IPI_EX:
2180 if (!(hv_vcpu->cpuid_cache.enlightenments_eax &
2181 HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED))
2182 return false;
2183 fallthrough;
2184 case HVCALL_SEND_IPI:
2185 return hv_vcpu->cpuid_cache.enlightenments_eax &
2186 HV_X64_CLUSTER_IPI_RECOMMENDED;
2187 default:
2188 break;
2189 }
2190
2191 return true;
2192 }
2193
2194 int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
2195 {
2196 struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
2197 struct kvm_hv_hcall hc;
2198 u64 ret = HV_STATUS_SUCCESS;
2199
2200 /*
2201 * Per the Hyper-V TLFS, a hypercall issued from non-zero CPL or from
2202 * real mode generates a #UD.
2203 */
2204 if (static_call(kvm_x86_get_cpl)(vcpu) != 0 || !is_protmode(vcpu)) {
2205 kvm_queue_exception(vcpu, UD_VECTOR);
2206 return 1;
2207 }
2208
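	/*
	 * 64-bit hypercalls pass the input value, input GPA and output GPA in
	 * RCX, RDX and R8; 32-bit hypercalls split them across EDX:EAX,
	 * EBX:ECX and EDI:ESI respectively.
	 */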
2209 #ifdef CONFIG_X86_64
2210 if (is_64_bit_hypercall(vcpu)) {
2211 hc.param = kvm_rcx_read(vcpu);
2212 hc.ingpa = kvm_rdx_read(vcpu);
2213 hc.outgpa = kvm_r8_read(vcpu);
2214 } else
2215 #endif
2216 {
2217 hc.param = ((u64)kvm_rdx_read(vcpu) << 32) |
2218 (kvm_rax_read(vcpu) & 0xffffffff);
2219 hc.ingpa = ((u64)kvm_rbx_read(vcpu) << 32) |
2220 (kvm_rcx_read(vcpu) & 0xffffffff);
2221 hc.outgpa = ((u64)kvm_rdi_read(vcpu) << 32) |
2222 (kvm_rsi_read(vcpu) & 0xffffffff);
2223 }
2224
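	/* Decode the fixed fields of the hypercall input value. */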
2225 hc.code = hc.param & 0xffff;
2226 hc.var_cnt = (hc.param & HV_HYPERCALL_VARHEAD_MASK) >> HV_HYPERCALL_VARHEAD_OFFSET;
2227 hc.fast = !!(hc.param & HV_HYPERCALL_FAST_BIT);
2228 hc.rep_cnt = (hc.param >> HV_HYPERCALL_REP_COMP_OFFSET) & 0xfff;
2229 hc.rep_idx = (hc.param >> HV_HYPERCALL_REP_START_OFFSET) & 0xfff;
2230 hc.rep = !!(hc.rep_cnt || hc.rep_idx);
2231
2232 trace_kvm_hv_hypercall(hc.code, hc.fast, hc.var_cnt, hc.rep_cnt,
2233 hc.rep_idx, hc.ingpa, hc.outgpa);
2234
2235 if (unlikely(!hv_check_hypercall_access(hv_vcpu, hc.code))) {
2236 ret = HV_STATUS_ACCESS_DENIED;
2237 goto hypercall_complete;
2238 }
2239
2240 if (unlikely(hc.param & HV_HYPERCALL_RSVD_MASK)) {
2241 ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
2242 goto hypercall_complete;
2243 }
2244
2245 if (hc.fast && is_xmm_fast_hypercall(&hc)) {
2246 if (unlikely(hv_vcpu->enforce_cpuid &&
2247 !(hv_vcpu->cpuid_cache.features_edx &
2248 HV_X64_HYPERCALL_XMM_INPUT_AVAILABLE))) {
2249 kvm_queue_exception(vcpu, UD_VECTOR);
2250 return 1;
2251 }
2252
2253 kvm_hv_hypercall_read_xmm(&hc);
2254 }
2255
2256 switch (hc.code) {
2257 case HVCALL_NOTIFY_LONG_SPIN_WAIT:
2258 if (unlikely(hc.rep || hc.var_cnt)) {
2259 ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
2260 break;
2261 }
2262 kvm_vcpu_on_spin(vcpu, true);
2263 break;
2264 case HVCALL_SIGNAL_EVENT:
2265 if (unlikely(hc.rep || hc.var_cnt)) {
2266 ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
2267 break;
2268 }
2269 ret = kvm_hvcall_signal_event(vcpu, &hc);
2270 if (ret != HV_STATUS_INVALID_PORT_ID)
2271 break;
2272 fallthrough; /* maybe userspace knows this conn_id */
2273 case HVCALL_POST_MESSAGE:
2274 /* don't bother userspace if it has no way to handle it */
2275 if (unlikely(hc.rep || hc.var_cnt || !to_hv_synic(vcpu)->active)) {
2276 ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
2277 break;
2278 }
2279 vcpu->run->exit_reason = KVM_EXIT_HYPERV;
2280 vcpu->run->hyperv.type = KVM_EXIT_HYPERV_HCALL;
2281 vcpu->run->hyperv.u.hcall.input = hc.param;
2282 vcpu->run->hyperv.u.hcall.params[0] = hc.ingpa;
2283 vcpu->run->hyperv.u.hcall.params[1] = hc.outgpa;
2284 vcpu->arch.complete_userspace_io =
2285 kvm_hv_hypercall_complete_userspace;
2286 return 0;
2287 case HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST:
2288 if (unlikely(hc.var_cnt)) {
2289 ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
2290 break;
2291 }
2292 fallthrough;
2293 case HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX:
2294 if (unlikely(!hc.rep_cnt || hc.rep_idx)) {
2295 ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
2296 break;
2297 }
2298 ret = kvm_hv_flush_tlb(vcpu, &hc);
2299 break;
2300 case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE:
2301 if (unlikely(hc.var_cnt)) {
2302 ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
2303 break;
2304 }
2305 fallthrough;
2306 case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX:
2307 if (unlikely(hc.rep)) {
2308 ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
2309 break;
2310 }
2311 ret = kvm_hv_flush_tlb(vcpu, &hc);
2312 break;
2313 case HVCALL_SEND_IPI:
2314 if (unlikely(hc.var_cnt)) {
2315 ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
2316 break;
2317 }
2318 fallthrough;
2319 case HVCALL_SEND_IPI_EX:
2320 if (unlikely(hc.rep)) {
2321 ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
2322 break;
2323 }
2324 ret = kvm_hv_send_ipi(vcpu, &hc);
2325 break;
2326 case HVCALL_POST_DEBUG_DATA:
2327 case HVCALL_RETRIEVE_DEBUG_DATA:
2328 if (unlikely(hc.fast)) {
2329 ret = HV_STATUS_INVALID_PARAMETER;
2330 break;
2331 }
2332 fallthrough;
2333 case HVCALL_RESET_DEBUG_SESSION: {
2334 struct kvm_hv_syndbg *syndbg = to_hv_syndbg(vcpu);
2335
2336 if (!kvm_hv_is_syndbg_enabled(vcpu)) {
2337 ret = HV_STATUS_INVALID_HYPERCALL_CODE;
2338 break;
2339 }
2340
2341 if (!(syndbg->options & HV_X64_SYNDBG_OPTION_USE_HCALLS)) {
2342 ret = HV_STATUS_OPERATION_DENIED;
2343 break;
2344 }
2345 vcpu->run->exit_reason = KVM_EXIT_HYPERV;
2346 vcpu->run->hyperv.type = KVM_EXIT_HYPERV_HCALL;
2347 vcpu->run->hyperv.u.hcall.input = hc.param;
2348 vcpu->run->hyperv.u.hcall.params[0] = hc.ingpa;
2349 vcpu->run->hyperv.u.hcall.params[1] = hc.outgpa;
2350 vcpu->arch.complete_userspace_io =
2351 kvm_hv_hypercall_complete_userspace;
2352 return 0;
2353 }
2354 default:
2355 ret = HV_STATUS_INVALID_HYPERCALL_CODE;
2356 break;
2357 }
2358
2359 hypercall_complete:
2360 return kvm_hv_hypercall_complete(vcpu, ret);
2361 }
2362
2363 void kvm_hv_init_vm(struct kvm *kvm)
2364 {
2365 struct kvm_hv *hv = to_kvm_hv(kvm);
2366
2367 mutex_init(&hv->hv_lock);
2368 idr_init(&hv->conn_to_evt);
2369 }
2370
2371 void kvm_hv_destroy_vm(struct kvm *kvm)
2372 {
2373 struct kvm_hv *hv = to_kvm_hv(kvm);
2374 struct eventfd_ctx *eventfd;
2375 int i;
2376
2377 idr_for_each_entry(&hv->conn_to_evt, eventfd, i)
2378 eventfd_ctx_put(eventfd);
2379 idr_destroy(&hv->conn_to_evt);
2380 }
2381
2382 static int kvm_hv_eventfd_assign(struct kvm *kvm, u32 conn_id, int fd)
2383 {
2384 struct kvm_hv *hv = to_kvm_hv(kvm);
2385 struct eventfd_ctx *eventfd;
2386 int ret;
2387
2388 eventfd = eventfd_ctx_fdget(fd);
2389 if (IS_ERR(eventfd))
2390 return PTR_ERR(eventfd);
2391
2392 mutex_lock(&hv->hv_lock);
2393 ret = idr_alloc(&hv->conn_to_evt, eventfd, conn_id, conn_id + 1,
2394 GFP_KERNEL_ACCOUNT);
2395 mutex_unlock(&hv->hv_lock);
2396
2397 if (ret >= 0)
2398 return 0;
2399
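	/*
	 * idr_alloc() was restricted to the single slot [conn_id, conn_id + 1),
	 * so -ENOSPC means the connection ID is already in use.
	 */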
2400 if (ret == -ENOSPC)
2401 ret = -EEXIST;
2402 eventfd_ctx_put(eventfd);
2403 return ret;
2404 }
2405
2406 static int kvm_hv_eventfd_deassign(struct kvm *kvm, u32 conn_id)
2407 {
2408 struct kvm_hv *hv = to_kvm_hv(kvm);
2409 struct eventfd_ctx *eventfd;
2410
2411 mutex_lock(&hv->hv_lock);
2412 eventfd = idr_remove(&hv->conn_to_evt, conn_id);
2413 mutex_unlock(&hv->hv_lock);
2414
2415 if (!eventfd)
2416 return -ENOENT;
2417
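	/*
	 * Wait for in-flight kvm_hvcall_signal_event() lookups, which run under
	 * kvm->srcu, to complete before dropping the eventfd reference.
	 */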
2418 synchronize_srcu(&kvm->srcu);
2419 eventfd_ctx_put(eventfd);
2420 return 0;
2421 }
2422
2423 int kvm_vm_ioctl_hv_eventfd(struct kvm *kvm, struct kvm_hyperv_eventfd *args)
2424 {
2425 if ((args->flags & ~KVM_HYPERV_EVENTFD_DEASSIGN) ||
2426 (args->conn_id & ~KVM_HYPERV_CONN_ID_MASK))
2427 return -EINVAL;
2428
2429 if (args->flags == KVM_HYPERV_EVENTFD_DEASSIGN)
2430 return kvm_hv_eventfd_deassign(kvm, args->conn_id);
2431 return kvm_hv_eventfd_assign(kvm, args->conn_id, args->fd);
2432 }
2433
2434 int kvm_get_hv_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid,
2435 struct kvm_cpuid_entry2 __user *entries)
2436 {
2437 uint16_t evmcs_ver = 0;
2438 struct kvm_cpuid_entry2 cpuid_entries[] = {
2439 { .function = HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS },
2440 { .function = HYPERV_CPUID_INTERFACE },
2441 { .function = HYPERV_CPUID_VERSION },
2442 { .function = HYPERV_CPUID_FEATURES },
2443 { .function = HYPERV_CPUID_ENLIGHTMENT_INFO },
2444 { .function = HYPERV_CPUID_IMPLEMENT_LIMITS },
2445 { .function = HYPERV_CPUID_SYNDBG_VENDOR_AND_MAX_FUNCTIONS },
2446 { .function = HYPERV_CPUID_SYNDBG_INTERFACE },
2447 { .function = HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES },
2448 { .function = HYPERV_CPUID_NESTED_FEATURES },
2449 };
2450 int i, nent = ARRAY_SIZE(cpuid_entries);
2451
2452 if (kvm_x86_ops.nested_ops->get_evmcs_version)
2453 evmcs_ver = kvm_x86_ops.nested_ops->get_evmcs_version(vcpu);
2454
2455 if (cpuid->nent < nent)
2456 return -E2BIG;
2457
2458 if (cpuid->nent > nent)
2459 cpuid->nent = nent;
2460
2461 for (i = 0; i < nent; i++) {
2462 struct kvm_cpuid_entry2 *ent = &cpuid_entries[i];
2463 u32 signature[3];
2464
2465 switch (ent->function) {
2466 case HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS:
2467 memcpy(signature, "Linux KVM Hv", 12);
2468
2469 ent->eax = HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES;
2470 ent->ebx = signature[0];
2471 ent->ecx = signature[1];
2472 ent->edx = signature[2];
2473 break;
2474
2475 case HYPERV_CPUID_INTERFACE:
2476 ent->eax = HYPERV_CPUID_SIGNATURE_EAX;
2477 break;
2478
2479 case HYPERV_CPUID_VERSION:
2480 /*
2481 * We implement some Hyper-V 2016 functions so let's use
2482 * this version.
2483 */
2484 ent->eax = 0x00003839;
2485 ent->ebx = 0x000A0000;
2486 break;
2487
2488 case HYPERV_CPUID_FEATURES:
2489 ent->eax |= HV_MSR_VP_RUNTIME_AVAILABLE;
2490 ent->eax |= HV_MSR_TIME_REF_COUNT_AVAILABLE;
2491 ent->eax |= HV_MSR_SYNIC_AVAILABLE;
2492 ent->eax |= HV_MSR_SYNTIMER_AVAILABLE;
2493 ent->eax |= HV_MSR_APIC_ACCESS_AVAILABLE;
2494 ent->eax |= HV_MSR_HYPERCALL_AVAILABLE;
2495 ent->eax |= HV_MSR_VP_INDEX_AVAILABLE;
2496 ent->eax |= HV_MSR_RESET_AVAILABLE;
2497 ent->eax |= HV_MSR_REFERENCE_TSC_AVAILABLE;
2498 ent->eax |= HV_ACCESS_FREQUENCY_MSRS;
2499 ent->eax |= HV_ACCESS_REENLIGHTENMENT;
2500
2501 ent->ebx |= HV_POST_MESSAGES;
2502 ent->ebx |= HV_SIGNAL_EVENTS;
2503
2504 ent->edx |= HV_X64_HYPERCALL_XMM_INPUT_AVAILABLE;
2505 ent->edx |= HV_FEATURE_FREQUENCY_MSRS_AVAILABLE;
2506 ent->edx |= HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE;
2507
2508 ent->ebx |= HV_DEBUGGING;
2509 ent->edx |= HV_X64_GUEST_DEBUGGING_AVAILABLE;
2510 ent->edx |= HV_FEATURE_DEBUG_MSRS_AVAILABLE;
2511
2512 /*
2513 * Direct Synthetic timers only make sense with in-kernel
2514 * LAPIC
2515 */
2516 if (!vcpu || lapic_in_kernel(vcpu))
2517 ent->edx |= HV_STIMER_DIRECT_MODE_AVAILABLE;
2518
2519 break;
2520
2521 case HYPERV_CPUID_ENLIGHTMENT_INFO:
2522 ent->eax |= HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED;
2523 ent->eax |= HV_X64_APIC_ACCESS_RECOMMENDED;
2524 ent->eax |= HV_X64_RELAXED_TIMING_RECOMMENDED;
2525 ent->eax |= HV_X64_CLUSTER_IPI_RECOMMENDED;
2526 ent->eax |= HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED;
2527 if (evmcs_ver)
2528 ent->eax |= HV_X64_ENLIGHTENED_VMCS_RECOMMENDED;
2529 if (!cpu_smt_possible())
2530 ent->eax |= HV_X64_NO_NONARCH_CORESHARING;
2531
2532 ent->eax |= HV_DEPRECATING_AEOI_RECOMMENDED;
2533 /*
2534 * Default number of spinlock retry attempts, matches
2535 * Hyper-V 2016.
2536 */
2537 ent->ebx = 0x00000FFF;
2538
2539 break;
2540
2541 case HYPERV_CPUID_IMPLEMENT_LIMITS:
2542 /* Maximum number of virtual processors */
2543 ent->eax = KVM_MAX_VCPUS;
2544 /*
2545 * Maximum number of logical processors, matches
2546 * Hyper-V 2016.
2547 */
2548 ent->ebx = 64;
2549
2550 break;
2551
2552 case HYPERV_CPUID_NESTED_FEATURES:
2553 ent->eax = evmcs_ver;
2554 ent->eax |= HV_X64_NESTED_MSR_BITMAP;
2555
2556 break;
2557
2558 case HYPERV_CPUID_SYNDBG_VENDOR_AND_MAX_FUNCTIONS:
2559 memcpy(signature, "Linux KVM Hv", 12);
2560
2561 ent->eax = 0;
2562 ent->ebx = signature[0];
2563 ent->ecx = signature[1];
2564 ent->edx = signature[2];
2565 break;
2566
2567 case HYPERV_CPUID_SYNDBG_INTERFACE:
2568 memcpy(signature, "VS#1\0\0\0\0\0\0\0\0", 12);
2569 ent->eax = signature[0];
2570 break;
2571
2572 case HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES:
2573 ent->eax |= HV_X64_SYNDBG_CAP_ALLOW_KERNEL_DEBUGGING;
2574 break;
2575
2576 default:
2577 break;
2578 }
2579 }
2580
2581 if (copy_to_user(entries, cpuid_entries,
2582 nent * sizeof(struct kvm_cpuid_entry2)))
2583 return -EFAULT;
2584
2585 return 0;
2586 }
2587