// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * KVM paravirt_ops implementation
 *
 * Copyright (C) 2007, Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 * Copyright IBM Corporation, 2007
 *   Authors: Anthony Liguori <aliguori@us.ibm.com>
 */

#define pr_fmt(fmt) "kvm-guest: " fmt

#include <linux/context_tracking.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/kvm_para.h>
#include <linux/cpu.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hardirq.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/hash.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/kprobes.h>
#include <linux/nmi.h>
#include <linux/swait.h>
#include <linux/syscore_ops.h>
#include <linux/cc_platform.h>
#include <linux/efi.h>
#include <asm/timer.h>
#include <asm/cpu.h>
#include <asm/traps.h>
#include <asm/desc.h>
#include <asm/tlbflush.h>
#include <asm/apic.h>
#include <asm/apicdef.h>
#include <asm/hypervisor.h>
#include <asm/tlb.h>
#include <asm/cpuidle_haltpoll.h>
#include <asm/ptrace.h>
#include <asm/reboot.h>
#include <asm/svm.h>
#include <asm/e820/api.h>

DEFINE_STATIC_KEY_FALSE(kvm_async_pf_enabled);

static int kvmapf = 1;

static int __init parse_no_kvmapf(char *arg)
{
	kvmapf = 0;
	return 0;
}

early_param("no-kvmapf", parse_no_kvmapf);

static int steal_acc = 1;
static int __init parse_no_stealacc(char *arg)
{
	steal_acc = 0;
	return 0;
}

early_param("no-steal-acc", parse_no_stealacc);

static DEFINE_PER_CPU_DECRYPTED(struct kvm_vcpu_pv_apf_data, apf_reason) __aligned(64);
DEFINE_PER_CPU_DECRYPTED(struct kvm_steal_time, steal_time) __aligned(64) __visible;
static int has_steal_clock = 0;

static int has_guest_poll = 0;
/*
 * No need for any "IO delay" on KVM
 */
static void kvm_io_delay(void)
{
}

#define KVM_TASK_SLEEP_HASHBITS 8
#define KVM_TASK_SLEEP_HASHSIZE (1<<KVM_TASK_SLEEP_HASHBITS)

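/*
 * Tasks sleeping on an async #PF token are tracked in a small hash table:
 * the token hashes into one of KVM_TASK_SLEEP_HASHSIZE buckets, each bucket
 * protected by its own raw spinlock, with one node per outstanding token.
 */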
struct kvm_task_sleep_node {
	struct hlist_node link;
	struct swait_queue_head wq;
	u32 token;
	int cpu;
};

static struct kvm_task_sleep_head {
	raw_spinlock_t lock;
	struct hlist_head list;
} async_pf_sleepers[KVM_TASK_SLEEP_HASHSIZE];

static struct kvm_task_sleep_node *_find_apf_task(struct kvm_task_sleep_head *b,
						  u32 token)
{
	struct hlist_node *p;

	hlist_for_each(p, &b->list) {
		struct kvm_task_sleep_node *n =
			hlist_entry(p, typeof(*n), link);
		if (n->token == token)
			return n;
	}

	return NULL;
}

static bool kvm_async_pf_queue_task(u32 token, struct kvm_task_sleep_node *n)
{
	u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
	struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
	struct kvm_task_sleep_node *e;

	raw_spin_lock(&b->lock);
	e = _find_apf_task(b, token);
	if (e) {
		/* dummy entry exists -> wake up was delivered ahead of PF */
		hlist_del(&e->link);
		raw_spin_unlock(&b->lock);
		kfree(e);
		return false;
	}

	n->token = token;
	n->cpu = smp_processor_id();
	init_swait_queue_head(&n->wq);
	hlist_add_head(&n->link, &b->list);
	raw_spin_unlock(&b->lock);
	return true;
}

/*
 * kvm_async_pf_task_wait_schedule - Wait for pagefault to be handled
 * @token:	Token to identify the sleep node entry
 *
 * Invoked from the async pagefault handling code or from the VM exit page
 * fault handler. In both cases RCU is watching.
 */
void kvm_async_pf_task_wait_schedule(u32 token)
{
	struct kvm_task_sleep_node n;
	DECLARE_SWAITQUEUE(wait);

	lockdep_assert_irqs_disabled();

	if (!kvm_async_pf_queue_task(token, &n))
		return;

	for (;;) {
		prepare_to_swait_exclusive(&n.wq, &wait, TASK_UNINTERRUPTIBLE);
		if (hlist_unhashed(&n.link))
			break;

		local_irq_enable();
		schedule();
		local_irq_disable();
	}
	finish_swait(&n.wq, &wait);
}
EXPORT_SYMBOL_GPL(kvm_async_pf_task_wait_schedule);

static void apf_task_wake_one(struct kvm_task_sleep_node *n)
{
	hlist_del_init(&n->link);
	if (swq_has_sleeper(&n->wq))
		swake_up_one(&n->wq);
}

static void apf_task_wake_all(void)
{
	int i;

	for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++) {
		struct kvm_task_sleep_head *b = &async_pf_sleepers[i];
		struct kvm_task_sleep_node *n;
		struct hlist_node *p, *next;

		raw_spin_lock(&b->lock);
		hlist_for_each_safe(p, next, &b->list) {
			n = hlist_entry(p, typeof(*n), link);
			if (n->cpu == smp_processor_id())
				apf_task_wake_one(n);
		}
		raw_spin_unlock(&b->lock);
	}
}

void kvm_async_pf_task_wake(u32 token)
{
	u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
	struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
	struct kvm_task_sleep_node *n, *dummy = NULL;

	if (token == ~0) {
		apf_task_wake_all();
		return;
	}

again:
	raw_spin_lock(&b->lock);
	n = _find_apf_task(b, token);
	if (!n) {
		/*
		 * Async #PF not yet handled, add a dummy entry for the token.
		 * Allocating the token must be done outside of the raw lock
		 * as the allocator is preemptible on PREEMPT_RT kernels.
		 */
		if (!dummy) {
			raw_spin_unlock(&b->lock);
			dummy = kzalloc(sizeof(*dummy), GFP_ATOMIC);

			/*
			 * Continue looping on allocation failure, eventually
			 * the async #PF will be handled and allocating a new
			 * node will be unnecessary.
			 */
			if (!dummy)
				cpu_relax();

			/*
			 * Recheck for async #PF completion before enqueueing
			 * the dummy token to avoid duplicate list entries.
			 */
			goto again;
		}
		dummy->token = token;
		dummy->cpu = smp_processor_id();
		init_swait_queue_head(&dummy->wq);
		hlist_add_head(&dummy->link, &b->list);
		dummy = NULL;
	} else {
		apf_task_wake_one(n);
	}
	raw_spin_unlock(&b->lock);

	/* A dummy token might be allocated and ultimately not used. */
	if (dummy)
		kfree(dummy);
}
EXPORT_SYMBOL_GPL(kvm_async_pf_task_wake);

noinstr u32 kvm_read_and_reset_apf_flags(void)
{
	u32 flags = 0;

	if (__this_cpu_read(apf_reason.enabled)) {
		flags = __this_cpu_read(apf_reason.flags);
		__this_cpu_write(apf_reason.flags, 0);
	}

	return flags;
}
EXPORT_SYMBOL_GPL(kvm_read_and_reset_apf_flags);

noinstr bool __kvm_handle_async_pf(struct pt_regs *regs, u32 token)
{
	u32 flags = kvm_read_and_reset_apf_flags();
	irqentry_state_t state;

	if (!flags)
		return false;

	state = irqentry_enter(regs);
	instrumentation_begin();

	/*
	 * If the host managed to inject an async #PF into an interrupt
	 * disabled region, then die hard as this is not going to end well
	 * and the host side is seriously broken.
	 */
	if (unlikely(!(regs->flags & X86_EFLAGS_IF)))
		panic("Host injected async #PF in interrupt disabled region\n");

	if (flags & KVM_PV_REASON_PAGE_NOT_PRESENT) {
		if (unlikely(!(user_mode(regs))))
			panic("Host injected async #PF in kernel mode\n");
		/* Page is swapped out by the host. */
		kvm_async_pf_task_wait_schedule(token);
	} else {
		WARN_ONCE(1, "Unexpected async PF flags: %x\n", flags);
	}

	instrumentation_end();
	irqentry_exit(regs, state);
	return true;
}

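/*
 * "Page ready" notification path: the host raises HYPERVISOR_CALLBACK_VECTOR
 * once a previously-not-present page has been brought in. The token is read
 * from the shared apf_reason area, the task sleeping on it is woken, and the
 * ACK MSR write tells the host it may deliver the next notification.
 */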
DEFINE_IDTENTRY_SYSVEC(sysvec_kvm_asyncpf_interrupt)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	u32 token;

	ack_APIC_irq();

	inc_irq_stat(irq_hv_callback_count);

	if (__this_cpu_read(apf_reason.enabled)) {
		token = __this_cpu_read(apf_reason.token);
		kvm_async_pf_task_wake(token);
		__this_cpu_write(apf_reason.token, 0);
		wrmsrl(MSR_KVM_ASYNC_PF_ACK, 1);
	}

	set_irq_regs(old_regs);
}

static void __init paravirt_ops_setup(void)
{
	pv_info.name = "KVM";

	if (kvm_para_has_feature(KVM_FEATURE_NOP_IO_DELAY))
		pv_ops.cpu.io_delay = kvm_io_delay;

#ifdef CONFIG_X86_IO_APIC
	no_timer_check = 1;
#endif
}

static void kvm_register_steal_time(void)
{
	int cpu = smp_processor_id();
	struct kvm_steal_time *st = &per_cpu(steal_time, cpu);

	if (!has_steal_clock)
		return;

	wrmsrl(MSR_KVM_STEAL_TIME, (slow_virt_to_phys(st) | KVM_MSR_ENABLED));
	pr_debug("stealtime: cpu %d, msr %llx\n", cpu,
		 (unsigned long long) slow_virt_to_phys(st));
}

static DEFINE_PER_CPU_DECRYPTED(unsigned long, kvm_apic_eoi) = KVM_PV_EOI_DISABLED;

static notrace void kvm_guest_apic_eoi_write(u32 reg, u32 val)
{
	/**
	 * This relies on __test_and_clear_bit to modify the memory
	 * in a way that is atomic with respect to the local CPU.
	 * The hypervisor only accesses this memory from the local CPU so
	 * there's no need for lock or memory barriers.
	 * An optimization barrier is implied in apic write.
	 */
	if (__test_and_clear_bit(KVM_PV_EOI_BIT, this_cpu_ptr(&kvm_apic_eoi)))
		return;
	apic->native_eoi_write(APIC_EOI, APIC_EOI_ACK);
}

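/*
 * Per-CPU enablement of the paravirt features: the interrupt vector is
 * programmed into MSR_KVM_ASYNC_PF_INT before async #PF delivery is switched
 * on via MSR_KVM_ASYNC_PF_EN, then PV EOI and steal time are registered.
 */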
static void kvm_guest_cpu_init(void)
{
	if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF_INT) && kvmapf) {
		u64 pa = slow_virt_to_phys(this_cpu_ptr(&apf_reason));

		WARN_ON_ONCE(!static_branch_likely(&kvm_async_pf_enabled));

		pa = slow_virt_to_phys(this_cpu_ptr(&apf_reason));
		pa |= KVM_ASYNC_PF_ENABLED | KVM_ASYNC_PF_DELIVERY_AS_INT;

		if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF_VMEXIT))
			pa |= KVM_ASYNC_PF_DELIVERY_AS_PF_VMEXIT;

		wrmsrl(MSR_KVM_ASYNC_PF_INT, HYPERVISOR_CALLBACK_VECTOR);

		wrmsrl(MSR_KVM_ASYNC_PF_EN, pa);
		__this_cpu_write(apf_reason.enabled, 1);
		pr_debug("setup async PF for cpu %d\n", smp_processor_id());
	}

	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI)) {
		unsigned long pa;

		/* Size alignment is implied but just to make it explicit. */
		BUILD_BUG_ON(__alignof__(kvm_apic_eoi) < 4);
		__this_cpu_write(kvm_apic_eoi, 0);
		pa = slow_virt_to_phys(this_cpu_ptr(&kvm_apic_eoi))
			| KVM_MSR_ENABLED;
		wrmsrl(MSR_KVM_PV_EOI_EN, pa);
	}

	if (has_steal_clock)
		kvm_register_steal_time();
}

static void kvm_pv_disable_apf(void)
{
	if (!__this_cpu_read(apf_reason.enabled))
		return;

	wrmsrl(MSR_KVM_ASYNC_PF_EN, 0);
	__this_cpu_write(apf_reason.enabled, 0);

	pr_debug("disable async PF for cpu %d\n", smp_processor_id());
}

static void kvm_disable_steal_time(void)
{
	if (!has_steal_clock)
		return;

	wrmsr(MSR_KVM_STEAL_TIME, 0, 0);
}

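/*
 * Steal time is published by the host with a seqcount-like version field:
 * an odd version means an update is in progress, so the value is re-read
 * until two reads of the version match and the version is even.
 */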
static u64 kvm_steal_clock(int cpu)
{
	u64 steal;
	struct kvm_steal_time *src;
	int version;

	src = &per_cpu(steal_time, cpu);
	do {
		version = src->version;
		virt_rmb();
		steal = src->steal;
		virt_rmb();
	} while ((version & 1) || (version != src->version));

	return steal;
}

static inline void __set_percpu_decrypted(void *ptr, unsigned long size)
{
	early_set_memory_decrypted((unsigned long) ptr, size);
}

/*
 * Iterate through all possible CPUs and map the memory regions pointed
 * to by apf_reason, steal_time and kvm_apic_eoi as decrypted at once.
 *
 * Note: we iterate through all possible CPUs to ensure that CPUs
 * hotplugged later will have their per-cpu variables already mapped
 * as decrypted.
 */
static void __init sev_map_percpu_data(void)
{
	int cpu;

	if (!cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
		return;

	for_each_possible_cpu(cpu) {
		__set_percpu_decrypted(&per_cpu(apf_reason, cpu), sizeof(apf_reason));
		__set_percpu_decrypted(&per_cpu(steal_time, cpu), sizeof(steal_time));
		__set_percpu_decrypted(&per_cpu(kvm_apic_eoi, cpu), sizeof(kvm_apic_eoi));
	}
}

static void kvm_guest_cpu_offline(bool shutdown)
{
	kvm_disable_steal_time();
	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
		wrmsrl(MSR_KVM_PV_EOI_EN, 0);
	if (kvm_para_has_feature(KVM_FEATURE_MIGRATION_CONTROL))
		wrmsrl(MSR_KVM_MIGRATION_CONTROL, 0);
	kvm_pv_disable_apf();
	if (!shutdown)
		apf_task_wake_all();
	kvmclock_disable();
}

static int kvm_cpu_online(unsigned int cpu)
{
	unsigned long flags;

	local_irq_save(flags);
	kvm_guest_cpu_init();
	local_irq_restore(flags);
	return 0;
}

#ifdef CONFIG_SMP

static DEFINE_PER_CPU(cpumask_var_t, __pv_cpu_mask);

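/*
 * The PV TLB flush and PV sched yield optimizations only pay off when vCPUs
 * can actually be preempted: they are skipped for single-CPU guests, when
 * KVM_HINTS_REALTIME promises dedicated pCPUs, or when MWAIT is exposed to
 * the guest (idle vCPUs then stay in guest mode). Steal time is required so
 * the preempted state can be read.
 */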
static bool pv_tlb_flush_supported(void)
{
	return (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) &&
		!kvm_para_has_hint(KVM_HINTS_REALTIME) &&
		kvm_para_has_feature(KVM_FEATURE_STEAL_TIME) &&
		!boot_cpu_has(X86_FEATURE_MWAIT) &&
		(num_possible_cpus() != 1));
}

static bool pv_ipi_supported(void)
{
	return (kvm_para_has_feature(KVM_FEATURE_PV_SEND_IPI) &&
		(num_possible_cpus() != 1));
}

static bool pv_sched_yield_supported(void)
{
	return (kvm_para_has_feature(KVM_FEATURE_PV_SCHED_YIELD) &&
		!kvm_para_has_hint(KVM_HINTS_REALTIME) &&
		kvm_para_has_feature(KVM_FEATURE_STEAL_TIME) &&
		!boot_cpu_has(X86_FEATURE_MWAIT) &&
		(num_possible_cpus() != 1));
}

#define KVM_IPI_CLUSTER_SIZE	(2 * BITS_PER_LONG)

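/*
 * Send IPIs via the KVM_HC_SEND_IPI hypercall: destinations are encoded as a
 * bitmap of up to KVM_IPI_CLUSTER_SIZE APIC IDs relative to 'min', passed to
 * the host in two longs. When a destination falls outside the current window,
 * the accumulated bitmap is flushed with a hypercall and a new window starts.
 */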
static void __send_ipi_mask(const struct cpumask *mask, int vector)
{
	unsigned long flags;
	int cpu, apic_id, icr;
	int min = 0, max = 0;
#ifdef CONFIG_X86_64
	__uint128_t ipi_bitmap = 0;
#else
	u64 ipi_bitmap = 0;
#endif
	long ret;

	if (cpumask_empty(mask))
		return;

	local_irq_save(flags);

	switch (vector) {
	default:
		icr = APIC_DM_FIXED | vector;
		break;
	case NMI_VECTOR:
		icr = APIC_DM_NMI;
		break;
	}

	for_each_cpu(cpu, mask) {
		apic_id = per_cpu(x86_cpu_to_apicid, cpu);
		if (!ipi_bitmap) {
			min = max = apic_id;
		} else if (apic_id < min && max - apic_id < KVM_IPI_CLUSTER_SIZE) {
			ipi_bitmap <<= min - apic_id;
			min = apic_id;
		} else if (apic_id > min && apic_id < min + KVM_IPI_CLUSTER_SIZE) {
			max = apic_id < max ? max : apic_id;
		} else {
			ret = kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap,
				(unsigned long)(ipi_bitmap >> BITS_PER_LONG), min, icr);
			WARN_ONCE(ret < 0, "kvm-guest: failed to send PV IPI: %ld",
				  ret);
			min = max = apic_id;
			ipi_bitmap = 0;
		}
		__set_bit(apic_id - min, (unsigned long *)&ipi_bitmap);
	}

	if (ipi_bitmap) {
		ret = kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap,
			(unsigned long)(ipi_bitmap >> BITS_PER_LONG), min, icr);
		WARN_ONCE(ret < 0, "kvm-guest: failed to send PV IPI: %ld",
			  ret);
	}

	local_irq_restore(flags);
}

static void kvm_send_ipi_mask(const struct cpumask *mask, int vector)
{
	__send_ipi_mask(mask, vector);
}

static void kvm_send_ipi_mask_allbutself(const struct cpumask *mask, int vector)
{
	unsigned int this_cpu = smp_processor_id();
	struct cpumask *new_mask = this_cpu_cpumask_var_ptr(__pv_cpu_mask);
	const struct cpumask *local_mask;

	cpumask_copy(new_mask, mask);
	cpumask_clear_cpu(this_cpu, new_mask);
	local_mask = new_mask;
	__send_ipi_mask(local_mask, vector);
}

static int __init setup_efi_kvm_sev_migration(void)
{
	efi_char16_t efi_sev_live_migration_enabled[] = L"SevLiveMigrationEnabled";
	efi_guid_t efi_variable_guid = AMD_SEV_MEM_ENCRYPT_GUID;
	efi_status_t status;
	unsigned long size;
	bool enabled;

	if (!cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT) ||
	    !kvm_para_has_feature(KVM_FEATURE_MIGRATION_CONTROL))
		return 0;

	if (!efi_enabled(EFI_BOOT))
		return 0;

	if (!efi_enabled(EFI_RUNTIME_SERVICES)) {
		pr_info("%s : EFI runtime services are not enabled\n", __func__);
		return 0;
	}

	size = sizeof(enabled);

	/* Get variable contents into buffer */
	status = efi.get_variable(efi_sev_live_migration_enabled,
				  &efi_variable_guid, NULL, &size, &enabled);

	if (status == EFI_NOT_FOUND) {
		pr_info("%s : EFI live migration variable not found\n", __func__);
		return 0;
	}

	if (status != EFI_SUCCESS) {
		pr_info("%s : EFI variable retrieval failed\n", __func__);
		return 0;
	}

	if (enabled == 0) {
		pr_info("%s: live migration disabled in EFI\n", __func__);
		return 0;
	}

	pr_info("%s : live migration enabled in EFI\n", __func__);
	wrmsrl(MSR_KVM_MIGRATION_CONTROL, KVM_MIGRATION_READY);

	return 1;
}

late_initcall(setup_efi_kvm_sev_migration);

/*
 * Set the IPI entry points
 */
static void kvm_setup_pv_ipi(void)
{
	apic->send_IPI_mask = kvm_send_ipi_mask;
	apic->send_IPI_mask_allbutself = kvm_send_ipi_mask_allbutself;
	pr_info("setup PV IPIs\n");
}

static void kvm_smp_send_call_func_ipi(const struct cpumask *mask)
{
	int cpu;

	native_send_call_func_ipi(mask);

	/* Make sure other vCPUs get a chance to run if they need to. */
	for_each_cpu(cpu, mask) {
		if (!idle_cpu(cpu) && vcpu_is_preempted(cpu)) {
			kvm_hypercall1(KVM_HC_SCHED_YIELD, per_cpu(x86_cpu_to_apicid, cpu));
			break;
		}
	}
}

static void kvm_flush_tlb_multi(const struct cpumask *cpumask,
				const struct flush_tlb_info *info)
{
	u8 state;
	int cpu;
	struct kvm_steal_time *src;
	struct cpumask *flushmask = this_cpu_cpumask_var_ptr(__pv_cpu_mask);

	cpumask_copy(flushmask, cpumask);
	/*
	 * We have to call flush only on online vCPUs, and
	 * queue flush_on_enter for preempted vCPUs.
	 */
	for_each_cpu(cpu, flushmask) {
		/*
		 * The local vCPU is never preempted, so we do not explicitly
		 * skip the check for the local vCPU - it will never be
		 * cleared from flushmask.
		 */
		src = &per_cpu(steal_time, cpu);
		state = READ_ONCE(src->preempted);
		if ((state & KVM_VCPU_PREEMPTED)) {
			if (try_cmpxchg(&src->preempted, &state,
					state | KVM_VCPU_FLUSH_TLB))
				__cpumask_clear_cpu(cpu, flushmask);
		}
	}

	native_flush_tlb_multi(flushmask, info);
}

static __init int kvm_alloc_cpumask(void)
{
	int cpu;

	if (!kvm_para_available() || nopv)
		return 0;

	if (pv_tlb_flush_supported() || pv_ipi_supported())
		for_each_possible_cpu(cpu) {
			zalloc_cpumask_var_node(per_cpu_ptr(&__pv_cpu_mask, cpu),
				GFP_KERNEL, cpu_to_node(cpu));
		}

	return 0;
}
arch_initcall(kvm_alloc_cpumask);

static void __init kvm_smp_prepare_boot_cpu(void)
{
	/*
	 * Map the per-cpu variables as decrypted before kvm_guest_cpu_init()
	 * shares the guest physical address with the hypervisor.
	 */
	sev_map_percpu_data();

	kvm_guest_cpu_init();
	native_smp_prepare_boot_cpu();
	kvm_spinlock_init();
}

static int kvm_cpu_down_prepare(unsigned int cpu)
{
	unsigned long flags;

	local_irq_save(flags);
	kvm_guest_cpu_offline(false);
	local_irq_restore(flags);
	return 0;
}

#endif

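/*
 * Syscore suspend/resume: the per-CPU PV state is torn down on suspend and
 * re-registered on resume. Whether guest-side halt polling was active is
 * remembered via MSR_KVM_POLL_CONTROL so it can be restored afterwards.
 */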
static int kvm_suspend(void)
{
	u64 val = 0;

	kvm_guest_cpu_offline(false);

#ifdef CONFIG_ARCH_CPUIDLE_HALTPOLL
	if (kvm_para_has_feature(KVM_FEATURE_POLL_CONTROL))
		rdmsrl(MSR_KVM_POLL_CONTROL, val);
	has_guest_poll = !(val & 1);
#endif
	return 0;
}

static void kvm_resume(void)
{
	kvm_cpu_online(raw_smp_processor_id());

#ifdef CONFIG_ARCH_CPUIDLE_HALTPOLL
	if (kvm_para_has_feature(KVM_FEATURE_POLL_CONTROL) && has_guest_poll)
		wrmsrl(MSR_KVM_POLL_CONTROL, 0);
#endif
}

static struct syscore_ops kvm_syscore_ops = {
	.suspend	= kvm_suspend,
	.resume		= kvm_resume,
};

static void kvm_pv_guest_cpu_reboot(void *unused)
{
	kvm_guest_cpu_offline(true);
}

static int kvm_pv_reboot_notify(struct notifier_block *nb,
				unsigned long code, void *unused)
{
	if (code == SYS_RESTART)
		on_each_cpu(kvm_pv_guest_cpu_reboot, NULL, 1);
	return NOTIFY_DONE;
}

static struct notifier_block kvm_pv_reboot_nb = {
	.notifier_call = kvm_pv_reboot_notify,
};

/*
 * After a PV feature is registered, the host will keep writing to the
 * registered memory location. If the guest happens to shutdown, this memory
 * won't be valid. In cases like kexec, in which you install a new kernel,
 * this means the host would keep writing to a random memory location.
 */
#ifdef CONFIG_KEXEC_CORE
static void kvm_crash_shutdown(struct pt_regs *regs)
{
	kvm_guest_cpu_offline(true);
	native_machine_crash_shutdown(regs);
}
#endif

#if defined(CONFIG_X86_32) || !defined(CONFIG_SMP)
bool __kvm_vcpu_is_preempted(long cpu);

__visible bool __kvm_vcpu_is_preempted(long cpu)
{
	struct kvm_steal_time *src = &per_cpu(steal_time, cpu);

	return !!(src->preempted & KVM_VCPU_PREEMPTED);
}
PV_CALLEE_SAVE_REGS_THUNK(__kvm_vcpu_is_preempted);

#else

#include <asm/asm-offsets.h>

extern bool __raw_callee_save___kvm_vcpu_is_preempted(long);

/*
 * Hand-optimized version for x86-64 that avoids saving and restoring 8
 * 64-bit registers to/from the stack.
 */
asm(
".pushsection .text;"
".global __raw_callee_save___kvm_vcpu_is_preempted;"
".type __raw_callee_save___kvm_vcpu_is_preempted, @function;"
"__raw_callee_save___kvm_vcpu_is_preempted:"
ASM_ENDBR
"movq __per_cpu_offset(,%rdi,8), %rax;"
"cmpb $0, " __stringify(KVM_STEAL_TIME_preempted) "+steal_time(%rax);"
"setne %al;"
ASM_RET
".size __raw_callee_save___kvm_vcpu_is_preempted, .-__raw_callee_save___kvm_vcpu_is_preempted;"
".popsection");

#endif

static void __init kvm_guest_init(void)
{
	int i;

	paravirt_ops_setup();
	register_reboot_notifier(&kvm_pv_reboot_nb);
	for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++)
		raw_spin_lock_init(&async_pf_sleepers[i].lock);

	if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
		has_steal_clock = 1;
		static_call_update(pv_steal_clock, kvm_steal_clock);

		pv_ops.lock.vcpu_is_preempted =
			PV_CALLEE_SAVE(__kvm_vcpu_is_preempted);
	}

	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
		apic_set_eoi_write(kvm_guest_apic_eoi_write);

	if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF_INT) && kvmapf) {
		static_branch_enable(&kvm_async_pf_enabled);
		alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR, asm_sysvec_kvm_asyncpf_interrupt);
	}

#ifdef CONFIG_SMP
	if (pv_tlb_flush_supported()) {
		pv_ops.mmu.flush_tlb_multi = kvm_flush_tlb_multi;
		pv_ops.mmu.tlb_remove_table = tlb_remove_table;
		pr_info("KVM setup pv remote TLB flush\n");
	}

	smp_ops.smp_prepare_boot_cpu = kvm_smp_prepare_boot_cpu;
	if (pv_sched_yield_supported()) {
		smp_ops.send_call_func_ipi = kvm_smp_send_call_func_ipi;
		pr_info("setup PV sched yield\n");
	}
	if (cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "x86/kvm:online",
				      kvm_cpu_online, kvm_cpu_down_prepare) < 0)
		pr_err("failed to install cpu hotplug callbacks\n");
#else
	sev_map_percpu_data();
	kvm_guest_cpu_init();
#endif

#ifdef CONFIG_KEXEC_CORE
	machine_ops.crash_shutdown = kvm_crash_shutdown;
#endif

	register_syscore_ops(&kvm_syscore_ops);

	/*
	 * Hard lockup detection is enabled by default. Disable it, as guests
	 * can get false positives too easily, for example if the host is
	 * overcommitted.
	 */
	hardlockup_detector_disable();
}

static noinline uint32_t __kvm_cpuid_base(void)
{
	if (boot_cpu_data.cpuid_level < 0)
		return 0;	/* So we don't blow up on old processors */

	if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
		return hypervisor_cpuid_base(KVM_SIGNATURE, 0);

	return 0;
}

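/*
 * The KVM CPUID base leaf is detected once and then cached; a cached value
 * of zero means this is not a KVM guest, which is what kvm_para_available()
 * tests.
 */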
static inline uint32_t kvm_cpuid_base(void)
{
	static int kvm_cpuid_base = -1;

	if (kvm_cpuid_base == -1)
		kvm_cpuid_base = __kvm_cpuid_base();

	return kvm_cpuid_base;
}

bool kvm_para_available(void)
{
	return kvm_cpuid_base() != 0;
}
EXPORT_SYMBOL_GPL(kvm_para_available);

unsigned int kvm_arch_para_features(void)
{
	return cpuid_eax(kvm_cpuid_base() | KVM_CPUID_FEATURES);
}

unsigned int kvm_arch_para_hints(void)
{
	return cpuid_edx(kvm_cpuid_base() | KVM_CPUID_FEATURES);
}
EXPORT_SYMBOL_GPL(kvm_arch_para_hints);

static uint32_t __init kvm_detect(void)
{
	return kvm_cpuid_base();
}

static void __init kvm_apic_init(void)
{
#ifdef CONFIG_SMP
	if (pv_ipi_supported())
		kvm_setup_pv_ipi();
#endif
}

static bool __init kvm_msi_ext_dest_id(void)
{
	return kvm_para_has_feature(KVM_FEATURE_MSI_EXT_DEST_ID);
}

static void kvm_sev_hc_page_enc_status(unsigned long pfn, int npages, bool enc)
{
	kvm_sev_hypercall3(KVM_HC_MAP_GPA_RANGE, pfn << PAGE_SHIFT, npages,
			   KVM_MAP_GPA_RANGE_ENC_STAT(enc) | KVM_MAP_GPA_RANGE_PAGE_SZ_4K);
}

static void __init kvm_init_platform(void)
{
	if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT) &&
	    kvm_para_has_feature(KVM_FEATURE_MIGRATION_CONTROL)) {
		unsigned long nr_pages;
		int i;

		pv_ops.mmu.notify_page_enc_status_changed =
			kvm_sev_hc_page_enc_status;

		/*
		 * Reset the host's shared pages list related to kernel
		 * specific page encryption status settings before we load a
		 * new kernel by kexec. Reset the page encryption status
		 * during early boot instead of just before kexec to avoid SMP
		 * races during kvm_pv_guest_cpu_reboot().
		 * NOTE: We cannot reset the complete shared pages list
		 * here as we need to retain the UEFI/OVMF firmware
		 * specific settings.
		 */

		for (i = 0; i < e820_table->nr_entries; i++) {
			struct e820_entry *entry = &e820_table->entries[i];

			if (entry->type != E820_TYPE_RAM)
				continue;

			nr_pages = DIV_ROUND_UP(entry->size, PAGE_SIZE);

			kvm_sev_hypercall3(KVM_HC_MAP_GPA_RANGE, entry->addr,
					   nr_pages,
					   KVM_MAP_GPA_RANGE_ENCRYPTED | KVM_MAP_GPA_RANGE_PAGE_SZ_4K);
		}

		/*
		 * Ensure that _bss_decrypted section is marked as decrypted in the
		 * shared pages list.
		 */
		nr_pages = DIV_ROUND_UP(__end_bss_decrypted - __start_bss_decrypted,
					PAGE_SIZE);
		early_set_mem_enc_dec_hypercall((unsigned long)__start_bss_decrypted,
						nr_pages, 0);

		/*
		 * If not booted using EFI, enable Live migration support.
		 */
		if (!efi_enabled(EFI_BOOT))
			wrmsrl(MSR_KVM_MIGRATION_CONTROL,
			       KVM_MIGRATION_READY);
	}
	kvmclock_init();
	x86_platform.apic_post_init = kvm_apic_init;
}

#if defined(CONFIG_AMD_MEM_ENCRYPT)
static void kvm_sev_es_hcall_prepare(struct ghcb *ghcb, struct pt_regs *regs)
{
	/* RAX and CPL are already in the GHCB */
	ghcb_set_rbx(ghcb, regs->bx);
	ghcb_set_rcx(ghcb, regs->cx);
	ghcb_set_rdx(ghcb, regs->dx);
	ghcb_set_rsi(ghcb, regs->si);
}

static bool kvm_sev_es_hcall_finish(struct ghcb *ghcb, struct pt_regs *regs)
{
	/* No checking of the return state needed */
	return true;
}
#endif

const __initconst struct hypervisor_x86 x86_hyper_kvm = {
	.name				= "KVM",
	.detect				= kvm_detect,
	.type				= X86_HYPER_KVM,
	.init.guest_late_init		= kvm_guest_init,
	.init.x2apic_available		= kvm_para_available,
	.init.msi_ext_dest_id		= kvm_msi_ext_dest_id,
	.init.init_platform		= kvm_init_platform,
#if defined(CONFIG_AMD_MEM_ENCRYPT)
	.runtime.sev_es_hcall_prepare	= kvm_sev_es_hcall_prepare,
	.runtime.sev_es_hcall_finish	= kvm_sev_es_hcall_finish,
#endif
};

static __init int activate_jump_labels(void)
{
	if (has_steal_clock) {
		static_key_slow_inc(&paravirt_steal_enabled);
		if (steal_acc)
			static_key_slow_inc(&paravirt_steal_rq_enabled);
	}

	return 0;
}
arch_initcall(activate_jump_labels);

#ifdef CONFIG_PARAVIRT_SPINLOCKS

/* Kick a cpu by its apicid. Used to wake up a halted vcpu */
static void kvm_kick_cpu(int cpu)
{
	int apicid;
	unsigned long flags = 0;

	apicid = per_cpu(x86_cpu_to_apicid, cpu);
	kvm_hypercall2(KVM_HC_KICK_CPU, flags, apicid);
}

#include <asm/qspinlock.h>

static void kvm_wait(u8 *ptr, u8 val)
{
	if (in_nmi())
		return;

	/*
	 * Halt until it's our turn and we get kicked. Note that we do a safe
	 * halt for the irqs-enabled case to avoid hanging when the lock info
	 * is overwritten in the irq spinlock slowpath and no spurious
	 * interrupt occurs to save us.
	 */
	if (irqs_disabled()) {
		if (READ_ONCE(*ptr) == val)
			halt();
	} else {
		local_irq_disable();

		/* safe_halt() will enable IRQ */
		if (READ_ONCE(*ptr) == val)
			safe_halt();
		else
			local_irq_enable();
	}
}

/*
 * Setup pv_lock_ops to exploit KVM_FEATURE_PV_UNHALT if present.
 */
void __init kvm_spinlock_init(void)
{
	/*
	 * In case host doesn't support KVM_FEATURE_PV_UNHALT there is still an
	 * advantage of keeping virt_spin_lock_key enabled: virt_spin_lock() is
	 * preferred over native qspinlock when vCPU is preempted.
	 */
	if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT)) {
		pr_info("PV spinlocks disabled, no host support\n");
		return;
	}

	/*
	 * Disable PV spinlocks and use native qspinlock when dedicated pCPUs
	 * are available.
	 */
	if (kvm_para_has_hint(KVM_HINTS_REALTIME)) {
		pr_info("PV spinlocks disabled with KVM_HINTS_REALTIME hints\n");
		goto out;
	}

	if (num_possible_cpus() == 1) {
		pr_info("PV spinlocks disabled, single CPU\n");
		goto out;
	}

	if (nopvspin) {
		pr_info("PV spinlocks disabled, forced by \"nopvspin\" parameter\n");
		goto out;
	}

	pr_info("PV spinlocks enabled\n");

	__pv_init_lock_hash();
	pv_ops.lock.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
	pv_ops.lock.queued_spin_unlock =
		PV_CALLEE_SAVE(__pv_queued_spin_unlock);
	pv_ops.lock.wait = kvm_wait;
	pv_ops.lock.kick = kvm_kick_cpu;

	/*
	 * When PV spinlocks are enabled they are preferred over
	 * virt_spin_lock(), so virt_spin_lock_key's value is meaningless.
	 * Just disable it anyway.
	 */
out:
	static_branch_disable(&virt_spin_lock_key);
}

#endif	/* CONFIG_PARAVIRT_SPINLOCKS */

#ifdef CONFIG_ARCH_CPUIDLE_HALTPOLL

static void kvm_disable_host_haltpoll(void *i)
{
	wrmsrl(MSR_KVM_POLL_CONTROL, 0);
}

static void kvm_enable_host_haltpoll(void *i)
{
	wrmsrl(MSR_KVM_POLL_CONTROL, 1);
}

void arch_haltpoll_enable(unsigned int cpu)
{
	if (!kvm_para_has_feature(KVM_FEATURE_POLL_CONTROL)) {
		pr_err_once("host does not support poll control\n");
		pr_err_once("host upgrade recommended\n");
		return;
	}

	/* Enabling guest halt poll disables host halt poll */
	smp_call_function_single(cpu, kvm_disable_host_haltpoll, NULL, 1);
}
EXPORT_SYMBOL_GPL(arch_haltpoll_enable);

void arch_haltpoll_disable(unsigned int cpu)
{
	if (!kvm_para_has_feature(KVM_FEATURE_POLL_CONTROL))
		return;

	/* Disabling guest halt poll re-enables host halt poll */
	smp_call_function_single(cpu, kvm_enable_host_haltpoll, NULL, 1);
}
EXPORT_SYMBOL_GPL(arch_haltpoll_disable);
#endif
