/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * KVM Microsoft Hyper-V emulation
 *
 * derived from arch/x86/kvm/x86.c
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright (C) 2008 Qumranet, Inc.
 * Copyright IBM Corporation, 2008
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 * Copyright (C) 2015 Andrey Smetanin <asmetanin@virtuozzo.com>
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Amit Shah    <amit.shah@qumranet.com>
 *   Ben-Ami Yassour <benami@il.ibm.com>
 *   Andrey Smetanin <asmetanin@virtuozzo.com>
 */

#ifndef __ARCH_X86_KVM_HYPERV_H__
#define __ARCH_X86_KVM_HYPERV_H__

#include <linux/kvm_host.h>

/* "Hv#1" signature */
#define HYPERV_CPUID_SIGNATURE_EAX 0x31237648

/*
 * The #defines related to the synthetic debugger are required by KDNet, but
 * they are not documented in the Hyper-V TLFS because the synthetic debugger
 * functionality has been deprecated and is subject to removal in future
 * versions of Windows.
 */
#define HYPERV_CPUID_SYNDBG_VENDOR_AND_MAX_FUNCTIONS	0x40000080
#define HYPERV_CPUID_SYNDBG_INTERFACE			0x40000081
#define HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES	0x40000082

/*
 * Hyper-V synthetic debugger platform capabilities
 * These are HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES.EAX bits.
 */
#define HV_X64_SYNDBG_CAP_ALLOW_KERNEL_DEBUGGING	BIT(1)

/* Hyper-V Synthetic debug options MSR */
#define HV_X64_MSR_SYNDBG_CONTROL		0x400000F1
#define HV_X64_MSR_SYNDBG_STATUS		0x400000F2
#define HV_X64_MSR_SYNDBG_SEND_BUFFER		0x400000F3
#define HV_X64_MSR_SYNDBG_RECV_BUFFER		0x400000F4
#define HV_X64_MSR_SYNDBG_PENDING_BUFFER	0x400000F5
#define HV_X64_MSR_SYNDBG_OPTIONS		0x400000FF

/* Hyper-V HV_X64_MSR_SYNDBG_OPTIONS bits */
#define HV_X64_SYNDBG_OPTION_USE_HCALLS		BIT(2)

static inline struct kvm_hv *to_kvm_hv(struct kvm *kvm)
{
	return &kvm->arch.hyperv;
}

static inline struct kvm_vcpu_hv *to_hv_vcpu(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.hyperv;
}

static inline struct kvm_vcpu_hv_synic *to_hv_synic(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);

	return &hv_vcpu->synic;
}

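/*
 * The SynIC is embedded in struct kvm_vcpu_hv; use container_of() to recover
 * the enclosing Hyper-V vCPU context and follow its back-pointer to the vCPU.
 */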
static inline struct kvm_vcpu *hv_synic_to_vcpu(struct kvm_vcpu_hv_synic *synic)
{
	struct kvm_vcpu_hv *hv_vcpu = container_of(synic, struct kvm_vcpu_hv, synic);

	return hv_vcpu->vcpu;
}

static inline struct kvm_hv_syndbg *to_hv_syndbg(struct kvm_vcpu *vcpu)
{
	return &vcpu->kvm->arch.hyperv.hv_syndbg;
}

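/*
 * A vCPU's default VP index equals its index in the vCPU array (vcpu_idx);
 * fall back to it when the Hyper-V vCPU context has not been allocated.
 */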
static inline u32 kvm_hv_get_vpindex(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);

	return hv_vcpu ? hv_vcpu->vp_index : vcpu->vcpu_idx;
}

int kvm_hv_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host);
int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host);

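/*
 * Hypercalls are serviced only if userspace exposed Hyper-V to the vCPU via
 * CPUID and the guest has set a non-zero guest OS ID through
 * HV_X64_MSR_GUEST_OS_ID.
 */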
static inline bool kvm_hv_hypercall_enabled(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.hyperv_enabled && to_kvm_hv(vcpu->kvm)->hv_guest_os_id;
}

int kvm_hv_hypercall(struct kvm_vcpu *vcpu);

void kvm_hv_irq_routing_update(struct kvm *kvm);
int kvm_hv_synic_set_irq(struct kvm *kvm, u32 vcpu_id, u32 sint);
void kvm_hv_synic_send_eoi(struct kvm_vcpu *vcpu, int vector);
int kvm_hv_activate_synic(struct kvm_vcpu *vcpu, bool dont_zero_synic_pages);

void kvm_hv_vcpu_uninit(struct kvm_vcpu *vcpu);

bool kvm_hv_assist_page_enabled(struct kvm_vcpu *vcpu);
bool kvm_hv_get_assist_page(struct kvm_vcpu *vcpu,
			    struct hv_vp_assist_page *assist_page);

static inline struct kvm_vcpu_hv_stimer *to_hv_stimer(struct kvm_vcpu *vcpu,
						      int timer_index)
{
	return &to_hv_vcpu(vcpu)->stimer[timer_index];
}

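/*
 * stimer->index is the timer's position in the stimer[] array, so stepping
 * back by 'index' entries yields stimer[0], from which container_of() recovers
 * the enclosing struct kvm_vcpu_hv.
 */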
static inline struct kvm_vcpu *hv_stimer_to_vcpu(struct kvm_vcpu_hv_stimer *stimer)
{
	struct kvm_vcpu_hv *hv_vcpu;

	hv_vcpu = container_of(stimer - stimer->index, struct kvm_vcpu_hv,
			       stimer[0]);
	return hv_vcpu->vcpu;
}

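/*
 * True if any synthetic timer expiration is still waiting to be delivered;
 * a vCPU without a Hyper-V context has no stimers.
 */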
static inline bool kvm_hv_has_stimer_pending(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);

	if (!hv_vcpu)
		return false;

	return !bitmap_empty(hv_vcpu->stimer_pending_bitmap,
			     HV_SYNIC_STIMER_COUNT);
}

void kvm_hv_process_stimers(struct kvm_vcpu *vcpu);

void kvm_hv_setup_tsc_page(struct kvm *kvm,
			   struct pvclock_vcpu_time_info *hv_clock);
void kvm_hv_request_tsc_page_update(struct kvm *kvm);

void kvm_hv_init_vm(struct kvm *kvm);
void kvm_hv_destroy_vm(struct kvm *kvm);
int kvm_hv_vcpu_init(struct kvm_vcpu *vcpu);
void kvm_hv_set_cpuid(struct kvm_vcpu *vcpu, bool hyperv_enabled);
int kvm_hv_set_enforce_cpuid(struct kvm_vcpu *vcpu, bool enforce);
int kvm_vm_ioctl_hv_eventfd(struct kvm *kvm, struct kvm_hyperv_eventfd *args);
int kvm_get_hv_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid,
		     struct kvm_cpuid_entry2 __user *entries);

#endif