/* SPDX-License-Identifier: GPL-2.0 */

/*
 * Linux-specific definitions for managing interactions with Microsoft's
 * Hyper-V hypervisor. The definitions in this file are architecture
 * independent. See arch/<arch>/include/asm/mshyperv.h for definitions
 * that are specific to architecture <arch>.
 *
 * Definitions that are specified in the Hyper-V Top Level Functional
 * Spec (TLFS) should not go in this file, but should instead go in
 * hyperv-tlfs.h.
 *
 * Copyright (C) 2019, Microsoft, Inc.
 *
 * Author : Michael Kelley <mikelley@microsoft.com>
 */

#ifndef _ASM_GENERIC_MSHYPERV_H
#define _ASM_GENERIC_MSHYPERV_H

#include <linux/types.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/cpumask.h>
#include <linux/nmi.h>
#include <asm/ptrace.h>
#include <asm/hyperv-tlfs.h>

struct ms_hyperv_info {
	u32 features;
	u32 priv_high;
	u32 misc_features;
	u32 hints;
	u32 nested_features;
	u32 max_vp_index;
	u32 max_lp_index;
	u32 isolation_config_a;
	union {
		u32 isolation_config_b;
		struct {
			u32 cvm_type : 4;
			u32 reserved1 : 1;
			u32 shared_gpa_boundary_active : 1;
			u32 shared_gpa_boundary_bits : 6;
			u32 reserved2 : 20;
		};
	};
	u64 shared_gpa_boundary;
};
extern struct ms_hyperv_info ms_hyperv;

extern void * __percpu *hyperv_pcpu_input_arg;
extern void * __percpu *hyperv_pcpu_output_arg;

extern u64 hv_do_hypercall(u64 control, void *inputaddr, void *outputaddr);
extern u64 hv_do_fast_hypercall8(u16 control, u64 input8);
extern bool hv_isolation_type_snp(void);

/* Helper functions that provide a consistent pattern for checking Hyper-V hypercall status. */
static inline int hv_result(u64 status)
{
	return status & HV_HYPERCALL_RESULT_MASK;
}

static inline bool hv_result_success(u64 status)
{
	return hv_result(status) == HV_STATUS_SUCCESS;
}

static inline unsigned int hv_repcomp(u64 status)
{
	/* Bits [43:32] of status have 'Reps completed' data. */
	return (status & HV_HYPERCALL_REP_COMP_MASK) >>
		HV_HYPERCALL_REP_COMP_OFFSET;
}

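/*
 * Usage sketch: a typical caller checks hypercall status with the
 * helpers above. 'control', 'input' and 'output' here are hypothetical,
 * pre-built hypercall arguments.
 *
 *	u64 status = hv_do_hypercall(control, input, output);
 *
 *	if (!hv_result_success(status))
 *		pr_err("hypercall failed: 0x%x\n", hv_result(status));
 */
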
/*
 * Rep hypercalls. Callers of this function are expected to ensure that
 * rep_count and varhead_size comply with the Hyper-V hypercall definition.
 */
static inline u64 hv_do_rep_hypercall(u16 code, u16 rep_count, u16 varhead_size,
				      void *input, void *output)
{
	u64 control = code;
	u64 status;
	u16 rep_comp;

	control |= (u64)varhead_size << HV_HYPERCALL_VARHEAD_OFFSET;
	control |= (u64)rep_count << HV_HYPERCALL_REP_COMP_OFFSET;

	do {
		status = hv_do_hypercall(control, input, output);
		if (!hv_result_success(status))
			return status;

		rep_comp = hv_repcomp(status);

		control &= ~HV_HYPERCALL_REP_START_MASK;
		control |= (u64)rep_comp << HV_HYPERCALL_REP_START_OFFSET;

		touch_nmi_watchdog();
	} while (rep_comp < rep_count);

	return status;
}

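/*
 * Usage sketch: hv_do_rep_hypercall() resumes from the last completed
 * element on each iteration, so a caller builds the input list once and
 * checks only the final status. HVCALL_FOO and the argument values here
 * are hypothetical.
 *
 *	status = hv_do_rep_hypercall(HVCALL_FOO, rep_count, 0,
 *				     input_page, NULL);
 *	if (!hv_result_success(status))
 *		return hv_result(status);
 */
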
/* Generate the guest OS identifier as described in the Hyper-V TLFS */
static inline __u64 generate_guest_id(__u64 d_info1, __u64 kernel_version,
				      __u64 d_info2)
{
	__u64 guest_id = 0;

	guest_id = (((__u64)HV_LINUX_VENDOR_ID) << 48);
	guest_id |= (d_info1 << 48);
	guest_id |= (kernel_version << 16);
	guest_id |= d_info2;

	return guest_id;
}

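/*
 * Layout sketch: HV_LINUX_VENDOR_ID (and any d_info1 bits) lands in
 * bits 63:48, the kernel version in bits 47:16, and d_info2 in the low
 * 16 bits. Architecture init code typically passes LINUX_VERSION_CODE
 * with zeroed distro info:
 *
 *	guest_id = generate_guest_id(0, LINUX_VERSION_CODE, 0);
 */
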
/* Free the message slot and signal end-of-message if required */
static inline void vmbus_signal_eom(struct hv_message *msg, u32 old_msg_type)
{
	/*
	 * On crash we're reading some other CPU's message page and we need
	 * to be careful: that CPU may have already cleared the header and
	 * the host may have already delivered another message there. If we
	 * blindly write msg->header.message_type, we may lose it. We can
	 * still lose a message of the same type, but we count on the fact
	 * that there can only be one CHANNELMSG_UNLOAD_RESPONSE and we
	 * don't care about other messages on crash.
	 */
	if (cmpxchg(&msg->header.message_type, old_msg_type,
		    HVMSG_NONE) != old_msg_type)
		return;

	/*
	 * The cmpxchg() above acts as an implicit memory barrier,
	 * ensuring that the write to MessageType (i.e., setting it to
	 * HVMSG_NONE) happens before we read MessagePending and signal
	 * EOM. Otherwise, the EOM will not deliver any more messages
	 * since there is no empty slot.
	 */
	if (msg->header.message_flags.msg_pending) {
		/*
		 * This will cause the message queue to be rescanned,
		 * possibly delivering another message from the
		 * hypervisor.
		 */
		hv_set_register(HV_REGISTER_EOM, 0);
	}
}

void hv_setup_vmbus_handler(void (*handler)(void));
void hv_remove_vmbus_handler(void);
void hv_setup_stimer0_handler(void (*handler)(void));
void hv_remove_stimer0_handler(void);

void hv_setup_kexec_handler(void (*handler)(void));
void hv_remove_kexec_handler(void);
void hv_setup_crash_handler(void (*handler)(struct pt_regs *regs));
void hv_remove_crash_handler(void);

extern int vmbus_interrupt;
extern int vmbus_irq;

extern bool hv_root_partition;

#if IS_ENABLED(CONFIG_HYPERV)
/*
 * The hypervisor's notion of virtual processor ID is different from
 * Linux's notion of CPU ID. This information can only be retrieved
 * in the context of the calling CPU. Set up a map for easy access
 * to this information.
 */
extern u32 *hv_vp_index;
extern u32 hv_max_vp_index;

extern u64 (*hv_read_reference_counter)(void);

/* Sentinel value for an uninitialized entry in hv_vp_index array */
#define VP_INVAL	U32_MAX

int __init hv_common_init(void);
void __init hv_common_free(void);
int hv_common_cpu_init(unsigned int cpu);
int hv_common_cpu_die(unsigned int cpu);

void *hv_alloc_hyperv_page(void);
void *hv_alloc_hyperv_zeroed_page(void);
void hv_free_hyperv_page(unsigned long addr);

/**
 * hv_cpu_number_to_vp_number() - Map CPU to VP.
 * @cpu_number: CPU number in Linux terms
 *
 * This function returns the mapping between the Linux processor
 * number and the hypervisor's virtual processor number, which is
 * useful in making hypercalls that refer to specific processors.
 *
 * Return: Virtual processor number in Hyper-V terms
 */
static inline int hv_cpu_number_to_vp_number(int cpu_number)
{
	return hv_vp_index[cpu_number];
}

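/*
 * Usage sketch: a hypercall that names a specific processor must use
 * the hypervisor's VP number, not the Linux CPU number. 'vp_index' here
 * is a hypothetical hypercall input field:
 *
 *	input->vp_index = hv_cpu_number_to_vp_number(smp_processor_id());
 */
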
static inline int __cpumask_to_vpset(struct hv_vpset *vpset,
				     const struct cpumask *cpus,
				     bool exclude_self)
{
	int cpu, vcpu, vcpu_bank, vcpu_offset, nr_bank = 1;
	int this_cpu = smp_processor_id();

	/* valid_bank_mask can represent up to 64 banks */
	if (hv_max_vp_index / 64 >= 64)
		return 0;

	/*
	 * Clear all banks up to the maximum possible bank, as
	 * hv_tlb_flush_ex structs are not cleared between calls;
	 * otherwise we risk flushing unneeded vCPUs.
	 */
	for (vcpu_bank = 0; vcpu_bank <= hv_max_vp_index / 64; vcpu_bank++)
		vpset->bank_contents[vcpu_bank] = 0;

	/*
	 * Some banks may end up being empty, but this is acceptable.
	 */
	for_each_cpu(cpu, cpus) {
		if (exclude_self && cpu == this_cpu)
			continue;
		vcpu = hv_cpu_number_to_vp_number(cpu);
		if (vcpu == VP_INVAL)
			return -1;
		vcpu_bank = vcpu / 64;
		vcpu_offset = vcpu % 64;
		__set_bit(vcpu_offset, (unsigned long *)
			  &vpset->bank_contents[vcpu_bank]);
		if (vcpu_bank >= nr_bank)
			nr_bank = vcpu_bank + 1;
	}
	vpset->valid_bank_mask = GENMASK_ULL(nr_bank - 1, 0);
	return nr_bank;
}

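/*
 * Worked example of the bank math above: VP 70 lands in bank
 * 70 / 64 = 1 at bit 70 % 64 = 6, so bit 6 of bank_contents[1] is set,
 * nr_bank becomes 2, and valid_bank_mask is GENMASK_ULL(1, 0) == 0x3.
 */
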
static inline int cpumask_to_vpset(struct hv_vpset *vpset,
				   const struct cpumask *cpus)
{
	return __cpumask_to_vpset(vpset, cpus, false);
}

static inline int cpumask_to_vpset_noself(struct hv_vpset *vpset,
					  const struct cpumask *cpus)
{
	WARN_ON_ONCE(preemptible());
	return __cpumask_to_vpset(vpset, cpus, true);
}

void hyperv_report_panic(struct pt_regs *regs, long err, bool in_die);
bool hv_is_hyperv_initialized(void);
bool hv_is_hibernation_supported(void);
enum hv_isolation_type hv_get_isolation_type(void);
bool hv_is_isolation_supported(void);
bool hv_isolation_type_snp(void);
u64 hv_ghcb_hypercall(u64 control, void *input, void *output, u32 input_size);
void hyperv_cleanup(void);
bool hv_query_ext_cap(u64 cap_query);
void hv_setup_dma_ops(struct device *dev, bool coherent);
void *hv_map_memory(void *addr, unsigned long size);
void hv_unmap_memory(void *addr);
#else /* CONFIG_HYPERV */
static inline bool hv_is_hyperv_initialized(void) { return false; }
static inline bool hv_is_hibernation_supported(void) { return false; }
static inline void hyperv_cleanup(void) {}
static inline bool hv_is_isolation_supported(void) { return false; }
static inline enum hv_isolation_type hv_get_isolation_type(void)
{
	return HV_ISOLATION_TYPE_NONE;
}
#endif /* CONFIG_HYPERV */

#endif