/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/hyperv.h>
#include <asm/hyperv.h>
#include "hyperv_vmbus.h"

/* The one and only */
struct hv_context hv_context = {
	.synic_initialized = false,
	.hypercall_page = NULL,
	.signal_event_param = NULL,
	.signal_event_buffer = NULL,
};

/*
 * query_hypervisor_presence
 * - Query CPUID for the presence of the Windows hypervisor
 */
static int query_hypervisor_presence(void)
{
	unsigned int eax;
	unsigned int ebx;
	unsigned int ecx;
	unsigned int edx;
	unsigned int op;

	eax = 0;
	ebx = 0;
	ecx = 0;
	edx = 0;
	op = HVCPUID_VERSION_FEATURES;
	cpuid(op, &eax, &ebx, &ecx, &edx);

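	/*
	 * HVCPUID_VERSION_FEATURES is assumed here to be the standard
	 * CPUID leaf 1; ECX bit 31 of that leaf is the generic
	 * "running under a hypervisor" bit, and HV_PRESENT_BIT is
	 * expected to be the mask for that bit.
	 */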
	return ecx & HV_PRESENT_BIT;
}

/*
 * query_hypervisor_info - Get version info of the Windows hypervisor
 */
static int query_hypervisor_info(void)
{
	unsigned int eax;
	unsigned int ebx;
	unsigned int ecx;
	unsigned int edx;
	unsigned int max_leaf;
	unsigned int op;

	/*
	 * It is assumed that this is called after confirming that Viridian
	 * is present. Query id and revision.
	 */
	eax = 0;
	ebx = 0;
	ecx = 0;
	edx = 0;
	op = HVCPUID_VENDOR_MAXFUNCTION;
	cpuid(op, &eax, &ebx, &ecx, &edx);

	max_leaf = eax;

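	/*
	 * If the version leaf is available, decode and log it. That leaf
	 * is expected to report EAX = build number, EBX = major.minor
	 * version, ECX = service pack, and EDX = service branch/number,
	 * which is what the pr_info() below prints.
	 */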
	if (max_leaf >= HVCPUID_VERSION) {
		eax = 0;
		ebx = 0;
		ecx = 0;
		edx = 0;
		op = HVCPUID_VERSION;
		cpuid(op, &eax, &ebx, &ecx, &edx);
		pr_info("Hyper-V Host OS Build:%d-%d.%d-%d-%d.%d\n",
			eax,
			ebx >> 16,
			ebx & 0xFFFF,
			ecx,
			edx >> 24,
			edx & 0xFFFFFF);
	}
	return max_leaf;
}

/*
 * do_hypercall - Invoke the specified hypercall
 */
static u64 do_hypercall(u64 control, void *input, void *output)
{
#ifdef CONFIG_X86_64
	u64 hv_status = 0;
	u64 input_address = (input) ? virt_to_phys(input) : 0;
	u64 output_address = (output) ? virt_to_phys(output) : 0;
	void *hypercall_page = hv_context.hypercall_page;

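	/*
	 * x86_64 hypercall calling convention, as reflected by the
	 * constraints below: RCX = control code, RDX = input GPA,
	 * R8 = output GPA; the call into the hypercall page returns
	 * the status in RAX.
	 */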
	__asm__ __volatile__("mov %0, %%r8" : : "r" (output_address) : "r8");
	__asm__ __volatile__("call *%3" : "=a" (hv_status) :
			     "c" (control), "d" (input_address),
			     "m" (hypercall_page));

	return hv_status;

#else

	u32 control_hi = control >> 32;
	u32 control_lo = control & 0xFFFFFFFF;
	u32 hv_status_hi = 1;
	u32 hv_status_lo = 1;
	u64 input_address = (input) ? virt_to_phys(input) : 0;
	u32 input_address_hi = input_address >> 32;
	u32 input_address_lo = input_address & 0xFFFFFFFF;
	u64 output_address = (output) ? virt_to_phys(output) : 0;
	u32 output_address_hi = output_address >> 32;
	u32 output_address_lo = output_address & 0xFFFFFFFF;
	void *hypercall_page = hv_context.hypercall_page;

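	/*
	 * 32-bit hypercall calling convention, as assumed from the
	 * register constraints below: EDX:EAX = control code,
	 * EBX:ECX = input GPA, EDI:ESI = output GPA; the status is
	 * returned in EDX:EAX.
	 */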
	__asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
			      "=a"(hv_status_lo) : "d" (control_hi),
			      "a" (control_lo), "b" (input_address_hi),
			      "c" (input_address_lo), "D"(output_address_hi),
			      "S"(output_address_lo), "m" (hypercall_page));

	return hv_status_lo | ((u64)hv_status_hi << 32);
#endif /* !x86_64 */
}

/*
 * hv_init - Main initialization routine.
 *
 * This routine must be called before any other routines in here are called.
 */
int hv_init(void)
{
	int max_leaf;
	union hv_x64_msr_hypercall_contents hypercall_msr;
	void *virtaddr = NULL;

	memset(hv_context.synic_event_page, 0, sizeof(void *) * NR_CPUS);
	memset(hv_context.synic_message_page, 0,
	       sizeof(void *) * NR_CPUS);

	if (!query_hypervisor_presence())
		goto cleanup;

	max_leaf = query_hypervisor_info();

	/* Write our OS info */
	wrmsrl(HV_X64_MSR_GUEST_OS_ID, HV_LINUX_GUEST_ID);
	hv_context.guestid = HV_LINUX_GUEST_ID;

	/* See if the hypercall page is already set */
	rdmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);

	virtaddr = __vmalloc(PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL_EXEC);

	if (!virtaddr)
		goto cleanup;

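	/*
	 * Point the hypervisor at the page we just allocated: writing the
	 * enable bit and the guest page frame number into
	 * HV_X64_MSR_HYPERCALL asks the hypervisor to place its hypercall
	 * trampoline code in that page.
	 */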
	hypercall_msr.enable = 1;

	hypercall_msr.guest_physical_address = vmalloc_to_pfn(virtaddr);
	wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);

	/* Confirm that the hypercall page did get set up. */
	hypercall_msr.as_uint64 = 0;
	rdmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);

	if (!hypercall_msr.enable)
		goto cleanup;

	hv_context.hypercall_page = virtaddr;

	/* Setup the global signal event param for the signal event hypercall */
	hv_context.signal_event_buffer =
			kmalloc(sizeof(struct hv_input_signal_event_buffer),
				GFP_KERNEL);
	if (!hv_context.signal_event_buffer)
		goto cleanup;

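	/*
	 * Hypercall input parameters must be suitably aligned. The buffer
	 * type is assumed to carry enough padding that aligning up by
	 * HV_HYPERCALL_PARAM_ALIGN still leaves room for the parameter
	 * structure, so carve the aligned block out of the allocation.
	 */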
	hv_context.signal_event_param =
		(struct hv_input_signal_event *)
			(ALIGN((unsigned long)
				hv_context.signal_event_buffer,
				HV_HYPERCALL_PARAM_ALIGN));
	hv_context.signal_event_param->connectionid.asu32 = 0;
	hv_context.signal_event_param->connectionid.u.id =
		VMBUS_EVENT_CONNECTION_ID;
	hv_context.signal_event_param->flag_number = 0;
	hv_context.signal_event_param->rsvdz = 0;

	return 0;

cleanup:
	if (virtaddr) {
		if (hypercall_msr.enable) {
			hypercall_msr.as_uint64 = 0;
			wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
		}

		vfree(virtaddr);
	}

	return -ENOTSUPP;
}

/*
 * hv_cleanup - Cleanup routine.
 *
 * This routine is normally called during driver unloading or exiting.
 */
void hv_cleanup(void)
{
	union hv_x64_msr_hypercall_contents hypercall_msr;

	/* Reset our OS id */
	wrmsrl(HV_X64_MSR_GUEST_OS_ID, 0);

	kfree(hv_context.signal_event_buffer);
	hv_context.signal_event_buffer = NULL;
	hv_context.signal_event_param = NULL;

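	/*
	 * Disable the hypercall page in the MSR before freeing the backing
	 * memory, so the hypervisor no longer references that page.
	 */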
	if (hv_context.hypercall_page) {
		hypercall_msr.as_uint64 = 0;
		wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
		vfree(hv_context.hypercall_page);
		hv_context.hypercall_page = NULL;
	}
}

/*
 * hv_post_message - Post a message using the hypervisor message IPC.
 *
 * This involves a hypercall.
 */
u16 hv_post_message(union hv_connection_id connection_id,
		    enum hv_message_type message_type,
		    void *payload, size_t payload_size)
{
	struct aligned_input {
		u64 alignment8;
		struct hv_input_post_message msg;
	};

	struct hv_input_post_message *aligned_msg;
	u16 status;
	unsigned long addr;

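	/*
	 * Hypercall input must be properly aligned; the aligned_input
	 * wrapper above over-allocates by one u64 (alignment8) so the
	 * message can be aligned up to HV_HYPERCALL_PARAM_ALIGN within
	 * the allocation.
	 */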
	if (payload_size > HV_MESSAGE_PAYLOAD_BYTE_COUNT)
		return -EMSGSIZE;

	addr = (unsigned long)kmalloc(sizeof(struct aligned_input), GFP_ATOMIC);
	if (!addr)
		return -ENOMEM;

	aligned_msg = (struct hv_input_post_message *)
			(ALIGN(addr, HV_HYPERCALL_PARAM_ALIGN));

	aligned_msg->connectionid = connection_id;
	aligned_msg->message_type = message_type;
	aligned_msg->payload_size = payload_size;
	memcpy((void *)aligned_msg->payload, payload, payload_size);

	status = do_hypercall(HVCALL_POST_MESSAGE, aligned_msg, NULL)
		& 0xFFFF;

	kfree((void *)addr);

	return status;
}

/*
 * hv_signal_event -
 * Signal an event on the specified connection using the hypervisor event IPC.
 *
 * This involves a hypercall.
 */
u16 hv_signal_event(void)
{
	u16 status;

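	/*
	 * Use the pre-built global parameter block set up in hv_init(),
	 * which targets the VMBus event connection, so no per-call
	 * allocation is needed here.
	 */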
	status = do_hypercall(HVCALL_SIGNAL_EVENT,
			      hv_context.signal_event_param,
			      NULL) & 0xFFFF;
	return status;
}

/*
 * hv_synic_init - Initialize the Synthetic Interrupt Controller.
 *
 * If it is already initialized by another entity (i.e., the x2v shim), we
 * need to retrieve the initialized message and event pages. Otherwise, we
 * create and initialize the message and event pages.
 */
void hv_synic_init(void *irqarg)
{
	u64 version;
	union hv_synic_simp simp;
	union hv_synic_siefp siefp;
	union hv_synic_sint shared_sint;
	union hv_synic_scontrol sctrl;

	u32 irq_vector = *((u32 *)(irqarg));
	int cpu = smp_processor_id();

	if (!hv_context.hypercall_page)
		return;

	/* Check the version */
	rdmsrl(HV_X64_MSR_SVERSION, version);

	hv_context.synic_message_page[cpu] =
		(void *)get_zeroed_page(GFP_ATOMIC);

	if (hv_context.synic_message_page[cpu] == NULL) {
		pr_err("Unable to allocate SYNIC message page\n");
		goto cleanup;
	}

	hv_context.synic_event_page[cpu] =
		(void *)get_zeroed_page(GFP_ATOMIC);

	if (hv_context.synic_event_page[cpu] == NULL) {
		pr_err("Unable to allocate SYNIC event page\n");
		goto cleanup;
	}

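	/*
	 * Hand the per-cpu pages to the hypervisor: the SIMP and SIEFP
	 * MSRs each take an enable bit plus the guest page frame number
	 * of the message page and event-flags page respectively.
	 */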
	/* Setup the Synic's message page */
	rdmsrl(HV_X64_MSR_SIMP, simp.as_uint64);
	simp.simp_enabled = 1;
	simp.base_simp_gpa = virt_to_phys(hv_context.synic_message_page[cpu])
		>> PAGE_SHIFT;

	wrmsrl(HV_X64_MSR_SIMP, simp.as_uint64);

	/* Setup the Synic's event page */
	rdmsrl(HV_X64_MSR_SIEFP, siefp.as_uint64);
	siefp.siefp_enabled = 1;
	siefp.base_siefp_gpa = virt_to_phys(hv_context.synic_event_page[cpu])
		>> PAGE_SHIFT;

	wrmsrl(HV_X64_MSR_SIEFP, siefp.as_uint64);

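	/*
	 * Route VMBus messages to the interrupt vector passed in by the
	 * caller: each SINT register holds a vector plus mask and
	 * auto-EOI control bits for one synthetic interrupt source.
	 */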
	/* Setup the shared SINT. */
	rdmsrl(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT, shared_sint.as_uint64);

	shared_sint.as_uint64 = 0;
	shared_sint.vector = irq_vector; /* HV_SHARED_SINT_IDT_VECTOR + 0x20; */
	shared_sint.masked = false;
	shared_sint.auto_eoi = false;

	wrmsrl(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT, shared_sint.as_uint64);

	/* Enable the global synic bit */
	rdmsrl(HV_X64_MSR_SCONTROL, sctrl.as_uint64);
	sctrl.enable = 1;

	wrmsrl(HV_X64_MSR_SCONTROL, sctrl.as_uint64);

	hv_context.synic_initialized = true;
	return;

cleanup:
	if (hv_context.synic_event_page[cpu])
		free_page((unsigned long)hv_context.synic_event_page[cpu]);

	if (hv_context.synic_message_page[cpu])
		free_page((unsigned long)hv_context.synic_message_page[cpu]);
	return;
}

/*
 * hv_synic_cleanup - Cleanup routine for hv_synic_init().
 */
void hv_synic_cleanup(void *arg)
{
	union hv_synic_sint shared_sint;
	union hv_synic_simp simp;
	union hv_synic_siefp siefp;
	int cpu = smp_processor_id();

	if (!hv_context.synic_initialized)
		return;

	rdmsrl(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT, shared_sint.as_uint64);

	shared_sint.masked = 1;

	/* Need to correctly cleanup in the case of SMP!!! */
	/* Disable the interrupt */
	wrmsrl(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT, shared_sint.as_uint64);

	rdmsrl(HV_X64_MSR_SIMP, simp.as_uint64);
	simp.simp_enabled = 0;
	simp.base_simp_gpa = 0;

	wrmsrl(HV_X64_MSR_SIMP, simp.as_uint64);

	rdmsrl(HV_X64_MSR_SIEFP, siefp.as_uint64);
	siefp.siefp_enabled = 0;
	siefp.base_siefp_gpa = 0;

	wrmsrl(HV_X64_MSR_SIEFP, siefp.as_uint64);

	free_page((unsigned long)hv_context.synic_message_page[cpu]);
	free_page((unsigned long)hv_context.synic_event_page[cpu]);
}