/******************************************************************************
 * hypercall.h
 *
 * Linux-specific hypervisor handling.
 *
 * Copyright (c) 2002-2004, K A Fraser
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#ifndef _ASM_X86_XEN_HYPERCALL_H
#define _ASM_X86_XEN_HYPERCALL_H

#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>

#include <asm/page.h>
#include <asm/pgtable.h>

#include <xen/interface/xen.h>
#include <xen/interface/sched.h>
#include <xen/interface/physdev.h>

/*
 * The hypercall asms have to meet several constraints:
 * - Work on 32- and 64-bit.
 *    The two architectures put their arguments in different sets of
 *    registers.
 *
 * - Work around asm syntax quirks
 *    It isn't possible to specify one of the rNN registers in a
 *    constraint, so we use explicit register variables to get the
 *    args into the right place.
 *
 * - Mark all registers as potentially clobbered
 *    Even unused parameters can be clobbered by the hypervisor, so we
 *    need to make sure gcc knows it.
 *
 * - Avoid compiler bugs.
 *    This is the tricky part. Because x86_32 has such a constrained
 *    register set, gcc versions below 4.3 have trouble generating
 *    code when all the arg registers and memory are trashed by the
 *    asm. There are syntactically simpler ways of achieving the
 *    semantics below, but they cause the compiler to crash.
 *
 *    The only combination I found which works is:
 *     - assign the __argX variables first
 *     - list all actually used parameters as "+r" (__argX)
 *     - clobber the rest
 *
 * The result certainly isn't pretty, and it really shows up cpp's
 * weakness as a macro language. Sorry. (But let's just give thanks
 * there aren't more than 5 arguments...)
 */

extern struct { char _entry[32]; } hypercall_page[];

#define __HYPERCALL "call hypercall_page+%c[offset]"
#define __HYPERCALL_ENTRY(x) \
        [offset] "i" (__HYPERVISOR_##x * sizeof(hypercall_page[0]))
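
/*
 * Each hypercall gets a fixed 32-byte slot in the hypercall page, so
 * hypercall number N lives at hypercall_page + N * 32.  Declaring
 * hypercall_page[] as an array of 32-byte entries lets
 * __HYPERCALL_ENTRY() compute that offset with sizeof(), and the
 * "%c[offset]" modifier emits it as a bare constant in the call target.
 */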

#ifdef CONFIG_X86_32
#define __HYPERCALL_RETREG      "eax"
#define __HYPERCALL_ARG1REG     "ebx"
#define __HYPERCALL_ARG2REG     "ecx"
#define __HYPERCALL_ARG3REG     "edx"
#define __HYPERCALL_ARG4REG     "esi"
#define __HYPERCALL_ARG5REG     "edi"
#else
#define __HYPERCALL_RETREG      "rax"
#define __HYPERCALL_ARG1REG     "rdi"
#define __HYPERCALL_ARG2REG     "rsi"
#define __HYPERCALL_ARG3REG     "rdx"
#define __HYPERCALL_ARG4REG     "r10"
#define __HYPERCALL_ARG5REG     "r8"
#endif
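
/*
 * These match the Xen hypercall ABI: the return value comes back in
 * eax/rax, and up to five arguments are passed in ebx, ecx, edx, esi,
 * edi on 32-bit, or rdi, rsi, rdx, r10, r8 on 64-bit (r10 standing in
 * for the usual rcx, which the syscall instruction clobbers).
 */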

#define __HYPERCALL_DECLS \
        register unsigned long __res asm(__HYPERCALL_RETREG); \
        register unsigned long __arg1 asm(__HYPERCALL_ARG1REG) = __arg1; \
        register unsigned long __arg2 asm(__HYPERCALL_ARG2REG) = __arg2; \
        register unsigned long __arg3 asm(__HYPERCALL_ARG3REG) = __arg3; \
        register unsigned long __arg4 asm(__HYPERCALL_ARG4REG) = __arg4; \
        register unsigned long __arg5 asm(__HYPERCALL_ARG5REG) = __arg5;
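
/*
 * The asm("...") register variables pin each __argN to the register the
 * ABI expects, since there is no constraint letter for registers like
 * r10 (see the comment at the top of the file).  The self-referencing
 * "= __argN" initializers are presumably the usual gcc idiom for
 * silencing "may be used uninitialized" warnings; the real values are
 * assigned by the __HYPERCALL_nARG() macros just before the asm.
 */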

#define __HYPERCALL_0PARAM      "=r" (__res)
#define __HYPERCALL_1PARAM      __HYPERCALL_0PARAM, "+r" (__arg1)
#define __HYPERCALL_2PARAM      __HYPERCALL_1PARAM, "+r" (__arg2)
#define __HYPERCALL_3PARAM      __HYPERCALL_2PARAM, "+r" (__arg3)
#define __HYPERCALL_4PARAM      __HYPERCALL_3PARAM, "+r" (__arg4)
#define __HYPERCALL_5PARAM      __HYPERCALL_4PARAM, "+r" (__arg5)

#define __HYPERCALL_0ARG()
#define __HYPERCALL_1ARG(a1) \
        __HYPERCALL_0ARG()              __arg1 = (unsigned long)(a1);
#define __HYPERCALL_2ARG(a1,a2) \
        __HYPERCALL_1ARG(a1)            __arg2 = (unsigned long)(a2);
#define __HYPERCALL_3ARG(a1,a2,a3) \
        __HYPERCALL_2ARG(a1,a2)         __arg3 = (unsigned long)(a3);
#define __HYPERCALL_4ARG(a1,a2,a3,a4) \
        __HYPERCALL_3ARG(a1,a2,a3)      __arg4 = (unsigned long)(a4);
#define __HYPERCALL_5ARG(a1,a2,a3,a4,a5) \
        __HYPERCALL_4ARG(a1,a2,a3,a4)   __arg5 = (unsigned long)(a5);

#define __HYPERCALL_CLOBBER5    "memory"
#define __HYPERCALL_CLOBBER4    __HYPERCALL_CLOBBER5, __HYPERCALL_ARG5REG
#define __HYPERCALL_CLOBBER3    __HYPERCALL_CLOBBER4, __HYPERCALL_ARG4REG
#define __HYPERCALL_CLOBBER2    __HYPERCALL_CLOBBER3, __HYPERCALL_ARG3REG
#define __HYPERCALL_CLOBBER1    __HYPERCALL_CLOBBER2, __HYPERCALL_ARG2REG
#define __HYPERCALL_CLOBBER0    __HYPERCALL_CLOBBER1, __HYPERCALL_ARG1REG
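
/*
 * For an N-argument hypercall, __HYPERCALL_NPARAM lists the N used
 * argument registers as "+r" operands (gcc both loads them and assumes
 * they may be modified), while __HYPERCALL_CLOBBERN names the remaining
 * argument registers plus "memory" as clobbers.  Between the two lists
 * every argument register is accounted for, which is how the "mark all
 * registers as potentially clobbered" requirement above is met.
 */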

#define _hypercall0(type, name) \
({ \
        __HYPERCALL_DECLS; \
        __HYPERCALL_0ARG(); \
        asm volatile (__HYPERCALL \
                      : __HYPERCALL_0PARAM \
                      : __HYPERCALL_ENTRY(name) \
                      : __HYPERCALL_CLOBBER0); \
        (type)__res; \
})

#define _hypercall1(type, name, a1) \
({ \
        __HYPERCALL_DECLS; \
        __HYPERCALL_1ARG(a1); \
        asm volatile (__HYPERCALL \
                      : __HYPERCALL_1PARAM \
                      : __HYPERCALL_ENTRY(name) \
                      : __HYPERCALL_CLOBBER1); \
        (type)__res; \
})

#define _hypercall2(type, name, a1, a2) \
({ \
        __HYPERCALL_DECLS; \
        __HYPERCALL_2ARG(a1, a2); \
        asm volatile (__HYPERCALL \
                      : __HYPERCALL_2PARAM \
                      : __HYPERCALL_ENTRY(name) \
                      : __HYPERCALL_CLOBBER2); \
        (type)__res; \
})

#define _hypercall3(type, name, a1, a2, a3) \
({ \
        __HYPERCALL_DECLS; \
        __HYPERCALL_3ARG(a1, a2, a3); \
        asm volatile (__HYPERCALL \
                      : __HYPERCALL_3PARAM \
                      : __HYPERCALL_ENTRY(name) \
                      : __HYPERCALL_CLOBBER3); \
        (type)__res; \
})

#define _hypercall4(type, name, a1, a2, a3, a4) \
({ \
        __HYPERCALL_DECLS; \
        __HYPERCALL_4ARG(a1, a2, a3, a4); \
        asm volatile (__HYPERCALL \
                      : __HYPERCALL_4PARAM \
                      : __HYPERCALL_ENTRY(name) \
                      : __HYPERCALL_CLOBBER4); \
        (type)__res; \
})

#define _hypercall5(type, name, a1, a2, a3, a4, a5) \
({ \
        __HYPERCALL_DECLS; \
        __HYPERCALL_5ARG(a1, a2, a3, a4, a5); \
        asm volatile (__HYPERCALL \
                      : __HYPERCALL_5PARAM \
                      : __HYPERCALL_ENTRY(name) \
                      : __HYPERCALL_CLOBBER5); \
        (type)__res; \
})
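
/*
 * As a rough illustration (not a literal preprocessor expansion), a
 * two-argument call such as _hypercall2(int, xen_version, cmd, arg)
 * on x86_64 boils down to:
 *
 *      register unsigned long __res  asm("rax");
 *      register unsigned long __arg1 asm("rdi") = (unsigned long)cmd;
 *      register unsigned long __arg2 asm("rsi") = (unsigned long)arg;
 *
 *      asm volatile ("call hypercall_page+%c[offset]"
 *                    : "=r" (__res), "+r" (__arg1), "+r" (__arg2)
 *                    : [offset] "i" (__HYPERVISOR_xen_version * 32)
 *                    : "memory", "r8", "r10", "rdx");
 *      (int)__res;
 *
 * i.e. the used argument registers are read-write operands, the unused
 * ones are clobbers, and the call lands in the xen_version slot of the
 * hypercall page.
 */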

static inline long
privcmd_call(unsigned call,
             unsigned long a1, unsigned long a2,
             unsigned long a3, unsigned long a4,
             unsigned long a5)
{
        __HYPERCALL_DECLS;
        __HYPERCALL_5ARG(a1, a2, a3, a4, a5);

        asm volatile("call *%[call]"
                     : __HYPERCALL_5PARAM
                     : [call] "a" (&hypercall_page[call])
                     : __HYPERCALL_CLOBBER5);

        return (long)__res;
}
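
/*
 * privcmd_call() is the variant used when the hypercall number is only
 * known at run time (e.g. requests forwarded from user space by the
 * privcmd driver).  It calls indirectly through the hypercall page and,
 * since it cannot know which arguments a given hypercall actually uses,
 * treats all five argument registers as live.
 */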

static inline int
HYPERVISOR_set_trap_table(struct trap_info *table)
{
        return _hypercall1(int, set_trap_table, table);
}

static inline int
HYPERVISOR_mmu_update(struct mmu_update *req, int count,
                      int *success_count, domid_t domid)
{
        return _hypercall4(int, mmu_update, req, count, success_count, domid);
}

static inline int
HYPERVISOR_mmuext_op(struct mmuext_op *op, int count,
                     int *success_count, domid_t domid)
{
        return _hypercall4(int, mmuext_op, op, count, success_count, domid);
}

static inline int
HYPERVISOR_set_gdt(unsigned long *frame_list, int entries)
{
        return _hypercall2(int, set_gdt, frame_list, entries);
}

static inline int
HYPERVISOR_stack_switch(unsigned long ss, unsigned long esp)
{
        return _hypercall2(int, stack_switch, ss, esp);
}

#ifdef CONFIG_X86_32
static inline int
HYPERVISOR_set_callbacks(unsigned long event_selector,
                         unsigned long event_address,
                         unsigned long failsafe_selector,
                         unsigned long failsafe_address)
{
        return _hypercall4(int, set_callbacks,
                           event_selector, event_address,
                           failsafe_selector, failsafe_address);
}
#else /* CONFIG_X86_64 */
static inline int
HYPERVISOR_set_callbacks(unsigned long event_address,
                         unsigned long failsafe_address,
                         unsigned long syscall_address)
{
        return _hypercall3(int, set_callbacks,
                           event_address, failsafe_address,
                           syscall_address);
}
#endif /* CONFIG_X86_{32,64} */

static inline int
HYPERVISOR_callback_op(int cmd, void *arg)
{
        return _hypercall2(int, callback_op, cmd, arg);
}

static inline int
HYPERVISOR_fpu_taskswitch(int set)
{
        return _hypercall1(int, fpu_taskswitch, set);
}

static inline int
HYPERVISOR_sched_op(int cmd, void *arg)
{
        return _hypercall2(int, sched_op, cmd, arg);
}

static inline long
HYPERVISOR_set_timer_op(u64 timeout)
{
        unsigned long timeout_hi = (unsigned long)(timeout>>32);
        unsigned long timeout_lo = (unsigned long)timeout;
        return _hypercall2(long, set_timer_op, timeout_lo, timeout_hi);
}
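
/*
 * set_timer_op() takes a 64-bit deadline.  On x86_32 it must be split
 * into low and high halves and passed as two arguments; on x86_64
 * timeout_lo already holds the full value, and the extra high word
 * passed in the second argument register should simply be ignored by
 * the hypervisor.  The same split-into-halves pattern recurs below
 * wherever a 64-bit quantity has to be passed on 32-bit.
 */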

static inline int
HYPERVISOR_set_debugreg(int reg, unsigned long value)
{
        return _hypercall2(int, set_debugreg, reg, value);
}

static inline unsigned long
HYPERVISOR_get_debugreg(int reg)
{
        return _hypercall1(unsigned long, get_debugreg, reg);
}

static inline int
HYPERVISOR_update_descriptor(u64 ma, u64 desc)
{
        if (sizeof(u64) == sizeof(long))
                return _hypercall2(int, update_descriptor, ma, desc);
        return _hypercall4(int, update_descriptor, ma, ma>>32, desc, desc>>32);
}
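
/*
 * sizeof(u64) == sizeof(long) is a compile-time constant, so only one
 * branch survives: on 64-bit the machine address and descriptor each
 * fit in a single argument register, while on 32-bit each u64 is
 * passed as a low/high pair of arguments.
 */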

static inline int
HYPERVISOR_memory_op(unsigned int cmd, void *arg)
{
        return _hypercall2(int, memory_op, cmd, arg);
}

static inline int
HYPERVISOR_multicall(void *call_list, int nr_calls)
{
        return _hypercall2(int, multicall, call_list, nr_calls);
}

static inline int
HYPERVISOR_update_va_mapping(unsigned long va, pte_t new_val,
                             unsigned long flags)
{
        if (sizeof(new_val) == sizeof(long))
                return _hypercall3(int, update_va_mapping, va,
                                   new_val.pte, flags);
        else
                return _hypercall4(int, update_va_mapping, va,
                                   new_val.pte, new_val.pte >> 32, flags);
}
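
/*
 * Same trick as HYPERVISOR_update_descriptor(): with 32-bit PAE page
 * tables the pte value is 64 bits wide, so it is passed as a low/high
 * pair and the hypercall takes one extra argument.
 */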

static inline int
HYPERVISOR_event_channel_op(int cmd, void *arg)
{
        int rc = _hypercall2(int, event_channel_op, cmd, arg);
        if (unlikely(rc == -ENOSYS)) {
                struct evtchn_op op;
                op.cmd = cmd;
                memcpy(&op.u, arg, sizeof(op.u));
                rc = _hypercall1(int, event_channel_op_compat, &op);
                memcpy(arg, &op.u, sizeof(op.u));
        }
        return rc;
}
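
/*
 * Fallback for hypervisors that predate the single-command
 * event_channel_op interface: -ENOSYS means the new form is unknown, so
 * the command and argument are repacked into the old multiplexed
 * struct evtchn_op and retried via event_channel_op_compat.  The copy
 * back afterwards matters because several sub-ops return data in the
 * argument structure.
 */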

static inline int
HYPERVISOR_xen_version(int cmd, void *arg)
{
        return _hypercall2(int, xen_version, cmd, arg);
}

static inline int
HYPERVISOR_console_io(int cmd, int count, char *str)
{
        return _hypercall3(int, console_io, cmd, count, str);
}

static inline int
HYPERVISOR_physdev_op(int cmd, void *arg)
{
        int rc = _hypercall2(int, physdev_op, cmd, arg);
        if (unlikely(rc == -ENOSYS)) {
                struct physdev_op op;
                op.cmd = cmd;
                memcpy(&op.u, arg, sizeof(op.u));
                rc = _hypercall1(int, physdev_op_compat, &op);
                memcpy(arg, &op.u, sizeof(op.u));
        }
        return rc;
}
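
/*
 * Same compat dance as HYPERVISOR_event_channel_op() above, this time
 * for the old multiplexed physdev_op interface.
 */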

static inline int
HYPERVISOR_grant_table_op(unsigned int cmd, void *uop, unsigned int count)
{
        return _hypercall3(int, grant_table_op, cmd, uop, count);
}

static inline int
HYPERVISOR_update_va_mapping_otherdomain(unsigned long va, pte_t new_val,
                                         unsigned long flags, domid_t domid)
{
        if (sizeof(new_val) == sizeof(long))
                return _hypercall4(int, update_va_mapping_otherdomain, va,
                                   new_val.pte, flags, domid);
        else
                return _hypercall5(int, update_va_mapping_otherdomain, va,
                                   new_val.pte, new_val.pte >> 32,
                                   flags, domid);
}

static inline int
HYPERVISOR_vm_assist(unsigned int cmd, unsigned int type)
{
        return _hypercall2(int, vm_assist, cmd, type);
}

static inline int
HYPERVISOR_vcpu_op(int cmd, int vcpuid, void *extra_args)
{
        return _hypercall3(int, vcpu_op, cmd, vcpuid, extra_args);
}

#ifdef CONFIG_X86_64
static inline int
HYPERVISOR_set_segment_base(int reg, unsigned long value)
{
        return _hypercall2(int, set_segment_base, reg, value);
}
#endif

static inline int
HYPERVISOR_suspend(unsigned long start_info_mfn)
{
        struct sched_shutdown r = { .reason = SHUTDOWN_suspend };

        /*
         * For a PV guest the tools require that the start_info mfn be
         * present in rdx/edx when the hypercall is made. Per the
         * hypercall calling convention this is the third hypercall
         * argument, which is start_info_mfn here.
         */
        return _hypercall3(int, sched_op, SCHEDOP_shutdown, &r, start_info_mfn);
}

static inline int
HYPERVISOR_nmi_op(unsigned long op, unsigned long arg)
{
        return _hypercall2(int, nmi_op, op, arg);
}

static inline unsigned long __must_check
HYPERVISOR_hvm_op(int op, void *arg)
{
        return _hypercall2(unsigned long, hvm_op, op, arg);
}
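
/*
 * The MULTI_* helpers below only fill in a struct multicall_entry; the
 * batched entries are issued later with HYPERVISOR_multicall() (in
 * practice usually via the kernel's multicall batching machinery).  A
 * minimal sketch of direct use, assuming the caller owns the entry
 * array and new_sp is whatever stack pointer it wants installed:
 *
 *      struct multicall_entry mc[2];
 *
 *      MULTI_fpu_taskswitch(&mc[0], 1);
 *      MULTI_stack_switch(&mc[1], __KERNEL_DS, new_sp);
 *      if (HYPERVISOR_multicall(mc, 2) != 0)
 *              BUG();
 *
 * A zero return from HYPERVISOR_multicall() only means the batch was
 * processed; each entry's own status comes back in mc[i].result.
 */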

static inline void
MULTI_fpu_taskswitch(struct multicall_entry *mcl, int set)
{
        mcl->op = __HYPERVISOR_fpu_taskswitch;
        mcl->args[0] = set;
}

static inline void
MULTI_update_va_mapping(struct multicall_entry *mcl, unsigned long va,
                        pte_t new_val, unsigned long flags)
{
        mcl->op = __HYPERVISOR_update_va_mapping;
        mcl->args[0] = va;
        if (sizeof(new_val) == sizeof(long)) {
                mcl->args[1] = new_val.pte;
                mcl->args[2] = flags;
        } else {
                mcl->args[1] = new_val.pte;
                mcl->args[2] = new_val.pte >> 32;
                mcl->args[3] = flags;
        }
}

static inline void
MULTI_grant_table_op(struct multicall_entry *mcl, unsigned int cmd,
                     void *uop, unsigned int count)
{
        mcl->op = __HYPERVISOR_grant_table_op;
        mcl->args[0] = cmd;
        mcl->args[1] = (unsigned long)uop;
        mcl->args[2] = count;
}

static inline void
MULTI_update_va_mapping_otherdomain(struct multicall_entry *mcl, unsigned long va,
                                    pte_t new_val, unsigned long flags,
                                    domid_t domid)
{
        mcl->op = __HYPERVISOR_update_va_mapping_otherdomain;
        mcl->args[0] = va;
        if (sizeof(new_val) == sizeof(long)) {
                mcl->args[1] = new_val.pte;
                mcl->args[2] = flags;
                mcl->args[3] = domid;
        } else {
                mcl->args[1] = new_val.pte;
                mcl->args[2] = new_val.pte >> 32;
                mcl->args[3] = flags;
                mcl->args[4] = domid;
        }
}

static inline void
MULTI_update_descriptor(struct multicall_entry *mcl, u64 maddr,
                        struct desc_struct desc)
{
        mcl->op = __HYPERVISOR_update_descriptor;
        if (sizeof(maddr) == sizeof(long)) {
                mcl->args[0] = maddr;
                mcl->args[1] = *(unsigned long *)&desc;
        } else {
                mcl->args[0] = maddr;
                mcl->args[1] = maddr >> 32;
                mcl->args[2] = desc.a;
                mcl->args[3] = desc.b;
        }
}

static inline void
MULTI_memory_op(struct multicall_entry *mcl, unsigned int cmd, void *arg)
{
        mcl->op = __HYPERVISOR_memory_op;
        mcl->args[0] = cmd;
        mcl->args[1] = (unsigned long)arg;
}

static inline void
MULTI_mmu_update(struct multicall_entry *mcl, struct mmu_update *req,
                 int count, int *success_count, domid_t domid)
{
        mcl->op = __HYPERVISOR_mmu_update;
        mcl->args[0] = (unsigned long)req;
        mcl->args[1] = count;
        mcl->args[2] = (unsigned long)success_count;
        mcl->args[3] = domid;
}

static inline void
MULTI_mmuext_op(struct multicall_entry *mcl, struct mmuext_op *op, int count,
                int *success_count, domid_t domid)
{
        mcl->op = __HYPERVISOR_mmuext_op;
        mcl->args[0] = (unsigned long)op;
        mcl->args[1] = count;
        mcl->args[2] = (unsigned long)success_count;
        mcl->args[3] = domid;
}

static inline void
MULTI_set_gdt(struct multicall_entry *mcl, unsigned long *frames, int entries)
{
        mcl->op = __HYPERVISOR_set_gdt;
        mcl->args[0] = (unsigned long)frames;
        mcl->args[1] = entries;
}

static inline void
MULTI_stack_switch(struct multicall_entry *mcl,
                   unsigned long ss, unsigned long esp)
{
        mcl->op = __HYPERVISOR_stack_switch;
        mcl->args[0] = ss;
        mcl->args[1] = esp;
}

#endif /* _ASM_X86_XEN_HYPERCALL_H */