/*
 * vcpu.h: vcpu routines
 * Copyright (c) 2005, Intel Corporation.
 * Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com)
 * Yaozu Dong (Eddie Dong) (Eddie.dong@intel.com)
 *
 * Copyright (c) 2007, Intel Corporation.
 * Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com)
 * Xiantao Zhang (xiantao.zhang@intel.com)
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 */


#ifndef __KVM_VCPU_H__
#define __KVM_VCPU_H__

#include <asm/types.h>
#include <asm/fpu.h>
#include <asm/processor.h>

#ifndef __ASSEMBLY__
#include "vti.h"

#include <linux/kvm_host.h>
#include <linux/spinlock.h>

typedef unsigned long IA64_INST;

typedef union U_IA64_BUNDLE {
	unsigned long i64[2];
	struct { unsigned long template:5, slot0:41, slot1a:18,
		slot1b:23, slot2:41; };
	/* NOTE: following doesn't work because bitfields can't cross natural
	   size boundaries
	   struct { unsigned long template:5, slot0:41, slot1:41, slot2:41; }; */
} IA64_BUNDLE;
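
/*
 * Illustrative sketch (not part of the original interface): because the
 * 41-bit slot1 field straddles the 64-bit boundary, it must be spliced
 * back together from the slot1a (low 18 bits) and slot1b (high 23 bits)
 * halves before it can be used as a whole.
 */
static inline unsigned long ia64_bundle_slot1(IA64_BUNDLE bundle)
{
	return bundle.slot1a | (bundle.slot1b << 18);
}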

typedef union U_INST64_A5 {
	IA64_INST inst;
	struct { unsigned long qp:6, r1:7, imm7b:7, r3:2, imm5c:5,
		imm9d:9, s:1, major:4; };
} INST64_A5;

typedef union U_INST64_B4 {
	IA64_INST inst;
	struct { unsigned long qp:6, btype:3, un3:3, p:1, b2:3, un11:11, x6:6,
		wh:2, d:1, un1:1, major:4; };
} INST64_B4;

typedef union U_INST64_B8 {
	IA64_INST inst;
	struct { unsigned long qp:6, un21:21, x6:6, un4:4, major:4; };
} INST64_B8;

typedef union U_INST64_B9 {
	IA64_INST inst;
	struct { unsigned long qp:6, imm20:20, :1, x6:6, :3, i:1, major:4; };
} INST64_B9;

typedef union U_INST64_I19 {
	IA64_INST inst;
	struct { unsigned long qp:6, imm20:20, :1, x6:6, x3:3, i:1, major:4; };
} INST64_I19;

typedef union U_INST64_I26 {
	IA64_INST inst;
	struct { unsigned long qp:6, :7, r2:7, ar3:7, x6:6, x3:3, :1, major:4; };
} INST64_I26;

typedef union U_INST64_I27 {
	IA64_INST inst;
	struct { unsigned long qp:6, :7, imm:7, ar3:7, x6:6, x3:3, s:1, major:4; };
} INST64_I27;

typedef union U_INST64_I28 { /* not privileged (mov from AR) */
	IA64_INST inst;
	struct { unsigned long qp:6, r1:7, :7, ar3:7, x6:6, x3:3, :1, major:4; };
} INST64_I28;

typedef union U_INST64_M28 {
	IA64_INST inst;
	struct { unsigned long qp:6, :14, r3:7, x6:6, x3:3, :1, major:4; };
} INST64_M28;

typedef union U_INST64_M29 {
	IA64_INST inst;
	struct { unsigned long qp:6, :7, r2:7, ar3:7, x6:6, x3:3, :1, major:4; };
} INST64_M29;

typedef union U_INST64_M30 {
	IA64_INST inst;
	struct { unsigned long qp:6, :7, imm:7, ar3:7, x4:4, x2:2,
		x3:3, s:1, major:4; };
} INST64_M30;

typedef union U_INST64_M31 {
	IA64_INST inst;
	struct { unsigned long qp:6, r1:7, :7, ar3:7, x6:6, x3:3, :1, major:4; };
} INST64_M31;

typedef union U_INST64_M32 {
	IA64_INST inst;
	struct { unsigned long qp:6, :7, r2:7, cr3:7, x6:6, x3:3, :1, major:4; };
} INST64_M32;

typedef union U_INST64_M33 {
	IA64_INST inst;
	struct { unsigned long qp:6, r1:7, :7, cr3:7, x6:6, x3:3, :1, major:4; };
} INST64_M33;

typedef union U_INST64_M35 {
	IA64_INST inst;
	struct { unsigned long qp:6, :7, r2:7, :7, x6:6, x3:3, :1, major:4; };
} INST64_M35;

typedef union U_INST64_M36 {
	IA64_INST inst;
	struct { unsigned long qp:6, r1:7, :14, x6:6, x3:3, :1, major:4; };
} INST64_M36;

typedef union U_INST64_M37 {
	IA64_INST inst;
	struct { unsigned long qp:6, imm20a:20, :1, x4:4, x2:2, x3:3,
		i:1, major:4; };
} INST64_M37;

typedef union U_INST64_M41 {
	IA64_INST inst;
	struct { unsigned long qp:6, :7, r2:7, :7, x6:6, x3:3, :1, major:4; };
} INST64_M41;

typedef union U_INST64_M42 {
	IA64_INST inst;
	struct { unsigned long qp:6, :7, r2:7, r3:7, x6:6, x3:3, :1, major:4; };
} INST64_M42;

typedef union U_INST64_M43 {
	IA64_INST inst;
	struct { unsigned long qp:6, r1:7, :7, r3:7, x6:6, x3:3, :1, major:4; };
} INST64_M43;

typedef union U_INST64_M44 {
	IA64_INST inst;
	struct { unsigned long qp:6, imm:21, x4:4, i2:2, x3:3, i:1, major:4; };
} INST64_M44;

typedef union U_INST64_M45 {
	IA64_INST inst;
	struct { unsigned long qp:6, :7, r2:7, r3:7, x6:6, x3:3, :1, major:4; };
} INST64_M45;

typedef union U_INST64_M46 {
	IA64_INST inst;
	struct { unsigned long qp:6, r1:7, un7:7, r3:7, x6:6,
		x3:3, un1:1, major:4; };
} INST64_M46;

typedef union U_INST64_M47 {
	IA64_INST inst;
	struct { unsigned long qp:6, un14:14, r3:7, x6:6, x3:3, un1:1, major:4; };
} INST64_M47;

typedef union U_INST64_M1 {
	IA64_INST inst;
	struct { unsigned long qp:6, r1:7, un7:7, r3:7, x:1, hint:2,
		x6:6, m:1, major:4; };
} INST64_M1;

typedef union U_INST64_M2 {
	IA64_INST inst;
	struct { unsigned long qp:6, r1:7, r2:7, r3:7, x:1, hint:2,
		x6:6, m:1, major:4; };
} INST64_M2;

typedef union U_INST64_M3 {
	IA64_INST inst;
	struct { unsigned long qp:6, r1:7, imm7:7, r3:7, i:1, hint:2,
		x6:6, s:1, major:4; };
} INST64_M3;

typedef union U_INST64_M4 {
	IA64_INST inst;
	struct { unsigned long qp:6, un7:7, r2:7, r3:7, x:1, hint:2,
		x6:6, m:1, major:4; };
} INST64_M4;

typedef union U_INST64_M5 {
	IA64_INST inst;
	struct { unsigned long qp:6, imm7:7, r2:7, r3:7, i:1, hint:2,
		x6:6, s:1, major:4; };
} INST64_M5;

typedef union U_INST64_M6 {
	IA64_INST inst;
	struct { unsigned long qp:6, f1:7, un7:7, r3:7, x:1, hint:2,
		x6:6, m:1, major:4; };
} INST64_M6;

typedef union U_INST64_M9 {
	IA64_INST inst;
	struct { unsigned long qp:6, :7, f2:7, r3:7, x:1, hint:2,
		x6:6, m:1, major:4; };
} INST64_M9;

typedef union U_INST64_M10 {
	IA64_INST inst;
	struct { unsigned long qp:6, imm7:7, f2:7, r3:7, i:1, hint:2,
		x6:6, s:1, major:4; };
} INST64_M10;

typedef union U_INST64_M12 {
	IA64_INST inst;
	struct { unsigned long qp:6, f1:7, f2:7, r3:7, x:1, hint:2,
		x6:6, m:1, major:4; };
} INST64_M12;

typedef union U_INST64_M15 {
	IA64_INST inst;
	struct { unsigned long qp:6, :7, imm7:7, r3:7, i:1, hint:2,
		x6:6, s:1, major:4; };
} INST64_M15;

typedef union U_INST64 {
	IA64_INST inst;
	struct { unsigned long :37, major:4; } generic;
	INST64_A5 A5;	/* used in build_hypercall_bundle only */
	INST64_B4 B4;	/* used in build_hypercall_bundle only */
	INST64_B8 B8;	/* rfi, bsw.[01] */
	INST64_B9 B9;	/* break.b */
	INST64_I19 I19;	/* used in build_hypercall_bundle only */
	INST64_I26 I26;	/* mov register to ar (I unit) */
	INST64_I27 I27;	/* mov immediate to ar (I unit) */
	INST64_I28 I28;	/* mov from ar (I unit) */
	INST64_M1 M1;	/* ld integer */
	INST64_M2 M2;
	INST64_M3 M3;
	INST64_M4 M4;	/* st integer */
	INST64_M5 M5;
	INST64_M6 M6;	/* ldfd floating point */
	INST64_M9 M9;	/* stfd floating point */
	INST64_M10 M10;	/* stfd floating point */
	INST64_M12 M12;	/* ldfd pair floating point */
	INST64_M15 M15;	/* lfetch + imm update */
	INST64_M28 M28;	/* purge translation cache entry */
	INST64_M29 M29;	/* mov register to ar (M unit) */
	INST64_M30 M30;	/* mov immediate to ar (M unit) */
	INST64_M31 M31;	/* mov from ar (M unit) */
	INST64_M32 M32;	/* mov reg to cr */
	INST64_M33 M33;	/* mov from cr */
	INST64_M35 M35;	/* mov to psr */
	INST64_M36 M36;	/* mov from psr */
	INST64_M37 M37;	/* break.m */
	INST64_M41 M41;	/* translation cache insert */
	INST64_M42 M42;	/* mov to indirect reg/translation reg insert */
	INST64_M43 M43;	/* mov from indirect reg */
	INST64_M44 M44;	/* set/reset system mask */
	INST64_M45 M45;	/* translation purge */
	INST64_M46 M46;	/* translation access (tpa,tak) */
	INST64_M47 M47;	/* purge translation entry */
} INST64;

#define MASK_41		((unsigned long)0x1ffffffffff)

/* Virtual address memory attributes encoding */
#define VA_MATTR_WB		0x0
#define VA_MATTR_UC		0x4
#define VA_MATTR_UCE		0x5
#define VA_MATTR_WC		0x6
#define VA_MATTR_NATPAGE	0x7

#define PMASK(size)		(~((size) - 1))
#define PSIZE(size)		(1UL << (size))
#define CLEARLSB(ppn, nbits)	(((ppn) >> (nbits)) << (nbits))
#define PAGEALIGN(va, ps)	CLEARLSB(va, ps)
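
/*
 * Worked example: for a 4 KB page size (ps == 12),
 *	PSIZE(12)		== 0x1000,
 *	PMASK(PSIZE(12))	== ~0xfffUL, and
 *	PAGEALIGN(0x12345, 12)	== 0x12000.
 */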

#define PAGE_FLAGS_RV_MASK	(0x2 | (0x3UL << 50) | (((1UL << 11) - 1) << 53))
#define _PAGE_MA_ST		(0x1 << 2)	/* memory attribute encoding reserved for software use */

#define ARCH_PAGE_SHIFT	12

#define INVALID_TI_TAG	(1UL << 63)

#define VTLB_PTE_P_BIT	0
#define VTLB_PTE_IO_BIT	60
#define VTLB_PTE_IO	(1UL << VTLB_PTE_IO_BIT)
#define VTLB_PTE_P	(1UL << VTLB_PTE_P_BIT)

#define vcpu_quick_region_check(_tr_regions, _ifa)		\
	(_tr_regions & (1 << ((unsigned long)_ifa >> 61)))

#define vcpu_quick_region_set(_tr_regions, _ifa)		\
	do { _tr_regions |= (1 << ((unsigned long)_ifa >> 61)); } while (0)
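
/*
 * Usage sketch (hypothetical values): the region bitmap caches which of
 * the eight 2^61-byte virtual regions currently hold TR mappings, so a
 * faulting address can be screened with one shift and one AND:
 *
 *	unsigned long tr_regions = 0;
 *	vcpu_quick_region_set(tr_regions, 0x2000000000000000UL); // region 1
 *	if (vcpu_quick_region_check(tr_regions, ifa))
 *		;	// fall through to the full TR overlap search
 */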

static inline void vcpu_set_tr(struct thash_data *trp, u64 pte, u64 itir,
		u64 va, u64 rid)
{
	trp->page_flags = pte;
	trp->itir = itir;
	trp->vadr = va;
	trp->rid = rid;
}

extern u64 kvm_get_mpt_entry(u64 gpfn);

/* Return the I/O type encoded in the gpfn's mapping-table entry, or 0
 * for ordinary memory, physical MMIO, and invalid entries. */
static inline u64 __gpfn_is_io(u64 gpfn)
{
	u64 pte;
	pte = kvm_get_mpt_entry(gpfn);
	if (!(pte & GPFN_INV_MASK)) {
		pte = pte & GPFN_IO_MASK;
		if (pte != GPFN_PHYS_MMIO)
			return pte;
	}
	return 0;
}
#endif /* !__ASSEMBLY__ */
#define IA64_NO_FAULT	0
#define IA64_FAULT	1

#define VMM_RBS_OFFSET	((VMM_TASK_SIZE + 15) & ~15)

#define SW_BAD	0	/* Bad mode transition */
#define SW_V2P	1	/* Enter physical mode emulation */
#define SW_P2V	2	/* Exit physical mode emulation */
#define SW_SELF	3	/* No mode transition */
#define SW_NOP	4	/* Mode transition, but no action required */

#define GUEST_IN_PHY	0x1
#define GUEST_PHY_EMUL	0x2

#define current_vcpu	((struct kvm_vcpu *) ia64_getreg(_IA64_REG_TP))

#define VRN_SHIFT	61
#define VRN_MASK	0xe000000000000000
#define VRN0		0x0UL
#define VRN1		0x1UL
#define VRN2		0x2UL
#define VRN3		0x3UL
#define VRN4		0x4UL
#define VRN5		0x5UL
#define VRN6		0x6UL
#define VRN7		0x7UL

#define IRQ_NO_MASKED		0
#define IRQ_MASKED_BY_VTPR	1
#define IRQ_MASKED_BY_INSVC	2	/* masked by inservice IRQ */

#define PTA_BASE_SHIFT	15

#define IA64_PSR_VM_BIT	46
#define IA64_PSR_VM	(__IA64_UL(1) << IA64_PSR_VM_BIT)

/* Interruption Function State */
#define IA64_IFS_V_BIT	63
#define IA64_IFS_V	(__IA64_UL(1) << IA64_IFS_V_BIT)

#define PHY_PAGE_UC	(_PAGE_A | _PAGE_D | _PAGE_P | _PAGE_MA_UC | _PAGE_AR_RWX)
#define PHY_PAGE_WB	(_PAGE_A | _PAGE_D | _PAGE_P | _PAGE_MA_WB | _PAGE_AR_RWX)

#ifndef __ASSEMBLY__

#include <asm/gcc_intrin.h>

#define is_physical_mode(v)	\
	(((v)->arch.mode_flags) & GUEST_IN_PHY)

#define is_virtual_mode(v)	\
	(!is_physical_mode(v))

#define MODE_IND(psr)	\
	(((psr).it << 2) + ((psr).dt << 1) + (psr).rt)
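
/*
 * MODE_IND() packs psr.it/psr.dt/psr.rt into a 3-bit translation-mode
 * index: 7 means instruction, data, and register-stack translation are
 * all on (fully virtual), 0 means fully physical, and the remaining
 * values are the mixed modes the switch code has to handle specially.
 */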

/*
 * vmm_spinlock_t is defined outside the CONFIG_SMP guard so that the
 * vmm_spin_lock()/vmm_spin_unlock() prototypes below are valid on UP
 * builds as well; the UP lock operations themselves are no-ops.
 */
typedef struct {
	volatile unsigned int lock;
} vmm_spinlock_t;

#ifndef CONFIG_SMP
#define _vmm_raw_spin_lock(x)	do { } while (0)
#define _vmm_raw_spin_unlock(x)	do { } while (0)
#else
#define _vmm_raw_spin_lock(x)						\
	do {								\
		__u32 *ia64_spinlock_ptr = (__u32 *) (x);		\
		__u64 ia64_spinlock_val;				\
		ia64_spinlock_val = ia64_cmpxchg4_acq(ia64_spinlock_ptr, 1, 0);\
		if (unlikely(ia64_spinlock_val)) {			\
			do {						\
				while (*ia64_spinlock_ptr)		\
					ia64_barrier();			\
				ia64_spinlock_val =			\
				ia64_cmpxchg4_acq(ia64_spinlock_ptr, 1, 0);\
			} while (ia64_spinlock_val);			\
		}							\
	} while (0)

#define _vmm_raw_spin_unlock(x)				\
	do { barrier();					\
		((vmm_spinlock_t *)x)->lock = 0; }	\
	while (0)
#endif

void vmm_spin_lock(vmm_spinlock_t *lock);
void vmm_spin_unlock(vmm_spinlock_t *lock);
enum {
	I_TLB = 1,
	D_TLB = 2
};

union kvm_va {
	struct {
		unsigned long off : 60;		/* intra-region offset */
		unsigned long reg : 4;		/* region number */
	} f;
	unsigned long l;
	void *p;
};

#define __kvm_pa(x) ({union kvm_va _v; _v.l = (long) (x);	\
						_v.f.reg = 0; _v.l; })
#define __kvm_va(x) ({union kvm_va _v; _v.l = (long) (x);	\
						_v.f.reg = -1; _v.p; })
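
/*
 * Usage sketch (hypothetical address): __kvm_pa() clears the top nibble
 * to recover the intra-region offset, and __kvm_va() sets it to all ones,
 * so the two macros round-trip a VMM pointer through its "physical" form:
 *
 *	unsigned long pa = __kvm_pa(0xf000000000012345UL);	// 0x12345
 *	void *va = __kvm_va(pa);	// back to 0xf000000000012345
 */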

#define _REGION_ID(x)		({union ia64_rr _v; _v.val = (long)(x);	\
						_v.rid; })
#define _REGION_PAGE_SIZE(x)	({union ia64_rr _v; _v.val = (long)(x);	\
						_v.ps; })
#define _REGION_HW_WALKER(x)	({union ia64_rr _v; _v.val = (long)(x);	\
						_v.ve; })

enum vhpt_ref { DATA_REF, NA_REF, INST_REF, RSE_REF };
enum tlb_miss_type { INSTRUCTION, DATA, REGISTER };

#define VCPU(_v, _x)	((_v)->arch.vpd->_x)
#define VMX(_v, _x)	((_v)->arch._x)

#define VLSAPIC_INSVC(vcpu, i)	((vcpu)->arch.insvc[i])
#define VLSAPIC_XTP(_v)		VMX(_v, xtp)

static inline unsigned long itir_ps(unsigned long itir)
{
	return ((itir >> 2) & 0x3f);
}
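
/*
 * Worked example: an ITIR of 0x38 encodes ps = (0x38 >> 2) & 0x3f = 14,
 * i.e. a 16 KB page (PSIZE(14) == 0x4000).
 */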


/**************************************************************************
  VCPU control register access routines
 **************************************************************************/

static inline u64 vcpu_get_itir(struct kvm_vcpu *vcpu)
{
	return ((u64)VCPU(vcpu, itir));
}

static inline void vcpu_set_itir(struct kvm_vcpu *vcpu, u64 val)
{
	VCPU(vcpu, itir) = val;
}

static inline u64 vcpu_get_ifa(struct kvm_vcpu *vcpu)
{
	return ((u64)VCPU(vcpu, ifa));
}

static inline void vcpu_set_ifa(struct kvm_vcpu *vcpu, u64 val)
{
	VCPU(vcpu, ifa) = val;
}

static inline u64 vcpu_get_iva(struct kvm_vcpu *vcpu)
{
	return ((u64)VCPU(vcpu, iva));
}

static inline u64 vcpu_get_pta(struct kvm_vcpu *vcpu)
{
	return ((u64)VCPU(vcpu, pta));
}

static inline u64 vcpu_get_lid(struct kvm_vcpu *vcpu)
{
	return ((u64)VCPU(vcpu, lid));
}

static inline u64 vcpu_get_tpr(struct kvm_vcpu *vcpu)
{
	return ((u64)VCPU(vcpu, tpr));
}

static inline u64 vcpu_get_eoi(struct kvm_vcpu *vcpu)
{
	return (0UL);	/* reads of eoi always return 0 */
}

static inline u64 vcpu_get_irr0(struct kvm_vcpu *vcpu)
{
	return ((u64)VCPU(vcpu, irr[0]));
}

static inline u64 vcpu_get_irr1(struct kvm_vcpu *vcpu)
{
	return ((u64)VCPU(vcpu, irr[1]));
}

static inline u64 vcpu_get_irr2(struct kvm_vcpu *vcpu)
{
	return ((u64)VCPU(vcpu, irr[2]));
}

static inline u64 vcpu_get_irr3(struct kvm_vcpu *vcpu)
{
	return ((u64)VCPU(vcpu, irr[3]));
}

static inline void vcpu_set_dcr(struct kvm_vcpu *vcpu, u64 val)
{
	ia64_setreg(_IA64_REG_CR_DCR, val);
}

static inline void vcpu_set_isr(struct kvm_vcpu *vcpu, u64 val)
{
	VCPU(vcpu, isr) = val;
}

static inline void vcpu_set_lid(struct kvm_vcpu *vcpu, u64 val)
{
	VCPU(vcpu, lid) = val;
}

static inline void vcpu_set_ipsr(struct kvm_vcpu *vcpu, u64 val)
{
	VCPU(vcpu, ipsr) = val;
}

static inline void vcpu_set_iip(struct kvm_vcpu *vcpu, u64 val)
{
	VCPU(vcpu, iip) = val;
}

static inline void vcpu_set_ifs(struct kvm_vcpu *vcpu, u64 val)
{
	VCPU(vcpu, ifs) = val;
}

static inline void vcpu_set_iipa(struct kvm_vcpu *vcpu, u64 val)
{
	VCPU(vcpu, iipa) = val;
}

static inline void vcpu_set_iha(struct kvm_vcpu *vcpu, u64 val)
{
	VCPU(vcpu, iha) = val;
}


static inline u64 vcpu_get_rr(struct kvm_vcpu *vcpu, u64 reg)
{
	return vcpu->arch.vrr[reg >> 61];
}

/**************************************************************************
  VCPU debug breakpoint register access routines
 **************************************************************************/

static inline void vcpu_set_dbr(struct kvm_vcpu *vcpu, u64 reg, u64 val)
{
	__ia64_set_dbr(reg, val);
}

static inline void vcpu_set_ibr(struct kvm_vcpu *vcpu, u64 reg, u64 val)
{
	ia64_set_ibr(reg, val);
}

static inline u64 vcpu_get_dbr(struct kvm_vcpu *vcpu, u64 reg)
{
	return ((u64)__ia64_get_dbr(reg));
}

static inline u64 vcpu_get_ibr(struct kvm_vcpu *vcpu, u64 reg)
{
	return ((u64)ia64_get_ibr(reg));
}

/**************************************************************************
  VCPU performance monitor register access routines
 **************************************************************************/

static inline void vcpu_set_pmc(struct kvm_vcpu *vcpu, u64 reg, u64 val)
{
	/* NOTE: Writes to unimplemented PMC registers are discarded */
	ia64_set_pmc(reg, val);
}

static inline void vcpu_set_pmd(struct kvm_vcpu *vcpu, u64 reg, u64 val)
{
	/* NOTE: Writes to unimplemented PMD registers are discarded */
	ia64_set_pmd(reg, val);
}

static inline u64 vcpu_get_pmc(struct kvm_vcpu *vcpu, u64 reg)
{
	/* NOTE: Reads from unimplemented PMC registers return zero */
	return ((u64)ia64_get_pmc(reg));
}

static inline u64 vcpu_get_pmd(struct kvm_vcpu *vcpu, u64 reg)
{
	/* NOTE: Reads from unimplemented PMD registers return zero */
	return ((u64)ia64_get_pmd(reg));
}

static inline unsigned long vrrtomrr(unsigned long val)
{
	union ia64_rr rr;

	rr.val = val;
	rr.rid = (rr.rid << 4) | 0xe;
	if (rr.ps > PAGE_SHIFT)
		rr.ps = PAGE_SHIFT;
	rr.ve = 1;
	return rr.val;
}
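
/*
 * A note on vrrtomrr(): the guest rid is shifted left four bits and
 * tagged with 0xe, presumably to keep guest rids disjoint from host
 * ones; the page size is clamped to the host PAGE_SHIFT and the VHPT
 * walker is enabled (ve = 1) before the value goes into a machine rr.
 */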


static inline int highest_bits(int *dat)
{
	u32 bits, bitnum;
	int i;

	/* loop for all 256 bits */
	for (i = 7; i >= 0; i--) {
		bits = dat[i];
		if (bits) {
			bitnum = fls(bits);
			return i * 32 + bitnum - 1;
		}
	}
	return NULL_VECTOR;
}
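
/*
 * Worked example: with only bit 68 set in the 256-bit vector, dat[2] is
 * 0x10, fls(0x10) is 5, and highest_bits() returns 2 * 32 + 5 - 1 == 68.
 */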

/*
 * Returns non-zero when the pending irq has higher priority than the
 * in-service one.
 */
static inline int is_higher_irq(int pending, int inservice)
{
	return ((pending > inservice)
			|| ((pending != NULL_VECTOR)
				&& (inservice == NULL_VECTOR)));
}

static inline int is_higher_class(int pending, int mic)
{
	return ((pending >> 4) > mic);
}

/*
 * Return 0-255 for a pending irq,
 * or NULL_VECTOR when none is pending.
 */
static inline int highest_pending_irq(struct kvm_vcpu *vcpu)
{
	if (VCPU(vcpu, irr[0]) & (1UL << NMI_VECTOR))
		return NMI_VECTOR;
	if (VCPU(vcpu, irr[0]) & (1UL << ExtINT_VECTOR))
		return ExtINT_VECTOR;

	return highest_bits((int *)&VCPU(vcpu, irr[0]));
}

static inline int highest_inservice_irq(struct kvm_vcpu *vcpu)
{
	if (VMX(vcpu, insvc[0]) & (1UL << NMI_VECTOR))
		return NMI_VECTOR;
	if (VMX(vcpu, insvc[0]) & (1UL << ExtINT_VECTOR))
		return ExtINT_VECTOR;

	return highest_bits((int *)&(VMX(vcpu, insvc[0])));
}

extern void vcpu_get_fpreg(struct kvm_vcpu *vcpu, unsigned long reg,
		struct ia64_fpreg *val);
extern void vcpu_set_fpreg(struct kvm_vcpu *vcpu, unsigned long reg,
		struct ia64_fpreg *val);
extern u64 vcpu_get_gr(struct kvm_vcpu *vcpu, unsigned long reg);
extern void vcpu_set_gr(struct kvm_vcpu *vcpu, unsigned long reg,
		u64 val, int nat);
extern unsigned long vcpu_get_psr(struct kvm_vcpu *vcpu);
extern void vcpu_set_psr(struct kvm_vcpu *vcpu, unsigned long val);
extern u64 vcpu_thash(struct kvm_vcpu *vcpu, u64 vadr);
extern void vcpu_bsw0(struct kvm_vcpu *vcpu);
extern void thash_vhpt_insert(struct kvm_vcpu *v, u64 pte,
		u64 itir, u64 va, int type);
extern struct thash_data *vhpt_lookup(u64 va);
extern u64 guest_vhpt_lookup(u64 iha, u64 *pte);
extern void thash_purge_entries(struct kvm_vcpu *v, u64 va, u64 ps);
extern void thash_purge_entries_remote(struct kvm_vcpu *v, u64 va, u64 ps);
extern u64 translate_phy_pte(u64 *pte, u64 itir, u64 va);
extern void thash_purge_and_insert(struct kvm_vcpu *v, u64 pte,
		u64 itir, u64 ifa, int type);
extern void thash_purge_all(struct kvm_vcpu *v);
extern struct thash_data *vtlb_lookup(struct kvm_vcpu *v,
		u64 va, int is_data);
extern int vtr_find_overlap(struct kvm_vcpu *vcpu, u64 va,
		u64 ps, int is_data);

extern void vcpu_increment_iip(struct kvm_vcpu *v);
extern void vcpu_decrement_iip(struct kvm_vcpu *vcpu);
extern void vcpu_pend_interrupt(struct kvm_vcpu *vcpu, u8 vec);
extern void vcpu_unpend_interrupt(struct kvm_vcpu *vcpu, u8 vec);
extern void data_page_not_present(struct kvm_vcpu *vcpu, u64 vadr);
extern void dnat_page_consumption(struct kvm_vcpu *vcpu, u64 vadr);
extern void alt_dtlb(struct kvm_vcpu *vcpu, u64 vadr);
extern void nested_dtlb(struct kvm_vcpu *vcpu);
extern void dvhpt_fault(struct kvm_vcpu *vcpu, u64 vadr);
extern int vhpt_enabled(struct kvm_vcpu *vcpu, u64 vadr, enum vhpt_ref ref);

extern void update_vhpi(struct kvm_vcpu *vcpu, int vec);
extern int irq_masked(struct kvm_vcpu *vcpu, int h_pending, int h_inservice);

extern int fetch_code(struct kvm_vcpu *vcpu, u64 gip, IA64_BUNDLE *pbundle);
extern void emulate_io_inst(struct kvm_vcpu *vcpu, u64 padr, u64 ma);
extern void vmm_transition(struct kvm_vcpu *vcpu);
extern void vmm_trampoline(union context *from, union context *to);
extern int vmm_entry(void);
extern u64 vcpu_get_itc(struct kvm_vcpu *vcpu);

extern void vmm_reset_entry(void);
void kvm_init_vtlb(struct kvm_vcpu *v);
void kvm_init_vhpt(struct kvm_vcpu *v);
void thash_init(struct thash_cb *hcb, u64 sz);

void panic_vm(struct kvm_vcpu *v, const char *fmt, ...);
u64 kvm_gpa_to_mpa(u64 gpa);
extern u64 ia64_call_vsa(u64 proc, u64 arg1, u64 arg2, u64 arg3,
		u64 arg4, u64 arg5, u64 arg6, u64 arg7);

extern long vmm_sanity;

#endif /* !__ASSEMBLY__ */
#endif /* __KVM_VCPU_H__ */