/*
 * kvm_host.h: used by the kvm module; holds the ia64-specific definitions.
 *
 * Copyright (C) 2007, Intel Corporation.
 *
 * Xiantao Zhang <xiantao.zhang@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 */

#ifndef __ASM_KVM_HOST_H
#define __ASM_KVM_HOST_H

#define KVM_MEMORY_SLOTS 32
/* memory slots that are not exposed to userspace */
#define KVM_PRIVATE_MEM_SLOTS 4

#define KVM_COALESCED_MMIO_PAGE_OFFSET 1

/* define exit reasons from vmm to kvm */
#define EXIT_REASON_VM_PANIC            0
#define EXIT_REASON_MMIO_INSTRUCTION    1
#define EXIT_REASON_PAL_CALL            2
#define EXIT_REASON_SAL_CALL            3
#define EXIT_REASON_SWITCH_RR6          4
#define EXIT_REASON_VM_DESTROY          5
#define EXIT_REASON_EXTERNAL_INTERRUPT  6
#define EXIT_REASON_IPI                 7
#define EXIT_REASON_PTC_G               8
#define EXIT_REASON_DEBUG               20

/* Define vmm address space and vm data space. */
#define KVM_VMM_SIZE    (__IA64_UL_CONST(16) << 20)
#define KVM_VMM_SHIFT   24
#define KVM_VMM_BASE    0xD000000000000000
#define VMM_SIZE        (__IA64_UL_CONST(8) << 20)

/*
 * Define vm_buffer, used by PAL Services, base address.
 * Note: vm_buffer is in the VMM-BLOCK, the size must be < 8M
 */
#define KVM_VM_BUFFER_BASE      (KVM_VMM_BASE + VMM_SIZE)
#define KVM_VM_BUFFER_SIZE      (__IA64_UL_CONST(8) << 20)
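/*
 * Layout note (derived from the constants above): the VMM image itself
 * occupies the first VMM_SIZE (8MB) of the 16MB block at KVM_VMM_BASE,
 * and the 8MB vm_buffer fills the remaining half, so the two regions
 * together exactly cover KVM_VMM_SIZE.
 */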

/*
 * kvm guest's data area looks as follows:
 *
 *            +----------------------+ ------- KVM_VM_DATA_SIZE
 *            |    vcpu[n]'s data    |  |     ___________________KVM_STK_OFFSET
 *            |                      |  |    /                   |
 *            |      ..........      |  |   /vcpu's struct&stack |
 *            |      ..........      |  |  /---------------------|---- 0
 *            |    vcpu[5]'s data    |  | /         vpd          |
 *            |    vcpu[4]'s data    |  |/-----------------------|
 *            |    vcpu[3]'s data    |  /          vtlb          |
 *            |    vcpu[2]'s data    | /|------------------------|
 *            |    vcpu[1]'s data    |/ |          vhpt          |
 *            |    vcpu[0]'s data    |__________________________|
 *            +----------------------+  |
 *            |   memory dirty log   |  |
 *            +----------------------+  |
 *            |   vm's data struct   |  |
 *            +----------------------+  |
 *            |                      |  |
 *            |                      |  |
 *            |                      |  |
 *            |                      |  |
 *            |                      |  |
 *            |                      |  |
 *            |                      |  |
 *            |    vm's p2m table    |  |
 *            |                      |  |
 *            |                      |  |
 *            |                      |  |
 * vm's data->|                      |  |
 *            +----------------------+ ------- 0
 * To support more guest memory, increase the size of the p2m table.
 * To support more vcpus, make sure there is enough space left to hold
 * their per-vcpu data.
 */

#define KVM_VM_DATA_SHIFT       26
#define KVM_VM_DATA_SIZE        (__IA64_UL_CONST(1) << KVM_VM_DATA_SHIFT)
#define KVM_VM_DATA_BASE        (KVM_VMM_BASE + KVM_VM_DATA_SIZE)

#define KVM_P2M_BASE            KVM_VM_DATA_BASE
#define KVM_P2M_SIZE            (__IA64_UL_CONST(24) << 20)

#define VHPT_SHIFT              16
#define VHPT_SIZE               (__IA64_UL_CONST(1) << VHPT_SHIFT)
#define VHPT_NUM_ENTRIES        (__IA64_UL_CONST(1) << (VHPT_SHIFT-5))

#define VTLB_SHIFT              16
#define VTLB_SIZE               (__IA64_UL_CONST(1) << VTLB_SHIFT)
#define VTLB_NUM_ENTRIES        (__IA64_UL_CONST(1) << (VTLB_SHIFT-5))
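/*
 * The "- 5" in the entry-count calculations above reflects the per-entry
 * size of the hash tables: each VHPT/VTLB slot is a struct thash_data,
 * assumed here to be 32 bytes (2^5).
 */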

#define VPD_SHIFT               16
#define VPD_SIZE                (__IA64_UL_CONST(1) << VPD_SHIFT)

#define VCPU_STRUCT_SHIFT       16
#define VCPU_STRUCT_SIZE        (__IA64_UL_CONST(1) << VCPU_STRUCT_SHIFT)

/*
 * This must match KVM_IA64_VCPU_STACK_{SHIFT,SIZE} in arch/ia64/include/asm/kvm.h
 */
#define KVM_STK_SHIFT           16
#define KVM_STK_OFFSET          (__IA64_UL_CONST(1) << KVM_STK_SHIFT)

#define KVM_VM_STRUCT_SHIFT     19
#define KVM_VM_STRUCT_SIZE      (__IA64_UL_CONST(1) << KVM_VM_STRUCT_SHIFT)

#define KVM_MEM_DIRTY_LOG_SHIFT 19
#define KVM_MEM_DIRTY_LOG_SIZE  (__IA64_UL_CONST(1) << KVM_MEM_DIRTY_LOG_SHIFT)

#ifndef __ASSEMBLY__

/* Define the max vcpus and memory for guests. */
#define KVM_MAX_VCPUS   ((KVM_VM_DATA_SIZE - KVM_P2M_SIZE - KVM_VM_STRUCT_SIZE - \
                          KVM_MEM_DIRTY_LOG_SIZE) / sizeof(struct kvm_vcpu_data))
#define KVM_MAX_MEM_SIZE (KVM_P2M_SIZE >> 3 << PAGE_SHIFT)
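/*
 * Worked example (illustrative, derived from the sizes defined above):
 * sizeof(struct kvm_vcpu_data) = 4 * 64KB = 256KB, so
 *   KVM_MAX_VCPUS = (64MB - 24MB - 512KB - 512KB) / 256KB = 156.
 * The p2m table holds one 8-byte entry per guest page frame, hence
 *   KVM_MAX_MEM_SIZE = (KVM_P2M_SIZE / 8) * PAGE_SIZE,
 * e.g. 3M entries * 16KB = 48GB with a 16KB page size (PAGE_SIZE is a
 * kernel configuration choice on ia64).
 */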

#define VMM_LOG_LEN 256

#include <linux/types.h>
#include <linux/mm.h>
#include <linux/kvm.h>
#include <linux/kvm_para.h>
#include <linux/kvm_types.h>

#include <asm/pal.h>
#include <asm/sal.h>
#include <asm/page.h>

struct kvm_vcpu_data {
        char vcpu_vhpt[VHPT_SIZE];
        char vcpu_vtlb[VTLB_SIZE];
        char vcpu_vpd[VPD_SIZE];
        char vcpu_struct[VCPU_STRUCT_SIZE];
};

struct kvm_vm_data {
        char kvm_p2m[KVM_P2M_SIZE];
        char kvm_vm_struct[KVM_VM_STRUCT_SIZE];
        char kvm_mem_dirty_log[KVM_MEM_DIRTY_LOG_SIZE];
        struct kvm_vcpu_data vcpu_data[KVM_MAX_VCPUS];
};

#define VCPU_BASE(n)    (KVM_VM_DATA_BASE + \
                                offsetof(struct kvm_vm_data, vcpu_data[n]))
#define KVM_VM_BASE     (KVM_VM_DATA_BASE + \
                                offsetof(struct kvm_vm_data, kvm_vm_struct))
#define KVM_MEM_DIRTY_LOG_BASE  (KVM_VM_DATA_BASE + \
                                offsetof(struct kvm_vm_data, kvm_mem_dirty_log))

#define VHPT_BASE(n) (VCPU_BASE(n) + offsetof(struct kvm_vcpu_data, vcpu_vhpt))
#define VTLB_BASE(n) (VCPU_BASE(n) + offsetof(struct kvm_vcpu_data, vcpu_vtlb))
#define VPD_BASE(n)  (VCPU_BASE(n) + offsetof(struct kvm_vcpu_data, vcpu_vpd))
#define VCPU_STRUCT_BASE(n)     (VCPU_BASE(n) + \
                                offsetof(struct kvm_vcpu_data, vcpu_struct))
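/*
 * For example (following the struct layouts above): VCPU_BASE(0) resolves to
 * KVM_VM_DATA_BASE + KVM_P2M_SIZE + KVM_VM_STRUCT_SIZE + KVM_MEM_DIRTY_LOG_SIZE,
 * and VHPT_BASE(0) equals VCPU_BASE(0) because vcpu_vhpt is the first member
 * of struct kvm_vcpu_data.
 */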

/* IO section definitions */
#define IOREQ_READ      1
#define IOREQ_WRITE     0

#define STATE_IOREQ_NONE        0
#define STATE_IOREQ_READY       1
#define STATE_IOREQ_INPROCESS   2
#define STATE_IORESP_READY      3

/* Guest Physical address layout. */
#define GPFN_MEM            (0UL << 60) /* Guest pfn is normal mem */
#define GPFN_FRAME_BUFFER   (1UL << 60) /* VGA framebuffer */
#define GPFN_LOW_MMIO       (2UL << 60) /* Low MMIO range */
#define GPFN_PIB            (3UL << 60) /* PIB base */
#define GPFN_IOSAPIC        (4UL << 60) /* IOSAPIC base */
#define GPFN_LEGACY_IO      (5UL << 60) /* Legacy I/O base */
#define GPFN_GFW            (6UL << 60) /* Guest Firmware */
#define GPFN_PHYS_MMIO      (7UL << 60) /* Directed MMIO Range */

#define GPFN_IO_MASK        (7UL << 60) /* Guest pfn is I/O type */
#define GPFN_INV_MASK       (1UL << 63) /* Guest pfn is invalid */
#define INVALID_MFN         (~0UL)
#define MEM_G               (1UL << 30)
#define MEM_M               (1UL << 20)
#define MMIO_START          (3 * MEM_G)
#define MMIO_SIZE           (512 * MEM_M)
#define VGA_IO_START        0xA0000UL
#define VGA_IO_SIZE         0x20000
#define LEGACY_IO_START     (MMIO_START + MMIO_SIZE)
#define LEGACY_IO_SIZE      (64 * MEM_M)
#define IO_SAPIC_START      0xfec00000UL
#define IO_SAPIC_SIZE       0x100000
#define PIB_START           0xfee00000UL
#define PIB_SIZE            0x200000
#define GFW_START           (4 * MEM_G - 16 * MEM_M)
#define GFW_SIZE            (16 * MEM_M)
/* Delivery mode, defined for ioapic.c */
#define dest_Fixed                      IOSAPIC_FIXED
#define dest_LowestPrio                 IOSAPIC_LOWEST_PRIORITY

#define NMI_VECTOR                      2
#define ExtINT_VECTOR                   0
#define NULL_VECTOR                     (-1)
#define IA64_SPURIOUS_INT_VECTOR        0x0f

#define VCPU_LID(v) (((u64)(v)->vcpu_id) << 24)

/*
 * Delivery mode
 */
#define SAPIC_DELIV_SHIFT       8
#define SAPIC_FIXED             0x0
#define SAPIC_LOWEST_PRIORITY   0x1
#define SAPIC_PMI               0x2
#define SAPIC_NMI               0x4
#define SAPIC_INIT              0x5
#define SAPIC_EXTINT            0x7

/*
 * vcpu->requests bit members for arch
 */
#define KVM_REQ_PTC_G           32
#define KVM_REQ_RESUME          33

#define KVM_HPAGE_GFN_SHIFT(x)  0
#define KVM_NR_PAGE_SIZES       1
#define KVM_PAGES_PER_HPAGE(x)  1
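/*
 * ia64 KVM supports a single page size only, so the generic huge-page
 * macros above collapse to one level: every "huge page" spans exactly
 * one page and needs no extra gfn shift.
 */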

struct kvm;
struct kvm_vcpu;

struct kvm_mmio_req {
        uint64_t addr;          /* physical address        */
        uint64_t size;          /* size in bytes           */
        uint64_t data;          /* data (or paddr of data) */
        uint8_t state:4;
        uint8_t dir:1;          /* 1=read, 0=write         */
};

/* PAL call data struct */
struct kvm_pal_call {
        /* In area */
        uint64_t gr28;
        uint64_t gr29;
        uint64_t gr30;
        uint64_t gr31;
        /* Out area */
        struct ia64_pal_retval ret;
};

/* SAL call data struct */
struct kvm_sal_call {
        /* In area */
        uint64_t in0;
        uint64_t in1;
        uint64_t in2;
        uint64_t in3;
        uint64_t in4;
        uint64_t in5;
        uint64_t in6;
        uint64_t in7;
        struct sal_ret_values ret;
};

/* Guest changes rr6 */
struct kvm_switch_rr6 {
        uint64_t old_rr;
        uint64_t new_rr;
};

union ia64_ipi_a {
        unsigned long val;
        struct {
                unsigned long rv      : 3;
                unsigned long ir      : 1;
                unsigned long eid     : 8;
                unsigned long id      : 8;
                unsigned long ib_base : 44;
        };
};

union ia64_ipi_d {
        unsigned long val;
        struct {
                unsigned long vector : 8;
                unsigned long dm     : 3;
                unsigned long ig     : 53;
        };
};

/* IPI check exit data */
struct kvm_ipi_data {
        union ia64_ipi_a addr;
        union ia64_ipi_d data;
};
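/*
 * These unions mirror the architectural inter-processor interrupt message:
 * a store to the processor interrupt block (see PIB_START above) carries the
 * target in the address bits (id/eid) and the vector plus delivery mode in
 * the data.
 */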

/* Global purge data */
struct kvm_ptc_g {
        unsigned long vaddr;
        unsigned long rr;
        unsigned long ps;
        struct kvm_vcpu *vcpu;
};

/* Exit control data */
struct exit_ctl_data {
        uint32_t exit_reason;
        uint32_t vm_status;
        union {
                struct kvm_mmio_req     ioreq;
                struct kvm_pal_call     pal_data;
                struct kvm_sal_call     sal_data;
                struct kvm_switch_rr6   rr_data;
                struct kvm_ipi_data     ipi_data;
                struct kvm_ptc_g        ptc_g_data;
        } u;
};
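/*
 * Illustrative sketch only: the helper below is hypothetical and not part
 * of the original interface.  It shows how an MMIO read exit could be
 * described in an exit_ctl_data record using the definitions above.
 */
static inline void example_fill_mmio_read(struct exit_ctl_data *p,
                                          uint64_t gpa, uint64_t size)
{
        p->exit_reason = EXIT_REASON_MMIO_INSTRUCTION;
        p->u.ioreq.addr = gpa;                  /* guest physical address */
        p->u.ioreq.size = size;                 /* access size in bytes */
        p->u.ioreq.dir = IOREQ_READ;            /* 1 = read */
        p->u.ioreq.state = STATE_IOREQ_READY;   /* ready for the host to handle */
}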

union pte_flags {
        unsigned long val;
        struct {
                unsigned long p    : 1;         /* 0     */
                unsigned long      : 1;         /* 1     */
                unsigned long ma   : 3;         /* 2-4   */
                unsigned long a    : 1;         /* 5     */
                unsigned long d    : 1;         /* 6     */
                unsigned long pl   : 2;         /* 7-8   */
                unsigned long ar   : 3;         /* 9-11  */
                unsigned long ppn  : 38;        /* 12-49 */
                unsigned long      : 2;         /* 50-51 */
                unsigned long ed   : 1;         /* 52    */
        };
};
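/*
 * The field layout above follows the ia64 translation entry format:
 * present (p), memory attribute (ma), accessed (a), dirty (d), privilege
 * level (pl), access rights (ar), physical page number (ppn) and exception
 * deferral (ed).
 */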

union ia64_pta {
        unsigned long val;
        struct {
                unsigned long ve        : 1;
                unsigned long reserved0 : 1;
                unsigned long size      : 6;
                unsigned long vf        : 1;
                unsigned long reserved1 : 6;
                unsigned long base      : 49;
        };
};
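/*
 * Matches the layout of the ia64 PTA control register: VHPT walker enable
 * (ve), VHPT size (size), VHPT format select (vf) and the VHPT base
 * address (base).
 */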

struct thash_cb {
        /* THASH base information */
        struct thash_data *hash;        /* hash table pointer */
        union ia64_pta pta;
        int num;
};

struct kvm_vcpu_stat {
};

struct kvm_vcpu_arch {
        int launched;
        int last_exit;
        int last_run_cpu;
        int vmm_tr_slot;
        int vm_tr_slot;
        int sn_rtc_tr_slot;

#define KVM_MP_STATE_RUNNABLE           0
#define KVM_MP_STATE_UNINITIALIZED      1
#define KVM_MP_STATE_INIT_RECEIVED      2
#define KVM_MP_STATE_HALTED             3
        int mp_state;

#define MAX_PTC_G_NUM                   3
        int ptc_g_count;
        struct kvm_ptc_g ptc_g_data[MAX_PTC_G_NUM];

        /* halt timer to wake up sleepy vcpus */
        struct hrtimer hlt_timer;
        long ht_active;

        struct kvm_lapic *apic;         /* kernel irqchip context */
        struct vpd *vpd;

        /* Exit data for vmm_transition */
        struct exit_ctl_data exit_data;

        cpumask_t cache_coherent_map;

        unsigned long vmm_rr;
        unsigned long host_rr6;
        unsigned long psbits[8];
        unsigned long cr_iipa;
        unsigned long cr_isr;
        unsigned long vsa_base;
        unsigned long dirty_log_lock_pa;
        unsigned long __gp;
        /* TR and TC. */
        struct thash_data itrs[NITRS];
        struct thash_data dtrs[NDTRS];
        /* Bit is set if there is a tr/tc for the region. */
        unsigned char itr_regions;
        unsigned char dtr_regions;
        unsigned char tc_regions;
        /* purge all */
        unsigned long ptce_base;
        unsigned long ptce_count[2];
        unsigned long ptce_stride[2];
        /* itc/itm */
        unsigned long last_itc;
        long itc_offset;
        unsigned long itc_check;
        unsigned long timer_check;
        unsigned int timer_pending;
        unsigned int timer_fired;

        unsigned long vrr[8];
        unsigned long ibr[8];
        unsigned long dbr[8];
        unsigned long insvc[4];         /* Interrupt in service. */
        unsigned long xtp;

        unsigned long metaphysical_rr0; /* from kvm_arch (so is pinned) */
        unsigned long metaphysical_rr4; /* from kvm_arch (so is pinned) */
        unsigned long metaphysical_saved_rr0;   /* from kvm_arch */
        unsigned long metaphysical_saved_rr4;   /* from kvm_arch */
        unsigned long fp_psr;           /* used for lazy float register */
        unsigned long saved_gp;
        /* for physical emulation */
        int mode_flags;
        struct thash_cb vtlb;
        struct thash_cb vhpt;
        char irq_check;
        char irq_new_pending;

        unsigned long opcode;
        unsigned long cause;
        char log_buf[VMM_LOG_LEN];
        union context host;
        union context guest;
};

struct kvm_vm_stat {
        u64 remote_tlb_flush;
};

struct kvm_sal_data {
        unsigned long boot_ip;
        unsigned long boot_gp;
};

struct kvm_arch_memory_slot {
};

struct kvm_arch {
        spinlock_t dirty_log_lock;

        unsigned long vm_base;
        unsigned long metaphysical_rr0;
        unsigned long metaphysical_rr4;
        unsigned long vmm_init_rr;

        int is_sn2;

        struct kvm_ioapic *vioapic;
        struct kvm_vm_stat stat;
        struct kvm_sal_data rdv_sal_data;

        struct list_head assigned_dev_head;
        struct iommu_domain *iommu_domain;
        int iommu_flags;

        unsigned long irq_sources_bitmap;
        unsigned long irq_states[KVM_IOAPIC_NUM_PINS];
};

union cpuid3_t {
        u64 value;
        struct {
                u64 number   : 8;
                u64 revision : 8;
                u64 model    : 8;
                u64 family   : 8;
                u64 archrev  : 8;
                u64 rv       : 24;
        };
};

struct kvm_pt_regs {
        /* The following registers are saved by SAVE_MIN: */
        unsigned long b6;       /* scratch */
        unsigned long b7;       /* scratch */

        unsigned long ar_csd;   /* used by cmp8xchg16 (scratch) */
        unsigned long ar_ssd;   /* reserved for future use (scratch) */

        unsigned long r8;       /* scratch (return value register 0) */
        unsigned long r9;       /* scratch (return value register 1) */
        unsigned long r10;      /* scratch (return value register 2) */
        unsigned long r11;      /* scratch (return value register 3) */

        unsigned long cr_ipsr;  /* interrupted task's psr */
        unsigned long cr_iip;   /* interrupted task's instruction pointer */
        unsigned long cr_ifs;   /* interrupted task's function state */

        unsigned long ar_unat;  /* interrupted task's NaT register (preserved) */
        unsigned long ar_pfs;   /* prev function state */
        unsigned long ar_rsc;   /* RSE configuration */
        /* The following two are valid only if cr_ipsr.cpl > 0: */
        unsigned long ar_rnat;  /* RSE NaT */
        unsigned long ar_bspstore;      /* RSE bspstore */

        unsigned long pr;       /* 64 predicate registers (1 bit each) */
        unsigned long b0;       /* return pointer (bp) */
        unsigned long loadrs;   /* size of dirty partition << 16 */

        unsigned long r1;       /* the gp pointer */
        unsigned long r12;      /* interrupted task's memory stack pointer */
        unsigned long r13;      /* thread pointer */

        unsigned long ar_fpsr;  /* floating point status (preserved) */
        unsigned long r15;      /* scratch */

        /* The remaining registers are NOT saved for system calls. */
        unsigned long r14;      /* scratch */
        unsigned long r2;       /* scratch */
        unsigned long r3;       /* scratch */
        unsigned long r16;      /* scratch */
        unsigned long r17;      /* scratch */
        unsigned long r18;      /* scratch */
        unsigned long r19;      /* scratch */
        unsigned long r20;      /* scratch */
        unsigned long r21;      /* scratch */
        unsigned long r22;      /* scratch */
        unsigned long r23;      /* scratch */
        unsigned long r24;      /* scratch */
        unsigned long r25;      /* scratch */
        unsigned long r26;      /* scratch */
        unsigned long r27;      /* scratch */
        unsigned long r28;      /* scratch */
        unsigned long r29;      /* scratch */
        unsigned long r30;      /* scratch */
        unsigned long r31;      /* scratch */
        unsigned long ar_ccv;   /* compare/exchange value (scratch) */

        /*
         * Floating point registers that the kernel considers scratch:
         */
        struct ia64_fpreg f6;   /* scratch */
        struct ia64_fpreg f7;   /* scratch */
        struct ia64_fpreg f8;   /* scratch */
        struct ia64_fpreg f9;   /* scratch */
        struct ia64_fpreg f10;  /* scratch */
        struct ia64_fpreg f11;  /* scratch */

        unsigned long r4;       /* preserved */
        unsigned long r5;       /* preserved */
        unsigned long r6;       /* preserved */
        unsigned long r7;       /* preserved */
        unsigned long eml_unat; /* used for emulating instruction */
        unsigned long pad0;     /* alignment pad */
};

static inline struct kvm_pt_regs *vcpu_regs(struct kvm_vcpu *v)
{
        return (struct kvm_pt_regs *) ((unsigned long) v + KVM_STK_OFFSET) - 1;
}
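/*
 * Illustrative sketch only: the helper below is hypothetical and not part
 * of the original header.  vcpu_regs() is typically used to reach the guest
 * register file saved at the top of the vcpu stack, for example to place an
 * emulation result in the return-value registers.
 */
static inline void kvm_example_set_return(struct kvm_vcpu *v, unsigned long val)
{
        struct kvm_pt_regs *regs = vcpu_regs(v);

        regs->r8 = val;         /* r8 is return value register 0 */
}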

typedef int kvm_vmm_entry(void);
typedef void kvm_tramp_entry(union context *host, union context *guest);

struct kvm_vmm_info {
        struct module   *module;
        kvm_vmm_entry   *vmm_entry;
        kvm_tramp_entry *tramp_entry;
        unsigned long   vmm_ivt;
        unsigned long   patch_mov_ar;
        unsigned long   patch_mov_ar_sn2;
};

int kvm_highest_pending_irq(struct kvm_vcpu *vcpu);
int kvm_emulate_halt(struct kvm_vcpu *vcpu);
int kvm_pal_emul(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
void kvm_sal_emul(struct kvm_vcpu *vcpu);

#define __KVM_HAVE_ARCH_VM_ALLOC 1
struct kvm *kvm_arch_alloc_vm(void);
void kvm_arch_free_vm(struct kvm *kvm);

#endif /* __ASSEMBLY__ */

#endif /* __ASM_KVM_HOST_H */