1 /*
2  * kvm_vcpu.c: handling all virtual cpu related things.
3  * Copyright (c) 2005, Intel Corporation.
4  *
5  * This program is free software; you can redistribute it and/or modify it
6  * under the terms and conditions of the GNU General Public License,
7  * version 2, as published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope it will be useful, but WITHOUT
10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
12  * more details.
13  *
14  * You should have received a copy of the GNU General Public License along with
15  * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
16  * Place - Suite 330, Boston, MA 02111-1307 USA.
17  *
18  *  Shaofan Li (Susue Li) <susie.li@intel.com>
19  *  Yaozu Dong (Eddie Dong) (Eddie.dong@intel.com)
20  *  Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com)
21  *  Xiantao Zhang <xiantao.zhang@intel.com>
22  */
23 
24 #include <linux/kvm_host.h>
25 #include <linux/types.h>
26 
27 #include <asm/processor.h>
28 #include <asm/ia64regs.h>
29 #include <asm/gcc_intrin.h>
30 #include <asm/kregs.h>
31 #include <asm/pgtable.h>
32 #include <asm/tlb.h>
33 
34 #include "asm-offsets.h"
35 #include "vcpu.h"
36 
37 /*
38  * Special notes:
39  * - Index by it/dt/rt sequence
40  * - Only existing mode transitions are allowed in this table
41  * - RSE is placed at lazy mode when emulating guest partial mode
42  * - If gva happens to fall in rr0 or rr4, the only allowed case is
43  *   identity mapping (gva = gpa); otherwise panic! (How?)
44  */
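/*
 * Rows are indexed by MODE_IND() of the old psr and columns by
 * MODE_IND() of the new psr, i.e. by the 3-bit (it,dt,rt) value of
 * each (see mm_switch_action() below).
 */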
45 int mm_switch_table[8][8] = {
46 	/*  2004/09/12(Kevin): Allow switch to self */
47 	/*
48 	 *  (it,dt,rt): (0,0,0) -> (1,1,1)
49 	 *  This kind of transition usually occurs in the very early
50 	 *  stage of Linux boot up procedure. Another case is in efi
51 	 *  and pal calls. (see "arch/ia64/kernel/head.S")
52 	 *
53 	 *  (it,dt,rt): (0,0,0) -> (0,1,1)
54  *  This kind of transition is found when OSYa exits efi boot
55  *  service. Since gva = gpa in this case (same region), data
56  *  accesses can be satisfied even though the itlb entry for
57  *  physical emulation is hit.
58 	 */
59 	{SW_SELF, 0,  0,  SW_NOP, 0,  0,  0,  SW_P2V},
60 	{0,  0,  0,  0,  0,  0,  0,  0},
61 	{0,  0,  0,  0,  0,  0,  0,  0},
62 	/*
63 	 *  (it,dt,rt): (0,1,1) -> (1,1,1)
64 	 *  This kind of transition is found in OSYa.
65 	 *
66 	 *  (it,dt,rt): (0,1,1) -> (0,0,0)
67 	 *  This kind of transition is found in OSYa
68 	 */
69 	{SW_NOP, 0,  0,  SW_SELF, 0,  0,  0,  SW_P2V},
70 	/* (1,0,0)->(1,1,1) */
71 	{0,  0,  0,  0,  0,  0,  0,  SW_P2V},
72 	/*
73 	 *  (it,dt,rt): (1,0,1) -> (1,1,1)
74 	 *  This kind of transition usually occurs when Linux returns
75 	 *  from the low level TLB miss handlers.
76 	 *  (see "arch/ia64/kernel/ivt.S")
77 	 */
78 	{0,  0,  0,  0,  0,  SW_SELF, 0,  SW_P2V},
79 	{0,  0,  0,  0,  0,  0,  0,  0},
80 	/*
81 	 *  (it,dt,rt): (1,1,1) -> (1,0,1)
82 	 *  This kind of transition usually occurs in Linux low level
83 	 *  TLB miss handler. (see "arch/ia64/kernel/ivt.S")
84 	 *
85 	 *  (it,dt,rt): (1,1,1) -> (0,0,0)
86 	 *  This kind of transition usually occurs in pal and efi calls,
87 	 *  which requires running in physical mode.
88 	 *  (see "arch/ia64/kernel/head.S")
89 	 *  (1,1,1)->(1,0,0)
90 	 */
91 
92 	{SW_V2P, 0,  0,  0,  SW_V2P, SW_V2P, 0,  SW_SELF},
93 };
94 
95 void physical_mode_init(struct kvm_vcpu *vcpu)
96 {
97 	vcpu->arch.mode_flags = GUEST_IN_PHY;
98 }
99 
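/*
 * Entered when the guest switches to physical (metaphysical) addressing:
 * rr0 and rr4 are loaded with the metaphysical RIDs while the virtual
 * mode values stay in metaphysical_saved_rr0/rr4.
 */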
100 void switch_to_physical_rid(struct kvm_vcpu *vcpu)
101 {
102 	unsigned long psr;
103 
104 	/* Switch rr[0] and rr[4] to the metaphysical RIDs */
105 	psr = ia64_clear_ic();
106 	ia64_set_rr(VRN0<<VRN_SHIFT, vcpu->arch.metaphysical_rr0);
107 	ia64_srlz_d();
108 	ia64_set_rr(VRN4<<VRN_SHIFT, vcpu->arch.metaphysical_rr4);
109 	ia64_srlz_d();
110 
111 	ia64_set_psr(psr);
112 	return;
113 }
114 
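/*
 * Restore the guest's virtual mode rr0/rr4, which vcpu_set_rr() keeps
 * in metaphysical_saved_rr0/rr4, when leaving physical mode emulation.
 */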
115 void switch_to_virtual_rid(struct kvm_vcpu *vcpu)
116 {
117 	unsigned long psr;
118 
119 	psr = ia64_clear_ic();
120 	ia64_set_rr(VRN0 << VRN_SHIFT, vcpu->arch.metaphysical_saved_rr0);
121 	ia64_srlz_d();
122 	ia64_set_rr(VRN4 << VRN_SHIFT, vcpu->arch.metaphysical_saved_rr4);
123 	ia64_srlz_d();
124 	ia64_set_psr(psr);
125 	return;
126 }
127 
128 static int mm_switch_action(struct ia64_psr opsr, struct ia64_psr npsr)
129 {
130 	return mm_switch_table[MODE_IND(opsr)][MODE_IND(npsr)];
131 }
132 
133 void switch_mm_mode(struct kvm_vcpu *vcpu, struct ia64_psr old_psr,
134 					struct ia64_psr new_psr)
135 {
136 	int act;
137 	act = mm_switch_action(old_psr, new_psr);
138 	switch (act) {
139 	case SW_V2P:
140 		/*printk("V -> P mode transition: (0x%lx -> 0x%lx)\n",
141 		old_psr.val, new_psr.val);*/
142 		switch_to_physical_rid(vcpu);
143 		/*
144 		 * Set RSE to enforced lazy mode, to prevent active RSE
145 		 * save/restore while the guest is in physical mode.
146 		 */
147 		vcpu->arch.mode_flags |= GUEST_IN_PHY;
148 		break;
149 	case SW_P2V:
150 		switch_to_virtual_rid(vcpu);
151 		/*
152 		 * recover old mode which is saved when entering
153 		 * guest physical mode
154 		 */
155 		vcpu->arch.mode_flags &= ~GUEST_IN_PHY;
156 		break;
157 	case SW_SELF:
158 		break;
159 	case SW_NOP:
160 		break;
161 	default:
162 		/* Sanity check */
163 		break;
164 	}
165 	return;
166 }
167 
184 void check_mm_mode_switch(struct kvm_vcpu *vcpu, struct ia64_psr old_psr,
185 					struct ia64_psr new_psr)
186 {
187 
188 	if ((old_psr.dt != new_psr.dt)
189 			|| (old_psr.it != new_psr.it)
190 			|| (old_psr.rt != new_psr.rt))
191 		switch_mm_mode(vcpu, old_psr, new_psr);
192 
193 	return;
194 }
195 
196 
197 /*
198  * In physical mode, tc/tr inserts for regions 0 and 4 use
199  * RID[0] and RID[4], which are reserved for physical mode
200  * emulation. However, the inserted tc/tr entries need the rid of
201  * virtual mode, so the original virtual rid must be restored
202  * before the insert.
203  *
204  * Operations which require such a switch include:
205  *  - insertions (itc.*, itr.*)
206  *  - purges (ptc.* and ptr.*)
207  *  - tpa
208  *  - tak
209  *  - thash?, ttag?
210  * All of the above need the actual virtual rid for the destination entry.
211  */
212 
213 void prepare_if_physical_mode(struct kvm_vcpu *vcpu)
214 {
215 	if (is_physical_mode(vcpu)) {
216 		vcpu->arch.mode_flags |= GUEST_PHY_EMUL;
217 		switch_to_virtual_rid(vcpu);
218 	}
219 	return;
220 }
221 
222 /* Recover always follows prepare */
223 void recover_if_physical_mode(struct kvm_vcpu *vcpu)
224 {
225 	if (is_physical_mode(vcpu))
226 		switch_to_physical_rid(vcpu);
227 	vcpu->arch.mode_flags &= ~GUEST_PHY_EMUL;
228 	return;
229 }
230 
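/*
 * RPT(x) is an offsetof-style helper: it yields the byte offset of
 * field x inside struct kvm_pt_regs, used by gr_info[] below to locate
 * the static general registers saved in the exception frame.
 */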
231 #define RPT(x)	((u16) &((struct kvm_pt_regs *)0)->x)
232 
233 static u16 gr_info[32] = {
234 	0, 	/* r0 is read-only : WE SHOULD NEVER GET THIS */
235 	RPT(r1), RPT(r2), RPT(r3),
236 	RPT(r4), RPT(r5), RPT(r6), RPT(r7),
237 	RPT(r8), RPT(r9), RPT(r10), RPT(r11),
238 	RPT(r12), RPT(r13), RPT(r14), RPT(r15),
239 	RPT(r16), RPT(r17), RPT(r18), RPT(r19),
240 	RPT(r20), RPT(r21), RPT(r22), RPT(r23),
241 	RPT(r24), RPT(r25), RPT(r26), RPT(r27),
242 	RPT(r28), RPT(r29), RPT(r30), RPT(r31)
243 };
244 
245 #define IA64_FIRST_STACKED_GR   32
246 #define IA64_FIRST_ROTATING_FR  32
247 
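/*
 * Apply register rotation: sor is the size of the rotating region and
 * rrb the rotation base; the result wraps around within [0, sor).
 */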
248 static inline unsigned long
249 rotate_reg(unsigned long sor, unsigned long rrb, unsigned long reg)
250 {
251 	reg += rrb;
252 	if (reg >= sor)
253 		reg -= sor;
254 	return reg;
255 }
256 
257 /*
258  * Return the (rotated) index for floating point register REGNUM
259  * (REGNUM must be in the range 32-127; the result is in the
260  * range 0-95).
261  */
262 static inline unsigned long fph_index(struct kvm_pt_regs *regs,
263 						long regnum)
264 {
265 	unsigned long rrb_fr = (regs->cr_ifs >> 25) & 0x7f;
266 	return rotate_reg(96, rrb_fr, (regnum - IA64_FIRST_ROTATING_FR));
267 }
268 
269 /*
270  * The inverse of the above: given bspstore and the number of
271  * registers, calculate ar.bsp.
272  */
273 static inline unsigned long *kvm_rse_skip_regs(unsigned long *addr,
274 							long num_regs)
275 {
276 	long delta = ia64_rse_slot_num(addr) + num_regs;
277 	int i = 0;
278 
279 	if (num_regs < 0)
280 		delta -= 0x3e;
281 	if (delta < 0) {
282 		while (delta <= -0x3f) {
283 			i--;
284 			delta += 0x3f;
285 		}
286 	} else {
287 		while (delta >= 0x3f) {
288 			i++;
289 			delta -= 0x3f;
290 		}
291 	}
292 
293 	return addr + num_regs + i;
294 }
295 
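/*
 * Read stacked register r1 from the VMM register backing store (kbs).
 * If the target slot has not been spilled to memory yet, flush the
 * register stack first; the NaT bit comes from AR.RNAT or from the
 * in-memory RNAT collection slot, depending on where the spill landed.
 */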
296 static void get_rse_reg(struct kvm_pt_regs *regs, unsigned long r1,
297 					unsigned long *val, int *nat)
298 {
299 	unsigned long *bsp, *addr, *rnat_addr, *bspstore;
300 	unsigned long *kbs = (void *) current_vcpu + VMM_RBS_OFFSET;
301 	unsigned long nat_mask;
302 	unsigned long old_rsc, new_rsc;
303 	long sof = (regs->cr_ifs) & 0x7f;
304 	long sor = (((regs->cr_ifs >> 14) & 0xf) << 3);
305 	long rrb_gr = (regs->cr_ifs >> 18) & 0x7f;
306 	long ridx = r1 - 32;
307 
308 	if (ridx < sor)
309 		ridx = rotate_reg(sor, rrb_gr, ridx);
310 
311 	old_rsc = ia64_getreg(_IA64_REG_AR_RSC);
312 	new_rsc = old_rsc&(~(0x3));
313 	ia64_setreg(_IA64_REG_AR_RSC, new_rsc);
314 
315 	bspstore = (unsigned long *)ia64_getreg(_IA64_REG_AR_BSPSTORE);
316 	bsp = kbs + (regs->loadrs >> 19);
317 
318 	addr = kvm_rse_skip_regs(bsp, -sof + ridx);
319 	nat_mask = 1UL << ia64_rse_slot_num(addr);
320 	rnat_addr = ia64_rse_rnat_addr(addr);
321 
322 	if (addr >= bspstore) {
323 		ia64_flushrs();
324 		ia64_mf();
325 		bspstore = (unsigned long *)ia64_getreg(_IA64_REG_AR_BSPSTORE);
326 	}
327 	*val = *addr;
328 	if (nat) {
329 		if (bspstore < rnat_addr)
330 			*nat = (int)!!(ia64_getreg(_IA64_REG_AR_RNAT)
331 							& nat_mask);
332 		else
333 			*nat = (int)!!((*rnat_addr) & nat_mask);
334 		ia64_setreg(_IA64_REG_AR_RSC, old_rsc);
335 	}
336 }
337 
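/*
 * Write stacked register r1 in the backing store and clear its NaT
 * bit, either in AR.RNAT or in the in-memory RNAT collection slot.
 * RSC is switched to enforced lazy mode with loadrs = 0 for the update.
 */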
338 void set_rse_reg(struct kvm_pt_regs *regs, unsigned long r1,
339 				unsigned long val, unsigned long nat)
340 {
341 	unsigned long *bsp, *bspstore, *addr, *rnat_addr;
342 	unsigned long *kbs = (void *) current_vcpu + VMM_RBS_OFFSET;
343 	unsigned long nat_mask;
344 	unsigned long old_rsc, new_rsc, psr;
345 	unsigned long rnat;
346 	long sof = (regs->cr_ifs) & 0x7f;
347 	long sor = (((regs->cr_ifs >> 14) & 0xf) << 3);
348 	long rrb_gr = (regs->cr_ifs >> 18) & 0x7f;
349 	long ridx = r1 - 32;
350 
351 	if (ridx < sor)
352 		ridx = rotate_reg(sor, rrb_gr, ridx);
353 
354 	old_rsc = ia64_getreg(_IA64_REG_AR_RSC);
355 	/* put RSC to lazy mode, and set loadrs 0 */
356 	new_rsc = old_rsc & (~0x3fff0003);
357 	ia64_setreg(_IA64_REG_AR_RSC, new_rsc);
358 	bsp = kbs + (regs->loadrs >> 19); /* 16 + 3 */
359 
360 	addr = kvm_rse_skip_regs(bsp, -sof + ridx);
361 	nat_mask = 1UL << ia64_rse_slot_num(addr);
362 	rnat_addr = ia64_rse_rnat_addr(addr);
363 
364 	local_irq_save(psr);
365 	bspstore = (unsigned long *)ia64_getreg(_IA64_REG_AR_BSPSTORE);
366 	if (addr >= bspstore) {
367 
368 		ia64_flushrs();
369 		ia64_mf();
370 		*addr = val;
371 		bspstore = (unsigned long *)ia64_getreg(_IA64_REG_AR_BSPSTORE);
372 		rnat = ia64_getreg(_IA64_REG_AR_RNAT);
373 		if (bspstore < rnat_addr)
374 			rnat = rnat & (~nat_mask);
375 		else
376 			*rnat_addr = (*rnat_addr)&(~nat_mask);
377 
378 		ia64_mf();
379 		ia64_loadrs();
380 		ia64_setreg(_IA64_REG_AR_RNAT, rnat);
381 	} else {
382 		rnat = ia64_getreg(_IA64_REG_AR_RNAT);
383 		*addr = val;
384 		if (bspstore < rnat_addr)
385 			rnat = rnat&(~nat_mask);
386 		else
387 			*rnat_addr = (*rnat_addr) & (~nat_mask);
388 
389 		ia64_setreg(_IA64_REG_AR_BSPSTORE, (unsigned long)bspstore);
390 		ia64_setreg(_IA64_REG_AR_RNAT, rnat);
391 	}
392 	local_irq_restore(psr);
393 	ia64_setreg(_IA64_REG_AR_RSC, old_rsc);
394 }
395 
396 void getreg(unsigned long regnum, unsigned long *val,
397 				int *nat, struct kvm_pt_regs *regs)
398 {
399 	unsigned long addr, *unat;
400 	if (regnum >= IA64_FIRST_STACKED_GR) {
401 		get_rse_reg(regs, regnum, val, nat);
402 		return;
403 	}
404 
405 	/*
406 	 * Now look at registers in [0-31] range and init correct UNAT
407 	 */
408 	addr = (unsigned long)regs;
409 	unat = &regs->eml_unat;
410 
411 	addr += gr_info[regnum];
412 
413 	*val  = *(unsigned long *)addr;
414 	/*
415 	 * do it only when requested
416 	 */
417 	if (nat)
418 		*nat  = (*unat >> ((addr >> 3) & 0x3f)) & 0x1UL;
419 }
420 
421 void setreg(unsigned long regnum, unsigned long val,
422 			int nat, struct kvm_pt_regs *regs)
423 {
424 	unsigned long addr;
425 	unsigned long bitmask;
426 	unsigned long *unat;
427 
428 	/*
429 	 * First takes care of stacked registers
430 	 */
431 	if (regnum >= IA64_FIRST_STACKED_GR) {
432 		set_rse_reg(regs, regnum, val, nat);
433 		return;
434 	}
435 
436 	/*
437 	 * Now look at registers in [0-31] range and init correct UNAT
438 	 */
439 	addr = (unsigned long)regs;
440 	unat = &regs->eml_unat;
441 	/*
442 	 * add offset from base of struct
443 	 * and do it !
444 	 */
445 	addr += gr_info[regnum];
446 
447 	*(unsigned long *)addr = val;
448 
449 	/*
450 	 * We need to update the corresponding UNAT bit to fully emulate
451 	 * the load: UNAT bit_pos = GR[r3]{8:3} (from EAS-2.4)
452 	 */
453 	bitmask   = 1UL << ((addr >> 3) & 0x3f);
454 	if (nat)
455 		*unat |= bitmask;
456 	 else
457 		*unat &= ~bitmask;
458 
459 }
460 
461 u64 vcpu_get_gr(struct kvm_vcpu *vcpu, unsigned long reg)
462 {
463 	struct kvm_pt_regs *regs = vcpu_regs(vcpu);
464 	unsigned long val;
465 
466 	if (!reg)
467 		return 0;
468 	getreg(reg, &val, 0, regs);
469 	return val;
470 }
471 
472 void vcpu_set_gr(struct kvm_vcpu *vcpu, unsigned long reg, u64 value, int nat)
473 {
474 	struct kvm_pt_regs *regs = vcpu_regs(vcpu);
475 	long sof = (regs->cr_ifs) & 0x7f;
476 
477 	if (!reg)
478 		return;
479 	if (reg >= sof + 32)
480 		return;
481 	setreg(reg, value, nat, regs);	/* FIXME: handle NATs later*/
482 }
483 
484 void getfpreg(unsigned long regnum, struct ia64_fpreg *fpval,
485 				struct kvm_pt_regs *regs)
486 {
487 	/* Take floating register rotation into consideration*/
488 	if (regnum >= IA64_FIRST_ROTATING_FR)
489 		regnum = IA64_FIRST_ROTATING_FR + fph_index(regs, regnum);
490 #define CASE_FIXED_FP(reg)			\
491 	case  (reg) :				\
492 		ia64_stf_spill(fpval, reg);	\
493 	break
494 
495 	switch (regnum) {
496 		CASE_FIXED_FP(0);
497 		CASE_FIXED_FP(1);
498 		CASE_FIXED_FP(2);
499 		CASE_FIXED_FP(3);
500 		CASE_FIXED_FP(4);
501 		CASE_FIXED_FP(5);
502 
503 		CASE_FIXED_FP(6);
504 		CASE_FIXED_FP(7);
505 		CASE_FIXED_FP(8);
506 		CASE_FIXED_FP(9);
507 		CASE_FIXED_FP(10);
508 		CASE_FIXED_FP(11);
509 
510 		CASE_FIXED_FP(12);
511 		CASE_FIXED_FP(13);
512 		CASE_FIXED_FP(14);
513 		CASE_FIXED_FP(15);
514 		CASE_FIXED_FP(16);
515 		CASE_FIXED_FP(17);
516 		CASE_FIXED_FP(18);
517 		CASE_FIXED_FP(19);
518 		CASE_FIXED_FP(20);
519 		CASE_FIXED_FP(21);
520 		CASE_FIXED_FP(22);
521 		CASE_FIXED_FP(23);
522 		CASE_FIXED_FP(24);
523 		CASE_FIXED_FP(25);
524 		CASE_FIXED_FP(26);
525 		CASE_FIXED_FP(27);
526 		CASE_FIXED_FP(28);
527 		CASE_FIXED_FP(29);
528 		CASE_FIXED_FP(30);
529 		CASE_FIXED_FP(31);
530 		CASE_FIXED_FP(32);
531 		CASE_FIXED_FP(33);
532 		CASE_FIXED_FP(34);
533 		CASE_FIXED_FP(35);
534 		CASE_FIXED_FP(36);
535 		CASE_FIXED_FP(37);
536 		CASE_FIXED_FP(38);
537 		CASE_FIXED_FP(39);
538 		CASE_FIXED_FP(40);
539 		CASE_FIXED_FP(41);
540 		CASE_FIXED_FP(42);
541 		CASE_FIXED_FP(43);
542 		CASE_FIXED_FP(44);
543 		CASE_FIXED_FP(45);
544 		CASE_FIXED_FP(46);
545 		CASE_FIXED_FP(47);
546 		CASE_FIXED_FP(48);
547 		CASE_FIXED_FP(49);
548 		CASE_FIXED_FP(50);
549 		CASE_FIXED_FP(51);
550 		CASE_FIXED_FP(52);
551 		CASE_FIXED_FP(53);
552 		CASE_FIXED_FP(54);
553 		CASE_FIXED_FP(55);
554 		CASE_FIXED_FP(56);
555 		CASE_FIXED_FP(57);
556 		CASE_FIXED_FP(58);
557 		CASE_FIXED_FP(59);
558 		CASE_FIXED_FP(60);
559 		CASE_FIXED_FP(61);
560 		CASE_FIXED_FP(62);
561 		CASE_FIXED_FP(63);
562 		CASE_FIXED_FP(64);
563 		CASE_FIXED_FP(65);
564 		CASE_FIXED_FP(66);
565 		CASE_FIXED_FP(67);
566 		CASE_FIXED_FP(68);
567 		CASE_FIXED_FP(69);
568 		CASE_FIXED_FP(70);
569 		CASE_FIXED_FP(71);
570 		CASE_FIXED_FP(72);
571 		CASE_FIXED_FP(73);
572 		CASE_FIXED_FP(74);
573 		CASE_FIXED_FP(75);
574 		CASE_FIXED_FP(76);
575 		CASE_FIXED_FP(77);
576 		CASE_FIXED_FP(78);
577 		CASE_FIXED_FP(79);
578 		CASE_FIXED_FP(80);
579 		CASE_FIXED_FP(81);
580 		CASE_FIXED_FP(82);
581 		CASE_FIXED_FP(83);
582 		CASE_FIXED_FP(84);
583 		CASE_FIXED_FP(85);
584 		CASE_FIXED_FP(86);
585 		CASE_FIXED_FP(87);
586 		CASE_FIXED_FP(88);
587 		CASE_FIXED_FP(89);
588 		CASE_FIXED_FP(90);
589 		CASE_FIXED_FP(91);
590 		CASE_FIXED_FP(92);
591 		CASE_FIXED_FP(93);
592 		CASE_FIXED_FP(94);
593 		CASE_FIXED_FP(95);
594 		CASE_FIXED_FP(96);
595 		CASE_FIXED_FP(97);
596 		CASE_FIXED_FP(98);
597 		CASE_FIXED_FP(99);
598 		CASE_FIXED_FP(100);
599 		CASE_FIXED_FP(101);
600 		CASE_FIXED_FP(102);
601 		CASE_FIXED_FP(103);
602 		CASE_FIXED_FP(104);
603 		CASE_FIXED_FP(105);
604 		CASE_FIXED_FP(106);
605 		CASE_FIXED_FP(107);
606 		CASE_FIXED_FP(108);
607 		CASE_FIXED_FP(109);
608 		CASE_FIXED_FP(110);
609 		CASE_FIXED_FP(111);
610 		CASE_FIXED_FP(112);
611 		CASE_FIXED_FP(113);
612 		CASE_FIXED_FP(114);
613 		CASE_FIXED_FP(115);
614 		CASE_FIXED_FP(116);
615 		CASE_FIXED_FP(117);
616 		CASE_FIXED_FP(118);
617 		CASE_FIXED_FP(119);
618 		CASE_FIXED_FP(120);
619 		CASE_FIXED_FP(121);
620 		CASE_FIXED_FP(122);
621 		CASE_FIXED_FP(123);
622 		CASE_FIXED_FP(124);
623 		CASE_FIXED_FP(125);
624 		CASE_FIXED_FP(126);
625 		CASE_FIXED_FP(127);
626 	}
627 #undef CASE_FIXED_FP
628 }
629 
630 void setfpreg(unsigned long regnum, struct ia64_fpreg *fpval,
631 					struct kvm_pt_regs *regs)
632 {
633 	/* Take floating register rotation into consideration*/
634 	if (regnum >= IA64_FIRST_ROTATING_FR)
635 		regnum = IA64_FIRST_ROTATING_FR + fph_index(regs, regnum);
636 
637 #define CASE_FIXED_FP(reg)			\
638 	case (reg) :				\
639 		ia64_ldf_fill(reg, fpval);	\
640 	break
641 
642 	switch (regnum) {
643 		CASE_FIXED_FP(2);
644 		CASE_FIXED_FP(3);
645 		CASE_FIXED_FP(4);
646 		CASE_FIXED_FP(5);
647 
648 		CASE_FIXED_FP(6);
649 		CASE_FIXED_FP(7);
650 		CASE_FIXED_FP(8);
651 		CASE_FIXED_FP(9);
652 		CASE_FIXED_FP(10);
653 		CASE_FIXED_FP(11);
654 
655 		CASE_FIXED_FP(12);
656 		CASE_FIXED_FP(13);
657 		CASE_FIXED_FP(14);
658 		CASE_FIXED_FP(15);
659 		CASE_FIXED_FP(16);
660 		CASE_FIXED_FP(17);
661 		CASE_FIXED_FP(18);
662 		CASE_FIXED_FP(19);
663 		CASE_FIXED_FP(20);
664 		CASE_FIXED_FP(21);
665 		CASE_FIXED_FP(22);
666 		CASE_FIXED_FP(23);
667 		CASE_FIXED_FP(24);
668 		CASE_FIXED_FP(25);
669 		CASE_FIXED_FP(26);
670 		CASE_FIXED_FP(27);
671 		CASE_FIXED_FP(28);
672 		CASE_FIXED_FP(29);
673 		CASE_FIXED_FP(30);
674 		CASE_FIXED_FP(31);
675 		CASE_FIXED_FP(32);
676 		CASE_FIXED_FP(33);
677 		CASE_FIXED_FP(34);
678 		CASE_FIXED_FP(35);
679 		CASE_FIXED_FP(36);
680 		CASE_FIXED_FP(37);
681 		CASE_FIXED_FP(38);
682 		CASE_FIXED_FP(39);
683 		CASE_FIXED_FP(40);
684 		CASE_FIXED_FP(41);
685 		CASE_FIXED_FP(42);
686 		CASE_FIXED_FP(43);
687 		CASE_FIXED_FP(44);
688 		CASE_FIXED_FP(45);
689 		CASE_FIXED_FP(46);
690 		CASE_FIXED_FP(47);
691 		CASE_FIXED_FP(48);
692 		CASE_FIXED_FP(49);
693 		CASE_FIXED_FP(50);
694 		CASE_FIXED_FP(51);
695 		CASE_FIXED_FP(52);
696 		CASE_FIXED_FP(53);
697 		CASE_FIXED_FP(54);
698 		CASE_FIXED_FP(55);
699 		CASE_FIXED_FP(56);
700 		CASE_FIXED_FP(57);
701 		CASE_FIXED_FP(58);
702 		CASE_FIXED_FP(59);
703 		CASE_FIXED_FP(60);
704 		CASE_FIXED_FP(61);
705 		CASE_FIXED_FP(62);
706 		CASE_FIXED_FP(63);
707 		CASE_FIXED_FP(64);
708 		CASE_FIXED_FP(65);
709 		CASE_FIXED_FP(66);
710 		CASE_FIXED_FP(67);
711 		CASE_FIXED_FP(68);
712 		CASE_FIXED_FP(69);
713 		CASE_FIXED_FP(70);
714 		CASE_FIXED_FP(71);
715 		CASE_FIXED_FP(72);
716 		CASE_FIXED_FP(73);
717 		CASE_FIXED_FP(74);
718 		CASE_FIXED_FP(75);
719 		CASE_FIXED_FP(76);
720 		CASE_FIXED_FP(77);
721 		CASE_FIXED_FP(78);
722 		CASE_FIXED_FP(79);
723 		CASE_FIXED_FP(80);
724 		CASE_FIXED_FP(81);
725 		CASE_FIXED_FP(82);
726 		CASE_FIXED_FP(83);
727 		CASE_FIXED_FP(84);
728 		CASE_FIXED_FP(85);
729 		CASE_FIXED_FP(86);
730 		CASE_FIXED_FP(87);
731 		CASE_FIXED_FP(88);
732 		CASE_FIXED_FP(89);
733 		CASE_FIXED_FP(90);
734 		CASE_FIXED_FP(91);
735 		CASE_FIXED_FP(92);
736 		CASE_FIXED_FP(93);
737 		CASE_FIXED_FP(94);
738 		CASE_FIXED_FP(95);
739 		CASE_FIXED_FP(96);
740 		CASE_FIXED_FP(97);
741 		CASE_FIXED_FP(98);
742 		CASE_FIXED_FP(99);
743 		CASE_FIXED_FP(100);
744 		CASE_FIXED_FP(101);
745 		CASE_FIXED_FP(102);
746 		CASE_FIXED_FP(103);
747 		CASE_FIXED_FP(104);
748 		CASE_FIXED_FP(105);
749 		CASE_FIXED_FP(106);
750 		CASE_FIXED_FP(107);
751 		CASE_FIXED_FP(108);
752 		CASE_FIXED_FP(109);
753 		CASE_FIXED_FP(110);
754 		CASE_FIXED_FP(111);
755 		CASE_FIXED_FP(112);
756 		CASE_FIXED_FP(113);
757 		CASE_FIXED_FP(114);
758 		CASE_FIXED_FP(115);
759 		CASE_FIXED_FP(116);
760 		CASE_FIXED_FP(117);
761 		CASE_FIXED_FP(118);
762 		CASE_FIXED_FP(119);
763 		CASE_FIXED_FP(120);
764 		CASE_FIXED_FP(121);
765 		CASE_FIXED_FP(122);
766 		CASE_FIXED_FP(123);
767 		CASE_FIXED_FP(124);
768 		CASE_FIXED_FP(125);
769 		CASE_FIXED_FP(126);
770 		CASE_FIXED_FP(127);
771 	}
772 }
773 
774 void vcpu_get_fpreg(struct kvm_vcpu *vcpu, unsigned long reg,
775 						struct ia64_fpreg *val)
776 {
777 	struct kvm_pt_regs *regs = vcpu_regs(vcpu);
778 
779 	getfpreg(reg, val, regs);   /* FIXME: handle NATs later*/
780 }
781 
782 void vcpu_set_fpreg(struct kvm_vcpu *vcpu, unsigned long reg,
783 						struct ia64_fpreg *val)
784 {
785 	struct kvm_pt_regs *regs = vcpu_regs(vcpu);
786 
787 	if (reg > 1)
788 		setfpreg(reg, val, regs);   /* FIXME: handle NATs later*/
789 }
790 
791 /*
792  * The Altix RTC is mapped specially here for the vmm module
793  */
794 #define SN_RTC_BASE	(u64 *)(KVM_VMM_BASE+(1UL<<KVM_VMM_SHIFT))
795 static long kvm_get_itc(struct kvm_vcpu *vcpu)
796 {
797 #if defined(CONFIG_IA64_SGI_SN2) || defined(CONFIG_IA64_GENERIC)
798 	struct kvm *kvm = (struct kvm *)KVM_VM_BASE;
799 
800 	if (kvm->arch.is_sn2)
801 		return (*SN_RTC_BASE);
802 	else
803 #endif
804 		return ia64_getreg(_IA64_REG_AR_ITC);
805 }
806 
807 /************************************************************************
808  * lsapic timer
809  ***********************************************************************/
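/*
 * The guest ITC is the host time source (AR.ITC, or the Altix RTC on
 * SN2) plus a per-vcpu itc_offset; last_itc keeps the returned value
 * monotonic across reads.
 */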
810 u64 vcpu_get_itc(struct kvm_vcpu *vcpu)
811 {
812 	unsigned long guest_itc;
813 	guest_itc = VMX(vcpu, itc_offset) + kvm_get_itc(vcpu);
814 
815 	if (guest_itc >= VMX(vcpu, last_itc)) {
816 		VMX(vcpu, last_itc) = guest_itc;
817 		return  guest_itc;
818 	} else
819 		return VMX(vcpu, last_itc);
820 }
821 
822 static inline void vcpu_set_itm(struct kvm_vcpu *vcpu, u64 val);
823 static void vcpu_set_itc(struct kvm_vcpu *vcpu, u64 val)
824 {
825 	struct kvm_vcpu *v;
826 	struct kvm *kvm;
827 	int i;
828 	long itc_offset = val - kvm_get_itc(vcpu);
829 	unsigned long vitv = VCPU(vcpu, itv);
830 
831 	kvm = (struct kvm *)KVM_VM_BASE;
832 
833 	if (kvm_vcpu_is_bsp(vcpu)) {
834 		for (i = 0; i < atomic_read(&kvm->online_vcpus); i++) {
835 			v = (struct kvm_vcpu *)((char *)vcpu +
836 					sizeof(struct kvm_vcpu_data) * i);
837 			VMX(v, itc_offset) = itc_offset;
838 			VMX(v, last_itc) = 0;
839 		}
840 	}
841 	VMX(vcpu, last_itc) = 0;
842 	if (VCPU(vcpu, itm) <= val) {
843 		VMX(vcpu, itc_check) = 0;
844 		vcpu_unpend_interrupt(vcpu, vitv);
845 	} else {
846 		VMX(vcpu, itc_check) = 1;
847 		vcpu_set_itm(vcpu, VCPU(vcpu, itm));
848 	}
849 
850 }
851 
852 static inline u64 vcpu_get_itm(struct kvm_vcpu *vcpu)
853 {
854 	return ((u64)VCPU(vcpu, itm));
855 }
856 
857 static inline void vcpu_set_itm(struct kvm_vcpu *vcpu, u64 val)
858 {
859 	unsigned long vitv = VCPU(vcpu, itv);
860 	VCPU(vcpu, itm) = val;
861 
862 	if (val > vcpu_get_itc(vcpu)) {
863 		VMX(vcpu, itc_check) = 1;
864 		vcpu_unpend_interrupt(vcpu, vitv);
865 		VMX(vcpu, timer_pending) = 0;
866 	} else
867 		VMX(vcpu, itc_check) = 0;
868 }
869 
870 #define  ITV_VECTOR(itv)    (itv&0xff)
871 #define  ITV_IRQ_MASK(itv)  (itv&(1<<16))
872 
873 static inline void vcpu_set_itv(struct kvm_vcpu *vcpu, u64 val)
874 {
875 	VCPU(vcpu, itv) = val;
876 	if (!ITV_IRQ_MASK(val) && vcpu->arch.timer_pending) {
877 		vcpu_pend_interrupt(vcpu, ITV_VECTOR(val));
878 		vcpu->arch.timer_pending = 0;
879 	}
880 }
881 
882 static inline void vcpu_set_eoi(struct kvm_vcpu *vcpu, u64 val)
883 {
884 	int vec;
885 
886 	vec = highest_inservice_irq(vcpu);
887 	if (vec == NULL_VECTOR)
888 		return;
889 	VMX(vcpu, insvc[vec >> 6]) &= ~(1UL << (vec & 63));
890 	VCPU(vcpu, eoi) = 0;
891 	vcpu->arch.irq_new_pending = 1;
892 
893 }
894 
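/*
 * Decide whether the highest pending vector can be delivered: returns
 * IRQ_NO_MASKED if it can, IRQ_MASKED_BY_INSVC if an equal or higher
 * priority interrupt is already in service, or IRQ_MASKED_BY_VTPR if
 * the guest TPR (mic/mmi) masks it.
 */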
895 /* See Table 5-8 in SDM vol2 for the definition */
896 int irq_masked(struct kvm_vcpu *vcpu, int h_pending, int h_inservice)
897 {
898 	union ia64_tpr vtpr;
899 
900 	vtpr.val = VCPU(vcpu, tpr);
901 
902 	if (h_inservice == NMI_VECTOR)
903 		return IRQ_MASKED_BY_INSVC;
904 
905 	if (h_pending == NMI_VECTOR) {
906 		/* Non Maskable Interrupt */
907 		return IRQ_NO_MASKED;
908 	}
909 
910 	if (h_inservice == ExtINT_VECTOR)
911 		return IRQ_MASKED_BY_INSVC;
912 
913 	if (h_pending == ExtINT_VECTOR) {
914 		if (vtpr.mmi) {
915 			/* mask all external IRQ */
916 			return IRQ_MASKED_BY_VTPR;
917 		} else
918 			return IRQ_NO_MASKED;
919 	}
920 
921 	if (is_higher_irq(h_pending, h_inservice)) {
922 		if (is_higher_class(h_pending, vtpr.mic + (vtpr.mmi << 4)))
923 			return IRQ_NO_MASKED;
924 		else
925 			return IRQ_MASKED_BY_VTPR;
926 	} else {
927 		return IRQ_MASKED_BY_INSVC;
928 	}
929 }
930 
931 void vcpu_pend_interrupt(struct kvm_vcpu *vcpu, u8 vec)
932 {
933 	long spsr;
934 	int ret;
935 
936 	local_irq_save(spsr);
937 	ret = test_and_set_bit(vec, &VCPU(vcpu, irr[0]));
938 	local_irq_restore(spsr);
939 
940 	vcpu->arch.irq_new_pending = 1;
941 }
942 
943 void vcpu_unpend_interrupt(struct kvm_vcpu *vcpu, u8 vec)
944 {
945 	long spsr;
946 	int ret;
947 
948 	local_irq_save(spsr);
949 	ret = test_and_clear_bit(vec, &VCPU(vcpu, irr[0]));
950 	local_irq_restore(spsr);
951 	if (ret) {
952 		vcpu->arch.irq_new_pending = 1;
953 		wmb();
954 	}
955 }
956 
957 void update_vhpi(struct kvm_vcpu *vcpu, int vec)
958 {
959 	u64 vhpi;
960 
961 	if (vec == NULL_VECTOR)
962 		vhpi = 0;
963 	else if (vec == NMI_VECTOR)
964 		vhpi = 32;
965 	else if (vec == ExtINT_VECTOR)
966 		vhpi = 16;
967 	else
968 		vhpi = vec >> 4;
969 
970 	VCPU(vcpu, vhpi) = vhpi;
971 	if (VCPU(vcpu, vac).a_int)
972 		ia64_call_vsa(PAL_VPS_SET_PENDING_INTERRUPT,
973 				(u64)vcpu->arch.vpd, 0, 0, 0, 0, 0, 0);
974 }
975 
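/*
 * Emulate a read of cr.ivr: if the highest pending interrupt is
 * deliverable, mark it in service, clear it from irr and return its
 * vector; otherwise return the spurious vector, recording a
 * TPR-masked vector in vhpi via update_vhpi().
 */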
976 u64 vcpu_get_ivr(struct kvm_vcpu *vcpu)
977 {
978 	int vec, h_inservice, mask;
979 
980 	vec = highest_pending_irq(vcpu);
981 	h_inservice = highest_inservice_irq(vcpu);
982 	mask = irq_masked(vcpu, vec, h_inservice);
983 	if (vec == NULL_VECTOR || mask == IRQ_MASKED_BY_INSVC) {
984 		if (VCPU(vcpu, vhpi))
985 			update_vhpi(vcpu, NULL_VECTOR);
986 		return IA64_SPURIOUS_INT_VECTOR;
987 	}
988 	if (mask == IRQ_MASKED_BY_VTPR) {
989 		update_vhpi(vcpu, vec);
990 		return IA64_SPURIOUS_INT_VECTOR;
991 	}
992 	VMX(vcpu, insvc[vec >> 6]) |= (1UL << (vec & 63));
993 	vcpu_unpend_interrupt(vcpu, vec);
994 	return  (u64)vec;
995 }
996 
997 /**************************************************************************
998   Privileged operation emulation routines
999  **************************************************************************/
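/*
 * thash emulation: with the long-format VHPT (pta.vf set) the hash is
 * computed by the PAL_VPS_THASH service; otherwise the short-format
 * hash is built from the region bits, the page-aligned offset and the
 * guest PTA base.
 */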
1000 u64 vcpu_thash(struct kvm_vcpu *vcpu, u64 vadr)
1001 {
1002 	union ia64_pta vpta;
1003 	union ia64_rr vrr;
1004 	u64 pval;
1005 	u64 vhpt_offset;
1006 
1007 	vpta.val = vcpu_get_pta(vcpu);
1008 	vrr.val = vcpu_get_rr(vcpu, vadr);
1009 	vhpt_offset = ((vadr >> vrr.ps) << 3) & ((1UL << (vpta.size)) - 1);
1010 	if (vpta.vf) {
1011 		pval = ia64_call_vsa(PAL_VPS_THASH, vadr, vrr.val,
1012 				vpta.val, 0, 0, 0, 0);
1013 	} else {
1014 		pval = (vadr & VRN_MASK) | vhpt_offset |
1015 			(vpta.val << 3 >> (vpta.size + 3) << (vpta.size));
1016 	}
1017 	return  pval;
1018 }
1019 
1020 u64 vcpu_ttag(struct kvm_vcpu *vcpu, u64 vadr)
1021 {
1022 	union ia64_rr vrr;
1023 	union ia64_pta vpta;
1024 	u64 pval;
1025 
1026 	vpta.val = vcpu_get_pta(vcpu);
1027 	vrr.val = vcpu_get_rr(vcpu, vadr);
1028 	if (vpta.vf) {
1029 		pval = ia64_call_vsa(PAL_VPS_TTAG, vadr, vrr.val,
1030 						0, 0, 0, 0, 0);
1031 	} else
1032 		pval = 1;
1033 
1034 	return  pval;
1035 }
1036 
1037 u64 vcpu_tak(struct kvm_vcpu *vcpu, u64 vadr)
1038 {
1039 	struct thash_data *data;
1040 	union ia64_pta vpta;
1041 	u64 key;
1042 
1043 	vpta.val = vcpu_get_pta(vcpu);
1044 	if (vpta.vf == 0) {
1045 		key = 1;
1046 		return key;
1047 	}
1048 	data = vtlb_lookup(vcpu, vadr, D_TLB);
1049 	if (!data || !data->p)
1050 		key = 1;
1051 	else
1052 		key = data->key;
1053 
1054 	return key;
1055 }
1056 
1057 void kvm_thash(struct kvm_vcpu *vcpu, INST64 inst)
1058 {
1059 	unsigned long thash, vadr;
1060 
1061 	vadr = vcpu_get_gr(vcpu, inst.M46.r3);
1062 	thash = vcpu_thash(vcpu, vadr);
1063 	vcpu_set_gr(vcpu, inst.M46.r1, thash, 0);
1064 }
1065 
1066 void kvm_ttag(struct kvm_vcpu *vcpu, INST64 inst)
1067 {
1068 	unsigned long tag, vadr;
1069 
1070 	vadr = vcpu_get_gr(vcpu, inst.M46.r3);
1071 	tag = vcpu_ttag(vcpu, vadr);
1072 	vcpu_set_gr(vcpu, inst.M46.r1, tag, 0);
1073 }
1074 
1075 int vcpu_tpa(struct kvm_vcpu *vcpu, u64 vadr, unsigned long *padr)
1076 {
1077 	struct thash_data *data;
1078 	union ia64_isr visr, pt_isr;
1079 	struct kvm_pt_regs *regs;
1080 	struct ia64_psr vpsr;
1081 
1082 	regs = vcpu_regs(vcpu);
1083 	pt_isr.val = VMX(vcpu, cr_isr);
1084 	visr.val = 0;
1085 	visr.ei = pt_isr.ei;
1086 	visr.ir = pt_isr.ir;
1087 	vpsr = *(struct ia64_psr *)&VCPU(vcpu, vpsr);
1088 	visr.na = 1;
1089 
1090 	data = vhpt_lookup(vadr);
1091 	if (data) {
1092 		if (data->p == 0) {
1093 			vcpu_set_isr(vcpu, visr.val);
1094 			data_page_not_present(vcpu, vadr);
1095 			return IA64_FAULT;
1096 		} else if (data->ma == VA_MATTR_NATPAGE) {
1097 			vcpu_set_isr(vcpu, visr.val);
1098 			dnat_page_consumption(vcpu, vadr);
1099 			return IA64_FAULT;
1100 		} else {
1101 			*padr = (data->gpaddr >> data->ps << data->ps) |
1102 				(vadr & (PSIZE(data->ps) - 1));
1103 			return IA64_NO_FAULT;
1104 		}
1105 	}
1106 
1107 	data = vtlb_lookup(vcpu, vadr, D_TLB);
1108 	if (data) {
1109 		if (data->p == 0) {
1110 			vcpu_set_isr(vcpu, visr.val);
1111 			data_page_not_present(vcpu, vadr);
1112 			return IA64_FAULT;
1113 		} else if (data->ma == VA_MATTR_NATPAGE) {
1114 			vcpu_set_isr(vcpu, visr.val);
1115 			dnat_page_consumption(vcpu, vadr);
1116 			return IA64_FAULT;
1117 		} else{
1118 			*padr = ((data->ppn >> (data->ps - 12)) << data->ps)
1119 				| (vadr & (PSIZE(data->ps) - 1));
1120 			return IA64_NO_FAULT;
1121 		}
1122 	}
1123 	if (!vhpt_enabled(vcpu, vadr, NA_REF)) {
1124 		if (vpsr.ic) {
1125 			vcpu_set_isr(vcpu, visr.val);
1126 			alt_dtlb(vcpu, vadr);
1127 			return IA64_FAULT;
1128 		} else {
1129 			nested_dtlb(vcpu);
1130 			return IA64_FAULT;
1131 		}
1132 	} else {
1133 		if (vpsr.ic) {
1134 			vcpu_set_isr(vcpu, visr.val);
1135 			dvhpt_fault(vcpu, vadr);
1136 			return IA64_FAULT;
1137 		} else{
1138 			nested_dtlb(vcpu);
1139 			return IA64_FAULT;
1140 		}
1141 	}
1142 
1143 	return IA64_NO_FAULT;
1144 }
1145 
1146 int kvm_tpa(struct kvm_vcpu *vcpu, INST64 inst)
1147 {
1148 	unsigned long r1, r3;
1149 
1150 	r3 = vcpu_get_gr(vcpu, inst.M46.r3);
1151 
1152 	if (vcpu_tpa(vcpu, r3, &r1))
1153 		return IA64_FAULT;
1154 
1155 	vcpu_set_gr(vcpu, inst.M46.r1, r1, 0);
1156 	return(IA64_NO_FAULT);
1157 }
1158 
1159 void kvm_tak(struct kvm_vcpu *vcpu, INST64 inst)
1160 {
1161 	unsigned long r1, r3;
1162 
1163 	r3 = vcpu_get_gr(vcpu, inst.M46.r3);
1164 	r1 = vcpu_tak(vcpu, r3);
1165 	vcpu_set_gr(vcpu, inst.M46.r1, r1, 0);
1166 }
1167 
1168 /************************************
1169  * Insert/Purge translation register/cache
1170  ************************************/
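/*
 * tc inserts go through thash_purge_and_insert(), which purges any
 * overlapping entries before installing the translation; tr inserts
 * additionally record the entry in vcpu->arch.itrs/dtrs and set the
 * region bit via vcpu_quick_region_set().
 */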
1171 void vcpu_itc_i(struct kvm_vcpu *vcpu, u64 pte, u64 itir, u64 ifa)
1172 {
1173 	thash_purge_and_insert(vcpu, pte, itir, ifa, I_TLB);
1174 }
1175 
1176 void vcpu_itc_d(struct kvm_vcpu *vcpu, u64 pte, u64 itir, u64 ifa)
1177 {
1178 	thash_purge_and_insert(vcpu, pte, itir, ifa, D_TLB);
1179 }
1180 
1181 void vcpu_itr_i(struct kvm_vcpu *vcpu, u64 slot, u64 pte, u64 itir, u64 ifa)
1182 {
1183 	u64 ps, va, rid;
1184 	struct thash_data *p_itr;
1185 
1186 	ps = itir_ps(itir);
1187 	va = PAGEALIGN(ifa, ps);
1188 	pte &= ~PAGE_FLAGS_RV_MASK;
1189 	rid = vcpu_get_rr(vcpu, ifa);
1190 	rid = rid & RR_RID_MASK;
1191 	p_itr = (struct thash_data *)&vcpu->arch.itrs[slot];
1192 	vcpu_set_tr(p_itr, pte, itir, va, rid);
1193 	vcpu_quick_region_set(VMX(vcpu, itr_regions), va);
1194 }
1195 
1196 
1197 void vcpu_itr_d(struct kvm_vcpu *vcpu, u64 slot, u64 pte, u64 itir, u64 ifa)
1198 {
1199 	u64 gpfn;
1200 	u64 ps, va, rid;
1201 	struct thash_data *p_dtr;
1202 
1203 	ps = itir_ps(itir);
1204 	va = PAGEALIGN(ifa, ps);
1205 	pte &= ~PAGE_FLAGS_RV_MASK;
1206 
1207 	if (ps != _PAGE_SIZE_16M)
1208 		thash_purge_entries(vcpu, va, ps);
1209 	gpfn = (pte & _PAGE_PPN_MASK) >> PAGE_SHIFT;
1210 	if (__gpfn_is_io(gpfn))
1211 		pte |= VTLB_PTE_IO;
1212 	rid = vcpu_get_rr(vcpu, va);
1213 	rid = rid & RR_RID_MASK;
1214 	p_dtr = (struct thash_data *)&vcpu->arch.dtrs[slot];
1215 	vcpu_set_tr(p_dtr, pte, itir, va, rid);
1217 	vcpu_quick_region_set(VMX(vcpu, dtr_regions), va);
1218 }
1219 
1220 void vcpu_ptr_d(struct kvm_vcpu *vcpu, u64 ifa, u64 ps)
1221 {
1222 	int index;
1223 	u64 va;
1224 
1225 	va = PAGEALIGN(ifa, ps);
1226 	while ((index = vtr_find_overlap(vcpu, va, ps, D_TLB)) >= 0)
1227 		vcpu->arch.dtrs[index].page_flags = 0;
1228 
1229 	thash_purge_entries(vcpu, va, ps);
1230 }
1231 
1232 void vcpu_ptr_i(struct kvm_vcpu *vcpu, u64 ifa, u64 ps)
1233 {
1234 	int index;
1235 	u64 va;
1236 
1237 	va = PAGEALIGN(ifa, ps);
1238 	while ((index = vtr_find_overlap(vcpu, va, ps, I_TLB)) >= 0)
1239 		vcpu->arch.itrs[index].page_flags = 0;
1240 
1241 	thash_purge_entries(vcpu, va, ps);
1242 }
1243 
1244 void vcpu_ptc_l(struct kvm_vcpu *vcpu, u64 va, u64 ps)
1245 {
1246 	va = PAGEALIGN(va, ps);
1247 	thash_purge_entries(vcpu, va, ps);
1248 }
1249 
1250 void vcpu_ptc_e(struct kvm_vcpu *vcpu, u64 va)
1251 {
1252 	thash_purge_all(vcpu);
1253 }
1254 
1255 void vcpu_ptc_ga(struct kvm_vcpu *vcpu, u64 va, u64 ps)
1256 {
1257 	struct exit_ctl_data *p = &vcpu->arch.exit_data;
1258 	long psr;
1259 	local_irq_save(psr);
1260 	p->exit_reason = EXIT_REASON_PTC_G;
1261 
1262 	p->u.ptc_g_data.rr = vcpu_get_rr(vcpu, va);
1263 	p->u.ptc_g_data.vaddr = va;
1264 	p->u.ptc_g_data.ps = ps;
1265 	vmm_transition(vcpu);
1266 	/* Do Local Purge Here*/
1267 	vcpu_ptc_l(vcpu, va, ps);
1268 	local_irq_restore(psr);
1269 }
1270 
1271 
1272 void vcpu_ptc_g(struct kvm_vcpu *vcpu, u64 va, u64 ps)
1273 {
1274 	vcpu_ptc_ga(vcpu, va, ps);
1275 }
1276 
1277 void kvm_ptc_e(struct kvm_vcpu *vcpu, INST64 inst)
1278 {
1279 	unsigned long ifa;
1280 
1281 	ifa = vcpu_get_gr(vcpu, inst.M45.r3);
1282 	vcpu_ptc_e(vcpu, ifa);
1283 }
1284 
1285 void kvm_ptc_g(struct kvm_vcpu *vcpu, INST64 inst)
1286 {
1287 	unsigned long ifa, itir;
1288 
1289 	ifa = vcpu_get_gr(vcpu, inst.M45.r3);
1290 	itir = vcpu_get_gr(vcpu, inst.M45.r2);
1291 	vcpu_ptc_g(vcpu, ifa, itir_ps(itir));
1292 }
1293 
1294 void kvm_ptc_ga(struct kvm_vcpu *vcpu, INST64 inst)
1295 {
1296 	unsigned long ifa, itir;
1297 
1298 	ifa = vcpu_get_gr(vcpu, inst.M45.r3);
1299 	itir = vcpu_get_gr(vcpu, inst.M45.r2);
1300 	vcpu_ptc_ga(vcpu, ifa, itir_ps(itir));
1301 }
1302 
1303 void kvm_ptc_l(struct kvm_vcpu *vcpu, INST64 inst)
1304 {
1305 	unsigned long ifa, itir;
1306 
1307 	ifa = vcpu_get_gr(vcpu, inst.M45.r3);
1308 	itir = vcpu_get_gr(vcpu, inst.M45.r2);
1309 	vcpu_ptc_l(vcpu, ifa, itir_ps(itir));
1310 }
1311 
1312 void kvm_ptr_d(struct kvm_vcpu *vcpu, INST64 inst)
1313 {
1314 	unsigned long ifa, itir;
1315 
1316 	ifa = vcpu_get_gr(vcpu, inst.M45.r3);
1317 	itir = vcpu_get_gr(vcpu, inst.M45.r2);
1318 	vcpu_ptr_d(vcpu, ifa, itir_ps(itir));
1319 }
1320 
1321 void kvm_ptr_i(struct kvm_vcpu *vcpu, INST64 inst)
1322 {
1323 	unsigned long ifa, itir;
1324 
1325 	ifa = vcpu_get_gr(vcpu, inst.M45.r3);
1326 	itir = vcpu_get_gr(vcpu, inst.M45.r2);
1327 	vcpu_ptr_i(vcpu, ifa, itir_ps(itir));
1328 }
1329 
1330 void kvm_itr_d(struct kvm_vcpu *vcpu, INST64 inst)
1331 {
1332 	unsigned long itir, ifa, pte, slot;
1333 
1334 	slot = vcpu_get_gr(vcpu, inst.M45.r3);
1335 	pte = vcpu_get_gr(vcpu, inst.M45.r2);
1336 	itir = vcpu_get_itir(vcpu);
1337 	ifa = vcpu_get_ifa(vcpu);
1338 	vcpu_itr_d(vcpu, slot, pte, itir, ifa);
1339 }
1340 
1341 
1342 
1343 void kvm_itr_i(struct kvm_vcpu *vcpu, INST64 inst)
1344 {
1345 	unsigned long itir, ifa, pte, slot;
1346 
1347 	slot = vcpu_get_gr(vcpu, inst.M45.r3);
1348 	pte = vcpu_get_gr(vcpu, inst.M45.r2);
1349 	itir = vcpu_get_itir(vcpu);
1350 	ifa = vcpu_get_ifa(vcpu);
1351 	vcpu_itr_i(vcpu, slot, pte, itir, ifa);
1352 }
1353 
1354 void kvm_itc_d(struct kvm_vcpu *vcpu, INST64 inst)
1355 {
1356 	unsigned long itir, ifa, pte;
1357 
1358 	itir = vcpu_get_itir(vcpu);
1359 	ifa = vcpu_get_ifa(vcpu);
1360 	pte = vcpu_get_gr(vcpu, inst.M45.r2);
1361 	vcpu_itc_d(vcpu, pte, itir, ifa);
1362 }
1363 
1364 void kvm_itc_i(struct kvm_vcpu *vcpu, INST64 inst)
1365 {
1366 	unsigned long itir, ifa, pte;
1367 
1368 	itir = vcpu_get_itir(vcpu);
1369 	ifa = vcpu_get_ifa(vcpu);
1370 	pte = vcpu_get_gr(vcpu, inst.M45.r2);
1371 	vcpu_itc_i(vcpu, pte, itir, ifa);
1372 }
1373 
1374 /*************************************
1375  * Moves to semi-privileged registers
1376  *************************************/
1377 
1378 void kvm_mov_to_ar_imm(struct kvm_vcpu *vcpu, INST64 inst)
1379 {
1380 	unsigned long imm;
1381 
1382 	if (inst.M30.s)
1383 		imm = -inst.M30.imm;
1384 	else
1385 		imm = inst.M30.imm;
1386 
1387 	vcpu_set_itc(vcpu, imm);
1388 }
1389 
1390 void kvm_mov_to_ar_reg(struct kvm_vcpu *vcpu, INST64 inst)
1391 {
1392 	unsigned long r2;
1393 
1394 	r2 = vcpu_get_gr(vcpu, inst.M29.r2);
1395 	vcpu_set_itc(vcpu, r2);
1396 }
1397 
1398 void kvm_mov_from_ar_reg(struct kvm_vcpu *vcpu, INST64 inst)
1399 {
1400 	unsigned long r1;
1401 
1402 	r1 = vcpu_get_itc(vcpu);
1403 	vcpu_set_gr(vcpu, inst.M31.r1, r1, 0);
1404 }
1405 
1406 /**************************************************************************
1407   struct kvm_vcpu protection key register access routines
1408  **************************************************************************/
1409 
1410 unsigned long vcpu_get_pkr(struct kvm_vcpu *vcpu, unsigned long reg)
1411 {
1412 	return ((unsigned long)ia64_get_pkr(reg));
1413 }
1414 
1415 void vcpu_set_pkr(struct kvm_vcpu *vcpu, unsigned long reg, unsigned long val)
1416 {
1417 	ia64_set_pkr(reg, val);
1418 }
1419 
1420 /********************************
1421  * Moves to privileged registers
1422  ********************************/
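/*
 * Region register writes update the shadow copy in arch.vrr.  rr6 is
 * special: the new value is stashed in arch.vmm_rr and a transition to
 * the host (EXIT_REASON_SWITCH_RR6) applies it; rr0/rr4 only reach the
 * hardware while the guest runs in virtual mode, otherwise the
 * metaphysical RIDs stay in place.
 */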
1423 unsigned long vcpu_set_rr(struct kvm_vcpu *vcpu, unsigned long reg,
1424 					unsigned long val)
1425 {
1426 	union ia64_rr oldrr, newrr;
1427 	unsigned long rrval;
1428 	struct exit_ctl_data *p = &vcpu->arch.exit_data;
1429 	unsigned long psr;
1430 
1431 	oldrr.val = vcpu_get_rr(vcpu, reg);
1432 	newrr.val = val;
1433 	vcpu->arch.vrr[reg >> VRN_SHIFT] = val;
1434 
1435 	switch ((unsigned long)(reg >> VRN_SHIFT)) {
1436 	case VRN6:
1437 		vcpu->arch.vmm_rr = vrrtomrr(val);
1438 		local_irq_save(psr);
1439 		p->exit_reason = EXIT_REASON_SWITCH_RR6;
1440 		vmm_transition(vcpu);
1441 		local_irq_restore(psr);
1442 		break;
1443 	case VRN4:
1444 		rrval = vrrtomrr(val);
1445 		vcpu->arch.metaphysical_saved_rr4 = rrval;
1446 		if (!is_physical_mode(vcpu))
1447 			ia64_set_rr(reg, rrval);
1448 		break;
1449 	case VRN0:
1450 		rrval = vrrtomrr(val);
1451 		vcpu->arch.metaphysical_saved_rr0 = rrval;
1452 		if (!is_physical_mode(vcpu))
1453 			ia64_set_rr(reg, rrval);
1454 		break;
1455 	default:
1456 		ia64_set_rr(reg, vrrtomrr(val));
1457 		break;
1458 	}
1459 
1460 	return (IA64_NO_FAULT);
1461 }
1462 
1463 void kvm_mov_to_rr(struct kvm_vcpu *vcpu, INST64 inst)
1464 {
1465 	unsigned long r3, r2;
1466 
1467 	r3 = vcpu_get_gr(vcpu, inst.M42.r3);
1468 	r2 = vcpu_get_gr(vcpu, inst.M42.r2);
1469 	vcpu_set_rr(vcpu, r3, r2);
1470 }
1471 
1472 void kvm_mov_to_dbr(struct kvm_vcpu *vcpu, INST64 inst)
1473 {
1474 }
1475 
1476 void kvm_mov_to_ibr(struct kvm_vcpu *vcpu, INST64 inst)
1477 {
1478 }
1479 
1480 void kvm_mov_to_pmc(struct kvm_vcpu *vcpu, INST64 inst)
1481 {
1482 	unsigned long r3, r2;
1483 
1484 	r3 = vcpu_get_gr(vcpu, inst.M42.r3);
1485 	r2 = vcpu_get_gr(vcpu, inst.M42.r2);
1486 	vcpu_set_pmc(vcpu, r3, r2);
1487 }
1488 
1489 void kvm_mov_to_pmd(struct kvm_vcpu *vcpu, INST64 inst)
1490 {
1491 	unsigned long r3, r2;
1492 
1493 	r3 = vcpu_get_gr(vcpu, inst.M42.r3);
1494 	r2 = vcpu_get_gr(vcpu, inst.M42.r2);
1495 	vcpu_set_pmd(vcpu, r3, r2);
1496 }
1497 
1498 void kvm_mov_to_pkr(struct kvm_vcpu *vcpu, INST64 inst)
1499 {
1500 	u64 r3, r2;
1501 
1502 	r3 = vcpu_get_gr(vcpu, inst.M42.r3);
1503 	r2 = vcpu_get_gr(vcpu, inst.M42.r2);
1504 	vcpu_set_pkr(vcpu, r3, r2);
1505 }
1506 
1507 void kvm_mov_from_rr(struct kvm_vcpu *vcpu, INST64 inst)
1508 {
1509 	unsigned long r3, r1;
1510 
1511 	r3 = vcpu_get_gr(vcpu, inst.M43.r3);
1512 	r1 = vcpu_get_rr(vcpu, r3);
1513 	vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
1514 }
1515 
1516 void kvm_mov_from_pkr(struct kvm_vcpu *vcpu, INST64 inst)
1517 {
1518 	unsigned long r3, r1;
1519 
1520 	r3 = vcpu_get_gr(vcpu, inst.M43.r3);
1521 	r1 = vcpu_get_pkr(vcpu, r3);
1522 	vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
1523 }
1524 
1525 void kvm_mov_from_dbr(struct kvm_vcpu *vcpu, INST64 inst)
1526 {
1527 	unsigned long r3, r1;
1528 
1529 	r3 = vcpu_get_gr(vcpu, inst.M43.r3);
1530 	r1 = vcpu_get_dbr(vcpu, r3);
1531 	vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
1532 }
1533 
1534 void kvm_mov_from_ibr(struct kvm_vcpu *vcpu, INST64 inst)
1535 {
1536 	unsigned long r3, r1;
1537 
1538 	r3 = vcpu_get_gr(vcpu, inst.M43.r3);
1539 	r1 = vcpu_get_ibr(vcpu, r3);
1540 	vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
1541 }
1542 
1543 void kvm_mov_from_pmc(struct kvm_vcpu *vcpu, INST64 inst)
1544 {
1545 	unsigned long r3, r1;
1546 
1547 	r3 = vcpu_get_gr(vcpu, inst.M43.r3);
1548 	r1 = vcpu_get_pmc(vcpu, r3);
1549 	vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
1550 }
1551 
1552 unsigned long vcpu_get_cpuid(struct kvm_vcpu *vcpu, unsigned long reg)
1553 {
1554 	/* FIXME: This could get called as a result of a rsvd-reg fault */
1555 	if (reg > (ia64_get_cpuid(3) & 0xff))
1556 		return 0;
1557 	else
1558 		return ia64_get_cpuid(reg);
1559 }
1560 
1561 void kvm_mov_from_cpuid(struct kvm_vcpu *vcpu, INST64 inst)
1562 {
1563 	unsigned long r3, r1;
1564 
1565 	r3 = vcpu_get_gr(vcpu, inst.M43.r3);
1566 	r1 = vcpu_get_cpuid(vcpu, r3);
1567 	vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
1568 }
1569 
1570 void vcpu_set_tpr(struct kvm_vcpu *vcpu, unsigned long val)
1571 {
1572 	VCPU(vcpu, tpr) = val;
1573 	vcpu->arch.irq_check = 1;
1574 }
1575 
1576 unsigned long kvm_mov_to_cr(struct kvm_vcpu *vcpu, INST64 inst)
1577 {
1578 	unsigned long r2;
1579 
1580 	r2 = vcpu_get_gr(vcpu, inst.M32.r2);
1581 	VCPU(vcpu, vcr[inst.M32.cr3]) = r2;
1582 
1583 	switch (inst.M32.cr3) {
1584 	case 0:
1585 		vcpu_set_dcr(vcpu, r2);
1586 		break;
1587 	case 1:
1588 		vcpu_set_itm(vcpu, r2);
1589 		break;
1590 	case 66:
1591 		vcpu_set_tpr(vcpu, r2);
1592 		break;
1593 	case 67:
1594 		vcpu_set_eoi(vcpu, r2);
1595 		break;
1596 	default:
1597 		break;
1598 	}
1599 
1600 	return 0;
1601 }
1602 
1603 unsigned long kvm_mov_from_cr(struct kvm_vcpu *vcpu, INST64 inst)
1604 {
1605 	unsigned long tgt = inst.M33.r1;
1606 	unsigned long val;
1607 
1608 	switch (inst.M33.cr3) {
1609 	case 65:
1610 		val = vcpu_get_ivr(vcpu);
1611 		vcpu_set_gr(vcpu, tgt, val, 0);
1612 		break;
1613 
1614 	case 67:
1615 		vcpu_set_gr(vcpu, tgt, 0L, 0);
1616 		break;
1617 	default:
1618 		val = VCPU(vcpu, vcr[inst.M33.cr3]);
1619 		vcpu_set_gr(vcpu, tgt, val, 0);
1620 		break;
1621 	}
1622 
1623 	return 0;
1624 }
1625 
1626 void vcpu_set_psr(struct kvm_vcpu *vcpu, unsigned long val)
1627 {
1628 
1629 	unsigned long mask;
1630 	struct kvm_pt_regs *regs;
1631 	struct ia64_psr old_psr, new_psr;
1632 
1633 	old_psr = *(struct ia64_psr *)&VCPU(vcpu, vpsr);
1634 
1635 	regs = vcpu_regs(vcpu);
1636 	/* We only support guests with:
1637 	 *  vpsr.pk = 0
1638 	 *  vpsr.is = 0
1639 	 * Otherwise panic.
1640 	 */
1641 	if (val & (IA64_PSR_PK | IA64_PSR_IS | IA64_PSR_VM))
1642 		panic_vm(vcpu, "Only support guests with vpsr.pk =0 "
1643 				"& vpsr.is=0\n");
1644 
1645 	/*
1646 	 * For the IA64_PSR bits id/da/dd/ss/ed/ia:
1647 	 * since these bits become 0 after successful execution of each
1648 	 * instruction, we mask them out of vpsr and handle them via mIA64_PSR.
1649 	 */
1650 	VCPU(vcpu, vpsr) = val
1651 		& (~(IA64_PSR_ID | IA64_PSR_DA | IA64_PSR_DD |
1652 			IA64_PSR_SS | IA64_PSR_ED | IA64_PSR_IA));
1653 
1654 	if (!old_psr.i && (val & IA64_PSR_I)) {
1655 		/* vpsr.i 0->1 */
1656 		vcpu->arch.irq_check = 1;
1657 	}
1658 	new_psr = *(struct ia64_psr *)&VCPU(vcpu, vpsr);
1659 
1660 	/*
1661 	 * All vIA64_PSR bits shall go to mPSR (v->tf->tf_special.psr),
1662 	 * except for the following bits:
1663 	 *  ic/i/dt/si/rt/mc/it/bn/vm
1664 	 */
1665 	mask =  IA64_PSR_IC + IA64_PSR_I + IA64_PSR_DT + IA64_PSR_SI +
1666 		IA64_PSR_RT + IA64_PSR_MC + IA64_PSR_IT + IA64_PSR_BN +
1667 		IA64_PSR_VM;
1668 
1669 	regs->cr_ipsr = (regs->cr_ipsr & mask) | (val & (~mask));
1670 
1671 	check_mm_mode_switch(vcpu, old_psr, new_psr);
1672 
1673 	return ;
1674 }
1675 
1676 unsigned long vcpu_cover(struct kvm_vcpu *vcpu)
1677 {
1678 	struct ia64_psr vpsr;
1679 
1680 	struct kvm_pt_regs *regs = vcpu_regs(vcpu);
1681 	vpsr = *(struct ia64_psr *)&VCPU(vcpu, vpsr);
1682 
1683 	if (!vpsr.ic)
1684 		VCPU(vcpu, ifs) = regs->cr_ifs;
1685 	regs->cr_ifs = IA64_IFS_V;
1686 	return (IA64_NO_FAULT);
1687 }
1688 
1689 
1690 
1691 /**************************************************************************
1692   VCPU banked general register access routines
1693  **************************************************************************/
1694 #define vcpu_bsw0_unat(i, b0unat, b1unat, runat, VMM_PT_REGS_R16_SLOT)	\
1695 	do {     							\
1696 		__asm__ __volatile__ (					\
1697 				";;extr.u %0 = %3,%6,16;;\n"		\
1698 				"dep %1 = %0, %1, 0, 16;;\n"		\
1699 				"st8 [%4] = %1\n"			\
1700 				"extr.u %0 = %2, 16, 16;;\n"		\
1701 				"dep %3 = %0, %3, %6, 16;;\n"		\
1702 				"st8 [%5] = %3\n"			\
1703 				::"r"(i), "r"(*b1unat), "r"(*b0unat),	\
1704 				"r"(*runat), "r"(b1unat), "r"(runat),	\
1705 				"i"(VMM_PT_REGS_R16_SLOT) : "memory");	\
1706 	} while (0)
1707 
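/*
 * bsw.0 emulation: when vpsr.bn is set, r16-r31 in the exception frame
 * (bank 1) are saved to vgr[], the bank 0 values are loaded from
 * vbgr[], the asm helper above moves the matching UNAT bits, and
 * vpsr.bn is cleared.
 */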
1708 void vcpu_bsw0(struct kvm_vcpu *vcpu)
1709 {
1710 	unsigned long i;
1711 
1712 	struct kvm_pt_regs *regs = vcpu_regs(vcpu);
1713 	unsigned long *r = &regs->r16;
1714 	unsigned long *b0 = &VCPU(vcpu, vbgr[0]);
1715 	unsigned long *b1 = &VCPU(vcpu, vgr[0]);
1716 	unsigned long *runat = &regs->eml_unat;
1717 	unsigned long *b0unat = &VCPU(vcpu, vbnat);
1718 	unsigned long *b1unat = &VCPU(vcpu, vnat);
1719 
1720 
1721 	if (VCPU(vcpu, vpsr) & IA64_PSR_BN) {
1722 		for (i = 0; i < 16; i++) {
1723 			*b1++ = *r;
1724 			*r++ = *b0++;
1725 		}
1726 		vcpu_bsw0_unat(i, b0unat, b1unat, runat,
1727 				VMM_PT_REGS_R16_SLOT);
1728 		VCPU(vcpu, vpsr) &= ~IA64_PSR_BN;
1729 	}
1730 }
1731 
1732 #define vcpu_bsw1_unat(i, b0unat, b1unat, runat, VMM_PT_REGS_R16_SLOT)	\
1733 	do {             						\
1734 		__asm__ __volatile__ (";;extr.u %0 = %3, %6, 16;;\n"	\
1735 				"dep %1 = %0, %1, 16, 16;;\n"		\
1736 				"st8 [%4] = %1\n"			\
1737 				"extr.u %0 = %2, 0, 16;;\n"		\
1738 				"dep %3 = %0, %3, %6, 16;;\n"		\
1739 				"st8 [%5] = %3\n"			\
1740 				::"r"(i), "r"(*b0unat), "r"(*b1unat),	\
1741 				"r"(*runat), "r"(b0unat), "r"(runat),	\
1742 				"i"(VMM_PT_REGS_R16_SLOT) : "memory");	\
1743 	} while (0)
1744 
1745 void vcpu_bsw1(struct kvm_vcpu *vcpu)
1746 {
1747 	unsigned long i;
1748 	struct kvm_pt_regs *regs = vcpu_regs(vcpu);
1749 	unsigned long *r = &regs->r16;
1750 	unsigned long *b0 = &VCPU(vcpu, vbgr[0]);
1751 	unsigned long *b1 = &VCPU(vcpu, vgr[0]);
1752 	unsigned long *runat = &regs->eml_unat;
1753 	unsigned long *b0unat = &VCPU(vcpu, vbnat);
1754 	unsigned long *b1unat = &VCPU(vcpu, vnat);
1755 
1756 	if (!(VCPU(vcpu, vpsr) & IA64_PSR_BN)) {
1757 		for (i = 0; i < 16; i++) {
1758 			*b0++ = *r;
1759 			*r++ = *b1++;
1760 		}
1761 		vcpu_bsw1_unat(i, b0unat, b1unat, runat,
1762 				VMM_PT_REGS_R16_SLOT);
1763 		VCPU(vcpu, vpsr) |= IA64_PSR_BN;
1764 	}
1765 }
1766 
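/*
 * rfi emulation: select the register bank named by ipsr.bn, install
 * the saved ipsr as the new guest psr, restore cr.ifs when its valid
 * bit is set, and resume at the saved iip.
 */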
1767 void vcpu_rfi(struct kvm_vcpu *vcpu)
1768 {
1769 	unsigned long ifs, psr;
1770 	struct kvm_pt_regs *regs = vcpu_regs(vcpu);
1771 
1772 	psr = VCPU(vcpu, ipsr);
1773 	if (psr & IA64_PSR_BN)
1774 		vcpu_bsw1(vcpu);
1775 	else
1776 		vcpu_bsw0(vcpu);
1777 	vcpu_set_psr(vcpu, psr);
1778 	ifs = VCPU(vcpu, ifs);
1779 	if (ifs >> 63)
1780 		regs->cr_ifs = ifs;
1781 	regs->cr_iip = VCPU(vcpu, iip);
1782 }
1783 
1784 /*
1785    VPSR can't keep track of the bits of the guest PSR listed below,
1786    so this function reconstructs the full guest PSR.
1787  */
1788 
1789 unsigned long vcpu_get_psr(struct kvm_vcpu *vcpu)
1790 {
1791 	unsigned long mask;
1792 	struct kvm_pt_regs *regs = vcpu_regs(vcpu);
1793 
1794 	mask = IA64_PSR_BE | IA64_PSR_UP | IA64_PSR_AC | IA64_PSR_MFL |
1795 		IA64_PSR_MFH | IA64_PSR_CPL | IA64_PSR_RI;
1796 	return (VCPU(vcpu, vpsr) & ~mask) | (regs->cr_ipsr & mask);
1797 }
1798 
1799 void kvm_rsm(struct kvm_vcpu *vcpu, INST64 inst)
1800 {
1801 	unsigned long vpsr;
1802 	unsigned long imm24 = (inst.M44.i<<23) | (inst.M44.i2<<21)
1803 					| inst.M44.imm;
1804 
1805 	vpsr = vcpu_get_psr(vcpu);
1806 	vpsr &= (~imm24);
1807 	vcpu_set_psr(vcpu, vpsr);
1808 }
1809 
1810 void kvm_ssm(struct kvm_vcpu *vcpu, INST64 inst)
1811 {
1812 	unsigned long vpsr;
1813 	unsigned long imm24 = (inst.M44.i << 23) | (inst.M44.i2 << 21)
1814 				| inst.M44.imm;
1815 
1816 	vpsr = vcpu_get_psr(vcpu);
1817 	vpsr |= imm24;
1818 	vcpu_set_psr(vcpu, vpsr);
1819 }
1820 
1821 /* Generate Mask
1822  * Parameter:
1823  *  bit -- starting bit
1824  *  len -- how many bits
1825  */
1826 #define MASK(bit,len)				   	\
1827 ({							\
1828 		__u64	ret;				\
1829 							\
1830 		__asm __volatile("dep %0=-1, r0, %1, %2"\
1831 				: "=r" (ret):		\
1832 		  "M" (bit),				\
1833 		  "M" (len));				\
1834 		ret;					\
1835 })
1836 
1837 void vcpu_set_psr_l(struct kvm_vcpu *vcpu, unsigned long val)
1838 {
1839 	val = (val & MASK(0, 32)) | (vcpu_get_psr(vcpu) & MASK(32, 32));
1840 	vcpu_set_psr(vcpu, val);
1841 }
1842 
1843 void kvm_mov_to_psr(struct kvm_vcpu *vcpu, INST64 inst)
1844 {
1845 	unsigned long val;
1846 
1847 	val = vcpu_get_gr(vcpu, inst.M35.r2);
1848 	vcpu_set_psr_l(vcpu, val);
1849 }
1850 
1851 void kvm_mov_from_psr(struct kvm_vcpu *vcpu, INST64 inst)
1852 {
1853 	unsigned long val;
1854 
1855 	val = vcpu_get_psr(vcpu);
1856 	val = (val & MASK(0, 32)) | (val & MASK(35, 2));
1857 	vcpu_set_gr(vcpu, inst.M33.r1, val, 0);
1858 }
1859 
1860 void vcpu_increment_iip(struct kvm_vcpu *vcpu)
1861 {
1862 	struct kvm_pt_regs *regs = vcpu_regs(vcpu);
1863 	struct ia64_psr *ipsr = (struct ia64_psr *)&regs->cr_ipsr;
1864 	if (ipsr->ri == 2) {
1865 		ipsr->ri = 0;
1866 		regs->cr_iip += 16;
1867 	} else
1868 		ipsr->ri++;
1869 }
1870 
1871 void vcpu_decrement_iip(struct kvm_vcpu *vcpu)
1872 {
1873 	struct kvm_pt_regs *regs = vcpu_regs(vcpu);
1874 	struct ia64_psr *ipsr = (struct ia64_psr *)&regs->cr_ipsr;
1875 
1876 	if (ipsr->ri == 0) {
1877 		ipsr->ri = 2;
1878 		regs->cr_iip -= 16;
1879 	} else
1880 		ipsr->ri--;
1881 }
1882 
/** Emulate a privileged operation.
 *
 * @param vcpu  virtual cpu
 * @param regs  guest registers at the time of the fault
 *
 * The fault cause and the faulting opcode are read from
 * VMX(vcpu, cause) and VMX(vcpu, opcode).
 */

void kvm_emulate(struct kvm_vcpu *vcpu, struct kvm_pt_regs *regs)
{
	unsigned long status, cause, opcode;
	INST64 inst;

	status = IA64_NO_FAULT;
	cause = VMX(vcpu, cause);
	opcode = VMX(vcpu, opcode);
	inst.inst = opcode;
	/*
	 * Switch to actual virtual rid in rr0 and rr4,
	 * which is required by some tlb related instructions.
	 */
	prepare_if_physical_mode(vcpu);

	switch (cause) {
	case EVENT_RSM:
		kvm_rsm(vcpu, inst);
		break;
	case EVENT_SSM:
		kvm_ssm(vcpu, inst);
		break;
	case EVENT_MOV_TO_PSR:
		kvm_mov_to_psr(vcpu, inst);
		break;
	case EVENT_MOV_FROM_PSR:
		kvm_mov_from_psr(vcpu, inst);
		break;
	case EVENT_MOV_FROM_CR:
		kvm_mov_from_cr(vcpu, inst);
		break;
	case EVENT_MOV_TO_CR:
		kvm_mov_to_cr(vcpu, inst);
		break;
	case EVENT_BSW_0:
		vcpu_bsw0(vcpu);
		break;
	case EVENT_BSW_1:
		vcpu_bsw1(vcpu);
		break;
	case EVENT_COVER:
		vcpu_cover(vcpu);
		break;
	case EVENT_RFI:
		vcpu_rfi(vcpu);
		break;
	case EVENT_ITR_D:
		kvm_itr_d(vcpu, inst);
		break;
	case EVENT_ITR_I:
		kvm_itr_i(vcpu, inst);
		break;
	case EVENT_PTR_D:
		kvm_ptr_d(vcpu, inst);
		break;
	case EVENT_PTR_I:
		kvm_ptr_i(vcpu, inst);
		break;
	case EVENT_ITC_D:
		kvm_itc_d(vcpu, inst);
		break;
	case EVENT_ITC_I:
		kvm_itc_i(vcpu, inst);
		break;
	case EVENT_PTC_L:
		kvm_ptc_l(vcpu, inst);
		break;
	case EVENT_PTC_G:
		kvm_ptc_g(vcpu, inst);
		break;
	case EVENT_PTC_GA:
		kvm_ptc_ga(vcpu, inst);
		break;
	case EVENT_PTC_E:
		kvm_ptc_e(vcpu, inst);
		break;
	case EVENT_MOV_TO_RR:
		kvm_mov_to_rr(vcpu, inst);
		break;
	case EVENT_MOV_FROM_RR:
		kvm_mov_from_rr(vcpu, inst);
		break;
	case EVENT_THASH:
		kvm_thash(vcpu, inst);
		break;
	case EVENT_TTAG:
		kvm_ttag(vcpu, inst);
		break;
	case EVENT_TPA:
		status = kvm_tpa(vcpu, inst);
		break;
	case EVENT_TAK:
		kvm_tak(vcpu, inst);
		break;
	case EVENT_MOV_TO_AR_IMM:
		kvm_mov_to_ar_imm(vcpu, inst);
		break;
	case EVENT_MOV_TO_AR:
		kvm_mov_to_ar_reg(vcpu, inst);
		break;
	case EVENT_MOV_FROM_AR:
		kvm_mov_from_ar_reg(vcpu, inst);
		break;
	case EVENT_MOV_TO_DBR:
		kvm_mov_to_dbr(vcpu, inst);
		break;
	case EVENT_MOV_TO_IBR:
		kvm_mov_to_ibr(vcpu, inst);
		break;
	case EVENT_MOV_TO_PMC:
		kvm_mov_to_pmc(vcpu, inst);
		break;
	case EVENT_MOV_TO_PMD:
		kvm_mov_to_pmd(vcpu, inst);
		break;
	case EVENT_MOV_TO_PKR:
		kvm_mov_to_pkr(vcpu, inst);
		break;
	case EVENT_MOV_FROM_DBR:
		kvm_mov_from_dbr(vcpu, inst);
		break;
	case EVENT_MOV_FROM_IBR:
		kvm_mov_from_ibr(vcpu, inst);
		break;
	case EVENT_MOV_FROM_PMC:
		kvm_mov_from_pmc(vcpu, inst);
		break;
	case EVENT_MOV_FROM_PKR:
		kvm_mov_from_pkr(vcpu, inst);
		break;
	case EVENT_MOV_FROM_CPUID:
		kvm_mov_from_cpuid(vcpu, inst);
		break;
	case EVENT_VMSW:
		status = IA64_FAULT;
		break;
	default:
		break;
	}
	/*
	 * Unless the handler reported a fault, step past the emulated
	 * instruction.  rfi has already updated iip itself.
	 */
	if (status == IA64_NO_FAULT && cause != EVENT_RFI)
		vcpu_increment_iip(vcpu);

	recover_if_physical_mode(vcpu);
}

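/*
 * Bring the vcpu to an initial, reset-like state: start in physical
 * (metaphysical) mode, give every virtual region register the default
 * value 0x38, and program the local interrupt registers (itv, tpr, pmv,
 * cmcv, lrr0/1) with their mask bit (0x10000) set so nothing fires
 * until the guest reprograms them.
 */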
void init_vcpu(struct kvm_vcpu *vcpu)
{
	int i;

	vcpu->arch.mode_flags = GUEST_IN_PHY;
	VMX(vcpu, vrr[0]) = 0x38;
	VMX(vcpu, vrr[1]) = 0x38;
	VMX(vcpu, vrr[2]) = 0x38;
	VMX(vcpu, vrr[3]) = 0x38;
	VMX(vcpu, vrr[4]) = 0x38;
	VMX(vcpu, vrr[5]) = 0x38;
	VMX(vcpu, vrr[6]) = 0x38;
	VMX(vcpu, vrr[7]) = 0x38;
	VCPU(vcpu, vpsr) = IA64_PSR_BN;
	VCPU(vcpu, dcr) = 0;
	/* pta.size must not be 0.  The minimum is 15 (32k) */
	VCPU(vcpu, pta) = 15 << 2;
	VCPU(vcpu, itv) = 0x10000;
	VCPU(vcpu, itm) = 0;
	VMX(vcpu, last_itc) = 0;

	VCPU(vcpu, lid) = VCPU_LID(vcpu);
	VCPU(vcpu, ivr) = 0;
	VCPU(vcpu, tpr) = 0x10000;
	VCPU(vcpu, eoi) = 0;
	VCPU(vcpu, irr[0]) = 0;
	VCPU(vcpu, irr[1]) = 0;
	VCPU(vcpu, irr[2]) = 0;
	VCPU(vcpu, irr[3]) = 0;
	VCPU(vcpu, pmv) = 0x10000;
	VCPU(vcpu, cmcv) = 0x10000;
	VCPU(vcpu, lrr0) = 0x10000;   /* default reset value? */
	VCPU(vcpu, lrr1) = 0x10000;   /* default reset value? */
	update_vhpi(vcpu, NULL_VECTOR);
	VLSAPIC_XTP(vcpu) = 0x80;	/* disabled */

	for (i = 0; i < 4; i++)
		VLSAPIC_INSVC(vcpu, i) = 0;
}

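/*
 * Load the machine region registers from the guest's virtual rr values.
 * rr0 and rr4 get either the metaphysical value or the translated
 * virtual value, depending on whether the guest is currently in
 * physical mode; rr6 is deliberately not reloaded here.  Each write is
 * followed by a data serialization.
 */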
void kvm_init_all_rr(struct kvm_vcpu *vcpu)
{
	unsigned long psr;

	local_irq_save(psr);

	/* WARNING: virtual mode and physical mode must not coexist
	 * in the same region.
	 */

	vcpu->arch.metaphysical_saved_rr0 = vrrtomrr(VMX(vcpu, vrr[VRN0]));
	vcpu->arch.metaphysical_saved_rr4 = vrrtomrr(VMX(vcpu, vrr[VRN4]));

	if (is_physical_mode(vcpu)) {
		if (vcpu->arch.mode_flags & GUEST_PHY_EMUL)
			panic_vm(vcpu, "Machine Status conflicts!\n");

		ia64_set_rr((VRN0 << VRN_SHIFT), vcpu->arch.metaphysical_rr0);
		ia64_dv_serialize_data();
		ia64_set_rr((VRN4 << VRN_SHIFT), vcpu->arch.metaphysical_rr4);
		ia64_dv_serialize_data();
	} else {
		ia64_set_rr((VRN0 << VRN_SHIFT),
				vcpu->arch.metaphysical_saved_rr0);
		ia64_dv_serialize_data();
		ia64_set_rr((VRN4 << VRN_SHIFT),
				vcpu->arch.metaphysical_saved_rr4);
		ia64_dv_serialize_data();
	}
	ia64_set_rr((VRN1 << VRN_SHIFT),
			vrrtomrr(VMX(vcpu, vrr[VRN1])));
	ia64_dv_serialize_data();
	ia64_set_rr((VRN2 << VRN_SHIFT),
			vrrtomrr(VMX(vcpu, vrr[VRN2])));
	ia64_dv_serialize_data();
	ia64_set_rr((VRN3 << VRN_SHIFT),
			vrrtomrr(VMX(vcpu, vrr[VRN3])));
	ia64_dv_serialize_data();
	ia64_set_rr((VRN5 << VRN_SHIFT),
			vrrtomrr(VMX(vcpu, vrr[VRN5])));
	ia64_dv_serialize_data();
	ia64_set_rr((VRN7 << VRN_SHIFT),
			vrrtomrr(VMX(vcpu, vrr[VRN7])));
	ia64_dv_serialize_data();
	ia64_srlz_d();
	ia64_set_psr(psr);
}

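/*
 * Entry point on the VMM side: restore the per-vcpu VPD state via
 * PAL_VPS_RESTORE, initialize the virtual TLB, VHPT and vcpu state,
 * load the region registers, and branch to the reset entry.
 */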
int vmm_entry(void)
{
	struct kvm_vcpu *v;
	v = current_vcpu;

	ia64_call_vsa(PAL_VPS_RESTORE, (unsigned long)v->arch.vpd,
						0, 0, 0, 0, 0, 0);
	kvm_init_vtlb(v);
	kvm_init_vhpt(v);
	init_vcpu(v);
	kvm_init_all_rr(v);
	vmm_reset_entry();

	return 0;
}

static void kvm_show_registers(struct kvm_pt_regs *regs)
{
	unsigned long ip = regs->cr_iip + ia64_psr(regs)->ri;

	struct kvm_vcpu *vcpu = current_vcpu;
	if (vcpu != NULL)
		printk("vcpu 0x%p vcpu %d\n",
		       vcpu, vcpu->vcpu_id);

	printk("psr : %016lx ifs : %016lx ip  : [<%016lx>]\n",
	       regs->cr_ipsr, regs->cr_ifs, ip);

	printk("unat: %016lx pfs : %016lx rsc : %016lx\n",
	       regs->ar_unat, regs->ar_pfs, regs->ar_rsc);
	printk("rnat: %016lx bspstore: %016lx pr  : %016lx\n",
	       regs->ar_rnat, regs->ar_bspstore, regs->pr);
	printk("ldrs: %016lx ccv : %016lx fpsr: %016lx\n",
	       regs->loadrs, regs->ar_ccv, regs->ar_fpsr);
	printk("csd : %016lx ssd : %016lx\n", regs->ar_csd, regs->ar_ssd);
	printk("b0  : %016lx b6  : %016lx b7  : %016lx\n", regs->b0,
						regs->b6, regs->b7);
	printk("f6  : %05lx%016lx f7  : %05lx%016lx\n",
	       regs->f6.u.bits[1], regs->f6.u.bits[0],
	       regs->f7.u.bits[1], regs->f7.u.bits[0]);
	printk("f8  : %05lx%016lx f9  : %05lx%016lx\n",
	       regs->f8.u.bits[1], regs->f8.u.bits[0],
	       regs->f9.u.bits[1], regs->f9.u.bits[0]);
	printk("f10 : %05lx%016lx f11 : %05lx%016lx\n",
	       regs->f10.u.bits[1], regs->f10.u.bits[0],
	       regs->f11.u.bits[1], regs->f11.u.bits[0]);

	printk("r1  : %016lx r2  : %016lx r3  : %016lx\n", regs->r1,
						regs->r2, regs->r3);
	printk("r8  : %016lx r9  : %016lx r10 : %016lx\n", regs->r8,
						regs->r9, regs->r10);
	printk("r11 : %016lx r12 : %016lx r13 : %016lx\n", regs->r11,
						regs->r12, regs->r13);
	printk("r14 : %016lx r15 : %016lx r16 : %016lx\n", regs->r14,
						regs->r15, regs->r16);
	printk("r17 : %016lx r18 : %016lx r19 : %016lx\n", regs->r17,
						regs->r18, regs->r19);
	printk("r20 : %016lx r21 : %016lx r22 : %016lx\n", regs->r20,
						regs->r21, regs->r22);
	printk("r23 : %016lx r24 : %016lx r25 : %016lx\n", regs->r23,
						regs->r24, regs->r25);
	printk("r26 : %016lx r27 : %016lx r28 : %016lx\n", regs->r26,
						regs->r27, regs->r28);
	printk("r29 : %016lx r30 : %016lx r31 : %016lx\n", regs->r29,
						regs->r30, regs->r31);
}

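/*
 * Fatal error in the VMM: print the message and the guest register
 * state, then hand control back to the host side with
 * EXIT_REASON_VM_PANIC.  This function does not return.
 */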
void panic_vm(struct kvm_vcpu *v, const char *fmt, ...)
{
	va_list args;
	char buf[256];

	struct kvm_pt_regs *regs = vcpu_regs(v);
	struct exit_ctl_data *p = &v->arch.exit_data;

	va_start(args, fmt);
	vsnprintf(buf, sizeof(buf), fmt, args);
	va_end(args);
	printk("%s", buf);
	kvm_show_registers(regs);
	p->exit_reason = EXIT_REASON_VM_PANIC;
	vmm_transition(v);
	/* never returns */
	while (1);
}