1 /*
2  * This program is free software; you can redistribute it and/or modify
3  * it under the terms of the GNU General Public License, version 2, as
4  * published by the Free Software Foundation.
5  *
6  * This program is distributed in the hope that it will be useful,
7  * but WITHOUT ANY WARRANTY; without even the implied warranty of
8  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
9  * GNU General Public License for more details.
10  *
11  * You should have received a copy of the GNU General Public License
12  * along with this program; if not, write to the Free Software
13  * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
14  *
15  * Copyright IBM Corp. 2007
16  * Copyright 2011 Freescale Semiconductor, Inc.
17  *
18  * Authors: Hollis Blanchard <hollisb@us.ibm.com>
19  */
20 
21 #include <linux/jiffies.h>
22 #include <linux/hrtimer.h>
23 #include <linux/types.h>
24 #include <linux/string.h>
25 #include <linux/kvm_host.h>
26 
27 #include <asm/reg.h>
28 #include <asm/time.h>
29 #include <asm/byteorder.h>
30 #include <asm/kvm_ppc.h>
31 #include <asm/disassemble.h>
32 #include "timing.h"
33 #include "trace.h"
34 
35 #define OP_TRAP 3
36 #define OP_TRAP_64 2
37 
38 #define OP_31_XOP_LWZX      23
39 #define OP_31_XOP_DCBF      86
40 #define OP_31_XOP_LBZX      87
41 #define OP_31_XOP_STWX      151
42 #define OP_31_XOP_STBX      215
43 #define OP_31_XOP_LBZUX     119
44 #define OP_31_XOP_STBUX     247
45 #define OP_31_XOP_LHZX      279
46 #define OP_31_XOP_LHZUX     311
47 #define OP_31_XOP_MFSPR     339
48 #define OP_31_XOP_LHAX      343
49 #define OP_31_XOP_STHX      407
50 #define OP_31_XOP_STHUX     439
51 #define OP_31_XOP_MTSPR     467
52 #define OP_31_XOP_DCBI      470
53 #define OP_31_XOP_LWBRX     534
54 #define OP_31_XOP_TLBSYNC   566
55 #define OP_31_XOP_STWBRX    662
56 #define OP_31_XOP_LHBRX     790
57 #define OP_31_XOP_STHBRX    918
58 
59 #define OP_LWZ  32
60 #define OP_LWZU 33
61 #define OP_LBZ  34
62 #define OP_LBZU 35
63 #define OP_STW  36
64 #define OP_STWU 37
65 #define OP_STB  38
66 #define OP_STBU 39
67 #define OP_LHZ  40
68 #define OP_LHZU 41
69 #define OP_LHA  42
70 #define OP_LHAU 43
71 #define OP_STH  44
72 #define OP_STHU 45
73 
/*
 * Re-arm the host hrtimer that models the guest decrementer after the
 * guest has written a new DEC value (vcpu->arch.dec).  Converts the DEC
 * value from timebase ticks to wall-clock time and schedules the timer
 * to fire when the guest decrementer would reach zero.
 */
void kvmppc_emulate_dec(struct kvm_vcpu *vcpu)
{
	unsigned long dec_nsec;
	unsigned long long dec_time;

	pr_debug("mtDEC: %x\n", vcpu->arch.dec);
	/* Drop any timer armed for the previous DEC value. */
	hrtimer_try_to_cancel(&vcpu->arch.dec_timer);

#ifdef CONFIG_PPC_BOOK3S
	/* mtdec lowers the interrupt line when positive. */
	kvmppc_core_dequeue_dec(vcpu);

	/* POWER4+ triggers a dec interrupt if the value is < 0 */
	if (vcpu->arch.dec & 0x80000000) {
		kvmppc_core_queue_dec(vcpu);
		return;
	}
#endif

#ifdef CONFIG_BOOKE
	/* On BOOKE, DEC = 0 is as good as decrementer not enabled */
	if (vcpu->arch.dec == 0)
		return;
#endif

	/*
	 * The decrementer ticks at the same rate as the timebase, so
	 * that's how we convert the guest DEC value to the number of
	 * host ticks.
	 */

	/* ticks * 1000 / ticks_per_usec = nanoseconds until expiry. */
	dec_time = vcpu->arch.dec;
	dec_time *= 1000;
	do_div(dec_time, tb_ticks_per_usec);
	/*
	 * do_div() divides in place and returns the remainder: dec_time
	 * becomes whole seconds, dec_nsec the leftover nanoseconds.
	 */
	dec_nsec = do_div(dec_time, NSEC_PER_SEC);
	hrtimer_start(&vcpu->arch.dec_timer,
		ktime_set(dec_time, dec_nsec), HRTIMER_MODE_REL);
	/* Record the timebase at load so kvmppc_get_dec() can compute elapsed. */
	vcpu->arch.dec_jiffies = get_tb();
}
113 
kvmppc_get_dec(struct kvm_vcpu * vcpu,u64 tb)114 u32 kvmppc_get_dec(struct kvm_vcpu *vcpu, u64 tb)
115 {
116 	u64 jd = tb - vcpu->arch.dec_jiffies;
117 
118 #ifdef CONFIG_BOOKE
119 	if (vcpu->arch.dec < jd)
120 		return 0;
121 #endif
122 
123 	return vcpu->arch.dec - jd;
124 }
125 
/* XXX to do:
 * lhaux
 * lswx
 * lswi
 * stswx
 * stswi
 * lmw
 * stmw
 *
 * XXX is_bigendian should depend on MMU mapping or MSR[LE]
 */
140 /* XXX Should probably auto-generate instruction decoding for a particular core
141  * from opcode tables in the future. */
kvmppc_emulate_instruction(struct kvm_run * run,struct kvm_vcpu * vcpu)142 int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
143 {
144 	u32 inst = kvmppc_get_last_inst(vcpu);
145 	u32 ea;
146 	int ra;
147 	int rb;
148 	int rs;
149 	int rt;
150 	int sprn;
151 	enum emulation_result emulated = EMULATE_DONE;
152 	int advance = 1;
153 
154 	/* this default type might be overwritten by subcategories */
155 	kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS);
156 
157 	pr_debug("Emulating opcode %d / %d\n", get_op(inst), get_xop(inst));
158 
159 	switch (get_op(inst)) {
160 	case OP_TRAP:
161 #ifdef CONFIG_PPC_BOOK3S
162 	case OP_TRAP_64:
163 		kvmppc_core_queue_program(vcpu, SRR1_PROGTRAP);
164 #else
165 		kvmppc_core_queue_program(vcpu,
166 					  vcpu->arch.shared->esr | ESR_PTR);
167 #endif
168 		advance = 0;
169 		break;
170 
171 	case 31:
172 		switch (get_xop(inst)) {
173 
174 		case OP_31_XOP_LWZX:
175 			rt = get_rt(inst);
176 			emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
177 			break;
178 
179 		case OP_31_XOP_LBZX:
180 			rt = get_rt(inst);
181 			emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
182 			break;
183 
184 		case OP_31_XOP_LBZUX:
185 			rt = get_rt(inst);
186 			ra = get_ra(inst);
187 			rb = get_rb(inst);
188 
189 			ea = kvmppc_get_gpr(vcpu, rb);
190 			if (ra)
191 				ea += kvmppc_get_gpr(vcpu, ra);
192 
193 			emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
194 			kvmppc_set_gpr(vcpu, ra, ea);
195 			break;
196 
197 		case OP_31_XOP_STWX:
198 			rs = get_rs(inst);
199 			emulated = kvmppc_handle_store(run, vcpu,
200 						       kvmppc_get_gpr(vcpu, rs),
201 			                               4, 1);
202 			break;
203 
204 		case OP_31_XOP_STBX:
205 			rs = get_rs(inst);
206 			emulated = kvmppc_handle_store(run, vcpu,
207 						       kvmppc_get_gpr(vcpu, rs),
208 			                               1, 1);
209 			break;
210 
211 		case OP_31_XOP_STBUX:
212 			rs = get_rs(inst);
213 			ra = get_ra(inst);
214 			rb = get_rb(inst);
215 
216 			ea = kvmppc_get_gpr(vcpu, rb);
217 			if (ra)
218 				ea += kvmppc_get_gpr(vcpu, ra);
219 
220 			emulated = kvmppc_handle_store(run, vcpu,
221 						       kvmppc_get_gpr(vcpu, rs),
222 			                               1, 1);
223 			kvmppc_set_gpr(vcpu, rs, ea);
224 			break;
225 
226 		case OP_31_XOP_LHAX:
227 			rt = get_rt(inst);
228 			emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
229 			break;
230 
231 		case OP_31_XOP_LHZX:
232 			rt = get_rt(inst);
233 			emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
234 			break;
235 
236 		case OP_31_XOP_LHZUX:
237 			rt = get_rt(inst);
238 			ra = get_ra(inst);
239 			rb = get_rb(inst);
240 
241 			ea = kvmppc_get_gpr(vcpu, rb);
242 			if (ra)
243 				ea += kvmppc_get_gpr(vcpu, ra);
244 
245 			emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
246 			kvmppc_set_gpr(vcpu, ra, ea);
247 			break;
248 
249 		case OP_31_XOP_MFSPR:
250 			sprn = get_sprn(inst);
251 			rt = get_rt(inst);
252 
253 			switch (sprn) {
254 			case SPRN_SRR0:
255 				kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->srr0);
256 				break;
257 			case SPRN_SRR1:
258 				kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->srr1);
259 				break;
260 			case SPRN_PVR:
261 				kvmppc_set_gpr(vcpu, rt, vcpu->arch.pvr); break;
262 			case SPRN_PIR:
263 				kvmppc_set_gpr(vcpu, rt, vcpu->vcpu_id); break;
264 			case SPRN_MSSSR0:
265 				kvmppc_set_gpr(vcpu, rt, 0); break;
266 
267 			/* Note: mftb and TBRL/TBWL are user-accessible, so
268 			 * the guest can always access the real TB anyways.
269 			 * In fact, we probably will never see these traps. */
270 			case SPRN_TBWL:
271 				kvmppc_set_gpr(vcpu, rt, get_tb() >> 32); break;
272 			case SPRN_TBWU:
273 				kvmppc_set_gpr(vcpu, rt, get_tb()); break;
274 
275 			case SPRN_SPRG0:
276 				kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->sprg0);
277 				break;
278 			case SPRN_SPRG1:
279 				kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->sprg1);
280 				break;
281 			case SPRN_SPRG2:
282 				kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->sprg2);
283 				break;
284 			case SPRN_SPRG3:
285 				kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->sprg3);
286 				break;
287 			/* Note: SPRG4-7 are user-readable, so we don't get
288 			 * a trap. */
289 
290 			case SPRN_DEC:
291 			{
292 				kvmppc_set_gpr(vcpu, rt,
293 					       kvmppc_get_dec(vcpu, get_tb()));
294 				break;
295 			}
296 			default:
297 				emulated = kvmppc_core_emulate_mfspr(vcpu, sprn, rt);
298 				if (emulated == EMULATE_FAIL) {
299 					printk("mfspr: unknown spr %x\n", sprn);
300 					kvmppc_set_gpr(vcpu, rt, 0);
301 				}
302 				break;
303 			}
304 			kvmppc_set_exit_type(vcpu, EMULATED_MFSPR_EXITS);
305 			break;
306 
307 		case OP_31_XOP_STHX:
308 			rs = get_rs(inst);
309 			ra = get_ra(inst);
310 			rb = get_rb(inst);
311 
312 			emulated = kvmppc_handle_store(run, vcpu,
313 						       kvmppc_get_gpr(vcpu, rs),
314 			                               2, 1);
315 			break;
316 
317 		case OP_31_XOP_STHUX:
318 			rs = get_rs(inst);
319 			ra = get_ra(inst);
320 			rb = get_rb(inst);
321 
322 			ea = kvmppc_get_gpr(vcpu, rb);
323 			if (ra)
324 				ea += kvmppc_get_gpr(vcpu, ra);
325 
326 			emulated = kvmppc_handle_store(run, vcpu,
327 						       kvmppc_get_gpr(vcpu, rs),
328 			                               2, 1);
329 			kvmppc_set_gpr(vcpu, ra, ea);
330 			break;
331 
332 		case OP_31_XOP_MTSPR:
333 			sprn = get_sprn(inst);
334 			rs = get_rs(inst);
335 			switch (sprn) {
336 			case SPRN_SRR0:
337 				vcpu->arch.shared->srr0 = kvmppc_get_gpr(vcpu, rs);
338 				break;
339 			case SPRN_SRR1:
340 				vcpu->arch.shared->srr1 = kvmppc_get_gpr(vcpu, rs);
341 				break;
342 
343 			/* XXX We need to context-switch the timebase for
344 			 * watchdog and FIT. */
345 			case SPRN_TBWL: break;
346 			case SPRN_TBWU: break;
347 
348 			case SPRN_MSSSR0: break;
349 
350 			case SPRN_DEC:
351 				vcpu->arch.dec = kvmppc_get_gpr(vcpu, rs);
352 				kvmppc_emulate_dec(vcpu);
353 				break;
354 
355 			case SPRN_SPRG0:
356 				vcpu->arch.shared->sprg0 = kvmppc_get_gpr(vcpu, rs);
357 				break;
358 			case SPRN_SPRG1:
359 				vcpu->arch.shared->sprg1 = kvmppc_get_gpr(vcpu, rs);
360 				break;
361 			case SPRN_SPRG2:
362 				vcpu->arch.shared->sprg2 = kvmppc_get_gpr(vcpu, rs);
363 				break;
364 			case SPRN_SPRG3:
365 				vcpu->arch.shared->sprg3 = kvmppc_get_gpr(vcpu, rs);
366 				break;
367 
368 			default:
369 				emulated = kvmppc_core_emulate_mtspr(vcpu, sprn, rs);
370 				if (emulated == EMULATE_FAIL)
371 					printk("mtspr: unknown spr %x\n", sprn);
372 				break;
373 			}
374 			kvmppc_set_exit_type(vcpu, EMULATED_MTSPR_EXITS);
375 			break;
376 
377 		case OP_31_XOP_DCBF:
378 		case OP_31_XOP_DCBI:
379 			/* Do nothing. The guest is performing dcbi because
380 			 * hardware DMA is not snooped by the dcache, but
381 			 * emulated DMA either goes through the dcache as
382 			 * normal writes, or the host kernel has handled dcache
383 			 * coherence. */
384 			break;
385 
386 		case OP_31_XOP_LWBRX:
387 			rt = get_rt(inst);
388 			emulated = kvmppc_handle_load(run, vcpu, rt, 4, 0);
389 			break;
390 
391 		case OP_31_XOP_TLBSYNC:
392 			break;
393 
394 		case OP_31_XOP_STWBRX:
395 			rs = get_rs(inst);
396 			ra = get_ra(inst);
397 			rb = get_rb(inst);
398 
399 			emulated = kvmppc_handle_store(run, vcpu,
400 						       kvmppc_get_gpr(vcpu, rs),
401 			                               4, 0);
402 			break;
403 
404 		case OP_31_XOP_LHBRX:
405 			rt = get_rt(inst);
406 			emulated = kvmppc_handle_load(run, vcpu, rt, 2, 0);
407 			break;
408 
409 		case OP_31_XOP_STHBRX:
410 			rs = get_rs(inst);
411 			ra = get_ra(inst);
412 			rb = get_rb(inst);
413 
414 			emulated = kvmppc_handle_store(run, vcpu,
415 						       kvmppc_get_gpr(vcpu, rs),
416 			                               2, 0);
417 			break;
418 
419 		default:
420 			/* Attempt core-specific emulation below. */
421 			emulated = EMULATE_FAIL;
422 		}
423 		break;
424 
425 	case OP_LWZ:
426 		rt = get_rt(inst);
427 		emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
428 		break;
429 
430 	case OP_LWZU:
431 		ra = get_ra(inst);
432 		rt = get_rt(inst);
433 		emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
434 		kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
435 		break;
436 
437 	case OP_LBZ:
438 		rt = get_rt(inst);
439 		emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
440 		break;
441 
442 	case OP_LBZU:
443 		ra = get_ra(inst);
444 		rt = get_rt(inst);
445 		emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
446 		kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
447 		break;
448 
449 	case OP_STW:
450 		rs = get_rs(inst);
451 		emulated = kvmppc_handle_store(run, vcpu,
452 					       kvmppc_get_gpr(vcpu, rs),
453 		                               4, 1);
454 		break;
455 
456 	case OP_STWU:
457 		ra = get_ra(inst);
458 		rs = get_rs(inst);
459 		emulated = kvmppc_handle_store(run, vcpu,
460 					       kvmppc_get_gpr(vcpu, rs),
461 		                               4, 1);
462 		kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
463 		break;
464 
465 	case OP_STB:
466 		rs = get_rs(inst);
467 		emulated = kvmppc_handle_store(run, vcpu,
468 					       kvmppc_get_gpr(vcpu, rs),
469 		                               1, 1);
470 		break;
471 
472 	case OP_STBU:
473 		ra = get_ra(inst);
474 		rs = get_rs(inst);
475 		emulated = kvmppc_handle_store(run, vcpu,
476 					       kvmppc_get_gpr(vcpu, rs),
477 		                               1, 1);
478 		kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
479 		break;
480 
481 	case OP_LHZ:
482 		rt = get_rt(inst);
483 		emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
484 		break;
485 
486 	case OP_LHZU:
487 		ra = get_ra(inst);
488 		rt = get_rt(inst);
489 		emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
490 		kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
491 		break;
492 
493 	case OP_LHA:
494 		rt = get_rt(inst);
495 		emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
496 		break;
497 
498 	case OP_LHAU:
499 		ra = get_ra(inst);
500 		rt = get_rt(inst);
501 		emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
502 		kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
503 		break;
504 
505 	case OP_STH:
506 		rs = get_rs(inst);
507 		emulated = kvmppc_handle_store(run, vcpu,
508 					       kvmppc_get_gpr(vcpu, rs),
509 		                               2, 1);
510 		break;
511 
512 	case OP_STHU:
513 		ra = get_ra(inst);
514 		rs = get_rs(inst);
515 		emulated = kvmppc_handle_store(run, vcpu,
516 					       kvmppc_get_gpr(vcpu, rs),
517 		                               2, 1);
518 		kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
519 		break;
520 
521 	default:
522 		emulated = EMULATE_FAIL;
523 	}
524 
525 	if (emulated == EMULATE_FAIL) {
526 		emulated = kvmppc_core_emulate_op(run, vcpu, inst, &advance);
527 		if (emulated == EMULATE_AGAIN) {
528 			advance = 0;
529 		} else if (emulated == EMULATE_FAIL) {
530 			advance = 0;
531 			printk(KERN_ERR "Couldn't emulate instruction 0x%08x "
532 			       "(op %d xop %d)\n", inst, get_op(inst), get_xop(inst));
533 			kvmppc_core_queue_program(vcpu, 0);
534 		}
535 	}
536 
537 	trace_kvm_ppc_instr(inst, kvmppc_get_pc(vcpu), emulated);
538 
539 	/* Advance past emulated instruction. */
540 	if (advance)
541 		kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4);
542 
543 	return emulated;
544 }
545