/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 * Copyright 2010-2011 Freescale Semiconductor, Inc.
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/gfp.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>

#include <asm/cputable.h>
#include <asm/uaccess.h>
#include <asm/kvm_ppc.h>
#include "timing.h"
#include <asm/cacheflush.h>

#include "booke.h"

unsigned long kvmppc_booke_handlers;

#define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

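/*
 * Exit counters exposed via the kvm debugfs directory.  Each entry maps
 * a file name to a counter embedded in struct kvm or struct kvm_vcpu,
 * located through the VM_STAT()/VCPU_STAT() offset macros above.
 */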
struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "mmio",       VCPU_STAT(mmio_exits) },
	{ "dcr",        VCPU_STAT(dcr_exits) },
	{ "sig",        VCPU_STAT(signal_exits) },
	{ "itlb_r",     VCPU_STAT(itlb_real_miss_exits) },
	{ "itlb_v",     VCPU_STAT(itlb_virt_miss_exits) },
	{ "dtlb_r",     VCPU_STAT(dtlb_real_miss_exits) },
	{ "dtlb_v",     VCPU_STAT(dtlb_virt_miss_exits) },
	{ "sysc",       VCPU_STAT(syscall_exits) },
	{ "isi",        VCPU_STAT(isi_exits) },
	{ "dsi",        VCPU_STAT(dsi_exits) },
	{ "inst_emu",   VCPU_STAT(emulated_inst_exits) },
	{ "dec",        VCPU_STAT(dec_exits) },
	{ "ext_intr",   VCPU_STAT(ext_intr_exits) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ NULL }
};

/* TODO: use vcpu_printf() */
void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu)
{
	int i;

	printk("pc:   %08lx msr:  %08llx\n", vcpu->arch.pc, vcpu->arch.shared->msr);
	printk("lr:   %08lx ctr:  %08lx\n", vcpu->arch.lr, vcpu->arch.ctr);
	printk("srr0: %08llx srr1: %08llx\n", vcpu->arch.shared->srr0,
					    vcpu->arch.shared->srr1);

	printk("exceptions: %08lx\n", vcpu->arch.pending_exceptions);

	for (i = 0; i < 32; i += 4) {
		printk("gpr%02d: %08lx %08lx %08lx %08lx\n", i,
		       kvmppc_get_gpr(vcpu, i),
		       kvmppc_get_gpr(vcpu, i+1),
		       kvmppc_get_gpr(vcpu, i+2),
		       kvmppc_get_gpr(vcpu, i+3));
	}
}

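/*
 * SPE state is switched lazily: vcpu->arch.shadow_msr mirrors what the
 * hardware MSR actually holds while the guest runs, so MSR[SPE] is only
 * turned on (and the guest's SPE registers loaded) once the guest
 * actually sets MSR[SPE].  Preemption is disabled around the save/load
 * so the register state cannot migrate between CPUs mid-switch.
 */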
#ifdef CONFIG_SPE
void kvmppc_vcpu_disable_spe(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	enable_kernel_spe();
	kvmppc_save_guest_spe(vcpu);
	vcpu->arch.shadow_msr &= ~MSR_SPE;
	preempt_enable();
}

static void kvmppc_vcpu_enable_spe(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	enable_kernel_spe();
	kvmppc_load_guest_spe(vcpu);
	vcpu->arch.shadow_msr |= MSR_SPE;
	preempt_enable();
}

static void kvmppc_vcpu_sync_spe(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.shared->msr & MSR_SPE) {
		if (!(vcpu->arch.shadow_msr & MSR_SPE))
			kvmppc_vcpu_enable_spe(vcpu);
	} else if (vcpu->arch.shadow_msr & MSR_SPE) {
		kvmppc_vcpu_disable_spe(vcpu);
	}
}
#else
static void kvmppc_vcpu_sync_spe(struct kvm_vcpu *vcpu)
{
}
#endif

/*
 * Helper function for "full" MSR writes.  No need to call this if only
 * EE/CE/ME/DE/RI are changing.
 */
void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr)
{
	u32 old_msr = vcpu->arch.shared->msr;

	vcpu->arch.shared->msr = new_msr;

	kvmppc_mmu_msr_notify(vcpu, old_msr);
	kvmppc_vcpu_sync_spe(vcpu);
}

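/*
 * Interrupts are queued by priority in the pending_exceptions bitmap;
 * they are not injected here, but delivered (highest priority first)
 * by kvmppc_core_prepare_to_enter() just before re-entering the guest.
 */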
static void kvmppc_booke_queue_irqprio(struct kvm_vcpu *vcpu,
                                       unsigned int priority)
{
	set_bit(priority, &vcpu->arch.pending_exceptions);
}

static void kvmppc_core_queue_dtlb_miss(struct kvm_vcpu *vcpu,
                                        ulong dear_flags, ulong esr_flags)
{
	vcpu->arch.queued_dear = dear_flags;
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DTLB_MISS);
}

static void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu,
                                           ulong dear_flags, ulong esr_flags)
{
	vcpu->arch.queued_dear = dear_flags;
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DATA_STORAGE);
}

static void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu,
                                           ulong esr_flags)
{
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_INST_STORAGE);
}

void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong esr_flags)
{
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_PROGRAM);
}

void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
{
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DECREMENTER);
}

int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu)
{
	return test_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
}

void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu)
{
	clear_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
}

void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
                                struct kvm_interrupt *irq)
{
	unsigned int prio = BOOKE_IRQPRIO_EXTERNAL;

	if (irq->irq == KVM_INTERRUPT_SET_LEVEL)
		prio = BOOKE_IRQPRIO_EXTERNAL_LEVEL;

	kvmppc_booke_queue_irqprio(vcpu, prio);
}

void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu,
                                  struct kvm_interrupt *irq)
{
	clear_bit(BOOKE_IRQPRIO_EXTERNAL, &vcpu->arch.pending_exceptions);
	clear_bit(BOOKE_IRQPRIO_EXTERNAL_LEVEL, &vcpu->arch.pending_exceptions);
}

/* Deliver the interrupt of the corresponding priority, if possible. */
static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
                                        unsigned int priority)
{
	int allowed = 0;
	ulong uninitialized_var(msr_mask);
	bool update_esr = false, update_dear = false;
	ulong crit_raw = vcpu->arch.shared->critical;
	ulong crit_r1 = kvmppc_get_gpr(vcpu, 1);
	bool crit;
	bool keep_irq = false;

	/* Truncate crit indicators in 32 bit mode */
	if (!(vcpu->arch.shared->msr & MSR_SF)) {
		crit_raw &= 0xffffffff;
		crit_r1 &= 0xffffffff;
	}

	/* Critical section when crit == r1 */
	crit = (crit_raw == crit_r1);
	/* ... and we're in supervisor mode */
	crit = crit && !(vcpu->arch.shared->msr & MSR_PR);

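	/*
	 * Level-triggered externals must stay pending until userspace
	 * explicitly dequeues them, so deliver them as a normal external
	 * interrupt but keep the bit set in pending_exceptions.
	 */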
	if (priority == BOOKE_IRQPRIO_EXTERNAL_LEVEL) {
		priority = BOOKE_IRQPRIO_EXTERNAL;
		keep_irq = true;
	}

	switch (priority) {
	case BOOKE_IRQPRIO_DTLB_MISS:
	case BOOKE_IRQPRIO_DATA_STORAGE:
		update_dear = true;
		/* fall through */
	case BOOKE_IRQPRIO_INST_STORAGE:
	case BOOKE_IRQPRIO_PROGRAM:
		update_esr = true;
		/* fall through */
	case BOOKE_IRQPRIO_ITLB_MISS:
	case BOOKE_IRQPRIO_SYSCALL:
	case BOOKE_IRQPRIO_FP_UNAVAIL:
	case BOOKE_IRQPRIO_SPE_UNAVAIL:
	case BOOKE_IRQPRIO_SPE_FP_DATA:
	case BOOKE_IRQPRIO_SPE_FP_ROUND:
	case BOOKE_IRQPRIO_AP_UNAVAIL:
	case BOOKE_IRQPRIO_ALIGNMENT:
		allowed = 1;
		msr_mask = MSR_CE|MSR_ME|MSR_DE;
		break;
	case BOOKE_IRQPRIO_CRITICAL:
	case BOOKE_IRQPRIO_WATCHDOG:
		allowed = vcpu->arch.shared->msr & MSR_CE;
		msr_mask = MSR_ME;
		break;
	case BOOKE_IRQPRIO_MACHINE_CHECK:
		allowed = vcpu->arch.shared->msr & MSR_ME;
		msr_mask = 0;
		break;
	case BOOKE_IRQPRIO_DECREMENTER:
	case BOOKE_IRQPRIO_FIT:
		keep_irq = true;
		/* fall through */
	case BOOKE_IRQPRIO_EXTERNAL:
		allowed = vcpu->arch.shared->msr & MSR_EE;
		allowed = allowed && !crit;
		msr_mask = MSR_CE|MSR_ME|MSR_DE;
		break;
	case BOOKE_IRQPRIO_DEBUG:
		allowed = vcpu->arch.shared->msr & MSR_DE;
		msr_mask = MSR_ME;
		break;
	}

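	/*
	 * Deliver like the hardware would: stash pc/msr in SRR0/SRR1,
	 * redirect execution to IVPR|IVOR[priority], and clear every MSR
	 * bit not preserved by this interrupt class (msr_mask).
	 */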
	if (allowed) {
		vcpu->arch.shared->srr0 = vcpu->arch.pc;
		vcpu->arch.shared->srr1 = vcpu->arch.shared->msr;
		vcpu->arch.pc = vcpu->arch.ivpr | vcpu->arch.ivor[priority];
		if (update_esr)
			vcpu->arch.shared->esr = vcpu->arch.queued_esr;
		if (update_dear)
			vcpu->arch.shared->dar = vcpu->arch.queued_dear;
		kvmppc_set_msr(vcpu, vcpu->arch.shared->msr & msr_mask);

		if (!keep_irq)
			clear_bit(priority, &vcpu->arch.pending_exceptions);
	}

	return allowed;
}

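/*
 * A guest decrementer interrupt is pending whenever the status bit
 * TSR[DIS] is set and the enable bit TCR[DIE] is set; resynchronize
 * the queued exception with that state.
 */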
static void update_timer_ints(struct kvm_vcpu *vcpu)
{
	if ((vcpu->arch.tcr & TCR_DIE) && (vcpu->arch.tsr & TSR_DIS))
		kvmppc_core_queue_dec(vcpu);
	else
		kvmppc_core_dequeue_dec(vcpu);
}

static void kvmppc_core_check_exceptions(struct kvm_vcpu *vcpu)
{
	unsigned long *pending = &vcpu->arch.pending_exceptions;
	unsigned int priority;

	if (vcpu->requests) {
		if (kvm_check_request(KVM_REQ_PENDING_TIMER, vcpu)) {
			smp_mb();
			update_timer_ints(vcpu);
		}
	}

	priority = __ffs(*pending);
	while (priority <= BOOKE_IRQPRIO_MAX) {
		if (kvmppc_booke_irqprio_deliver(vcpu, priority))
			break;

		priority = find_next_bit(pending,
		                         BITS_PER_BYTE * sizeof(*pending),
		                         priority + 1);
	}

	/* Tell the guest about our interrupt status */
	vcpu->arch.shared->int_pending = !!*pending;
}

/* Check pending exceptions and deliver one, if possible. */
void kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
{
	WARN_ON_ONCE(!irqs_disabled());

	kvmppc_core_check_exceptions(vcpu);

	if (vcpu->arch.shared->msr & MSR_WE) {
		local_irq_enable();
		kvm_vcpu_block(vcpu);
		local_irq_disable();

		kvmppc_set_exit_type(vcpu, EMULATED_MTMSRWE_EXITS);
		kvmppc_core_check_exceptions(vcpu);
	}
}

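/*
 * Run the vcpu until it exits for a reason the host or userspace must
 * handle.  Interrupts stay hard-disabled from the final exception check
 * until guest entry, so a pending signal or wakeup cannot slip in
 * unnoticed in between.
 */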
int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
	int ret;

	if (!vcpu->arch.sane) {
		kvm_run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		return -EINVAL;
	}

	local_irq_disable();

	kvmppc_core_prepare_to_enter(vcpu);

	if (signal_pending(current)) {
		kvm_run->exit_reason = KVM_EXIT_INTR;
		ret = -EINTR;
		goto out;
	}

	kvm_guest_enter();
	ret = __kvmppc_vcpu_run(kvm_run, vcpu);
	kvm_guest_exit();

out:
	local_irq_enable();
	return ret;
}

/**
 * kvmppc_handle_exit
 *
 * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV)
 */
int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
                       unsigned int exit_nr)
{
	enum emulation_result er;
	int r = RESUME_HOST;

	/* update before a new last_exit_type is rewritten */
	kvmppc_update_timing_stats(vcpu);

	local_irq_enable();

	run->exit_reason = KVM_EXIT_UNKNOWN;
	run->ready_for_interrupt_injection = 1;

	switch (exit_nr) {
	case BOOKE_INTERRUPT_MACHINE_CHECK:
		printk("MACHINE CHECK: %lx\n", mfspr(SPRN_MCSR));
		kvmppc_dump_vcpu(vcpu);
		r = RESUME_HOST;
		break;

	case BOOKE_INTERRUPT_EXTERNAL:
		kvmppc_account_exit(vcpu, EXT_INTR_EXITS);
		if (need_resched())
			cond_resched();
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_DECREMENTER:
		/* Since we switched IVPR back to the host's value, the host
		 * handled this interrupt the moment we enabled interrupts.
		 * Now we just offer it a chance to reschedule the guest. */
		kvmppc_account_exit(vcpu, DEC_EXITS);
		if (need_resched())
			cond_resched();
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_PROGRAM:
		if (vcpu->arch.shared->msr & MSR_PR) {
			/* Program traps generated by user-level software must be handled
			 * by the guest kernel. */
			kvmppc_core_queue_program(vcpu, vcpu->arch.fault_esr);
			r = RESUME_GUEST;
			kvmppc_account_exit(vcpu, USR_PR_INST);
			break;
		}

		er = kvmppc_emulate_instruction(run, vcpu);
		switch (er) {
		case EMULATE_DONE:
			/* don't overwrite subtypes, just account kvm_stats */
			kvmppc_account_exit_stat(vcpu, EMULATED_INST_EXITS);
			/* Future optimization: only reload non-volatiles if
			 * they were actually modified by emulation. */
			r = RESUME_GUEST_NV;
			break;
		case EMULATE_DO_DCR:
			run->exit_reason = KVM_EXIT_DCR;
			r = RESUME_HOST;
			break;
		case EMULATE_FAIL:
			/* XXX Deliver Program interrupt to guest. */
			printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
			       __func__, vcpu->arch.pc, vcpu->arch.last_inst);
			/* For debugging, encode the failing instruction and
			 * report it to userspace. */
			run->hw.hardware_exit_reason = ~0ULL << 32;
			run->hw.hardware_exit_reason |= vcpu->arch.last_inst;
			r = RESUME_HOST;
			break;
		default:
			BUG();
		}
		break;

	case BOOKE_INTERRUPT_FP_UNAVAIL:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_FP_UNAVAIL);
		kvmppc_account_exit(vcpu, FP_UNAVAIL);
		r = RESUME_GUEST;
		break;

#ifdef CONFIG_SPE
	case BOOKE_INTERRUPT_SPE_UNAVAIL: {
		if (vcpu->arch.shared->msr & MSR_SPE)
			kvmppc_vcpu_enable_spe(vcpu);
		else
			kvmppc_booke_queue_irqprio(vcpu,
						   BOOKE_IRQPRIO_SPE_UNAVAIL);
		r = RESUME_GUEST;
		break;
	}

	case BOOKE_INTERRUPT_SPE_FP_DATA:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_DATA);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_SPE_FP_ROUND:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_ROUND);
		r = RESUME_GUEST;
		break;
#else
	case BOOKE_INTERRUPT_SPE_UNAVAIL:
		/*
		 * Guest wants SPE, but host kernel doesn't support it.  Send
		 * an "unimplemented operation" program check to the guest.
		 */
		kvmppc_core_queue_program(vcpu, ESR_PUO | ESR_SPV);
		r = RESUME_GUEST;
		break;

	/*
	 * These really should never happen without CONFIG_SPE,
	 * as we should never enable the real MSR[SPE] in the guest.
	 */
	case BOOKE_INTERRUPT_SPE_FP_DATA:
	case BOOKE_INTERRUPT_SPE_FP_ROUND:
		printk(KERN_CRIT "%s: unexpected SPE interrupt %u at %08lx\n",
		       __func__, exit_nr, vcpu->arch.pc);
		run->hw.hardware_exit_reason = exit_nr;
		r = RESUME_HOST;
		break;
#endif

	case BOOKE_INTERRUPT_DATA_STORAGE:
		kvmppc_core_queue_data_storage(vcpu, vcpu->arch.fault_dear,
		                               vcpu->arch.fault_esr);
		kvmppc_account_exit(vcpu, DSI_EXITS);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_INST_STORAGE:
		kvmppc_core_queue_inst_storage(vcpu, vcpu->arch.fault_esr);
		kvmppc_account_exit(vcpu, ISI_EXITS);
		r = RESUME_GUEST;
		break;

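	/*
	 * A system call from guest supervisor mode with the magic value
	 * KVM_SC_MAGIC_R0 in r0 is a KVM paravirtual hypercall handled in
	 * the host; any other sc is reflected back to the guest.
	 */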
	case BOOKE_INTERRUPT_SYSCALL:
		if (!(vcpu->arch.shared->msr & MSR_PR) &&
		    (((u32)kvmppc_get_gpr(vcpu, 0)) == KVM_SC_MAGIC_R0)) {
			/* KVM PV hypercalls */
			kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
			r = RESUME_GUEST;
		} else {
			/* Guest syscalls */
			kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SYSCALL);
		}
		kvmppc_account_exit(vcpu, SYSCALL_EXITS);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_DTLB_MISS: {
		unsigned long eaddr = vcpu->arch.fault_dear;
		int gtlb_index;
		gpa_t gpaddr;
		gfn_t gfn;

#ifdef CONFIG_KVM_E500
		if (!(vcpu->arch.shared->msr & MSR_PR) &&
		    (eaddr & PAGE_MASK) == vcpu->arch.magic_page_ea) {
			kvmppc_map_magic(vcpu);
			kvmppc_account_exit(vcpu, DTLB_VIRT_MISS_EXITS);
			r = RESUME_GUEST;

			break;
		}
#endif

		/* Check the guest TLB. */
		gtlb_index = kvmppc_mmu_dtlb_index(vcpu, eaddr);
		if (gtlb_index < 0) {
			/* The guest didn't have a mapping for it. */
			kvmppc_core_queue_dtlb_miss(vcpu,
			                            vcpu->arch.fault_dear,
			                            vcpu->arch.fault_esr);
			kvmppc_mmu_dtlb_miss(vcpu);
			kvmppc_account_exit(vcpu, DTLB_REAL_MISS_EXITS);
			r = RESUME_GUEST;
			break;
		}

		gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
		gfn = gpaddr >> PAGE_SHIFT;

		if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
			/* The guest TLB had a mapping, but the shadow TLB
			 * didn't, and it is RAM. This could be because:
			 * a) the entry is mapping the host kernel, or
			 * b) the guest used a large mapping which we're faking
			 * Either way, we need to satisfy the fault without
			 * invoking the guest. */
			kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
			kvmppc_account_exit(vcpu, DTLB_VIRT_MISS_EXITS);
			r = RESUME_GUEST;
		} else {
			/* Guest has mapped and accessed a page which is not
			 * actually RAM. */
			vcpu->arch.paddr_accessed = gpaddr;
			r = kvmppc_emulate_mmio(run, vcpu);
			kvmppc_account_exit(vcpu, MMIO_EXITS);
		}

		break;
	}

	case BOOKE_INTERRUPT_ITLB_MISS: {
		unsigned long eaddr = vcpu->arch.pc;
		gpa_t gpaddr;
		gfn_t gfn;
		int gtlb_index;

		r = RESUME_GUEST;

		/* Check the guest TLB. */
		gtlb_index = kvmppc_mmu_itlb_index(vcpu, eaddr);
		if (gtlb_index < 0) {
			/* The guest didn't have a mapping for it. */
			kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ITLB_MISS);
			kvmppc_mmu_itlb_miss(vcpu);
			kvmppc_account_exit(vcpu, ITLB_REAL_MISS_EXITS);
			break;
		}

		kvmppc_account_exit(vcpu, ITLB_VIRT_MISS_EXITS);

		gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
		gfn = gpaddr >> PAGE_SHIFT;

		if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
			/* The guest TLB had a mapping, but the shadow TLB
			 * didn't. This could be because:
			 * a) the entry is mapping the host kernel, or
			 * b) the guest used a large mapping which we're faking
			 * Either way, we need to satisfy the fault without
			 * invoking the guest. */
			kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
		} else {
			/* Guest mapped and leaped at non-RAM! */
			kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_MACHINE_CHECK);
		}

		break;
	}

	case BOOKE_INTERRUPT_DEBUG: {
		u32 dbsr;

		vcpu->arch.pc = mfspr(SPRN_CSRR0);

		/* clear IAC events in DBSR register */
		dbsr = mfspr(SPRN_DBSR);
		dbsr &= DBSR_IAC1 | DBSR_IAC2 | DBSR_IAC3 | DBSR_IAC4;
		mtspr(SPRN_DBSR, dbsr);

		run->exit_reason = KVM_EXIT_DEBUG;
		kvmppc_account_exit(vcpu, DEBUG_EXITS);
		r = RESUME_HOST;
		break;
	}

	default:
		printk(KERN_EMERG "exit_nr %d\n", exit_nr);
		BUG();
	}

	local_irq_disable();

	kvmppc_core_prepare_to_enter(vcpu);

	if (!(r & RESUME_HOST)) {
		/* To avoid clobbering exit_reason, only check for signals if
		 * we aren't already exiting to userspace for some other
		 * reason. */
		if (signal_pending(current)) {
			run->exit_reason = KVM_EXIT_INTR;
			r = (-EINTR << 2) | RESUME_HOST | (r & RESUME_FLAG_NV);
			kvmppc_account_exit(vcpu, SIGNAL_EXITS);
		}
	}

	return r;
}

/* Initial guest state: 16MB mapping 0 -> 0, PC = 0, MSR = 0, R1 = 16MB */
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	int i;
	int r;

	vcpu->arch.pc = 0;
	vcpu->arch.shared->msr = 0;
	vcpu->arch.shadow_msr = MSR_USER | MSR_DE | MSR_IS | MSR_DS;
	vcpu->arch.shared->pir = vcpu->vcpu_id;
	kvmppc_set_gpr(vcpu, 1, (16<<20) - 8); /* -8 for the callee-save LR slot */

	vcpu->arch.shadow_pid = 1;

	/* Eye-catching numbers so we know if the guest takes an interrupt
	 * before it's programmed its own IVPR/IVORs. */
	vcpu->arch.ivpr = 0x55550000;
	for (i = 0; i < BOOKE_IRQPRIO_MAX; i++)
		vcpu->arch.ivor[i] = 0x7700 | i * 4;

	kvmppc_init_timing_stats(vcpu);

	r = kvmppc_core_vcpu_setup(vcpu);
	kvmppc_sanity_check(vcpu);
	return r;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	regs->pc = vcpu->arch.pc;
	regs->cr = kvmppc_get_cr(vcpu);
	regs->ctr = vcpu->arch.ctr;
	regs->lr = vcpu->arch.lr;
	regs->xer = kvmppc_get_xer(vcpu);
	regs->msr = vcpu->arch.shared->msr;
	regs->srr0 = vcpu->arch.shared->srr0;
	regs->srr1 = vcpu->arch.shared->srr1;
	regs->pid = vcpu->arch.pid;
	regs->sprg0 = vcpu->arch.shared->sprg0;
	regs->sprg1 = vcpu->arch.shared->sprg1;
	regs->sprg2 = vcpu->arch.shared->sprg2;
	regs->sprg3 = vcpu->arch.shared->sprg3;
	regs->sprg4 = vcpu->arch.shared->sprg4;
	regs->sprg5 = vcpu->arch.shared->sprg5;
	regs->sprg6 = vcpu->arch.shared->sprg6;
	regs->sprg7 = vcpu->arch.shared->sprg7;

	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
		regs->gpr[i] = kvmppc_get_gpr(vcpu, i);

	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	vcpu->arch.pc = regs->pc;
	kvmppc_set_cr(vcpu, regs->cr);
	vcpu->arch.ctr = regs->ctr;
	vcpu->arch.lr = regs->lr;
	kvmppc_set_xer(vcpu, regs->xer);
	kvmppc_set_msr(vcpu, regs->msr);
	vcpu->arch.shared->srr0 = regs->srr0;
	vcpu->arch.shared->srr1 = regs->srr1;
	kvmppc_set_pid(vcpu, regs->pid);
	vcpu->arch.shared->sprg0 = regs->sprg0;
	vcpu->arch.shared->sprg1 = regs->sprg1;
	vcpu->arch.shared->sprg2 = regs->sprg2;
	vcpu->arch.shared->sprg3 = regs->sprg3;
	vcpu->arch.shared->sprg4 = regs->sprg4;
	vcpu->arch.shared->sprg5 = regs->sprg5;
	vcpu->arch.shared->sprg6 = regs->sprg6;
	vcpu->arch.shared->sprg7 = regs->sprg7;

	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
		kvmppc_set_gpr(vcpu, i, regs->gpr[i]);

	return 0;
}

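/*
 * The sregs interface is feature-flagged: each get_sregs_* helper sets
 * the KVM_SREGS_E_* bit for the block it fills in, and each set_sregs_*
 * helper applies a block only if userspace set the corresponding bit.
 */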
static void get_sregs_base(struct kvm_vcpu *vcpu,
                           struct kvm_sregs *sregs)
{
	u64 tb = get_tb();

	sregs->u.e.features |= KVM_SREGS_E_BASE;

	sregs->u.e.csrr0 = vcpu->arch.csrr0;
	sregs->u.e.csrr1 = vcpu->arch.csrr1;
	sregs->u.e.mcsr = vcpu->arch.mcsr;
	sregs->u.e.esr = vcpu->arch.shared->esr;
	sregs->u.e.dear = vcpu->arch.shared->dar;
	sregs->u.e.tsr = vcpu->arch.tsr;
	sregs->u.e.tcr = vcpu->arch.tcr;
	sregs->u.e.dec = kvmppc_get_dec(vcpu, tb);
	sregs->u.e.tb = tb;
	sregs->u.e.vrsave = vcpu->arch.vrsave;
}

static int set_sregs_base(struct kvm_vcpu *vcpu,
                          struct kvm_sregs *sregs)
{
	if (!(sregs->u.e.features & KVM_SREGS_E_BASE))
		return 0;

	vcpu->arch.csrr0 = sregs->u.e.csrr0;
	vcpu->arch.csrr1 = sregs->u.e.csrr1;
	vcpu->arch.mcsr = sregs->u.e.mcsr;
	vcpu->arch.shared->esr = sregs->u.e.esr;
	vcpu->arch.shared->dar = sregs->u.e.dear;
	vcpu->arch.vrsave = sregs->u.e.vrsave;
	kvmppc_set_tcr(vcpu, sregs->u.e.tcr);

	if (sregs->u.e.update_special & KVM_SREGS_E_UPDATE_DEC) {
		vcpu->arch.dec = sregs->u.e.dec;
		kvmppc_emulate_dec(vcpu);
	}

	if (sregs->u.e.update_special & KVM_SREGS_E_UPDATE_TSR) {
		vcpu->arch.tsr = sregs->u.e.tsr;
		update_timer_ints(vcpu);
	}

	return 0;
}

static void get_sregs_arch206(struct kvm_vcpu *vcpu,
                              struct kvm_sregs *sregs)
{
	sregs->u.e.features |= KVM_SREGS_E_ARCH206;

	sregs->u.e.pir = vcpu->vcpu_id;
	sregs->u.e.mcsrr0 = vcpu->arch.mcsrr0;
	sregs->u.e.mcsrr1 = vcpu->arch.mcsrr1;
	sregs->u.e.decar = vcpu->arch.decar;
	sregs->u.e.ivpr = vcpu->arch.ivpr;
}

static int set_sregs_arch206(struct kvm_vcpu *vcpu,
                             struct kvm_sregs *sregs)
{
	if (!(sregs->u.e.features & KVM_SREGS_E_ARCH206))
		return 0;

	if (sregs->u.e.pir != vcpu->vcpu_id)
		return -EINVAL;

	vcpu->arch.mcsrr0 = sregs->u.e.mcsrr0;
	vcpu->arch.mcsrr1 = sregs->u.e.mcsrr1;
	vcpu->arch.decar = sregs->u.e.decar;
	vcpu->arch.ivpr = sregs->u.e.ivpr;

	return 0;
}

void kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
	sregs->u.e.features |= KVM_SREGS_E_IVOR;

	sregs->u.e.ivor_low[0] = vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL];
	sregs->u.e.ivor_low[1] = vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK];
	sregs->u.e.ivor_low[2] = vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE];
	sregs->u.e.ivor_low[3] = vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE];
	sregs->u.e.ivor_low[4] = vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL];
	sregs->u.e.ivor_low[5] = vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT];
	sregs->u.e.ivor_low[6] = vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM];
	sregs->u.e.ivor_low[7] = vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL];
	sregs->u.e.ivor_low[8] = vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL];
	sregs->u.e.ivor_low[9] = vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL];
	sregs->u.e.ivor_low[10] = vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER];
	sregs->u.e.ivor_low[11] = vcpu->arch.ivor[BOOKE_IRQPRIO_FIT];
	sregs->u.e.ivor_low[12] = vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG];
	sregs->u.e.ivor_low[13] = vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS];
	sregs->u.e.ivor_low[14] = vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS];
	sregs->u.e.ivor_low[15] = vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG];
}

int kvmppc_set_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
	if (!(sregs->u.e.features & KVM_SREGS_E_IVOR))
		return 0;

	vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL] = sregs->u.e.ivor_low[0];
	vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK] = sregs->u.e.ivor_low[1];
	vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE] = sregs->u.e.ivor_low[2];
	vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE] = sregs->u.e.ivor_low[3];
	vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL] = sregs->u.e.ivor_low[4];
	vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT] = sregs->u.e.ivor_low[5];
	vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM] = sregs->u.e.ivor_low[6];
	vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL] = sregs->u.e.ivor_low[7];
	vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL] = sregs->u.e.ivor_low[8];
	vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL] = sregs->u.e.ivor_low[9];
	vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER] = sregs->u.e.ivor_low[10];
	vcpu->arch.ivor[BOOKE_IRQPRIO_FIT] = sregs->u.e.ivor_low[11];
	vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG] = sregs->u.e.ivor_low[12];
	vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS] = sregs->u.e.ivor_low[13];
	vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS] = sregs->u.e.ivor_low[14];
	vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG] = sregs->u.e.ivor_low[15];

	return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
	sregs->pvr = vcpu->arch.pvr;

	get_sregs_base(vcpu, sregs);
	get_sregs_arch206(vcpu, sregs);
	kvmppc_core_get_sregs(vcpu, sregs);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
	int ret;

	if (vcpu->arch.pvr != sregs->pvr)
		return -EINVAL;

	ret = set_sregs_base(vcpu, sregs);
	if (ret < 0)
		return ret;

	ret = set_sregs_arch206(vcpu, sregs);
	if (ret < 0)
		return ret;

	return kvmppc_core_set_sregs(vcpu, sregs);
}

int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
	return -EINVAL;
}

int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
                                  struct kvm_translation *tr)
{
	int r;

	r = kvmppc_core_vcpu_translate(vcpu, tr);
	return r;
}

int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
	return -ENOTSUPP;
}

int kvmppc_core_prepare_memory_region(struct kvm *kvm,
				      struct kvm_userspace_memory_region *mem)
{
	return 0;
}

void kvmppc_core_commit_memory_region(struct kvm *kvm,
				struct kvm_userspace_memory_region *mem)
{
}

int kvmppc_core_init_vm(struct kvm *kvm)
{
	return 0;
}

void kvmppc_core_destroy_vm(struct kvm *kvm)
{
}

void kvmppc_set_tcr(struct kvm_vcpu *vcpu, u32 new_tcr)
{
	vcpu->arch.tcr = new_tcr;
	update_timer_ints(vcpu);
}

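/*
 * May be called from (timer) interrupt context.  The smp_wmb() orders
 * the TSR update before the KVM_REQ_PENDING_TIMER request bit, pairing
 * with the smp_mb() in kvmppc_core_check_exceptions().
 */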
void kvmppc_set_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits)
{
	set_bits(tsr_bits, &vcpu->arch.tsr);
	smp_wmb();
	kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu);
	kvm_vcpu_kick(vcpu);
}

void kvmppc_clr_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits)
{
	clear_bits(tsr_bits, &vcpu->arch.tsr);
	update_timer_ints(vcpu);
}

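/*
 * Timer callback for the emulated decrementer: raise TSR[DIS] and kick
 * the vcpu so the interrupt is delivered on the next guest entry.
 */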
void kvmppc_decrementer_func(unsigned long data)
{
	struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;

	kvmppc_set_tsr_bits(vcpu, TSR_DIS);
}

int __init kvmppc_booke_init(void)
{
	unsigned long ivor[16];
	unsigned long max_ivor = 0;
	int i;

	/* We install our own exception handlers by hijacking IVPR.  IVPR
	 * supplies only the upper 16 bits of each vector address, so the
	 * handler area must be 64KB aligned, which a naturally aligned
	 * 64KB allocation gives us. */
	kvmppc_booke_handlers = __get_free_pages(GFP_KERNEL | __GFP_ZERO,
	                                         VCPU_SIZE_ORDER);
	if (!kvmppc_booke_handlers)
		return -ENOMEM;

	/* XXX make sure our handlers are smaller than Linux's */

	/* Copy our interrupt handlers to match host IVORs. That way we don't
	 * have to swap the IVORs on every guest/host transition. */
	ivor[0] = mfspr(SPRN_IVOR0);
	ivor[1] = mfspr(SPRN_IVOR1);
	ivor[2] = mfspr(SPRN_IVOR2);
	ivor[3] = mfspr(SPRN_IVOR3);
	ivor[4] = mfspr(SPRN_IVOR4);
	ivor[5] = mfspr(SPRN_IVOR5);
	ivor[6] = mfspr(SPRN_IVOR6);
	ivor[7] = mfspr(SPRN_IVOR7);
	ivor[8] = mfspr(SPRN_IVOR8);
	ivor[9] = mfspr(SPRN_IVOR9);
	ivor[10] = mfspr(SPRN_IVOR10);
	ivor[11] = mfspr(SPRN_IVOR11);
	ivor[12] = mfspr(SPRN_IVOR12);
	ivor[13] = mfspr(SPRN_IVOR13);
	ivor[14] = mfspr(SPRN_IVOR14);
	ivor[15] = mfspr(SPRN_IVOR15);

	for (i = 0; i < 16; i++) {
		if (ivor[i] > max_ivor)
			max_ivor = ivor[i];

		memcpy((void *)kvmppc_booke_handlers + ivor[i],
		       kvmppc_handlers_start + i * kvmppc_handler_len,
		       kvmppc_handler_len);
	}
	flush_icache_range(kvmppc_booke_handlers,
	                   kvmppc_booke_handlers + max_ivor + kvmppc_handler_len);

	return 0;
}

void __exit kvmppc_booke_exit(void)
{
	free_pages(kvmppc_booke_handlers, VCPU_SIZE_ORDER);
	kvm_exit();
}