// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright IBM Corp. 2007
 * Copyright 2010-2011 Freescale Semiconductor, Inc.
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 *          Scott Wood <scottwood@freescale.com>
 *          Varun Sethi <varun.sethi@freescale.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/gfp.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>

#include <asm/cputable.h>
#include <linux/uaccess.h>
#include <asm/interrupt.h>
#include <asm/kvm_ppc.h>
#include <asm/cacheflush.h>
#include <asm/dbell.h>
#include <asm/hw_irq.h>
#include <asm/irq.h>
#include <asm/time.h>

#include "timing.h"
#include "booke.h"

#define CREATE_TRACE_POINTS
#include "trace_booke.h"

unsigned long kvmppc_booke_handlers;

const struct _kvm_stats_desc kvm_vm_stats_desc[] = {
        KVM_GENERIC_VM_STATS(),
        STATS_DESC_ICOUNTER(VM, num_2M_pages),
        STATS_DESC_ICOUNTER(VM, num_1G_pages)
};

const struct kvm_stats_header kvm_vm_stats_header = {
        .name_size = KVM_STATS_NAME_SIZE,
        .num_desc = ARRAY_SIZE(kvm_vm_stats_desc),
        .id_offset = sizeof(struct kvm_stats_header),
        .desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
        .data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
                       sizeof(kvm_vm_stats_desc),
};

const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
        KVM_GENERIC_VCPU_STATS(),
        STATS_DESC_COUNTER(VCPU, sum_exits),
        STATS_DESC_COUNTER(VCPU, mmio_exits),
        STATS_DESC_COUNTER(VCPU, signal_exits),
        STATS_DESC_COUNTER(VCPU, light_exits),
        STATS_DESC_COUNTER(VCPU, itlb_real_miss_exits),
        STATS_DESC_COUNTER(VCPU, itlb_virt_miss_exits),
        STATS_DESC_COUNTER(VCPU, dtlb_real_miss_exits),
        STATS_DESC_COUNTER(VCPU, dtlb_virt_miss_exits),
        STATS_DESC_COUNTER(VCPU, syscall_exits),
        STATS_DESC_COUNTER(VCPU, isi_exits),
        STATS_DESC_COUNTER(VCPU, dsi_exits),
        STATS_DESC_COUNTER(VCPU, emulated_inst_exits),
        STATS_DESC_COUNTER(VCPU, dec_exits),
        STATS_DESC_COUNTER(VCPU, ext_intr_exits),
        STATS_DESC_COUNTER(VCPU, halt_successful_wait),
        STATS_DESC_COUNTER(VCPU, dbell_exits),
        STATS_DESC_COUNTER(VCPU, gdbell_exits),
        STATS_DESC_COUNTER(VCPU, ld),
        STATS_DESC_COUNTER(VCPU, st),
        STATS_DESC_COUNTER(VCPU, pthru_all),
        STATS_DESC_COUNTER(VCPU, pthru_host),
        STATS_DESC_COUNTER(VCPU, pthru_bad_aff)
};

const struct kvm_stats_header kvm_vcpu_stats_header = {
        .name_size = KVM_STATS_NAME_SIZE,
        .num_desc = ARRAY_SIZE(kvm_vcpu_stats_desc),
        .id_offset = sizeof(struct kvm_stats_header),
        .desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
        .data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
                       sizeof(kvm_vcpu_stats_desc),
};

/* TODO: use vcpu_printf() */
void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu)
{
        int i;

        printk("pc: %08lx msr: %08llx\n", vcpu->arch.regs.nip,
               vcpu->arch.shared->msr);
        printk("lr: %08lx ctr: %08lx\n", vcpu->arch.regs.link,
               vcpu->arch.regs.ctr);
        printk("srr0: %08llx srr1: %08llx\n", vcpu->arch.shared->srr0,
               vcpu->arch.shared->srr1);

        printk("exceptions: %08lx\n", vcpu->arch.pending_exceptions);

        for (i = 0; i < 32; i += 4) {
                printk("gpr%02d: %08lx %08lx %08lx %08lx\n", i,
                       kvmppc_get_gpr(vcpu, i),
                       kvmppc_get_gpr(vcpu, i+1),
                       kvmppc_get_gpr(vcpu, i+2),
                       kvmppc_get_gpr(vcpu, i+3));
        }
}

#ifdef CONFIG_SPE
void kvmppc_vcpu_disable_spe(struct kvm_vcpu *vcpu)
{
        preempt_disable();
        enable_kernel_spe();
        kvmppc_save_guest_spe(vcpu);
        disable_kernel_spe();
        vcpu->arch.shadow_msr &= ~MSR_SPE;
        preempt_enable();
}

static void kvmppc_vcpu_enable_spe(struct kvm_vcpu *vcpu)
{
        preempt_disable();
        enable_kernel_spe();
        kvmppc_load_guest_spe(vcpu);
        disable_kernel_spe();
        vcpu->arch.shadow_msr |= MSR_SPE;
        preempt_enable();
}

static void kvmppc_vcpu_sync_spe(struct kvm_vcpu *vcpu)
{
        if (vcpu->arch.shared->msr & MSR_SPE) {
                if (!(vcpu->arch.shadow_msr & MSR_SPE))
                        kvmppc_vcpu_enable_spe(vcpu);
        } else if (vcpu->arch.shadow_msr & MSR_SPE) {
                kvmppc_vcpu_disable_spe(vcpu);
        }
}
#else
static void kvmppc_vcpu_sync_spe(struct kvm_vcpu *vcpu)
{
}
#endif

/*
 * Load up guest vcpu FP state if it's needed.
 * It also sets MSR_FP in the thread so the host knows we hold the FPU,
 * letting the host save the guest vcpu FP state if another thread
 * needs the FPU. This simulates an FP unavailable fault.
 *
 * Must be called with preemption disabled.
 */
static inline void kvmppc_load_guest_fp(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_PPC_FPU
        if (!(current->thread.regs->msr & MSR_FP)) {
                enable_kernel_fp();
                load_fp_state(&vcpu->arch.fp);
                disable_kernel_fp();
                current->thread.fp_save_area = &vcpu->arch.fp;
                current->thread.regs->msr |= MSR_FP;
        }
#endif
}
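
/*
 * Illustrative note: once fp_save_area points at vcpu->arch.fp, the
 * host's lazy-FP giveup path is expected to spill the guest FP
 * registers back into the vcpu struct if another task claims the FPU;
 * kvmppc_save_guest_fp() below relies on that behaviour.
 */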

/*
 * Save guest vcpu FP state into thread.
 * Must be called with preemption disabled.
 */
static inline void kvmppc_save_guest_fp(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_PPC_FPU
        if (current->thread.regs->msr & MSR_FP)
                giveup_fpu(current);
        current->thread.fp_save_area = NULL;
#endif
}

static void kvmppc_vcpu_sync_fpu(struct kvm_vcpu *vcpu)
{
#if defined(CONFIG_PPC_FPU) && !defined(CONFIG_KVM_BOOKE_HV)
        /* We always treat the FP bit as enabled from the host
           perspective, so only need to adjust the shadow MSR */
        vcpu->arch.shadow_msr &= ~MSR_FP;
        vcpu->arch.shadow_msr |= vcpu->arch.shared->msr & MSR_FP;
#endif
}

/*
 * Simulate an AltiVec unavailable fault to load guest state
 * from the thread into the AltiVec unit.
 * Must be called with preemption disabled.
 */
static inline void kvmppc_load_guest_altivec(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_ALTIVEC
        if (cpu_has_feature(CPU_FTR_ALTIVEC)) {
                if (!(current->thread.regs->msr & MSR_VEC)) {
                        enable_kernel_altivec();
                        load_vr_state(&vcpu->arch.vr);
                        disable_kernel_altivec();
                        current->thread.vr_save_area = &vcpu->arch.vr;
                        current->thread.regs->msr |= MSR_VEC;
                }
        }
#endif
}

/*
 * Save guest vcpu AltiVec state into thread.
 * Must be called with preemption disabled.
 */
static inline void kvmppc_save_guest_altivec(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_ALTIVEC
        if (cpu_has_feature(CPU_FTR_ALTIVEC)) {
                if (current->thread.regs->msr & MSR_VEC)
                        giveup_altivec(current);
                current->thread.vr_save_area = NULL;
        }
#endif
}

static void kvmppc_vcpu_sync_debug(struct kvm_vcpu *vcpu)
{
        /* Synchronize guest's desire to get debug interrupts into shadow MSR */
#ifndef CONFIG_KVM_BOOKE_HV
        vcpu->arch.shadow_msr &= ~MSR_DE;
        vcpu->arch.shadow_msr |= vcpu->arch.shared->msr & MSR_DE;
#endif

        /* Force enable debug interrupts when user space wants to debug */
        if (vcpu->guest_debug) {
#ifdef CONFIG_KVM_BOOKE_HV
                /*
                 * Since there is no shadow MSR, sync MSR_DE into the guest
                 * visible MSR.
                 */
                vcpu->arch.shared->msr |= MSR_DE;
#else
                vcpu->arch.shadow_msr |= MSR_DE;
                vcpu->arch.shared->msr &= ~MSR_DE;
#endif
        }
}

/*
 * Helper function for "full" MSR writes. No need to call this if only
 * EE/CE/ME/DE/RI are changing.
 */
void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr)
{
        u32 old_msr = vcpu->arch.shared->msr;

#ifdef CONFIG_KVM_BOOKE_HV
        new_msr |= MSR_GS;
#endif

        vcpu->arch.shared->msr = new_msr;

        kvmppc_mmu_msr_notify(vcpu, old_msr);
        kvmppc_vcpu_sync_spe(vcpu);
        kvmppc_vcpu_sync_fpu(vcpu);
        kvmppc_vcpu_sync_debug(vcpu);
}

static void kvmppc_booke_queue_irqprio(struct kvm_vcpu *vcpu,
                                       unsigned int priority)
{
        trace_kvm_booke_queue_irqprio(vcpu, priority);
        set_bit(priority, &vcpu->arch.pending_exceptions);
}

void kvmppc_core_queue_dtlb_miss(struct kvm_vcpu *vcpu,
                                 ulong dear_flags, ulong esr_flags)
{
        vcpu->arch.queued_dear = dear_flags;
        vcpu->arch.queued_esr = esr_flags;
        kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DTLB_MISS);
}

void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu,
                                    ulong dear_flags, ulong esr_flags)
{
        vcpu->arch.queued_dear = dear_flags;
        vcpu->arch.queued_esr = esr_flags;
        kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DATA_STORAGE);
}

void kvmppc_core_queue_itlb_miss(struct kvm_vcpu *vcpu)
{
        kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ITLB_MISS);
}

void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu, ulong esr_flags)
{
        vcpu->arch.queued_esr = esr_flags;
        kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_INST_STORAGE);
}

static void kvmppc_core_queue_alignment(struct kvm_vcpu *vcpu, ulong dear_flags,
                                        ulong esr_flags)
{
        vcpu->arch.queued_dear = dear_flags;
        vcpu->arch.queued_esr = esr_flags;
        kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ALIGNMENT);
}

void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong esr_flags)
{
        vcpu->arch.queued_esr = esr_flags;
        kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_PROGRAM);
}

void kvmppc_core_queue_fpunavail(struct kvm_vcpu *vcpu)
{
        kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_FP_UNAVAIL);
}

#ifdef CONFIG_ALTIVEC
void kvmppc_core_queue_vec_unavail(struct kvm_vcpu *vcpu)
{
        kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ALTIVEC_UNAVAIL);
}
#endif

void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
{
        kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DECREMENTER);
}

int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu)
{
        return test_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
}

void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu)
{
        clear_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
}

void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
                                struct kvm_interrupt *irq)
{
        unsigned int prio = BOOKE_IRQPRIO_EXTERNAL;

        if (irq->irq == KVM_INTERRUPT_SET_LEVEL)
                prio = BOOKE_IRQPRIO_EXTERNAL_LEVEL;

        kvmppc_booke_queue_irqprio(vcpu, prio);
}

void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu)
{
        clear_bit(BOOKE_IRQPRIO_EXTERNAL, &vcpu->arch.pending_exceptions);
        clear_bit(BOOKE_IRQPRIO_EXTERNAL_LEVEL, &vcpu->arch.pending_exceptions);
}

static void kvmppc_core_queue_watchdog(struct kvm_vcpu *vcpu)
{
        kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_WATCHDOG);
}

static void kvmppc_core_dequeue_watchdog(struct kvm_vcpu *vcpu)
{
        clear_bit(BOOKE_IRQPRIO_WATCHDOG, &vcpu->arch.pending_exceptions);
}

void kvmppc_core_queue_debug(struct kvm_vcpu *vcpu)
{
        kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DEBUG);
}

void kvmppc_core_dequeue_debug(struct kvm_vcpu *vcpu)
{
        clear_bit(BOOKE_IRQPRIO_DEBUG, &vcpu->arch.pending_exceptions);
}

static void set_guest_srr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
{
        kvmppc_set_srr0(vcpu, srr0);
        kvmppc_set_srr1(vcpu, srr1);
}

static void set_guest_csrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
{
        vcpu->arch.csrr0 = srr0;
        vcpu->arch.csrr1 = srr1;
}

static void set_guest_dsrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
{
        if (cpu_has_feature(CPU_FTR_DEBUG_LVL_EXC)) {
                vcpu->arch.dsrr0 = srr0;
                vcpu->arch.dsrr1 = srr1;
        } else {
                set_guest_csrr(vcpu, srr0, srr1);
        }
}

static void set_guest_mcsrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
{
        vcpu->arch.mcsrr0 = srr0;
        vcpu->arch.mcsrr1 = srr1;
}

/* Deliver the interrupt of the corresponding priority, if possible. */
static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
                                        unsigned int priority)
{
        int allowed = 0;
        ulong msr_mask = 0;
        bool update_esr = false, update_dear = false, update_epr = false;
        ulong crit_raw = vcpu->arch.shared->critical;
        ulong crit_r1 = kvmppc_get_gpr(vcpu, 1);
        bool crit;
        bool keep_irq = false;
        enum int_class int_class;
        ulong new_msr = vcpu->arch.shared->msr;

        /* Truncate crit indicators in 32 bit mode */
        if (!(vcpu->arch.shared->msr & MSR_SF)) {
                crit_raw &= 0xffffffff;
                crit_r1 &= 0xffffffff;
        }

        /* Critical section when crit == r1 */
        crit = (crit_raw == crit_r1);
        /* ... and we're in supervisor mode */
        crit = crit && !(vcpu->arch.shared->msr & MSR_PR);

        if (priority == BOOKE_IRQPRIO_EXTERNAL_LEVEL) {
                priority = BOOKE_IRQPRIO_EXTERNAL;
                keep_irq = true;
        }

        if ((priority == BOOKE_IRQPRIO_EXTERNAL) && vcpu->arch.epr_flags)
                update_epr = true;

        switch (priority) {
        case BOOKE_IRQPRIO_DTLB_MISS:
        case BOOKE_IRQPRIO_DATA_STORAGE:
        case BOOKE_IRQPRIO_ALIGNMENT:
                update_dear = true;
                fallthrough;
        case BOOKE_IRQPRIO_INST_STORAGE:
        case BOOKE_IRQPRIO_PROGRAM:
                update_esr = true;
                fallthrough;
        case BOOKE_IRQPRIO_ITLB_MISS:
        case BOOKE_IRQPRIO_SYSCALL:
        case BOOKE_IRQPRIO_FP_UNAVAIL:
#ifdef CONFIG_SPE_POSSIBLE
        case BOOKE_IRQPRIO_SPE_UNAVAIL:
        case BOOKE_IRQPRIO_SPE_FP_DATA:
        case BOOKE_IRQPRIO_SPE_FP_ROUND:
#endif
#ifdef CONFIG_ALTIVEC
        case BOOKE_IRQPRIO_ALTIVEC_UNAVAIL:
        case BOOKE_IRQPRIO_ALTIVEC_ASSIST:
#endif
        case BOOKE_IRQPRIO_AP_UNAVAIL:
                allowed = 1;
                msr_mask = MSR_CE | MSR_ME | MSR_DE;
                int_class = INT_CLASS_NONCRIT;
                break;
        case BOOKE_IRQPRIO_WATCHDOG:
        case BOOKE_IRQPRIO_CRITICAL:
        case BOOKE_IRQPRIO_DBELL_CRIT:
                allowed = vcpu->arch.shared->msr & MSR_CE;
                allowed = allowed && !crit;
                msr_mask = MSR_ME;
                int_class = INT_CLASS_CRIT;
                break;
        case BOOKE_IRQPRIO_MACHINE_CHECK:
                allowed = vcpu->arch.shared->msr & MSR_ME;
                allowed = allowed && !crit;
                int_class = INT_CLASS_MC;
                break;
        case BOOKE_IRQPRIO_DECREMENTER:
        case BOOKE_IRQPRIO_FIT:
                keep_irq = true;
                fallthrough;
        case BOOKE_IRQPRIO_EXTERNAL:
        case BOOKE_IRQPRIO_DBELL:
                allowed = vcpu->arch.shared->msr & MSR_EE;
                allowed = allowed && !crit;
                msr_mask = MSR_CE | MSR_ME | MSR_DE;
                int_class = INT_CLASS_NONCRIT;
                break;
        case BOOKE_IRQPRIO_DEBUG:
                allowed = vcpu->arch.shared->msr & MSR_DE;
                allowed = allowed && !crit;
                msr_mask = MSR_ME;
                if (cpu_has_feature(CPU_FTR_DEBUG_LVL_EXC))
                        int_class = INT_CLASS_DBG;
                else
                        int_class = INT_CLASS_CRIT;

                break;
        }

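        /*
         * Illustrative note: msr_mask selects which interrupt-enable
         * bits survive delivery. For a noncritical interrupt such as a
         * DTLB miss, msr_mask is MSR_CE | MSR_ME | MSR_DE, so the
         * "new_msr &= msr_mask" below clears MSR_EE (masking further
         * noncritical interrupts) while leaving critical, machine
         * check and debug interrupts enabled for the guest handler.
         */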
        if (allowed) {
                switch (int_class) {
                case INT_CLASS_NONCRIT:
                        set_guest_srr(vcpu, vcpu->arch.regs.nip,
                                      vcpu->arch.shared->msr);
                        break;
                case INT_CLASS_CRIT:
                        set_guest_csrr(vcpu, vcpu->arch.regs.nip,
                                       vcpu->arch.shared->msr);
                        break;
                case INT_CLASS_DBG:
                        set_guest_dsrr(vcpu, vcpu->arch.regs.nip,
                                       vcpu->arch.shared->msr);
                        break;
                case INT_CLASS_MC:
                        set_guest_mcsrr(vcpu, vcpu->arch.regs.nip,
                                        vcpu->arch.shared->msr);
                        break;
                }

                vcpu->arch.regs.nip = vcpu->arch.ivpr |
                                      vcpu->arch.ivor[priority];
                if (update_esr)
                        kvmppc_set_esr(vcpu, vcpu->arch.queued_esr);
                if (update_dear)
                        kvmppc_set_dar(vcpu, vcpu->arch.queued_dear);
                if (update_epr) {
                        if (vcpu->arch.epr_flags & KVMPPC_EPR_USER)
                                kvm_make_request(KVM_REQ_EPR_EXIT, vcpu);
                        else if (vcpu->arch.epr_flags & KVMPPC_EPR_KERNEL) {
                                BUG_ON(vcpu->arch.irq_type != KVMPPC_IRQ_MPIC);
                                kvmppc_mpic_set_epr(vcpu);
                        }
                }

                new_msr &= msr_mask;
#if defined(CONFIG_64BIT)
                if (vcpu->arch.epcr & SPRN_EPCR_ICM)
                        new_msr |= MSR_CM;
#endif
                kvmppc_set_msr(vcpu, new_msr);

                if (!keep_irq)
                        clear_bit(priority, &vcpu->arch.pending_exceptions);
        }

#ifdef CONFIG_KVM_BOOKE_HV
        /*
         * If an interrupt is pending but masked, raise a guest doorbell
         * so that we are notified when the guest enables the relevant
         * MSR bit.
         */
        if (vcpu->arch.pending_exceptions & BOOKE_IRQMASK_EE)
                kvmppc_set_pending_interrupt(vcpu, INT_CLASS_NONCRIT);
        if (vcpu->arch.pending_exceptions & BOOKE_IRQMASK_CE)
                kvmppc_set_pending_interrupt(vcpu, INT_CLASS_CRIT);
        if (vcpu->arch.pending_exceptions & BOOKE_IRQPRIO_MACHINE_CHECK)
                kvmppc_set_pending_interrupt(vcpu, INT_CLASS_MC);
#endif

        return allowed;
}

/*
 * Return the number of jiffies until the next timeout. If the timeout is
 * longer than NEXT_TIMER_MAX_DELTA, then return NEXT_TIMER_MAX_DELTA
 * because a larger value can break the timer APIs.
 */
static unsigned long watchdog_next_timeout(struct kvm_vcpu *vcpu)
{
        u64 tb, wdt_tb, wdt_ticks = 0;
        u64 nr_jiffies = 0;
        u32 period = TCR_GET_WP(vcpu->arch.tcr);

        wdt_tb = 1ULL << (63 - period);
        tb = get_tb();
        /*
         * The watchdog timeout will happen when the TB bit corresponding
         * to the watchdog period toggles from 0 to 1.
         */
        if (tb & wdt_tb)
                wdt_ticks = wdt_tb;

        wdt_ticks += wdt_tb - (tb & (wdt_tb - 1));

        /* Convert timebase ticks to jiffies */
        nr_jiffies = wdt_ticks;

        if (do_div(nr_jiffies, tb_ticks_per_jiffy))
                nr_jiffies++;

        return min_t(unsigned long long, nr_jiffies, NEXT_TIMER_MAX_DELTA);
}
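
/*
 * Illustrative worked example (values chosen for this comment): with
 * period = 30, wdt_tb = 1ULL << 33 = 0x200000000. If tb = 0x200001000,
 * the watchdog bit is already set, so the next 0->1 toggle is a full
 * wrap away:
 *
 *   wdt_ticks = wdt_tb + (wdt_tb - (tb & (wdt_tb - 1)))
 *             = 0x200000000 + (0x200000000 - 0x1000)
 *
 * which is then divided by tb_ticks_per_jiffy, rounding up.
 */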

static void arm_next_watchdog(struct kvm_vcpu *vcpu)
{
        unsigned long nr_jiffies;
        unsigned long flags;

        /*
         * If TSR_ENW and TSR_WIS are not set then no need to exit to
         * userspace, so clear the KVM_REQ_WATCHDOG request.
         */
        if ((vcpu->arch.tsr & (TSR_ENW | TSR_WIS)) != (TSR_ENW | TSR_WIS))
                kvm_clear_request(KVM_REQ_WATCHDOG, vcpu);

        spin_lock_irqsave(&vcpu->arch.wdt_lock, flags);
        nr_jiffies = watchdog_next_timeout(vcpu);
        /*
         * If the number of jiffies of watchdog timer >= NEXT_TIMER_MAX_DELTA
         * then do not run the watchdog timer as this can break timer APIs.
         */
        if (nr_jiffies < NEXT_TIMER_MAX_DELTA)
                mod_timer(&vcpu->arch.wdt_timer, jiffies + nr_jiffies);
        else
                del_timer(&vcpu->arch.wdt_timer);
        spin_unlock_irqrestore(&vcpu->arch.wdt_lock, flags);
}

void kvmppc_watchdog_func(struct timer_list *t)
{
        struct kvm_vcpu *vcpu = from_timer(vcpu, t, arch.wdt_timer);
        u32 tsr, new_tsr;
        int final;

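        /*
         * Two-stage watchdog state machine (descriptive note): the
         * first expiry sets TSR[ENW], the second sets TSR[WIS], and an
         * expiry with both bits already set is the "final" event that
         * may trigger the TCR[WRC] action via a userspace exit below.
         */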
        do {
                new_tsr = tsr = vcpu->arch.tsr;
                final = 0;

                /* Time out event */
                if (tsr & TSR_ENW) {
                        if (tsr & TSR_WIS)
                                final = 1;
                        else
                                new_tsr = tsr | TSR_WIS;
                } else {
                        new_tsr = tsr | TSR_ENW;
                }
        } while (cmpxchg(&vcpu->arch.tsr, tsr, new_tsr) != tsr);

        if (new_tsr & TSR_WIS) {
                smp_wmb();
                kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu);
                kvm_vcpu_kick(vcpu);
        }

        /*
         * If this is final watchdog expiry and some action is required
         * then exit to userspace.
         */
        if (final && (vcpu->arch.tcr & TCR_WRC_MASK) &&
            vcpu->arch.watchdog_enabled) {
                smp_wmb();
                kvm_make_request(KVM_REQ_WATCHDOG, vcpu);
                kvm_vcpu_kick(vcpu);
        }

        /*
         * Stop running the watchdog timer after final expiration to
         * prevent the host from being flooded with timers if the
         * guest sets a short period.
         * Timers will resume when TSR/TCR is updated next time.
         */
        if (!final)
                arm_next_watchdog(vcpu);
}

static void update_timer_ints(struct kvm_vcpu *vcpu)
{
        if ((vcpu->arch.tcr & TCR_DIE) && (vcpu->arch.tsr & TSR_DIS))
                kvmppc_core_queue_dec(vcpu);
        else
                kvmppc_core_dequeue_dec(vcpu);

        if ((vcpu->arch.tcr & TCR_WIE) && (vcpu->arch.tsr & TSR_WIS))
                kvmppc_core_queue_watchdog(vcpu);
        else
                kvmppc_core_dequeue_watchdog(vcpu);
}

static void kvmppc_core_check_exceptions(struct kvm_vcpu *vcpu)
{
        unsigned long *pending = &vcpu->arch.pending_exceptions;
        unsigned int priority;

        priority = __ffs(*pending);
        while (priority < BOOKE_IRQPRIO_MAX) {
                if (kvmppc_booke_irqprio_deliver(vcpu, priority))
                        break;

                priority = find_next_bit(pending,
                                         BITS_PER_BYTE * sizeof(*pending),
                                         priority + 1);
        }

        /* Tell the guest about our interrupt status */
        vcpu->arch.shared->int_pending = !!*pending;
}

/* Check pending exceptions and deliver one, if possible. */
int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
{
        int r = 0;
        WARN_ON_ONCE(!irqs_disabled());

        kvmppc_core_check_exceptions(vcpu);

        if (kvm_request_pending(vcpu)) {
                /* Exception delivery raised request; start over */
                return 1;
        }

        if (vcpu->arch.shared->msr & MSR_WE) {
                local_irq_enable();
                kvm_vcpu_halt(vcpu);
                hard_irq_disable();

                kvmppc_set_exit_type(vcpu, EMULATED_MTMSRWE_EXITS);
                r = 1;
        }

        return r;
}

int kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
{
        int r = 1; /* Indicate we want to get back into the guest */

        if (kvm_check_request(KVM_REQ_PENDING_TIMER, vcpu))
                update_timer_ints(vcpu);
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
        if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
                kvmppc_core_flush_tlb(vcpu);
#endif

        if (kvm_check_request(KVM_REQ_WATCHDOG, vcpu)) {
                vcpu->run->exit_reason = KVM_EXIT_WATCHDOG;
                r = 0;
        }

        if (kvm_check_request(KVM_REQ_EPR_EXIT, vcpu)) {
                vcpu->run->epr.epr = 0;
                vcpu->arch.epr_needed = true;
                vcpu->run->exit_reason = KVM_EXIT_EPR;
                r = 0;
        }

        return r;
}

int kvmppc_vcpu_run(struct kvm_vcpu *vcpu)
{
        int ret, s;
        struct debug_reg debug;

        if (!vcpu->arch.sane) {
                vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                return -EINVAL;
        }

        s = kvmppc_prepare_to_enter(vcpu);
        if (s <= 0) {
                ret = s;
                goto out;
        }
        /* interrupts now hard-disabled */

#ifdef CONFIG_PPC_FPU
        /* Save userspace FPU state in stack */
        enable_kernel_fp();

        /*
         * Since we can't trap on MSR_FP in GS-mode, we consider the guest
         * as always using the FPU.
         */
        kvmppc_load_guest_fp(vcpu);
#endif

#ifdef CONFIG_ALTIVEC
        /* Save userspace AltiVec state in stack */
        if (cpu_has_feature(CPU_FTR_ALTIVEC))
                enable_kernel_altivec();
        /*
         * Since we can't trap on MSR_VEC in GS-mode, we consider the guest
         * as always using AltiVec.
         */
        kvmppc_load_guest_altivec(vcpu);
#endif

        /* Switch to guest debug context */
        debug = vcpu->arch.dbg_reg;
        switch_booke_debug_regs(&debug);
        debug = current->thread.debug;
        current->thread.debug = vcpu->arch.dbg_reg;

        vcpu->arch.pgdir = vcpu->kvm->mm->pgd;
        kvmppc_fix_ee_before_entry();

        ret = __kvmppc_vcpu_run(vcpu);

        /*
         * No need for guest_exit. It's done in handle_exit.
         * We also get here with interrupts enabled.
         */

        /* Switch back to user space debug context */
        switch_booke_debug_regs(&debug);
        current->thread.debug = debug;

#ifdef CONFIG_PPC_FPU
        kvmppc_save_guest_fp(vcpu);
#endif

#ifdef CONFIG_ALTIVEC
        kvmppc_save_guest_altivec(vcpu);
#endif

out:
        vcpu->mode = OUTSIDE_GUEST_MODE;
        return ret;
}

static int emulation_exit(struct kvm_vcpu *vcpu)
{
        enum emulation_result er;

        er = kvmppc_emulate_instruction(vcpu);
        switch (er) {
        case EMULATE_DONE:
                /* don't overwrite subtypes, just account kvm_stats */
                kvmppc_account_exit_stat(vcpu, EMULATED_INST_EXITS);
                /* Future optimization: only reload non-volatiles if
                 * they were actually modified by emulation. */
                return RESUME_GUEST_NV;

        case EMULATE_AGAIN:
                return RESUME_GUEST;

        case EMULATE_FAIL:
                printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
                       __func__, vcpu->arch.regs.nip, vcpu->arch.last_inst);
                /* For debugging, encode the failing instruction and
                 * report it to userspace. */
                vcpu->run->hw.hardware_exit_reason = ~0ULL << 32;
                vcpu->run->hw.hardware_exit_reason |= vcpu->arch.last_inst;
                kvmppc_core_queue_program(vcpu, ESR_PIL);
                return RESUME_HOST;

        case EMULATE_EXIT_USER:
                return RESUME_HOST;

        default:
                BUG();
        }
}

static int kvmppc_handle_debug(struct kvm_vcpu *vcpu)
{
        struct kvm_run *run = vcpu->run;
        struct debug_reg *dbg_reg = &(vcpu->arch.dbg_reg);
        u32 dbsr = vcpu->arch.dbsr;

        if (vcpu->guest_debug == 0) {
                /*
                 * Debug resources belong to the guest.
                 * Imprecise debug events are not injected.
                 */
                if (dbsr & DBSR_IDE) {
                        dbsr &= ~DBSR_IDE;
                        if (!dbsr)
                                return RESUME_GUEST;
                }

                if (dbsr && (vcpu->arch.shared->msr & MSR_DE) &&
                    (vcpu->arch.dbg_reg.dbcr0 & DBCR0_IDM))
                        kvmppc_core_queue_debug(vcpu);

                /* Inject a program interrupt if trap debug is not allowed */
                if ((dbsr & DBSR_TIE) && !(vcpu->arch.shared->msr & MSR_DE))
                        kvmppc_core_queue_program(vcpu, ESR_PTR);

                return RESUME_GUEST;
        }

        /*
         * Debug resources are owned by userspace.
         * Clear the guest dbsr (vcpu->arch.dbsr).
         */
        vcpu->arch.dbsr = 0;
        run->debug.arch.status = 0;
        run->debug.arch.address = vcpu->arch.regs.nip;

        if (dbsr & (DBSR_IAC1 | DBSR_IAC2 | DBSR_IAC3 | DBSR_IAC4)) {
                run->debug.arch.status |= KVMPPC_DEBUG_BREAKPOINT;
        } else {
                if (dbsr & (DBSR_DAC1W | DBSR_DAC2W))
                        run->debug.arch.status |= KVMPPC_DEBUG_WATCH_WRITE;
                else if (dbsr & (DBSR_DAC1R | DBSR_DAC2R))
                        run->debug.arch.status |= KVMPPC_DEBUG_WATCH_READ;
                if (dbsr & (DBSR_DAC1R | DBSR_DAC1W))
                        run->debug.arch.address = dbg_reg->dac1;
                else if (dbsr & (DBSR_DAC2R | DBSR_DAC2W))
                        run->debug.arch.address = dbg_reg->dac2;
        }

        return RESUME_HOST;
}

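/*
 * Descriptive note: build a minimal pt_regs snapshot of the current
 * host context (stack pointer, link register, MSR, and an instruction
 * pointer obtained via the "bl 1f; 1: mflr" idiom) so that host
 * exception handlers can be called as if from a real interrupt frame.
 */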
static void kvmppc_fill_pt_regs(struct pt_regs *regs)
{
        ulong r1, ip, msr, lr;

        asm("mr %0, 1" : "=r"(r1));
        asm("mflr %0" : "=r"(lr));
        asm("mfmsr %0" : "=r"(msr));
        asm("bl 1f; 1: mflr %0" : "=r"(ip));

        memset(regs, 0, sizeof(*regs));
        regs->gpr[1] = r1;
        regs->nip = ip;
        regs->msr = msr;
        regs->link = lr;
}

/*
 * For interrupts that need to be handled by host interrupt handlers,
 * the corresponding host handler is called from here in a similar way
 * (but not exactly) to how it is called from the low level handlers
 * (such as from arch/powerpc/kernel/head_fsl_booke.S).
 */
static void kvmppc_restart_interrupt(struct kvm_vcpu *vcpu,
                                     unsigned int exit_nr)
{
        struct pt_regs regs;

        switch (exit_nr) {
        case BOOKE_INTERRUPT_EXTERNAL:
                kvmppc_fill_pt_regs(&regs);
                do_IRQ(&regs);
                break;
        case BOOKE_INTERRUPT_DECREMENTER:
                kvmppc_fill_pt_regs(&regs);
                timer_interrupt(&regs);
                break;
#if defined(CONFIG_PPC_DOORBELL)
        case BOOKE_INTERRUPT_DOORBELL:
                kvmppc_fill_pt_regs(&regs);
                doorbell_exception(&regs);
                break;
#endif
        case BOOKE_INTERRUPT_MACHINE_CHECK:
                /* FIXME */
                break;
        case BOOKE_INTERRUPT_PERFORMANCE_MONITOR:
                kvmppc_fill_pt_regs(&regs);
                performance_monitor_exception(&regs);
                break;
        case BOOKE_INTERRUPT_WATCHDOG:
                kvmppc_fill_pt_regs(&regs);
#ifdef CONFIG_BOOKE_WDT
                WatchdogException(&regs);
#else
                unknown_exception(&regs);
#endif
                break;
        case BOOKE_INTERRUPT_CRITICAL:
                kvmppc_fill_pt_regs(&regs);
                unknown_exception(&regs);
                break;
        case BOOKE_INTERRUPT_DEBUG:
                /* Save DBSR before preemption is enabled */
                vcpu->arch.dbsr = mfspr(SPRN_DBSR);
                kvmppc_clear_dbsr();
                break;
        }
}

static int kvmppc_resume_inst_load(struct kvm_vcpu *vcpu,
                                   enum emulation_result emulated, u32 last_inst)
{
        switch (emulated) {
        case EMULATE_AGAIN:
                return RESUME_GUEST;

        case EMULATE_FAIL:
                pr_debug("%s: load instruction from guest address %lx failed\n",
                         __func__, vcpu->arch.regs.nip);
                /* For debugging, encode the failing instruction and
                 * report it to userspace. */
                vcpu->run->hw.hardware_exit_reason = ~0ULL << 32;
                vcpu->run->hw.hardware_exit_reason |= last_inst;
                kvmppc_core_queue_program(vcpu, ESR_PIL);
                return RESUME_HOST;

        default:
                BUG();
        }
}

/**
 * kvmppc_handle_exit
 *
 * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV)
 */
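 *
 * Illustrative example: a plain RESUME_GUEST carries no errcode, while
 * the out: path below packs a zero-or-negative return s from
 * kvmppc_prepare_to_enter() as (s << 2) | RESUME_HOST, preserving
 * RESUME_FLAG_NV so non-volatile registers are reloaded.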
 */
int kvmppc_handle_exit(struct kvm_vcpu *vcpu, unsigned int exit_nr)
{
        struct kvm_run *run = vcpu->run;
        int r = RESUME_HOST;
        int s;
        int idx;
        u32 last_inst = KVM_INST_FETCH_FAILED;
        enum emulation_result emulated = EMULATE_DONE;

        /* update before a new last_exit_type is rewritten */
        kvmppc_update_timing_stats(vcpu);

        /* restart interrupts if they were meant for the host */
        kvmppc_restart_interrupt(vcpu, exit_nr);

        /*
         * get last instruction before being preempted
         * TODO: for e6500 check also BOOKE_INTERRUPT_LRAT_ERROR & ESR_DATA
         */
        switch (exit_nr) {
        case BOOKE_INTERRUPT_DATA_STORAGE:
        case BOOKE_INTERRUPT_DTLB_MISS:
        case BOOKE_INTERRUPT_HV_PRIV:
                emulated = kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
                break;
        case BOOKE_INTERRUPT_PROGRAM:
                /* SW breakpoints arrive as illegal instructions on HV */
                if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
                        emulated = kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
                break;
        default:
                break;
        }

        trace_kvm_exit(exit_nr, vcpu);

        context_tracking_guest_exit();
        if (!vtime_accounting_enabled_this_cpu()) {
                local_irq_enable();
                /*
                 * Service IRQs here before vtime_account_guest_exit() so any
                 * ticks that occurred while running the guest are accounted to
                 * the guest. If vtime accounting is enabled, accounting uses
                 * TB rather than ticks, so it can be done without enabling
                 * interrupts here, which has the problem that it accounts
                 * interrupt processing overhead to the host.
                 */
                local_irq_disable();
        }
        vtime_account_guest_exit();

        local_irq_enable();

        run->exit_reason = KVM_EXIT_UNKNOWN;
        run->ready_for_interrupt_injection = 1;

        if (emulated != EMULATE_DONE) {
                r = kvmppc_resume_inst_load(vcpu, emulated, last_inst);
                goto out;
        }

        switch (exit_nr) {
        case BOOKE_INTERRUPT_MACHINE_CHECK:
                printk("MACHINE CHECK: %lx\n", mfspr(SPRN_MCSR));
                kvmppc_dump_vcpu(vcpu);
                /* For debugging, send invalid exit reason to user space */
                run->hw.hardware_exit_reason = ~1ULL << 32;
                run->hw.hardware_exit_reason |= mfspr(SPRN_MCSR);
                r = RESUME_HOST;
                break;

        case BOOKE_INTERRUPT_EXTERNAL:
                kvmppc_account_exit(vcpu, EXT_INTR_EXITS);
                r = RESUME_GUEST;
                break;

        case BOOKE_INTERRUPT_DECREMENTER:
                kvmppc_account_exit(vcpu, DEC_EXITS);
                r = RESUME_GUEST;
                break;

        case BOOKE_INTERRUPT_WATCHDOG:
                r = RESUME_GUEST;
                break;

        case BOOKE_INTERRUPT_DOORBELL:
                kvmppc_account_exit(vcpu, DBELL_EXITS);
                r = RESUME_GUEST;
                break;

        case BOOKE_INTERRUPT_GUEST_DBELL_CRIT:
                kvmppc_account_exit(vcpu, GDBELL_EXITS);

                /*
                 * We are here because there is a pending guest interrupt
                 * which could not be delivered as MSR_CE or MSR_ME was not
                 * set. Once we break from here we will retry delivery.
                 */
                r = RESUME_GUEST;
                break;

        case BOOKE_INTERRUPT_GUEST_DBELL:
                kvmppc_account_exit(vcpu, GDBELL_EXITS);

                /*
                 * We are here because there is a pending guest interrupt
                 * which could not be delivered as MSR_EE was not set. Once
                 * we break from here we will retry delivery.
                 */
                r = RESUME_GUEST;
                break;

        case BOOKE_INTERRUPT_PERFORMANCE_MONITOR:
                r = RESUME_GUEST;
                break;

        case BOOKE_INTERRUPT_HV_PRIV:
                r = emulation_exit(vcpu);
                break;

        case BOOKE_INTERRUPT_PROGRAM:
                if ((vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) &&
                    (last_inst == KVMPPC_INST_SW_BREAKPOINT)) {
                        /*
                         * We are here because of an SW breakpoint instr,
                         * so let's return to host to handle it.
                         */
                        r = kvmppc_handle_debug(vcpu);
                        run->exit_reason = KVM_EXIT_DEBUG;
                        kvmppc_account_exit(vcpu, DEBUG_EXITS);
                        break;
                }

                if (vcpu->arch.shared->msr & (MSR_PR | MSR_GS)) {
                        /*
                         * Program traps generated by user-level software must
                         * be handled by the guest kernel.
                         *
                         * In GS mode, hypervisor privileged instructions trap
                         * on BOOKE_INTERRUPT_HV_PRIV, not here, so these are
                         * actual program interrupts, handled by the guest.
                         */
                        kvmppc_core_queue_program(vcpu, vcpu->arch.fault_esr);
                        r = RESUME_GUEST;
                        kvmppc_account_exit(vcpu, USR_PR_INST);
                        break;
                }

                r = emulation_exit(vcpu);
                break;

        case BOOKE_INTERRUPT_FP_UNAVAIL:
                kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_FP_UNAVAIL);
                kvmppc_account_exit(vcpu, FP_UNAVAIL);
                r = RESUME_GUEST;
                break;

#ifdef CONFIG_SPE
        case BOOKE_INTERRUPT_SPE_UNAVAIL: {
                if (vcpu->arch.shared->msr & MSR_SPE)
                        kvmppc_vcpu_enable_spe(vcpu);
                else
                        kvmppc_booke_queue_irqprio(vcpu,
                                                   BOOKE_IRQPRIO_SPE_UNAVAIL);
                r = RESUME_GUEST;
                break;
        }

        case BOOKE_INTERRUPT_SPE_FP_DATA:
                kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_DATA);
                r = RESUME_GUEST;
                break;

        case BOOKE_INTERRUPT_SPE_FP_ROUND:
                kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_ROUND);
                r = RESUME_GUEST;
                break;
#elif defined(CONFIG_SPE_POSSIBLE)
        case BOOKE_INTERRUPT_SPE_UNAVAIL:
                /*
                 * Guest wants SPE, but host kernel doesn't support it. Send
                 * an "unimplemented operation" program check to the guest.
                 */
                kvmppc_core_queue_program(vcpu, ESR_PUO | ESR_SPV);
                r = RESUME_GUEST;
                break;

        /*
         * These really should never happen without CONFIG_SPE,
         * as we should never enable the real MSR[SPE] in the guest.
         */
        case BOOKE_INTERRUPT_SPE_FP_DATA:
        case BOOKE_INTERRUPT_SPE_FP_ROUND:
                printk(KERN_CRIT "%s: unexpected SPE interrupt %u at %08lx\n",
                       __func__, exit_nr, vcpu->arch.regs.nip);
                run->hw.hardware_exit_reason = exit_nr;
                r = RESUME_HOST;
                break;
#endif /* CONFIG_SPE_POSSIBLE */

        /*
         * On cores with Vector category, KVM is loaded only if CONFIG_ALTIVEC,
         * see kvmppc_core_check_processor_compat().
         */
#ifdef CONFIG_ALTIVEC
        case BOOKE_INTERRUPT_ALTIVEC_UNAVAIL:
                kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ALTIVEC_UNAVAIL);
                r = RESUME_GUEST;
                break;

        case BOOKE_INTERRUPT_ALTIVEC_ASSIST:
                kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ALTIVEC_ASSIST);
                r = RESUME_GUEST;
                break;
#endif

        case BOOKE_INTERRUPT_DATA_STORAGE:
                kvmppc_core_queue_data_storage(vcpu, vcpu->arch.fault_dear,
                                               vcpu->arch.fault_esr);
                kvmppc_account_exit(vcpu, DSI_EXITS);
                r = RESUME_GUEST;
                break;

        case BOOKE_INTERRUPT_INST_STORAGE:
                kvmppc_core_queue_inst_storage(vcpu, vcpu->arch.fault_esr);
                kvmppc_account_exit(vcpu, ISI_EXITS);
                r = RESUME_GUEST;
                break;

        case BOOKE_INTERRUPT_ALIGNMENT:
                kvmppc_core_queue_alignment(vcpu, vcpu->arch.fault_dear,
                                            vcpu->arch.fault_esr);
                r = RESUME_GUEST;
                break;

#ifdef CONFIG_KVM_BOOKE_HV
        case BOOKE_INTERRUPT_HV_SYSCALL:
                if (!(vcpu->arch.shared->msr & MSR_PR)) {
                        kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
                } else {
                        /*
                         * hcall from guest userspace -- send privileged
                         * instruction program check.
                         */
                        kvmppc_core_queue_program(vcpu, ESR_PPR);
                }

                r = RESUME_GUEST;
                break;
#else
        case BOOKE_INTERRUPT_SYSCALL:
                if (!(vcpu->arch.shared->msr & MSR_PR) &&
                    (((u32)kvmppc_get_gpr(vcpu, 0)) == KVM_SC_MAGIC_R0)) {
                        /* KVM PV hypercalls */
                        kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
                        r = RESUME_GUEST;
                } else {
                        /* Guest syscalls */
                        kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SYSCALL);
                }
                kvmppc_account_exit(vcpu, SYSCALL_EXITS);
                r = RESUME_GUEST;
                break;
#endif

        case BOOKE_INTERRUPT_DTLB_MISS: {
                unsigned long eaddr = vcpu->arch.fault_dear;
                int gtlb_index;
                gpa_t gpaddr;
                gfn_t gfn;

#ifdef CONFIG_KVM_E500V2
                if (!(vcpu->arch.shared->msr & MSR_PR) &&
                    (eaddr & PAGE_MASK) == vcpu->arch.magic_page_ea) {
                        kvmppc_map_magic(vcpu);
                        kvmppc_account_exit(vcpu, DTLB_VIRT_MISS_EXITS);
                        r = RESUME_GUEST;

                        break;
                }
#endif

                /* Check the guest TLB. */
                gtlb_index = kvmppc_mmu_dtlb_index(vcpu, eaddr);
                if (gtlb_index < 0) {
                        /* The guest didn't have a mapping for it. */
                        kvmppc_core_queue_dtlb_miss(vcpu,
                                                    vcpu->arch.fault_dear,
                                                    vcpu->arch.fault_esr);
                        kvmppc_mmu_dtlb_miss(vcpu);
                        kvmppc_account_exit(vcpu, DTLB_REAL_MISS_EXITS);
                        r = RESUME_GUEST;
                        break;
                }

                idx = srcu_read_lock(&vcpu->kvm->srcu);

                gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
                gfn = gpaddr >> PAGE_SHIFT;

                if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
                        /* The guest TLB had a mapping, but the shadow TLB
                         * didn't, and it is RAM. This could be because:
                         * a) the entry is mapping the host kernel, or
                         * b) the guest used a large mapping which we're faking
                         * Either way, we need to satisfy the fault without
                         * invoking the guest. */
                        kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
                        kvmppc_account_exit(vcpu, DTLB_VIRT_MISS_EXITS);
                        r = RESUME_GUEST;
                } else {
                        /* Guest has mapped and accessed a page which is not
                         * actually RAM. */
                        vcpu->arch.paddr_accessed = gpaddr;
                        vcpu->arch.vaddr_accessed = eaddr;
                        r = kvmppc_emulate_mmio(vcpu);
                        kvmppc_account_exit(vcpu, MMIO_EXITS);
                }

                srcu_read_unlock(&vcpu->kvm->srcu, idx);
                break;
        }

        case BOOKE_INTERRUPT_ITLB_MISS: {
                unsigned long eaddr = vcpu->arch.regs.nip;
                gpa_t gpaddr;
                gfn_t gfn;
                int gtlb_index;

                r = RESUME_GUEST;

                /* Check the guest TLB. */
                gtlb_index = kvmppc_mmu_itlb_index(vcpu, eaddr);
                if (gtlb_index < 0) {
                        /* The guest didn't have a mapping for it. */
                        kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ITLB_MISS);
                        kvmppc_mmu_itlb_miss(vcpu);
                        kvmppc_account_exit(vcpu, ITLB_REAL_MISS_EXITS);
                        break;
                }

                kvmppc_account_exit(vcpu, ITLB_VIRT_MISS_EXITS);

                idx = srcu_read_lock(&vcpu->kvm->srcu);

                gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
                gfn = gpaddr >> PAGE_SHIFT;

                if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
                        /* The guest TLB had a mapping, but the shadow TLB
                         * didn't. This could be because:
                         * a) the entry is mapping the host kernel, or
                         * b) the guest used a large mapping which we're faking
                         * Either way, we need to satisfy the fault without
                         * invoking the guest. */
                        kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
                } else {
                        /* Guest mapped and leaped at non-RAM! */
                        kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_MACHINE_CHECK);
                }

                srcu_read_unlock(&vcpu->kvm->srcu, idx);
                break;
        }

        case BOOKE_INTERRUPT_DEBUG: {
                r = kvmppc_handle_debug(vcpu);
                if (r == RESUME_HOST)
                        run->exit_reason = KVM_EXIT_DEBUG;
                kvmppc_account_exit(vcpu, DEBUG_EXITS);
                break;
        }

        default:
                printk(KERN_EMERG "exit_nr %d\n", exit_nr);
                BUG();
        }

out:
        /*
         * To avoid clobbering exit_reason, only check for signals if we
         * aren't already exiting to userspace for some other reason.
         */
        if (!(r & RESUME_HOST)) {
                s = kvmppc_prepare_to_enter(vcpu);
                if (s <= 0)
                        r = (s << 2) | RESUME_HOST | (r & RESUME_FLAG_NV);
                else {
                        /* interrupts now hard-disabled */
                        kvmppc_fix_ee_before_entry();
                        kvmppc_load_guest_fp(vcpu);
                        kvmppc_load_guest_altivec(vcpu);
                }
        }

        return r;
}

static void kvmppc_set_tsr(struct kvm_vcpu *vcpu, u32 new_tsr)
{
        u32 old_tsr = vcpu->arch.tsr;

        vcpu->arch.tsr = new_tsr;

        if ((old_tsr ^ vcpu->arch.tsr) & (TSR_ENW | TSR_WIS))
                arm_next_watchdog(vcpu);

        update_timer_ints(vcpu);
}

int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu)
{
        /* setup watchdog timer once */
        spin_lock_init(&vcpu->arch.wdt_lock);
        timer_setup(&vcpu->arch.wdt_timer, kvmppc_watchdog_func, 0);

        /*
         * Clear DBSR.MRR to avoid a guest debug interrupt, as this
         * status is of host interest only.
         */
        mtspr(SPRN_DBSR, DBSR_MRR);
        return 0;
}

void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
        del_timer_sync(&vcpu->arch.wdt_timer);
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        int i;

        vcpu_load(vcpu);

        regs->pc = vcpu->arch.regs.nip;
        regs->cr = kvmppc_get_cr(vcpu);
        regs->ctr = vcpu->arch.regs.ctr;
        regs->lr = vcpu->arch.regs.link;
        regs->xer = kvmppc_get_xer(vcpu);
        regs->msr = vcpu->arch.shared->msr;
        regs->srr0 = kvmppc_get_srr0(vcpu);
        regs->srr1 = kvmppc_get_srr1(vcpu);
        regs->pid = vcpu->arch.pid;
        regs->sprg0 = kvmppc_get_sprg0(vcpu);
        regs->sprg1 = kvmppc_get_sprg1(vcpu);
        regs->sprg2 = kvmppc_get_sprg2(vcpu);
        regs->sprg3 = kvmppc_get_sprg3(vcpu);
        regs->sprg4 = kvmppc_get_sprg4(vcpu);
        regs->sprg5 = kvmppc_get_sprg5(vcpu);
        regs->sprg6 = kvmppc_get_sprg6(vcpu);
        regs->sprg7 = kvmppc_get_sprg7(vcpu);

        for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
                regs->gpr[i] = kvmppc_get_gpr(vcpu, i);

        vcpu_put(vcpu);
        return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        int i;

        vcpu_load(vcpu);

        vcpu->arch.regs.nip = regs->pc;
        kvmppc_set_cr(vcpu, regs->cr);
        vcpu->arch.regs.ctr = regs->ctr;
        vcpu->arch.regs.link = regs->lr;
        kvmppc_set_xer(vcpu, regs->xer);
        kvmppc_set_msr(vcpu, regs->msr);
        kvmppc_set_srr0(vcpu, regs->srr0);
        kvmppc_set_srr1(vcpu, regs->srr1);
        kvmppc_set_pid(vcpu, regs->pid);
        kvmppc_set_sprg0(vcpu, regs->sprg0);
        kvmppc_set_sprg1(vcpu, regs->sprg1);
        kvmppc_set_sprg2(vcpu, regs->sprg2);
        kvmppc_set_sprg3(vcpu, regs->sprg3);
        kvmppc_set_sprg4(vcpu, regs->sprg4);
        kvmppc_set_sprg5(vcpu, regs->sprg5);
        kvmppc_set_sprg6(vcpu, regs->sprg6);
        kvmppc_set_sprg7(vcpu, regs->sprg7);

        for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
                kvmppc_set_gpr(vcpu, i, regs->gpr[i]);

        vcpu_put(vcpu);
        return 0;
}

static void get_sregs_base(struct kvm_vcpu *vcpu,
                           struct kvm_sregs *sregs)
{
        u64 tb = get_tb();

        sregs->u.e.features |= KVM_SREGS_E_BASE;

        sregs->u.e.csrr0 = vcpu->arch.csrr0;
        sregs->u.e.csrr1 = vcpu->arch.csrr1;
        sregs->u.e.mcsr = vcpu->arch.mcsr;
        sregs->u.e.esr = kvmppc_get_esr(vcpu);
        sregs->u.e.dear = kvmppc_get_dar(vcpu);
        sregs->u.e.tsr = vcpu->arch.tsr;
        sregs->u.e.tcr = vcpu->arch.tcr;
        sregs->u.e.dec = kvmppc_get_dec(vcpu, tb);
        sregs->u.e.tb = tb;
        sregs->u.e.vrsave = vcpu->arch.vrsave;
}

static int set_sregs_base(struct kvm_vcpu *vcpu,
                          struct kvm_sregs *sregs)
{
        if (!(sregs->u.e.features & KVM_SREGS_E_BASE))
                return 0;

        vcpu->arch.csrr0 = sregs->u.e.csrr0;
        vcpu->arch.csrr1 = sregs->u.e.csrr1;
        vcpu->arch.mcsr = sregs->u.e.mcsr;
        kvmppc_set_esr(vcpu, sregs->u.e.esr);
        kvmppc_set_dar(vcpu, sregs->u.e.dear);
        vcpu->arch.vrsave = sregs->u.e.vrsave;
        kvmppc_set_tcr(vcpu, sregs->u.e.tcr);

        if (sregs->u.e.update_special & KVM_SREGS_E_UPDATE_DEC) {
                vcpu->arch.dec = sregs->u.e.dec;
                kvmppc_emulate_dec(vcpu);
        }

        if (sregs->u.e.update_special & KVM_SREGS_E_UPDATE_TSR)
                kvmppc_set_tsr(vcpu, sregs->u.e.tsr);

        return 0;
}

static void get_sregs_arch206(struct kvm_vcpu *vcpu,
                              struct kvm_sregs *sregs)
{
        sregs->u.e.features |= KVM_SREGS_E_ARCH206;

        sregs->u.e.pir = vcpu->vcpu_id;
        sregs->u.e.mcsrr0 = vcpu->arch.mcsrr0;
        sregs->u.e.mcsrr1 = vcpu->arch.mcsrr1;
        sregs->u.e.decar = vcpu->arch.decar;
        sregs->u.e.ivpr = vcpu->arch.ivpr;
}

static int set_sregs_arch206(struct kvm_vcpu *vcpu,
                             struct kvm_sregs *sregs)
{
        if (!(sregs->u.e.features & KVM_SREGS_E_ARCH206))
                return 0;

        if (sregs->u.e.pir != vcpu->vcpu_id)
                return -EINVAL;

        vcpu->arch.mcsrr0 = sregs->u.e.mcsrr0;
        vcpu->arch.mcsrr1 = sregs->u.e.mcsrr1;
        vcpu->arch.decar = sregs->u.e.decar;
        vcpu->arch.ivpr = sregs->u.e.ivpr;

        return 0;
}

int kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
        sregs->u.e.features |= KVM_SREGS_E_IVOR;

        sregs->u.e.ivor_low[0] = vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL];
        sregs->u.e.ivor_low[1] = vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK];
        sregs->u.e.ivor_low[2] = vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE];
        sregs->u.e.ivor_low[3] = vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE];
        sregs->u.e.ivor_low[4] = vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL];
        sregs->u.e.ivor_low[5] = vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT];
        sregs->u.e.ivor_low[6] = vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM];
        sregs->u.e.ivor_low[7] = vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL];
        sregs->u.e.ivor_low[8] = vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL];
        sregs->u.e.ivor_low[9] = vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL];
        sregs->u.e.ivor_low[10] = vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER];
        sregs->u.e.ivor_low[11] = vcpu->arch.ivor[BOOKE_IRQPRIO_FIT];
        sregs->u.e.ivor_low[12] = vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG];
        sregs->u.e.ivor_low[13] = vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS];
        sregs->u.e.ivor_low[14] = vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS];
        sregs->u.e.ivor_low[15] = vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG];
        return 0;
}

int kvmppc_set_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
        if (!(sregs->u.e.features & KVM_SREGS_E_IVOR))
                return 0;

        vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL] = sregs->u.e.ivor_low[0];
        vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK] = sregs->u.e.ivor_low[1];
        vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE] = sregs->u.e.ivor_low[2];
        vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE] = sregs->u.e.ivor_low[3];
        vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL] = sregs->u.e.ivor_low[4];
        vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT] = sregs->u.e.ivor_low[5];
        vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM] = sregs->u.e.ivor_low[6];
        vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL] = sregs->u.e.ivor_low[7];
        vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL] = sregs->u.e.ivor_low[8];
        vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL] = sregs->u.e.ivor_low[9];
        vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER] = sregs->u.e.ivor_low[10];
        vcpu->arch.ivor[BOOKE_IRQPRIO_FIT] = sregs->u.e.ivor_low[11];
        vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG] = sregs->u.e.ivor_low[12];
        vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS] = sregs->u.e.ivor_low[13];
        vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS] = sregs->u.e.ivor_low[14];
        vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG] = sregs->u.e.ivor_low[15];

        return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        int ret;

        vcpu_load(vcpu);

        sregs->pvr = vcpu->arch.pvr;

        get_sregs_base(vcpu, sregs);
        get_sregs_arch206(vcpu, sregs);
        ret = vcpu->kvm->arch.kvm_ops->get_sregs(vcpu, sregs);

        vcpu_put(vcpu);
        return ret;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        int ret = -EINVAL;

        vcpu_load(vcpu);
        if (vcpu->arch.pvr != sregs->pvr)
                goto out;

        ret = set_sregs_base(vcpu, sregs);
        if (ret < 0)
                goto out;

        ret = set_sregs_arch206(vcpu, sregs);
        if (ret < 0)
                goto out;

        ret = vcpu->kvm->arch.kvm_ops->set_sregs(vcpu, sregs);

out:
        vcpu_put(vcpu);
        return ret;
}

int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id,
                       union kvmppc_one_reg *val)
{
        int r = 0;

        switch (id) {
        case KVM_REG_PPC_IAC1:
                *val = get_reg_val(id, vcpu->arch.dbg_reg.iac1);
                break;
        case KVM_REG_PPC_IAC2:
                *val = get_reg_val(id, vcpu->arch.dbg_reg.iac2);
                break;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
        case KVM_REG_PPC_IAC3:
                *val = get_reg_val(id, vcpu->arch.dbg_reg.iac3);
                break;
        case KVM_REG_PPC_IAC4:
                *val = get_reg_val(id, vcpu->arch.dbg_reg.iac4);
                break;
#endif
        case KVM_REG_PPC_DAC1:
                *val = get_reg_val(id, vcpu->arch.dbg_reg.dac1);
                break;
        case KVM_REG_PPC_DAC2:
                *val = get_reg_val(id, vcpu->arch.dbg_reg.dac2);
                break;
        case KVM_REG_PPC_EPR: {
                u32 epr = kvmppc_get_epr(vcpu);
                *val = get_reg_val(id, epr);
                break;
        }
#if defined(CONFIG_64BIT)
        case KVM_REG_PPC_EPCR:
                *val = get_reg_val(id, vcpu->arch.epcr);
                break;
#endif
        case KVM_REG_PPC_TCR:
                *val = get_reg_val(id, vcpu->arch.tcr);
                break;
        case KVM_REG_PPC_TSR:
                *val = get_reg_val(id, vcpu->arch.tsr);
                break;
        case KVM_REG_PPC_DEBUG_INST:
                *val = get_reg_val(id, KVMPPC_INST_SW_BREAKPOINT);
                break;
        case KVM_REG_PPC_VRSAVE:
                *val = get_reg_val(id, vcpu->arch.vrsave);
                break;
        default:
                r = vcpu->kvm->arch.kvm_ops->get_one_reg(vcpu, id, val);
                break;
        }

        return r;
}

int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id,
                       union kvmppc_one_reg *val)
{
        int r = 0;

        switch (id) {
        case KVM_REG_PPC_IAC1:
                vcpu->arch.dbg_reg.iac1 = set_reg_val(id, *val);
                break;
        case KVM_REG_PPC_IAC2:
                vcpu->arch.dbg_reg.iac2 = set_reg_val(id, *val);
                break;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
        case KVM_REG_PPC_IAC3:
                vcpu->arch.dbg_reg.iac3 = set_reg_val(id, *val);
                break;
        case KVM_REG_PPC_IAC4:
                vcpu->arch.dbg_reg.iac4 = set_reg_val(id, *val);
                break;
#endif
        case KVM_REG_PPC_DAC1:
                vcpu->arch.dbg_reg.dac1 = set_reg_val(id, *val);
                break;
        case KVM_REG_PPC_DAC2:
                vcpu->arch.dbg_reg.dac2 = set_reg_val(id, *val);
                break;
        case KVM_REG_PPC_EPR: {
                u32 new_epr = set_reg_val(id, *val);
                kvmppc_set_epr(vcpu, new_epr);
                break;
        }
#if defined(CONFIG_64BIT)
        case KVM_REG_PPC_EPCR: {
                u32 new_epcr = set_reg_val(id, *val);
                kvmppc_set_epcr(vcpu, new_epcr);
                break;
        }
#endif
        case KVM_REG_PPC_OR_TSR: {
                u32 tsr_bits = set_reg_val(id, *val);
                kvmppc_set_tsr_bits(vcpu, tsr_bits);
                break;
        }
        case KVM_REG_PPC_CLEAR_TSR: {
                u32 tsr_bits = set_reg_val(id, *val);
                kvmppc_clr_tsr_bits(vcpu, tsr_bits);
                break;
        }
        case KVM_REG_PPC_TSR: {
                u32 tsr = set_reg_val(id, *val);
                kvmppc_set_tsr(vcpu, tsr);
                break;
        }
        case KVM_REG_PPC_TCR: {
                u32 tcr = set_reg_val(id, *val);
                kvmppc_set_tcr(vcpu, tcr);
                break;
        }
        case KVM_REG_PPC_VRSAVE:
                vcpu->arch.vrsave = set_reg_val(id, *val);
                break;
        default:
                r = vcpu->kvm->arch.kvm_ops->set_one_reg(vcpu, id, val);
                break;
        }

        return r;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        return -EOPNOTSUPP;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        return -EOPNOTSUPP;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
                                  struct kvm_translation *tr)
{
        int r;

        vcpu_load(vcpu);
        r = kvmppc_core_vcpu_translate(vcpu, tr);
        vcpu_put(vcpu);
        return r;
}

void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
{

}

int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
        return -EOPNOTSUPP;
}

void kvmppc_core_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
{
}

int kvmppc_core_prepare_memory_region(struct kvm *kvm,
                                      const struct kvm_memory_slot *old,
                                      struct kvm_memory_slot *new,
                                      enum kvm_mr_change change)
{
        return 0;
}

void kvmppc_core_commit_memory_region(struct kvm *kvm,
                                      struct kvm_memory_slot *old,
                                      const struct kvm_memory_slot *new,
                                      enum kvm_mr_change change)
{
}

void kvmppc_core_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot)
{
}

void kvmppc_set_epcr(struct kvm_vcpu *vcpu, u32 new_epcr)
{
#if defined(CONFIG_64BIT)
	vcpu->arch.epcr = new_epcr;
#ifdef CONFIG_KVM_BOOKE_HV
	/* Mirror the guest's computation-mode choice (ICM) into GICM. */
	vcpu->arch.shadow_epcr &= ~SPRN_EPCR_GICM;
	if (vcpu->arch.epcr & SPRN_EPCR_ICM)
		vcpu->arch.shadow_epcr |= SPRN_EPCR_GICM;
#endif
#endif
}

void kvmppc_set_tcr(struct kvm_vcpu *vcpu, u32 new_tcr)
{
	vcpu->arch.tcr = new_tcr;
	arm_next_watchdog(vcpu);
	update_timer_ints(vcpu);
}

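/*
 * Setting TSR bits can make a timer interrupt deliverable, so after the
 * update we poke the vcpu: the write barrier orders the TSR change
 * before the KVM_REQ_PENDING_TIMER request, and the kick forces a vcpu
 * in guest mode to come out and re-evaluate its pending interrupts.
 */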
void kvmppc_set_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits)
{
	set_bits(tsr_bits, &vcpu->arch.tsr);
	smp_wmb();
	kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu);
	kvm_vcpu_kick(vcpu);
}

void kvmppc_clr_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits)
{
	clear_bits(tsr_bits, &vcpu->arch.tsr);

	/*
	 * We may have stopped the watchdog due to
	 * being stuck on final expiration.
	 */
	if (tsr_bits & (TSR_ENW | TSR_WIS))
		arm_next_watchdog(vcpu);

	update_timer_ints(vcpu);
}

void kvmppc_decrementer_func(struct kvm_vcpu *vcpu)
{
	/* With auto-reload enabled (TCR.ARE), restart the decrementer
	 * from DECAR before raising the interrupt. */
	if (vcpu->arch.tcr & TCR_ARE) {
		vcpu->arch.dec = vcpu->arch.decar;
		kvmppc_emulate_dec(vcpu);
	}

	kvmppc_set_tsr_bits(vcpu, TSR_DIS);
}

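/*
 * Map the ordinal breakpoint/watchpoint slots handed down by
 * KVM_SET_GUEST_DEBUG onto the Book E debug facility: slot n becomes
 * IACn (instruction address compare) or DACn (data address compare),
 * enabled through the matching DBCR0 bits, with DBCR0_IDM turning
 * internal debug mode on.
 */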
static int kvmppc_booke_add_breakpoint(struct debug_reg *dbg_reg,
				       uint64_t addr, int index)
{
	switch (index) {
	case 0:
		dbg_reg->dbcr0 |= DBCR0_IAC1;
		dbg_reg->iac1 = addr;
		break;
	case 1:
		dbg_reg->dbcr0 |= DBCR0_IAC2;
		dbg_reg->iac2 = addr;
		break;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
	case 2:
		dbg_reg->dbcr0 |= DBCR0_IAC3;
		dbg_reg->iac3 = addr;
		break;
	case 3:
		dbg_reg->dbcr0 |= DBCR0_IAC4;
		dbg_reg->iac4 = addr;
		break;
#endif
	default:
		return -EINVAL;
	}

	dbg_reg->dbcr0 |= DBCR0_IDM;
	return 0;
}

static int kvmppc_booke_add_watchpoint(struct debug_reg *dbg_reg, uint64_t addr,
				       int type, int index)
{
	switch (index) {
	case 0:
		if (type & KVMPPC_DEBUG_WATCH_READ)
			dbg_reg->dbcr0 |= DBCR0_DAC1R;
		if (type & KVMPPC_DEBUG_WATCH_WRITE)
			dbg_reg->dbcr0 |= DBCR0_DAC1W;
		dbg_reg->dac1 = addr;
		break;
	case 1:
		if (type & KVMPPC_DEBUG_WATCH_READ)
			dbg_reg->dbcr0 |= DBCR0_DAC2R;
		if (type & KVMPPC_DEBUG_WATCH_WRITE)
			dbg_reg->dbcr0 |= DBCR0_DAC2W;
		dbg_reg->dac2 = addr;
		break;
	default:
		return -EINVAL;
	}

	dbg_reg->dbcr0 |= DBCR0_IDM;
	return 0;
}
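
/*
 * On Book E HV, MSRP ("MSR protect") bits make guest writes to the
 * corresponding MSR bits trap: UCLEP protects MSR.UCLE, DEP protects
 * MSR.DE, PMMP protects MSR.PMM.  The guest-debug code uses this to
 * keep a debugged guest from flipping MSR.DE behind the host's back.
 */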
void kvm_guest_protect_msr(struct kvm_vcpu *vcpu, ulong prot_bitmap, bool set)
{
	/* XXX: Add similar MSR protection for BookE-PR */
#ifdef CONFIG_KVM_BOOKE_HV
	BUG_ON(prot_bitmap & ~(MSRP_UCLEP | MSRP_DEP | MSRP_PMMP));
	if (set) {
		if (prot_bitmap & MSR_UCLE)
			vcpu->arch.shadow_msrp |= MSRP_UCLEP;
		if (prot_bitmap & MSR_DE)
			vcpu->arch.shadow_msrp |= MSRP_DEP;
		if (prot_bitmap & MSR_PMM)
			vcpu->arch.shadow_msrp |= MSRP_PMMP;
	} else {
		if (prot_bitmap & MSR_UCLE)
			vcpu->arch.shadow_msrp &= ~MSRP_UCLEP;
		if (prot_bitmap & MSR_DE)
			vcpu->arch.shadow_msrp &= ~MSRP_DEP;
		if (prot_bitmap & MSR_PMM)
			vcpu->arch.shadow_msrp &= ~MSRP_PMMP;
	}
#endif
}

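/*
 * Translate a guest effective address into a guest real address for an
 * instruction or data access.  On e500v2 the shared magic page is
 * handled first; everything else must have a valid entry in the guest
 * TLB, from which the real address is composed.
 */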
int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, enum xlate_instdata xlid,
		 enum xlate_readwrite xlrw, struct kvmppc_pte *pte)
{
	int gtlb_index;
	gpa_t gpaddr;

#ifdef CONFIG_KVM_E500V2
	if (!(vcpu->arch.shared->msr & MSR_PR) &&
	    (eaddr & PAGE_MASK) == vcpu->arch.magic_page_ea) {
		pte->eaddr = eaddr;
		pte->raddr = (vcpu->arch.magic_page_pa & PAGE_MASK) |
			     (eaddr & ~PAGE_MASK);
		pte->vpage = eaddr >> PAGE_SHIFT;
		pte->may_read = true;
		pte->may_write = true;
		pte->may_execute = true;

		return 0;
	}
#endif

	/* Check the guest TLB. */
	switch (xlid) {
	case XLATE_INST:
		gtlb_index = kvmppc_mmu_itlb_index(vcpu, eaddr);
		break;
	case XLATE_DATA:
		gtlb_index = kvmppc_mmu_dtlb_index(vcpu, eaddr);
		break;
	default:
		BUG();
	}

	/* Do we have a TLB entry at all? */
	if (gtlb_index < 0)
		return -ENOENT;

	gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);

	pte->eaddr = eaddr;
	pte->raddr = (gpaddr & PAGE_MASK) | (eaddr & ~PAGE_MASK);
	pte->vpage = eaddr >> PAGE_SHIFT;

	/* XXX read permissions from the guest TLB */
	pte->may_read = true;
	pte->may_write = true;
	pte->may_execute = true;

	return 0;
}

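/*
 * Reached via the KVM_SET_GUEST_DEBUG vcpu ioctl.  A debugger in
 * userspace would request one hardware breakpoint roughly like this
 * (illustrative sketch only; "vcpu_fd" and "bp_addr" are hypothetical
 * names, not part of this file):
 *
 *	struct kvm_guest_debug dbg = { 0 };
 *
 *	dbg.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
 *	dbg.arch.bp[0].addr = bp_addr;
 *	dbg.arch.bp[0].type = KVMPPC_DEBUG_BREAKPOINT;
 *	ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg);
 */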
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	struct debug_reg *dbg_reg;
	int n, b = 0, w = 0;
	int ret = 0;

	vcpu_load(vcpu);

	if (!(dbg->control & KVM_GUESTDBG_ENABLE)) {
		vcpu->arch.dbg_reg.dbcr0 = 0;
		vcpu->guest_debug = 0;
		kvm_guest_protect_msr(vcpu, MSR_DE, false);
		goto out;
	}

	kvm_guest_protect_msr(vcpu, MSR_DE, true);
	vcpu->guest_debug = dbg->control;
	vcpu->arch.dbg_reg.dbcr0 = 0;

	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
		vcpu->arch.dbg_reg.dbcr0 |= DBCR0_IDM | DBCR0_IC;

	/* Code below handles only HW breakpoints */
	dbg_reg = &(vcpu->arch.dbg_reg);

#ifdef CONFIG_KVM_BOOKE_HV
	/*
	 * On BookE-HV (e500mc) the guest is always executed with MSR.GS=1.
	 * DBCR1 and DBCR2 are set to trigger debug events when MSR.PR is 0.
	 */
	dbg_reg->dbcr1 = 0;
	dbg_reg->dbcr2 = 0;
#else
	/*
	 * On BookE-PR (e500v2) the guest is always executed with MSR.PR=1.
	 * We set DBCR1 and DBCR2 to only trigger debug events when MSR.PR
	 * is set.
	 */
	dbg_reg->dbcr1 = DBCR1_IAC1US | DBCR1_IAC2US | DBCR1_IAC3US |
			 DBCR1_IAC4US;
	dbg_reg->dbcr2 = DBCR2_DAC1US | DBCR2_DAC2US;
#endif

	if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
		goto out;

	ret = -EINVAL;
	for (n = 0; n < (KVMPPC_BOOKE_IAC_NUM + KVMPPC_BOOKE_DAC_NUM); n++) {
		uint64_t addr = dbg->arch.bp[n].addr;
		uint32_t type = dbg->arch.bp[n].type;

		if (type == KVMPPC_DEBUG_NONE)
			continue;

		if (type & ~(KVMPPC_DEBUG_WATCH_READ |
			     KVMPPC_DEBUG_WATCH_WRITE |
			     KVMPPC_DEBUG_BREAKPOINT))
			goto out;

		if (type & KVMPPC_DEBUG_BREAKPOINT) {
			/* Setting H/W breakpoint */
			if (kvmppc_booke_add_breakpoint(dbg_reg, addr, b++))
				goto out;
		} else {
			/* Setting H/W watchpoint */
			if (kvmppc_booke_add_watchpoint(dbg_reg, addr,
							type, w++))
				goto out;
		}
	}

	ret = 0;
out:
	vcpu_put(vcpu);
	return ret;
}

void kvmppc_booke_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	vcpu->cpu = smp_processor_id();
	current->thread.kvm_vcpu = vcpu;
}

void kvmppc_booke_vcpu_put(struct kvm_vcpu *vcpu)
{
	current->thread.kvm_vcpu = NULL;
	vcpu->cpu = -1;

	/* Clear pending debug event in DBSR */
	kvmppc_clear_dbsr();
}

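/*
 * The remaining entry points mostly dispatch straight to the
 * flavor-specific kvm_ops (e500 or e500mc) installed at VM creation;
 * only the common booke vcpu reset state is set up here.
 */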
int kvmppc_core_init_vm(struct kvm *kvm)
{
	return kvm->arch.kvm_ops->init_vm(kvm);
}

int kvmppc_core_vcpu_create(struct kvm_vcpu *vcpu)
{
	int i;
	int r;

	r = vcpu->kvm->arch.kvm_ops->vcpu_create(vcpu);
	if (r)
		return r;

	/* Initial guest state: 16MB mapping 0 -> 0, PC = 0, MSR = 0, R1 = 16MB */
	vcpu->arch.regs.nip = 0;
	vcpu->arch.shared->pir = vcpu->vcpu_id;
	kvmppc_set_gpr(vcpu, 1, (16<<20) - 8); /* -8 for the callee-save LR slot */
	kvmppc_set_msr(vcpu, 0);

#ifndef CONFIG_KVM_BOOKE_HV
	vcpu->arch.shadow_msr = MSR_USER | MSR_IS | MSR_DS;
	vcpu->arch.shadow_pid = 1;
	vcpu->arch.shared->msr = 0;
#endif

	/* Eye-catching numbers so we know if the guest takes an interrupt
	 * before it's programmed its own IVPR/IVORs. */
	vcpu->arch.ivpr = 0x55550000;
	for (i = 0; i < BOOKE_IRQPRIO_MAX; i++)
		vcpu->arch.ivor[i] = 0x7700 | i * 4;

	kvmppc_init_timing_stats(vcpu);

	r = kvmppc_core_vcpu_setup(vcpu);
	if (r)
		vcpu->kvm->arch.kvm_ops->vcpu_free(vcpu);
	kvmppc_sanity_check(vcpu);
	return r;
}

void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
{
	vcpu->kvm->arch.kvm_ops->vcpu_free(vcpu);
}

void kvmppc_core_destroy_vm(struct kvm *kvm)
{
	kvm->arch.kvm_ops->destroy_vm(kvm);
}

void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	vcpu->kvm->arch.kvm_ops->vcpu_load(vcpu, cpu);
}

void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
{
	vcpu->kvm->arch.kvm_ops->vcpu_put(vcpu);
}

int __init kvmppc_booke_init(void)
{
#ifndef CONFIG_KVM_BOOKE_HV
	unsigned long ivor[16];
	unsigned long *handler = kvmppc_booke_handler_addr;
	unsigned long max_ivor = 0;
	unsigned long handler_len;
	int i;

	/* We install our own exception handlers by hijacking IVPR. IVPR
	 * supplies only the top 16 bits of the handler base address, so
	 * the handlers must sit in a 64KB-aligned block; hence a 64KB
	 * allocation. */
	kvmppc_booke_handlers = __get_free_pages(GFP_KERNEL | __GFP_ZERO,
						 VCPU_SIZE_ORDER);
	if (!kvmppc_booke_handlers)
		return -ENOMEM;

	/* XXX make sure our handlers are smaller than Linux's */

	/* Copy our interrupt handlers to match host IVORs. That way we don't
	 * have to swap the IVORs on every guest/host transition. */
	ivor[0] = mfspr(SPRN_IVOR0);
	ivor[1] = mfspr(SPRN_IVOR1);
	ivor[2] = mfspr(SPRN_IVOR2);
	ivor[3] = mfspr(SPRN_IVOR3);
	ivor[4] = mfspr(SPRN_IVOR4);
	ivor[5] = mfspr(SPRN_IVOR5);
	ivor[6] = mfspr(SPRN_IVOR6);
	ivor[7] = mfspr(SPRN_IVOR7);
	ivor[8] = mfspr(SPRN_IVOR8);
	ivor[9] = mfspr(SPRN_IVOR9);
	ivor[10] = mfspr(SPRN_IVOR10);
	ivor[11] = mfspr(SPRN_IVOR11);
	ivor[12] = mfspr(SPRN_IVOR12);
	ivor[13] = mfspr(SPRN_IVOR13);
	ivor[14] = mfspr(SPRN_IVOR14);
	ivor[15] = mfspr(SPRN_IVOR15);

	for (i = 0; i < 16; i++) {
		/* Track the handler at the highest IVOR offset; it bounds
		 * the icache flush below. */
		if (ivor[i] > ivor[max_ivor])
			max_ivor = i;

		handler_len = handler[i + 1] - handler[i];
		memcpy((void *)kvmppc_booke_handlers + ivor[i],
		       (void *)handler[i], handler_len);
	}

	handler_len = handler[max_ivor + 1] - handler[max_ivor];
	flush_icache_range(kvmppc_booke_handlers, kvmppc_booke_handlers +
			   ivor[max_ivor] + handler_len);
#endif /* !BOOKE_HV */
	return 0;
}

void __exit kvmppc_booke_exit(void)
{
	free_pages(kvmppc_booke_handlers, VCPU_SIZE_ORDER);
	kvm_exit();
}