/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/gfp.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>

#include <asm/cputable.h>
#include <asm/uaccess.h>
#include <asm/kvm_ppc.h>
#include "timing.h"
#include <asm/cacheflush.h>

#include "booke.h"

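/* Host virtual base of the 64KB block holding our guest interrupt handlers;
 * allocated in kvmppc_booke_init() and installed in IVPR while a guest
 * runs. */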
unsigned long kvmppc_booke_handlers;

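/* Each debugfs entry below names an exit counter by its offset within
 * struct kvm (VM-wide) or struct kvm_vcpu (per-vcpu). */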
#define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

struct kvm_stats_debugfs_item debugfs_entries[] = {
        { "mmio",       VCPU_STAT(mmio_exits) },
        { "dcr",        VCPU_STAT(dcr_exits) },
        { "sig",        VCPU_STAT(signal_exits) },
        { "itlb_r",     VCPU_STAT(itlb_real_miss_exits) },
        { "itlb_v",     VCPU_STAT(itlb_virt_miss_exits) },
        { "dtlb_r",     VCPU_STAT(dtlb_real_miss_exits) },
        { "dtlb_v",     VCPU_STAT(dtlb_virt_miss_exits) },
        { "sysc",       VCPU_STAT(syscall_exits) },
        { "isi",        VCPU_STAT(isi_exits) },
        { "dsi",        VCPU_STAT(dsi_exits) },
        { "inst_emu",   VCPU_STAT(emulated_inst_exits) },
        { "dec",        VCPU_STAT(dec_exits) },
        { "ext_intr",   VCPU_STAT(ext_intr_exits) },
        { "halt_wakeup", VCPU_STAT(halt_wakeup) },
        { NULL }
};
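
/* The generic KVM code exposes each entry above through debugfs; with
 * debugfs mounted in the usual place they appear under
 * /sys/kernel/debug/kvm/ (e.g. .../dtlb_v). */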

/* TODO: use vcpu_printf() */
void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu)
{
        int i;

        printk("pc: %08lx msr: %08llx\n", vcpu->arch.pc, vcpu->arch.shared->msr);
        printk("lr: %08lx ctr: %08lx\n", vcpu->arch.lr, vcpu->arch.ctr);
        printk("srr0: %08llx srr1: %08llx\n", vcpu->arch.shared->srr0,
               vcpu->arch.shared->srr1);

        printk("exceptions: %08lx\n", vcpu->arch.pending_exceptions);

        for (i = 0; i < 32; i += 4) {
                printk("gpr%02d: %08lx %08lx %08lx %08lx\n", i,
                       kvmppc_get_gpr(vcpu, i),
                       kvmppc_get_gpr(vcpu, i+1),
                       kvmppc_get_gpr(vcpu, i+2),
                       kvmppc_get_gpr(vcpu, i+3));
        }
}

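/* Pending exceptions are tracked as a bitmap of BOOKE_IRQPRIO_* values in
 * vcpu->arch.pending_exceptions and consumed, in priority order, by
 * kvmppc_core_deliver_interrupts(). */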
static void kvmppc_booke_queue_irqprio(struct kvm_vcpu *vcpu,
                                       unsigned int priority)
{
        set_bit(priority, &vcpu->arch.pending_exceptions);
}

static void kvmppc_core_queue_dtlb_miss(struct kvm_vcpu *vcpu,
                                        ulong dear_flags, ulong esr_flags)
{
        vcpu->arch.queued_dear = dear_flags;
        vcpu->arch.queued_esr = esr_flags;
        kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DTLB_MISS);
}

static void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu,
                                           ulong dear_flags, ulong esr_flags)
{
        vcpu->arch.queued_dear = dear_flags;
        vcpu->arch.queued_esr = esr_flags;
        kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DATA_STORAGE);
}

static void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu,
                                           ulong esr_flags)
{
        vcpu->arch.queued_esr = esr_flags;
        kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_INST_STORAGE);
}

void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong esr_flags)
{
        vcpu->arch.queued_esr = esr_flags;
        kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_PROGRAM);
}

void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
{
        kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DECREMENTER);
}

int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu)
{
        return test_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
}

void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu)
{
        clear_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
}

void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
                                struct kvm_interrupt *irq)
{
        unsigned int prio = BOOKE_IRQPRIO_EXTERNAL;

        if (irq->irq == KVM_INTERRUPT_SET_LEVEL)
                prio = BOOKE_IRQPRIO_EXTERNAL_LEVEL;

        kvmppc_booke_queue_irqprio(vcpu, prio);
}

void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu,
                                  struct kvm_interrupt *irq)
{
        clear_bit(BOOKE_IRQPRIO_EXTERNAL, &vcpu->arch.pending_exceptions);
        clear_bit(BOOKE_IRQPRIO_EXTERNAL_LEVEL, &vcpu->arch.pending_exceptions);
}

/* Deliver the interrupt of the corresponding priority, if possible. */
static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
                                        unsigned int priority)
{
        int allowed = 0;
        ulong uninitialized_var(msr_mask);
        bool update_esr = false, update_dear = false;
        ulong crit_raw = vcpu->arch.shared->critical;
        ulong crit_r1 = kvmppc_get_gpr(vcpu, 1);
        bool crit;
        bool keep_irq = false;

        /* Truncate crit indicators in 32 bit mode */
        if (!(vcpu->arch.shared->msr & MSR_SF)) {
                crit_raw &= 0xffffffff;
                crit_r1 &= 0xffffffff;
        }

        /* Critical section when crit == r1 */
        crit = (crit_raw == crit_r1);
        /* ... and we're in supervisor mode */
        crit = crit && !(vcpu->arch.shared->msr & MSR_PR);

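        /* Level-triggered external interrupts must stay pending until
         * userspace explicitly dequeues them, so deliver at the ordinary
         * external priority but leave the pending bit set (keep_irq). */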
        if (priority == BOOKE_IRQPRIO_EXTERNAL_LEVEL) {
                priority = BOOKE_IRQPRIO_EXTERNAL;
                keep_irq = true;
        }

        switch (priority) {
        case BOOKE_IRQPRIO_DTLB_MISS:
        case BOOKE_IRQPRIO_DATA_STORAGE:
                update_dear = true;
                /* fall through */
        case BOOKE_IRQPRIO_INST_STORAGE:
        case BOOKE_IRQPRIO_PROGRAM:
                update_esr = true;
                /* fall through */
        case BOOKE_IRQPRIO_ITLB_MISS:
        case BOOKE_IRQPRIO_SYSCALL:
        case BOOKE_IRQPRIO_FP_UNAVAIL:
        case BOOKE_IRQPRIO_SPE_UNAVAIL:
        case BOOKE_IRQPRIO_SPE_FP_DATA:
        case BOOKE_IRQPRIO_SPE_FP_ROUND:
        case BOOKE_IRQPRIO_AP_UNAVAIL:
        case BOOKE_IRQPRIO_ALIGNMENT:
                allowed = 1;
                msr_mask = MSR_CE|MSR_ME|MSR_DE;
                break;
        case BOOKE_IRQPRIO_CRITICAL:
        case BOOKE_IRQPRIO_WATCHDOG:
                allowed = vcpu->arch.shared->msr & MSR_CE;
                msr_mask = MSR_ME;
                break;
        case BOOKE_IRQPRIO_MACHINE_CHECK:
                allowed = vcpu->arch.shared->msr & MSR_ME;
                msr_mask = 0;
                break;
        case BOOKE_IRQPRIO_EXTERNAL:
        case BOOKE_IRQPRIO_DECREMENTER:
        case BOOKE_IRQPRIO_FIT:
                allowed = vcpu->arch.shared->msr & MSR_EE;
                allowed = allowed && !crit;
                msr_mask = MSR_CE|MSR_ME|MSR_DE;
                break;
        case BOOKE_IRQPRIO_DEBUG:
                allowed = vcpu->arch.shared->msr & MSR_DE;
                msr_mask = MSR_ME;
                break;
        }

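        /* Deliver as the hardware would: stash PC and MSR in SRR0/SRR1,
         * vector through IVPR|IVOR, update ESR/DEAR if this exception type
         * carries them, and mask MSR down to the bits this exception class
         * leaves enabled. */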
        if (allowed) {
                vcpu->arch.shared->srr0 = vcpu->arch.pc;
                vcpu->arch.shared->srr1 = vcpu->arch.shared->msr;
                vcpu->arch.pc = vcpu->arch.ivpr | vcpu->arch.ivor[priority];
                if (update_esr)
                        vcpu->arch.esr = vcpu->arch.queued_esr;
                if (update_dear)
                        vcpu->arch.shared->dar = vcpu->arch.queued_dear;
                kvmppc_set_msr(vcpu, vcpu->arch.shared->msr & msr_mask);

                if (!keep_irq)
                        clear_bit(priority, &vcpu->arch.pending_exceptions);
        }

        return allowed;
}

/* Check pending exceptions and deliver one, if possible. */
void kvmppc_core_deliver_interrupts(struct kvm_vcpu *vcpu)
{
        unsigned long *pending = &vcpu->arch.pending_exceptions;
        unsigned long old_pending = vcpu->arch.pending_exceptions;
        unsigned int priority;

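        /* Bit numbers double as priorities: lower-numbered bits are tried
         * first, and we stop at the first exception the guest can currently
         * take. */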
        priority = __ffs(*pending);
        while (priority <= BOOKE_IRQPRIO_MAX) {
                if (kvmppc_booke_irqprio_deliver(vcpu, priority))
                        break;

                priority = find_next_bit(pending,
                                         BITS_PER_BYTE * sizeof(*pending),
                                         priority + 1);
        }

        /* Tell the guest about our interrupt status */
        if (*pending)
                vcpu->arch.shared->int_pending = 1;
        else if (old_pending)
                vcpu->arch.shared->int_pending = 0;
}

/**
 * kvmppc_handle_exit
 *
 * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV)
 */
int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
                       unsigned int exit_nr)
{
        enum emulation_result er;
        int r = RESUME_HOST;

        /* update before a new last_exit_type is rewritten */
        kvmppc_update_timing_stats(vcpu);

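        /* Hardware interrupts are disabled on the way out of the guest;
         * re-enable them here so the host can service whatever caused the
         * exit (e.g. its decrementer) before we do any more work. */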
        local_irq_enable();

        run->exit_reason = KVM_EXIT_UNKNOWN;
        run->ready_for_interrupt_injection = 1;

        switch (exit_nr) {
        case BOOKE_INTERRUPT_MACHINE_CHECK:
                printk("MACHINE CHECK: %lx\n", mfspr(SPRN_MCSR));
                kvmppc_dump_vcpu(vcpu);
                r = RESUME_HOST;
                break;

        case BOOKE_INTERRUPT_EXTERNAL:
                kvmppc_account_exit(vcpu, EXT_INTR_EXITS);
                if (need_resched())
                        cond_resched();
                r = RESUME_GUEST;
                break;

        case BOOKE_INTERRUPT_DECREMENTER:
                /* Since we switched IVPR back to the host's value, the host
                 * handled this interrupt the moment we enabled interrupts.
                 * Now we just offer it a chance to reschedule the guest. */
                kvmppc_account_exit(vcpu, DEC_EXITS);
                if (need_resched())
                        cond_resched();
                r = RESUME_GUEST;
                break;

        case BOOKE_INTERRUPT_PROGRAM:
                if (vcpu->arch.shared->msr & MSR_PR) {
                        /* Program traps generated by user-level software must
                         * be handled by the guest kernel. */
                        kvmppc_core_queue_program(vcpu, vcpu->arch.fault_esr);
                        r = RESUME_GUEST;
                        kvmppc_account_exit(vcpu, USR_PR_INST);
                        break;
                }

                er = kvmppc_emulate_instruction(run, vcpu);
                switch (er) {
                case EMULATE_DONE:
                        /* don't overwrite subtypes, just account kvm_stats */
                        kvmppc_account_exit_stat(vcpu, EMULATED_INST_EXITS);
                        /* Future optimization: only reload non-volatiles if
                         * they were actually modified by emulation. */
                        r = RESUME_GUEST_NV;
                        break;
                case EMULATE_DO_DCR:
                        run->exit_reason = KVM_EXIT_DCR;
                        r = RESUME_HOST;
                        break;
                case EMULATE_FAIL:
                        /* XXX Deliver Program interrupt to guest. */
                        printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
                               __func__, vcpu->arch.pc, vcpu->arch.last_inst);
                        /* For debugging, encode the failing instruction and
                         * report it to userspace. */
                        run->hw.hardware_exit_reason = ~0ULL << 32;
                        run->hw.hardware_exit_reason |= vcpu->arch.last_inst;
                        r = RESUME_HOST;
                        break;
                default:
                        BUG();
                }
                break;

        case BOOKE_INTERRUPT_FP_UNAVAIL:
                kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_FP_UNAVAIL);
                kvmppc_account_exit(vcpu, FP_UNAVAIL);
                r = RESUME_GUEST;
                break;

        case BOOKE_INTERRUPT_SPE_UNAVAIL:
                kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_UNAVAIL);
                r = RESUME_GUEST;
                break;

        case BOOKE_INTERRUPT_SPE_FP_DATA:
                kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_DATA);
                r = RESUME_GUEST;
                break;

        case BOOKE_INTERRUPT_SPE_FP_ROUND:
                kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_ROUND);
                r = RESUME_GUEST;
                break;

        case BOOKE_INTERRUPT_DATA_STORAGE:
                kvmppc_core_queue_data_storage(vcpu, vcpu->arch.fault_dear,
                                               vcpu->arch.fault_esr);
                kvmppc_account_exit(vcpu, DSI_EXITS);
                r = RESUME_GUEST;
                break;

        case BOOKE_INTERRUPT_INST_STORAGE:
                kvmppc_core_queue_inst_storage(vcpu, vcpu->arch.fault_esr);
                kvmppc_account_exit(vcpu, ISI_EXITS);
                r = RESUME_GUEST;
                break;

        case BOOKE_INTERRUPT_SYSCALL:
                if (!(vcpu->arch.shared->msr & MSR_PR) &&
                    (((u32)kvmppc_get_gpr(vcpu, 0)) == KVM_SC_MAGIC_R0)) {
                        /* KVM PV hypercalls */
                        kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
                        r = RESUME_GUEST;
                } else {
                        /* Guest syscalls */
                        kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SYSCALL);
                }
                kvmppc_account_exit(vcpu, SYSCALL_EXITS);
                r = RESUME_GUEST;
                break;

        case BOOKE_INTERRUPT_DTLB_MISS: {
                unsigned long eaddr = vcpu->arch.fault_dear;
                int gtlb_index;
                gpa_t gpaddr;
                gfn_t gfn;

                /* Check the guest TLB. */
                gtlb_index = kvmppc_mmu_dtlb_index(vcpu, eaddr);
                if (gtlb_index < 0) {
                        /* The guest didn't have a mapping for it. */
                        kvmppc_core_queue_dtlb_miss(vcpu,
                                                    vcpu->arch.fault_dear,
                                                    vcpu->arch.fault_esr);
                        kvmppc_mmu_dtlb_miss(vcpu);
                        kvmppc_account_exit(vcpu, DTLB_REAL_MISS_EXITS);
                        r = RESUME_GUEST;
                        break;
                }

                gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
                gfn = gpaddr >> PAGE_SHIFT;

                if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
                        /* The guest TLB had a mapping, but the shadow TLB
                         * didn't, and it is RAM. This could be because:
                         * a) the entry is mapping the host kernel, or
                         * b) the guest used a large mapping which we're faking
                         * Either way, we need to satisfy the fault without
                         * invoking the guest. */
                        kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
                        kvmppc_account_exit(vcpu, DTLB_VIRT_MISS_EXITS);
                        r = RESUME_GUEST;
                } else {
                        /* Guest has mapped and accessed a page which is not
                         * actually RAM. */
                        vcpu->arch.paddr_accessed = gpaddr;
                        r = kvmppc_emulate_mmio(run, vcpu);
                        kvmppc_account_exit(vcpu, MMIO_EXITS);
                }

                break;
        }

        case BOOKE_INTERRUPT_ITLB_MISS: {
                unsigned long eaddr = vcpu->arch.pc;
                gpa_t gpaddr;
                gfn_t gfn;
                int gtlb_index;

                r = RESUME_GUEST;

                /* Check the guest TLB. */
                gtlb_index = kvmppc_mmu_itlb_index(vcpu, eaddr);
                if (gtlb_index < 0) {
                        /* The guest didn't have a mapping for it. */
                        kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ITLB_MISS);
                        kvmppc_mmu_itlb_miss(vcpu);
                        kvmppc_account_exit(vcpu, ITLB_REAL_MISS_EXITS);
                        break;
                }

                kvmppc_account_exit(vcpu, ITLB_VIRT_MISS_EXITS);

                gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
                gfn = gpaddr >> PAGE_SHIFT;

                if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
                        /* The guest TLB had a mapping, but the shadow TLB
                         * didn't. This could be because:
                         * a) the entry is mapping the host kernel, or
                         * b) the guest used a large mapping which we're faking
                         * Either way, we need to satisfy the fault without
                         * invoking the guest. */
                        kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
                } else {
                        /* Guest mapped and leaped at non-RAM! */
                        kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_MACHINE_CHECK);
                }

                break;
        }

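        /* Debug interrupts use the critical save/restore registers here, so
         * the interrupted guest PC comes from CSRR0 rather than SRR0. */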
        case BOOKE_INTERRUPT_DEBUG: {
                u32 dbsr;

                vcpu->arch.pc = mfspr(SPRN_CSRR0);

                /* clear IAC events in DBSR register */
                dbsr = mfspr(SPRN_DBSR);
                dbsr &= DBSR_IAC1 | DBSR_IAC2 | DBSR_IAC3 | DBSR_IAC4;
                mtspr(SPRN_DBSR, dbsr);

                run->exit_reason = KVM_EXIT_DEBUG;
                kvmppc_account_exit(vcpu, DEBUG_EXITS);
                r = RESUME_HOST;
                break;
        }

        default:
                printk(KERN_EMERG "exit_nr %d\n", exit_nr);
                BUG();
        }

        local_irq_disable();

        kvmppc_core_deliver_interrupts(vcpu);

        if (!(r & RESUME_HOST)) {
                /* To avoid clobbering exit_reason, only check for signals if
                 * we aren't already exiting to userspace for some other
                 * reason. */
                if (signal_pending(current)) {
                        run->exit_reason = KVM_EXIT_INTR;
                        r = (-EINTR << 2) | RESUME_HOST | (r & RESUME_FLAG_NV);
                        kvmppc_account_exit(vcpu, SIGNAL_EXITS);
                }
        }

        return r;
}

/* Initial guest state: 16MB mapping 0 -> 0, PC = 0, MSR = 0, R1 = 16MB */
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
        int i;

        vcpu->arch.pc = 0;
        vcpu->arch.shared->msr = 0;
        kvmppc_set_gpr(vcpu, 1, (16<<20) - 8); /* -8 for the callee-save LR slot */

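        /* Guest translations go into the hardware TLB under a shadow PID
         * rather than the guest's own PID value; the core-specific MMU code
         * maintains the mapping between the two. */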
        vcpu->arch.shadow_pid = 1;

        /* Eye-catching numbers so we know if the guest takes an interrupt
         * before it's programmed its own IVPR/IVORs. */
        vcpu->arch.ivpr = 0x55550000;
        for (i = 0; i < BOOKE_IRQPRIO_MAX; i++)
                vcpu->arch.ivor[i] = 0x7700 | i * 4;

        kvmppc_init_timing_stats(vcpu);

        return kvmppc_core_vcpu_setup(vcpu);
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        int i;

        regs->pc = vcpu->arch.pc;
        regs->cr = kvmppc_get_cr(vcpu);
        regs->ctr = vcpu->arch.ctr;
        regs->lr = vcpu->arch.lr;
        regs->xer = kvmppc_get_xer(vcpu);
        regs->msr = vcpu->arch.shared->msr;
        regs->srr0 = vcpu->arch.shared->srr0;
        regs->srr1 = vcpu->arch.shared->srr1;
        regs->pid = vcpu->arch.pid;
        regs->sprg0 = vcpu->arch.shared->sprg0;
        regs->sprg1 = vcpu->arch.shared->sprg1;
        regs->sprg2 = vcpu->arch.shared->sprg2;
        regs->sprg3 = vcpu->arch.shared->sprg3;
        regs->sprg4 = vcpu->arch.sprg4;
        regs->sprg5 = vcpu->arch.sprg5;
        regs->sprg6 = vcpu->arch.sprg6;
        regs->sprg7 = vcpu->arch.sprg7;

        for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
                regs->gpr[i] = kvmppc_get_gpr(vcpu, i);

        return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        int i;

        vcpu->arch.pc = regs->pc;
        kvmppc_set_cr(vcpu, regs->cr);
        vcpu->arch.ctr = regs->ctr;
        vcpu->arch.lr = regs->lr;
        kvmppc_set_xer(vcpu, regs->xer);
        kvmppc_set_msr(vcpu, regs->msr);
        vcpu->arch.shared->srr0 = regs->srr0;
        vcpu->arch.shared->srr1 = regs->srr1;
        vcpu->arch.shared->sprg0 = regs->sprg0;
        vcpu->arch.shared->sprg1 = regs->sprg1;
        vcpu->arch.shared->sprg2 = regs->sprg2;
        vcpu->arch.shared->sprg3 = regs->sprg3;
        vcpu->arch.sprg4 = regs->sprg4;
        vcpu->arch.sprg5 = regs->sprg5;
        vcpu->arch.sprg6 = regs->sprg6;
        vcpu->arch.sprg7 = regs->sprg7;

        for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
                kvmppc_set_gpr(vcpu, i, regs->gpr[i]);

        return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
                                  struct kvm_translation *tr)
{
        int r;

        r = kvmppc_core_vcpu_translate(vcpu, tr);
        return r;
}

int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
        return -ENOTSUPP;
}

int __init kvmppc_booke_init(void)
{
        unsigned long ivor[16];
        unsigned long max_ivor = 0;
        int i;

        /* We install our own exception handlers by hijacking IVPR. IVPR holds
         * only the upper 16 bits of the handler base address, so the handlers
         * must live in a 64KB-aligned 64KB block; an order-based page
         * allocation is naturally aligned to its own size. */
        kvmppc_booke_handlers = __get_free_pages(GFP_KERNEL | __GFP_ZERO,
                                                 VCPU_SIZE_ORDER);
        if (!kvmppc_booke_handlers)
                return -ENOMEM;

        /* XXX make sure our handlers are smaller than Linux's */

        /* Copy our interrupt handlers to match host IVORs. That way we don't
         * have to swap the IVORs on every guest/host transition. */
        ivor[0] = mfspr(SPRN_IVOR0);
        ivor[1] = mfspr(SPRN_IVOR1);
        ivor[2] = mfspr(SPRN_IVOR2);
        ivor[3] = mfspr(SPRN_IVOR3);
        ivor[4] = mfspr(SPRN_IVOR4);
        ivor[5] = mfspr(SPRN_IVOR5);
        ivor[6] = mfspr(SPRN_IVOR6);
        ivor[7] = mfspr(SPRN_IVOR7);
        ivor[8] = mfspr(SPRN_IVOR8);
        ivor[9] = mfspr(SPRN_IVOR9);
        ivor[10] = mfspr(SPRN_IVOR10);
        ivor[11] = mfspr(SPRN_IVOR11);
        ivor[12] = mfspr(SPRN_IVOR12);
        ivor[13] = mfspr(SPRN_IVOR13);
        ivor[14] = mfspr(SPRN_IVOR14);
        ivor[15] = mfspr(SPRN_IVOR15);

        for (i = 0; i < 16; i++) {
                if (ivor[i] > max_ivor)
                        max_ivor = ivor[i];

                memcpy((void *)kvmppc_booke_handlers + ivor[i],
                       kvmppc_handlers_start + i * kvmppc_handler_len,
                       kvmppc_handler_len);
        }
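        /* We just wrote instructions into the handler block, so make them
         * visible to the instruction fetch path before any CPU runs them. */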
        flush_icache_range(kvmppc_booke_handlers,
                           kvmppc_booke_handlers + max_ivor + kvmppc_handler_len);

        return 0;
}

void __exit kvmppc_booke_exit(void)
{
        free_pages(kvmppc_booke_handlers, VCPU_SIZE_ORDER);
        kvm_exit();
}
