1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 *
4 * Copyright IBM Corp. 2007
5 *
6 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
7 * Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
8 */
9
10 #include <linux/errno.h>
11 #include <linux/err.h>
12 #include <linux/kvm_host.h>
13 #include <linux/vmalloc.h>
14 #include <linux/hrtimer.h>
15 #include <linux/sched/signal.h>
16 #include <linux/fs.h>
17 #include <linux/slab.h>
18 #include <linux/file.h>
19 #include <linux/module.h>
20 #include <linux/irqbypass.h>
21 #include <linux/kvm_irqfd.h>
22 #include <linux/of.h>
23 #include <asm/cputable.h>
24 #include <linux/uaccess.h>
25 #include <asm/kvm_ppc.h>
26 #include <asm/cputhreads.h>
27 #include <asm/irqflags.h>
28 #include <asm/iommu.h>
29 #include <asm/switch_to.h>
30 #include <asm/xive.h>
31 #ifdef CONFIG_PPC_PSERIES
32 #include <asm/hvcall.h>
33 #include <asm/plpar_wrappers.h>
34 #endif
35 #include <asm/ultravisor.h>
36 #include <asm/setup.h>
37
38 #include "timing.h"
39 #include "irq.h"
40 #include "../mm/mmu_decl.h"
41
42 #define CREATE_TRACE_POINTS
43 #include "trace.h"
44
45 struct kvmppc_ops *kvmppc_hv_ops;
46 EXPORT_SYMBOL_GPL(kvmppc_hv_ops);
47 struct kvmppc_ops *kvmppc_pr_ops;
48 EXPORT_SYMBOL_GPL(kvmppc_pr_ops);
49
50
51 int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
52 {
53 return !!(v->arch.pending_exceptions) || kvm_request_pending(v);
54 }
55
56 bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
57 {
58 return kvm_arch_vcpu_runnable(vcpu);
59 }
60
61 bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
62 {
63 return false;
64 }
65
66 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
67 {
68 return 1;
69 }
70
71 /*
72 * Common checks before entering the guest world. Call with interrupts
73 * disabled.
74 *
75 * returns:
76 *
77 * == 1 if we're ready to go into guest state
78 * <= 0 if we need to go back to the host with return value
79 */
80 int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
81 {
82 int r;
83
84 WARN_ON(irqs_disabled());
85 hard_irq_disable();
86
87 while (true) {
88 if (need_resched()) {
89 local_irq_enable();
90 cond_resched();
91 hard_irq_disable();
92 continue;
93 }
94
95 if (signal_pending(current)) {
96 kvmppc_account_exit(vcpu, SIGNAL_EXITS);
97 vcpu->run->exit_reason = KVM_EXIT_INTR;
98 r = -EINTR;
99 break;
100 }
101
102 vcpu->mode = IN_GUEST_MODE;
103
104 /*
105 * Reading vcpu->requests must happen after setting vcpu->mode,
106 * so we don't miss a request because the requester sees
107 * OUTSIDE_GUEST_MODE and assumes we'll be checking requests
108 * before next entering the guest (and thus doesn't IPI).
109 * This also orders the write to mode from any reads
110 * to the page tables done while the VCPU is running.
111 * Please see the comment in kvm_flush_remote_tlbs.
112 */
113 smp_mb();
114
115 if (kvm_request_pending(vcpu)) {
116 /* Make sure we process requests with preemption enabled */
117 local_irq_enable();
118 trace_kvm_check_requests(vcpu);
119 r = kvmppc_core_check_requests(vcpu);
120 hard_irq_disable();
121 if (r > 0)
122 continue;
123 break;
124 }
125
126 if (kvmppc_core_prepare_to_enter(vcpu)) {
127 /* interrupts got enabled in between, so we
128 are back at square 1 */
129 continue;
130 }
131
132 guest_enter_irqoff();
133 return 1;
134 }
135
136 /* return to host */
137 local_irq_enable();
138 return r;
139 }
140 EXPORT_SYMBOL_GPL(kvmppc_prepare_to_enter);
141
142 #if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
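/*
 * Byte-swap every field of the vcpu's shared (magic) page. Used when the
 * guest switches endianness so the stored layout matches the new mode.
 */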
143 static void kvmppc_swab_shared(struct kvm_vcpu *vcpu)
144 {
145 struct kvm_vcpu_arch_shared *shared = vcpu->arch.shared;
146 int i;
147
148 shared->sprg0 = swab64(shared->sprg0);
149 shared->sprg1 = swab64(shared->sprg1);
150 shared->sprg2 = swab64(shared->sprg2);
151 shared->sprg3 = swab64(shared->sprg3);
152 shared->srr0 = swab64(shared->srr0);
153 shared->srr1 = swab64(shared->srr1);
154 shared->dar = swab64(shared->dar);
155 shared->msr = swab64(shared->msr);
156 shared->dsisr = swab32(shared->dsisr);
157 shared->int_pending = swab32(shared->int_pending);
158 for (i = 0; i < ARRAY_SIZE(shared->sr); i++)
159 shared->sr[i] = swab32(shared->sr[i]);
160 }
161 #endif
162
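/*
 * Handle a KVM paravirtual hypercall from the guest. The hypercall number
 * is taken from r11 and up to four parameters from r3-r6; the second
 * return value is passed back to the guest in r4.
 */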
163 int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
164 {
165 int nr = kvmppc_get_gpr(vcpu, 11);
166 int r;
167 unsigned long __maybe_unused param1 = kvmppc_get_gpr(vcpu, 3);
168 unsigned long __maybe_unused param2 = kvmppc_get_gpr(vcpu, 4);
169 unsigned long __maybe_unused param3 = kvmppc_get_gpr(vcpu, 5);
170 unsigned long __maybe_unused param4 = kvmppc_get_gpr(vcpu, 6);
171 unsigned long r2 = 0;
172
173 if (!(kvmppc_get_msr(vcpu) & MSR_SF)) {
174 /* 32 bit mode */
175 param1 &= 0xffffffff;
176 param2 &= 0xffffffff;
177 param3 &= 0xffffffff;
178 param4 &= 0xffffffff;
179 }
180
181 switch (nr) {
182 case KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE):
183 {
184 #if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
185 /* Book3S can be little endian, find it out here */
186 int shared_big_endian = true;
187 if (vcpu->arch.intr_msr & MSR_LE)
188 shared_big_endian = false;
189 if (shared_big_endian != vcpu->arch.shared_big_endian)
190 kvmppc_swab_shared(vcpu);
191 vcpu->arch.shared_big_endian = shared_big_endian;
192 #endif
193
194 if (!(param2 & MAGIC_PAGE_FLAG_NOT_MAPPED_NX)) {
195 /*
196 * Older versions of the Linux magic page code had
197 * a bug where they would map their trampoline code
198 * NX. If that's the case, remove !PR NX capability.
199 */
200 vcpu->arch.disable_kernel_nx = true;
201 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
202 }
203
204 vcpu->arch.magic_page_pa = param1 & ~0xfffULL;
205 vcpu->arch.magic_page_ea = param2 & ~0xfffULL;
206
207 #ifdef CONFIG_PPC_64K_PAGES
208 /*
209 * Make sure our 4k magic page is in the same window of a 64k
210 * page within the guest and within the host's page.
211 */
212 if ((vcpu->arch.magic_page_pa & 0xf000) !=
213 ((ulong)vcpu->arch.shared & 0xf000)) {
214 void *old_shared = vcpu->arch.shared;
215 ulong shared = (ulong)vcpu->arch.shared;
216 void *new_shared;
217
218 shared &= PAGE_MASK;
219 shared |= vcpu->arch.magic_page_pa & 0xf000;
220 new_shared = (void*)shared;
221 memcpy(new_shared, old_shared, 0x1000);
222 vcpu->arch.shared = new_shared;
223 }
224 #endif
225
226 r2 = KVM_MAGIC_FEAT_SR | KVM_MAGIC_FEAT_MAS0_TO_SPRG7;
227
228 r = EV_SUCCESS;
229 break;
230 }
231 case KVM_HCALL_TOKEN(KVM_HC_FEATURES):
232 r = EV_SUCCESS;
233 #if defined(CONFIG_PPC_BOOK3S) || defined(CONFIG_KVM_E500V2)
234 r2 |= (1 << KVM_FEATURE_MAGIC_PAGE);
235 #endif
236
237 /* Second return value is in r4 */
238 break;
239 case EV_HCALL_TOKEN(EV_IDLE):
240 r = EV_SUCCESS;
241 kvm_vcpu_halt(vcpu);
242 break;
243 default:
244 r = EV_UNIMPLEMENTED;
245 break;
246 }
247
248 kvmppc_set_gpr(vcpu, 4, r2);
249
250 return r;
251 }
252 EXPORT_SYMBOL_GPL(kvmppc_kvm_pv);
253
254 int kvmppc_sanity_check(struct kvm_vcpu *vcpu)
255 {
256 int r = false;
257
258 /* We have to know what CPU to virtualize */
259 if (!vcpu->arch.pvr)
260 goto out;
261
262 /* PAPR only works with book3s_64 */
263 if ((vcpu->arch.cpu_type != KVM_CPU_3S_64) && vcpu->arch.papr_enabled)
264 goto out;
265
266 /* HV KVM can only do PAPR mode for now */
267 if (!vcpu->arch.papr_enabled && is_kvmppc_hv_enabled(vcpu->kvm))
268 goto out;
269
270 #ifdef CONFIG_KVM_BOOKE_HV
271 if (!cpu_has_feature(CPU_FTR_EMB_HV))
272 goto out;
273 #endif
274
275 r = true;
276
277 out:
278 vcpu->arch.sane = r;
279 return r ? 0 : -EINVAL;
280 }
281 EXPORT_SYMBOL_GPL(kvmppc_sanity_check);
282
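/*
 * Emulate a guest load/store that faulted on device (MMIO) memory and
 * translate the emulation result into a RESUME_* action for the caller.
 */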
283 int kvmppc_emulate_mmio(struct kvm_vcpu *vcpu)
284 {
285 enum emulation_result er;
286 int r;
287
288 er = kvmppc_emulate_loadstore(vcpu);
289 switch (er) {
290 case EMULATE_DONE:
291 /* Future optimization: only reload non-volatiles if they were
292 * actually modified. */
293 r = RESUME_GUEST_NV;
294 break;
295 case EMULATE_AGAIN:
296 r = RESUME_GUEST;
297 break;
298 case EMULATE_DO_MMIO:
299 vcpu->run->exit_reason = KVM_EXIT_MMIO;
300 /* We must reload nonvolatiles because "update" load/store
301 * instructions modify register state. */
302 /* Future optimization: only reload non-volatiles if they were
303 * actually modified. */
304 r = RESUME_HOST_NV;
305 break;
306 case EMULATE_FAIL:
307 {
308 u32 last_inst;
309
310 kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
311 kvm_debug_ratelimited("Guest access to device memory using unsupported instruction (opcode: %#08x)\n",
312 last_inst);
313
314 /*
315 * Injecting a Data Storage here is a bit more
316 * accurate since the instruction that caused the
317 * access could still be a valid one.
318 */
319 if (!IS_ENABLED(CONFIG_BOOKE)) {
320 ulong dsisr = DSISR_BADACCESS;
321
322 if (vcpu->mmio_is_write)
323 dsisr |= DSISR_ISSTORE;
324
325 kvmppc_core_queue_data_storage(vcpu, vcpu->arch.vaddr_accessed, dsisr);
326 } else {
327 /*
328 * BookE does not send a SIGBUS on a bad
329 * fault, so use a Program interrupt instead
330 * to avoid a fault loop.
331 */
332 kvmppc_core_queue_program(vcpu, 0);
333 }
334
335 r = RESUME_GUEST;
336 break;
337 }
338 default:
339 WARN_ON(1);
340 r = RESUME_GUEST;
341 }
342
343 return r;
344 }
345 EXPORT_SYMBOL_GPL(kvmppc_emulate_mmio);
346
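/*
 * Store 'size' bytes from 'ptr' at guest effective address '*eaddr'.
 * The address is translated first and the magic page is handled in
 * place; returns EMULATE_DONE, EMULATE_DO_MMIO or a negative error.
 */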
347 int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
348 bool data)
349 {
350 ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
351 struct kvmppc_pte pte;
352 int r = -EINVAL;
353
354 vcpu->stat.st++;
355
356 if (vcpu->kvm->arch.kvm_ops && vcpu->kvm->arch.kvm_ops->store_to_eaddr)
357 r = vcpu->kvm->arch.kvm_ops->store_to_eaddr(vcpu, eaddr, ptr,
358 size);
359
360 if ((!r) || (r == -EAGAIN))
361 return r;
362
363 r = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
364 XLATE_WRITE, &pte);
365 if (r < 0)
366 return r;
367
368 *eaddr = pte.raddr;
369
370 if (!pte.may_write)
371 return -EPERM;
372
373 /* Magic page override */
374 if (kvmppc_supports_magic_page(vcpu) && mp_pa &&
375 ((pte.raddr & KVM_PAM & PAGE_MASK) == mp_pa) &&
376 !(kvmppc_get_msr(vcpu) & MSR_PR)) {
377 void *magic = vcpu->arch.shared;
378 magic += pte.eaddr & 0xfff;
379 memcpy(magic, ptr, size);
380 return EMULATE_DONE;
381 }
382
383 if (kvm_write_guest(vcpu->kvm, pte.raddr, ptr, size))
384 return EMULATE_DO_MMIO;
385
386 return EMULATE_DONE;
387 }
388 EXPORT_SYMBOL_GPL(kvmppc_st);
389
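/*
 * Load 'size' bytes into 'ptr' from guest effective address '*eaddr',
 * translating the address and honouring the magic page override;
 * returns EMULATE_DONE, EMULATE_DO_MMIO or a negative error.
 */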
390 int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
391 bool data)
392 {
393 ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
394 struct kvmppc_pte pte;
395 int rc = -EINVAL;
396
397 vcpu->stat.ld++;
398
399 if (vcpu->kvm->arch.kvm_ops && vcpu->kvm->arch.kvm_ops->load_from_eaddr)
400 rc = vcpu->kvm->arch.kvm_ops->load_from_eaddr(vcpu, eaddr, ptr,
401 size);
402
403 if ((!rc) || (rc == -EAGAIN))
404 return rc;
405
406 rc = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
407 XLATE_READ, &pte);
408 if (rc)
409 return rc;
410
411 *eaddr = pte.raddr;
412
413 if (!pte.may_read)
414 return -EPERM;
415
416 if (!data && !pte.may_execute)
417 return -ENOEXEC;
418
419 /* Magic page override */
420 if (kvmppc_supports_magic_page(vcpu) && mp_pa &&
421 ((pte.raddr & KVM_PAM & PAGE_MASK) == mp_pa) &&
422 !(kvmppc_get_msr(vcpu) & MSR_PR)) {
423 void *magic = vcpu->arch.shared;
424 magic += pte.eaddr & 0xfff;
425 memcpy(ptr, magic, size);
426 return EMULATE_DONE;
427 }
428
429 kvm_vcpu_srcu_read_lock(vcpu);
430 rc = kvm_read_guest(vcpu->kvm, pte.raddr, ptr, size);
431 kvm_vcpu_srcu_read_unlock(vcpu);
432 if (rc)
433 return EMULATE_DO_MMIO;
434
435 return EMULATE_DONE;
436 }
437 EXPORT_SYMBOL_GPL(kvmppc_ld);
438
439 int kvm_arch_hardware_enable(void)
440 {
441 return 0;
442 }
443
444 int kvm_arch_hardware_setup(void *opaque)
445 {
446 return 0;
447 }
448
449 int kvm_arch_check_processor_compat(void *opaque)
450 {
451 return kvmppc_core_check_processor_compat();
452 }
453
454 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
455 {
456 struct kvmppc_ops *kvm_ops = NULL;
457 int r;
458
459 /*
460 * if we have both HV and PR enabled, default is HV
461 */
462 if (type == 0) {
463 if (kvmppc_hv_ops)
464 kvm_ops = kvmppc_hv_ops;
465 else
466 kvm_ops = kvmppc_pr_ops;
467 if (!kvm_ops)
468 goto err_out;
469 } else if (type == KVM_VM_PPC_HV) {
470 if (!kvmppc_hv_ops)
471 goto err_out;
472 kvm_ops = kvmppc_hv_ops;
473 } else if (type == KVM_VM_PPC_PR) {
474 if (!kvmppc_pr_ops)
475 goto err_out;
476 kvm_ops = kvmppc_pr_ops;
477 } else
478 goto err_out;
479
480 if (!try_module_get(kvm_ops->owner))
481 return -ENOENT;
482
483 kvm->arch.kvm_ops = kvm_ops;
484 r = kvmppc_core_init_vm(kvm);
485 if (r)
486 module_put(kvm_ops->owner);
487 return r;
488 err_out:
489 return -EINVAL;
490 }
491
492 void kvm_arch_destroy_vm(struct kvm *kvm)
493 {
494 #ifdef CONFIG_KVM_XICS
495 /*
496 * We call kick_all_cpus_sync() to ensure that all
497 * CPUs have executed any pending IPIs before we
498 * continue and free VCPUs structures below.
499 */
500 if (is_kvmppc_hv_enabled(kvm))
501 kick_all_cpus_sync();
502 #endif
503
504 kvm_destroy_vcpus(kvm);
505
506 mutex_lock(&kvm->lock);
507
508 kvmppc_core_destroy_vm(kvm);
509
510 mutex_unlock(&kvm->lock);
511
512 /* drop the module reference */
513 module_put(kvm->arch.kvm_ops->owner);
514 }
515
516 int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
517 {
518 int r;
519 /* Assume we're using HV mode when the HV module is loaded */
520 int hv_enabled = kvmppc_hv_ops ? 1 : 0;
521
522 if (kvm) {
523 /*
524 * Hooray - we know which VM type we're running on. Depend on
525 * that rather than the guess above.
526 */
527 hv_enabled = is_kvmppc_hv_enabled(kvm);
528 }
529
530 switch (ext) {
531 #ifdef CONFIG_BOOKE
532 case KVM_CAP_PPC_BOOKE_SREGS:
533 case KVM_CAP_PPC_BOOKE_WATCHDOG:
534 case KVM_CAP_PPC_EPR:
535 #else
536 case KVM_CAP_PPC_SEGSTATE:
537 case KVM_CAP_PPC_HIOR:
538 case KVM_CAP_PPC_PAPR:
539 #endif
540 case KVM_CAP_PPC_UNSET_IRQ:
541 case KVM_CAP_PPC_IRQ_LEVEL:
542 case KVM_CAP_ENABLE_CAP:
543 case KVM_CAP_ONE_REG:
544 case KVM_CAP_IOEVENTFD:
545 case KVM_CAP_DEVICE_CTRL:
546 case KVM_CAP_IMMEDIATE_EXIT:
547 case KVM_CAP_SET_GUEST_DEBUG:
548 r = 1;
549 break;
550 case KVM_CAP_PPC_GUEST_DEBUG_SSTEP:
551 case KVM_CAP_PPC_PAIRED_SINGLES:
552 case KVM_CAP_PPC_OSI:
553 case KVM_CAP_PPC_GET_PVINFO:
554 #if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
555 case KVM_CAP_SW_TLB:
556 #endif
557 /* We support this only for PR */
558 r = !hv_enabled;
559 break;
560 #ifdef CONFIG_KVM_MPIC
561 case KVM_CAP_IRQ_MPIC:
562 r = 1;
563 break;
564 #endif
565
566 #ifdef CONFIG_PPC_BOOK3S_64
567 case KVM_CAP_SPAPR_TCE:
568 case KVM_CAP_SPAPR_TCE_64:
569 r = 1;
570 break;
571 case KVM_CAP_SPAPR_TCE_VFIO:
572 r = !!cpu_has_feature(CPU_FTR_HVMODE);
573 break;
574 case KVM_CAP_PPC_RTAS:
575 case KVM_CAP_PPC_FIXUP_HCALL:
576 case KVM_CAP_PPC_ENABLE_HCALL:
577 #ifdef CONFIG_KVM_XICS
578 case KVM_CAP_IRQ_XICS:
579 #endif
580 case KVM_CAP_PPC_GET_CPU_CHAR:
581 r = 1;
582 break;
583 #ifdef CONFIG_KVM_XIVE
584 case KVM_CAP_PPC_IRQ_XIVE:
585 /*
586 * We need XIVE to be enabled on the platform (implies
587 * a POWER9 processor) and the PowerNV platform, as
588 * nested is not yet supported.
589 */
590 r = xive_enabled() && !!cpu_has_feature(CPU_FTR_HVMODE) &&
591 kvmppc_xive_native_supported();
592 break;
593 #endif
594
595 case KVM_CAP_PPC_ALLOC_HTAB:
596 r = hv_enabled;
597 break;
598 #endif /* CONFIG_PPC_BOOK3S_64 */
599 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
600 case KVM_CAP_PPC_SMT:
601 r = 0;
602 if (kvm) {
603 if (kvm->arch.emul_smt_mode > 1)
604 r = kvm->arch.emul_smt_mode;
605 else
606 r = kvm->arch.smt_mode;
607 } else if (hv_enabled) {
608 if (cpu_has_feature(CPU_FTR_ARCH_300))
609 r = 1;
610 else
611 r = threads_per_subcore;
612 }
613 break;
614 case KVM_CAP_PPC_SMT_POSSIBLE:
615 r = 1;
616 if (hv_enabled) {
617 if (!cpu_has_feature(CPU_FTR_ARCH_300))
618 r = ((threads_per_subcore << 1) - 1);
619 else
620 /* P9 can emulate dbells, so allow any mode */
621 r = 8 | 4 | 2 | 1;
622 }
623 break;
624 case KVM_CAP_PPC_RMA:
625 r = 0;
626 break;
627 case KVM_CAP_PPC_HWRNG:
628 r = kvmppc_hwrng_present();
629 break;
630 case KVM_CAP_PPC_MMU_RADIX:
631 r = !!(hv_enabled && radix_enabled());
632 break;
633 case KVM_CAP_PPC_MMU_HASH_V3:
634 r = !!(hv_enabled && kvmppc_hv_ops->hash_v3_possible &&
635 kvmppc_hv_ops->hash_v3_possible());
636 break;
637 case KVM_CAP_PPC_NESTED_HV:
638 r = !!(hv_enabled && kvmppc_hv_ops->enable_nested &&
639 !kvmppc_hv_ops->enable_nested(NULL));
640 break;
641 #endif
642 case KVM_CAP_SYNC_MMU:
643 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
644 r = hv_enabled;
645 #elif defined(KVM_ARCH_WANT_MMU_NOTIFIER)
646 r = 1;
647 #else
648 r = 0;
649 #endif
650 break;
651 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
652 case KVM_CAP_PPC_HTAB_FD:
653 r = hv_enabled;
654 break;
655 #endif
656 case KVM_CAP_NR_VCPUS:
657 /*
658 * Recommending a number of CPUs is somewhat arbitrary; we
659 * return the number of present CPUs for -HV (since a host
660 * will have secondary threads "offline"), and for other KVM
661 * implementations just count online CPUs.
662 */
663 if (hv_enabled)
664 r = min_t(unsigned int, num_present_cpus(), KVM_MAX_VCPUS);
665 else
666 r = min_t(unsigned int, num_online_cpus(), KVM_MAX_VCPUS);
667 break;
668 case KVM_CAP_MAX_VCPUS:
669 r = KVM_MAX_VCPUS;
670 break;
671 case KVM_CAP_MAX_VCPU_ID:
672 r = KVM_MAX_VCPU_IDS;
673 break;
674 #ifdef CONFIG_PPC_BOOK3S_64
675 case KVM_CAP_PPC_GET_SMMU_INFO:
676 r = 1;
677 break;
678 case KVM_CAP_SPAPR_MULTITCE:
679 r = 1;
680 break;
681 case KVM_CAP_SPAPR_RESIZE_HPT:
682 r = !!hv_enabled;
683 break;
684 #endif
685 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
686 case KVM_CAP_PPC_FWNMI:
687 r = hv_enabled;
688 break;
689 #endif
690 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
691 case KVM_CAP_PPC_HTM:
692 r = !!(cur_cpu_spec->cpu_user_features2 & PPC_FEATURE2_HTM) ||
693 (hv_enabled && cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST));
694 break;
695 #endif
696 #if defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE)
697 case KVM_CAP_PPC_SECURE_GUEST:
698 r = hv_enabled && kvmppc_hv_ops->enable_svm &&
699 !kvmppc_hv_ops->enable_svm(NULL);
700 break;
701 case KVM_CAP_PPC_DAWR1:
702 r = !!(hv_enabled && kvmppc_hv_ops->enable_dawr1 &&
703 !kvmppc_hv_ops->enable_dawr1(NULL));
704 break;
705 case KVM_CAP_PPC_RPT_INVALIDATE:
706 r = 1;
707 break;
708 #endif
709 case KVM_CAP_PPC_AIL_MODE_3:
710 r = 0;
711 /*
712 * KVM PR, POWER7, and some POWER9s don't support AIL=3 mode.
713 * The POWER9s can support it if the guest runs in hash mode,
714 * but QEMU doesn't necessarily query the capability in time.
715 */
716 if (hv_enabled) {
717 if (kvmhv_on_pseries()) {
718 if (pseries_reloc_on_exception())
719 r = 1;
720 } else if (cpu_has_feature(CPU_FTR_ARCH_207S) &&
721 !cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG)) {
722 r = 1;
723 }
724 }
725 break;
726 default:
727 r = 0;
728 break;
729 }
730 return r;
731
732 }
733
734 long kvm_arch_dev_ioctl(struct file *filp,
735 unsigned int ioctl, unsigned long arg)
736 {
737 return -EINVAL;
738 }
739
740 void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
741 {
742 kvmppc_core_free_memslot(kvm, slot);
743 }
744
745 int kvm_arch_prepare_memory_region(struct kvm *kvm,
746 const struct kvm_memory_slot *old,
747 struct kvm_memory_slot *new,
748 enum kvm_mr_change change)
749 {
750 return kvmppc_core_prepare_memory_region(kvm, old, new, change);
751 }
752
753 void kvm_arch_commit_memory_region(struct kvm *kvm,
754 struct kvm_memory_slot *old,
755 const struct kvm_memory_slot *new,
756 enum kvm_mr_change change)
757 {
758 kvmppc_core_commit_memory_region(kvm, old, new, change);
759 }
760
761 void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
762 struct kvm_memory_slot *slot)
763 {
764 kvmppc_core_flush_memslot(kvm, slot);
765 }
766
767 int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
768 {
769 return 0;
770 }
771
772 static enum hrtimer_restart kvmppc_decrementer_wakeup(struct hrtimer *timer)
773 {
774 struct kvm_vcpu *vcpu;
775
776 vcpu = container_of(timer, struct kvm_vcpu, arch.dec_timer);
777 kvmppc_decrementer_func(vcpu);
778
779 return HRTIMER_NORESTART;
780 }
781
782 int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
783 {
784 int err;
785
786 hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
787 vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup;
788
789 #ifdef CONFIG_KVM_EXIT_TIMING
790 mutex_init(&vcpu->arch.exit_timing_lock);
791 #endif
792 err = kvmppc_subarch_vcpu_init(vcpu);
793 if (err)
794 return err;
795
796 err = kvmppc_core_vcpu_create(vcpu);
797 if (err)
798 goto out_vcpu_uninit;
799
800 rcuwait_init(&vcpu->arch.wait);
801 vcpu->arch.waitp = &vcpu->arch.wait;
802 return 0;
803
804 out_vcpu_uninit:
805 kvmppc_subarch_vcpu_uninit(vcpu);
806 return err;
807 }
808
809 void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
810 {
811 }
812
813 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
814 {
815 /* Make sure we're not using the vcpu anymore */
816 hrtimer_cancel(&vcpu->arch.dec_timer);
817
818 switch (vcpu->arch.irq_type) {
819 case KVMPPC_IRQ_MPIC:
820 kvmppc_mpic_disconnect_vcpu(vcpu->arch.mpic, vcpu);
821 break;
822 case KVMPPC_IRQ_XICS:
823 if (xics_on_xive())
824 kvmppc_xive_cleanup_vcpu(vcpu);
825 else
826 kvmppc_xics_free_icp(vcpu);
827 break;
828 case KVMPPC_IRQ_XIVE:
829 kvmppc_xive_native_cleanup_vcpu(vcpu);
830 break;
831 }
832
833 kvmppc_core_vcpu_free(vcpu);
834
835 kvmppc_subarch_vcpu_uninit(vcpu);
836 }
837
838 int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
839 {
840 return kvmppc_core_pending_dec(vcpu);
841 }
842
843 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
844 {
845 #ifdef CONFIG_BOOKE
846 /*
847 * vrsave (formerly usprg0) isn't used by Linux, but may
848 * be used by the guest.
849 *
850 * On non-booke this is associated with Altivec and
851 * is handled by code in book3s.c.
852 */
853 mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
854 #endif
855 kvmppc_core_vcpu_load(vcpu, cpu);
856 }
857
858 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
859 {
860 kvmppc_core_vcpu_put(vcpu);
861 #ifdef CONFIG_BOOKE
862 vcpu->arch.vrsave = mfspr(SPRN_VRSAVE);
863 #endif
864 }
865
866 /*
867 * irq_bypass_add_producer and irq_bypass_del_producer are only
868 * useful if the architecture supports PCI passthrough.
869 * irq_bypass_stop and irq_bypass_start are not needed and so
870 * kvm_ops are not defined for them.
871 */
872 bool kvm_arch_has_irq_bypass(void)
873 {
874 return ((kvmppc_hv_ops && kvmppc_hv_ops->irq_bypass_add_producer) ||
875 (kvmppc_pr_ops && kvmppc_pr_ops->irq_bypass_add_producer));
876 }
877
878 int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons,
879 struct irq_bypass_producer *prod)
880 {
881 struct kvm_kernel_irqfd *irqfd =
882 container_of(cons, struct kvm_kernel_irqfd, consumer);
883 struct kvm *kvm = irqfd->kvm;
884
885 if (kvm->arch.kvm_ops->irq_bypass_add_producer)
886 return kvm->arch.kvm_ops->irq_bypass_add_producer(cons, prod);
887
888 return 0;
889 }
890
891 void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
892 struct irq_bypass_producer *prod)
893 {
894 struct kvm_kernel_irqfd *irqfd =
895 container_of(cons, struct kvm_kernel_irqfd, consumer);
896 struct kvm *kvm = irqfd->kvm;
897
898 if (kvm->arch.kvm_ops->irq_bypass_del_producer)
899 kvm->arch.kvm_ops->irq_bypass_del_producer(cons, prod);
900 }
901
902 #ifdef CONFIG_VSX
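/*
 * Map a VSX doubleword index (0 or 1) to its offset within the register
 * image, accounting for host endianness; returns -1 for a bad index.
 */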
903 static inline int kvmppc_get_vsr_dword_offset(int index)
904 {
905 int offset;
906
907 if ((index != 0) && (index != 1))
908 return -1;
909
910 #ifdef __BIG_ENDIAN
911 offset = index;
912 #else
913 offset = 1 - index;
914 #endif
915
916 return offset;
917 }
918
919 static inline int kvmppc_get_vsr_word_offset(int index)
920 {
921 int offset;
922
923 if ((index > 3) || (index < 0))
924 return -1;
925
926 #ifdef __BIG_ENDIAN
927 offset = index;
928 #else
929 offset = 3 - index;
930 #endif
931 return offset;
932 }
933
934 static inline void kvmppc_set_vsr_dword(struct kvm_vcpu *vcpu,
935 u64 gpr)
936 {
937 union kvmppc_one_reg val;
938 int offset = kvmppc_get_vsr_dword_offset(vcpu->arch.mmio_vsx_offset);
939 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
940
941 if (offset == -1)
942 return;
943
944 if (index >= 32) {
945 val.vval = VCPU_VSX_VR(vcpu, index - 32);
946 val.vsxval[offset] = gpr;
947 VCPU_VSX_VR(vcpu, index - 32) = val.vval;
948 } else {
949 VCPU_VSX_FPR(vcpu, index, offset) = gpr;
950 }
951 }
952
953 static inline void kvmppc_set_vsr_dword_dump(struct kvm_vcpu *vcpu,
954 u64 gpr)
955 {
956 union kvmppc_one_reg val;
957 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
958
959 if (index >= 32) {
960 val.vval = VCPU_VSX_VR(vcpu, index - 32);
961 val.vsxval[0] = gpr;
962 val.vsxval[1] = gpr;
963 VCPU_VSX_VR(vcpu, index - 32) = val.vval;
964 } else {
965 VCPU_VSX_FPR(vcpu, index, 0) = gpr;
966 VCPU_VSX_FPR(vcpu, index, 1) = gpr;
967 }
968 }
969
970 static inline void kvmppc_set_vsr_word_dump(struct kvm_vcpu *vcpu,
971 u32 gpr)
972 {
973 union kvmppc_one_reg val;
974 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
975
976 if (index >= 32) {
977 val.vsx32val[0] = gpr;
978 val.vsx32val[1] = gpr;
979 val.vsx32val[2] = gpr;
980 val.vsx32val[3] = gpr;
981 VCPU_VSX_VR(vcpu, index - 32) = val.vval;
982 } else {
983 val.vsx32val[0] = gpr;
984 val.vsx32val[1] = gpr;
985 VCPU_VSX_FPR(vcpu, index, 0) = val.vsxval[0];
986 VCPU_VSX_FPR(vcpu, index, 1) = val.vsxval[0];
987 }
988 }
989
990 static inline void kvmppc_set_vsr_word(struct kvm_vcpu *vcpu,
991 u32 gpr32)
992 {
993 union kvmppc_one_reg val;
994 int offset = kvmppc_get_vsr_word_offset(vcpu->arch.mmio_vsx_offset);
995 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
996 int dword_offset, word_offset;
997
998 if (offset == -1)
999 return;
1000
1001 if (index >= 32) {
1002 val.vval = VCPU_VSX_VR(vcpu, index - 32);
1003 val.vsx32val[offset] = gpr32;
1004 VCPU_VSX_VR(vcpu, index - 32) = val.vval;
1005 } else {
1006 dword_offset = offset / 2;
1007 word_offset = offset % 2;
1008 val.vsxval[0] = VCPU_VSX_FPR(vcpu, index, dword_offset);
1009 val.vsx32val[word_offset] = gpr32;
1010 VCPU_VSX_FPR(vcpu, index, dword_offset) = val.vsxval[0];
1011 }
1012 }
1013 #endif /* CONFIG_VSX */
1014
1015 #ifdef CONFIG_ALTIVEC
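/*
 * Convert an element index within a vector register into its offset for
 * the given element size, reversing the order when the guest needs
 * byte-swapping; returns -1 if the index is out of range.
 */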
1016 static inline int kvmppc_get_vmx_offset_generic(struct kvm_vcpu *vcpu,
1017 int index, int element_size)
1018 {
1019 int offset;
1020 int elts = sizeof(vector128)/element_size;
1021
1022 if ((index < 0) || (index >= elts))
1023 return -1;
1024
1025 if (kvmppc_need_byteswap(vcpu))
1026 offset = elts - index - 1;
1027 else
1028 offset = index;
1029
1030 return offset;
1031 }
1032
1033 static inline int kvmppc_get_vmx_dword_offset(struct kvm_vcpu *vcpu,
1034 int index)
1035 {
1036 return kvmppc_get_vmx_offset_generic(vcpu, index, 8);
1037 }
1038
1039 static inline int kvmppc_get_vmx_word_offset(struct kvm_vcpu *vcpu,
1040 int index)
1041 {
1042 return kvmppc_get_vmx_offset_generic(vcpu, index, 4);
1043 }
1044
1045 static inline int kvmppc_get_vmx_hword_offset(struct kvm_vcpu *vcpu,
1046 int index)
1047 {
1048 return kvmppc_get_vmx_offset_generic(vcpu, index, 2);
1049 }
1050
1051 static inline int kvmppc_get_vmx_byte_offset(struct kvm_vcpu *vcpu,
1052 int index)
1053 {
1054 return kvmppc_get_vmx_offset_generic(vcpu, index, 1);
1055 }
1056
1057
1058 static inline void kvmppc_set_vmx_dword(struct kvm_vcpu *vcpu,
1059 u64 gpr)
1060 {
1061 union kvmppc_one_reg val;
1062 int offset = kvmppc_get_vmx_dword_offset(vcpu,
1063 vcpu->arch.mmio_vmx_offset);
1064 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
1065
1066 if (offset == -1)
1067 return;
1068
1069 val.vval = VCPU_VSX_VR(vcpu, index);
1070 val.vsxval[offset] = gpr;
1071 VCPU_VSX_VR(vcpu, index) = val.vval;
1072 }
1073
1074 static inline void kvmppc_set_vmx_word(struct kvm_vcpu *vcpu,
1075 u32 gpr32)
1076 {
1077 union kvmppc_one_reg val;
1078 int offset = kvmppc_get_vmx_word_offset(vcpu,
1079 vcpu->arch.mmio_vmx_offset);
1080 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
1081
1082 if (offset == -1)
1083 return;
1084
1085 val.vval = VCPU_VSX_VR(vcpu, index);
1086 val.vsx32val[offset] = gpr32;
1087 VCPU_VSX_VR(vcpu, index) = val.vval;
1088 }
1089
1090 static inline void kvmppc_set_vmx_hword(struct kvm_vcpu *vcpu,
1091 u16 gpr16)
1092 {
1093 union kvmppc_one_reg val;
1094 int offset = kvmppc_get_vmx_hword_offset(vcpu,
1095 vcpu->arch.mmio_vmx_offset);
1096 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
1097
1098 if (offset == -1)
1099 return;
1100
1101 val.vval = VCPU_VSX_VR(vcpu, index);
1102 val.vsx16val[offset] = gpr16;
1103 VCPU_VSX_VR(vcpu, index) = val.vval;
1104 }
1105
1106 static inline void kvmppc_set_vmx_byte(struct kvm_vcpu *vcpu,
1107 u8 gpr8)
1108 {
1109 union kvmppc_one_reg val;
1110 int offset = kvmppc_get_vmx_byte_offset(vcpu,
1111 vcpu->arch.mmio_vmx_offset);
1112 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
1113
1114 if (offset == -1)
1115 return;
1116
1117 val.vval = VCPU_VSX_VR(vcpu, index);
1118 val.vsx8val[offset] = gpr8;
1119 VCPU_VSX_VR(vcpu, index) = val.vval;
1120 }
1121 #endif /* CONFIG_ALTIVEC */
1122
1123 #ifdef CONFIG_PPC_FPU
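/*
 * Convert between single- and double-precision values by bouncing them
 * through FPR0, letting the FPU do the format conversion.
 */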
1124 static inline u64 sp_to_dp(u32 fprs)
1125 {
1126 u64 fprd;
1127
1128 preempt_disable();
1129 enable_kernel_fp();
1130 asm ("lfs%U1%X1 0,%1; stfd%U0%X0 0,%0" : "=m<>" (fprd) : "m<>" (fprs)
1131 : "fr0");
1132 preempt_enable();
1133 return fprd;
1134 }
1135
1136 static inline u32 dp_to_sp(u64 fprd)
1137 {
1138 u32 fprs;
1139
1140 preempt_disable();
1141 enable_kernel_fp();
1142 asm ("lfd%U1%X1 0,%1; stfs%U0%X0 0,%0" : "=m<>" (fprs) : "m<>" (fprd)
1143 : "fr0");
1144 preempt_enable();
1145 return fprs;
1146 }
1147
1148 #else
1149 #define sp_to_dp(x) (x)
1150 #define dp_to_sp(x) (x)
1151 #endif /* CONFIG_PPC_FPU */
1152
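/*
 * Complete an MMIO load: copy the data returned in vcpu->run into the
 * destination register, applying byte-swapping, single-to-double
 * conversion and sign extension as recorded when the load was started.
 */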
1153 static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu)
1154 {
1155 struct kvm_run *run = vcpu->run;
1156 u64 gpr;
1157
1158 if (run->mmio.len > sizeof(gpr))
1159 return;
1160
1161 if (!vcpu->arch.mmio_host_swabbed) {
1162 switch (run->mmio.len) {
1163 case 8: gpr = *(u64 *)run->mmio.data; break;
1164 case 4: gpr = *(u32 *)run->mmio.data; break;
1165 case 2: gpr = *(u16 *)run->mmio.data; break;
1166 case 1: gpr = *(u8 *)run->mmio.data; break;
1167 }
1168 } else {
1169 switch (run->mmio.len) {
1170 case 8: gpr = swab64(*(u64 *)run->mmio.data); break;
1171 case 4: gpr = swab32(*(u32 *)run->mmio.data); break;
1172 case 2: gpr = swab16(*(u16 *)run->mmio.data); break;
1173 case 1: gpr = *(u8 *)run->mmio.data; break;
1174 }
1175 }
1176
1177 /* conversion between single and double precision */
1178 if ((vcpu->arch.mmio_sp64_extend) && (run->mmio.len == 4))
1179 gpr = sp_to_dp(gpr);
1180
1181 if (vcpu->arch.mmio_sign_extend) {
1182 switch (run->mmio.len) {
1183 #ifdef CONFIG_PPC64
1184 case 4:
1185 gpr = (s64)(s32)gpr;
1186 break;
1187 #endif
1188 case 2:
1189 gpr = (s64)(s16)gpr;
1190 break;
1191 case 1:
1192 gpr = (s64)(s8)gpr;
1193 break;
1194 }
1195 }
1196
1197 switch (vcpu->arch.io_gpr & KVM_MMIO_REG_EXT_MASK) {
1198 case KVM_MMIO_REG_GPR:
1199 kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
1200 break;
1201 case KVM_MMIO_REG_FPR:
1202 if (vcpu->kvm->arch.kvm_ops->giveup_ext)
1203 vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_FP);
1204
1205 VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
1206 break;
1207 #ifdef CONFIG_PPC_BOOK3S
1208 case KVM_MMIO_REG_QPR:
1209 vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
1210 break;
1211 case KVM_MMIO_REG_FQPR:
1212 VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
1213 vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
1214 break;
1215 #endif
1216 #ifdef CONFIG_VSX
1217 case KVM_MMIO_REG_VSX:
1218 if (vcpu->kvm->arch.kvm_ops->giveup_ext)
1219 vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_VSX);
1220
1221 if (vcpu->arch.mmio_copy_type == KVMPPC_VSX_COPY_DWORD)
1222 kvmppc_set_vsr_dword(vcpu, gpr);
1223 else if (vcpu->arch.mmio_copy_type == KVMPPC_VSX_COPY_WORD)
1224 kvmppc_set_vsr_word(vcpu, gpr);
1225 else if (vcpu->arch.mmio_copy_type ==
1226 KVMPPC_VSX_COPY_DWORD_LOAD_DUMP)
1227 kvmppc_set_vsr_dword_dump(vcpu, gpr);
1228 else if (vcpu->arch.mmio_copy_type ==
1229 KVMPPC_VSX_COPY_WORD_LOAD_DUMP)
1230 kvmppc_set_vsr_word_dump(vcpu, gpr);
1231 break;
1232 #endif
1233 #ifdef CONFIG_ALTIVEC
1234 case KVM_MMIO_REG_VMX:
1235 if (vcpu->kvm->arch.kvm_ops->giveup_ext)
1236 vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_VEC);
1237
1238 if (vcpu->arch.mmio_copy_type == KVMPPC_VMX_COPY_DWORD)
1239 kvmppc_set_vmx_dword(vcpu, gpr);
1240 else if (vcpu->arch.mmio_copy_type == KVMPPC_VMX_COPY_WORD)
1241 kvmppc_set_vmx_word(vcpu, gpr);
1242 else if (vcpu->arch.mmio_copy_type ==
1243 KVMPPC_VMX_COPY_HWORD)
1244 kvmppc_set_vmx_hword(vcpu, gpr);
1245 else if (vcpu->arch.mmio_copy_type ==
1246 KVMPPC_VMX_COPY_BYTE)
1247 kvmppc_set_vmx_byte(vcpu, gpr);
1248 break;
1249 #endif
1250 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
1251 case KVM_MMIO_REG_NESTED_GPR:
1252 if (kvmppc_need_byteswap(vcpu))
1253 gpr = swab64(gpr);
1254 kvm_vcpu_write_guest(vcpu, vcpu->arch.nested_io_gpr, &gpr,
1255 sizeof(gpr));
1256 break;
1257 #endif
1258 default:
1259 BUG();
1260 }
1261 }
1262
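/*
 * Common MMIO load path: try the in-kernel I/O bus first and fall back
 * to exiting to userspace with KVM_EXIT_MMIO if nobody claims the access.
 */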
1263 static int __kvmppc_handle_load(struct kvm_vcpu *vcpu,
1264 unsigned int rt, unsigned int bytes,
1265 int is_default_endian, int sign_extend)
1266 {
1267 struct kvm_run *run = vcpu->run;
1268 int idx, ret;
1269 bool host_swabbed;
1270
1271 /* Pity C doesn't have a logical XOR operator */
1272 if (kvmppc_need_byteswap(vcpu)) {
1273 host_swabbed = is_default_endian;
1274 } else {
1275 host_swabbed = !is_default_endian;
1276 }
1277
1278 if (bytes > sizeof(run->mmio.data))
1279 return EMULATE_FAIL;
1280
1281 run->mmio.phys_addr = vcpu->arch.paddr_accessed;
1282 run->mmio.len = bytes;
1283 run->mmio.is_write = 0;
1284
1285 vcpu->arch.io_gpr = rt;
1286 vcpu->arch.mmio_host_swabbed = host_swabbed;
1287 vcpu->mmio_needed = 1;
1288 vcpu->mmio_is_write = 0;
1289 vcpu->arch.mmio_sign_extend = sign_extend;
1290
1291 idx = srcu_read_lock(&vcpu->kvm->srcu);
1292
1293 ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr,
1294 bytes, &run->mmio.data);
1295
1296 srcu_read_unlock(&vcpu->kvm->srcu, idx);
1297
1298 if (!ret) {
1299 kvmppc_complete_mmio_load(vcpu);
1300 vcpu->mmio_needed = 0;
1301 return EMULATE_DONE;
1302 }
1303
1304 return EMULATE_DO_MMIO;
1305 }
1306
1307 int kvmppc_handle_load(struct kvm_vcpu *vcpu,
1308 unsigned int rt, unsigned int bytes,
1309 int is_default_endian)
1310 {
1311 return __kvmppc_handle_load(vcpu, rt, bytes, is_default_endian, 0);
1312 }
1313 EXPORT_SYMBOL_GPL(kvmppc_handle_load);
1314
1315 /* Same as above, but sign extends */
1316 int kvmppc_handle_loads(struct kvm_vcpu *vcpu,
1317 unsigned int rt, unsigned int bytes,
1318 int is_default_endian)
1319 {
1320 return __kvmppc_handle_load(vcpu, rt, bytes, is_default_endian, 1);
1321 }
1322
1323 #ifdef CONFIG_VSX
1324 int kvmppc_handle_vsx_load(struct kvm_vcpu *vcpu,
1325 unsigned int rt, unsigned int bytes,
1326 int is_default_endian, int mmio_sign_extend)
1327 {
1328 enum emulation_result emulated = EMULATE_DONE;
1329
1330 /* Currently, mmio_vsx_copy_nums only allowed to be 4 or less */
1331 if (vcpu->arch.mmio_vsx_copy_nums > 4)
1332 return EMULATE_FAIL;
1333
1334 while (vcpu->arch.mmio_vsx_copy_nums) {
1335 emulated = __kvmppc_handle_load(vcpu, rt, bytes,
1336 is_default_endian, mmio_sign_extend);
1337
1338 if (emulated != EMULATE_DONE)
1339 break;
1340
1341 vcpu->arch.paddr_accessed += vcpu->run->mmio.len;
1342
1343 vcpu->arch.mmio_vsx_copy_nums--;
1344 vcpu->arch.mmio_vsx_offset++;
1345 }
1346 return emulated;
1347 }
1348 #endif /* CONFIG_VSX */
1349
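/*
 * Start an MMIO store: place 'val' in guest byte order in the kvm_run
 * MMIO buffer and try the in-kernel I/O bus before exiting to userspace.
 */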
1350 int kvmppc_handle_store(struct kvm_vcpu *vcpu,
1351 u64 val, unsigned int bytes, int is_default_endian)
1352 {
1353 struct kvm_run *run = vcpu->run;
1354 void *data = run->mmio.data;
1355 int idx, ret;
1356 bool host_swabbed;
1357
1358 /* Pity C doesn't have a logical XOR operator */
1359 if (kvmppc_need_byteswap(vcpu)) {
1360 host_swabbed = is_default_endian;
1361 } else {
1362 host_swabbed = !is_default_endian;
1363 }
1364
1365 if (bytes > sizeof(run->mmio.data))
1366 return EMULATE_FAIL;
1367
1368 run->mmio.phys_addr = vcpu->arch.paddr_accessed;
1369 run->mmio.len = bytes;
1370 run->mmio.is_write = 1;
1371 vcpu->mmio_needed = 1;
1372 vcpu->mmio_is_write = 1;
1373
1374 if ((vcpu->arch.mmio_sp64_extend) && (bytes == 4))
1375 val = dp_to_sp(val);
1376
1377 /* Store the value at the lowest bytes in 'data'. */
1378 if (!host_swabbed) {
1379 switch (bytes) {
1380 case 8: *(u64 *)data = val; break;
1381 case 4: *(u32 *)data = val; break;
1382 case 2: *(u16 *)data = val; break;
1383 case 1: *(u8 *)data = val; break;
1384 }
1385 } else {
1386 switch (bytes) {
1387 case 8: *(u64 *)data = swab64(val); break;
1388 case 4: *(u32 *)data = swab32(val); break;
1389 case 2: *(u16 *)data = swab16(val); break;
1390 case 1: *(u8 *)data = val; break;
1391 }
1392 }
1393
1394 idx = srcu_read_lock(&vcpu->kvm->srcu);
1395
1396 ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr,
1397 bytes, &run->mmio.data);
1398
1399 srcu_read_unlock(&vcpu->kvm->srcu, idx);
1400
1401 if (!ret) {
1402 vcpu->mmio_needed = 0;
1403 return EMULATE_DONE;
1404 }
1405
1406 return EMULATE_DO_MMIO;
1407 }
1408 EXPORT_SYMBOL_GPL(kvmppc_handle_store);
1409
1410 #ifdef CONFIG_VSX
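/*
 * Fetch the doubleword or word to be stored from the source VSX register
 * at the current mmio_vsx_offset; returns -1 on an invalid offset or
 * copy type.
 */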
1411 static inline int kvmppc_get_vsr_data(struct kvm_vcpu *vcpu, int rs, u64 *val)
1412 {
1413 u32 dword_offset, word_offset;
1414 union kvmppc_one_reg reg;
1415 int vsx_offset = 0;
1416 int copy_type = vcpu->arch.mmio_copy_type;
1417 int result = 0;
1418
1419 switch (copy_type) {
1420 case KVMPPC_VSX_COPY_DWORD:
1421 vsx_offset =
1422 kvmppc_get_vsr_dword_offset(vcpu->arch.mmio_vsx_offset);
1423
1424 if (vsx_offset == -1) {
1425 result = -1;
1426 break;
1427 }
1428
1429 if (rs < 32) {
1430 *val = VCPU_VSX_FPR(vcpu, rs, vsx_offset);
1431 } else {
1432 reg.vval = VCPU_VSX_VR(vcpu, rs - 32);
1433 *val = reg.vsxval[vsx_offset];
1434 }
1435 break;
1436
1437 case KVMPPC_VSX_COPY_WORD:
1438 vsx_offset =
1439 kvmppc_get_vsr_word_offset(vcpu->arch.mmio_vsx_offset);
1440
1441 if (vsx_offset == -1) {
1442 result = -1;
1443 break;
1444 }
1445
1446 if (rs < 32) {
1447 dword_offset = vsx_offset / 2;
1448 word_offset = vsx_offset % 2;
1449 reg.vsxval[0] = VCPU_VSX_FPR(vcpu, rs, dword_offset);
1450 *val = reg.vsx32val[word_offset];
1451 } else {
1452 reg.vval = VCPU_VSX_VR(vcpu, rs - 32);
1453 *val = reg.vsx32val[vsx_offset];
1454 }
1455 break;
1456
1457 default:
1458 result = -1;
1459 break;
1460 }
1461
1462 return result;
1463 }
1464
1465 int kvmppc_handle_vsx_store(struct kvm_vcpu *vcpu,
1466 int rs, unsigned int bytes, int is_default_endian)
1467 {
1468 u64 val;
1469 enum emulation_result emulated = EMULATE_DONE;
1470
1471 vcpu->arch.io_gpr = rs;
1472
1473 /* Currently, mmio_vsx_copy_nums only allowed to be 4 or less */
1474 if (vcpu->arch.mmio_vsx_copy_nums > 4)
1475 return EMULATE_FAIL;
1476
1477 while (vcpu->arch.mmio_vsx_copy_nums) {
1478 if (kvmppc_get_vsr_data(vcpu, rs, &val) == -1)
1479 return EMULATE_FAIL;
1480
1481 emulated = kvmppc_handle_store(vcpu,
1482 val, bytes, is_default_endian);
1483
1484 if (emulated != EMULATE_DONE)
1485 break;
1486
1487 vcpu->arch.paddr_accessed += vcpu->run->mmio.len;
1488
1489 vcpu->arch.mmio_vsx_copy_nums--;
1490 vcpu->arch.mmio_vsx_offset++;
1491 }
1492
1493 return emulated;
1494 }
1495
1496 static int kvmppc_emulate_mmio_vsx_loadstore(struct kvm_vcpu *vcpu)
1497 {
1498 struct kvm_run *run = vcpu->run;
1499 enum emulation_result emulated = EMULATE_FAIL;
1500 int r;
1501
1502 vcpu->arch.paddr_accessed += run->mmio.len;
1503
1504 if (!vcpu->mmio_is_write) {
1505 emulated = kvmppc_handle_vsx_load(vcpu, vcpu->arch.io_gpr,
1506 run->mmio.len, 1, vcpu->arch.mmio_sign_extend);
1507 } else {
1508 emulated = kvmppc_handle_vsx_store(vcpu,
1509 vcpu->arch.io_gpr, run->mmio.len, 1);
1510 }
1511
1512 switch (emulated) {
1513 case EMULATE_DO_MMIO:
1514 run->exit_reason = KVM_EXIT_MMIO;
1515 r = RESUME_HOST;
1516 break;
1517 case EMULATE_FAIL:
1518 pr_info("KVM: MMIO emulation failed (VSX repeat)\n");
1519 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
1520 run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
1521 r = RESUME_HOST;
1522 break;
1523 default:
1524 r = RESUME_GUEST;
1525 break;
1526 }
1527 return r;
1528 }
1529 #endif /* CONFIG_VSX */
1530
1531 #ifdef CONFIG_ALTIVEC
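/*
 * Emulate a (possibly multi-element) VMX load from MMIO space;
 * mmio_vmx_copy_nums counts the elements still to be transferred.
 */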
1532 int kvmppc_handle_vmx_load(struct kvm_vcpu *vcpu,
1533 unsigned int rt, unsigned int bytes, int is_default_endian)
1534 {
1535 enum emulation_result emulated = EMULATE_DONE;
1536
1537 if (vcpu->arch.mmio_vmx_copy_nums > 2)
1538 return EMULATE_FAIL;
1539
1540 while (vcpu->arch.mmio_vmx_copy_nums) {
1541 emulated = __kvmppc_handle_load(vcpu, rt, bytes,
1542 is_default_endian, 0);
1543
1544 if (emulated != EMULATE_DONE)
1545 break;
1546
1547 vcpu->arch.paddr_accessed += vcpu->run->mmio.len;
1548 vcpu->arch.mmio_vmx_copy_nums--;
1549 vcpu->arch.mmio_vmx_offset++;
1550 }
1551
1552 return emulated;
1553 }
1554
1555 static int kvmppc_get_vmx_dword(struct kvm_vcpu *vcpu, int index, u64 *val)
1556 {
1557 union kvmppc_one_reg reg;
1558 int vmx_offset = 0;
1559 int result = 0;
1560
1561 vmx_offset =
1562 kvmppc_get_vmx_dword_offset(vcpu, vcpu->arch.mmio_vmx_offset);
1563
1564 if (vmx_offset == -1)
1565 return -1;
1566
1567 reg.vval = VCPU_VSX_VR(vcpu, index);
1568 *val = reg.vsxval[vmx_offset];
1569
1570 return result;
1571 }
1572
1573 static int kvmppc_get_vmx_word(struct kvm_vcpu *vcpu, int index, u64 *val)
1574 {
1575 union kvmppc_one_reg reg;
1576 int vmx_offset = 0;
1577 int result = 0;
1578
1579 vmx_offset =
1580 kvmppc_get_vmx_word_offset(vcpu, vcpu->arch.mmio_vmx_offset);
1581
1582 if (vmx_offset == -1)
1583 return -1;
1584
1585 reg.vval = VCPU_VSX_VR(vcpu, index);
1586 *val = reg.vsx32val[vmx_offset];
1587
1588 return result;
1589 }
1590
1591 static int kvmppc_get_vmx_hword(struct kvm_vcpu *vcpu, int index, u64 *val)
1592 {
1593 union kvmppc_one_reg reg;
1594 int vmx_offset = 0;
1595 int result = 0;
1596
1597 vmx_offset =
1598 kvmppc_get_vmx_hword_offset(vcpu, vcpu->arch.mmio_vmx_offset);
1599
1600 if (vmx_offset == -1)
1601 return -1;
1602
1603 reg.vval = VCPU_VSX_VR(vcpu, index);
1604 *val = reg.vsx16val[vmx_offset];
1605
1606 return result;
1607 }
1608
1609 static int kvmppc_get_vmx_byte(struct kvm_vcpu *vcpu, int index, u64 *val)
1610 {
1611 union kvmppc_one_reg reg;
1612 int vmx_offset = 0;
1613 int result = 0;
1614
1615 vmx_offset =
1616 kvmppc_get_vmx_byte_offset(vcpu, vcpu->arch.mmio_vmx_offset);
1617
1618 if (vmx_offset == -1)
1619 return -1;
1620
1621 reg.vval = VCPU_VSX_VR(vcpu, index);
1622 *val = reg.vsx8val[vmx_offset];
1623
1624 return result;
1625 }
1626
1627 int kvmppc_handle_vmx_store(struct kvm_vcpu *vcpu,
1628 unsigned int rs, unsigned int bytes, int is_default_endian)
1629 {
1630 u64 val = 0;
1631 unsigned int index = rs & KVM_MMIO_REG_MASK;
1632 enum emulation_result emulated = EMULATE_DONE;
1633
1634 if (vcpu->arch.mmio_vmx_copy_nums > 2)
1635 return EMULATE_FAIL;
1636
1637 vcpu->arch.io_gpr = rs;
1638
1639 while (vcpu->arch.mmio_vmx_copy_nums) {
1640 switch (vcpu->arch.mmio_copy_type) {
1641 case KVMPPC_VMX_COPY_DWORD:
1642 if (kvmppc_get_vmx_dword(vcpu, index, &val) == -1)
1643 return EMULATE_FAIL;
1644
1645 break;
1646 case KVMPPC_VMX_COPY_WORD:
1647 if (kvmppc_get_vmx_word(vcpu, index, &val) == -1)
1648 return EMULATE_FAIL;
1649 break;
1650 case KVMPPC_VMX_COPY_HWORD:
1651 if (kvmppc_get_vmx_hword(vcpu, index, &val) == -1)
1652 return EMULATE_FAIL;
1653 break;
1654 case KVMPPC_VMX_COPY_BYTE:
1655 if (kvmppc_get_vmx_byte(vcpu, index, &val) == -1)
1656 return EMULATE_FAIL;
1657 break;
1658 default:
1659 return EMULATE_FAIL;
1660 }
1661
1662 emulated = kvmppc_handle_store(vcpu, val, bytes,
1663 is_default_endian);
1664 if (emulated != EMULATE_DONE)
1665 break;
1666
1667 vcpu->arch.paddr_accessed += vcpu->run->mmio.len;
1668 vcpu->arch.mmio_vmx_copy_nums--;
1669 vcpu->arch.mmio_vmx_offset++;
1670 }
1671
1672 return emulated;
1673 }
1674
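/*
 * Continue a multi-element VMX MMIO access after the previous element
 * completed, issuing the next load or store and deciding whether to
 * resume the guest or return to the host.
 */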
1675 static int kvmppc_emulate_mmio_vmx_loadstore(struct kvm_vcpu *vcpu)
1676 {
1677 struct kvm_run *run = vcpu->run;
1678 enum emulation_result emulated = EMULATE_FAIL;
1679 int r;
1680
1681 vcpu->arch.paddr_accessed += run->mmio.len;
1682
1683 if (!vcpu->mmio_is_write) {
1684 emulated = kvmppc_handle_vmx_load(vcpu,
1685 vcpu->arch.io_gpr, run->mmio.len, 1);
1686 } else {
1687 emulated = kvmppc_handle_vmx_store(vcpu,
1688 vcpu->arch.io_gpr, run->mmio.len, 1);
1689 }
1690
1691 switch (emulated) {
1692 case EMULATE_DO_MMIO:
1693 run->exit_reason = KVM_EXIT_MMIO;
1694 r = RESUME_HOST;
1695 break;
1696 case EMULATE_FAIL:
1697 pr_info("KVM: MMIO emulation failed (VMX repeat)\n");
1698 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
1699 run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
1700 r = RESUME_HOST;
1701 break;
1702 default:
1703 r = RESUME_GUEST;
1704 break;
1705 }
1706 return r;
1707 }
1708 #endif /* CONFIG_ALTIVEC */
1709
1710 int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
1711 {
1712 int r = 0;
1713 union kvmppc_one_reg val;
1714 int size;
1715
1716 size = one_reg_size(reg->id);
1717 if (size > sizeof(val))
1718 return -EINVAL;
1719
1720 r = kvmppc_get_one_reg(vcpu, reg->id, &val);
1721 if (r == -EINVAL) {
1722 r = 0;
1723 switch (reg->id) {
1724 #ifdef CONFIG_ALTIVEC
1725 case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
1726 if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
1727 r = -ENXIO;
1728 break;
1729 }
1730 val.vval = vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0];
1731 break;
1732 case KVM_REG_PPC_VSCR:
1733 if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
1734 r = -ENXIO;
1735 break;
1736 }
1737 val = get_reg_val(reg->id, vcpu->arch.vr.vscr.u[3]);
1738 break;
1739 case KVM_REG_PPC_VRSAVE:
1740 val = get_reg_val(reg->id, vcpu->arch.vrsave);
1741 break;
1742 #endif /* CONFIG_ALTIVEC */
1743 default:
1744 r = -EINVAL;
1745 break;
1746 }
1747 }
1748
1749 if (r)
1750 return r;
1751
1752 if (copy_to_user((char __user *)(unsigned long)reg->addr, &val, size))
1753 r = -EFAULT;
1754
1755 return r;
1756 }
1757
1758 int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
1759 {
1760 int r;
1761 union kvmppc_one_reg val;
1762 int size;
1763
1764 size = one_reg_size(reg->id);
1765 if (size > sizeof(val))
1766 return -EINVAL;
1767
1768 if (copy_from_user(&val, (char __user *)(unsigned long)reg->addr, size))
1769 return -EFAULT;
1770
1771 r = kvmppc_set_one_reg(vcpu, reg->id, &val);
1772 if (r == -EINVAL) {
1773 r = 0;
1774 switch (reg->id) {
1775 #ifdef CONFIG_ALTIVEC
1776 case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
1777 if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
1778 r = -ENXIO;
1779 break;
1780 }
1781 vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0] = val.vval;
1782 break;
1783 case KVM_REG_PPC_VSCR:
1784 if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
1785 r = -ENXIO;
1786 break;
1787 }
1788 vcpu->arch.vr.vscr.u[3] = set_reg_val(reg->id, val);
1789 break;
1790 case KVM_REG_PPC_VRSAVE:
1791 if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
1792 r = -ENXIO;
1793 break;
1794 }
1795 vcpu->arch.vrsave = set_reg_val(reg->id, val);
1796 break;
1797 #endif /* CONFIG_ALTIVEC */
1798 default:
1799 r = -EINVAL;
1800 break;
1801 }
1802 }
1803
1804 return r;
1805 }
1806
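/*
 * Main vcpu run loop entry point: finish any MMIO, OSI, hypercall or EPR
 * work left over from the previous exit, then enter the guest until the
 * next exit that needs userspace attention.
 */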
1807 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
1808 {
1809 struct kvm_run *run = vcpu->run;
1810 int r;
1811
1812 vcpu_load(vcpu);
1813
1814 if (vcpu->mmio_needed) {
1815 vcpu->mmio_needed = 0;
1816 if (!vcpu->mmio_is_write)
1817 kvmppc_complete_mmio_load(vcpu);
1818 #ifdef CONFIG_VSX
1819 if (vcpu->arch.mmio_vsx_copy_nums > 0) {
1820 vcpu->arch.mmio_vsx_copy_nums--;
1821 vcpu->arch.mmio_vsx_offset++;
1822 }
1823
1824 if (vcpu->arch.mmio_vsx_copy_nums > 0) {
1825 r = kvmppc_emulate_mmio_vsx_loadstore(vcpu);
1826 if (r == RESUME_HOST) {
1827 vcpu->mmio_needed = 1;
1828 goto out;
1829 }
1830 }
1831 #endif
1832 #ifdef CONFIG_ALTIVEC
1833 if (vcpu->arch.mmio_vmx_copy_nums > 0) {
1834 vcpu->arch.mmio_vmx_copy_nums--;
1835 vcpu->arch.mmio_vmx_offset++;
1836 }
1837
1838 if (vcpu->arch.mmio_vmx_copy_nums > 0) {
1839 r = kvmppc_emulate_mmio_vmx_loadstore(vcpu);
1840 if (r == RESUME_HOST) {
1841 vcpu->mmio_needed = 1;
1842 goto out;
1843 }
1844 }
1845 #endif
1846 } else if (vcpu->arch.osi_needed) {
1847 u64 *gprs = run->osi.gprs;
1848 int i;
1849
1850 for (i = 0; i < 32; i++)
1851 kvmppc_set_gpr(vcpu, i, gprs[i]);
1852 vcpu->arch.osi_needed = 0;
1853 } else if (vcpu->arch.hcall_needed) {
1854 int i;
1855
1856 kvmppc_set_gpr(vcpu, 3, run->papr_hcall.ret);
1857 for (i = 0; i < 9; ++i)
1858 kvmppc_set_gpr(vcpu, 4 + i, run->papr_hcall.args[i]);
1859 vcpu->arch.hcall_needed = 0;
1860 #ifdef CONFIG_BOOKE
1861 } else if (vcpu->arch.epr_needed) {
1862 kvmppc_set_epr(vcpu, run->epr.epr);
1863 vcpu->arch.epr_needed = 0;
1864 #endif
1865 }
1866
1867 kvm_sigset_activate(vcpu);
1868
1869 if (run->immediate_exit)
1870 r = -EINTR;
1871 else
1872 r = kvmppc_vcpu_run(vcpu);
1873
1874 kvm_sigset_deactivate(vcpu);
1875
1876 #ifdef CONFIG_ALTIVEC
1877 out:
1878 #endif
1879
1880 /*
1881 * We're already returning to userspace, don't pass the
1882 * RESUME_HOST flags along.
1883 */
1884 if (r > 0)
1885 r = 0;
1886
1887 vcpu_put(vcpu);
1888 return r;
1889 }
1890
1891 int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
1892 {
1893 if (irq->irq == KVM_INTERRUPT_UNSET) {
1894 kvmppc_core_dequeue_external(vcpu);
1895 return 0;
1896 }
1897
1898 kvmppc_core_queue_external(vcpu, irq);
1899
1900 kvm_vcpu_kick(vcpu);
1901
1902 return 0;
1903 }
1904
1905 static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
1906 struct kvm_enable_cap *cap)
1907 {
1908 int r;
1909
1910 if (cap->flags)
1911 return -EINVAL;
1912
1913 switch (cap->cap) {
1914 case KVM_CAP_PPC_OSI:
1915 r = 0;
1916 vcpu->arch.osi_enabled = true;
1917 break;
1918 case KVM_CAP_PPC_PAPR:
1919 r = 0;
1920 vcpu->arch.papr_enabled = true;
1921 break;
1922 case KVM_CAP_PPC_EPR:
1923 r = 0;
1924 if (cap->args[0])
1925 vcpu->arch.epr_flags |= KVMPPC_EPR_USER;
1926 else
1927 vcpu->arch.epr_flags &= ~KVMPPC_EPR_USER;
1928 break;
1929 #ifdef CONFIG_BOOKE
1930 case KVM_CAP_PPC_BOOKE_WATCHDOG:
1931 r = 0;
1932 vcpu->arch.watchdog_enabled = true;
1933 break;
1934 #endif
1935 #if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
1936 case KVM_CAP_SW_TLB: {
1937 struct kvm_config_tlb cfg;
1938 void __user *user_ptr = (void __user *)(uintptr_t)cap->args[0];
1939
1940 r = -EFAULT;
1941 if (copy_from_user(&cfg, user_ptr, sizeof(cfg)))
1942 break;
1943
1944 r = kvm_vcpu_ioctl_config_tlb(vcpu, &cfg);
1945 break;
1946 }
1947 #endif
1948 #ifdef CONFIG_KVM_MPIC
1949 case KVM_CAP_IRQ_MPIC: {
1950 struct fd f;
1951 struct kvm_device *dev;
1952
1953 r = -EBADF;
1954 f = fdget(cap->args[0]);
1955 if (!f.file)
1956 break;
1957
1958 r = -EPERM;
1959 dev = kvm_device_from_filp(f.file);
1960 if (dev)
1961 r = kvmppc_mpic_connect_vcpu(dev, vcpu, cap->args[1]);
1962
1963 fdput(f);
1964 break;
1965 }
1966 #endif
1967 #ifdef CONFIG_KVM_XICS
1968 case KVM_CAP_IRQ_XICS: {
1969 struct fd f;
1970 struct kvm_device *dev;
1971
1972 r = -EBADF;
1973 f = fdget(cap->args[0]);
1974 if (!f.file)
1975 break;
1976
1977 r = -EPERM;
1978 dev = kvm_device_from_filp(f.file);
1979 if (dev) {
1980 if (xics_on_xive())
1981 r = kvmppc_xive_connect_vcpu(dev, vcpu, cap->args[1]);
1982 else
1983 r = kvmppc_xics_connect_vcpu(dev, vcpu, cap->args[1]);
1984 }
1985
1986 fdput(f);
1987 break;
1988 }
1989 #endif /* CONFIG_KVM_XICS */
1990 #ifdef CONFIG_KVM_XIVE
1991 case KVM_CAP_PPC_IRQ_XIVE: {
1992 struct fd f;
1993 struct kvm_device *dev;
1994
1995 r = -EBADF;
1996 f = fdget(cap->args[0]);
1997 if (!f.file)
1998 break;
1999
2000 r = -ENXIO;
2001 if (!xive_enabled())
2002 break;
2003
2004 r = -EPERM;
2005 dev = kvm_device_from_filp(f.file);
2006 if (dev)
2007 r = kvmppc_xive_native_connect_vcpu(dev, vcpu,
2008 cap->args[1]);
2009
2010 fdput(f);
2011 break;
2012 }
2013 #endif /* CONFIG_KVM_XIVE */
2014 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
2015 case KVM_CAP_PPC_FWNMI:
2016 r = -EINVAL;
2017 if (!is_kvmppc_hv_enabled(vcpu->kvm))
2018 break;
2019 r = 0;
2020 vcpu->kvm->arch.fwnmi_enabled = true;
2021 break;
2022 #endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
2023 default:
2024 r = -EINVAL;
2025 break;
2026 }
2027
2028 if (!r)
2029 r = kvmppc_sanity_check(vcpu);
2030
2031 return r;
2032 }
2033
2034 bool kvm_arch_intc_initialized(struct kvm *kvm)
2035 {
2036 #ifdef CONFIG_KVM_MPIC
2037 if (kvm->arch.mpic)
2038 return true;
2039 #endif
2040 #ifdef CONFIG_KVM_XICS
2041 if (kvm->arch.xics || kvm->arch.xive)
2042 return true;
2043 #endif
2044 return false;
2045 }
2046
2047 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
2048 struct kvm_mp_state *mp_state)
2049 {
2050 return -EINVAL;
2051 }
2052
kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu * vcpu,struct kvm_mp_state * mp_state)2053 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
2054 struct kvm_mp_state *mp_state)
2055 {
2056 return -EINVAL;
2057 }
2058
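/*
 * KVM_INTERRUPT is handled on the async ioctl path, i.e. without taking
 * the vcpu mutex, so userspace can inject an interrupt while the vcpu
 * thread is inside KVM_RUN.
 */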
long kvm_arch_vcpu_async_ioctl(struct file *filp,
			       unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;

	if (ioctl == KVM_INTERRUPT) {
		struct kvm_interrupt irq;
		if (copy_from_user(&irq, argp, sizeof(irq)))
			return -EFAULT;
		return kvm_vcpu_ioctl_interrupt(vcpu, &irq);
	}
	return -ENOIOCTLCMD;
}

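/*
 * Remaining (synchronous) vcpu ioctls: capability enablement, ONE_REG
 * register access and, on e500, dirty TLB flushing.
 */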
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			goto out;
		vcpu_load(vcpu);
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		vcpu_put(vcpu);
		break;
	}

	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG:
	{
		struct kvm_one_reg reg;
		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			goto out;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}

#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_DIRTY_TLB: {
		struct kvm_dirty_tlb dirty;
		r = -EFAULT;
		if (copy_from_user(&dirty, argp, sizeof(dirty)))
			goto out;
		vcpu_load(vcpu);
		r = kvm_vcpu_ioctl_dirty_tlb(vcpu, &dirty);
		vcpu_put(vcpu);
		break;
	}
#endif
	default:
		r = -EINVAL;
	}

out:
	return r;
}

vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}

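/*
 * Report the instruction sequence a guest should use to make a KVM
 * hypercall: a single "sc 1" on booke-hv, otherwise a magic value loaded
 * into r0 followed by "sc". Unused slots are padded with nops.
 */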
static int kvm_vm_ioctl_get_pvinfo(struct kvm_ppc_pvinfo *pvinfo)
{
	u32 inst_nop = 0x60000000;
#ifdef CONFIG_KVM_BOOKE_HV
	u32 inst_sc1 = 0x44000022;
	pvinfo->hcall[0] = cpu_to_be32(inst_sc1);
	pvinfo->hcall[1] = cpu_to_be32(inst_nop);
	pvinfo->hcall[2] = cpu_to_be32(inst_nop);
	pvinfo->hcall[3] = cpu_to_be32(inst_nop);
#else
	u32 inst_lis = 0x3c000000;
	u32 inst_ori = 0x60000000;
	u32 inst_sc = 0x44000002;
	u32 inst_imm_mask = 0xffff;

	/*
	 * The hypercall to get into KVM from within guest context is as
	 * follows:
	 *
	 *    lis r0, KVM_SC_MAGIC_R0@h
	 *    ori r0, r0, KVM_SC_MAGIC_R0@l
	 *    sc
	 *    nop
	 */
	pvinfo->hcall[0] = cpu_to_be32(inst_lis | ((KVM_SC_MAGIC_R0 >> 16) & inst_imm_mask));
	pvinfo->hcall[1] = cpu_to_be32(inst_ori | (KVM_SC_MAGIC_R0 & inst_imm_mask));
	pvinfo->hcall[2] = cpu_to_be32(inst_sc);
	pvinfo->hcall[3] = cpu_to_be32(inst_nop);
#endif

	pvinfo->flags = KVM_PPC_PVINFO_FLAGS_EV_IDLE;

	return 0;
}

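/*
 * Assert or deassert an interrupt line on the in-kernel irqchip.
 * Illustrative userspace sketch (not from this file):
 *
 *	struct kvm_irq_level irq = { .irq = 42, .level = 1 };
 *	ioctl(vm_fd, KVM_IRQ_LINE, &irq);
 */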
int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event,
			  bool line_status)
{
	if (!irqchip_in_kernel(kvm))
		return -ENXIO;

	irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
					irq_event->irq, irq_event->level,
					line_status);
	return 0;
}


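/*
 * VM-wide capabilities. KVM_CAP_PPC_ENABLE_HCALL toggles bits in the
 * enabled_hcalls bitmap directly; the others are forwarded to the active
 * kvm_ops backend (HV or PR), which decides whether the host supports
 * the request. Illustrative userspace sketch (not from this file):
 *
 *	struct kvm_enable_cap cap = { .cap = KVM_CAP_PPC_SMT, .args = { 4 } };
 *	ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
 */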
int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
			    struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
	case KVM_CAP_PPC_ENABLE_HCALL: {
		unsigned long hcall = cap->args[0];

		r = -EINVAL;
		if (hcall > MAX_HCALL_OPCODE || (hcall & 3) ||
		    cap->args[1] > 1)
			break;
		if (!kvmppc_book3s_hcall_implemented(kvm, hcall))
			break;
		if (cap->args[1])
			set_bit(hcall / 4, kvm->arch.enabled_hcalls);
		else
			clear_bit(hcall / 4, kvm->arch.enabled_hcalls);
		r = 0;
		break;
	}
	case KVM_CAP_PPC_SMT: {
		unsigned long mode = cap->args[0];
		unsigned long flags = cap->args[1];

		r = -EINVAL;
		if (kvm->arch.kvm_ops->set_smt_mode)
			r = kvm->arch.kvm_ops->set_smt_mode(kvm, mode, flags);
		break;
	}

	case KVM_CAP_PPC_NESTED_HV:
		r = -EINVAL;
		if (!is_kvmppc_hv_enabled(kvm) ||
		    !kvm->arch.kvm_ops->enable_nested)
			break;
		r = kvm->arch.kvm_ops->enable_nested(kvm);
		break;
#endif
#if defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE)
	case KVM_CAP_PPC_SECURE_GUEST:
		r = -EINVAL;
		if (!is_kvmppc_hv_enabled(kvm) || !kvm->arch.kvm_ops->enable_svm)
			break;
		r = kvm->arch.kvm_ops->enable_svm(kvm);
		break;
	case KVM_CAP_PPC_DAWR1:
		r = -EINVAL;
		if (!is_kvmppc_hv_enabled(kvm) || !kvm->arch.kvm_ops->enable_dawr1)
			break;
		r = kvm->arch.kvm_ops->enable_dawr1(kvm);
		break;
#endif
	default:
		r = -EINVAL;
		break;
	}

	return r;
}

#ifdef CONFIG_PPC_BOOK3S_64
/*
 * These functions check whether the underlying hardware is safe
 * against attacks based on observing the effects of speculatively
 * executed instructions, and whether it supplies instructions for
 * use in workarounds. The information comes from firmware, either
 * via the device tree on powernv platforms or from an hcall on
 * pseries platforms.
 */
#ifdef CONFIG_PPC_PSERIES
static int pseries_get_cpu_char(struct kvm_ppc_cpu_char *cp)
{
	struct h_cpu_char_result c;
	unsigned long rc;

	if (!machine_is(pseries))
		return -ENOTTY;

	rc = plpar_get_cpu_characteristics(&c);
	if (rc == H_SUCCESS) {
		cp->character = c.character;
		cp->behaviour = c.behaviour;
		cp->character_mask = KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31 |
			KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED |
			KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30 |
			KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2 |
			KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV |
			KVM_PPC_CPU_CHAR_BR_HINT_HONOURED |
			KVM_PPC_CPU_CHAR_MTTRIG_THR_RECONF |
			KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS |
			KVM_PPC_CPU_CHAR_BCCTR_FLUSH_ASSIST;
		cp->behaviour_mask = KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY |
			KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR |
			KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR |
			KVM_PPC_CPU_BEHAV_FLUSH_COUNT_CACHE;
	}
	return 0;
}
#else
static int pseries_get_cpu_char(struct kvm_ppc_cpu_char *cp)
{
	return -ENOTTY;
}
#endif

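/*
 * Helper for the powernv path below: does the named node under
 * ibm,opal/fw-features carry the given property ("enabled"/"disabled")?
 */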
static inline bool have_fw_feat(struct device_node *fw_features,
				const char *state, const char *name)
{
	struct device_node *np;
	bool r = false;

	np = of_get_child_by_name(fw_features, name);
	if (np) {
		r = of_property_read_bool(np, state);
		of_node_put(np);
	}
	return r;
}

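/*
 * Build the character/behaviour flags returned by KVM_PPC_GET_CPU_CHAR.
 * Try the pseries hcall first; if that is unavailable, fall back to the
 * fw-features nodes under ibm,opal in the device tree.
 */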
static int kvmppc_get_cpu_char(struct kvm_ppc_cpu_char *cp)
{
	struct device_node *np, *fw_features;
	int r;

	memset(cp, 0, sizeof(*cp));
	r = pseries_get_cpu_char(cp);
	if (r != -ENOTTY)
		return r;

	np = of_find_node_by_name(NULL, "ibm,opal");
	if (np) {
		fw_features = of_get_child_by_name(np, "fw-features");
		of_node_put(np);
		if (!fw_features)
			return 0;
		if (have_fw_feat(fw_features, "enabled",
				 "inst-spec-barrier-ori31,31,0"))
			cp->character |= KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31;
		if (have_fw_feat(fw_features, "enabled",
				 "fw-bcctrl-serialized"))
			cp->character |= KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED;
		if (have_fw_feat(fw_features, "enabled",
				 "inst-l1d-flush-ori30,30,0"))
			cp->character |= KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30;
		if (have_fw_feat(fw_features, "enabled",
				 "inst-l1d-flush-trig2"))
			cp->character |= KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2;
		if (have_fw_feat(fw_features, "enabled",
				 "fw-l1d-thread-split"))
			cp->character |= KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV;
		if (have_fw_feat(fw_features, "enabled",
				 "fw-count-cache-disabled"))
			cp->character |= KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS;
		if (have_fw_feat(fw_features, "enabled",
				 "fw-count-cache-flush-bcctr2,0,0"))
			cp->character |= KVM_PPC_CPU_CHAR_BCCTR_FLUSH_ASSIST;
		cp->character_mask = KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31 |
			KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED |
			KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30 |
			KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2 |
			KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV |
			KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS |
			KVM_PPC_CPU_CHAR_BCCTR_FLUSH_ASSIST;

		if (have_fw_feat(fw_features, "enabled",
				 "speculation-policy-favor-security"))
			cp->behaviour |= KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY;
		if (!have_fw_feat(fw_features, "disabled",
				  "needs-l1d-flush-msr-pr-0-to-1"))
			cp->behaviour |= KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR;
		if (!have_fw_feat(fw_features, "disabled",
				  "needs-spec-barrier-for-bound-checks"))
			cp->behaviour |= KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR;
		if (have_fw_feat(fw_features, "enabled",
				 "needs-count-cache-flush-on-context-switch"))
			cp->behaviour |= KVM_PPC_CPU_BEHAV_FLUSH_COUNT_CACHE;
		cp->behaviour_mask = KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY |
			KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR |
			KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR |
			KVM_PPC_CPU_BEHAV_FLUSH_COUNT_CACHE;

		of_node_put(fw_features);
	}

	return 0;
}
#endif

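/*
 * VM ioctls. The Book3S-64 specific ones mostly dispatch through
 * kvm->arch.kvm_ops; unhandled ioctls fall through to the backend's
 * arch_vm_ioctl hook there, or return -ENOTTY elsewhere.
 */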
long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm __maybe_unused = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_PPC_GET_PVINFO: {
		struct kvm_ppc_pvinfo pvinfo;
		memset(&pvinfo, 0, sizeof(pvinfo));
		r = kvm_vm_ioctl_get_pvinfo(&pvinfo);
		if (copy_to_user(argp, &pvinfo, sizeof(pvinfo))) {
			r = -EFAULT;
			goto out;
		}

		break;
	}
#ifdef CONFIG_SPAPR_TCE_IOMMU
	case KVM_CREATE_SPAPR_TCE_64: {
		struct kvm_create_spapr_tce_64 create_tce_64;

		r = -EFAULT;
		if (copy_from_user(&create_tce_64, argp, sizeof(create_tce_64)))
			goto out;
		if (create_tce_64.flags) {
			r = -EINVAL;
			goto out;
		}
		r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce_64);
		goto out;
	}
	case KVM_CREATE_SPAPR_TCE: {
		struct kvm_create_spapr_tce create_tce;
		struct kvm_create_spapr_tce_64 create_tce_64;

		r = -EFAULT;
		if (copy_from_user(&create_tce, argp, sizeof(create_tce)))
			goto out;

		create_tce_64.liobn = create_tce.liobn;
		create_tce_64.page_shift = IOMMU_PAGE_SHIFT_4K;
		create_tce_64.offset = 0;
		create_tce_64.size = create_tce.window_size >>
			IOMMU_PAGE_SHIFT_4K;
		create_tce_64.flags = 0;
		r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce_64);
		goto out;
	}
#endif
#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_PPC_GET_SMMU_INFO: {
		struct kvm_ppc_smmu_info info;
		struct kvm *kvm = filp->private_data;

		memset(&info, 0, sizeof(info));
		r = kvm->arch.kvm_ops->get_smmu_info(kvm, &info);
		if (r >= 0 && copy_to_user(argp, &info, sizeof(info)))
			r = -EFAULT;
		break;
	}
	case KVM_PPC_RTAS_DEFINE_TOKEN: {
		struct kvm *kvm = filp->private_data;

		r = kvm_vm_ioctl_rtas_define_token(kvm, argp);
		break;
	}
	case KVM_PPC_CONFIGURE_V3_MMU: {
		struct kvm *kvm = filp->private_data;
		struct kvm_ppc_mmuv3_cfg cfg;

		r = -EINVAL;
		if (!kvm->arch.kvm_ops->configure_mmu)
			goto out;
		r = -EFAULT;
		if (copy_from_user(&cfg, argp, sizeof(cfg)))
			goto out;
		r = kvm->arch.kvm_ops->configure_mmu(kvm, &cfg);
		break;
	}
	case KVM_PPC_GET_RMMU_INFO: {
		struct kvm *kvm = filp->private_data;
		struct kvm_ppc_rmmu_info info;

		r = -EINVAL;
		if (!kvm->arch.kvm_ops->get_rmmu_info)
			goto out;
		r = kvm->arch.kvm_ops->get_rmmu_info(kvm, &info);
		if (r >= 0 && copy_to_user(argp, &info, sizeof(info)))
			r = -EFAULT;
		break;
	}
	case KVM_PPC_GET_CPU_CHAR: {
		struct kvm_ppc_cpu_char cpuchar;

		r = kvmppc_get_cpu_char(&cpuchar);
		if (r >= 0 && copy_to_user(argp, &cpuchar, sizeof(cpuchar)))
			r = -EFAULT;
		break;
	}
	case KVM_PPC_SVM_OFF: {
		struct kvm *kvm = filp->private_data;

		r = 0;
		if (!kvm->arch.kvm_ops->svm_off)
			goto out;

		r = kvm->arch.kvm_ops->svm_off(kvm);
		break;
	}
	default: {
		struct kvm *kvm = filp->private_data;
		r = kvm->arch.kvm_ops->arch_vm_ioctl(filp, ioctl, arg);
	}
#else /* CONFIG_PPC_BOOK3S_64 */
	default:
		r = -ENOTTY;
#endif
	}
out:
	return r;
}

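/*
 * IDA-based allocator for logical partition IDs. LPID 0 is reserved for
 * the host, so guest allocation starts at 1 and is bounded by nr_lpids,
 * which is set via kvmppc_init_lpid().
 */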
static DEFINE_IDA(lpid_inuse);
static unsigned long nr_lpids;

long kvmppc_alloc_lpid(void)
{
	int lpid;

	/* The host LPID must always be 0 (allocation starts at 1) */
	lpid = ida_alloc_range(&lpid_inuse, 1, nr_lpids - 1, GFP_KERNEL);
	if (lpid < 0) {
		if (lpid == -ENOMEM)
			pr_err("%s: Out of memory\n", __func__);
		else
			pr_err("%s: No LPIDs free\n", __func__);
		return -ENOMEM;
	}

	return lpid;
}
EXPORT_SYMBOL_GPL(kvmppc_alloc_lpid);

void kvmppc_free_lpid(long lpid)
{
	ida_free(&lpid_inuse, lpid);
}
EXPORT_SYMBOL_GPL(kvmppc_free_lpid);

/* nr_lpids_param includes the host LPID */
void kvmppc_init_lpid(unsigned long nr_lpids_param)
{
	nr_lpids = nr_lpids_param;
}
EXPORT_SYMBOL_GPL(kvmppc_init_lpid);

int kvm_arch_init(void *opaque)
{
	return 0;
}

EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_ppc_instr);

void kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu, struct dentry *debugfs_dentry)
{
	if (vcpu->kvm->arch.kvm_ops->create_vcpu_debugfs)
		vcpu->kvm->arch.kvm_ops->create_vcpu_debugfs(vcpu, debugfs_dentry);
}

int kvm_arch_create_vm_debugfs(struct kvm *kvm)
{
	if (kvm->arch.kvm_ops->create_vm_debugfs)
		kvm->arch.kvm_ops->create_vm_debugfs(kvm);
	return 0;
}
