/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright 2010 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>
#include <linux/vmalloc.h>

#include <asm/tlbflush.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu-hash64.h>
#include <asm/hvcall.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>
#include <asm/cputable.h>

/* POWER7 has 10-bit LPIDs, PPC970 has 6-bit LPIDs */
#define MAX_LPID_970	63
#define NR_LPIDS	(LPID_RSVD + 1)
unsigned long lpid_inuse[BITS_TO_LONGS(NR_LPIDS)];

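/*
 * Allocate the guest's hashed page table (HPT) and reverse-map array,
 * and assign the guest a free logical partition ID (LPID).  The HPT
 * comes from the preallocated pool if one is available, otherwise from
 * the page allocator.
 */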
long kvmppc_alloc_hpt(struct kvm *kvm)
{
        unsigned long hpt;
        unsigned long lpid;
        struct revmap_entry *rev;
        struct kvmppc_linear_info *li;

        /* Allocate guest's hashed page table */
        li = kvm_alloc_hpt();
        if (li) {
                /* using preallocated memory */
                hpt = (ulong)li->base_virt;
                kvm->arch.hpt_li = li;
        } else {
                /* using dynamic memory */
                hpt = __get_free_pages(GFP_KERNEL|__GFP_ZERO|__GFP_REPEAT|
                                       __GFP_NOWARN, HPT_ORDER - PAGE_SHIFT);
        }

        if (!hpt) {
                pr_err("kvm_alloc_hpt: Couldn't alloc HPT\n");
                return -ENOMEM;
        }
        kvm->arch.hpt_virt = hpt;

        /* Allocate reverse map array */
        rev = vmalloc(sizeof(struct revmap_entry) * HPT_NPTE);
        if (!rev) {
                pr_err("kvmppc_alloc_hpt: Couldn't alloc reverse map array\n");
                goto out_freehpt;
        }
        kvm->arch.revmap = rev;

        /* Allocate the guest's logical partition ID */
        do {
                lpid = find_first_zero_bit(lpid_inuse, NR_LPIDS);
                if (lpid >= NR_LPIDS) {
                        pr_err("kvm_alloc_hpt: No LPIDs free\n");
                        goto out_freeboth;
                }
        } while (test_and_set_bit(lpid, lpid_inuse));

        kvm->arch.sdr1 = __pa(hpt) | (HPT_ORDER - 18);
        kvm->arch.lpid = lpid;

        pr_info("KVM guest htab at %lx, LPID %lx\n", hpt, lpid);
        return 0;

 out_freeboth:
        vfree(rev);
 out_freehpt:
        free_pages(hpt, HPT_ORDER - PAGE_SHIFT);
        return -ENOMEM;
}

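/*
 * Free the HPT and reverse map, and release the LPID allocated by
 * kvmppc_alloc_hpt().
 */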
void kvmppc_free_hpt(struct kvm *kvm)
{
        clear_bit(kvm->arch.lpid, lpid_inuse);
        vfree(kvm->arch.revmap);
        if (kvm->arch.hpt_li)
                kvm_release_hpt(kvm->arch.hpt_li);
        else
                free_pages(kvm->arch.hpt_virt, HPT_ORDER - PAGE_SHIFT);
}

/* Bits in first HPTE dword for pagesize 4k, 64k or 16M */
static inline unsigned long hpte0_pgsize_encoding(unsigned long pgsize)
{
        return (pgsize > 0x1000) ? HPTE_V_LARGE : 0;
}

/* Bits in second HPTE dword for pagesize 4k, 64k or 16M */
static inline unsigned long hpte1_pgsize_encoding(unsigned long pgsize)
{
        return (pgsize == 0x10000) ? 0x1000 : 0;
}

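/*
 * Set up bolted HPTEs mapping the guest's virtual real mode area (VRMA),
 * creating at most one HPTE per HPTE group.
 */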
void kvmppc_map_vrma(struct kvm_vcpu *vcpu, struct kvm_memory_slot *memslot,
                     unsigned long porder)
{
        unsigned long i;
        unsigned long npages;
        unsigned long hp_v, hp_r;
        unsigned long addr, hash;
        unsigned long psize;
        unsigned long hp0, hp1;
        long ret;

        psize = 1ul << porder;
        npages = memslot->npages >> (porder - PAGE_SHIFT);

        /* VRMA can't be > 1TB */
        if (npages > 1ul << (40 - porder))
                npages = 1ul << (40 - porder);
        /* Can't use more than 1 HPTE per HPTEG */
        if (npages > HPT_NPTEG)
                npages = HPT_NPTEG;

        hp0 = HPTE_V_1TB_SEG | (VRMA_VSID << (40 - 16)) |
                HPTE_V_BOLTED | hpte0_pgsize_encoding(psize);
        hp1 = hpte1_pgsize_encoding(psize) |
                HPTE_R_R | HPTE_R_C | HPTE_R_M | PP_RWXX;

        for (i = 0; i < npages; ++i) {
                addr = i << porder;
                /* can't use hpt_hash since va > 64 bits */
                hash = (i ^ (VRMA_VSID ^ (VRMA_VSID << 25))) & HPT_HASH_MASK;
                /*
                 * We assume that the hash table is empty and no
                 * vcpus are using it at this stage.  Since we create
                 * at most one HPTE per HPTEG, we just assume entry 7
                 * is available and use it.
                 */
                hash = (hash << 3) + 7;
                hp_v = hp0 | ((addr >> 16) & ~0x7fUL);
                hp_r = hp1 | addr;
                ret = kvmppc_virtmode_h_enter(vcpu, H_EXACT, hash, hp_v, hp_r);
                if (ret != H_SUCCESS) {
                        pr_err("KVM: map_vrma at %lx failed, ret=%ld\n",
                               addr, ret);
                        break;
                }
        }
}

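/*
 * One-time module initialization: reserve the host's LPID and the LPID
 * used for partition switching so they are never handed out to a guest.
 */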
int kvmppc_mmu_hv_init(void)
{
        unsigned long host_lpid, rsvd_lpid;

        if (!cpu_has_feature(CPU_FTR_HVMODE))
                return -EINVAL;

        memset(lpid_inuse, 0, sizeof(lpid_inuse));

        if (cpu_has_feature(CPU_FTR_ARCH_206)) {
                host_lpid = mfspr(SPRN_LPID);   /* POWER7 */
                rsvd_lpid = LPID_RSVD;
        } else {
                host_lpid = 0;                  /* PPC970 */
                rsvd_lpid = MAX_LPID_970;
        }

        set_bit(host_lpid, lpid_inuse);
        /* rsvd_lpid is reserved for use in partition switching */
        set_bit(rsvd_lpid, lpid_inuse);

        return 0;
}

void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
{
}

static void kvmppc_mmu_book3s_64_hv_reset_msr(struct kvm_vcpu *vcpu)
{
        kvmppc_set_msr(vcpu, MSR_SF | MSR_ME);
}

/*
 * This is called to get a reference to a guest page if there isn't
 * one already in the kvm->arch.slot_phys[][] arrays.
 */
static long kvmppc_get_guest_page(struct kvm *kvm, unsigned long gfn,
                                  struct kvm_memory_slot *memslot,
                                  unsigned long psize)
{
        unsigned long start;
        long np, err;
        struct page *page, *hpage, *pages[1];
        unsigned long s, pgsize;
        unsigned long *physp;
        unsigned int is_io, got, pgorder;
        struct vm_area_struct *vma;
        unsigned long pfn, i, npages;

        physp = kvm->arch.slot_phys[memslot->id];
        if (!physp)
                return -EINVAL;
        if (physp[gfn - memslot->base_gfn])
                return 0;

        is_io = 0;
        got = 0;
        page = NULL;
        pgsize = psize;
        err = -EINVAL;
        start = gfn_to_hva_memslot(memslot, gfn);

        /* Instantiate and get the page we want access to */
        np = get_user_pages_fast(start, 1, 1, pages);
        if (np != 1) {
                /* Look up the vma for the page */
                down_read(&current->mm->mmap_sem);
                vma = find_vma(current->mm, start);
                if (!vma || vma->vm_start > start ||
                    start + psize > vma->vm_end ||
                    !(vma->vm_flags & VM_PFNMAP))
                        goto up_err;
                is_io = hpte_cache_bits(pgprot_val(vma->vm_page_prot));
                pfn = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
                /* check alignment of pfn vs. requested page size */
                if (psize > PAGE_SIZE && (pfn & ((psize >> PAGE_SHIFT) - 1)))
                        goto up_err;
                up_read(&current->mm->mmap_sem);

        } else {
                page = pages[0];
                got = KVMPPC_GOT_PAGE;

                /* See if this is a large page */
                s = PAGE_SIZE;
                if (PageHuge(page)) {
                        hpage = compound_head(page);
                        s <<= compound_order(hpage);
                        /* Get the whole large page if slot alignment is ok */
                        if (s > psize && slot_is_aligned(memslot, s) &&
                            !(memslot->userspace_addr & (s - 1))) {
                                start &= ~(s - 1);
                                pgsize = s;
                                get_page(hpage);
                                put_page(page);
                                page = hpage;
                        }
                }
                if (s < psize)
                        goto out;
                pfn = page_to_pfn(page);
        }

        npages = pgsize >> PAGE_SHIFT;
        pgorder = __ilog2(npages);
        physp += (gfn - memslot->base_gfn) & ~(npages - 1);
        spin_lock(&kvm->arch.slot_phys_lock);
        for (i = 0; i < npages; ++i) {
                if (!physp[i]) {
                        physp[i] = ((pfn + i) << PAGE_SHIFT) +
                                got + is_io + pgorder;
                        got = 0;
                }
        }
        spin_unlock(&kvm->arch.slot_phys_lock);
        err = 0;

 out:
        if (got)
                put_page(page);
        return err;

 up_err:
        up_read(&current->mm->mmap_sem);
        return err;
}

/*
 * We come here on an H_ENTER call from the guest when we are not
 * using mmu notifiers and we don't have the requested page pinned
 * already.
 */
long kvmppc_virtmode_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
                        long pte_index, unsigned long pteh, unsigned long ptel)
{
        struct kvm *kvm = vcpu->kvm;
        unsigned long psize, gpa, gfn;
        struct kvm_memory_slot *memslot;
        long ret;

        if (kvm->arch.using_mmu_notifiers)
                goto do_insert;

        psize = hpte_page_size(pteh, ptel);
        if (!psize)
                return H_PARAMETER;

        pteh &= ~(HPTE_V_HVLOCK | HPTE_V_ABSENT | HPTE_V_VALID);

        /* Find the memslot (if any) for this address */
        gpa = (ptel & HPTE_R_RPN) & ~(psize - 1);
        gfn = gpa >> PAGE_SHIFT;
        memslot = gfn_to_memslot(kvm, gfn);
        if (memslot && !(memslot->flags & KVM_MEMSLOT_INVALID)) {
                if (!slot_is_aligned(memslot, psize))
                        return H_PARAMETER;
                if (kvmppc_get_guest_page(kvm, gfn, memslot, psize) < 0)
                        return H_PARAMETER;
        }

 do_insert:
        /* Protect linux PTE lookup from page table destruction */
        rcu_read_lock_sched();  /* this disables preemption too */
        vcpu->arch.pgdir = current->mm->pgd;
        ret = kvmppc_h_enter(vcpu, flags, pte_index, pteh, ptel);
        rcu_read_unlock_sched();
        if (ret == H_TOO_HARD) {
                /* this can't happen */
                pr_err("KVM: Oops, kvmppc_h_enter returned too hard!\n");
                ret = H_RESOURCE;       /* or something */
        }
        return ret;

}

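/*
 * Find the guest SLB entry (if any) that translates the given effective
 * address, honouring 256MB vs. 1TB segment sizes.
 */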
static struct kvmppc_slb *kvmppc_mmu_book3s_hv_find_slbe(struct kvm_vcpu *vcpu,
                                                         gva_t eaddr)
{
        u64 mask;
        int i;

        for (i = 0; i < vcpu->arch.slb_nr; i++) {
                if (!(vcpu->arch.slb[i].orige & SLB_ESID_V))
                        continue;

                if (vcpu->arch.slb[i].origv & SLB_VSID_B_1T)
                        mask = ESID_MASK_1T;
                else
                        mask = ESID_MASK;

                if (((vcpu->arch.slb[i].orige ^ eaddr) & mask) == 0)
                        return &vcpu->arch.slb[i];
        }
        return NULL;
}

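/*
 * Combine the real page number from the HPTE with the offset within the
 * page to form the guest real address for an effective address.
 */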
static unsigned long kvmppc_mmu_get_real_addr(unsigned long v, unsigned long r,
                        unsigned long ea)
{
        unsigned long ra_mask;

        ra_mask = hpte_page_size(v, r) - 1;
        return (r & HPTE_R_RPN & ~ra_mask) | (ea & ra_mask);
}

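/*
 * Translate a guest effective address to a guest real address by
 * searching the guest SLB and the hash table, and work out the access
 * permissions for the resulting kvmppc_pte.
 */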
static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
                        struct kvmppc_pte *gpte, bool data)
{
        struct kvm *kvm = vcpu->kvm;
        struct kvmppc_slb *slbe;
        unsigned long slb_v;
        unsigned long pp, key;
        unsigned long v, gr;
        unsigned long *hptep;
        int index;
        int virtmode = vcpu->arch.shregs.msr & (data ? MSR_DR : MSR_IR);

        /* Get SLB entry */
        if (virtmode) {
                slbe = kvmppc_mmu_book3s_hv_find_slbe(vcpu, eaddr);
                if (!slbe)
                        return -EINVAL;
                slb_v = slbe->origv;
        } else {
                /* real mode access */
                slb_v = vcpu->kvm->arch.vrma_slb_v;
        }

        preempt_disable();
        /* Find the HPTE in the hash table */
        index = kvmppc_hv_find_lock_hpte(kvm, eaddr, slb_v,
                                         HPTE_V_VALID | HPTE_V_ABSENT);
        if (index < 0) {
                preempt_enable();
                return -ENOENT;
        }
        hptep = (unsigned long *)(kvm->arch.hpt_virt + (index << 4));
        v = hptep[0] & ~HPTE_V_HVLOCK;
        gr = kvm->arch.revmap[index].guest_rpte;

        /* Unlock the HPTE */
        asm volatile("lwsync" : : : "memory");
        hptep[0] = v;
        preempt_enable();

        gpte->eaddr = eaddr;
        gpte->vpage = ((v & HPTE_V_AVPN) << 4) | ((eaddr >> 12) & 0xfff);

        /* Get PP bits and key for permission check */
        pp = gr & (HPTE_R_PP0 | HPTE_R_PP);
        key = (vcpu->arch.shregs.msr & MSR_PR) ? SLB_VSID_KP : SLB_VSID_KS;
        key &= slb_v;

        /* Calculate permissions */
        gpte->may_read = hpte_read_permission(pp, key);
        gpte->may_write = hpte_write_permission(pp, key);
        gpte->may_execute = gpte->may_read && !(gr & (HPTE_R_N | HPTE_R_G));

        /* Storage key permission check for POWER7 */
        if (data && virtmode && cpu_has_feature(CPU_FTR_ARCH_206)) {
                int amrfield = hpte_get_skey_perm(gr, vcpu->arch.amr);
                if (amrfield & 1)
                        gpte->may_read = 0;
                if (amrfield & 2)
                        gpte->may_write = 0;
        }

        /* Get the guest physical address */
        gpte->raddr = kvmppc_mmu_get_real_addr(v, gr, eaddr);
        return 0;
}

/*
 * Quick test for whether an instruction is a load or a store.
 * If the instruction is a load or a store, then this will indicate
 * which it is, at least on server processors.  (Embedded processors
 * have some external PID instructions that don't follow the rule
 * embodied here.)  If the instruction isn't a load or store, then
 * this doesn't return anything useful.
 */
static int instruction_is_store(unsigned int instr)
{
        unsigned int mask;

        mask = 0x10000000;
        if ((instr & 0xfc000000) == 0x7c000000)
                mask = 0x100;           /* major opcode 31 */
        return (instr & mask) != 0;
}

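/*
 * Handle a fault on an emulated MMIO access: fetch the faulting
 * instruction if we don't already have it, check that its direction
 * (load vs. store) matches the original fault, and hand the access to
 * the MMIO emulation code.
 */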
static int kvmppc_hv_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu,
                                  unsigned long gpa, int is_store)
{
        int ret;
        u32 last_inst;
        unsigned long srr0 = kvmppc_get_pc(vcpu);

        /* We try to load the last instruction.  We don't let
         * emulate_instruction do it as it doesn't check what
         * kvmppc_ld returns.
         * If we fail, we just return to the guest and try executing it again.
         */
        if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED) {
                ret = kvmppc_ld(vcpu, &srr0, sizeof(u32), &last_inst, false);
                if (ret != EMULATE_DONE || last_inst == KVM_INST_FETCH_FAILED)
                        return RESUME_GUEST;
                vcpu->arch.last_inst = last_inst;
        }

        /*
         * WARNING: We do not know for sure whether the instruction we just
         * read from memory is the same that caused the fault in the first
         * place.  If the instruction we read is neither a load nor a store,
         * then it can't access memory, so we don't need to worry about
         * enforcing access permissions.  So, assuming it is a load or
         * store, we just check that its direction (load or store) is
         * consistent with the original fault, since that's what we
         * checked the access permissions against.  If there is a mismatch
         * we just return and retry the instruction.
         */

        if (instruction_is_store(vcpu->arch.last_inst) != !!is_store)
                return RESUME_GUEST;

        /*
         * Emulated accesses are emulated by looking at the hash for
         * translation once, then performing the access later.  The
         * translation could be invalidated in the meantime, at which
         * point performing the subsequent memory access on the old
         * physical address could possibly be a security hole for the
         * guest (but not the host).
         *
         * This is less of an issue for MMIO stores since they aren't
         * globally visible.  It could be an issue for MMIO loads to
         * a certain extent but we'll ignore it for now.
         */

        vcpu->arch.paddr_accessed = gpa;
        return kvmppc_emulate_mmio(run, vcpu);
}

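/*
 * Handle a host-side page fault for a guest HPTE found by the real-mode
 * handler: pin the backing page (or translate a VM_PFNMAP mapping),
 * revalidate the HPTE and make it point at the host page, preserving
 * the R and C bits.
 */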
int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
                                unsigned long ea, unsigned long dsisr)
{
        struct kvm *kvm = vcpu->kvm;
        unsigned long *hptep, hpte[3], r;
        unsigned long mmu_seq, psize, pte_size;
        unsigned long gfn, hva, pfn;
        struct kvm_memory_slot *memslot;
        unsigned long *rmap;
        struct revmap_entry *rev;
        struct page *page, *pages[1];
        long index, ret, npages;
        unsigned long is_io;
        unsigned int writing, write_ok;
        struct vm_area_struct *vma;
        unsigned long rcbits;

        /*
         * Real-mode code has already searched the HPT and found the
         * entry we're interested in.  Lock the entry and check that
         * it hasn't changed.  If it has, just return and re-execute the
         * instruction.
         */
        if (ea != vcpu->arch.pgfault_addr)
                return RESUME_GUEST;
        index = vcpu->arch.pgfault_index;
        hptep = (unsigned long *)(kvm->arch.hpt_virt + (index << 4));
        rev = &kvm->arch.revmap[index];
        preempt_disable();
        while (!try_lock_hpte(hptep, HPTE_V_HVLOCK))
                cpu_relax();
        hpte[0] = hptep[0] & ~HPTE_V_HVLOCK;
        hpte[1] = hptep[1];
        hpte[2] = r = rev->guest_rpte;
        asm volatile("lwsync" : : : "memory");
        hptep[0] = hpte[0];
        preempt_enable();

        if (hpte[0] != vcpu->arch.pgfault_hpte[0] ||
            hpte[1] != vcpu->arch.pgfault_hpte[1])
                return RESUME_GUEST;

        /* Translate the logical address and get the page */
        psize = hpte_page_size(hpte[0], r);
        gfn = hpte_rpn(r, psize);
        memslot = gfn_to_memslot(kvm, gfn);

        /* No memslot means it's an emulated MMIO region */
        if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID)) {
                unsigned long gpa = (gfn << PAGE_SHIFT) | (ea & (psize - 1));
                return kvmppc_hv_emulate_mmio(run, vcpu, gpa,
                                              dsisr & DSISR_ISSTORE);
        }

        if (!kvm->arch.using_mmu_notifiers)
                return -EFAULT;         /* should never get here */

        /* used to check for invalidations in progress */
        mmu_seq = kvm->mmu_notifier_seq;
        smp_rmb();

        is_io = 0;
        pfn = 0;
        page = NULL;
        pte_size = PAGE_SIZE;
        writing = (dsisr & DSISR_ISSTORE) != 0;
        /* If writing != 0, then the HPTE must allow writing, if we get here */
        write_ok = writing;
        hva = gfn_to_hva_memslot(memslot, gfn);
        npages = get_user_pages_fast(hva, 1, writing, pages);
        if (npages < 1) {
                /* Check if it's an I/O mapping */
                down_read(&current->mm->mmap_sem);
                vma = find_vma(current->mm, hva);
                if (vma && vma->vm_start <= hva && hva + psize <= vma->vm_end &&
                    (vma->vm_flags & VM_PFNMAP)) {
                        pfn = vma->vm_pgoff +
                                ((hva - vma->vm_start) >> PAGE_SHIFT);
                        pte_size = psize;
                        is_io = hpte_cache_bits(pgprot_val(vma->vm_page_prot));
                        write_ok = vma->vm_flags & VM_WRITE;
                }
                up_read(&current->mm->mmap_sem);
                if (!pfn)
                        return -EFAULT;
        } else {
                page = pages[0];
                if (PageHuge(page)) {
                        page = compound_head(page);
                        pte_size <<= compound_order(page);
                }
                /* if the guest wants write access, see if that is OK */
                if (!writing && hpte_is_writable(r)) {
                        pte_t *ptep, pte;

                        /*
                         * We need to protect against page table destruction
                         * while looking up and updating the pte.
                         */
                        rcu_read_lock_sched();
                        ptep = find_linux_pte_or_hugepte(current->mm->pgd,
                                                         hva, NULL);
                        if (ptep && pte_present(*ptep)) {
                                pte = kvmppc_read_update_linux_pte(ptep, 1);
                                if (pte_write(pte))
                                        write_ok = 1;
                        }
                        rcu_read_unlock_sched();
                }
                pfn = page_to_pfn(page);
        }

        ret = -EFAULT;
        if (psize > pte_size)
                goto out_put;

        /* Check WIMG vs. the actual page we're accessing */
        if (!hpte_cache_flags_ok(r, is_io)) {
                if (is_io)
                        return -EFAULT;
                /*
                 * Allow guest to map emulated device memory as
                 * uncacheable, but actually make it cacheable.
                 */
                r = (r & ~(HPTE_R_W|HPTE_R_I|HPTE_R_G)) | HPTE_R_M;
        }

        /* Set the HPTE to point to pfn */
        r = (r & ~(HPTE_R_PP0 - pte_size)) | (pfn << PAGE_SHIFT);
        if (hpte_is_writable(r) && !write_ok)
                r = hpte_make_readonly(r);
        ret = RESUME_GUEST;
        preempt_disable();
        while (!try_lock_hpte(hptep, HPTE_V_HVLOCK))
                cpu_relax();
        if ((hptep[0] & ~HPTE_V_HVLOCK) != hpte[0] || hptep[1] != hpte[1] ||
            rev->guest_rpte != hpte[2])
                /* HPTE has been changed under us; let the guest retry */
                goto out_unlock;
        hpte[0] = (hpte[0] & ~HPTE_V_ABSENT) | HPTE_V_VALID;

        rmap = &memslot->rmap[gfn - memslot->base_gfn];
        lock_rmap(rmap);

        /* Check if we might have been invalidated; let the guest retry if so */
        ret = RESUME_GUEST;
        if (mmu_notifier_retry(vcpu, mmu_seq)) {
                unlock_rmap(rmap);
                goto out_unlock;
        }

        /* Only set R/C in real HPTE if set in both *rmap and guest_rpte */
        rcbits = *rmap >> KVMPPC_RMAP_RC_SHIFT;
        r &= rcbits | ~(HPTE_R_R | HPTE_R_C);

        if (hptep[0] & HPTE_V_VALID) {
                /* HPTE was previously valid, so we need to invalidate it */
                unlock_rmap(rmap);
                hptep[0] |= HPTE_V_ABSENT;
                kvmppc_invalidate_hpte(kvm, hptep, index);
                /* don't lose previous R and C bits */
                r |= hptep[1] & (HPTE_R_R | HPTE_R_C);
        } else {
                kvmppc_add_revmap_chain(kvm, rev, rmap, index, 0);
        }

        hptep[1] = r;
        eieio();
        hptep[0] = hpte[0];
        asm volatile("ptesync" : : : "memory");
        preempt_enable();
        if (page && hpte_is_writable(r))
                SetPageDirty(page);

 out_put:
        if (page) {
                /*
                 * We drop pages[0] here, not page because page might
                 * have been set to the head page of a compound, but
                 * we have to drop the reference on the correct tail
                 * page to match the get inside gup()
                 */
                put_page(pages[0]);
        }
        return ret;

 out_unlock:
        hptep[0] &= ~HPTE_V_HVLOCK;
        preempt_enable();
        goto out_put;
}

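/*
 * Call the given handler on the rmap entry corresponding to the given
 * host virtual address in every memslot that covers that address.
 */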
static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
                          int (*handler)(struct kvm *kvm, unsigned long *rmapp,
                                         unsigned long gfn))
{
        int ret;
        int retval = 0;
        struct kvm_memslots *slots;
        struct kvm_memory_slot *memslot;

        slots = kvm_memslots(kvm);
        kvm_for_each_memslot(memslot, slots) {
                unsigned long start = memslot->userspace_addr;
                unsigned long end;

                end = start + (memslot->npages << PAGE_SHIFT);
                if (hva >= start && hva < end) {
                        gfn_t gfn_offset = (hva - start) >> PAGE_SHIFT;

                        ret = handler(kvm, &memslot->rmap[gfn_offset],
                                      memslot->base_gfn + gfn_offset);
                        retval |= ret;
                }
        }

        return retval;
}

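/*
 * Remove every HPTE on the rmap chain for this guest page, marking each
 * one absent and harvesting its R and C bits into the rmap entry and
 * the guest view of the HPTE.
 */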
static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp,
                           unsigned long gfn)
{
        struct revmap_entry *rev = kvm->arch.revmap;
        unsigned long h, i, j;
        unsigned long *hptep;
        unsigned long ptel, psize, rcbits;

        for (;;) {
                lock_rmap(rmapp);
                if (!(*rmapp & KVMPPC_RMAP_PRESENT)) {
                        unlock_rmap(rmapp);
                        break;
                }

                /*
                 * To avoid an ABBA deadlock with the HPTE lock bit,
                 * we can't spin on the HPTE lock while holding the
                 * rmap chain lock.
                 */
                i = *rmapp & KVMPPC_RMAP_INDEX;
                hptep = (unsigned long *) (kvm->arch.hpt_virt + (i << 4));
                if (!try_lock_hpte(hptep, HPTE_V_HVLOCK)) {
                        /* unlock rmap before spinning on the HPTE lock */
                        unlock_rmap(rmapp);
                        while (hptep[0] & HPTE_V_HVLOCK)
                                cpu_relax();
                        continue;
                }
                j = rev[i].forw;
                if (j == i) {
                        /* chain is now empty */
                        *rmapp &= ~(KVMPPC_RMAP_PRESENT | KVMPPC_RMAP_INDEX);
                } else {
                        /* remove i from chain */
                        h = rev[i].back;
                        rev[h].forw = j;
                        rev[j].back = h;
                        rev[i].forw = rev[i].back = i;
                        *rmapp = (*rmapp & ~KVMPPC_RMAP_INDEX) | j;
                }

                /* Now check and modify the HPTE */
                ptel = rev[i].guest_rpte;
                psize = hpte_page_size(hptep[0], ptel);
                if ((hptep[0] & HPTE_V_VALID) &&
                    hpte_rpn(ptel, psize) == gfn) {
                        hptep[0] |= HPTE_V_ABSENT;
                        kvmppc_invalidate_hpte(kvm, hptep, i);
                        /* Harvest R and C */
                        rcbits = hptep[1] & (HPTE_R_R | HPTE_R_C);
                        *rmapp |= rcbits << KVMPPC_RMAP_RC_SHIFT;
                        rev[i].guest_rpte = ptel | rcbits;
                }
                unlock_rmap(rmapp);
                hptep[0] &= ~HPTE_V_HVLOCK;
        }
        return 0;
}

int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
{
        if (kvm->arch.using_mmu_notifiers)
                kvm_handle_hva(kvm, hva, kvm_unmap_rmapp);
        return 0;
}

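/*
 * Test and clear the referenced (R) bit for this guest page, in both
 * the rmap entry and any HPTEs that map it.  Returns 1 if the page had
 * been referenced.
 */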
static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
                         unsigned long gfn)
{
        struct revmap_entry *rev = kvm->arch.revmap;
        unsigned long head, i, j;
        unsigned long *hptep;
        int ret = 0;

 retry:
        lock_rmap(rmapp);
        if (*rmapp & KVMPPC_RMAP_REFERENCED) {
                *rmapp &= ~KVMPPC_RMAP_REFERENCED;
                ret = 1;
        }
        if (!(*rmapp & KVMPPC_RMAP_PRESENT)) {
                unlock_rmap(rmapp);
                return ret;
        }

        i = head = *rmapp & KVMPPC_RMAP_INDEX;
        do {
                hptep = (unsigned long *) (kvm->arch.hpt_virt + (i << 4));
                j = rev[i].forw;

                /* If this HPTE isn't referenced, ignore it */
                if (!(hptep[1] & HPTE_R_R))
                        continue;

                if (!try_lock_hpte(hptep, HPTE_V_HVLOCK)) {
                        /* unlock rmap before spinning on the HPTE lock */
                        unlock_rmap(rmapp);
                        while (hptep[0] & HPTE_V_HVLOCK)
                                cpu_relax();
                        goto retry;
                }

                /* Now check and modify the HPTE */
                if ((hptep[0] & HPTE_V_VALID) && (hptep[1] & HPTE_R_R)) {
                        kvmppc_clear_ref_hpte(kvm, hptep, i);
                        rev[i].guest_rpte |= HPTE_R_R;
                        ret = 1;
                }
                hptep[0] &= ~HPTE_V_HVLOCK;
        } while ((i = j) != head);

        unlock_rmap(rmapp);
        return ret;
}

int kvm_age_hva(struct kvm *kvm, unsigned long hva)
{
        if (!kvm->arch.using_mmu_notifiers)
                return 0;
        return kvm_handle_hva(kvm, hva, kvm_age_rmapp);
}

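/*
 * Test (without clearing) whether this guest page has been referenced,
 * by checking the rmap entry and the R bit of any HPTEs on its chain.
 */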
static int kvm_test_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
                              unsigned long gfn)
{
        struct revmap_entry *rev = kvm->arch.revmap;
        unsigned long head, i, j;
        unsigned long *hp;
        int ret = 1;

        if (*rmapp & KVMPPC_RMAP_REFERENCED)
                return 1;

        lock_rmap(rmapp);
        if (*rmapp & KVMPPC_RMAP_REFERENCED)
                goto out;

        if (*rmapp & KVMPPC_RMAP_PRESENT) {
                i = head = *rmapp & KVMPPC_RMAP_INDEX;
                do {
                        hp = (unsigned long *)(kvm->arch.hpt_virt + (i << 4));
                        j = rev[i].forw;
                        if (hp[1] & HPTE_R_R)
                                goto out;
                } while ((i = j) != head);
        }
        ret = 0;

 out:
        unlock_rmap(rmapp);
        return ret;
}

int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
{
        if (!kvm->arch.using_mmu_notifiers)
                return 0;
        return kvm_handle_hva(kvm, hva, kvm_test_age_rmapp);
}

void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
{
        if (!kvm->arch.using_mmu_notifiers)
                return;
        kvm_handle_hva(kvm, hva, kvm_unmap_rmapp);
}

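/*
 * Test and clear the changed (C) bit for this guest page, making any
 * HPTE that maps it temporarily absent so that C can be cleared safely.
 * Returns 1 if the page was dirty.
 */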
static int kvm_test_clear_dirty(struct kvm *kvm, unsigned long *rmapp)
{
        struct revmap_entry *rev = kvm->arch.revmap;
        unsigned long head, i, j;
        unsigned long *hptep;
        int ret = 0;

 retry:
        lock_rmap(rmapp);
        if (*rmapp & KVMPPC_RMAP_CHANGED) {
                *rmapp &= ~KVMPPC_RMAP_CHANGED;
                ret = 1;
        }
        if (!(*rmapp & KVMPPC_RMAP_PRESENT)) {
                unlock_rmap(rmapp);
                return ret;
        }

        i = head = *rmapp & KVMPPC_RMAP_INDEX;
        do {
                hptep = (unsigned long *) (kvm->arch.hpt_virt + (i << 4));
                j = rev[i].forw;

                if (!(hptep[1] & HPTE_R_C))
                        continue;

                if (!try_lock_hpte(hptep, HPTE_V_HVLOCK)) {
                        /* unlock rmap before spinning on the HPTE lock */
                        unlock_rmap(rmapp);
                        while (hptep[0] & HPTE_V_HVLOCK)
                                cpu_relax();
                        goto retry;
                }

                /* Now check and modify the HPTE */
                if ((hptep[0] & HPTE_V_VALID) && (hptep[1] & HPTE_R_C)) {
                        /* need to make it temporarily absent to clear C */
                        hptep[0] |= HPTE_V_ABSENT;
                        kvmppc_invalidate_hpte(kvm, hptep, i);
                        hptep[1] &= ~HPTE_R_C;
                        eieio();
                        hptep[0] = (hptep[0] & ~HPTE_V_ABSENT) | HPTE_V_VALID;
                        rev[i].guest_rpte |= HPTE_R_C;
                        ret = 1;
                }
                hptep[0] &= ~HPTE_V_HVLOCK;
        } while ((i = j) != head);

        unlock_rmap(rmapp);
        return ret;
}

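/*
 * Build the dirty bitmap for a memslot by testing and clearing the
 * changed bit for each of its pages.
 */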
long kvmppc_hv_get_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
{
        unsigned long i;
        unsigned long *rmapp, *map;

        preempt_disable();
        rmapp = memslot->rmap;
        map = memslot->dirty_bitmap;
        for (i = 0; i < memslot->npages; ++i) {
                if (kvm_test_clear_dirty(kvm, rmapp))
                        __set_bit_le(i, map);
                ++rmapp;
        }
        preempt_enable();
        return 0;
}

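/*
 * Return a kernel-virtual pointer to the page backing a guest physical
 * address, taking a reference on it.  *nb_ret is set to the number of
 * bytes usable from the returned pointer before the end of the page.
 * The reference is dropped by kvmppc_unpin_guest_page().
 */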
void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long gpa,
                            unsigned long *nb_ret)
{
        struct kvm_memory_slot *memslot;
        unsigned long gfn = gpa >> PAGE_SHIFT;
        struct page *page, *pages[1];
        int npages;
        unsigned long hva, psize, offset;
        unsigned long pa;
        unsigned long *physp;

        memslot = gfn_to_memslot(kvm, gfn);
        if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID))
                return NULL;
        if (!kvm->arch.using_mmu_notifiers) {
                physp = kvm->arch.slot_phys[memslot->id];
                if (!physp)
                        return NULL;
                physp += gfn - memslot->base_gfn;
                pa = *physp;
                if (!pa) {
                        if (kvmppc_get_guest_page(kvm, gfn, memslot,
                                                  PAGE_SIZE) < 0)
                                return NULL;
                        pa = *physp;
                }
                page = pfn_to_page(pa >> PAGE_SHIFT);
                get_page(page);
        } else {
                hva = gfn_to_hva_memslot(memslot, gfn);
                npages = get_user_pages_fast(hva, 1, 1, pages);
                if (npages < 1)
                        return NULL;
                page = pages[0];
        }
        psize = PAGE_SIZE;
        if (PageHuge(page)) {
                page = compound_head(page);
                psize <<= compound_order(page);
        }
        offset = gpa & (psize - 1);
        if (nb_ret)
                *nb_ret = psize - offset;
        return page_address(page) + offset;
}

void kvmppc_unpin_guest_page(struct kvm *kvm, void *va)
{
        struct page *page = virt_to_page(va);

        put_page(page);
}

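/*
 * Set up the MMU translation callbacks and guest SLB size for an HV
 * guest vcpu.
 */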
void kvmppc_mmu_book3s_hv_init(struct kvm_vcpu *vcpu)
{
        struct kvmppc_mmu *mmu = &vcpu->arch.mmu;

        if (cpu_has_feature(CPU_FTR_ARCH_206))
                vcpu->arch.slb_nr = 32;         /* POWER7 */
        else
                vcpu->arch.slb_nr = 64;

        mmu->xlate = kvmppc_mmu_book3s_64_hv_xlate;
        mmu->reset_msr = kvmppc_mmu_book3s_64_hv_reset_msr;

        vcpu->arch.hflags |= BOOK3S_HFLAG_SLB;
}