// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright 2010 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 * Copyright 2011 David Gibson, IBM Corporation <dwg@au1.ibm.com>
 * Copyright 2016 Alexey Kardashevskiy, IBM Corporation <aik@au1.ibm.com>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/hugetlb.h>
#include <linux/list.h>
#include <linux/anon_inodes.h>
#include <linux/iommu.h>
#include <linux/file.h>
#include <linux/mm.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/book3s/64/mmu-hash.h>
#include <asm/hvcall.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>
#include <asm/udbg.h>
#include <asm/iommu.h>
#include <asm/tce.h>
#include <asm/mmu_context.h>

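/*
 * Find the TCE table backing the given LIOBN. Traversal is lockless;
 * entries are unlinked with list_del_rcu() and freed via call_rcu(),
 * so a table found here stays valid for the current grace period.
 */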
static struct kvmppc_spapr_tce_table *kvmppc_find_table(struct kvm *kvm,
		unsigned long liobn)
{
	struct kvmppc_spapr_tce_table *stt;

	list_for_each_entry_lockless(stt, &kvm->arch.spapr_tce_tables, list)
		if (stt->liobn == liobn)
			return stt;

	return NULL;
}

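/* Number of host pages needed to store @iommu_pages 64bit TCEs. */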
static unsigned long kvmppc_tce_pages(unsigned long iommu_pages)
{
	return ALIGN(iommu_pages * sizeof(u64), PAGE_SIZE) / PAGE_SIZE;
}

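/*
 * Total pages to charge against the locked_vm limit: the TCE backing
 * pages themselves plus the descriptor with its pages[] array.
 */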
static unsigned long kvmppc_stt_pages(unsigned long tce_pages)
{
	unsigned long stt_bytes = sizeof(struct kvmppc_spapr_tce_table) +
			(tce_pages * sizeof(struct page *));

	return tce_pages + ALIGN(stt_bytes, PAGE_SIZE) / PAGE_SIZE;
}

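/* RCU callback: drop the iommu_table reference and free the wrapper. */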
static void kvm_spapr_tce_iommu_table_free(struct rcu_head *head)
{
	struct kvmppc_spapr_tce_iommu_table *stit = container_of(head,
			struct kvmppc_spapr_tce_iommu_table, rcu);

	iommu_tce_table_put(stit->tbl);

	kfree(stit);
}

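/*
 * Final kref release: unlink the table from the LIOBN and defer the
 * actual freeing until after an RCU grace period so that lockless
 * walkers of stt->iommu_tables do not see freed memory.
 */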
static void kvm_spapr_tce_liobn_put(struct kref *kref)
{
	struct kvmppc_spapr_tce_iommu_table *stit = container_of(kref,
			struct kvmppc_spapr_tce_iommu_table, kref);

	list_del_rcu(&stit->next);

	call_rcu(&stit->rcu, kvm_spapr_tce_iommu_table_free);
}

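/*
 * Called when an IOMMU group goes away: drop every KVM reference that
 * any TCE table of this VM holds on the group's hardware tables.
 */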
extern void kvm_spapr_tce_release_iommu_group(struct kvm *kvm,
		struct iommu_group *grp)
{
	int i;
	struct kvmppc_spapr_tce_table *stt;
	struct kvmppc_spapr_tce_iommu_table *stit, *tmp;
	struct iommu_table_group *table_group = NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(stt, &kvm->arch.spapr_tce_tables, list) {

		table_group = iommu_group_get_iommudata(grp);
		if (WARN_ON(!table_group))
			continue;

		list_for_each_entry_safe(stit, tmp, &stt->iommu_tables, next) {
			for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
				if (table_group->tables[i] != stit->tbl)
					continue;

				kref_put(&stit->kref, kvm_spapr_tce_liobn_put);
			}
		}
		cond_resched_rcu();
	}
	rcu_read_unlock();
}

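/*
 * Associate an IOMMU group's hardware table with the guest TCE table
 * identified by @tablefd. Fails unless one of the group's tables is
 * compatible with the guest view (page size, offset and window size).
 */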
extern long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd,
		struct iommu_group *grp)
{
	struct kvmppc_spapr_tce_table *stt = NULL;
	bool found = false;
	struct iommu_table *tbl = NULL;
	struct iommu_table_group *table_group;
	long i;
	struct kvmppc_spapr_tce_iommu_table *stit;
	struct fd f;

	f = fdget(tablefd);
	if (!f.file)
		return -EBADF;

	rcu_read_lock();
	list_for_each_entry_rcu(stt, &kvm->arch.spapr_tce_tables, list) {
		if (stt == f.file->private_data) {
			found = true;
			break;
		}
	}
	rcu_read_unlock();

	fdput(f);

	if (!found)
		return -EINVAL;

	table_group = iommu_group_get_iommudata(grp);
	if (WARN_ON(!table_group))
		return -EFAULT;

	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		struct iommu_table *tbltmp = table_group->tables[i];

		if (!tbltmp)
			continue;
		/* Make sure hardware table parameters are compatible */
		if ((tbltmp->it_page_shift <= stt->page_shift) &&
				(tbltmp->it_offset << tbltmp->it_page_shift ==
				 stt->offset << stt->page_shift) &&
				(tbltmp->it_size << tbltmp->it_page_shift >=
				 stt->size << stt->page_shift)) {
			/*
			 * Reference the table to avoid races with
			 * add/remove DMA windows.
			 */
			tbl = iommu_tce_table_get(tbltmp);
			break;
		}
	}
	if (!tbl)
		return -EINVAL;

	rcu_read_lock();
	list_for_each_entry_rcu(stit, &stt->iommu_tables, next) {
		if (tbl != stit->tbl)
			continue;

		if (!kref_get_unless_zero(&stit->kref)) {
			/* stit is being destroyed */
			iommu_tce_table_put(tbl);
			rcu_read_unlock();
			return -ENOTTY;
		}
		/*
		 * The table is already known to this KVM, we just increased
		 * its KVM reference counter and can return.
		 */
		rcu_read_unlock();
		return 0;
	}
	rcu_read_unlock();

	stit = kzalloc(sizeof(*stit), GFP_KERNEL);
	if (!stit) {
		iommu_tce_table_put(tbl);
		return -ENOMEM;
	}

	stit->tbl = tbl;
	kref_init(&stit->kref);

	list_add_rcu(&stit->next, &stt->iommu_tables);

	return 0;
}

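/* RCU callback: free the TCE backing pages and the table descriptor. */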
static void release_spapr_tce_table(struct rcu_head *head)
{
	struct kvmppc_spapr_tce_table *stt = container_of(head,
			struct kvmppc_spapr_tce_table, rcu);
	unsigned long i, npages = kvmppc_tce_pages(stt->size);

	for (i = 0; i < npages; i++)
		if (stt->pages[i])
			__free_page(stt->pages[i]);

	kfree(stt);
}

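/*
 * Return the backing page for TCE page @sttpage, allocating it lazily
 * on first use. alloc_lock serializes allocation so that concurrent
 * callers do not install two different pages for the same slot.
 */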
static struct page *kvm_spapr_get_tce_page(struct kvmppc_spapr_tce_table *stt,
		unsigned long sttpage)
{
	struct page *page = stt->pages[sttpage];

	if (page)
		return page;

	mutex_lock(&stt->alloc_lock);
	page = stt->pages[sttpage];
	if (!page) {
		page = alloc_page(GFP_KERNEL | __GFP_ZERO);
		WARN_ON_ONCE(!page);
		if (page)
			stt->pages[sttpage] = page;
	}
	mutex_unlock(&stt->alloc_lock);

	return page;
}

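/*
 * Page fault handler for the anon fd mmap: userspace maps the table
 * to read the guest-written TCEs directly.
 */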
static vm_fault_t kvm_spapr_tce_fault(struct vm_fault *vmf)
{
	struct kvmppc_spapr_tce_table *stt = vmf->vma->vm_file->private_data;
	struct page *page;

	if (vmf->pgoff >= kvmppc_tce_pages(stt->size))
		return VM_FAULT_SIGBUS;

	page = kvm_spapr_get_tce_page(stt, vmf->pgoff);
	if (!page)
		return VM_FAULT_OOM;

	get_page(page);
	vmf->page = page;
	return 0;
}

static const struct vm_operations_struct kvm_spapr_tce_vm_ops = {
	.fault = kvm_spapr_tce_fault,
};

static int kvm_spapr_tce_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_ops = &kvm_spapr_tce_vm_ops;
	return 0;
}

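/*
 * fd release: unlink the table from the VM, drop all hardware table
 * references, return the locked_vm accounting and free via RCU.
 */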
static int kvm_spapr_tce_release(struct inode *inode, struct file *filp)
{
	struct kvmppc_spapr_tce_table *stt = filp->private_data;
	struct kvmppc_spapr_tce_iommu_table *stit, *tmp;
	struct kvm *kvm = stt->kvm;

	mutex_lock(&kvm->lock);
	list_del_rcu(&stt->list);
	mutex_unlock(&kvm->lock);

	list_for_each_entry_safe(stit, tmp, &stt->iommu_tables, next) {
		WARN_ON(!kref_read(&stit->kref));
		while (1) {
			if (kref_put(&stit->kref, kvm_spapr_tce_liobn_put))
				break;
		}
	}

	account_locked_vm(kvm->mm,
		kvmppc_stt_pages(kvmppc_tce_pages(stt->size)), false);

	kvm_put_kvm(stt->kvm);

	call_rcu(&stt->rcu, release_spapr_tce_table);

	return 0;
}

static const struct file_operations kvm_spapr_tce_fops = {
	.mmap		= kvm_spapr_tce_mmap,
	.release	= kvm_spapr_tce_release,
};

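/*
 * KVM_CREATE_SPAPR_TCE{,_64} ioctl handler: validates the window
 * geometry, accounts locked memory, allocates the descriptor and
 * returns an anon fd whose private_data is the new table.
 */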
long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
				   struct kvm_create_spapr_tce_64 *args)
{
	struct kvmppc_spapr_tce_table *stt = NULL;
	struct kvmppc_spapr_tce_table *siter;
	struct mm_struct *mm = kvm->mm;
	unsigned long npages, size = args->size;
	int ret;

	if (!args->size || args->page_shift < 12 || args->page_shift > 34 ||
		(args->offset + args->size > (ULLONG_MAX >> args->page_shift)))
		return -EINVAL;

	npages = kvmppc_tce_pages(size);
	ret = account_locked_vm(mm, kvmppc_stt_pages(npages), true);
	if (ret)
		return ret;

	ret = -ENOMEM;
	stt = kzalloc(struct_size(stt, pages, npages), GFP_KERNEL);
	if (!stt)
		goto fail_acct;

	stt->liobn = args->liobn;
	stt->page_shift = args->page_shift;
	stt->offset = args->offset;
	stt->size = size;
	stt->kvm = kvm;
	mutex_init(&stt->alloc_lock);
	INIT_LIST_HEAD_RCU(&stt->iommu_tables);

	mutex_lock(&kvm->lock);

	/* Check this LIOBN hasn't been previously allocated */
	ret = 0;
	list_for_each_entry(siter, &kvm->arch.spapr_tce_tables, list) {
		if (siter->liobn == args->liobn) {
			ret = -EBUSY;
			break;
		}
	}

	kvm_get_kvm(kvm);
	if (!ret)
		ret = anon_inode_getfd("kvm-spapr-tce", &kvm_spapr_tce_fops,
				       stt, O_RDWR | O_CLOEXEC);

	if (ret >= 0)
		list_add_rcu(&stt->list, &kvm->arch.spapr_tce_tables);
	else
		kvm_put_kvm_no_destroy(kvm);

	mutex_unlock(&kvm->lock);

	if (ret >= 0)
		return ret;

	kfree(stt);
fail_acct:
	account_locked_vm(mm, kvmppc_stt_pages(npages), false);
	return ret;
}

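/*
 * Translate the guest physical address carried in a TCE into a
 * userspace address via the memslots, preserving the in-page offset.
 */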
static long kvmppc_tce_to_ua(struct kvm *kvm, unsigned long tce,
		unsigned long *ua)
{
	unsigned long gfn = tce >> PAGE_SHIFT;
	struct kvm_memory_slot *memslot;

	memslot = __gfn_to_memslot(kvm_memslots(kvm), gfn);
	if (!memslot)
		return -EINVAL;

	*ua = __gfn_to_hva_memslot(memslot, gfn) |
		(tce & ~(PAGE_MASK | TCE_PCI_READ | TCE_PCI_WRITE));

	return 0;
}

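/*
 * Validate a guest TCE before it is committed: check alignment against
 * the table page size, that the GPA is backed by a memslot, and that
 * every attached hardware table has preregistered memory covering it.
 */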
static long kvmppc_tce_validate(struct kvmppc_spapr_tce_table *stt,
		unsigned long tce)
{
	unsigned long gpa = tce & ~(TCE_PCI_READ | TCE_PCI_WRITE);
	enum dma_data_direction dir = iommu_tce_direction(tce);
	struct kvmppc_spapr_tce_iommu_table *stit;
	unsigned long ua = 0;

	/* Allow userspace to poison TCE table */
	if (dir == DMA_NONE)
		return H_SUCCESS;

	if (iommu_tce_check_gpa(stt->page_shift, gpa))
		return H_TOO_HARD;

	if (kvmppc_tce_to_ua(stt->kvm, tce, &ua))
		return H_TOO_HARD;

	rcu_read_lock();
	list_for_each_entry_rcu(stit, &stt->iommu_tables, next) {
		unsigned long hpa = 0;
		struct mm_iommu_table_group_mem_t *mem;
		long shift = stit->tbl->it_page_shift;

		mem = mm_iommu_lookup(stt->kvm->mm, ua, 1ULL << shift);
		if (!mem || mm_iommu_ua_to_hpa(mem, ua, shift, &hpa)) {
			rcu_read_unlock();
			return H_TOO_HARD;
		}
	}
	rcu_read_unlock();

	return H_SUCCESS;
}

/*
 * Handles TCE requests for emulated devices.
 * Puts guest TCE values to the table and expects user space to convert them.
 * Cannot fail, so kvmppc_tce_validate must be called before it.
 */
static void kvmppc_tce_put(struct kvmppc_spapr_tce_table *stt,
		unsigned long idx, unsigned long tce)
{
	struct page *page;
	u64 *tbl;
	unsigned long sttpage;

	idx -= stt->offset;
	sttpage = idx / TCES_PER_PAGE;
	page = stt->pages[sttpage];

	if (!page) {
		/* We allow any TCE, not just with read|write permissions */
		if (!tce)
			return;

		page = kvm_spapr_get_tce_page(stt, sttpage);
		if (!page)
			return;
	}
	tbl = page_to_virt(page);

	tbl[idx % TCES_PER_PAGE] = tce;
}

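/*
 * Best-effort cleanup after a failed map/unmap: clear all hardware TCEs
 * covered by one guest entry (a guest TCE page may span several IOMMU
 * pages when the page sizes differ).
 */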
static void kvmppc_clear_tce(struct mm_struct *mm, struct kvmppc_spapr_tce_table *stt,
		struct iommu_table *tbl, unsigned long entry)
{
	unsigned long i;
	unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
	unsigned long io_entry = entry << (stt->page_shift - tbl->it_page_shift);

	for (i = 0; i < subpages; ++i) {
		unsigned long hpa = 0;
		enum dma_data_direction dir = DMA_NONE;

		iommu_tce_xchg_no_kill(mm, tbl, io_entry + i, &hpa, &dir);
	}
}

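/*
 * Decrement the "mapped" counter of the preregistered memory chunk
 * recorded in the userspace view of the hardware TCE, and clear that
 * record.
 */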
static long kvmppc_tce_iommu_mapped_dec(struct kvm *kvm,
		struct iommu_table *tbl, unsigned long entry)
{
	struct mm_iommu_table_group_mem_t *mem = NULL;
	const unsigned long pgsize = 1ULL << tbl->it_page_shift;
	__be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry);

	if (!pua)
		return H_SUCCESS;

	mem = mm_iommu_lookup(kvm->mm, be64_to_cpu(*pua), pgsize);
	if (!mem)
		return H_TOO_HARD;

	mm_iommu_mapped_dec(mem);

	*pua = cpu_to_be64(0);

	return H_SUCCESS;
}

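/*
 * Clear one hardware TCE and release the preregistered-memory mapping
 * it held, if any. On failure the old entry is put back.
 */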
static long kvmppc_tce_iommu_do_unmap(struct kvm *kvm,
		struct iommu_table *tbl, unsigned long entry)
{
	enum dma_data_direction dir = DMA_NONE;
	unsigned long hpa = 0;
	long ret;

	if (WARN_ON_ONCE(iommu_tce_xchg_no_kill(kvm->mm, tbl, entry, &hpa,
					&dir)))
		return H_TOO_HARD;

	if (dir == DMA_NONE)
		return H_SUCCESS;

	ret = kvmppc_tce_iommu_mapped_dec(kvm, tbl, entry);
	if (ret != H_SUCCESS)
		iommu_tce_xchg_no_kill(kvm->mm, tbl, entry, &hpa, &dir);

	return ret;
}

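/* Unmap all IOMMU subpages backing one guest TCE entry. */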
static long kvmppc_tce_iommu_unmap(struct kvm *kvm,
		struct kvmppc_spapr_tce_table *stt, struct iommu_table *tbl,
		unsigned long entry)
{
	unsigned long i, ret = H_SUCCESS;
	unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
	unsigned long io_entry = entry * subpages;

	for (i = 0; i < subpages; ++i) {
		ret = kvmppc_tce_iommu_do_unmap(kvm, tbl, io_entry + i);
		if (ret != H_SUCCESS)
			break;
	}

	iommu_tce_kill(tbl, io_entry, subpages);

	return ret;
}

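/*
 * Map one hardware TCE: resolve @ua through the preregistered memory,
 * take a mapped reference, install the HPA and record @ua in the
 * userspace view. The reference of a previously mapped entry, if any,
 * is dropped after the exchange.
 */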
static long kvmppc_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl,
		unsigned long entry, unsigned long ua,
		enum dma_data_direction dir)
{
	long ret;
	unsigned long hpa;
	__be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry);
	struct mm_iommu_table_group_mem_t *mem;

	if (!pua)
		/* it_userspace allocation might be delayed */
		return H_TOO_HARD;

	mem = mm_iommu_lookup(kvm->mm, ua, 1ULL << tbl->it_page_shift);
	if (!mem)
		/* This only handles v2 IOMMU type, v1 is handled via ioctl() */
		return H_TOO_HARD;

	if (WARN_ON_ONCE(mm_iommu_ua_to_hpa(mem, ua, tbl->it_page_shift, &hpa)))
		return H_TOO_HARD;

	if (mm_iommu_mapped_inc(mem))
		return H_TOO_HARD;

	ret = iommu_tce_xchg_no_kill(kvm->mm, tbl, entry, &hpa, &dir);
	if (WARN_ON_ONCE(ret)) {
		mm_iommu_mapped_dec(mem);
		return H_TOO_HARD;
	}

	if (dir != DMA_NONE)
		kvmppc_tce_iommu_mapped_dec(kvm, tbl, entry);

	*pua = cpu_to_be64(ua);

	return 0;
}

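/* Map all IOMMU subpages backing one guest TCE entry. */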
static long kvmppc_tce_iommu_map(struct kvm *kvm,
		struct kvmppc_spapr_tce_table *stt, struct iommu_table *tbl,
		unsigned long entry, unsigned long ua,
		enum dma_data_direction dir)
{
	unsigned long i, pgoff, ret = H_SUCCESS;
	unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
	unsigned long io_entry = entry * subpages;

	for (i = 0, pgoff = 0; i < subpages;
			++i, pgoff += IOMMU_PAGE_SIZE(tbl)) {

		ret = kvmppc_tce_iommu_do_map(kvm, tbl,
				io_entry + i, ua + pgoff, dir);
		if (ret != H_SUCCESS)
			break;
	}

	iommu_tce_kill(tbl, io_entry, subpages);

	return ret;
}

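/*
 * H_PUT_TCE hypercall: validate and install a single TCE, updating
 * both the emulated table and any attached hardware tables.
 */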
long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
		      unsigned long ioba, unsigned long tce)
{
	struct kvmppc_spapr_tce_table *stt;
	long ret, idx;
	struct kvmppc_spapr_tce_iommu_table *stit;
	unsigned long entry, ua = 0;
	enum dma_data_direction dir;

	/* udbg_printf("H_PUT_TCE(): liobn=0x%lx ioba=0x%lx, tce=0x%lx\n", */
	/* 	    liobn, ioba, tce); */

	stt = kvmppc_find_table(vcpu->kvm, liobn);
	if (!stt)
		return H_TOO_HARD;

	ret = kvmppc_ioba_validate(stt, ioba, 1);
	if (ret != H_SUCCESS)
		return ret;

	idx = srcu_read_lock(&vcpu->kvm->srcu);

	ret = kvmppc_tce_validate(stt, tce);
	if (ret != H_SUCCESS)
		goto unlock_exit;

	dir = iommu_tce_direction(tce);

	if ((dir != DMA_NONE) && kvmppc_tce_to_ua(vcpu->kvm, tce, &ua)) {
		ret = H_PARAMETER;
		goto unlock_exit;
	}

	entry = ioba >> stt->page_shift;

	list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
		if (dir == DMA_NONE)
			ret = kvmppc_tce_iommu_unmap(vcpu->kvm, stt,
					stit->tbl, entry);
		else
			ret = kvmppc_tce_iommu_map(vcpu->kvm, stt, stit->tbl,
					entry, ua, dir);

		if (ret != H_SUCCESS) {
			kvmppc_clear_tce(vcpu->kvm->mm, stt, stit->tbl, entry);
			goto unlock_exit;
		}
	}

	kvmppc_tce_put(stt, entry, tce);

unlock_exit:
	srcu_read_unlock(&vcpu->kvm->srcu, idx);

	return ret;
}
EXPORT_SYMBOL_GPL(kvmppc_h_put_tce);

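/*
 * H_PUT_TCE_INDIRECT hypercall: install up to 512 TCEs read from a
 * guest page at @tce_list. TCEs are validated in a first pass, then
 * re-read and applied; see the comment below on why that is safe.
 */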
long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
		unsigned long liobn, unsigned long ioba,
		unsigned long tce_list, unsigned long npages)
{
	struct kvmppc_spapr_tce_table *stt;
	long i, ret = H_SUCCESS, idx;
	unsigned long entry, ua = 0;
	u64 __user *tces;
	u64 tce;
	struct kvmppc_spapr_tce_iommu_table *stit;

	stt = kvmppc_find_table(vcpu->kvm, liobn);
	if (!stt)
		return H_TOO_HARD;

	entry = ioba >> stt->page_shift;
	/*
	 * The SPAPR spec says that the maximum size of the list is 512 TCEs,
	 * so the whole list fits in a 4K page.
	 */
	if (npages > 512)
		return H_PARAMETER;

	if (tce_list & (SZ_4K - 1))
		return H_PARAMETER;

	ret = kvmppc_ioba_validate(stt, ioba, npages);
	if (ret != H_SUCCESS)
		return ret;

	idx = srcu_read_lock(&vcpu->kvm->srcu);
	if (kvmppc_tce_to_ua(vcpu->kvm, tce_list, &ua)) {
		ret = H_TOO_HARD;
		goto unlock_exit;
	}
	tces = (u64 __user *) ua;

	for (i = 0; i < npages; ++i) {
		if (get_user(tce, tces + i)) {
			ret = H_TOO_HARD;
			goto unlock_exit;
		}
		tce = be64_to_cpu(tce);

		ret = kvmppc_tce_validate(stt, tce);
		if (ret != H_SUCCESS)
			goto unlock_exit;
	}

	for (i = 0; i < npages; ++i) {
		/*
		 * This looks unsafe, because we validate, then regrab
		 * the TCE from userspace which could have been changed by
		 * another thread.
		 *
		 * But it actually is safe, because the relevant checks will be
		 * re-executed in the following code. If userspace tries to
		 * change this dodgily it will result in a messier failure mode
		 * but won't threaten the host.
		 */
		if (get_user(tce, tces + i)) {
			ret = H_TOO_HARD;
			goto unlock_exit;
		}
		tce = be64_to_cpu(tce);

		if (kvmppc_tce_to_ua(vcpu->kvm, tce, &ua)) {
			ret = H_PARAMETER;
			goto unlock_exit;
		}

		list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
			ret = kvmppc_tce_iommu_map(vcpu->kvm, stt,
					stit->tbl, entry + i, ua,
					iommu_tce_direction(tce));

			if (ret != H_SUCCESS) {
				kvmppc_clear_tce(vcpu->kvm->mm, stt, stit->tbl,
						entry + i);
				goto unlock_exit;
			}
		}

		kvmppc_tce_put(stt, entry + i, tce);
	}

unlock_exit:
	srcu_read_unlock(&vcpu->kvm->srcu, idx);

	return ret;
}
EXPORT_SYMBOL_GPL(kvmppc_h_put_tce_indirect);

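/*
 * H_STUFF_TCE hypercall: set @npages consecutive entries to
 * @tce_value, which must carry no permission bits; attached hardware
 * tables are unmapped accordingly.
 */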
long kvmppc_h_stuff_tce(struct kvm_vcpu *vcpu,
		unsigned long liobn, unsigned long ioba,
		unsigned long tce_value, unsigned long npages)
{
	struct kvmppc_spapr_tce_table *stt;
	long i, ret;
	struct kvmppc_spapr_tce_iommu_table *stit;

	stt = kvmppc_find_table(vcpu->kvm, liobn);
	if (!stt)
		return H_TOO_HARD;

	ret = kvmppc_ioba_validate(stt, ioba, npages);
	if (ret != H_SUCCESS)
		return ret;

	/* Check permission bits only to allow userspace to poison TCEs for debug */
	if (tce_value & (TCE_PCI_WRITE | TCE_PCI_READ))
		return H_PARAMETER;

	list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
		unsigned long entry = ioba >> stt->page_shift;

		for (i = 0; i < npages; ++i) {
			ret = kvmppc_tce_iommu_unmap(vcpu->kvm, stt,
					stit->tbl, entry + i);

			if (ret == H_SUCCESS)
				continue;

			if (ret == H_TOO_HARD)
				return ret;

			WARN_ON_ONCE(1);
			kvmppc_clear_tce(vcpu->kvm->mm, stt, stit->tbl, entry + i);
		}
	}

	for (i = 0; i < npages; ++i, ioba += (1ULL << stt->page_shift))
		kvmppc_tce_put(stt, ioba >> stt->page_shift, tce_value);

	return ret;
}
EXPORT_SYMBOL_GPL(kvmppc_h_stuff_tce);

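/*
 * H_GET_TCE hypercall: return the current TCE value for @ioba in GPR4.
 * An unallocated backing page reads as zero.
 */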
long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
		      unsigned long ioba)
{
	struct kvmppc_spapr_tce_table *stt;
	long ret;
	unsigned long idx;
	struct page *page;
	u64 *tbl;

	stt = kvmppc_find_table(vcpu->kvm, liobn);
	if (!stt)
		return H_TOO_HARD;

	ret = kvmppc_ioba_validate(stt, ioba, 1);
	if (ret != H_SUCCESS)
		return ret;

	idx = (ioba >> stt->page_shift) - stt->offset;
	page = stt->pages[idx / TCES_PER_PAGE];
	if (!page) {
		vcpu->arch.regs.gpr[4] = 0;
		return H_SUCCESS;
	}
	tbl = (u64 *)page_address(page);

	vcpu->arch.regs.gpr[4] = tbl[idx % TCES_PER_PAGE];

	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_h_get_tce);