Lines Matching refs:tbl

45 struct iommu_table *tbl = data; in iommu_debugfs_weight_get() local
46 *val = bitmap_weight(tbl->it_map, tbl->it_size); in iommu_debugfs_weight_get()
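
The weight debugfs file reports how many entries of the table are currently allocated by counting set bits across it_map. A minimal userspace sketch of that counting, assuming a plain array of unsigned longs and counting whole words for simplicity (count_set_bits is an illustrative name; the kernel uses bitmap_weight(tbl->it_map, tbl->it_size)):

#include <stdio.h>

/* Hedged sketch: count set bits across an unsigned-long bitmap, word by
 * word, roughly what the "weight" file above reports. */
static unsigned long count_set_bits(const unsigned long *map, unsigned long nwords)
{
	unsigned long i, weight = 0;

	for (i = 0; i < nwords; i++)
		weight += (unsigned long)__builtin_popcountl(map[i]);
	return weight;
}

int main(void)
{
	unsigned long map[2] = { 0xf0UL, 0x3UL };	/* 4 + 2 bits set */

	printf("weight = %lu\n", count_set_bits(map, 2));
	return 0;
}
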
51 static void iommu_debugfs_add(struct iommu_table *tbl) in iommu_debugfs_add() argument
56 sprintf(name, "%08lx", tbl->it_index); in iommu_debugfs_add()
59 debugfs_create_file_unsafe("weight", 0400, liobn_entry, tbl, &iommu_debugfs_fops_weight); in iommu_debugfs_add()
60 debugfs_create_ulong("it_size", 0400, liobn_entry, &tbl->it_size); in iommu_debugfs_add()
61 debugfs_create_ulong("it_page_shift", 0400, liobn_entry, &tbl->it_page_shift); in iommu_debugfs_add()
62 debugfs_create_ulong("it_reserved_start", 0400, liobn_entry, &tbl->it_reserved_start); in iommu_debugfs_add()
63 debugfs_create_ulong("it_reserved_end", 0400, liobn_entry, &tbl->it_reserved_end); in iommu_debugfs_add()
64 debugfs_create_ulong("it_indirect_levels", 0400, liobn_entry, &tbl->it_indirect_levels); in iommu_debugfs_add()
65 debugfs_create_ulong("it_level_size", 0400, liobn_entry, &tbl->it_level_size); in iommu_debugfs_add()
68 static void iommu_debugfs_del(struct iommu_table *tbl) in iommu_debugfs_del() argument
72 sprintf(name, "%08lx", tbl->it_index); in iommu_debugfs_del()
76 static void iommu_debugfs_add(struct iommu_table *tbl){} in iommu_debugfs_add() argument
77 static void iommu_debugfs_del(struct iommu_table *tbl){} in iommu_debugfs_del() argument
214 struct iommu_table *tbl, in iommu_range_alloc() argument
247 pool_nr = raw_cpu_read(iommu_pool_hash) & (tbl->nr_pools - 1); in iommu_range_alloc()
250 pool = &(tbl->large_pool); in iommu_range_alloc()
252 pool = &(tbl->pools[pool_nr]); in iommu_range_alloc()
272 if (limit + tbl->it_offset > mask) { in iommu_range_alloc()
273 limit = mask - tbl->it_offset + 1; in iommu_range_alloc()
280 pool = &(tbl->pools[0]); in iommu_range_alloc()
288 n = iommu_area_alloc(tbl->it_map, limit, start, npages, tbl->it_offset, in iommu_range_alloc()
289 dma_get_seg_boundary_nr_pages(dev, tbl->it_page_shift), in iommu_range_alloc()
298 } else if (pass <= tbl->nr_pools) { in iommu_range_alloc()
301 pool_nr = (pool_nr + 1) & (tbl->nr_pools - 1); in iommu_range_alloc()
302 pool = &tbl->pools[pool_nr]; in iommu_range_alloc()
308 } else if (pass == tbl->nr_pools + 1) { in iommu_range_alloc()
311 pool = &tbl->large_pool; in iommu_range_alloc()
332 pool->hint = (end + tbl->it_blocksize - 1) & in iommu_range_alloc()
333 ~(tbl->it_blocksize - 1); in iommu_range_alloc()
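
iommu_range_alloc() picks one of nr_pools small pools by hashing the current CPU (falling back to the large pool for big requests), clamps the bitmap search limit against the device's DMA mask and it_offset, and on success rounds the per-pool hint up to it_blocksize. A minimal sketch of two of those arithmetic steps, assuming nr_pools is a power of two; the helper names are illustrative only:

#include <stdio.h>

/* Hedged sketch: pool selection works only because nr_pools is a power of
 * two, and the next-allocation hint is rounded up to the block size. */
static unsigned int pick_pool(unsigned int cpu_hash, unsigned int nr_pools)
{
	return cpu_hash & (nr_pools - 1);
}

static unsigned long round_hint(unsigned long end, unsigned long blocksize)
{
	return (end + blocksize - 1) & ~(blocksize - 1);
}

int main(void)
{
	printf("pool = %u\n", pick_pool(0x1234, 4));	/* -> 0 */
	printf("hint = %lu\n", round_hint(1000, 256));	/* -> 1024 */
	return 0;
}
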
345 static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl, in iommu_alloc() argument
355 entry = iommu_range_alloc(dev, tbl, npages, NULL, mask, align_order); in iommu_alloc()
360 entry += tbl->it_offset; /* Offset into real TCE table */ in iommu_alloc()
361 ret = entry << tbl->it_page_shift; /* Set the return dma address */ in iommu_alloc()
364 build_fail = tbl->it_ops->set(tbl, entry, npages, in iommu_alloc()
366 IOMMU_PAGE_MASK(tbl), direction, attrs); in iommu_alloc()
374 __iommu_free(tbl, ret, npages); in iommu_alloc()
379 if (tbl->it_ops->flush) in iommu_alloc()
380 tbl->it_ops->flush(tbl); in iommu_alloc()
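
Once a free range of npages entries is found, the slot index is offset by it_offset into the real TCE table and shifted left by it_page_shift to form the DMA address returned to the caller. A minimal sketch of that conversion (entry_to_dma is an illustrative name, not a kernel helper):

#include <stdio.h>

/* Hedged sketch of the address arithmetic in iommu_alloc(): table slot ->
 * bus/DMA address, given the table's offset and IOMMU page shift. */
static unsigned long long entry_to_dma(unsigned long entry,
				       unsigned long it_offset,
				       unsigned int it_page_shift)
{
	return (unsigned long long)(entry + it_offset) << it_page_shift;
}

int main(void)
{
	/* e.g. slot 5 of a table starting at entry 0x800, 4K IOMMU pages */
	printf("dma = 0x%llx\n", entry_to_dma(5, 0x800, 12));	/* 0x805000 */
	return 0;
}
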
388 static bool iommu_free_check(struct iommu_table *tbl, dma_addr_t dma_addr, in iommu_free_check() argument
393 entry = dma_addr >> tbl->it_page_shift; in iommu_free_check()
394 free_entry = entry - tbl->it_offset; in iommu_free_check()
396 if (((free_entry + npages) > tbl->it_size) || in iommu_free_check()
397 (entry < tbl->it_offset)) { in iommu_free_check()
402 printk(KERN_INFO "\tTable = 0x%llx\n", (u64)tbl); in iommu_free_check()
403 printk(KERN_INFO "\tbus# = 0x%llx\n", (u64)tbl->it_busno); in iommu_free_check()
404 printk(KERN_INFO "\tsize = 0x%llx\n", (u64)tbl->it_size); in iommu_free_check()
405 printk(KERN_INFO "\tstartOff = 0x%llx\n", (u64)tbl->it_offset); in iommu_free_check()
406 printk(KERN_INFO "\tindex = 0x%llx\n", (u64)tbl->it_index); in iommu_free_check()
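
iommu_free_check() converts the DMA address back into a table entry and rejects a free that starts before it_offset or runs past it_size, printing the table details above when it does. A minimal sketch of that bounds check (free_range_ok is an illustrative name):

#include <stdbool.h>
#include <stdio.h>

/* Hedged sketch: reject frees whose entry range falls outside the table
 * window [it_offset, it_offset + it_size). */
static bool free_range_ok(unsigned long long dma_addr, unsigned long npages,
			  unsigned long it_offset, unsigned long it_size,
			  unsigned int it_page_shift)
{
	unsigned long entry = dma_addr >> it_page_shift;

	if (entry < it_offset)
		return false;
	return (entry - it_offset) + npages <= it_size;
}

int main(void)
{
	printf("%d\n", free_range_ok(0x805000, 1, 0x800, 0x1000, 12));	/* 1 */
	printf("%d\n", free_range_ok(0x100000, 1, 0x800, 0x1000, 12));	/* 0 */
	return 0;
}
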
416 static struct iommu_pool *get_pool(struct iommu_table *tbl, in get_pool() argument
420 unsigned long largepool_start = tbl->large_pool.start; in get_pool()
424 p = &tbl->large_pool; in get_pool()
426 unsigned int pool_nr = entry / tbl->poolsize; in get_pool()
428 BUG_ON(pool_nr > tbl->nr_pools); in get_pool()
429 p = &tbl->pools[pool_nr]; in get_pool()
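
get_pool() maps a freed entry back to the pool it came from: entries at or beyond the large pool's start belong to the large pool, everything else is found by dividing by poolsize (the kernel additionally BUG_ONs an out-of-range index). A minimal sketch; the struct and function names here are illustrative only:

#include <stdio.h>

/* Hedged sketch of the pool lookup: -1 stands in for "large pool". */
struct pool_table {
	unsigned long poolsize;
	unsigned long largepool_start;
};

static int pool_for_entry(const struct pool_table *t, unsigned long entry)
{
	if (entry >= t->largepool_start)
		return -1;			/* large pool */
	return (int)(entry / t->poolsize);	/* small pool index */
}

int main(void)
{
	struct pool_table t = { .poolsize = 192, .largepool_start = 768 };

	printf("%d %d\n", pool_for_entry(&t, 100), pool_for_entry(&t, 800)); /* 0 -1 */
	return 0;
}
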
435 static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr, in __iommu_free() argument
442 entry = dma_addr >> tbl->it_page_shift; in __iommu_free()
443 free_entry = entry - tbl->it_offset; in __iommu_free()
445 pool = get_pool(tbl, free_entry); in __iommu_free()
447 if (!iommu_free_check(tbl, dma_addr, npages)) in __iommu_free()
450 tbl->it_ops->clear(tbl, entry, npages); in __iommu_free()
453 bitmap_clear(tbl->it_map, free_entry, npages); in __iommu_free()
457 static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr, in iommu_free() argument
460 __iommu_free(tbl, dma_addr, npages); in iommu_free()
466 if (tbl->it_ops->flush) in iommu_free()
467 tbl->it_ops->flush(tbl); in iommu_free()
470 int ppc_iommu_map_sg(struct device *dev, struct iommu_table *tbl, in ppc_iommu_map_sg() argument
484 if ((nelems == 0) || !tbl) in ppc_iommu_map_sg()
509 npages = iommu_num_pages(vaddr, slen, IOMMU_PAGE_SIZE(tbl)); in ppc_iommu_map_sg()
511 if (tbl->it_page_shift < PAGE_SHIFT && slen >= PAGE_SIZE && in ppc_iommu_map_sg()
513 align = PAGE_SHIFT - tbl->it_page_shift; in ppc_iommu_map_sg()
514 entry = iommu_range_alloc(dev, tbl, npages, &handle, in ppc_iommu_map_sg()
515 mask >> tbl->it_page_shift, align); in ppc_iommu_map_sg()
524 "vaddr %lx npages %lu\n", tbl, vaddr, in ppc_iommu_map_sg()
530 entry += tbl->it_offset; in ppc_iommu_map_sg()
531 dma_addr = entry << tbl->it_page_shift; in ppc_iommu_map_sg()
532 dma_addr |= (vaddr & ~IOMMU_PAGE_MASK(tbl)); in ppc_iommu_map_sg()
538 build_fail = tbl->it_ops->set(tbl, entry, npages, in ppc_iommu_map_sg()
539 vaddr & IOMMU_PAGE_MASK(tbl), in ppc_iommu_map_sg()
577 if (tbl->it_ops->flush) in ppc_iommu_map_sg()
578 tbl->it_ops->flush(tbl); in ppc_iommu_map_sg()
600 vaddr = s->dma_address & IOMMU_PAGE_MASK(tbl); in ppc_iommu_map_sg()
602 IOMMU_PAGE_SIZE(tbl)); in ppc_iommu_map_sg()
603 __iommu_free(tbl, vaddr, npages); in ppc_iommu_map_sg()
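
For each scatterlist element the code computes how many IOMMU pages the (vaddr, slen) span covers and, when IOMMU pages are smaller than kernel pages and the element is at least a kernel page long, asks for extra alignment of PAGE_SHIFT - it_page_shift. A minimal sketch of that arithmetic, assuming 64K kernel pages (an assumption for illustration; span_pages mirrors what iommu_num_pages() computes):

#include <stdio.h>

#define KERNEL_PAGE_SHIFT 16	/* assumption: 64K kernel pages */

/* Hedged sketch: number of IOMMU pages covered by a byte range that may
 * start in the middle of an IOMMU page. */
static unsigned long span_pages(unsigned long vaddr, unsigned long len,
				unsigned int io_page_shift)
{
	unsigned long io_page_size = 1UL << io_page_shift;
	unsigned long offset = vaddr & (io_page_size - 1);

	return (offset + len + io_page_size - 1) >> io_page_shift;
}

int main(void)
{
	unsigned int io_shift = 12;			/* 4K IOMMU pages */
	unsigned long vaddr = 0x10000ff0, len = 0x10000;

	printf("npages = %lu\n", span_pages(vaddr, len, io_shift));	/* 17 */
	if (io_shift < KERNEL_PAGE_SHIFT && len >= (1UL << KERNEL_PAGE_SHIFT))
		printf("align order = %u\n", KERNEL_PAGE_SHIFT - io_shift); /* 4 */
	return 0;
}
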
613 void ppc_iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist, in ppc_iommu_unmap_sg() argument
621 if (!tbl) in ppc_iommu_unmap_sg()
632 IOMMU_PAGE_SIZE(tbl)); in ppc_iommu_unmap_sg()
633 __iommu_free(tbl, dma_handle, npages); in ppc_iommu_unmap_sg()
641 if (tbl->it_ops->flush) in ppc_iommu_unmap_sg()
642 tbl->it_ops->flush(tbl); in ppc_iommu_unmap_sg()
645 static void iommu_table_clear(struct iommu_table *tbl) in iommu_table_clear() argument
654 tbl->it_ops->clear(tbl, tbl->it_offset, tbl->it_size); in iommu_table_clear()
659 if (tbl->it_ops->get) { in iommu_table_clear()
663 for (index = 0; index < tbl->it_size; index++) { in iommu_table_clear()
664 tceval = tbl->it_ops->get(tbl, index + tbl->it_offset); in iommu_table_clear()
669 __set_bit(index, tbl->it_map); in iommu_table_clear()
674 if ((tbl->it_size - tcecount) < KDUMP_MIN_TCE_ENTRIES) { in iommu_table_clear()
678 for (index = tbl->it_size - KDUMP_MIN_TCE_ENTRIES; in iommu_table_clear()
679 index < tbl->it_size; index++) in iommu_table_clear()
680 __clear_bit(index, tbl->it_map); in iommu_table_clear()
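
On a kdump kernel the table is not wiped: every entry the previous kernel left mapped is marked busy in it_map, and if that leaves fewer than KDUMP_MIN_TCE_ENTRIES free, the topmost entries are forcibly freed so the new kernel can still do DMA. A minimal sketch of that reservation step, using a byte-per-entry map instead of a bitmap and an assumed minimum of 16 entries (the real constant is KDUMP_MIN_TCE_ENTRIES):

#include <stdio.h>
#include <string.h>

#define MIN_FREE_ENTRIES 16	/* assumption, stands in for KDUMP_MIN_TCE_ENTRIES */

/* Hedged sketch: guarantee a minimum number of free entries at the top of
 * the table after live mappings have been marked busy. */
static void ensure_min_free(unsigned char *map, unsigned long it_size,
			    unsigned long used)
{
	unsigned long i;

	if (it_size - used >= MIN_FREE_ENTRIES)
		return;
	for (i = it_size - MIN_FREE_ENTRIES; i < it_size; i++)
		map[i] = 0;		/* force the top entries free */
}

int main(void)
{
	unsigned char map[64];

	memset(map, 1, sizeof(map));	/* pretend every entry is live */
	ensure_min_free(map, 64, 64);
	printf("entry 63 now %s\n", map[63] ? "used" : "free");
	return 0;
}
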
686 static void iommu_table_reserve_pages(struct iommu_table *tbl, in iommu_table_reserve_pages() argument
697 if (tbl->it_offset == 0) in iommu_table_reserve_pages()
698 set_bit(0, tbl->it_map); in iommu_table_reserve_pages()
700 if (res_start < tbl->it_offset) in iommu_table_reserve_pages()
701 res_start = tbl->it_offset; in iommu_table_reserve_pages()
703 if (res_end > (tbl->it_offset + tbl->it_size)) in iommu_table_reserve_pages()
704 res_end = tbl->it_offset + tbl->it_size; in iommu_table_reserve_pages()
708 tbl->it_reserved_start = tbl->it_offset; in iommu_table_reserve_pages()
709 tbl->it_reserved_end = tbl->it_offset; in iommu_table_reserve_pages()
713 tbl->it_reserved_start = res_start; in iommu_table_reserve_pages()
714 tbl->it_reserved_end = res_end; in iommu_table_reserve_pages()
716 for (i = tbl->it_reserved_start; i < tbl->it_reserved_end; ++i) in iommu_table_reserve_pages()
717 set_bit(i - tbl->it_offset, tbl->it_map); in iommu_table_reserve_pages()
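
The requested reserved window is clamped to the table's range [it_offset, it_offset + it_size) before the corresponding bits are set relative to it_offset. A minimal sketch of the clamping (clamp_reserved is an illustrative name):

#include <stdio.h>

/* Hedged sketch: limit a reserved range to the table window. */
static void clamp_reserved(unsigned long it_offset, unsigned long it_size,
			   unsigned long *res_start, unsigned long *res_end)
{
	if (*res_start < it_offset)
		*res_start = it_offset;
	if (*res_end > it_offset + it_size)
		*res_end = it_offset + it_size;
}

int main(void)
{
	unsigned long start = 0x100, end = 0x5000;

	clamp_reserved(0x800, 0x1000, &start, &end);
	printf("reserved: 0x%lx..0x%lx\n", start, end);	/* 0x800..0x1800 */
	return 0;
}
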
724 struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid, in iommu_init_table() argument
732 BUG_ON(!tbl->it_ops); in iommu_init_table()
735 sz = BITS_TO_LONGS(tbl->it_size) * sizeof(unsigned long); in iommu_init_table()
737 tbl->it_map = vzalloc_node(sz, nid); in iommu_init_table()
738 if (!tbl->it_map) { in iommu_init_table()
743 iommu_table_reserve_pages(tbl, res_start, res_end); in iommu_init_table()
746 if ((tbl->it_size << tbl->it_page_shift) >= (1UL * 1024 * 1024 * 1024)) in iommu_init_table()
747 tbl->nr_pools = IOMMU_NR_POOLS; in iommu_init_table()
749 tbl->nr_pools = 1; in iommu_init_table()
752 tbl->poolsize = (tbl->it_size * 3 / 4) / tbl->nr_pools; in iommu_init_table()
754 for (i = 0; i < tbl->nr_pools; i++) { in iommu_init_table()
755 p = &tbl->pools[i]; in iommu_init_table()
757 p->start = tbl->poolsize * i; in iommu_init_table()
759 p->end = p->start + tbl->poolsize; in iommu_init_table()
762 p = &tbl->large_pool; in iommu_init_table()
764 p->start = tbl->poolsize * i; in iommu_init_table()
766 p->end = tbl->it_size; in iommu_init_table()
768 iommu_table_clear(tbl); in iommu_init_table()
776 iommu_debugfs_add(tbl); in iommu_init_table()
778 return tbl; in iommu_init_table()
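
Tables whose DMA window (it_size << it_page_shift) is at least 1GB are split into several small pools covering three quarters of the entries, with the large pool taking the remaining quarter; smaller tables get a single pool. A minimal sketch of that sizing; NR_POOLS here only stands in for the kernel's IOMMU_NR_POOLS:

#include <stdio.h>

#define NR_POOLS 4	/* assumption, stands in for IOMMU_NR_POOLS */

int main(void)
{
	unsigned long it_size = 1UL << 20;	/* number of TCE entries */
	unsigned int it_page_shift = 12;	/* 4K IOMMU pages -> 4GB window */
	unsigned long long window = (unsigned long long)it_size << it_page_shift;
	unsigned int nr_pools = window >= (1ULL << 30) ? NR_POOLS : 1;
	unsigned long poolsize = (it_size * 3 / 4) / nr_pools;

	/* small pools cover [0, poolsize * nr_pools), the large pool the rest */
	printf("nr_pools=%u poolsize=%lu large pool %lu..%lu\n",
	       nr_pools, poolsize, poolsize * nr_pools, it_size);
	return 0;
}
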
781 bool iommu_table_in_use(struct iommu_table *tbl) in iommu_table_in_use() argument
786 if (tbl->it_offset == 0) in iommu_table_in_use()
790 if (!tbl->it_reserved_start && !tbl->it_reserved_end) in iommu_table_in_use()
791 return find_next_bit(tbl->it_map, tbl->it_size, start) != tbl->it_size; in iommu_table_in_use()
793 end = tbl->it_reserved_start - tbl->it_offset; in iommu_table_in_use()
794 if (find_next_bit(tbl->it_map, end, start) != end) in iommu_table_in_use()
797 start = tbl->it_reserved_end - tbl->it_offset; in iommu_table_in_use()
798 end = tbl->it_size; in iommu_table_in_use()
799 return find_next_bit(tbl->it_map, end, start) != end; in iommu_table_in_use()
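
iommu_table_in_use() looks for any set bit in it_map, skipping entry 0 when it_offset is 0 and skipping the reserved window. A minimal sketch of that scan, using a byte-per-entry map instead of the kernel's bitmap and find_next_bit():

#include <stdbool.h>
#include <stdio.h>

/* Hedged sketch: scan below and above the reserved window for a used entry. */
static bool table_in_use(const unsigned char *map, unsigned long it_size,
			 unsigned long start,	/* 1 when entry 0 is always reserved */
			 unsigned long res_start, unsigned long res_end)
{
	unsigned long i;

	for (i = start; i < res_start; i++)	/* below the reserved range */
		if (map[i])
			return true;
	for (i = res_end; i < it_size; i++)	/* above the reserved range */
		if (map[i])
			return true;
	return false;
}

int main(void)
{
	unsigned char map[32] = { 0 };

	map[5] = 1;	/* inside the reserved window 4..8: not counted as in use */
	printf("%d\n", table_in_use(map, 32, 1, 4, 8));	/* -> 0 */
	return 0;
}
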
804 struct iommu_table *tbl; in iommu_table_free() local
806 tbl = container_of(kref, struct iommu_table, it_kref); in iommu_table_free()
808 if (tbl->it_ops->free) in iommu_table_free()
809 tbl->it_ops->free(tbl); in iommu_table_free()
811 if (!tbl->it_map) { in iommu_table_free()
812 kfree(tbl); in iommu_table_free()
816 iommu_debugfs_del(tbl); in iommu_table_free()
819 if (iommu_table_in_use(tbl)) in iommu_table_free()
823 vfree(tbl->it_map); in iommu_table_free()
826 kfree(tbl); in iommu_table_free()
829 struct iommu_table *iommu_tce_table_get(struct iommu_table *tbl) in iommu_tce_table_get() argument
831 if (kref_get_unless_zero(&tbl->it_kref)) in iommu_tce_table_get()
832 return tbl; in iommu_tce_table_get()
838 int iommu_tce_table_put(struct iommu_table *tbl) in iommu_tce_table_put() argument
840 if (WARN_ON(!tbl)) in iommu_tce_table_put()
843 return kref_put(&tbl->it_kref, iommu_table_free); in iommu_tce_table_put()
852 dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl, in iommu_map_page() argument
867 if (tbl) { in iommu_map_page()
868 npages = iommu_num_pages(uaddr, size, IOMMU_PAGE_SIZE(tbl)); in iommu_map_page()
870 if (tbl->it_page_shift < PAGE_SHIFT && size >= PAGE_SIZE && in iommu_map_page()
872 align = PAGE_SHIFT - tbl->it_page_shift; in iommu_map_page()
874 dma_handle = iommu_alloc(dev, tbl, vaddr, npages, direction, in iommu_map_page()
875 mask >> tbl->it_page_shift, align, in iommu_map_page()
881 "vaddr %p npages %d\n", tbl, vaddr, in iommu_map_page()
885 dma_handle |= (uaddr & ~IOMMU_PAGE_MASK(tbl)); in iommu_map_page()
891 void iommu_unmap_page(struct iommu_table *tbl, dma_addr_t dma_handle, in iommu_unmap_page() argument
899 if (tbl) { in iommu_unmap_page()
901 IOMMU_PAGE_SIZE(tbl)); in iommu_unmap_page()
902 iommu_free(tbl, dma_handle, npages); in iommu_unmap_page()
910 void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl, in iommu_alloc_coherent() argument
919 int tcesize = (1 << tbl->it_page_shift); in iommu_alloc_coherent()
935 if (!tbl) in iommu_alloc_coherent()
946 nio_pages = IOMMU_PAGE_ALIGN(size, tbl) >> tbl->it_page_shift; in iommu_alloc_coherent()
948 io_order = get_iommu_order(size, tbl); in iommu_alloc_coherent()
949 mapping = iommu_alloc(dev, tbl, ret, nio_pages, DMA_BIDIRECTIONAL, in iommu_alloc_coherent()
950 mask >> tbl->it_page_shift, io_order, 0); in iommu_alloc_coherent()
960 void iommu_free_coherent(struct iommu_table *tbl, size_t size, in iommu_free_coherent() argument
963 if (tbl) { in iommu_free_coherent()
967 nio_pages = IOMMU_PAGE_ALIGN(size, tbl) >> tbl->it_page_shift; in iommu_free_coherent()
968 iommu_free(tbl, dma_handle, nio_pages); in iommu_free_coherent()
1035 void iommu_flush_tce(struct iommu_table *tbl) in iommu_flush_tce() argument
1038 if (tbl->it_ops->flush) in iommu_flush_tce()
1039 tbl->it_ops->flush(tbl); in iommu_flush_tce()
1078 struct iommu_table *tbl, in iommu_tce_xchg_no_kill() argument
1085 ret = tbl->it_ops->xchg_no_kill(tbl, entry, hpa, direction); in iommu_tce_xchg_no_kill()
1088 !mm_iommu_is_devmem(mm, *hpa, tbl->it_page_shift, in iommu_tce_xchg_no_kill()
1096 void iommu_tce_kill(struct iommu_table *tbl, in iommu_tce_kill() argument
1099 if (tbl->it_ops->tce_kill) in iommu_tce_kill()
1100 tbl->it_ops->tce_kill(tbl, entry, pages); in iommu_tce_kill()
1105 static int iommu_take_ownership(struct iommu_table *tbl) in iommu_take_ownership() argument
1107 unsigned long flags, i, sz = (tbl->it_size + 7) >> 3; in iommu_take_ownership()
1117 if (!tbl->it_ops->xchg_no_kill) in iommu_take_ownership()
1120 spin_lock_irqsave(&tbl->large_pool.lock, flags); in iommu_take_ownership()
1121 for (i = 0; i < tbl->nr_pools; i++) in iommu_take_ownership()
1122 spin_lock_nest_lock(&tbl->pools[i].lock, &tbl->large_pool.lock); in iommu_take_ownership()
1124 if (iommu_table_in_use(tbl)) { in iommu_take_ownership()
1128 memset(tbl->it_map, 0xff, sz); in iommu_take_ownership()
1131 for (i = 0; i < tbl->nr_pools; i++) in iommu_take_ownership()
1132 spin_unlock(&tbl->pools[i].lock); in iommu_take_ownership()
1133 spin_unlock_irqrestore(&tbl->large_pool.lock, flags); in iommu_take_ownership()
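
Taking ownership locks every small pool under the large pool's lock, fails if any entry is still mapped, and otherwise marks the whole map busy ((it_size + 7) >> 3 bytes of 0xff) so the kernel-side allocator stays out of the way; releasing ownership below zeroes the map and re-reserves the window. A minimal sketch of just the byte-size math and the memset handover:

#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned long it_size = 100;
	unsigned long sz = (it_size + 7) >> 3;	/* bytes needed for it_size bits */
	unsigned char map[16] = { 0 };

	memset(map, 0xff, sz);			/* take ownership: every entry busy */
	printf("sz=%lu first byte=0x%02x\n", sz, map[0]);

	memset(map, 0, sz);			/* release ownership: all free again */
	return 0;
}
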
1138 static void iommu_release_ownership(struct iommu_table *tbl) in iommu_release_ownership() argument
1140 unsigned long flags, i, sz = (tbl->it_size + 7) >> 3; in iommu_release_ownership()
1142 spin_lock_irqsave(&tbl->large_pool.lock, flags); in iommu_release_ownership()
1143 for (i = 0; i < tbl->nr_pools; i++) in iommu_release_ownership()
1144 spin_lock_nest_lock(&tbl->pools[i].lock, &tbl->large_pool.lock); in iommu_release_ownership()
1146 memset(tbl->it_map, 0, sz); in iommu_release_ownership()
1148 iommu_table_reserve_pages(tbl, tbl->it_reserved_start, in iommu_release_ownership()
1149 tbl->it_reserved_end); in iommu_release_ownership()
1151 for (i = 0; i < tbl->nr_pools; i++) in iommu_release_ownership()
1152 spin_unlock(&tbl->pools[i].lock); in iommu_release_ownership()
1153 spin_unlock_irqrestore(&tbl->large_pool.lock, flags); in iommu_release_ownership()
1209 struct iommu_table *tbl = table_group->tables[0]; in spapr_tce_create_table() local
1214 if (tbl->it_page_shift != page_shift || in spapr_tce_create_table()
1215 tbl->it_size != (window_size >> page_shift) || in spapr_tce_create_table()
1216 tbl->it_indirect_levels != levels - 1) in spapr_tce_create_table()
1219 *ptbl = iommu_tce_table_get(tbl); in spapr_tce_create_table()
1224 int num, struct iommu_table *tbl) in spapr_tce_set_window() argument
1226 return tbl == table_group->tables[num] ? 0 : -EPERM; in spapr_tce_set_window()
1239 struct iommu_table *tbl = table_group->tables[i]; in spapr_tce_take_ownership() local
1241 if (!tbl || !tbl->it_map) in spapr_tce_take_ownership()
1244 rc = iommu_take_ownership(tbl); in spapr_tce_take_ownership()
1260 struct iommu_table *tbl = table_group->tables[i]; in spapr_tce_release_ownership() local
1262 if (!tbl) in spapr_tce_release_ownership()
1265 iommu_table_clear(tbl); in spapr_tce_release_ownership()
1266 if (tbl->it_map) in spapr_tce_release_ownership()
1267 iommu_release_ownership(tbl); in spapr_tce_release_ownership()