Lines matching refs: container
75 static long tce_iommu_mm_set(struct tce_container *container) in tce_iommu_mm_set() argument
77 if (container->mm) { in tce_iommu_mm_set()
78 if (container->mm == current->mm) in tce_iommu_mm_set()
83 container->mm = current->mm; in tce_iommu_mm_set()
84 mmgrab(container->mm); in tce_iommu_mm_set()
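The four hits above (lines 77-84) are the whole of the driver's "one container, one mm" rule: the first accounting-sensitive operation latches current->mm into the container, and every later caller must be the same process. These hits appear to come from the VFIO sPAPR TCE backend (vfio_iommu_spapr_tce.c). A reconstruction sketch of the function follows; only the matched lines come from the listing, the early returns and the error code are assumptions:

static long tce_iommu_mm_set(struct tce_container *container)
{
        if (container->mm) {                        /* line 77 */
                if (container->mm == current->mm)   /* line 78: same process */
                        return 0;
                return -EPERM;                      /* assumed: refuse another mm */
        }
        container->mm = current->mm;                /* line 83: latch the caller's mm */
        mmgrab(container->mm);                      /* line 84: pin the mm_struct */
        return 0;
}

mmgrab() pins only the mm_struct itself, not the address space behind it; the matching mmdrop() shows up in tce_iommu_release() at line 376.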
89 static long tce_iommu_prereg_free(struct tce_container *container, in tce_iommu_prereg_free() argument
94 ret = mm_iommu_put(container->mm, tcemem->mem); in tce_iommu_prereg_free()
104 static long tce_iommu_unregister_pages(struct tce_container *container, in tce_iommu_unregister_pages() argument
115 mem = mm_iommu_get(container->mm, vaddr, size >> PAGE_SHIFT); in tce_iommu_unregister_pages()
119 list_for_each_entry(tcemem, &container->prereg_list, next) { in tce_iommu_unregister_pages()
129 ret = tce_iommu_prereg_free(container, tcemem); in tce_iommu_unregister_pages()
131 mm_iommu_put(container->mm, mem); in tce_iommu_unregister_pages()
136 static long tce_iommu_register_pages(struct tce_container *container, in tce_iommu_register_pages() argument
148 mem = mm_iommu_get(container->mm, vaddr, entries); in tce_iommu_register_pages()
150 list_for_each_entry(tcemem, &container->prereg_list, next) { in tce_iommu_register_pages()
157 ret = mm_iommu_new(container->mm, vaddr, entries, &mem); in tce_iommu_register_pages()
169 list_add(&tcemem->next, &container->prereg_list); in tce_iommu_register_pages()
171 container->enabled = true; in tce_iommu_register_pages()
176 mm_iommu_put(container->mm, mem); in tce_iommu_register_pages()
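Lines 89-176 cover the v2 preregistration path: tce_iommu_register_pages() pins and accounts a user memory range against container->mm (mm_iommu_new()/mm_iommu_get()) and keeps it on container->prereg_list, while tce_iommu_unregister_pages()/tce_iommu_prereg_free() undo it. Userspace drives this through VFIO_IOMMU_SPAPR_REGISTER_MEMORY and VFIO_IOMMU_SPAPR_UNREGISTER_MEMORY. A minimal sketch, assuming container_fd already has the v2 backend selected and the range is page aligned; the spapr_* helper names are hypothetical:

#include <string.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

/* Preregister [vaddr, vaddr + size) with a v2 sPAPR TCE container. */
static int spapr_prereg(int container_fd, void *vaddr, unsigned long size)
{
        struct vfio_iommu_spapr_register_memory reg;

        memset(&reg, 0, sizeof(reg));
        reg.argsz = sizeof(reg);
        reg.vaddr = (unsigned long)vaddr;
        reg.size  = size;

        return ioctl(container_fd, VFIO_IOMMU_SPAPR_REGISTER_MEMORY, &reg);
}

/* Undo the preregistration; the kernel refuses while TCE mappings
 * still reference the range. */
static int spapr_unprereg(int container_fd, void *vaddr, unsigned long size)
{
        struct vfio_iommu_spapr_register_memory reg;

        memset(&reg, 0, sizeof(reg));
        reg.argsz = sizeof(reg);
        reg.vaddr = (unsigned long)vaddr;
        reg.size  = size;

        return ioctl(container_fd, VFIO_IOMMU_SPAPR_UNREGISTER_MEMORY, &reg);
}

Note line 171: a successful registration also marks the container enabled, which is why the v2 flavour never goes through VFIO_IOMMU_ENABLE (compare line 1076).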
198 static inline bool tce_groups_attached(struct tce_container *container) in tce_groups_attached() argument
200 return !list_empty(&container->group_list); in tce_groups_attached()
203 static long tce_iommu_find_table(struct tce_container *container, in tce_iommu_find_table() argument
209 struct iommu_table *tbl = container->tables[i]; in tce_iommu_find_table()
226 static int tce_iommu_find_free_table(struct tce_container *container) in tce_iommu_find_free_table() argument
231 if (!container->tables[i]) in tce_iommu_find_free_table()
238 static int tce_iommu_enable(struct tce_container *container) in tce_iommu_enable() argument
245 if (container->enabled) in tce_iommu_enable()
277 if (!tce_groups_attached(container)) in tce_iommu_enable()
280 tcegrp = list_first_entry(&container->group_list, in tce_iommu_enable()
289 ret = tce_iommu_mm_set(container); in tce_iommu_enable()
294 ret = account_locked_vm(container->mm, locked, true); in tce_iommu_enable()
298 container->locked_pages = locked; in tce_iommu_enable()
300 container->enabled = true; in tce_iommu_enable()
305 static void tce_iommu_disable(struct tce_container *container) in tce_iommu_disable() argument
307 if (!container->enabled) in tce_iommu_disable()
310 container->enabled = false; in tce_iommu_disable()
312 BUG_ON(!container->mm); in tce_iommu_disable()
313 account_locked_vm(container->mm, container->locked_pages, false); in tce_iommu_disable()
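tce_iommu_enable()/tce_iommu_disable() serve the v1 backend: enable charges the container's DMA window to the calling mm with account_locked_vm() (lines 294 and 298) and disable returns the pages (line 313). They are driven by the argument-less VFIO_IOMMU_ENABLE / VFIO_IOMMU_DISABLE ioctls, and line 911 shows that VFIO_IOMMU_MAP_DMA is refused until the container is enabled. A minimal userspace sketch; the RLIMIT_MEMLOCK bump is an assumption about a typical caller, not something this listing shows:

#include <sys/ioctl.h>
#include <sys/resource.h>
#include <linux/vfio.h>

/* A v1 container must be enabled before VFIO_IOMMU_MAP_DMA is accepted. */
static int spapr_v1_enable(int container_fd)
{
        /* Enabling charges the DMA window to RLIMIT_MEMLOCK, so callers
         * usually raise the limit (or run privileged) first. Best effort. */
        struct rlimit rl = { RLIM_INFINITY, RLIM_INFINITY };

        setrlimit(RLIMIT_MEMLOCK, &rl);

        return ioctl(container_fd, VFIO_IOMMU_ENABLE);
}

static int spapr_v1_disable(int container_fd)
{
        return ioctl(container_fd, VFIO_IOMMU_DISABLE);  /* undoes the accounting */
}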
318 struct tce_container *container; in tce_iommu_open() local
325 container = kzalloc(sizeof(*container), GFP_KERNEL); in tce_iommu_open()
326 if (!container) in tce_iommu_open()
329 mutex_init(&container->lock); in tce_iommu_open()
330 INIT_LIST_HEAD_RCU(&container->group_list); in tce_iommu_open()
331 INIT_LIST_HEAD_RCU(&container->prereg_list); in tce_iommu_open()
333 container->v2 = arg == VFIO_SPAPR_TCE_v2_IOMMU; in tce_iommu_open()
335 return container; in tce_iommu_open()
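tce_iommu_open() allocates and initialises the container (lines 325-331) and records at line 333 whether it was opened as the v2 flavour; the arg comes from userspace's VFIO_SET_IOMMU on the container fd. A sketch of that selection, assuming group_fd is an open /dev/vfio/<group> and container_fd an open /dev/vfio/vfio; spapr_set_iommu() is a hypothetical helper:

#include <sys/ioctl.h>
#include <linux/vfio.h>

/* Attach one group, then pick the sPAPR TCE backend (v2 if available). */
static int spapr_set_iommu(int container_fd, int group_fd)
{
        int model = VFIO_SPAPR_TCE_IOMMU;

        /* At least one group must be attached before VFIO_SET_IOMMU succeeds. */
        if (ioctl(group_fd, VFIO_GROUP_SET_CONTAINER, &container_fd))
                return -1;

        if (ioctl(container_fd, VFIO_CHECK_EXTENSION, VFIO_SPAPR_TCE_v2_IOMMU) > 0)
                model = VFIO_SPAPR_TCE_v2_IOMMU;   /* -> container->v2 at line 333 */

        return ioctl(container_fd, VFIO_SET_IOMMU, model);
}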
338 static int tce_iommu_clear(struct tce_container *container,
341 static void tce_iommu_free_table(struct tce_container *container,
346 struct tce_container *container = iommu_data; in tce_iommu_release() local
351 while (tce_groups_attached(container)) { in tce_iommu_release()
352 tcegrp = list_first_entry(&container->group_list, in tce_iommu_release()
362 struct iommu_table *tbl = container->tables[i]; in tce_iommu_release()
367 tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size); in tce_iommu_release()
368 tce_iommu_free_table(container, tbl); in tce_iommu_release()
371 list_for_each_entry_safe(tcemem, tmtmp, &container->prereg_list, next) in tce_iommu_release()
372 WARN_ON(tce_iommu_prereg_free(container, tcemem)); in tce_iommu_release()
374 tce_iommu_disable(container); in tce_iommu_release()
375 if (container->mm) in tce_iommu_release()
376 mmdrop(container->mm); in tce_iommu_release()
377 mutex_destroy(&container->lock); in tce_iommu_release()
379 kfree(container); in tce_iommu_release()
390 static int tce_iommu_prereg_ua_to_hpa(struct tce_container *container, in tce_iommu_prereg_ua_to_hpa() argument
397 mem = mm_iommu_lookup(container->mm, tce, 1ULL << shift); in tce_iommu_prereg_ua_to_hpa()
410 static void tce_iommu_unuse_page_v2(struct tce_container *container, in tce_iommu_unuse_page_v2() argument
421 ret = tce_iommu_prereg_ua_to_hpa(container, be64_to_cpu(*pua), in tce_iommu_unuse_page_v2()
432 static int tce_iommu_clear(struct tce_container *container, in tce_iommu_clear() argument
464 ret = iommu_tce_xchg_no_kill(container->mm, tbl, entry, &oldhpa, in tce_iommu_clear()
472 if (container->v2) { in tce_iommu_clear()
473 tce_iommu_unuse_page_v2(container, tbl, entry); in tce_iommu_clear()
500 static long tce_iommu_build(struct tce_container *container, in tce_iommu_build() argument
516 if (!tce_page_is_contained(container->mm, hpa, in tce_iommu_build()
524 ret = iommu_tce_xchg_no_kill(container->mm, tbl, entry + i, in tce_iommu_build()
541 tce_iommu_clear(container, tbl, entry, i); in tce_iommu_build()
548 static long tce_iommu_build_v2(struct tce_container *container, in tce_iommu_build_v2() argument
561 ret = tce_iommu_prereg_ua_to_hpa(container, in tce_iommu_build_v2()
566 if (!tce_page_is_contained(container->mm, hpa, in tce_iommu_build_v2()
580 ret = iommu_tce_xchg_no_kill(container->mm, tbl, entry + i, in tce_iommu_build_v2()
584 tce_iommu_unuse_page_v2(container, tbl, entry + i); in tce_iommu_build_v2()
592 tce_iommu_unuse_page_v2(container, tbl, entry + i); in tce_iommu_build_v2()
600 tce_iommu_clear(container, tbl, entry, i); in tce_iommu_build_v2()
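tce_iommu_build() (v1) and tce_iommu_build_v2() are the mapping paths behind VFIO_IOMMU_MAP_DMA (see the dispatch at lines 955-962): both program entries with iommu_tce_xchg_no_kill() against container->mm, and the v2 variant first resolves the user address through the preregistered list (tce_iommu_prereg_ua_to_hpa(), line 561). The userspace side uses the type1-style map/unmap structures; a hedged sketch, assuming iova lies inside an existing window, size is a multiple of the window's IOMMU page size and, for v2, the buffer was preregistered; spapr_map()/spapr_unmap() are hypothetical names:

#include <string.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

static int spapr_map(int container_fd, void *vaddr,
                     unsigned long long iova, unsigned long long size)
{
        struct vfio_iommu_type1_dma_map map;

        memset(&map, 0, sizeof(map));
        map.argsz = sizeof(map);
        map.flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE;
        map.vaddr = (unsigned long)vaddr;
        map.iova  = iova;
        map.size  = size;

        return ioctl(container_fd, VFIO_IOMMU_MAP_DMA, &map);
}

static int spapr_unmap(int container_fd, unsigned long long iova,
                       unsigned long long size)
{
        struct vfio_iommu_type1_dma_unmap unmap;

        memset(&unmap, 0, sizeof(unmap));
        unmap.argsz = sizeof(unmap);
        unmap.iova  = iova;
        unmap.size  = size;

        return ioctl(container_fd, VFIO_IOMMU_UNMAP_DMA, &unmap);   /* -> tce_iommu_clear() */
}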
607 static long tce_iommu_create_table(struct tce_container *container, in tce_iommu_create_table() argument
622 ret = account_locked_vm(container->mm, table_size >> PAGE_SHIFT, true); in tce_iommu_create_table()
635 static void tce_iommu_free_table(struct tce_container *container, in tce_iommu_free_table() argument
641 account_locked_vm(container->mm, pages, false); in tce_iommu_free_table()
644 static long tce_iommu_create_window(struct tce_container *container, in tce_iommu_create_window() argument
653 num = tce_iommu_find_free_table(container); in tce_iommu_create_window()
658 tcegrp = list_first_entry(&container->group_list, in tce_iommu_create_window()
673 ret = tce_iommu_create_table(container, table_group, num, in tce_iommu_create_window()
684 list_for_each_entry(tcegrp, &container->group_list, next) { in tce_iommu_create_window()
692 container->tables[num] = tbl; in tce_iommu_create_window()
700 list_for_each_entry(tcegrp, &container->group_list, next) { in tce_iommu_create_window()
704 tce_iommu_free_table(container, tbl); in tce_iommu_create_window()
709 static long tce_iommu_remove_window(struct tce_container *container, in tce_iommu_remove_window() argument
717 num = tce_iommu_find_table(container, start_addr, &tbl); in tce_iommu_remove_window()
724 list_for_each_entry(tcegrp, &container->group_list, next) { in tce_iommu_remove_window()
741 tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size); in tce_iommu_remove_window()
742 tce_iommu_free_table(container, tbl); in tce_iommu_remove_window()
743 container->tables[num] = NULL; in tce_iommu_remove_window()
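tce_iommu_create_window() and tce_iommu_remove_window() back the v2 dynamic DMA window ioctls: a new table is created and charged via account_locked_vm() in tce_iommu_create_table() (line 622), then propagated to every attached group (the loops at lines 684 and 700); removal clears and frees the table (lines 741-743). A hedged userspace sketch of both calls; field names follow the vfio_iommu_spapr_tce_create/remove UAPI structures, and the spapr_* helpers are hypothetical:

#include <string.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

/* Create a window; the kernel returns its bus address in start_addr. */
static long long spapr_create_window(int container_fd, unsigned long long size,
                                     unsigned int page_shift, unsigned int levels)
{
        struct vfio_iommu_spapr_tce_create create;

        memset(&create, 0, sizeof(create));
        create.argsz       = sizeof(create);
        create.page_shift  = page_shift;    /* e.g. 16 for 64K IOMMU pages */
        create.window_size = size;
        create.levels      = levels;        /* TCE table indirection levels, e.g. 1 */

        if (ioctl(container_fd, VFIO_IOMMU_SPAPR_TCE_CREATE, &create))
                return -1;

        return create.start_addr;
}

static int spapr_remove_window(int container_fd, unsigned long long start_addr)
{
        struct vfio_iommu_spapr_tce_remove remove;

        memset(&remove, 0, sizeof(remove));
        remove.argsz      = sizeof(remove);
        remove.start_addr = start_addr;

        return ioctl(container_fd, VFIO_IOMMU_SPAPR_TCE_REMOVE, &remove);
}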
748 static long tce_iommu_create_default_window(struct tce_container *container) in tce_iommu_create_default_window() argument
755 if (!container->def_window_pending) in tce_iommu_create_default_window()
758 if (!tce_groups_attached(container)) in tce_iommu_create_default_window()
761 tcegrp = list_first_entry(&container->group_list, in tce_iommu_create_default_window()
767 ret = tce_iommu_create_window(container, IOMMU_PAGE_SHIFT_4K, in tce_iommu_create_default_window()
772 container->def_window_pending = false; in tce_iommu_create_default_window()
831 struct tce_container *container = iommu_data; in tce_iommu_ioctl() local
852 BUG_ON(!container); in tce_iommu_ioctl()
853 if (container->mm && container->mm != current->mm) in tce_iommu_ioctl()
862 if (!tce_groups_attached(container)) in tce_iommu_ioctl()
865 tcegrp = list_first_entry(&container->group_list, in tce_iommu_ioctl()
887 container->v2) { in tce_iommu_ioctl()
911 if (!container->enabled) in tce_iommu_ioctl()
926 ret = tce_iommu_create_default_window(container); in tce_iommu_ioctl()
930 num = tce_iommu_find_table(container, param.iova, &tbl); in tce_iommu_ioctl()
955 if (container->v2) in tce_iommu_ioctl()
956 ret = tce_iommu_build_v2(container, tbl, in tce_iommu_ioctl()
962 ret = tce_iommu_build(container, tbl, in tce_iommu_ioctl()
977 if (!container->enabled) in tce_iommu_ioctl()
993 ret = tce_iommu_create_default_window(container); in tce_iommu_ioctl()
997 num = tce_iommu_find_table(container, param.iova, &tbl); in tce_iommu_ioctl()
1009 ret = tce_iommu_clear(container, tbl, in tce_iommu_ioctl()
1019 if (!container->v2) in tce_iommu_ioctl()
1025 ret = tce_iommu_mm_set(container); in tce_iommu_ioctl()
1039 mutex_lock(&container->lock); in tce_iommu_ioctl()
1040 ret = tce_iommu_register_pages(container, param.vaddr, in tce_iommu_ioctl()
1042 mutex_unlock(&container->lock); in tce_iommu_ioctl()
1049 if (!container->v2) in tce_iommu_ioctl()
1052 if (!container->mm) in tce_iommu_ioctl()
1068 mutex_lock(&container->lock); in tce_iommu_ioctl()
1069 ret = tce_iommu_unregister_pages(container, param.vaddr, in tce_iommu_ioctl()
1071 mutex_unlock(&container->lock); in tce_iommu_ioctl()
1076 if (container->v2) in tce_iommu_ioctl()
1079 mutex_lock(&container->lock); in tce_iommu_ioctl()
1080 ret = tce_iommu_enable(container); in tce_iommu_ioctl()
1081 mutex_unlock(&container->lock); in tce_iommu_ioctl()
1086 if (container->v2) in tce_iommu_ioctl()
1089 mutex_lock(&container->lock); in tce_iommu_ioctl()
1090 tce_iommu_disable(container); in tce_iommu_ioctl()
1091 mutex_unlock(&container->lock); in tce_iommu_ioctl()
1098 list_for_each_entry(tcegrp, &container->group_list, next) { in tce_iommu_ioctl()
1109 if (!container->v2) in tce_iommu_ioctl()
1112 ret = tce_iommu_mm_set(container); in tce_iommu_ioctl()
1116 if (!tce_groups_attached(container)) in tce_iommu_ioctl()
1131 mutex_lock(&container->lock); in tce_iommu_ioctl()
1133 ret = tce_iommu_create_default_window(container); in tce_iommu_ioctl()
1135 ret = tce_iommu_create_window(container, in tce_iommu_ioctl()
1140 mutex_unlock(&container->lock); in tce_iommu_ioctl()
1150 if (!container->v2) in tce_iommu_ioctl()
1153 ret = tce_iommu_mm_set(container); in tce_iommu_ioctl()
1157 if (!tce_groups_attached(container)) in tce_iommu_ioctl()
1172 if (container->def_window_pending && !remove.start_addr) { in tce_iommu_ioctl()
1173 container->def_window_pending = false; in tce_iommu_ioctl()
1177 mutex_lock(&container->lock); in tce_iommu_ioctl()
1179 ret = tce_iommu_remove_window(container, remove.start_addr); in tce_iommu_ioctl()
1181 mutex_unlock(&container->lock); in tce_iommu_ioctl()
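The tce_iommu_ioctl() hits above imply a fixed ordering for a v2 client: select the backend, preregister memory, create a window, map, unmap, remove, unregister. A condensed sketch tying the earlier hypothetical helpers together; it assumes buf is page aligned and size is a multiple of the IOMMU page size:

/* Uses the hypothetical helpers sketched earlier in this listing:
 * spapr_set_iommu(), spapr_prereg()/spapr_unprereg(),
 * spapr_create_window()/spapr_remove_window(), spapr_map()/spapr_unmap(). */
static int spapr_v2_example(int container_fd, int group_fd,
                            void *buf, unsigned long size)
{
        long long iova;

        if (spapr_set_iommu(container_fd, group_fd))    /* VFIO_SET_IOMMU              */
                return -1;
        if (spapr_prereg(container_fd, buf, size))      /* register_pages, line 1040   */
                return -1;

        iova = spapr_create_window(container_fd, size, 16, 1);  /* create_window, line 1135 */
        if (iova < 0)
                return -1;

        if (spapr_map(container_fd, buf, iova, size))   /* build_v2, line 956          */
                return -1;

        /* ... device DMA to [iova, iova + size) happens here ... */

        spapr_unmap(container_fd, iova, size);          /* clear, line 1009            */
        spapr_remove_window(container_fd, iova);        /* remove_window, line 1179    */
        return spapr_unprereg(container_fd, buf, size); /* unregister_pages, line 1069 */
}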
1190 static void tce_iommu_release_ownership(struct tce_container *container, in tce_iommu_release_ownership() argument
1201 if (container->tables[i]) in tce_iommu_release_ownership()
1205 static long tce_iommu_take_ownership(struct tce_container *container, in tce_iommu_take_ownership() argument
1212 struct iommu_table *tbl = container->tables[i]; in tce_iommu_take_ownership()
1235 struct tce_container *container = iommu_data; in tce_iommu_attach_group() local
1242 mutex_lock(&container->lock); in tce_iommu_attach_group()
1253 if (container->v2 && table_group->max_dynamic_windows_supported == 0) { in tce_iommu_attach_group()
1259 if (!container->v2 && tce_groups_attached(container)) { in tce_iommu_attach_group()
1268 list_for_each_entry(tcegrp, &container->group_list, next) { in tce_iommu_attach_group()
1294 ret = tce_iommu_take_ownership(container, table_group); in tce_iommu_attach_group()
1295 if (!tce_groups_attached(container) && !container->tables[0]) in tce_iommu_attach_group()
1296 container->def_window_pending = true; in tce_iommu_attach_group()
1300 list_add(&tcegrp->next, &container->group_list); in tce_iommu_attach_group()
1307 mutex_unlock(&container->lock); in tce_iommu_attach_group()
1315 struct tce_container *container = iommu_data; in tce_iommu_detach_group() local
1320 mutex_lock(&container->lock); in tce_iommu_detach_group()
1322 list_for_each_entry(tcegrp, &container->group_list, next) { in tce_iommu_detach_group()
1341 tce_iommu_release_ownership(container, table_group); in tce_iommu_detach_group()
1344 mutex_unlock(&container->lock); in tce_iommu_detach_group()
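tce_iommu_attach_group()/tce_iommu_detach_group() run when userspace associates a group with, or removes it from, a container whose sPAPR backend is selected; that is where ownership of the group's TCE tables is taken (line 1294) and released (line 1341), and where the deferred default window is flagged (line 1296). The corresponding userspace calls are the plain container-association ioctls, sketched here with hypothetical helper names and already-open fds:

#include <sys/ioctl.h>
#include <linux/vfio.h>

/* -> tce_iommu_attach_group(): the backend takes over the group's TCE tables. */
static int spapr_attach_group(int container_fd, int group_fd)
{
        return ioctl(group_fd, VFIO_GROUP_SET_CONTAINER, &container_fd);
}

/* -> tce_iommu_detach_group(): ownership of the tables is handed back. */
static int spapr_detach_group(int group_fd)
{
        return ioctl(group_fd, VFIO_GROUP_UNSET_CONTAINER);
}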