Lines matching refs:pd. Each hit below shows the source line number, the matched code, its enclosing function, and whether pd is an argument or a local at that site.
192 struct usnic_uiom_pd *pd) in usnic_uiom_unmap_sorted_intervals() argument
203 iommu_unmap(pd->domain, va, PAGE_SIZE); in usnic_uiom_unmap_sorted_intervals()
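The two hits above sit in the interval-unmap helper. A minimal sketch of that loop, assuming (not quoted from the file) that usnic_uiom_interval_node carries start/last as virtual page numbers plus a link list head:

    #include <linux/iommu.h>
    #include <linux/list.h>
    #include <linux/mm.h>

    static void unmap_intervals_sketch(struct list_head *intervals,
                                       struct usnic_uiom_pd *pd)
    {
            struct usnic_uiom_interval_node *interval, *tmp;
            unsigned long va, size;

            list_for_each_entry_safe(interval, tmp, intervals, link) {
                    /* start/last are page numbers, hence the shifts */
                    va = interval->start << PAGE_SHIFT;
                    size = (interval->last - interval->start + 1) << PAGE_SHIFT;
                    while (size > 0) {
                            /* one page per call, matching line 203 */
                            iommu_unmap(pd->domain, va, PAGE_SIZE);
                            va += PAGE_SIZE;
                            size -= PAGE_SIZE;
                    }
            }
    }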
210 static void __usnic_uiom_reg_release(struct usnic_uiom_pd *pd, in __usnic_uiom_reg_release() argument
224 spin_lock(&pd->lock); in __usnic_uiom_reg_release()
225 usnic_uiom_remove_interval(&pd->root, vpn_start, in __usnic_uiom_reg_release()
227 usnic_uiom_unmap_sorted_intervals(&rm_intervals, pd); in __usnic_uiom_reg_release()
237 spin_unlock(&pd->lock); in __usnic_uiom_reg_release()
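Lines 224-237 show the teardown ordering in __usnic_uiom_reg_release(): derive the registration's virtual page range, then remove it from the interval tree and unmap it while still holding pd->lock, so no concurrent registration can remap the range mid-teardown. A sketch, assuming uiomr carries va/offset/length fields:

    unsigned long vpn_start, vpn_last;
    int npages;
    LIST_HEAD(rm_intervals);

    npages = PAGE_ALIGN(uiomr->length + uiomr->offset) >> PAGE_SHIFT;
    vpn_start = (uiomr->va & PAGE_MASK) >> PAGE_SHIFT;
    vpn_last = vpn_start + npages - 1;

    spin_lock(&pd->lock);
    /* detach [vpn_start, vpn_last] from pd->root (line 225) ... */
    usnic_uiom_remove_interval(&pd->root, vpn_start, vpn_last, &rm_intervals);
    /* ... then tear down its IOMMU mappings (line 227) */
    usnic_uiom_unmap_sorted_intervals(&rm_intervals, pd);
    spin_unlock(&pd->lock);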
251 struct usnic_uiom_pd *pd = uiomr->pd; in usnic_uiom_map_sorted_intervals() local
280 err = iommu_map(pd->domain, va_start, pa_start, in usnic_uiom_map_sorted_intervals()
297 err = iommu_map(pd->domain, va_start, pa_start, in usnic_uiom_map_sorted_intervals()
326 usnic_uiom_unmap_sorted_intervals(intervals, pd); in usnic_uiom_map_sorted_intervals()
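Lines 280 and 297 are the two iommu_map() call sites in usnic_uiom_map_sorted_intervals(); rather than mapping page by page, the function coalesces physically contiguous pages and maps each run with one call, and line 326 is the error unwind. A simplified sketch of the coalescing step, with variable names assumed from the call sites and the five-argument iommu_map() of this kernel generation:

    /* extend the run while the next page is physically contiguous */
    if (pa == pa_end + PAGE_SIZE) {
            pa_end += PAGE_SIZE;
    } else {
            /* flush the finished run with a single iommu_map() call */
            size = pa_end - pa_start + PAGE_SIZE;
            err = iommu_map(pd->domain, va_start, pa_start, size, flags);
            if (err)
                    goto err_out;   /* unwind via unmap_sorted_intervals() */
            va_start = va;          /* begin the next run */
            pa_start = pa_end = pa;
    }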
330 struct usnic_uiom_reg *usnic_uiom_reg_get(struct usnic_uiom_pd *pd, in usnic_uiom_reg_get() argument
363 uiomr->pd = pd; in usnic_uiom_reg_get()
373 spin_lock(&pd->lock); in usnic_uiom_reg_get()
377 &pd->root, in usnic_uiom_reg_get()
393 err = usnic_uiom_insert_interval(&pd->root, vpn_start, vpn_last, in usnic_uiom_reg_get()
402 spin_unlock(&pd->lock); in usnic_uiom_reg_get()
407 usnic_uiom_unmap_sorted_intervals(&sorted_diff_intervals, pd); in usnic_uiom_reg_get()
412 spin_unlock(&pd->lock); in usnic_uiom_reg_get()
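Lines 373-412 outline the registration path in usnic_uiom_reg_get(): under pd->lock, diff the requested page range against pd->root so only not-yet-mapped subranges get mapped, then insert the whole interval; on failure the freshly mapped diff set is unmapped again (line 407). A hypothetical reconstruction, with helper arguments and goto labels guessed from the call sites rather than verified:

    spin_lock(&pd->lock);
    /* which parts of [vpn_start, vpn_last] still need mapping? */
    err = usnic_uiom_get_intervals_diff(vpn_start, vpn_last, flags,
                                        IOMMU_WRITE, &pd->root,
                                        &sorted_diff_intervals);
    if (err)
            goto out_unlock;                /* line 412 */

    err = usnic_uiom_map_sorted_intervals(&sorted_diff_intervals, uiomr);
    if (err)
            goto out_put_intervals;

    /* line 393: record the full registration in the interval tree */
    err = usnic_uiom_insert_interval(&pd->root, vpn_start, vpn_last, flags);
    if (err)
            goto out_unmap;                 /* line 407 unmaps the diff set */

    spin_unlock(&pd->lock);                 /* line 402: success */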
432 __usnic_uiom_reg_release(uiomr->pd, uiomr, 1); in usnic_uiom_reg_release()
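Lines 330-432 give the public pairing: usnic_uiom_reg_get() pins a user range and maps it into the pd's domain, and usnic_uiom_reg_release() undoes it through __usnic_uiom_reg_release(). A hypothetical caller (user_buf, len, and the error handling are invented for illustration, assuming ERR_PTR-style returns):

    struct usnic_uiom_reg *uiomr;

    uiomr = usnic_uiom_reg_get(pd, (unsigned long)user_buf, len,
                               1 /* writable */, 0 /* dmasync */);
    if (IS_ERR(uiomr))
            return PTR_ERR(uiomr);

    /* ... device DMA through pd->domain ... */

    usnic_uiom_reg_release(uiomr);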
440 struct usnic_uiom_pd *pd; in usnic_uiom_alloc_pd() local
443 pd = kzalloc(sizeof(*pd), GFP_KERNEL); in usnic_uiom_alloc_pd()
444 if (!pd) in usnic_uiom_alloc_pd()
447 pd->domain = domain = iommu_domain_alloc(dev->bus); in usnic_uiom_alloc_pd()
450 kfree(pd); in usnic_uiom_alloc_pd()
454 iommu_set_fault_handler(pd->domain, usnic_uiom_dma_fault, NULL); in usnic_uiom_alloc_pd()
456 spin_lock_init(&pd->lock); in usnic_uiom_alloc_pd()
457 INIT_LIST_HEAD(&pd->devs); in usnic_uiom_alloc_pd()
459 return pd; in usnic_uiom_alloc_pd()
462 void usnic_uiom_dealloc_pd(struct usnic_uiom_pd *pd) in usnic_uiom_dealloc_pd() argument
464 iommu_domain_free(pd->domain); in usnic_uiom_dealloc_pd()
465 kfree(pd); in usnic_uiom_dealloc_pd()
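Lines 440-465 cover the protection-domain lifecycle: usnic_uiom_alloc_pd() backs each pd with its own IOMMU domain, installs usnic_uiom_dma_fault as the fault handler (line 454), and initializes the lock and device list; usnic_uiom_dealloc_pd() frees the domain and the pd. A sketch of a caller, assuming ERR_PTR-style error returns:

    struct usnic_uiom_pd *pd;

    pd = usnic_uiom_alloc_pd(dev);      /* iommu_domain_alloc(dev->bus) inside */
    if (IS_ERR(pd))
            return PTR_ERR(pd);

    /* ... attach devices, register memory ... */

    usnic_uiom_dealloc_pd(pd);          /* iommu_domain_free() + kfree() */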
468 int usnic_uiom_attach_dev_to_pd(struct usnic_uiom_pd *pd, struct device *dev) in usnic_uiom_attach_dev_to_pd() argument
478 err = iommu_attach_device(pd->domain, dev); in usnic_uiom_attach_dev_to_pd()
489 spin_lock(&pd->lock); in usnic_uiom_attach_dev_to_pd()
490 list_add_tail(&uiom_dev->link, &pd->devs); in usnic_uiom_attach_dev_to_pd()
491 pd->dev_cnt++; in usnic_uiom_attach_dev_to_pd()
492 spin_unlock(&pd->lock); in usnic_uiom_attach_dev_to_pd()
497 iommu_detach_device(pd->domain, dev); in usnic_uiom_attach_dev_to_pd()
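Lines 468-497 show usnic_uiom_attach_dev_to_pd(): attach the device to the pd's IOMMU domain, then publish it on pd->devs under pd->lock so dev_cnt stays consistent with the list; the iommu_detach_device() at line 497 is the unwind taken when a later check fails after the attach already succeeded. A sketch of the happy path plus an early unwind, with the usnic_uiom_dev wrapper layout assumed from the hits:

    struct usnic_uiom_dev *uiom_dev;
    int err;

    uiom_dev = kzalloc(sizeof(*uiom_dev), GFP_ATOMIC);
    if (!uiom_dev)
            return -ENOMEM;
    uiom_dev->dev = dev;

    err = iommu_attach_device(pd->domain, dev);
    if (err)
            goto out_free_dev;

    /* publish under pd->lock; dev_cnt mirrors the list length */
    spin_lock(&pd->lock);
    list_add_tail(&uiom_dev->link, &pd->devs);
    pd->dev_cnt++;
    spin_unlock(&pd->lock);
    return 0;

    out_free_dev:
            kfree(uiom_dev);
            return err;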
503 void usnic_uiom_detach_dev_from_pd(struct usnic_uiom_pd *pd, struct device *dev) in usnic_uiom_detach_dev_from_pd() argument
508 spin_lock(&pd->lock); in usnic_uiom_detach_dev_from_pd()
509 list_for_each_entry(uiom_dev, &pd->devs, link) { in usnic_uiom_detach_dev_from_pd()
519 spin_unlock(&pd->lock); in usnic_uiom_detach_dev_from_pd()
524 pd->dev_cnt--; in usnic_uiom_detach_dev_from_pd()
525 spin_unlock(&pd->lock); in usnic_uiom_detach_dev_from_pd()
527 return iommu_detach_device(pd->domain, dev); in usnic_uiom_detach_dev_from_pd()
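Lines 503-527 are the reverse operation: find the wrapper on pd->devs, unlink and free it, drop pd->lock, and only then call iommu_detach_device() (line 527), presumably because the IOMMU driver's detach path may block and must not run under a spinlock. Note that line 527 returns the value of a void call; GNU C accepts this, but a bare call followed by return reads more cleanly. A sketch of the function's tail:

    list_del(&uiom_dev->link);
    kfree(uiom_dev);
    pd->dev_cnt--;
    spin_unlock(&pd->lock);

    /* detach outside the spinlock; the detach path may block */
    iommu_detach_device(pd->domain, dev);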
530 struct device **usnic_uiom_get_dev_list(struct usnic_uiom_pd *pd) in usnic_uiom_get_dev_list() argument
536 spin_lock(&pd->lock); in usnic_uiom_get_dev_list()
537 devs = kcalloc(pd->dev_cnt + 1, sizeof(*devs), GFP_ATOMIC); in usnic_uiom_get_dev_list()
543 list_for_each_entry(uiom_dev, &pd->devs, link) { in usnic_uiom_get_dev_list()
547 spin_unlock(&pd->lock); in usnic_uiom_get_dev_list()
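Finally, lines 530-547 build a snapshot of the attached devices. kcalloc() uses GFP_ATOMIC because pd->lock is already held, and the dev_cnt + 1 sizing leaves a NULL terminator so callers can walk the array without a separate count. A sketch, assuming an ERR_PTR-style error return:

    struct device **devs;
    struct usnic_uiom_dev *uiom_dev;
    int i = 0;

    spin_lock(&pd->lock);
    /* +1 slot NULL-terminates; GFP_ATOMIC because the lock is held */
    devs = kcalloc(pd->dev_cnt + 1, sizeof(*devs), GFP_ATOMIC);
    if (!devs) {
            devs = ERR_PTR(-ENOMEM);
            goto out;
    }

    list_for_each_entry(uiom_dev, &pd->devs, link)
            devs[i++] = uiom_dev->dev;
    out:
            spin_unlock(&pd->lock);
            return devs;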