Lines matching refs:dma_dom — each entry gives the source line number, the matching code, and the enclosing function; parameter declarations are tagged "argument" and local variable declarations "local".
939 static int dma_ops_unity_map(struct dma_ops_domain *dma_dom, in dma_ops_unity_map() argument
947 ret = iommu_map_page(&dma_dom->domain, addr, addr, e->prot, in dma_ops_unity_map()
955 if (addr < dma_dom->aperture_size) in dma_ops_unity_map()
957 dma_dom->aperture[0]->bitmap); in dma_ops_unity_map()
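The matches above belong to dma_ops_unity_map(), which installs the BIOS-requested unity (identity) mappings into a dma_ops domain. Read together, source lines 939-957 show a per-page loop: map bus address equal to physical address, then reserve the page in the aperture allocator bitmap so it is never handed out for DMA. A sketch reconstructed from these fragments; the loop bounds, the trailing iommu_map_page() argument, and the unity_map_entry field names are assumptions based on typical driver layouts of this era:

    static int dma_ops_unity_map(struct dma_ops_domain *dma_dom,
                                 struct unity_map_entry *e)
    {
        u64 addr;
        int ret;

        for (addr = e->address_start; addr < e->address_end;
             addr += PAGE_SIZE) {
            /* Identity mapping: bus address == physical address. */
            ret = iommu_map_page(&dma_dom->domain, addr, addr, e->prot,
                                 PAGE_SIZE /* size argument assumed */);
            if (ret)
                return ret;
            /*
             * If the unity range overlaps the DMA aperture, mark the
             * page allocated so dma_ops_alloc_addresses() skips it.
             */
            if (addr < dma_dom->aperture_size)
                __set_bit(addr >> PAGE_SHIFT,
                          dma_dom->aperture[0]->bitmap);
        }

        return 0;
    }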
988 static int init_unity_mappings_for_device(struct dma_ops_domain *dma_dom, in init_unity_mappings_for_device() argument
997 ret = dma_ops_unity_map(dma_dom, e); in init_unity_mappings_for_device()
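Source lines 988-997 show the caller: init_unity_mappings_for_device() walks the driver's list of unity-map entries and applies dma_ops_unity_map() to each entry that covers the device. A minimal sketch; the list head name amd_iommu_unity_map and the devid range fields are assumptions:

    static int init_unity_mappings_for_device(struct dma_ops_domain *dma_dom,
                                              u16 devid)
    {
        struct unity_map_entry *e;
        int ret;

        list_for_each_entry(e, &amd_iommu_unity_map, list) {
            /* Skip entries that do not cover this device id. */
            if (!(devid >= e->devid_start && devid <= e->devid_end))
                continue;
            ret = dma_ops_unity_map(dma_dom, e);
            if (ret)
                return ret;
        }

        return 0;
    }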
1046 static int alloc_new_range(struct dma_ops_domain *dma_dom, in alloc_new_range() argument
1049 int index = dma_dom->aperture_size >> APERTURE_RANGE_SHIFT; in alloc_new_range()
1060 dma_dom->aperture[index] = kzalloc(sizeof(struct aperture_range), gfp); in alloc_new_range()
1061 if (!dma_dom->aperture[index]) in alloc_new_range()
1064 dma_dom->aperture[index]->bitmap = (void *)get_zeroed_page(gfp); in alloc_new_range()
1065 if (!dma_dom->aperture[index]->bitmap) in alloc_new_range()
1068 dma_dom->aperture[index]->offset = dma_dom->aperture_size; in alloc_new_range()
1071 unsigned long address = dma_dom->aperture_size; in alloc_new_range()
1076 pte = alloc_pte(&dma_dom->domain, address, PAGE_SIZE, in alloc_new_range()
1081 dma_dom->aperture[index]->pte_pages[i] = pte_page; in alloc_new_range()
1087 dma_dom->aperture_size += APERTURE_RANGE_SIZE; in alloc_new_range()
1092 iommu->exclusion_start >= dma_dom->aperture[index]->offset in alloc_new_range()
1093 && iommu->exclusion_start < dma_dom->aperture_size) { in alloc_new_range()
1099 dma_ops_reserve_addresses(dma_dom, startpage, pages); in alloc_new_range()
1109 for (i = dma_dom->aperture[index]->offset; in alloc_new_range()
1110 i < dma_dom->aperture_size; in alloc_new_range()
1112 u64 *pte = fetch_pte(&dma_dom->domain, i); in alloc_new_range()
1116 dma_ops_reserve_addresses(dma_dom, i >> PAGE_SHIFT, 1); in alloc_new_range()
1119 update_domain(&dma_dom->domain); in alloc_new_range()
1124 update_domain(&dma_dom->domain); in alloc_new_range()
1126 free_page((unsigned long)dma_dom->aperture[index]->bitmap); in alloc_new_range()
1128 kfree(dma_dom->aperture[index]); in alloc_new_range()
1129 dma_dom->aperture[index] = NULL; in alloc_new_range()
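Source lines 1046-1129 all fall inside alloc_new_range(), the function that grows a domain's DMA aperture by one APERTURE_RANGE_SIZE chunk. The fragments outline five steps: allocate the aperture_range bookkeeping and its bitmap page, optionally pre-populate page-table pages, bump aperture_size, reserve the IOMMU exclusion range plus any PTEs already present (the kdump case), and unwind on failure. A condensed sketch stitched from those fragments; the APERTURE_MAX_RANGES guard, the populate-loop constants, and the exclusion-range page math are assumptions filled in around the matched lines:

    static int alloc_new_range(struct dma_ops_domain *dma_dom,
                               bool populate, gfp_t gfp)
    {
        int index = dma_dom->aperture_size >> APERTURE_RANGE_SHIFT;
        struct amd_iommu *iommu;
        unsigned long i;

        if (index >= APERTURE_MAX_RANGES)       /* guard assumed */
            return -ENOMEM;

        dma_dom->aperture[index] = kzalloc(sizeof(struct aperture_range), gfp);
        if (!dma_dom->aperture[index])
            return -ENOMEM;

        dma_dom->aperture[index]->bitmap = (void *)get_zeroed_page(gfp);
        if (!dma_dom->aperture[index]->bitmap)
            goto out_free;

        dma_dom->aperture[index]->offset = dma_dom->aperture_size;

        if (populate) {
            unsigned long address = dma_dom->aperture_size;
            int j, num_ptes = APERTURE_RANGE_PAGES / 512;
            u64 *pte, *pte_page;

            /* Each level-1 page-table page maps 512 4K pages (2 MiB). */
            for (j = 0; j < num_ptes; ++j) {
                pte = alloc_pte(&dma_dom->domain, address, PAGE_SIZE,
                                &pte_page, gfp);
                if (!pte)
                    goto out_free;
                dma_dom->aperture[index]->pte_pages[j] = pte_page;
                address += APERTURE_RANGE_SIZE / 64;
            }
        }

        dma_dom->aperture_size += APERTURE_RANGE_SIZE;

        /* Keep the hardware exclusion range out of the allocator. */
        for_each_iommu(iommu) {
            if (iommu->exclusion_start &&
                iommu->exclusion_start >= dma_dom->aperture[index]->offset
                && iommu->exclusion_start < dma_dom->aperture_size) {
                unsigned long startpage = iommu->exclusion_start >> PAGE_SHIFT;
                int pages = iommu_num_pages(iommu->exclusion_start,
                                            iommu->exclusion_length,
                                            PAGE_SIZE);
                dma_ops_reserve_addresses(dma_dom, startpage, pages);
            }
        }

        /* Reserve pages already mapped (e.g. by a kdump predecessor).
         * dma_ops_reserve_addresses() takes a page index, as the
         * startpage call above shows, hence i >> PAGE_SHIFT here. */
        for (i = dma_dom->aperture[index]->offset;
             i < dma_dom->aperture_size;
             i += PAGE_SIZE) {
            u64 *pte = fetch_pte(&dma_dom->domain, i);
            if (!pte || !IOMMU_PTE_PRESENT(*pte))
                continue;
            dma_ops_reserve_addresses(dma_dom, i >> PAGE_SHIFT, 1);
        }

        update_domain(&dma_dom->domain);
        return 0;

    out_free:
        update_domain(&dma_dom->domain);
        free_page((unsigned long)dma_dom->aperture[index]->bitmap);
        kfree(dma_dom->aperture[index]);
        dma_dom->aperture[index] = NULL;
        return -ENOMEM;
    }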
1361 struct dma_ops_domain *dma_dom; in dma_ops_domain_alloc() local
1363 dma_dom = kzalloc(sizeof(struct dma_ops_domain), GFP_KERNEL); in dma_ops_domain_alloc()
1364 if (!dma_dom) in dma_ops_domain_alloc()
1367 spin_lock_init(&dma_dom->domain.lock); in dma_ops_domain_alloc()
1369 dma_dom->domain.id = domain_id_alloc(); in dma_ops_domain_alloc()
1370 if (dma_dom->domain.id == 0) in dma_ops_domain_alloc()
1372 INIT_LIST_HEAD(&dma_dom->domain.dev_list); in dma_ops_domain_alloc()
1373 dma_dom->domain.mode = PAGE_MODE_2_LEVEL; in dma_ops_domain_alloc()
1374 dma_dom->domain.pt_root = (void *)get_zeroed_page(GFP_KERNEL); in dma_ops_domain_alloc()
1375 dma_dom->domain.flags = PD_DMA_OPS_MASK; in dma_ops_domain_alloc()
1376 dma_dom->domain.priv = dma_dom; in dma_ops_domain_alloc()
1377 if (!dma_dom->domain.pt_root) in dma_ops_domain_alloc()
1380 dma_dom->need_flush = false; in dma_ops_domain_alloc()
1381 dma_dom->target_dev = 0xffff; in dma_ops_domain_alloc()
1383 add_domain_to_list(&dma_dom->domain); in dma_ops_domain_alloc()
1385 if (alloc_new_range(dma_dom, true, GFP_KERNEL)) in dma_ops_domain_alloc()
1392 dma_dom->aperture[0]->bitmap[0] = 1; in dma_ops_domain_alloc()
1393 dma_dom->next_address = 0; in dma_ops_domain_alloc()
1396 return dma_dom; in dma_ops_domain_alloc()
1399 dma_ops_domain_free(dma_dom); in dma_ops_domain_alloc()
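Source lines 1361-1399 trace dma_ops_domain_alloc() from kzalloc() to the error path. Two details worth noting: the domain gets a fresh two-level page table (PAGE_MODE_2_LEVEL plus a zeroed pt_root page), and after the first aperture range is created, bit 0 of the bitmap is set so that IO address 0 is never handed out and can double as the "allocation failed" value. A sketch assembled from the matched lines; the error-label name is an assumption:

    static struct dma_ops_domain *dma_ops_domain_alloc(void)
    {
        struct dma_ops_domain *dma_dom;

        dma_dom = kzalloc(sizeof(struct dma_ops_domain), GFP_KERNEL);
        if (!dma_dom)
            return NULL;

        spin_lock_init(&dma_dom->domain.lock);

        dma_dom->domain.id = domain_id_alloc();
        if (dma_dom->domain.id == 0)        /* 0 means "no id available" */
            goto free_dma_dom;
        INIT_LIST_HEAD(&dma_dom->domain.dev_list);
        dma_dom->domain.mode = PAGE_MODE_2_LEVEL;
        dma_dom->domain.pt_root = (void *)get_zeroed_page(GFP_KERNEL);
        dma_dom->domain.flags = PD_DMA_OPS_MASK;
        dma_dom->domain.priv = dma_dom;
        if (!dma_dom->domain.pt_root)
            goto free_dma_dom;

        dma_dom->need_flush = false;
        dma_dom->target_dev = 0xffff;       /* "matches any device" */

        add_domain_to_list(&dma_dom->domain);

        if (alloc_new_range(dma_dom, true, GFP_KERNEL))
            goto free_dma_dom;

        /*
         * Reserve IO address 0 up front so it is never returned as a
         * valid DMA address and can serve as the error value.
         */
        dma_dom->aperture[0]->bitmap[0] = 1;
        dma_dom->next_address = 0;

        return dma_dom;

    free_dma_dom:
        dma_ops_domain_free(dma_dom);
        return NULL;
    }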
1727 struct dma_ops_domain *dma_dom; in get_domain() local
1741 dma_dom = find_protection_domain(devid); in get_domain()
1742 if (!dma_dom) in get_domain()
1743 dma_dom = amd_iommu_rlookup_table[devid]->default_dom; in get_domain()
1744 attach_device(dev, &dma_dom->domain); in get_domain()
1746 dma_dom->domain.id, dev_name(dev)); in get_domain()
1748 return &dma_dom->domain; in get_domain()
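Source lines 1727-1748 show the lookup path in get_domain(): try a pre-allocated per-device domain via find_protection_domain(), fall back to the default dma_ops domain of the IOMMU responsible for the devid, attach the device, and report the choice. A sketch of just that fallback; the devid lookup helper and the dma-capability checks that precede it are not in the matches, so they are assumed or omitted:

    static struct protection_domain *get_domain(struct device *dev)
    {
        struct dma_ops_domain *dma_dom;
        u16 devid = get_device_id(dev);     /* helper name assumed */

        /* Prefer a domain pre-allocated for exactly this device. */
        dma_dom = find_protection_domain(devid);
        if (!dma_dom)
            dma_dom = amd_iommu_rlookup_table[devid]->default_dom;
        attach_device(dev, &dma_dom->domain);

        DUMP_printk("Using protection domain %d for device %s\n",
                    dma_dom->domain.id, dev_name(dev));

        return &dma_dom->domain;
    }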
1868 struct dma_ops_domain *dma_dom, in __map_single() argument
1893 address = dma_ops_alloc_addresses(dev, dma_dom, pages, align_mask, in __map_single()
1901 dma_dom->next_address = dma_dom->aperture_size; in __map_single()
1903 if (alloc_new_range(dma_dom, false, GFP_ATOMIC)) in __map_single()
1915 ret = dma_ops_domain_map(dma_dom, start, paddr, dir); in __map_single()
1926 if (unlikely(dma_dom->need_flush && !amd_iommu_unmap_flush)) { in __map_single()
1927 iommu_flush_tlb(&dma_dom->domain); in __map_single()
1928 dma_dom->need_flush = false; in __map_single()
1930 iommu_flush_pages(&dma_dom->domain, address, size); in __map_single()
1939 dma_ops_domain_unmap(dma_dom, start); in __map_single()
1942 dma_ops_free_addresses(dma_dom, address, pages); in __map_single()
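Source lines 1868-1942 belong to __map_single(), the workhorse behind the map_page/map_sg entry points. The fragments show three phases: allocate a run of IO pages from the aperture (growing it with alloc_new_range() and retrying when dma_ops_alloc_addresses() comes up empty), map each page with dma_ops_domain_map(), then either perform one lazy whole-TLB flush or flush just the mapped pages; on a mapping error everything is unwound. A sketch built around the matched lines; the retry label, the np-cache condition guarding the page flush, and the bad_dma_address failure value are assumptions consistent with this driver generation:

    static dma_addr_t __map_single(struct device *dev,
                                   struct dma_ops_domain *dma_dom,
                                   phys_addr_t paddr, size_t size,
                                   int dir, bool align, u64 dma_mask)
    {
        dma_addr_t offset = paddr & ~PAGE_MASK;
        dma_addr_t address, start, ret;
        unsigned int pages = iommu_num_pages(paddr, size, PAGE_SIZE);
        unsigned long align_mask = align ? (1UL << get_order(size)) - 1 : 0;
        int i;

    retry:
        address = dma_ops_alloc_addresses(dev, dma_dom, pages, align_mask,
                                          dma_mask);
        if (unlikely(address == bad_dma_address)) {
            /* Aperture exhausted: point the allocator past the current
             * end, grow by one range, and try again. */
            dma_dom->next_address = dma_dom->aperture_size;
            if (alloc_new_range(dma_dom, false, GFP_ATOMIC))
                goto out;
            goto retry;
        }

        start = address;
        for (i = 0; i < pages; ++i) {
            ret = dma_ops_domain_map(dma_dom, start, paddr, dir);
            if (ret == bad_dma_address)
                goto out_unmap;
            paddr += PAGE_SIZE;
            start += PAGE_SIZE;
        }
        address += offset;

        /* Lazy mode: one whole-TLB flush covers everything mapped since
         * the last flush; otherwise flush only the pages just mapped. */
        if (unlikely(dma_dom->need_flush && !amd_iommu_unmap_flush)) {
            iommu_flush_tlb(&dma_dom->domain);
            dma_dom->need_flush = false;
        } else if (unlikely(amd_iommu_np_cache))    /* condition assumed */
            iommu_flush_pages(&dma_dom->domain, address, size);

    out:
        return address;

    out_unmap:
        /* Undo the pages mapped so far, then release the IO range. */
        for (--i; i >= 0; --i) {
            start -= PAGE_SIZE;
            dma_ops_domain_unmap(dma_dom, start);
        }
        dma_ops_free_addresses(dma_dom, address, pages);
        address = bad_dma_address;
        goto out;
    }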
1951 static void __unmap_single(struct dma_ops_domain *dma_dom, in __unmap_single() argument
1961 (dma_addr + size > dma_dom->aperture_size)) in __unmap_single()
1970 dma_ops_domain_unmap(dma_dom, start); in __unmap_single()
1976 dma_ops_free_addresses(dma_dom, dma_addr, pages); in __unmap_single()
1978 if (amd_iommu_unmap_flush || dma_dom->need_flush) { in __unmap_single()
1979 iommu_flush_pages(&dma_dom->domain, flush_addr, size); in __unmap_single()
1980 dma_dom->need_flush = false; in __unmap_single()
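Source lines 1951-1980 cover the inverse path. __unmap_single() first rejects addresses that could not have come from this aperture, then clears each page mapping, releases the address range back to the bitmap allocator, and flushes if unmap flushing is forced globally or a lazy flush is pending on the domain. A sketch around the matched lines; only the iommu_num_pages() bookkeeping is filled in:

    static void __unmap_single(struct dma_ops_domain *dma_dom,
                               dma_addr_t dma_addr, size_t size, int dir)
    {
        dma_addr_t flush_addr = dma_addr;   /* keep unaligned address */
        dma_addr_t i, start;
        unsigned int pages;

        /* Reject addresses that never came from this aperture. */
        if ((dma_addr == bad_dma_address) ||
            (dma_addr + size > dma_dom->aperture_size))
            return;

        pages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
        dma_addr &= PAGE_MASK;
        start = dma_addr;

        for (i = 0; i < pages; ++i) {
            dma_ops_domain_unmap(dma_dom, start);
            start += PAGE_SIZE;
        }

        dma_ops_free_addresses(dma_dom, dma_addr, pages);

        /* Flush now if unmap flushing is forced, or if this domain has
         * a pending lazy flush. */
        if (amd_iommu_unmap_flush || dma_dom->need_flush) {
            iommu_flush_pages(&dma_dom->domain, flush_addr, size);
            dma_dom->need_flush = false;
        }
    }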
2262 struct dma_ops_domain *dma_dom; in prealloc_protection_domains() local
2277 dma_dom = dma_ops_domain_alloc(); in prealloc_protection_domains()
2278 if (!dma_dom) in prealloc_protection_domains()
2280 init_unity_mappings_for_device(dma_dom, devid); in prealloc_protection_domains()
2281 dma_dom->target_dev = devid; in prealloc_protection_domains()
2283 attach_device(&dev->dev, &dma_dom->domain); in prealloc_protection_domains()
2285 list_add_tail(&dma_dom->list, &iommu_pd_list); in prealloc_protection_domains()
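Finally, source lines 2262-2285 show how pre-allocated per-device domains are built at init time: for each PCI device selected for pre-allocation, allocate a dma_ops domain, install its unity mappings, pin it to the device id via target_dev, attach the device, and queue the domain on iommu_pd_list, the list that find_protection_domain() (seen earlier in get_domain()) searches. A sketch of the loop; the device iteration and the selection check are not in the matches, so they are stubbed with assumed helpers:

    static void prealloc_protection_domains(void)
    {
        struct pci_dev *dev = NULL;
        struct dma_ops_domain *dma_dom;
        u16 devid;

        for_each_pci_dev(dev) {
            /* Selection logic assumed: skip devices the IOMMU does not
             * translate or that already have a domain. */
            if (!check_device(&dev->dev))
                continue;

            devid = calc_devid(dev->bus->number, dev->devfn); /* assumed */

            dma_dom = dma_ops_domain_alloc();
            if (!dma_dom)
                continue;

            init_unity_mappings_for_device(dma_dom, devid);
            /* Pin the domain to one device id so that
             * find_protection_domain() can match it later. */
            dma_dom->target_dev = devid;

            attach_device(&dev->dev, &dma_dom->domain);

            list_add_tail(&dma_dom->list, &iommu_pd_list);
        }
    }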