Lines matching refs: ttm (drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c, the amdgpu TTM backend). Format of each hit: source line number, matched code, enclosing function, symbol kind (argument/local/member).

70 				   struct ttm_tt *ttm,
73 struct ttm_tt *ttm);
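
Lines 70 and 73 are continuation fragments of the forward declarations near the top of the file. A sketch of the declarations they most plausibly belong to, inferred from the amdgpu_ttm_backend_bind()/amdgpu_ttm_backend_unbind() signatures that appear later in this listing (the first parameter lines are assumed):

    static int amdgpu_ttm_backend_bind(struct ttm_device *bdev,
                                       struct ttm_tt *ttm,
                                       struct ttm_resource *bo_mem);
    static void amdgpu_ttm_backend_unbind(struct ttm_device *bdev,
                                          struct ttm_tt *ttm);
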
248 flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, mem); in amdgpu_ttm_map_buffer()
257 dma_addr = &bo->ttm->dma_address[mm_cur->start >> PAGE_SHIFT]; in amdgpu_ttm_map_buffer()
462 r = amdgpu_ttm_backend_bind(bo->bdev, bo->ttm, new_mem); in amdgpu_bo_move()
474 if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) { in amdgpu_bo_move()
491 amdgpu_ttm_backend_unbind(bo->bdev, bo->ttm); in amdgpu_bo_move()
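
The three amdgpu_bo_move() hits are the move fast paths that bind or unbind the GART mapping instead of doing a real copy. A hedged reconstruction of those branches (locals r, ctx, old_mem, new_mem are from the surrounding function; exact conditions and error handling are assumed and may differ by revision):

    /* system -> GTT: just bind the pages into the GART */
    if (old_mem->mem_type == TTM_PL_SYSTEM &&
        (new_mem->mem_type == TTM_PL_TT ||
         new_mem->mem_type == AMDGPU_PL_PREEMPT)) {
            r = amdgpu_ttm_backend_bind(bo->bdev, bo->ttm, new_mem); /* 462 */
            if (r)
                    return r;
            ttm_bo_move_null(bo, new_mem);
            goto out;
    }

    /* nothing is backing the BO yet, so no bind is needed */
    if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) { /* 474 */
            ttm_bo_move_null(bo, new_mem);
            goto out;
    }

    /* GTT -> system: wait for idle, then drop the GART mapping */
    if ((old_mem->mem_type == TTM_PL_TT ||
         old_mem->mem_type == AMDGPU_PL_PREEMPT) &&
        new_mem->mem_type == TTM_PL_SYSTEM) {
            r = ttm_bo_wait_ctx(bo, ctx);
            if (r)
                    return r;
            amdgpu_ttm_backend_unbind(bo->bdev, bo->ttm); /* 491 */
            ttm_resource_free(bo, &bo->resource);
            ttm_bo_assign_mem(bo, new_mem);
            goto out;
    }
    /* remaining paths (VRAM copies via SDMA) omitted */
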
628 struct ttm_tt ttm; member
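
Line 628 is the embedded TTM base object of struct amdgpu_ttm_tt. Keeping struct ttm_tt as the first member is what makes every `(void *)ttm` cast in this file a valid downcast to the container. A sketch of the container, with the field set inferred from what the other hits dereference (gobj, offset, userptr, usertask, userflags, bound, range); in the real source the HMM field is guarded by CONFIG_DRM_AMDGPU_USERPTR:

    struct amdgpu_ttm_tt {
            struct ttm_tt           ttm;       /* line 628: must stay first so
                                                * (void *)ttm recovers the
                                                * container */
            struct drm_gem_object   *gobj;     /* backing GEM object */
            u64                     offset;    /* GART offset once bound */
            uint64_t                userptr;   /* userptr start address */
            struct task_struct      *usertask; /* owner of the user mapping */
            uint32_t                userflags; /* AMDGPU_GEM_USERPTR_* */
            bool                    bound;     /* currently in the GART? */
            struct hmm_range        *range;    /* userptr HMM snapshot */
    };
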
650 struct ttm_tt *ttm = bo->tbo.ttm; in amdgpu_ttm_tt_get_user_pages() local
651 struct amdgpu_ttm_tt *gtt = (void *)ttm; in amdgpu_ttm_tt_get_user_pages()
683 readonly = amdgpu_ttm_tt_is_readonly(ttm); in amdgpu_ttm_tt_get_user_pages()
685 ttm->num_pages, &gtt->range, readonly, in amdgpu_ttm_tt_get_user_pages()
703 bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm) in amdgpu_ttm_tt_get_user_pages_done() argument
705 struct amdgpu_ttm_tt *gtt = (void *)ttm; in amdgpu_ttm_tt_get_user_pages_done()
712 gtt->userptr, ttm->num_pages); in amdgpu_ttm_tt_get_user_pages_done()
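
Userptr pages are grabbed in two phases: amdgpu_ttm_tt_get_user_pages() (lines 650-685) takes an HMM snapshot of the user pages into gtt->range, and amdgpu_ttm_tt_get_user_pages_done() checks, once the submission is set up, that the snapshot was not invalidated in between. A reconstruction of the validation half (the helper name and negated return convention are assumed from the driver of this era):

    bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm)
    {
            struct amdgpu_ttm_tt *gtt = (void *)ttm;
            bool r = false;

            if (!gtt || !gtt->userptr)
                    return false;

            DRM_DEBUG_DRIVER("user_pages_done 0x%llx pages 0x%x\n",
                             gtt->userptr, ttm->num_pages);

            WARN_ONCE(!gtt->range || !gtt->range->hmm_pfns,
                      "No user pages to check\n");

            if (gtt->range) {
                    /* the helper returns true when the range was invalidated,
                     * so success is the negation; a false result sends the
                     * caller back to amdgpu_ttm_tt_get_user_pages() */
                    r = amdgpu_hmm_range_get_pages_done(gtt->range);
                    gtt->range = NULL;
            }

            return !r;
    }
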
737 void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct page **pages) in amdgpu_ttm_tt_set_user_pages() argument
741 for (i = 0; i < ttm->num_pages; ++i) in amdgpu_ttm_tt_set_user_pages()
742 ttm->pages[i] = pages ? pages[i] : NULL; in amdgpu_ttm_tt_set_user_pages()
751 struct ttm_tt *ttm) in amdgpu_ttm_tt_pin_userptr() argument
754 struct amdgpu_ttm_tt *gtt = (void *)ttm; in amdgpu_ttm_tt_pin_userptr()
761 r = sg_alloc_table_from_pages(ttm->sg, ttm->pages, ttm->num_pages, 0, in amdgpu_ttm_tt_pin_userptr()
762 (u64)ttm->num_pages << PAGE_SHIFT, in amdgpu_ttm_tt_pin_userptr()
768 r = dma_map_sgtable(adev->dev, ttm->sg, direction, 0); in amdgpu_ttm_tt_pin_userptr()
773 drm_prime_sg_to_dma_addr_array(ttm->sg, gtt->ttm.dma_address, in amdgpu_ttm_tt_pin_userptr()
774 ttm->num_pages); in amdgpu_ttm_tt_pin_userptr()
779 kfree(ttm->sg); in amdgpu_ttm_tt_pin_userptr()
780 ttm->sg = NULL; in amdgpu_ttm_tt_pin_userptr()
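
amdgpu_ttm_tt_pin_userptr() wraps the snapshot pages into an sg_table, DMA-maps it, and mirrors the resulting addresses into the ttm_tt dma_address array that GART binding consumes. Reconstructed around the matched lines 751-780 (the userflags/direction computation is assumed):

    static int amdgpu_ttm_tt_pin_userptr(struct ttm_device *bdev,
                                         struct ttm_tt *ttm)
    {
            struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
            struct amdgpu_ttm_tt *gtt = (void *)ttm;
            int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
            enum dma_data_direction direction = write ?
                    DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
            int r;

            /* allocate an SG array and squash the pages into it */
            r = sg_alloc_table_from_pages(ttm->sg, ttm->pages, ttm->num_pages,
                                          0, (u64)ttm->num_pages << PAGE_SHIFT,
                                          GFP_KERNEL);
            if (r)
                    goto release_sg;

            /* map the SG list to the device */
            r = dma_map_sgtable(adev->dev, ttm->sg, direction, 0);
            if (r)
                    goto release_sg;

            /* convert the SG list to a linear array of DMA addresses */
            drm_prime_sg_to_dma_addr_array(ttm->sg, gtt->ttm.dma_address,
                                           ttm->num_pages);
            return 0;

    release_sg:
            kfree(ttm->sg);
            ttm->sg = NULL;
            return r;
    }
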
788 struct ttm_tt *ttm) in amdgpu_ttm_tt_unpin_userptr() argument
791 struct amdgpu_ttm_tt *gtt = (void *)ttm; in amdgpu_ttm_tt_unpin_userptr()
797 if (!ttm->sg || !ttm->sg->sgl) in amdgpu_ttm_tt_unpin_userptr()
801 dma_unmap_sgtable(adev->dev, ttm->sg, direction, 0); in amdgpu_ttm_tt_unpin_userptr()
802 sg_free_table(ttm->sg); in amdgpu_ttm_tt_unpin_userptr()
808 for (i = 0; i < ttm->num_pages; i++) { in amdgpu_ttm_tt_unpin_userptr()
809 if (ttm->pages[i] != in amdgpu_ttm_tt_unpin_userptr()
814 WARN((i == ttm->num_pages), "Missing get_user_page_done\n"); in amdgpu_ttm_tt_unpin_userptr()
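
The unpin path reverses the DMA mapping and then cross-checks, page by page, that each page still matches the HMM snapshot; if every page still matches, nobody ever called amdgpu_ttm_tt_get_user_pages_done(), which is what the WARN on line 814 reports. Reconstruction (the hmm_pfn_to_page() comparison is assumed as the missing half of the truncated line 809):

    static void amdgpu_ttm_tt_unpin_userptr(struct ttm_device *bdev,
                                            struct ttm_tt *ttm)
    {
            struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
            struct amdgpu_ttm_tt *gtt = (void *)ttm;
            int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
            enum dma_data_direction direction = write ?
                    DMA_BIDIRECTIONAL : DMA_TO_DEVICE;

            /* double check that we don't free the table twice */
            if (!ttm->sg || !ttm->sg->sgl)
                    return;

            /* unmap the pages mapped to the device */
            dma_unmap_sgtable(adev->dev, ttm->sg, direction, 0);
            sg_free_table(ttm->sg);

            if (gtt->range) {
                    unsigned long i;

                    for (i = 0; i < ttm->num_pages; i++) {
                            if (ttm->pages[i] !=
                                hmm_pfn_to_page(gtt->range->hmm_pfns[i]))
                                    break;
                    }

                    WARN((i == ttm->num_pages),
                         "Missing get_user_page_done\n");
            }
    }
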
824 struct ttm_tt *ttm = tbo->ttm; in amdgpu_ttm_gart_bind() local
825 struct amdgpu_ttm_tt *gtt = (void *)ttm; in amdgpu_ttm_gart_bind()
834 gtt->ttm.dma_address, flags); in amdgpu_ttm_gart_bind()
844 ttm->num_pages - page_idx, in amdgpu_ttm_gart_bind()
845 &(gtt->ttm.dma_address[page_idx]), flags); in amdgpu_ttm_gart_bind()
847 amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages, in amdgpu_ttm_gart_bind()
848 gtt->ttm.dma_address, flags); in amdgpu_ttm_gart_bind()
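
amdgpu_ttm_gart_bind() writes the ttm_tt DMA addresses into the GART. The split bind handles GFX9 CP MQD buffers, whose first page needs a different memory type than the remainder. A reconstruction (the TMZ check and the MTYPE manipulation between the two binds are assumed and may differ by revision):

    static void amdgpu_ttm_gart_bind(struct amdgpu_device *adev,
                                     struct ttm_buffer_object *tbo,
                                     uint64_t flags)
    {
            struct amdgpu_bo *abo = ttm_to_amdgpu_bo(tbo);
            struct ttm_tt *ttm = tbo->ttm;
            struct amdgpu_ttm_tt *gtt = (void *)ttm;

            if (amdgpu_bo_encrypted(abo))
                    flags |= AMDGPU_PTE_TMZ;

            if (abo->flags & AMDGPU_GEM_CREATE_CP_MQD_GFX9) {
                    uint64_t page_idx = 1;

                    /* first page keeps the default (UC) memory type */
                    amdgpu_gart_bind(adev, gtt->offset, page_idx,
                                     gtt->ttm.dma_address, flags);

                    /* remaining pages switch to MTYPE NC (assumed) */
                    flags &= ~AMDGPU_PTE_MTYPE_VG10_MASK;
                    flags |= AMDGPU_PTE_MTYPE_VG10(AMDGPU_MTYPE_NC);

                    amdgpu_gart_bind(adev,
                                     gtt->offset + (page_idx << PAGE_SHIFT),
                                     ttm->num_pages - page_idx,
                                     &(gtt->ttm.dma_address[page_idx]),
                                     flags);
            } else {
                    amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
                                     gtt->ttm.dma_address, flags);
            }
    }
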
859 struct ttm_tt *ttm, in amdgpu_ttm_backend_bind() argument
863 struct amdgpu_ttm_tt *gtt = (void*)ttm; in amdgpu_ttm_backend_bind()
874 r = amdgpu_ttm_tt_pin_userptr(bdev, ttm); in amdgpu_ttm_backend_bind()
879 } else if (ttm->page_flags & TTM_TT_FLAG_EXTERNAL) { in amdgpu_ttm_backend_bind()
880 if (!ttm->sg) { in amdgpu_ttm_backend_bind()
889 ttm->sg = sgt; in amdgpu_ttm_backend_bind()
892 drm_prime_sg_to_dma_addr_array(ttm->sg, gtt->ttm.dma_address, in amdgpu_ttm_backend_bind()
893 ttm->num_pages); in amdgpu_ttm_backend_bind()
896 if (!ttm->num_pages) { in amdgpu_ttm_backend_bind()
898 ttm->num_pages, bo_mem, ttm); in amdgpu_ttm_backend_bind()
908 flags = amdgpu_ttm_tt_pte_flags(adev, ttm, bo_mem); in amdgpu_ttm_backend_bind()
912 amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages, in amdgpu_ttm_backend_bind()
913 gtt->ttm.dma_address, flags); in amdgpu_ttm_backend_bind()
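
amdgpu_ttm_backend_bind() is where the three page sources converge: userptr pages get pinned, dma-buf imports (TTM_TT_FLAG_EXTERNAL) get their sg_table mapped on demand, and everything ends in one amdgpu_gart_bind() with PTE flags derived from the placement. A condensed reconstruction (the bound guard and invalid-offset early-out are assumed):

    static int amdgpu_ttm_backend_bind(struct ttm_device *bdev,
                                       struct ttm_tt *ttm,
                                       struct ttm_resource *bo_mem)
    {
            struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
            struct amdgpu_ttm_tt *gtt = (void*)ttm;
            uint64_t flags;
            int r;

            if (!bo_mem)
                    return -EINVAL;

            if (gtt->bound)
                    return 0;

            if (gtt->userptr) {
                    r = amdgpu_ttm_tt_pin_userptr(bdev, ttm);
                    if (r)
                            return r;
            } else if (ttm->page_flags & TTM_TT_FLAG_EXTERNAL) {
                    if (!ttm->sg) {
                            struct dma_buf_attachment *attach;
                            struct sg_table *sgt;

                            attach = gtt->gobj->import_attach;
                            sgt = dma_buf_map_attachment(attach,
                                                         DMA_BIDIRECTIONAL);
                            if (IS_ERR(sgt))
                                    return PTR_ERR(sgt);

                            ttm->sg = sgt;
                    }

                    drm_prime_sg_to_dma_addr_array(ttm->sg,
                                                   gtt->ttm.dma_address,
                                                   ttm->num_pages);
            }

            if (!ttm->num_pages)
                    WARN(1, "nothing to bind %u pages for mreg %p back %p!\n",
                         ttm->num_pages, bo_mem, ttm);

            /* placements without a real GART address are bound lazily by
             * amdgpu_ttm_alloc_gart() instead (assumed early-out) */
            if (bo_mem->mem_type != TTM_PL_TT ||
                !amdgpu_gtt_mgr_has_gart_addr(bo_mem)) {
                    gtt->offset = AMDGPU_BO_INVALID_OFFSET;
                    return 0;
            }

            /* compute the PTE flags for this placement and bind the pages */
            flags = amdgpu_ttm_tt_pte_flags(adev, ttm, bo_mem);
            gtt->offset = (u64)bo_mem->start << PAGE_SHIFT;
            amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
                             gtt->ttm.dma_address, flags);
            gtt->bound = true;
            return 0;
    }
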
930 struct amdgpu_ttm_tt *gtt = (void *)bo->ttm; in amdgpu_ttm_alloc_gart()
961 flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, tmp); in amdgpu_ttm_alloc_gart()
984 if (!tbo->ttm) in amdgpu_ttm_recover_gart()
987 flags = amdgpu_ttm_tt_pte_flags(adev, tbo->ttm, tbo->resource); in amdgpu_ttm_recover_gart()
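
amdgpu_ttm_alloc_gart() allocates a GART-visible range on demand for BOs bound with AMDGPU_BO_INVALID_OFFSET, then binds it with the PTE flags computed on line 961. amdgpu_ttm_recover_gart() replays the same binding after a GPU reset has clobbered the GART table; it is small enough to reconstruct in full:

    void amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo)
    {
            struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
            uint64_t flags;

            if (!tbo->ttm)
                    return;

            /* recompute the PTE flags and rewrite the GART entries */
            flags = amdgpu_ttm_tt_pte_flags(adev, tbo->ttm, tbo->resource);
            amdgpu_ttm_gart_bind(adev, tbo, flags);
    }
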
998 struct ttm_tt *ttm) in amdgpu_ttm_backend_unbind() argument
1001 struct amdgpu_ttm_tt *gtt = (void *)ttm; in amdgpu_ttm_backend_unbind()
1005 amdgpu_ttm_tt_unpin_userptr(bdev, ttm); in amdgpu_ttm_backend_unbind()
1006 } else if (ttm->sg && gtt->gobj->import_attach) { in amdgpu_ttm_backend_unbind()
1010 dma_buf_unmap_attachment(attach, ttm->sg, DMA_BIDIRECTIONAL); in amdgpu_ttm_backend_unbind()
1011 ttm->sg = NULL; in amdgpu_ttm_backend_unbind()
1021 amdgpu_gart_unbind(adev, gtt->offset, ttm->num_pages); in amdgpu_ttm_backend_unbind()
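
Unbind mirrors bind's three cases: unpin userptr pages, unmap an imported dma-buf attachment, and finally clear the GART range. Reconstruction (the bound/invalid-offset early-outs are assumed):

    static void amdgpu_ttm_backend_unbind(struct ttm_device *bdev,
                                          struct ttm_tt *ttm)
    {
            struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
            struct amdgpu_ttm_tt *gtt = (void *)ttm;

            /* if the pages have userptr pinning then clear that first */
            if (gtt->userptr) {
                    amdgpu_ttm_tt_unpin_userptr(bdev, ttm);
            } else if (ttm->sg && gtt->gobj->import_attach) {
                    struct dma_buf_attachment *attach;

                    attach = gtt->gobj->import_attach;
                    dma_buf_unmap_attachment(attach, ttm->sg,
                                             DMA_BIDIRECTIONAL);
                    ttm->sg = NULL;
            }

            if (!gtt->bound)
                    return;

            if (gtt->offset == AMDGPU_BO_INVALID_OFFSET)
                    return;

            /* clear the GART entries for this range */
            amdgpu_gart_unbind(adev, gtt->offset, ttm->num_pages);
            gtt->bound = false;
    }
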
1026 struct ttm_tt *ttm) in amdgpu_ttm_backend_destroy() argument
1028 struct amdgpu_ttm_tt *gtt = (void *)ttm; in amdgpu_ttm_backend_destroy()
1033 ttm_tt_fini(&gtt->ttm); in amdgpu_ttm_backend_destroy()
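
Teardown releases the task reference held for userptr accounting, finalizes the embedded ttm_tt, and frees the container allocated in amdgpu_ttm_tt_create(). Reconstruction:

    static void amdgpu_ttm_backend_destroy(struct ttm_device *bdev,
                                           struct ttm_tt *ttm)
    {
            struct amdgpu_ttm_tt *gtt = (void *)ttm;

            if (gtt->usertask)
                    put_task_struct(gtt->usertask);

            ttm_tt_fini(&gtt->ttm);
            kfree(gtt);
    }
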
1064 if (ttm_sg_tt_init(&gtt->ttm, bo, page_flags, caching)) { in amdgpu_ttm_tt_create()
1068 return &gtt->ttm; in amdgpu_ttm_tt_create()
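
amdgpu_ttm_tt_create() allocates the container and initializes the embedded ttm_tt via ttm_sg_tt_init(), which also reserves the dma_address array used throughout this file. Reconstruction (the caching selection from the USWC flag is assumed):

    static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_buffer_object *bo,
                                               uint32_t page_flags)
    {
            struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
            struct amdgpu_ttm_tt *gtt;
            enum ttm_caching caching;

            gtt = kzalloc(sizeof(struct amdgpu_ttm_tt), GFP_KERNEL);
            if (gtt == NULL)
                    return NULL;

            gtt->gobj = &bo->base;

            if (abo->flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
                    caching = ttm_write_combined;
            else
                    caching = ttm_cached;

            /* allocate space for the uninitialized page entries */
            if (ttm_sg_tt_init(&gtt->ttm, bo, page_flags, caching)) {
                    kfree(gtt);
                    return NULL;
            }
            return &gtt->ttm;
    }
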
1078 struct ttm_tt *ttm, in amdgpu_ttm_tt_populate() argument
1082 struct amdgpu_ttm_tt *gtt = (void *)ttm; in amdgpu_ttm_tt_populate()
1088 ttm->sg = kzalloc(sizeof(struct sg_table), GFP_KERNEL); in amdgpu_ttm_tt_populate()
1089 if (!ttm->sg) in amdgpu_ttm_tt_populate()
1094 if (ttm->page_flags & TTM_TT_FLAG_EXTERNAL) in amdgpu_ttm_tt_populate()
1097 ret = ttm_pool_alloc(&adev->mman.bdev.pool, ttm, ctx); in amdgpu_ttm_tt_populate()
1101 for (i = 0; i < ttm->num_pages; ++i) in amdgpu_ttm_tt_populate()
1102 ttm->pages[i]->mapping = bdev->dev_mapping; in amdgpu_ttm_tt_populate()
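
Population has three cases matching the bind paths: userptr objects only need an empty sg_table placeholder (the pages arrive later via HMM), external/imported objects bring their own pages, and ordinary objects come from the TTM pool and get their mapping pointer set for CPU access. Reconstruction:

    static int amdgpu_ttm_tt_populate(struct ttm_device *bdev,
                                      struct ttm_tt *ttm,
                                      struct ttm_operation_ctx *ctx)
    {
            struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
            struct amdgpu_ttm_tt *gtt = (void *)ttm;
            pgoff_t i;
            int ret;

            /* user pages are bound by amdgpu_ttm_tt_pin_userptr() */
            if (gtt->userptr) {
                    ttm->sg = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
                    if (!ttm->sg)
                            return -ENOMEM;
                    return 0;
            }

            if (ttm->page_flags & TTM_TT_FLAG_EXTERNAL)
                    return 0;

            ret = ttm_pool_alloc(&adev->mman.bdev.pool, ttm, ctx);
            if (ret)
                    return ret;

            for (i = 0; i < ttm->num_pages; ++i)
                    ttm->pages[i]->mapping = bdev->dev_mapping;

            return 0;
    }
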
1114 struct ttm_tt *ttm) in amdgpu_ttm_tt_unpopulate() argument
1116 struct amdgpu_ttm_tt *gtt = (void *)ttm; in amdgpu_ttm_tt_unpopulate()
1120 amdgpu_ttm_backend_unbind(bdev, ttm); in amdgpu_ttm_tt_unpopulate()
1123 amdgpu_ttm_tt_set_user_pages(ttm, NULL); in amdgpu_ttm_tt_unpopulate()
1124 kfree(ttm->sg); in amdgpu_ttm_tt_unpopulate()
1125 ttm->sg = NULL; in amdgpu_ttm_tt_unpopulate()
1129 if (ttm->page_flags & TTM_TT_FLAG_EXTERNAL) in amdgpu_ttm_tt_unpopulate()
1132 for (i = 0; i < ttm->num_pages; ++i) in amdgpu_ttm_tt_unpopulate()
1133 ttm->pages[i]->mapping = NULL; in amdgpu_ttm_tt_unpopulate()
1136 return ttm_pool_free(&adev->mman.bdev.pool, ttm); in amdgpu_ttm_tt_unpopulate()
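
Unpopulate first unbinds (line 1120) so the GART never points at freed pages, then undoes whichever population case applied. Reconstruction:

    static void amdgpu_ttm_tt_unpopulate(struct ttm_device *bdev,
                                         struct ttm_tt *ttm)
    {
            struct amdgpu_ttm_tt *gtt = (void *)ttm;
            struct amdgpu_device *adev;
            pgoff_t i;

            amdgpu_ttm_backend_unbind(bdev, ttm);

            if (gtt->userptr) {
                    amdgpu_ttm_tt_set_user_pages(ttm, NULL);
                    kfree(ttm->sg);
                    ttm->sg = NULL;
                    return;
            }

            if (ttm->page_flags & TTM_TT_FLAG_EXTERNAL)
                    return;

            for (i = 0; i < ttm->num_pages; ++i)
                    ttm->pages[i]->mapping = NULL;

            adev = amdgpu_ttm_adev(bdev);
            return ttm_pool_free(&adev->mman.bdev.pool, ttm);
    }
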
1151 if (!tbo->ttm) in amdgpu_ttm_tt_get_userptr()
1154 gtt = (void *)tbo->ttm; in amdgpu_ttm_tt_get_userptr()
1175 if (!bo->ttm) { in amdgpu_ttm_tt_set_userptr()
1177 bo->ttm = amdgpu_ttm_tt_create(bo, 0); in amdgpu_ttm_tt_set_userptr()
1178 if (bo->ttm == NULL) in amdgpu_ttm_tt_set_userptr()
1183 bo->ttm->page_flags |= TTM_TT_FLAG_EXTERNAL; in amdgpu_ttm_tt_set_userptr()
1185 gtt = (void *)bo->ttm; in amdgpu_ttm_tt_set_userptr()
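
amdgpu_ttm_tt_set_userptr() turns an existing BO into a userptr object: it creates the ttm_tt on demand, marks it TTM_TT_FLAG_EXTERNAL so the TTM pool never backs it, and records the address, flags, and owning task. Reconstruction (the usertask bookkeeping is assumed):

    int amdgpu_ttm_tt_set_userptr(struct ttm_buffer_object *bo,
                                  uint64_t addr, uint32_t flags)
    {
            struct amdgpu_ttm_tt *gtt;

            if (!bo->ttm) {
                    /* TODO: We want a separate TTM object type for userptrs */
                    bo->ttm = amdgpu_ttm_tt_create(bo, 0);
                    if (bo->ttm == NULL)
                            return -ENOMEM;
            }

            /* set TTM_TT_FLAG_EXTERNAL before populate but after create */
            bo->ttm->page_flags |= TTM_TT_FLAG_EXTERNAL;

            gtt = (void *)bo->ttm;
            gtt->userptr = addr;
            gtt->userflags = flags;

            if (gtt->usertask)
                    put_task_struct(gtt->usertask);
            gtt->usertask = current->group_leader;
            get_task_struct(gtt->usertask);

            return 0;
    }
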
1200 struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm) in amdgpu_ttm_tt_get_usermm() argument
1202 struct amdgpu_ttm_tt *gtt = (void *)ttm; in amdgpu_ttm_tt_get_usermm()
1218 bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start, in amdgpu_ttm_tt_affect_userptr() argument
1221 struct amdgpu_ttm_tt *gtt = (void *)ttm; in amdgpu_ttm_tt_affect_userptr()
1230 size = (unsigned long)gtt->ttm.num_pages * PAGE_SIZE; in amdgpu_ttm_tt_affect_userptr()
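
amdgpu_ttm_tt_affect_userptr() is the MMU-notifier range filter: it reports whether an invalidated CPU range overlaps this object's user mapping. Reconstruction around the matched size computation on line 1230:

    bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
                                      unsigned long end)
    {
            struct amdgpu_ttm_tt *gtt = (void *)ttm;
            unsigned long size;

            if (gtt == NULL || !gtt->userptr)
                    return false;

            /* false if no part of the ttm_tt lies within the range */
            size = (unsigned long)gtt->ttm.num_pages * PAGE_SIZE;
            if (gtt->userptr > end || gtt->userptr + size <= start)
                    return false;

            return true;
    }
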
1242 bool amdgpu_ttm_tt_is_userptr(struct ttm_tt *ttm) in amdgpu_ttm_tt_is_userptr() argument
1244 struct amdgpu_ttm_tt *gtt = (void *)ttm; in amdgpu_ttm_tt_is_userptr()
1255 bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm) in amdgpu_ttm_tt_is_readonly() argument
1257 struct amdgpu_ttm_tt *gtt = (void *)ttm; in amdgpu_ttm_tt_is_readonly()
1273 uint64_t amdgpu_ttm_tt_pde_flags(struct ttm_tt *ttm, struct ttm_resource *mem) in amdgpu_ttm_tt_pde_flags() argument
1284 if (ttm->caching == ttm_cached) in amdgpu_ttm_tt_pde_flags()
1304 uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm, in amdgpu_ttm_tt_pte_flags() argument
1307 uint64_t flags = amdgpu_ttm_tt_pde_flags(ttm, mem); in amdgpu_ttm_tt_pte_flags()
1312 if (!amdgpu_ttm_tt_is_readonly(ttm)) in amdgpu_ttm_tt_pte_flags()
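
The flag helpers translate TTM placement and caching into GPU page-table bits: the PDE flags cover validity, system placement, and snooping for CPU-cached pages, and the PTE flags add the per-ASIC GART bits plus the write permission gated by the readonly check on line 1312. A reconstruction of both (the exact flag set varies by revision; the AMDGPU_PL_PREEMPT handling is assumed):

    uint64_t amdgpu_ttm_tt_pde_flags(struct ttm_tt *ttm,
                                     struct ttm_resource *mem)
    {
            uint64_t flags = 0;

            if (mem && mem->mem_type != TTM_PL_SYSTEM)
                    flags |= AMDGPU_PTE_VALID;

            if (mem && (mem->mem_type == TTM_PL_TT ||
                        mem->mem_type == AMDGPU_PL_PREEMPT)) {
                    flags |= AMDGPU_PTE_SYSTEM;

                    if (ttm->caching == ttm_cached)
                            flags |= AMDGPU_PTE_SNOOPED;
            }

            return flags;
    }

    uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev,
                                     struct ttm_tt *ttm,
                                     struct ttm_resource *mem)
    {
            uint64_t flags = amdgpu_ttm_tt_pde_flags(ttm, mem);

            flags |= adev->gart.gart_pte_flags;
            flags |= AMDGPU_PTE_READABLE;

            if (!amdgpu_ttm_tt_is_readonly(ttm))
                    flags |= AMDGPU_PTE_WRITEABLE;

            return flags;
    }
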