
Searched refs:ttm (Results 1 – 25 of 54) sorted by relevance


/linux-5.19.10/drivers/gpu/drm/ttm/
ttm_tt.c
66 if (bo->ttm) in ttm_tt_create()
84 bo->ttm = bdev->funcs->ttm_tt_create(bo, page_flags); in ttm_tt_create()
85 if (unlikely(bo->ttm == NULL)) in ttm_tt_create()
88 WARN_ON(bo->ttm->page_flags & TTM_TT_FLAG_EXTERNAL_MAPPABLE && in ttm_tt_create()
89 !(bo->ttm->page_flags & TTM_TT_FLAG_EXTERNAL)); in ttm_tt_create()
97 static int ttm_tt_alloc_page_directory(struct ttm_tt *ttm) in ttm_tt_alloc_page_directory() argument
99 ttm->pages = kvcalloc(ttm->num_pages, sizeof(void*), GFP_KERNEL); in ttm_tt_alloc_page_directory()
100 if (!ttm->pages) in ttm_tt_alloc_page_directory()
106 static int ttm_dma_tt_alloc_page_directory(struct ttm_tt *ttm) in ttm_dma_tt_alloc_page_directory() argument
108 ttm->pages = kvcalloc(ttm->num_pages, sizeof(*ttm->pages) + in ttm_dma_tt_alloc_page_directory()
[all …]
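A note on the truncated ttm_dma_tt_alloc_page_directory() hit above: the single kvcalloc() sizes each element as a page pointer plus a DMA address, so one allocation can back both arrays. A minimal sketch of that pattern follows; placing dma_address directly behind pages[] is an assumption inferred from the truncated snippet, not a quote of the kernel source.

static int dma_tt_alloc_page_directory_sketch(struct ttm_tt *ttm)
{
        /* One kvcalloc holds num_pages page pointers followed by
         * num_pages dma_addr_t slots (assumed layout). */
        ttm->pages = kvcalloc(ttm->num_pages, sizeof(*ttm->pages) +
                              sizeof(*ttm->dma_address), GFP_KERNEL);
        if (!ttm->pages)
                return -ENOMEM;

        /* Assumed: the DMA-address array starts right after pages[]. */
        ttm->dma_address = (void *)(ttm->pages + ttm->num_pages);
        return 0;
}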
ttm_agp_backend.c
45 struct ttm_tt ttm; member
50 int ttm_agp_bind(struct ttm_tt *ttm, struct ttm_resource *bo_mem) in ttm_agp_bind() argument
52 struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm); in ttm_agp_bind()
55 int ret, cached = ttm->caching == ttm_cached; in ttm_agp_bind()
61 mem = agp_allocate_memory(agp_be->bridge, ttm->num_pages, AGP_USER_MEMORY); in ttm_agp_bind()
66 for (i = 0; i < ttm->num_pages; i++) { in ttm_agp_bind()
67 struct page *page = ttm->pages[i]; in ttm_agp_bind()
87 void ttm_agp_unbind(struct ttm_tt *ttm) in ttm_agp_unbind() argument
89 struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm); in ttm_agp_unbind()
102 bool ttm_agp_is_bound(struct ttm_tt *ttm) in ttm_agp_is_bound() argument
[all …]
ttm_bo_util.c
138 struct ttm_tt *ttm = bo->ttm; in ttm_bo_move_memcpy() local
150 if (ttm && ((ttm->page_flags & TTM_TT_FLAG_SWAPPED) || in ttm_bo_move_memcpy()
152 ret = ttm_tt_populate(bdev, ttm, ctx); in ttm_bo_move_memcpy()
159 dst_iter = ttm_kmap_iter_tt_init(&_dst_iter.tt, bo->ttm); in ttm_bo_move_memcpy()
165 src_iter = ttm_kmap_iter_tt_init(&_src_iter.tt, bo->ttm); in ttm_bo_move_memcpy()
171 clear = src_iter->ops->maps_tt && (!ttm || !ttm_tt_is_populated(ttm)); in ttm_bo_move_memcpy()
172 if (!(clear && ttm && !(ttm->page_flags & TTM_TT_FLAG_ZERO_ALLOC))) in ttm_bo_move_memcpy()
271 caching = man->use_tt ? bo->ttm->caching : res->bus.caching; in ttm_io_prot()
313 struct ttm_tt *ttm = bo->ttm; in ttm_bo_kmap_ttm() local
317 BUG_ON(!ttm); in ttm_bo_kmap_ttm()
[all …]
Makefile
5 ttm-y := ttm_tt.o ttm_bo.o ttm_bo_util.o ttm_bo_vm.o ttm_module.o \
8 ttm-$(CONFIG_AGP) += ttm_agp_backend.o
10 obj-$(CONFIG_DRM_TTM) += ttm.o
ttm_bo_vm.c
157 if (bo->ttm && (bo->ttm->page_flags & TTM_TT_FLAG_EXTERNAL)) { in ttm_bo_vm_reserve()
158 if (!(bo->ttm->page_flags & TTM_TT_FLAG_EXTERNAL_MAPPABLE)) { in ttm_bo_vm_reserve()
196 struct ttm_tt *ttm = NULL; in ttm_bo_vm_fault_reserved() local
231 ttm = bo->ttm; in ttm_bo_vm_fault_reserved()
232 if (ttm_tt_populate(bdev, bo->ttm, &ctx)) in ttm_bo_vm_fault_reserved()
247 page = ttm->pages[page_offset]; in ttm_bo_vm_fault_reserved()
/linux-5.19.10/drivers/gpu/drm/radeon/
radeon_ttm.c
56 static int radeon_ttm_tt_bind(struct ttm_device *bdev, struct ttm_tt *ttm,
58 static void radeon_ttm_tt_unbind(struct ttm_device *bdev, struct ttm_tt *ttm);
205 r = radeon_ttm_tt_bind(bo->bdev, bo->ttm, new_mem); in radeon_bo_move()
220 if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) { in radeon_bo_move()
232 radeon_ttm_tt_unbind(bo->bdev, bo->ttm); in radeon_bo_move()
325 struct ttm_tt ttm; member
335 static int radeon_ttm_tt_pin_userptr(struct ttm_device *bdev, struct ttm_tt *ttm) in radeon_ttm_tt_pin_userptr() argument
338 struct radeon_ttm_tt *gtt = (void *)ttm; in radeon_ttm_tt_pin_userptr()
352 unsigned long end = gtt->userptr + (u64)ttm->num_pages * PAGE_SIZE; in radeon_ttm_tt_pin_userptr()
360 unsigned num_pages = ttm->num_pages - pinned; in radeon_ttm_tt_pin_userptr()
[all …]
radeon_prime.c
39 return drm_prime_pages_to_sg(obj->dev, bo->tbo.ttm->pages, in radeon_gem_prime_get_sg_table()
40 bo->tbo.ttm->num_pages); in radeon_gem_prime_get_sg_table()
107 if (radeon_ttm_tt_has_userptr(bo->rdev, bo->tbo.ttm)) in radeon_gem_prime_export()
radeon_mn.c
57 if (!bo->tbo.ttm || !radeon_ttm_tt_is_bound(bo->tbo.bdev, bo->tbo.ttm)) in radeon_mn_invalidate()
/linux-5.19.10/drivers/gpu/drm/i915/gem/
i915_gem_ttm.c
50 struct ttm_tt ttm; member
186 struct ttm_tt *ttm, in i915_ttm_tt_shmem_populate() argument
191 struct i915_ttm_tt *i915_tt = container_of(ttm, typeof(*i915_tt), ttm); in i915_ttm_tt_shmem_populate()
193 const size_t size = (size_t)ttm->num_pages << PAGE_SHIFT; in i915_ttm_tt_shmem_populate()
231 ttm->pages[i++] = page; in i915_ttm_tt_shmem_populate()
233 if (ttm->page_flags & TTM_TT_FLAG_SWAPPED) in i915_ttm_tt_shmem_populate()
234 ttm->page_flags &= ~TTM_TT_FLAG_SWAPPED; in i915_ttm_tt_shmem_populate()
244 static void i915_ttm_tt_shmem_unpopulate(struct ttm_tt *ttm) in i915_ttm_tt_shmem_unpopulate() argument
246 struct i915_ttm_tt *i915_tt = container_of(ttm, typeof(*i915_tt), ttm); in i915_ttm_tt_shmem_unpopulate()
247 bool backup = ttm->page_flags & TTM_TT_FLAG_SWAPPED; in i915_ttm_tt_shmem_unpopulate()
[all …]
i915_gem_ttm_pm.c
24 if (obj->ttm.backup) { in i915_ttm_backup_free()
25 i915_gem_object_put(obj->ttm.backup); in i915_ttm_backup_free()
26 obj->ttm.backup = NULL; in i915_ttm_backup_free()
55 if (bo->resource->mem_type == I915_PL_SYSTEM || obj->ttm.backup) in i915_ttm_backup()
77 err = ttm_tt_populate(backup_bo->bdev, backup_bo->ttm, &ctx); in i915_ttm_backup()
85 obj->ttm.backup = backup; in i915_ttm_backup()
153 struct drm_i915_gem_object *backup = obj->ttm.backup; in i915_ttm_restore()
169 err = ttm_tt_populate(backup_bo->bdev, backup_bo->ttm, &ctx); in i915_ttm_restore()
176 obj->ttm.backup = NULL; in i915_ttm_restore()
i915_gem_ttm_move.c
47 struct ttm_tt *ttm) in i915_ttm_cache_level() argument
51 ttm->caching == ttm_cached) ? I915_CACHE_LLC : in i915_ttm_cache_level()
80 if (i915_ttm_cpu_maps_iomem(bo->resource) || bo->ttm->caching != ttm_cached) { in i915_ttm_adjust_domains_after_move()
127 bo->ttm); in i915_ttm_adjust_gem_after_move()
176 struct ttm_tt *src_ttm = bo->ttm; in i915_ttm_accel_move()
297 ttm_kmap_iter_tt_init(&arg->_src_iter.tt, bo->ttm) : in i915_ttm_memcpy_init()
299 &obj->ttm.cached_io_rsgt->table, in i915_ttm_memcpy_init()
488 struct ttm_tt *ttm = bo->ttm; in i915_ttm_move() local
509 if (ttm && (dst_man->use_tt || (ttm->page_flags & TTM_TT_FLAG_SWAPPED))) { in i915_ttm_move()
510 ret = ttm_tt_populate(bo->bdev, ttm, ctx); in i915_ttm_move()
[all …]
/linux-5.19.10/include/drm/ttm/
ttm_tt.h
150 int ttm_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
163 void ttm_tt_fini(struct ttm_tt *ttm);
173 void ttm_tt_destroy(struct ttm_device *bdev, struct ttm_tt *ttm);
182 int ttm_tt_swapin(struct ttm_tt *ttm);
183 int ttm_tt_swapout(struct ttm_device *bdev, struct ttm_tt *ttm,
195 int ttm_tt_populate(struct ttm_device *bdev, struct ttm_tt *ttm,
206 void ttm_tt_unpopulate(struct ttm_device *bdev, struct ttm_tt *ttm);
216 static inline void ttm_tt_mark_for_clear(struct ttm_tt *ttm) in ttm_tt_mark_for_clear() argument
218 ttm->page_flags |= TTM_TT_FLAG_ZERO_ALLOC; in ttm_tt_mark_for_clear()
244 int ttm_agp_bind(struct ttm_tt *ttm, struct ttm_resource *bo_mem);
[all …]
ttm_device.h
86 struct ttm_tt *ttm,
97 struct ttm_tt *ttm);
108 void (*ttm_tt_destroy)(struct ttm_device *bdev, struct ttm_tt *ttm);
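The ttm_device.h hits above are the ttm_tt hooks in the per-device function table; ttm_tt.c:84 shows them being called through bdev->funcs. A hedged sketch of how a driver might wire them up; the mydrv_* names are hypothetical, and a matching create/destroy implementation is sketched after the qxl_ttm.c hits below.

#include <drm/ttm/ttm_device.h>
#include <drm/ttm/ttm_tt.h>

/* Hypothetical driver callbacks, defined elsewhere in the driver. */
static struct ttm_tt *mydrv_ttm_tt_create(struct ttm_buffer_object *bo,
                                          uint32_t page_flags);
static void mydrv_ttm_tt_destroy(struct ttm_device *bdev, struct ttm_tt *ttm);

static struct ttm_device_funcs mydrv_ttm_funcs = {
        .ttm_tt_create  = mydrv_ttm_tt_create,   /* called via bdev->funcs in ttm_tt.c:84 */
        .ttm_tt_destroy = mydrv_ttm_tt_destroy,  /* see ttm_device.h:108 above */
        /* .ttm_tt_populate may also be overridden, as the vmwgfx_blit.c hits
         * (dst->bdev->funcs->ttm_tt_populate) further down show. */
};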
/linux-5.19.10/drivers/gpu/drm/nouveau/
nouveau_sgdma.c
14 struct ttm_tt ttm; member
19 nouveau_sgdma_destroy(struct ttm_device *bdev, struct ttm_tt *ttm) in nouveau_sgdma_destroy() argument
21 struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm; in nouveau_sgdma_destroy()
23 if (ttm) { in nouveau_sgdma_destroy()
24 ttm_tt_fini(&nvbe->ttm); in nouveau_sgdma_destroy()
30 nouveau_sgdma_bind(struct ttm_device *bdev, struct ttm_tt *ttm, struct ttm_resource *reg) in nouveau_sgdma_bind() argument
32 struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm; in nouveau_sgdma_bind()
40 ret = nouveau_mem_host(reg, &nvbe->ttm); in nouveau_sgdma_bind()
57 nouveau_sgdma_unbind(struct ttm_device *bdev, struct ttm_tt *ttm) in nouveau_sgdma_unbind() argument
59 struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm; in nouveau_sgdma_unbind()
[all …]
nouveau_ttm.c
146 drm->ttm.type_host[!!kind] = typei; in nouveau_ttm_init_host()
152 drm->ttm.type_ncoh[!!kind] = typei; in nouveau_ttm_init_host()
167 ttm_resource_manager_init(man, &drm->ttm.bdev, in nouveau_ttm_init_vram()
169 ttm_set_driver_manager(&drm->ttm.bdev, TTM_PL_VRAM, man); in nouveau_ttm_init_vram()
173 return ttm_range_man_init(&drm->ttm.bdev, TTM_PL_VRAM, false, in nouveau_ttm_init_vram()
181 struct ttm_resource_manager *man = ttm_manager_type(&drm->ttm.bdev, TTM_PL_VRAM); in nouveau_ttm_fini_vram()
185 ttm_resource_manager_evict_all(&drm->ttm.bdev, man); in nouveau_ttm_fini_vram()
187 ttm_set_driver_manager(&drm->ttm.bdev, TTM_PL_VRAM, NULL); in nouveau_ttm_fini_vram()
190 ttm_range_man_fini(&drm->ttm.bdev, TTM_PL_VRAM); in nouveau_ttm_fini_vram()
205 return ttm_range_man_init(&drm->ttm.bdev, TTM_PL_TT, true, in nouveau_ttm_init_gtt()
[all …]
nouveau_bo.c
46 static int nouveau_ttm_tt_bind(struct ttm_device *bdev, struct ttm_tt *ttm,
48 static void nouveau_ttm_tt_unbind(struct ttm_device *bdev, struct ttm_tt *ttm);
220 nvbo->bo.bdev = &drm->ttm.bdev; in nouveau_bo_alloc()
552 struct ttm_tt *ttm_dma = (struct ttm_tt *)nvbo->bo.ttm; in nouveau_bo_sync_for_device()
588 struct ttm_tt *ttm_dma = (struct ttm_tt *)nvbo->bo.ttm; in nouveau_bo_sync_for_cpu()
625 mutex_lock(&drm->ttm.io_reserve_mutex); in nouveau_bo_add_io_reserve_lru()
626 list_move_tail(&nvbo->io_reserve_lru, &drm->ttm.io_reserve_lru); in nouveau_bo_add_io_reserve_lru()
627 mutex_unlock(&drm->ttm.io_reserve_mutex); in nouveau_bo_add_io_reserve_lru()
635 mutex_lock(&drm->ttm.io_reserve_mutex); in nouveau_bo_del_io_reserve_lru()
637 mutex_unlock(&drm->ttm.io_reserve_mutex); in nouveau_bo_del_io_reserve_lru()
[all …]
nouveau_ttm.h
8 return container_of(bd, struct nouveau_drm, ttm.bdev); in nouveau_bdev()
24 int nouveau_sgdma_bind(struct ttm_device *bdev, struct ttm_tt *ttm, struct ttm_resource *reg);
25 void nouveau_sgdma_unbind(struct ttm_device *bdev, struct ttm_tt *ttm);
26 void nouveau_sgdma_destroy(struct ttm_device *bdev, struct ttm_tt *ttm);
nouveau_mem.c
99 type = drm->ttm.type_ncoh[!!mem->kind]; in nouveau_mem_host()
101 type = drm->ttm.type_host[0]; in nouveau_mem_host()
138 drm->ttm.type_vram, page, size, in nouveau_mem_vram()
146 drm->ttm.type_vram, page, size, in nouveau_mem_vram()
/linux-5.19.10/drivers/gpu/drm/amd/amdgpu/
amdgpu_ttm.c
70 struct ttm_tt *ttm,
73 struct ttm_tt *ttm);
248 flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, mem); in amdgpu_ttm_map_buffer()
257 dma_addr = &bo->ttm->dma_address[mm_cur->start >> PAGE_SHIFT]; in amdgpu_ttm_map_buffer()
462 r = amdgpu_ttm_backend_bind(bo->bdev, bo->ttm, new_mem); in amdgpu_bo_move()
474 if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) { in amdgpu_bo_move()
491 amdgpu_ttm_backend_unbind(bo->bdev, bo->ttm); in amdgpu_bo_move()
628 struct ttm_tt ttm; member
650 struct ttm_tt *ttm = bo->tbo.ttm; in amdgpu_ttm_tt_get_user_pages() local
651 struct amdgpu_ttm_tt *gtt = (void *)ttm; in amdgpu_ttm_tt_get_user_pages()
[all …]
amdgpu_ttm.h
161 bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm);
168 static inline bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm) in amdgpu_ttm_tt_get_user_pages_done() argument
174 void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct page **pages);
179 bool amdgpu_ttm_tt_has_userptr(struct ttm_tt *ttm);
180 struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm);
181 bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
183 bool amdgpu_ttm_tt_userptr_invalidated(struct ttm_tt *ttm,
185 bool amdgpu_ttm_tt_is_userptr(struct ttm_tt *ttm);
186 bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm);
187 uint64_t amdgpu_ttm_tt_pde_flags(struct ttm_tt *ttm, struct ttm_resource *mem);
[all …]
amdgpu_amdkfd_gpuvm.c
313 if (WARN(amdgpu_ttm_tt_get_usermm(bo->tbo.ttm), in amdgpu_amdkfd_bo_validate()
459 struct ttm_tt *src_ttm = mem->bo->tbo.ttm; in kfd_mem_dmamap_userptr()
460 struct ttm_tt *ttm = bo->tbo.ttm; in kfd_mem_dmamap_userptr() local
463 ttm->sg = kmalloc(sizeof(*ttm->sg), GFP_KERNEL); in kfd_mem_dmamap_userptr()
464 if (unlikely(!ttm->sg)) in kfd_mem_dmamap_userptr()
467 if (WARN_ON(ttm->num_pages != src_ttm->num_pages)) in kfd_mem_dmamap_userptr()
471 ret = sg_alloc_table_from_pages(ttm->sg, src_ttm->pages, in kfd_mem_dmamap_userptr()
472 ttm->num_pages, 0, in kfd_mem_dmamap_userptr()
473 (u64)ttm->num_pages << PAGE_SHIFT, in kfd_mem_dmamap_userptr()
478 ret = dma_map_sgtable(adev->dev, ttm->sg, direction, 0); in kfd_mem_dmamap_userptr()
[all …]
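The kfd_mem_dmamap_userptr() hits above follow a common kernel pattern: wrap an existing page array in a scatter-gather table, then DMA-map it. A condensed sketch of that pattern; dev, pages and n_pages stand in for the caller's state and are not taken from the amdgpu code.

#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>

/* Sketch: build an sg_table over an existing page array and map it for DMA. */
static int map_pages_for_dma(struct device *dev, struct page **pages,
                             unsigned int n_pages, struct sg_table *sgt)
{
        int ret;

        ret = sg_alloc_table_from_pages(sgt, pages, n_pages, 0,
                                        (u64)n_pages << PAGE_SHIFT, GFP_KERNEL);
        if (ret)
                return ret;

        ret = dma_map_sgtable(dev, sgt, DMA_BIDIRECTIONAL, 0);
        if (ret)
                sg_free_table(sgt);
        return ret;
}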
/linux-5.19.10/drivers/gpu/drm/vmwgfx/
vmwgfx_ttm_buffer.c
375 container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm); in vmw_bo_sg_table()
382 struct ttm_tt *ttm, struct ttm_resource *bo_mem) in vmw_ttm_bind() argument
385 container_of(ttm, struct vmw_ttm_tt, dma_ttm); in vmw_ttm_bind()
404 ttm->num_pages, vmw_be->gmr_id); in vmw_ttm_bind()
409 vmw_mob_create(ttm->num_pages); in vmw_ttm_bind()
415 &vmw_be->vsgt, ttm->num_pages, in vmw_ttm_bind()
429 struct ttm_tt *ttm) in vmw_ttm_unbind() argument
432 container_of(ttm, struct vmw_ttm_tt, dma_ttm); in vmw_ttm_unbind()
456 static void vmw_ttm_destroy(struct ttm_device *bdev, struct ttm_tt *ttm) in vmw_ttm_destroy() argument
459 container_of(ttm, struct vmw_ttm_tt, dma_ttm); in vmw_ttm_destroy()
[all …]
vmwgfx_blit.c
468 if (!ttm_tt_is_populated(dst->ttm)) { in vmw_bo_cpu_blit()
469 ret = dst->bdev->funcs->ttm_tt_populate(dst->bdev, dst->ttm, &ctx); in vmw_bo_cpu_blit()
474 if (!ttm_tt_is_populated(src->ttm)) { in vmw_bo_cpu_blit()
475 ret = src->bdev->funcs->ttm_tt_populate(src->bdev, src->ttm, &ctx); in vmw_bo_cpu_blit()
484 d.dst_pages = dst->ttm->pages; in vmw_bo_cpu_blit()
485 d.src_pages = src->ttm->pages; in vmw_bo_cpu_blit()
/linux-5.19.10/drivers/gpu/drm/qxl/
qxl_ttm.c
102 static void qxl_ttm_backend_destroy(struct ttm_device *bdev, struct ttm_tt *ttm) in qxl_ttm_backend_destroy() argument
104 ttm_tt_fini(ttm); in qxl_ttm_backend_destroy()
105 kfree(ttm); in qxl_ttm_backend_destroy()
111 struct ttm_tt *ttm; in qxl_ttm_tt_create() local
113 ttm = kzalloc(sizeof(struct ttm_tt), GFP_KERNEL); in qxl_ttm_tt_create()
114 if (ttm == NULL) in qxl_ttm_tt_create()
116 if (ttm_tt_init(ttm, bo, page_flags, ttm_cached, 0)) { in qxl_ttm_tt_create()
117 kfree(ttm); in qxl_ttm_tt_create()
120 return ttm; in qxl_ttm_tt_create()
152 if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) { in qxl_bo_move()
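The qxl_ttm.c hits show the smallest useful backend: a bare struct ttm_tt allocated in the create hook and torn down with ttm_tt_fini() plus kfree(). A minimal sketch following that hit; the mydrv_* names are hypothetical, and the ttm_tt_init() arguments mirror the qxl_ttm.c:116 call (cached pages, no extra pages).

#include <linux/slab.h>
#include <drm/ttm/ttm_tt.h>

static struct ttm_tt *mydrv_ttm_tt_create(struct ttm_buffer_object *bo,
                                          uint32_t page_flags)
{
        struct ttm_tt *ttm;

        ttm = kzalloc(sizeof(*ttm), GFP_KERNEL);
        if (!ttm)
                return NULL;
        /* Same arguments as the qxl hit: cached system pages, 0 extra pages. */
        if (ttm_tt_init(ttm, bo, page_flags, ttm_cached, 0)) {
                kfree(ttm);
                return NULL;
        }
        return ttm;
}

static void mydrv_ttm_tt_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
{
        ttm_tt_fini(ttm);
        kfree(ttm);
}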
/linux-5.19.10/arch/powerpc/perf/
ppc970-pmu.c
264 unsigned int ttm, grp; in p970_compute_mmcr() local
321 ttm = unitmap[i]; in p970_compute_mmcr()
322 ++ttmuse[(ttm >> 2) & 1]; in p970_compute_mmcr()
323 mmcr1 |= (unsigned long)(ttm & ~4) << MMCR1_TTM1SEL_SH; in p970_compute_mmcr()
335 ttm = (unitmap[unit] >> 2) & 1; in p970_compute_mmcr()
337 ttm = 2; in p970_compute_mmcr()
339 ttm = 3; in p970_compute_mmcr()
343 mmcr1 |= (unsigned long)ttm in p970_compute_mmcr()
