/**************************************************************************
 * Copyright (c) 2007, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thomas-at-tungstengraphics.com>
 */
#include "ttm/ttm_placement.h"
#include "ttm/ttm_execbuf_util.h"
#include "psb_ttm_fence_api.h"
#include <drm/drmP.h>
#include "psb_drv.h"

#define DRM_MEM_TTM       26

struct drm_psb_ttm_backend {
	struct ttm_backend base;
	struct page **pages;		/* page array cached by populate() */
	unsigned int desired_tile_stride;
	unsigned int hw_tile_stride;
	int mem_type;
	unsigned long offset;		/* GPU virtual address of the binding */
	unsigned long num_pages;
};

/*
 * The MSVDX/TOPAZ GPU virtual address space looks like this
 * (we currently use only one MMU context):
 * PSB_MEM_MMU_START: 0x00000000 to 0xe0000000, for generic buffers
 * TTM_PL_CI: from 0xe0000000 + half of the GTT space, for camera/video buffer sharing
 * TTM_PL_RAR: from TTM_PL_CI + the CI size, for RAR/video buffer sharing
 * TTM_PL_TT: from TTM_PL_RAR + the RAR size, for buffers that need to be mapped into the GTT
 */
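/*
 * How the gpu_offset values assigned below stack up (a summary of
 * psb_init_mem_type(); the region bases and sizes come from struct
 * psb_gtt at probe time):
 *
 *	DRM_PSB_MEM_MMU: gpu_offset = PSB_MEM_MMU_START
 *	TTM_PL_CI:       gpu_offset = pg->mmu_gatt_start + pg->ci_start
 *	TTM_PL_RAR:      gpu_offset = pg->mmu_gatt_start + pg->rar_start
 *	TTM_PL_TT:       gpu_offset = pg->mmu_gatt_start + pg->rar_start
 *	                              + dev_priv->rar_region_size
 */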
static int psb_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
			     struct ttm_mem_type_manager *man)
{
	struct drm_psb_private *dev_priv =
	    container_of(bdev, struct drm_psb_private, bdev);
	struct psb_gtt *pg = dev_priv->pg;

	switch (type) {
	case TTM_PL_SYSTEM:
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_CACHED |
		    TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case DRM_PSB_MEM_MMU:
		man->func = &ttm_bo_manager_func;
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
		    TTM_MEMTYPE_FLAG_CMA;
		man->gpu_offset = PSB_MEM_MMU_START;
		man->available_caching = TTM_PL_FLAG_CACHED |
		    TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
		man->default_caching = TTM_PL_FLAG_WC;
		break;
	case TTM_PL_CI:
		man->func = &ttm_bo_manager_func;
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
			TTM_MEMTYPE_FLAG_FIXED;
		man->gpu_offset = pg->mmu_gatt_start + pg->ci_start;
		man->available_caching = TTM_PL_FLAG_UNCACHED;
		man->default_caching = TTM_PL_FLAG_UNCACHED;
		break;
	case TTM_PL_RAR:	/* Unmappable RAR memory */
		man->func = &ttm_bo_manager_func;
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
			TTM_MEMTYPE_FLAG_FIXED;
		man->available_caching = TTM_PL_FLAG_UNCACHED;
		man->default_caching = TTM_PL_FLAG_UNCACHED;
		man->gpu_offset = pg->mmu_gatt_start + pg->rar_start;
		break;
	case TTM_PL_TT:	/* Mappable GATT memory */
		man->func = &ttm_bo_manager_func;
#ifdef PSB_WORKING_HOST_MMU_ACCESS
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
#else
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
		    TTM_MEMTYPE_FLAG_CMA;
#endif
		man->available_caching = TTM_PL_FLAG_CACHED |
		    TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
		man->default_caching = TTM_PL_FLAG_WC;
		man->gpu_offset = pg->mmu_gatt_start +
				(pg->rar_start + dev_priv->rar_region_size);
		break;
	default:
		DRM_ERROR("Unsupported memory type %u\n", (unsigned) type);
		return -EINVAL;
	}
	return 0;
}

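/*
 * TTM calls psb_init_mem_type() once per memory type when the driver
 * registers the type at init time. A minimal sketch, assuming a
 * hypothetical region size PSB_MEM_MMU_SIZE:
 *
 *	ret = ttm_bo_init_mm(bdev, DRM_PSB_MEM_MMU,
 *			     PSB_MEM_MMU_SIZE >> PAGE_SHIFT);
 */
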
static void psb_evict_mask(struct ttm_buffer_object *bo,
					struct ttm_placement *placement)
{
	/*
	 * TTM reads placement->placement after this callback returns, so
	 * the flag word must outlive the call; a function-static provides
	 * that, at the cost of reentrancy.
	 */
	static uint32_t cur_placement;

	cur_placement = bo->mem.placement & ~TTM_PL_MASK_MEM;
	cur_placement |= TTM_PL_FLAG_SYSTEM;

	placement->fpfn = 0;
	placement->lpfn = 0;
	placement->num_placement = 1;
	placement->placement = &cur_placement;
	placement->num_busy_placement = 0;
	placement->busy_placement = NULL;

	/* All buffers are evicted to system memory. */
}
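
/*
 * For reference, TTM's eviction path consumes the hook above roughly as
 * follows (TTM-internal sketch, not driver code):
 *
 *	bdev->driver->evict_flags(bo, &placement);
 *	ret = ttm_bo_mem_space(bo, &placement, &evict_mem,
 *			       interruptible, no_wait_reserve, no_wait_gpu);
 */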

static int psb_invalidate_caches(struct ttm_bo_device *bdev,
				 uint32_t placement)
{
	return 0;
}

static int psb_move_blit(struct ttm_buffer_object *bo,
			 bool evict, bool no_wait,
			 struct ttm_mem_reg *new_mem)
{
	/* Accelerated blits are not implemented; this must never be reached. */
	BUG();
	return 0;
}

/*
 * Flip the destination ttm into the GATT, blit, and subsequently move
 * it out again.
 */

static int psb_move_flip(struct ttm_buffer_object *bo,
			 bool evict, bool interruptible, bool no_wait,
			 struct ttm_mem_reg *new_mem)
{
	struct ttm_mem_reg tmp_mem;
	int ret;
	struct ttm_placement placement;
	uint32_t flags = TTM_PL_FLAG_TT;

	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;

	placement.fpfn = 0;
	placement.lpfn = 0;
	placement.num_placement = 1;
	placement.placement = &flags;
	placement.num_busy_placement = 0; /* FIXME */
	placement.busy_placement = NULL;

	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, interruptible,
							false, no_wait);
	if (ret)
		return ret;
	ret = ttm_tt_bind(bo->ttm, &tmp_mem);
	if (ret)
		goto out_cleanup;
	ret = psb_move_blit(bo, true, no_wait, &tmp_mem);
	if (ret)
		goto out_cleanup;

	ret = ttm_bo_move_ttm(bo, evict, false, no_wait, new_mem);
out_cleanup:
	if (tmp_mem.mm_node) {
		drm_mm_put_block(tmp_mem.mm_node);
		tmp_mem.mm_node = NULL;
	}
	return ret;
}

static int psb_move(struct ttm_buffer_object *bo,
		    bool evict, bool interruptible, bool no_wait_reserve,
		    bool no_wait, struct ttm_mem_reg *new_mem)
{
	struct ttm_mem_reg *old_mem = &bo->mem;

	if ((old_mem->mem_type == TTM_PL_RAR) ||
	    (new_mem->mem_type == TTM_PL_RAR)) {
		/* RAR contents cannot be copied; just adopt the new node. */
		if (old_mem->mm_node) {
			spin_lock(&bo->glob->lru_lock);
			drm_mm_put_block(old_mem->mm_node);
			spin_unlock(&bo->glob->lru_lock);
		}
		old_mem->mm_node = NULL;
		*old_mem = *new_mem;
	} else if (old_mem->mem_type == TTM_PL_SYSTEM) {
		return ttm_bo_move_memcpy(bo, evict, false, no_wait, new_mem);
	} else if (new_mem->mem_type == TTM_PL_SYSTEM) {
		int ret = psb_move_flip(bo, evict, interruptible,
					no_wait, new_mem);
		if (unlikely(ret != 0)) {
			if (ret == -ERESTART)
				return ret;
			else
				return ttm_bo_move_memcpy(bo, evict, false,
						no_wait, new_mem);
		}
	} else {
		if (psb_move_blit(bo, evict, no_wait, new_mem))
			return ttm_bo_move_memcpy(bo, evict, false, no_wait,
						  new_mem);
	}
	return 0;
}

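/*
 * Summary of the move paths above:
 *
 *	RAR <-> anything:    adopt the new node, no data copy
 *	SYSTEM -> elsewhere: ttm_bo_move_memcpy()
 *	elsewhere -> SYSTEM: psb_move_flip(), memcpy fallback on failure
 *	anything else:       psb_move_blit(), memcpy fallback on failure
 */
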
static int drm_psb_tbe_populate(struct ttm_backend *backend,
				unsigned long num_pages,
				struct page **pages,
				struct page *dummy_read_page,
				dma_addr_t *dma_addrs)
{
	struct drm_psb_ttm_backend *psb_be =
	    container_of(backend, struct drm_psb_ttm_backend, base);

	/* Only cache the page array; the actual binding happens in bind(). */
	psb_be->pages = pages;
	return 0;
}

static int drm_psb_tbe_unbind(struct ttm_backend *backend)
{
	struct ttm_bo_device *bdev = backend->bdev;
	struct drm_psb_private *dev_priv =
	    container_of(bdev, struct drm_psb_private, bdev);
	struct drm_psb_ttm_backend *psb_be =
	    container_of(backend, struct drm_psb_ttm_backend, base);
	struct psb_mmu_pd *pd = psb_mmu_get_default_pd(dev_priv->mmu);

	if (psb_be->mem_type == TTM_PL_TT) {
		uint32_t gatt_p_offset =
			(psb_be->offset - dev_priv->pg->mmu_gatt_start)
								>> PAGE_SHIFT;

		(void) psb_gtt_remove_pages(dev_priv->pg, gatt_p_offset,
					    psb_be->num_pages,
					    psb_be->desired_tile_stride,
					    psb_be->hw_tile_stride, 0);
	}

	psb_mmu_remove_pages(pd, psb_be->offset,
			     psb_be->num_pages,
			     psb_be->desired_tile_stride,
			     psb_be->hw_tile_stride);

	return 0;
}

static int drm_psb_tbe_bind(struct ttm_backend *backend,
			    struct ttm_mem_reg *bo_mem)
{
	struct ttm_bo_device *bdev = backend->bdev;
	struct drm_psb_private *dev_priv =
	    container_of(bdev, struct drm_psb_private, bdev);
	struct drm_psb_ttm_backend *psb_be =
	    container_of(backend, struct drm_psb_ttm_backend, base);
	struct psb_mmu_pd *pd = psb_mmu_get_default_pd(dev_priv->mmu);
	struct ttm_mem_type_manager *man = &bdev->man[bo_mem->mem_type];
	struct drm_mm_node *mm_node = bo_mem->mm_node;
	int type;
	int ret = 0;

	psb_be->mem_type = bo_mem->mem_type;
	psb_be->num_pages = bo_mem->num_pages;
	psb_be->desired_tile_stride = 0;
	psb_be->hw_tile_stride = 0;
	psb_be->offset = (mm_node->start << PAGE_SHIFT) +
	    man->gpu_offset;

	type = (bo_mem->placement & TTM_PL_FLAG_CACHED) ?
						PSB_MMU_CACHED_MEMORY : 0;

	if (psb_be->mem_type == TTM_PL_TT) {
		uint32_t gatt_p_offset =
				(psb_be->offset - dev_priv->pg->mmu_gatt_start)
								>> PAGE_SHIFT;

		ret = psb_gtt_insert_pages(dev_priv->pg, psb_be->pages,
					   gatt_p_offset,
					   psb_be->num_pages,
					   psb_be->desired_tile_stride,
					   psb_be->hw_tile_stride, type);
		if (ret)
			goto out_err;
	}

	ret = psb_mmu_insert_pages(pd, psb_be->pages,
				   psb_be->offset, psb_be->num_pages,
				   psb_be->desired_tile_stride,
				   psb_be->hw_tile_stride, type);
	if (ret)
		goto out_err;

	return 0;
out_err:
	drm_psb_tbe_unbind(backend);
	return ret;
}

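/*
 * Worked example of the offset arithmetic in bind()/unbind() for a
 * TTM_PL_TT binding, assuming a hypothetical node start of 16 pages:
 *
 *	psb_be->offset = (16 << PAGE_SHIFT) + man->gpu_offset
 *	gatt_p_offset  = (psb_be->offset
 *			  - dev_priv->pg->mmu_gatt_start) >> PAGE_SHIFT
 *
 * offset is a GPU virtual address; the GTT helpers instead want a page
 * index relative to the start of the aperture.
 */
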
static void drm_psb_tbe_clear(struct ttm_backend *backend)
{
	struct drm_psb_ttm_backend *psb_be =
	    container_of(backend, struct drm_psb_ttm_backend, base);

	psb_be->pages = NULL;
}

static void drm_psb_tbe_destroy(struct ttm_backend *backend)
{
	struct drm_psb_ttm_backend *psb_be =
	    container_of(backend, struct drm_psb_ttm_backend, base);

	/* base is the first member, so psb_be is NULL iff backend is,
	 * and kfree(NULL) is a no-op; no separate NULL check is needed. */
	kfree(psb_be);
}

static struct ttm_backend_func psb_ttm_backend = {
	.populate = drm_psb_tbe_populate,
	.clear = drm_psb_tbe_clear,
	.bind = drm_psb_tbe_bind,
	.unbind = drm_psb_tbe_unbind,
	.destroy = drm_psb_tbe_destroy,
};

static struct ttm_backend *drm_psb_tbe_init(struct ttm_bo_device *bdev)
{
	struct drm_psb_ttm_backend *psb_be;

	psb_be = kzalloc(sizeof(*psb_be), GFP_KERNEL);
	if (!psb_be)
		return NULL;
	psb_be->pages = NULL;
	psb_be->base.func = &psb_ttm_backend;
	psb_be->base.bdev = bdev;
	return &psb_be->base;
}

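/*
 * Backend lifecycle, as TTM drives it (descriptive sketch, not driver
 * code): after drm_psb_tbe_init() creates the backend, each bind cycle
 * looks like
 *
 *	backend->func->populate(backend, num_pages, pages,
 *				dummy_read_page, dma_addrs);
 *	backend->func->bind(backend, bo_mem);
 *	...
 *	backend->func->unbind(backend);
 *	backend->func->clear(backend);
 *
 * with backend->func->destroy(backend) at the very end.
 */
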
static int psb_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
						struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	struct drm_psb_private *dev_priv =
	    container_of(bdev, struct drm_psb_private, bdev);
	struct psb_gtt *pg = dev_priv->pg;
	struct drm_mm_node *mm_node = mem->mm_node;

	mem->bus.addr = NULL;
	mem->bus.offset = 0;
	mem->bus.size = mem->num_pages << PAGE_SHIFT;
	mem->bus.base = 0;
	mem->bus.is_iomem = false;
	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
		return -EINVAL;
	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
		/* system memory */
		return 0;
	case TTM_PL_TT:
		mem->bus.offset = mm_node->start << PAGE_SHIFT;
		mem->bus.base = pg->gatt_start;
		/* It is unclear whether this range is I/O memory; the
		 * flag is consumed by the vm_fault handler. */
		mem->bus.is_iomem = false;
		break;
	case DRM_PSB_MEM_MMU:
		mem->bus.offset = mm_node->start << PAGE_SHIFT;
		mem->bus.base = 0x00000000;
		break;
	case TTM_PL_CI:
		mem->bus.offset = mm_node->start << PAGE_SHIFT;
		mem->bus.base = dev_priv->ci_region_start;
		mem->bus.is_iomem = true;
		break;
	case TTM_PL_RAR:
		mem->bus.offset = mm_node->start << PAGE_SHIFT;
		mem->bus.base = dev_priv->rar_region_start;
		mem->bus.is_iomem = true;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

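/*
 * The CPU-visible address TTM derives from the fields set above is
 * bus.base + bus.offset; e.g. a TTM_PL_TT buffer whose node starts at
 * page N is reached at pg->gatt_start + (N << PAGE_SHIFT).
 */
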
static void psb_ttm_io_mem_free(struct ttm_bo_device *bdev,
						struct ttm_mem_reg *mem)
{
}

/*
 * Use this memory type priority if no eviction is needed.
 */
/*
static uint32_t psb_mem_prios[] = {
	TTM_PL_CI,
	TTM_PL_RAR,
	TTM_PL_TT,
	DRM_PSB_MEM_MMU,
	TTM_PL_SYSTEM
};
*/
/*
 * Use this memory type priority if eviction is needed.
 */
/*
static uint32_t psb_busy_prios[] = {
	TTM_PL_TT,
	TTM_PL_CI,
	TTM_PL_RAR,
	DRM_PSB_MEM_MMU,
	TTM_PL_SYSTEM
};
*/
struct ttm_bo_driver psb_ttm_bo_driver = {
/*
	.mem_type_prio = psb_mem_prios,
	.mem_busy_prio = psb_busy_prios,
	.num_mem_type_prio = ARRAY_SIZE(psb_mem_prios),
	.num_mem_busy_prio = ARRAY_SIZE(psb_busy_prios),
*/
	.create_ttm_backend_entry = &drm_psb_tbe_init,
	.invalidate_caches = &psb_invalidate_caches,
	.init_mem_type = &psb_init_mem_type,
	.evict_flags = &psb_evict_mask,
	.move = &psb_move,
	.verify_access = &psb_verify_access,
	.sync_obj_signaled = &ttm_fence_sync_obj_signaled,
	.sync_obj_wait = &ttm_fence_sync_obj_wait,
	.sync_obj_flush = &ttm_fence_sync_obj_flush,
	.sync_obj_unref = &ttm_fence_sync_obj_unref,
	.sync_obj_ref = &ttm_fence_sync_obj_ref,
	.io_mem_reserve = &psb_ttm_io_mem_reserve,
	.io_mem_free = &psb_ttm_io_mem_free
};