/**************************************************************************
 *
 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_placement.h"
#include <linux/io.h>
#include <linux/highmem.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/module.h>

void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
{
	ttm_bo_mem_put(bo, &bo->mem);
}

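/*
 * ttm_bo_move_ttm - move a buffer object by (re)binding its TTM.
 *
 * Unbinds the TTM and frees the old node when the old placement is not
 * system memory, switches the caching state to match the new placement,
 * and binds the TTM to the new placement unless that is system memory.
 */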
int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
		    bool evict, bool no_wait_reserve,
		    bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;

	if (old_mem->mem_type != TTM_PL_SYSTEM) {
		ttm_tt_unbind(ttm);
		ttm_bo_free_old_node(bo);
		ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
				TTM_PL_MASK_MEM);
		old_mem->mem_type = TTM_PL_SYSTEM;
	}

	ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
	if (unlikely(ret != 0))
		return ret;

	if (new_mem->mem_type != TTM_PL_SYSTEM) {
		ret = ttm_tt_bind(ttm, new_mem);
		if (unlikely(ret != 0))
			return ret;
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_ttm);

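/*
 * ttm_mem_io_lock / ttm_mem_io_unlock - serialize io space reservation and
 * freeing for a memory type manager. The mutex is skipped entirely when the
 * manager advertises io_reserve_fastpath.
 */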
int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible)
{
	if (likely(man->io_reserve_fastpath))
		return 0;

	if (interruptible)
		return mutex_lock_interruptible(&man->io_reserve_mutex);

	mutex_lock(&man->io_reserve_mutex);
	return 0;
}

void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
{
	if (likely(man->io_reserve_fastpath))
		return;

	mutex_unlock(&man->io_reserve_mutex);
}

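/*
 * ttm_mem_io_evict - make room for a new io space reservation by unmapping
 * the first buffer object on the manager's io_reserve_lru list.
 * Returns -EAGAIN if there is nothing to evict.
 */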
static int ttm_mem_io_evict(struct ttm_mem_type_manager *man)
{
	struct ttm_buffer_object *bo;

	if (!man->use_io_reserve_lru || list_empty(&man->io_reserve_lru))
		return -EAGAIN;

	bo = list_first_entry(&man->io_reserve_lru,
			      struct ttm_buffer_object,
			      io_reserve_lru);
	list_del_init(&bo->io_reserve_lru);
	ttm_bo_unmap_virtual_locked(bo);

	return 0;
}

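/*
 * ttm_mem_io_reserve - ask the driver to reserve io space for @mem.
 * On the slow path the reservation is refcounted, and an -EAGAIN from the
 * driver triggers eviction of an existing reservation before retrying.
 */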
static int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
			      struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	int ret = 0;

	if (!bdev->driver->io_mem_reserve)
		return 0;
	if (likely(man->io_reserve_fastpath))
		return bdev->driver->io_mem_reserve(bdev, mem);

	if (bdev->driver->io_mem_reserve &&
	    mem->bus.io_reserved_count++ == 0) {
retry:
		ret = bdev->driver->io_mem_reserve(bdev, mem);
		if (ret == -EAGAIN) {
			ret = ttm_mem_io_evict(man);
			if (ret == 0)
				goto retry;
		}
	}
	return ret;
}

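/*
 * ttm_mem_io_free - drop a reference on the io space reservation for @mem
 * and hand it back to the driver once the count reaches zero.
 */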
static void ttm_mem_io_free(struct ttm_bo_device *bdev,
			    struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

	if (likely(man->io_reserve_fastpath))
		return;

	if (bdev->driver->io_mem_reserve &&
	    --mem->bus.io_reserved_count == 0 &&
	    bdev->driver->io_mem_free)
		bdev->driver->io_mem_free(bdev, mem);
}

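/*
 * ttm_mem_io_reserve_vm / ttm_mem_io_free_vm - take and drop the io space
 * reservation backing a buffer object's VM mapping, adding the object to
 * the manager's io_reserve_lru (when the manager uses it) while the
 * reservation is held.
 */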
int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo)
{
	struct ttm_mem_reg *mem = &bo->mem;
	int ret;

	if (!mem->bus.io_reserved_vm) {
		struct ttm_mem_type_manager *man =
			&bo->bdev->man[mem->mem_type];

		ret = ttm_mem_io_reserve(bo->bdev, mem);
		if (unlikely(ret != 0))
			return ret;
		mem->bus.io_reserved_vm = true;
		if (man->use_io_reserve_lru)
			list_add_tail(&bo->io_reserve_lru,
				      &man->io_reserve_lru);
	}
	return 0;
}

void ttm_mem_io_free_vm(struct ttm_buffer_object *bo)
{
	struct ttm_mem_reg *mem = &bo->mem;

	if (mem->bus.io_reserved_vm) {
		mem->bus.io_reserved_vm = false;
		list_del_init(&bo->io_reserve_lru);
		ttm_mem_io_free(bo->bdev, mem);
	}
}

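/*
 * ttm_mem_reg_ioremap / ttm_mem_reg_iounmap - map and unmap a whole memory
 * region into kernel address space. Non-iomem regions leave *virtual NULL;
 * iomem regions are mapped write-combined or uncached to match the
 * placement flags.
 */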
int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
			void **virtual)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	int ret;
	void *addr;

	*virtual = NULL;
	(void) ttm_mem_io_lock(man, false);
	ret = ttm_mem_io_reserve(bdev, mem);
	ttm_mem_io_unlock(man);
	if (ret || !mem->bus.is_iomem)
		return ret;

	if (mem->bus.addr) {
		addr = mem->bus.addr;
	} else {
		if (mem->placement & TTM_PL_FLAG_WC)
			addr = ioremap_wc(mem->bus.base + mem->bus.offset, mem->bus.size);
		else
			addr = ioremap_nocache(mem->bus.base + mem->bus.offset, mem->bus.size);
		if (!addr) {
			(void) ttm_mem_io_lock(man, false);
			ttm_mem_io_free(bdev, mem);
			ttm_mem_io_unlock(man);
			return -ENOMEM;
		}
	}
	*virtual = addr;
	return 0;
}

void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
			 void *virtual)
{
	struct ttm_mem_type_manager *man;

	man = &bdev->man[mem->mem_type];

	if (virtual && mem->bus.addr == NULL)
		iounmap(virtual);
	(void) ttm_mem_io_lock(man, false);
	ttm_mem_io_free(bdev, mem);
	ttm_mem_io_unlock(man);
}

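/*
 * ttm_copy_io_page - copy one page between two kernel io mappings,
 * 32 bits at a time, using ioread32()/iowrite32().
 */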
static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
{
	uint32_t *dstP =
	    (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
	uint32_t *srcP =
	    (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));

	int i;
	for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
		iowrite32(ioread32(srcP++), dstP++);
	return 0;
}

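/*
 * ttm_copy_io_ttm_page - copy one page from an io mapping into a TTM page,
 * mapping the destination with the requested page protection (atomically
 * on x86, via vmap()/kmap() elsewhere).
 */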
static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
				unsigned long page,
				pgprot_t prot)
{
	struct page *d = ttm->pages[page];
	void *dst;

	if (!d)
		return -ENOMEM;

	src = (void *)((unsigned long)src + (page << PAGE_SHIFT));

#ifdef CONFIG_X86
	dst = kmap_atomic_prot(d, prot);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		dst = vmap(&d, 1, 0, prot);
	else
		dst = kmap(d);
#endif
	if (!dst)
		return -ENOMEM;

	memcpy_fromio(dst, src, PAGE_SIZE);

#ifdef CONFIG_X86
	kunmap_atomic(dst);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		vunmap(dst);
	else
		kunmap(d);
#endif

	return 0;
}

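/*
 * ttm_copy_ttm_io_page - the inverse of ttm_copy_io_ttm_page(): copy one
 * page from a TTM page out to an io mapping.
 */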
static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
				unsigned long page,
				pgprot_t prot)
{
	struct page *s = ttm->pages[page];
	void *src;

	if (!s)
		return -ENOMEM;

	dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
#ifdef CONFIG_X86
	src = kmap_atomic_prot(s, prot);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		src = vmap(&s, 1, 0, prot);
	else
		src = kmap(s);
#endif
	if (!src)
		return -ENOMEM;

	memcpy_toio(dst, src, PAGE_SIZE);

#ifdef CONFIG_X86
	kunmap_atomic(src);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		vunmap(src);
	else
		kunmap(s);
#endif

	return 0;
}

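/*
 * ttm_bo_move_memcpy - fallback move done with the CPU.
 *
 * Maps the old and new regions (io space or TTM pages), copies page by
 * page, choosing the copy direction so that overlapping moves within the
 * same memory type remain safe, and finally installs the new placement in
 * the buffer object. For fixed (non-TTM-backed) destinations the TTM is
 * unbound and destroyed.
 */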
int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
		       bool evict, bool no_wait_reserve, bool no_wait_gpu,
		       struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg old_copy = *old_mem;
	void *old_iomap;
	void *new_iomap;
	int ret;
	unsigned long i;
	unsigned long page;
	unsigned long add = 0;
	int dir;

	ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
	if (ret)
		return ret;
	ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap);
	if (ret)
		goto out;

	if (old_iomap == NULL && new_iomap == NULL)
		goto out2;
	if (old_iomap == NULL && ttm == NULL)
		goto out2;

	/* TTM might be null for moves within the same region.
	 */
	if (ttm && ttm->state == tt_unpopulated) {
		ret = ttm->bdev->driver->ttm_tt_populate(ttm);
		if (ret)
			goto out1;
	}

	add = 0;
	dir = 1;

	if ((old_mem->mem_type == new_mem->mem_type) &&
	    (new_mem->start < old_mem->start + old_mem->size)) {
		dir = -1;
		add = new_mem->num_pages - 1;
	}

	for (i = 0; i < new_mem->num_pages; ++i) {
		page = i * dir + add;
		if (old_iomap == NULL) {
			pgprot_t prot = ttm_io_prot(old_mem->placement,
						    PAGE_KERNEL);
			ret = ttm_copy_ttm_io_page(ttm, new_iomap, page,
						   prot);
		} else if (new_iomap == NULL) {
			pgprot_t prot = ttm_io_prot(new_mem->placement,
						    PAGE_KERNEL);
			ret = ttm_copy_io_ttm_page(ttm, old_iomap, page,
						   prot);
		} else
			ret = ttm_copy_io_page(new_iomap, old_iomap, page);
		if (ret)
			goto out1;
	}
	mb();
out2:
	old_copy = *old_mem;
	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && (ttm != NULL)) {
		ttm_tt_unbind(ttm);
		ttm_tt_destroy(ttm);
		bo->ttm = NULL;
	}

out1:
	ttm_mem_reg_iounmap(bdev, old_mem, new_iomap);
out:
	ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);
	ttm_bo_mem_put(bo, &old_copy);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_move_memcpy);

static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
{
	kfree(bo);
}

/**
 * ttm_buffer_object_transfer
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
 * holding the data of @bo with the old placement.
 *
 * This is a utility function that may be called after an accelerated move
 * has been scheduled. A new buffer object is created as a placeholder for
 * the old data while it's being copied. When that buffer object is idle,
 * it can be destroyed, releasing the space of the old placement.
 * Returns:
 * !0: Failure.
 */

static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
				      struct ttm_buffer_object **new_obj)
{
	struct ttm_buffer_object *fbo;
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_driver *driver = bdev->driver;

	fbo = kzalloc(sizeof(*fbo), GFP_KERNEL);
	if (!fbo)
		return -ENOMEM;

	*fbo = *bo;

	/**
	 * Fix up members that we shouldn't copy directly:
	 * TODO: Explicit member copy would probably be better here.
	 */

	init_waitqueue_head(&fbo->event_queue);
	INIT_LIST_HEAD(&fbo->ddestroy);
	INIT_LIST_HEAD(&fbo->lru);
	INIT_LIST_HEAD(&fbo->swap);
	INIT_LIST_HEAD(&fbo->io_reserve_lru);
	fbo->vm_node = NULL;
	atomic_set(&fbo->cpu_writers, 0);

	fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj);
	kref_init(&fbo->list_kref);
	kref_init(&fbo->kref);
	fbo->destroy = &ttm_transfered_destroy;
	fbo->acc_size = 0;

	*new_obj = fbo;
	return 0;
}

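/*
 * ttm_io_prot - derive the page protection to use for a mapping from the
 * TTM caching flags, applying the architecture-specific write-combine or
 * uncached page attributes.
 */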
pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
{
#if defined(__i386__) || defined(__x86_64__)
	if (caching_flags & TTM_PL_FLAG_WC)
		tmp = pgprot_writecombine(tmp);
	else if (boot_cpu_data.x86 > 3)
		tmp = pgprot_noncached(tmp);

#elif defined(__powerpc__)
	if (!(caching_flags & TTM_PL_FLAG_CACHED)) {
		pgprot_val(tmp) |= _PAGE_NO_CACHE;
		if (caching_flags & TTM_PL_FLAG_UNCACHED)
			pgprot_val(tmp) |= _PAGE_GUARDED;
	}
#endif
#if defined(__ia64__)
	if (caching_flags & TTM_PL_FLAG_WC)
		tmp = pgprot_writecombine(tmp);
	else
		tmp = pgprot_noncached(tmp);
#endif
#if defined(__sparc__)
	if (!(caching_flags & TTM_PL_FLAG_CACHED))
		tmp = pgprot_noncached(tmp);
#endif
	return tmp;
}
EXPORT_SYMBOL(ttm_io_prot);

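/*
 * ttm_bo_ioremap - set up a kernel mapping for the io part of a buffer
 * object, either reusing a pre-mapped bus address or ioremapping the
 * requested range with caching that matches the placement.
 */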
static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
			  unsigned long offset,
			  unsigned long size,
			  struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_reg *mem = &bo->mem;

	if (bo->mem.bus.addr) {
		map->bo_kmap_type = ttm_bo_map_premapped;
		map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
	} else {
		map->bo_kmap_type = ttm_bo_map_iomap;
		if (mem->placement & TTM_PL_FLAG_WC)
			map->virtual = ioremap_wc(bo->mem.bus.base + bo->mem.bus.offset + offset,
						  size);
		else
			map->virtual = ioremap_nocache(bo->mem.bus.base + bo->mem.bus.offset + offset,
						       size);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

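/*
 * ttm_bo_kmap_ttm - kernel-map a page range of a TTM-backed buffer object,
 * using kmap() for a single cached page and vmap() with the appropriate
 * page protection otherwise.
 */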
static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
			   unsigned long start_page,
			   unsigned long num_pages,
			   struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_reg *mem = &bo->mem;
	pgprot_t prot;
	struct ttm_tt *ttm = bo->ttm;
	int ret;

	BUG_ON(!ttm);

	if (ttm->state == tt_unpopulated) {
		ret = ttm->bdev->driver->ttm_tt_populate(ttm);
		if (ret)
			return ret;
	}

	if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
		/*
		 * We're mapping a single page, and the desired
		 * page protection is consistent with the bo.
		 */

		map->bo_kmap_type = ttm_bo_map_kmap;
		map->page = ttm->pages[start_page];
		map->virtual = kmap(map->page);
	} else {
		/*
		 * We need to use vmap to get the desired page protection
		 * or to make the buffer object look contiguous.
		 */
		prot = (mem->placement & TTM_PL_FLAG_CACHED) ?
			PAGE_KERNEL :
			ttm_io_prot(mem->placement, PAGE_KERNEL);
		map->bo_kmap_type = ttm_bo_map_vmap;
		map->virtual = vmap(ttm->pages + start_page, num_pages,
				    0, prot);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

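/*
 * ttm_bo_kmap - map part of a buffer object into kernel address space,
 * dispatching to ttm_bo_kmap_ttm() for TTM-backed placements and to
 * ttm_bo_ioremap() for iomem placements after reserving io space. The
 * resulting mapping is described by @map and must be released with
 * ttm_bo_kunmap().
 *
 * Minimal caller-side sketch ("bo" and "num_pages" stand in for the
 * caller's own variables):
 *
 *	struct ttm_bo_kmap_obj map;
 *	int ret = ttm_bo_kmap(bo, 0, num_pages, &map);
 *	if (!ret) {
 *		bool is_iomem;
 *		void *virt = ttm_kmap_obj_virtual(&map, &is_iomem);
 *		... access virt, honouring is_iomem ...
 *		ttm_bo_kunmap(&map);
 *	}
 */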
int ttm_bo_kmap(struct ttm_buffer_object *bo,
		unsigned long start_page, unsigned long num_pages,
		struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_type_manager *man =
		&bo->bdev->man[bo->mem.mem_type];
	unsigned long offset, size;
	int ret;

	BUG_ON(!list_empty(&bo->swap));
	map->virtual = NULL;
	map->bo = bo;
	if (num_pages > bo->num_pages)
		return -EINVAL;
	if (start_page > bo->num_pages)
		return -EINVAL;
#if 0
	if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC))
		return -EPERM;
#endif
	(void) ttm_mem_io_lock(man, false);
	ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
	ttm_mem_io_unlock(man);
	if (ret)
		return ret;
	if (!bo->mem.bus.is_iomem) {
		return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
	} else {
		offset = start_page << PAGE_SHIFT;
		size = num_pages << PAGE_SHIFT;
		return ttm_bo_ioremap(bo, offset, size, map);
	}
}
EXPORT_SYMBOL(ttm_bo_kmap);

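/*
 * ttm_bo_kunmap - tear down a mapping created by ttm_bo_kmap(), picking the
 * matching unmap primitive from the recorded bo_kmap_type and dropping the
 * io space reservation.
 */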
void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
{
	struct ttm_buffer_object *bo = map->bo;
	struct ttm_mem_type_manager *man =
		&bo->bdev->man[bo->mem.mem_type];

	if (!map->virtual)
		return;
	switch (map->bo_kmap_type) {
	case ttm_bo_map_iomap:
		iounmap(map->virtual);
		break;
	case ttm_bo_map_vmap:
		vunmap(map->virtual);
		break;
	case ttm_bo_map_kmap:
		kunmap(map->page);
		break;
	case ttm_bo_map_premapped:
		break;
	default:
		BUG();
	}
	(void) ttm_mem_io_lock(man, false);
	ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
	ttm_mem_io_unlock(man);
	map->virtual = NULL;
	map->page = NULL;
}
EXPORT_SYMBOL(ttm_bo_kunmap);

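/*
 * ttm_bo_move_accel_cleanup - finish an accelerated (GPU) move.
 *
 * Attaches the driver's sync object as the fence for the move. For an
 * eviction the function waits for the move and releases the old node
 * directly; otherwise the old resources are handed to a "ghost" buffer
 * object (see ttm_buffer_object_transfer()) so that they are released
 * only once the GPU copy has completed, keeping ordinary moves pipelined.
 */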
int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
			      void *sync_obj,
			      void *sync_obj_arg,
			      bool evict, bool no_wait_reserve,
			      bool no_wait_gpu,
			      struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_driver *driver = bdev->driver;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;
	struct ttm_buffer_object *ghost_obj;
	void *tmp_obj = NULL;

	spin_lock(&bdev->fence_lock);
	if (bo->sync_obj) {
		tmp_obj = bo->sync_obj;
		bo->sync_obj = NULL;
	}
	bo->sync_obj = driver->sync_obj_ref(sync_obj);
	bo->sync_obj_arg = sync_obj_arg;
	if (evict) {
		ret = ttm_bo_wait(bo, false, false, false);
		spin_unlock(&bdev->fence_lock);
		if (tmp_obj)
			driver->sync_obj_unref(&tmp_obj);
		if (ret)
			return ret;

		if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
		    (bo->ttm != NULL)) {
			ttm_tt_unbind(bo->ttm);
			ttm_tt_destroy(bo->ttm);
			bo->ttm = NULL;
		}
		ttm_bo_free_old_node(bo);
	} else {
		/**
		 * This should help pipeline ordinary buffer moves.
		 *
		 * Hang old buffer memory on a new buffer object,
		 * and leave it to be released when the GPU
		 * operation has completed.
		 */

		set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
		spin_unlock(&bdev->fence_lock);
		if (tmp_obj)
			driver->sync_obj_unref(&tmp_obj);

		ret = ttm_buffer_object_transfer(bo, &ghost_obj);
		if (ret)
			return ret;

		/**
		 * If we're not moving to fixed memory, the TTM object
		 * needs to stay alive. Otherwise hang it on the ghost
		 * bo to be unbound and destroyed.
		 */

		if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED))
			ghost_obj->ttm = NULL;
		else
			bo->ttm = NULL;

		ttm_bo_unreserve(ghost_obj);
		ttm_bo_unref(&ghost_obj);
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);