/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include "ttm/ttm_module.h"
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_placement.h"
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/module.h>
#include <asm/atomic.h>

#define TTM_ASSERT_LOCKED(param)
#define TTM_DEBUG(fmt, arg...)
#define TTM_BO_HASH_ORDER 13

static int ttm_bo_setup_vm(struct ttm_buffer_object *bo);
static int ttm_bo_swapout(struct ttm_mem_shrink *shrink);
static void ttm_bo_global_kobj_release(struct kobject *kobj);

static struct attribute ttm_bo_count = {
        .name = "bo_count",
        .mode = S_IRUGO
};
static inline int ttm_mem_type_from_flags(uint32_t flags, uint32_t *mem_type)
{
        int i;

        for (i = 0; i <= TTM_PL_PRIV5; i++)
                if (flags & (1 << i)) {
                        *mem_type = i;
                        return 0;
                }
        return -EINVAL;
}

static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type)
{
        struct ttm_mem_type_manager *man = &bdev->man[mem_type];

        printk(KERN_ERR TTM_PFX " has_type: %d\n", man->has_type);
        printk(KERN_ERR TTM_PFX " use_type: %d\n", man->use_type);
        printk(KERN_ERR TTM_PFX " flags: 0x%08X\n", man->flags);
        printk(KERN_ERR TTM_PFX " gpu_offset: 0x%08lX\n", man->gpu_offset);
        printk(KERN_ERR TTM_PFX " size: %llu\n", man->size);
        printk(KERN_ERR TTM_PFX " available_caching: 0x%08X\n",
               man->available_caching);
        printk(KERN_ERR TTM_PFX " default_caching: 0x%08X\n",
               man->default_caching);
        if (mem_type != TTM_PL_SYSTEM)
                (*man->func->debug)(man, TTM_PFX);
}

static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
                                   struct ttm_placement *placement)
{
        int i, ret, mem_type;

        printk(KERN_ERR TTM_PFX "No space for %p (%lu pages, %luK, %luM)\n",
               bo, bo->mem.num_pages, bo->mem.size >> 10,
               bo->mem.size >> 20);
        for (i = 0; i < placement->num_placement; i++) {
                ret = ttm_mem_type_from_flags(placement->placement[i],
                                              &mem_type);
                if (ret)
                        return;
                printk(KERN_ERR TTM_PFX " placement[%d]=0x%08X (%d)\n",
                       i, placement->placement[i], mem_type);
                ttm_mem_type_debug(bo->bdev, mem_type);
        }
}

static ssize_t ttm_bo_global_show(struct kobject *kobj,
                                  struct attribute *attr,
                                  char *buffer)
{
        struct ttm_bo_global *glob =
                container_of(kobj, struct ttm_bo_global, kobj);

        return snprintf(buffer, PAGE_SIZE, "%lu\n",
                        (unsigned long) atomic_read(&glob->bo_count));
}

static struct attribute *ttm_bo_global_attrs[] = {
        &ttm_bo_count,
        NULL
};

static const struct sysfs_ops ttm_bo_global_ops = {
        .show = &ttm_bo_global_show
};

static struct kobj_type ttm_bo_glob_kobj_type = {
        .release = &ttm_bo_global_kobj_release,
        .sysfs_ops = &ttm_bo_global_ops,
        .default_attrs = ttm_bo_global_attrs
};


static inline uint32_t ttm_bo_type_flags(unsigned type)
{
        return 1 << (type);
}

static void ttm_bo_release_list(struct kref *list_kref)
{
        struct ttm_buffer_object *bo =
                container_of(list_kref, struct ttm_buffer_object, list_kref);
        struct ttm_bo_device *bdev = bo->bdev;

        BUG_ON(atomic_read(&bo->list_kref.refcount));
        BUG_ON(atomic_read(&bo->kref.refcount));
        BUG_ON(atomic_read(&bo->cpu_writers));
        BUG_ON(bo->sync_obj != NULL);
        BUG_ON(bo->mem.mm_node != NULL);
        BUG_ON(!list_empty(&bo->lru));
        BUG_ON(!list_empty(&bo->ddestroy));

        if (bo->ttm)
                ttm_tt_destroy(bo->ttm);
        atomic_dec(&bo->glob->bo_count);
        if (bo->destroy)
                bo->destroy(bo);
        else {
                ttm_mem_global_free(bdev->glob->mem_glob, bo->acc_size);
                kfree(bo);
        }
}

int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, bool interruptible)
{
        if (interruptible) {
                return wait_event_interruptible(bo->event_queue,
                                atomic_read(&bo->reserved) == 0);
        } else {
                wait_event(bo->event_queue, atomic_read(&bo->reserved) == 0);
                return 0;
        }
}
EXPORT_SYMBOL(ttm_bo_wait_unreserved);

void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_mem_type_manager *man;

        BUG_ON(!atomic_read(&bo->reserved));

        if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {

                BUG_ON(!list_empty(&bo->lru));

                man = &bdev->man[bo->mem.mem_type];
                list_add_tail(&bo->lru, &man->lru);
                kref_get(&bo->list_kref);

                if (bo->ttm != NULL) {
                        list_add_tail(&bo->swap, &bo->glob->swap_lru);
                        kref_get(&bo->list_kref);
                }
        }
}

int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
{
        int put_count = 0;

        if (!list_empty(&bo->swap)) {
                list_del_init(&bo->swap);
                ++put_count;
        }
        if (!list_empty(&bo->lru)) {
                list_del_init(&bo->lru);
                ++put_count;
        }

        /*
         * TODO: Add a driver hook to delete from
         * driver-specific LRU's here.
         */

        return put_count;
}
int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
                          bool interruptible,
                          bool no_wait, bool use_sequence, uint32_t sequence)
{
        struct ttm_bo_global *glob = bo->glob;
        int ret;

        while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) {
                /*
                 * Deadlock avoidance for multi-bo reserving.
                 */
                if (use_sequence && bo->seq_valid) {
                        /*
                         * We've already reserved this one.
                         */
                        if (unlikely(sequence == bo->val_seq))
                                return -EDEADLK;
                        /*
                         * Already reserved by a thread that will not back
                         * off for us. We need to back off. The unsigned
                         * subtraction below is a wrap-safe test for
                         * "sequence is newer than val_seq".
                         */
                        if (unlikely(sequence - bo->val_seq < (1 << 31)))
                                return -EAGAIN;
                }

                if (no_wait)
                        return -EBUSY;

                spin_unlock(&glob->lru_lock);
                ret = ttm_bo_wait_unreserved(bo, interruptible);
                spin_lock(&glob->lru_lock);

                if (unlikely(ret))
                        return ret;
        }

        if (use_sequence) {
                /*
                 * Wake up waiters that may need to recheck for deadlock,
                 * if we decreased the sequence number.
                 */
                if (unlikely((bo->val_seq - sequence < (1 << 31))
                             || !bo->seq_valid))
                        wake_up_all(&bo->event_queue);

                bo->val_seq = sequence;
                bo->seq_valid = true;
        } else {
                bo->seq_valid = false;
        }

        return 0;
}

269
static void ttm_bo_ref_bug(struct kref *list_kref)
{
        BUG();
}

void ttm_bo_list_ref_sub(struct ttm_buffer_object *bo, int count,
                         bool never_free)
{
        kref_sub(&bo->list_kref, count,
                 (never_free) ? ttm_bo_ref_bug : ttm_bo_release_list);
}

int ttm_bo_reserve(struct ttm_buffer_object *bo,
                   bool interruptible,
                   bool no_wait, bool use_sequence, uint32_t sequence)
{
        struct ttm_bo_global *glob = bo->glob;
        int put_count = 0;
        int ret;

        spin_lock(&glob->lru_lock);
        ret = ttm_bo_reserve_locked(bo, interruptible, no_wait, use_sequence,
                                    sequence);
        if (likely(ret == 0))
                put_count = ttm_bo_del_from_lru(bo);
        spin_unlock(&glob->lru_lock);

        ttm_bo_list_ref_sub(bo, put_count, true);

        return ret;
}
EXPORT_SYMBOL(ttm_bo_reserve);

void ttm_bo_unreserve_locked(struct ttm_buffer_object *bo)
{
        ttm_bo_add_to_lru(bo);
        atomic_set(&bo->reserved, 0);
        wake_up_all(&bo->event_queue);
}

void ttm_bo_unreserve(struct ttm_buffer_object *bo)
{
        struct ttm_bo_global *glob = bo->glob;

        spin_lock(&glob->lru_lock);
        ttm_bo_unreserve_locked(bo);
        spin_unlock(&glob->lru_lock);
}
EXPORT_SYMBOL(ttm_bo_unreserve);
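
/*
 * Usage sketch (illustrative only, not part of this file): a caller that
 * wants temporary exclusive access to a buffer brackets the access with a
 * reserve/unreserve pair. "my_bo" below is a hypothetical, already
 * initialized buffer object. While reserved, the bo is taken off the LRU
 * lists; ttm_bo_unreserve() puts it back and wakes other waiters.
 *
 *	int ret = ttm_bo_reserve(my_bo, true, false, false, 0);
 *	if (unlikely(ret != 0))
 *		return ret;
 *	... operate on my_bo under exclusive ownership ...
 *	ttm_bo_unreserve(my_bo);
 *
 * With interruptible == true the reserve may fail with -ERESTARTSYS; with
 * no_wait == true it fails with -EBUSY instead of sleeping.
 */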

/*
 * Called with bo->mutex held.
 */
static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_bo_global *glob = bo->glob;
        int ret = 0;
        uint32_t page_flags = 0;

        TTM_ASSERT_LOCKED(&bo->mutex);
        bo->ttm = NULL;

        if (bdev->need_dma32)
                page_flags |= TTM_PAGE_FLAG_DMA32;

        switch (bo->type) {
        case ttm_bo_type_device:
                if (zero_alloc)
                        page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
                /* fall through: device bos need a ttm just like kernel bos */
        case ttm_bo_type_kernel:
                bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
                                        page_flags, glob->dummy_read_page);
                if (unlikely(bo->ttm == NULL))
                        ret = -ENOMEM;
                break;
        case ttm_bo_type_user:
                bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
                                        page_flags | TTM_PAGE_FLAG_USER,
                                        glob->dummy_read_page);
                if (unlikely(bo->ttm == NULL)) {
                        ret = -ENOMEM;
                        break;
                }

                ret = ttm_tt_set_user(bo->ttm, current,
                                      bo->buffer_start, bo->num_pages);
                if (unlikely(ret != 0))
                        ttm_tt_destroy(bo->ttm);
                break;
        default:
                printk(KERN_ERR TTM_PFX "Illegal buffer object type\n");
                ret = -EINVAL;
                break;
        }

        return ret;
}

static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
                                  struct ttm_mem_reg *mem,
                                  bool evict, bool interruptible,
                                  bool no_wait_reserve, bool no_wait_gpu)
{
        struct ttm_bo_device *bdev = bo->bdev;
        bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem);
        bool new_is_pci = ttm_mem_reg_is_pci(bdev, mem);
        struct ttm_mem_type_manager *old_man = &bdev->man[bo->mem.mem_type];
        struct ttm_mem_type_manager *new_man = &bdev->man[mem->mem_type];
        int ret = 0;

        if (old_is_pci || new_is_pci ||
            ((mem->placement & bo->mem.placement & TTM_PL_MASK_CACHING) == 0)) {
                ret = ttm_mem_io_lock(old_man, true);
                if (unlikely(ret != 0))
                        goto out_err;
                ttm_bo_unmap_virtual_locked(bo);
                ttm_mem_io_unlock(old_man);
        }

        /*
         * Create and bind a ttm if required.
         */

        if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && (bo->ttm == NULL)) {
                ret = ttm_bo_add_ttm(bo, false);
                if (ret)
                        goto out_err;

                ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement);
                if (ret)
                        goto out_err;

                if (mem->mem_type != TTM_PL_SYSTEM) {
                        ret = ttm_tt_bind(bo->ttm, mem);
                        if (ret)
                                goto out_err;
                }

                if (bo->mem.mem_type == TTM_PL_SYSTEM) {
                        if (bdev->driver->move_notify)
                                bdev->driver->move_notify(bo, mem);
                        bo->mem = *mem;
                        mem->mm_node = NULL;
                        goto moved;
                }
        }

        if (bdev->driver->move_notify)
                bdev->driver->move_notify(bo, mem);

        if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
            !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
                ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve,
                                      no_wait_gpu, mem);
        else if (bdev->driver->move)
                ret = bdev->driver->move(bo, evict, interruptible,
                                         no_wait_reserve, no_wait_gpu, mem);
        else
                ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve,
                                         no_wait_gpu, mem);

        if (ret)
                goto out_err;

moved:
        if (bo->evicted) {
                ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
                if (ret)
                        printk(KERN_ERR TTM_PFX "Can not flush read caches\n");
                bo->evicted = false;
        }

        if (bo->mem.mm_node) {
                bo->offset = (bo->mem.start << PAGE_SHIFT) +
                    bdev->man[bo->mem.mem_type].gpu_offset;
                bo->cur_placement = bo->mem.placement;
        } else
                bo->offset = 0;

        return 0;

out_err:
        new_man = &bdev->man[bo->mem.mem_type];
        if ((new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && bo->ttm) {
                ttm_tt_unbind(bo->ttm);
                ttm_tt_destroy(bo->ttm);
                bo->ttm = NULL;
        }

        return ret;
}

/*
 * Called with bo::reserved held.
 * Will release GPU memory type usage on destruction.
 * This is the place to put in driver specific hooks to release
 * driver private resources.
 * Will release the bo::reserved lock.
 */

static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
{
        if (bo->ttm) {
                ttm_tt_unbind(bo->ttm);
                ttm_tt_destroy(bo->ttm);
                bo->ttm = NULL;
        }
        ttm_bo_mem_put(bo, &bo->mem);

        atomic_set(&bo->reserved, 0);

        /*
         * Make processes trying to reserve really pick it up.
         */
        smp_mb__after_atomic_dec();
        wake_up_all(&bo->event_queue);
}

static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_bo_global *glob = bo->glob;
        struct ttm_bo_driver *driver;
        void *sync_obj = NULL;
        void *sync_obj_arg;
        int put_count;
        int ret;

        spin_lock(&bdev->fence_lock);
        (void) ttm_bo_wait(bo, false, false, true);
        if (!bo->sync_obj) {

                spin_lock(&glob->lru_lock);

                /*
                 * Lock inversion between bo::reserved and bdev::fence_lock
                 * here, but that's OK, since we're only trylocking.
                 */

                ret = ttm_bo_reserve_locked(bo, false, true, false, 0);

                if (unlikely(ret == -EBUSY))
                        goto queue;

                spin_unlock(&bdev->fence_lock);
                put_count = ttm_bo_del_from_lru(bo);

                spin_unlock(&glob->lru_lock);
                ttm_bo_cleanup_memtype_use(bo);

                ttm_bo_list_ref_sub(bo, put_count, true);

                return;
        } else {
                spin_lock(&glob->lru_lock);
        }
queue:
        driver = bdev->driver;
        if (bo->sync_obj)
                sync_obj = driver->sync_obj_ref(bo->sync_obj);
        sync_obj_arg = bo->sync_obj_arg;

        kref_get(&bo->list_kref);
        list_add_tail(&bo->ddestroy, &bdev->ddestroy);
        spin_unlock(&glob->lru_lock);
        spin_unlock(&bdev->fence_lock);

        if (sync_obj) {
                driver->sync_obj_flush(sync_obj, sync_obj_arg);
                driver->sync_obj_unref(&sync_obj);
        }
        schedule_delayed_work(&bdev->wq,
                              ((HZ / 100) < 1) ? 1 : HZ / 100);
}

/**
 * ttm_bo_cleanup_refs - clean up a delayed-destroy buffer object
 * If bo idle, remove from delayed- and lru lists, and unref.
 * If not idle, do nothing.
 *
 * @interruptible: Any sleeps should occur interruptibly.
 * @no_wait_reserve: Never wait for reserve. Return -EBUSY instead.
 * @no_wait_gpu: Never wait for gpu. Return -EBUSY instead.
 */

static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
                               bool interruptible,
                               bool no_wait_reserve,
                               bool no_wait_gpu)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_bo_global *glob = bo->glob;
        int put_count;
        int ret = 0;

retry:
        spin_lock(&bdev->fence_lock);
        ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
        spin_unlock(&bdev->fence_lock);

        if (unlikely(ret != 0))
                return ret;

        spin_lock(&glob->lru_lock);
        ret = ttm_bo_reserve_locked(bo, interruptible,
                                    no_wait_reserve, false, 0);

        if (unlikely(ret != 0) || list_empty(&bo->ddestroy)) {
                spin_unlock(&glob->lru_lock);
                return ret;
        }

        /*
         * We can re-check for a sync object without taking
         * bdev::fence_lock, since setting the sync object also requires
         * bo::reserved, which we hold. A busy object at this point may
         * be caused by another thread recently starting an accelerated
         * eviction.
         */

        if (unlikely(bo->sync_obj)) {
                atomic_set(&bo->reserved, 0);
                wake_up_all(&bo->event_queue);
                spin_unlock(&glob->lru_lock);
                goto retry;
        }

        put_count = ttm_bo_del_from_lru(bo);
        list_del_init(&bo->ddestroy);
        ++put_count;

        spin_unlock(&glob->lru_lock);
        ttm_bo_cleanup_memtype_use(bo);

        ttm_bo_list_ref_sub(bo, put_count, true);

        return 0;
}

/*
 * Traverse the delayed list, and call ttm_bo_cleanup_refs on all
 * encountered buffers.
 */

static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
{
        struct ttm_bo_global *glob = bdev->glob;
        struct ttm_buffer_object *entry = NULL;
        int ret = 0;

        spin_lock(&glob->lru_lock);
        if (list_empty(&bdev->ddestroy))
                goto out_unlock;

        entry = list_first_entry(&bdev->ddestroy,
                                 struct ttm_buffer_object, ddestroy);
        kref_get(&entry->list_kref);

        for (;;) {
                struct ttm_buffer_object *nentry = NULL;

                if (entry->ddestroy.next != &bdev->ddestroy) {
                        nentry = list_first_entry(&entry->ddestroy,
                                struct ttm_buffer_object, ddestroy);
                        kref_get(&nentry->list_kref);
                }

                spin_unlock(&glob->lru_lock);
                ret = ttm_bo_cleanup_refs(entry, false, !remove_all,
                                          !remove_all);
                kref_put(&entry->list_kref, ttm_bo_release_list);
                entry = nentry;

                if (ret || !entry)
                        goto out;

                spin_lock(&glob->lru_lock);
                if (list_empty(&entry->ddestroy))
                        break;
        }

out_unlock:
        spin_unlock(&glob->lru_lock);
out:
        if (entry)
                kref_put(&entry->list_kref, ttm_bo_release_list);
        return ret;
}

static void ttm_bo_delayed_workqueue(struct work_struct *work)
{
        struct ttm_bo_device *bdev =
            container_of(work, struct ttm_bo_device, wq.work);

        if (ttm_bo_delayed_delete(bdev, false)) {
                schedule_delayed_work(&bdev->wq,
                                      ((HZ / 100) < 1) ? 1 : HZ / 100);
        }
}

static void ttm_bo_release(struct kref *kref)
{
        struct ttm_buffer_object *bo =
            container_of(kref, struct ttm_buffer_object, kref);
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];

        if (likely(bo->vm_node != NULL)) {
                rb_erase(&bo->vm_rb, &bdev->addr_space_rb);
                drm_mm_put_block(bo->vm_node);
                bo->vm_node = NULL;
        }
        write_unlock(&bdev->vm_lock);
        ttm_mem_io_lock(man, false);
        ttm_mem_io_free_vm(bo);
        ttm_mem_io_unlock(man);
        ttm_bo_cleanup_refs_or_queue(bo);
        kref_put(&bo->list_kref, ttm_bo_release_list);
        write_lock(&bdev->vm_lock);
}

void ttm_bo_unref(struct ttm_buffer_object **p_bo)
{
        struct ttm_buffer_object *bo = *p_bo;
        struct ttm_bo_device *bdev = bo->bdev;

        *p_bo = NULL;
        write_lock(&bdev->vm_lock);
        kref_put(&bo->kref, ttm_bo_release);
        write_unlock(&bdev->vm_lock);
}
EXPORT_SYMBOL(ttm_bo_unref);

int ttm_bo_lock_delayed_workqueue(struct ttm_bo_device *bdev)
{
        return cancel_delayed_work_sync(&bdev->wq);
}
EXPORT_SYMBOL(ttm_bo_lock_delayed_workqueue);

void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched)
{
        if (resched)
                schedule_delayed_work(&bdev->wq,
                                      ((HZ / 100) < 1) ? 1 : HZ / 100);
}
EXPORT_SYMBOL(ttm_bo_unlock_delayed_workqueue);
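
/*
 * Usage sketch (illustrative): drivers commonly stop the delayed-destroy
 * worker around operations that must not race buffer teardown, such as a
 * GPU reset or suspend, and restart it afterwards:
 *
 *	int resched = ttm_bo_lock_delayed_workqueue(bdev);
 *	... perform the reset or suspend work ...
 *	ttm_bo_unlock_delayed_workqueue(bdev, resched);
 */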

static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
                        bool no_wait_reserve, bool no_wait_gpu)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_mem_reg evict_mem;
        struct ttm_placement placement;
        int ret = 0;

        spin_lock(&bdev->fence_lock);
        ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
        spin_unlock(&bdev->fence_lock);

        if (unlikely(ret != 0)) {
                if (ret != -ERESTARTSYS) {
                        printk(KERN_ERR TTM_PFX
                               "Failed to expire sync object before "
                               "buffer eviction.\n");
                }
                goto out;
        }

        BUG_ON(!atomic_read(&bo->reserved));

        evict_mem = bo->mem;
        evict_mem.mm_node = NULL;
        evict_mem.bus.io_reserved_vm = false;
        evict_mem.bus.io_reserved_count = 0;

        placement.fpfn = 0;
        placement.lpfn = 0;
        placement.num_placement = 0;
        placement.num_busy_placement = 0;
        bdev->driver->evict_flags(bo, &placement);
        ret = ttm_bo_mem_space(bo, &placement, &evict_mem, interruptible,
                               no_wait_reserve, no_wait_gpu);
        if (ret) {
                if (ret != -ERESTARTSYS) {
                        printk(KERN_ERR TTM_PFX
                               "Failed to find memory space for "
                               "buffer 0x%p eviction.\n", bo);
                        ttm_bo_mem_space_debug(bo, &placement);
                }
                goto out;
        }

        ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible,
                                     no_wait_reserve, no_wait_gpu);
        if (ret) {
                if (ret != -ERESTARTSYS)
                        printk(KERN_ERR TTM_PFX "Buffer eviction failed\n");
                ttm_bo_mem_put(bo, &evict_mem);
                goto out;
        }
        bo->evicted = true;
out:
        return ret;
}

static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
                               uint32_t mem_type,
                               bool interruptible, bool no_wait_reserve,
                               bool no_wait_gpu)
{
        struct ttm_bo_global *glob = bdev->glob;
        struct ttm_mem_type_manager *man = &bdev->man[mem_type];
        struct ttm_buffer_object *bo;
        int ret, put_count = 0;

retry:
        spin_lock(&glob->lru_lock);
        if (list_empty(&man->lru)) {
                spin_unlock(&glob->lru_lock);
                return -EBUSY;
        }

        bo = list_first_entry(&man->lru, struct ttm_buffer_object, lru);
        kref_get(&bo->list_kref);

        if (!list_empty(&bo->ddestroy)) {
                spin_unlock(&glob->lru_lock);
                ret = ttm_bo_cleanup_refs(bo, interruptible,
                                          no_wait_reserve, no_wait_gpu);
                kref_put(&bo->list_kref, ttm_bo_release_list);

                if (likely(ret == 0 || ret == -ERESTARTSYS))
                        return ret;

                goto retry;
        }

        ret = ttm_bo_reserve_locked(bo, false, no_wait_reserve, false, 0);

        if (unlikely(ret == -EBUSY)) {
                spin_unlock(&glob->lru_lock);
                if (likely(!no_wait_gpu))
                        ret = ttm_bo_wait_unreserved(bo, interruptible);

                kref_put(&bo->list_kref, ttm_bo_release_list);

                /*
                 * We *need* to retry after releasing the lru lock.
                 */

                if (unlikely(ret != 0))
                        return ret;
                goto retry;
        }

        put_count = ttm_bo_del_from_lru(bo);
        spin_unlock(&glob->lru_lock);

        BUG_ON(ret != 0);

        ttm_bo_list_ref_sub(bo, put_count, true);

        ret = ttm_bo_evict(bo, interruptible, no_wait_reserve, no_wait_gpu);
        ttm_bo_unreserve(bo);

        kref_put(&bo->list_kref, ttm_bo_release_list);
        return ret;
}

void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem)
{
        struct ttm_mem_type_manager *man = &bo->bdev->man[mem->mem_type];

        if (mem->mm_node)
                (*man->func->put_node)(man, mem);
}
EXPORT_SYMBOL(ttm_bo_mem_put);

/*
 * Repeatedly evict memory from the LRU for @mem_type until we create enough
 * space, or we've evicted everything and there isn't enough space.
 */
static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
                                  uint32_t mem_type,
                                  struct ttm_placement *placement,
                                  struct ttm_mem_reg *mem,
                                  bool interruptible,
                                  bool no_wait_reserve,
                                  bool no_wait_gpu)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_mem_type_manager *man = &bdev->man[mem_type];
        int ret;

        do {
                ret = (*man->func->get_node)(man, bo, placement, mem);
                if (unlikely(ret != 0))
                        return ret;
                if (mem->mm_node)
                        break;
                ret = ttm_mem_evict_first(bdev, mem_type, interruptible,
                                          no_wait_reserve, no_wait_gpu);
                if (unlikely(ret != 0))
                        return ret;
        } while (1);
        if (mem->mm_node == NULL)
                return -ENOMEM;
        mem->mem_type = mem_type;
        return 0;
}

static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
                                      uint32_t cur_placement,
                                      uint32_t proposed_placement)
{
        uint32_t caching = proposed_placement & TTM_PL_MASK_CACHING;
        uint32_t result = proposed_placement & ~TTM_PL_MASK_CACHING;

        /*
         * Keep current caching if possible.
         */

        if ((cur_placement & caching) != 0)
                result |= (cur_placement & caching);
        else if ((man->default_caching & caching) != 0)
                result |= man->default_caching;
        else if ((TTM_PL_FLAG_CACHED & caching) != 0)
                result |= TTM_PL_FLAG_CACHED;
        else if ((TTM_PL_FLAG_WC & caching) != 0)
                result |= TTM_PL_FLAG_WC;
        else if ((TTM_PL_FLAG_UNCACHED & caching) != 0)
                result |= TTM_PL_FLAG_UNCACHED;

        return result;
}
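
/*
 * Worked example for ttm_bo_select_caching() (illustrative): a buffer
 * currently mapped write-combined (TTM_PL_FLAG_WC) that is validated into
 * a placement allowing CACHED | WC keeps its WC bit. If none of the
 * current caching bits are allowed, the manager's default caching is used
 * when permitted; only then does selection fall back to CACHED, then WC,
 * then UNCACHED, in that order.
 */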

static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
                                 bool disallow_fixed,
                                 uint32_t mem_type,
                                 uint32_t proposed_placement,
                                 uint32_t *masked_placement)
{
        uint32_t cur_flags = ttm_bo_type_flags(mem_type);

        if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && disallow_fixed)
                return false;

        if ((cur_flags & proposed_placement & TTM_PL_MASK_MEM) == 0)
                return false;

        if ((proposed_placement & man->available_caching) == 0)
                return false;

        cur_flags |= (proposed_placement & man->available_caching);

        *masked_placement = cur_flags;
        return true;
}

/*
 * Creates space for memory region @mem according to its type.
 *
 * This function first searches for free space in compatible memory types in
 * the priority order defined by the driver. If free space isn't found, then
 * ttm_bo_mem_force_space is attempted in priority order to evict and find
 * space.
 */
int ttm_bo_mem_space(struct ttm_buffer_object *bo,
                     struct ttm_placement *placement,
                     struct ttm_mem_reg *mem,
                     bool interruptible, bool no_wait_reserve,
                     bool no_wait_gpu)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_mem_type_manager *man;
        uint32_t mem_type = TTM_PL_SYSTEM;
        uint32_t cur_flags = 0;
        bool type_found = false;
        bool type_ok = false;
        bool has_erestartsys = false;
        int i, ret;

        mem->mm_node = NULL;
        for (i = 0; i < placement->num_placement; ++i) {
                ret = ttm_mem_type_from_flags(placement->placement[i],
                                              &mem_type);
                if (ret)
                        return ret;
                man = &bdev->man[mem_type];

                type_ok = ttm_bo_mt_compatible(man,
                                               bo->type == ttm_bo_type_user,
                                               mem_type,
                                               placement->placement[i],
                                               &cur_flags);

                if (!type_ok)
                        continue;

                cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
                                                  cur_flags);
                /*
                 * Merge the access and other non-mapping-related flag bits
                 * from the memory placement flags into the current flags.
                 */
                ttm_flag_masked(&cur_flags, placement->placement[i],
                                ~TTM_PL_MASK_MEMTYPE);

                if (mem_type == TTM_PL_SYSTEM)
                        break;

                if (man->has_type && man->use_type) {
                        type_found = true;
                        ret = (*man->func->get_node)(man, bo, placement, mem);
                        if (unlikely(ret))
                                return ret;
                }
                if (mem->mm_node)
                        break;
        }

        if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || mem->mm_node) {
                mem->mem_type = mem_type;
                mem->placement = cur_flags;
                return 0;
        }

        if (!type_found)
                return -EINVAL;

        for (i = 0; i < placement->num_busy_placement; ++i) {
                ret = ttm_mem_type_from_flags(placement->busy_placement[i],
                                              &mem_type);
                if (ret)
                        return ret;
                man = &bdev->man[mem_type];
                if (!man->has_type)
                        continue;
                if (!ttm_bo_mt_compatible(man,
                                          bo->type == ttm_bo_type_user,
                                          mem_type,
                                          placement->busy_placement[i],
                                          &cur_flags))
                        continue;

                cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
                                                  cur_flags);
                /*
                 * Merge the access and other non-mapping-related flag bits
                 * from the memory placement flags into the current flags.
                 */
                ttm_flag_masked(&cur_flags, placement->busy_placement[i],
                                ~TTM_PL_MASK_MEMTYPE);

                if (mem_type == TTM_PL_SYSTEM) {
                        mem->mem_type = mem_type;
                        mem->placement = cur_flags;
                        mem->mm_node = NULL;
                        return 0;
                }

                ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem,
                                             interruptible, no_wait_reserve,
                                             no_wait_gpu);
                if (ret == 0 && mem->mm_node) {
                        mem->placement = cur_flags;
                        return 0;
                }
                if (ret == -ERESTARTSYS)
                        has_erestartsys = true;
        }
        ret = (has_erestartsys) ? -ERESTARTSYS : -ENOMEM;
        return ret;
}
EXPORT_SYMBOL(ttm_bo_mem_space);
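
/*
 * Usage sketch (illustrative): callers describe acceptable domains with a
 * struct ttm_placement. The example below assumes a driver exposing a VRAM
 * memory type and uses the standard flags from ttm_placement.h; all names
 * prefixed "my_" are hypothetical.
 *
 *	static uint32_t my_placements[] = {
 *		TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC,
 *	};
 *	static uint32_t my_busy_placements[] = {
 *		TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED,
 *	};
 *	struct ttm_placement place = {
 *		.fpfn = 0,
 *		.lpfn = 0,
 *		.num_placement = 1,
 *		.placement = my_placements,
 *		.num_busy_placement = 1,
 *		.busy_placement = my_busy_placements,
 *	};
 *
 * ttm_bo_mem_space() tries "placement" first and falls back to
 * "busy_placement", evicting as needed; lpfn == 0 means no upper range
 * restriction.
 */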

int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait)
{
        if ((atomic_read(&bo->cpu_writers) > 0) && no_wait)
                return -EBUSY;

        return wait_event_interruptible(bo->event_queue,
                                        atomic_read(&bo->cpu_writers) == 0);
}
EXPORT_SYMBOL(ttm_bo_wait_cpu);

int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
                       struct ttm_placement *placement,
                       bool interruptible, bool no_wait_reserve,
                       bool no_wait_gpu)
{
        int ret = 0;
        struct ttm_mem_reg mem;
        struct ttm_bo_device *bdev = bo->bdev;

        BUG_ON(!atomic_read(&bo->reserved));

        /*
         * FIXME: It's possible to pipeline buffer moves.
         * Have the driver move function wait for idle when necessary,
         * instead of doing it here.
         */
        spin_lock(&bdev->fence_lock);
        ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
        spin_unlock(&bdev->fence_lock);
        if (ret)
                return ret;
        mem.num_pages = bo->num_pages;
        mem.size = mem.num_pages << PAGE_SHIFT;
        mem.page_alignment = bo->mem.page_alignment;
        mem.bus.io_reserved_vm = false;
        mem.bus.io_reserved_count = 0;
        /*
         * Determine where to move the buffer.
         */
        ret = ttm_bo_mem_space(bo, placement, &mem, interruptible,
                               no_wait_reserve, no_wait_gpu);
        if (ret)
                goto out_unlock;
        ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible,
                                     no_wait_reserve, no_wait_gpu);
out_unlock:
        if (ret && mem.mm_node)
                ttm_bo_mem_put(bo, &mem);
        return ret;
}

static int ttm_bo_mem_compat(struct ttm_placement *placement,
                             struct ttm_mem_reg *mem)
{
        int i;

        if (mem->mm_node && placement->lpfn != 0 &&
            (mem->start < placement->fpfn ||
             mem->start + mem->num_pages > placement->lpfn))
                return -1;

        for (i = 0; i < placement->num_placement; i++) {
                if ((placement->placement[i] & mem->placement &
                     TTM_PL_MASK_CACHING) &&
                    (placement->placement[i] & mem->placement &
                     TTM_PL_MASK_MEM))
                        return i;
        }
        return -1;
}

int ttm_bo_validate(struct ttm_buffer_object *bo,
                    struct ttm_placement *placement,
                    bool interruptible, bool no_wait_reserve,
                    bool no_wait_gpu)
{
        int ret;

        BUG_ON(!atomic_read(&bo->reserved));
        /* Check that the range is valid */
        if (placement->lpfn || placement->fpfn)
                if (placement->fpfn > placement->lpfn ||
                    (placement->lpfn - placement->fpfn) < bo->num_pages)
                        return -EINVAL;
        /*
         * Check whether we need to move the buffer.
         */
        ret = ttm_bo_mem_compat(placement, &bo->mem);
        if (ret < 0) {
                ret = ttm_bo_move_buffer(bo, placement, interruptible,
                                         no_wait_reserve, no_wait_gpu);
                if (ret)
                        return ret;
        } else {
                /*
                 * Merge the access and other non-mapping-related flag bits
                 * from the compatible memory placement into the active flags.
                 */
                ttm_flag_masked(&bo->mem.placement, placement->placement[ret],
                                ~TTM_PL_MASK_MEMTYPE);
        }
        /*
         * We might need to add a TTM.
         */
        if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
                ret = ttm_bo_add_ttm(bo, true);
                if (ret)
                        return ret;
        }
        return 0;
}
EXPORT_SYMBOL(ttm_bo_validate);

int ttm_bo_check_placement(struct ttm_buffer_object *bo,
                           struct ttm_placement *placement)
{
        BUG_ON((placement->fpfn || placement->lpfn) &&
               (bo->mem.num_pages > (placement->lpfn - placement->fpfn)));

        return 0;
}

int ttm_bo_init(struct ttm_bo_device *bdev,
                struct ttm_buffer_object *bo,
                unsigned long size,
                enum ttm_bo_type type,
                struct ttm_placement *placement,
                uint32_t page_alignment,
                unsigned long buffer_start,
                bool interruptible,
                struct file *persistent_swap_storage,
                size_t acc_size,
                void (*destroy) (struct ttm_buffer_object *))
{
        int ret = 0;
        unsigned long num_pages;

        size += buffer_start & ~PAGE_MASK;
        num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
        if (num_pages == 0) {
                printk(KERN_ERR TTM_PFX "Illegal buffer object size.\n");
                if (destroy)
                        (*destroy)(bo);
                else
                        kfree(bo);
                return -EINVAL;
        }
        bo->destroy = destroy;

        kref_init(&bo->kref);
        kref_init(&bo->list_kref);
        atomic_set(&bo->cpu_writers, 0);
        atomic_set(&bo->reserved, 1);
        init_waitqueue_head(&bo->event_queue);
        INIT_LIST_HEAD(&bo->lru);
        INIT_LIST_HEAD(&bo->ddestroy);
        INIT_LIST_HEAD(&bo->swap);
        INIT_LIST_HEAD(&bo->io_reserve_lru);
        bo->bdev = bdev;
        bo->glob = bdev->glob;
        bo->type = type;
        bo->num_pages = num_pages;
        bo->mem.size = num_pages << PAGE_SHIFT;
        bo->mem.mem_type = TTM_PL_SYSTEM;
        bo->mem.num_pages = bo->num_pages;
        bo->mem.mm_node = NULL;
        bo->mem.page_alignment = page_alignment;
        bo->mem.bus.io_reserved_vm = false;
        bo->mem.bus.io_reserved_count = 0;
        bo->buffer_start = buffer_start & PAGE_MASK;
        bo->priv_flags = 0;
        bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
        bo->seq_valid = false;
        bo->persistent_swap_storage = persistent_swap_storage;
        bo->acc_size = acc_size;
        atomic_inc(&bo->glob->bo_count);

        ret = ttm_bo_check_placement(bo, placement);
        if (unlikely(ret != 0))
                goto out_err;

        /*
         * For ttm_bo_type_device buffers, allocate
         * address space from the device.
         */
        if (bo->type == ttm_bo_type_device) {
                ret = ttm_bo_setup_vm(bo);
                if (ret)
                        goto out_err;
        }

        ret = ttm_bo_validate(bo, placement, interruptible, false, false);
        if (ret)
                goto out_err;

        ttm_bo_unreserve(bo);
        return 0;

out_err:
        ttm_bo_unreserve(bo);
        ttm_bo_unref(&bo);

        return ret;
}
EXPORT_SYMBOL(ttm_bo_init);
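
/*
 * Usage sketch (illustrative): a driver embedding a ttm_buffer_object in
 * its own bo struct might initialize it as below; "my_bo", "my_destroy"
 * and "place" are hypothetical driver-side names, and acc_size accounts
 * for the kernel memory the object pins.
 *
 *	ret = ttm_bo_init(bdev, &my_bo->tbo, size, ttm_bo_type_device,
 *			  &place, 0, 0, true, NULL, acc_size, my_destroy);
 *
 * Note that on failure ttm_bo_init() has already unreserved and
 * unreferenced the object, so the destroy callback (or kfree) has run and
 * the caller must not touch it again.
 */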

static inline size_t ttm_bo_size(struct ttm_bo_global *glob,
                                 unsigned long num_pages)
{
        size_t page_array_size = (num_pages * sizeof(void *) + PAGE_SIZE - 1) &
            PAGE_MASK;

        return glob->ttm_bo_size + 2 * page_array_size;
}

int ttm_bo_create(struct ttm_bo_device *bdev,
                  unsigned long size,
                  enum ttm_bo_type type,
                  struct ttm_placement *placement,
                  uint32_t page_alignment,
                  unsigned long buffer_start,
                  bool interruptible,
                  struct file *persistent_swap_storage,
                  struct ttm_buffer_object **p_bo)
{
        struct ttm_buffer_object *bo;
        struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
        int ret;

        size_t acc_size =
            ttm_bo_size(bdev->glob, (size + PAGE_SIZE - 1) >> PAGE_SHIFT);
        ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
        if (unlikely(ret != 0))
                return ret;

        bo = kzalloc(sizeof(*bo), GFP_KERNEL);

        if (unlikely(bo == NULL)) {
                ttm_mem_global_free(mem_glob, acc_size);
                return -ENOMEM;
        }

        ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment,
                          buffer_start, interruptible,
                          persistent_swap_storage, acc_size, NULL);
        if (likely(ret == 0))
                *p_bo = bo;

        return ret;
}

static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
                                   unsigned mem_type, bool allow_errors)
{
        struct ttm_mem_type_manager *man = &bdev->man[mem_type];
        struct ttm_bo_global *glob = bdev->glob;
        int ret;

        /*
         * Can't use standard list traversal since we're unlocking.
         */

        spin_lock(&glob->lru_lock);
        while (!list_empty(&man->lru)) {
                spin_unlock(&glob->lru_lock);
                ret = ttm_mem_evict_first(bdev, mem_type, false, false, false);
                if (ret) {
                        if (allow_errors) {
                                return ret;
                        } else {
                                printk(KERN_ERR TTM_PFX
                                       "Cleanup eviction failed\n");
                        }
                }
                spin_lock(&glob->lru_lock);
        }
        spin_unlock(&glob->lru_lock);
        return 0;
}

int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
{
        struct ttm_mem_type_manager *man;
        int ret = -EINVAL;

        if (mem_type >= TTM_NUM_MEM_TYPES) {
                printk(KERN_ERR TTM_PFX "Illegal memory type %d\n", mem_type);
                return ret;
        }
        man = &bdev->man[mem_type];

        if (!man->has_type) {
                printk(KERN_ERR TTM_PFX "Trying to take down uninitialized "
                       "memory manager type %u\n", mem_type);
                return ret;
        }

        man->use_type = false;
        man->has_type = false;

        ret = 0;
        if (mem_type > 0) {
                ttm_bo_force_list_clean(bdev, mem_type, false);

                ret = (*man->func->takedown)(man);
        }

        return ret;
}
EXPORT_SYMBOL(ttm_bo_clean_mm);

int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
{
        struct ttm_mem_type_manager *man = &bdev->man[mem_type];

        if (mem_type == 0 || mem_type >= TTM_NUM_MEM_TYPES) {
                printk(KERN_ERR TTM_PFX
                       "Illegal memory manager memory type %u.\n",
                       mem_type);
                return -EINVAL;
        }

        if (!man->has_type) {
                printk(KERN_ERR TTM_PFX
                       "Memory type %u has not been initialized.\n",
                       mem_type);
                return 0;
        }

        return ttm_bo_force_list_clean(bdev, mem_type, true);
}
EXPORT_SYMBOL(ttm_bo_evict_mm);
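
/*
 * Usage sketch (illustrative): drivers typically evict a whole
 * device-local memory type before suspend, e.g. for a VRAM manager:
 *
 *	ret = ttm_bo_evict_mm(bdev, TTM_PL_VRAM);
 *
 * A nonzero return means some buffers could not be evicted and may still
 * reside in that memory type.
 */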

int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
                   unsigned long p_size)
{
        int ret = -EINVAL;
        struct ttm_mem_type_manager *man;

        BUG_ON(type >= TTM_NUM_MEM_TYPES);
        man = &bdev->man[type];
        BUG_ON(man->has_type);
        man->io_reserve_fastpath = true;
        man->use_io_reserve_lru = false;
        mutex_init(&man->io_reserve_mutex);
        INIT_LIST_HEAD(&man->io_reserve_lru);

        ret = bdev->driver->init_mem_type(bdev, type, man);
        if (ret)
                return ret;
        man->bdev = bdev;

        ret = 0;
        if (type != TTM_PL_SYSTEM) {
                ret = (*man->func->init)(man, p_size);
                if (ret)
                        return ret;
        }
        man->has_type = true;
        man->use_type = true;
        man->size = p_size;

        INIT_LIST_HEAD(&man->lru);

        return 0;
}
EXPORT_SYMBOL(ttm_bo_init_mm);

static void ttm_bo_global_kobj_release(struct kobject *kobj)
{
        struct ttm_bo_global *glob =
                container_of(kobj, struct ttm_bo_global, kobj);

        ttm_mem_unregister_shrink(glob->mem_glob, &glob->shrink);
        __free_page(glob->dummy_read_page);
        kfree(glob);
}

void ttm_bo_global_release(struct drm_global_reference *ref)
{
        struct ttm_bo_global *glob = ref->object;

        kobject_del(&glob->kobj);
        kobject_put(&glob->kobj);
}
EXPORT_SYMBOL(ttm_bo_global_release);

int ttm_bo_global_init(struct drm_global_reference *ref)
{
        struct ttm_bo_global_ref *bo_ref =
                container_of(ref, struct ttm_bo_global_ref, ref);
        struct ttm_bo_global *glob = ref->object;
        int ret;

        mutex_init(&glob->device_list_mutex);
        spin_lock_init(&glob->lru_lock);
        glob->mem_glob = bo_ref->mem_glob;
        glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);

        if (unlikely(glob->dummy_read_page == NULL)) {
                ret = -ENOMEM;
                goto out_no_drp;
        }

        INIT_LIST_HEAD(&glob->swap_lru);
        INIT_LIST_HEAD(&glob->device_list);

        ttm_mem_init_shrink(&glob->shrink, ttm_bo_swapout);
        ret = ttm_mem_register_shrink(glob->mem_glob, &glob->shrink);
        if (unlikely(ret != 0)) {
                printk(KERN_ERR TTM_PFX
                       "Could not register buffer object swapout.\n");
                goto out_no_shrink;
        }

        glob->ttm_bo_extra_size =
                ttm_round_pot(sizeof(struct ttm_tt)) +
                ttm_round_pot(sizeof(struct ttm_backend));

        glob->ttm_bo_size = glob->ttm_bo_extra_size +
                ttm_round_pot(sizeof(struct ttm_buffer_object));

        atomic_set(&glob->bo_count, 0);

        ret = kobject_init_and_add(&glob->kobj, &ttm_bo_glob_kobj_type,
                                   ttm_get_kobj(), "buffer_objects");
        if (unlikely(ret != 0))
                kobject_put(&glob->kobj);
        return ret;
out_no_shrink:
        __free_page(glob->dummy_read_page);
out_no_drp:
        kfree(glob);
        return ret;
}
EXPORT_SYMBOL(ttm_bo_global_init);

int ttm_bo_device_release(struct ttm_bo_device *bdev)
{
        int ret = 0;
        unsigned i = TTM_NUM_MEM_TYPES;
        struct ttm_mem_type_manager *man;
        struct ttm_bo_global *glob = bdev->glob;

        while (i--) {
                man = &bdev->man[i];
                if (man->has_type) {
                        man->use_type = false;
                        if ((i != TTM_PL_SYSTEM) && ttm_bo_clean_mm(bdev, i)) {
                                ret = -EBUSY;
                                printk(KERN_ERR TTM_PFX
                                       "DRM memory manager type %d "
                                       "is not clean.\n", i);
                        }
                        man->has_type = false;
                }
        }

        mutex_lock(&glob->device_list_mutex);
        list_del(&bdev->device_list);
        mutex_unlock(&glob->device_list_mutex);

        cancel_delayed_work_sync(&bdev->wq);

        while (ttm_bo_delayed_delete(bdev, true))
                ;

        spin_lock(&glob->lru_lock);
        if (list_empty(&bdev->ddestroy))
                TTM_DEBUG("Delayed destroy list was clean\n");

        if (list_empty(&bdev->man[0].lru))
                TTM_DEBUG("Swap list was clean\n");
        spin_unlock(&glob->lru_lock);

        BUG_ON(!drm_mm_clean(&bdev->addr_space_mm));
        write_lock(&bdev->vm_lock);
        drm_mm_takedown(&bdev->addr_space_mm);
        write_unlock(&bdev->vm_lock);

        return ret;
}
EXPORT_SYMBOL(ttm_bo_device_release);

int ttm_bo_device_init(struct ttm_bo_device *bdev,
                       struct ttm_bo_global *glob,
                       struct ttm_bo_driver *driver,
                       uint64_t file_page_offset,
                       bool need_dma32)
{
        int ret = -EINVAL;

        rwlock_init(&bdev->vm_lock);
        bdev->driver = driver;

        memset(bdev->man, 0, sizeof(bdev->man));

        /*
         * Initialize the system memory buffer type.
         * Other types need to be driver / IOCTL initialized.
         */
        ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0);
        if (unlikely(ret != 0))
                goto out_no_sys;

        bdev->addr_space_rb = RB_ROOT;
        ret = drm_mm_init(&bdev->addr_space_mm, file_page_offset, 0x10000000);
        if (unlikely(ret != 0))
                goto out_no_addr_mm;

        INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
        bdev->nice_mode = true;
        INIT_LIST_HEAD(&bdev->ddestroy);
        bdev->dev_mapping = NULL;
        bdev->glob = glob;
        bdev->need_dma32 = need_dma32;
        bdev->val_seq = 0;
        spin_lock_init(&bdev->fence_lock);
        mutex_lock(&glob->device_list_mutex);
        list_add_tail(&bdev->device_list, &glob->device_list);
        mutex_unlock(&glob->device_list_mutex);

        return 0;
out_no_addr_mm:
        ttm_bo_clean_mm(bdev, 0);
out_no_sys:
        return ret;
}
EXPORT_SYMBOL(ttm_bo_device_init);
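
/*
 * Usage sketch (illustrative): a driver sets up its buffer object device
 * once at load time and then registers its driver-managed memory types.
 * "my_driver" is the driver's ttm_bo_driver, and MY_FILE_PAGE_OFFSET is a
 * hypothetical, driver-chosen mmap offset for buffer objects.
 *
 *	ret = ttm_bo_device_init(&my->bdev, my->bo_global_ref.ref.object,
 *				 &my_driver, MY_FILE_PAGE_OFFSET, false);
 *	if (ret)
 *		return ret;
 *	ret = ttm_bo_init_mm(&my->bdev, TTM_PL_VRAM,
 *			     vram_size >> PAGE_SHIFT);
 */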

/*
 * buffer object vm functions.
 */

bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
        struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

        if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
                if (mem->mem_type == TTM_PL_SYSTEM)
                        return false;

                if (man->flags & TTM_MEMTYPE_FLAG_CMA)
                        return false;

                if (mem->placement & TTM_PL_FLAG_CACHED)
                        return false;
        }
        return true;
}

void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo)
{
        struct ttm_bo_device *bdev = bo->bdev;
        loff_t offset = (loff_t) bo->addr_space_offset;
        loff_t holelen = ((loff_t) bo->mem.num_pages) << PAGE_SHIFT;

        if (!bdev->dev_mapping)
                return;
        unmap_mapping_range(bdev->dev_mapping, offset, holelen, 1);
        ttm_mem_io_free_vm(bo);
}

void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];

        ttm_mem_io_lock(man, false);
        ttm_bo_unmap_virtual_locked(bo);
        ttm_mem_io_unlock(man);
}
EXPORT_SYMBOL(ttm_bo_unmap_virtual);

static void ttm_bo_vm_insert_rb(struct ttm_buffer_object *bo)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct rb_node **cur = &bdev->addr_space_rb.rb_node;
        struct rb_node *parent = NULL;
        struct ttm_buffer_object *cur_bo;
        unsigned long offset = bo->vm_node->start;
        unsigned long cur_offset;

        while (*cur) {
                parent = *cur;
                cur_bo = rb_entry(parent, struct ttm_buffer_object, vm_rb);
                cur_offset = cur_bo->vm_node->start;
                if (offset < cur_offset)
                        cur = &parent->rb_left;
                else if (offset > cur_offset)
                        cur = &parent->rb_right;
                else
                        BUG();
        }

        rb_link_node(&bo->vm_rb, parent, cur);
        rb_insert_color(&bo->vm_rb, &bdev->addr_space_rb);
}

/**
 * ttm_bo_setup_vm:
 *
 * @bo: the buffer to allocate address space for
 *
 * Allocate address space in the drm device so that applications
 * can mmap the buffer and access the contents. This only
 * applies to ttm_bo_type_device objects as others are not
 * placed in the drm device address space.
 */

static int ttm_bo_setup_vm(struct ttm_buffer_object *bo)
{
        struct ttm_bo_device *bdev = bo->bdev;
        int ret;

retry_pre_get:
        ret = drm_mm_pre_get(&bdev->addr_space_mm);
        if (unlikely(ret != 0))
                return ret;

        write_lock(&bdev->vm_lock);
        bo->vm_node = drm_mm_search_free(&bdev->addr_space_mm,
                                         bo->mem.num_pages, 0, 0);

        if (unlikely(bo->vm_node == NULL)) {
                ret = -ENOMEM;
                goto out_unlock;
        }

        bo->vm_node = drm_mm_get_block_atomic(bo->vm_node,
                                              bo->mem.num_pages, 0);

        if (unlikely(bo->vm_node == NULL)) {
                write_unlock(&bdev->vm_lock);
                goto retry_pre_get;
        }

        ttm_bo_vm_insert_rb(bo);
        write_unlock(&bdev->vm_lock);
        bo->addr_space_offset = ((uint64_t) bo->vm_node->start) << PAGE_SHIFT;

        return 0;
out_unlock:
        write_unlock(&bdev->vm_lock);
        return ret;
}

/*
 * Called with bdev::fence_lock held; may temporarily drop and reacquire
 * it while waiting on or unreferencing sync objects.
 */
int ttm_bo_wait(struct ttm_buffer_object *bo,
                bool lazy, bool interruptible, bool no_wait)
{
        struct ttm_bo_driver *driver = bo->bdev->driver;
        struct ttm_bo_device *bdev = bo->bdev;
        void *sync_obj;
        void *sync_obj_arg;
        int ret = 0;

        if (likely(bo->sync_obj == NULL))
                return 0;

        while (bo->sync_obj) {

                if (driver->sync_obj_signaled(bo->sync_obj, bo->sync_obj_arg)) {
                        void *tmp_obj = bo->sync_obj;
                        bo->sync_obj = NULL;
                        clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
                        spin_unlock(&bdev->fence_lock);
                        driver->sync_obj_unref(&tmp_obj);
                        spin_lock(&bdev->fence_lock);
                        continue;
                }

                if (no_wait)
                        return -EBUSY;

                sync_obj = driver->sync_obj_ref(bo->sync_obj);
                sync_obj_arg = bo->sync_obj_arg;
                spin_unlock(&bdev->fence_lock);
                ret = driver->sync_obj_wait(sync_obj, sync_obj_arg,
                                            lazy, interruptible);
                if (unlikely(ret != 0)) {
                        driver->sync_obj_unref(&sync_obj);
                        spin_lock(&bdev->fence_lock);
                        return ret;
                }
                spin_lock(&bdev->fence_lock);
                if (likely(bo->sync_obj == sync_obj &&
                           bo->sync_obj_arg == sync_obj_arg)) {
                        void *tmp_obj = bo->sync_obj;
                        bo->sync_obj = NULL;
                        clear_bit(TTM_BO_PRIV_FLAG_MOVING,
                                  &bo->priv_flags);
                        spin_unlock(&bdev->fence_lock);
                        driver->sync_obj_unref(&sync_obj);
                        driver->sync_obj_unref(&tmp_obj);
                        spin_lock(&bdev->fence_lock);
                } else {
                        spin_unlock(&bdev->fence_lock);
                        driver->sync_obj_unref(&sync_obj);
                        spin_lock(&bdev->fence_lock);
                }
        }
        return 0;
}
EXPORT_SYMBOL(ttm_bo_wait);

int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
{
        struct ttm_bo_device *bdev = bo->bdev;
        int ret = 0;

        /*
         * Using ttm_bo_reserve makes sure the lru lists are updated.
         */

        ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
        if (unlikely(ret != 0))
                return ret;
        spin_lock(&bdev->fence_lock);
        ret = ttm_bo_wait(bo, false, true, no_wait);
        spin_unlock(&bdev->fence_lock);
        if (likely(ret == 0))
                atomic_inc(&bo->cpu_writers);
        ttm_bo_unreserve(bo);
        return ret;
}
EXPORT_SYMBOL(ttm_bo_synccpu_write_grab);

void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo)
{
        if (atomic_dec_and_test(&bo->cpu_writers))
                wake_up_all(&bo->event_queue);
}
EXPORT_SYMBOL(ttm_bo_synccpu_write_release);
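
/*
 * Usage sketch (illustrative): CPU writes that must not race the GPU are
 * bracketed by a synccpu grab/release pair. The grab waits for the buffer
 * to idle and raises cpu_writers, so concurrent ttm_bo_wait_cpu() callers
 * block until the matching release:
 *
 *	ret = ttm_bo_synccpu_write_grab(bo, false);
 *	if (unlikely(ret != 0))
 *		return ret;
 *	... CPU writes to the buffer's backing pages ...
 *	ttm_bo_synccpu_write_release(bo);
 */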

/*
 * A buffer object shrink method that tries to swap out the first
 * buffer object on the bo_global::swap_lru list.
 */

static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
{
        struct ttm_bo_global *glob =
            container_of(shrink, struct ttm_bo_global, shrink);
        struct ttm_buffer_object *bo;
        int ret = -EBUSY;
        int put_count;
        uint32_t swap_placement = (TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM);

        spin_lock(&glob->lru_lock);
        while (ret == -EBUSY) {
                if (unlikely(list_empty(&glob->swap_lru))) {
                        spin_unlock(&glob->lru_lock);
                        return -EBUSY;
                }

                bo = list_first_entry(&glob->swap_lru,
                                      struct ttm_buffer_object, swap);
                kref_get(&bo->list_kref);

                if (!list_empty(&bo->ddestroy)) {
                        spin_unlock(&glob->lru_lock);
                        (void) ttm_bo_cleanup_refs(bo, false, false, false);
                        kref_put(&bo->list_kref, ttm_bo_release_list);
                        /*
                         * Re-take the lru lock: the loop invariant is that
                         * it is held at the top of each iteration.
                         */
                        spin_lock(&glob->lru_lock);
                        continue;
                }

                /*
                 * Reserve buffer. Since we unlock while sleeping, we need
                 * to re-check that nobody removed us from the swap-list while
                 * we slept.
                 */

                ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
                if (unlikely(ret == -EBUSY)) {
                        spin_unlock(&glob->lru_lock);
                        ttm_bo_wait_unreserved(bo, false);
                        kref_put(&bo->list_kref, ttm_bo_release_list);
                        spin_lock(&glob->lru_lock);
                }
        }

        BUG_ON(ret != 0);
        put_count = ttm_bo_del_from_lru(bo);
        spin_unlock(&glob->lru_lock);

        ttm_bo_list_ref_sub(bo, put_count, true);

        /*
         * Wait for GPU, then move to system cached.
         */

        spin_lock(&bo->bdev->fence_lock);
        ret = ttm_bo_wait(bo, false, false, false);
        spin_unlock(&bo->bdev->fence_lock);

        if (unlikely(ret != 0))
                goto out;

        if ((bo->mem.placement & swap_placement) != swap_placement) {
                struct ttm_mem_reg evict_mem;

                evict_mem = bo->mem;
                evict_mem.mm_node = NULL;
                evict_mem.placement = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
                evict_mem.mem_type = TTM_PL_SYSTEM;

                ret = ttm_bo_handle_move_mem(bo, &evict_mem, true,
                                             false, false, false);
                if (unlikely(ret != 0))
                        goto out;
        }

        ttm_bo_unmap_virtual(bo);

        /*
         * Swap out. Buffer will be swapped in again as soon as
         * anyone tries to access a ttm page.
         */

        if (bo->bdev->driver->swap_notify)
                bo->bdev->driver->swap_notify(bo);

        ret = ttm_tt_swapout(bo->ttm, bo->persistent_swap_storage);
out:

        /*
         * Unreserve without putting on LRU to avoid swapping out an
         * already swapped buffer.
         */

        atomic_set(&bo->reserved, 0);
        wake_up_all(&bo->event_queue);
        kref_put(&bo->list_kref, ttm_bo_release_list);
        return ret;
}

void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
{
        while (ttm_bo_swapout(&bdev->glob->shrink) == 0)
                ;
}
EXPORT_SYMBOL(ttm_bo_swapout_all);