/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include "drmP.h"
#include "radeon_drm.h"
#include "radeon.h"
#include "radeon_reg.h"

/*
 * Common GART table functions.
 */
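/**
 * radeon_gart_table_ram_alloc - allocate the GART page table in system memory
 * @rdev: radeon_device pointer
 *
 * Allocate a coherent DMA buffer for the GART page table and clear it.
 * On RS400/RS480/RS690/RS740 the CPU mapping is set uncached, since these
 * IGPs read the table directly from system memory.
 * Returns 0 on success, negative error code on failure.
 */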
int radeon_gart_table_ram_alloc(struct radeon_device *rdev)
{
	void *ptr;

	ptr = pci_alloc_consistent(rdev->pdev, rdev->gart.table_size,
				   &rdev->gart.table_addr);
	if (ptr == NULL) {
		return -ENOMEM;
	}
#ifdef CONFIG_X86
	if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480 ||
	    rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) {
		set_memory_uc((unsigned long)ptr,
			      rdev->gart.table_size >> PAGE_SHIFT);
	}
#endif
	rdev->gart.ptr = ptr;
	memset((void *)rdev->gart.ptr, 0, rdev->gart.table_size);
	return 0;
}

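/**
 * radeon_gart_table_ram_free - free the system memory GART page table
 * @rdev: radeon_device pointer
 *
 * Restore the cached CPU mapping on RS400/RS480/RS690/RS740 and release the
 * coherent DMA buffer holding the GART page table.
 */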
void radeon_gart_table_ram_free(struct radeon_device *rdev)
{
	if (rdev->gart.ptr == NULL) {
		return;
	}
#ifdef CONFIG_X86
	if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480 ||
	    rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) {
		set_memory_wb((unsigned long)rdev->gart.ptr,
			      rdev->gart.table_size >> PAGE_SHIFT);
	}
#endif
	pci_free_consistent(rdev->pdev, rdev->gart.table_size,
			    (void *)rdev->gart.ptr,
			    rdev->gart.table_addr);
	rdev->gart.ptr = NULL;
	rdev->gart.table_addr = 0;
}

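/**
 * radeon_gart_table_vram_alloc - allocate the GART page table BO in VRAM
 * @rdev: radeon_device pointer
 *
 * Create the buffer object that backs the GART page table in VRAM, if it
 * does not exist yet.  Returns 0 on success, negative error code on failure.
 */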
int radeon_gart_table_vram_alloc(struct radeon_device *rdev)
{
	int r;

	if (rdev->gart.robj == NULL) {
		r = radeon_bo_create(rdev, rdev->gart.table_size,
				     PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
				     &rdev->gart.robj);
		if (r) {
			return r;
		}
	}
	return 0;
}

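/**
 * radeon_gart_table_vram_pin - pin the GART page table BO in VRAM
 * @rdev: radeon_device pointer
 *
 * Pin the page table BO into VRAM, map it for CPU access and record its GPU
 * address.  Returns 0 on success, negative error code on failure.
 */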
int radeon_gart_table_vram_pin(struct radeon_device *rdev)
{
	uint64_t gpu_addr;
	int r;

	r = radeon_bo_reserve(rdev->gart.robj, false);
	if (unlikely(r != 0))
		return r;
	r = radeon_bo_pin(rdev->gart.robj,
			  RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
	if (r) {
		radeon_bo_unreserve(rdev->gart.robj);
		return r;
	}
	r = radeon_bo_kmap(rdev->gart.robj, &rdev->gart.ptr);
	if (r)
		radeon_bo_unpin(rdev->gart.robj);
	radeon_bo_unreserve(rdev->gart.robj);
	rdev->gart.table_addr = gpu_addr;
	return r;
}

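/**
 * radeon_gart_table_vram_unpin - unpin the GART page table BO
 * @rdev: radeon_device pointer
 *
 * Unmap and unpin the page table BO and drop the CPU pointer to it.
 */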
void radeon_gart_table_vram_unpin(struct radeon_device *rdev)
{
	int r;

	if (rdev->gart.robj == NULL) {
		return;
	}
	r = radeon_bo_reserve(rdev->gart.robj, false);
	if (likely(r == 0)) {
		radeon_bo_kunmap(rdev->gart.robj);
		radeon_bo_unpin(rdev->gart.robj);
		radeon_bo_unreserve(rdev->gart.robj);
		rdev->gart.ptr = NULL;
	}
}

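/**
 * radeon_gart_table_vram_free - free the GART page table BO
 * @rdev: radeon_device pointer
 *
 * Unpin the page table if necessary and drop the reference to its BO.
 */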
void radeon_gart_table_vram_free(struct radeon_device *rdev)
{
	if (rdev->gart.robj == NULL) {
		return;
	}
	radeon_gart_table_vram_unpin(rdev);
	radeon_bo_unref(&rdev->gart.robj);
}

/*
 * Common gart functions.
 */
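/**
 * radeon_gart_unbind - unbind pages from the GART
 * @rdev: radeon_device pointer
 * @offset: byte offset into the GART aperture
 * @pages: number of CPU pages to unbind
 *
 * Point the affected GART entries back at the dummy page and flush the
 * GART TLB.
 */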
void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
			int pages)
{
	unsigned t;
	unsigned p;
	int i, j;
	u64 page_base;

	if (!rdev->gart.ready) {
		WARN(1, "trying to unbind memory from uninitialized GART!\n");
		return;
	}
	t = offset / RADEON_GPU_PAGE_SIZE;
	p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
	for (i = 0; i < pages; i++, p++) {
		if (rdev->gart.pages[p]) {
			rdev->gart.pages[p] = NULL;
			rdev->gart.pages_addr[p] = rdev->dummy_page.addr;
			page_base = rdev->gart.pages_addr[p];
			for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
				if (rdev->gart.ptr) {
					radeon_gart_set_page(rdev, t, page_base);
				}
				page_base += RADEON_GPU_PAGE_SIZE;
			}
		}
	}
	mb();
	radeon_gart_tlb_flush(rdev);
}

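/**
 * radeon_gart_bind - bind pages into the GART
 * @rdev: radeon_device pointer
 * @offset: byte offset into the GART aperture
 * @pages: number of CPU pages to bind
 * @pagelist: pages to bind
 * @dma_addr: DMA addresses of the pages
 *
 * Write the DMA addresses into the GART entries covering @offset and flush
 * the GART TLB.  Returns 0 on success, -EINVAL if the GART is not ready.
 */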
int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
		     int pages, struct page **pagelist, dma_addr_t *dma_addr)
{
	unsigned t;
	unsigned p;
	uint64_t page_base;
	int i, j;

	if (!rdev->gart.ready) {
		WARN(1, "trying to bind memory to uninitialized GART!\n");
		return -EINVAL;
	}
	t = offset / RADEON_GPU_PAGE_SIZE;
	p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);

	for (i = 0; i < pages; i++, p++) {
		rdev->gart.pages_addr[p] = dma_addr[i];
		rdev->gart.pages[p] = pagelist[i];
		if (rdev->gart.ptr) {
			page_base = rdev->gart.pages_addr[p];
			for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
				radeon_gart_set_page(rdev, t, page_base);
				page_base += RADEON_GPU_PAGE_SIZE;
			}
		}
	}
	mb();
	radeon_gart_tlb_flush(rdev);
	return 0;
}

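/**
 * radeon_gart_restore - rewrite the whole GART table
 * @rdev: radeon_device pointer
 *
 * Replay all cached page addresses into the hardware GART table and flush
 * the GART TLB.
 */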
void radeon_gart_restore(struct radeon_device *rdev)
{
	int i, j, t;
	u64 page_base;

	if (!rdev->gart.ptr) {
		return;
	}
	for (i = 0, t = 0; i < rdev->gart.num_cpu_pages; i++) {
		page_base = rdev->gart.pages_addr[i];
		for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
			radeon_gart_set_page(rdev, t, page_base);
			page_base += RADEON_GPU_PAGE_SIZE;
		}
	}
	mb();
	radeon_gart_tlb_flush(rdev);
}

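/**
 * radeon_gart_init - init the driver side GART bookkeeping
 * @rdev: radeon_device pointer
 *
 * Allocate the dummy page and the CPU side arrays tracking which page and
 * which DMA address back each GART entry; every entry initially points at
 * the dummy page.  Returns 0 on success, negative error code on failure.
 */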
int radeon_gart_init(struct radeon_device *rdev)
{
	int r, i;

	if (rdev->gart.pages) {
		return 0;
	}
	/* We need PAGE_SIZE >= RADEON_GPU_PAGE_SIZE */
	if (PAGE_SIZE < RADEON_GPU_PAGE_SIZE) {
		DRM_ERROR("Page size is smaller than GPU page size!\n");
		return -EINVAL;
	}
	r = radeon_dummy_page_init(rdev);
	if (r)
		return r;
	/* Compute table size */
	rdev->gart.num_cpu_pages = rdev->mc.gtt_size / PAGE_SIZE;
	rdev->gart.num_gpu_pages = rdev->mc.gtt_size / RADEON_GPU_PAGE_SIZE;
	DRM_INFO("GART: num cpu pages %u, num gpu pages %u\n",
		 rdev->gart.num_cpu_pages, rdev->gart.num_gpu_pages);
	/* Allocate pages table */
	rdev->gart.pages = kzalloc(sizeof(void *) * rdev->gart.num_cpu_pages,
				   GFP_KERNEL);
	if (rdev->gart.pages == NULL) {
		radeon_gart_fini(rdev);
		return -ENOMEM;
	}
	rdev->gart.pages_addr = kzalloc(sizeof(dma_addr_t) *
					rdev->gart.num_cpu_pages, GFP_KERNEL);
	if (rdev->gart.pages_addr == NULL) {
		radeon_gart_fini(rdev);
		return -ENOMEM;
	}
	/* set GART entry to point to the dummy page by default */
	for (i = 0; i < rdev->gart.num_cpu_pages; i++) {
		rdev->gart.pages_addr[i] = rdev->dummy_page.addr;
	}
	return 0;
}

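/**
 * radeon_gart_fini - tear down the driver side GART bookkeeping
 * @rdev: radeon_device pointer
 *
 * Unbind any pages still bound, free the tracking arrays and the dummy page.
 */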
void radeon_gart_fini(struct radeon_device *rdev)
{
	if (rdev->gart.pages && rdev->gart.pages_addr && rdev->gart.ready) {
		/* unbind pages */
		radeon_gart_unbind(rdev, 0, rdev->gart.num_cpu_pages);
	}
	rdev->gart.ready = false;
	kfree(rdev->gart.pages);
	kfree(rdev->gart.pages_addr);
	rdev->gart.pages = NULL;
	rdev->gart.pages_addr = NULL;

	radeon_dummy_page_fini(rdev);
}

/*
 * vm helpers
 *
 * TODO: bind a default page at vm initialization for default address
 */
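/**
 * radeon_vm_manager_init - init the global VM manager
 * @rdev: radeon_device pointer
 *
 * Allocate the sub-allocator that backs the per-VM page tables (sized for
 * two full page tables) and let the asic specific code bring up its VM
 * block.  Returns 0 on success, negative error code on failure.
 */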
int radeon_vm_manager_init(struct radeon_device *rdev)
{
	int r;

	rdev->vm_manager.enabled = false;

	/* mark first vm as always in use, it's the system one */
	/* allocate enough for 2 full VM pts */
	r = radeon_sa_bo_manager_init(rdev, &rdev->vm_manager.sa_manager,
				      rdev->vm_manager.max_pfn * 8 * 2,
				      RADEON_GEM_DOMAIN_VRAM);
	if (r) {
		dev_err(rdev->dev, "failed to allocate vm bo (%dKB)\n",
			(rdev->vm_manager.max_pfn * 8) >> 10);
		return r;
	}

	r = rdev->vm_manager.funcs->init(rdev);
	if (r == 0)
		rdev->vm_manager.enabled = true;

	return r;
}

/* cs_mutex must be locked */
static void radeon_vm_unbind_locked(struct radeon_device *rdev,
				    struct radeon_vm *vm)
{
	struct radeon_bo_va *bo_va;

	if (vm->id == -1) {
		return;
	}

	/* wait for vm use to end */
	if (vm->fence) {
		radeon_fence_wait(vm->fence, false);
		radeon_fence_unref(&vm->fence);
	}

	/* hw unbind */
	rdev->vm_manager.funcs->unbind(rdev, vm);
	rdev->vm_manager.use_bitmap &= ~(1 << vm->id);
	list_del_init(&vm->list);
	vm->id = -1;
	radeon_sa_bo_free(rdev, &vm->sa_bo);
	vm->pt = NULL;

	list_for_each_entry(bo_va, &vm->va, vm_list) {
		bo_va->valid = false;
	}
}

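/**
 * radeon_vm_manager_fini - tear down the global VM manager
 * @rdev: radeon_device pointer
 *
 * Suspend the VM manager, shut down the asic specific VM block and free
 * the page table sub-allocator.
 */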
void radeon_vm_manager_fini(struct radeon_device *rdev)
{
	if (rdev->vm_manager.sa_manager.bo == NULL)
		return;
	radeon_vm_manager_suspend(rdev);
	rdev->vm_manager.funcs->fini(rdev);
	radeon_sa_bo_manager_fini(rdev, &rdev->vm_manager.sa_manager);
	rdev->vm_manager.enabled = false;
}

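/**
 * radeon_vm_manager_start - start the page table sub-allocator
 * @rdev: radeon_device pointer
 *
 * Pin the BO backing the page table sub-allocator so VMs can be bound
 * again.  Returns 0 on success, negative error code on failure.
 */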
int radeon_vm_manager_start(struct radeon_device *rdev)
{
	if (rdev->vm_manager.sa_manager.bo == NULL) {
		return -EINVAL;
	}
	return radeon_sa_bo_manager_start(rdev, &rdev->vm_manager.sa_manager);
}

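/**
 * radeon_vm_manager_suspend - suspend the VM manager
 * @rdev: radeon_device pointer
 *
 * Unbind all active VMs, shut down the asic specific VM block and unpin the
 * BO backing the page table sub-allocator.
 */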
int radeon_vm_manager_suspend(struct radeon_device *rdev)
{
	struct radeon_vm *vm, *tmp;

	radeon_mutex_lock(&rdev->cs_mutex);
	/* unbind all active vm */
	list_for_each_entry_safe(vm, tmp, &rdev->vm_manager.lru_vm, list) {
		radeon_vm_unbind_locked(rdev, vm);
	}
	rdev->vm_manager.funcs->fini(rdev);
	radeon_mutex_unlock(&rdev->cs_mutex);
	return radeon_sa_bo_manager_suspend(rdev, &rdev->vm_manager.sa_manager);
}

/* cs_mutex must be locked */
void radeon_vm_unbind(struct radeon_device *rdev, struct radeon_vm *vm)
{
	mutex_lock(&vm->mutex);
	radeon_vm_unbind_locked(rdev, vm);
	mutex_unlock(&vm->mutex);
}

/* cs_mutex and vm->mutex must be locked */
int radeon_vm_bind(struct radeon_device *rdev, struct radeon_vm *vm)
{
	struct radeon_vm *vm_evict;
	unsigned i;
	int id = -1, r;

	if (vm == NULL) {
		return -EINVAL;
	}

	if (vm->id != -1) {
		/* update lru */
		list_del_init(&vm->list);
		list_add_tail(&vm->list, &rdev->vm_manager.lru_vm);
		return 0;
	}

retry:
	r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager, &vm->sa_bo,
			     RADEON_GPU_PAGE_ALIGN(vm->last_pfn * 8),
			     RADEON_GPU_PAGE_SIZE);
	if (r) {
		if (list_empty(&rdev->vm_manager.lru_vm)) {
			return r;
		}
		vm_evict = list_first_entry(&rdev->vm_manager.lru_vm, struct radeon_vm, list);
		radeon_vm_unbind(rdev, vm_evict);
		goto retry;
	}
	vm->pt = rdev->vm_manager.sa_manager.cpu_ptr;
	vm->pt += (vm->sa_bo.offset >> 3);
	vm->pt_gpu_addr = rdev->vm_manager.sa_manager.gpu_addr;
	vm->pt_gpu_addr += vm->sa_bo.offset;
	memset(vm->pt, 0, RADEON_GPU_PAGE_ALIGN(vm->last_pfn * 8));

retry_id:
	/* search for free vm */
	for (i = 0; i < rdev->vm_manager.nvm; i++) {
		if (!(rdev->vm_manager.use_bitmap & (1 << i))) {
			id = i;
			break;
		}
	}
	/* evict vm if necessary */
	if (id == -1) {
		vm_evict = list_first_entry(&rdev->vm_manager.lru_vm, struct radeon_vm, list);
		radeon_vm_unbind(rdev, vm_evict);
		goto retry_id;
	}

	/* do hw bind */
	r = rdev->vm_manager.funcs->bind(rdev, vm, id);
	if (r) {
		radeon_sa_bo_free(rdev, &vm->sa_bo);
		return r;
	}
	rdev->vm_manager.use_bitmap |= 1 << id;
	vm->id = id;
	list_add_tail(&vm->list, &rdev->vm_manager.lru_vm);
	return radeon_vm_bo_update_pte(rdev, vm, rdev->ib_pool.sa_manager.bo,
				       &rdev->ib_pool.sa_manager.bo->tbo.mem);
}

/* object has to be reserved */
int radeon_vm_bo_add(struct radeon_device *rdev,
		     struct radeon_vm *vm,
		     struct radeon_bo *bo,
		     uint64_t offset,
		     uint32_t flags)
{
	struct radeon_bo_va *bo_va, *tmp;
	struct list_head *head;
	uint64_t size = radeon_bo_size(bo), last_offset = 0;
	unsigned last_pfn;

	bo_va = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL);
	if (bo_va == NULL) {
		return -ENOMEM;
	}
	bo_va->vm = vm;
	bo_va->bo = bo;
	bo_va->soffset = offset;
	bo_va->eoffset = offset + size;
	bo_va->flags = flags;
	bo_va->valid = false;
	INIT_LIST_HEAD(&bo_va->bo_list);
	INIT_LIST_HEAD(&bo_va->vm_list);
	/* make sure object fits at this offset */
	if (bo_va->soffset >= bo_va->eoffset) {
		kfree(bo_va);
		return -EINVAL;
	}

	last_pfn = bo_va->eoffset / RADEON_GPU_PAGE_SIZE;
	if (last_pfn > rdev->vm_manager.max_pfn) {
		kfree(bo_va);
		dev_err(rdev->dev, "va above limit (0x%08X > 0x%08X)\n",
			last_pfn, rdev->vm_manager.max_pfn);
		return -EINVAL;
	}

	mutex_lock(&vm->mutex);
	if (last_pfn > vm->last_pfn) {
		/* release mutex and lock in right order */
		mutex_unlock(&vm->mutex);
		radeon_mutex_lock(&rdev->cs_mutex);
		mutex_lock(&vm->mutex);
		/* and check again */
		if (last_pfn > vm->last_pfn) {
			/* grow va space 32M by 32M */
			unsigned align = ((32 << 20) >> 12) - 1;
			radeon_vm_unbind_locked(rdev, vm);
			vm->last_pfn = (last_pfn + align) & ~align;
		}
		radeon_mutex_unlock(&rdev->cs_mutex);
	}
	head = &vm->va;
	last_offset = 0;
	list_for_each_entry(tmp, &vm->va, vm_list) {
		if (bo_va->soffset >= last_offset && bo_va->eoffset < tmp->soffset) {
			/* bo can be added before this one */
			break;
		}
		if (bo_va->soffset >= tmp->soffset && bo_va->soffset < tmp->eoffset) {
			/* bo and tmp overlap, invalid offset */
			dev_err(rdev->dev, "bo %p va 0x%08X conflict with (bo %p 0x%08X 0x%08X)\n",
				bo, (unsigned)bo_va->soffset, tmp->bo,
				(unsigned)tmp->soffset, (unsigned)tmp->eoffset);
			kfree(bo_va);
			mutex_unlock(&vm->mutex);
			return -EINVAL;
		}
		last_offset = tmp->eoffset;
		head = &tmp->vm_list;
	}
	list_add(&bo_va->vm_list, head);
	list_add_tail(&bo_va->bo_list, &bo->va);
	mutex_unlock(&vm->mutex);
	return 0;
}

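/**
 * radeon_vm_get_addr - look up the GPU address backing a BO page
 * @rdev: radeon_device pointer
 * @mem: ttm placement of the BO
 * @pfn: GPU page number inside the BO
 *
 * For VRAM placements return the VRAM address plus the asic specific offset;
 * for GTT placements return the system page address found through the GART
 * table.  Other placements return 0.
 */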
static u64 radeon_vm_get_addr(struct radeon_device *rdev,
			      struct ttm_mem_reg *mem,
			      unsigned pfn)
{
	u64 addr = 0;

	switch (mem->mem_type) {
	case TTM_PL_VRAM:
		addr = (mem->start << PAGE_SHIFT);
		addr += pfn * RADEON_GPU_PAGE_SIZE;
		addr += rdev->vm_manager.vram_base_offset;
		break;
	case TTM_PL_TT:
		/* offset inside page table */
		addr = mem->start << PAGE_SHIFT;
		addr += pfn * RADEON_GPU_PAGE_SIZE;
		addr = addr >> PAGE_SHIFT;
		/* page table offset */
		addr = rdev->gart.pages_addr[addr];
		/* in case cpu page size != gpu page size */
		addr += (pfn * RADEON_GPU_PAGE_SIZE) & (~PAGE_MASK);
		break;
	default:
		break;
	}
	return addr;
}

/* object has to be reserved & cs_mutex and vm->mutex must be locked */
int radeon_vm_bo_update_pte(struct radeon_device *rdev,
			    struct radeon_vm *vm,
			    struct radeon_bo *bo,
			    struct ttm_mem_reg *mem)
{
	struct radeon_bo_va *bo_va;
	unsigned ngpu_pages, i;
	uint64_t addr = 0, pfn;
	uint32_t flags;

	/* nothing to do if vm isn't bound */
	if (vm->id == -1)
		return 0;

	bo_va = radeon_bo_va(bo, vm);
	if (bo_va == NULL) {
		dev_err(rdev->dev, "bo %p not in vm %p\n", bo, vm);
		return -EINVAL;
	}

	if (bo_va->valid)
		return 0;

	ngpu_pages = radeon_bo_ngpu_pages(bo);
	bo_va->flags &= ~RADEON_VM_PAGE_VALID;
	bo_va->flags &= ~RADEON_VM_PAGE_SYSTEM;
	if (mem) {
		if (mem->mem_type != TTM_PL_SYSTEM) {
			bo_va->flags |= RADEON_VM_PAGE_VALID;
			bo_va->valid = true;
		}
		if (mem->mem_type == TTM_PL_TT) {
			bo_va->flags |= RADEON_VM_PAGE_SYSTEM;
		}
	}
	pfn = bo_va->soffset / RADEON_GPU_PAGE_SIZE;
	flags = rdev->vm_manager.funcs->page_flags(rdev, bo_va->vm, bo_va->flags);
	for (i = 0, addr = 0; i < ngpu_pages; i++) {
		if (mem && bo_va->valid) {
			addr = radeon_vm_get_addr(rdev, mem, i);
		}
		rdev->vm_manager.funcs->set_page(rdev, bo_va->vm, i + pfn, addr, flags);
	}
	rdev->vm_manager.funcs->tlb_flush(rdev, bo_va->vm);
	return 0;
}

/* object has to be reserved */
int radeon_vm_bo_rmv(struct radeon_device *rdev,
		     struct radeon_vm *vm,
		     struct radeon_bo *bo)
{
	struct radeon_bo_va *bo_va;

	bo_va = radeon_bo_va(bo, vm);
	if (bo_va == NULL)
		return 0;

	radeon_mutex_lock(&rdev->cs_mutex);
	mutex_lock(&vm->mutex);
	radeon_vm_bo_update_pte(rdev, vm, bo, NULL);
	radeon_mutex_unlock(&rdev->cs_mutex);
	list_del(&bo_va->vm_list);
	mutex_unlock(&vm->mutex);
	list_del(&bo_va->bo_list);

	kfree(bo_va);
	return 0;
}

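/**
 * radeon_vm_bo_invalidate - mark all mappings of a BO invalid
 * @rdev: radeon_device pointer
 * @bo: the buffer object
 *
 * Mark every per-VM mapping of @bo invalid so its page table entries are
 * rebuilt by the next radeon_vm_bo_update_pte().
 */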
void radeon_vm_bo_invalidate(struct radeon_device *rdev,
			     struct radeon_bo *bo)
{
	struct radeon_bo_va *bo_va;

	BUG_ON(!atomic_read(&bo->tbo.reserved));
	list_for_each_entry(bo_va, &bo->va, bo_list) {
		bo_va->valid = false;
	}
}

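/**
 * radeon_vm_init - initialize a per process VM
 * @rdev: radeon_device pointer
 * @vm: the VM to initialize
 *
 * Set up the VM bookkeeping and map the IB pool BO read only at virtual
 * address 0.  Returns 0 on success, negative error code on failure.
 */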
int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
{
	int r;

	vm->id = -1;
	vm->fence = NULL;
	mutex_init(&vm->mutex);
	INIT_LIST_HEAD(&vm->list);
	INIT_LIST_HEAD(&vm->va);
	/* SI requires equal sized PTs for all VMs, so always set
	 * last_pfn to max_pfn.  Cayman allows variable sized PTs so
	 * we can grow them as needed.  Once we switch to two level
	 * PTs we can unify this again.
	 */
	if (rdev->family >= CHIP_TAHITI)
		vm->last_pfn = rdev->vm_manager.max_pfn;
	else
		vm->last_pfn = 0;
	/* map the ib pool buffer at 0 in virtual address space, set
	 * read only
	 */
	r = radeon_vm_bo_add(rdev, vm, rdev->ib_pool.sa_manager.bo, 0,
			     RADEON_VM_PAGE_READABLE | RADEON_VM_PAGE_SNOOPED);
	return r;
}

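/**
 * radeon_vm_fini - tear down a per process VM
 * @rdev: radeon_device pointer
 * @vm: the VM to tear down
 *
 * Unbind the VM from the hardware and free all of its virtual address
 * mappings, including the implicit IB pool mapping.
 */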
void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
{
	struct radeon_bo_va *bo_va, *tmp;
	int r;

	radeon_mutex_lock(&rdev->cs_mutex);
	mutex_lock(&vm->mutex);
	radeon_vm_unbind_locked(rdev, vm);
	radeon_mutex_unlock(&rdev->cs_mutex);

	/* remove all bo */
	r = radeon_bo_reserve(rdev->ib_pool.sa_manager.bo, false);
	if (!r) {
		bo_va = radeon_bo_va(rdev->ib_pool.sa_manager.bo, vm);
		list_del_init(&bo_va->bo_list);
		list_del_init(&bo_va->vm_list);
		radeon_bo_unreserve(rdev->ib_pool.sa_manager.bo);
		kfree(bo_va);
	}
	if (!list_empty(&vm->va)) {
		dev_err(rdev->dev, "still active bo inside vm\n");
	}
	list_for_each_entry_safe(bo_va, tmp, &vm->va, vm_list) {
		list_del_init(&bo_va->vm_list);
		r = radeon_bo_reserve(bo_va->bo, false);
		if (!r) {
			list_del_init(&bo_va->bo_list);
			radeon_bo_unreserve(bo_va->bo);
			kfree(bo_va);
		}
	}
	mutex_unlock(&vm->mutex);
}