/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */

#include <linux/iosys-map.h>
#include <linux/pci.h>

#include <drm/drm_device.h>
#include <drm/drm_file.h>
#include <drm/drm_gem_ttm_helper.h>
#include <drm/radeon_drm.h>

#include "radeon.h"
#include "radeon_prime.h"

struct dma_buf *radeon_gem_prime_export(struct drm_gem_object *gobj,
					int flags);
struct sg_table *radeon_gem_prime_get_sg_table(struct drm_gem_object *obj);
int radeon_gem_prime_pin(struct drm_gem_object *obj);
void radeon_gem_prime_unpin(struct drm_gem_object *obj);

const struct drm_gem_object_funcs radeon_gem_object_funcs;

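/**
 * radeon_gem_fault - handle a page fault on a GEM object's VMA
 * @vmf: fault information
 *
 * Takes the read side of pm.mclk_lock, which appears intended to
 * serialize faults against memory reclocking, then defers to the TTM
 * VM helpers to validate the BO and fill the page tables.
 */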
static vm_fault_t radeon_gem_fault(struct vm_fault *vmf)
{
	struct ttm_buffer_object *bo = vmf->vma->vm_private_data;
	struct radeon_device *rdev = radeon_get_rdev(bo->bdev);
	vm_fault_t ret;

	down_read(&rdev->pm.mclk_lock);

	ret = ttm_bo_vm_reserve(bo, vmf);
	if (ret)
		goto unlock_mclk;

	ret = radeon_bo_fault_reserve_notify(bo);
	if (ret)
		goto unlock_resv;

	ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
				       TTM_BO_VM_NUM_PREFAULT);
	if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
		goto unlock_mclk;

unlock_resv:
	dma_resv_unlock(bo->base.resv);

unlock_mclk:
	up_read(&rdev->pm.mclk_lock);
	return ret;
}

static const struct vm_operations_struct radeon_gem_vm_ops = {
	.fault = radeon_gem_fault,
	.open = ttm_bo_vm_open,
	.close = ttm_bo_vm_close,
	.access = ttm_bo_vm_access,
};

static void radeon_gem_object_free(struct drm_gem_object *gobj)
{
	struct radeon_bo *robj = gem_to_radeon_bo(gobj);

	if (robj) {
		radeon_mn_unregister(robj);
		radeon_bo_unref(&robj);
	}
}

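/**
 * radeon_gem_object_create - allocate a BO and wrap it in a GEM object
 * @rdev: radeon device
 * @size: buffer size in bytes
 * @alignment: requested alignment in bytes, raised to at least PAGE_SIZE
 * @initial_domain: preferred placement (VRAM/GTT/CPU)
 * @flags: radeon BO creation flags
 * @kernel: whether this is a kernel-internal allocation
 * @obj: used to return the new GEM object
 *
 * A VRAM allocation that fails is retried with GTT added to the
 * allowed domains before giving up.
 *
 * Returns 0 on success, negative error code on failure.
 */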
int radeon_gem_object_create(struct radeon_device *rdev, unsigned long size,
			     int alignment, int initial_domain,
			     u32 flags, bool kernel,
			     struct drm_gem_object **obj)
{
	struct radeon_bo *robj;
	unsigned long max_size;
	int r;

	*obj = NULL;
	/* At least align on page size */
	if (alignment < PAGE_SIZE) {
		alignment = PAGE_SIZE;
	}

	/* Maximum BO size is the unpinned GTT size, since we use the GTT to
	 * handle VRAM to system pool migrations.
	 */
	max_size = rdev->mc.gtt_size - rdev->gart_pin_size;
	if (size > max_size) {
		DRM_DEBUG("Allocation size %ldMb bigger than %ldMb limit\n",
			  size >> 20, max_size >> 20);
		return -ENOMEM;
	}

retry:
	r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain,
			     flags, NULL, NULL, &robj);
	if (r) {
		if (r != -ERESTARTSYS) {
			if (initial_domain == RADEON_GEM_DOMAIN_VRAM) {
				initial_domain |= RADEON_GEM_DOMAIN_GTT;
				goto retry;
			}
			DRM_ERROR("Failed to allocate GEM object (%ld, %d, %u, %d)\n",
				  size, initial_domain, alignment, r);
		}
		return r;
	}
	*obj = &robj->tbo.base;
	(*obj)->funcs = &radeon_gem_object_funcs;
	robj->pid = task_pid_nr(current);

	mutex_lock(&rdev->gem.mutex);
	list_add_tail(&robj->list, &rdev->gem.objects);
	mutex_unlock(&rdev->gem.mutex);

	return 0;
}

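/**
 * radeon_gem_set_domain - handle a domain change request for a GEM object
 * @gobj: GEM object
 * @rdomain: read domains requested by userspace
 * @wdomain: write domain requested by userspace
 *
 * The write domain takes precedence over the read domains. For the CPU
 * domain this only waits for pending GPU access to finish; migration to
 * VRAM is refused for BOs shared via dma-buf.
 */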
static int radeon_gem_set_domain(struct drm_gem_object *gobj,
				 uint32_t rdomain, uint32_t wdomain)
{
	struct radeon_bo *robj;
	uint32_t domain;
	long r;

	/* FIXME: reimplement */
	robj = gem_to_radeon_bo(gobj);
	/* work out where to validate the buffer to */
	domain = wdomain;
	if (!domain) {
		domain = rdomain;
	}
	if (!domain) {
		/* Do nothing */
		pr_warn("Set domain without domain!\n");
		return 0;
	}
	if (domain == RADEON_GEM_DOMAIN_CPU) {
		/* Asking for CPU access, wait for the object to be idle */
		r = dma_resv_wait_timeout(robj->tbo.base.resv,
					  DMA_RESV_USAGE_BOOKKEEP,
					  true, 30 * HZ);
		if (!r)
			r = -EBUSY;

		if (r < 0 && r != -EINTR) {
			pr_err("Failed to wait for object: %li\n", r);
			return r;
		}
	}
	if (domain == RADEON_GEM_DOMAIN_VRAM && robj->prime_shared_count) {
		/* A BO that is associated with a dma-buf cannot be sensibly migrated to VRAM */
		return -EINVAL;
	}
	return 0;
}

int radeon_gem_init(struct radeon_device *rdev)
{
	INIT_LIST_HEAD(&rdev->gem.objects);
	return 0;
}

void radeon_gem_fini(struct radeon_device *rdev)
{
	radeon_bo_force_delete(rdev);
}

/*
 * Called from drm_gem_handle_create(), which runs in both the GEM
 * create and open ioctl paths.
 */
static int radeon_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
{
	struct radeon_bo *rbo = gem_to_radeon_bo(obj);
	struct radeon_device *rdev = rbo->rdev;
	struct radeon_fpriv *fpriv = file_priv->driver_priv;
	struct radeon_vm *vm = &fpriv->vm;
	struct radeon_bo_va *bo_va;
	int r;

	if ((rdev->family < CHIP_CAYMAN) ||
	    (!rdev->accel_working)) {
		return 0;
	}

	r = radeon_bo_reserve(rbo, false);
	if (r) {
		return r;
	}

	bo_va = radeon_vm_bo_find(vm, rbo);
	if (!bo_va) {
		bo_va = radeon_vm_bo_add(rdev, vm, rbo);
	} else {
		++bo_va->ref_count;
	}
	radeon_bo_unreserve(rbo);

	return 0;
}

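/**
 * radeon_gem_object_close - drop a file's VM mapping of a GEM object
 * @obj: GEM object being closed
 * @file_priv: DRM file private of the closing client
 *
 * Drops the per-file bo_va reference and removes the VM mapping once
 * the last reference is gone. If the BO cannot be reserved, the bo_va
 * is leaked rather than risk corrupting the VM.
 */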
static void radeon_gem_object_close(struct drm_gem_object *obj,
				    struct drm_file *file_priv)
{
	struct radeon_bo *rbo = gem_to_radeon_bo(obj);
	struct radeon_device *rdev = rbo->rdev;
	struct radeon_fpriv *fpriv = file_priv->driver_priv;
	struct radeon_vm *vm = &fpriv->vm;
	struct radeon_bo_va *bo_va;
	int r;

	if ((rdev->family < CHIP_CAYMAN) ||
	    (!rdev->accel_working)) {
		return;
	}

	r = radeon_bo_reserve(rbo, true);
	if (r) {
		dev_err(rdev->dev, "leaking bo va because we failed to reserve bo (%d)\n", r);
		return;
	}
	bo_va = radeon_vm_bo_find(vm, rbo);
	if (bo_va) {
		if (--bo_va->ref_count == 0) {
			radeon_vm_bo_rmv(rdev, bo_va);
		}
	}
	radeon_bo_unreserve(rbo);
}

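/**
 * radeon_gem_handle_lockup - translate a GPU lockup into an ioctl result
 * @rdev: radeon device
 * @r: error code from the preceding operation
 *
 * -EDEADLK signals a detected GPU lockup: trigger a GPU reset and, if
 * it succeeds, ask userspace to retry the ioctl with -EAGAIN.
 */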
static int radeon_gem_handle_lockup(struct radeon_device *rdev, int r)
{
	if (r == -EDEADLK) {
		r = radeon_gpu_reset(rdev);
		if (!r)
			r = -EAGAIN;
	}
	return r;
}

static int radeon_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	struct radeon_bo *bo = gem_to_radeon_bo(obj);
	struct radeon_device *rdev = radeon_get_rdev(bo->tbo.bdev);

	if (radeon_ttm_tt_has_userptr(rdev, bo->tbo.ttm))
		return -EPERM;

	return drm_gem_ttm_mmap(obj, vma);
}

const struct drm_gem_object_funcs radeon_gem_object_funcs = {
	.free = radeon_gem_object_free,
	.open = radeon_gem_object_open,
	.close = radeon_gem_object_close,
	.export = radeon_gem_prime_export,
	.pin = radeon_gem_prime_pin,
	.unpin = radeon_gem_prime_unpin,
	.get_sg_table = radeon_gem_prime_get_sg_table,
	.vmap = drm_gem_ttm_vmap,
	.vunmap = drm_gem_ttm_vunmap,
	.mmap = radeon_gem_object_mmap,
	.vm_ops = &radeon_gem_vm_ops,
};

/*
 * GEM ioctls.
 */
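/**
 * radeon_gem_info_ioctl - report memory sizes to userspace
 * @dev: DRM device
 * @data: ioctl argument (struct drm_radeon_gem_info)
 * @filp: DRM file private
 *
 * Visible VRAM and GART sizes are reported with the currently pinned
 * amounts subtracted, i.e. as the space still usable for new BOs.
 */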
int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_info *args = data;
	struct ttm_resource_manager *man;

	man = ttm_manager_type(&rdev->mman.bdev, TTM_PL_VRAM);

	args->vram_size = (u64)man->size << PAGE_SHIFT;
	args->vram_visible = rdev->mc.visible_vram_size;
	args->vram_visible -= rdev->vram_pin_size;
	args->gart_size = rdev->mc.gtt_size;
	args->gart_size -= rdev->gart_pin_size;

	return 0;
}

int radeon_gem_pread_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *filp)
{
	/* TODO: implement */
	DRM_ERROR("unimplemented %s\n", __func__);
	return -ENOSYS;
}

int radeon_gem_pwrite_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	/* TODO: implement */
	DRM_ERROR("unimplemented %s\n", __func__);
	return -ENOSYS;
}

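/**
 * radeon_gem_create_ioctl - allocate a GEM object for userspace
 * @dev: DRM device
 * @data: ioctl argument (struct drm_radeon_gem_create)
 * @filp: DRM file private
 *
 * Rounds the requested size up to a page multiple, allocates the BO
 * and returns a handle for it. Runs under the read side of
 * exclusive_lock so it cannot race with a GPU reset.
 */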
int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_create *args = data;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	down_read(&rdev->exclusive_lock);
	/* create a gem object to contain this object in */
	args->size = roundup(args->size, PAGE_SIZE);
	r = radeon_gem_object_create(rdev, args->size, args->alignment,
				     args->initial_domain, args->flags,
				     false, &gobj);
	if (r) {
		up_read(&rdev->exclusive_lock);
		r = radeon_gem_handle_lockup(rdev, r);
		return r;
	}
	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put(gobj);
	if (r) {
		up_read(&rdev->exclusive_lock);
		r = radeon_gem_handle_lockup(rdev, r);
		return r;
	}
	args->handle = handle;
	up_read(&rdev->exclusive_lock);
	return 0;
}

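/**
 * radeon_gem_userptr_ioctl - wrap anonymous user memory in a GEM object
 * @dev: DRM device
 * @data: ioctl argument (struct drm_radeon_gem_userptr)
 * @filp: DRM file private
 *
 * The address and size must be page aligned. Writable mappings must be
 * anonymous-only and registered with an MMU notifier so the pages can
 * be unmapped safely; read-only mappings are only allowed on R600 and
 * newer. With RADEON_GEM_USERPTR_VALIDATE the pages are validated into
 * GTT up front.
 */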
int radeon_gem_userptr_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *filp)
{
	struct ttm_operation_ctx ctx = { true, false };
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_userptr *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *bo;
	uint32_t handle;
	int r;

	args->addr = untagged_addr(args->addr);

	if (offset_in_page(args->addr | args->size))
		return -EINVAL;

	/* reject unknown flag values */
	if (args->flags & ~(RADEON_GEM_USERPTR_READONLY |
	    RADEON_GEM_USERPTR_ANONONLY | RADEON_GEM_USERPTR_VALIDATE |
	    RADEON_GEM_USERPTR_REGISTER))
		return -EINVAL;

	if (args->flags & RADEON_GEM_USERPTR_READONLY) {
		/* readonly pages not tested on older hardware */
		if (rdev->family < CHIP_R600)
			return -EINVAL;

	} else if (!(args->flags & RADEON_GEM_USERPTR_ANONONLY) ||
		   !(args->flags & RADEON_GEM_USERPTR_REGISTER)) {

		/* if we want to write to it we must require anonymous
		 * memory and install an MMU notifier
		 */
		return -EACCES;
	}

	down_read(&rdev->exclusive_lock);

	/* create a gem object to contain this object in */
	r = radeon_gem_object_create(rdev, args->size, 0,
				     RADEON_GEM_DOMAIN_CPU, 0,
				     false, &gobj);
	if (r)
		goto handle_lockup;

	bo = gem_to_radeon_bo(gobj);
	r = radeon_ttm_tt_set_userptr(rdev, bo->tbo.ttm, args->addr, args->flags);
	if (r)
		goto release_object;

	if (args->flags & RADEON_GEM_USERPTR_REGISTER) {
		r = radeon_mn_register(bo, args->addr);
		if (r)
			goto release_object;
	}

	if (args->flags & RADEON_GEM_USERPTR_VALIDATE) {
		mmap_read_lock(current->mm);
		r = radeon_bo_reserve(bo, true);
		if (r) {
			mmap_read_unlock(current->mm);
			goto release_object;
		}

		radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_GTT);
		r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
		radeon_bo_unreserve(bo);
		mmap_read_unlock(current->mm);
		if (r)
			goto release_object;
	}

	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put(gobj);
	if (r)
		goto handle_lockup;

	args->handle = handle;
	up_read(&rdev->exclusive_lock);
	return 0;

release_object:
	drm_gem_object_put(gobj);

handle_lockup:
	up_read(&rdev->exclusive_lock);
	r = radeon_gem_handle_lockup(rdev, r);

	return r;
}

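/**
 * radeon_gem_set_domain_ioctl - handle a domain change request from userspace
 * @dev: DRM device
 * @data: ioctl argument (struct drm_radeon_gem_set_domain)
 * @filp: DRM file private
 *
 * Currently only waits for pending GPU access when the CPU domain is
 * requested; no actual migration is performed here.
 */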
int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	/* transition the BO to a domain -
	 * just validate the BO into a certain domain */
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_set_domain *args = data;
	struct drm_gem_object *gobj;
	int r;

	/* for now if someone requests domain CPU -
	 * just make sure the buffer is finished with */
	down_read(&rdev->exclusive_lock);

	/* just do a BO wait for now */
	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		up_read(&rdev->exclusive_lock);
		return -ENOENT;
	}

	r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain);

	/* the put may free gobj, so use the local rdev afterwards */
	drm_gem_object_put(gobj);
	up_read(&rdev->exclusive_lock);
	r = radeon_gem_handle_lockup(rdev, r);
	return r;
}

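/**
 * radeon_mode_dumb_mmap - look up the fake mmap offset for a BO
 * @filp: DRM file private
 * @dev: DRM device
 * @handle: GEM handle
 * @offset_p: used to return the mmap offset
 *
 * Userptr BOs cannot be mapped through the GEM offset and yield
 * -EPERM.
 */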
int radeon_mode_dumb_mmap(struct drm_file *filp,
			  struct drm_device *dev,
			  uint32_t handle, uint64_t *offset_p)
{
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;

	gobj = drm_gem_object_lookup(filp, handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);
	if (radeon_ttm_tt_has_userptr(robj->rdev, robj->tbo.ttm)) {
		drm_gem_object_put(gobj);
		return -EPERM;
	}
	*offset_p = radeon_bo_mmap_offset(robj);
	drm_gem_object_put(gobj);
	return 0;
}

int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct drm_radeon_gem_mmap *args = data;

	return radeon_mode_dumb_mmap(filp, dev, args->handle, &args->addr_ptr);
}

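/**
 * radeon_gem_busy_ioctl - non-blocking busy check for a BO
 * @dev: DRM device
 * @data: ioctl argument (struct drm_radeon_gem_busy)
 * @filp: DRM file private
 *
 * Returns -EBUSY if fences are still pending on the reservation object
 * and reports the BO's current placement domain either way.
 */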
int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct drm_radeon_gem_busy *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;
	uint32_t cur_placement = 0;

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	r = dma_resv_test_signaled(robj->tbo.base.resv, DMA_RESV_USAGE_READ);
	if (r == 0)
		r = -EBUSY;
	else
		r = 0;

	cur_placement = READ_ONCE(robj->tbo.resource->mem_type);
	args->domain = radeon_mem_type_to_domain(cur_placement);
	drm_gem_object_put(gobj);
	return r;
}

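/**
 * radeon_gem_wait_idle_ioctl - wait for all GPU access to a BO to finish
 * @dev: DRM device
 * @data: ioctl argument (struct drm_radeon_gem_wait_idle)
 * @filp: DRM file private
 *
 * Waits up to 30 seconds for the BO's fences to signal, then flushes
 * the HDP cache through MMIO if the BO lives in VRAM and the ASIC
 * needs it.
 */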
int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_wait_idle *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r = 0;
	uint32_t cur_placement = 0;
	long ret;

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	ret = dma_resv_wait_timeout(robj->tbo.base.resv, DMA_RESV_USAGE_READ,
				    true, 30 * HZ);
	if (ret == 0)
		r = -EBUSY;
	else if (ret < 0)
		r = ret;

	/* Flush HDP cache via MMIO if necessary */
	cur_placement = READ_ONCE(robj->tbo.resource->mem_type);
	if (rdev->asic->mmio_hdp_flush &&
	    radeon_mem_type_to_domain(cur_placement) == RADEON_GEM_DOMAIN_VRAM)
		robj->rdev->asic->mmio_hdp_flush(rdev);
	drm_gem_object_put(gobj);
	r = radeon_gem_handle_lockup(rdev, r);
	return r;
}

int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct drm_radeon_gem_set_tiling *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r = 0;

	DRM_DEBUG("%d\n", args->handle);
	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	robj = gem_to_radeon_bo(gobj);
	r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch);
	drm_gem_object_put(gobj);
	return r;
}

int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct drm_radeon_gem_get_tiling *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *rbo;
	int r = 0;

	DRM_DEBUG("\n");
	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	rbo = gem_to_radeon_bo(gobj);
	r = radeon_bo_reserve(rbo, false);
	if (unlikely(r != 0))
		goto out;
	radeon_bo_get_tiling_flags(rbo, &args->tiling_flags, &args->pitch);
	radeon_bo_unreserve(rbo);
out:
	drm_gem_object_put(gobj);
	return r;
}

/**
 * radeon_gem_va_update_vm - update the bo_va in its VM
 *
 * @rdev: radeon_device pointer
 * @bo_va: bo_va to update
 *
 * Update the bo_va directly after setting its address. Errors are not
 * vital here, so they are not reported back to userspace.
 */
static void radeon_gem_va_update_vm(struct radeon_device *rdev,
				    struct radeon_bo_va *bo_va)
{
	struct ttm_validate_buffer tv, *entry;
	struct radeon_bo_list *vm_bos;
	struct ww_acquire_ctx ticket;
	struct list_head list;
	unsigned domain;
	int r;

	INIT_LIST_HEAD(&list);

	tv.bo = &bo_va->bo->tbo;
	tv.num_shared = 1;
	list_add(&tv.head, &list);

	vm_bos = radeon_vm_get_bos(rdev, bo_va->vm, &list);
	if (!vm_bos)
		return;

	r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
	if (r)
		goto error_free;

	list_for_each_entry(entry, &list, head) {
		domain = radeon_mem_type_to_domain(entry->bo->resource->mem_type);
		/* if anything is swapped out don't swap it in here,
		 * just abort and wait for the next CS
		 */
		if (domain == RADEON_GEM_DOMAIN_CPU)
			goto error_unreserve;
	}

	mutex_lock(&bo_va->vm->mutex);
	r = radeon_vm_clear_freed(rdev, bo_va->vm);
	if (r)
		goto error_unlock;

	if (bo_va->it.start)
		r = radeon_vm_bo_update(rdev, bo_va, bo_va->bo->tbo.resource);

error_unlock:
	mutex_unlock(&bo_va->vm->mutex);

error_unreserve:
	ttm_eu_backoff_reservation(&ticket, &list);

error_free:
	kvfree(vm_bos);

	if (r && r != -ERESTARTSYS)
		DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
}

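/**
 * radeon_gem_va_ioctl - map or unmap a BO in a per-file virtual address space
 * @dev: DRM device
 * @data: ioctl argument (struct drm_radeon_gem_va)
 * @filp: DRM file private
 *
 * Supports the RADEON_VA_MAP and RADEON_VA_UNMAP operations. Offsets in
 * the reserved VA range and the PAGE_VALID/PAGE_SYSTEM flags are
 * rejected; the outcome is reported back through args->operation.
 */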
int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	struct drm_radeon_gem_va *args = data;
	struct drm_gem_object *gobj;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_fpriv *fpriv = filp->driver_priv;
	struct radeon_bo *rbo;
	struct radeon_bo_va *bo_va;
	u32 invalid_flags;
	int r = 0;

	if (!rdev->vm_manager.enabled) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -ENOTTY;
	}

	/* !! DONT REMOVE !!
	 * We don't support vm_id yet. To be sure we don't have broken
	 * userspace, reject anyone trying to use a non-zero value; that
	 * way we can later use those fields without breaking existing
	 * userspace.
	 */
	if (args->vm_id) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	if (args->offset < RADEON_VA_RESERVED_SIZE) {
		dev_err(dev->dev,
			"offset 0x%lX is in reserved area 0x%X\n",
			(unsigned long)args->offset,
			RADEON_VA_RESERVED_SIZE);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	/* don't remove, we need to enforce userspace to set the snooped flag,
	 * otherwise we will end up with broken userspace and we won't be able
	 * to enable this feature without adding a new interface
	 */
	invalid_flags = RADEON_VM_PAGE_VALID | RADEON_VM_PAGE_SYSTEM;
	if ((args->flags & invalid_flags)) {
		dev_err(dev->dev, "invalid flags 0x%08X vs 0x%08X\n",
			args->flags, invalid_flags);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	switch (args->operation) {
	case RADEON_VA_MAP:
	case RADEON_VA_UNMAP:
		break;
	default:
		dev_err(dev->dev, "unsupported operation %d\n",
			args->operation);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -ENOENT;
	}
	rbo = gem_to_radeon_bo(gobj);
	r = radeon_bo_reserve(rbo, false);
	if (r) {
		args->operation = RADEON_VA_RESULT_ERROR;
		drm_gem_object_put(gobj);
		return r;
	}
	bo_va = radeon_vm_bo_find(&fpriv->vm, rbo);
	if (!bo_va) {
		args->operation = RADEON_VA_RESULT_ERROR;
		radeon_bo_unreserve(rbo);
		drm_gem_object_put(gobj);
		return -ENOENT;
	}

	switch (args->operation) {
	case RADEON_VA_MAP:
		if (bo_va->it.start) {
			args->operation = RADEON_VA_RESULT_VA_EXIST;
			args->offset = bo_va->it.start * RADEON_GPU_PAGE_SIZE;
			radeon_bo_unreserve(rbo);
			goto out;
		}
		r = radeon_vm_bo_set_addr(rdev, bo_va, args->offset, args->flags);
		break;
	case RADEON_VA_UNMAP:
		r = radeon_vm_bo_set_addr(rdev, bo_va, 0, 0);
		break;
	default:
		break;
	}
	if (!r)
		radeon_gem_va_update_vm(rdev, bo_va);
	args->operation = RADEON_VA_RESULT_OK;
	if (r) {
		args->operation = RADEON_VA_RESULT_ERROR;
	}
out:
	drm_gem_object_put(gobj);
	return r;
}

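/**
 * radeon_gem_op_ioctl - get or set per-BO state
 * @dev: DRM device
 * @data: ioctl argument (struct drm_radeon_gem_op)
 * @filp: DRM file private
 *
 * Implements RADEON_GEM_OP_GET_INITIAL_DOMAIN and
 * RADEON_GEM_OP_SET_INITIAL_DOMAIN; not allowed on userptr BOs.
 */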
int radeon_gem_op_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	struct drm_radeon_gem_op *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;

	gobj = drm_gem_object_lookup(filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	r = -EPERM;
	if (radeon_ttm_tt_has_userptr(robj->rdev, robj->tbo.ttm))
		goto out;

	r = radeon_bo_reserve(robj, false);
	if (unlikely(r))
		goto out;

	switch (args->op) {
	case RADEON_GEM_OP_GET_INITIAL_DOMAIN:
		args->value = robj->initial_domain;
		break;
	case RADEON_GEM_OP_SET_INITIAL_DOMAIN:
		robj->initial_domain = args->value & (RADEON_GEM_DOMAIN_VRAM |
						      RADEON_GEM_DOMAIN_GTT |
						      RADEON_GEM_DOMAIN_CPU);
		break;
	default:
		r = -EINVAL;
	}

	radeon_bo_unreserve(robj);
out:
	drm_gem_object_put(gobj);
	return r;
}

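/**
 * radeon_mode_dumb_create - create a dumb buffer for scanout
 * @file_priv: DRM file private
 * @dev: DRM device
 * @args: dumb buffer parameters; pitch, size and handle are returned
 *
 * Computes a hardware-aligned pitch, rounds the size up to a page
 * multiple and allocates the buffer in VRAM.
 */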
int radeon_mode_dumb_create(struct drm_file *file_priv,
			    struct drm_device *dev,
			    struct drm_mode_create_dumb *args)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	args->pitch = radeon_align_pitch(rdev, args->width,
					 DIV_ROUND_UP(args->bpp, 8), 0);
	args->size = args->pitch * args->height;
	args->size = ALIGN(args->size, PAGE_SIZE);

	r = radeon_gem_object_create(rdev, args->size, 0,
				     RADEON_GEM_DOMAIN_VRAM, 0,
				     false, &gobj);
	if (r)
		return -ENOMEM;

	r = drm_gem_handle_create(file_priv, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put(gobj);
	if (r) {
		return r;
	}
	args->handle = handle;
	return 0;
}

#if defined(CONFIG_DEBUG_FS)
static int radeon_debugfs_gem_info_show(struct seq_file *m, void *unused)
{
	struct radeon_device *rdev = (struct radeon_device *)m->private;
	struct radeon_bo *rbo;
	unsigned i = 0;

	mutex_lock(&rdev->gem.mutex);
	list_for_each_entry(rbo, &rdev->gem.objects, list) {
		unsigned domain;
		const char *placement;

		domain = radeon_mem_type_to_domain(rbo->tbo.resource->mem_type);
		switch (domain) {
		case RADEON_GEM_DOMAIN_VRAM:
			placement = "VRAM";
			break;
		case RADEON_GEM_DOMAIN_GTT:
			placement = " GTT";
			break;
		case RADEON_GEM_DOMAIN_CPU:
		default:
			placement = " CPU";
			break;
		}
		seq_printf(m, "bo[0x%08x] %8ldkB %8ldMB %s pid %8ld\n",
			   i, radeon_bo_size(rbo) >> 10, radeon_bo_size(rbo) >> 20,
			   placement, (unsigned long)rbo->pid);
		i++;
	}
	mutex_unlock(&rdev->gem.mutex);
	return 0;
}

DEFINE_SHOW_ATTRIBUTE(radeon_debugfs_gem_info);
#endif

void radeon_gem_debugfs_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	struct dentry *root = rdev->ddev->primary->debugfs_root;

	debugfs_create_file("radeon_gem_info", 0444, root, rdev,
			    &radeon_debugfs_gem_info_fops);

#endif
}