/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include "drmP.h"
#include "drm.h"
#include "radeon_drm.h"
#include "radeon.h"

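/*
 * GEM objects are embedded in struct radeon_bo and set up by
 * radeon_gem_object_create(), so this init callback should never run.
 */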
int radeon_gem_object_init(struct drm_gem_object *obj)
{
	BUG();

	return 0;
}

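/* Drop the driver reference; the bo is freed once the last reference goes. */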
void radeon_gem_object_free(struct drm_gem_object *gobj)
{
	struct radeon_bo *robj = gem_to_radeon_bo(gobj);

	if (robj) {
		radeon_bo_unref(&robj);
	}
}

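/*
 * Allocate the radeon_bo backing a new GEM object and add it to the
 * device-wide object list.  Returns 0 and stores the new GEM object
 * in *obj on success, a negative error code otherwise.
 */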
int radeon_gem_object_create(struct radeon_device *rdev, int size,
			     int alignment, int initial_domain,
			     bool discardable, bool kernel,
			     struct drm_gem_object **obj)
{
	struct radeon_bo *robj;
	int r;

	*obj = NULL;
	/* At least align on page size */
	if (alignment < PAGE_SIZE) {
		alignment = PAGE_SIZE;
	}
	r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain, &robj);
	if (r) {
		if (r != -ERESTARTSYS)
			DRM_ERROR("Failed to allocate GEM object (%d, %d, %u, %d)\n",
				  size, initial_domain, alignment, r);
		return r;
	}
	*obj = &robj->gem_base;

	mutex_lock(&rdev->gem.mutex);
	list_add_tail(&robj->list, &rdev->gem.objects);
	mutex_unlock(&rdev->gem.mutex);

	return 0;
}

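/*
 * Pick a target domain, preferring the write domain over the read
 * domains.  Only the CPU domain is acted on for now: it waits for the
 * buffer to go idle so the CPU can safely access it.
 */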
int radeon_gem_set_domain(struct drm_gem_object *gobj,
			  uint32_t rdomain, uint32_t wdomain)
{
	struct radeon_bo *robj;
	uint32_t domain;
	int r;

	/* FIXME: reimplement */
	robj = gem_to_radeon_bo(gobj);
	/* work out where to validate the buffer to */
	domain = wdomain;
	if (!domain) {
		domain = rdomain;
	}
	if (!domain) {
		/* Do nothing */
		printk(KERN_WARNING "Set domain without domain!\n");
		return 0;
	}
	if (domain == RADEON_GEM_DOMAIN_CPU) {
		/* Asking for CPU access; wait for the object to go idle */
		r = radeon_bo_wait(robj, NULL, false);
		if (r) {
			printk(KERN_ERR "Failed to wait for object!\n");
			return r;
		}
	}
	return 0;
}

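/* One-time GEM setup: initialize the device-wide object list. */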
int radeon_gem_init(struct radeon_device *rdev)
{
	INIT_LIST_HEAD(&rdev->gem.objects);
	return 0;
}

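/* GEM teardown: force-delete any objects that are still around. */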
void radeon_gem_fini(struct radeon_device *rdev)
{
	radeon_bo_force_delete(rdev);
}

/*
 * Called from drm_gem_handle_create(), which runs in both the create
 * and the open ioctl paths.
 */
int radeon_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
{
	return 0;
}

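/*
 * Called on handle close.  On chips with GPU virtual memory (Cayman and
 * newer) this removes the buffer's mapping from the file's address space.
 */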
void radeon_gem_object_close(struct drm_gem_object *obj,
			     struct drm_file *file_priv)
{
	struct radeon_bo *rbo = gem_to_radeon_bo(obj);
	struct radeon_device *rdev = rbo->rdev;
	struct radeon_fpriv *fpriv = file_priv->driver_priv;
	struct radeon_vm *vm = &fpriv->vm;
	struct radeon_bo_va *bo_va, *tmp;

	if (rdev->family < CHIP_CAYMAN) {
		return;
	}

	if (radeon_bo_reserve(rbo, false)) {
		return;
	}
	list_for_each_entry_safe(bo_va, tmp, &rbo->va, bo_list) {
		if (bo_va->vm == vm) {
			/* remove from this vm address space */
			mutex_lock(&vm->mutex);
			list_del(&bo_va->vm_list);
			mutex_unlock(&vm->mutex);
			list_del(&bo_va->bo_list);
			kfree(bo_va);
		}
	}
	radeon_bo_unreserve(rbo);
}

/*
 * GEM ioctls.
 */
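/*
 * Report how much VRAM and GART space userspace can use, after
 * subtracting what the kernel has already claimed (stolen VGA memory,
 * fbdev, the IB pool and the ring buffers).
 */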
int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_info *args = data;
	struct ttm_mem_type_manager *man;
	unsigned i;

	man = &rdev->mman.bdev.man[TTM_PL_VRAM];

	args->vram_size = rdev->mc.real_vram_size;
	args->vram_visible = (u64)man->size << PAGE_SHIFT;
	if (rdev->stollen_vga_memory)
		args->vram_visible -= radeon_bo_size(rdev->stollen_vga_memory);
	args->vram_visible -= radeon_fbdev_total_size(rdev);
	args->gart_size = rdev->mc.gtt_size - 4096 - RADEON_IB_POOL_SIZE*64*1024;
	for (i = 0; i < RADEON_NUM_RINGS; ++i)
		args->gart_size -= rdev->ring[i].ring_size;
	return 0;
}

int radeon_gem_pread_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *filp)
{
	/* TODO: implement */
	DRM_ERROR("unimplemented %s\n", __func__);
	return -ENOSYS;
}

int radeon_gem_pwrite_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	/* TODO: implement */
	DRM_ERROR("unimplemented %s\n", __func__);
	return -ENOSYS;
}

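/*
 * Allocate a buffer with the requested size, alignment and initial
 * domain and return a handle to it; the handle keeps the buffer alive
 * once the local reference is dropped.
 */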
int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_create *args = data;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	/* create a gem object to contain this object in */
	args->size = roundup(args->size, PAGE_SIZE);
	r = radeon_gem_object_create(rdev, args->size, args->alignment,
				     args->initial_domain, false,
				     false, &gobj);
	if (r) {
		return r;
	}
	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(gobj);
	if (r) {
		return r;
	}
	args->handle = handle;
	return 0;
}

int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	/* transition the BO to a domain -
	 * just validate the BO into a certain domain */
	struct drm_radeon_gem_set_domain *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;

	/* for now if someone requests domain CPU -
	 * just make sure the buffer is finished with */

	/* just do a BO wait for now */
	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain);

	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

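/*
 * Return the fake mmap offset of a buffer so userspace can map it
 * through the DRM file descriptor.  Shared between the dumb-buffer
 * path and the GEM mmap ioctl below.
 */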
int radeon_mode_dumb_mmap(struct drm_file *filp,
			  struct drm_device *dev,
			  uint32_t handle, uint64_t *offset_p)
{
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;

	gobj = drm_gem_object_lookup(dev, filp, handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);
	*offset_p = radeon_bo_mmap_offset(robj);
	drm_gem_object_unreference_unlocked(gobj);
	return 0;
}

int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct drm_radeon_gem_mmap *args = data;

	return radeon_mode_dumb_mmap(filp, dev, args->handle, &args->addr_ptr);
}

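/*
 * Non-blocking busy check: returns -EBUSY while the buffer is still in
 * use and reports the domain the buffer currently resides in.
 */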
int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct drm_radeon_gem_busy *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;
	uint32_t cur_placement = 0;

	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);
	r = radeon_bo_wait(robj, &cur_placement, true);
	switch (cur_placement) {
	case TTM_PL_VRAM:
		args->domain = RADEON_GEM_DOMAIN_VRAM;
		break;
	case TTM_PL_TT:
		args->domain = RADEON_GEM_DOMAIN_GTT;
		break;
	case TTM_PL_SYSTEM:
		args->domain = RADEON_GEM_DOMAIN_CPU;
		break;
	default:
		break;
	}
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

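/*
 * Block until the buffer is idle, then give the ASIC a chance to run
 * any chip-specific idle work (cache flushes etc.).
 */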
int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *filp)
{
	struct drm_radeon_gem_wait_idle *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;

	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);
	r = radeon_bo_wait(robj, NULL, false);
	/* call back into hw specific functions, if any */
	if (robj->rdev->asic->ioctl_wait_idle)
		robj->rdev->asic->ioctl_wait_idle(robj->rdev, robj);
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

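/* Attach tiling metadata (tiling flags and pitch) to a buffer. */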
int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct drm_radeon_gem_set_tiling *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r = 0;

	DRM_DEBUG("%d\n", args->handle);
	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	robj = gem_to_radeon_bo(gobj);
	r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch);
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

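/* Read back the tiling flags and pitch previously set on a buffer. */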
int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct drm_radeon_gem_get_tiling *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *rbo;
	int r = 0;

	DRM_DEBUG("\n");
	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	rbo = gem_to_radeon_bo(gobj);
	r = radeon_bo_reserve(rbo, false);
	if (unlikely(r != 0))
		goto out;
	radeon_bo_get_tiling_flags(rbo, &args->tiling_flags, &args->pitch);
	radeon_bo_unreserve(rbo);
out:
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

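/*
 * Map or unmap a buffer in the per-file virtual address space.  Only
 * snooped mappings outside the reserved area are accepted for now, and
 * vm_id must be zero.
 */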
int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	struct drm_radeon_gem_va *args = data;
	struct drm_gem_object *gobj;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_fpriv *fpriv = filp->driver_priv;
	struct radeon_bo *rbo;
	struct radeon_bo_va *bo_va;
	u32 invalid_flags;
	int r = 0;

	if (!rdev->vm_manager.enabled) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -ENOTTY;
	}

	/* !! DONT REMOVE !!
	 * We don't support vm_id yet, so to be sure we don't break
	 * userspace, reject anyone trying to use a non-zero value.  That
	 * way we can start using these fields later without breaking
	 * existing userspace.
	 */
	if (args->vm_id) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	if (args->offset < RADEON_VA_RESERVED_SIZE) {
		dev_err(&dev->pdev->dev,
			"offset 0x%lX is in reserved area 0x%X\n",
			(unsigned long)args->offset,
			RADEON_VA_RESERVED_SIZE);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	/* don't remove, we need to enforce userspace setting the snooped
	 * flag, otherwise we will end up with broken userspace and we
	 * won't be able to enable this feature without adding a new
	 * interface
	 */
	invalid_flags = RADEON_VM_PAGE_VALID | RADEON_VM_PAGE_SYSTEM;
	if ((args->flags & invalid_flags)) {
		dev_err(&dev->pdev->dev, "invalid flags 0x%08X vs 0x%08X\n",
			args->flags, invalid_flags);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}
	if (!(args->flags & RADEON_VM_PAGE_SNOOPED)) {
		dev_err(&dev->pdev->dev, "only snooped mappings are supported for now\n");
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	switch (args->operation) {
	case RADEON_VA_MAP:
	case RADEON_VA_UNMAP:
		break;
	default:
		dev_err(&dev->pdev->dev, "unsupported operation %d\n",
			args->operation);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -ENOENT;
	}
	rbo = gem_to_radeon_bo(gobj);
	r = radeon_bo_reserve(rbo, false);
	if (r) {
		args->operation = RADEON_VA_RESULT_ERROR;
		drm_gem_object_unreference_unlocked(gobj);
		return r;
	}
	switch (args->operation) {
	case RADEON_VA_MAP:
		bo_va = radeon_bo_va(rbo, &fpriv->vm);
		if (bo_va) {
			args->operation = RADEON_VA_RESULT_VA_EXIST;
			args->offset = bo_va->soffset;
			goto out;
		}
		r = radeon_vm_bo_add(rdev, &fpriv->vm, rbo,
				     args->offset, args->flags);
		break;
	case RADEON_VA_UNMAP:
		r = radeon_vm_bo_rmv(rdev, &fpriv->vm, rbo);
		break;
	default:
		break;
	}
	args->operation = RADEON_VA_RESULT_OK;
	if (r) {
		args->operation = RADEON_VA_RESULT_ERROR;
	}
out:
	radeon_bo_unreserve(rbo);
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}

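/*
 * Create a dumb scanout buffer in VRAM: align the pitch for the
 * requested width and bpp, round the size up to a whole page, then
 * allocate the buffer and return a handle to it.
 */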
int radeon_mode_dumb_create(struct drm_file *file_priv,
			    struct drm_device *dev,
			    struct drm_mode_create_dumb *args)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	args->pitch = radeon_align_pitch(rdev, args->width, args->bpp, 0) * ((args->bpp + 1) / 8);
	args->size = args->pitch * args->height;
	args->size = ALIGN(args->size, PAGE_SIZE);

	r = radeon_gem_object_create(rdev, args->size, 0,
				     RADEON_GEM_DOMAIN_VRAM,
				     false, ttm_bo_type_device,
				     &gobj);
	if (r)
		return -ENOMEM;

	r = drm_gem_handle_create(file_priv, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(gobj);
	if (r) {
		return r;
	}
	args->handle = handle;
	return 0;
}

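/*
 * Deleting the handle drops its reference; the buffer is freed once the
 * last reference is gone.
 */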
int radeon_mode_dumb_destroy(struct drm_file *file_priv,
			     struct drm_device *dev,
			     uint32_t handle)
{
	return drm_gem_handle_delete(file_priv, handle);
}