/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/seq_file.h>
#include <linux/slab.h>
#include "drmP.h"
#include "radeon_drm.h"
#include "radeon_reg.h"
#include "radeon.h"
#include "atom.h"

int radeon_debugfs_ib_init(struct radeon_device *rdev);

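/**
 * radeon_ib_bogus_cleanup - free all saved copies of bogus IBs
 * @rdev: radeon device
 *
 * Frees every IB copy queued on the bogus list.  The caller is expected
 * to hold ib_pool.mutex, as radeon_ib_pool_fini() does.
 */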
void radeon_ib_bogus_cleanup(struct radeon_device *rdev)
{
        struct radeon_ib *ib, *n;

        list_for_each_entry_safe(ib, n, &rdev->ib_pool.bogus_ib, list) {
                list_del(&ib->list);
                vfree(ib->ptr);
                kfree(ib);
        }
}

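/**
 * radeon_ib_bogus_add - save a copy of an IB that failed validation
 * @rdev: radeon device
 * @ib: IB whose contents should be preserved
 *
 * Copies the IB contents into a vmalloc'ed buffer and queues it on the
 * bogus list so it can later be dumped through debugfs
 * (radeon_debugfs_ib_bogus_info).  Allocation failures are silently
 * ignored; the copy is purely a debugging aid.
 */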
void radeon_ib_bogus_add(struct radeon_device *rdev, struct radeon_ib *ib)
{
        struct radeon_ib *bib;

        bib = kmalloc(sizeof(*bib), GFP_KERNEL);
        if (bib == NULL)
                return;
        bib->ptr = vmalloc(ib->length_dw * 4);
        if (bib->ptr == NULL) {
                kfree(bib);
                return;
        }
        memcpy(bib->ptr, ib->ptr, ib->length_dw * 4);
        bib->length_dw = ib->length_dw;
        mutex_lock(&rdev->ib_pool.mutex);
        list_add_tail(&bib->list, &rdev->ib_pool.bogus_ib);
        mutex_unlock(&rdev->ib_pool.mutex);
}

/*
 * IB.
 */
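/*
 * Typical IB life cycle, as a minimal sketch (error handling trimmed,
 * packet contents left out):
 *
 *      struct radeon_ib *ib;
 *
 *      r = radeon_ib_get(rdev, &ib);
 *      if (r)
 *              return r;
 *      ... write packets into ib->ptr[] and set ib->length_dw ...
 *      r = radeon_ib_schedule(rdev, ib);
 *      radeon_ib_free(rdev, &ib);
 */

/**
 * radeon_ib_get - grab a free IB from the pool
 * @rdev: radeon device
 * @ib: on success, points to the acquired IB; NULL otherwise
 *
 * Scans the pool starting at head_id for a free IB, waits on the slot's
 * previous fence if it is still in flight, then attaches a fresh fence
 * to it.  Returns -EBUSY when no IB is free, or a fence error code.
 */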
int radeon_ib_get(struct radeon_device *rdev, struct radeon_ib **ib)
{
        struct radeon_fence *fence;
        struct radeon_ib *nib;
        int r = 0, i, c;

        *ib = NULL;
        r = radeon_fence_create(rdev, &fence);
        if (r) {
                dev_err(rdev->dev, "failed to create fence for new IB\n");
                return r;
        }
        mutex_lock(&rdev->ib_pool.mutex);
        for (i = rdev->ib_pool.head_id, c = 0, nib = NULL; c < RADEON_IB_POOL_SIZE; c++, i++) {
                i &= (RADEON_IB_POOL_SIZE - 1);
                if (rdev->ib_pool.ibs[i].free) {
                        nib = &rdev->ib_pool.ibs[i];
                        break;
                }
        }
        if (nib == NULL) {
                /* This should never happen: it means we have handed out
                 * every IB without scheduling any of them yet.  Return
                 * -EBUSY so userspace can retry the ioctl and hopefully
                 * have better luck.
                 */
                dev_err(rdev->dev, "no free indirect buffer!\n");
                mutex_unlock(&rdev->ib_pool.mutex);
                radeon_fence_unref(&fence);
                return -EBUSY;
        }
        rdev->ib_pool.head_id = (nib->idx + 1) & (RADEON_IB_POOL_SIZE - 1);
        nib->free = false;
        if (nib->fence) {
                mutex_unlock(&rdev->ib_pool.mutex);
                r = radeon_fence_wait(nib->fence, false);
                if (r) {
                        dev_err(rdev->dev, "error waiting for fence of IB(%u:0x%016lX:%u)\n",
                                nib->idx, (unsigned long)nib->gpu_addr, nib->length_dw);
                        mutex_lock(&rdev->ib_pool.mutex);
                        nib->free = true;
                        mutex_unlock(&rdev->ib_pool.mutex);
                        radeon_fence_unref(&fence);
                        return r;
                }
                mutex_lock(&rdev->ib_pool.mutex);
        }
        radeon_fence_unref(&nib->fence);
        nib->fence = fence;
        nib->length_dw = 0;
        mutex_unlock(&rdev->ib_pool.mutex);
        *ib = nib;
        return 0;
}

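/**
 * radeon_ib_free - return an IB to the pool
 * @rdev: radeon device
 * @ib: IB to release; set to NULL on return
 *
 * If the IB's fence was never emitted (the IB was not scheduled), the
 * fence reference is dropped right away; otherwise the fence keeps
 * protecting the IB's contents until the GPU is done with it.
 */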
void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib)
{
        struct radeon_ib *tmp = *ib;

        *ib = NULL;
        if (tmp == NULL) {
                return;
        }
        if (!tmp->fence->emited)
                radeon_fence_unref(&tmp->fence);
        mutex_lock(&rdev->ib_pool.mutex);
        tmp->free = true;
        mutex_unlock(&rdev->ib_pool.mutex);
}

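/**
 * radeon_ib_schedule - submit an IB to the GPU's command processor
 * @rdev: radeon device
 * @ib: IB to execute
 *
 * Emits the IB on the ring followed by its fence, then marks the IB
 * free again; from that point the fence alone protects its contents.
 * Returns -EINVAL for an empty IB or when the CP is not ready.
 */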
int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
{
        int r = 0;

        if (!ib->length_dw || !rdev->cp.ready) {
                /* TODO: nothing in the IB; we should report this to userspace. */
                DRM_ERROR("radeon: couldn't schedule IB(%u).\n", ib->idx);
                return -EINVAL;
        }

        /* 64 dwords should be enough for the IB execute packet and the fence */
        r = radeon_ring_lock(rdev, 64);
        if (r) {
                DRM_ERROR("radeon: scheduling IB failed (%d).\n", r);
                return r;
        }
        radeon_ring_ib_execute(rdev, ib);
        radeon_fence_emit(rdev, ib->fence);
        mutex_lock(&rdev->ib_pool.mutex);
        /* once scheduled, an IB is considered free and protected by its fence */
        ib->free = true;
        mutex_unlock(&rdev->ib_pool.mutex);
        radeon_ring_unlock_commit(rdev);
        return 0;
}

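/**
 * radeon_ib_pool_init - allocate and map the IB pool
 * @rdev: radeon device
 *
 * Creates a single 1MB GTT buffer object, pins and maps it, and carves
 * it into RADEON_IB_POOL_SIZE IBs of 64KB each.
 */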
int radeon_ib_pool_init(struct radeon_device *rdev)
{
        void *ptr;
        uint64_t gpu_addr;
        int i;
        int r = 0;

        if (rdev->ib_pool.robj)
                return 0;
        INIT_LIST_HEAD(&rdev->ib_pool.bogus_ib);
        /* Allocate a 1MB buffer object */
        r = radeon_bo_create(rdev, RADEON_IB_POOL_SIZE*64*1024,
                             PAGE_SIZE, true, RADEON_GEM_DOMAIN_GTT,
                             &rdev->ib_pool.robj);
        if (r) {
                DRM_ERROR("radeon: failed to create ib pool (%d).\n", r);
                return r;
        }
        r = radeon_bo_reserve(rdev->ib_pool.robj, false);
        if (unlikely(r != 0))
                return r;
        r = radeon_bo_pin(rdev->ib_pool.robj, RADEON_GEM_DOMAIN_GTT, &gpu_addr);
        if (r) {
                radeon_bo_unreserve(rdev->ib_pool.robj);
                DRM_ERROR("radeon: failed to pin ib pool (%d).\n", r);
                return r;
        }
        r = radeon_bo_kmap(rdev->ib_pool.robj, &ptr);
        radeon_bo_unreserve(rdev->ib_pool.robj);
        if (r) {
                DRM_ERROR("radeon: failed to map ib pool (%d).\n", r);
                return r;
        }
        for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
                unsigned offset;

                offset = i * 64 * 1024;
                rdev->ib_pool.ibs[i].gpu_addr = gpu_addr + offset;
                rdev->ib_pool.ibs[i].ptr = ptr + offset;
                rdev->ib_pool.ibs[i].idx = i;
                rdev->ib_pool.ibs[i].length_dw = 0;
                rdev->ib_pool.ibs[i].free = true;
        }
        rdev->ib_pool.head_id = 0;
        rdev->ib_pool.ready = true;
        DRM_INFO("radeon: ib pool ready.\n");
        if (radeon_debugfs_ib_init(rdev)) {
                DRM_ERROR("Failed to register debugfs file for IB!\n");
        }
        return r;
}

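/**
 * radeon_ib_pool_fini - tear down the IB pool
 * @rdev: radeon device
 *
 * Frees any recorded bogus IBs, then unmaps, unpins and releases the
 * pool's buffer object.
 */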
void radeon_ib_pool_fini(struct radeon_device *rdev)
{
        int r;
        struct radeon_bo *robj;

        if (!rdev->ib_pool.ready) {
                return;
        }
        mutex_lock(&rdev->ib_pool.mutex);
        radeon_ib_bogus_cleanup(rdev);
        robj = rdev->ib_pool.robj;
        rdev->ib_pool.robj = NULL;
        mutex_unlock(&rdev->ib_pool.mutex);

        if (robj) {
                r = radeon_bo_reserve(robj, false);
                if (likely(r == 0)) {
                        radeon_bo_kunmap(robj);
                        radeon_bo_unpin(robj);
                        radeon_bo_unreserve(robj);
                }
                radeon_bo_unref(&robj);
        }
}

/*
 * Ring.
 */
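/**
 * radeon_ring_free_size - recompute the free dword count of the ring
 * @rdev: radeon device
 *
 * Refreshes the cached read pointer (from the writeback page when
 * enabled, otherwise from the RPTR register) and derives ring_free_dw
 * from the distance between rptr and wptr.
 */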
void radeon_ring_free_size(struct radeon_device *rdev)
{
        if (rdev->wb.enabled)
                rdev->cp.rptr = le32_to_cpu(rdev->wb.wb[RADEON_WB_CP_RPTR_OFFSET/4]);
        else {
                if (rdev->family >= CHIP_R600)
                        rdev->cp.rptr = RREG32(R600_CP_RB_RPTR);
                else
                        rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
        }
        /* This works because ring_size is a power of 2 */
        rdev->cp.ring_free_dw = (rdev->cp.rptr + (rdev->cp.ring_size / 4));
        rdev->cp.ring_free_dw -= rdev->cp.wptr;
        rdev->cp.ring_free_dw &= rdev->cp.ptr_mask;
        if (!rdev->cp.ring_free_dw) {
                rdev->cp.ring_free_dw = rdev->cp.ring_size / 4;
        }
}

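/**
 * radeon_ring_alloc - reserve space on the ring
 * @rdev: radeon device
 * @ndw: number of dwords requested
 *
 * Rounds the request up to the fetch alignment and, while the ring is
 * too full, waits on the next fence so the GPU can free up space.
 */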
int radeon_ring_alloc(struct radeon_device *rdev, unsigned ndw)
{
        int r;

        /* Align the requested size to the fetch alignment so that
         * unlock_commit can pad safely */
        ndw = (ndw + rdev->cp.align_mask) & ~rdev->cp.align_mask;
        while (ndw > (rdev->cp.ring_free_dw - 1)) {
                radeon_ring_free_size(rdev);
                if (ndw < rdev->cp.ring_free_dw) {
                        break;
                }
                r = radeon_fence_wait_next(rdev);
                if (r)
                        return r;
        }
        rdev->cp.count_dw = ndw;
        rdev->cp.wptr_old = rdev->cp.wptr;
        return 0;
}

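/**
 * radeon_ring_lock - lock the ring and reserve space on it
 * @rdev: radeon device
 * @ndw: number of dwords requested
 *
 * Takes cp.mutex and calls radeon_ring_alloc().  The mutex is dropped
 * again on failure; on success it stays held until
 * radeon_ring_unlock_commit() or radeon_ring_unlock_undo().
 */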
int radeon_ring_lock(struct radeon_device *rdev, unsigned ndw)
{
        int r;

        mutex_lock(&rdev->cp.mutex);
        r = radeon_ring_alloc(rdev, ndw);
        if (r) {
                mutex_unlock(&rdev->cp.mutex);
                return r;
        }
        return 0;
}

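/**
 * radeon_ring_commit - pad the ring and kick off execution
 * @rdev: radeon device
 *
 * Pads the write pointer up to the next fetch-size boundary with filler
 * packets (2 << 30 puts a type-2 NOP in the packet-type field), then
 * tells the CP to fetch the new commands.
 */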
void radeon_ring_commit(struct radeon_device *rdev)
{
        unsigned count_dw_pad;
        unsigned i;

        /* We pad to match fetch size */
        count_dw_pad = (rdev->cp.align_mask + 1) -
                       (rdev->cp.wptr & rdev->cp.align_mask);
        for (i = 0; i < count_dw_pad; i++) {
                radeon_ring_write(rdev, 2 << 30);
        }
        DRM_MEMORYBARRIER();
        radeon_cp_commit(rdev);
}

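/**
 * radeon_ring_unlock_commit - commit queued commands and release the ring
 * @rdev: radeon device
 */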
void radeon_ring_unlock_commit(struct radeon_device *rdev)
{
        radeon_ring_commit(rdev);
        mutex_unlock(&rdev->cp.mutex);
}

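/**
 * radeon_ring_unlock_undo - discard queued commands and release the ring
 * @rdev: radeon device
 *
 * Rewinds the write pointer to where it was when the ring was locked.
 */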
void radeon_ring_unlock_undo(struct radeon_device *rdev)
{
        rdev->cp.wptr = rdev->cp.wptr_old;
        mutex_unlock(&rdev->cp.mutex);
}

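/**
 * radeon_ring_init - allocate, pin and map the ring buffer
 * @rdev: radeon device
 * @ring_size: ring size in bytes; assumed to be a power of 2, as the
 * free-size computation relies on that
 */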
int radeon_ring_init(struct radeon_device *rdev, unsigned ring_size)
{
        int r;

        rdev->cp.ring_size = ring_size;
        /* Allocate the ring buffer */
        if (rdev->cp.ring_obj == NULL) {
                r = radeon_bo_create(rdev, rdev->cp.ring_size, PAGE_SIZE, true,
                                     RADEON_GEM_DOMAIN_GTT,
                                     &rdev->cp.ring_obj);
                if (r) {
                        dev_err(rdev->dev, "(%d) ring create failed\n", r);
                        return r;
                }
                r = radeon_bo_reserve(rdev->cp.ring_obj, false);
                if (unlikely(r != 0))
                        return r;
                r = radeon_bo_pin(rdev->cp.ring_obj, RADEON_GEM_DOMAIN_GTT,
                                  &rdev->cp.gpu_addr);
                if (r) {
                        radeon_bo_unreserve(rdev->cp.ring_obj);
                        dev_err(rdev->dev, "(%d) ring pin failed\n", r);
                        return r;
                }
                r = radeon_bo_kmap(rdev->cp.ring_obj,
                                   (void **)&rdev->cp.ring);
                radeon_bo_unreserve(rdev->cp.ring_obj);
                if (r) {
                        dev_err(rdev->dev, "(%d) ring map failed\n", r);
                        return r;
                }
        }
        rdev->cp.ptr_mask = (rdev->cp.ring_size / 4) - 1;
        rdev->cp.ring_free_dw = rdev->cp.ring_size / 4;
        return 0;
}

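/**
 * radeon_ring_fini - unmap, unpin and free the ring buffer
 * @rdev: radeon device
 */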
void radeon_ring_fini(struct radeon_device *rdev)
{
        int r;
        struct radeon_bo *ring_obj;

        mutex_lock(&rdev->cp.mutex);
        ring_obj = rdev->cp.ring_obj;
        rdev->cp.ring = NULL;
        rdev->cp.ring_obj = NULL;
        mutex_unlock(&rdev->cp.mutex);

        if (ring_obj) {
                r = radeon_bo_reserve(ring_obj, false);
                if (likely(r == 0)) {
                        radeon_bo_kunmap(ring_obj);
                        radeon_bo_unpin(ring_obj);
                        radeon_bo_unreserve(ring_obj);
                }
                radeon_bo_unref(&ring_obj);
        }
}

/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)
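/* Dump one pool IB: index, fence pointer, size and full contents. */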
static int radeon_debugfs_ib_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct radeon_ib *ib = node->info_ent->data;
        unsigned i;

        if (ib == NULL) {
                return 0;
        }
        seq_printf(m, "IB %04u\n", ib->idx);
        seq_printf(m, "IB fence %p\n", ib->fence);
        seq_printf(m, "IB size %05u dwords\n", ib->length_dw);
        for (i = 0; i < ib->length_dw; i++) {
                seq_printf(m, "[%05u]=0x%08X\n", i, ib->ptr[i]);
        }
        return 0;
}

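/*
 * Dump and consume the oldest recorded bogus IB; each read of the
 * debugfs file pops one entry off the list and frees it.
 */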
static int radeon_debugfs_ib_bogus_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct radeon_device *rdev = node->info_ent->data;
        struct radeon_ib *ib;
        unsigned i;

        mutex_lock(&rdev->ib_pool.mutex);
        if (list_empty(&rdev->ib_pool.bogus_ib)) {
                mutex_unlock(&rdev->ib_pool.mutex);
                seq_printf(m, "no bogus IB recorded\n");
                return 0;
        }
        ib = list_first_entry(&rdev->ib_pool.bogus_ib, struct radeon_ib, list);
        list_del_init(&ib->list);
        mutex_unlock(&rdev->ib_pool.mutex);
        seq_printf(m, "IB size %05u dwords\n", ib->length_dw);
        for (i = 0; i < ib->length_dw; i++) {
                seq_printf(m, "[%05u]=0x%08X\n", i, ib->ptr[i]);
        }
        vfree(ib->ptr);
        kfree(ib);
        return 0;
}

static struct drm_info_list radeon_debugfs_ib_list[RADEON_IB_POOL_SIZE];
static char radeon_debugfs_ib_names[RADEON_IB_POOL_SIZE][32];

static struct drm_info_list radeon_debugfs_ib_bogus_info_list[] = {
        {"radeon_ib_bogus", radeon_debugfs_ib_bogus_info, 0, NULL},
};
#endif

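/**
 * radeon_debugfs_ib_init - register the IB debugfs files
 * @rdev: radeon device
 *
 * Registers one "radeon_ib_bogus" file plus one "radeon_ib_%04u" file
 * per pool entry.  Compiles down to a no-op without CONFIG_DEBUG_FS.
 */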
int radeon_debugfs_ib_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
        unsigned i;
        int r;

        radeon_debugfs_ib_bogus_info_list[0].data = rdev;
        r = radeon_debugfs_add_files(rdev, radeon_debugfs_ib_bogus_info_list, 1);
        if (r)
                return r;
        for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
                sprintf(radeon_debugfs_ib_names[i], "radeon_ib_%04u", i);
                radeon_debugfs_ib_list[i].name = radeon_debugfs_ib_names[i];
                radeon_debugfs_ib_list[i].show = &radeon_debugfs_ib_info;
                radeon_debugfs_ib_list[i].driver_features = 0;
                radeon_debugfs_ib_list[i].data = &rdev->ib_pool.ibs[i];
        }
        return radeon_debugfs_add_files(rdev, radeon_debugfs_ib_list,
                                        RADEON_IB_POOL_SIZE);
#else
        return 0;
#endif
}