1 /* i830_dma.c -- DMA support for the I830 -*- linux-c -*-
2 * Created: Mon Dec 13 01:50:01 1999 by jhartmann@precisioninsight.com
3 *
4 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
5 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
6 * All Rights Reserved.
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
24 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
25 * DEALINGS IN THE SOFTWARE.
26 *
27 * Authors: Rickard E. (Rik) Faith <faith@valinux.com>
28 * Jeff Hartmann <jhartmann@valinux.com>
29 * Keith Whitwell <keith@tungstengraphics.com>
30 * Abraham vd Merwe <abraham@2d3d.co.za>
31 *
32 */
33
34
35 #include "i830.h"
36 #include "drmP.h"
37 #include "drm.h"
38 #include "i830_drm.h"
39 #include "i830_drv.h"
40 #include <linux/interrupt.h> /* For task queue support */
41 #include <linux/pagemap.h> /* For FASTCALL on unlock_page() */
42 #include <linux/delay.h>
43
/* do_munmap() wrapper: single place to adapt to kernel API changes. */
#define DO_MUNMAP(m, a, l)	do_munmap(m, a, l)

/* Buffer ownership states kept in each buffer's in_use word (a u32 in
 * the hardware status page, flipped atomically with cmpxchg). */
#define I830_BUF_FREE		2
#define I830_BUF_CLIENT		1
#define I830_BUF_HARDWARE      	0

/* Whether a client currently has the buffer mmap()ed. */
#define I830_BUF_UNMAPPED 0
#define I830_BUF_MAPPED   1
52
53
54
55
56
57
58
59
60
61
62
63
64
65
/* Dump the hardware status page for debugging: interrupt status, ring
 * head pointers, the driver counter, and the per-buffer in-use words.
 * Buffer status words start at dword 9 (byte 36 — see
 * i830_freelist_init()).  Output goes through DRM_DEBUG only. */
static inline void i830_print_status_page(drm_device_t *dev)
{
	drm_device_dma_t *dma = dev->dma;
	drm_i830_private_t *dev_priv = dev->dev_private;
	u32 *temp = (u32 *)dev_priv->hw_status_page;
	int i;

	DRM_DEBUG( "hw_status: Interrupt Status : %x\n", temp[0]);
	DRM_DEBUG( "hw_status: LpRing Head ptr : %x\n", temp[1]);
	DRM_DEBUG( "hw_status: IRing Head ptr : %x\n", temp[2]);
	DRM_DEBUG( "hw_status: Reserved : %x\n", temp[3]);
	DRM_DEBUG( "hw_status: Driver Counter : %d\n", temp[5]);
	for(i = 9; i < dma->buf_count + 9; i++) {
		DRM_DEBUG( "buffer status idx : %d used: %d\n", i - 9, temp[i]);
	}
}
82
i830_freelist_get(drm_device_t * dev)83 static drm_buf_t *i830_freelist_get(drm_device_t *dev)
84 {
85 drm_device_dma_t *dma = dev->dma;
86 int i;
87 int used;
88
89 /* Linear search might not be the best solution */
90
91 for (i = 0; i < dma->buf_count; i++) {
92 drm_buf_t *buf = dma->buflist[ i ];
93 drm_i830_buf_priv_t *buf_priv = buf->dev_private;
94 /* In use is already a pointer */
95 used = cmpxchg(buf_priv->in_use, I830_BUF_FREE,
96 I830_BUF_CLIENT);
97 if(used == I830_BUF_FREE) {
98 return buf;
99 }
100 }
101 return NULL;
102 }
103
104 /* This should only be called if the buffer is not sent to the hardware
105 * yet, the hardware updates in use for us once its on the ring buffer.
106 */
107
i830_freelist_put(drm_device_t * dev,drm_buf_t * buf)108 static int i830_freelist_put(drm_device_t *dev, drm_buf_t *buf)
109 {
110 drm_i830_buf_priv_t *buf_priv = buf->dev_private;
111 int used;
112
113 /* In use is already a pointer */
114 used = cmpxchg(buf_priv->in_use, I830_BUF_CLIENT, I830_BUF_FREE);
115 if(used != I830_BUF_CLIENT) {
116 DRM_ERROR("Freeing buffer thats not in use : %d\n", buf->idx);
117 return -EINVAL;
118 }
119
120 return 0;
121 }
122
123 static struct file_operations i830_buffer_fops = {
124 .open = DRM(open),
125 .flush = DRM(flush),
126 .release = DRM(release),
127 .ioctl = DRM(ioctl),
128 .mmap = i830_mmap_buffers,
129 .read = DRM(read),
130 .fasync = DRM(fasync),
131 .poll = DRM(poll),
132 };
133
i830_mmap_buffers(struct file * filp,struct vm_area_struct * vma)134 int i830_mmap_buffers(struct file *filp, struct vm_area_struct *vma)
135 {
136 drm_file_t *priv = filp->private_data;
137 drm_device_t *dev;
138 drm_i830_private_t *dev_priv;
139 drm_buf_t *buf;
140 drm_i830_buf_priv_t *buf_priv;
141
142 lock_kernel();
143 dev = priv->dev;
144 dev_priv = dev->dev_private;
145 buf = dev_priv->mmap_buffer;
146 buf_priv = buf->dev_private;
147
148 vma->vm_flags |= (VM_IO | VM_DONTCOPY);
149 vma->vm_file = filp;
150
151 buf_priv->currently_mapped = I830_BUF_MAPPED;
152 unlock_kernel();
153
154 if (remap_page_range(DRM_RPR_ARG(vma) vma->vm_start,
155 VM_OFFSET(vma),
156 vma->vm_end - vma->vm_start,
157 vma->vm_page_prot)) return -EAGAIN;
158 return 0;
159 }
160
/* Map a DMA buffer into the calling process' address space.
 *
 * Temporarily swaps the fd's file_operations to i830_buffer_fops and
 * stages the buffer in dev_priv->mmap_buffer so that do_mmap() lands
 * in i830_mmap_buffers() for this buffer; both are restored before
 * mmap_sem is released.  Returns 0 on success or a negative errno.
 *
 * Note: the source text had "&current" corrupted to an HTML-entity
 * artifact; restored here. */
static int i830_map_buffer(drm_buf_t *buf, struct file *filp)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;
	drm_i830_buf_priv_t *buf_priv = buf->dev_private;
	drm_i830_private_t *dev_priv = dev->dev_private;
	struct file_operations *old_fops;
	int retcode = 0;

	if(buf_priv->currently_mapped == I830_BUF_MAPPED) return -EINVAL;

	down_write( &current->mm->mmap_sem );
	old_fops = filp->f_op;
	filp->f_op = &i830_buffer_fops;
	dev_priv->mmap_buffer = buf;
	buf_priv->virtual = (void *)do_mmap(filp, 0, buf->total,
					    PROT_READ|PROT_WRITE,
					    MAP_SHARED,
					    buf->bus_address);
	dev_priv->mmap_buffer = NULL;
	filp->f_op = old_fops;
	/* do_mmap() encodes a negative errno in the returned pointer on
	 * failure: any value in the last page of the address space
	 * (above -1024UL here) is an error code, not a mapping. */
	if ((unsigned long)buf_priv->virtual > -1024UL) {
		/* Real error */
		DRM_ERROR("mmap error\n");
		retcode = (signed int)buf_priv->virtual;
		buf_priv->virtual = 0;
	}
	up_write( &current->mm->mmap_sem );

	return retcode;
}
192
/* Undo i830_map_buffer(): munmap the buffer from the current process
 * and mark it unmapped.  Returns 0 or the negative errno from
 * do_munmap().
 *
 * Note: the source text had "&current" corrupted to an HTML-entity
 * artifact; restored here. */
static int i830_unmap_buffer(drm_buf_t *buf)
{
	drm_i830_buf_priv_t *buf_priv = buf->dev_private;
	int retcode = 0;

	if(buf_priv->currently_mapped != I830_BUF_MAPPED)
		return -EINVAL;

	down_write(&current->mm->mmap_sem);
	retcode = DO_MUNMAP(current->mm,
			    (unsigned long)buf_priv->virtual,
			    (size_t) buf->total);
	up_write(&current->mm->mmap_sem);

	buf_priv->currently_mapped = I830_BUF_UNMAPPED;
	buf_priv->virtual = 0;

	return retcode;
}
212
/* Hand a free DMA buffer to the client: grab one off the freelist,
 * map it into the caller's address space, and fill in the request
 * descriptor.  On mapping failure the buffer is returned to the
 * freelist.  Returns 0 or a negative errno. */
static int i830_dma_get_buffer(drm_device_t *dev, drm_i830_dma_t *d,
			       struct file *filp)
{
	drm_file_t *priv = filp->private_data;
	drm_i830_buf_priv_t *buf_priv;
	drm_buf_t *buf;
	int retcode;

	buf = i830_freelist_get(dev);
	if (buf == NULL) {
		retcode = -ENOMEM;
		DRM_DEBUG("retcode=%d\n", retcode);
		return retcode;
	}

	retcode = i830_map_buffer(buf, filp);
	if (retcode != 0) {
		i830_freelist_put(dev, buf);
		DRM_ERROR("mapbuf failed, retcode %d\n", retcode);
		return retcode;
	}

	buf->pid = priv->pid;
	buf_priv = buf->dev_private;
	d->granted = 1;
	d->request_idx = buf->idx;
	d->request_size = buf->total;
	d->virtual = buf_priv->virtual;

	return 0;
}
243
/* Tear down all DMA state: unmap the ring, release the hardware
 * status page, mask interrupts, free dev_private, and finally unmap
 * the per-buffer kernel mappings.  Safe to call when DMA was never
 * initialised (dev_private == NULL).  Always returns 0. */
static int i830_dma_cleanup(drm_device_t *dev)
{
	drm_device_dma_t *dma = dev->dma;

	if(dev->dev_private) {
		int i;
		drm_i830_private_t *dev_priv =
			(drm_i830_private_t *) dev->dev_private;

		if(dev_priv->ring.virtual_start) {
			DRM(ioremapfree)((void *) dev_priv->ring.virtual_start,
					 dev_priv->ring.Size, dev);
		}
		if(dev_priv->hw_status_page != 0UL) {
			pci_free_consistent(dev->pdev, PAGE_SIZE,
					    (void *)dev_priv->hw_status_page,
					    dev_priv->dma_status_page);
			/* Need to rewrite hardware status page */
			I830_WRITE(0x02080, 0x1ffff000);
		}

		/* Disable interrupts here because after dev_private
		 * is freed, it's too late.
		 */
		if (dev->irq) {
			I830_WRITE16( I830REG_INT_MASK_R, 0xffff );
			I830_WRITE16( I830REG_INT_ENABLE_R, 0x0 );
		}

		DRM(free)(dev->dev_private, sizeof(drm_i830_private_t),
			  DRM_MEM_DRIVER);
		dev->dev_private = NULL;

		/* The buffer mappings come from dma->buflist, not from
		 * dev_priv, so freeing them after dev_private is safe. */
		for (i = 0; i < dma->buf_count; i++) {
			drm_buf_t *buf = dma->buflist[ i ];
			drm_i830_buf_priv_t *buf_priv = buf->dev_private;
			DRM(ioremapfree)(buf_priv->kernel_virtual, buf->total, dev);
		}
	}
	return 0;
}
285
/* Busy-wait until at least n bytes of space are free in the LP ring.
 * A 3-second timeout guards against a wedged chip, but is re-armed
 * whenever the head pointer makes progress; on timeout we log a
 * lockup and give up.  Returns the number of poll iterations. */
int i830_wait_ring(drm_device_t *dev, int n, const char *caller)
{
	drm_i830_private_t *dev_priv = dev->dev_private;
	drm_i830_ring_buffer_t *ring = &(dev_priv->ring);
	int iters = 0;
	unsigned long end;
	unsigned int last_head = I830_READ(LP_RING + RING_HEAD) & HEAD_ADDR;

	end = jiffies + (HZ*3);
	while (ring->space < n) {
		ring->head = I830_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
		/* +8 keeps tail from catching head exactly (full==empty
		 * ambiguity in a circular buffer). */
		ring->space = ring->head - (ring->tail+8);
		if (ring->space < 0) ring->space += ring->Size;

		/* The chip consumed something: restart the timeout. */
		if (ring->head != last_head) {
			end = jiffies + (HZ*3);
			last_head = ring->head;
		}

		iters++;
		if(time_before(end, jiffies)) {
			DRM_ERROR("space: %d wanted %d\n", ring->space, n);
			DRM_ERROR("lockup\n");
			goto out_wait_ring;
		}
		udelay(1);
		/* Record the stall for the performance boxes. */
		dev_priv->sarea_priv->perf_boxes |= I830_BOX_WAIT;
	}

out_wait_ring:
	return iters;
}
318
/* Resynchronise our cached view of the LP ring (head, tail, free
 * space) with the hardware registers.  Flags the ring-empty event for
 * the performance boxes when the chip has fully caught up. */
static void i830_kernel_lost_context(drm_device_t *dev)
{
	drm_i830_private_t *dev_priv = dev->dev_private;
	drm_i830_ring_buffer_t *ring = &dev_priv->ring;

	ring->head = I830_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
	ring->tail = I830_READ(LP_RING + RING_TAIL) & TAIL_ADDR;
	ring->space = ring->head - (ring->tail + 8);
	if (ring->space < 0)
		ring->space += ring->Size;

	if (ring->head == ring->tail)
		dev_priv->sarea_priv->perf_boxes |= I830_BOX_RING_EMPTY;
}
332
/* Carve the per-buffer status words out of the hardware status page
 * and map each DMA buffer into kernel space.  Each buffer's in_use
 * word is one u32 in the status page starting at byte offset 36
 * (dword 9); 1019 of them is all that fits in the 4K page. */
static int i830_freelist_init(drm_device_t *dev, drm_i830_private_t *dev_priv)
{
	drm_device_dma_t *dma = dev->dma;
	int my_idx = 36;
	u32 *hw_status = (u32 *)(dev_priv->hw_status_page + my_idx);
	int i;

	if(dma->buf_count > 1019) {
		/* Not enough space in the status page for the freelist */
		return -EINVAL;
	}

	for (i = 0; i < dma->buf_count; i++) {
		drm_buf_t *buf = dma->buflist[ i ];
		drm_i830_buf_priv_t *buf_priv = buf->dev_private;

		buf_priv->in_use = hw_status++;
		buf_priv->my_use_idx = my_idx;	/* byte offset of in_use */
		my_idx += 4;

		*buf_priv->in_use = I830_BUF_FREE;

		/* Kernel-side mapping used when dispatching buffers. */
		buf_priv->kernel_virtual = DRM(ioremap)(buf->bus_address,
							buf->total, dev);
	}
	return 0;
}
360
/* One-time DMA initialisation for the I830_INIT_DMA ioctl: find the
 * SAREA/mmio/buffer maps, map the ring buffer, allocate and program
 * the hardware status page, and build the buffer freelist.  On every
 * failure path dev->dev_private is published first so
 * i830_dma_cleanup() can tear down whatever was built; a negative
 * errno is returned. */
static int i830_dma_initialize(drm_device_t *dev,
			       drm_i830_private_t *dev_priv,
			       drm_i830_init_t *init)
{
	struct list_head *list;

	memset(dev_priv, 0, sizeof(drm_i830_private_t));

	/* The SAREA is the SHM map flagged as containing the lock. */
	list_for_each(list, &dev->maplist->head) {
		drm_map_list_t *r_list = (drm_map_list_t *)list;
		if( r_list->map &&
		    r_list->map->type == _DRM_SHM &&
		    r_list->map->flags & _DRM_CONTAINS_LOCK ) {
			dev_priv->sarea_map = r_list->map;
			break;
		}
	}

	if(!dev_priv->sarea_map) {
		dev->dev_private = (void *)dev_priv;
		i830_dma_cleanup(dev);
		DRM_ERROR("can not find sarea!\n");
		return -EINVAL;
	}
	DRM_FIND_MAP( dev_priv->mmio_map, init->mmio_offset );
	if(!dev_priv->mmio_map) {
		dev->dev_private = (void *)dev_priv;
		i830_dma_cleanup(dev);
		DRM_ERROR("can not find mmio map!\n");
		return -EINVAL;
	}
	DRM_FIND_MAP( dev_priv->buffer_map, init->buffers_offset );
	if(!dev_priv->buffer_map) {
		dev->dev_private = (void *)dev_priv;
		i830_dma_cleanup(dev);
		DRM_ERROR("can not find dma buffer map!\n");
		return -EINVAL;
	}

	/* Driver-private region lives inside the SAREA. */
	dev_priv->sarea_priv = (drm_i830_sarea_t *)
		((u8 *)dev_priv->sarea_map->handle +
		 init->sarea_priv_offset);

	dev_priv->ring.Start = init->ring_start;
	dev_priv->ring.End = init->ring_end;
	dev_priv->ring.Size = init->ring_size;

	/* The ring lives in AGP space; map it for CPU access. */
	dev_priv->ring.virtual_start = DRM(ioremap)(dev->agp->base +
						    init->ring_start,
						    init->ring_size, dev);

	if (dev_priv->ring.virtual_start == NULL) {
		dev->dev_private = (void *) dev_priv;
		i830_dma_cleanup(dev);
		DRM_ERROR("can not ioremap virtual address for"
			  " ring buffer\n");
		return -ENOMEM;
	}

	/* NOTE(review): assumes ring Size is a power of two so this
	 * works as a wraparound mask — confirm against the init caller. */
	dev_priv->ring.tail_mask = dev_priv->ring.Size - 1;

	dev_priv->w = init->w;
	dev_priv->h = init->h;
	dev_priv->pitch = init->pitch;
	dev_priv->back_offset = init->back_offset;
	dev_priv->depth_offset = init->depth_offset;
	dev_priv->front_offset = init->front_offset;

	/* Precomputed buffer-address dwords (offset | pitch bits) that
	 * i830EmitDestVerified() checks client state against. */
	dev_priv->front_di1 = init->front_offset | init->pitch_bits;
	dev_priv->back_di1 = init->back_offset | init->pitch_bits;
	dev_priv->zi1 = init->depth_offset | init->pitch_bits;

	DRM_DEBUG("front_di1 %x\n", dev_priv->front_di1);
	DRM_DEBUG("back_offset %x\n", dev_priv->back_offset);
	DRM_DEBUG("back_di1 %x\n", dev_priv->back_di1);
	DRM_DEBUG("pitch_bits %x\n", init->pitch_bits);

	dev_priv->cpp = init->cpp;
	/* We are using seperate values as placeholders for mechanisms for
	 * private backbuffer/depthbuffer usage.
	 */

	dev_priv->back_pitch = init->back_pitch;
	dev_priv->depth_pitch = init->depth_pitch;
	dev_priv->do_boxes = 0;
	dev_priv->use_mi_batchbuffer_start = 0;

	/* Program Hardware Status Page */
	dev_priv->hw_status_page =
		(unsigned long) pci_alloc_consistent(dev->pdev, PAGE_SIZE,
						     &dev_priv->dma_status_page);
	if(dev_priv->hw_status_page == 0UL) {
		dev->dev_private = (void *)dev_priv;
		i830_dma_cleanup(dev);
		DRM_ERROR("Can not allocate hardware status page\n");
		return -ENOMEM;
	}
	memset((void *) dev_priv->hw_status_page, 0, PAGE_SIZE);
	DRM_DEBUG("hw status page @ %lx\n", dev_priv->hw_status_page);

	/* Tell the chip where the status page lives (register 0x2080,
	 * presumably the hardware status page address — see i8xx docs). */
	I830_WRITE(0x02080, dev_priv->dma_status_page);
	DRM_DEBUG("Enabled hardware status page\n");

	/* Now we need to init our freelist */
	if(i830_freelist_init(dev, dev_priv) != 0) {
		dev->dev_private = (void *)dev_priv;
		i830_dma_cleanup(dev);
		DRM_ERROR("Not enough space in the status page for"
			  " the freelist\n");
		return -ENOMEM;
	}
	dev->dev_private = (void *)dev_priv;

	return 0;
}
476
i830_dma_init(struct inode * inode,struct file * filp,unsigned int cmd,unsigned long arg)477 int i830_dma_init(struct inode *inode, struct file *filp,
478 unsigned int cmd, unsigned long arg)
479 {
480 drm_file_t *priv = filp->private_data;
481 drm_device_t *dev = priv->dev;
482 drm_i830_private_t *dev_priv;
483 drm_i830_init_t init;
484 int retcode = 0;
485
486 if (copy_from_user(&init, (drm_i830_init_t *)arg, sizeof(init)))
487 return -EFAULT;
488
489 switch(init.func) {
490 case I830_INIT_DMA:
491 dev_priv = DRM(alloc)(sizeof(drm_i830_private_t),
492 DRM_MEM_DRIVER);
493 if(dev_priv == NULL) return -ENOMEM;
494 retcode = i830_dma_initialize(dev, dev_priv, &init);
495 break;
496 case I830_CLEANUP_DMA:
497 retcode = i830_dma_cleanup(dev);
498 break;
499 default:
500 retcode = -EINVAL;
501 break;
502 }
503
504 return retcode;
505 }
506
/* Polygon-stipple command and the enable/pattern bits of its
 * argument dword (used by i830EmitStippleVerified()). */
#define GFX_OP_STIPPLE           ((0x3<<29)|(0x1d<<24)|(0x83<<16))
#define ST1_ENABLE               (1<<16)
#define ST1_MASK                 (0xffff)
510
/* Most efficient way to verify state for the i830 is as it is
 * emitted. Non-conformant state is silently dropped.
 */
static void i830EmitContextVerified( drm_device_t *dev,
				     unsigned int *code )
{
	drm_i830_private_t *dev_priv = dev->dev_private;
	int i, j = 0;		/* j counts dwords actually emitted */
	unsigned int tmp;
	RING_LOCALS;

	BEGIN_LP_RING( I830_CTX_SETUP_SIZE + 4 );

	for ( i = 0 ; i < I830_CTXREG_BLENDCOLR0 ; i++ ) {
		tmp = code[i];
		/* Only pass through 3D-client commands with an opcode
		 * below 0x1d; anything else from userspace is dropped. */
		if ((tmp & (7<<29)) == CMD_3D &&
		    (tmp & (0x1f<<24)) < (0x1d<<24)) {
			OUT_RING( tmp );
			j++;
		} else {
			DRM_ERROR("Skipping %d\n", i);
		}
	}

	/* The command dwords here are kernel-supplied; only the
	 * argument values come from the client. */
	OUT_RING( STATE3D_CONST_BLEND_COLOR_CMD );
	OUT_RING( code[I830_CTXREG_BLENDCOLR] );
	j += 2;

	for ( i = I830_CTXREG_VF ; i < I830_CTXREG_MCSB0 ; i++ ) {
		tmp = code[i];
		if ((tmp & (7<<29)) == CMD_3D &&
		    (tmp & (0x1f<<24)) < (0x1d<<24)) {
			OUT_RING( tmp );
			j++;
		} else {
			DRM_ERROR("Skipping %d\n", i);
		}
	}

	OUT_RING( STATE3D_MAP_COORD_SETBIND_CMD );
	OUT_RING( code[I830_CTXREG_MCSB1] );
	j += 2;

	/* Pad to an even number of dwords. */
	if (j & 1)
		OUT_RING( 0 );

	ADVANCE_LP_RING();
}
559
/* Replay client texture state onto the ring after checking that the
 * leading dword is either a GFX_OP_MAP_INFO packet or a
 * LOAD_STATE_IMMEDIATE_2 texture-load packet; otherwise the whole
 * block is rejected with a printk. */
static void i830EmitTexVerified( drm_device_t *dev, unsigned int *code )
{
	drm_i830_private_t *dev_priv = dev->dev_private;
	int i, j = 0;
	unsigned int tmp;
	RING_LOCALS;

	if (code[I830_TEXREG_MI0] == GFX_OP_MAP_INFO ||
	    (code[I830_TEXREG_MI0] & ~(0xf*LOAD_TEXTURE_MAP0)) ==
	    (STATE3D_LOAD_STATE_IMMEDIATE_2|4)) {

		BEGIN_LP_RING( I830_TEX_SETUP_SIZE );

		OUT_RING( code[I830_TEXREG_MI0] ); /* TM0LI */
		OUT_RING( code[I830_TEXREG_MI1] ); /* TM0S0 */
		OUT_RING( code[I830_TEXREG_MI2] ); /* TM0S1 */
		OUT_RING( code[I830_TEXREG_MI3] ); /* TM0S2 */
		OUT_RING( code[I830_TEXREG_MI4] ); /* TM0S3 */
		OUT_RING( code[I830_TEXREG_MI5] ); /* TM0S4 */

		/* Remaining dwords are copied through unchecked. */
		for ( i = 6 ; i < I830_TEX_SETUP_SIZE ; i++ ) {
			tmp = code[i];
			OUT_RING( tmp );
			j++;
		}

		/* Pad to an even dword count. */
		if (j & 1)
			OUT_RING( 0 );

		ADVANCE_LP_RING();
	}
	else
		printk("rejected packet %x\n", code[0]);
}
594
/* Copy num texture-blend state dwords from the client straight onto
 * the ring, padding to an even dword count.  No-op when num is 0. */
static void i830EmitTexBlendVerified( drm_device_t *dev,
				      unsigned int *code,
				      unsigned int num)
{
	drm_i830_private_t *dev_priv = dev->dev_private;
	unsigned int k;
	RING_LOCALS;

	if (num == 0)
		return;

	BEGIN_LP_RING( num + 1 );

	for (k = 0; k < num; k++)
		OUT_RING(code[k]);

	/* Pad to an even number of dwords. */
	if (num & 1)
		OUT_RING(0);

	ADVANCE_LP_RING();
}
620
/* Load a 256-entry texture palette.  Deliberately stubbed out by the
 * early return below; even the dead code after it never calls
 * ADVANCE_LP_RING (see the existing KW comment), so it would be a
 * no-op anyway. */
static void i830EmitTexPalette( drm_device_t *dev,
				unsigned int *palette,
				int number,
				int is_shared )
{
	drm_i830_private_t *dev_priv = dev->dev_private;
	int i;
	RING_LOCALS;

	return; /* Is this right ? -- Arjan */

	BEGIN_LP_RING( 258 );

	if(is_shared == 1) {
		OUT_RING(CMD_OP_MAP_PALETTE_LOAD |
			 MAP_PALETTE_NUM(0) |
			 MAP_PALETTE_BOTH);
	} else {
		OUT_RING(CMD_OP_MAP_PALETTE_LOAD | MAP_PALETTE_NUM(number));
	}
	for(i = 0; i < 256; i++) {
		OUT_RING(palette[i]);
	}
	OUT_RING(0);
	/* KW:  WHERE IS THE ADVANCE_LP_RING?  This is effectively a noop!
	 */
}
648
/* Need to do some additional checking when setting the dest buffer.
 */
static void i830EmitDestVerified( drm_device_t *dev,
				  unsigned int *code )
{
	drm_i830_private_t *dev_priv = dev->dev_private;
	unsigned int tmp;
	RING_LOCALS;

	BEGIN_LP_RING( I830_DEST_SETUP_SIZE + 10 );

	/* Only accept the colour-buffer address if it matches exactly
	 * the front or back buffer dword computed at init time. */
	tmp = code[I830_DESTREG_CBUFADDR];
	if (tmp == dev_priv->front_di1 || tmp == dev_priv->back_di1) {
		/* NOTE(review): pads so the DESTBUFFER_INFO packet lands
		 * 16-byte aligned in the ring — presumably a hardware
		 * requirement; confirm against i830 docs. */
		if (((int)outring) & 8) {
			OUT_RING(0);
			OUT_RING(0);
		}

		OUT_RING( CMD_OP_DESTBUFFER_INFO );
		OUT_RING( BUF_3D_ID_COLOR_BACK |
			  BUF_3D_PITCH(dev_priv->back_pitch * dev_priv->cpp) |
			  BUF_3D_USE_FENCE);
		OUT_RING( tmp );
		OUT_RING( 0 );

		OUT_RING( CMD_OP_DESTBUFFER_INFO );
		OUT_RING( BUF_3D_ID_DEPTH | BUF_3D_USE_FENCE |
			  BUF_3D_PITCH(dev_priv->depth_pitch * dev_priv->cpp));
		OUT_RING( dev_priv->zi1 );
		OUT_RING( 0 );
	} else {
		DRM_ERROR("bad di1 %x (allow %x or %x)\n",
			  tmp, dev_priv->front_di1, dev_priv->back_di1);
	}

	/* invarient:
	 */

	OUT_RING( GFX_OP_DESTBUFFER_VARS );
	OUT_RING( code[I830_DESTREG_DV1] );

	OUT_RING( GFX_OP_DRAWRECT_INFO );
	OUT_RING( code[I830_DESTREG_DR1] );
	OUT_RING( code[I830_DESTREG_DR2] );
	OUT_RING( code[I830_DESTREG_DR3] );
	OUT_RING( code[I830_DESTREG_DR4] );

	/* Need to verify this */
	tmp = code[I830_DESTREG_SENABLE];
	if((tmp & ~0x3) == GFX_OP_SCISSOR_ENABLE) {
		OUT_RING( tmp );
	} else {
		DRM_ERROR("bad scissor enable\n");
		OUT_RING( 0 );
	}

	OUT_RING( GFX_OP_SCISSOR_RECT );
	OUT_RING( code[I830_DESTREG_SR1] );
	OUT_RING( code[I830_DESTREG_SR2] );
	OUT_RING( 0 );

	ADVANCE_LP_RING();
}
714
/* Emit the polygon-stipple state: the command dword is the
 * kernel-supplied GFX_OP_STIPPLE; only the argument word in code[1]
 * comes from the client. */
static void i830EmitStippleVerified( drm_device_t *dev,
				     unsigned int *code )
{
	drm_i830_private_t *dev_priv = dev->dev_private;
	RING_LOCALS;

	BEGIN_LP_RING( 2 );
	OUT_RING( GFX_OP_STIPPLE );
	OUT_RING( code[1] );
	ADVANCE_LP_RING();
}
726
727
/* Flush all dirty state from the SAREA to the ring: each
 * I830_UPLOAD_* bit is handled by its Emit*Verified helper and then
 * cleared from sarea_priv->dirty once emitted. */
static void i830EmitState( drm_device_t *dev )
{
	drm_i830_private_t *dev_priv = dev->dev_private;
	drm_i830_sarea_t *sarea_priv = dev_priv->sarea_priv;
	unsigned int dirty = sarea_priv->dirty;

	DRM_DEBUG("%s %x\n", __FUNCTION__, dirty);

	if (dirty & I830_UPLOAD_BUFFERS) {
		i830EmitDestVerified( dev, sarea_priv->BufferState );
		sarea_priv->dirty &= ~I830_UPLOAD_BUFFERS;
	}

	if (dirty & I830_UPLOAD_CTX) {
		i830EmitContextVerified( dev, sarea_priv->ContextState );
		sarea_priv->dirty &= ~I830_UPLOAD_CTX;
	}

	if (dirty & I830_UPLOAD_TEX0) {
		i830EmitTexVerified( dev, sarea_priv->TexState[0] );
		sarea_priv->dirty &= ~I830_UPLOAD_TEX0;
	}

	if (dirty & I830_UPLOAD_TEX1) {
		i830EmitTexVerified( dev, sarea_priv->TexState[1] );
		sarea_priv->dirty &= ~I830_UPLOAD_TEX1;
	}

	if (dirty & I830_UPLOAD_TEXBLEND0) {
		i830EmitTexBlendVerified( dev, sarea_priv->TexBlendState[0],
					  sarea_priv->TexBlendStateWordsUsed[0]);
		sarea_priv->dirty &= ~I830_UPLOAD_TEXBLEND0;
	}

	if (dirty & I830_UPLOAD_TEXBLEND1) {
		i830EmitTexBlendVerified( dev, sarea_priv->TexBlendState[1],
					  sarea_priv->TexBlendStateWordsUsed[1]);
		sarea_priv->dirty &= ~I830_UPLOAD_TEXBLEND1;
	}

	if (dirty & I830_UPLOAD_TEX_PALETTE_SHARED) {
		/* NOTE(review): unlike every other case, the shared
		 * palette dirty bit is never cleared here. */
		i830EmitTexPalette(dev, sarea_priv->Palette[0], 0, 1);
	} else {
		if (dirty & I830_UPLOAD_TEX_PALETTE_N(0)) {
			i830EmitTexPalette(dev, sarea_priv->Palette[0], 0, 0);
			sarea_priv->dirty &= ~I830_UPLOAD_TEX_PALETTE_N(0);
		}
		if (dirty & I830_UPLOAD_TEX_PALETTE_N(1)) {
			i830EmitTexPalette(dev, sarea_priv->Palette[1], 1, 0);
			sarea_priv->dirty &= ~I830_UPLOAD_TEX_PALETTE_N(1);
		}

		/* 1.3:
		 */
#if 0
		if (dirty & I830_UPLOAD_TEX_PALETTE_N(2)) {
			i830EmitTexPalette(dev, sarea_priv->Palette2[0], 0, 0);
			sarea_priv->dirty &= ~I830_UPLOAD_TEX_PALETTE_N(2);
		}
		if (dirty & I830_UPLOAD_TEX_PALETTE_N(3)) {
			i830EmitTexPalette(dev, sarea_priv->Palette2[1], 1, 0);
			/* NOTE(review): clears N(2) again — looks like a
			 * copy-paste slip for N(3) (dead code under #if 0). */
			sarea_priv->dirty &= ~I830_UPLOAD_TEX_PALETTE_N(2);
		}
#endif
	}

	/* 1.3:
	 */
	if (dirty & I830_UPLOAD_STIPPLE) {
		i830EmitStippleVerified( dev,
					 sarea_priv->StippleState);
		sarea_priv->dirty &= ~I830_UPLOAD_STIPPLE;
	}

	if (dirty & I830_UPLOAD_TEX2) {
		i830EmitTexVerified( dev, sarea_priv->TexState2 );
		sarea_priv->dirty &= ~I830_UPLOAD_TEX2;
	}

	if (dirty & I830_UPLOAD_TEX3) {
		i830EmitTexVerified( dev, sarea_priv->TexState3 );
		sarea_priv->dirty &= ~I830_UPLOAD_TEX3;
	}


	if (dirty & I830_UPLOAD_TEXBLEND2) {
		i830EmitTexBlendVerified(
			dev,
			sarea_priv->TexBlendState2,
			sarea_priv->TexBlendStateWordsUsed2);

		sarea_priv->dirty &= ~I830_UPLOAD_TEXBLEND2;
	}

	if (dirty & I830_UPLOAD_TEXBLEND3) {
		i830EmitTexBlendVerified(
			dev,
			sarea_priv->TexBlendState3,
			sarea_priv->TexBlendStateWordsUsed3);
		sarea_priv->dirty &= ~I830_UPLOAD_TEXBLEND3;
	}
}
830
831 /* ================================================================
832 * Performance monitoring functions
833 */
834
/* Blit a solid w x h rectangle at (x,y) — offset by cliprect 0 — in
 * colour (r,g,b), into the buffer currently being rendered to (front
 * if a page flip made page 1 current, back otherwise).  Used only for
 * the on-screen performance boxes. */
static void i830_fill_box( drm_device_t *dev,
			   int x, int y, int w, int h,
			   int r, int g, int b )
{
	drm_i830_private_t *dev_priv = dev->dev_private;
	u32 color;
	unsigned int BR13, CMD;
	RING_LOCALS;

	/* Raster op 0xF0 and destination pitch; (1<<25) is added below
	 * for the 32bpp case. */
	BR13 = (0xF0 << 16) | (dev_priv->pitch * dev_priv->cpp) | (1<<24);
	CMD = XY_COLOR_BLT_CMD;
	x += dev_priv->sarea_priv->boxes[0].x1;
	y += dev_priv->sarea_priv->boxes[0].y1;

	if (dev_priv->cpp == 4) {
		BR13 |= (1<<25);
		CMD |= (XY_COLOR_BLT_WRITE_ALPHA | XY_COLOR_BLT_WRITE_RGB);
		color = (((0xff) << 24) | (r << 16) | (g << 8) | b);	/* xRGB8888 */
	} else {
		color = (((r & 0xf8) << 8) |
			 ((g & 0xfc) << 3) |
			 ((b & 0xf8) >> 3));	/* RGB565 */
	}

	BEGIN_LP_RING( 6 );
	OUT_RING( CMD );
	OUT_RING( BR13 );
	OUT_RING( (y << 16) | x );
	OUT_RING( ((y+h) << 16) | (x+w) );

	/* Target the render buffer, which swaps when page-flipped. */
	if ( dev_priv->current_page == 1 ) {
		OUT_RING( dev_priv->front_offset );
	} else {
		OUT_RING( dev_priv->back_offset );
	}

	OUT_RING( color );
	ADVANCE_LP_RING();
}
874
/* Draw the diagnostic "performance boxes": one coloured box per
 * I830_BOX_* event flag accumulated since the last swap, plus a bar
 * proportional to DMA bytes consumed.  The accumulated flags and the
 * dma_used counter are reset afterwards. */
static void i830_cp_performance_boxes( drm_device_t *dev )
{
	drm_i830_private_t *dev_priv = dev->dev_private;

	/* Purple box for page flipping
	 */
	if ( dev_priv->sarea_priv->perf_boxes & I830_BOX_FLIP )
		i830_fill_box( dev, 4, 4, 8, 8, 255, 0, 255 );

	/* Red box if we have to wait for idle at any point
	 */
	if ( dev_priv->sarea_priv->perf_boxes & I830_BOX_WAIT )
		i830_fill_box( dev, 16, 4, 8, 8, 255, 0, 0 );

	/* Blue box: lost context?
	 */
	if ( dev_priv->sarea_priv->perf_boxes & I830_BOX_LOST_CONTEXT )
		i830_fill_box( dev, 28, 4, 8, 8, 0, 0, 255 );

	/* Yellow box for texture swaps
	 */
	if ( dev_priv->sarea_priv->perf_boxes & I830_BOX_TEXTURE_LOAD )
		i830_fill_box( dev, 40, 4, 8, 8, 255, 255, 0 );

	/* Green box if hardware never idles (as far as we can tell)
	 */
	if ( !(dev_priv->sarea_priv->perf_boxes & I830_BOX_RING_EMPTY) )
		i830_fill_box( dev, 64, 4, 8, 8, 0, 255, 0 );


	/* Draw bars indicating number of buffers allocated
	 * (not a great measure, easily confused)
	 */
	if (dev_priv->dma_used) {
		int bar = dev_priv->dma_used / 10240;	/* 10K per pixel */
		if (bar > 100) bar = 100;
		if (bar < 1) bar = 1;
		i830_fill_box( dev, 4, 16, bar, 4, 196, 128, 128 );
		dev_priv->dma_used = 0;
	}

	dev_priv->sarea_priv->perf_boxes = 0;
}
918
/* Clear the requested buffers (front/back/depth) within each valid
 * cliprect using solid-colour XY blits.  When page 1 is current the
 * FRONT/BACK flags are swapped so the flags always name the physical
 * buffers the user expects after a flip. */
static void i830_dma_dispatch_clear( drm_device_t *dev, int flags,
				     unsigned int clear_color,
				     unsigned int clear_zval,
				     unsigned int clear_depthmask)
{
	drm_i830_private_t *dev_priv = dev->dev_private;
	drm_i830_sarea_t *sarea_priv = dev_priv->sarea_priv;
	int nbox = sarea_priv->nbox;
	drm_clip_rect_t *pbox = sarea_priv->boxes;
	int pitch = dev_priv->pitch;
	int cpp = dev_priv->cpp;
	int i;
	unsigned int BR13, CMD, D_CMD;
	RING_LOCALS;


	/* After a page flip, swap which buffer FRONT/BACK refer to. */
	if ( dev_priv->current_page == 1 ) {
		unsigned int tmp = flags;

		flags &= ~(I830_FRONT | I830_BACK);
		if ( tmp & I830_FRONT ) flags |= I830_BACK;
		if ( tmp & I830_BACK ) flags |= I830_FRONT;
	}

	i830_kernel_lost_context(dev);

	/* Choose blit command/BR13 per colour depth. */
	switch(cpp) {
	case 2:
		BR13 = (0xF0 << 16) | (pitch * cpp) | (1<<24);
		D_CMD = CMD = XY_COLOR_BLT_CMD;
		break;
	case 4:
		BR13 = (0xF0 << 16) | (pitch * cpp) | (1<<24) | (1<<25);
		CMD = (XY_COLOR_BLT_CMD | XY_COLOR_BLT_WRITE_ALPHA |
		       XY_COLOR_BLT_WRITE_RGB);
		/* Honour the rgb/alpha halves of the depth write mask. */
		D_CMD = XY_COLOR_BLT_CMD;
		if(clear_depthmask & 0x00ffffff)
			D_CMD |= XY_COLOR_BLT_WRITE_RGB;
		if(clear_depthmask & 0xff000000)
			D_CMD |= XY_COLOR_BLT_WRITE_ALPHA;
		break;
	default:
		BR13 = (0xF0 << 16) | (pitch * cpp) | (1<<24);
		D_CMD = CMD = XY_COLOR_BLT_CMD;
		break;
	}

	if (nbox > I830_NR_SAREA_CLIPRECTS)
		nbox = I830_NR_SAREA_CLIPRECTS;

	for (i = 0 ; i < nbox ; i++, pbox++) {
		/* Skip malformed or off-screen cliprects. */
		if (pbox->x1 > pbox->x2 ||
		    pbox->y1 > pbox->y2 ||
		    pbox->x2 > dev_priv->w ||
		    pbox->y2 > dev_priv->h)
			continue;

		if ( flags & I830_FRONT ) {
			DRM_DEBUG("clear front\n");
			BEGIN_LP_RING( 6 );
			OUT_RING( CMD );
			OUT_RING( BR13 );
			OUT_RING( (pbox->y1 << 16) | pbox->x1 );
			OUT_RING( (pbox->y2 << 16) | pbox->x2 );
			OUT_RING( dev_priv->front_offset );
			OUT_RING( clear_color );
			ADVANCE_LP_RING();
		}

		if ( flags & I830_BACK ) {
			DRM_DEBUG("clear back\n");
			BEGIN_LP_RING( 6 );
			OUT_RING( CMD );
			OUT_RING( BR13 );
			OUT_RING( (pbox->y1 << 16) | pbox->x1 );
			OUT_RING( (pbox->y2 << 16) | pbox->x2 );
			OUT_RING( dev_priv->back_offset );
			OUT_RING( clear_color );
			ADVANCE_LP_RING();
		}

		if ( flags & I830_DEPTH ) {
			DRM_DEBUG("clear depth\n");
			BEGIN_LP_RING( 6 );
			OUT_RING( D_CMD );
			OUT_RING( BR13 );
			OUT_RING( (pbox->y1 << 16) | pbox->x1 );
			OUT_RING( (pbox->y2 << 16) | pbox->x2 );
			OUT_RING( dev_priv->depth_offset );
			OUT_RING( clear_zval );
			ADVANCE_LP_RING();
		}
	}
}
1013
/* Copy the back buffer to the front buffer (sources and destinations
 * swap when a page flip has made page 1 current) for every valid
 * cliprect, using XY_SRC_COPY blits.  Also draws the performance
 * boxes first when enabled. */
static void i830_dma_dispatch_swap( drm_device_t *dev )
{
	drm_i830_private_t *dev_priv = dev->dev_private;
	drm_i830_sarea_t *sarea_priv = dev_priv->sarea_priv;
	int nbox = sarea_priv->nbox;
	drm_clip_rect_t *pbox = sarea_priv->boxes;
	int pitch = dev_priv->pitch;
	int cpp = dev_priv->cpp;
	int i;
	unsigned int CMD, BR13;
	RING_LOCALS;

	DRM_DEBUG("swapbuffers\n");

	i830_kernel_lost_context(dev);

	if (dev_priv->do_boxes)
		i830_cp_performance_boxes( dev );

	/* Raster op 0xCC (straight source copy) at the right depth. */
	switch(cpp) {
	case 2:
		BR13 = (pitch * cpp) | (0xCC << 16) | (1<<24);
		CMD = XY_SRC_COPY_BLT_CMD;
		break;
	case 4:
		BR13 = (pitch * cpp) | (0xCC << 16) | (1<<24) | (1<<25);
		CMD = (XY_SRC_COPY_BLT_CMD | XY_SRC_COPY_BLT_WRITE_ALPHA |
		       XY_SRC_COPY_BLT_WRITE_RGB);
		break;
	default:
		BR13 = (pitch * cpp) | (0xCC << 16) | (1<<24);
		CMD = XY_SRC_COPY_BLT_CMD;
		break;
	}


	if (nbox > I830_NR_SAREA_CLIPRECTS)
		nbox = I830_NR_SAREA_CLIPRECTS;

	for (i = 0 ; i < nbox; i++, pbox++)
	{
		/* Skip malformed or off-screen cliprects. */
		if (pbox->x1 > pbox->x2 ||
		    pbox->y1 > pbox->y2 ||
		    pbox->x2 > dev_priv->w ||
		    pbox->y2 > dev_priv->h)
			continue;

		DRM_DEBUG("dispatch swap %d,%d-%d,%d!\n",
			  pbox->x1, pbox->y1,
			  pbox->x2, pbox->y2);

		BEGIN_LP_RING( 8 );
		OUT_RING( CMD );
		OUT_RING( BR13 );
		OUT_RING( (pbox->y1 << 16) | pbox->x1 );
		OUT_RING( (pbox->y2 << 16) | pbox->x2 );

		/* Destination: the displayed buffer. */
		if (dev_priv->current_page == 0)
			OUT_RING( dev_priv->front_offset );
		else
			OUT_RING( dev_priv->back_offset );

		OUT_RING( (pbox->y1 << 16) | pbox->x1 );
		OUT_RING( BR13 & 0xffff );	/* source pitch only */

		/* Source: the buffer just rendered to. */
		if (dev_priv->current_page == 0)
			OUT_RING( dev_priv->back_offset );
		else
			OUT_RING( dev_priv->front_offset );

		ADVANCE_LP_RING();
	}
}
1087
/* Queue an asynchronous page flip: flush the map cache, point the
 * display base at the other buffer, then wait for the plane A flip
 * event.  Toggles current_page and mirrors it into the SAREA so
 * userspace can track which page is displayed. */
static void i830_dma_dispatch_flip( drm_device_t *dev )
{
	drm_i830_private_t *dev_priv = dev->dev_private;
	RING_LOCALS;

	DRM_DEBUG( "%s: page=%d pfCurrentPage=%d\n",
		   __FUNCTION__,
		   dev_priv->current_page,
		   dev_priv->sarea_priv->pf_current_page);

	i830_kernel_lost_context(dev);

	if (dev_priv->do_boxes) {
		dev_priv->sarea_priv->perf_boxes |= I830_BOX_FLIP;
		i830_cp_performance_boxes( dev );
	}


	/* Flush before changing the display base address. */
	BEGIN_LP_RING( 2 );
	OUT_RING( INST_PARSER_CLIENT | INST_OP_FLUSH | INST_FLUSH_MAP_CACHE );
	OUT_RING( 0 );
	ADVANCE_LP_RING();

	BEGIN_LP_RING( 6 );
	OUT_RING( CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP );
	OUT_RING( 0 );
	/* Display the buffer we just finished rendering to. */
	if ( dev_priv->current_page == 0 ) {
		OUT_RING( dev_priv->back_offset );
		dev_priv->current_page = 1;
	} else {
		OUT_RING( dev_priv->front_offset );
		dev_priv->current_page = 0;
	}
	OUT_RING(0);
	ADVANCE_LP_RING();


	BEGIN_LP_RING( 2 );
	OUT_RING( MI_WAIT_FOR_EVENT |
		  MI_WAIT_FOR_PLANE_A_FLIP );
	OUT_RING( 0 );
	ADVANCE_LP_RING();


	dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;
}
1134
/* Emit a client vertex buffer to the hardware.
 *
 * The DMA buffer holds 'used' bytes of vertex commands at AGP offset
 * 'start'.  The batch is executed once per SAREA cliprect; note the
 * do/while still runs the batch once even when nbox == 0.  If 'discard'
 * is set the buffer is returned to the free pool once the hardware has
 * consumed it, via CMD_STORE_DWORD_IDX writes into the hw status page.
 */
static void i830_dma_dispatch_vertex(drm_device_t *dev,
				     drm_buf_t *buf,
				     int discard,
				     int used)
{
	drm_i830_private_t *dev_priv = dev->dev_private;
	drm_i830_buf_priv_t *buf_priv = buf->dev_private;
	drm_i830_sarea_t *sarea_priv = dev_priv->sarea_priv;
	drm_clip_rect_t *box = sarea_priv->boxes;
	int nbox = sarea_priv->nbox;
	unsigned long address = (unsigned long)buf->bus_address;
	/* Card-relative start offset of the batch within the AGP aperture. */
	unsigned long start = address - dev->agp->base;
	int i = 0, u;
	RING_LOCALS;

	i830_kernel_lost_context(dev);

	if (nbox > I830_NR_SAREA_CLIPRECTS)
		nbox = I830_NR_SAREA_CLIPRECTS;

	/* Atomically take the buffer from the client for the hardware.
	 * If the client did not own it, state is out of sync; just log.
	 */
	if (discard) {
		u = cmpxchg(buf_priv->in_use, I830_BUF_CLIENT,
			    I830_BUF_HARDWARE);
		if(u != I830_BUF_CLIENT) {
			DRM_DEBUG("xxxx 2\n");
		}
	}

	/* NOTE(review): oversized batches are dropped entirely (used = 0),
	 * not truncated — presumably a guard against ring overrun; the
	 * exact 4*1023 limit should be confirmed against the buffer size.
	 */
	if (used > 4*1023)
		used = 0;

	if (sarea_priv->dirty)
	   i830EmitState( dev );

	DRM_DEBUG("dispatch vertex addr 0x%lx, used 0x%x nbox %d\n",
		  address, used, nbox);

	dev_priv->counter++;
	DRM_DEBUG( "dispatch counter : %ld\n", dev_priv->counter);
	DRM_DEBUG( "i830_dma_dispatch\n");
	DRM_DEBUG( "start : %lx\n", start);
	DRM_DEBUG( "used : %d\n", used);
	DRM_DEBUG( "start + used - 4 : %ld\n", start + used - 4);

	if (buf_priv->currently_mapped == I830_BUF_MAPPED) {
		u32 *vp = buf_priv->virtual;

		/* Patch the primitive header in place: opcode, primitive
		 * type, and dword length field ((used/4) - 2).
		 */
		vp[0] = (GFX_OP_PRIMITIVE |
			 sarea_priv->vertex_prim |
			 ((used/4)-2));

		/* MI_BATCH_BUFFER_START batches must be terminated
		 * explicitly; MI_BATCH_BUFFER gets an end address instead.
		 */
		if (dev_priv->use_mi_batchbuffer_start) {
			vp[used/4] = MI_BATCH_BUFFER_END;
			used += 4;
		}

		/* Pad to qword alignment with a noop dword. */
		if (used & 4) {
			vp[used/4] = 0;
			used += 4;
		}

		i830_unmap_buffer(buf);
	}

	if (used) {
		do {
			/* Program the drawing rectangle for this cliprect
			 * before executing the batch.
			 */
			if (i < nbox) {
				BEGIN_LP_RING(6);
				OUT_RING( GFX_OP_DRAWRECT_INFO );
				OUT_RING( sarea_priv->BufferState[I830_DESTREG_DR1] );
				OUT_RING( box[i].x1 | (box[i].y1<<16) );
				OUT_RING( box[i].x2 | (box[i].y2<<16) );
				OUT_RING( sarea_priv->BufferState[I830_DESTREG_DR4] );
				OUT_RING( 0 );
				ADVANCE_LP_RING();
			}

			if (dev_priv->use_mi_batchbuffer_start) {
				BEGIN_LP_RING(2);
				OUT_RING( MI_BATCH_BUFFER_START | (2<<6) );
				OUT_RING( start | MI_BATCH_NON_SECURE );
				ADVANCE_LP_RING();
			}
			else {
				BEGIN_LP_RING(4);
				OUT_RING( MI_BATCH_BUFFER );
				OUT_RING( start | MI_BATCH_NON_SECURE );
				OUT_RING( start + used - 4 );
				OUT_RING( 0 );
				ADVANCE_LP_RING();
			}

		} while (++i < nbox);
	}

	if (discard) {
		dev_priv->counter++;

		/* Re-assert hardware ownership (may already have been done
		 * above); the final transition to I830_BUF_FREE happens
		 * from the ring, below, once the batch has executed.
		 */
		(void) cmpxchg(buf_priv->in_use, I830_BUF_CLIENT,
			       I830_BUF_HARDWARE);

		/* Store the dispatch counter into hw status dword 20, mark
		 * the buffer free in its in_use slot, and ask the hardware
		 * to report its head pointer.
		 */
		BEGIN_LP_RING(8);
		OUT_RING( CMD_STORE_DWORD_IDX );
		OUT_RING( 20 );
		OUT_RING( dev_priv->counter );
		OUT_RING( CMD_STORE_DWORD_IDX );
		OUT_RING( buf_priv->my_use_idx );
		OUT_RING( I830_BUF_FREE );
		OUT_RING( CMD_REPORT_HEAD );
		OUT_RING( 0 );
		ADVANCE_LP_RING();
	}
}
1248
1249
/* Flush caches and drain the ring until the hardware is idle.
 * Emits a map-cache flush plus a head report, then waits for the ring
 * to become (almost) empty — Size - 8 free bytes means only the final
 * qword is outstanding.
 */
void i830_dma_quiescent(drm_device_t *dev)
{
	drm_i830_private_t *dev_priv = dev->dev_private;
	RING_LOCALS;

	i830_kernel_lost_context(dev);

	BEGIN_LP_RING(4);
	OUT_RING( INST_PARSER_CLIENT | INST_OP_FLUSH | INST_FLUSH_MAP_CACHE );
	OUT_RING( CMD_REPORT_HEAD );
	OUT_RING( 0 );
	OUT_RING( 0 );
	ADVANCE_LP_RING();

	/* Block until everything queued above has been consumed. */
	i830_wait_ring( dev, dev_priv->ring.Size - 8, __FUNCTION__ );
}
1266
/* Drain the ring, then reclaim any DMA buffers the hardware has
 * finished with (HARDWARE -> FREE).  Buffers still owned by a client
 * are left alone.  Always returns 0.
 */
static int i830_flush_queue(drm_device_t *dev)
{
	drm_i830_private_t *dev_priv = dev->dev_private;
	drm_device_dma_t *dma = dev->dma;
	int i, ret = 0;
	RING_LOCALS;

	i830_kernel_lost_context(dev);

	BEGIN_LP_RING(2);
	OUT_RING( CMD_REPORT_HEAD );
	OUT_RING( 0 );
	ADVANCE_LP_RING();

	/* Wait until the ring is essentially empty, i.e. the hardware has
	 * executed everything queued so far.
	 */
	i830_wait_ring( dev, dev_priv->ring.Size - 8, __FUNCTION__ );

	for (i = 0; i < dma->buf_count; i++) {
		drm_buf_t *buf = dma->buflist[ i ];
		drm_i830_buf_priv_t *buf_priv = buf->dev_private;

		/* Atomic ownership transition: only buffers the hardware
		 * held are freed; the cmpxchg leaves CLIENT-owned buffers
		 * untouched.
		 */
		int used = cmpxchg(buf_priv->in_use, I830_BUF_HARDWARE,
				   I830_BUF_FREE);

		if (used == I830_BUF_HARDWARE)
			DRM_DEBUG("reclaimed from HARDWARE\n");
		if (used == I830_BUF_CLIENT)
			DRM_DEBUG("still on client\n");
	}

	return ret;
}
1298
1299 /* Must be called with the lock held */
/* Reclaim all DMA buffers owned by the given client (e.g. on process
 * exit): flush the queue, then flip each CLIENT-owned buffer belonging
 * to 'pid' back to FREE and clear its mapped state.
 * Must be called with the lock held.
 */
void i830_reclaim_buffers(drm_device_t *dev, pid_t pid)
{
	drm_device_dma_t *dma = dev->dma;
	int i;

	/* Nothing to do if DMA was never set up. */
	if (!dma) return;
	if (!dev->dev_private) return;
	if (!dma->buflist) return;

	/* Let the hardware finish, reclaiming HARDWARE-owned buffers. */
	i830_flush_queue(dev);

	for (i = 0; i < dma->buf_count; i++) {
		drm_buf_t *buf = dma->buflist[ i ];
		drm_i830_buf_priv_t *buf_priv = buf->dev_private;

		if (buf->pid == pid && buf_priv) {
			/* Atomically free only buffers this client held. */
			int used = cmpxchg(buf_priv->in_use, I830_BUF_CLIENT,
					   I830_BUF_FREE);

			if (used == I830_BUF_CLIENT)
				DRM_DEBUG("reclaimed from client\n");
			if(buf_priv->currently_mapped == I830_BUF_MAPPED)
				buf_priv->currently_mapped = I830_BUF_UNMAPPED;
		}
	}
}
1326
i830_flush_ioctl(struct inode * inode,struct file * filp,unsigned int cmd,unsigned long arg)1327 int i830_flush_ioctl(struct inode *inode, struct file *filp,
1328 unsigned int cmd, unsigned long arg)
1329 {
1330 drm_file_t *priv = filp->private_data;
1331 drm_device_t *dev = priv->dev;
1332
1333 if(!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
1334 DRM_ERROR("i830_flush_ioctl called without lock held\n");
1335 return -EINVAL;
1336 }
1337
1338 i830_flush_queue(dev);
1339 return 0;
1340 }
1341
i830_dma_vertex(struct inode * inode,struct file * filp,unsigned int cmd,unsigned long arg)1342 int i830_dma_vertex(struct inode *inode, struct file *filp,
1343 unsigned int cmd, unsigned long arg)
1344 {
1345 drm_file_t *priv = filp->private_data;
1346 drm_device_t *dev = priv->dev;
1347 drm_device_dma_t *dma = dev->dma;
1348 drm_i830_private_t *dev_priv = (drm_i830_private_t *)dev->dev_private;
1349 u32 *hw_status = (u32 *)dev_priv->hw_status_page;
1350 drm_i830_sarea_t *sarea_priv = (drm_i830_sarea_t *)
1351 dev_priv->sarea_priv;
1352 drm_i830_vertex_t vertex;
1353
1354 if (copy_from_user(&vertex, (drm_i830_vertex_t *)arg, sizeof(vertex)))
1355 return -EFAULT;
1356
1357 if(!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
1358 DRM_ERROR("i830_dma_vertex called without lock held\n");
1359 return -EINVAL;
1360 }
1361
1362 DRM_DEBUG("i830 dma vertex, idx %d used %d discard %d\n",
1363 vertex.idx, vertex.used, vertex.discard);
1364
1365 if(vertex.idx < 0 || vertex.idx > dma->buf_count) return -EINVAL;
1366
1367 i830_dma_dispatch_vertex( dev,
1368 dma->buflist[ vertex.idx ],
1369 vertex.discard, vertex.used );
1370
1371 sarea_priv->last_enqueue = dev_priv->counter-1;
1372 sarea_priv->last_dispatch = (int) hw_status[5];
1373
1374 return 0;
1375 }
1376
i830_clear_bufs(struct inode * inode,struct file * filp,unsigned int cmd,unsigned long arg)1377 int i830_clear_bufs(struct inode *inode, struct file *filp,
1378 unsigned int cmd, unsigned long arg)
1379 {
1380 drm_file_t *priv = filp->private_data;
1381 drm_device_t *dev = priv->dev;
1382 drm_i830_clear_t clear;
1383
1384 if (copy_from_user(&clear, (drm_i830_clear_t *)arg, sizeof(clear)))
1385 return -EFAULT;
1386
1387 if(!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
1388 DRM_ERROR("i830_clear_bufs called without lock held\n");
1389 return -EINVAL;
1390 }
1391
1392 /* GH: Someone's doing nasty things... */
1393 if (!dev->dev_private) {
1394 return -EINVAL;
1395 }
1396
1397 i830_dma_dispatch_clear( dev, clear.flags,
1398 clear.clear_color,
1399 clear.clear_depth,
1400 clear.clear_depthmask);
1401 return 0;
1402 }
1403
i830_swap_bufs(struct inode * inode,struct file * filp,unsigned int cmd,unsigned long arg)1404 int i830_swap_bufs(struct inode *inode, struct file *filp,
1405 unsigned int cmd, unsigned long arg)
1406 {
1407 drm_file_t *priv = filp->private_data;
1408 drm_device_t *dev = priv->dev;
1409
1410 DRM_DEBUG("i830_swap_bufs\n");
1411
1412 if(!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
1413 DRM_ERROR("i830_swap_buf called without lock held\n");
1414 return -EINVAL;
1415 }
1416
1417 i830_dma_dispatch_swap( dev );
1418 return 0;
1419 }
1420
1421
1422
1423 /* Not sure why this isn't set all the time:
1424 */
/* Not sure why this isn't set all the time:
 */
static void i830_do_init_pageflip( drm_device_t *dev )
{
	drm_i830_private_t *priv = dev->dev_private;

	DRM_DEBUG("%s\n", __FUNCTION__);

	/* Enable page flipping, starting on page 0, and mirror the
	 * current page into the SAREA for clients.
	 */
	priv->page_flipping = 1;
	priv->current_page = 0;
	priv->sarea_priv->pf_current_page = priv->current_page;
}
1434
/* Disable page flipping.  If the display is currently showing the back
 * page, flip once more so it ends up on the front page first.
 */
int i830_do_cleanup_pageflip( drm_device_t *dev )
{
	drm_i830_private_t *priv = dev->dev_private;

	DRM_DEBUG("%s\n", __FUNCTION__);

	if (priv->current_page != 0)
		i830_dma_dispatch_flip( dev );

	priv->page_flipping = 0;
	return 0;
}
1446
i830_flip_bufs(struct inode * inode,struct file * filp,unsigned int cmd,unsigned long arg)1447 int i830_flip_bufs(struct inode *inode, struct file *filp,
1448 unsigned int cmd, unsigned long arg)
1449 {
1450 drm_file_t *priv = filp->private_data;
1451 drm_device_t *dev = priv->dev;
1452 drm_i830_private_t *dev_priv = dev->dev_private;
1453
1454 DRM_DEBUG("%s\n", __FUNCTION__);
1455
1456 if(!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
1457 DRM_ERROR("i830_flip_buf called without lock held\n");
1458 return -EINVAL;
1459 }
1460
1461 if (!dev_priv->page_flipping)
1462 i830_do_init_pageflip( dev );
1463
1464 i830_dma_dispatch_flip( dev );
1465 return 0;
1466 }
1467
i830_getage(struct inode * inode,struct file * filp,unsigned int cmd,unsigned long arg)1468 int i830_getage(struct inode *inode, struct file *filp, unsigned int cmd,
1469 unsigned long arg)
1470 {
1471 drm_file_t *priv = filp->private_data;
1472 drm_device_t *dev = priv->dev;
1473 drm_i830_private_t *dev_priv = (drm_i830_private_t *)dev->dev_private;
1474 u32 *hw_status = (u32 *)dev_priv->hw_status_page;
1475 drm_i830_sarea_t *sarea_priv = (drm_i830_sarea_t *)
1476 dev_priv->sarea_priv;
1477
1478 sarea_priv->last_dispatch = (int) hw_status[5];
1479 return 0;
1480 }
1481
i830_getbuf(struct inode * inode,struct file * filp,unsigned int cmd,unsigned long arg)1482 int i830_getbuf(struct inode *inode, struct file *filp, unsigned int cmd,
1483 unsigned long arg)
1484 {
1485 drm_file_t *priv = filp->private_data;
1486 drm_device_t *dev = priv->dev;
1487 int retcode = 0;
1488 drm_i830_dma_t d;
1489 drm_i830_private_t *dev_priv = (drm_i830_private_t *)dev->dev_private;
1490 u32 *hw_status = (u32 *)dev_priv->hw_status_page;
1491 drm_i830_sarea_t *sarea_priv = (drm_i830_sarea_t *)
1492 dev_priv->sarea_priv;
1493
1494 DRM_DEBUG("getbuf\n");
1495 if (copy_from_user(&d, (drm_i830_dma_t *)arg, sizeof(d)))
1496 return -EFAULT;
1497
1498 if(!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
1499 DRM_ERROR("i830_dma called without lock held\n");
1500 return -EINVAL;
1501 }
1502
1503 d.granted = 0;
1504
1505 retcode = i830_dma_get_buffer(dev, &d, filp);
1506
1507 DRM_DEBUG("i830_dma: %d returning %d, granted = %d\n",
1508 current->pid, retcode, d.granted);
1509
1510 if (copy_to_user((drm_dma_t *)arg, &d, sizeof(d)))
1511 return -EFAULT;
1512 sarea_priv->last_dispatch = (int) hw_status[5];
1513
1514 return retcode;
1515 }
1516
/* DRM_IOCTL_I830_COPY: intentionally a no-op — buffer copies are never
 * needed on 2.4.x; kept only to satisfy the ioctl table.
 */
int i830_copybuf(struct inode *inode,
		 struct file *filp,
		 unsigned int cmd,
		 unsigned long arg)
{
	return 0;
}
1525
/* DRM_IOCTL_I830_DOCOPY: always reports "no copy needed" (see
 * i830_copybuf above — copying is unused on this driver).
 */
int i830_docopy(struct inode *inode, struct file *filp, unsigned int cmd,
		unsigned long arg)
{
	return 0;
}
1531
1532
1533
i830_getparam(struct inode * inode,struct file * filp,unsigned int cmd,unsigned long arg)1534 int i830_getparam( struct inode *inode, struct file *filp, unsigned int cmd,
1535 unsigned long arg )
1536 {
1537 drm_file_t *priv = filp->private_data;
1538 drm_device_t *dev = priv->dev;
1539 drm_i830_private_t *dev_priv = dev->dev_private;
1540 drm_i830_getparam_t param;
1541 int value;
1542
1543 if ( !dev_priv ) {
1544 DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ );
1545 return -EINVAL;
1546 }
1547
1548 if (copy_from_user(¶m, (drm_i830_getparam_t *)arg, sizeof(param) ))
1549 return -EFAULT;
1550
1551 switch( param.param ) {
1552 case I830_PARAM_IRQ_ACTIVE:
1553 value = dev->irq ? 1 : 0;
1554 break;
1555 default:
1556 return -EINVAL;
1557 }
1558
1559 if ( copy_to_user( param.value, &value, sizeof(int) ) ) {
1560 DRM_ERROR( "copy_to_user\n" );
1561 return -EFAULT;
1562 }
1563
1564 return 0;
1565 }
1566
1567
i830_setparam(struct inode * inode,struct file * filp,unsigned int cmd,unsigned long arg)1568 int i830_setparam( struct inode *inode, struct file *filp, unsigned int cmd,
1569 unsigned long arg )
1570 {
1571 drm_file_t *priv = filp->private_data;
1572 drm_device_t *dev = priv->dev;
1573 drm_i830_private_t *dev_priv = dev->dev_private;
1574 drm_i830_setparam_t param;
1575
1576 if ( !dev_priv ) {
1577 DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ );
1578 return -EINVAL;
1579 }
1580
1581 if (copy_from_user(¶m, (drm_i830_setparam_t *)arg, sizeof(param) ))
1582 return -EFAULT;
1583
1584 switch( param.param ) {
1585 case I830_SETPARAM_USE_MI_BATCHBUFFER_START:
1586 dev_priv->use_mi_batchbuffer_start = param.value;
1587 break;
1588 default:
1589 return -EINVAL;
1590 }
1591
1592 return 0;
1593 }
1594