1 /* i810_dma.c -- DMA support for the i810 -*- linux-c -*-
2 * Created: Mon Dec 13 01:50:01 1999 by jhartmann@precisioninsight.com
3 *
4 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
5 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
6 * All Rights Reserved.
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
24 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
25 * DEALINGS IN THE SOFTWARE.
26 *
27 * Authors: Rickard E. (Rik) Faith <faith@valinux.com>
28 * Jeff Hartmann <jhartmann@valinux.com>
29 * Keith Whitwell <keith@tungstengraphics.com>
30 *
31 */
32
33 #include <linux/config.h>
34 #include "i810.h"
35 #include "drmP.h"
36 #include "drm.h"
37 #include "i810_drm.h"
38 #include "i810_drv.h"
39 #include <linux/interrupt.h> /* For task queue support */
40 #include <linux/delay.h>
41
/* Wrapper so the munmap call site stays the same across kernel versions. */
#define DO_MUNMAP(m, a, l)	do_munmap(m, a, l)

/* Ownership states kept in each buffer's in_use word on the hardware
 * status page.  The hardware itself writes I810_BUF_FREE (via the
 * CMD_STORE_DWORD_IDX sequence) when it finishes with a buffer.
 */
#define I810_BUF_FREE		2
#define I810_BUF_CLIENT		1
#define I810_BUF_HARDWARE      	0

/* Whether a buffer is currently mmap'ed into a client's address space. */
#define I810_BUF_UNMAPPED 0
#define I810_BUF_MAPPED   1

/* Locals used by the BEGIN/OUT/ADVANCE ring macros below; declare once
 * per function that emits ring commands.
 */
#define RING_LOCALS	unsigned int outring, ringmask; volatile char *virt;

/* Reserve space for n dwords on the low-priority ring, blocking in
 * i810_wait_ring() if the ring is too full.  Caches tail/mask/base in
 * the RING_LOCALS for the following OUT_RING calls.
 */
#define BEGIN_LP_RING(n) do {				\
	if (0) DRM_DEBUG("BEGIN_LP_RING(%d) in %s\n", n, __FUNCTION__);	\
	if (dev_priv->ring.space < n*4) 		\
		i810_wait_ring(dev, n*4);		\
	dev_priv->ring.space -= n*4;			\
	outring = dev_priv->ring.tail;			\
	ringmask = dev_priv->ring.tail_mask;		\
	virt = dev_priv->ring.virtual_start;		\
} while (0)

/* Publish the new tail to the hardware so it starts executing. */
#define ADVANCE_LP_RING() do {				\
	if (0) DRM_DEBUG("ADVANCE_LP_RING\n");		\
	dev_priv->ring.tail = outring;			\
	I810_WRITE(LP_RING + RING_TAIL, outring);	\
} while(0)

/* Write one dword at the current ring position and advance (with wrap). */
#define OUT_RING(n) do {					\
	if (0) DRM_DEBUG("   OUT_RING %x\n", (int)(n));	\
	*(volatile unsigned int *)(virt + outring) = n;	\
	outring += 4;					\
	outring &= ringmask;				\
} while (0)
75
/* Debug aid: dump the fixed header words of the hardware status page and
 * the per-buffer in-use words that follow them.
 */
static inline void i810_print_status_page(drm_device_t *dev)
{
	drm_device_dma_t *dma = dev->dma;
	drm_i810_private_t *dev_priv = dev->dev_private;
	u32 *hw = (u32 *)dev_priv->hw_status_page;
	int idx;

	DRM_DEBUG( "hw_status: Interrupt Status : %x\n", hw[0]);
	DRM_DEBUG( "hw_status: LpRing Head ptr : %x\n", hw[1]);
	DRM_DEBUG( "hw_status: IRing Head ptr : %x\n", hw[2]);
	DRM_DEBUG( "hw_status: Reserved : %x\n", hw[3]);
	DRM_DEBUG( "hw_status: Last Render: %x\n", hw[4]);
	DRM_DEBUG( "hw_status: Driver Counter : %d\n", hw[5]);

	/* Buffer in-use words start at dword 6 of the status page. */
	for (idx = 6; idx < dma->buf_count + 6; idx++)
		DRM_DEBUG( "buffer status idx : %d used: %d\n", idx - 6, hw[idx]);
}
93
i810_freelist_get(drm_device_t * dev)94 static drm_buf_t *i810_freelist_get(drm_device_t *dev)
95 {
96 drm_device_dma_t *dma = dev->dma;
97 int i;
98 int used;
99
100 /* Linear search might not be the best solution */
101
102 for (i = 0; i < dma->buf_count; i++) {
103 drm_buf_t *buf = dma->buflist[ i ];
104 drm_i810_buf_priv_t *buf_priv = buf->dev_private;
105 /* In use is already a pointer */
106 used = cmpxchg(buf_priv->in_use, I810_BUF_FREE,
107 I810_BUF_CLIENT);
108 if(used == I810_BUF_FREE) {
109 return buf;
110 }
111 }
112 return NULL;
113 }
114
115 /* This should only be called if the buffer is not sent to the hardware
116 * yet, the hardware updates in use for us once its on the ring buffer.
117 */
118
/* Return a client-owned buffer to the freelist.  This should only be
 * called if the buffer has not been sent to the hardware yet; the
 * hardware updates the in-use word itself once the buffer is on the
 * ring.  Returns -EINVAL if the buffer was not in the CLIENT state.
 */
static int i810_freelist_put(drm_device_t *dev, drm_buf_t *buf)
{
	drm_i810_buf_priv_t *buf_priv = buf->dev_private;

	/* In use is already a pointer */
	if (cmpxchg(buf_priv->in_use, I810_BUF_CLIENT, I810_BUF_FREE)
	    != I810_BUF_CLIENT) {
		DRM_ERROR("Freeing buffer thats not in use : %d\n", buf->idx);
		return -EINVAL;
	}

	return 0;
}
133
/* File operations swapped in transiently by i810_map_buffer() so that
 * the do_mmap() it issues lands in i810_mmap_buffers() instead of the
 * generic DRM mmap handler.  All other operations fall through to the
 * standard DRM implementations.
 */
static struct file_operations i810_buffer_fops = {
	.open	 = DRM(open),
	.flush	 = DRM(flush),
	.release = DRM(release),
	.ioctl	 = DRM(ioctl),
	.mmap	 = i810_mmap_buffers,
	.read	 = DRM(read),
	.fasync  = DRM(fasync),
	.poll	 = DRM(poll),
};
144
/* mmap handler used while i810_buffer_fops is installed: maps the DMA
 * buffer that i810_map_buffer() stashed in dev_priv->mmap_buffer into
 * the calling process's address space.
 */
int i810_mmap_buffers(struct file *filp, struct vm_area_struct *vma)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev;
	drm_i810_private_t *dev_priv;
	drm_buf_t *buf;
	drm_i810_buf_priv_t *buf_priv;

	lock_kernel();	/* BKL guards the dev_priv->mmap_buffer handoff */
	dev = priv->dev;
	dev_priv = dev->dev_private;
	buf = dev_priv->mmap_buffer;
	buf_priv = buf->dev_private;

	/* Device-backed memory: mark as I/O and never copy on fork. */
	vma->vm_flags |= (VM_IO | VM_DONTCOPY);
	vma->vm_file = filp;

	buf_priv->currently_mapped = I810_BUF_MAPPED;
	unlock_kernel();

	/* Map the buffer's pages directly into the VMA. */
	if (remap_page_range(DRM_RPR_ARG(vma) vma->vm_start,
			     VM_OFFSET(vma),
			     vma->vm_end - vma->vm_start,
			     vma->vm_page_prot)) return -EAGAIN;
	return 0;
}
171
/* Map a DMA buffer into the client's address space.  Temporarily swaps
 * the file's f_op to i810_buffer_fops so do_mmap() is routed through
 * i810_mmap_buffers(), which picks the buffer up from
 * dev_priv->mmap_buffer.  Returns 0 on success or a negative errno.
 *
 * Fix: the source text had the mmap_sem acquisition garbled
 * ("¤t" mojibake for "&current"); restored to
 * down_write(&current->mm->mmap_sem) / up_write(...).
 */
static int i810_map_buffer(drm_buf_t *buf, struct file *filp)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;
	drm_i810_buf_priv_t *buf_priv = buf->dev_private;
	drm_i810_private_t *dev_priv = dev->dev_private;
	struct file_operations *old_fops;
	int retcode = 0;

	if (buf_priv->currently_mapped == I810_BUF_MAPPED)
		return -EINVAL;

	down_write(&current->mm->mmap_sem);

	/* Route the mmap through our buffer-specific handler. */
	old_fops = filp->f_op;
	filp->f_op = &i810_buffer_fops;
	dev_priv->mmap_buffer = buf;
	buf_priv->virtual = (void *)do_mmap(filp, 0, buf->total,
					    PROT_READ|PROT_WRITE,
					    MAP_SHARED,
					    buf->bus_address);
	dev_priv->mmap_buffer = NULL;
	filp->f_op = old_fops;

	/* do_mmap() returns a small negative errno cast to a pointer on
	 * failure, i.e. a value in the top 1024 bytes of the address space. */
	if ((unsigned long)buf_priv->virtual > -1024UL) {
		/* Real error */
		DRM_DEBUG("mmap error\n");
		retcode = (signed int)buf_priv->virtual;
		buf_priv->virtual = 0;
	}

	up_write(&current->mm->mmap_sem);

	return retcode;
}
210
/* Undo i810_map_buffer(): unmap the buffer from the current process and
 * mark it unmapped.  Returns -EINVAL if the buffer was not mapped,
 * otherwise the do_munmap() result.
 *
 * Fix: the source text had the mmap_sem calls garbled ("¤t"
 * mojibake for "&current"); restored to
 * down_write/up_write(&current->mm->mmap_sem).
 */
static int i810_unmap_buffer(drm_buf_t *buf)
{
	drm_i810_buf_priv_t *buf_priv = buf->dev_private;
	int retcode = 0;

	if (buf_priv->currently_mapped != I810_BUF_MAPPED)
		return -EINVAL;

	down_write(&current->mm->mmap_sem);

	retcode = DO_MUNMAP(current->mm,
			    (unsigned long)buf_priv->virtual,
			    (size_t) buf->total);

	up_write(&current->mm->mmap_sem);

	buf_priv->currently_mapped = I810_BUF_UNMAPPED;
	buf_priv->virtual = 0;

	return retcode;
}
236
/* Claim a free DMA buffer, map it into the caller's address space and
 * fill in the grant details in *d.  On mapping failure the buffer is
 * returned to the freelist.  Returns 0 or a negative errno.
 */
static int i810_dma_get_buffer(drm_device_t *dev, drm_i810_dma_t *d,
			       struct file *filp)
{
	drm_file_t *priv = filp->private_data;
	drm_i810_buf_priv_t *buf_priv;
	drm_buf_t *buf;
	int retcode;

	buf = i810_freelist_get(dev);
	if (buf == NULL) {
		retcode = -ENOMEM;
		DRM_DEBUG("retcode=%d\n", retcode);
		return retcode;
	}

	retcode = i810_map_buffer(buf, filp);
	if (retcode != 0) {
		/* Could not map it: give the buffer back. */
		i810_freelist_put(dev, buf);
		DRM_DEBUG("mapbuf failed, retcode %d\n", retcode);
		return retcode;
	}

	buf->pid = priv->pid;
	buf_priv = buf->dev_private;

	/* Report the grant back to the caller. */
	d->granted = 1;
	d->request_idx = buf->idx;
	d->request_size = buf->total;
	d->virtual = buf_priv->virtual;

	return retcode;
}
267
/* Tear down everything i810_dma_initialize() set up: the ring mapping,
 * the hardware status page, the private structure and each buffer's
 * kernel-side ioremap.  Safe to call when DMA was never (or only
 * partially) initialised.
 */
static int i810_dma_cleanup(drm_device_t *dev)
{
	drm_device_dma_t *dma = dev->dma;

	if(dev->dev_private) {
		int i;
	   	drm_i810_private_t *dev_priv =
	   		(drm_i810_private_t *) dev->dev_private;

		if(dev_priv->ring.virtual_start) {
		   	DRM(ioremapfree)((void *) dev_priv->ring.virtual_start,
					 dev_priv->ring.Size, dev);
		}
	   	if(dev_priv->hw_status_page != 0UL) {
		   	pci_free_consistent(dev->pdev, PAGE_SIZE,
					    (void *)dev_priv->hw_status_page,
					    dev_priv->dma_status_page);
		   	/* Need to rewrite hardware status page */
		   	I810_WRITE(0x02080, 0x1ffff000);
		}
	   	DRM(free)(dev->dev_private, sizeof(drm_i810_private_t),
			  DRM_MEM_DRIVER);
	   	dev->dev_private = NULL;

		/* Drop the kernel_virtual mapping created per buffer in
		 * i810_freelist_init(). */
		for (i = 0; i < dma->buf_count; i++) {
			drm_buf_t *buf = dma->buflist[ i ];
			drm_i810_buf_priv_t *buf_priv = buf->dev_private;
			DRM(ioremapfree)(buf_priv->kernel_virtual, buf->total, dev);
		}
	}
   	return 0;
}
300
/* Busy-wait until the ring has at least n bytes free.  The ~3 second
 * timeout window restarts whenever the head pointer moves, so only a
 * truly wedged engine triggers the lockup path.  Returns the number of
 * polling iterations performed (debug/metric value only).
 */
static int i810_wait_ring(drm_device_t *dev, int n)
{
   	drm_i810_private_t *dev_priv = dev->dev_private;
   	drm_i810_ring_buffer_t *ring = &(dev_priv->ring);
   	int iters = 0;
   	unsigned long end;
	unsigned int last_head = I810_READ(LP_RING + RING_HEAD) & HEAD_ADDR;

	end = jiffies + (HZ*3);
   	while (ring->space < n) {
	   	ring->head = I810_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
	   	ring->space = ring->head - (ring->tail+8);
		if (ring->space < 0) ring->space += ring->Size;

		/* Head movement means the engine is alive: restart the
		 * timeout window. */
		if (ring->head != last_head)
			end = jiffies + (HZ*3);

	   	iters++;
		if(time_before(end, jiffies)) {
		   	DRM_ERROR("space: %d wanted %d\n", ring->space, n);
		   	DRM_ERROR("lockup\n");
		   	goto out_wait_ring;
		}
		udelay(1);
	}

out_wait_ring:
   	return iters;
}
330
/* Resynchronise the cached ring state with the hardware registers;
 * another context may have advanced the ring since we last looked.
 */
static void i810_kernel_lost_context(drm_device_t *dev)
{
	drm_i810_private_t *dev_priv = dev->dev_private;
	drm_i810_ring_buffer_t *ring = &dev_priv->ring;

	ring->head = I810_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
	ring->tail = I810_READ(LP_RING + RING_TAIL);
	ring->space = ring->head - (ring->tail + 8);
	if (ring->space < 0)
		ring->space += ring->Size;
}
341
/* Build the buffer freelist: give each DMA buffer one u32 ownership word
 * on the hardware status page (starting at byte offset 24), mark every
 * buffer FREE, and create a kernel-side mapping of each buffer.
 * Returns -EINVAL when the status page cannot hold buf_count words.
 */
static int i810_freelist_init(drm_device_t *dev, drm_i810_private_t *dev_priv)
{
	drm_device_dma_t *dma = dev->dma;
	int offset = 24;	/* first status-page byte used by the freelist */
	u32 *use_word = (u32 *)(dev_priv->hw_status_page + offset);
	int i;

	/* Not enough space in the status page for the freelist? */
	if (dma->buf_count > 1019)
		return -EINVAL;

	for (i = 0; i < dma->buf_count; i++) {
		drm_buf_t *buf = dma->buflist[i];
		drm_i810_buf_priv_t *buf_priv = buf->dev_private;

		buf_priv->in_use = use_word++;
		buf_priv->my_use_idx = offset;
		offset += 4;

		*buf_priv->in_use = I810_BUF_FREE;

		buf_priv->kernel_virtual = DRM(ioremap)(buf->bus_address,
							buf->total, dev);
	}
	return 0;
}
369
/* One-shot DMA setup driven by the I810_INIT_DMA ioctl: locate the
 * SAREA / mmio / buffer maps, map the ring buffer, allocate and program
 * the hardware status page, and build the buffer freelist.  On every
 * failure path dev_priv is first attached to the device so that
 * i810_dma_cleanup() can unwind whatever was already set up.
 */
static int i810_dma_initialize(drm_device_t *dev,
			       drm_i810_private_t *dev_priv,
			       drm_i810_init_t *init)
{
	struct list_head *list;

	memset(dev_priv, 0, sizeof(drm_i810_private_t));

	/* The SAREA is the _DRM_SHM map that carries the hardware lock. */
	list_for_each(list, &dev->maplist->head) {
		drm_map_list_t *r_list = (drm_map_list_t *)list;
		if( r_list->map &&
		    r_list->map->type == _DRM_SHM &&
		    r_list->map->flags & _DRM_CONTAINS_LOCK ) {
			dev_priv->sarea_map = r_list->map;
 			break;
 		}
 	}
	if(!dev_priv->sarea_map) {
		dev->dev_private = (void *)dev_priv;
		i810_dma_cleanup(dev);
		DRM_ERROR("can not find sarea!\n");
		return -EINVAL;
	}
	DRM_FIND_MAP( dev_priv->mmio_map, init->mmio_offset );
	if(!dev_priv->mmio_map) {
		dev->dev_private = (void *)dev_priv;
		i810_dma_cleanup(dev);
		DRM_ERROR("can not find mmio map!\n");
		return -EINVAL;
	}
	DRM_FIND_MAP( dev_priv->buffer_map, init->buffers_offset );
	if(!dev_priv->buffer_map) {
		dev->dev_private = (void *)dev_priv;
		i810_dma_cleanup(dev);
		DRM_ERROR("can not find dma buffer map!\n");
		return -EINVAL;
	}

	/* Driver-private state lives at a caller-chosen offset in the SAREA. */
	dev_priv->sarea_priv = (drm_i810_sarea_t *)
		((u8 *)dev_priv->sarea_map->handle +
		 init->sarea_priv_offset);

	dev_priv->ring.Start = init->ring_start;
	dev_priv->ring.End = init->ring_end;
	dev_priv->ring.Size = init->ring_size;

	/* Map the ring out of AGP space so the kernel can write commands. */
	dev_priv->ring.virtual_start = DRM(ioremap)(dev->agp->base +
						    init->ring_start,
						    init->ring_size, dev);

   	if (dev_priv->ring.virtual_start == NULL) {
		dev->dev_private = (void *) dev_priv;
	   	i810_dma_cleanup(dev);
	   	DRM_ERROR("can not ioremap virtual address for"
			  " ring buffer\n");
	   	return -ENOMEM;
	}

	/* Ring size is a power of two, so tail wrapping is a simple mask. */
	dev_priv->ring.tail_mask = dev_priv->ring.Size - 1;

	dev_priv->w = init->w;
	dev_priv->h = init->h;
	dev_priv->pitch = init->pitch;
	dev_priv->back_offset = init->back_offset;
	dev_priv->depth_offset = init->depth_offset;

	dev_priv->overlay_offset = init->overlay_offset;
	dev_priv->overlay_physical = init->overlay_physical;

	/* Precomputed destination-info words for front/back/depth buffers. */
	dev_priv->front_di1 = init->front_offset | init->pitch_bits;
	dev_priv->back_di1 = init->back_offset | init->pitch_bits;
	dev_priv->zi1 = init->depth_offset | init->pitch_bits;

   	/* Program Hardware Status Page */
	dev_priv->hw_status_page =
		(unsigned long) pci_alloc_consistent(dev->pdev, PAGE_SIZE,
						&dev_priv->dma_status_page);
   	if(dev_priv->hw_status_page == 0UL) {
		dev->dev_private = (void *)dev_priv;
		i810_dma_cleanup(dev);
		DRM_ERROR("Can not allocate hardware status page\n");
		return -ENOMEM;
	}
   	memset((void *) dev_priv->hw_status_page, 0, PAGE_SIZE);
   	DRM_DEBUG("hw status page @ %lx\n", dev_priv->hw_status_page);

	/* 0x02080 is the HWS_PGA register; point it at our DMA page. */
	I810_WRITE(0x02080, dev_priv->dma_status_page);
   	DRM_DEBUG("Enabled hardware status page\n");

   	/* Now we need to init our freelist */
	if(i810_freelist_init(dev, dev_priv) != 0) {
		dev->dev_private = (void *)dev_priv;
	   	i810_dma_cleanup(dev);
	   	DRM_ERROR("Not enough space in the status page for"
			  " the freelist\n");
	   	return -ENOMEM;
	}
	dev->dev_private = (void *)dev_priv;

   	return 0;
}
471
/* XFree86 4.1 and 4.2 use different drm_i810_init_t layouts and the
 * right one cannot always be autodetected, so it can be forced with this
 * module parameter: 41 selects the old layout, -1 means autodetect
 * (see the pitch==0 heuristic in i810_dma_init()).
 */
#ifdef CONFIG_DRM_I810_XFREE_41
int xfreeversion = 41;
#else
int xfreeversion = -1;
#endif

MODULE_PARM(xfreeversion, "i");
MODULE_PARM_DESC(xfreeversion, "The version of XFree86 that needs to be supported");
480
/* I810_INIT_DMA / I810_CLEANUP_DMA ioctl entry point.  Copies the init
 * block from userspace, fixes up the XFree86 4.1 vs 4.2 ABI difference,
 * then dispatches on init.func.
 */
int i810_dma_init(struct inode *inode, struct file *filp,
		  unsigned int cmd, unsigned long arg)
{
   	drm_file_t *priv = filp->private_data;
   	drm_device_t *dev = priv->dev;
   	drm_i810_private_t *dev_priv;
   	drm_i810_init_t init;
   	int retcode = 0;

  	if (copy_from_user(&init, (drm_i810_init_t *)arg, sizeof(init)))
		return -EFAULT;

	if ((xfreeversion == 41) ||
	    ((xfreeversion == -1) && (init.pitch == 0))) {
		/*
		 * Ok we have a problem here. Someone decided it was
		 * funny to add two fields in the middle of the
		 * drm_i810_init_it structure in the transition between
		 * XFree86 4.1.0 and 4.2.0.
		 *
		 * The code below tries to fix this ABI breakage up as
		 * good as possible, unfortionatly it's impossible to
		 * autodetect which interface the user wants, hence the
		 * module parameter -- Arjan
		 */

		/* Shuffle the fields from the 4.1 layout into the 4.2
		 * positions the rest of the driver expects. */
		init.pitch_bits = init.h;
		init.pitch = init.w;
		init.h = init.overlay_physical;
		init.w = init.overlay_offset;
		init.overlay_physical = 0;
		init.overlay_offset = 0;
	}

   	switch(init.func) {
	 	case I810_INIT_DMA:
	   		dev_priv = DRM(alloc)(sizeof(drm_i810_private_t),
					      DRM_MEM_DRIVER);
	   		if(dev_priv == NULL) return -ENOMEM;
	   		retcode = i810_dma_initialize(dev, dev_priv, &init);
	   	break;
	 	case I810_CLEANUP_DMA:
	   		retcode = i810_dma_cleanup(dev);
	   	break;
	 	default:
	   		retcode = -EINVAL;
	   	break;
	}

   	return retcode;
}
532
533
534
535 /* Most efficient way to verify state for the i810 is as it is
536 * emitted. Non-conformant state is silently dropped.
537 */
/* Most efficient way to verify state for the i810 is as it is
 * emitted. Non-conformant state is silently dropped.
 *
 * Emit the client's context-register block onto the ring, passing
 * through only 3D-state packets (opcode bits 31:29 == 011) whose
 * sub-opcode is below 0x1d; anything else is dropped with a warning.
 *
 * Fix: corrected the misspelled printk message ("constext" -> "context").
 */
static void i810EmitContextVerified( drm_device_t *dev,
				     unsigned int *code )
{
	drm_i810_private_t *dev_priv = dev->dev_private;
	int i, j = 0;
	RING_LOCALS;

	BEGIN_LP_RING( I810_CTX_SETUP_SIZE );

	OUT_RING( GFX_OP_COLOR_FACTOR );
	OUT_RING( code[I810_CTXREG_CF1] );

	OUT_RING( GFX_OP_STIPPLE );
	OUT_RING( code[I810_CTXREG_ST1] );

	for ( i = 4 ; i < I810_CTX_SETUP_SIZE ; i++ ) {
		if ((code[i] & (7<<29)) == (3<<29) &&
		    (code[i] & (0x1f<<24)) < (0x1d<<24))
		{
			OUT_RING( code[i] );
			j++;
		}
		else printk("context state dropped!!!\n");
	}

	/* Ring writes must stay qword-aligned; pad with a NOP dword. */
	if (j & 1)
		OUT_RING( 0 );

	ADVANCE_LP_RING();
}
568
/* Emit a client texture-state block onto the ring.  The three MAP_INFO
 * words go out unconditionally; the remaining entries are passed through
 * only if they look like valid 3D-state packets, otherwise dropped with
 * a warning.
 */
static void i810EmitTexVerified( drm_device_t *dev,
				 volatile unsigned int *code )
{
	drm_i810_private_t *dev_priv = dev->dev_private;
	int reg, emitted = 0;
	RING_LOCALS;

	BEGIN_LP_RING( I810_TEX_SETUP_SIZE );

	OUT_RING( GFX_OP_MAP_INFO );
	OUT_RING( code[I810_TEXREG_MI1] );
	OUT_RING( code[I810_TEXREG_MI2] );
	OUT_RING( code[I810_TEXREG_MI3] );

	for ( reg = 4 ; reg < I810_TEX_SETUP_SIZE ; reg++ ) {
		/* Accept only 3D-state packets with sub-opcode < 0x1d. */
		if ((code[reg] & (7<<29)) == (3<<29) &&
		    (code[reg] & (0x1f<<24)) < (0x1d<<24)) {
			OUT_RING( code[reg] );
			emitted++;
		} else
			printk("texture state dropped!!!\n");
	}

	/* Pad to keep the ring qword-aligned. */
	if (emitted & 1)
		OUT_RING( 0 );

	ADVANCE_LP_RING();
}
599
600
601 /* Need to do some additional checking when setting the dest buffer.
602 */
/* Need to do some additional checking when setting the dest buffer.
 *
 * Only the precomputed front or back destination-info words are
 * accepted; an arbitrary client-supplied value is dropped so a client
 * cannot point rendering at other memory.
 */
static void i810EmitDestVerified( drm_device_t *dev,
				  volatile unsigned int *code )
{
	drm_i810_private_t *dev_priv = dev->dev_private;
	unsigned int tmp;
	RING_LOCALS;

	BEGIN_LP_RING( I810_DEST_SETUP_SIZE + 2 );

	tmp = code[I810_DESTREG_DI1];
	if (tmp == dev_priv->front_di1 || tmp == dev_priv->back_di1) {
		OUT_RING( CMD_OP_DESTBUFFER_INFO );
		OUT_RING( tmp );
	}
	else
		printk("buffer state dropped\n");

	/* invarient:
	 */
	OUT_RING( CMD_OP_Z_BUFFER_INFO );
	OUT_RING( dev_priv->zi1 );

	OUT_RING( GFX_OP_DESTBUFFER_VARS );
	OUT_RING( code[I810_DESTREG_DV1] );

	OUT_RING( GFX_OP_DRAWRECT_INFO );
	OUT_RING( code[I810_DESTREG_DR1] );
	OUT_RING( code[I810_DESTREG_DR2] );
	OUT_RING( code[I810_DESTREG_DR3] );
	OUT_RING( code[I810_DESTREG_DR4] );
	OUT_RING( 0 );	/* pad to keep the ring qword-aligned */

	ADVANCE_LP_RING();
}
637
638
639
/* Re-emit exactly the state groups the client flagged dirty in the
 * SAREA, clearing each bit once its registers have been queued.
 */
static void i810EmitState( drm_device_t *dev )
{
	drm_i810_private_t *dev_priv = dev->dev_private;
	drm_i810_sarea_t *sarea = dev_priv->sarea_priv;
	unsigned int flags = sarea->dirty;

	if (flags & I810_UPLOAD_BUFFERS) {
		i810EmitDestVerified( dev, sarea->BufferState );
		sarea->dirty &= ~I810_UPLOAD_BUFFERS;
	}

	if (flags & I810_UPLOAD_CTX) {
		i810EmitContextVerified( dev, sarea->ContextState );
		sarea->dirty &= ~I810_UPLOAD_CTX;
	}

	if (flags & I810_UPLOAD_TEX0) {
		i810EmitTexVerified( dev, sarea->TexState[0] );
		sarea->dirty &= ~I810_UPLOAD_TEX0;
	}

	if (flags & I810_UPLOAD_TEX1) {
		i810EmitTexVerified( dev, sarea->TexState[1] );
		sarea->dirty &= ~I810_UPLOAD_TEX1;
	}
}
666
667
668
669 /* need to verify
670 */
/* need to verify
 *
 * Clear the requested buffers (front/back/depth, per flags) over every
 * SAREA cliprect using solid-pattern color blits.  Rectangles that are
 * inverted or reach outside the screen are skipped.
 */
static void i810_dma_dispatch_clear( drm_device_t *dev, int flags,
				     unsigned int clear_color,
				     unsigned int clear_zval )
{
   	drm_i810_private_t *dev_priv = dev->dev_private;
	drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv;
   	int nbox = sarea_priv->nbox;
	drm_clip_rect_t *pbox = sarea_priv->boxes;
	int pitch = dev_priv->pitch;
	int cpp = 2;	/* 16 bpp: two bytes per pixel */
	int i;
	RING_LOCALS;

	i810_kernel_lost_context(dev);

      	if (nbox > I810_NR_SAREA_CLIPRECTS)
     		nbox = I810_NR_SAREA_CLIPRECTS;

   	for (i = 0 ; i < nbox ; i++, pbox++) {
		unsigned int x = pbox->x1;
		unsigned int y = pbox->y1;
		unsigned int width = (pbox->x2 - x) * cpp;
		unsigned int height = pbox->y2 - y;
		unsigned int start = y * pitch + x * cpp;

		/* Reject degenerate or out-of-bounds cliprects. */
		if (pbox->x1 > pbox->x2 ||
		    pbox->y1 > pbox->y2 ||
		    pbox->x2 > dev_priv->w ||
		    pbox->y2 > dev_priv->h)
			continue;

	   	if ( flags & I810_FRONT ) {
			/* Solid fill of the front buffer; 0xF0 is the
			 * PATCOPY raster op. */
		   	BEGIN_LP_RING( 6 );
	      		OUT_RING( BR00_BITBLT_CLIENT |
				  BR00_OP_COLOR_BLT | 0x3 );
	      		OUT_RING( BR13_SOLID_PATTERN | (0xF0 << 16) | pitch );
	      		OUT_RING( (height << 16) | width );
	      		OUT_RING( start );
	      		OUT_RING( clear_color );
	      		OUT_RING( 0 );
	      		ADVANCE_LP_RING();
		}

		if ( flags & I810_BACK ) {
			BEGIN_LP_RING( 6 );
	      		OUT_RING( BR00_BITBLT_CLIENT |
				  BR00_OP_COLOR_BLT | 0x3 );
	      		OUT_RING( BR13_SOLID_PATTERN | (0xF0 << 16) | pitch );
	      		OUT_RING( (height << 16) | width );
	      		OUT_RING( dev_priv->back_offset + start );
	      		OUT_RING( clear_color );
	      		OUT_RING( 0 );
	      		ADVANCE_LP_RING();
		}

		if ( flags & I810_DEPTH ) {
			BEGIN_LP_RING( 6 );
	      		OUT_RING( BR00_BITBLT_CLIENT |
				  BR00_OP_COLOR_BLT | 0x3 );
	      		OUT_RING( BR13_SOLID_PATTERN | (0xF0 << 16) | pitch );
	      		OUT_RING( (height << 16) | width );
	      		OUT_RING( dev_priv->depth_offset + start );
	      		OUT_RING( clear_zval );
	      		OUT_RING( 0 );
	      		ADVANCE_LP_RING();
		}
	}
}
739
/* Copy the back buffer to the front buffer for every SAREA cliprect
 * using source-copy blits.  Rectangles that are inverted or out of
 * bounds are skipped.
 */
static void i810_dma_dispatch_swap( drm_device_t *dev )
{
   	drm_i810_private_t *dev_priv = dev->dev_private;
	drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv;
   	int nbox = sarea_priv->nbox;
	drm_clip_rect_t *pbox = sarea_priv->boxes;
	int pitch = dev_priv->pitch;
	int cpp = 2;	/* 16 bpp */
	int ofs = dev_priv->back_offset;
	int i;
	RING_LOCALS;

	i810_kernel_lost_context(dev);

      	if (nbox > I810_NR_SAREA_CLIPRECTS)
     		nbox = I810_NR_SAREA_CLIPRECTS;

	for (i = 0 ; i < nbox; i++, pbox++)
	{
		unsigned int w = pbox->x2 - pbox->x1;
		unsigned int h = pbox->y2 - pbox->y1;
		/* dst is the offset within a buffer; start adds the back
		 * buffer's base to form the blit source. */
		unsigned int dst = pbox->x1*cpp + pbox->y1*pitch;
		unsigned int start = ofs + dst;

		/* Reject degenerate or out-of-bounds cliprects. */
		if (pbox->x1 > pbox->x2 ||
		    pbox->y1 > pbox->y2 ||
		    pbox->x2 > dev_priv->w ||
		    pbox->y2 > dev_priv->h)
			continue;

	   	BEGIN_LP_RING( 6 );
	   	OUT_RING( BR00_BITBLT_CLIENT | BR00_OP_SRC_COPY_BLT | 0x4 );
	   	OUT_RING( pitch | (0xCC << 16));	/* 0xCC = SRCCOPY rop */
	   	OUT_RING( (h << 16) | (w * cpp));
		OUT_RING( dst );
	   	OUT_RING( pitch );
		OUT_RING( start );
	   	ADVANCE_LP_RING();
	}
}
780
781
/* Dispatch a client vertex buffer: emit any dirty state, patch the
 * primitive header into the (still mapped) buffer, then fire it as a
 * batch buffer once per cliprect.  When 'discard' is set the buffer is
 * handed to the hardware, which marks it FREE again via the
 * CMD_STORE_DWORD_IDX sequence when the batch completes.
 */
static void i810_dma_dispatch_vertex(drm_device_t *dev,
				     drm_buf_t *buf,
				     int discard,
				     int used)
{
   	drm_i810_private_t *dev_priv = dev->dev_private;
	drm_i810_buf_priv_t *buf_priv = buf->dev_private;
   	drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv;
   	drm_clip_rect_t *box = sarea_priv->boxes;
   	int nbox = sarea_priv->nbox;
	unsigned long address = (unsigned long)buf->bus_address;
	unsigned long start = address - dev->agp->base;
	int i = 0;
   	RING_LOCALS;

	i810_kernel_lost_context(dev);

   	if (nbox > I810_NR_SAREA_CLIPRECTS)
		nbox = I810_NR_SAREA_CLIPRECTS;

	/* Clamp oversized requests: buffers larger than 4K are dropped. */
	if (used > 4*1024)
		used = 0;

	if (sarea_priv->dirty)
	   i810EmitState( dev );

	if (buf_priv->currently_mapped == I810_BUF_MAPPED) {
		unsigned int prim = (sarea_priv->vertex_prim & PR_MASK);

		/* Write the primitive command into the first dword of the
		 * buffer; the dword count is (used/4)-2 per the command
		 * encoding. */
		*(u32 *)buf_priv->virtual = (GFX_OP_PRIMITIVE | prim |
					     ((used/4)-2));

		/* Pad to a qword boundary with a NOP dword.
		 * NOTE(review): the (u32) pointer cast assumes a 32-bit
		 * address space — 2.4-era code; would truncate on 64-bit. */
		if (used & 4) {
			*(u32 *)((u32)buf_priv->virtual + used) = 0;
			used += 4;
		}

		i810_unmap_buffer(buf);
	}

	if (used) {
		do {
			if (i < nbox) {
				/* Scissor the batch to this cliprect. */
				BEGIN_LP_RING(4);
				OUT_RING( GFX_OP_SCISSOR | SC_UPDATE_SCISSOR |
					  SC_ENABLE );
				OUT_RING( GFX_OP_SCISSOR_INFO );
				OUT_RING( box[i].x1 | (box[i].y1<<16) );
				OUT_RING( (box[i].x2-1) | ((box[i].y2-1)<<16) );
				ADVANCE_LP_RING();
			}

			/* Fire the vertex data as a protected batch buffer. */
			BEGIN_LP_RING(4);
			OUT_RING( CMD_OP_BATCH_BUFFER );
			OUT_RING( start | BB1_PROTECTED );
			OUT_RING( start + used - 4 );
			OUT_RING( 0 );
			ADVANCE_LP_RING();

		} while (++i < nbox);
	}

	if (discard) {
		dev_priv->counter++;

		/* Hand ownership to the hardware; it will store
		 * I810_BUF_FREE into the buffer's status word below. */
		(void) cmpxchg(buf_priv->in_use, I810_BUF_CLIENT,
			       I810_BUF_HARDWARE);

		BEGIN_LP_RING(8);
		OUT_RING( CMD_STORE_DWORD_IDX );
		OUT_RING( 20 );	/* status-page dword 5: driver counter */
		OUT_RING( dev_priv->counter );
		OUT_RING( CMD_STORE_DWORD_IDX );
		OUT_RING( buf_priv->my_use_idx );
		OUT_RING( I810_BUF_FREE );
		OUT_RING( CMD_REPORT_HEAD );
		OUT_RING( 0 );
		ADVANCE_LP_RING();
	}
}
862
863
/* Drain the engine: flush caches, force a head report, then wait until
 * the ring is essentially empty (Size - 8 bytes free).
 */
void i810_dma_quiescent(drm_device_t *dev)
{
	drm_i810_private_t *dev_priv = dev->dev_private;
	RING_LOCALS;

	i810_kernel_lost_context(dev);

	BEGIN_LP_RING(4);
	OUT_RING( INST_PARSER_CLIENT | INST_OP_FLUSH | INST_FLUSH_MAP_CACHE );
	OUT_RING( CMD_REPORT_HEAD );
	OUT_RING( 0 );	/* pad to a qword boundary */
	OUT_RING( 0 );
	ADVANCE_LP_RING();

	i810_wait_ring( dev, dev_priv->ring.Size - 8 );
}
882
/* Wait for all outstanding DMA to complete, then reclaim every buffer
 * the hardware had claimed back onto the freelist.  Buffers still owned
 * by clients are left alone.  Always returns 0.
 */
static int i810_flush_queue(drm_device_t *dev)
{
	drm_i810_private_t *dev_priv = dev->dev_private;
	drm_device_dma_t *dma = dev->dma;
	int i, ret = 0;
	RING_LOCALS;

	i810_kernel_lost_context(dev);

	/* Ask the engine to report its head, then wait for the ring to
	 * drain almost completely. */
	BEGIN_LP_RING(2);
	OUT_RING( CMD_REPORT_HEAD );
	OUT_RING( 0 );
	ADVANCE_LP_RING();

	i810_wait_ring( dev, dev_priv->ring.Size - 8 );

	for (i = 0; i < dma->buf_count; i++) {
		drm_buf_t *buf = dma->buflist[ i ];
		drm_i810_buf_priv_t *buf_priv = buf->dev_private;

		int used = cmpxchg(buf_priv->in_use, I810_BUF_HARDWARE,
				   I810_BUF_FREE);

		if (used == I810_BUF_HARDWARE)
			DRM_DEBUG("reclaimed from HARDWARE\n");
		if (used == I810_BUF_CLIENT)
			DRM_DEBUG("still on client\n");
	}

	return ret;
}
916
917 /* Must be called with the lock held */
/* Must be called with the lock held.
 *
 * Reclaim every DMA buffer still owned by the given process: flush the
 * hardware queue, flip each matching buffer's ownership word back to
 * FREE, and clear any stale mapped flag.
 */
void i810_reclaim_buffers(drm_device_t *dev, pid_t pid)
{
	drm_device_dma_t *dma = dev->dma;
	int i;

	if (!dma || !dev->dev_private || !dma->buflist)
		return;

	i810_flush_queue(dev);

	for (i = 0; i < dma->buf_count; i++) {
		drm_buf_t *buf = dma->buflist[i];
		drm_i810_buf_priv_t *buf_priv = buf->dev_private;

		if (buf->pid != pid || !buf_priv)
			continue;

		if (cmpxchg(buf_priv->in_use, I810_BUF_CLIENT,
			    I810_BUF_FREE) == I810_BUF_CLIENT)
			DRM_DEBUG("reclaimed from client\n");
		if (buf_priv->currently_mapped == I810_BUF_MAPPED)
			buf_priv->currently_mapped = I810_BUF_UNMAPPED;
	}
}
944
i810_flush_ioctl(struct inode * inode,struct file * filp,unsigned int cmd,unsigned long arg)945 int i810_flush_ioctl(struct inode *inode, struct file *filp,
946 unsigned int cmd, unsigned long arg)
947 {
948 drm_file_t *priv = filp->private_data;
949 drm_device_t *dev = priv->dev;
950
951 if(!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
952 DRM_ERROR("i810_flush_ioctl called without lock held\n");
953 return -EINVAL;
954 }
955
956 i810_flush_queue(dev);
957 return 0;
958 }
959
960
i810_dma_vertex(struct inode * inode,struct file * filp,unsigned int cmd,unsigned long arg)961 int i810_dma_vertex(struct inode *inode, struct file *filp,
962 unsigned int cmd, unsigned long arg)
963 {
964 drm_file_t *priv = filp->private_data;
965 drm_device_t *dev = priv->dev;
966 drm_device_dma_t *dma = dev->dma;
967 drm_i810_private_t *dev_priv = (drm_i810_private_t *)dev->dev_private;
968 u32 *hw_status = (u32 *)dev_priv->hw_status_page;
969 drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *)
970 dev_priv->sarea_priv;
971 drm_i810_vertex_t vertex;
972
973 if (copy_from_user(&vertex, (drm_i810_vertex_t *)arg, sizeof(vertex)))
974 return -EFAULT;
975
976 if(!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
977 DRM_ERROR("i810_dma_vertex called without lock held\n");
978 return -EINVAL;
979 }
980
981 if(vertex.idx < 0 || vertex.idx > dma->buf_count) return -EINVAL;
982
983 i810_dma_dispatch_vertex( dev,
984 dma->buflist[ vertex.idx ],
985 vertex.discard, vertex.used );
986
987 atomic_add(vertex.used, &dev->counts[_DRM_STAT_SECONDARY]);
988 atomic_inc(&dev->counts[_DRM_STAT_DMA]);
989 sarea_priv->last_enqueue = dev_priv->counter-1;
990 sarea_priv->last_dispatch = (int) hw_status[5];
991
992 return 0;
993 }
994
995
996
i810_clear_bufs(struct inode * inode,struct file * filp,unsigned int cmd,unsigned long arg)997 int i810_clear_bufs(struct inode *inode, struct file *filp,
998 unsigned int cmd, unsigned long arg)
999 {
1000 drm_file_t *priv = filp->private_data;
1001 drm_device_t *dev = priv->dev;
1002 drm_i810_clear_t clear;
1003
1004 if (copy_from_user(&clear, (drm_i810_clear_t *)arg, sizeof(clear)))
1005 return -EFAULT;
1006
1007 if(!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
1008 DRM_ERROR("i810_clear_bufs called without lock held\n");
1009 return -EINVAL;
1010 }
1011
1012 /* GH: Someone's doing nasty things... */
1013 if (!dev->dev_private) {
1014 return -EINVAL;
1015 }
1016
1017 i810_dma_dispatch_clear( dev, clear.flags,
1018 clear.clear_color,
1019 clear.clear_depth );
1020 return 0;
1021 }
1022
i810_swap_bufs(struct inode * inode,struct file * filp,unsigned int cmd,unsigned long arg)1023 int i810_swap_bufs(struct inode *inode, struct file *filp,
1024 unsigned int cmd, unsigned long arg)
1025 {
1026 drm_file_t *priv = filp->private_data;
1027 drm_device_t *dev = priv->dev;
1028
1029 if(!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
1030 DRM_ERROR("i810_swap_buf called without lock held\n");
1031 return -EINVAL;
1032 }
1033
1034 i810_dma_dispatch_swap( dev );
1035 return 0;
1036 }
1037
i810_getage(struct inode * inode,struct file * filp,unsigned int cmd,unsigned long arg)1038 int i810_getage(struct inode *inode, struct file *filp, unsigned int cmd,
1039 unsigned long arg)
1040 {
1041 drm_file_t *priv = filp->private_data;
1042 drm_device_t *dev = priv->dev;
1043 drm_i810_private_t *dev_priv = (drm_i810_private_t *)dev->dev_private;
1044 u32 *hw_status = (u32 *)dev_priv->hw_status_page;
1045 drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *)
1046 dev_priv->sarea_priv;
1047
1048 sarea_priv->last_dispatch = (int) hw_status[5];
1049 return 0;
1050 }
1051
/* Get-buffer ioctl: hand the client a free, freshly mapped DMA buffer
 * and copy the grant details back to userspace.  Also refreshes the
 * SAREA last_dispatch counter from the status page.
 */
int i810_getbuf(struct inode *inode, struct file *filp, unsigned int cmd,
		unsigned long arg)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;
	int retcode = 0;
	drm_i810_dma_t d;
	drm_i810_private_t *dev_priv = (drm_i810_private_t *)dev->dev_private;
	u32 *hw_status = (u32 *)dev_priv->hw_status_page;
	drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *)
					dev_priv->sarea_priv;

	if (copy_from_user(&d, (drm_i810_dma_t *)arg, sizeof(d)))
		return -EFAULT;

	if(!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
		DRM_ERROR("i810_dma called without lock held\n");
		return -EINVAL;
	}

	d.granted = 0;

	retcode = i810_dma_get_buffer(dev, &d, filp);

	/* Copy the (possibly not granted) result back regardless of
	 * retcode; userspace checks d.granted. */
	if (copy_to_user((drm_dma_t *)arg, &d, sizeof(d)))
		return -EFAULT;
	sarea_priv->last_dispatch = (int) hw_status[5];

	return retcode;
}
1082
/* Ioctl stub: buffer copying is not needed on 2.4.x kernels, so this
 * always succeeds without doing anything. */
int i810_copybuf(struct inode *inode, struct file *filp,
		 unsigned int cmd, unsigned long arg)
{
	/* Never copy - 2.4.x doesn't need it */
	return 0;
}
1091
/* Ioctl stub: kept for ABI completeness; no copy is ever performed on
 * 2.4.x kernels. */
int i810_docopy(struct inode *inode, struct file *filp, unsigned int cmd,
		unsigned long arg)
{
	/* Never copy - 2.4.x doesn't need it */
	return 0;
}
1098
/* Queue a client-built "mc" (secondary/batch) buffer on the ring.
 *
 * Ownership of the buffer is moved from the client to the hardware via
 * the in_use cmpxchg; the batch is dispatched as a protected batch
 * buffer, after which the ring's STORE_DWORD commands mark the buffer
 * free again and record last_render in the status page (index 16).
 *
 * NOTE(review): the caller is expected to have validated `used` and the
 * buffer index (see i810_dma_mc); the `used > 4*1024` clamp here only
 * guards the batch length, not the pad write below -- confirm against
 * the buffer allocation size.
 */
static void i810_dma_dispatch_mc(drm_device_t *dev, drm_buf_t *buf, int used,
				 unsigned int last_render)
{
	drm_i810_private_t *dev_priv = dev->dev_private;
	drm_i810_buf_priv_t *buf_priv = buf->dev_private;
	drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv;
	unsigned long address = (unsigned long)buf->bus_address;
	unsigned long start = address - dev->agp->base;
	int u;
	RING_LOCALS;

	i810_kernel_lost_context(dev);

	/* Take the buffer from the client; warn if it wasn't theirs. */
	u = cmpxchg(buf_priv->in_use, I810_BUF_CLIENT,
		    I810_BUF_HARDWARE);
	if (u != I810_BUF_CLIENT) {
		DRM_DEBUG("MC found buffer that isn't mine!\n");
	}

	if (used > 4*1024)
		used = 0;

	sarea_priv->dirty = 0x7f;

	DRM_DEBUG("dispatch mc addr 0x%lx, used 0x%x\n",
		  address, used);

	dev_priv->counter++;
	DRM_DEBUG("dispatch counter : %ld\n", dev_priv->counter);
	DRM_DEBUG("i810_dma_dispatch_mc\n");
	DRM_DEBUG("start : %lx\n", start);
	DRM_DEBUG("used : %d\n", used);
	DRM_DEBUG("start + used - 4 : %ld\n", start + used - 4);

	if (buf_priv->currently_mapped == I810_BUF_MAPPED) {
		if (used & 4) {
			/* Pad to an 8-byte boundary with a zero dword.
			 * Fix: do the arithmetic on a byte pointer -- the
			 * old (u32) cast truncated the kernel virtual
			 * address to 32 bits, which is broken on 64-bit. */
			*(u32 *)((char *)buf_priv->virtual + used) = 0;
			used += 4;
		}

		i810_unmap_buffer(buf);
	}

	/* Dispatch the batch buffer itself. */
	BEGIN_LP_RING(4);
	OUT_RING( CMD_OP_BATCH_BUFFER );
	OUT_RING( start | BB1_PROTECTED );
	OUT_RING( start + used - 4 );
	OUT_RING( 0 );
	ADVANCE_LP_RING();


	/* Have the hardware release the buffer and record the render age
	 * once the batch has completed. */
	BEGIN_LP_RING(8);
	OUT_RING( CMD_STORE_DWORD_IDX );
	OUT_RING( buf_priv->my_use_idx );
	OUT_RING( I810_BUF_FREE );
	OUT_RING( 0 );

	OUT_RING( CMD_STORE_DWORD_IDX );
	OUT_RING( 16 );
	OUT_RING( last_render );
	OUT_RING( 0 );
	ADVANCE_LP_RING();
}
1161
i810_dma_mc(struct inode * inode,struct file * filp,unsigned int cmd,unsigned long arg)1162 int i810_dma_mc(struct inode *inode, struct file *filp,
1163 unsigned int cmd, unsigned long arg)
1164 {
1165 drm_file_t *priv = filp->private_data;
1166 drm_device_t *dev = priv->dev;
1167 drm_device_dma_t *dma = dev->dma;
1168 drm_i810_private_t *dev_priv = (drm_i810_private_t *)dev->dev_private;
1169 u32 *hw_status = (u32 *)dev_priv->hw_status_page;
1170 drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *)
1171 dev_priv->sarea_priv;
1172 drm_i810_mc_t mc;
1173
1174 if (copy_from_user(&mc, (drm_i810_mc_t *)arg, sizeof(mc)))
1175 return -EFAULT;
1176
1177
1178 if(!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
1179 DRM_ERROR("i810_dma_mc called without lock held\n");
1180 return -EINVAL;
1181 }
1182
1183 i810_dma_dispatch_mc(dev, dma->buflist[mc.idx], mc.used,
1184 mc.last_render );
1185
1186 atomic_add(mc.used, &dev->counts[_DRM_STAT_SECONDARY]);
1187 atomic_inc(&dev->counts[_DRM_STAT_DMA]);
1188 sarea_priv->last_enqueue = dev_priv->counter-1;
1189 sarea_priv->last_dispatch = (int) hw_status[5];
1190
1191 return 0;
1192 }
1193
i810_rstatus(struct inode * inode,struct file * filp,unsigned int cmd,unsigned long arg)1194 int i810_rstatus(struct inode *inode, struct file *filp,
1195 unsigned int cmd, unsigned long arg)
1196 {
1197 drm_file_t *priv = filp->private_data;
1198 drm_device_t *dev = priv->dev;
1199 drm_i810_private_t *dev_priv = (drm_i810_private_t *)dev->dev_private;
1200
1201 return (int)(((u32 *)(dev_priv->hw_status_page))[4]);
1202 }
1203
i810_ov0_info(struct inode * inode,struct file * filp,unsigned int cmd,unsigned long arg)1204 int i810_ov0_info(struct inode *inode, struct file *filp,
1205 unsigned int cmd, unsigned long arg)
1206 {
1207 drm_file_t *priv = filp->private_data;
1208 drm_device_t *dev = priv->dev;
1209 drm_i810_private_t *dev_priv = (drm_i810_private_t *)dev->dev_private;
1210 drm_i810_overlay_t data;
1211
1212 data.offset = dev_priv->overlay_offset;
1213 data.physical = dev_priv->overlay_physical;
1214 if (copy_to_user((drm_i810_overlay_t *)arg,&data,sizeof(data)))
1215 return -EFAULT;
1216 return 0;
1217 }
1218
i810_fstatus(struct inode * inode,struct file * filp,unsigned int cmd,unsigned long arg)1219 int i810_fstatus(struct inode *inode, struct file *filp,
1220 unsigned int cmd, unsigned long arg)
1221 {
1222 drm_file_t *priv = filp->private_data;
1223 drm_device_t *dev = priv->dev;
1224 drm_i810_private_t *dev_priv = (drm_i810_private_t *)dev->dev_private;
1225
1226 if(!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
1227 DRM_ERROR("i810_fstatus called without lock held\n");
1228 return -EINVAL;
1229 }
1230 return I810_READ(0x30008);
1231 }
1232
i810_ov0_flip(struct inode * inode,struct file * filp,unsigned int cmd,unsigned long arg)1233 int i810_ov0_flip(struct inode *inode, struct file *filp,
1234 unsigned int cmd, unsigned long arg)
1235 {
1236 drm_file_t *priv = filp->private_data;
1237 drm_device_t *dev = priv->dev;
1238 drm_i810_private_t *dev_priv = (drm_i810_private_t *)dev->dev_private;
1239
1240 if(!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
1241 DRM_ERROR("i810_ov0_flip called without lock held\n");
1242 return -EINVAL;
1243 }
1244
1245 //Tell the overlay to update
1246 I810_WRITE(0x30000,dev_priv->overlay_physical | 0x80000000);
1247
1248 return 0;
1249 }
1250
1251
1252