1 /* i810_dma.c -- DMA support for the i810 -*- linux-c -*-
2 * Created: Mon Dec 13 01:50:01 1999 by jhartmann@precisioninsight.com
3 *
4 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
5 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
6 * All Rights Reserved.
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
17 * Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
24 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
25 * DEALINGS IN THE SOFTWARE.
26 *
27 * Authors: Rickard E. (Rik) Faith <faith@valinux.com>
28 * Jeff Hartmann <jhartmann@valinux.com>
29 * Keith Whitwell <keithw@valinux.com>
30 *
31 */
32
33 #define __NO_VERSION__
34 #include "drmP.h"
35 #include "i810_drv.h"
36 #include <linux/interrupt.h> /* For task queue support */
37 #include <linux/pagemap.h>
38
39 /* in case we don't have a 2.3.99-pre6 kernel or later: */
40 #ifndef VM_DONTCOPY
41 #define VM_DONTCOPY 0
42 #endif
43
44 #define I810_BUF_FREE 2
45 #define I810_BUF_CLIENT 1
46 #define I810_BUF_HARDWARE 0
47
48 #define I810_BUF_UNMAPPED 0
49 #define I810_BUF_MAPPED 1
50
51 #define I810_REG(reg) 2
52 #define I810_BASE(reg) ((unsigned long) \
53 dev->maplist[I810_REG(reg)]->handle)
54 #define I810_ADDR(reg) (I810_BASE(reg) + reg)
55 #define I810_DEREF(reg) *(__volatile__ int *)I810_ADDR(reg)
56 #define I810_READ(reg) I810_DEREF(reg)
57 #define I810_WRITE(reg,val) do { I810_DEREF(reg) = val; } while (0)
58 #define I810_DEREF16(reg) *(__volatile__ u16 *)I810_ADDR(reg)
59 #define I810_READ16(reg) I810_DEREF16(reg)
60 #define I810_WRITE16(reg,val) do { I810_DEREF16(reg) = val; } while (0)
61
62 #define RING_LOCALS unsigned int outring, ringmask; volatile char *virt;
63
64 #define BEGIN_LP_RING(n) do { \
65 if (I810_VERBOSE) \
66 DRM_DEBUG("BEGIN_LP_RING(%d) in %s\n", \
67 n, __FUNCTION__); \
68 if (dev_priv->ring.space < n*4) \
69 i810_wait_ring(dev, n*4); \
70 dev_priv->ring.space -= n*4; \
71 outring = dev_priv->ring.tail; \
72 ringmask = dev_priv->ring.tail_mask; \
73 virt = dev_priv->ring.virtual_start; \
74 } while (0)
75
76 #define ADVANCE_LP_RING() do { \
77 if (I810_VERBOSE) DRM_DEBUG("ADVANCE_LP_RING\n"); \
78 dev_priv->ring.tail = outring; \
79 I810_WRITE(LP_RING + RING_TAIL, outring); \
80 } while(0)
81
82 #define OUT_RING(n) do { \
83 if (I810_VERBOSE) DRM_DEBUG(" OUT_RING %x\n", (int)(n)); \
84 *(volatile unsigned int *)(virt + outring) = n; \
85 outring += 4; \
86 outring &= ringmask; \
87 } while (0)
88
/* Dump the driver-visible dwords of the hardware status page, plus the
 * per-buffer in-use words that follow them, through DRM_DEBUG.
 */
static inline void i810_print_status_page(drm_device_t *dev)
{
	drm_device_dma_t *dma = dev->dma;
	drm_i810_private_t *dev_priv = dev->dev_private;
	u32 *hw = (u32 *)dev_priv->hw_status_page;
	int idx;

	DRM_DEBUG( "hw_status: Interrupt Status : %x\n", hw[0]);
	DRM_DEBUG( "hw_status: LpRing Head ptr : %x\n", hw[1]);
	DRM_DEBUG( "hw_status: IRing Head ptr : %x\n", hw[2]);
	DRM_DEBUG( "hw_status: Reserved : %x\n", hw[3]);
	DRM_DEBUG( "hw_status: Driver Counter : %d\n", hw[5]);

	/* Buffer status words begin at dword 6 of the status page. */
	for (idx = 0; idx < dma->buf_count; idx++)
		DRM_DEBUG( "buffer status idx : %d used: %d\n",
			   idx, hw[idx + 6]);
}
105
i810_freelist_get(drm_device_t * dev)106 static drm_buf_t *i810_freelist_get(drm_device_t *dev)
107 {
108 drm_device_dma_t *dma = dev->dma;
109 int i;
110 int used;
111
112 /* Linear search might not be the best solution */
113
114 for (i = 0; i < dma->buf_count; i++) {
115 drm_buf_t *buf = dma->buflist[ i ];
116 drm_i810_buf_priv_t *buf_priv = buf->dev_private;
117 /* In use is already a pointer */
118 used = cmpxchg(buf_priv->in_use, I810_BUF_FREE,
119 I810_BUF_CLIENT);
120 if(used == I810_BUF_FREE) {
121 return buf;
122 }
123 }
124 return NULL;
125 }
126
127 /* This should only be called if the buffer is not sent to the hardware
128 * yet, the hardware updates in use for us once its on the ring buffer.
129 */
130
/* Return a client-owned buffer to the free list.
 *
 * This should only be called if the buffer has not been sent to the
 * hardware yet; once it is on the ring, the hardware clears the in-use
 * word for us.  Returns 0, or -EINVAL if the buffer was not in the
 * CLIENT state.
 */
static int i810_freelist_put(drm_device_t *dev, drm_buf_t *buf)
{
	drm_i810_buf_priv_t *bp = buf->dev_private;
	int prev_state;

	/* Atomic CLIENT -> FREE transition; anything else is a misuse. */
	prev_state = cmpxchg(bp->in_use, I810_BUF_CLIENT, I810_BUF_FREE);
	if (prev_state == I810_BUF_CLIENT)
		return 0;

	DRM_ERROR("Freeing buffer thats not in use : %d\n", buf->idx);
	return -EINVAL;
}
145
/* File operations installed temporarily on a client's filp by
 * i810_map_buffer() while it calls do_mmap(), so that the mmap handler
 * is i810_mmap_buffers() and the resulting VMA maps the DMA buffer
 * published through dev_priv->mmap_buffer. */
static struct file_operations i810_buffer_fops = {
	open:	 i810_open,
	flush:	 drm_flush,
	release: i810_release,
	ioctl:	 i810_ioctl,
	mmap:	 i810_mmap_buffers,
	read:	 drm_read,
	fasync:	 drm_fasync,
	poll:	 drm_poll,
};
156
i810_mmap_buffers(struct file * filp,struct vm_area_struct * vma)157 int i810_mmap_buffers(struct file *filp, struct vm_area_struct *vma)
158 {
159 drm_file_t *priv = filp->private_data;
160 drm_device_t *dev;
161 drm_i810_private_t *dev_priv;
162 drm_buf_t *buf;
163 drm_i810_buf_priv_t *buf_priv;
164
165 lock_kernel();
166 dev = priv->dev;
167 dev_priv = dev->dev_private;
168 buf = dev_priv->mmap_buffer;
169 buf_priv = buf->dev_private;
170
171 vma->vm_flags |= (VM_IO | VM_DONTCOPY);
172 vma->vm_file = filp;
173
174 buf_priv->currently_mapped = I810_BUF_MAPPED;
175 unlock_kernel();
176
177 if (remap_page_range(vma->vm_start,
178 VM_OFFSET(vma),
179 vma->vm_end - vma->vm_start,
180 vma->vm_page_prot)) return -EAGAIN;
181 return 0;
182 }
183
/* Map a DMA buffer into the calling client's address space.
 *
 * On kernels with VM_DONTCOPY we temporarily swap the file's f_op for
 * i810_buffer_fops and call do_mmap(), so that i810_mmap_buffers() maps
 * exactly the buffer published through dev_priv->mmap_buffer.  Without
 * VM_DONTCOPY the client is simply handed the kernel's own mapping.
 *
 * Returns 0 on success or a negative errno.
 */
static int i810_map_buffer(drm_buf_t *buf, struct file *filp)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;
	drm_i810_buf_priv_t *buf_priv = buf->dev_private;
	drm_i810_private_t *dev_priv = dev->dev_private;
	struct file_operations *old_fops;
	int retcode = 0;

	if(buf_priv->currently_mapped == I810_BUF_MAPPED) return -EINVAL;

	if(VM_DONTCOPY != 0) {
		down_write(&current->mm->mmap_sem);
		/* Publish the target buffer and install the buffer fops
		 * so do_mmap() reaches i810_mmap_buffers(). */
		old_fops = filp->f_op;
		filp->f_op = &i810_buffer_fops;
		dev_priv->mmap_buffer = buf;
		buf_priv->virtual = (void *)do_mmap(filp, 0, buf->total,
						    PROT_READ|PROT_WRITE,
						    MAP_SHARED,
						    buf->bus_address);
		dev_priv->mmap_buffer = NULL;
		filp->f_op = old_fops;
		/* do_mmap() returns a small negative errno cast to a
		 * pointer on failure, i.e. any value above -1024UL. */
		if ((unsigned long)buf_priv->virtual > -1024UL) {
			/* Real error */
			DRM_DEBUG("mmap error\n");
			retcode = (signed int)buf_priv->virtual;
			buf_priv->virtual = 0;
		}
		up_write(&current->mm->mmap_sem);
	} else {
		/* No per-client mmap available: reuse the kernel-side
		 * ioremap of the buffer. */
		buf_priv->virtual = buf_priv->kernel_virtual;
		buf_priv->currently_mapped = I810_BUF_MAPPED;
	}
	return retcode;
}
219
/* Undo i810_map_buffer(): remove the client's mapping of the DMA
 * buffer and mark it unmapped.  Returns 0 or the do_munmap() error.
 */
static int i810_unmap_buffer(drm_buf_t *buf)
{
	drm_i810_buf_priv_t *bp = buf->dev_private;
	int ret = 0;

	if (VM_DONTCOPY != 0) {
		/* Only mapped buffers have a client VMA to tear down. */
		if (bp->currently_mapped != I810_BUF_MAPPED)
			return -EINVAL;

		down_write(&current->mm->mmap_sem);
		ret = do_munmap(current->mm,
				(unsigned long)bp->virtual,
				(size_t)buf->total);
		up_write(&current->mm->mmap_sem);
	}

	bp->currently_mapped = I810_BUF_UNMAPPED;
	bp->virtual = 0;
	return ret;
}
239
/* Grab a free DMA buffer, map it into the caller's address space, and
 * fill in the ioctl reply structure.  Returns 0 on success, -ENOMEM
 * when no buffer is free, or the mapping error.
 */
static int i810_dma_get_buffer(drm_device_t *dev, drm_i810_dma_t *d,
			       struct file *filp)
{
	drm_file_t *priv = filp->private_data;
	drm_i810_buf_priv_t *bp;
	drm_buf_t *buf;
	int rc;

	buf = i810_freelist_get(dev);
	if (buf == NULL) {
		DRM_DEBUG("retcode=%d\n", -ENOMEM);
		return -ENOMEM;
	}

	rc = i810_map_buffer(buf, filp);
	if (rc != 0) {
		/* Couldn't map it for the client: put it back. */
		i810_freelist_put(dev, buf);
		DRM_DEBUG("mapbuf failed, retcode %d\n", rc);
		return rc;
	}

	bp = buf->dev_private;
	buf->pid = priv->pid;
	d->granted = 1;
	d->request_idx = buf->idx;
	d->request_size = buf->total;
	d->virtual = bp->virtual;

	return 0;
}
270
/* Allocate one kernel page (used for the hardware status page) and pin
 * it.  Returns the page's kernel virtual address, or 0 on failure.
 */
static unsigned long i810_alloc_page(drm_device_t *dev)
{
	unsigned long addr = __get_free_page(GFP_KERNEL);
	struct page *pg;

	if (addr == 0UL)
		return 0;

	/* Take an extra reference and lock the page so it stays put
	 * while the hardware writes into it. */
	pg = virt_to_page(addr);
	get_page(pg);
	LockPage(pg);

	return addr;
}
284
/* Release a page obtained from i810_alloc_page(): drop the extra
 * reference, unlock it, and free it.  A zero address is a no-op.
 */
static void i810_free_page(drm_device_t *dev, unsigned long page)
{
	struct page *pg;

	if (page == 0UL)
		return;

	pg = virt_to_page(page);
	put_page(pg);
	UnlockPage(pg);
	free_page(page);
}
296
/* Tear down everything i810_dma_initialize() set up: the ring mapping,
 * the hardware status page, the private structure, and the per-buffer
 * kernel mappings.  Safe on a partially initialized device (each step
 * is guarded); always returns 0.
 */
static int i810_dma_cleanup(drm_device_t *dev)
{
	drm_device_dma_t *dma = dev->dma;

	if(dev->dev_private) {
		int i;
		drm_i810_private_t *dev_priv =
			(drm_i810_private_t *) dev->dev_private;

		if(dev_priv->ring.virtual_start) {
			drm_ioremapfree((void *) dev_priv->ring.virtual_start,
					dev_priv->ring.Size, dev);
		}
		if(dev_priv->hw_status_page != 0UL) {
			i810_free_page(dev, dev_priv->hw_status_page);
			/* Need to rewrite hardware status page */
			I810_WRITE(0x02080, 0x1ffff000);
		}
		drm_free(dev->dev_private, sizeof(drm_i810_private_t),
			 DRM_MEM_DRIVER);
		dev->dev_private = NULL;

		/* Release the kernel-side ioremaps that
		 * i810_freelist_init() made for each buffer. */
		for (i = 0; i < dma->buf_count; i++) {
			drm_buf_t *buf = dma->buflist[ i ];
			drm_i810_buf_priv_t *buf_priv = buf->dev_private;
			drm_ioremapfree(buf_priv->kernel_virtual, buf->total, dev);
		}
	}
	return 0;
}
327
/* Busy-wait until at least n bytes are free in the low-priority ring.
 *
 * Re-reads the hardware head pointer on every pass.  The 3-second
 * timeout restarts whenever the head moves, so only a genuinely wedged
 * engine triggers the lockup message.  Returns the number of polling
 * iterations performed.
 */
static int i810_wait_ring(drm_device_t *dev, int n)
{
	drm_i810_private_t *dev_priv = dev->dev_private;
	drm_i810_ring_buffer_t *ring = &(dev_priv->ring);
	int iters = 0;
	unsigned long end;
	unsigned int last_head = I810_READ(LP_RING + RING_HEAD) & HEAD_ADDR;

	end = jiffies + (HZ*3);
	while (ring->space < n) {
		int i;

		/* Free space = head - (tail + 8-byte guard), wrapped
		 * modulo the ring size. */
		ring->head = I810_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
		ring->space = ring->head - (ring->tail+8);
		if (ring->space < 0) ring->space += ring->Size;

		/* Progress: the engine is alive, restart the timeout. */
		if (ring->head != last_head)
			end = jiffies + (HZ*3);

		iters++;
		if((signed)(end - jiffies) <= 0) {
			DRM_ERROR("space: %d wanted %d\n", ring->space, n);
			DRM_ERROR("lockup\n");
			goto out_wait_ring;
		}

		/* Short delay between hardware polls. */
		for (i = 0 ; i < 2000 ; i++) ;
	}

 out_wait_ring:
	return iters;
}
360
/* Resynchronize the driver's cached ring state with the hardware head
 * and tail registers.  Called before emitting commands, in case another
 * context has advanced the ring since we last looked.
 */
static void i810_kernel_lost_context(drm_device_t *dev)
{
	drm_i810_private_t *dev_priv = dev->dev_private;
	drm_i810_ring_buffer_t *ring = &(dev_priv->ring);

	ring->head = I810_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
	ring->tail = I810_READ(LP_RING + RING_TAIL);
	/* Free space = head - (tail + 8-byte guard), wrapped modulo the
	 * ring size. */
	ring->space = ring->head - (ring->tail+8);
	if (ring->space < 0) ring->space += ring->Size;
}
371
/* Set up the buffer freelist: carve one u32 "in use" word per buffer
 * out of the hardware status page (starting at byte offset 24) and
 * ioremap each buffer for kernel access.
 *
 * A 4K status page limits this scheme to 1019 buffers.  Returns 0 on
 * success, -EINVAL when there are too many buffers.
 */
static int i810_freelist_init(drm_device_t *dev)
{
	drm_device_dma_t *dma = dev->dma;
	drm_i810_private_t *dev_priv = (drm_i810_private_t *)dev->dev_private;
	int my_idx = 24;
	u32 *hw_status = (u32 *)(dev_priv->hw_status_page + my_idx);
	int i;

	if(dma->buf_count > 1019) {
		/* Not enough space in the status page for the freelist */
		return -EINVAL;
	}

	for (i = 0; i < dma->buf_count; i++) {
		drm_buf_t *buf = dma->buflist[ i ];
		drm_i810_buf_priv_t *buf_priv = buf->dev_private;

		/* Each buffer's state word lives in the status page so
		 * the hardware can update it via CMD_STORE_DWORD_IDX. */
		buf_priv->in_use = hw_status++;
		buf_priv->my_use_idx = my_idx;
		my_idx += 4;

		*buf_priv->in_use = I810_BUF_FREE;

		buf_priv->kernel_virtual = drm_ioremap(buf->bus_address,
						       buf->total, dev);
	}
	return 0;
}
400
/* One-time DMA engine setup for the I810_INIT_DMA ioctl: validates the
 * user-supplied map indices, maps the ring buffer, records the screen
 * parameters, allocates and programs the hardware status page, and
 * builds the buffer freelist.
 *
 * On any failure the partially built state is torn down through
 * i810_dma_cleanup() and a negative errno is returned; returns 0 on
 * success.
 */
static int i810_dma_initialize(drm_device_t *dev,
			       drm_i810_private_t *dev_priv,
			       drm_i810_init_t *init)
{
	drm_map_t *sarea_map;

	dev->dev_private = (void *) dev_priv;
	memset(dev_priv, 0, sizeof(drm_i810_private_t));

	/* Validate user-controlled indices before dereferencing maps. */
	if (init->ring_map_idx >= dev->map_count ||
	    init->buffer_map_idx >= dev->map_count) {
		i810_dma_cleanup(dev);
		DRM_ERROR("ring_map or buffer_map are invalid\n");
		return -EINVAL;
	}

	dev_priv->ring_map_idx = init->ring_map_idx;
	dev_priv->buffer_map_idx = init->buffer_map_idx;

	/* Map 0 is the SAREA; the driver-private section sits at the
	 * caller-specified offset inside it. */
	sarea_map = dev->maplist[0];
	dev_priv->sarea_priv = (drm_i810_sarea_t *)
		((u8 *)sarea_map->handle +
		 init->sarea_priv_offset);

	atomic_set(&dev_priv->flush_done, 0);
	init_waitqueue_head(&dev_priv->flush_queue);

	dev_priv->ring.Start = init->ring_start;
	dev_priv->ring.End = init->ring_end;
	dev_priv->ring.Size = init->ring_size;

	dev_priv->ring.virtual_start = drm_ioremap(dev->agp->base +
						   init->ring_start,
						   init->ring_size, dev);

	/* Ring size is a power of two, so Size-1 masks tail offsets. */
	dev_priv->ring.tail_mask = dev_priv->ring.Size - 1;

	if (dev_priv->ring.virtual_start == NULL) {
		i810_dma_cleanup(dev);
		DRM_ERROR("can not ioremap virtual address for"
			  " ring buffer\n");
		return -ENOMEM;
	}

	dev_priv->w = init->w;
	dev_priv->h = init->h;
	dev_priv->pitch = init->pitch;
	dev_priv->back_offset = init->back_offset;
	dev_priv->depth_offset = init->depth_offset;

	dev_priv->front_di1 = init->front_offset | init->pitch_bits;
	dev_priv->back_di1 = init->back_offset | init->pitch_bits;
	dev_priv->zi1 = init->depth_offset | init->pitch_bits;

	/* Program Hardware Status Page.
	 *
	 * BUG FIX: check the allocation result *before* clearing the
	 * page.  The old code called memset() first, which dereferenced
	 * address 0 when i810_alloc_page() failed. */
	dev_priv->hw_status_page = i810_alloc_page(dev);
	if(dev_priv->hw_status_page == 0UL) {
		i810_dma_cleanup(dev);
		DRM_ERROR("Can not allocate hardware status page\n");
		return -ENOMEM;
	}
	memset((void *) dev_priv->hw_status_page, 0, PAGE_SIZE);
	DRM_DEBUG("hw status page @ %lx\n", dev_priv->hw_status_page);

	/* Tell the chip where (in bus space) the status page lives. */
	I810_WRITE(0x02080, virt_to_bus((void *)dev_priv->hw_status_page));
	DRM_DEBUG("Enabled hardware status page\n");

	/* Now we need to init our freelist */
	if(i810_freelist_init(dev) != 0) {
		i810_dma_cleanup(dev);
		DRM_ERROR("Not enough space in the status page for"
			  " the freelist\n");
		return -ENOMEM;
	}
	return 0;
}
477
i810_dma_init(struct inode * inode,struct file * filp,unsigned int cmd,unsigned long arg)478 int i810_dma_init(struct inode *inode, struct file *filp,
479 unsigned int cmd, unsigned long arg)
480 {
481 drm_file_t *priv = filp->private_data;
482 drm_device_t *dev = priv->dev;
483 drm_i810_private_t *dev_priv;
484 drm_i810_init_t init;
485 int retcode = 0;
486
487 if (copy_from_user(&init, (drm_i810_init_t *)arg, sizeof(init)))
488 return -EFAULT;
489
490 switch(init.func) {
491 case I810_INIT_DMA:
492 dev_priv = drm_alloc(sizeof(drm_i810_private_t),
493 DRM_MEM_DRIVER);
494 if(dev_priv == NULL) return -ENOMEM;
495 retcode = i810_dma_initialize(dev, dev_priv, &init);
496 break;
497 case I810_CLEANUP_DMA:
498 retcode = i810_dma_cleanup(dev);
499 break;
500 default:
501 retcode = -EINVAL;
502 break;
503 }
504
505 return retcode;
506 }
507
508
509
510 /* Most efficient way to verify state for the i810 is as it is
511 * emitted. Non-conformant state is silently dropped.
512 *
513 * Use 'volatile' & local var tmp to force the emitted values to be
514 * identical to the verified ones.
515 */
/* Copy the client's context state into the ring as it is verified.
 * The color-factor and stipple registers are emitted from their fixed
 * slots; every remaining dword is emitted only if it passes the
 * command-mask check below, non-conformant state is silently dropped.
 */
static void i810EmitContextVerified( drm_device_t *dev,
				     volatile unsigned int *code )
{
	drm_i810_private_t *dev_priv = dev->dev_private;
	int i, j = 0;
	unsigned int tmp;
	RING_LOCALS;

	BEGIN_LP_RING( I810_CTX_SETUP_SIZE );

	OUT_RING( GFX_OP_COLOR_FACTOR );
	OUT_RING( code[I810_CTXREG_CF1] );

	OUT_RING( GFX_OP_STIPPLE );
	OUT_RING( code[I810_CTXREG_ST1] );

	for ( i = 4 ; i < I810_CTX_SETUP_SIZE ; i++ ) {
		tmp = code[i];

		/* Accept only dwords whose top three bits are 3 and
		 * whose opcode field (bits 24-28) is below 0x1d;
		 * anything else from userspace is discarded. */
		if ((tmp & (7<<29)) == (3<<29) &&
		    (tmp & (0x1f<<24)) < (0x1d<<24))
		{
			OUT_RING( tmp );
			j++;
		}
	}

	/* Pad with a nop when an odd number of dwords was accepted
	 * (NOTE(review): presumably to keep qword alignment — confirm). */
	if (j & 1)
		OUT_RING( 0 );

	ADVANCE_LP_RING();
}
548
/* Copy one texture unit's state into the ring as it is verified.  The
 * three map-info registers are emitted from their fixed slots; every
 * remaining dword must pass the same command-mask check used by
 * i810EmitContextVerified(), or it is silently dropped.
 */
static void i810EmitTexVerified( drm_device_t *dev,
				 volatile unsigned int *code )
{
	drm_i810_private_t *dev_priv = dev->dev_private;
	int i, j = 0;
	unsigned int tmp;
	RING_LOCALS;

	BEGIN_LP_RING( I810_TEX_SETUP_SIZE );

	OUT_RING( GFX_OP_MAP_INFO );
	OUT_RING( code[I810_TEXREG_MI1] );
	OUT_RING( code[I810_TEXREG_MI2] );
	OUT_RING( code[I810_TEXREG_MI3] );

	for ( i = 4 ; i < I810_TEX_SETUP_SIZE ; i++ ) {
		tmp = code[i];

		/* Top three bits must be 3 and the opcode field (bits
		 * 24-28) below 0x1d, otherwise the dword is discarded. */
		if ((tmp & (7<<29)) == (3<<29) &&
		    (tmp & (0x1f<<24)) < (0x1d<<24))
		{
			OUT_RING( tmp );
			j++;
		}
	}

	/* Pad to an even count of accepted dwords (see context emit). */
	if (j & 1)
		OUT_RING( 0 );

	ADVANCE_LP_RING();
}
580
581
582 /* Need to do some additional checking when setting the dest buffer.
583 */
/* Need to do some additional checking when setting the dest buffer:
 * the destination-info dword is only emitted when it matches the
 * driver's known front or back buffer, so a client cannot point
 * rendering at arbitrary memory.  The Z buffer info is always emitted
 * from the driver's own value, and the remaining dest registers are
 * copied through unchecked.
 */
static void i810EmitDestVerified( drm_device_t *dev,
				  volatile unsigned int *code )
{
	drm_i810_private_t *dev_priv = dev->dev_private;
	unsigned int tmp;
	RING_LOCALS;

	BEGIN_LP_RING( I810_DEST_SETUP_SIZE + 2 );

	tmp = code[I810_DESTREG_DI1];
	if (tmp == dev_priv->front_di1 || tmp == dev_priv->back_di1) {
		OUT_RING( CMD_OP_DESTBUFFER_INFO );
		OUT_RING( tmp );
	} else
		DRM_DEBUG("bad di1 %x (allow %x or %x)\n",
			  tmp, dev_priv->front_di1, dev_priv->back_di1);

	/* invarient:
	 */
	OUT_RING( CMD_OP_Z_BUFFER_INFO );
	OUT_RING( dev_priv->zi1 );

	OUT_RING( GFX_OP_DESTBUFFER_VARS );
	OUT_RING( code[I810_DESTREG_DV1] );

	OUT_RING( GFX_OP_DRAWRECT_INFO );
	OUT_RING( code[I810_DESTREG_DR1] );
	OUT_RING( code[I810_DESTREG_DR2] );
	OUT_RING( code[I810_DESTREG_DR3] );
	OUT_RING( code[I810_DESTREG_DR4] );
	OUT_RING( 0 );

	ADVANCE_LP_RING();
}
618
619
620
/* Re-emit exactly the state groups the client marked dirty in the
 * SAREA, clearing each dirty flag once its commands are on the ring.
 */
static void i810EmitState( drm_device_t *dev )
{
	drm_i810_private_t *dev_priv = dev->dev_private;
	drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv;
	unsigned int pending = sarea_priv->dirty;

	if (pending & I810_UPLOAD_BUFFERS) {
		i810EmitDestVerified( dev, sarea_priv->BufferState );
		sarea_priv->dirty &= ~I810_UPLOAD_BUFFERS;
	}

	if (pending & I810_UPLOAD_CTX) {
		i810EmitContextVerified( dev, sarea_priv->ContextState );
		sarea_priv->dirty &= ~I810_UPLOAD_CTX;
	}

	if (pending & I810_UPLOAD_TEX0) {
		i810EmitTexVerified( dev, sarea_priv->TexState[0] );
		sarea_priv->dirty &= ~I810_UPLOAD_TEX0;
	}

	if (pending & I810_UPLOAD_TEX1) {
		i810EmitTexVerified( dev, sarea_priv->TexState[1] );
		sarea_priv->dirty &= ~I810_UPLOAD_TEX1;
	}
}
647
648
649
650 /* need to verify
651 */
/* need to verify
 *
 * Clear the requested buffers (front/back/depth, per `flags`) for every
 * cliprect in the SAREA using solid-pattern color blits.  Inverted or
 * out-of-screen boxes are skipped.
 */
static void i810_dma_dispatch_clear( drm_device_t *dev, int flags,
				     unsigned int clear_color,
				     unsigned int clear_zval )
{
	drm_i810_private_t *dev_priv = dev->dev_private;
	drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv;
	int nbox = sarea_priv->nbox;
	drm_clip_rect_t *pbox = sarea_priv->boxes;
	int pitch = dev_priv->pitch;
	int cpp = 2;	/* bytes per pixel */
	int i;
	RING_LOCALS;

	i810_kernel_lost_context(dev);

	if (nbox > I810_NR_SAREA_CLIPRECTS)
		nbox = I810_NR_SAREA_CLIPRECTS;

	for (i = 0 ; i < nbox ; i++, pbox++) {
		unsigned int x = pbox->x1;
		unsigned int y = pbox->y1;
		unsigned int width = (pbox->x2 - x) * cpp;
		unsigned int height = pbox->y2 - y;
		unsigned int start = y * pitch + x * cpp;

		/* Reject malformed or out-of-bounds boxes supplied by
		 * userspace through the SAREA. */
		if (pbox->x1 > pbox->x2 ||
		    pbox->y1 > pbox->y2 ||
		    pbox->x2 > dev_priv->w ||
		    pbox->y2 > dev_priv->h)
			continue;

		if ( flags & I810_FRONT ) {
			DRM_DEBUG("clear front\n");
			BEGIN_LP_RING( 6 );
			OUT_RING( BR00_BITBLT_CLIENT |
				  BR00_OP_COLOR_BLT | 0x3 );
			OUT_RING( BR13_SOLID_PATTERN | (0xF0 << 16) | pitch );
			OUT_RING( (height << 16) | width );
			OUT_RING( start );
			OUT_RING( clear_color );
			OUT_RING( 0 );
			ADVANCE_LP_RING();
		}

		if ( flags & I810_BACK ) {
			DRM_DEBUG("clear back\n");
			BEGIN_LP_RING( 6 );
			OUT_RING( BR00_BITBLT_CLIENT |
				  BR00_OP_COLOR_BLT | 0x3 );
			OUT_RING( BR13_SOLID_PATTERN | (0xF0 << 16) | pitch );
			OUT_RING( (height << 16) | width );
			/* same rectangle, offset into the back buffer */
			OUT_RING( dev_priv->back_offset + start );
			OUT_RING( clear_color );
			OUT_RING( 0 );
			ADVANCE_LP_RING();
		}

		if ( flags & I810_DEPTH ) {
			DRM_DEBUG("clear depth\n");
			BEGIN_LP_RING( 6 );
			OUT_RING( BR00_BITBLT_CLIENT |
				  BR00_OP_COLOR_BLT | 0x3 );
			OUT_RING( BR13_SOLID_PATTERN | (0xF0 << 16) | pitch );
			OUT_RING( (height << 16) | width );
			/* same rectangle, offset into the depth buffer */
			OUT_RING( dev_priv->depth_offset + start );
			OUT_RING( clear_zval );
			OUT_RING( 0 );
			ADVANCE_LP_RING();
		}
	}
}
723
/* "Swap buffers" by blitting the back buffer to the front buffer, one
 * source-copy blit per cliprect in the SAREA.  Inverted or
 * out-of-screen boxes are skipped.
 */
static void i810_dma_dispatch_swap( drm_device_t *dev )
{
	drm_i810_private_t *dev_priv = dev->dev_private;
	drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv;
	int nbox = sarea_priv->nbox;
	drm_clip_rect_t *pbox = sarea_priv->boxes;
	int pitch = dev_priv->pitch;
	int cpp = 2;	/* bytes per pixel */
	int ofs = dev_priv->back_offset;
	int i;
	RING_LOCALS;

	DRM_DEBUG("swapbuffers\n");

	i810_kernel_lost_context(dev);

	if (nbox > I810_NR_SAREA_CLIPRECTS)
		nbox = I810_NR_SAREA_CLIPRECTS;

	for (i = 0 ; i < nbox; i++, pbox++)
	{
		unsigned int w = pbox->x2 - pbox->x1;
		unsigned int h = pbox->y2 - pbox->y1;
		unsigned int dst = pbox->x1*cpp + pbox->y1*pitch;
		unsigned int start = ofs + dst;

		/* Reject malformed or out-of-bounds boxes supplied by
		 * userspace through the SAREA. */
		if (pbox->x1 > pbox->x2 ||
		    pbox->y1 > pbox->y2 ||
		    pbox->x2 > dev_priv->w ||
		    pbox->y2 > dev_priv->h)
			continue;

		/* BUG FIX: this used pbox[i], but pbox itself is already
		 * advanced every iteration, so the debug output indexed
		 * past the current box (and beyond the valid array for
		 * later iterations).  Log the current box instead. */
		DRM_DEBUG("dispatch swap %d,%d-%d,%d!\n",
			  pbox->x1, pbox->y1,
			  pbox->x2, pbox->y2);

		BEGIN_LP_RING( 6 );
		OUT_RING( BR00_BITBLT_CLIENT | BR00_OP_SRC_COPY_BLT | 0x4 );
		OUT_RING( pitch | (0xCC << 16));	/* 0xCC = source-copy ROP */
		OUT_RING( (h << 16) | (w * cpp));
		OUT_RING( dst );			/* front-buffer destination */
		OUT_RING( pitch );
		OUT_RING( start );			/* back-buffer source */
		ADVANCE_LP_RING();
	}
}
770
771
/* Dispatch one client vertex buffer as a protected batch buffer,
 * replaying it once per cliprect (at least once even with no boxes).
 * When `discard` is set, ownership passes to the hardware, which frees
 * the buffer by writing I810_BUF_FREE into its status-page word via the
 * CMD_STORE_DWORD_IDX emitted at the end.
 */
static void i810_dma_dispatch_vertex(drm_device_t *dev,
				     drm_buf_t *buf,
				     int discard,
				     int used)
{
	drm_i810_private_t *dev_priv = dev->dev_private;
	drm_i810_buf_priv_t *buf_priv = buf->dev_private;
	drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv;
	drm_clip_rect_t *box = sarea_priv->boxes;
	int nbox = sarea_priv->nbox;
	unsigned long address = (unsigned long)buf->bus_address;
	unsigned long start = address - dev->agp->base;
	int i = 0, u;
	RING_LOCALS;

	i810_kernel_lost_context(dev);

	if (nbox > I810_NR_SAREA_CLIPRECTS)
		nbox = I810_NR_SAREA_CLIPRECTS;

	if (discard) {
		/* Hand the buffer to the hardware; it becomes FREE again
		 * when the STORE_DWORD below executes. */
		u = cmpxchg(buf_priv->in_use, I810_BUF_CLIENT,
			    I810_BUF_HARDWARE);
		if(u != I810_BUF_CLIENT) {
			DRM_DEBUG("xxxx 2\n");
		}
	}

	/* Oversized submissions are silently dropped (used = 0 skips
	 * the batch emission but still frees the buffer if discarding). */
	if (used > 4*1024)
		used = 0;

	if (sarea_priv->dirty)
		i810EmitState( dev );

	DRM_DEBUG("dispatch vertex addr 0x%lx, used 0x%x nbox %d\n",
		  address, used, nbox);

	dev_priv->counter++;
	DRM_DEBUG( "dispatch counter : %ld\n", dev_priv->counter);
	DRM_DEBUG( "i810_dma_dispatch\n");
	DRM_DEBUG( "start : %lx\n", start);
	DRM_DEBUG( "used : %d\n", used);
	DRM_DEBUG( "start + used - 4 : %ld\n", start + used - 4);

	if (buf_priv->currently_mapped == I810_BUF_MAPPED) {
		/* Patch the primitive header in place: opcode, primitive
		 * type, and the dword count ((used/4)-2). */
		*(u32 *)buf_priv->virtual = (GFX_OP_PRIMITIVE |
					     sarea_priv->vertex_prim |
					     ((used/4)-2));

		if (used & 4) {
			/* Qword-pad the buffer with a trailing zero.
			 * NOTE(review): the (u32) pointer cast is
			 * 32-bit-only; it would truncate on 64-bit. */
			*(u32 *)((u32)buf_priv->virtual + used) = 0;
			used += 4;
		}

		i810_unmap_buffer(buf);
	}

	if (used) {
		/* do/while: emit at least one batch even when nbox == 0
		 * (no scissor in that case). */
		do {
			if (i < nbox) {
				BEGIN_LP_RING(4);
				OUT_RING( GFX_OP_SCISSOR | SC_UPDATE_SCISSOR |
					  SC_ENABLE );
				OUT_RING( GFX_OP_SCISSOR_INFO );
				OUT_RING( box[i].x1 | (box[i].y1<<16) );
				OUT_RING( (box[i].x2-1) | ((box[i].y2-1)<<16) );
				ADVANCE_LP_RING();
			}

			BEGIN_LP_RING(4);
			OUT_RING( CMD_OP_BATCH_BUFFER );
			OUT_RING( start | BB1_PROTECTED );
			OUT_RING( start + used - 4 );
			OUT_RING( 0 );
			ADVANCE_LP_RING();

		} while (++i < nbox);
	}

	/* Trailer: record the dispatch counter at status-page dword 5
	 * (byte offset 20), have the hardware free the buffer if it was
	 * discarded, and request a head report. */
	BEGIN_LP_RING(10);
	OUT_RING( CMD_STORE_DWORD_IDX );
	OUT_RING( 20 );
	OUT_RING( dev_priv->counter );
	OUT_RING( 0 );

	if (discard) {
		OUT_RING( CMD_STORE_DWORD_IDX );
		OUT_RING( buf_priv->my_use_idx );
		OUT_RING( I810_BUF_FREE );
		OUT_RING( 0 );
	}

	OUT_RING( CMD_REPORT_HEAD );
	OUT_RING( 0 );
	ADVANCE_LP_RING();
}
868
869
870 /* Interrupts are only for flushing */
/* Interrupts are only for flushing */
/* Shared IRQ handler: acknowledge the chip's interrupt sources and,
 * if any of ours were pending, schedule the flush-completion bottom
 * half on the immediate task queue.
 */
static void i810_dma_service(int irq, void *device, struct pt_regs *regs)
{
	drm_device_t *dev = (drm_device_t *)device;
	u16 temp;

	atomic_inc(&dev->total_irq);
	temp = I810_READ16(I810REG_INT_IDENTITY_R);
	/* Mask off bits 13-14; only the remaining sources are ours. */
	temp = temp & ~(0x6000);
	if(temp != 0) I810_WRITE16(I810REG_INT_IDENTITY_R,
				   temp); /* Clear all interrupts */
	else
		/* Not our interrupt (shared line). */
		return;

	queue_task(&dev->tq, &tq_immediate);
	mark_bh(IMMEDIATE_BH);
}
887
i810_dma_task_queue(void * device)888 static void i810_dma_task_queue(void *device)
889 {
890 drm_device_t *dev = (drm_device_t *) device;
891 drm_i810_private_t *dev_priv = (drm_i810_private_t *)dev->dev_private;
892
893 atomic_set(&dev_priv->flush_done, 1);
894 wake_up_interruptible(&dev_priv->flush_queue);
895 }
896
/* Install the (shared) interrupt handler used for flush completion.
 *
 * All interrupt sources are masked/disabled before request_irq(), and
 * only the two low bits (breakpoint and user interrupt) are enabled
 * afterwards.  Returns 0, -EINVAL for irq 0, -EBUSY if already
 * installed, or the request_irq() error.
 */
int i810_irq_install(drm_device_t *dev, int irq)
{
	int retcode;
	u16 temp;

	if (!irq) return -EINVAL;

	down(&dev->struct_sem);
	if (dev->irq) {
		/* Handler already installed. */
		up(&dev->struct_sem);
		return -EBUSY;
	}
	dev->irq = irq;
	up(&dev->struct_sem);

	DRM_DEBUG( "Interrupt Install : %d\n", irq);
	DRM_DEBUG("%d\n", irq);

	dev->context_flag = 0;
	dev->interrupt_flag = 0;
	dev->dma_flag = 0;

	dev->dma->next_buffer = NULL;
	dev->dma->next_queue = NULL;
	dev->dma->this_buffer = NULL;

	/* Bottom half run from the immediate task queue when
	 * i810_dma_service() sees one of our interrupts. */
	INIT_LIST_HEAD(&dev->tq.list);
	dev->tq.sync = 0;
	dev->tq.routine = i810_dma_task_queue;
	dev->tq.data = dev;

	/* Before installing handler */
	/* Keep only bits 13-14 in each register, quiescing all other
	 * sources until the handler is in place. */
	temp = I810_READ16(I810REG_HWSTAM);
	temp = temp & 0x6000;
	I810_WRITE16(I810REG_HWSTAM, temp);

	temp = I810_READ16(I810REG_INT_MASK_R);
	temp = temp & 0x6000;
	I810_WRITE16(I810REG_INT_MASK_R, temp); /* Unmask interrupts */
	temp = I810_READ16(I810REG_INT_ENABLE_R);
	temp = temp & 0x6000;
	I810_WRITE16(I810REG_INT_ENABLE_R, temp); /* Disable all interrupts */

	/* Install handler */
	if ((retcode = request_irq(dev->irq,
				   i810_dma_service,
				   SA_SHIRQ,
				   dev->devname,
				   dev))) {
		/* Roll back the irq claim on failure. */
		down(&dev->struct_sem);
		dev->irq = 0;
		up(&dev->struct_sem);
		return retcode;
	}
	temp = I810_READ16(I810REG_INT_ENABLE_R);
	temp = temp & 0x6000;
	temp = temp | 0x0003;
	I810_WRITE16(I810REG_INT_ENABLE_R,
		     temp); /* Enable bp & user interrupts */
	return 0;
}
958
/* Remove the interrupt handler: clear and disable all of our interrupt
 * sources, then free the IRQ.  Returns 0, or -EINVAL if no handler was
 * installed.
 */
int i810_irq_uninstall(drm_device_t *dev)
{
	int irq;
	u16 temp;


/*	return 0; */

	down(&dev->struct_sem);
	irq = dev->irq;
	dev->irq = 0;
	up(&dev->struct_sem);

	if (!irq) return -EINVAL;

	DRM_DEBUG( "Interrupt UnInstall: %d\n", irq);
	DRM_DEBUG("%d\n", irq);

	/* Acknowledge anything still pending (bits 13-14 excluded, as in
	 * the service routine). */
	temp = I810_READ16(I810REG_INT_IDENTITY_R);
	temp = temp & ~(0x6000);
	if(temp != 0) I810_WRITE16(I810REG_INT_IDENTITY_R,
				   temp); /* Clear all interrupts */

	temp = I810_READ16(I810REG_INT_ENABLE_R);
	temp = temp & 0x6000;
	I810_WRITE16(I810REG_INT_ENABLE_R,
		     temp); /* Disable all interrupts */

	free_irq(irq, dev);

	return 0;
}
991
i810_control(struct inode * inode,struct file * filp,unsigned int cmd,unsigned long arg)992 int i810_control(struct inode *inode, struct file *filp, unsigned int cmd,
993 unsigned long arg)
994 {
995 drm_file_t *priv = filp->private_data;
996 drm_device_t *dev = priv->dev;
997 drm_control_t ctl;
998 int retcode;
999
1000 DRM_DEBUG( "i810_control\n");
1001
1002 if (copy_from_user(&ctl, (drm_control_t *)arg, sizeof(ctl)))
1003 return -EFAULT;
1004
1005 switch (ctl.func) {
1006 case DRM_INST_HANDLER:
1007 if ((retcode = i810_irq_install(dev, ctl.irq)))
1008 return retcode;
1009 break;
1010 case DRM_UNINST_HANDLER:
1011 if ((retcode = i810_irq_uninstall(dev)))
1012 return retcode;
1013 break;
1014 default:
1015 return -EINVAL;
1016 }
1017 return 0;
1018 }
1019
/* Emit a head report plus a user interrupt; the interrupt fires
 * i810_dma_service() -> i810_dma_task_queue(), which wakes sleepers
 * in i810_flush_queue().
 */
static inline void i810_dma_emit_flush(drm_device_t *dev)
{
	drm_i810_private_t *dev_priv = dev->dev_private;
	RING_LOCALS;

	i810_kernel_lost_context(dev);

	BEGIN_LP_RING(2);
	OUT_RING( CMD_REPORT_HEAD );
	OUT_RING( GFX_OP_USER_INTERRUPT );
	ADVANCE_LP_RING();

/*  	i810_wait_ring( dev, dev_priv->ring.Size - 8 ); */
/*  	atomic_set(&dev_priv->flush_done, 1); */
/*  	wake_up_interruptible(&dev_priv->flush_queue); */
}
1036
/* Emit a map-cache flush, a head report, and a user interrupt, used by
 * i810_dma_quiescent() to detect when the engine has drained; the
 * interrupt wakes the sleeper via the flush queue.
 */
static inline void i810_dma_quiescent_emit(drm_device_t *dev)
{
	drm_i810_private_t *dev_priv = dev->dev_private;
	RING_LOCALS;

	i810_kernel_lost_context(dev);

	BEGIN_LP_RING(4);
	OUT_RING( INST_PARSER_CLIENT | INST_OP_FLUSH | INST_FLUSH_MAP_CACHE );
	OUT_RING( CMD_REPORT_HEAD );
	OUT_RING( 0 );
	OUT_RING( GFX_OP_USER_INTERRUPT );
	ADVANCE_LP_RING();

/*  	i810_wait_ring( dev, dev_priv->ring.Size - 8 ); */
/*  	atomic_set(&dev_priv->flush_done, 1); */
/*  	wake_up_interruptible(&dev_priv->flush_queue); */
}
1055
/* Wait (up to ~3s) for the engine to go quiescent: repeatedly emit a
 * flush + user interrupt and sleep until the interrupt's bottom half
 * sets flush_done.  Gives up on timeout or a pending signal.
 */
static void i810_dma_quiescent(drm_device_t *dev)
{
	DECLARE_WAITQUEUE(entry, current);
	drm_i810_private_t *dev_priv = (drm_i810_private_t *)dev->dev_private;
	unsigned long end;

	if(dev_priv == NULL) {
		return;
	}
	atomic_set(&dev_priv->flush_done, 0);
	add_wait_queue(&dev_priv->flush_queue, &entry);
	end = jiffies + (HZ*3);

	for (;;) {
		/* Classic sleep pattern: mark interruptible *before*
		 * emitting, so a wakeup between emit and schedule isn't
		 * lost. */
		current->state = TASK_INTERRUPTIBLE;
		i810_dma_quiescent_emit(dev);
		if (atomic_read(&dev_priv->flush_done) == 1) break;
		if((signed)(end - jiffies) <= 0) {
			DRM_ERROR("lockup\n");
			break;
		}
		schedule_timeout(HZ*3);
		if (signal_pending(current)) {
			break;
		}
	}

	current->state = TASK_RUNNING;
	remove_wait_queue(&dev_priv->flush_queue, &entry);

	return;
}
1088
/* Flush outstanding work: emit flush + user interrupt and sleep (up to
 * ~3s) until the bottom half reports completion, then reclaim every
 * buffer still marked as owned by the hardware.  Returns 0 or -EINTR
 * if interrupted by a signal.
 */
static int i810_flush_queue(drm_device_t *dev)
{
	DECLARE_WAITQUEUE(entry, current);
	drm_i810_private_t *dev_priv = (drm_i810_private_t *)dev->dev_private;
	drm_device_dma_t *dma = dev->dma;
	unsigned long end;
	int i, ret = 0;

	if(dev_priv == NULL) {
		return 0;
	}
	atomic_set(&dev_priv->flush_done, 0);
	add_wait_queue(&dev_priv->flush_queue, &entry);
	end = jiffies + (HZ*3);
	for (;;) {
		/* Mark interruptible before emitting so the wakeup from
		 * the interrupt bottom half can't be lost. */
		current->state = TASK_INTERRUPTIBLE;
		i810_dma_emit_flush(dev);
		if (atomic_read(&dev_priv->flush_done) == 1) break;
		if((signed)(end - jiffies) <= 0) {
			DRM_ERROR("lockup\n");
			break;
		}
		schedule_timeout(HZ*3);
		if (signal_pending(current)) {
			ret = -EINTR; /* Can't restart */
			break;
		}
	}

	current->state = TASK_RUNNING;
	remove_wait_queue(&dev_priv->flush_queue, &entry);


	/* After the flush, anything still marked HARDWARE is done on the
	 * ring and can be returned to the free list. */
	for (i = 0; i < dma->buf_count; i++) {
		drm_buf_t *buf = dma->buflist[ i ];
		drm_i810_buf_priv_t *buf_priv = buf->dev_private;

		int used = cmpxchg(buf_priv->in_use, I810_BUF_HARDWARE,
				   I810_BUF_FREE);

		if (used == I810_BUF_HARDWARE)
			DRM_DEBUG("reclaimed from HARDWARE\n");
		if (used == I810_BUF_CLIENT)
			DRM_DEBUG("still on client HARDWARE\n");
	}

	return ret;
}
1137
1138 /* Must be called with the lock held */
/* Release every DMA buffer still owned by process `pid`.
 *
 * Used when a client exits while holding buffers.  Flushes outstanding
 * DMA first (which reclaims HARDWARE-state buffers), then atomically
 * flips this pid's CLIENT-state buffers back to FREE and clears any
 * stale mapped flag.
 *
 * Must be called with the lock held.
 */
void i810_reclaim_buffers(drm_device_t *dev, pid_t pid)
{
	drm_device_dma_t *dma = dev->dma;
	int i;

	/* Bail out if DMA was never set up for this device. */
	if (!dma) return;
	if (!dev->dev_private) return;
	if (!dma->buflist) return;

	i810_flush_queue(dev);

	for (i = 0; i < dma->buf_count; i++) {
		drm_buf_t *buf = dma->buflist[ i ];
		drm_i810_buf_priv_t *buf_priv = buf->dev_private;

		if (buf->pid == pid && buf_priv) {
			/* Atomic CLIENT -> FREE transition; if the state
			 * changed concurrently the cmpxchg simply fails
			 * and the buffer is left alone. */
			int used = cmpxchg(buf_priv->in_use, I810_BUF_CLIENT,
					   I810_BUF_FREE);

			if (used == I810_BUF_CLIENT)
				DRM_DEBUG("reclaimed from client\n");
			if(buf_priv->currently_mapped == I810_BUF_MAPPED)
				buf_priv->currently_mapped = I810_BUF_UNMAPPED;
		}
	}
}
1165
/* DRM_IOCTL_LOCK: acquire the hardware lock for a user context.
 *
 * Sleeps (interruptibly) on lock_queue until drm_lock_take() succeeds,
 * then blocks job-control signals for the duration of the lock and, if
 * _DRM_LOCK_QUIESCENT was requested, waits for the engine to go idle.
 *
 * Returns 0 on success, -EFAULT on a bad user pointer, -EINVAL for the
 * kernel context or a negative context, -EINTR if the device vanished,
 * -ERESTARTSYS if interrupted by a signal.
 */
int i810_lock(struct inode *inode, struct file *filp, unsigned int cmd,
	      unsigned long arg)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;

	DECLARE_WAITQUEUE(entry, current);
	int ret = 0;
	drm_lock_t lock;

	if (copy_from_user(&lock, (drm_lock_t *)arg, sizeof(lock)))
		return -EFAULT;

	/* The kernel context is reserved; userspace may not take it. */
	if (lock.context == DRM_KERNEL_CONTEXT) {
		DRM_ERROR("Process %d using kernel context %d\n",
			  current->pid, lock.context);
		return -EINVAL;
	}

	DRM_DEBUG("%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n",
		  lock.context, current->pid, dev->lock.hw_lock->lock,
		  lock.flags);

	if (lock.context < 0) {
		return -EINVAL;
	}
	/* Only one queue:
	 */

	if (!ret) {
		add_wait_queue(&dev->lock.lock_queue, &entry);
		for (;;) {
			/* Set the task state BEFORE the tests so a wake-up
			 * between test and schedule() is not lost. */
			current->state = TASK_INTERRUPTIBLE;
			if (!dev->lock.hw_lock) {
				/* Device has been unregistered */
				ret = -EINTR;
				break;
			}
			if (drm_lock_take(&dev->lock.hw_lock->lock,
					  lock.context)) {
				dev->lock.pid = current->pid;
				dev->lock.lock_time = jiffies;
				atomic_inc(&dev->total_locks);
				break;	/* Got lock */
			}

			/* Contention */
			atomic_inc(&dev->total_sleeps);
			DRM_DEBUG("Calling lock schedule\n");
			schedule();
			if (signal_pending(current)) {
				ret = -ERESTARTSYS;
				break;
			}
		}
		current->state = TASK_RUNNING;
		remove_wait_queue(&dev->lock.lock_queue, &entry);
	}

	if (!ret) {
		/* Block job-control signals while the lock is held so the
		 * client cannot be stopped with the hardware locked;
		 * drm_notifier consults sigdata to decide when to allow
		 * them through again. */
		sigemptyset(&dev->sigmask);
		sigaddset(&dev->sigmask, SIGSTOP);
		sigaddset(&dev->sigmask, SIGTSTP);
		sigaddset(&dev->sigmask, SIGTTIN);
		sigaddset(&dev->sigmask, SIGTTOU);
		dev->sigdata.context = lock.context;
		dev->sigdata.lock = dev->lock.hw_lock;
		block_all_signals(drm_notifier, &dev->sigdata, &dev->sigmask);

		/* Caller asked for the engine to be idle before return. */
		if (lock.flags & _DRM_LOCK_QUIESCENT) {
			DRM_DEBUG("_DRM_LOCK_QUIESCENT\n");
			DRM_DEBUG("fred\n");
			i810_dma_quiescent(dev);
		}
	}
	DRM_DEBUG("%d %s\n", lock.context, ret ? "interrupted" : "has lock");
	return ret;
}
1244
i810_flush_ioctl(struct inode * inode,struct file * filp,unsigned int cmd,unsigned long arg)1245 int i810_flush_ioctl(struct inode *inode, struct file *filp,
1246 unsigned int cmd, unsigned long arg)
1247 {
1248 drm_file_t *priv = filp->private_data;
1249 drm_device_t *dev = priv->dev;
1250
1251 DRM_DEBUG("i810_flush_ioctl\n");
1252 if(!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
1253 DRM_ERROR("i810_flush_ioctl called without lock held\n");
1254 return -EINVAL;
1255 }
1256
1257 i810_flush_queue(dev);
1258 return 0;
1259 }
1260
1261
i810_dma_vertex(struct inode * inode,struct file * filp,unsigned int cmd,unsigned long arg)1262 int i810_dma_vertex(struct inode *inode, struct file *filp,
1263 unsigned int cmd, unsigned long arg)
1264 {
1265 drm_file_t *priv = filp->private_data;
1266 drm_device_t *dev = priv->dev;
1267 drm_device_dma_t *dma = dev->dma;
1268 drm_i810_private_t *dev_priv = (drm_i810_private_t *)dev->dev_private;
1269 u32 *hw_status = (u32 *)dev_priv->hw_status_page;
1270 drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *)
1271 dev_priv->sarea_priv;
1272 drm_i810_vertex_t vertex;
1273
1274 if (copy_from_user(&vertex, (drm_i810_vertex_t *)arg, sizeof(vertex)))
1275 return -EFAULT;
1276
1277 if(!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
1278 DRM_ERROR("i810_dma_vertex called without lock held\n");
1279 return -EINVAL;
1280 }
1281
1282 DRM_DEBUG("i810 dma vertex, idx %d used %d discard %d\n",
1283 vertex.idx, vertex.used, vertex.discard);
1284
1285 if(vertex.idx < 0 || vertex.idx > dma->buf_count) return -EINVAL;
1286
1287 i810_dma_dispatch_vertex( dev,
1288 dma->buflist[ vertex.idx ],
1289 vertex.discard, vertex.used );
1290
1291 atomic_add(vertex.used, &dma->total_bytes);
1292 atomic_inc(&dma->total_dmas);
1293 sarea_priv->last_enqueue = dev_priv->counter-1;
1294 sarea_priv->last_dispatch = (int) hw_status[5];
1295
1296 return 0;
1297 }
1298
1299
1300
i810_clear_bufs(struct inode * inode,struct file * filp,unsigned int cmd,unsigned long arg)1301 int i810_clear_bufs(struct inode *inode, struct file *filp,
1302 unsigned int cmd, unsigned long arg)
1303 {
1304 drm_file_t *priv = filp->private_data;
1305 drm_device_t *dev = priv->dev;
1306 drm_i810_clear_t clear;
1307
1308 if (copy_from_user(&clear, (drm_i810_clear_t *)arg, sizeof(clear)))
1309 return -EFAULT;
1310
1311 if(!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
1312 DRM_ERROR("i810_clear_bufs called without lock held\n");
1313 return -EINVAL;
1314 }
1315
1316 i810_dma_dispatch_clear( dev, clear.flags,
1317 clear.clear_color,
1318 clear.clear_depth );
1319 return 0;
1320 }
1321
i810_swap_bufs(struct inode * inode,struct file * filp,unsigned int cmd,unsigned long arg)1322 int i810_swap_bufs(struct inode *inode, struct file *filp,
1323 unsigned int cmd, unsigned long arg)
1324 {
1325 drm_file_t *priv = filp->private_data;
1326 drm_device_t *dev = priv->dev;
1327
1328 DRM_DEBUG("i810_swap_bufs\n");
1329
1330 if(!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
1331 DRM_ERROR("i810_swap_buf called without lock held\n");
1332 return -EINVAL;
1333 }
1334
1335 i810_dma_dispatch_swap( dev );
1336 return 0;
1337 }
1338
i810_getage(struct inode * inode,struct file * filp,unsigned int cmd,unsigned long arg)1339 int i810_getage(struct inode *inode, struct file *filp, unsigned int cmd,
1340 unsigned long arg)
1341 {
1342 drm_file_t *priv = filp->private_data;
1343 drm_device_t *dev = priv->dev;
1344 drm_i810_private_t *dev_priv = (drm_i810_private_t *)dev->dev_private;
1345 u32 *hw_status = (u32 *)dev_priv->hw_status_page;
1346 drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *)
1347 dev_priv->sarea_priv;
1348
1349 sarea_priv->last_dispatch = (int) hw_status[5];
1350 return 0;
1351 }
1352
i810_getbuf(struct inode * inode,struct file * filp,unsigned int cmd,unsigned long arg)1353 int i810_getbuf(struct inode *inode, struct file *filp, unsigned int cmd,
1354 unsigned long arg)
1355 {
1356 drm_file_t *priv = filp->private_data;
1357 drm_device_t *dev = priv->dev;
1358 int retcode = 0;
1359 drm_i810_dma_t d;
1360 drm_i810_private_t *dev_priv = (drm_i810_private_t *)dev->dev_private;
1361 u32 *hw_status = (u32 *)dev_priv->hw_status_page;
1362 drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *)
1363 dev_priv->sarea_priv;
1364
1365 DRM_DEBUG("getbuf\n");
1366 if (copy_from_user(&d, (drm_i810_dma_t *)arg, sizeof(d)))
1367 return -EFAULT;
1368
1369 if(!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
1370 DRM_ERROR("i810_dma called without lock held\n");
1371 return -EINVAL;
1372 }
1373
1374 d.granted = 0;
1375
1376 retcode = i810_dma_get_buffer(dev, &d, filp);
1377
1378 DRM_DEBUG("i810_dma: %d returning %d, granted = %d\n",
1379 current->pid, retcode, d.granted);
1380
1381 if (copy_to_user((drm_dma_t *)arg, &d, sizeof(d)))
1382 return -EFAULT;
1383 sarea_priv->last_dispatch = (int) hw_status[5];
1384
1385 return retcode;
1386 }
1387
i810_copybuf(struct inode * inode,struct file * filp,unsigned int cmd,unsigned long arg)1388 int i810_copybuf(struct inode *inode, struct file *filp, unsigned int cmd,
1389 unsigned long arg)
1390 {
1391 drm_file_t *priv = filp->private_data;
1392 drm_device_t *dev = priv->dev;
1393 drm_i810_copy_t d;
1394 drm_i810_private_t *dev_priv = (drm_i810_private_t *)dev->dev_private;
1395 u32 *hw_status = (u32 *)dev_priv->hw_status_page;
1396 drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *)
1397 dev_priv->sarea_priv;
1398 drm_buf_t *buf;
1399 drm_i810_buf_priv_t *buf_priv;
1400 drm_device_dma_t *dma = dev->dma;
1401
1402 if(!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
1403 DRM_ERROR("i810_dma called without lock held\n");
1404 return -EINVAL;
1405 }
1406
1407 if (copy_from_user(&d, (drm_i810_copy_t *)arg, sizeof(d)))
1408 return -EFAULT;
1409
1410 if(d.idx < 0 || d.idx > dma->buf_count) return -EINVAL;
1411 buf = dma->buflist[ d.idx ];
1412 buf_priv = buf->dev_private;
1413 if (buf_priv->currently_mapped != I810_BUF_MAPPED) return -EPERM;
1414
1415 /* Stopping end users copying their data to the entire kernel
1416 is good.. */
1417 if (d.used < 0 || d.used > buf->total)
1418 return -EINVAL;
1419
1420 if (copy_from_user(buf_priv->virtual, d.address, d.used))
1421 return -EFAULT;
1422
1423 sarea_priv->last_dispatch = (int) hw_status[5];
1424
1425 return 0;
1426 }
1427
i810_docopy(struct inode * inode,struct file * filp,unsigned int cmd,unsigned long arg)1428 int i810_docopy(struct inode *inode, struct file *filp, unsigned int cmd,
1429 unsigned long arg)
1430 {
1431 if(VM_DONTCOPY == 0) return 1;
1432 return 0;
1433 }
1434