/* gamma_dma.c -- DMA support for GMX 2000 -*- linux-c -*-
 * Created: Fri Mar 19 14:30:16 1999 by faith@precisioninsight.com
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Rickard E. (Rik) Faith <faith@valinux.com>
 *
 */

#include "gamma.h"
#include "drmP.h"
#include "drm.h"
#include "gamma_drm.h"
#include "gamma_drv.h"

#include <linux/interrupt.h>	/* For task queue support */
#include <linux/delay.h>

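/* Hand one DMA buffer to the engine.  The register semantics are not
 * documented here; from the surrounding code, INFIFOSPACE must show room
 * for the two writes, and polling GCOMMANDSTATUS for the value 4 appears
 * to wait until the DMA engine can accept a new transfer.  DMACOUNT is
 * programmed in 32-bit words, hence length / 4.
 */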
static inline void gamma_dma_dispatch(drm_device_t *dev, unsigned long address,
				      unsigned long length)
{
	drm_gamma_private_t *dev_priv =
				(drm_gamma_private_t *)dev->dev_private;
	mb();
	while (GAMMA_READ(GAMMA_INFIFOSPACE) < 2) cpu_relax();
	GAMMA_WRITE(GAMMA_DMAADDRESS, address);
	while (GAMMA_READ(GAMMA_GCOMMANDSTATUS) != 4) cpu_relax();
	GAMMA_WRITE(GAMMA_DMACOUNT, length / 4);
}

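/* Wait for the engine to go idle.  The FilterMode write (bit 10)
 * apparently lets the sync tag pass through to the output FIFO; we then
 * write a Sync command and spin until its tag emerges, at which point all
 * previously submitted commands have been retired.
 */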
void gamma_dma_quiescent_single(drm_device_t *dev)
{
	drm_gamma_private_t *dev_priv =
				(drm_gamma_private_t *)dev->dev_private;
	while (GAMMA_READ(GAMMA_DMACOUNT)) cpu_relax();

	while (GAMMA_READ(GAMMA_INFIFOSPACE) < 2) cpu_relax();

	GAMMA_WRITE(GAMMA_FILTERMODE, 1 << 10);
	GAMMA_WRITE(GAMMA_SYNC, 0);

	do {
		while (!GAMMA_READ(GAMMA_OUTFIFOWORDS)) cpu_relax();
	} while (GAMMA_READ(GAMMA_OUTPUTFIFO) != GAMMA_SYNC_TAG);
}

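/* Dual-MX variant: the broadcast mask addresses both rasterizers, and the
 * second MX's registers are mirrored at a 0x10000 byte offset, so the
 * sync tag must be drained from each output FIFO in turn.
 */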
void gamma_dma_quiescent_dual(drm_device_t *dev)
{
	drm_gamma_private_t *dev_priv =
				(drm_gamma_private_t *)dev->dev_private;
	while (GAMMA_READ(GAMMA_DMACOUNT)) cpu_relax();

	while (GAMMA_READ(GAMMA_INFIFOSPACE) < 3) cpu_relax();

	GAMMA_WRITE(GAMMA_BROADCASTMASK, 3);
	GAMMA_WRITE(GAMMA_FILTERMODE, 1 << 10);
	GAMMA_WRITE(GAMMA_SYNC, 0);

	/* Read from first MX */
	do {
		while (!GAMMA_READ(GAMMA_OUTFIFOWORDS)) cpu_relax();
	} while (GAMMA_READ(GAMMA_OUTPUTFIFO) != GAMMA_SYNC_TAG);

	/* Read from second MX */
	do {
		while (!GAMMA_READ(GAMMA_OUTFIFOWORDS + 0x10000)) cpu_relax();
	} while (GAMMA_READ(GAMMA_OUTPUTFIFO + 0x10000) != GAMMA_SYNC_TAG);
}

void gamma_dma_ready(drm_device_t *dev)
{
	drm_gamma_private_t *dev_priv =
				(drm_gamma_private_t *)dev->dev_private;
	while (GAMMA_READ(GAMMA_DMACOUNT)) cpu_relax();
}

static inline int gamma_dma_is_ready(drm_device_t *dev)
{
	drm_gamma_private_t *dev_priv =
				(drm_gamma_private_t *)dev->dev_private;
	return !GAMMA_READ(GAMMA_DMACOUNT);
}

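/* Interrupt handler: acknowledge the interrupt, free the buffer whose DMA
 * just completed, and kick the immediate bottom half to dispatch the next
 * one.  The GDELAYTIMER/GCOMMANDINTFLAGS/GINTFLAGS writes appear to
 * re-arm the delay-timer interrupt; the magic values are taken on faith
 * from the original driver.
 */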
void gamma_dma_service(int irq, void *device, struct pt_regs *regs)
{
	drm_device_t	 *dev = (drm_device_t *)device;
	drm_device_dma_t *dma = dev->dma;
	drm_gamma_private_t *dev_priv =
				(drm_gamma_private_t *)dev->dev_private;

	atomic_inc(&dev->counts[6]); /* _DRM_STAT_IRQ */

	while (GAMMA_READ(GAMMA_INFIFOSPACE) < 3) cpu_relax();
	GAMMA_WRITE(GAMMA_GDELAYTIMER, 0xc350/2); /* 0.05 s */
	GAMMA_WRITE(GAMMA_GCOMMANDINTFLAGS, 8);
	GAMMA_WRITE(GAMMA_GINTFLAGS, 0x2001);
	if (gamma_dma_is_ready(dev)) {
				/* Free previous buffer */
		if (test_and_set_bit(0, &dev->dma_flag)) return;
		if (dma->this_buffer) {
			gamma_free_buffer(dev, dma->this_buffer);
			dma->this_buffer = NULL;
		}
		clear_bit(0, &dev->dma_flag);

				/* Dispatch new buffer */
		queue_task(&dev->tq, &tq_immediate);
		mark_bh(IMMEDIATE_BH);
	}
}

/* Only called by gamma_dma_schedule. */
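/* Take dma->next_buffer, validate it, and hand it to the hardware.  The
 * buffer is addressed by logical page (buf->idx << 12) through the page
 * table built in gamma_do_init_dma.  Unless the buffer was queued "while
 * locked" or `locked` says the caller already holds the hardware lock,
 * this function takes the lock itself.  Returns -EBUSY when the engine or
 * the lock is busy, so gamma_dma_schedule can retry later.
 */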
static int gamma_do_dma(drm_device_t *dev, int locked)
{
	unsigned long	 address;
	unsigned long	 length;
	drm_buf_t	 *buf;
	int		 retcode = 0;
	drm_device_dma_t *dma = dev->dma;
#if DRM_DMA_HISTOGRAM
	cycles_t	 dma_start, dma_stop;
#endif

	if (test_and_set_bit(0, &dev->dma_flag)) return -EBUSY;

#if DRM_DMA_HISTOGRAM
	dma_start = get_cycles();
#endif

	if (!dma->next_buffer) {
		DRM_ERROR("No next_buffer\n");
		clear_bit(0, &dev->dma_flag);
		return -EINVAL;
	}

	buf	= dma->next_buffer;
	/* WE NOW ARE ON LOGICAL PAGES!! - using page table setup in dma_init */
	/* So we pass the buffer index value into the physical page offset */
	address = buf->idx << 12;
	length	= buf->used;

	DRM_DEBUG("context %d, buffer %d (%ld bytes)\n",
		  buf->context, buf->idx, length);

	if (buf->list == DRM_LIST_RECLAIM) {
		gamma_clear_next_buffer(dev);
		gamma_free_buffer(dev, buf);
		clear_bit(0, &dev->dma_flag);
		return -EINVAL;
	}

	if (!length) {
		DRM_ERROR("0 length buffer\n");
		gamma_clear_next_buffer(dev);
		gamma_free_buffer(dev, buf);
		clear_bit(0, &dev->dma_flag);
		return 0;
	}

	if (!gamma_dma_is_ready(dev)) {
		clear_bit(0, &dev->dma_flag);
		return -EBUSY;
	}

	if (buf->while_locked) {
		if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
			DRM_ERROR("Dispatching buffer %d from pid %d"
				  " \"while locked\", but no lock held\n",
				  buf->idx, buf->pid);
		}
	} else {
		if (!locked && !gamma_lock_take(&dev->lock.hw_lock->lock,
					      DRM_KERNEL_CONTEXT)) {
			clear_bit(0, &dev->dma_flag);
			return -EBUSY;
		}
	}

	if (dev->last_context != buf->context
	    && !(dev->queuelist[buf->context]->flags
		 & _DRM_CONTEXT_PRESERVED)) {
				/* PRE: dev->last_context != buf->context */
		if (DRM(context_switch)(dev, dev->last_context,
					buf->context)) {
			DRM(clear_next_buffer)(dev);
			DRM(free_buffer)(dev, buf);
		}
		retcode = -EBUSY;
		goto cleanup;

				/* POST: we will wait for the context
				   switch and will dispatch on a later call
				   when dev->last_context == buf->context.
				   NOTE WE HOLD THE LOCK THROUGHOUT THIS
				   TIME! */
	}

	gamma_clear_next_buffer(dev);
	buf->pending	 = 1;
	buf->waiting	 = 0;
	buf->list	 = DRM_LIST_PEND;
#if DRM_DMA_HISTOGRAM
	buf->time_dispatched = get_cycles();
#endif

	/* WE NOW ARE ON LOGICAL PAGES!!! - overriding address */
	address = buf->idx << 12;

	gamma_dma_dispatch(dev, address, length);
	gamma_free_buffer(dev, dma->this_buffer);
	dma->this_buffer = buf;

	atomic_inc(&dev->counts[7]); /* _DRM_STAT_DMA */
	atomic_add(length, &dev->counts[8]); /* _DRM_STAT_PRIMARY */

	if (!buf->while_locked && !dev->context_flag && !locked) {
		if (gamma_lock_free(dev, &dev->lock.hw_lock->lock,
				  DRM_KERNEL_CONTEXT)) {
			DRM_ERROR("gamma_lock_free failed\n");
		}
	}
cleanup:

	clear_bit(0, &dev->dma_flag);

#if DRM_DMA_HISTOGRAM
	dma_stop = get_cycles();
	atomic_inc(&dev->histo.dma[gamma_histogram_slot(dma_stop - dma_start)]);
#endif

	return retcode;
}

static void gamma_dma_timer_bh(unsigned long dev)
{
	gamma_dma_schedule((drm_device_t *)dev, 0);
}

void gamma_dma_immediate_bh(void *dev)
{
	gamma_dma_schedule(dev, 0);
}

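/* Top-level scheduler, run from the bottom halves above and from the DMA
 * ioctl.  It retries any previously selected buffer first, then pulls
 * buffers from the highest-priority non-empty queue, looping (bounded by
 * `expire`) as long as the engine stays ready or interrupts were missed.
 */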
int gamma_dma_schedule(drm_device_t *dev, int locked)
{
	int		 next;
	drm_queue_t	 *q;
	drm_buf_t	 *buf;
	int		 retcode   = 0;
	int		 processed = 0;
	int		 missed;
	int		 expire	   = 20;
	drm_device_dma_t *dma	   = dev->dma;
#if DRM_DMA_HISTOGRAM
	cycles_t	 schedule_start;
#endif

	if (test_and_set_bit(0, &dev->interrupt_flag)) {
				/* Not reentrant */
		atomic_inc(&dev->counts[10]); /* _DRM_STAT_MISSED */
		return -EBUSY;
	}
	missed = atomic_read(&dev->counts[10]);

#if DRM_DMA_HISTOGRAM
	schedule_start = get_cycles();
#endif

again:
	if (dev->context_flag) {
		clear_bit(0, &dev->interrupt_flag);
		return -EBUSY;
	}
	if (dma->next_buffer) {
				/* Unsent buffer that was previously
				   selected, but that couldn't be sent
				   because the lock could not be obtained
				   or the DMA engine wasn't ready.  Try
				   again. */
		if (!(retcode = gamma_do_dma(dev, locked))) ++processed;
	} else {
		do {
			next = gamma_select_queue(dev, gamma_dma_timer_bh);
			if (next >= 0) {
				q   = dev->queuelist[next];
				buf = gamma_waitlist_get(&q->waitlist);
				dma->next_buffer = buf;
				dma->next_queue	 = q;
				if (buf && buf->list == DRM_LIST_RECLAIM) {
					gamma_clear_next_buffer(dev);
					gamma_free_buffer(dev, buf);
				}
			}
		} while (next >= 0 && !dma->next_buffer);
		if (dma->next_buffer) {
			if (!(retcode = gamma_do_dma(dev, locked))) {
				++processed;
			}
		}
	}

	if (--expire) {
		if (missed != atomic_read(&dev->counts[10])) {
			if (gamma_dma_is_ready(dev)) goto again;
		}
		if (processed && gamma_dma_is_ready(dev)) {
			processed = 0;
			goto again;
		}
	}

	clear_bit(0, &dev->interrupt_flag);

#if DRM_DMA_HISTOGRAM
	atomic_inc(&dev->histo.schedule[gamma_histogram_slot(get_cycles()
							   - schedule_start)]);
#endif
	return retcode;
}

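/* Priority path: bypass the scheduler queues and feed the listed buffers
 * straight to the hardware, sleeping through any required context switch.
 * Interrupt-driven scheduling is disabled (interrupt_flag) for the
 * duration so this path cannot race gamma_dma_schedule.
 */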
static int gamma_dma_priority(drm_device_t *dev, drm_dma_t *d)
{
	unsigned long	  address;
	unsigned long	  length;
	int		  must_free = 0;
	int		  retcode   = 0;
	int		  i;
	int		  idx;
	drm_buf_t	  *buf;
	drm_buf_t	  *last_buf = NULL;
	drm_device_dma_t  *dma	    = dev->dma;
	DECLARE_WAITQUEUE(entry, current);

				/* Turn off interrupt handling */
	while (test_and_set_bit(0, &dev->interrupt_flag)) {
		schedule();
		if (signal_pending(current)) return -EINTR;
	}
	if (!(d->flags & _DRM_DMA_WHILE_LOCKED)) {
		while (!gamma_lock_take(&dev->lock.hw_lock->lock,
				      DRM_KERNEL_CONTEXT)) {
			schedule();
			if (signal_pending(current)) {
				clear_bit(0, &dev->interrupt_flag);
				return -EINTR;
			}
		}
		++must_free;
	}

	for (i = 0; i < d->send_count; i++) {
		idx = d->send_indices[i];
		if (idx < 0 || idx >= dma->buf_count) {
			DRM_ERROR("Index %d (of %d max)\n",
				  d->send_indices[i], dma->buf_count - 1);
			continue;
		}
		buf = dma->buflist[ idx ];
		if (buf->pid != current->pid) {
			DRM_ERROR("Process %d using buffer owned by %d\n",
				  current->pid, buf->pid);
			retcode = -EINVAL;
			goto cleanup;
		}
		if (buf->list != DRM_LIST_NONE) {
			DRM_ERROR("Process %d using %d's buffer on list %d\n",
				  current->pid, buf->pid, buf->list);
			retcode = -EINVAL;
			goto cleanup;
		}
				/* This isn't a race condition on
				   buf->list, since our concern is the
				   buffer reclaim during the time the
				   process closes the /dev/drm? handle, so
				   it can't also be doing DMA. */
		buf->list	  = DRM_LIST_PRIO;
		buf->used	  = d->send_sizes[i];
		buf->context	  = d->context;
		buf->while_locked = d->flags & _DRM_DMA_WHILE_LOCKED;
		address		  = (unsigned long)buf->address;
		length		  = buf->used;
		if (!length) {
			DRM_ERROR("0 length buffer\n");
		}
		if (buf->pending) {
			DRM_ERROR("Sending pending buffer:"
				  " buffer %d, offset %d\n",
				  d->send_indices[i], i);
			retcode = -EINVAL;
			goto cleanup;
		}
		if (buf->waiting) {
			DRM_ERROR("Sending waiting buffer:"
				  " buffer %d, offset %d\n",
				  d->send_indices[i], i);
			retcode = -EINVAL;
			goto cleanup;
		}
		buf->pending = 1;

		if (dev->last_context != buf->context
		    && !(dev->queuelist[buf->context]->flags
			 & _DRM_CONTEXT_PRESERVED)) {
			add_wait_queue(&dev->context_wait, &entry);
			current->state = TASK_INTERRUPTIBLE;
				/* PRE: dev->last_context != buf->context */
			DRM(context_switch)(dev, dev->last_context,
					    buf->context);
				/* POST: we will wait for the context
				   switch and will dispatch on a later call
				   when dev->last_context == buf->context.
				   NOTE WE HOLD THE LOCK THROUGHOUT THIS
				   TIME! */
			schedule();
			current->state = TASK_RUNNING;
			remove_wait_queue(&dev->context_wait, &entry);
			if (signal_pending(current)) {
				retcode = -EINTR;
				goto cleanup;
			}
			if (dev->last_context != buf->context) {
				DRM_ERROR("Context mismatch: %d %d\n",
					  dev->last_context,
					  buf->context);
			}
		}

#if DRM_DMA_HISTOGRAM
		buf->time_queued     = get_cycles();
		buf->time_dispatched = buf->time_queued;
#endif
		gamma_dma_dispatch(dev, address, length);
		atomic_inc(&dev->counts[9]); /* _DRM_STAT_SPECIAL */
		atomic_add(length, &dev->counts[8]); /* _DRM_STAT_PRIMARY */

		if (last_buf) {
			gamma_free_buffer(dev, last_buf);
		}
		last_buf = buf;
	}

cleanup:
	if (last_buf) {
		gamma_dma_ready(dev);
		gamma_free_buffer(dev, last_buf);
	}

	if (must_free && !dev->context_flag) {
		if (gamma_lock_free(dev, &dev->lock.hw_lock->lock,
				  DRM_KERNEL_CONTEXT)) {
			DRM_ERROR("gamma_lock_free failed\n");
		}
	}
	clear_bit(0, &dev->interrupt_flag);
	return retcode;
}

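/* Queue the buffers described by `d` and kick the scheduler.  With
 * _DRM_DMA_BLOCK the caller sleeps on the last buffer's wait queue until
 * that buffer has been dispatched and completed.
 */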
static int gamma_dma_send_buffers(drm_device_t *dev, drm_dma_t *d)
{
	DECLARE_WAITQUEUE(entry, current);
	drm_buf_t	  *last_buf = NULL;
	int		  retcode   = 0;
	drm_device_dma_t  *dma	    = dev->dma;

	if (d->flags & _DRM_DMA_BLOCK) {
		last_buf = dma->buflist[d->send_indices[d->send_count-1]];
		add_wait_queue(&last_buf->dma_wait, &entry);
	}

	if ((retcode = gamma_dma_enqueue(dev, d))) {
		if (d->flags & _DRM_DMA_BLOCK)
			remove_wait_queue(&last_buf->dma_wait, &entry);
		return retcode;
	}

	gamma_dma_schedule(dev, 0);

	if (d->flags & _DRM_DMA_BLOCK) {
		DRM_DEBUG("%d waiting\n", current->pid);
		for (;;) {
			current->state = TASK_INTERRUPTIBLE;
			if (!last_buf->waiting && !last_buf->pending)
				break; /* finished */
			schedule();
			if (signal_pending(current)) {
				retcode = -EINTR; /* Can't restart */
				break;
			}
		}
		current->state = TASK_RUNNING;
		DRM_DEBUG("%d running\n", current->pid);
		remove_wait_queue(&last_buf->dma_wait, &entry);
		if (!retcode
		    || (last_buf->list==DRM_LIST_PEND && !last_buf->pending)) {
			if (!waitqueue_active(&last_buf->dma_wait)) {
				gamma_free_buffer(dev, last_buf);
			}
		}
		if (retcode) {
			DRM_ERROR("ctx%d w%d p%d c%ld i%d l%d %d/%d\n",
				  d->context,
				  last_buf->waiting,
				  last_buf->pending,
				  (long)DRM_WAITCOUNT(dev, d->context),
				  last_buf->idx,
				  last_buf->list,
				  last_buf->pid,
				  current->pid);
		}
	}
	return retcode;
}

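/* DRM_IOCTL_DMA handler: validate the request, send any buffers the
 * caller passed in (via the priority path or the queued path), then try
 * to hand back `request_count` free buffers via gamma_dma_get_buffers.
 */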
int gamma_dma(struct inode *inode, struct file *filp, unsigned int cmd,
	      unsigned long arg)
{
	drm_file_t	  *priv	    = filp->private_data;
	drm_device_t	  *dev	    = priv->dev;
	drm_device_dma_t  *dma	    = dev->dma;
	int		  retcode   = 0;
	drm_dma_t	  d;

	if (copy_from_user(&d, (drm_dma_t *)arg, sizeof(d)))
		return -EFAULT;

	if (d.send_count < 0 || d.send_count > dma->buf_count) {
		DRM_ERROR("Process %d trying to send %d buffers (of %d max)\n",
			  current->pid, d.send_count, dma->buf_count);
		return -EINVAL;
	}

	if (d.request_count < 0 || d.request_count > dma->buf_count) {
		DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n",
			  current->pid, d.request_count, dma->buf_count);
		return -EINVAL;
	}

	if (d.send_count) {
		if (d.flags & _DRM_DMA_PRIORITY)
			retcode = gamma_dma_priority(dev, &d);
		else
			retcode = gamma_dma_send_buffers(dev, &d);
	}

	d.granted_count = 0;

	if (!retcode && d.request_count) {
		retcode = gamma_dma_get_buffers(dev, &d);
	}

	DRM_DEBUG("%d returning, granted = %d\n",
		  current->pid, d.granted_count);
	if (copy_to_user((drm_dma_t *)arg, &d, sizeof(d)))
		return -EFAULT;

	return retcode;
}

/* =============================================================
 * DMA initialization, cleanup
 */

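/* Set up the private state and the DMA page table.  The page table lives
 * in DRI buffer GLINT_DRI_BUF_COUNT (the buffer just past the
 * client-visible ones) and maps every buffer, so that dispatch can
 * address buffers by logical page (buf->idx << 12).
 */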
static int gamma_do_init_dma( drm_device_t *dev, drm_gamma_init_t *init )
{
	drm_gamma_private_t *dev_priv;
	drm_device_dma_t    *dma = dev->dma;
	drm_buf_t	    *buf;
	int i;
	struct list_head    *list;
	unsigned long	    *pgt;

	DRM_DEBUG( "%s\n", __FUNCTION__ );

	dev_priv = DRM(alloc)( sizeof(drm_gamma_private_t),
							DRM_MEM_DRIVER );
	if ( !dev_priv )
		return -ENOMEM;

	dev->dev_private = (void *)dev_priv;

	memset( dev_priv, 0, sizeof(drm_gamma_private_t) );

	list_for_each(list, &dev->maplist->head) {
		drm_map_list_t *r_list = list_entry(list, drm_map_list_t,
						    head);
		if( r_list->map &&
		    r_list->map->type == _DRM_SHM &&
		    r_list->map->flags & _DRM_CONTAINS_LOCK ) {
			dev_priv->sarea = r_list->map;
			break;
		}
	}

	DRM_FIND_MAP( dev_priv->mmio0, init->mmio0 );
	DRM_FIND_MAP( dev_priv->mmio1, init->mmio1 );
	DRM_FIND_MAP( dev_priv->mmio2, init->mmio2 );
	DRM_FIND_MAP( dev_priv->mmio3, init->mmio3 );

	dev_priv->sarea_priv = (drm_gamma_sarea_t *)
		((u8 *)dev_priv->sarea->handle +
		 init->sarea_priv_offset);

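	/* Fill the page table.  In PCI mode each entry holds the physical
	 * address of a buffer; otherwise the (already bus-addressable)
	 * buffer address is used.  The low 0x07 bits are assumed to be
	 * the hardware's valid/access flags -- they are taken as-is from
	 * the original driver, which did not document them.
	 */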
	if (init->pcimode) {
		buf = dma->buflist[GLINT_DRI_BUF_COUNT];
		pgt = buf->address;

		for (i = 0; i < GLINT_DRI_BUF_COUNT; i++) {
			buf = dma->buflist[i];
			*pgt = virt_to_phys((void*)buf->address) | 0x07;
			pgt++;
		}

		buf = dma->buflist[GLINT_DRI_BUF_COUNT];
	} else {
		DRM_FIND_MAP( dev_priv->buffers, init->buffers_offset );

		DRM_IOREMAP( dev_priv->buffers, dev );

		buf = dma->buflist[GLINT_DRI_BUF_COUNT];
		pgt = buf->address;

		for (i = 0; i < GLINT_DRI_BUF_COUNT; i++) {
			buf = dma->buflist[i];
			*pgt = (unsigned long)buf->address | 0x07;
			pgt++;
		}

		buf = dma->buflist[GLINT_DRI_BUF_COUNT];

		while (GAMMA_READ(GAMMA_INFIFOSPACE) < 1) cpu_relax();
		GAMMA_WRITE( GAMMA_GDMACONTROL, 0xe );
	}
	while (GAMMA_READ(GAMMA_INFIFOSPACE) < 2) cpu_relax();
	GAMMA_WRITE( GAMMA_PAGETABLEADDR, virt_to_phys((void*)buf->address) );
	GAMMA_WRITE( GAMMA_PAGETABLELENGTH, 2 );

	return 0;
}

int gamma_do_cleanup_dma( drm_device_t *dev )
{
	DRM_DEBUG( "%s\n", __FUNCTION__ );

	if ( dev->dev_private ) {
		drm_gamma_private_t *dev_priv = dev->dev_private;

		DRM_IOREMAPFREE( dev_priv->buffers, dev );

		DRM(free)( dev->dev_private, sizeof(drm_gamma_private_t),
			   DRM_MEM_DRIVER );
		dev->dev_private = NULL;
	}

	return 0;
}

int gamma_dma_init( struct inode *inode, struct file *filp,
		  unsigned int cmd, unsigned long arg )
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;
	drm_gamma_init_t init;

	if ( copy_from_user( &init, (drm_gamma_init_t *)arg, sizeof(init) ) )
		return -EFAULT;

	switch ( init.func ) {
	case GAMMA_INIT_DMA:
		return gamma_do_init_dma( dev, &init );
	case GAMMA_CLEANUP_DMA:
		return gamma_do_cleanup_dma( dev );
	}

	return -EINVAL;
}

static int gamma_do_copy_dma( drm_device_t *dev, drm_gamma_copy_t *copy )
{
	drm_device_dma_t    *dma = dev->dma;
	unsigned int        *screenbuf;

	DRM_DEBUG( "%s\n", __FUNCTION__ );

	/* We've DRM_RESTRICTED this DMA buffer */

	screenbuf = dma->buflist[ GLINT_DRI_BUF_COUNT + 1 ]->address;

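	/* The block below is a never-enabled sketch of the intended
	 * screen-to-screen copy command stream (note that `buffer` is not
	 * even declared); it would have to point into a DMA buffer that
	 * is then dispatched.  It is kept, disabled, for reference.
	 */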
#if 0
	*buffer++ = 0x180;	/* Tag (FilterMode) */
	*buffer++ = 0x200;	/* Allow FBColor through */
	*buffer++ = 0x53B;	/* Tag */
	*buffer++ = copy->Pitch;
	*buffer++ = 0x53A;	/* Tag */
	*buffer++ = copy->SrcAddress;
	*buffer++ = 0x539;	/* Tag */
	*buffer++ = copy->WidthHeight; /* Initiates transfer */
	*buffer++ = 0x53C;	/* Tag - DMAOutputAddress */
	*buffer++ = virt_to_phys((void*)screenbuf);
	*buffer++ = 0x53D;	/* Tag - DMAOutputCount */
	*buffer++ = copy->Count; /* Reads HostOutFifo BLOCKS until ..*/

	/* Data now sitting in dma->buflist[ GLINT_DRI_BUF_COUNT + 1 ] */
	/* Now put it back to the screen */

	*buffer++ = 0x180;	/* Tag (FilterMode) */
	*buffer++ = 0x400;	/* Allow Sync through */
	*buffer++ = 0x538;	/* Tag - DMARectangleReadTarget */
	*buffer++ = 0x155;	/* FBSourceData | count */
	*buffer++ = 0x537;	/* Tag */
	*buffer++ = copy->Pitch;
	*buffer++ = 0x536;	/* Tag */
	*buffer++ = copy->DstAddress;
	*buffer++ = 0x535;	/* Tag */
	*buffer++ = copy->WidthHeight; /* Initiates transfer */
	*buffer++ = 0x530;	/* Tag - DMAAddr */
	*buffer++ = virt_to_phys((void*)screenbuf);
	*buffer++ = 0x531;
	*buffer++ = copy->Count; /* initiates DMA transfer of color data */
#endif

	/* need to dispatch it now */

	return 0;
}

int gamma_dma_copy( struct inode *inode, struct file *filp,
		  unsigned int cmd, unsigned long arg )
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;
	drm_gamma_copy_t copy;

	if ( copy_from_user( &copy, (drm_gamma_copy_t *)arg, sizeof(copy) ) )
		return -EFAULT;

	return gamma_do_copy_dma( dev, &copy );
}

/* =============================================================
 * Per Context SAREA Support
 */

int gamma_getsareactx(struct inode *inode, struct file *filp,
		     unsigned int cmd, unsigned long arg)
{
	drm_file_t	*priv	= filp->private_data;
	drm_device_t	*dev	= priv->dev;
	drm_ctx_priv_map_t request;
	drm_map_t *map;

	if (copy_from_user(&request,
			   (drm_ctx_priv_map_t *)arg,
			   sizeof(request)))
		return -EFAULT;

	down(&dev->struct_sem);
	if ((int)request.ctx_id >= dev->max_context) {
		up(&dev->struct_sem);
		return -EINVAL;
	}

	map = dev->context_sareas[request.ctx_id];
	up(&dev->struct_sem);

	/* No private SAREA has been bound to this context yet */
	if (!map)
		return -EINVAL;

	request.handle = map->handle;
	if (copy_to_user((drm_ctx_priv_map_t *)arg, &request, sizeof(request)))
		return -EFAULT;
	return 0;
}

int gamma_setsareactx(struct inode *inode, struct file *filp,
		     unsigned int cmd, unsigned long arg)
{
	drm_file_t	*priv	= filp->private_data;
	drm_device_t	*dev	= priv->dev;
	drm_ctx_priv_map_t request;
	drm_map_t *map = NULL;
	drm_map_list_t *r_list;
	struct list_head *list;

	if (copy_from_user(&request,
			   (drm_ctx_priv_map_t *)arg,
			   sizeof(request)))
		return -EFAULT;

	down(&dev->struct_sem);
	r_list = NULL;
	list_for_each(list, &dev->maplist->head) {
		r_list = list_entry(list, drm_map_list_t, head);
		if(r_list->map &&
		   r_list->map->handle == request.handle) break;
	}
	if (list == &(dev->maplist->head)) {
		up(&dev->struct_sem);
		return -EINVAL;
	}
	map = r_list->map;
	up(&dev->struct_sem);

	if (!map) return -EINVAL;

	down(&dev->struct_sem);
	if ((int)request.ctx_id >= dev->max_context) {
		up(&dev->struct_sem);
		return -EINVAL;
	}
	dev->context_sareas[request.ctx_id] = map;
	up(&dev->struct_sem);
	return 0;
}

/* drm_dma.h hooks
 */
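/* The Gamma driver appears to manage its own interrupt state (see
 * gamma_dma_service and the DMA init path), so these template hooks are
 * intentionally empty.
 */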
void DRM(driver_irq_preinstall)( drm_device_t *dev ) {
}

void DRM(driver_irq_postinstall)( drm_device_t *dev ) {
}

void DRM(driver_irq_uninstall)( drm_device_t *dev ) {
}