/* drm_dma.c -- DMA IOCTL and function support -*- linux-c -*-
 * Created: Fri Mar 19 14:30:16 1999 by faith@valinux.com
 *
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Rickard E. (Rik) Faith <faith@valinux.com>
 *    Gareth Hughes <gareth@valinux.com>
 */

#include "drmP.h"
#include "drm_os_linux.h"
#include <linux/interrupt.h>	/* For task queue support */

#ifndef __HAVE_DMA_WAITQUEUE
#define __HAVE_DMA_WAITQUEUE	0
#endif
#ifndef __HAVE_DMA_RECLAIM
#define __HAVE_DMA_RECLAIM	0
#endif
#ifndef __HAVE_SHARED_IRQ
#define __HAVE_SHARED_IRQ	0
#endif

#if __HAVE_SHARED_IRQ
#define DRM_IRQ_TYPE		SA_SHIRQ
#else
#define DRM_IRQ_TYPE		0
#endif

#if __HAVE_DMA

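/* Allocate and zero the per-device DMA bookkeeping structure (dev->dma),
 * including one bucket per buffer size order.  Returns 0 on success or
 * -ENOMEM if the allocation fails.
 */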
int DRM(dma_setup)( drm_device_t *dev )
{
	int i;

	dev->dma = DRM(alloc)( sizeof(*dev->dma), DRM_MEM_DRIVER );
	if ( !dev->dma )
		return -ENOMEM;

	memset( dev->dma, 0, sizeof(*dev->dma) );

	for ( i = 0 ; i <= DRM_MAX_ORDER ; i++ )
		memset(&dev->dma->bufs[i], 0, sizeof(dev->dma->bufs[0]));

	return 0;
}

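/* Tear down everything that dma_setup() and the buffer-allocation ioctls
 * built up: free every DMA segment and per-buffer private data for each
 * order, the per-order buffer lists, the flat buffer and page lists, and
 * finally the drm_device_dma_t itself.  Safe to call when dev->dma is
 * already NULL.
 */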
void DRM(dma_takedown)(drm_device_t *dev)
{
	drm_device_dma_t  *dma = dev->dma;
	int		  i, j;

	if (!dma) return;

				/* Clear dma buffers */
	for (i = 0; i <= DRM_MAX_ORDER; i++) {
		if (dma->bufs[i].seg_count) {
			DRM_DEBUG("order %d: buf_count = %d,"
				  " seg_count = %d\n",
				  i,
				  dma->bufs[i].buf_count,
				  dma->bufs[i].seg_count);
			for (j = 0; j < dma->bufs[i].seg_count; j++) {
				DRM(free_pages)(dma->bufs[i].seglist[j],
						dma->bufs[i].page_order,
						DRM_MEM_DMA);
			}
			DRM(free)(dma->bufs[i].seglist,
				  dma->bufs[i].seg_count
				  * sizeof(*dma->bufs[0].seglist),
				  DRM_MEM_SEGS);
		}
		if (dma->bufs[i].buf_count) {
			for (j = 0; j < dma->bufs[i].buf_count; j++) {
				if (dma->bufs[i].buflist[j].dev_private) {
					DRM(free)(dma->bufs[i].buflist[j].dev_private,
						  dma->bufs[i].buflist[j].dev_priv_size,
						  DRM_MEM_BUFS);
				}
			}
			DRM(free)(dma->bufs[i].buflist,
				  dma->bufs[i].buf_count *
				  sizeof(*dma->bufs[0].buflist),
				  DRM_MEM_BUFS);
#if __HAVE_DMA_FREELIST
			DRM(freelist_destroy)(&dma->bufs[i].freelist);
#endif
		}
	}

	if (dma->buflist) {
		DRM(free)(dma->buflist,
			  dma->buf_count * sizeof(*dma->buflist),
			  DRM_MEM_BUFS);
	}

	if (dma->pagelist) {
		DRM(free)(dma->pagelist,
			  dma->page_count * sizeof(*dma->pagelist),
			  DRM_MEM_PAGES);
	}
	DRM(free)(dev->dma, sizeof(*dev->dma), DRM_MEM_DRIVER);
	dev->dma = NULL;
}


#if __HAVE_DMA_HISTOGRAM
/* This is slow, but is useful for debugging. */
int DRM(histogram_slot)(unsigned long count)
{
	int value = DRM_DMA_HISTOGRAM_INITIAL;
	int slot;

	for (slot = 0;
	     slot < DRM_DMA_HISTOGRAM_SLOTS;
	     ++slot, value = DRM_DMA_HISTOGRAM_NEXT(value)) {
		if (count < value) return slot;
	}
	return DRM_DMA_HISTOGRAM_SLOTS - 1;
}

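/* Convert the timestamps recorded on a buffer into histogram slots and
 * bump the corresponding counters, then clear the timestamps so the buffer
 * can be reused.  Buffers that were never queued are skipped.
 */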
void DRM(histogram_compute)(drm_device_t *dev, drm_buf_t *buf)
{
	cycles_t queued_to_dispatched;
	cycles_t dispatched_to_completed;
	cycles_t completed_to_freed;
	int	 q2d, d2c, c2f, q2c, q2f;

	if (buf->time_queued) {
		queued_to_dispatched	= (buf->time_dispatched
					   - buf->time_queued);
		dispatched_to_completed = (buf->time_completed
					   - buf->time_dispatched);
		completed_to_freed	= (buf->time_freed
					   - buf->time_completed);

		q2d = DRM(histogram_slot)(queued_to_dispatched);
		d2c = DRM(histogram_slot)(dispatched_to_completed);
		c2f = DRM(histogram_slot)(completed_to_freed);

		q2c = DRM(histogram_slot)(queued_to_dispatched
					  + dispatched_to_completed);
		q2f = DRM(histogram_slot)(queued_to_dispatched
					  + dispatched_to_completed
					  + completed_to_freed);

		atomic_inc(&dev->histo.total);
		atomic_inc(&dev->histo.queued_to_dispatched[q2d]);
		atomic_inc(&dev->histo.dispatched_to_completed[d2c]);
		atomic_inc(&dev->histo.completed_to_freed[c2f]);

		atomic_inc(&dev->histo.queued_to_completed[q2c]);
		atomic_inc(&dev->histo.queued_to_freed[q2f]);
	}
	buf->time_queued     = 0;
	buf->time_dispatched = 0;
	buf->time_completed  = 0;
	buf->time_freed	     = 0;
}
#endif

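/* Return a buffer to the free pool.  The buffer is marked idle and, if a
 * process is sleeping on it, that process is woken; otherwise (when the
 * driver uses the DMA freelist) the buffer is placed directly on the
 * freelist for its order.
 */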
void DRM(free_buffer)(drm_device_t *dev, drm_buf_t *buf)
{
	if (!buf) return;

	buf->waiting  = 0;
	buf->pending  = 0;
	buf->pid      = 0;
	buf->used     = 0;
#if __HAVE_DMA_HISTOGRAM
	buf->time_completed = get_cycles();
#endif

	if ( __HAVE_DMA_WAITQUEUE && waitqueue_active(&buf->dma_wait)) {
		wake_up_interruptible(&buf->dma_wait);
	}
#if __HAVE_DMA_FREELIST
	else {
		drm_device_dma_t *dma = dev->dma;
				/* If processes are waiting, the last one
				   to wake will put the buffer on the free
				   list.  If no processes are waiting, we
				   put the buffer on the freelist here. */
		DRM(freelist_put)(dev, &dma->bufs[buf->order].freelist, buf);
	}
#endif
}

#if !__HAVE_DMA_RECLAIM
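/* Generic reclaim used when the driver does not provide its own
 * __HAVE_DMA_RECLAIM hook: free every buffer owned by the exiting process
 * that is not queued, and mark waiting buffers for reclaim; buffers already
 * on the hardware are left alone.
 */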
void DRM(reclaim_buffers)(drm_device_t *dev, pid_t pid)
{
	drm_device_dma_t *dma = dev->dma;
	int		 i;

	if (!dma) return;
	for (i = 0; i < dma->buf_count; i++) {
		if (dma->buflist[i]->pid == pid) {
			switch (dma->buflist[i]->list) {
			case DRM_LIST_NONE:
				DRM(free_buffer)(dev, dma->buflist[i]);
				break;
			case DRM_LIST_WAIT:
				dma->buflist[i]->list = DRM_LIST_RECLAIM;
				break;
			default:
				/* Buffer already on hardware. */
				break;
			}
		}
	}
}
#endif


/* GH: This is a big hack for now...
 */
#if __HAVE_OLD_DMA

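/* Forget the buffer selected for the next DMA dispatch and, if its queue
 * has drained, wake anyone waiting for that queue to flush.
 */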
void DRM(clear_next_buffer)(drm_device_t *dev)
{
	drm_device_dma_t *dma = dev->dma;

	dma->next_buffer = NULL;
	if (dma->next_queue && !DRM_BUFCOUNT(&dma->next_queue->waitlist)) {
		wake_up_interruptible(&dma->next_queue->flush_queue);
	}
	dma->next_queue	 = NULL;
}

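/* Pick the context queue that should be serviced next.  "While locked"
 * DMA on the kernel context takes priority, then the current context as
 * long as its time slice has not expired, then the next non-empty queue in
 * round-robin order.  If switching to a different context would cut the
 * current time slice short, a timer is armed to call 'wrapper' when the
 * slice ends and -1 is returned to indicate that no queue should be
 * serviced yet.
 */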
int DRM(select_queue)(drm_device_t *dev, void (*wrapper)(unsigned long))
{
	int	   i;
	int	   candidate = -1;
	int	   j	     = jiffies;

	if (!dev) {
		DRM_ERROR("No device\n");
		return -1;
	}
	if (!dev->queuelist || !dev->queuelist[DRM_KERNEL_CONTEXT]) {
				/* This only happens between the time the
				   interrupt is initialized and the time
				   the queues are initialized. */
		return -1;
	}

				/* Doing "while locked" DMA? */
	if (DRM_WAITCOUNT(dev, DRM_KERNEL_CONTEXT)) {
		return DRM_KERNEL_CONTEXT;
	}

				/* If there are buffers on the last_context
				   queue, and we have not been executing
				   this context very long, continue to
				   execute this context. */
	if (dev->last_switch <= j
	    && dev->last_switch + DRM_TIME_SLICE > j
	    && DRM_WAITCOUNT(dev, dev->last_context)) {
		return dev->last_context;
	}

				/* Otherwise, find a candidate */
	for (i = dev->last_checked + 1; i < dev->queue_count; i++) {
		if (DRM_WAITCOUNT(dev, i)) {
			candidate = dev->last_checked = i;
			break;
		}
	}

	if (candidate < 0) {
		for (i = 0; i < dev->queue_count; i++) {
			if (DRM_WAITCOUNT(dev, i)) {
				candidate = dev->last_checked = i;
				break;
			}
		}
	}

	if (wrapper
	    && candidate >= 0
	    && candidate != dev->last_context
	    && dev->last_switch <= j
	    && dev->last_switch + DRM_TIME_SLICE > j) {
		if (dev->timer.expires != dev->last_switch + DRM_TIME_SLICE) {
			del_timer(&dev->timer);
			dev->timer.function = wrapper;
			dev->timer.data	    = (unsigned long)dev;
			dev->timer.expires  = dev->last_switch + DRM_TIME_SLICE;
			add_timer(&dev->timer);
		}
		return -1;
	}

	return candidate;
}


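/* Queue the buffers described by a drm_dma_t on the appropriate context
 * queue (the kernel-context queue for "while locked" requests, after
 * verifying that the caller actually holds the lock).  The caller may sleep
 * if writes to the queue are currently blocked.  Each buffer index is
 * validated and must be owned by the calling process and not already
 * waiting or pending.
 */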
int DRM(dma_enqueue)(drm_device_t *dev, drm_dma_t *d)
{
	int		  i;
	drm_queue_t	  *q;
	drm_buf_t	  *buf;
	int		  idx;
	int		  while_locked = 0;
	drm_device_dma_t  *dma = dev->dma;
	DECLARE_WAITQUEUE(entry, current);

	DRM_DEBUG("%d\n", d->send_count);

	if (d->flags & _DRM_DMA_WHILE_LOCKED) {
		int context = dev->lock.hw_lock->lock;

		if (!_DRM_LOCK_IS_HELD(context)) {
			DRM_ERROR("No lock held during \"while locked\""
				  " request\n");
			return -EINVAL;
		}
		if (d->context != _DRM_LOCKING_CONTEXT(context)
		    && _DRM_LOCKING_CONTEXT(context) != DRM_KERNEL_CONTEXT) {
			DRM_ERROR("Lock held by %d while %d makes"
				  " \"while locked\" request\n",
				  _DRM_LOCKING_CONTEXT(context),
				  d->context);
			return -EINVAL;
		}
		q = dev->queuelist[DRM_KERNEL_CONTEXT];
		while_locked = 1;
	} else {
		q = dev->queuelist[d->context];
	}

	atomic_inc(&q->use_count);
	if (atomic_read(&q->block_write)) {
		add_wait_queue(&q->write_queue, &entry);
		atomic_inc(&q->block_count);
		for (;;) {
			current->state = TASK_INTERRUPTIBLE;
			if (!atomic_read(&q->block_write)) break;
			schedule();
			if (signal_pending(current)) {
				atomic_dec(&q->use_count);
				remove_wait_queue(&q->write_queue, &entry);
				return -EINTR;
			}
		}
		atomic_dec(&q->block_count);
		current->state = TASK_RUNNING;
		remove_wait_queue(&q->write_queue, &entry);
	}

	for (i = 0; i < d->send_count; i++) {
		idx = d->send_indices[i];
		if (idx < 0 || idx >= dma->buf_count) {
			atomic_dec(&q->use_count);
			DRM_ERROR("Index %d (of %d max)\n",
				  d->send_indices[i], dma->buf_count - 1);
			return -EINVAL;
		}
		buf = dma->buflist[ idx ];
		if (buf->pid != current->pid) {
			atomic_dec(&q->use_count);
			DRM_ERROR("Process %d using buffer owned by %d\n",
				  current->pid, buf->pid);
			return -EINVAL;
		}
		if (buf->list != DRM_LIST_NONE) {
			atomic_dec(&q->use_count);
			DRM_ERROR("Process %d using buffer %d on list %d\n",
				  current->pid, buf->idx, buf->list);
			return -EINVAL;
		}
		buf->used	  = d->send_sizes[i];
		buf->while_locked = while_locked;
		buf->context	  = d->context;
		if (!buf->used) {
			DRM_ERROR("Queueing 0 length buffer\n");
		}
		if (buf->pending) {
			atomic_dec(&q->use_count);
			DRM_ERROR("Queueing pending buffer:"
				  " buffer %d, offset %d\n",
				  d->send_indices[i], i);
			return -EINVAL;
		}
		if (buf->waiting) {
			atomic_dec(&q->use_count);
			DRM_ERROR("Queueing waiting buffer:"
				  " buffer %d, offset %d\n",
				  d->send_indices[i], i);
			return -EINVAL;
		}
		buf->waiting = 1;
		if (atomic_read(&q->use_count) == 1
		    || atomic_read(&q->finalization)) {
			DRM(free_buffer)(dev, buf);
		} else {
			DRM(waitlist_put)(&q->waitlist, buf);
			atomic_inc(&q->total_queued);
		}
	}
	atomic_dec(&q->use_count);

	return 0;
}

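/* Grab free buffers of a single size order from that order's freelist and
 * report their indices and sizes back to user space, until the request is
 * satisfied or the freelist runs dry.  Updates d->granted_count as buffers
 * are handed out.
 */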
static int DRM(dma_get_buffers_of_order)(drm_device_t *dev, drm_dma_t *d,
					 int order)
{
	int		  i;
	drm_buf_t	  *buf;
	drm_device_dma_t  *dma = dev->dma;

	for (i = d->granted_count; i < d->request_count; i++) {
		buf = DRM(freelist_get)(&dma->bufs[order].freelist,
					d->flags & _DRM_DMA_WAIT);
		if (!buf) break;
		if (buf->pending || buf->waiting) {
			DRM_ERROR("Free buffer %d in use by %d (w%d, p%d)\n",
				  buf->idx,
				  buf->pid,
				  buf->waiting,
				  buf->pending);
		}
		buf->pid     = current->pid;
		if (copy_to_user(&d->request_indices[i],
				 &buf->idx,
				 sizeof(buf->idx)))
			return -EFAULT;

		if (copy_to_user(&d->request_sizes[i],
				 &buf->total,
				 sizeof(buf->total)))
			return -EFAULT;

		++d->granted_count;
	}
	return 0;
}


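/* Satisfy a buffer request: try the exact order for the requested size
 * first, then smaller orders if _DRM_DMA_SMALLER_OK is set, then larger
 * orders if _DRM_DMA_LARGER_OK is set, stopping once request_count buffers
 * have been granted or an error occurs.
 */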
int DRM(dma_get_buffers)(drm_device_t *dev, drm_dma_t *dma)
{
	int		  order;
	int		  retcode = 0;
	int		  tmp_order;

	order = DRM(order)(dma->request_size);

	dma->granted_count = 0;
	retcode		   = DRM(dma_get_buffers_of_order)(dev, dma, order);

	if (dma->granted_count < dma->request_count
	    && (dma->flags & _DRM_DMA_SMALLER_OK)) {
		for (tmp_order = order - 1;
		     !retcode
			     && dma->granted_count < dma->request_count
			     && tmp_order >= DRM_MIN_ORDER;
		     --tmp_order) {

			retcode = DRM(dma_get_buffers_of_order)(dev, dma,
								tmp_order);
		}
	}

	if (dma->granted_count < dma->request_count
	    && (dma->flags & _DRM_DMA_LARGER_OK)) {
		for (tmp_order = order + 1;
		     !retcode
			     && dma->granted_count < dma->request_count
			     && tmp_order <= DRM_MAX_ORDER;
		     ++tmp_order) {

			retcode = DRM(dma_get_buffers_of_order)(dev, dma,
								tmp_order);
		}
	}
	return retcode;
}

#endif /* __HAVE_OLD_DMA */


#if __HAVE_DMA_IRQ

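/* Claim the given IRQ for the device and install DRM(dma_service) as the
 * handler, calling the driver's pre- and post-install hooks around
 * request_irq().  Fails with -EBUSY if an IRQ is already installed and
 * with -EINVAL if irq is zero.
 */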
int DRM(irq_install)( drm_device_t *dev, int irq )
{
	int ret;

	if ( !irq )
		return -EINVAL;

	down( &dev->struct_sem );
	if ( dev->irq ) {
		up( &dev->struct_sem );
		return -EBUSY;
	}
	dev->irq = irq;
	up( &dev->struct_sem );

	DRM_DEBUG( "%s: irq=%d\n", __FUNCTION__, irq );

	dev->context_flag = 0;
	dev->interrupt_flag = 0;
	dev->dma_flag = 0;

	dev->dma->next_buffer = NULL;
	dev->dma->next_queue = NULL;
	dev->dma->this_buffer = NULL;

#if __HAVE_DMA_IRQ_BH
	INIT_LIST_HEAD( &dev->tq.list );
	dev->tq.sync = 0;
	dev->tq.routine = DRM(dma_immediate_bh);
	dev->tq.data = dev;
#endif

#if __HAVE_VBL_IRQ
	init_waitqueue_head(&dev->vbl_queue);

	spin_lock_init( &dev->vbl_lock );

	INIT_LIST_HEAD( &dev->vbl_sigs.head );

	dev->vbl_pending = 0;
#endif

				/* Before installing handler */
	DRM(driver_irq_preinstall)(dev);

				/* Install handler */
	ret = request_irq( dev->irq, DRM(dma_service),
			   DRM_IRQ_TYPE, dev->devname, dev );
	if ( ret < 0 ) {
		down( &dev->struct_sem );
		dev->irq = 0;
		up( &dev->struct_sem );
		return ret;
	}

				/* After installing handler */
	DRM(driver_irq_postinstall)(dev);

	return 0;
}

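/* Detach the interrupt handler installed by DRM(irq_install)(): let the
 * driver quiesce its interrupt sources, then release the IRQ.  Returns
 * -EINVAL if no IRQ was installed.
 */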
int DRM(irq_uninstall)( drm_device_t *dev )
{
	int irq;

	down( &dev->struct_sem );
	irq = dev->irq;
	dev->irq = 0;
	up( &dev->struct_sem );

	if ( !irq )
		return -EINVAL;

	DRM_DEBUG( "%s: irq=%d\n", __FUNCTION__, irq );

	DRM(driver_irq_uninstall)( dev );

	free_irq( irq, dev );

	return 0;
}

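/* DRM_IOCTL_CONTROL handler: install or uninstall the interrupt handler
 * on behalf of user space.
 *
 * Rough user-space usage sketch (assuming an open DRM file descriptor fd
 * and the IRQ number reported for the card):
 *
 *	drm_control_t ctl;
 *	ctl.func = DRM_INST_HANDLER;
 *	ctl.irq  = irq;
 *	if (ioctl(fd, DRM_IOCTL_CONTROL, &ctl) < 0)
 *		perror("DRM_IOCTL_CONTROL");
 */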
int DRM(control)( struct inode *inode, struct file *filp,
		  unsigned int cmd, unsigned long arg )
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;
	drm_control_t ctl;

	if ( copy_from_user( &ctl, (drm_control_t *)arg, sizeof(ctl) ) )
		return -EFAULT;

	switch ( ctl.func ) {
	case DRM_INST_HANDLER:
		return DRM(irq_install)( dev, ctl.irq );
	case DRM_UNINST_HANDLER:
		return DRM(irq_uninstall)( dev );
	default:
		return -EINVAL;
	}
}

#if __HAVE_VBL_IRQ

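/* DRM_IOCTL_WAIT_VBLANK handler: either block until the requested vertical
 * blank sequence has been reached, or (with _DRM_VBLANK_SIGNAL) queue a
 * signal to be delivered from the interrupt handler when it is reached.
 *
 * Rough user-space usage sketch (assuming an open DRM file descriptor fd):
 *
 *	drm_wait_vblank_t vbl;
 *	vbl.request.type     = _DRM_VBLANK_RELATIVE;
 *	vbl.request.sequence = 1;		(wait for the next vblank)
 *	ioctl(fd, DRM_IOCTL_WAIT_VBLANK, &vbl);
 */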
int DRM(wait_vblank)( struct inode *inode, struct file *filp,
		      unsigned int cmd, unsigned long data )
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;
	drm_wait_vblank_t vblwait;
	struct timeval now;
	int ret = 0;
	unsigned int flags;

	if (!dev->irq)
		return -EINVAL;

	DRM_COPY_FROM_USER_IOCTL( vblwait, (drm_wait_vblank_t *)data,
				  sizeof(vblwait) );

	switch ( vblwait.request.type & ~_DRM_VBLANK_FLAGS_MASK ) {
	case _DRM_VBLANK_RELATIVE:
		vblwait.request.sequence += atomic_read( &dev->vbl_received );
		vblwait.request.type &= ~_DRM_VBLANK_RELATIVE;
		/* fall through to the absolute case */
	case _DRM_VBLANK_ABSOLUTE:
		break;
	default:
		return -EINVAL;
	}

	flags = vblwait.request.type & _DRM_VBLANK_FLAGS_MASK;

	if ( flags & _DRM_VBLANK_SIGNAL ) {
		unsigned long irqflags;
		drm_vbl_sig_t *vbl_sig;

		vblwait.reply.sequence = atomic_read( &dev->vbl_received );

		spin_lock_irqsave( &dev->vbl_lock, irqflags );

		/* Check if this task has already scheduled the same signal
		 * for the same vblank sequence number; nothing to be done in
		 * that case
		 */
		list_for_each_entry( vbl_sig, &dev->vbl_sigs.head, head ) {
			if (vbl_sig->sequence == vblwait.request.sequence
			    && vbl_sig->info.si_signo == vblwait.request.signal
			    && vbl_sig->task == current)
			{
				spin_unlock_irqrestore( &dev->vbl_lock, irqflags );
				goto done;
			}
		}

		if ( dev->vbl_pending >= 100 ) {
			spin_unlock_irqrestore( &dev->vbl_lock, irqflags );
			return -EBUSY;
		}

		dev->vbl_pending++;

		spin_unlock_irqrestore( &dev->vbl_lock, irqflags );

		if ( !( vbl_sig = kmalloc(sizeof(drm_vbl_sig_t), GFP_KERNEL) ) )
			return -ENOMEM;

		memset( (void *)vbl_sig, 0, sizeof(*vbl_sig) );

		vbl_sig->sequence = vblwait.request.sequence;
		vbl_sig->info.si_signo = vblwait.request.signal;
		vbl_sig->task = current;

		spin_lock_irqsave( &dev->vbl_lock, irqflags );

		list_add_tail( (struct list_head *) vbl_sig, &dev->vbl_sigs.head );

		spin_unlock_irqrestore( &dev->vbl_lock, irqflags );
	} else {
		ret = DRM(vblank_wait)( dev, &vblwait.request.sequence );

		do_gettimeofday( &now );
		vblwait.reply.tval_sec = now.tv_sec;
		vblwait.reply.tval_usec = now.tv_usec;
	}

done:
	DRM_COPY_TO_USER_IOCTL( (drm_wait_vblank_t *)data, vblwait,
				sizeof(vblwait) );

	return ret;
}

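/* Called from the vertical blank interrupt: deliver every queued vblank
 * signal whose requested sequence number has been reached (the comparison
 * tolerates counter wrap-around), then unlink and free the entry.
 */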
void DRM(vbl_send_signals)( drm_device_t *dev )
{
	struct list_head *list, *tmp;
	drm_vbl_sig_t *vbl_sig;
	unsigned int vbl_seq = atomic_read( &dev->vbl_received );
	unsigned long flags;

	spin_lock_irqsave( &dev->vbl_lock, flags );

	list_for_each_safe( list, tmp, &dev->vbl_sigs.head ) {
		vbl_sig = list_entry( list, drm_vbl_sig_t, head );
		if ( ( vbl_seq - vbl_sig->sequence ) <= (1<<23) ) {
			vbl_sig->info.si_code = vbl_seq;
			send_sig_info( vbl_sig->info.si_signo, &vbl_sig->info, vbl_sig->task );

			list_del( list );

			kfree( vbl_sig );
			dev->vbl_pending--;
		}
	}

	spin_unlock_irqrestore( &dev->vbl_lock, flags );
}

#endif	/* __HAVE_VBL_IRQ */

#else

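/* Stub DRM_IOCTL_CONTROL handler used when the driver is built without
 * interrupt-driven DMA: accept install/uninstall requests as no-ops so
 * user space keeps working, and reject anything else.
 */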
int DRM(control)( struct inode *inode, struct file *filp,
		  unsigned int cmd, unsigned long arg )
{
	drm_control_t ctl;

	if ( copy_from_user( &ctl, (drm_control_t *)arg, sizeof(ctl) ) )
		return -EFAULT;

	switch ( ctl.func ) {
	case DRM_INST_HANDLER:
	case DRM_UNINST_HANDLER:
		return 0;
	default:
		return -EINVAL;
	}
}

#endif /* __HAVE_DMA_IRQ */

#endif /* __HAVE_DMA */