/* drm_context.h -- IOCTLs for generic contexts -*- linux-c -*-
 * Created: Fri Nov 24 18:31:37 2000 by gareth@valinux.com
 *
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Rickard E. (Rik) Faith <faith@valinux.com>
 *    Gareth Hughes <gareth@valinux.com>
 *
 * ChangeLog:
 *  2001-11-16	Torsten Duwe <duwe@caldera.de>
 *		added context constructor/destructor hooks,
 *		needed by SiS driver's memory management.
 */

#include "drmP.h"

#if __HAVE_CTX_BITMAP

/* ================================================================
 * Context bitmap support
 */

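/* Release a context handle: clear its bit in the context bitmap and drop
 * any per-context SAREA pointer.  Invalid handles are only logged.
 */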
void DRM(ctxbitmap_free)( drm_device_t *dev, int ctx_handle )
{
	if ( ctx_handle < 0 ) goto failed;
	if ( !dev->ctx_bitmap ) goto failed;

	if ( ctx_handle < DRM_MAX_CTXBITMAP ) {
		down(&dev->struct_sem);
		clear_bit( ctx_handle, dev->ctx_bitmap );
		dev->context_sareas[ctx_handle] = NULL;
		up(&dev->struct_sem);
		return;
	}
failed:
	DRM_ERROR( "Attempt to free invalid context handle: %d\n",
		   ctx_handle );
	return;
}

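/* Allocate the next free context handle from the bitmap, growing the
 * per-context SAREA table when the handle exceeds max_context.  Returns
 * the new handle, or -1 if no handle or memory is available.
 */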
int DRM(ctxbitmap_next)( drm_device_t *dev )
{
	int bit;

	if(!dev->ctx_bitmap) return -1;

	down(&dev->struct_sem);
	bit = find_first_zero_bit( dev->ctx_bitmap, DRM_MAX_CTXBITMAP );
	if ( bit < DRM_MAX_CTXBITMAP ) {
		set_bit( bit, dev->ctx_bitmap );
		DRM_DEBUG( "drm_ctxbitmap_next bit : %d\n", bit );
		if((bit+1) > dev->max_context) {
			dev->max_context = (bit+1);
			if(dev->context_sareas) {
				drm_map_t **ctx_sareas;

				ctx_sareas = DRM(realloc)(dev->context_sareas,
						(dev->max_context - 1) *
						sizeof(*dev->context_sareas),
						dev->max_context *
						sizeof(*dev->context_sareas),
						DRM_MEM_MAPS);
				if(!ctx_sareas) {
					clear_bit(bit, dev->ctx_bitmap);
					up(&dev->struct_sem);
					return -1;
				}
				dev->context_sareas = ctx_sareas;
				dev->context_sareas[bit] = NULL;
			} else {
				/* max_context == 1 at this point */
				dev->context_sareas = DRM(alloc)(
						dev->max_context *
						sizeof(*dev->context_sareas),
						DRM_MEM_MAPS);
				if(!dev->context_sareas) {
					clear_bit(bit, dev->ctx_bitmap);
					up(&dev->struct_sem);
					return -1;
				}
				dev->context_sareas[bit] = NULL;
			}
		}
		up(&dev->struct_sem);
		return bit;
	}
	up(&dev->struct_sem);
	return -1;
}

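/* Allocate one page for the context bitmap, reset the SAREA table, and
 * pre-reserve the first DRM_RESERVED_CONTEXTS handles.
 */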
int DRM(ctxbitmap_init)( drm_device_t *dev )
{
	int i;
	int temp;

	down(&dev->struct_sem);
	dev->ctx_bitmap = (unsigned long *) DRM(alloc)( PAGE_SIZE,
							DRM_MEM_CTXBITMAP );
	if ( dev->ctx_bitmap == NULL ) {
		up(&dev->struct_sem);
		return -ENOMEM;
	}
	memset( (void *)dev->ctx_bitmap, 0, PAGE_SIZE );
	dev->context_sareas = NULL;
	dev->max_context = -1;
	up(&dev->struct_sem);

	for ( i = 0 ; i < DRM_RESERVED_CONTEXTS ; i++ ) {
		temp = DRM(ctxbitmap_next)( dev );
		DRM_DEBUG( "drm_ctxbitmap_init : %d\n", temp );
	}

	return 0;
}

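/* Free the context bitmap page and the per-context SAREA table on
 * device teardown.
 */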
void DRM(ctxbitmap_cleanup)( drm_device_t *dev )
{
	down(&dev->struct_sem);
	if( dev->context_sareas ) DRM(free)( dev->context_sareas,
					     sizeof(*dev->context_sareas) *
					     dev->max_context,
					     DRM_MEM_MAPS );
	DRM(free)( (void *)dev->ctx_bitmap, PAGE_SIZE, DRM_MEM_CTXBITMAP );
	up(&dev->struct_sem);
}

/* ================================================================
 * Per Context SAREA Support
 */

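/* IOCTL handler: return the handle of the private SAREA map that was
 * registered for request.ctx_id.
 */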
int DRM(getsareactx)(struct inode *inode, struct file *filp,
		     unsigned int cmd, unsigned long arg)
{
	drm_file_t	*priv	= filp->private_data;
	drm_device_t	*dev	= priv->dev;
	drm_ctx_priv_map_t request;
	drm_map_t *map;

	if (copy_from_user(&request,
			   (drm_ctx_priv_map_t *)arg,
			   sizeof(request)))
		return -EFAULT;

	down(&dev->struct_sem);
	if (dev->max_context < 0 ||
	    request.ctx_id >= (unsigned) dev->max_context) {
		up(&dev->struct_sem);
		return -EINVAL;
	}

	map = dev->context_sareas[request.ctx_id];
	up(&dev->struct_sem);

	request.handle = map->handle;
	if (copy_to_user((drm_ctx_priv_map_t *)arg, &request, sizeof(request)))
		return -EFAULT;
	return 0;
}

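/* IOCTL handler: look up an existing map by its handle and register it
 * as the private SAREA for request.ctx_id.
 */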
int DRM(setsareactx)(struct inode *inode, struct file *filp,
		     unsigned int cmd, unsigned long arg)
{
	drm_file_t	*priv	= filp->private_data;
	drm_device_t	*dev	= priv->dev;
	drm_ctx_priv_map_t request;
	drm_map_t *map = NULL;
	drm_map_list_t *r_list = NULL;
	struct list_head *list;

	if (copy_from_user(&request,
			   (drm_ctx_priv_map_t *)arg,
			   sizeof(request)))
		return -EFAULT;

	down(&dev->struct_sem);
	list_for_each(list, &dev->maplist->head) {
		r_list = (drm_map_list_t *)list;
		if(r_list->map &&
		   r_list->map->handle == request.handle)
			goto found;
	}
bad:
	up(&dev->struct_sem);
	return -EINVAL;

found:
	map = r_list->map;
	if (!map) goto bad;
	if (dev->max_context < 0)
		goto bad;
	if (request.ctx_id >= (unsigned) dev->max_context)
		goto bad;
	dev->context_sareas[request.ctx_id] = map;
	up(&dev->struct_sem);
	return 0;
}

/* ================================================================
 * The actual DRM context handling routines
 */

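/* Switch the hardware context from 'old' to 'new'.  With DRM_FLAG_NOCTX
 * the switch is completed immediately in the kernel; otherwise a
 * "C old new" request is handed to userspace via DRM(write_string).
 */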
int DRM(context_switch)( drm_device_t *dev, int old, int new )
{
	char buf[64];

	if ( test_and_set_bit( 0, &dev->context_flag ) ) {
		DRM_ERROR( "Reentering -- FIXME\n" );
		return -EBUSY;
	}

#if __HAVE_DMA_HISTOGRAM
	dev->ctx_start = get_cycles();
#endif

	DRM_DEBUG( "Context switch from %d to %d\n", old, new );

	if ( new == dev->last_context ) {
		clear_bit( 0, &dev->context_flag );
		return 0;
	}

	if ( DRM(flags) & DRM_FLAG_NOCTX ) {
		DRM(context_switch_complete)( dev, new );
	} else {
		sprintf( buf, "C %d %d\n", old, new );
		DRM(write_string)( dev, buf );
	}

	return 0;
}

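/* Complete a context switch: record the new context and switch time,
 * then clear context_flag and wake any waiters.  The hardware lock is
 * expected to still be held at this point.
 */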
int DRM(context_switch_complete)( drm_device_t *dev, int new )
{
	dev->last_context = new;  /* PRE/POST: This is the _only_ writer. */
	dev->last_switch  = jiffies;

	if ( !_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) ) {
		DRM_ERROR( "Lock isn't held after context switch\n" );
	}

	/* If a context switch is ever initiated when the kernel holds the
	 * lock, release that lock here.
	 */
#if __HAVE_DMA_HISTOGRAM
	atomic_inc( &dev->histo.ctx[DRM(histogram_slot)(get_cycles()
							- dev->ctx_start)] );
#endif
	clear_bit( 0, &dev->context_flag );
	wake_up( &dev->context_wait );

	return 0;
}

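/* IOCTL handler: report the reserved context handles.  The contexts
 * array is only filled in when the caller supplied room for at least
 * DRM_RESERVED_CONTEXTS entries.
 */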
int DRM(resctx)( struct inode *inode, struct file *filp,
		 unsigned int cmd, unsigned long arg )
{
	drm_ctx_res_t res;
	drm_ctx_t ctx;
	int i;

	if ( copy_from_user( &res, (drm_ctx_res_t *)arg, sizeof(res) ) )
		return -EFAULT;

	if ( res.count >= DRM_RESERVED_CONTEXTS ) {
		memset( &ctx, 0, sizeof(ctx) );
		for ( i = 0 ; i < DRM_RESERVED_CONTEXTS ; i++ ) {
			ctx.handle = i;
			if ( copy_to_user( &res.contexts[i],
					   &ctx, sizeof(ctx) ) )
				return -EFAULT;
		}
	}
	res.count = DRM_RESERVED_CONTEXTS;

	if ( copy_to_user( (drm_ctx_res_t *)arg, &res, sizeof(res) ) )
		return -EFAULT;
	return 0;
}

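/* IOCTL handler: allocate a new context handle from the bitmap, skipping
 * the kernel context, and run the driver's context constructor if one is
 * defined.
 */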
int DRM(addctx)( struct inode *inode, struct file *filp,
		 unsigned int cmd, unsigned long arg )
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;
	drm_ctx_t ctx;

	if ( copy_from_user( &ctx, (drm_ctx_t *)arg, sizeof(ctx) ) )
		return -EFAULT;

	ctx.handle = DRM(ctxbitmap_next)( dev );
	if ( ctx.handle == DRM_KERNEL_CONTEXT ) {
		/* Skip kernel's context and get a new one. */
		ctx.handle = DRM(ctxbitmap_next)( dev );
	}
	DRM_DEBUG( "%d\n", ctx.handle );
	if ( ctx.handle == -1 ) {
		DRM_DEBUG( "Not enough free contexts.\n" );
		/* Should this return -EBUSY instead? */
		return -ENOMEM;
	}
#ifdef DRIVER_CTX_CTOR
	if ( ctx.handle != DRM_KERNEL_CONTEXT )
		DRIVER_CTX_CTOR(ctx.handle); /* XXX: also pass dev ? */
#endif

	if ( copy_to_user( (drm_ctx_t *)arg, &ctx, sizeof(ctx) ) )
		return -EFAULT;
	return 0;
}

int DRM(modctx)( struct inode *inode, struct file *filp,
		 unsigned int cmd, unsigned long arg )
{
	/* This does nothing */
	return 0;
}

int DRM(getctx)( struct inode *inode, struct file *filp,
		 unsigned int cmd, unsigned long arg )
{
	drm_ctx_t ctx;

	if ( copy_from_user( &ctx, (drm_ctx_t*)arg, sizeof(ctx) ) )
		return -EFAULT;

	/* This is 0, because we don't handle any context flags */
	ctx.flags = 0;

	if ( copy_to_user( (drm_ctx_t*)arg, &ctx, sizeof(ctx) ) )
		return -EFAULT;
	return 0;
}

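/* IOCTL handler: request a switch from the current context to ctx.handle. */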
int DRM(switchctx)( struct inode *inode, struct file *filp,
		    unsigned int cmd, unsigned long arg )
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;
	drm_ctx_t ctx;

	if ( copy_from_user( &ctx, (drm_ctx_t *)arg, sizeof(ctx) ) )
		return -EFAULT;

	DRM_DEBUG( "%d\n", ctx.handle );
	return DRM(context_switch)( dev, dev->last_context, ctx.handle );
}

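/* IOCTL handler: userspace (typically the X server) reports that the
 * switch to ctx.handle has been performed; finish the bookkeeping here.
 */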
int DRM(newctx)( struct inode *inode, struct file *filp,
		 unsigned int cmd, unsigned long arg )
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;
	drm_ctx_t ctx;

	if ( copy_from_user( &ctx, (drm_ctx_t *)arg, sizeof(ctx) ) )
		return -EFAULT;

	DRM_DEBUG( "%d\n", ctx.handle );
	DRM(context_switch_complete)( dev, ctx.handle );

	return 0;
}

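/* IOCTL handler: destroy a context.  Runs the driver's context destructor
 * if one is defined and returns the handle to the bitmap; the kernel
 * context itself is never freed.
 */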
int DRM(rmctx)( struct inode *inode, struct file *filp,
		unsigned int cmd, unsigned long arg )
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;
	drm_ctx_t ctx;

	if ( copy_from_user( &ctx, (drm_ctx_t *)arg, sizeof(ctx) ) )
		return -EFAULT;

	DRM_DEBUG( "%d\n", ctx.handle );
	if ( ctx.handle == DRM_KERNEL_CONTEXT + 1 ) {
		priv->remove_auth_on_close = 1;
	}
	if ( ctx.handle != DRM_KERNEL_CONTEXT ) {
#ifdef DRIVER_CTX_DTOR
		DRIVER_CTX_DTOR(ctx.handle); /* XXX: also pass dev ? */
#endif
		DRM(ctxbitmap_free)( dev, ctx.handle );
	}

	return 0;
}

#else /* __HAVE_CTX_BITMAP */

/* ================================================================
 * Old-style context support
 */

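/* Queue-based context switch.  Validates the target queue, then either
 * completes the switch directly (DRM_FLAG_NOCTX) or hands a "C old new"
 * request to userspace via DRM(write_string).
 */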
int DRM(context_switch)(drm_device_t *dev, int old, int new)
{
	char	    buf[64];
	drm_queue_t *q;

#if 0
	atomic_inc(&dev->total_ctx);
#endif

	if (test_and_set_bit(0, &dev->context_flag)) {
		DRM_ERROR("Reentering -- FIXME\n");
		return -EBUSY;
	}

#if __HAVE_DMA_HISTOGRAM
	dev->ctx_start = get_cycles();
#endif

	DRM_DEBUG("Context switch from %d to %d\n", old, new);

	if (new >= dev->queue_count) {
		clear_bit(0, &dev->context_flag);
		return -EINVAL;
	}

	if (new == dev->last_context) {
		clear_bit(0, &dev->context_flag);
		return 0;
	}

	q = dev->queuelist[new];
	atomic_inc(&q->use_count);
	if (atomic_read(&q->use_count) == 1) {
		atomic_dec(&q->use_count);
		clear_bit(0, &dev->context_flag);
		return -EINVAL;
	}

	if (DRM(flags) & DRM_FLAG_NOCTX) {
		DRM(context_switch_complete)(dev, new);
	} else {
		sprintf(buf, "C %d %d\n", old, new);
		DRM(write_string)(dev, buf);
	}

	atomic_dec(&q->use_count);

	return 0;
}

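/* Complete a queue-based context switch: record the new context, release
 * the hardware lock unless the next buffer must run while locked, then
 * clear context_flag and wake any waiters.
 */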
int DRM(context_switch_complete)(drm_device_t *dev, int new)
{
	drm_device_dma_t *dma = dev->dma;

	dev->last_context = new;  /* PRE/POST: This is the _only_ writer. */
	dev->last_switch  = jiffies;

	if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
		DRM_ERROR("Lock isn't held after context switch\n");
	}

	if (!dma || !(dma->next_buffer && dma->next_buffer->while_locked)) {
		if (DRM(lock_free)(dev, &dev->lock.hw_lock->lock,
				  DRM_KERNEL_CONTEXT)) {
			DRM_ERROR("Cannot free lock\n");
		}
	}

#if __HAVE_DMA_HISTOGRAM
	atomic_inc(&dev->histo.ctx[DRM(histogram_slot)(get_cycles()
						      - dev->ctx_start)]);
#endif
	clear_bit(0, &dev->context_flag);
	wake_up_interruptible(&dev->context_wait);

	return 0;
}

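/* Reset a freshly allocated (or reused) queue: clear its counters, set up
 * its wait queues, and size its waitlist to the device's buffer count.
 */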
static int DRM(init_queue)(drm_device_t *dev, drm_queue_t *q, drm_ctx_t *ctx)
{
	DRM_DEBUG("\n");

	if (atomic_read(&q->use_count) != 1
	    || atomic_read(&q->finalization)
	    || atomic_read(&q->block_count)) {
		DRM_ERROR("New queue is already in use: u%d f%d b%d\n",
			  atomic_read(&q->use_count),
			  atomic_read(&q->finalization),
			  atomic_read(&q->block_count));
	}

	atomic_set(&q->finalization,  0);
	atomic_set(&q->block_count,   0);
	atomic_set(&q->block_read,    0);
	atomic_set(&q->block_write,   0);
	atomic_set(&q->total_queued,  0);
	atomic_set(&q->total_flushed, 0);
	atomic_set(&q->total_locks,   0);

	init_waitqueue_head(&q->write_queue);
	init_waitqueue_head(&q->read_queue);
	init_waitqueue_head(&q->flush_queue);

	q->flags = ctx->flags;

	DRM(waitlist_create)(&q->waitlist, dev->dma->buf_count);

	return 0;
}

/* drm_alloc_queue:
 * PRE: 1) dev->queuelist[0..dev->queue_count] is allocated and will not
 *	   disappear (so all deallocation must be done after IOCTLs are off)
 *	2) dev->queue_count < dev->queue_slots
 *	3) dev->queuelist[i].use_count == 0 and
 *	   dev->queuelist[i].finalization == 0 if i not in use
 * POST: 1) dev->queuelist[i].use_count == 1
 *	 2) dev->queue_count < dev->queue_slots
 */
static int DRM(alloc_queue)(drm_device_t *dev)
{
	int	    i;
	drm_queue_t *queue;
	int	    oldslots;
	int	    newslots;

	/* Check for a free queue */
	for (i = 0; i < dev->queue_count; i++) {
		atomic_inc(&dev->queuelist[i]->use_count);
		if (atomic_read(&dev->queuelist[i]->use_count) == 1
		    && !atomic_read(&dev->queuelist[i]->finalization)) {
			DRM_DEBUG("%d (free)\n", i);
			return i;
		}
		atomic_dec(&dev->queuelist[i]->use_count);
	}

	/* Allocate a new queue */
	down(&dev->struct_sem);

	queue = DRM(alloc)(sizeof(*queue), DRM_MEM_QUEUES);
	if (!queue) {
		up(&dev->struct_sem);
		DRM_DEBUG("out of memory\n");
		return -ENOMEM;
	}
	memset(queue, 0, sizeof(*queue));
	atomic_set(&queue->use_count, 1);

	++dev->queue_count;
	if (dev->queue_count >= dev->queue_slots) {
		oldslots = dev->queue_slots * sizeof(*dev->queuelist);
		if (!dev->queue_slots) dev->queue_slots = 1;
		dev->queue_slots *= 2;
		newslots = dev->queue_slots * sizeof(*dev->queuelist);

		dev->queuelist = DRM(realloc)(dev->queuelist,
					      oldslots,
					      newslots,
					      DRM_MEM_QUEUES);
		if (!dev->queuelist) {
			up(&dev->struct_sem);
			DRM_DEBUG("out of memory\n");
			return -ENOMEM;
		}
	}
	dev->queuelist[dev->queue_count-1] = queue;

	up(&dev->struct_sem);
	DRM_DEBUG("%d (new)\n", dev->queue_count - 1);
	return dev->queue_count - 1;
}

int DRM(resctx)(struct inode *inode, struct file *filp,
		unsigned int cmd, unsigned long arg)
{
	drm_ctx_res_t	res;
	drm_ctx_t	ctx;
	int		i;

	DRM_DEBUG("%d\n", DRM_RESERVED_CONTEXTS);
	if (copy_from_user(&res, (drm_ctx_res_t *)arg, sizeof(res)))
		return -EFAULT;
	if (res.count >= DRM_RESERVED_CONTEXTS) {
		memset(&ctx, 0, sizeof(ctx));
		for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) {
			ctx.handle = i;
			if (copy_to_user(&res.contexts[i],
					 &ctx,
					 sizeof(ctx)))
				return -EFAULT;
		}
	}
	res.count = DRM_RESERVED_CONTEXTS;
	if (copy_to_user((drm_ctx_res_t *)arg, &res, sizeof(res)))
		return -EFAULT;
	return 0;
}

int DRM(addctx)(struct inode *inode, struct file *filp,
		unsigned int cmd, unsigned long arg)
{
	drm_file_t	*priv	= filp->private_data;
	drm_device_t	*dev	= priv->dev;
	drm_ctx_t	ctx;

	if (copy_from_user(&ctx, (drm_ctx_t *)arg, sizeof(ctx)))
		return -EFAULT;
	if ((ctx.handle = DRM(alloc_queue)(dev)) == DRM_KERNEL_CONTEXT) {
		/* Init kernel's context and get a new one. */
		DRM(init_queue)(dev, dev->queuelist[ctx.handle], &ctx);
		ctx.handle = DRM(alloc_queue)(dev);
	}
	DRM(init_queue)(dev, dev->queuelist[ctx.handle], &ctx);
	DRM_DEBUG("%d\n", ctx.handle);
	if (copy_to_user((drm_ctx_t *)arg, &ctx, sizeof(ctx)))
		return -EFAULT;
	return 0;
}

int DRM(modctx)(struct inode *inode, struct file *filp,
		unsigned int cmd, unsigned long arg)
{
	drm_file_t	*priv	= filp->private_data;
	drm_device_t	*dev	= priv->dev;
	drm_ctx_t	ctx;
	drm_queue_t	*q;

	if (copy_from_user(&ctx, (drm_ctx_t *)arg, sizeof(ctx)))
		return -EFAULT;

	DRM_DEBUG("%d\n", ctx.handle);

	if (ctx.handle < 0 || ctx.handle >= dev->queue_count) return -EINVAL;
	q = dev->queuelist[ctx.handle];

	atomic_inc(&q->use_count);
	if (atomic_read(&q->use_count) == 1) {
		/* No longer in use */
		atomic_dec(&q->use_count);
		return -EINVAL;
	}

	if (DRM_BUFCOUNT(&q->waitlist)) {
		atomic_dec(&q->use_count);
		return -EBUSY;
	}

	q->flags = ctx.flags;

	atomic_dec(&q->use_count);
	return 0;
}

int DRM(getctx)(struct inode *inode, struct file *filp,
		unsigned int cmd, unsigned long arg)
{
	drm_file_t	*priv	= filp->private_data;
	drm_device_t	*dev	= priv->dev;
	drm_ctx_t	ctx;
	drm_queue_t	*q;

	if (copy_from_user(&ctx, (drm_ctx_t *)arg, sizeof(ctx)))
		return -EFAULT;

	DRM_DEBUG("%d\n", ctx.handle);

	if (ctx.handle >= dev->queue_count) return -EINVAL;
	q = dev->queuelist[ctx.handle];

	atomic_inc(&q->use_count);
	if (atomic_read(&q->use_count) == 1) {
		/* No longer in use */
		atomic_dec(&q->use_count);
		return -EINVAL;
	}

	ctx.flags = q->flags;
	atomic_dec(&q->use_count);

	if (copy_to_user((drm_ctx_t *)arg, &ctx, sizeof(ctx)))
		return -EFAULT;

	return 0;
}

int DRM(switchctx)(struct inode *inode, struct file *filp,
		   unsigned int cmd, unsigned long arg)
{
	drm_file_t	*priv	= filp->private_data;
	drm_device_t	*dev	= priv->dev;
	drm_ctx_t	ctx;

	if (copy_from_user(&ctx, (drm_ctx_t *)arg, sizeof(ctx)))
		return -EFAULT;
	DRM_DEBUG("%d\n", ctx.handle);
	return DRM(context_switch)(dev, dev->last_context, ctx.handle);
}

int DRM(newctx)(struct inode *inode, struct file *filp,
		unsigned int cmd, unsigned long arg)
{
	drm_file_t	*priv	= filp->private_data;
	drm_device_t	*dev	= priv->dev;
	drm_ctx_t	ctx;

	if (copy_from_user(&ctx, (drm_ctx_t *)arg, sizeof(ctx)))
		return -EFAULT;
	DRM_DEBUG("%d\n", ctx.handle);
	DRM(context_switch_complete)(dev, ctx.handle);

	return 0;
}

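/* IOCTL handler: tear down a queue-based context.  The queue is marked
 * for finalization, its pending buffers are freed, and blocked processes
 * are woken; it becomes reusable once use_count and finalization both
 * drop back to zero.
 */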
int DRM(rmctx)(struct inode *inode, struct file *filp,
	       unsigned int cmd, unsigned long arg)
{
	drm_file_t	*priv	= filp->private_data;
	drm_device_t	*dev	= priv->dev;
	drm_ctx_t	ctx;
	drm_queue_t	*q;
	drm_buf_t	*buf;

	if (copy_from_user(&ctx, (drm_ctx_t *)arg, sizeof(ctx)))
		return -EFAULT;
	DRM_DEBUG("%d\n", ctx.handle);

	if (ctx.handle >= dev->queue_count) return -EINVAL;
	q = dev->queuelist[ctx.handle];

	atomic_inc(&q->use_count);
	if (atomic_read(&q->use_count) == 1) {
		/* No longer in use */
		atomic_dec(&q->use_count);
		return -EINVAL;
	}

	atomic_inc(&q->finalization); /* Mark queue in finalization state */
	atomic_sub(2, &q->use_count); /* Mark queue as unused (pending
					 finalization) */

	while (test_and_set_bit(0, &dev->interrupt_flag)) {
		schedule();
		if (signal_pending(current)) {
			clear_bit(0, &dev->interrupt_flag);
			return -EINTR;
		}
	}

	/* Remove queued buffers */
	while ((buf = DRM(waitlist_get)(&q->waitlist))) {
		DRM(free_buffer)(dev, buf);
	}
	clear_bit(0, &dev->interrupt_flag);

	/* Wakeup blocked processes */
	wake_up_interruptible(&q->read_queue);
	wake_up_interruptible(&q->write_queue);
	wake_up_interruptible(&q->flush_queue);

	/* Finalization over.  Queue is made available when both use_count
	 * and finalization become 0, which won't happen until all the
	 * waiting processes stop waiting.
	 */
	atomic_dec(&q->finalization);
	return 0;
}

#endif /* __HAVE_CTX_BITMAP */