/* i810_drv.c -- I810 driver -*- linux-c -*-
 * Created: Mon Dec 13 01:56:22 1999 by jhartmann@precisioninsight.com
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors: Rickard E. (Rik) Faith <faith@valinux.com>
 *	    Jeff Hartmann <jhartmann@valinux.com>
 *
 */

#include <linux/config.h>
#include "drmP.h"
#include "i810_drv.h"

#define I810_NAME	 "i810"
#define I810_DESC	 "Intel I810"
#define I810_DATE	 "20000928"
#define I810_MAJOR	 1
#define I810_MINOR	 1
#define I810_PATCHLEVEL	 0

static drm_device_t	      i810_device;
drm_ctx_t		      i810_res_ctx;

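/* File operations for the i810 device node.  open and release are wrapped
 * locally so that the first open runs i810_setup() and the last close runs
 * i810_takedown(); the remaining entry points fall through to the generic
 * DRM core handlers. */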
static struct file_operations i810_fops = {
#if LINUX_VERSION_CODE >= 0x020400
				/* This started being used during 2.4.0-test */
	owner:   THIS_MODULE,
#endif
	open:	 i810_open,
	flush:	 drm_flush,
	release: i810_release,
	ioctl:	 i810_ioctl,
	mmap:	 drm_mmap,
	read:	 drm_read,
	fasync:	 drm_fasync,
	poll:	 drm_poll,
};

static struct miscdevice      i810_misc = {
	minor: MISC_DYNAMIC_MINOR,
	name:  I810_NAME,
	fops:  &i810_fops,
};

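/* Ioctl dispatch table, indexed by ioctl number.  The two flags after each
 * handler are auth_needed and root_only; both are checked in i810_ioctl()
 * before the handler is called. */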
static drm_ioctl_desc_t	      i810_ioctls[] = {
	[DRM_IOCTL_NR(DRM_IOCTL_VERSION)]     = { i810_version,	  0, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE)]  = { drm_getunique,  0, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_GET_MAGIC)]   = { drm_getmagic,	  0, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_IRQ_BUSID)]   = { drm_irq_busid,  0, 1 },

	[DRM_IOCTL_NR(DRM_IOCTL_SET_UNIQUE)]  = { drm_setunique,  1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_BLOCK)]	      = { drm_block,	  1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_UNBLOCK)]     = { drm_unblock,	  1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_CONTROL)]     = { i810_control,	  1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_AUTH_MAGIC)]  = { drm_authmagic,  1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP)]     = { drm_addmap,	  1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_ADD_BUFS)]    = { i810_addbufs,	  1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_MARK_BUFS)]   = { i810_markbufs,  1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_INFO_BUFS)]   = { i810_infobufs,  1, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_FREE_BUFS)]   = { i810_freebufs,  1, 0 },

	[DRM_IOCTL_NR(DRM_IOCTL_ADD_CTX)]     = { i810_addctx,	  1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_RM_CTX)]      = { i810_rmctx,	  1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_MOD_CTX)]     = { i810_modctx,	  1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_GET_CTX)]     = { i810_getctx,	  1, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_SWITCH_CTX)]  = { i810_switchctx, 1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_NEW_CTX)]     = { i810_newctx,	  1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_RES_CTX)]     = { i810_resctx,	  1, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_ADD_DRAW)]    = { drm_adddraw,	  1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_RM_DRAW)]     = { drm_rmdraw,	  1, 1 },

	[DRM_IOCTL_NR(DRM_IOCTL_LOCK)]	      = { i810_lock,	  1, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_UNLOCK)]      = { i810_unlock,	  1, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_FINISH)]      = { drm_finish,	  1, 0 },

	[DRM_IOCTL_NR(DRM_IOCTL_AGP_ACQUIRE)] = { drm_agp_acquire, 1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_AGP_RELEASE)] = { drm_agp_release, 1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_AGP_ENABLE)]  = { drm_agp_enable,  1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_AGP_INFO)]    = { drm_agp_info,    1, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_AGP_ALLOC)]   = { drm_agp_alloc,   1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_AGP_FREE)]    = { drm_agp_free,    1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_AGP_BIND)]    = { drm_agp_bind,    1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_AGP_UNBIND)]  = { drm_agp_unbind,  1, 1 },

	[DRM_IOCTL_NR(DRM_IOCTL_I810_INIT)]   = { i810_dma_init,   1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_I810_VERTEX)] = { i810_dma_vertex, 1, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_I810_CLEAR)]  = { i810_clear_bufs, 1, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_I810_FLUSH)]  = { i810_flush_ioctl, 1, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_I810_GETAGE)] = { i810_getage,     1, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_I810_GETBUF)] = { i810_getbuf,     1, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_I810_SWAP)]   = { i810_swap_bufs,  1, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_I810_COPY)]   = { i810_copybuf,    1, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_I810_DOCOPY)] = { i810_docopy,     1, 0 },
};

#define I810_IOCTL_COUNT DRM_ARRAY_SIZE(i810_ioctls)

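/* "i810" holds the option string passed at module load time (insmod i810=...);
 * it is handed to drm_parse_options() from i810_init(). */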
#ifdef MODULE
static char		      *i810 = NULL;
#endif

MODULE_AUTHOR("VA Linux Systems, Inc.");
MODULE_DESCRIPTION("Intel I810");
MODULE_LICENSE("GPL and additional rights");
MODULE_PARM(i810, "s");

#ifndef MODULE
/* i810_options is called by the kernel to parse command-line options
 * passed via the boot-loader (e.g., LILO).  It calls the insmod option
 * routine, drm_parse_options.
 */

static int __init i810_options(char *str)
{
	drm_parse_options(str);
	return 1;
}

__setup("i810=", i810_options);
#endif

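/* i810_setup is called from i810_open() when the first file handle is
 * opened; it resets the per-device counters, lists, and wait queues to a
 * clean state before the device is used. */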
static int i810_setup(drm_device_t *dev)
{
	int i;

	atomic_set(&dev->ioctl_count, 0);
	atomic_set(&dev->vma_count, 0);
	dev->buf_use	  = 0;
	atomic_set(&dev->buf_alloc, 0);

	drm_dma_setup(dev);

	atomic_set(&dev->total_open, 0);
	atomic_set(&dev->total_close, 0);
	atomic_set(&dev->total_ioctl, 0);
	atomic_set(&dev->total_irq, 0);
	atomic_set(&dev->total_ctx, 0);
	atomic_set(&dev->total_locks, 0);
	atomic_set(&dev->total_unlocks, 0);
	atomic_set(&dev->total_contends, 0);
	atomic_set(&dev->total_sleeps, 0);

	for (i = 0; i < DRM_HASH_SIZE; i++) {
		dev->magiclist[i].head = NULL;
		dev->magiclist[i].tail = NULL;
	}
	dev->maplist	    = NULL;
	dev->map_count	    = 0;
	dev->vmalist	    = NULL;
	dev->lock.hw_lock   = NULL;
	init_waitqueue_head(&dev->lock.lock_queue);
	dev->queue_count    = 0;
	dev->queue_reserved = 0;
	dev->queue_slots    = 0;
	dev->queuelist	    = NULL;
	dev->irq	    = 0;
	dev->context_flag   = 0;
	dev->interrupt_flag = 0;
	dev->dma_flag	    = 0;
	dev->last_context   = 0;
	dev->last_switch    = 0;
	dev->last_checked   = 0;
	init_timer(&dev->timer);
	init_waitqueue_head(&dev->context_wait);
#if DRM_DMA_HISTOGRAM
	memset(&dev->histo, 0, sizeof(dev->histo));
#endif
	dev->ctx_start	    = 0;
	dev->lck_start	    = 0;

	dev->buf_rp	  = dev->buf;
	dev->buf_wp	  = dev->buf;
	dev->buf_end	  = dev->buf + DRM_BSZ;
	dev->buf_async	  = NULL;
	init_waitqueue_head(&dev->buf_readers);
	init_waitqueue_head(&dev->buf_writers);

	DRM_DEBUG("\n");

	/* The kernel's context could be created here, but is now created
	   in drm_dma_enqueue.	This is more resource-efficient for
	   hardware that does not do DMA, but may mean that
	   drm_select_queue fails between the time the interrupt is
	   initialized and the time the queues are initialized. */

	return 0;
}

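/* i810_takedown undoes i810_setup(): it uninstalls the IRQ handler and frees
 * the magic/authentication lists, AGP memory, VMA and map lists, and the DMA
 * queues.  Called on last close and from the module cleanup and error paths. */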
static int i810_takedown(drm_device_t *dev)
{
	int		  i;
	drm_magic_entry_t *pt, *next;
	drm_map_t	  *map;
	drm_vma_entry_t	  *vma, *vma_next;

	DRM_DEBUG("\n");

	if (dev->irq) i810_irq_uninstall(dev);

	down(&dev->struct_sem);
	del_timer(&dev->timer);

	if (dev->devname) {
		drm_free(dev->devname, strlen(dev->devname)+1, DRM_MEM_DRIVER);
		dev->devname = NULL;
	}

	if (dev->unique) {
		drm_free(dev->unique, strlen(dev->unique)+1, DRM_MEM_DRIVER);
		dev->unique = NULL;
		dev->unique_len = 0;
	}
				/* Clear pid list */
	for (i = 0; i < DRM_HASH_SIZE; i++) {
		for (pt = dev->magiclist[i].head; pt; pt = next) {
			next = pt->next;
			drm_free(pt, sizeof(*pt), DRM_MEM_MAGIC);
		}
		dev->magiclist[i].head = dev->magiclist[i].tail = NULL;
	}
				/* Clear AGP information */
	if (dev->agp) {
		drm_agp_mem_t *entry;
		drm_agp_mem_t *nexte;

				/* Remove AGP resources, but leave dev->agp
				   intact until i810_cleanup is called. */
		for (entry = dev->agp->memory; entry; entry = nexte) {
			nexte = entry->next;
			if (entry->bound) drm_unbind_agp(entry->memory);
			drm_free_agp(entry->memory, entry->pages);
			drm_free(entry, sizeof(*entry), DRM_MEM_AGPLISTS);
		}
		dev->agp->memory = NULL;

		if (dev->agp->acquired) _drm_agp_release();

		dev->agp->acquired = 0;
		dev->agp->enabled  = 0;
	}
				/* Clear vma list (only built for debugging) */
	if (dev->vmalist) {
		for (vma = dev->vmalist; vma; vma = vma_next) {
			vma_next = vma->next;
			drm_free(vma, sizeof(*vma), DRM_MEM_VMAS);
		}
		dev->vmalist = NULL;
	}

				/* Clear map area and mtrr information */
	if (dev->maplist) {
		for (i = 0; i < dev->map_count; i++) {
			map = dev->maplist[i];
			switch (map->type) {
			case _DRM_REGISTERS:
			case _DRM_FRAME_BUFFER:
#ifdef CONFIG_MTRR
				if (map->mtrr >= 0) {
					int retcode;
					retcode = mtrr_del(map->mtrr,
							   map->offset,
							   map->size);
					DRM_DEBUG("mtrr_del = %d\n", retcode);
				}
#endif
				drm_ioremapfree(map->handle, map->size, dev);
				break;
			case _DRM_SHM:
				drm_free_pages((unsigned long)map->handle,
					       drm_order(map->size)
					       - PAGE_SHIFT,
					       DRM_MEM_SAREA);
				break;
			case _DRM_AGP:
				break;
			}
			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
		}
		drm_free(dev->maplist,
			 dev->map_count * sizeof(*dev->maplist),
			 DRM_MEM_MAPS);
		dev->maplist   = NULL;
		dev->map_count = 0;
	}

	if (dev->queuelist) {
		for (i = 0; i < dev->queue_count; i++) {
			if (dev->queuelist[i]) {
				drm_waitlist_destroy(&dev->queuelist[i]->waitlist);
				drm_free(dev->queuelist[i],
					 sizeof(*dev->queuelist[0]),
					 DRM_MEM_QUEUES);
				dev->queuelist[i] = NULL;
			}
		}
		drm_free(dev->queuelist,
			 dev->queue_slots * sizeof(*dev->queuelist),
			 DRM_MEM_QUEUES);
		dev->queuelist	 = NULL;
	}

	drm_dma_takedown(dev);

	dev->queue_count     = 0;
	if (dev->lock.hw_lock) {
		dev->lock.hw_lock    = NULL; /* SHM removed */
		dev->lock.pid	     = 0;
		wake_up_interruptible(&dev->lock.lock_queue);
	}
	up(&dev->struct_sem);

	return 0;
}

/* i810_init is called via init_module at module load time, or via
 * linux/init/main.c (this is not currently supported). */

static int __init i810_init(void)
{
	int		      retcode;
	drm_device_t	      *dev = &i810_device;

	DRM_DEBUG("\n");

	memset((void *)dev, 0, sizeof(*dev));
	dev->count_lock	  = SPIN_LOCK_UNLOCKED;
	sema_init(&dev->struct_sem, 1);

#ifdef MODULE
	drm_parse_options(i810);
#endif
	DRM_DEBUG("doing misc_register\n");
	if ((retcode = misc_register(&i810_misc))) {
		DRM_ERROR("Cannot register \"%s\"\n", I810_NAME);
		return retcode;
	}
	dev->device = MKDEV(MISC_MAJOR, i810_misc.minor);
	dev->name   = I810_NAME;

	DRM_DEBUG("doing mem init\n");
	drm_mem_init();
	DRM_DEBUG("doing proc init\n");
	drm_proc_init(dev);
	DRM_DEBUG("doing agp init\n");
	dev->agp    = drm_agp_init();
	if (dev->agp == NULL) {
		DRM_INFO("The i810 drm module requires the agpgart module"
			 " to function correctly.\nPlease load the agpgart"
			 " module before you load the i810 module.\n");
		drm_proc_cleanup();
		misc_deregister(&i810_misc);
		i810_takedown(dev);
		return -ENOMEM;
	}
	DRM_DEBUG("doing ctxbitmap init\n");
	if ((retcode = drm_ctxbitmap_init(dev))) {
		DRM_ERROR("Cannot allocate memory for context bitmap.\n");
		drm_proc_cleanup();
		misc_deregister(&i810_misc);
		i810_takedown(dev);
		return retcode;
	}

	DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
		 I810_NAME,
		 I810_MAJOR,
		 I810_MINOR,
		 I810_PATCHLEVEL,
		 I810_DATE,
		 i810_misc.minor);

	return 0;
}

/* i810_cleanup is called via cleanup_module at module unload time. */

static void __exit i810_cleanup(void)
{
	drm_device_t	      *dev = &i810_device;

	DRM_DEBUG("\n");

	drm_proc_cleanup();
	if (misc_deregister(&i810_misc)) {
		DRM_ERROR("Cannot unload module\n");
	} else {
		DRM_INFO("Module unloaded\n");
	}
	drm_ctxbitmap_cleanup(dev);
	i810_takedown(dev);
	if (dev->agp) {
		drm_agp_uninit();
		drm_free(dev->agp, sizeof(*dev->agp), DRM_MEM_AGPLISTS);
		dev->agp = NULL;
	}
}

module_init(i810_init);
module_exit(i810_cleanup);

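/* i810_version copies the driver name, date, and description into the
 * user-supplied buffers described by a drm_version_t. */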
int i810_version(struct inode *inode, struct file *filp, unsigned int cmd,
		  unsigned long arg)
{
	drm_version_t version;
	int	      len;

	if (copy_from_user(&version,
			   (drm_version_t *)arg,
			   sizeof(version)))
		return -EFAULT;

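/* DRM_COPY copies at most name##_len bytes of the string into the user
 * buffer, but always reports the full string length back so that userspace
 * can re-query with a larger buffer.  It uses the local variable len and
 * returns -EFAULT from the enclosing function on a failed copy_to_user(). */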
#define DRM_COPY(name,value)				     \
	len = strlen(value);				     \
	if (len > name##_len) len = name##_len;		     \
	name##_len = strlen(value);			     \
	if (len && name) {				     \
		if (copy_to_user(name, value, len))	     \
			return -EFAULT;			     \
	}

	version.version_major	   = I810_MAJOR;
	version.version_minor	   = I810_MINOR;
	version.version_patchlevel = I810_PATCHLEVEL;

	DRM_COPY(version.name, I810_NAME);
	DRM_COPY(version.date, I810_DATE);
	DRM_COPY(version.desc, I810_DESC);

	if (copy_to_user((drm_version_t *)arg,
			 &version,
			 sizeof(version)))
		return -EFAULT;
	return 0;
}

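/* The first successful open of the device triggers i810_setup(); subsequent
 * opens only increment the open count. */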
int i810_open(struct inode *inode, struct file *filp)
{
	drm_device_t  *dev    = &i810_device;
	int	      retcode = 0;

	DRM_DEBUG("open_count = %d\n", dev->open_count);
	if (!(retcode = drm_open_helper(inode, filp, dev))) {
#if LINUX_VERSION_CODE < 0x020333
		MOD_INC_USE_COUNT; /* Needed before Linux 2.3.51 */
#endif
		atomic_inc(&dev->total_open);
		spin_lock(&dev->count_lock);
		if (!dev->open_count++) {
			spin_unlock(&dev->count_lock);
			return i810_setup(dev);
		}
		spin_unlock(&dev->count_lock);
	}
	return retcode;
}

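/* i810_release reclaims the closing process's DMA buffers.  If the process
 * dies while holding the hardware lock, the lock is freed on its behalf;
 * otherwise the lock is taken in the kernel context so that the buffers can
 * be reclaimed safely.  The last close tears the device down via
 * i810_takedown(). */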
int i810_release(struct inode *inode, struct file *filp)
{
	drm_file_t    *priv   = filp->private_data;
	drm_device_t  *dev;
	int	      retcode = 0;

	lock_kernel();
	dev    = priv->dev;
	DRM_DEBUG("pid = %d, device = 0x%x, open_count = %d\n",
		  current->pid, dev->device, dev->open_count);

	if (dev->lock.hw_lock && _DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)
	    && dev->lock.pid == current->pid) {
		i810_reclaim_buffers(dev, priv->pid);
		DRM_ERROR("Process %d dead, freeing lock for context %d\n",
			  current->pid,
			  _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
		drm_lock_free(dev,
			      &dev->lock.hw_lock->lock,
			      _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));

				/* FIXME: may require heavy-handed reset of
				   hardware at this point, possibly
				   processed via a callback to the X
				   server. */
	} else if (dev->lock.hw_lock) {
		/* The lock is required to reclaim buffers */
		DECLARE_WAITQUEUE(entry, current);
		add_wait_queue(&dev->lock.lock_queue, &entry);
		for (;;) {
			current->state = TASK_INTERRUPTIBLE;
			if (!dev->lock.hw_lock) {
				/* Device has been unregistered */
				retcode = -EINTR;
				break;
			}
			if (drm_lock_take(&dev->lock.hw_lock->lock,
					  DRM_KERNEL_CONTEXT)) {
				dev->lock.pid	    = priv->pid;
				dev->lock.lock_time = jiffies;
				atomic_inc(&dev->total_locks);
				break;	/* Got lock */
			}
				/* Contention */
			atomic_inc(&dev->total_sleeps);
			schedule();
			if (signal_pending(current)) {
				retcode = -ERESTARTSYS;
				break;
			}
		}
		current->state = TASK_RUNNING;
		remove_wait_queue(&dev->lock.lock_queue, &entry);
		if (!retcode) {
			i810_reclaim_buffers(dev, priv->pid);
			drm_lock_free(dev, &dev->lock.hw_lock->lock,
				      DRM_KERNEL_CONTEXT);
		}
	}
	drm_fasync(-1, filp, 0);

	down(&dev->struct_sem);
	if (priv->prev) priv->prev->next = priv->next;
	else		dev->file_first	 = priv->next;
	if (priv->next) priv->next->prev = priv->prev;
	else		dev->file_last	 = priv->prev;
	up(&dev->struct_sem);

	drm_free(priv, sizeof(*priv), DRM_MEM_FILES);
#if LINUX_VERSION_CODE < 0x020333
	MOD_DEC_USE_COUNT; /* Needed before Linux 2.3.51 */
#endif
	atomic_inc(&dev->total_close);
	spin_lock(&dev->count_lock);
	if (!--dev->open_count) {
		if (atomic_read(&dev->ioctl_count) || dev->blocked) {
			DRM_ERROR("Device busy: %d %d\n",
				  atomic_read(&dev->ioctl_count),
				  dev->blocked);
			spin_unlock(&dev->count_lock);
			unlock_kernel();
			return -EBUSY;
		}
		spin_unlock(&dev->count_lock);
		unlock_kernel();
		return i810_takedown(dev);
	}
	spin_unlock(&dev->count_lock);
	unlock_kernel();
	return retcode;
}

/* i810_ioctl is called whenever a process performs an ioctl on the device. */

int i810_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
		unsigned long arg)
{
	int		 nr	 = DRM_IOCTL_NR(cmd);
	drm_file_t	 *priv	 = filp->private_data;
	drm_device_t	 *dev	 = priv->dev;
	int		 retcode = 0;
	drm_ioctl_desc_t *ioctl;
	drm_ioctl_t	 *func;

	atomic_inc(&dev->ioctl_count);
	atomic_inc(&dev->total_ioctl);
	++priv->ioctl_count;

	DRM_DEBUG("pid = %d, cmd = 0x%02x, nr = 0x%02x, dev 0x%x, auth = %d\n",
		  current->pid, cmd, nr, dev->device, priv->authenticated);

	if (nr >= I810_IOCTL_COUNT) {
		retcode = -EINVAL;
	} else {
		ioctl	  = &i810_ioctls[nr];
		func	  = ioctl->func;

		if (!func) {
			DRM_DEBUG("no function\n");
			retcode = -EINVAL;
		} else if ((ioctl->root_only && !capable(CAP_SYS_ADMIN))
			    || (ioctl->auth_needed && !priv->authenticated)) {
			retcode = -EACCES;
		} else {
			retcode = (func)(inode, filp, cmd, arg);
		}
	}

	atomic_dec(&dev->ioctl_count);
	return retcode;
}

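/* i810_unlock transfers the hardware lock to the kernel context and, unless
 * a context switch is pending (dev->context_flag), frees it so another
 * client can take it. */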
int i810_unlock(struct inode *inode, struct file *filp, unsigned int cmd,
		 unsigned long arg)
{
	drm_file_t	  *priv	  = filp->private_data;
	drm_device_t	  *dev	  = priv->dev;
	drm_lock_t	  lock;

	if (copy_from_user(&lock, (drm_lock_t *)arg, sizeof(lock)))
		return -EFAULT;

	if (lock.context == DRM_KERNEL_CONTEXT) {
		DRM_ERROR("Process %d using kernel context %d\n",
			  current->pid, lock.context);
		return -EINVAL;
	}

	DRM_DEBUG("%d frees lock (%d holds)\n",
		  lock.context,
		  _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
	atomic_inc(&dev->total_unlocks);
	if (_DRM_LOCK_IS_CONT(dev->lock.hw_lock->lock))
		atomic_inc(&dev->total_contends);
	drm_lock_transfer(dev, &dev->lock.hw_lock->lock, DRM_KERNEL_CONTEXT);
	if (!dev->context_flag) {
		if (drm_lock_free(dev, &dev->lock.hw_lock->lock,
				  DRM_KERNEL_CONTEXT)) {
			DRM_ERROR("\n");
		}
	}
#if DRM_DMA_HISTOGRAM
	atomic_inc(&dev->histo.lhld[drm_histogram_slot(get_cycles()
						       - dev->lck_start)]);
#endif

	unblock_all_signals();
	return 0;
}