/* tdfx_drv.c -- tdfx driver -*- linux-c -*-
 * Created: Thu Oct  7 10:38:32 1999 by faith@precisioninsight.com
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Rickard E. (Rik) Faith <faith@valinux.com>
 *    Daryll Strauss <daryll@valinux.com>
 *
 */

#include <linux/config.h>
#include "drmP.h"
#include "tdfx_drv.h"

#define TDFX_NAME	 "tdfx"
#define TDFX_DESC	 "3dfx Banshee/Voodoo3+"
#define TDFX_DATE	 "20000928"
#define TDFX_MAJOR	 1
#define TDFX_MINOR	 0
#define TDFX_PATCHLEVEL  0

static drm_device_t	      tdfx_device;
drm_ctx_t	              tdfx_res_ctx;

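/* File operations exported through the misc device below.  Most entries are
 * generic DRM helpers; open, release and ioctl wrap the tdfx-specific
 * versions defined later in this file. */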
static struct file_operations tdfx_fops = {
#if LINUX_VERSION_CODE >= 0x020400
				/* This started being used during 2.4.0-test */
	owner:   THIS_MODULE,
#endif
	open:	 tdfx_open,
	flush:	 drm_flush,
	release: tdfx_release,
	ioctl:	 tdfx_ioctl,
	mmap:	 drm_mmap,
	read:	 drm_read,
	fasync:	 drm_fasync,
	poll:	 drm_poll,
};

static struct miscdevice      tdfx_misc = {
	minor: MISC_DYNAMIC_MINOR,
	name:  TDFX_NAME,
	fops:  &tdfx_fops,
};
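/* Ioctl dispatch table, indexed by ioctl number.  The two flags after each
 * handler are the auth_needed and root_only fields checked in tdfx_ioctl()
 * below: the first requires an authenticated client, the second requires
 * CAP_SYS_ADMIN. */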
static drm_ioctl_desc_t	      tdfx_ioctls[] = {
	[DRM_IOCTL_NR(DRM_IOCTL_VERSION)]    = { tdfx_version,	  0, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE)] = { drm_getunique,	  0, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_GET_MAGIC)]  = { drm_getmagic,	  0, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_IRQ_BUSID)]  = { drm_irq_busid,	  0, 1 },

	[DRM_IOCTL_NR(DRM_IOCTL_SET_UNIQUE)] = { drm_setunique,	  1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_BLOCK)]	     = { drm_block,	  1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_UNBLOCK)]    = { drm_unblock,	  1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_AUTH_MAGIC)] = { drm_authmagic,	  1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP)]    = { drm_addmap,	  1, 1 },

	[DRM_IOCTL_NR(DRM_IOCTL_ADD_CTX)]    = { tdfx_addctx,	  1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_RM_CTX)]     = { tdfx_rmctx,	  1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_MOD_CTX)]    = { tdfx_modctx,	  1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_GET_CTX)]    = { tdfx_getctx,	  1, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_SWITCH_CTX)] = { tdfx_switchctx,  1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_NEW_CTX)]    = { tdfx_newctx,	  1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_RES_CTX)]    = { tdfx_resctx,	  1, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_ADD_DRAW)]   = { drm_adddraw,	  1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_RM_DRAW)]    = { drm_rmdraw,	  1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_LOCK)]	     = { tdfx_lock,	  1, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_UNLOCK)]     = { tdfx_unlock,	  1, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_FINISH)]     = { drm_finish,	  1, 0 },
#if defined(CONFIG_AGP) || defined(CONFIG_AGP_MODULE)
	[DRM_IOCTL_NR(DRM_IOCTL_AGP_ACQUIRE)]   = {drm_agp_acquire, 1, 1},
	[DRM_IOCTL_NR(DRM_IOCTL_AGP_RELEASE)]   = {drm_agp_release, 1, 1},
	[DRM_IOCTL_NR(DRM_IOCTL_AGP_ENABLE)]    = {drm_agp_enable,  1, 1},
	[DRM_IOCTL_NR(DRM_IOCTL_AGP_INFO)]      = {drm_agp_info,    1, 1},
	[DRM_IOCTL_NR(DRM_IOCTL_AGP_ALLOC)]     = {drm_agp_alloc,   1, 1},
	[DRM_IOCTL_NR(DRM_IOCTL_AGP_FREE)]      = {drm_agp_free,    1, 1},
	[DRM_IOCTL_NR(DRM_IOCTL_AGP_BIND)]      = {drm_agp_bind,    1, 1},
	[DRM_IOCTL_NR(DRM_IOCTL_AGP_UNBIND)]    = {drm_agp_unbind,  1, 1},
#endif
};
#define TDFX_IOCTL_COUNT DRM_ARRAY_SIZE(tdfx_ioctls)

#ifdef MODULE
static char		      *tdfx = NULL;
#endif

MODULE_AUTHOR("VA Linux Systems, Inc.");
MODULE_LICENSE("GPL and additional rights");
MODULE_DESCRIPTION("tdfx");
MODULE_PARM(tdfx, "s");

#ifndef MODULE
/* tdfx_options is called by the kernel to parse command-line options
 * passed via the boot-loader (e.g., LILO).  It calls the insmod option
 * routine, drm_parse_options.
 */

static int __init tdfx_options(char *str)
{
	drm_parse_options(str);
	return 1;
}

__setup("tdfx=", tdfx_options);
#endif

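/* tdfx_setup is called from tdfx_open when the first file handle on the
 * device is opened; it (re)initializes the per-device bookkeeping that
 * tdfx_takedown tears down again on the last close. */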
static int tdfx_setup(drm_device_t *dev)
{
	int i;

	atomic_set(&dev->ioctl_count, 0);
	atomic_set(&dev->vma_count, 0);
	dev->buf_use	  = 0;
	atomic_set(&dev->buf_alloc, 0);

	atomic_set(&dev->total_open, 0);
	atomic_set(&dev->total_close, 0);
	atomic_set(&dev->total_ioctl, 0);
	atomic_set(&dev->total_irq, 0);
	atomic_set(&dev->total_ctx, 0);
	atomic_set(&dev->total_locks, 0);
	atomic_set(&dev->total_unlocks, 0);
	atomic_set(&dev->total_contends, 0);
	atomic_set(&dev->total_sleeps, 0);

	for (i = 0; i < DRM_HASH_SIZE; i++) {
		dev->magiclist[i].head = NULL;
		dev->magiclist[i].tail = NULL;
	}
	dev->maplist	    = NULL;
	dev->map_count	    = 0;
	dev->vmalist	    = NULL;
	dev->lock.hw_lock   = NULL;
	init_waitqueue_head(&dev->lock.lock_queue);
	dev->queue_count    = 0;
	dev->queue_reserved = 0;
	dev->queue_slots    = 0;
	dev->queuelist	    = NULL;
	dev->irq	    = 0;
	dev->context_flag   = 0;
	dev->interrupt_flag = 0;
	dev->dma            = 0;
	dev->dma_flag	    = 0;
	dev->last_context   = 0;
	dev->last_switch    = 0;
	dev->last_checked   = 0;
	init_timer(&dev->timer);
	init_waitqueue_head(&dev->context_wait);

	dev->ctx_start	    = 0;
	dev->lck_start	    = 0;

	dev->buf_rp	  = dev->buf;
	dev->buf_wp	  = dev->buf;
	dev->buf_end	  = dev->buf + DRM_BSZ;
	dev->buf_async	  = NULL;
	init_waitqueue_head(&dev->buf_readers);
	init_waitqueue_head(&dev->buf_writers);

	tdfx_res_ctx.handle = -1;

	DRM_DEBUG("\n");

	/* The kernel's context could be created here, but is now created
	   in drm_dma_enqueue.	This is more resource-efficient for
	   hardware that does not do DMA, but may mean that
	   drm_select_queue fails between the time the interrupt is
	   initialized and the time the queues are initialized. */

	return 0;
}


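/* tdfx_takedown frees everything tdfx_setup and the ioctl handlers have
 * allocated for the device: magic/auth entries, AGP memory, VMA bookkeeping,
 * register/framebuffer mappings (and their MTRRs) and the hardware lock. */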
static int tdfx_takedown(drm_device_t *dev)
{
	int		  i;
	drm_magic_entry_t *pt, *next;
	drm_map_t	  *map;
	drm_vma_entry_t	  *vma, *vma_next;

	DRM_DEBUG("\n");

	down(&dev->struct_sem);
	del_timer(&dev->timer);

	if (dev->devname) {
		drm_free(dev->devname, strlen(dev->devname)+1, DRM_MEM_DRIVER);
		dev->devname = NULL;
	}

	if (dev->unique) {
		drm_free(dev->unique, strlen(dev->unique)+1, DRM_MEM_DRIVER);
		dev->unique = NULL;
		dev->unique_len = 0;
	}
				/* Clear pid list */
	for (i = 0; i < DRM_HASH_SIZE; i++) {
		for (pt = dev->magiclist[i].head; pt; pt = next) {
			next = pt->next;
			drm_free(pt, sizeof(*pt), DRM_MEM_MAGIC);
		}
		dev->magiclist[i].head = dev->magiclist[i].tail = NULL;
	}
#if defined(CONFIG_AGP) || defined(CONFIG_AGP_MODULE)
				/* Clear AGP information */
	if (dev->agp) {
		drm_agp_mem_t *temp;
		drm_agp_mem_t *temp_next;

		temp = dev->agp->memory;
		while (temp != NULL) {
			temp_next = temp->next;
			drm_free_agp(temp->memory, temp->pages);
			drm_free(temp, sizeof(*temp), DRM_MEM_AGPLISTS);
			temp = temp_next;
		}
		if (dev->agp->acquired) _drm_agp_release();
	}
#endif
				/* Clear vma list (only built for debugging) */
	if (dev->vmalist) {
		for (vma = dev->vmalist; vma; vma = vma_next) {
			vma_next = vma->next;
			drm_free(vma, sizeof(*vma), DRM_MEM_VMAS);
		}
		dev->vmalist = NULL;
	}

				/* Clear map area and mtrr information */
	if (dev->maplist) {
		for (i = 0; i < dev->map_count; i++) {
			map = dev->maplist[i];
			switch (map->type) {
			case _DRM_REGISTERS:
			case _DRM_FRAME_BUFFER:
#ifdef CONFIG_MTRR
				if (map->mtrr >= 0) {
					int retcode;
					retcode = mtrr_del(map->mtrr,
							   map->offset,
							   map->size);
					DRM_DEBUG("mtrr_del = %d\n", retcode);
				}
#endif
				drm_ioremapfree(map->handle, map->size, dev);
				break;
			case _DRM_SHM:
				drm_free_pages((unsigned long)map->handle,
					       drm_order(map->size)
					       - PAGE_SHIFT,
					       DRM_MEM_SAREA);
				break;
			case _DRM_AGP:
				/* Do nothing here, because this is all
				   handled in the AGP/GART driver. */
				break;
			}
			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
		}
		drm_free(dev->maplist,
			 dev->map_count * sizeof(*dev->maplist),
			 DRM_MEM_MAPS);
		dev->maplist   = NULL;
		dev->map_count = 0;
	}

	if (dev->lock.hw_lock) {
		dev->lock.hw_lock    = NULL; /* SHM removed */
		dev->lock.pid	     = 0;
		wake_up_interruptible(&dev->lock.lock_queue);
	}
	up(&dev->struct_sem);

	return 0;
}

/* tdfx_init is called via init_module at module load time, or via
 * linux/init/main.c (this is not currently supported). */

static int __init tdfx_init(void)
{
	int		      retcode;
	drm_device_t	      *dev = &tdfx_device;

	DRM_DEBUG("\n");

	memset((void *)dev, 0, sizeof(*dev));
	dev->count_lock	  = SPIN_LOCK_UNLOCKED;
	sema_init(&dev->struct_sem, 1);

#ifdef MODULE
	drm_parse_options(tdfx);
#endif

	if ((retcode = misc_register(&tdfx_misc))) {
		DRM_ERROR("Cannot register \"%s\"\n", TDFX_NAME);
		return retcode;
	}
	dev->device = MKDEV(MISC_MAJOR, tdfx_misc.minor);
	dev->name   = TDFX_NAME;

	drm_mem_init();
	drm_proc_init(dev);
#if defined(CONFIG_AGP) || defined(CONFIG_AGP_MODULE)
	dev->agp    = drm_agp_init();
#endif
	if ((retcode = drm_ctxbitmap_init(dev))) {
		DRM_ERROR("Cannot allocate memory for context bitmap.\n");
		drm_proc_cleanup();
		misc_deregister(&tdfx_misc);
		tdfx_takedown(dev);
		return retcode;
	}

	DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
		 TDFX_NAME,
		 TDFX_MAJOR,
		 TDFX_MINOR,
		 TDFX_PATCHLEVEL,
		 TDFX_DATE,
		 tdfx_misc.minor);

	return 0;
}

/* tdfx_cleanup is called via cleanup_module at module unload time. */

static void __exit tdfx_cleanup(void)
{
	drm_device_t	      *dev = &tdfx_device;

	DRM_DEBUG("\n");

	drm_proc_cleanup();
	if (misc_deregister(&tdfx_misc)) {
		DRM_ERROR("Cannot unload module\n");
	} else {
		DRM_INFO("Module unloaded\n");
	}
	drm_ctxbitmap_cleanup(dev);
	tdfx_takedown(dev);
#if defined(CONFIG_AGP) || defined(CONFIG_AGP_MODULE)
	if (dev->agp) {
		drm_agp_uninit();
		drm_free(dev->agp, sizeof(*dev->agp), DRM_MEM_AGPLISTS);
		dev->agp = NULL;
	}
#endif
}

module_init(tdfx_init);
module_exit(tdfx_cleanup);


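/* DRM_IOCTL_VERSION handler: copies the driver's version numbers back to
 * user space and, for each of the name/date/desc strings, copies at most
 * the number of bytes the caller's buffer advertises while reporting the
 * full string length so the caller can re-query with a larger buffer. */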
int tdfx_version(struct inode *inode, struct file *filp, unsigned int cmd,
		  unsigned long arg)
{
	drm_version_t version;
	int	      len;

	if (copy_from_user(&version,
			   (drm_version_t *)arg,
			   sizeof(version)))
		return -EFAULT;

#define DRM_COPY(name,value)				     \
	len = strlen(value);				     \
	if (len > name##_len) len = name##_len;		     \
	name##_len = strlen(value);			     \
	if (len && name) {				     \
		if (copy_to_user(name, value, len))	     \
			return -EFAULT;			     \
	}

	version.version_major	   = TDFX_MAJOR;
	version.version_minor	   = TDFX_MINOR;
	version.version_patchlevel = TDFX_PATCHLEVEL;

	DRM_COPY(version.name, TDFX_NAME);
	DRM_COPY(version.date, TDFX_DATE);
	DRM_COPY(version.desc, TDFX_DESC);

	if (copy_to_user((drm_version_t *)arg,
			 &version,
			 sizeof(version)))
		return -EFAULT;
	return 0;
}

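/* Open handler: defers most of the work to drm_open_helper and runs
 * tdfx_setup when this is the first open of the device. */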
int tdfx_open(struct inode *inode, struct file *filp)
{
	drm_device_t  *dev    = &tdfx_device;
	int	      retcode = 0;

	DRM_DEBUG("open_count = %d\n", dev->open_count);
	if (!(retcode = drm_open_helper(inode, filp, dev))) {
#if LINUX_VERSION_CODE < 0x020333
		MOD_INC_USE_COUNT; /* Needed before Linux 2.3.51 */
#endif
		atomic_inc(&dev->total_open);
		spin_lock(&dev->count_lock);
		if (!dev->open_count++) {
			spin_unlock(&dev->count_lock);
			return tdfx_setup(dev);
		}
		spin_unlock(&dev->count_lock);
	}
	return retcode;
}

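/* Release handler: the device must be idle (no ioctls in flight, nothing
 * blocked) before the last close may tear the device down. */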
int tdfx_release(struct inode *inode, struct file *filp)
{
	drm_file_t    *priv   = filp->private_data;
	drm_device_t  *dev;
	int	      retcode = 0;

	lock_kernel();
	dev = priv->dev;

	DRM_DEBUG("open_count = %d\n", dev->open_count);
	if (!(retcode = drm_release(inode, filp))) {
#if LINUX_VERSION_CODE < 0x020333
		MOD_DEC_USE_COUNT; /* Needed before Linux 2.3.51 */
#endif
		atomic_inc(&dev->total_close);
		spin_lock(&dev->count_lock);
		if (!--dev->open_count) {
			if (atomic_read(&dev->ioctl_count) || dev->blocked) {
				DRM_ERROR("Device busy: %d %d\n",
					  atomic_read(&dev->ioctl_count),
					  dev->blocked);
				spin_unlock(&dev->count_lock);
				unlock_kernel();
				return -EBUSY;
			}
			spin_unlock(&dev->count_lock);
			unlock_kernel();
			return tdfx_takedown(dev);
		}
		spin_unlock(&dev->count_lock);
	}

	unlock_kernel();
	return retcode;
}

/* tdfx_ioctl is called whenever a process performs an ioctl on the tdfx
 * device; the matching entry in tdfx_ioctls is checked for authentication
 * and root-only requirements before its handler is called. */

int tdfx_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
		unsigned long arg)
{
	int		 nr	 = DRM_IOCTL_NR(cmd);
	drm_file_t	 *priv	 = filp->private_data;
	drm_device_t	 *dev	 = priv->dev;
	int		 retcode = 0;
	drm_ioctl_desc_t *ioctl;
	drm_ioctl_t	 *func;

	atomic_inc(&dev->ioctl_count);
	atomic_inc(&dev->total_ioctl);
	++priv->ioctl_count;

	DRM_DEBUG("pid = %d, cmd = 0x%02x, nr = 0x%02x, dev 0x%x, auth = %d\n",
		  current->pid, cmd, nr, dev->device, priv->authenticated);

	if (nr >= TDFX_IOCTL_COUNT) {
		retcode = -EINVAL;
	} else {
		ioctl	  = &tdfx_ioctls[nr];
		func	  = ioctl->func;

		if (!func) {
			DRM_DEBUG("no function\n");
			retcode = -EINVAL;
		} else if ((ioctl->root_only && !capable(CAP_SYS_ADMIN))
			    || (ioctl->auth_needed && !priv->authenticated)) {
			retcode = -EACCES;
		} else {
			retcode = (func)(inode, filp, cmd, arg);
		}
	}

	atomic_dec(&dev->ioctl_count);
	return retcode;
}

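/* DRM_IOCTL_LOCK handler: sleeps on the lock wait queue until the hardware
 * lock can be taken for the requesting context, then blocks job-control
 * signals (SIGSTOP/SIGTSTP/SIGTTIN/SIGTTOU) for as long as the lock is held
 * so the client cannot be stopped while it owns the hardware. */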
int tdfx_lock(struct inode *inode, struct file *filp, unsigned int cmd,
	      unsigned long arg)
{
        drm_file_t        *priv   = filp->private_data;
        drm_device_t      *dev    = priv->dev;
        DECLARE_WAITQUEUE(entry, current);
        int               ret   = 0;
        drm_lock_t        lock;
#if DRM_DMA_HISTOGRAM
        cycles_t          start;

        dev->lck_start = start = get_cycles();
#endif

        if (copy_from_user(&lock, (drm_lock_t *)arg, sizeof(lock)))
		return -EFAULT;

        if (lock.context == DRM_KERNEL_CONTEXT) {
                DRM_ERROR("Process %d using kernel context %d\n",
                          current->pid, lock.context);
                return -EINVAL;
        }

        DRM_DEBUG("%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n",
                  lock.context, current->pid, dev->lock.hw_lock->lock,
                  lock.flags);

#if 0
				/* dev->queue_count == 0 right now for
                                   tdfx.  FIXME? */
        if (lock.context < 0 || lock.context >= dev->queue_count)
                return -EINVAL;
#endif

        if (!ret) {
#if 0
                if (_DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock)
                    != lock.context) {
                        long j = jiffies - dev->lock.lock_time;

                        if (lock.context == tdfx_res_ctx.handle &&
				j >= 0 && j < DRM_LOCK_SLICE) {
                                /* Can't take lock if we just had it and
                                   there is contention. */
                                DRM_DEBUG("%d (pid %d) delayed j=%d dev=%d jiffies=%d\n",
					lock.context, current->pid, j,
					dev->lock.lock_time, jiffies);
                                current->state = TASK_INTERRUPTIBLE;
				current->policy |= SCHED_YIELD;
                                schedule_timeout(DRM_LOCK_SLICE-j);
				DRM_DEBUG("jiffies=%d\n", jiffies);
                        }
                }
#endif
                add_wait_queue(&dev->lock.lock_queue, &entry);
                for (;;) {
                        current->state = TASK_INTERRUPTIBLE;
                        if (!dev->lock.hw_lock) {
                                /* Device has been unregistered */
                                ret = -EINTR;
                                break;
                        }
                        if (drm_lock_take(&dev->lock.hw_lock->lock,
                                          lock.context)) {
                                dev->lock.pid       = current->pid;
                                dev->lock.lock_time = jiffies;
                                atomic_inc(&dev->total_locks);
                                break;  /* Got lock */
                        }

                                /* Contention */
                        atomic_inc(&dev->total_sleeps);
			yield();
                        if (signal_pending(current)) {
                                ret = -ERESTARTSYS;
                                break;
                        }
                }
                current->state = TASK_RUNNING;
                remove_wait_queue(&dev->lock.lock_queue, &entry);
        }

#if 0
	if (!ret && dev->last_context != lock.context &&
		lock.context != tdfx_res_ctx.handle &&
		dev->last_context != tdfx_res_ctx.handle) {
		add_wait_queue(&dev->context_wait, &entry);
	        current->state = TASK_INTERRUPTIBLE;
                /* PRE: dev->last_context != lock.context */
	        tdfx_context_switch(dev, dev->last_context, lock.context);
		/* POST: we will wait for the context
                   switch and will dispatch on a later call
                   when dev->last_context == lock.context
                   NOTE WE HOLD THE LOCK THROUGHOUT THIS
                   TIME! */
		yield();
	        current->state = TASK_RUNNING;
	        remove_wait_queue(&dev->context_wait, &entry);
	        if (signal_pending(current)) {
	                ret = -EINTR;
	        } else if (dev->last_context != lock.context) {
			DRM_ERROR("Context mismatch: %d %d\n",
                        	dev->last_context, lock.context);
	        }
	}
#endif

        if (!ret) {
		sigemptyset(&dev->sigmask);
		sigaddset(&dev->sigmask, SIGSTOP);
		sigaddset(&dev->sigmask, SIGTSTP);
		sigaddset(&dev->sigmask, SIGTTIN);
		sigaddset(&dev->sigmask, SIGTTOU);
		dev->sigdata.context = lock.context;
		dev->sigdata.lock    = dev->lock.hw_lock;
		block_all_signals(drm_notifier, &dev->sigdata, &dev->sigmask);

                if (lock.flags & _DRM_LOCK_READY) {
				/* Wait for space in DMA/FIFO */
		}
                if (lock.flags & _DRM_LOCK_QUIESCENT) {
				/* Make hardware quiescent */
#if 0
                        tdfx_quiescent(dev);
#endif
		}
        }

#if LINUX_VERSION_CODE < 0x020400
	if (lock.context != tdfx_res_ctx.handle) {
		current->counter = 5;
		current->priority = DEF_PRIORITY/4;
	}
#endif
        DRM_DEBUG("%d %s\n", lock.context, ret ? "interrupted" : "has lock");

#if DRM_DMA_HISTOGRAM
        atomic_inc(&dev->histo.lacq[drm_histogram_slot(get_cycles() - start)]);
#endif

        return ret;
}


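/* DRM_IOCTL_UNLOCK handler: hands the hardware lock back to the kernel
 * context, frees it if no context switch is pending, and re-enables the
 * job-control signals blocked in tdfx_lock. */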
int tdfx_unlock(struct inode *inode, struct file *filp, unsigned int cmd,
		 unsigned long arg)
{
	drm_file_t	  *priv	  = filp->private_data;
	drm_device_t	  *dev	  = priv->dev;
	drm_lock_t	  lock;

	if (copy_from_user(&lock, (drm_lock_t *)arg, sizeof(lock)))
		return -EFAULT;

	if (lock.context == DRM_KERNEL_CONTEXT) {
		DRM_ERROR("Process %d using kernel context %d\n",
			  current->pid, lock.context);
		return -EINVAL;
	}

	DRM_DEBUG("%d frees lock (%d holds)\n",
		  lock.context,
		  _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
	atomic_inc(&dev->total_unlocks);
	if (_DRM_LOCK_IS_CONT(dev->lock.hw_lock->lock))
		atomic_inc(&dev->total_contends);
	drm_lock_transfer(dev, &dev->lock.hw_lock->lock, DRM_KERNEL_CONTEXT);
				/* FIXME: Try to send data to card here */
	if (!dev->context_flag) {
		if (drm_lock_free(dev, &dev->lock.hw_lock->lock,
				  DRM_KERNEL_CONTEXT)) {
			DRM_ERROR("\n");
		}
	}

#if LINUX_VERSION_CODE < 0x020400
	if (lock.context != tdfx_res_ctx.handle) {
		current->counter = 5;
		current->priority = DEF_PRIORITY;
	}
#endif

	unblock_all_signals();
	return 0;
}