1 /* drm_drv.h -- Generic driver template -*- linux-c -*-
2  * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
3  *
4  * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
5  * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
6  * All Rights Reserved.
7  *
8  * Permission is hereby granted, free of charge, to any person obtaining a
9  * copy of this software and associated documentation files (the "Software"),
10  * to deal in the Software without restriction, including without limitation
11  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12  * and/or sell copies of the Software, and to permit persons to whom the
13  * Software is furnished to do so, subject to the following conditions:
14  *
15  * The above copyright notice and this permission notice (including the next
16  * paragraph) shall be included in all copies or substantial portions of the
17  * Software.
18  *
19  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
22  * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
24  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
25  * OTHER DEALINGS IN THE SOFTWARE.
26  *
27  * Authors:
28  *    Rickard E. (Rik) Faith <faith@valinux.com>
29  *    Gareth Hughes <gareth@valinux.com>
30  */
31 
32 /*
33  * To use this template, you must at least define the following (samples
34  * given for the MGA driver):
35  *
36  * #define DRIVER_AUTHOR	"VA Linux Systems, Inc."
37  *
38  * #define DRIVER_NAME		"mga"
39  * #define DRIVER_DESC		"Matrox G200/G400"
40  * #define DRIVER_DATE		"20001127"
41  *
42  * #define DRIVER_MAJOR		2
43  * #define DRIVER_MINOR		0
44  * #define DRIVER_PATCHLEVEL	2
45  *
46  * #define DRIVER_IOCTL_COUNT	DRM_ARRAY_SIZE( mga_ioctls )
47  *
48  * #define DRM(x)		mga_##x
49  */
50 
/* Default values for the optional feature macros.  A driver that uses this
 * template may #define any of these before including it; anything left
 * undefined defaults to "feature not present" (0).
 */
#ifndef __MUST_HAVE_AGP
#define __MUST_HAVE_AGP			0
#endif
#ifndef __HAVE_CTX_BITMAP
#define __HAVE_CTX_BITMAP		0
#endif
#ifndef __HAVE_DMA_IRQ
#define __HAVE_DMA_IRQ			0
#endif
#ifndef __HAVE_DMA_QUEUE
#define __HAVE_DMA_QUEUE		0
#endif
#ifndef __HAVE_MULTIPLE_DMA_QUEUES
#define __HAVE_MULTIPLE_DMA_QUEUES	0
#endif
#ifndef __HAVE_DMA_SCHEDULE
#define __HAVE_DMA_SCHEDULE		0
#endif
#ifndef __HAVE_DMA_FLUSH
#define __HAVE_DMA_FLUSH		0
#endif
#ifndef __HAVE_DMA_READY
#define __HAVE_DMA_READY		0
#endif
#ifndef __HAVE_DMA_QUIESCENT
#define __HAVE_DMA_QUIESCENT		0
#endif
#ifndef __HAVE_RELEASE
#define __HAVE_RELEASE			0
#endif
#ifndef __HAVE_COUNTERS
#define __HAVE_COUNTERS			0
#endif
#ifndef __HAVE_SG
#define __HAVE_SG			0
#endif
#ifndef __HAVE_KERNEL_CTX_SWITCH
#define __HAVE_KERNEL_CTX_SWITCH	0
#endif

/* Driver hook macros: each expands to nothing unless the driver supplies
 * its own definition before including this template.  They are invoked at
 * the points their names suggest (see DRM(setup), DRM(takedown), drm_init,
 * drm_cleanup, DRM(release) below).
 */
#ifndef DRIVER_PREINIT
#define DRIVER_PREINIT()
#endif
#ifndef DRIVER_POSTINIT
#define DRIVER_POSTINIT()
#endif
#ifndef DRIVER_PRERELEASE
#define DRIVER_PRERELEASE()
#endif
#ifndef DRIVER_PRETAKEDOWN
#define DRIVER_PRETAKEDOWN()
#endif
#ifndef DRIVER_POSTCLEANUP
#define DRIVER_POSTCLEANUP()
#endif
#ifndef DRIVER_PRESETUP
#define DRIVER_PRESETUP()
#endif
#ifndef DRIVER_POSTSETUP
#define DRIVER_POSTSETUP()
#endif
/* Extra driver-specific entries appended to the DRM(ioctls) table. */
#ifndef DRIVER_IOCTLS
#define DRIVER_IOCTLS
#endif
/* Default file_operations table.  A driver may define its own DRIVER_FOPS
 * before inclusion to supply a different set of entry points.
 */
#ifndef DRIVER_FOPS
#define DRIVER_FOPS				\
static struct file_operations	DRM(fops) = {	\
	.owner   = THIS_MODULE,			\
	.open	 = DRM(open),			\
	.flush	 = DRM(flush),			\
	.release = DRM(release),		\
	.ioctl	 = DRM(ioctl),			\
	.mmap	 = DRM(mmap),			\
	.read	 = DRM(read),			\
	.fasync  = DRM(fasync),			\
	.poll	 = DRM(poll),			\
}
#endif
129 
#ifndef MODULE
/* DRM(options) is called by the kernel to parse command-line options
 * passed via the boot-loader (e.g., LILO).  It calls the insmod option
 * routine, drm_parse_drm.
 */
/* Use an additional macro to avoid preprocessor troubles */
#define DRM_OPTIONS_FUNC DRM(options)
/* Boot-time option hook: forwards the "DRIVER_NAME=" kernel command-line
 * string to the shared option parser.  Returns 1 so the option is
 * treated as consumed.
 */
static int __init DRM(options)( char *str )
{
	DRM(parse_options)( str );
	return 1;
}

__setup( DRIVER_NAME "=", DRM_OPTIONS_FUNC );
#undef DRM_OPTIONS_FUNC
#endif
146 
147 /*
148  * The default number of instances (minor numbers) to initialize.
149  */
150 #ifndef DRIVER_NUM_CARDS
151 #define DRIVER_NUM_CARDS 1
152 #endif
153 
154 static drm_device_t	*DRM(device);
155 static int		*DRM(minor);
156 static int		DRM(numdevs) = 0;
157 
158 DRIVER_FOPS;
159 
/* Master ioctl dispatch table, indexed by ioctl number.  Each entry holds
 * the handler plus two permission flags consulted by DRM(ioctl) below:
 * one requiring an authenticated client, one requiring CAP_SYS_ADMIN.
 * NOTE(review): the flag column order is assumed from the usual
 * drm_ioctl_desc_t layout { func, auth_needed, root_only } — confirm
 * against the struct definition, which is not visible in this file.
 */
static drm_ioctl_desc_t		  DRM(ioctls)[] = {
	[DRM_IOCTL_NR(DRM_IOCTL_VERSION)]       = { DRM(version),     0, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE)]    = { DRM(getunique),   0, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_GET_MAGIC)]     = { DRM(getmagic),    0, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_IRQ_BUSID)]     = { DRM(irq_busid),   0, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_GET_MAP)]       = { DRM(getmap),      0, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_GET_CLIENT)]    = { DRM(getclient),   0, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_GET_STATS)]     = { DRM(getstats),    0, 0 },

	[DRM_IOCTL_NR(DRM_IOCTL_SET_UNIQUE)]    = { DRM(setunique),   1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_BLOCK)]         = { DRM(block),       1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_UNBLOCK)]       = { DRM(unblock),     1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_AUTH_MAGIC)]    = { DRM(authmagic),   1, 1 },

	[DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP)]       = { DRM(addmap),      1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_RM_MAP)]        = { DRM(rmmap),       1, 0 },

#if __HAVE_CTX_BITMAP
	[DRM_IOCTL_NR(DRM_IOCTL_SET_SAREA_CTX)] = { DRM(setsareactx), 1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_GET_SAREA_CTX)] = { DRM(getsareactx), 1, 0 },
#endif

	[DRM_IOCTL_NR(DRM_IOCTL_ADD_CTX)]       = { DRM(addctx),      1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_RM_CTX)]        = { DRM(rmctx),       1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_MOD_CTX)]       = { DRM(modctx),      1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_GET_CTX)]       = { DRM(getctx),      1, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_SWITCH_CTX)]    = { DRM(switchctx),   1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_NEW_CTX)]       = { DRM(newctx),      1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_RES_CTX)]       = { DRM(resctx),      1, 0 },

	[DRM_IOCTL_NR(DRM_IOCTL_ADD_DRAW)]      = { DRM(adddraw),     1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_RM_DRAW)]       = { DRM(rmdraw),      1, 1 },

	[DRM_IOCTL_NR(DRM_IOCTL_LOCK)]	        = { DRM(lock),        1, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_UNLOCK)]        = { DRM(unlock),      1, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_FINISH)]        = { DRM(finish),      1, 0 },

#if __HAVE_DMA
	[DRM_IOCTL_NR(DRM_IOCTL_ADD_BUFS)]      = { DRM(addbufs),     1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_MARK_BUFS)]     = { DRM(markbufs),    1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_INFO_BUFS)]     = { DRM(infobufs),    1, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_MAP_BUFS)]      = { DRM(mapbufs),     1, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_FREE_BUFS)]     = { DRM(freebufs),    1, 0 },

	/* The DRM_IOCTL_DMA ioctl should be defined by the driver.
	 */
	[DRM_IOCTL_NR(DRM_IOCTL_CONTROL)]       = { DRM(control),     1, 1 },
#endif

#if __REALLY_HAVE_AGP
	[DRM_IOCTL_NR(DRM_IOCTL_AGP_ACQUIRE)]   = { DRM(agp_acquire), 1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_AGP_RELEASE)]   = { DRM(agp_release), 1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_AGP_ENABLE)]    = { DRM(agp_enable),  1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_AGP_INFO)]      = { DRM(agp_info),    1, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_AGP_ALLOC)]     = { DRM(agp_alloc),   1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_AGP_FREE)]      = { DRM(agp_free),    1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_AGP_BIND)]      = { DRM(agp_bind),    1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_AGP_UNBIND)]    = { DRM(agp_unbind),  1, 1 },
#endif

#if __HAVE_SG
	[DRM_IOCTL_NR(DRM_IOCTL_SG_ALLOC)]      = { DRM(sg_alloc),    1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_SG_FREE)]       = { DRM(sg_free),     1, 1 },
#endif

#if __HAVE_VBL_IRQ
	[DRM_IOCTL_NR(DRM_IOCTL_WAIT_VBLANK)]   = { DRM(wait_vblank), 0, 0 },
#endif

	DRIVER_IOCTLS
};

/* Number of entries in the dispatch table; used as the bounds check in
 * DRM(ioctl).
 */
#define DRIVER_IOCTL_COUNT	DRM_ARRAY_SIZE( DRM(ioctls) )
233 
#ifdef MODULE
/* Module-load option string, handed to DRM(parse_options) in drm_init(). */
static char *drm_opts = NULL;
#endif

MODULE_AUTHOR( DRIVER_AUTHOR );
MODULE_DESCRIPTION( DRIVER_DESC );
MODULE_PARM( drm_opts, "s" );
MODULE_LICENSE("GPL and additional rights");
242 
DRM(setup)243 static int DRM(setup)( drm_device_t *dev )
244 {
245 	int i;
246 
247 	DRIVER_PRESETUP();
248 	atomic_set( &dev->ioctl_count, 0 );
249 	atomic_set( &dev->vma_count, 0 );
250 	dev->buf_use = 0;
251 	atomic_set( &dev->buf_alloc, 0 );
252 
253 #if __HAVE_DMA
254 	i = DRM(dma_setup)( dev );
255 	if ( i < 0 )
256 		return i;
257 #endif
258 
259 	dev->counters  = 6 + __HAVE_COUNTERS;
260 	dev->types[0]  = _DRM_STAT_LOCK;
261 	dev->types[1]  = _DRM_STAT_OPENS;
262 	dev->types[2]  = _DRM_STAT_CLOSES;
263 	dev->types[3]  = _DRM_STAT_IOCTLS;
264 	dev->types[4]  = _DRM_STAT_LOCKS;
265 	dev->types[5]  = _DRM_STAT_UNLOCKS;
266 #ifdef __HAVE_COUNTER6
267 	dev->types[6]  = __HAVE_COUNTER6;
268 #endif
269 #ifdef __HAVE_COUNTER7
270 	dev->types[7]  = __HAVE_COUNTER7;
271 #endif
272 #ifdef __HAVE_COUNTER8
273 	dev->types[8]  = __HAVE_COUNTER8;
274 #endif
275 #ifdef __HAVE_COUNTER9
276 	dev->types[9]  = __HAVE_COUNTER9;
277 #endif
278 #ifdef __HAVE_COUNTER10
279 	dev->types[10] = __HAVE_COUNTER10;
280 #endif
281 #ifdef __HAVE_COUNTER11
282 	dev->types[11] = __HAVE_COUNTER11;
283 #endif
284 #ifdef __HAVE_COUNTER12
285 	dev->types[12] = __HAVE_COUNTER12;
286 #endif
287 #ifdef __HAVE_COUNTER13
288 	dev->types[13] = __HAVE_COUNTER13;
289 #endif
290 #ifdef __HAVE_COUNTER14
291 	dev->types[14] = __HAVE_COUNTER14;
292 #endif
293 #ifdef __HAVE_COUNTER15
294 	dev->types[14] = __HAVE_COUNTER14;
295 #endif
296 
297 	for ( i = 0 ; i < DRM_ARRAY_SIZE(dev->counts) ; i++ )
298 		atomic_set( &dev->counts[i], 0 );
299 
300 	for ( i = 0 ; i < DRM_HASH_SIZE ; i++ ) {
301 		dev->magiclist[i].head = NULL;
302 		dev->magiclist[i].tail = NULL;
303 	}
304 
305 	dev->maplist = DRM(alloc)(sizeof(*dev->maplist),
306 				  DRM_MEM_MAPS);
307 	if(dev->maplist == NULL) return -ENOMEM;
308 	memset(dev->maplist, 0, sizeof(*dev->maplist));
309 	INIT_LIST_HEAD(&dev->maplist->head);
310 	dev->map_count = 0;
311 
312 	dev->vmalist = NULL;
313 	dev->sigdata.lock = dev->lock.hw_lock = NULL;
314 	init_waitqueue_head( &dev->lock.lock_queue );
315 	dev->queue_count = 0;
316 	dev->queue_reserved = 0;
317 	dev->queue_slots = 0;
318 	dev->queuelist = NULL;
319 	dev->irq = 0;
320 	dev->context_flag = 0;
321 	dev->interrupt_flag = 0;
322 	dev->dma_flag = 0;
323 	dev->last_context = 0;
324 	dev->last_switch = 0;
325 	dev->last_checked = 0;
326 	init_timer( &dev->timer );
327 	init_waitqueue_head( &dev->context_wait );
328 
329 	dev->ctx_start = 0;
330 	dev->lck_start = 0;
331 
332 	dev->buf_rp = dev->buf;
333 	dev->buf_wp = dev->buf;
334 	dev->buf_end = dev->buf + DRM_BSZ;
335 	dev->buf_async = NULL;
336 	init_waitqueue_head( &dev->buf_readers );
337 	init_waitqueue_head( &dev->buf_writers );
338 
339 	DRM_DEBUG( "\n" );
340 
341 	/* The kernel's context could be created here, but is now created
342 	 * in drm_dma_enqueue.	This is more resource-efficient for
343 	 * hardware that does not do DMA, but may mean that
344 	 * drm_select_queue fails between the time the interrupt is
345 	 * initialized and the time the queues are initialized.
346 	 */
347 	DRIVER_POSTSETUP();
348 	return 0;
349 }
350 
351 
DRM(takedown)352 static int DRM(takedown)( drm_device_t *dev )
353 {
354 	drm_magic_entry_t *pt, *next;
355 	drm_map_t *map;
356 	drm_map_list_t *r_list;
357 	struct list_head *list, *list_next;
358 	drm_vma_entry_t *vma, *vma_next;
359 	int i;
360 
361 	DRM_DEBUG( "\n" );
362 
363 	DRIVER_PRETAKEDOWN();
364 #if __HAVE_DMA_IRQ
365 	if ( dev->irq ) DRM(irq_uninstall)( dev );
366 #endif
367 
368 	down( &dev->struct_sem );
369 	del_timer( &dev->timer );
370 
371 	if ( dev->devname ) {
372 		DRM(free)( dev->devname, strlen( dev->devname ) + 1,
373 			   DRM_MEM_DRIVER );
374 		dev->devname = NULL;
375 	}
376 
377 	if ( dev->unique ) {
378 		DRM(free)( dev->unique, strlen( dev->unique ) + 1,
379 			   DRM_MEM_DRIVER );
380 		dev->unique = NULL;
381 		dev->unique_len = 0;
382 	}
383 				/* Clear pid list */
384 	for ( i = 0 ; i < DRM_HASH_SIZE ; i++ ) {
385 		for ( pt = dev->magiclist[i].head ; pt ; pt = next ) {
386 			next = pt->next;
387 			DRM(free)( pt, sizeof(*pt), DRM_MEM_MAGIC );
388 		}
389 		dev->magiclist[i].head = dev->magiclist[i].tail = NULL;
390 	}
391 
392 #if __REALLY_HAVE_AGP
393 				/* Clear AGP information */
394 	if ( dev->agp ) {
395 		drm_agp_mem_t *entry;
396 		drm_agp_mem_t *nexte;
397 
398 				/* Remove AGP resources, but leave dev->agp
399                                    intact until drv_cleanup is called. */
400 		for ( entry = dev->agp->memory ; entry ; entry = nexte ) {
401 			nexte = entry->next;
402 			if ( entry->bound ) DRM(unbind_agp)( entry->memory );
403 			DRM(free_agp)( entry->memory, entry->pages );
404 			DRM(free)( entry, sizeof(*entry), DRM_MEM_AGPLISTS );
405 		}
406 		dev->agp->memory = NULL;
407 
408 		if ( dev->agp->acquired ) DRM(agp_do_release)();
409 
410 		dev->agp->acquired = 0;
411 		dev->agp->enabled  = 0;
412 	}
413 #endif
414 
415 				/* Clear vma list (only built for debugging) */
416 	if ( dev->vmalist ) {
417 		for ( vma = dev->vmalist ; vma ; vma = vma_next ) {
418 			vma_next = vma->next;
419 			DRM(free)( vma, sizeof(*vma), DRM_MEM_VMAS );
420 		}
421 		dev->vmalist = NULL;
422 	}
423 
424 	if( dev->maplist ) {
425 		for(list = dev->maplist->head.next;
426 		    list != &dev->maplist->head;
427 		    list = list_next) {
428 			list_next = list->next;
429 			r_list = (drm_map_list_t *)list;
430 			map = r_list->map;
431 			DRM(free)(r_list, sizeof(*r_list), DRM_MEM_MAPS);
432 			if(!map) continue;
433 
434 			switch ( map->type ) {
435 			case _DRM_REGISTERS:
436 			case _DRM_FRAME_BUFFER:
437 #if __REALLY_HAVE_MTRR
438 				if ( map->mtrr >= 0 ) {
439 					int retcode;
440 					retcode = mtrr_del( map->mtrr,
441 							    map->offset,
442 							    map->size );
443 					DRM_DEBUG( "mtrr_del=%d\n", retcode );
444 				}
445 #endif
446 				DRM(ioremapfree)( map->handle, map->size, dev );
447 				break;
448 			case _DRM_SHM:
449 				vfree(map->handle);
450 				break;
451 
452 			case _DRM_AGP:
453 				/* Do nothing here, because this is all
454 				 * handled in the AGP/GART driver.
455 				 */
456 				break;
457                        case _DRM_SCATTER_GATHER:
458 				/* Handle it, but do nothing, if HAVE_SG
459 				 * isn't defined.
460 				 */
461 #if __HAVE_SG
462 				if(dev->sg) {
463 					DRM(sg_cleanup)(dev->sg);
464 					dev->sg = NULL;
465 				}
466 #endif
467 				break;
468 			}
469  			DRM(free)(map, sizeof(*map), DRM_MEM_MAPS);
470  		}
471 		DRM(free)(dev->maplist, sizeof(*dev->maplist), DRM_MEM_MAPS);
472 		dev->maplist = NULL;
473  	}
474 
475 #if __HAVE_DMA_QUEUE || __HAVE_MULTIPLE_DMA_QUEUES
476 	if ( dev->queuelist ) {
477 		for ( i = 0 ; i < dev->queue_count ; i++ ) {
478 			DRM(waitlist_destroy)( &dev->queuelist[i]->waitlist );
479 			if ( dev->queuelist[i] ) {
480 				DRM(free)( dev->queuelist[i],
481 					  sizeof(*dev->queuelist[0]),
482 					  DRM_MEM_QUEUES );
483 				dev->queuelist[i] = NULL;
484 			}
485 		}
486 		DRM(free)( dev->queuelist,
487 			  dev->queue_slots * sizeof(*dev->queuelist),
488 			  DRM_MEM_QUEUES );
489 		dev->queuelist = NULL;
490 	}
491 	dev->queue_count = 0;
492 #endif
493 
494 #if __HAVE_DMA
495 	DRM(dma_takedown)( dev );
496 #endif
497 	if ( dev->lock.hw_lock ) {
498 		dev->sigdata.lock = dev->lock.hw_lock = NULL; /* SHM removed */
499 		dev->lock.pid = 0;
500 		wake_up_interruptible( &dev->lock.lock_queue );
501 	}
502 	up( &dev->struct_sem );
503 
504 	return 0;
505 }
506 
507 /*
508  * Figure out how many instances to initialize.
509  */
/* Determine how many device instances to initialize.  Uses, in order of
 * preference: a driver-supplied DRIVER_COUNT_CARDS() hook, a PCI scan over
 * DRIVER_CARD_LIST (where 0xffff in either field acts as a PCI_ANY_ID
 * wildcard), or the static DRIVER_NUM_CARDS fallback.
 */
static int drm_count_cards(void)
{
	int num = 0;
#if defined(DRIVER_CARD_LIST)
	int i;
	drm_pci_list_t *l;
	u16 device, vendor;
	struct pci_dev *pdev = NULL;
#endif

	DRM_DEBUG( "\n" );

#if defined(DRIVER_COUNT_CARDS)
	num = DRIVER_COUNT_CARDS();
#elif defined(DRIVER_CARD_LIST)
	/* Walk the vendor/device table; a zero vendor terminates the list. */
	for (i = 0, l = DRIVER_CARD_LIST; l[i].vendor != 0; i++) {
		pdev = NULL;
		vendor = l[i].vendor;
		device = l[i].device;
		if(device == 0xffff) device = PCI_ANY_ID;
		if(vendor == 0xffff) vendor = PCI_ANY_ID;
		while ((pdev = pci_find_device(vendor, device, pdev))) {
			num++;
		}
	}
#else
	num = DRIVER_NUM_CARDS;
#endif
	DRM_DEBUG("numdevs = %d\n", num);
	return num;
}
541 
542 /* drm_init is called via init_module at module load time, or via
543  * linux/init/main.c (this is not currently supported).
544  */
/* Module-load entry point: allocates the per-instance device and minor
 * arrays, registers each instance with the stub driver, and performs
 * per-device AGP/MTRR and context-bitmap initialization.
 */
static int __init drm_init( void )
{

	drm_device_t *dev;
	int i;
#if __HAVE_CTX_BITMAP
	int retcode;
#endif
	DRM_DEBUG( "\n" );

#ifdef MODULE
	DRM(parse_options)( drm_opts );
#endif

	DRM(numdevs) = drm_count_cards();
	/* Force at least one instance. */
	if (DRM(numdevs) <= 0)
		DRM(numdevs) = 1;

	DRM(device) = kmalloc(sizeof(*DRM(device)) * DRM(numdevs), GFP_KERNEL);
	if (!DRM(device)) {
		return -ENOMEM;
	}
	DRM(minor) = kmalloc(sizeof(*DRM(minor)) * DRM(numdevs), GFP_KERNEL);
	if (!DRM(minor)) {
		kfree(DRM(device));
		return -ENOMEM;
	}

	DRIVER_PREINIT();

	DRM(mem_init)();

	for (i = 0; i < DRM(numdevs); i++) {
		dev = &(DRM(device)[i]);
		memset( (void *)dev, 0, sizeof(*dev) );
		dev->count_lock = SPIN_LOCK_UNLOCKED;
		sema_init( &dev->struct_sem, 1 );

		/* NOTE(review): this failure path (and the two below) leaks
		 * DRM(device)/DRM(minor) and leaves minors registered by
		 * earlier loop iterations in place — consider a full unwind.
		 */
		if ((DRM(minor)[i] = DRM(stub_register)(DRIVER_NAME, &DRM(fops),dev)) < 0)
			return -EPERM;
		dev->device = MKDEV(DRM_MAJOR, DRM(minor)[i] );
		dev->name   = DRIVER_NAME;

#if __REALLY_HAVE_AGP
		dev->agp = DRM(agp_init)();
#if __MUST_HAVE_AGP
		if ( dev->agp == NULL ) {
			DRM_ERROR( "Cannot initialize the agpgart module.\n" );
			DRM(stub_unregister)(DRM(minor)[i]);
			DRM(takedown)( dev );
			return -ENOMEM;
		}
#endif
#if __REALLY_HAVE_MTRR
		/* Mark the AGP aperture write-combining; on failure agp_mtrr
		 * simply stays negative and no MTRR is removed at cleanup.
		 */
		if (dev->agp)
			dev->agp->agp_mtrr = mtrr_add( dev->agp->agp_info.aper_base,
				       dev->agp->agp_info.aper_size*1024*1024,
				       MTRR_TYPE_WRCOMB,
				       1 );
#endif
#endif

#if __HAVE_CTX_BITMAP
		retcode = DRM(ctxbitmap_init)( dev );
		if( retcode ) {
			DRM_ERROR( "Cannot allocate memory for context bitmap.\n" );
			DRM(stub_unregister)(DRM(minor)[i]);
			DRM(takedown)( dev );
			return retcode;
		}
#endif
		DRM_INFO( "Initialized %s %d.%d.%d %s on minor %d\n",
		  	DRIVER_NAME,
		  	DRIVER_MAJOR,
		  	DRIVER_MINOR,
		  	DRIVER_PATCHLEVEL,
		  	DRIVER_DATE,
		  	DRM(minor)[i] );
	}

	DRIVER_POSTINIT();

	return 0;
}
630 
631 /* drm_cleanup is called via cleanup_module at module unload time.
632  */
/* Module-unload entry point: unregisters each instance's minor, tears
 * down per-device state (context bitmap, MTRR, AGP), and frees the
 * instance arrays.  Mirrors drm_init in reverse order.
 */
static void __exit drm_cleanup( void )
{
	drm_device_t *dev;
	int i;

	DRM_DEBUG( "\n" );

	for (i = DRM(numdevs) - 1; i >= 0; i--) {
		dev = &(DRM(device)[i]);
		if ( DRM(stub_unregister)(DRM(minor)[i]) ) {
			DRM_ERROR( "Cannot unload module\n" );
		} else {
			DRM_DEBUG("minor %d unregistered\n", DRM(minor)[i]);
			if (i == 0) {
				DRM_INFO( "Module unloaded\n" );
			}
		}
#if __HAVE_CTX_BITMAP
		DRM(ctxbitmap_cleanup)( dev );
#endif

#if __REALLY_HAVE_AGP && __REALLY_HAVE_MTRR
		/* Remove the write-combining MTRR added in drm_init (only if
		 * one was successfully registered; agp_mtrr < 0 otherwise).
		 */
		if ( dev->agp && dev->agp->agp_mtrr >= 0) {
			int retval;
			retval = mtrr_del( dev->agp->agp_mtrr,
				   dev->agp->agp_info.aper_base,
				   dev->agp->agp_info.aper_size*1024*1024 );
			DRM_DEBUG( "mtrr_del=%d\n", retval );
		}
#endif

		DRM(takedown)( dev );

#if __REALLY_HAVE_AGP
		/* dev->agp was left allocated by DRM(takedown); free it now. */
		if ( dev->agp ) {
			DRM(agp_uninit)();
			DRM(free)( dev->agp, sizeof(*dev->agp), DRM_MEM_AGPLISTS );
			dev->agp = NULL;
		}
#endif
	}
	DRIVER_POSTCLEANUP();
	kfree(DRM(minor));
	kfree(DRM(device));
	DRM(numdevs) = 0;
}
679 
/* Register the module load/unload entry points with the kernel. */
module_init( drm_init );
module_exit( drm_cleanup );
682 
683 
/* DRM_IOCTL_VERSION handler: reports the driver's major/minor/patchlevel
 * numbers plus the name, date, and description strings to user space.
 */
int DRM(version)( struct inode *inode, struct file *filp,
		  unsigned int cmd, unsigned long arg )
{
	drm_version_t version;
	int len;

	if ( copy_from_user( &version,
			     (drm_version_t *)arg,
			     sizeof(version) ) )
		return -EFAULT;

/* Copy at most name##_len bytes of `value` into the user buffer `name`,
 * then set name##_len to the full string length so the caller can detect
 * truncation and re-issue the ioctl with a larger buffer.
 * NOTE(review): the macro is never #undef'd, so it remains visible for
 * the rest of the file.
 */
#define DRM_COPY( name, value )						\
	len = strlen( value );						\
	if ( len > name##_len ) len = name##_len;			\
	name##_len = strlen( value );					\
	if ( len && name ) {						\
		if ( copy_to_user( name, value, len ) )			\
			return -EFAULT;					\
	}

	version.version_major = DRIVER_MAJOR;
	version.version_minor = DRIVER_MINOR;
	version.version_patchlevel = DRIVER_PATCHLEVEL;

	DRM_COPY( version.name, DRIVER_NAME );
	DRM_COPY( version.date, DRIVER_DATE );
	DRM_COPY( version.desc, DRIVER_DESC );

	if ( copy_to_user( (drm_version_t *)arg,
			   &version,
			   sizeof(version) ) )
		return -EFAULT;
	return 0;
}
718 
DRM(open)719 int DRM(open)( struct inode *inode, struct file *filp )
720 {
721 	drm_device_t *dev = NULL;
722 	int retcode = 0;
723 	int i;
724 
725 	for (i = 0; i < DRM(numdevs); i++) {
726 		if (minor(inode->i_rdev) == DRM(minor)[i]) {
727 			dev = &(DRM(device)[i]);
728 			break;
729 		}
730 	}
731 	if (!dev) {
732 		return -ENODEV;
733 	}
734 
735 	DRM_DEBUG( "open_count = %d\n", dev->open_count );
736 
737 	retcode = DRM(open_helper)( inode, filp, dev );
738 	if ( !retcode ) {
739 		atomic_inc( &dev->counts[_DRM_STAT_OPENS] );
740 		spin_lock( &dev->count_lock );
741 		if ( !dev->open_count++ ) {
742 			spin_unlock( &dev->count_lock );
743 			return DRM(setup)( dev );
744 		}
745 		spin_unlock( &dev->count_lock );
746 	}
747 
748 	return retcode;
749 }
750 
/* File-release entry point.  Frees the hardware lock if the closing
 * process still holds it (or, with __HAVE_RELEASE, acquires it to let the
 * driver reclaim buffers), unlinks the file handle from the device's open
 * list, and tears the device down on last close.  Runs under the BKL.
 */
int DRM(release)( struct inode *inode, struct file *filp )
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev;
	int retcode = 0;

	lock_kernel();
	dev = priv->dev;

	DRM_DEBUG( "open_count = %d\n", dev->open_count );

	DRIVER_PRERELEASE();

	/* ========================================================
	 * Begin inline drm_release
	 */

	DRM_DEBUG( "pid = %d, device = 0x%lx, open_count = %d\n",
		   current->pid, (long)dev->device, dev->open_count );

	/* If this process still holds the hardware lock, release it on its
	 * behalf so other clients are not left wedged.
	 */
	if ( dev->lock.hw_lock &&
	     _DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) &&
	     dev->lock.pid == current->pid ) {
		DRM_DEBUG( "Process %d dead, freeing lock for context %d\n",
			   current->pid,
			   _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock) );
#if __HAVE_RELEASE
		DRIVER_RELEASE();
#endif
		DRM(lock_free)( dev, &dev->lock.hw_lock->lock,
				_DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock) );

				/* FIXME: may require heavy-handed reset of
                                   hardware at this point, possibly
                                   processed via a callback to the X
                                   server. */
	}
#if __HAVE_RELEASE
	else if ( dev->lock.hw_lock ) {
		/* The lock is required to reclaim buffers */
		DECLARE_WAITQUEUE( entry, current );
		add_wait_queue( &dev->lock.lock_queue, &entry );
		for (;;) {
			current->state = TASK_INTERRUPTIBLE;
			if ( !dev->lock.hw_lock ) {
				/* Device has been unregistered */
				retcode = -EINTR;
				break;
			}
			if ( DRM(lock_take)( &dev->lock.hw_lock->lock,
					     DRM_KERNEL_CONTEXT ) ) {
				dev->lock.pid	    = priv->pid;
				dev->lock.lock_time = jiffies;
                                atomic_inc( &dev->counts[_DRM_STAT_LOCKS] );
				break;	/* Got lock */
			}
				/* Contention */
#if 0
			atomic_inc( &dev->total_sleeps );
#endif
			schedule();
			if ( signal_pending( current ) ) {
				retcode = -ERESTARTSYS;
				break;
			}
		}
		current->state = TASK_RUNNING;
		remove_wait_queue( &dev->lock.lock_queue, &entry );
		if( !retcode ) {
			DRIVER_RELEASE();
			DRM(lock_free)( dev, &dev->lock.hw_lock->lock,
					DRM_KERNEL_CONTEXT );
		}
	}
#elif __HAVE_DMA
	DRM(reclaim_buffers)( dev, priv->pid );
#endif

	DRM(fasync)( -1, filp, 0 );

	/* Unlink this file handle from the device's open-file list, and
	 * optionally de-authenticate all remaining clients.
	 */
	down( &dev->struct_sem );
	if ( priv->remove_auth_on_close == 1 ) {
		drm_file_t *temp = dev->file_first;
		while ( temp ) {
			temp->authenticated = 0;
			temp = temp->next;
		}
	}
	if ( priv->prev ) {
		priv->prev->next = priv->next;
	} else {
		dev->file_first	 = priv->next;
	}
	if ( priv->next ) {
		priv->next->prev = priv->prev;
	} else {
		dev->file_last	 = priv->prev;
	}
	up( &dev->struct_sem );

	DRM(free)( priv, sizeof(*priv), DRM_MEM_FILES );

	/* ========================================================
	 * End inline drm_release
	 */

	atomic_inc( &dev->counts[_DRM_STAT_CLOSES] );
	spin_lock( &dev->count_lock );
	/* Last close tears the device down, unless ioctls are in flight. */
	if ( !--dev->open_count ) {
		if ( atomic_read( &dev->ioctl_count ) || dev->blocked ) {
			DRM_ERROR( "Device busy: %d %d\n",
				   atomic_read( &dev->ioctl_count ),
				   dev->blocked );
			spin_unlock( &dev->count_lock );
			unlock_kernel();
			return -EBUSY;
		}
		spin_unlock( &dev->count_lock );
		unlock_kernel();
		return DRM(takedown)( dev );
	}
	spin_unlock( &dev->count_lock );

	unlock_kernel();
	return retcode;
}
877 
878 /* DRM(ioctl) is called whenever a process performs an ioctl on /dev/drm.
879  */
DRM(ioctl)880 int DRM(ioctl)( struct inode *inode, struct file *filp,
881 		unsigned int cmd, unsigned long arg )
882 {
883 	drm_file_t *priv = filp->private_data;
884 	drm_device_t *dev = priv->dev;
885 	drm_ioctl_desc_t *ioctl;
886 	drm_ioctl_t *func;
887 	int nr = DRM_IOCTL_NR(cmd);
888 	int retcode = 0;
889 
890 	atomic_inc( &dev->ioctl_count );
891 	atomic_inc( &dev->counts[_DRM_STAT_IOCTLS] );
892 	++priv->ioctl_count;
893 
894 	DRM_DEBUG( "pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
895 		   current->pid, cmd, nr, (long)dev->device,
896 		   priv->authenticated );
897 
898 	if ( nr >= DRIVER_IOCTL_COUNT ) {
899 		retcode = -EINVAL;
900 	} else {
901 		ioctl = &DRM(ioctls)[nr];
902 		func = ioctl->func;
903 
904 		if ( !func ) {
905 			DRM_DEBUG( "no function\n" );
906 			retcode = -EINVAL;
907 		} else if ( ( ioctl->root_only && !capable( CAP_SYS_ADMIN ) )||
908 			    ( ioctl->auth_needed && !priv->authenticated ) ) {
909 			retcode = -EACCES;
910 		} else {
911 			retcode = func( inode, filp, cmd, arg );
912 		}
913 	}
914 
915 	atomic_dec( &dev->ioctl_count );
916 	return retcode;
917 }
918 
/* DRM_IOCTL_LOCK handler: blocks until the caller acquires the hardware
 * lock for the requested context, then blocks job-control signals so the
 * client cannot be stopped while holding the lock, and runs the optional
 * DMA-ready/quiescent and kernel context-switch hooks.
 */
int DRM(lock)( struct inode *inode, struct file *filp,
	       unsigned int cmd, unsigned long arg )
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->dev;
        DECLARE_WAITQUEUE( entry, current );
        drm_lock_t lock;
        int ret = 0;
#if __HAVE_MULTIPLE_DMA_QUEUES
	drm_queue_t *q;
#endif
#if __HAVE_DMA_HISTOGRAM
        cycles_t start;

        dev->lck_start = start = get_cycles();
#endif

        if ( copy_from_user( &lock, (drm_lock_t *)arg, sizeof(lock) ) )
		return -EFAULT;

	/* User space may never take the kernel's own context. */
        if ( lock.context == DRM_KERNEL_CONTEXT ) {
                DRM_ERROR( "Process %d using kernel context %d\n",
			   current->pid, lock.context );
                return -EINVAL;
        }

        DRM_DEBUG( "%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n",
		   lock.context, current->pid,
		   dev->lock.hw_lock->lock, lock.flags );

#if __HAVE_DMA_QUEUE
        if ( lock.context < 0 )
                return -EINVAL;
#elif __HAVE_MULTIPLE_DMA_QUEUES
        if ( lock.context < 0 || lock.context >= dev->queue_count )
                return -EINVAL;
	q = dev->queuelist[lock.context];
#endif

#if __HAVE_DMA_FLUSH
	ret = DRM(flush_block_and_flush)( dev, lock.context, lock.flags );
#endif
	/* Sleep on the lock wait queue until the lock is taken, the device
	 * disappears, or a signal interrupts us.
	 */
        if ( !ret ) {
                add_wait_queue( &dev->lock.lock_queue, &entry );
                for (;;) {
                        current->state = TASK_INTERRUPTIBLE;
                        if ( !dev->lock.hw_lock ) {
                                /* Device has been unregistered */
                                ret = -EINTR;
                                break;
                        }
                        if ( DRM(lock_take)( &dev->lock.hw_lock->lock,
					     lock.context ) ) {
                                dev->lock.pid       = current->pid;
                                dev->lock.lock_time = jiffies;
                                atomic_inc( &dev->counts[_DRM_STAT_LOCKS] );
                                break;  /* Got lock */
                        }

                                /* Contention */
                        schedule();
                        if ( signal_pending( current ) ) {
                                ret = -ERESTARTSYS;
                                break;
                        }
                }
                current->state = TASK_RUNNING;
                remove_wait_queue( &dev->lock.lock_queue, &entry );
        }

#if __HAVE_DMA_FLUSH
	DRM(flush_unblock)( dev, lock.context, lock.flags ); /* cleanup phase */
#endif

	/* Lock acquired: block job-control signals (released again by
	 * DRM(unlock) via unblock_all_signals) and run the driver hooks.
	 */
        if ( !ret ) {
		sigemptyset( &dev->sigmask );
		sigaddset( &dev->sigmask, SIGSTOP );
		sigaddset( &dev->sigmask, SIGTSTP );
		sigaddset( &dev->sigmask, SIGTTIN );
		sigaddset( &dev->sigmask, SIGTTOU );
		dev->sigdata.context = lock.context;
		dev->sigdata.lock    = dev->lock.hw_lock;
		block_all_signals( DRM(notifier),
				   &dev->sigdata, &dev->sigmask );

#if __HAVE_DMA_READY
                if ( lock.flags & _DRM_LOCK_READY ) {
			DRIVER_DMA_READY();
		}
#endif
#if __HAVE_DMA_QUIESCENT
                if ( lock.flags & _DRM_LOCK_QUIESCENT ) {
			DRIVER_DMA_QUIESCENT();
		}
#endif
#if __HAVE_KERNEL_CTX_SWITCH
		if ( dev->last_context != lock.context ) {
			DRM(context_switch)(dev, dev->last_context,
					    lock.context);
		}
#endif
        }

        DRM_DEBUG( "%d %s\n", lock.context, ret ? "interrupted" : "has lock" );

#if __HAVE_DMA_HISTOGRAM
        atomic_inc(&dev->histo.lacq[DRM(histogram_slot)(get_cycles()-start)]);
#endif
        return ret;
}
1029 
1030 
/* DRM_IOCTL_UNLOCK handler: releases the hardware lock taken by
 * DRM(lock) and re-enables the signals blocked there.  With kernel
 * context switching the lock word is handed back via cmpxchg; otherwise
 * it is transferred to the kernel context and freed.
 */
int DRM(unlock)( struct inode *inode, struct file *filp,
		 unsigned int cmd, unsigned long arg )
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;
	drm_lock_t lock;

	if ( copy_from_user( &lock, (drm_lock_t *)arg, sizeof(lock) ) )
		return -EFAULT;

	/* User space may never manipulate the kernel's own context. */
	if ( lock.context == DRM_KERNEL_CONTEXT ) {
		DRM_ERROR( "Process %d using kernel context %d\n",
			   current->pid, lock.context );
		return -EINVAL;
	}

	atomic_inc( &dev->counts[_DRM_STAT_UNLOCKS] );

#if __HAVE_KERNEL_CTX_SWITCH
	/* We no longer really hold it, but if we are the next
	 * agent to request it then we should just be able to
	 * take it immediately and not eat the ioctl.
	 */
	dev->lock.pid = 0;
	{
		/* Atomically store the context id into the lock word
		 * (retrying until no concurrent update intervenes).
		 */
		__volatile__ unsigned int *plock = &dev->lock.hw_lock->lock;
		unsigned int old, new, prev, ctx;

		ctx = lock.context;
		do {
			old  = *plock;
			new  = ctx;
			prev = cmpxchg(plock, old, new);
		} while (prev != old);
	}
	wake_up_interruptible(&dev->lock.lock_queue);
#else
	DRM(lock_transfer)( dev, &dev->lock.hw_lock->lock,
			    DRM_KERNEL_CONTEXT );
#if __HAVE_DMA_SCHEDULE
	DRM(dma_schedule)( dev, 1 );
#endif

	/* FIXME: Do we ever really need to check this???
	 */
	if ( 1 /* !dev->context_flag */ ) {
		if ( DRM(lock_free)( dev, &dev->lock.hw_lock->lock,
				     DRM_KERNEL_CONTEXT ) ) {
			DRM_ERROR( "\n" );
		}
	}
#endif /* !__HAVE_KERNEL_CTX_SWITCH */

	/* Undo the block_all_signals() done when the lock was taken. */
	unblock_all_signals();
	return 0;
}
1087