1 /* $Id: ffb_drv.c,v 1.14 2001/05/24 12:01:47 davem Exp $
2  * ffb_drv.c: Creator/Creator3D direct rendering driver.
3  *
4  * Copyright (C) 2000 David S. Miller (davem@redhat.com)
5  */
6 
7 #include "drmP.h"
8 
9 #include <linux/sched.h>
10 #include <linux/smp_lock.h>
11 #include <asm/shmparam.h>
12 #include <asm/oplib.h>
13 #include <asm/upa.h>
14 
15 #include "ffb_drv.h"
16 
17 #define FFB_NAME	"ffb"
18 #define FFB_DESC	"Creator/Creator3D"
19 #define FFB_DATE	"20000517"
20 #define FFB_MAJOR	0
21 #define FFB_MINOR	0
22 #define FFB_PATCHLEVEL	1
23 
24 /* Forward declarations. */
25 int  ffb_init(void);
26 void ffb_cleanup(void);
27 static int  ffb_version(struct inode *inode, struct file *filp,
28 			unsigned int cmd, unsigned long arg);
29 static int  ffb_open(struct inode *inode, struct file *filp);
30 static int  ffb_release(struct inode *inode, struct file *filp);
31 static int  ffb_ioctl(struct inode *inode, struct file *filp,
32 		      unsigned int cmd, unsigned long arg);
33 static int  ffb_lock(struct inode *inode, struct file *filp,
34 		     unsigned int cmd, unsigned long arg);
35 static int  ffb_unlock(struct inode *inode, struct file *filp,
36 		       unsigned int cmd, unsigned long arg);
37 static int ffb_mmap(struct file *filp, struct vm_area_struct *vma);
38 static unsigned long ffb_get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
39 
40 /* From ffb_context.c */
41 extern int ffb_resctx(struct inode *, struct file *, unsigned int, unsigned long);
42 extern int ffb_addctx(struct inode *, struct file *, unsigned int, unsigned long);
43 extern int ffb_modctx(struct inode *, struct file *, unsigned int, unsigned long);
44 extern int ffb_getctx(struct inode *, struct file *, unsigned int, unsigned long);
45 extern int ffb_switchctx(struct inode *, struct file *, unsigned int, unsigned long);
46 extern int ffb_newctx(struct inode *, struct file *, unsigned int, unsigned long);
47 extern int ffb_rmctx(struct inode *, struct file *, unsigned int, unsigned long);
48 extern int ffb_context_switch(drm_device_t *, int, int);
49 
/* File operations for each FFB misc device.  open/release/ioctl/mmap and
 * get_unmapped_area are FFB-specific; the rest are generic DRM helpers.
 * (Old GNU "label:" initializer syntax, used consistently in this file.)
 */
static struct file_operations ffb_fops = {
	owner:			THIS_MODULE,
	open:			ffb_open,
	flush:			drm_flush,
	release:		ffb_release,
	ioctl:			ffb_ioctl,
	mmap:			ffb_mmap,
	read:			drm_read,
	fasync:			drm_fasync,
	poll:			drm_poll,
	get_unmapped_area:	ffb_get_unmapped_area,
};
62 
63 /* This is just a template, we make a new copy for each FFB
64  * we discover at init time so that each one gets a unique
65  * misc device minor number.
66  */
static struct miscdevice ffb_misc = {
	minor:	MISC_DYNAMIC_MINOR,	/* each copied instance gets its own minor */
	name:	FFB_NAME,
	fops:	&ffb_fops,
};
72 
/* Ioctl dispatch table, indexed by DRM ioctl number.  Based on how
 * ffb_ioctl() tests the descriptor, the two flags after the handler appear
 * to be (auth_needed, root_only) -- confirm against drm_ioctl_desc_t in
 * drmP.h before relying on this.
 */
static drm_ioctl_desc_t ffb_ioctls[] = {
	[DRM_IOCTL_NR(DRM_IOCTL_VERSION)]    = { ffb_version,	  0, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE)] = { drm_getunique,	  0, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_GET_MAGIC)]  = { drm_getmagic,	  0, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_IRQ_BUSID)]  = { drm_irq_busid,	  0, 1 }, /* XXX */

	[DRM_IOCTL_NR(DRM_IOCTL_SET_UNIQUE)] = { drm_setunique,	  1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_BLOCK)]	     = { drm_block,	  1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_UNBLOCK)]    = { drm_unblock,	  1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_AUTH_MAGIC)] = { drm_authmagic,	  1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP)]    = { drm_addmap,	  1, 1 },

	/* The implementation is currently a nop just like on tdfx.
	 * Later we can do something more clever. -DaveM
	 */
	[DRM_IOCTL_NR(DRM_IOCTL_ADD_CTX)]    = { ffb_addctx,	  1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_RM_CTX)]     = { ffb_rmctx,	  1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_MOD_CTX)]    = { ffb_modctx,	  1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_GET_CTX)]    = { ffb_getctx,	  1, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_SWITCH_CTX)] = { ffb_switchctx,	  1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_NEW_CTX)]    = { ffb_newctx,	  1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_RES_CTX)]    = { ffb_resctx,	  1, 0 },

	[DRM_IOCTL_NR(DRM_IOCTL_ADD_DRAW)]   = { drm_adddraw,	  1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_RM_DRAW)]    = { drm_rmdraw,	  1, 1 },

	[DRM_IOCTL_NR(DRM_IOCTL_LOCK)]	     = { ffb_lock,	  1, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_UNLOCK)]     = { ffb_unlock,	  1, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_FINISH)]     = { drm_finish,	  1, 0 },
};
103 #define FFB_IOCTL_COUNT DRM_ARRAY_SIZE(ffb_ioctls)
104 
105 #ifdef MODULE
106 static char *ffb = NULL;
107 #endif
108 
109 MODULE_AUTHOR("David S. Miller (davem@redhat.com)");
110 MODULE_DESCRIPTION("Sun Creator/Creator3D DRI");
111 MODULE_LICENSE("GPL");
112 
/* Tear down all software state for one FFB device.
 *
 * Called when the last open file reference goes away (ffb_release) and
 * again at module unload (ffb_cleanup).  Frees the device/unique name
 * strings, the magic-cookie hash chains, the (debug-only) vma list and
 * every registered map, then wakes anyone still sleeping on the hardware
 * lock queue.  Serialized by dev->struct_sem.  Always returns 0.
 *
 * Fix vs. original: removed the stray ";" after the switch body (null
 * statement).
 */
static int ffb_takedown(drm_device_t *dev)
{
	int		  i;
	drm_magic_entry_t *pt, *next;
	drm_map_t	  *map;
	drm_vma_entry_t	  *vma, *vma_next;

	DRM_DEBUG("\n");

	down(&dev->struct_sem);
	del_timer(&dev->timer);

	if (dev->devname) {
		drm_free(dev->devname, strlen(dev->devname)+1, DRM_MEM_DRIVER);
		dev->devname = NULL;
	}

	if (dev->unique) {
		drm_free(dev->unique, strlen(dev->unique)+1, DRM_MEM_DRIVER);
		dev->unique = NULL;
		dev->unique_len = 0;
	}

	/* Clear pid list */
	for (i = 0; i < DRM_HASH_SIZE; i++) {
		for (pt = dev->magiclist[i].head; pt; pt = next) {
			next = pt->next;
			drm_free(pt, sizeof(*pt), DRM_MEM_MAGIC);
		}
		dev->magiclist[i].head = dev->magiclist[i].tail = NULL;
	}

	/* Clear vma list (only built for debugging) */
	if (dev->vmalist) {
		for (vma = dev->vmalist; vma; vma = vma_next) {
			vma_next = vma->next;
			drm_free(vma, sizeof(*vma), DRM_MEM_VMAS);
		}
		dev->vmalist = NULL;
	}

	/* Clear map area information */
	if (dev->maplist) {
		for (i = 0; i < dev->map_count; i++) {
			map = dev->maplist[i];
			switch (map->type) {
			case _DRM_REGISTERS:
			case _DRM_FRAME_BUFFER:
				drm_ioremapfree(map->handle, map->size, dev);
				break;

			case _DRM_SHM:
				drm_free_pages((unsigned long)map->handle,
					       drm_order(map->size)
					       - PAGE_SHIFT,
					       DRM_MEM_SAREA);
				break;

			default:
				break;
			}

			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
		}

		drm_free(dev->maplist,
			 dev->map_count * sizeof(*dev->maplist),
			 DRM_MEM_MAPS);
		dev->maplist   = NULL;
		dev->map_count = 0;
	}

	/* The lock itself lived in the SAREA, which was freed above with
	 * the _DRM_SHM map; just forget the pointer and wake any waiters.
	 */
	if (dev->lock.hw_lock) {
		dev->lock.hw_lock    = NULL; /* SHM removed */
		dev->lock.pid	     = 0;
		wake_up_interruptible(&dev->lock.lock_queue);
	}
	up(&dev->struct_sem);

	return 0;
}
194 
/* Table of probed FFB devices, one slot per instance.  A slot may be
 * NULL when probing that instance failed (see ffb_init_one()).
 */
drm_device_t **ffb_dev_table;
static int ffb_dev_table_size;
197 
/* Read the board strap bits at card base + 0x00200000 to identify which
 * flavor of FFB this is, record it in ffb_priv->ffb_type and log it.
 *
 * Fix vs. original: removed the stray ";" after the switch body.
 */
static void get_ffb_type(ffb_dev_priv_t *ffb_priv, int instance)
{
	volatile unsigned char *strap_bits;
	unsigned char val;

	strap_bits = (volatile unsigned char *)
		(ffb_priv->card_phys_base + 0x00200000UL);

	/* Don't ask, you have to read the value twice for whatever
	 * reason to get correct contents.
	 */
	val = upa_readb(strap_bits);
	val = upa_readb(strap_bits);
	switch (val & 0x78) {
	case (0x0 << 5) | (0x0 << 3):
		ffb_priv->ffb_type = ffb1_prototype;
		printk("ffb%d: Detected FFB1 pre-FCS prototype\n", instance);
		break;
	case (0x0 << 5) | (0x1 << 3):
		ffb_priv->ffb_type = ffb1_standard;
		printk("ffb%d: Detected FFB1\n", instance);
		break;
	case (0x0 << 5) | (0x3 << 3):
		ffb_priv->ffb_type = ffb1_speedsort;
		printk("ffb%d: Detected FFB1-SpeedSort\n", instance);
		break;
	case (0x1 << 5) | (0x0 << 3):
		ffb_priv->ffb_type = ffb2_prototype;
		printk("ffb%d: Detected FFB2/vertical pre-FCS prototype\n", instance);
		break;
	case (0x1 << 5) | (0x1 << 3):
		ffb_priv->ffb_type = ffb2_vertical;
		printk("ffb%d: Detected FFB2/vertical\n", instance);
		break;
	case (0x1 << 5) | (0x2 << 3):
		ffb_priv->ffb_type = ffb2_vertical_plus;
		printk("ffb%d: Detected FFB2+/vertical\n", instance);
		break;
	case (0x2 << 5) | (0x0 << 3):
		ffb_priv->ffb_type = ffb2_horizontal;
		printk("ffb%d: Detected FFB2/horizontal\n", instance);
		break;
	case (0x2 << 5) | (0x2 << 3):
		/* NOTE(review): the message says FFB2+ but the recorded type
		 * is plain ffb2_horizontal.  If ffb_drv.h declares a "plus"
		 * horizontal variant this should probably use it -- behavior
		 * intentionally kept as-is here; confirm before changing.
		 */
		ffb_priv->ffb_type = ffb2_horizontal;
		printk("ffb%d: Detected FFB2+/horizontal\n", instance);
		break;
	default:
		/* Unknown strap value: fall back to a sane default. */
		ffb_priv->ffb_type = ffb2_vertical;
		printk("ffb%d: Unknown boardID[%08x], assuming FFB2\n", instance, val);
		break;
	}
}
250 
ffb_apply_upa_parent_ranges(int parent,struct linux_prom64_registers * regs)251 static void __init ffb_apply_upa_parent_ranges(int parent, struct linux_prom64_registers *regs)
252 {
253 	struct linux_prom64_ranges ranges[PROMREG_MAX];
254 	char name[128];
255 	int len, i;
256 
257 	prom_getproperty(parent, "name", name, sizeof(name));
258 	if (strcmp(name, "upa") != 0)
259 		return;
260 
261 	len = prom_getproperty(parent, "ranges", (void *) ranges, sizeof(ranges));
262 	if (len <= 0)
263 		return;
264 
265 	len /= sizeof(struct linux_prom64_ranges);
266 	for (i = 0; i < len; i++) {
267 		struct linux_prom64_ranges *rng = &ranges[i];
268 		u64 phys_addr = regs->phys_addr;
269 
270 		if (phys_addr >= rng->ot_child_base &&
271 		    phys_addr < (rng->ot_child_base + rng->or_size)) {
272 			regs->phys_addr -= rng->ot_child_base;
273 			regs->phys_addr += rng->ot_parent_base;
274 			return;
275 		}
276 	}
277 
278 	return;
279 }
280 
/* Allocate and initialize the drm_device_t (with trailing ffb_dev_priv_t)
 * for one "SUNW,ffb" PROM node and register its misc device.
 *
 * @prom_node:   PROM node of the FFB card itself.
 * @parent_node: its parent node (possibly a "upa" bus, used to translate
 *               the "reg" property address).
 * @instance:    index into ffb_dev_table[].
 *
 * Returns 0 on success or a negative errno.  NOTE(review): on the early
 * failure paths (kmalloc / missing "reg" property) ffb_dev_table[instance]
 * is never written, so unless the table was zero-initialized the slot is
 * left with indeterminate contents -- confirm against ffb_init_dev_table().
 */
static int __init ffb_init_one(int prom_node, int parent_node, int instance)
{
	struct linux_prom64_registers regs[2*PROMREG_MAX];
	drm_device_t *dev;
	ffb_dev_priv_t *ffb_priv;
	int ret, i;

	dev = kmalloc(sizeof(drm_device_t) + sizeof(ffb_dev_priv_t), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;

	memset(dev, 0, sizeof(*dev));
	spin_lock_init(&dev->count_lock);
	sema_init(&dev->struct_sem, 1);

	/* The FFB private area lives directly behind the drm_device_t. */
	ffb_priv = (ffb_dev_priv_t *) (dev + 1);
	ffb_priv->prom_node = prom_node;
	if (prom_getproperty(ffb_priv->prom_node, "reg",
			     (void *)regs, sizeof(regs)) <= 0) {
		kfree(dev);
		return -EINVAL;
	}
	ffb_apply_upa_parent_ranges(parent_node, &regs[0]);
	ffb_priv->card_phys_base = regs[0].phys_addr;
	/* FBC register block sits at +0x00600000 from the card base. */
	ffb_priv->regs = (ffb_fbcPtr)
		(regs[0].phys_addr + 0x00600000UL);
	get_ffb_type(ffb_priv, instance);
	for (i = 0; i < FFB_MAX_CTXS; i++)
		ffb_priv->hw_state[i] = NULL;

	ffb_dev_table[instance] = dev;

#ifdef MODULE
	drm_parse_options(ffb);
#endif

	/* Register a per-card copy of the misc device template so each
	 * card gets its own dynamic minor number.
	 */
	memcpy(&ffb_priv->miscdev, &ffb_misc, sizeof(ffb_misc));
	ret = misc_register(&ffb_priv->miscdev);
	if (ret) {
		ffb_dev_table[instance] = NULL;
		kfree(dev);
		return ret;
	}

	dev->device = MKDEV(MISC_MAJOR, ffb_priv->miscdev.minor);
	dev->name = FFB_NAME;

	drm_mem_init();
	drm_proc_init(dev);

	DRM_INFO("Initialized %s %d.%d.%d %s on minor %d at %016lx\n",
		 FFB_NAME,
		 FFB_MAJOR,
		 FFB_MINOR,
		 FFB_PATCHLEVEL,
		 FFB_DATE,
		 ffb_priv->miscdev.minor,
		 ffb_priv->card_phys_base);

	return 0;
}
342 
ffb_count_siblings(int root)343 static int __init ffb_count_siblings(int root)
344 {
345 	int node, child, count = 0;
346 
347 	child = prom_getchild(root);
348 	for (node = prom_searchsiblings(child, "SUNW,ffb"); node;
349 	     node = prom_searchsiblings(prom_getsibling(node), "SUNW,ffb"))
350 		count++;
351 
352 	return count;
353 }
354 
ffb_init_dev_table(void)355 static int __init ffb_init_dev_table(void)
356 {
357 	int root, total;
358 
359 	total = ffb_count_siblings(prom_root_node);
360 	root = prom_getchild(prom_root_node);
361 	for (root = prom_searchsiblings(root, "upa"); root;
362 	     root = prom_searchsiblings(prom_getsibling(root), "upa"))
363 		total += ffb_count_siblings(root);
364 
365 	if (!total)
366 		return -ENODEV;
367 
368 	ffb_dev_table = kmalloc(sizeof(drm_device_t *) * total, GFP_KERNEL);
369 	if (!ffb_dev_table)
370 		return -ENOMEM;
371 
372 	ffb_dev_table_size = total;
373 
374 	return 0;
375 }
376 
ffb_scan_siblings(int root,int instance)377 static int __init ffb_scan_siblings(int root, int instance)
378 {
379 	int node, child;
380 
381 	child = prom_getchild(root);
382 	for (node = prom_searchsiblings(child, "SUNW,ffb"); node;
383 	     node = prom_searchsiblings(prom_getsibling(node), "SUNW,ffb")) {
384 		ffb_init_one(node, root, instance);
385 		instance++;
386 	}
387 
388 	return instance;
389 }
390 
ffb_init(void)391 int __init ffb_init(void)
392 {
393 	int root, instance, ret;
394 
395 	ret = ffb_init_dev_table();
396 	if (ret)
397 		return ret;
398 
399 	instance = ffb_scan_siblings(prom_root_node, 0);
400 
401 	root = prom_getchild(prom_root_node);
402 	for (root = prom_searchsiblings(root, "upa"); root;
403 	     root = prom_searchsiblings(prom_getsibling(root), "upa"))
404 		instance = ffb_scan_siblings(root, instance);
405 
406 	return 0;
407 }
408 
/* Module exit: deregister every probed FFB, tear its DRM state back down
 * and release the device table.
 */
void __exit ffb_cleanup(void)
{
	int instance;

	DRM_DEBUG("\n");

	drm_proc_cleanup();
	for (instance = 0; instance < ffb_dev_table_size; instance++) {
		drm_device_t *dev = ffb_dev_table[instance];
		ffb_dev_priv_t *ffb_priv;

		/* Slot is NULL when probing this instance failed. */
		if (!dev)
			continue;

		/* Private area lives directly behind the drm_device_t. */
		ffb_priv = (ffb_dev_priv_t *) (dev + 1);
		if (misc_deregister(&ffb_priv->miscdev)) {
			DRM_ERROR("Cannot unload module\n");
		} else {
			DRM_INFO("Module unloaded\n");
		}
		ffb_takedown(dev);
		kfree(dev);
		ffb_dev_table[instance] = NULL;
	}
	kfree(ffb_dev_table);
	ffb_dev_table = NULL;
	ffb_dev_table_size = 0;
}
437 
ffb_version(struct inode * inode,struct file * filp,unsigned int cmd,unsigned long arg)438 static int ffb_version(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg)
439 {
440 	drm_version_t version;
441 	int len, ret;
442 
443 	ret = copy_from_user(&version, (drm_version_t *)arg, sizeof(version));
444 	if (ret)
445 		return -EFAULT;
446 
447 	version.version_major		= FFB_MAJOR;
448 	version.version_minor		= FFB_MINOR;
449 	version.version_patchlevel	= FFB_PATCHLEVEL;
450 
451 	len = strlen(FFB_NAME);
452 	if (len > version.name_len)
453 		len = version.name_len;
454 	version.name_len = len;
455 	if (len && version.name) {
456 		ret = copy_to_user(version.name, FFB_NAME, len);
457 		if (ret)
458 			return -EFAULT;
459 	}
460 
461 	len = strlen(FFB_DATE);
462 	if (len > version.date_len)
463 		len = version.date_len;
464 	version.date_len = len;
465 	if (len && version.date) {
466 		ret = copy_to_user(version.date, FFB_DATE, len);
467 		if (ret)
468 			return -EFAULT;
469 	}
470 
471 	len = strlen(FFB_DESC);
472 	if (len > version.desc_len)
473 		len = version.desc_len;
474 	version.desc_len = len;
475 	if (len && version.desc) {
476 		ret = copy_to_user(version.desc, FFB_DESC, len);
477 		if (ret)
478 			return -EFAULT;
479 	}
480 
481 	ret = copy_to_user((drm_version_t *) arg, &version, sizeof(version));
482 	if (ret)
483 		ret = -EFAULT;
484 
485 	return ret;
486 }
487 
/* Reset all per-device software state: usage counters, magic-cookie hash,
 * map/vma lists, lock state and the read buffer.  Called from ffb_open()
 * when the first file reference is taken (open_count 0 -> 1).
 * Always returns 0.
 */
static int ffb_setup(drm_device_t *dev)
{
	int i;

	atomic_set(&dev->ioctl_count, 0);
	atomic_set(&dev->vma_count, 0);
	dev->buf_use = 0;
	atomic_set(&dev->buf_alloc, 0);

	/* Statistics counters. */
	atomic_set(&dev->total_open, 0);
	atomic_set(&dev->total_close, 0);
	atomic_set(&dev->total_ioctl, 0);
	atomic_set(&dev->total_irq, 0);
	atomic_set(&dev->total_ctx, 0);
	atomic_set(&dev->total_locks, 0);
	atomic_set(&dev->total_unlocks, 0);
	atomic_set(&dev->total_contends, 0);
	atomic_set(&dev->total_sleeps, 0);

	/* Magic-cookie authentication hash chains. */
	for (i = 0; i < DRM_HASH_SIZE; i++) {
		dev->magiclist[i].head = NULL;
		dev->magiclist[i].tail = NULL;
	}

	dev->maplist	    = NULL;
	dev->map_count	    = 0;
	dev->vmalist	    = NULL;
	dev->lock.hw_lock   = NULL;
	init_waitqueue_head(&dev->lock.lock_queue);
	dev->queue_count    = 0;
	dev->queue_reserved = 0;
	dev->queue_slots    = 0;
	dev->queuelist	    = NULL;
	dev->irq	    = 0;
	dev->context_flag   = 0;
	dev->interrupt_flag = 0;
	dev->dma            = 0;
	dev->dma_flag	    = 0;
	dev->last_context   = 0;
	dev->last_switch    = 0;
	dev->last_checked   = 0;
	init_timer(&dev->timer);
	init_waitqueue_head(&dev->context_wait);

	dev->ctx_start	    = 0;
	dev->lck_start	    = 0;

	/* Kernel-to-user message ring read via drm_read()/drm_poll(). */
	dev->buf_rp	  = dev->buf;
	dev->buf_wp	  = dev->buf;
	dev->buf_end	  = dev->buf + DRM_BSZ;
	dev->buf_async	  = NULL;
	init_waitqueue_head(&dev->buf_readers);
	init_waitqueue_head(&dev->buf_writers);

	return 0;
}
544 
ffb_open(struct inode * inode,struct file * filp)545 static int ffb_open(struct inode *inode, struct file *filp)
546 {
547 	drm_device_t *dev;
548 	int minor, i;
549 	int ret = 0;
550 
551 	minor = MINOR(inode->i_rdev);
552 	for (i = 0; i < ffb_dev_table_size; i++) {
553 		ffb_dev_priv_t *ffb_priv;
554 
555 		ffb_priv = (ffb_dev_priv_t *) (ffb_dev_table[i] + 1);
556 
557 		if (ffb_priv->miscdev.minor == minor)
558 			break;
559 	}
560 
561 	if (i >= ffb_dev_table_size)
562 		return -EINVAL;
563 
564 	dev = ffb_dev_table[i];
565 	if (!dev)
566 		return -EINVAL;
567 
568 	DRM_DEBUG("open_count = %d\n", dev->open_count);
569 	ret = drm_open_helper(inode, filp, dev);
570 	if (!ret) {
571 		atomic_inc(&dev->total_open);
572 		spin_lock(&dev->count_lock);
573 		if (!dev->open_count++) {
574 			spin_unlock(&dev->count_lock);
575 			return ffb_setup(dev);
576 		}
577 		spin_unlock(&dev->count_lock);
578 	}
579 
580 	return ret;
581 }
582 
/* Close a file handle on the FFB device.
 *
 * If the closing process still holds the hardware lock, free the hw
 * context state saved for its context so it is not leaked.  When the last
 * reference goes away, tear the device down via ffb_takedown() -- unless
 * ioctls are still in flight, in which case -EBUSY.  Runs under the BKL.
 */
static int ffb_release(struct inode *inode, struct file *filp)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev;
	int ret = 0;

	lock_kernel();
	dev = priv->dev;
	DRM_DEBUG("open_count = %d\n", dev->open_count);
	if (dev->lock.hw_lock != NULL
	    && _DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)
	    && dev->lock.pid == current->pid) {
		ffb_dev_priv_t *fpriv = (ffb_dev_priv_t *) (dev + 1);
		int context = _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock);
		int idx;

		/* We have to free up the rogue hw context state
		 * holding error or else we will leak it.
		 */
		idx = context - 1;	/* hw_state[] is indexed from context 1 */
		if (fpriv->hw_state[idx] != NULL) {
			kfree(fpriv->hw_state[idx]);
			fpriv->hw_state[idx] = NULL;
		}
	}

	ret = drm_release(inode, filp);

	if (!ret) {
		atomic_inc(&dev->total_close);
		spin_lock(&dev->count_lock);
		if (!--dev->open_count) {
			/* Last reference: refuse teardown while ioctls are
			 * still running or the device is blocked.
			 */
			if (atomic_read(&dev->ioctl_count) || dev->blocked) {
				DRM_ERROR("Device busy: %d %d\n",
					  atomic_read(&dev->ioctl_count),
					  dev->blocked);
				spin_unlock(&dev->count_lock);
				unlock_kernel();
				return -EBUSY;
			}
			spin_unlock(&dev->count_lock);
			ret = ffb_takedown(dev);
			unlock_kernel();
			return ret;
		}
		spin_unlock(&dev->count_lock);
	}

	unlock_kernel();
	return ret;
}
634 
ffb_ioctl(struct inode * inode,struct file * filp,unsigned int cmd,unsigned long arg)635 static int ffb_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg)
636 {
637 	int		 nr	 = DRM_IOCTL_NR(cmd);
638 	drm_file_t	 *priv	 = filp->private_data;
639 	drm_device_t	 *dev	 = priv->dev;
640 	drm_ioctl_desc_t *ioctl;
641 	drm_ioctl_t	 *func;
642 	int		 ret;
643 
644 	atomic_inc(&dev->ioctl_count);
645 	atomic_inc(&dev->total_ioctl);
646 	++priv->ioctl_count;
647 
648 	DRM_DEBUG("pid = %d, cmd = 0x%02x, nr = 0x%02x, dev 0x%x, auth = %d\n",
649 		  current->pid, cmd, nr, dev->device, priv->authenticated);
650 
651 	if (nr >= FFB_IOCTL_COUNT) {
652 		ret = -EINVAL;
653 	} else {
654 		ioctl	  = &ffb_ioctls[nr];
655 		func	  = ioctl->func;
656 
657 		if (!func) {
658 			DRM_DEBUG("no function\n");
659 			ret = -EINVAL;
660 		} else if ((ioctl->root_only && !capable(CAP_SYS_ADMIN))
661 			    || (ioctl->auth_needed && !priv->authenticated)) {
662 			ret = -EACCES;
663 		} else {
664 			ret = (func)(inode, filp, cmd, arg);
665 		}
666 	}
667 
668 	atomic_dec(&dev->ioctl_count);
669 
670 	return ret;
671 }
672 
/* DRM_IOCTL_LOCK handler: acquire the hardware lock for lock.context.
 *
 * Spins (yielding, interruptible) until drm_lock_take() succeeds or the
 * device disappears.  Once held, job-control signals are blocked via
 * block_all_signals() so the client cannot be stopped while holding the
 * hardware, and a context switch is performed if the lock owner changed.
 * Returns 0, -EINVAL for the kernel context, -EINTR or -ERESTARTSYS.
 */
static int ffb_lock(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg)
{
        drm_file_t        *priv	= filp->private_data;
        drm_device_t      *dev	= priv->dev;
        DECLARE_WAITQUEUE(entry, current);
        int               ret	= 0;
        drm_lock_t        lock;

	ret = copy_from_user(&lock, (drm_lock_t *)arg, sizeof(lock));
	if (ret)
		return -EFAULT;

        if (lock.context == DRM_KERNEL_CONTEXT) {
                DRM_ERROR("Process %d using kernel context %d\n",
                          current->pid, lock.context);
                return -EINVAL;
        }

        DRM_DEBUG("%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n",
                  lock.context, current->pid, dev->lock.hw_lock->lock,
                  lock.flags);

	add_wait_queue(&dev->lock.lock_queue, &entry);
	for (;;) {
		if (!dev->lock.hw_lock) {
			/* Device has been unregistered */
			ret = -EINTR;
			break;
		}
		if (drm_lock_take(&dev->lock.hw_lock->lock,
				  lock.context)) {
			dev->lock.pid       = current->pid;
			dev->lock.lock_time = jiffies;
			atomic_inc(&dev->total_locks);
			break;  /* Got lock */
		}

		/* Contention */
		atomic_inc(&dev->total_sleeps);
		current->state = TASK_INTERRUPTIBLE;
		yield();
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
	}
	current->state = TASK_RUNNING;
	remove_wait_queue(&dev->lock.lock_queue, &entry);

        if (!ret) {
		/* Block job-control signals while the hardware lock is
		 * held; drm_notifier arbitrates delivery.
		 */
		sigemptyset(&dev->sigmask);
		sigaddset(&dev->sigmask, SIGSTOP);
		sigaddset(&dev->sigmask, SIGTSTP);
		sigaddset(&dev->sigmask, SIGTTIN);
		sigaddset(&dev->sigmask, SIGTTOU);
		dev->sigdata.context = lock.context;
		dev->sigdata.lock = dev->lock.hw_lock;
		block_all_signals(drm_notifier, &dev->sigdata, &dev->sigmask);

		/* New owner: switch the hardware context over. */
		if (dev->last_context != lock.context)
			ffb_context_switch(dev, dev->last_context, lock.context);
	}

        DRM_DEBUG("%d %s\n", lock.context, ret ? "interrupted" : "has lock");

        return ret;
}
740 
ffb_unlock(struct inode * inode,struct file * filp,unsigned int cmd,unsigned long arg)741 int ffb_unlock(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg)
742 {
743 	drm_file_t	  *priv	  = filp->private_data;
744 	drm_device_t	  *dev	  = priv->dev;
745 	drm_lock_t	  lock;
746 	unsigned int old, new, prev, ctx;
747 	int ret;
748 
749 	ret = copy_from_user(&lock, (drm_lock_t *)arg, sizeof(lock));
750 	if (ret)
751 		return -EFAULT;
752 
753 	if ((ctx = lock.context) == DRM_KERNEL_CONTEXT) {
754 		DRM_ERROR("Process %d using kernel context %d\n",
755 			  current->pid, lock.context);
756 		return -EINVAL;
757 	}
758 
759 	DRM_DEBUG("%d frees lock (%d holds)\n",
760 		  lock.context,
761 		  _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
762 	atomic_inc(&dev->total_unlocks);
763 	if (_DRM_LOCK_IS_CONT(dev->lock.hw_lock->lock))
764 		atomic_inc(&dev->total_contends);
765 
766 	/* We no longer really hold it, but if we are the next
767 	 * agent to request it then we should just be able to
768 	 * take it immediately and not eat the ioctl.
769 	 */
770 	dev->lock.pid = 0;
771 	{
772 		__volatile__ unsigned int *plock = &dev->lock.hw_lock->lock;
773 
774 		do {
775 			old  = *plock;
776 			new  = ctx;
777 			prev = cmpxchg(plock, old, new);
778 		} while (prev != old);
779 	}
780 
781 	wake_up_interruptible(&dev->lock.lock_queue);
782 
783 	unblock_all_signals();
784 	return 0;
785 }
786 
787 extern struct vm_operations_struct drm_vm_ops;
788 extern struct vm_operations_struct drm_vm_shm_ops;
789 extern struct vm_operations_struct drm_vm_shm_lock_ops;
790 
ffb_mmap(struct file * filp,struct vm_area_struct * vma)791 static int ffb_mmap(struct file *filp, struct vm_area_struct *vma)
792 {
793 	drm_file_t	*priv	= filp->private_data;
794 	drm_device_t	*dev	= priv->dev;
795 	drm_map_t	*map	= NULL;
796 	ffb_dev_priv_t	*ffb_priv;
797 	int		i, minor;
798 
799 	DRM_DEBUG("start = 0x%lx, end = 0x%lx, offset = 0x%lx\n",
800 		  vma->vm_start, vma->vm_end, VM_OFFSET(vma));
801 
802 	minor = MINOR(filp->f_dentry->d_inode->i_rdev);
803 	ffb_priv = NULL;
804 	for (i = 0; i < ffb_dev_table_size; i++) {
805 		ffb_priv = (ffb_dev_priv_t *) (ffb_dev_table[i] + 1);
806 		if (ffb_priv->miscdev.minor == minor)
807 			break;
808 	}
809 	if (i >= ffb_dev_table_size)
810 		return -EINVAL;
811 
812 	/* We don't support/need dma mappings, so... */
813 	if (!VM_OFFSET(vma))
814 		return -EINVAL;
815 
816 	for (i = 0; i < dev->map_count; i++) {
817 		unsigned long off;
818 
819 		map = dev->maplist[i];
820 
821 		/* Ok, a little hack to make 32-bit apps work. */
822 		off = (map->offset & 0xffffffff);
823 		if (off == VM_OFFSET(vma))
824 			break;
825 	}
826 
827 	if (i >= dev->map_count)
828 		return -EINVAL;
829 
830 	if (!map ||
831 	    ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN)))
832 		return -EPERM;
833 
834 	if (map->size != (vma->vm_end - vma->vm_start))
835 		return -EINVAL;
836 
837 	/* Set read-only attribute before mappings are created
838 	 * so it works for fb/reg maps too.
839 	 */
840 	if (map->flags & _DRM_READ_ONLY)
841 		vma->vm_page_prot = __pgprot(pte_val(pte_wrprotect(
842 			__pte(pgprot_val(vma->vm_page_prot)))));
843 
844 	switch (map->type) {
845 	case _DRM_FRAME_BUFFER:
846 		/* FALLTHROUGH */
847 
848 	case _DRM_REGISTERS:
849 		/* In order to handle 32-bit drm apps/xserver we
850 		 * play a trick.  The mappings only really specify
851 		 * the 32-bit offset from the cards 64-bit base
852 		 * address, and we just add in the base here.
853 		 */
854 		vma->vm_flags |= VM_IO;
855 		if (io_remap_page_range(vma->vm_start,
856 					ffb_priv->card_phys_base + VM_OFFSET(vma),
857 					vma->vm_end - vma->vm_start,
858 					vma->vm_page_prot, 0))
859 			return -EAGAIN;
860 
861 		vma->vm_ops = &drm_vm_ops;
862 		break;
863 	case _DRM_SHM:
864 		if (map->flags & _DRM_CONTAINS_LOCK)
865 			vma->vm_ops = &drm_vm_shm_lock_ops;
866 		else {
867 			vma->vm_ops = &drm_vm_shm_ops;
868 			vma->vm_private_data = (void *) map;
869 		}
870 
871 		/* Don't let this area swap.  Change when
872 		 * DRM_KERNEL advisory is supported.
873 		 */
874 		vma->vm_flags |= VM_LOCKED;
875 		break;
876 	default:
877 		return -EINVAL;	/* This should never happen. */
878 	};
879 
880 	vma->vm_flags |= VM_LOCKED | VM_SHM; /* Don't swap */
881 
882 	vma->vm_file = filp; /* Needed for drm_vm_open() */
883 	drm_vm_open(vma);
884 	return 0;
885 }
886 
ffb_find_map(struct file * filp,unsigned long off)887 static drm_map_t *ffb_find_map(struct file *filp, unsigned long off)
888 {
889 	drm_file_t	*priv	= filp->private_data;
890 	drm_device_t	*dev;
891 	drm_map_t	*map;
892 	int		i;
893 
894 	if (!priv || (dev = priv->dev) == NULL)
895 		return NULL;
896 
897 	for (i = 0; i < dev->map_count; i++) {
898 		unsigned long uoff;
899 
900 		map = dev->maplist[i];
901 
902 		/* Ok, a little hack to make 32-bit apps work. */
903 		uoff = (map->offset & 0xffffffff);
904 		if (uoff == off)
905 			return map;
906 	}
907 	return NULL;
908 }
909 
/* Pick a user virtual address for mmap()ing a DRM map.
 *
 * Register/framebuffer maps use the arch framebuffer helper when the
 * platform provides one.  For the SHM (SAREA) map, when SHMLBA exceeds
 * PAGE_SIZE, over-allocate by SHMLBA - PAGE_SIZE and slide the returned
 * address so (addr % SHMLBA) matches the kernel virtual address of the
 * area -- presumably to keep user and kernel views cache-alias
 * compatible per SHMLBA semantics; confirm against asm/shmparam.h.
 * Everything else falls through to the generic allocator.
 */
static unsigned long ffb_get_unmapped_area(struct file *filp, unsigned long hint, unsigned long len, unsigned long pgoff, unsigned long flags)
{
	drm_map_t *map = ffb_find_map(filp, pgoff << PAGE_SHIFT);
	unsigned long addr = -ENOMEM;

	if (!map)
		return get_unmapped_area(NULL, hint, len, pgoff, flags);

	if (map->type == _DRM_FRAME_BUFFER ||
	    map->type == _DRM_REGISTERS) {
#ifdef HAVE_ARCH_FB_UNMAPPED_AREA
		addr = get_fb_unmapped_area(filp, hint, len, pgoff, flags);
#else
		addr = get_unmapped_area(NULL, hint, len, pgoff, flags);
#endif
	} else if (map->type == _DRM_SHM && SHMLBA > PAGE_SIZE) {
		unsigned long slack = SHMLBA - PAGE_SIZE;

		/* Ask for extra room so we can realign within the hole. */
		addr = get_unmapped_area(NULL, hint, len + slack, pgoff, flags);
		if (!(addr & ~PAGE_MASK)) {
			unsigned long kvirt = (unsigned long) map->handle;

			if ((kvirt & (SHMLBA - 1)) != (addr & (SHMLBA - 1))) {
				unsigned long koff, aoff;

				koff = kvirt & (SHMLBA - 1);
				aoff = addr & (SHMLBA - 1);
				if (koff < aoff)
					koff += SHMLBA;

				/* Slide forward to the matching color. */
				addr += (koff - aoff);
			}
		}
	} else {
		addr = get_unmapped_area(NULL, hint, len, pgoff, flags);
	}

	return addr;
}
949 
950 module_init(ffb_init);
951 module_exit(ffb_cleanup);
952