/* gamma.c -- 3dlabs GMX 2000 driver -*- linux-c -*-
 * Created: Mon Jan  4 08:58:31 1999 by faith@precisioninsight.com
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Rickard E. (Rik) Faith <faith@valinux.com>
 *
 */

#include <linux/config.h>
#include "drmP.h"
#include "gamma_drv.h"

#ifndef PCI_DEVICE_ID_3DLABS_GAMMA
#define PCI_DEVICE_ID_3DLABS_GAMMA 0x0008
#endif
#ifndef PCI_DEVICE_ID_3DLABS_MX
#define PCI_DEVICE_ID_3DLABS_MX 0x0006
#endif

#define GAMMA_NAME	 "gamma"
#define GAMMA_DESC	 "3dlabs GMX 2000"
#define GAMMA_DATE	 "20000910"
#define GAMMA_MAJOR	 1
#define GAMMA_MINOR	 0
#define GAMMA_PATCHLEVEL 0

static drm_device_t	      gamma_device;

static struct file_operations gamma_fops = {
#if LINUX_VERSION_CODE >= 0x020400
				/* This started being used during 2.4.0-test */
	owner:   THIS_MODULE,
#endif
	open:	 gamma_open,
	flush:	 drm_flush,
	release: gamma_release,
	ioctl:	 gamma_ioctl,
	mmap:	 drm_mmap,
	read:	 drm_read,
	fasync:	 drm_fasync,
	poll:	 drm_poll,
};

static struct miscdevice      gamma_misc = {
	minor: MISC_DYNAMIC_MINOR,
	name:  GAMMA_NAME,
	fops:  &gamma_fops,
};

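/* Each entry below is { handler, auth_needed, root_only }; gamma_ioctl()
 * enforces the last two flags before dispatching to the handler.
 */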
static drm_ioctl_desc_t	      gamma_ioctls[] = {
	[DRM_IOCTL_NR(DRM_IOCTL_VERSION)]    = { gamma_version,	  0, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE)] = { drm_getunique,	  0, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_GET_MAGIC)]  = { drm_getmagic,	  0, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_IRQ_BUSID)]  = { drm_irq_busid,	  0, 1 },

	[DRM_IOCTL_NR(DRM_IOCTL_SET_UNIQUE)] = { drm_setunique,	  1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_BLOCK)]	     = { drm_block,	  1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_UNBLOCK)]    = { drm_unblock,	  1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_CONTROL)]    = { gamma_control,	  1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_AUTH_MAGIC)] = { drm_authmagic,	  1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP)]    = { drm_addmap,	  1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_ADD_BUFS)]   = { drm_addbufs,	  1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_MARK_BUFS)]  = { drm_markbufs,	  1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_INFO_BUFS)]  = { drm_infobufs,	  1, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_MAP_BUFS)]   = { drm_mapbufs,	  1, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_FREE_BUFS)]  = { drm_freebufs,	  1, 0 },

	[DRM_IOCTL_NR(DRM_IOCTL_ADD_CTX)]    = { drm_addctx,	  1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_RM_CTX)]     = { drm_rmctx,	  1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_MOD_CTX)]    = { drm_modctx,	  1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_GET_CTX)]    = { drm_getctx,	  1, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_SWITCH_CTX)] = { drm_switchctx,	  1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_NEW_CTX)]    = { drm_newctx,	  1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_RES_CTX)]    = { drm_resctx,	  1, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_ADD_DRAW)]   = { drm_adddraw,	  1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_RM_DRAW)]    = { drm_rmdraw,	  1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_DMA)]	     = { gamma_dma,	  1, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_LOCK)]	     = { gamma_lock,	  1, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_UNLOCK)]     = { gamma_unlock,	  1, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_FINISH)]     = { drm_finish,	  1, 0 },
};
#define GAMMA_IOCTL_COUNT DRM_ARRAY_SIZE(gamma_ioctls)

#ifdef MODULE
static char		      *gamma = NULL;
#endif
static int		      devices = 0;

MODULE_AUTHOR("VA Linux Systems, Inc.");
MODULE_DESCRIPTION("3dlabs GMX 2000");
MODULE_LICENSE("GPL and additional rights");
MODULE_PARM(gamma, "s");
MODULE_PARM(devices, "i");
MODULE_PARM_DESC(devices,
		 "devices=x, where x is the number of MX chips on the card\n");
#ifndef MODULE
/* gamma_options is called by the kernel to parse command-line options
 * passed via the boot-loader (e.g., LILO).  It calls the insmod option
 * routine, drm_parse_options.
 */

static int __init gamma_options(char *str)
{
	drm_parse_options(str);
	return 1;
}

__setup("gamma=", gamma_options);
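/* For example, booting with "gamma=<options>" on the kernel command line
 * hands <options> to drm_parse_options(); the option syntax itself is
 * defined by the generic DRM code, not by this driver.
 */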
#endif

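/* gamma_setup initializes the per-device state (DMA buffers, counters,
 * hardware lock, and wait queues) when the first file handle is opened; it
 * is undone by gamma_takedown when the last handle is closed.
 */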
static int gamma_setup(drm_device_t *dev)
{
	int i;

	atomic_set(&dev->ioctl_count, 0);
	atomic_set(&dev->vma_count, 0);
	dev->buf_use	  = 0;
	atomic_set(&dev->buf_alloc, 0);

	drm_dma_setup(dev);

	atomic_set(&dev->total_open, 0);
	atomic_set(&dev->total_close, 0);
	atomic_set(&dev->total_ioctl, 0);
	atomic_set(&dev->total_irq, 0);
	atomic_set(&dev->total_ctx, 0);
	atomic_set(&dev->total_locks, 0);
	atomic_set(&dev->total_unlocks, 0);
	atomic_set(&dev->total_contends, 0);
	atomic_set(&dev->total_sleeps, 0);

	for (i = 0; i < DRM_HASH_SIZE; i++) {
		dev->magiclist[i].head = NULL;
		dev->magiclist[i].tail = NULL;
	}
	dev->maplist	    = NULL;
	dev->map_count	    = 0;
	dev->vmalist	    = NULL;
	dev->lock.hw_lock   = NULL;
	init_waitqueue_head(&dev->lock.lock_queue);
	dev->queue_count    = 0;
	dev->queue_reserved = 0;
	dev->queue_slots    = 0;
	dev->queuelist	    = NULL;
	dev->irq	    = 0;
	dev->context_flag   = 0;
	dev->interrupt_flag = 0;
	dev->dma_flag	    = 0;
	dev->last_context   = 0;
	dev->last_switch    = 0;
	dev->last_checked   = 0;
	init_timer(&dev->timer);
	init_waitqueue_head(&dev->context_wait);
#if DRM_DMA_HISTOGRAM
	memset(&dev->histo, 0, sizeof(dev->histo));
#endif
	dev->ctx_start	    = 0;
	dev->lck_start	    = 0;

	dev->buf_rp	  = dev->buf;
	dev->buf_wp	  = dev->buf;
	dev->buf_end	  = dev->buf + DRM_BSZ;
	dev->buf_async	  = NULL;
	init_waitqueue_head(&dev->buf_readers);
	init_waitqueue_head(&dev->buf_writers);

	DRM_DEBUG("\n");

	/* The kernel's context could be created here, but is now created
	   in drm_dma_enqueue.	This is more resource-efficient for
	   hardware that does not do DMA, but may mean that
	   drm_select_queue fails between the time the interrupt is
	   initialized and the time the queues are initialized. */

	return 0;
}

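/* gamma_takedown releases everything gamma_setup and subsequent ioctls
 * allocated: the IRQ handler, magic/authentication lists, VMA bookkeeping,
 * map areas (including MTRR and ioremap state), context queues, and the DMA
 * buffers.
 */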
static int gamma_takedown(drm_device_t *dev)
{
	int		  i;
	drm_magic_entry_t *pt, *next;
	drm_map_t	  *map;
	drm_vma_entry_t	  *vma, *vma_next;

	DRM_DEBUG("\n");

	if (dev->irq) gamma_irq_uninstall(dev);

	down(&dev->struct_sem);
	del_timer(&dev->timer);

	if (dev->devname) {
		drm_free(dev->devname, strlen(dev->devname)+1, DRM_MEM_DRIVER);
		dev->devname = NULL;
	}

	if (dev->unique) {
		drm_free(dev->unique, strlen(dev->unique)+1, DRM_MEM_DRIVER);
		dev->unique = NULL;
		dev->unique_len = 0;
	}
				/* Clear pid list */
	for (i = 0; i < DRM_HASH_SIZE; i++) {
		for (pt = dev->magiclist[i].head; pt; pt = next) {
			next = pt->next;
			drm_free(pt, sizeof(*pt), DRM_MEM_MAGIC);
		}
		dev->magiclist[i].head = dev->magiclist[i].tail = NULL;
	}

				/* Clear vma list (only built for debugging) */
	if (dev->vmalist) {
		for (vma = dev->vmalist; vma; vma = vma_next) {
			vma_next = vma->next;
			drm_free(vma, sizeof(*vma), DRM_MEM_VMAS);
		}
		dev->vmalist = NULL;
	}

				/* Clear map area and mtrr information */
	if (dev->maplist) {
		for (i = 0; i < dev->map_count; i++) {
			map = dev->maplist[i];
			switch (map->type) {
			case _DRM_REGISTERS:
			case _DRM_FRAME_BUFFER:
#ifdef CONFIG_MTRR
				if (map->mtrr >= 0) {
					int retcode;
					retcode = mtrr_del(map->mtrr,
							   map->offset,
							   map->size);
					DRM_DEBUG("mtrr_del = %d\n", retcode);
				}
#endif
				drm_ioremapfree(map->handle, map->size, dev);
				break;
			case _DRM_SHM:
				drm_free_pages((unsigned long)map->handle,
					       drm_order(map->size)
					       - PAGE_SHIFT,
					       DRM_MEM_SAREA);
				break;
			case _DRM_AGP:
				/* Do nothing here, because this is all
				   handled in the AGP/GART driver. */
				break;
			}
			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
		}
		drm_free(dev->maplist,
			 dev->map_count * sizeof(*dev->maplist),
			 DRM_MEM_MAPS);
		dev->maplist   = NULL;
		dev->map_count = 0;
	}

	if (dev->queuelist) {
		for (i = 0; i < dev->queue_count; i++) {
			if (dev->queuelist[i]) {
				drm_waitlist_destroy(&dev->queuelist[i]->waitlist);
				drm_free(dev->queuelist[i],
					 sizeof(*dev->queuelist[0]),
					 DRM_MEM_QUEUES);
				dev->queuelist[i] = NULL;
			}
		}
		drm_free(dev->queuelist,
			 dev->queue_slots * sizeof(*dev->queuelist),
			 DRM_MEM_QUEUES);
		dev->queuelist	 = NULL;
	}

	drm_dma_takedown(dev);

	dev->queue_count     = 0;
	if (dev->lock.hw_lock) {
		dev->lock.hw_lock    = NULL; /* SHM removed */
		dev->lock.pid	     = 0;
		wake_up_interruptible(&dev->lock.lock_queue);
	}
	up(&dev->struct_sem);

	return 0;
}

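/* gamma_found reports how many MX rasterizer chips gamma_find_devices()
 * detected; gamma_find_devices walks the PCI bus for a 3Dlabs GAMMA device
 * and counts the MX devices that share its slot (at most two are supported).
 */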
int gamma_found(void)
{
	return devices;
}

int gamma_find_devices(void)
{
	struct pci_dev *d = NULL, *one = NULL, *two = NULL;

	d = pci_find_device(PCI_VENDOR_ID_3DLABS,PCI_DEVICE_ID_3DLABS_GAMMA,d);
	if (!d) return 0;

	one = pci_find_device(PCI_VENDOR_ID_3DLABS,PCI_DEVICE_ID_3DLABS_MX,d);
	if (!one) return 0;

	/* Make sure it's on the same card; if not, no MXs were found */
	if (PCI_SLOT(d->devfn) != PCI_SLOT(one->devfn)) return 0;

	two = pci_find_device(PCI_VENDOR_ID_3DLABS,PCI_DEVICE_ID_3DLABS_MX,one);
	if (!two) return 1;

	/* Make sure it's on the same card; if not, only one MX was found */
	if (PCI_SLOT(d->devfn) != PCI_SLOT(two->devfn)) return 1;

	/* Two MXs found -- we don't currently support more than two */
	return 2;
}

/* gamma_init is called via init_module at module load time, or via
 * linux/init/main.c (this is not currently supported). */

static int __init gamma_init(void)
{
	int		      retcode;
	drm_device_t	      *dev = &gamma_device;

	DRM_DEBUG("\n");

	memset((void *)dev, 0, sizeof(*dev));
	dev->count_lock	  = SPIN_LOCK_UNLOCKED;
	sema_init(&dev->struct_sem, 1);

#ifdef MODULE
	drm_parse_options(gamma);
#endif
	devices = gamma_find_devices();
	if (devices == 0) return -ENODEV;

	if ((retcode = misc_register(&gamma_misc))) {
		DRM_ERROR("Cannot register \"%s\"\n", GAMMA_NAME);
		return retcode;
	}
	dev->device = MKDEV(MISC_MAJOR, gamma_misc.minor);
	dev->name   = GAMMA_NAME;

	drm_mem_init();
	drm_proc_init(dev);

	DRM_INFO("Initialized %s %d.%d.%d %s on minor %d with %d MX devices\n",
		 GAMMA_NAME,
		 GAMMA_MAJOR,
		 GAMMA_MINOR,
		 GAMMA_PATCHLEVEL,
		 GAMMA_DATE,
		 gamma_misc.minor,
		 devices);

	return 0;
}

/* gamma_cleanup is called via cleanup_module at module unload time. */

static void __exit gamma_cleanup(void)
{
	drm_device_t	      *dev = &gamma_device;

	DRM_DEBUG("\n");

	drm_proc_cleanup();
	if (misc_deregister(&gamma_misc)) {
		DRM_ERROR("Cannot unload module\n");
	} else {
		DRM_INFO("Module unloaded\n");
	}
	gamma_takedown(dev);
}

module_init(gamma_init);
module_exit(gamma_cleanup);

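/* gamma_version fills in a drm_version_t for the DRM_IOCTL_VERSION ioctl.
 * DRM_COPY truncates each string to the buffer length supplied by the
 * caller, but always reports the full string length back so the caller can
 * tell whether truncation occurred.
 */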
int gamma_version(struct inode *inode, struct file *filp, unsigned int cmd,
		  unsigned long arg)
{
	drm_version_t version;
	int	      len;

	if (copy_from_user(&version,
			   (drm_version_t *)arg,
			   sizeof(version)))
		return -EFAULT;

#define DRM_COPY(name,value)				     \
	len = strlen(value);				     \
	if (len > name##_len) len = name##_len;		     \
	name##_len = strlen(value);			     \
	if (len && name) {				     \
		if (copy_to_user(name, value, len))	     \
			return -EFAULT;			     \
	}

	version.version_major	   = GAMMA_MAJOR;
	version.version_minor	   = GAMMA_MINOR;
	version.version_patchlevel = GAMMA_PATCHLEVEL;

	DRM_COPY(version.name, GAMMA_NAME);
	DRM_COPY(version.date, GAMMA_DATE);
	DRM_COPY(version.desc, GAMMA_DESC);

	if (copy_to_user((drm_version_t *)arg,
			 &version,
			 sizeof(version)))
		return -EFAULT;
	return 0;
}

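/* gamma_open runs the generic DRM open path and, when this is the first
 * open of the device, performs the one-time per-device setup.
 */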
int gamma_open(struct inode *inode, struct file *filp)
{
	drm_device_t  *dev    = &gamma_device;
	int	      retcode = 0;

	DRM_DEBUG("open_count = %d\n", dev->open_count);
	if (!(retcode = drm_open_helper(inode, filp, dev))) {
#if LINUX_VERSION_CODE < 0x020333
		MOD_INC_USE_COUNT; /* Needed before Linux 2.3.51 */
#endif
		atomic_inc(&dev->total_open);
		spin_lock(&dev->count_lock);
		if (!dev->open_count++) {
			spin_unlock(&dev->count_lock);
			return gamma_setup(dev);
		}
		spin_unlock(&dev->count_lock);
	}
	return retcode;
}

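/* gamma_release runs the generic DRM release path and, when the last file
 * handle is closed, tears the device back down.  It refuses to tear down
 * while ioctls are still in flight or clients are blocked.
 */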
int gamma_release(struct inode *inode, struct file *filp)
{
	drm_file_t    *priv   = filp->private_data;
	drm_device_t  *dev;
	int	      retcode = 0;

	lock_kernel();
	dev = priv->dev;

	DRM_DEBUG("open_count = %d\n", dev->open_count);
	if (!(retcode = drm_release(inode, filp))) {
#if LINUX_VERSION_CODE < 0x020333
		MOD_DEC_USE_COUNT; /* Needed before Linux 2.3.51 */
#endif
		atomic_inc(&dev->total_close);
		spin_lock(&dev->count_lock);
		if (!--dev->open_count) {
			if (atomic_read(&dev->ioctl_count) || dev->blocked) {
				DRM_ERROR("Device busy: %d %d\n",
					  atomic_read(&dev->ioctl_count),
					  dev->blocked);
				spin_unlock(&dev->count_lock);
				unlock_kernel();
				return -EBUSY;
			}
			spin_unlock(&dev->count_lock);
			unlock_kernel();
			return gamma_takedown(dev);
		}
		spin_unlock(&dev->count_lock);
	}
	unlock_kernel();
	return retcode;
}

/* gamma_ioctl is called whenever a process performs an ioctl on /dev/drm.
 * It validates the request against the gamma_ioctls table before
 * dispatching to the handler. */

int gamma_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
		unsigned long arg)
{
	int		 nr	 = DRM_IOCTL_NR(cmd);
	drm_file_t	 *priv	 = filp->private_data;
	drm_device_t	 *dev	 = priv->dev;
	int		 retcode = 0;
	drm_ioctl_desc_t *ioctl;
	drm_ioctl_t	 *func;

	atomic_inc(&dev->ioctl_count);
	atomic_inc(&dev->total_ioctl);
	++priv->ioctl_count;

	DRM_DEBUG("pid = %d, cmd = 0x%02x, nr = 0x%02x, dev 0x%x, auth = %d\n",
		  current->pid, cmd, nr, dev->device, priv->authenticated);

	if (nr >= GAMMA_IOCTL_COUNT) {
		retcode = -EINVAL;
	} else {
		ioctl	  = &gamma_ioctls[nr];
		func	  = ioctl->func;

		if (!func) {
			DRM_DEBUG("no function\n");
			retcode = -EINVAL;
		} else if ((ioctl->root_only && !capable(CAP_SYS_ADMIN))
			    || (ioctl->auth_needed && !priv->authenticated)) {
			retcode = -EACCES;
		} else {
			retcode = (func)(inode, filp, cmd, arg);
		}
	}

	atomic_dec(&dev->ioctl_count);
	return retcode;
}

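/* gamma_unlock handles DRM_IOCTL_UNLOCK: it hands the hardware lock back to
 * the kernel context, kicks the DMA scheduler, and then frees the lock if no
 * context switch is pending.
 */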
int gamma_unlock(struct inode *inode, struct file *filp, unsigned int cmd,
		 unsigned long arg)
{
	drm_file_t	  *priv	  = filp->private_data;
	drm_device_t	  *dev	  = priv->dev;
	drm_lock_t	  lock;

	if (copy_from_user(&lock, (drm_lock_t *)arg, sizeof(lock)))
		return -EFAULT;

	if (lock.context == DRM_KERNEL_CONTEXT) {
		DRM_ERROR("Process %d using kernel context %d\n",
			  current->pid, lock.context);
		return -EINVAL;
	}

	DRM_DEBUG("%d frees lock (%d holds)\n",
		  lock.context,
		  _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
	atomic_inc(&dev->total_unlocks);
	if (_DRM_LOCK_IS_CONT(dev->lock.hw_lock->lock))
		atomic_inc(&dev->total_contends);
	drm_lock_transfer(dev, &dev->lock.hw_lock->lock, DRM_KERNEL_CONTEXT);
	gamma_dma_schedule(dev, 1);
	if (!dev->context_flag) {
		if (drm_lock_free(dev, &dev->lock.hw_lock->lock,
				  DRM_KERNEL_CONTEXT)) {
			DRM_ERROR("\n");
		}
	}
#if DRM_DMA_HISTOGRAM
	atomic_inc(&dev->histo.lhld[drm_histogram_slot(get_cycles()
						       - dev->lck_start)]);
#endif

	unblock_all_signals();
	return 0;
}