/* lock.c -- IOCTLs for locking -*- linux-c -*-
 * Created: Tue Feb 2 08:37:54 1999 by faith@precisioninsight.com
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Rickard E. (Rik) Faith <faith@valinux.com>
 *
 */

#define __NO_VERSION__
#include "drmP.h"

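/* drm_block() and drm_unblock() are placeholder handlers for the block and
 * unblock ioctls: each simply emits a debug trace and reports success. */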
int drm_block(struct inode *inode, struct file *filp, unsigned int cmd,
	      unsigned long arg)
{
	DRM_DEBUG("\n");
	return 0;
}

int drm_unblock(struct inode *inode, struct file *filp, unsigned int cmd,
		unsigned long arg)
{
	DRM_DEBUG("\n");
	return 0;
}

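/* Attempt to take the hardware lock on behalf of the given context.  The
 * lock word is updated atomically with cmpxchg: if the lock is free, it is
 * marked held by this context; if it is already held, only the contention
 * flag (_DRM_LOCK_CONT) is set.  Returns 1 if the lock was acquired, and 0
 * otherwise (including the case where the caller already holds it, which is
 * logged as an error for non-kernel contexts). */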
int drm_lock_take(__volatile__ unsigned int *lock, unsigned int context)
{
	unsigned int old, new, prev;

	do {
		old = *lock;
		if (old & _DRM_LOCK_HELD)
			new = old | _DRM_LOCK_CONT;
		else
			new = context | _DRM_LOCK_HELD;
		prev = cmpxchg(lock, old, new);
	} while (prev != old);
	if (_DRM_LOCKING_CONTEXT(old) == context) {
		if (old & _DRM_LOCK_HELD) {
			if (context != DRM_KERNEL_CONTEXT) {
				DRM_ERROR("%d holds heavyweight lock\n",
					  context);
			}
			return 0;
		}
	}
	if (new == (context | _DRM_LOCK_HELD)) {
		/* Have lock */
		return 1;
	}
	return 0;
}

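/* A minimal sketch of how a lock ioctl would typically drive drm_lock_take()
 * above.  The field and variable names here are assumptions for illustration,
 * not taken from this file:
 *
 *	add_wait_queue(&dev->lock.lock_queue, &entry);
 *	for (;;) {
 *		current->state = TASK_INTERRUPTIBLE;
 *		if (drm_lock_take(&dev->lock.hw_lock->lock, lock.context)) {
 *			dev->lock.pid = current->pid;
 *			break;
 *		}
 *		schedule();
 *		if (signal_pending(current)) {
 *			ret = -ERESTARTSYS;
 *			break;
 *		}
 *	}
 *	current->state = TASK_RUNNING;
 *	remove_wait_queue(&dev->lock.lock_queue, &entry);
 *
 * The waiter is woken by drm_lock_free() below whenever the lock is
 * released, and retries the cmpxchg-based take. */
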
/* This takes a lock forcibly and hands it to context.  Should ONLY be used
   inside *_unlock to give lock to kernel before calling *_dma_schedule. */
int drm_lock_transfer(drm_device_t *dev,
		      __volatile__ unsigned int *lock, unsigned int context)
{
	unsigned int old, new, prev;

	dev->lock.pid = 0;
	do {
		old  = *lock;
		new  = context | _DRM_LOCK_HELD;
		prev = cmpxchg(lock, old, new);
	} while (prev != old);
	return 1;
}

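/* Release the hardware lock: the lock word is cleared atomically and any
 * process sleeping on dev->lock.lock_queue is woken.  If the lock turns out
 * to have been held by a different context, an error is logged and 1 is
 * returned without waking waiters; otherwise 0 is returned. */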
int drm_lock_free(drm_device_t *dev,
		  __volatile__ unsigned int *lock, unsigned int context)
{
	unsigned int old, new, prev;
	pid_t pid = dev->lock.pid;

	dev->lock.pid = 0;
	do {
		old  = *lock;
		new  = 0;
		prev = cmpxchg(lock, old, new);
	} while (prev != old);
	if (_DRM_LOCK_IS_HELD(old) && _DRM_LOCKING_CONTEXT(old) != context) {
		DRM_ERROR("%d freed heavyweight lock held by %d (pid %d)\n",
			  context,
			  _DRM_LOCKING_CONTEXT(old),
			  pid);
		return 1;
	}
	wake_up_interruptible(&dev->lock.lock_queue);
	return 0;
}

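/* Block new writes to the given command queue and wait, interruptibly,
 * until its waitlist has drained.  Note that block_write is deliberately
 * left incremented on return; drm_flush_unblock_queue() below clears it
 * and wakes the blocked writers. */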
static int drm_flush_queue(drm_device_t *dev, int context)
{
	DECLARE_WAITQUEUE(entry, current);
	int ret = 0;
	drm_queue_t *q = dev->queuelist[context];

	DRM_DEBUG("\n");

	atomic_inc(&q->use_count);
	if (atomic_read(&q->use_count) > 1) {
		atomic_inc(&q->block_write);
		add_wait_queue(&q->flush_queue, &entry);
		atomic_inc(&q->block_count);
		for (;;) {
			current->state = TASK_INTERRUPTIBLE;
			if (!DRM_BUFCOUNT(&q->waitlist)) break;
			schedule();
			if (signal_pending(current)) {
				ret = -EINTR; /* Can't restart */
				break;
			}
		}
		atomic_dec(&q->block_count);
		current->state = TASK_RUNNING;
		remove_wait_queue(&q->flush_queue, &entry);
	}
	atomic_dec(&q->use_count);
	atomic_inc(&q->total_flushed);

	/* NOTE: block_write is still incremented!
		 Use drm_flush_unblock_queue to decrement. */
	return ret;
}

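/* Undo the write block set up by drm_flush_queue(): decrement block_write
 * (if it is set) and wake any writers sleeping on the queue's write_queue. */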
static int drm_flush_unblock_queue(drm_device_t *dev, int context)
{
	drm_queue_t *q = dev->queuelist[context];

	DRM_DEBUG("\n");

	atomic_inc(&q->use_count);
	if (atomic_read(&q->use_count) > 1) {
		if (atomic_read(&q->block_write)) {
			atomic_dec(&q->block_write);
			wake_up_interruptible(&q->write_queue);
		}
	}
	atomic_dec(&q->use_count);
	return 0;
}

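/* Flush queues according to the lock flags: _DRM_LOCK_FLUSH flushes the
 * kernel queue and the caller's queue, _DRM_LOCK_FLUSH_ALL flushes every
 * allocated queue.  Flushing stops at the first error, which is returned. */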
int drm_flush_block_and_flush(drm_device_t *dev, int context,
			      drm_lock_flags_t flags)
{
	int ret = 0;
	int i;

	DRM_DEBUG("\n");

	if (flags & _DRM_LOCK_FLUSH) {
		ret = drm_flush_queue(dev, DRM_KERNEL_CONTEXT);
		if (!ret) ret = drm_flush_queue(dev, context);
	}
	if (flags & _DRM_LOCK_FLUSH_ALL) {
		for (i = 0; !ret && i < dev->queue_count; i++) {
			ret = drm_flush_queue(dev, i);
		}
	}
	return ret;
}

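/* Counterpart to drm_flush_block_and_flush(): re-enable writes on the
 * queues that were blocked, honoring the same flag combinations. */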
int drm_flush_unblock(drm_device_t *dev, int context, drm_lock_flags_t flags)
{
	int ret = 0;
	int i;

	DRM_DEBUG("\n");

	if (flags & _DRM_LOCK_FLUSH) {
		ret = drm_flush_unblock_queue(dev, DRM_KERNEL_CONTEXT);
		if (!ret) ret = drm_flush_unblock_queue(dev, context);
	}
	if (flags & _DRM_LOCK_FLUSH_ALL) {
		for (i = 0; !ret && i < dev->queue_count; i++) {
			ret = drm_flush_unblock_queue(dev, i);
		}
	}

	return ret;
}

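/* Handler for the finish ioctl: copy the drm_lock_t argument from user
 * space, block and flush the requested queues, then unblock them again.
 * The result of the flush (0 or -EINTR) is returned to the caller. */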
int drm_finish(struct inode *inode, struct file *filp, unsigned int cmd,
	       unsigned long arg)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;
	int ret = 0;
	drm_lock_t lock;

	DRM_DEBUG("\n");

	if (copy_from_user(&lock, (drm_lock_t *)arg, sizeof(lock)))
		return -EFAULT;
	ret = drm_flush_block_and_flush(dev, lock.context, lock.flags);
	drm_flush_unblock(dev, lock.context, lock.flags);
	return ret;
}

/* If we get here, it means that the process has called DRM_IOCTL_LOCK
   without calling DRM_IOCTL_UNLOCK.

   If the lock is not held, then let the signal proceed as usual.

   If the lock is held, then set the contended flag and keep the signal
   blocked.

   Return 1 if the signal should be delivered normally.
   Return 0 if the signal should be blocked. */

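/* The notifier below is typically installed from the lock ioctl path via
 * block_all_signals(drm_notifier, &dev->sigdata, &dev->sigmask) and removed
 * with unblock_all_signals() on unlock; the call sites live outside this
 * file, so treat that description as an assumption about the callers. */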
int drm_notifier(void *priv)
{
	drm_sigdata_t *s = (drm_sigdata_t *)priv;
	unsigned int old, new, prev;

	/* Allow signal delivery if lock isn't held */
	if (!_DRM_LOCK_IS_HELD(s->lock->lock)
	    || _DRM_LOCKING_CONTEXT(s->lock->lock) != s->context)
		return 1;

	/* Otherwise, set flag to force call to drmUnlock */
	do {
		old  = s->lock->lock;
		new  = old | _DRM_LOCK_CONT;
		prev = cmpxchg(&s->lock->lock, old, new);
	} while (prev != old);
	return 0;
}