/**************************************************************************
 *
 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include "ttm/ttm_lock.h"
#include "ttm/ttm_module.h"
#include <linux/atomic.h>
#include <linux/errno.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/module.h>

#define TTM_WRITE_LOCK_PENDING    (1 << 0)
#define TTM_VT_LOCK_PENDING       (1 << 1)
#define TTM_SUSPEND_LOCK_PENDING  (1 << 2)
#define TTM_VT_LOCK               (1 << 3)
#define TTM_SUSPEND_LOCK          (1 << 4)

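/*
 * Lock state is kept in lock->rw and lock->flags: rw > 0 means the lock
 * is held by that many readers, rw == -1 means it is write-locked, and
 * rw == 0 means it is free.  The flags word records the exclusive vt-
 * and suspend-lock modes and the *_PENDING bits that keep new readers
 * from starving a waiting exclusive locker.
 */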
void ttm_lock_init(struct ttm_lock *lock)
{
	spin_lock_init(&lock->lock);
	init_waitqueue_head(&lock->queue);
	lock->rw = 0;
	lock->flags = 0;
	lock->kill_takers = false;
	lock->signal = SIGKILL;
}
EXPORT_SYMBOL(ttm_lock_init);

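/*
 * Minimal usage sketch of the read side.  The dev_priv pointer and its
 * lock member below are illustrative only and are not defined in this file:
 *
 *	ttm_lock_init(&dev_priv->ttm_lock);
 *	...
 *	ret = ttm_read_lock(&dev_priv->ttm_lock, true);
 *	if (unlikely(ret != 0))
 *		return ret;
 *	... touch the resources protected by the lock ...
 *	ttm_read_unlock(&dev_priv->ttm_lock);
 */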
void ttm_read_unlock(struct ttm_lock *lock)
{
	spin_lock(&lock->lock);
	if (--lock->rw == 0)
		wake_up_all(&lock->queue);
	spin_unlock(&lock->lock);
}
EXPORT_SYMBOL(ttm_read_unlock);

static bool __ttm_read_lock(struct ttm_lock *lock)
{
	bool locked = false;

	spin_lock(&lock->lock);
	if (unlikely(lock->kill_takers)) {
		send_sig(lock->signal, current, 0);
		spin_unlock(&lock->lock);
		return false;
	}
	if (lock->rw >= 0 && lock->flags == 0) {
		++lock->rw;
		locked = true;
	}
	spin_unlock(&lock->lock);
	return locked;
}

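/*
 * Take the lock for reading, waiting while it is write-, vt- or
 * suspend-locked or while such a lock is pending.  Returns 0 on success
 * and -ERESTARTSYS if an interruptible wait was broken by a signal.
 */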
int ttm_read_lock(struct ttm_lock *lock, bool interruptible)
{
	int ret = 0;

	if (interruptible)
		ret = wait_event_interruptible(lock->queue,
					       __ttm_read_lock(lock));
	else
		wait_event(lock->queue, __ttm_read_lock(lock));
	return ret;
}
EXPORT_SYMBOL(ttm_read_lock);

static bool __ttm_read_trylock(struct ttm_lock *lock, bool *locked)
{
	bool block = true;

	*locked = false;

	spin_lock(&lock->lock);
	if (unlikely(lock->kill_takers)) {
		send_sig(lock->signal, current, 0);
		spin_unlock(&lock->lock);
		return false;
	}
	if (lock->rw >= 0 && lock->flags == 0) {
		++lock->rw;
		block = false;
		*locked = true;
	} else if (lock->flags == 0) {
		block = false;
	}
	spin_unlock(&lock->lock);

	return !block;
}

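/*
 * Like ttm_read_lock(), but if the lock is currently held for writing
 * the call fails with -EBUSY instead of sleeping.  It still waits while
 * a vt-, suspend- or pending exclusive lock keeps lock->flags non-zero.
 */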
int ttm_read_trylock(struct ttm_lock *lock, bool interruptible)
{
	int ret = 0;
	bool locked;

	if (interruptible)
		ret = wait_event_interruptible
			(lock->queue, __ttm_read_trylock(lock, &locked));
	else
		wait_event(lock->queue, __ttm_read_trylock(lock, &locked));

	if (unlikely(ret != 0)) {
		BUG_ON(locked);
		return ret;
	}

	return (locked) ? 0 : -EBUSY;
}

void ttm_write_unlock(struct ttm_lock *lock)
{
	spin_lock(&lock->lock);
	lock->rw = 0;
	wake_up_all(&lock->queue);
	spin_unlock(&lock->lock);
}
EXPORT_SYMBOL(ttm_write_unlock);

static bool __ttm_write_lock(struct ttm_lock *lock)
{
	bool locked = false;

	spin_lock(&lock->lock);
	if (unlikely(lock->kill_takers)) {
		send_sig(lock->signal, current, 0);
		spin_unlock(&lock->lock);
		return false;
	}
	if (lock->rw == 0 && ((lock->flags & ~TTM_WRITE_LOCK_PENDING) == 0)) {
		lock->rw = -1;
		lock->flags &= ~TTM_WRITE_LOCK_PENDING;
		locked = true;
	} else {
		lock->flags |= TTM_WRITE_LOCK_PENDING;
	}
	spin_unlock(&lock->lock);
	return locked;
}

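/*
 * Take the lock for writing.  While waiting, TTM_WRITE_LOCK_PENDING is
 * set so that no new readers can sneak in; if an interruptible wait is
 * aborted by a signal, the pending bit is cleared and the other waiters
 * are woken up again.
 */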
int ttm_write_lock(struct ttm_lock *lock, bool interruptible)
{
	int ret = 0;

	if (interruptible) {
		ret = wait_event_interruptible(lock->queue,
					       __ttm_write_lock(lock));
		if (unlikely(ret != 0)) {
			spin_lock(&lock->lock);
			lock->flags &= ~TTM_WRITE_LOCK_PENDING;
			wake_up_all(&lock->queue);
			spin_unlock(&lock->lock);
		}
	} else
		wait_event(lock->queue, __ttm_write_lock(lock));

	return ret;
}
EXPORT_SYMBOL(ttm_write_lock);

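/*
 * Downgrade a held write lock to a single read lock without releasing
 * it, waking up anyone who was waiting for the writer to finish.
 */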
void ttm_write_lock_downgrade(struct ttm_lock *lock)
{
	spin_lock(&lock->lock);
	lock->rw = 1;
	wake_up_all(&lock->queue);
	spin_unlock(&lock->lock);
}

static int __ttm_vt_unlock(struct ttm_lock *lock)
{
	int ret = 0;

	spin_lock(&lock->lock);
	if (unlikely(!(lock->flags & TTM_VT_LOCK)))
		ret = -EINVAL;
	lock->flags &= ~TTM_VT_LOCK;
	wake_up_all(&lock->queue);
	spin_unlock(&lock->lock);

	return ret;
}

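/*
 * Release callback for the base object set up in ttm_vt_lock().  It runs
 * when the object's last reference goes away, so the vt lock is dropped
 * even if the holding client dies without calling ttm_vt_unlock().
 */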
static void ttm_vt_lock_remove(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct ttm_lock *lock = container_of(base, struct ttm_lock, base);
	int ret;

	*p_base = NULL;
	ret = __ttm_vt_unlock(lock);
	BUG_ON(ret != 0);
}

static bool __ttm_vt_lock(struct ttm_lock *lock)
{
	bool locked = false;

	spin_lock(&lock->lock);
	if (lock->rw == 0) {
		lock->flags &= ~TTM_VT_LOCK_PENDING;
		lock->flags |= TTM_VT_LOCK;
		locked = true;
	} else {
		lock->flags |= TTM_VT_LOCK_PENDING;
	}
	spin_unlock(&lock->lock);
	return locked;
}

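/*
 * Take the lock in vt mode on behalf of a client file.  The lock is
 * tied to a ttm base object so that it is released automatically if the
 * client disappears while holding it; ttm_vt_unlock() drops it by
 * unreferencing that object.
 */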
int ttm_vt_lock(struct ttm_lock *lock,
		bool interruptible,
		struct ttm_object_file *tfile)
{
	int ret = 0;

	if (interruptible) {
		ret = wait_event_interruptible(lock->queue,
					       __ttm_vt_lock(lock));
		if (unlikely(ret != 0)) {
			spin_lock(&lock->lock);
			lock->flags &= ~TTM_VT_LOCK_PENDING;
			wake_up_all(&lock->queue);
			spin_unlock(&lock->lock);
			return ret;
		}
	} else
		wait_event(lock->queue, __ttm_vt_lock(lock));

	/*
	 * Add a base-object, the destructor of which will
	 * make sure the lock is released if the client dies
	 * while holding it.
	 */

	ret = ttm_base_object_init(tfile, &lock->base, false,
				   ttm_lock_type, &ttm_vt_lock_remove, NULL);
	if (ret)
		(void)__ttm_vt_unlock(lock);
	else
		lock->vt_holder = tfile;

	return ret;
}
EXPORT_SYMBOL(ttm_vt_lock);

int ttm_vt_unlock(struct ttm_lock *lock)
{
	return ttm_ref_object_base_unref(lock->vt_holder,
					 lock->base.hash.key, TTM_REF_USAGE);
}
EXPORT_SYMBOL(ttm_vt_unlock);

void ttm_suspend_unlock(struct ttm_lock *lock)
{
	spin_lock(&lock->lock);
	lock->flags &= ~TTM_SUSPEND_LOCK;
	wake_up_all(&lock->queue);
	spin_unlock(&lock->lock);
}
EXPORT_SYMBOL(ttm_suspend_unlock);

static bool __ttm_suspend_lock(struct ttm_lock *lock)
{
	bool locked = false;

	spin_lock(&lock->lock);
	if (lock->rw == 0) {
		lock->flags &= ~TTM_SUSPEND_LOCK_PENDING;
		lock->flags |= TTM_SUSPEND_LOCK;
		locked = true;
	} else {
		lock->flags |= TTM_SUSPEND_LOCK_PENDING;
	}
	spin_unlock(&lock->lock);
	return locked;
}

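/*
 * Take the lock in suspend mode, excluding readers and writers.  Unlike
 * the vt lock, this mode is not tied to a client file and the wait is
 * never interruptible.
 */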
void ttm_suspend_lock(struct ttm_lock *lock)
{
	wait_event(lock->queue, __ttm_suspend_lock(lock));
}
EXPORT_SYMBOL(ttm_suspend_lock);