/*
 * Header file for reservations for dma-buf and ttm
 *
 * Copyright(C) 2011 Linaro Limited. All rights reserved.
 * Copyright (C) 2012-2013 Canonical Ltd
 * Copyright (C) 2012 Texas Instruments
 *
 * Authors:
 * Rob Clark <robdclark@gmail.com>
 * Maarten Lankhorst <maarten.lankhorst@canonical.com>
 * Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 *
 * Based on bo.c which bears the following copyright notice,
 * but is dual licensed:
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#ifndef _LINUX_RESERVATION_H
#define _LINUX_RESERVATION_H

#include <linux/ww_mutex.h>
#include <linux/dma-fence.h>
#include <linux/slab.h>
#include <linux/seqlock.h>
#include <linux/rcupdate.h>

extern struct ww_class reservation_ww_class;

struct dma_resv_list;
/**
 * enum dma_resv_usage - how the fences from a dma_resv obj are used
 *
 * This enum describes the different use cases for a dma_resv object and
 * controls which fences are returned when queried.
 *
 * The use cases are ordered KERNEL < WRITE < READ < BOOKKEEP, and when the
 * dma_resv object is asked for the fences of one use case the fences of all
 * lower use cases are returned as well.
 *
 * For example, when asking for WRITE fences the KERNEL fences are returned as
 * well. Similarly, when asked for READ fences both the WRITE and KERNEL
 * fences are returned too.
 */
enum dma_resv_usage {
	/**
	 * @DMA_RESV_USAGE_KERNEL: For in kernel memory management only.
	 *
	 * This should only be used for things like copying or clearing memory
	 * with a DMA hardware engine for the purpose of kernel memory
	 * management.
	 *
	 * Drivers must *always* wait for those fences before accessing the
	 * resource protected by the dma_resv object. The only exception is
	 * when the resource is known to be locked down in place by having
	 * been pinned previously.
	 */
	DMA_RESV_USAGE_KERNEL,

	/**
	 * @DMA_RESV_USAGE_WRITE: Implicit write synchronization.
	 *
	 * This should only be used for userspace command submissions which add
	 * an implicit write dependency.
	 */
	DMA_RESV_USAGE_WRITE,

	/**
	 * @DMA_RESV_USAGE_READ: Implicit read synchronization.
	 *
	 * This should only be used for userspace command submissions which add
	 * an implicit read dependency.
	 */
	DMA_RESV_USAGE_READ,

	/**
	 * @DMA_RESV_USAGE_BOOKKEEP: No implicit sync.
	 *
	 * This should be used by submissions which don't want to participate
	 * in implicit synchronization.
	 *
	 * The most common cases are preemption fences as well as page table
	 * updates and their TLB flushes.
	 */
	DMA_RESV_USAGE_BOOKKEEP
};
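
/*
 * A minimal sketch of the ordering rule above (not part of this header):
 * because KERNEL < WRITE, waiting with DMA_RESV_USAGE_WRITE also waits for
 * all KERNEL fences, so a hypothetical driver preparing a CPU read of the
 * buffer only needs a single call:
 *
 *	long ret;
 *
 *	ret = dma_resv_wait_timeout(obj, DMA_RESV_USAGE_WRITE, true,
 *				    MAX_SCHEDULE_TIMEOUT);
 *	if (ret < 0)
 *		return ret;
 */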

/**
 * dma_resv_usage_rw - helper for implicit sync
 * @write: true if we create a new implicit sync write
 *
 * This returns the implicit synchronization usage for write or read accesses,
 * see enum dma_resv_usage and &dma_buf.resv.
 */
static inline enum dma_resv_usage dma_resv_usage_rw(bool write)
{
	/* This looks confusing at first sight, but is indeed correct.
	 *
	 * The rationale is that new write operations need to wait for the
	 * existing read and write operations to finish.
	 * But a new read operation only needs to wait for the existing write
	 * operations to finish.
	 */
	return write ? DMA_RESV_USAGE_READ : DMA_RESV_USAGE_WRITE;
}
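
/*
 * Hedged usage sketch (not part of this header): a hypothetical command
 * submission path, with the reservation lock already held, could map its
 * userspace read/write flag to the usage it must wait for:
 *
 *	struct dma_resv_iter cursor;
 *	struct dma_fence *fence;
 *
 *	dma_resv_for_each_fence(&cursor, obj, dma_resv_usage_rw(is_write),
 *				fence) {
 *		... add @fence as a dependency of the new job ...
 *	}
 */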

/**
 * struct dma_resv - a reservation object manages fences for a buffer
 *
 * This is a container for dma_fence objects which needs to handle multiple use
 * cases.
 *
 * One use is to synchronize cross-driver access to a struct dma_buf, either for
 * dynamic buffer management or just to handle implicit synchronization between
 * different users of the buffer in userspace. See &dma_buf.resv for a more
 * in-depth discussion.
 *
 * The other major use is to manage access and locking within a driver in a
 * buffer based memory manager. struct ttm_buffer_object is the canonical
 * example here, since this is where reservation objects originated from. But
 * use in drivers is spreading and some drivers also manage struct
 * drm_gem_object with the same scheme.
 */
struct dma_resv {
	/**
	 * @lock:
	 *
	 * Update side lock. Don't use directly, instead use the wrapper
	 * functions like dma_resv_lock() and dma_resv_unlock().
	 *
	 * Drivers which use the reservation object to manage memory dynamically
	 * also use this lock to protect buffer object state like placement,
	 * allocation policies or throughout command submission.
	 */
	struct ww_mutex lock;

	/**
	 * @fences:
	 *
	 * Array of fences which were added to the dma_resv object.
	 *
	 * A new fence is added by calling dma_resv_add_fence(). Since this
	 * often needs to be done past the point of no return in command
	 * submission it cannot fail, and therefore sufficient slots need to be
	 * reserved by calling dma_resv_reserve_fences().
	 */
	struct dma_resv_list __rcu *fences;
};
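
/*
 * A hedged sketch of the typical embedding pattern (the buffer object type
 * and its fields are hypothetical, not defined by this header):
 *
 *	struct my_buffer_object {
 *		struct dma_resv resv;
 *		void *vaddr;
 *	};
 *
 *	static void my_bo_create(struct my_buffer_object *bo)
 *	{
 *		dma_resv_init(&bo->resv);
 *	}
 *
 *	static void my_bo_destroy(struct my_buffer_object *bo)
 *	{
 *		dma_resv_fini(&bo->resv);
 *	}
 */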

/**
 * struct dma_resv_iter - current position into the dma_resv fences
 *
 * Don't touch this directly in the driver, use the accessor functions instead.
 *
 * IMPORTANT
 *
 * When using the lockless iterators like dma_resv_iter_next_unlocked() or
 * dma_resv_for_each_fence_unlocked() beware that the iterator can be restarted.
 * Code which accumulates statistics or similar needs to check for this with
 * dma_resv_iter_is_restarted().
 */
struct dma_resv_iter {
	/** @obj: The dma_resv object we iterate over */
	struct dma_resv *obj;

	/** @usage: Return fences with this usage or lower. */
	enum dma_resv_usage usage;

	/** @fence: the currently handled fence */
	struct dma_fence *fence;

	/** @fence_usage: the usage of the current fence */
	enum dma_resv_usage fence_usage;

	/** @index: index into the shared fences */
	unsigned int index;

	/** @fences: the shared fences; private, *MUST* not be dereferenced */
	struct dma_resv_list *fences;

	/** @num_fences: number of fences */
	unsigned int num_fences;

	/** @is_restarted: true if this is the first returned fence */
	bool is_restarted;
};

struct dma_fence *dma_resv_iter_first_unlocked(struct dma_resv_iter *cursor);
struct dma_fence *dma_resv_iter_next_unlocked(struct dma_resv_iter *cursor);
struct dma_fence *dma_resv_iter_first(struct dma_resv_iter *cursor);
struct dma_fence *dma_resv_iter_next(struct dma_resv_iter *cursor);

/**
 * dma_resv_iter_begin - initialize a dma_resv_iter object
 * @cursor: The dma_resv_iter object to initialize
 * @obj: The dma_resv object which we want to iterate over
 * @usage: controls which fences to include, see enum dma_resv_usage.
 */
static inline void dma_resv_iter_begin(struct dma_resv_iter *cursor,
				       struct dma_resv *obj,
				       enum dma_resv_usage usage)
{
	cursor->obj = obj;
	cursor->usage = usage;
	cursor->fence = NULL;
}

/**
 * dma_resv_iter_end - cleanup a dma_resv_iter object
 * @cursor: the dma_resv_iter object which should be cleaned up
 *
 * Make sure that the reference to the fence in the cursor is properly
 * dropped.
 */
static inline void dma_resv_iter_end(struct dma_resv_iter *cursor)
{
	dma_fence_put(cursor->fence);
}

/**
 * dma_resv_iter_usage - Return the usage of the current fence
 * @cursor: the cursor of the current position
 *
 * Returns the usage of the currently processed fence.
 */
static inline enum dma_resv_usage
dma_resv_iter_usage(struct dma_resv_iter *cursor)
{
	return cursor->fence_usage;
}

/**
 * dma_resv_iter_is_restarted - test if this is the first fence after a restart
 * @cursor: the cursor with the current position
 *
 * Return true if this is the first fence in an iteration after a restart.
 */
static inline bool dma_resv_iter_is_restarted(struct dma_resv_iter *cursor)
{
	return cursor->is_restarted;
}

/**
 * dma_resv_for_each_fence_unlocked - unlocked fence iterator
 * @cursor: a struct dma_resv_iter pointer
 * @fence: the current fence
 *
 * Iterate over the fences in a struct dma_resv object without holding the
 * &dma_resv.lock, using RCU instead. The cursor needs to be initialized with
 * dma_resv_iter_begin() and cleaned up with dma_resv_iter_end(). Inside the
 * iterator a reference to the dma_fence is held and the RCU lock dropped.
 *
 * Beware that the iterator can be restarted when the struct dma_resv for
 * @cursor is modified. Code which accumulates statistics or similar needs to
 * check for this with dma_resv_iter_is_restarted(). For this reason prefer the
 * locked iterator dma_resv_for_each_fence() whenever possible.
 */
#define dma_resv_for_each_fence_unlocked(cursor, fence)			\
	for (fence = dma_resv_iter_first_unlocked(cursor);		\
	     fence; fence = dma_resv_iter_next_unlocked(cursor))
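
/*
 * Hedged sketch of restart-safe unlocked iteration; counting signaled fences
 * is just a made-up example statistic:
 *
 *	struct dma_resv_iter cursor;
 *	struct dma_fence *fence;
 *	unsigned int signaled = 0;
 *
 *	dma_resv_iter_begin(&cursor, obj, DMA_RESV_USAGE_BOOKKEEP);
 *	dma_resv_for_each_fence_unlocked(&cursor, fence) {
 *		if (dma_resv_iter_is_restarted(&cursor))
 *			signaled = 0;
 *		if (dma_fence_is_signaled(fence))
 *			++signaled;
 *	}
 *	dma_resv_iter_end(&cursor);
 */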

/**
 * dma_resv_for_each_fence - fence iterator
 * @cursor: a struct dma_resv_iter pointer
 * @obj: a dma_resv object pointer
 * @usage: controls which fences to return
 * @fence: the current fence
 *
 * Iterate over the fences in a struct dma_resv object while holding the
 * &dma_resv.lock. @usage controls which fences are returned, see
 * enum dma_resv_usage. The cursor initialisation is part of the iterator and
 * the fence stays valid as long as the lock is held, so no extra reference to
 * the fence is taken.
 */
#define dma_resv_for_each_fence(cursor, obj, usage, fence)	\
	for (dma_resv_iter_begin(cursor, obj, usage),	\
	     fence = dma_resv_iter_first(cursor); fence;	\
	     fence = dma_resv_iter_next(cursor))
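
/*
 * Hedged sketch of locked iteration, assuming the caller already holds the
 * reservation lock, e.g. via dma_resv_lock():
 *
 *	struct dma_resv_iter cursor;
 *	struct dma_fence *fence;
 *
 *	dma_resv_assert_held(obj);
 *	dma_resv_for_each_fence(&cursor, obj, DMA_RESV_USAGE_WRITE, fence) {
 *		... add @fence as a dependency of the new job ...
 *	}
 */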

#define dma_resv_held(obj) lockdep_is_held(&(obj)->lock.base)
#define dma_resv_assert_held(obj) lockdep_assert_held(&(obj)->lock.base)

#ifdef CONFIG_DEBUG_MUTEXES
void dma_resv_reset_max_fences(struct dma_resv *obj);
#else
static inline void dma_resv_reset_max_fences(struct dma_resv *obj) {}
#endif

/**
 * dma_resv_lock - lock the reservation object
 * @obj: the reservation object
 * @ctx: the locking context
 *
 * Locks the reservation object for exclusive access and modification. Note
 * that the lock is only against other writers, readers will run concurrently
 * with a writer under RCU. The seqlock is used to notify readers if they
 * overlap with a writer.
 *
 * As the reservation object may be locked by multiple parties in an
 * undefined order, a #ww_acquire_ctx is passed to unwind if a cycle
 * is detected. See ww_mutex_lock() and ww_acquire_init(). A reservation
 * object may be locked by itself by passing NULL as @ctx.
 *
 * When a die situation is indicated by returning -EDEADLK all locks held by
 * @ctx must be unlocked and then dma_resv_lock_slow() called on @obj.
 *
 * Unlocked by calling dma_resv_unlock().
 *
 * See also dma_resv_lock_interruptible() for the interruptible variant.
 */
static inline int dma_resv_lock(struct dma_resv *obj,
				struct ww_acquire_ctx *ctx)
{
	return ww_mutex_lock(&obj->lock, ctx);
}
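
/*
 * Hedged sketch of locking two reservation objects with deadlock backoff;
 * obj_a and obj_b are illustrative and error handling is elided:
 *
 *	struct ww_acquire_ctx ctx;
 *	int ret;
 *
 *	ww_acquire_init(&ctx, &reservation_ww_class);
 *
 *	ret = dma_resv_lock(obj_a, &ctx);
 *	if (!ret) {
 *		ret = dma_resv_lock(obj_b, &ctx);
 *		if (ret == -EDEADLK) {
 *			dma_resv_unlock(obj_a);
 *			dma_resv_lock_slow(obj_b, &ctx);
 *			... restart and lock obj_a again ...
 *		}
 *	}
 *	ww_acquire_done(&ctx);
 *	... use the buffers ...
 *	dma_resv_unlock(obj_b);
 *	dma_resv_unlock(obj_a);
 *	ww_acquire_fini(&ctx);
 */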

/**
 * dma_resv_lock_interruptible - lock the reservation object
 * @obj: the reservation object
 * @ctx: the locking context
 *
 * Locks the reservation object interruptibly for exclusive access and
 * modification. Note that the lock is only against other writers, readers
 * will run concurrently with a writer under RCU. The seqlock is used to
 * notify readers if they overlap with a writer.
 *
 * As the reservation object may be locked by multiple parties in an
 * undefined order, a #ww_acquire_ctx is passed to unwind if a cycle
 * is detected. See ww_mutex_lock() and ww_acquire_init(). A reservation
 * object may be locked by itself by passing NULL as @ctx.
 *
 * When a die situation is indicated by returning -EDEADLK all locks held by
 * @ctx must be unlocked and then dma_resv_lock_slow_interruptible() called on
 * @obj.
 *
 * Unlocked by calling dma_resv_unlock().
 */
static inline int dma_resv_lock_interruptible(struct dma_resv *obj,
					      struct ww_acquire_ctx *ctx)
{
	return ww_mutex_lock_interruptible(&obj->lock, ctx);
}

/**
 * dma_resv_lock_slow - slowpath lock the reservation object
 * @obj: the reservation object
 * @ctx: the locking context
 *
 * Acquires the reservation object after a die case. This function
 * will sleep until the lock becomes available. See dma_resv_lock() as
 * well.
 *
 * See also dma_resv_lock_slow_interruptible() for the interruptible variant.
 */
static inline void dma_resv_lock_slow(struct dma_resv *obj,
				      struct ww_acquire_ctx *ctx)
{
	ww_mutex_lock_slow(&obj->lock, ctx);
}

/**
 * dma_resv_lock_slow_interruptible - slowpath lock the reservation
 * object, interruptible
 * @obj: the reservation object
 * @ctx: the locking context
 *
 * Acquires the reservation object interruptibly after a die case. This
 * function will sleep until the lock becomes available. See
 * dma_resv_lock_interruptible() as well.
 */
static inline int dma_resv_lock_slow_interruptible(struct dma_resv *obj,
						   struct ww_acquire_ctx *ctx)
{
	return ww_mutex_lock_slow_interruptible(&obj->lock, ctx);
}

/**
 * dma_resv_trylock - trylock the reservation object
 * @obj: the reservation object
 *
 * Tries to lock the reservation object for exclusive access and modification.
 * Note that the lock is only against other writers, readers will run
 * concurrently with a writer under RCU. The seqlock is used to notify readers
 * if they overlap with a writer.
 *
 * Also note that since no context is provided, no deadlock protection is
 * possible, which is also not needed for a trylock.
 *
 * Returns true if the lock was acquired, false otherwise.
 */
static inline bool __must_check dma_resv_trylock(struct dma_resv *obj)
{
	return ww_mutex_trylock(&obj->lock, NULL);
}
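
/*
 * Hedged sketch: trylock is typically used in opportunistic paths such as
 * shrinker or eviction callbacks, where blocking on a contended object is
 * not acceptable (the skip label is illustrative):
 *
 *	if (!dma_resv_trylock(obj))
 *		goto skip;
 *	... evict or otherwise touch the buffer ...
 *	dma_resv_unlock(obj);
 */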

/**
 * dma_resv_is_locked - is the reservation object locked
 * @obj: the reservation object
 *
 * Returns true if the mutex is locked, false if unlocked.
 */
static inline bool dma_resv_is_locked(struct dma_resv *obj)
{
	return ww_mutex_is_locked(&obj->lock);
}

/**
 * dma_resv_locking_ctx - returns the context used to lock the object
 * @obj: the reservation object
 *
 * Returns the context used to lock a reservation object or NULL if no context
 * was used or the object is not locked at all.
 *
 * WARNING: This interface is pretty horrible, but TTM needs it because it
 * doesn't pass the struct ww_acquire_ctx around in some very long callchains.
 * Everyone else just uses it to check whether they're holding a reservation or
 * not.
 */
static inline struct ww_acquire_ctx *dma_resv_locking_ctx(struct dma_resv *obj)
{
	return READ_ONCE(obj->lock.ctx);
}

/**
 * dma_resv_unlock - unlock the reservation object
 * @obj: the reservation object
 *
 * Unlocks the reservation object following exclusive access.
 */
static inline void dma_resv_unlock(struct dma_resv *obj)
{
	dma_resv_reset_max_fences(obj);
	ww_mutex_unlock(&obj->lock);
}

void dma_resv_init(struct dma_resv *obj);
void dma_resv_fini(struct dma_resv *obj);
int dma_resv_reserve_fences(struct dma_resv *obj, unsigned int num_fences);
void dma_resv_add_fence(struct dma_resv *obj, struct dma_fence *fence,
			enum dma_resv_usage usage);
void dma_resv_replace_fences(struct dma_resv *obj, uint64_t context,
			     struct dma_fence *fence,
			     enum dma_resv_usage usage);
int dma_resv_get_fences(struct dma_resv *obj, enum dma_resv_usage usage,
			unsigned int *num_fences, struct dma_fence ***fences);
int dma_resv_get_singleton(struct dma_resv *obj, enum dma_resv_usage usage,
			   struct dma_fence **fence);
int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src);
long dma_resv_wait_timeout(struct dma_resv *obj, enum dma_resv_usage usage,
			   bool intr, unsigned long timeout);
bool dma_resv_test_signaled(struct dma_resv *obj, enum dma_resv_usage usage);
void dma_resv_describe(struct dma_resv *obj, struct seq_file *seq);
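
/*
 * Hedged sketch of the reserve-then-add pattern described for
 * &dma_resv.fences: a slot is reserved while failure is still an option, the
 * fence is added past the point of no return (the job fence is illustrative):
 *
 *	int ret;
 *
 *	dma_resv_lock(obj, NULL);
 *	ret = dma_resv_reserve_fences(obj, 1);
 *	if (ret) {
 *		dma_resv_unlock(obj);
 *		return ret;
 *	}
 *	... submit the job; from here on failing is no longer allowed ...
 *	dma_resv_add_fence(obj, job_fence, DMA_RESV_USAGE_WRITE);
 *	dma_resv_unlock(obj);
 */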

#endif /* _LINUX_RESERVATION_H */