// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include <linux/interval_tree_generic.h>
#include <linux/sched/mm.h>

#include "i915_sw_fence.h"
#include "i915_vma_resource.h"
#include "i915_drv.h"
#include "intel_memory_region.h"

#include "gt/intel_gtt.h"

static struct kmem_cache *slab_vma_resources;

/**
 * DOC:
 * We use a per-vm interval tree to keep track of vma_resources
 * scheduled for unbind but not yet unbound. The tree is protected by
 * the vm mutex, and nodes are removed just after the unbind fence signals.
 * The removal takes the vm mutex from a kernel thread which we need to
 * keep in mind so that we don't grab the mutex and try to wait for all
 * pending unbinds to complete, because that will temporarily block many
 * of the workqueue threads, and people will get angry.
 *
 * We should consider using a single ordered fence per VM instead but that
 * requires ordering the unbinds and might introduce unnecessary waiting
 * for unrelated unbinds. The amount of code will probably be roughly the
 * same due to the simplicity of using the interval tree interface.
 *
 * Another drawback of this interval tree is that the complexity of insertion
 * and removal of fences increases as O(ln(pending_unbinds)) instead of
 * O(1) for a single fence without interval tree.
 */
#define VMA_RES_START(_node) ((_node)->start)
#define VMA_RES_LAST(_node) ((_node)->start + (_node)->node_size - 1)
INTERVAL_TREE_DEFINE(struct i915_vma_resource, rb,
		     u64, __subtree_last,
		     VMA_RES_START, VMA_RES_LAST, static, vma_res_itree);
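
/*
 * INTERVAL_TREE_DEFINE() generates the static vma_res_itree_insert(),
 * vma_res_itree_remove(), vma_res_itree_iter_first() and
 * vma_res_itree_iter_next() helpers used throughout this file.
 */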
/* Callbacks for the unbind dma-fence. */

/**
 * i915_vma_resource_alloc - Allocate a vma resource
 *
 * Return: A pointer to a cleared struct i915_vma_resource or
 * a -ENOMEM error pointer if allocation fails.
 */
struct i915_vma_resource *i915_vma_resource_alloc(void)
{
	struct i915_vma_resource *vma_res =
		kmem_cache_zalloc(slab_vma_resources, GFP_KERNEL);

	return vma_res ? vma_res : ERR_PTR(-ENOMEM);
}

/**
 * i915_vma_resource_free - Free a vma resource
 * @vma_res: The vma resource to free.
 */
void i915_vma_resource_free(struct i915_vma_resource *vma_res)
{
	if (vma_res)
		kmem_cache_free(slab_vma_resources, vma_res);
}

static const char *get_driver_name(struct dma_fence *fence)
{
	return "vma unbind fence";
}

static const char *get_timeline_name(struct dma_fence *fence)
{
	return "unbound";
}

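/*
 * The actual free of the fence container is deferred through RCU since
 * dma-fences may still be looked up and referenced under rcu_read_lock(),
 * e.g. via dma_fence_get_rcu().
 */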
static void unbind_fence_free_rcu(struct rcu_head *head)
{
	struct i915_vma_resource *vma_res =
		container_of(head, typeof(*vma_res), unbind_fence.rcu);

	i915_vma_resource_free(vma_res);
}

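/*
 * Called when the last reference to the unbind fence is dropped: finalize
 * the embedded sw fence chain and free the containing vma resource after
 * an RCU grace period.
 */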
static void unbind_fence_release(struct dma_fence *fence)
{
	struct i915_vma_resource *vma_res =
		container_of(fence, typeof(*vma_res), unbind_fence);

	i915_sw_fence_fini(&vma_res->chain);

	call_rcu(&fence->rcu, unbind_fence_free_rcu);
}

static struct dma_fence_ops unbind_fence_ops = {
	.get_driver_name = get_driver_name,
	.get_timeline_name = get_timeline_name,
	.release = unbind_fence_release,
};

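/*
 * Drop a hold reference. When the last hold is released, signal the unbind
 * fence, release the runtime pm wakeref taken for the unbind (if any),
 * remove the node from the vm's pending_unbind tree and drop the reference
 * on the backing store sg-table, if present.
 */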
static void __i915_vma_resource_unhold(struct i915_vma_resource *vma_res)
{
	struct i915_address_space *vm;

	if (!refcount_dec_and_test(&vma_res->hold_count))
		return;

	dma_fence_signal(&vma_res->unbind_fence);

	vm = vma_res->vm;
	if (vma_res->wakeref)
		intel_runtime_pm_put(&vm->i915->runtime_pm, vma_res->wakeref);

	vma_res->vm = NULL;
	if (!RB_EMPTY_NODE(&vma_res->rb)) {
		mutex_lock(&vm->mutex);
		vma_res_itree_remove(vma_res, &vm->pending_unbind);
		mutex_unlock(&vm->mutex);
	}

	if (vma_res->bi.pages_rsgt)
		i915_refct_sgt_put(vma_res->bi.pages_rsgt);
}

/**
 * i915_vma_resource_unhold - Unhold the signaling of the vma resource unbind
 * fence.
 * @vma_res: The vma resource.
 * @lockdep_cookie: The lockdep cookie returned from i915_vma_resource_hold.
 *
 * The function may leave a dma_fence critical section.
 */
void i915_vma_resource_unhold(struct i915_vma_resource *vma_res,
			      bool lockdep_cookie)
{
	dma_fence_end_signalling(lockdep_cookie);

	if (IS_ENABLED(CONFIG_PROVE_LOCKING)) {
		unsigned long irq_flags;

		/* Inefficient open-coded might_lock_irqsave() */
		spin_lock_irqsave(&vma_res->lock, irq_flags);
		spin_unlock_irqrestore(&vma_res->lock, irq_flags);
	}

	__i915_vma_resource_unhold(vma_res);
}

/**
 * i915_vma_resource_hold - Hold the signaling of the vma resource unbind fence.
 * @vma_res: The vma resource.
 * @lockdep_cookie: Pointer to a bool serving as a lockdep cookie that should
 * be given as an argument to the pairing i915_vma_resource_unhold.
 *
 * If returning true, the function enters a dma_fence signalling critical
 * section if not in one already.
 *
 * Return: true if holding successful, false if not.
 */
bool i915_vma_resource_hold(struct i915_vma_resource *vma_res,
			    bool *lockdep_cookie)
{
	bool held = refcount_inc_not_zero(&vma_res->hold_count);

	if (held)
		*lockdep_cookie = dma_fence_begin_signalling();

	return held;
}
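
/*
 * Typical hold / unhold pairing (sketch only; the actual callers live
 * outside this file):
 *
 *	bool lockdep_cookie;
 *
 *	if (i915_vma_resource_hold(vma_res, &lockdep_cookie)) {
 *		... the unbind fence is guaranteed not to signal here ...
 *		i915_vma_resource_unhold(vma_res, lockdep_cookie);
 *	}
 */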

static void i915_vma_resource_unbind_work(struct work_struct *work)
{
	struct i915_vma_resource *vma_res =
		container_of(work, typeof(*vma_res), work);
	struct i915_address_space *vm = vma_res->vm;
	bool lockdep_cookie;

	lockdep_cookie = dma_fence_begin_signalling();
	if (likely(!vma_res->skip_pte_rewrite))
		vma_res->ops->unbind_vma(vm, vma_res);

	dma_fence_end_signalling(lockdep_cookie);
	__i915_vma_resource_unhold(vma_res);
	i915_vma_resource_put(vma_res);
}

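/*
 * Notify callback for the sw fence chain: on FENCE_COMPLETE, either run the
 * unbind directly (immediate_unbind) or queue it on system_unbound_wq; on
 * FENCE_FREE, drop the vma resource reference taken for the sw fence in
 * i915_vma_resource_unbind().
 */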
static int
i915_vma_resource_fence_notify(struct i915_sw_fence *fence,
			       enum i915_sw_fence_notify state)
{
	struct i915_vma_resource *vma_res =
		container_of(fence, typeof(*vma_res), chain);
	struct dma_fence *unbind_fence =
		&vma_res->unbind_fence;

	switch (state) {
	case FENCE_COMPLETE:
		dma_fence_get(unbind_fence);
		if (vma_res->immediate_unbind) {
			i915_vma_resource_unbind_work(&vma_res->work);
		} else {
			INIT_WORK(&vma_res->work, i915_vma_resource_unbind_work);
			queue_work(system_unbound_wq, &vma_res->work);
		}
		break;
	case FENCE_FREE:
		i915_vma_resource_put(vma_res);
		break;
	}

	return NOTIFY_DONE;
}

/**
 * i915_vma_resource_unbind - Unbind a vma resource
 * @vma_res: The vma resource to unbind.
 * @tlb: Pointer to vma->obj->mm.tlb associated with the resource
 *	 to be stored at vma_res->tlb. When not-NULL, it will be used
 *	 to do TLB cache invalidation before freeing a VMA resource.
 *	 Used only for async unbind.
 *
 * At this point this function does little more than publish a fence that
 * signals immediately unless signaling is held back.
 *
 * Return: A refcounted pointer to a dma-fence that signals when unbinding is
 * complete.
 */
struct dma_fence *i915_vma_resource_unbind(struct i915_vma_resource *vma_res,
					   u32 *tlb)
{
	struct i915_address_space *vm = vma_res->vm;

	vma_res->tlb = tlb;

	/* Reference for the sw fence */
	i915_vma_resource_get(vma_res);

	/* Caller must already have a wakeref in this case. */
	if (vma_res->needs_wakeref)
		vma_res->wakeref = intel_runtime_pm_get_if_in_use(&vm->i915->runtime_pm);

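	/*
	 * If the sw fence chain has no outstanding dependencies, the unbind
	 * runs immediately from the commit below and the node never enters
	 * the pending_unbind tree. Otherwise the resource is tracked in the
	 * tree and the unbind is deferred to a worker once all dependencies
	 * have signaled.
	 */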
	if (atomic_read(&vma_res->chain.pending) <= 1) {
		RB_CLEAR_NODE(&vma_res->rb);
		vma_res->immediate_unbind = 1;
	} else {
		vma_res_itree_insert(vma_res, &vma_res->vm->pending_unbind);
	}

	i915_sw_fence_commit(&vma_res->chain);

	return &vma_res->unbind_fence;
}

/**
 * __i915_vma_resource_init - Initialize a vma resource.
 * @vma_res: The vma resource to initialize
 *
 * Initializes the private members of a vma resource.
 */
void __i915_vma_resource_init(struct i915_vma_resource *vma_res)
{
	spin_lock_init(&vma_res->lock);
	dma_fence_init(&vma_res->unbind_fence, &unbind_fence_ops,
		       &vma_res->lock, 0, 0);
	refcount_set(&vma_res->hold_count, 1);
	i915_sw_fence_init(&vma_res->chain, i915_vma_resource_fence_notify);
}

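/*
 * With cache coloring, a node may need a guard page towards differently
 * colored neighbours, so widen the queried range by one GTT page at each
 * end so that pending unbinds of immediately adjacent nodes, which may
 * affect coloring, are also considered.
 */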
static void
i915_vma_resource_color_adjust_range(struct i915_address_space *vm,
				     u64 *start,
				     u64 *end)
{
	if (i915_vm_has_cache_coloring(vm)) {
		if (*start)
			*start -= I915_GTT_PAGE_SIZE;
		*end += I915_GTT_PAGE_SIZE;
	}
}

/**
 * i915_vma_resource_bind_dep_sync - Wait for / sync all unbinds touching a
 * certain vm range.
 * @vm: The vm to look at.
 * @offset: The range start.
 * @size: The range size.
 * @intr: Whether to wait interruptibly.
 *
 * The function needs to be called with the vm lock held.
 *
 * Return: Zero on success, -ERESTARTSYS if interrupted and @intr==true
 */
int i915_vma_resource_bind_dep_sync(struct i915_address_space *vm,
				    u64 offset,
				    u64 size,
				    bool intr)
{
	struct i915_vma_resource *node;
	u64 last = offset + size - 1;

	lockdep_assert_held(&vm->mutex);
	might_sleep();

	i915_vma_resource_color_adjust_range(vm, &offset, &last);
	node = vma_res_itree_iter_first(&vm->pending_unbind, offset, last);
	while (node) {
		int ret = dma_fence_wait(&node->unbind_fence, intr);

		if (ret)
			return ret;

		node = vma_res_itree_iter_next(node, offset, last);
	}

	return 0;
}

/**
 * i915_vma_resource_bind_dep_sync_all - Wait for / sync all unbinds of a vm,
 * releasing the vm lock while waiting.
 * @vm: The vm to look at.
 *
 * The function may not be called with the vm lock held.
 * Typically this is called at vm destruction to finish any pending
 * unbind operations. The vm mutex is released while waiting to avoid
 * stalling kernel workqueues trying to grab the mutex.
 */
void i915_vma_resource_bind_dep_sync_all(struct i915_address_space *vm)
{
	struct i915_vma_resource *node;
	struct dma_fence *fence;

	do {
		fence = NULL;
		mutex_lock(&vm->mutex);
		node = vma_res_itree_iter_first(&vm->pending_unbind, 0,
						U64_MAX);
		if (node)
			fence = dma_fence_get_rcu(&node->unbind_fence);
		mutex_unlock(&vm->mutex);

		if (fence) {
			/*
			 * The wait makes sure the node eventually removes
			 * itself from the tree.
			 */
			dma_fence_wait(fence, false);
			dma_fence_put(fence);
		}
	} while (node);
}

/**
 * i915_vma_resource_bind_dep_await - Have a struct i915_sw_fence await all
 * pending unbinds in a certain range of a vm.
 * @vm: The vm to look at.
 * @sw_fence: The struct i915_sw_fence that will be awaiting the unbinds.
 * @offset: The range start.
 * @size: The range size.
 * @intr: Whether to wait interruptibly.
 * @gfp: Allocation mode for memory allocations.
 *
 * The function makes @sw_fence await all pending unbinds in a certain
 * vm range before calling the complete notifier. To be able to await
 * each individual unbind, the function needs to allocate memory using
 * the @gfp allocation mode. If that fails, the function will instead
 * wait for the unbind fence to signal, using @intr to judge whether to
 * wait interruptibly or not. Note that @gfp should ideally be selected so
 * as to avoid any expensive memory allocation stalls and rather fail and
 * synchronize itself. For now the vm mutex is required when calling this
 * function, which means that @gfp can't call into direct reclaim. In reality
 * this means that during heavy memory pressure, we will sync in this
 * function.
 *
 * Return: Zero on success, -ERESTARTSYS if interrupted and @intr==true
 */
int i915_vma_resource_bind_dep_await(struct i915_address_space *vm,
				     struct i915_sw_fence *sw_fence,
				     u64 offset,
				     u64 size,
				     bool intr,
				     gfp_t gfp)
{
	struct i915_vma_resource *node;
	u64 last = offset + size - 1;

	lockdep_assert_held(&vm->mutex);
	might_alloc(gfp);
	might_sleep();

	i915_vma_resource_color_adjust_range(vm, &offset, &last);
	node = vma_res_itree_iter_first(&vm->pending_unbind, offset, last);
	while (node) {
		int ret;

		ret = i915_sw_fence_await_dma_fence(sw_fence,
						    &node->unbind_fence,
						    0, gfp);
		if (ret < 0) {
			ret = dma_fence_wait(&node->unbind_fence, intr);
			if (ret)
				return ret;
		}

		node = vma_res_itree_iter_next(node, offset, last);
	}

	return 0;
}

void i915_vma_resource_module_exit(void)
{
	kmem_cache_destroy(slab_vma_resources);
}

int __init i915_vma_resource_module_init(void)
{
	slab_vma_resources = KMEM_CACHE(i915_vma_resource, SLAB_HWCACHE_ALIGN);
	if (!slab_vma_resources)
		return -ENOMEM;

	return 0;
}