1 /*
2 * Copyright © 2016 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 */
24
25 #include <linux/sched/mm.h>
26 #include <linux/dma-fence-array.h>
27 #include <drm/drm_gem.h>
28
29 #include "display/intel_frontbuffer.h"
30 #include "gem/i915_gem_lmem.h"
31 #include "gem/i915_gem_tiling.h"
32 #include "gt/intel_engine.h"
33 #include "gt/intel_engine_heartbeat.h"
34 #include "gt/intel_gt.h"
35 #include "gt/intel_gt_requests.h"
36
37 #include "i915_drv.h"
38 #include "i915_gem_evict.h"
39 #include "i915_sw_fence_work.h"
40 #include "i915_trace.h"
41 #include "i915_vma.h"
42 #include "i915_vma_resource.h"
43
44 static inline void assert_vma_held_evict(const struct i915_vma *vma)
45 {
46 /*
47 * We may be forced to unbind when the vm is dead, to clean it up.
48 * This is the only exception to the requirement of the object lock
49 * being held.
50 */
51 if (kref_read(&vma->vm->ref))
52 assert_object_held_shared(vma->obj);
53 }
54
55 static struct kmem_cache *slab_vmas;
56
57 static struct i915_vma *i915_vma_alloc(void)
58 {
59 return kmem_cache_zalloc(slab_vmas, GFP_KERNEL);
60 }
61
62 static void i915_vma_free(struct i915_vma *vma)
63 {
64 return kmem_cache_free(slab_vmas, vma);
65 }
66
67 #if IS_ENABLED(CONFIG_DRM_I915_ERRLOG_GEM) && IS_ENABLED(CONFIG_DRM_DEBUG_MM)
68
69 #include <linux/stackdepot.h>
70
71 static void vma_print_allocator(struct i915_vma *vma, const char *reason)
72 {
73 char buf[512];
74
75 if (!vma->node.stack) {
76 DRM_DEBUG_DRIVER("vma.node [%08llx + %08llx] %s: unknown owner\n",
77 vma->node.start, vma->node.size, reason);
78 return;
79 }
80
81 stack_depot_snprint(vma->node.stack, buf, sizeof(buf), 0);
82 DRM_DEBUG_DRIVER("vma.node [%08llx + %08llx] %s: inserted at %s\n",
83 vma->node.start, vma->node.size, reason, buf);
84 }
85
86 #else
87
88 static void vma_print_allocator(struct i915_vma *vma, const char *reason)
89 {
90 }
91
92 #endif
93
94 static inline struct i915_vma *active_to_vma(struct i915_active *ref)
95 {
96 return container_of(ref, typeof(struct i915_vma), active);
97 }
98
99 static int __i915_vma_active(struct i915_active *ref)
100 {
101 return i915_vma_tryget(active_to_vma(ref)) ? 0 : -ENOENT;
102 }
103
104 static void __i915_vma_retire(struct i915_active *ref)
105 {
106 i915_vma_put(active_to_vma(ref));
107 }
108
109 static struct i915_vma *
110 vma_create(struct drm_i915_gem_object *obj,
111 struct i915_address_space *vm,
112 const struct i915_gtt_view *view)
113 {
114 struct i915_vma *pos = ERR_PTR(-E2BIG);
115 struct i915_vma *vma;
116 struct rb_node *rb, **p;
117 int err;
118
119 /* The aliasing_ppgtt should never be used directly! */
120 GEM_BUG_ON(vm == &vm->gt->ggtt->alias->vm);
121
122 vma = i915_vma_alloc();
123 if (vma == NULL)
124 return ERR_PTR(-ENOMEM);
125
126 vma->ops = &vm->vma_ops;
127 vma->obj = obj;
128 vma->size = obj->base.size;
129 vma->display_alignment = I915_GTT_MIN_ALIGNMENT;
130
131 i915_active_init(&vma->active, __i915_vma_active, __i915_vma_retire, 0);
132
133 /* Declare ourselves safe for use inside shrinkers */
134 if (IS_ENABLED(CONFIG_LOCKDEP)) {
135 fs_reclaim_acquire(GFP_KERNEL);
136 might_lock(&vma->active.mutex);
137 fs_reclaim_release(GFP_KERNEL);
138 }
139
140 INIT_LIST_HEAD(&vma->closed_link);
141 INIT_LIST_HEAD(&vma->obj_link);
142 RB_CLEAR_NODE(&vma->obj_node);
143
144 if (view && view->type != I915_GTT_VIEW_NORMAL) {
145 vma->gtt_view = *view;
146 if (view->type == I915_GTT_VIEW_PARTIAL) {
147 GEM_BUG_ON(range_overflows_t(u64,
148 view->partial.offset,
149 view->partial.size,
150 obj->base.size >> PAGE_SHIFT));
151 vma->size = view->partial.size;
152 vma->size <<= PAGE_SHIFT;
153 GEM_BUG_ON(vma->size > obj->base.size);
154 } else if (view->type == I915_GTT_VIEW_ROTATED) {
155 vma->size = intel_rotation_info_size(&view->rotated);
156 vma->size <<= PAGE_SHIFT;
157 } else if (view->type == I915_GTT_VIEW_REMAPPED) {
158 vma->size = intel_remapped_info_size(&view->remapped);
159 vma->size <<= PAGE_SHIFT;
160 }
161 }
162
163 if (unlikely(vma->size > vm->total))
164 goto err_vma;
165
166 GEM_BUG_ON(!IS_ALIGNED(vma->size, I915_GTT_PAGE_SIZE));
167
168 err = mutex_lock_interruptible(&vm->mutex);
169 if (err) {
170 pos = ERR_PTR(err);
171 goto err_vma;
172 }
173
174 vma->vm = vm;
175 list_add_tail(&vma->vm_link, &vm->unbound_list);
176
177 spin_lock(&obj->vma.lock);
178 if (i915_is_ggtt(vm)) {
179 if (unlikely(overflows_type(vma->size, u32)))
180 goto err_unlock;
181
182 vma->fence_size = i915_gem_fence_size(vm->i915, vma->size,
183 i915_gem_object_get_tiling(obj),
184 i915_gem_object_get_stride(obj));
185 if (unlikely(vma->fence_size < vma->size || /* overflow */
186 vma->fence_size > vm->total))
187 goto err_unlock;
188
189 GEM_BUG_ON(!IS_ALIGNED(vma->fence_size, I915_GTT_MIN_ALIGNMENT));
190
191 vma->fence_alignment = i915_gem_fence_alignment(vm->i915, vma->size,
192 i915_gem_object_get_tiling(obj),
193 i915_gem_object_get_stride(obj));
194 GEM_BUG_ON(!is_power_of_2(vma->fence_alignment));
195
196 __set_bit(I915_VMA_GGTT_BIT, __i915_vma_flags(vma));
197 }
198
199 rb = NULL;
200 p = &obj->vma.tree.rb_node;
201 while (*p) {
202 long cmp;
203
204 rb = *p;
205 pos = rb_entry(rb, struct i915_vma, obj_node);
206
207 /*
208 * If the view already exists in the tree, another thread
209 * already created a matching vma, so return the older instance
210 * and dispose of ours.
211 */
212 cmp = i915_vma_compare(pos, vm, view);
213 if (cmp < 0)
214 p = &rb->rb_right;
215 else if (cmp > 0)
216 p = &rb->rb_left;
217 else
218 goto err_unlock;
219 }
220 rb_link_node(&vma->obj_node, rb, p);
221 rb_insert_color(&vma->obj_node, &obj->vma.tree);
222
223 if (i915_vma_is_ggtt(vma))
224 /*
225 * We put the GGTT vma at the start of the vma-list, followed
226 * by the ppGTT vma. This allows us to break early when
227 * iterating over only the GGTT vma for an object, see
228 * for_each_ggtt_vma()
229 */
230 list_add(&vma->obj_link, &obj->vma.list);
231 else
232 list_add_tail(&vma->obj_link, &obj->vma.list);
233
234 spin_unlock(&obj->vma.lock);
235 mutex_unlock(&vm->mutex);
236
237 return vma;
238
239 err_unlock:
240 spin_unlock(&obj->vma.lock);
241 list_del_init(&vma->vm_link);
242 mutex_unlock(&vm->mutex);
243 err_vma:
244 i915_vma_free(vma);
245 return pos;
246 }
247
248 static struct i915_vma *
249 i915_vma_lookup(struct drm_i915_gem_object *obj,
250 struct i915_address_space *vm,
251 const struct i915_gtt_view *view)
252 {
253 struct rb_node *rb;
254
255 rb = obj->vma.tree.rb_node;
256 while (rb) {
257 struct i915_vma *vma = rb_entry(rb, struct i915_vma, obj_node);
258 long cmp;
259
260 cmp = i915_vma_compare(vma, vm, view);
261 if (cmp == 0)
262 return vma;
263
264 if (cmp < 0)
265 rb = rb->rb_right;
266 else
267 rb = rb->rb_left;
268 }
269
270 return NULL;
271 }
272
273 /**
274 * i915_vma_instance - return the singleton instance of the VMA
275 * @obj: parent &struct drm_i915_gem_object to be mapped
276 * @vm: address space in which the mapping is located
277 * @view: additional mapping requirements
278 *
279 * i915_vma_instance() looks up an existing VMA of the @obj in the @vm with
280 * the same @view characteristics. If a match is not found, one is created.
281 * Once created, the VMA is kept until either the object is freed, or the
282 * address space is closed.
283 *
284 * Returns the vma, or an error pointer.
285 */
286 struct i915_vma *
287 i915_vma_instance(struct drm_i915_gem_object *obj,
288 struct i915_address_space *vm,
289 const struct i915_gtt_view *view)
290 {
291 struct i915_vma *vma;
292
293 GEM_BUG_ON(view && !i915_is_ggtt_or_dpt(vm));
294 GEM_BUG_ON(!kref_read(&vm->ref));
295
296 spin_lock(&obj->vma.lock);
297 vma = i915_vma_lookup(obj, vm, view);
298 spin_unlock(&obj->vma.lock);
299
300 /* vma_create() will resolve the race if another creates the vma */
301 if (unlikely(!vma))
302 vma = vma_create(obj, vm, view);
303
304 GEM_BUG_ON(!IS_ERR(vma) && i915_vma_compare(vma, vm, view));
305 return vma;
306 }
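/*
 * A minimal usage sketch (illustrative only, not taken from driver code;
 * assumes an object reference is held and "vm" points at the target
 * address space): callers obtain the singleton vma and then pin it:
 *
 *	struct i915_vma *vma;
 *
 *	vma = i915_vma_instance(obj, vm, NULL);
 *	if (IS_ERR(vma))
 *		return PTR_ERR(vma);
 *
 *	err = i915_vma_pin_ww(vma, ww, 0, 0, PIN_USER);
 *
 * Repeated calls with the same (obj, vm, view) triplet return the same
 * vma until the object is freed or the address space is closed.
 */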
307
308 struct i915_vma_work {
309 struct dma_fence_work base;
310 struct i915_address_space *vm;
311 struct i915_vm_pt_stash stash;
312 struct i915_vma_resource *vma_res;
313 struct drm_i915_gem_object *obj;
314 struct i915_sw_dma_fence_cb cb;
315 enum i915_cache_level cache_level;
316 unsigned int flags;
317 };
318
319 static void __vma_bind(struct dma_fence_work *work)
320 {
321 struct i915_vma_work *vw = container_of(work, typeof(*vw), base);
322 struct i915_vma_resource *vma_res = vw->vma_res;
323
324 /*
325 * We are about to bind the object, which must mean we have already
326 * signaled the work to potentially clear/move the pages underneath. If
327 * something went wrong at that stage then the object should have
328 * unknown_state set, in which case we need to skip the bind.
329 */
330 if (i915_gem_object_has_unknown_state(vw->obj))
331 return;
332
333 vma_res->ops->bind_vma(vma_res->vm, &vw->stash,
334 vma_res, vw->cache_level, vw->flags);
335 }
336
337 static void __vma_release(struct dma_fence_work *work)
338 {
339 struct i915_vma_work *vw = container_of(work, typeof(*vw), base);
340
341 if (vw->obj)
342 i915_gem_object_put(vw->obj);
343
344 i915_vm_free_pt_stash(vw->vm, &vw->stash);
345 if (vw->vma_res)
346 i915_vma_resource_put(vw->vma_res);
347 }
348
349 static const struct dma_fence_work_ops bind_ops = {
350 .name = "bind",
351 .work = __vma_bind,
352 .release = __vma_release,
353 };
354
355 struct i915_vma_work *i915_vma_work(void)
356 {
357 struct i915_vma_work *vw;
358
359 vw = kzalloc(sizeof(*vw), GFP_KERNEL);
360 if (!vw)
361 return NULL;
362
363 dma_fence_work_init(&vw->base, &bind_ops);
364 vw->base.dma.error = -EAGAIN; /* disable the worker by default */
365
366 return vw;
367 }
368
369 int i915_vma_wait_for_bind(struct i915_vma *vma)
370 {
371 int err = 0;
372
373 if (rcu_access_pointer(vma->active.excl.fence)) {
374 struct dma_fence *fence;
375
376 rcu_read_lock();
377 fence = dma_fence_get_rcu_safe(&vma->active.excl.fence);
378 rcu_read_unlock();
379 if (fence) {
380 err = dma_fence_wait(fence, true);
381 dma_fence_put(fence);
382 }
383 }
384
385 return err;
386 }
387
388 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
389 static int i915_vma_verify_bind_complete(struct i915_vma *vma)
390 {
391 struct dma_fence *fence = i915_active_fence_get(&vma->active.excl);
392 int err;
393
394 if (!fence)
395 return 0;
396
397 if (dma_fence_is_signaled(fence))
398 err = fence->error;
399 else
400 err = -EBUSY;
401
402 dma_fence_put(fence);
403
404 return err;
405 }
406 #else
407 #define i915_vma_verify_bind_complete(_vma) 0
408 #endif
409
410 I915_SELFTEST_EXPORT void
411 i915_vma_resource_init_from_vma(struct i915_vma_resource *vma_res,
412 struct i915_vma *vma)
413 {
414 struct drm_i915_gem_object *obj = vma->obj;
415
416 i915_vma_resource_init(vma_res, vma->vm, vma->pages, &vma->page_sizes,
417 obj->mm.rsgt, i915_gem_object_is_readonly(obj),
418 i915_gem_object_is_lmem(obj), obj->mm.region,
419 vma->ops, vma->private, vma->node.start,
420 vma->node.size, vma->size);
421 }
422
423 /**
424 * i915_vma_bind - Sets up PTEs for a VMA in its corresponding address space.
425 * @vma: VMA to map
426 * @cache_level: mapping cache level
427 * @flags: flags like global or local mapping
428 * @work: preallocated worker for allocating and binding the PTE
429 * @vma_res: pointer to a preallocated vma resource. The resource is either
430 * consumed or freed.
431 *
432 * DMA addresses are taken from the scatter-gather table of this object (or of
433 * this VMA in case of non-default GGTT views) and PTE entries set up.
434 * Note that DMA addresses are also the only part of the SG table we care about.
435 */
436 int i915_vma_bind(struct i915_vma *vma,
437 enum i915_cache_level cache_level,
438 u32 flags,
439 struct i915_vma_work *work,
440 struct i915_vma_resource *vma_res)
441 {
442 u32 bind_flags;
443 u32 vma_flags;
444 int ret;
445
446 lockdep_assert_held(&vma->vm->mutex);
447 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
448 GEM_BUG_ON(vma->size > vma->node.size);
449
450 if (GEM_DEBUG_WARN_ON(range_overflows(vma->node.start,
451 vma->node.size,
452 vma->vm->total))) {
453 i915_vma_resource_free(vma_res);
454 return -ENODEV;
455 }
456
457 if (GEM_DEBUG_WARN_ON(!flags)) {
458 i915_vma_resource_free(vma_res);
459 return -EINVAL;
460 }
461
462 bind_flags = flags;
463 bind_flags &= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
464
465 vma_flags = atomic_read(&vma->flags);
466 vma_flags &= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
467
468 bind_flags &= ~vma_flags;
469 if (bind_flags == 0) {
470 i915_vma_resource_free(vma_res);
471 return 0;
472 }
473
474 GEM_BUG_ON(!atomic_read(&vma->pages_count));
475
476 /* Wait for or await async unbinds touching our range */
477 if (work && bind_flags & vma->vm->bind_async_flags)
478 ret = i915_vma_resource_bind_dep_await(vma->vm,
479 &work->base.chain,
480 vma->node.start,
481 vma->node.size,
482 true,
483 GFP_NOWAIT |
484 __GFP_RETRY_MAYFAIL |
485 __GFP_NOWARN);
486 else
487 ret = i915_vma_resource_bind_dep_sync(vma->vm, vma->node.start,
488 vma->node.size, true);
489 if (ret) {
490 i915_vma_resource_free(vma_res);
491 return ret;
492 }
493
494 if (vma->resource || !vma_res) {
495 /* Rebinding with an additional I915_VMA_*_BIND */
496 GEM_WARN_ON(!vma_flags);
497 i915_vma_resource_free(vma_res);
498 } else {
499 i915_vma_resource_init_from_vma(vma_res, vma);
500 vma->resource = vma_res;
501 }
502 trace_i915_vma_bind(vma, bind_flags);
503 if (work && bind_flags & vma->vm->bind_async_flags) {
504 struct dma_fence *prev;
505
506 work->vma_res = i915_vma_resource_get(vma->resource);
507 work->cache_level = cache_level;
508 work->flags = bind_flags;
509
510 /*
511 * Note we only want to chain up to the migration fence on
512 * the pages (not the object itself). As we don't track that,
513 * yet, we have to use the exclusive fence instead.
514 *
515 * Also note that we do not want to track the async vma as
516 * part of the obj->resv->excl_fence as it only affects
517 * execution and not content or object's backing store lifetime.
518 */
519 prev = i915_active_set_exclusive(&vma->active, &work->base.dma);
520 if (prev) {
521 __i915_sw_fence_await_dma_fence(&work->base.chain,
522 prev,
523 &work->cb);
524 dma_fence_put(prev);
525 }
526
527 work->base.dma.error = 0; /* enable the queue_work() */
528 work->obj = i915_gem_object_get(vma->obj);
529 } else {
530 ret = i915_gem_object_wait_moving_fence(vma->obj, true);
531 if (ret) {
532 i915_vma_resource_free(vma->resource);
533 vma->resource = NULL;
534
535 return ret;
536 }
537 vma->ops->bind_vma(vma->vm, NULL, vma->resource, cache_level,
538 bind_flags);
539 }
540
541 atomic_or(bind_flags, &vma->flags);
542 return 0;
543 }
544
545 void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
546 {
547 void __iomem *ptr;
548 int err;
549
550 if (WARN_ON_ONCE(vma->obj->flags & I915_BO_ALLOC_GPU_ONLY))
551 return IOMEM_ERR_PTR(-EINVAL);
552
553 GEM_BUG_ON(!i915_vma_is_ggtt(vma));
554 GEM_BUG_ON(!i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND));
555 GEM_BUG_ON(i915_vma_verify_bind_complete(vma));
556
557 ptr = READ_ONCE(vma->iomap);
558 if (ptr == NULL) {
559 /*
560 * TODO: consider just using i915_gem_object_pin_map() for lmem
561 * instead, which already supports mapping non-contiguous chunks
562 * of pages, that way we can also drop the
563 * I915_BO_ALLOC_CONTIGUOUS when allocating the object.
564 */
565 if (i915_gem_object_is_lmem(vma->obj)) {
566 ptr = i915_gem_object_lmem_io_map(vma->obj, 0,
567 vma->obj->base.size);
568 } else if (i915_vma_is_map_and_fenceable(vma)) {
569 ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->iomap,
570 vma->node.start,
571 vma->node.size);
572 } else {
573 ptr = (void __iomem *)
574 i915_gem_object_pin_map(vma->obj, I915_MAP_WC);
575 if (IS_ERR(ptr)) {
576 err = PTR_ERR(ptr);
577 goto err;
578 }
579 ptr = page_pack_bits(ptr, 1);
580 }
581
582 if (ptr == NULL) {
583 err = -ENOMEM;
584 goto err;
585 }
586
587 if (unlikely(cmpxchg(&vma->iomap, NULL, ptr))) {
588 if (page_unmask_bits(ptr))
589 __i915_gem_object_release_map(vma->obj);
590 else
591 io_mapping_unmap(ptr);
592 ptr = vma->iomap;
593 }
594 }
595
596 __i915_vma_pin(vma);
597
598 err = i915_vma_pin_fence(vma);
599 if (err)
600 goto err_unpin;
601
602 i915_vma_set_ggtt_write(vma);
603
604 /* NB Access through the GTT requires the device to be awake. */
605 return page_mask_bits(ptr);
606
607 err_unpin:
608 __i915_vma_unpin(vma);
609 err:
610 return IOMEM_ERR_PTR(err);
611 }
612
613 void i915_vma_flush_writes(struct i915_vma *vma)
614 {
615 if (i915_vma_unset_ggtt_write(vma))
616 intel_gt_flush_ggtt_writes(vma->vm->gt);
617 }
618
619 void i915_vma_unpin_iomap(struct i915_vma *vma)
620 {
621 GEM_BUG_ON(vma->iomap == NULL);
622
623 /* XXX We keep the mapping until __i915_vma_unbind()/evict() */
624
625 i915_vma_flush_writes(vma);
626
627 i915_vma_unpin_fence(vma);
628 i915_vma_unpin(vma);
629 }
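/*
 * Illustrative pairing of the iomap helpers above (a sketch, not driver
 * code): accesses through the returned pointer go via the GGTT aperture
 * and must stay within the pin/unpin bracket:
 *
 *	void __iomem *map = i915_vma_pin_iomap(vma);
 *
 *	if (IS_ERR(map))
 *		return PTR_ERR(map);
 *	writel(value, map + offset);
 *	i915_vma_unpin_iomap(vma);
 *
 * i915_vma_unpin_iomap() flushes any outstanding GGTT writes but keeps
 * the mapping cached until the vma is unbound.
 */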
630
631 void i915_vma_unpin_and_release(struct i915_vma **p_vma, unsigned int flags)
632 {
633 struct i915_vma *vma;
634 struct drm_i915_gem_object *obj;
635
636 vma = fetch_and_zero(p_vma);
637 if (!vma)
638 return;
639
640 obj = vma->obj;
641 GEM_BUG_ON(!obj);
642
643 i915_vma_unpin(vma);
644
645 if (flags & I915_VMA_RELEASE_MAP)
646 i915_gem_object_unpin_map(obj);
647
648 i915_gem_object_put(obj);
649 }
650
651 bool i915_vma_misplaced(const struct i915_vma *vma,
652 u64 size, u64 alignment, u64 flags)
653 {
654 if (!drm_mm_node_allocated(&vma->node))
655 return false;
656
657 if (test_bit(I915_VMA_ERROR_BIT, __i915_vma_flags(vma)))
658 return true;
659
660 if (vma->node.size < size)
661 return true;
662
663 GEM_BUG_ON(alignment && !is_power_of_2(alignment));
664 if (alignment && !IS_ALIGNED(vma->node.start, alignment))
665 return true;
666
667 if (flags & PIN_MAPPABLE && !i915_vma_is_map_and_fenceable(vma))
668 return true;
669
670 if (flags & PIN_OFFSET_BIAS &&
671 vma->node.start < (flags & PIN_OFFSET_MASK))
672 return true;
673
674 if (flags & PIN_OFFSET_FIXED &&
675 vma->node.start != (flags & PIN_OFFSET_MASK))
676 return true;
677
678 return false;
679 }
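/*
 * Typical caller pattern (illustrative sketch only): i915_vma_misplaced()
 * decides whether an already-bound vma satisfies a new pin request; if
 * not, the vma is unbound before being pinned again, e.g.:
 *
 *	if (i915_vma_misplaced(vma, size, alignment, flags)) {
 *		err = i915_vma_unbind(vma);
 *		if (err)
 *			return err;
 *	}
 */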
680
681 void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
682 {
683 bool mappable, fenceable;
684
685 GEM_BUG_ON(!i915_vma_is_ggtt(vma));
686 GEM_BUG_ON(!vma->fence_size);
687
688 fenceable = (vma->node.size >= vma->fence_size &&
689 IS_ALIGNED(vma->node.start, vma->fence_alignment));
690
691 mappable = vma->node.start + vma->fence_size <= i915_vm_to_ggtt(vma->vm)->mappable_end;
692
693 if (mappable && fenceable)
694 set_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
695 else
696 clear_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
697 }
698
699 bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long color)
700 {
701 struct drm_mm_node *node = &vma->node;
702 struct drm_mm_node *other;
703
704 /*
705 * On some machines we have to be careful when putting differing types
706 * of snoopable memory together to avoid the prefetcher crossing memory
707 * domains and dying. During vm initialisation, we decide whether or not
708 * these constraints apply and set the drm_mm.color_adjust
709 * appropriately.
710 */
711 if (!i915_vm_has_cache_coloring(vma->vm))
712 return true;
713
714 /* Only valid to be called on an already inserted vma */
715 GEM_BUG_ON(!drm_mm_node_allocated(node));
716 GEM_BUG_ON(list_empty(&node->node_list));
717
718 other = list_prev_entry(node, node_list);
719 if (i915_node_color_differs(other, color) &&
720 !drm_mm_hole_follows(other))
721 return false;
722
723 other = list_next_entry(node, node_list);
724 if (i915_node_color_differs(other, color) &&
725 !drm_mm_hole_follows(node))
726 return false;
727
728 return true;
729 }
730
731 /**
732 * i915_vma_insert - finds a slot for the vma in its address space
733 * @vma: the vma
734 * @size: requested size in bytes (can be larger than the VMA)
735 * @alignment: required alignment
736 * @flags: mask of PIN_* flags to use
737 *
738 * First we try to allocate some free space that meets the requirements for
739 * the VMA. Failing that, if the flags permit, it will evict an old VMA,
740 * preferably the oldest idle entry to make room for the new VMA.
741 *
742 * Returns:
743 * 0 on success, negative error code otherwise.
744 */
745 static int
746 i915_vma_insert(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
747 u64 size, u64 alignment, u64 flags)
748 {
749 unsigned long color;
750 u64 start, end;
751 int ret;
752
753 GEM_BUG_ON(i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
754 GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
755
756 size = max(size, vma->size);
757 alignment = max(alignment, vma->display_alignment);
758 if (flags & PIN_MAPPABLE) {
759 size = max_t(typeof(size), size, vma->fence_size);
760 alignment = max_t(typeof(alignment),
761 alignment, vma->fence_alignment);
762 }
763
764 GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
765 GEM_BUG_ON(!IS_ALIGNED(alignment, I915_GTT_MIN_ALIGNMENT));
766 GEM_BUG_ON(!is_power_of_2(alignment));
767
768 start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
769 GEM_BUG_ON(!IS_ALIGNED(start, I915_GTT_PAGE_SIZE));
770
771 end = vma->vm->total;
772 if (flags & PIN_MAPPABLE)
773 end = min_t(u64, end, i915_vm_to_ggtt(vma->vm)->mappable_end);
774 if (flags & PIN_ZONE_4G)
775 end = min_t(u64, end, (1ULL << 32) - I915_GTT_PAGE_SIZE);
776 GEM_BUG_ON(!IS_ALIGNED(end, I915_GTT_PAGE_SIZE));
777
778 alignment = max(alignment, i915_vm_obj_min_alignment(vma->vm, vma->obj));
779 /*
780 * for compact-pt we round up the reservation to prevent
781 * any smaller pages being used within the same PDE
782 */
783 if (NEEDS_COMPACT_PT(vma->vm->i915))
784 size = round_up(size, alignment);
785
786 /* If binding the object/GGTT view requires more space than the entire
787 * aperture has, reject it early before evicting everything in a vain
788 * attempt to find space.
789 */
790 if (size > end) {
791 DRM_DEBUG("Attempting to bind an object larger than the aperture: request=%llu > %s aperture=%llu\n",
792 size, flags & PIN_MAPPABLE ? "mappable" : "total",
793 end);
794 return -ENOSPC;
795 }
796
797 color = 0;
798
799 if (i915_vm_has_cache_coloring(vma->vm))
800 color = vma->obj->cache_level;
801
802 if (flags & PIN_OFFSET_FIXED) {
803 u64 offset = flags & PIN_OFFSET_MASK;
804 if (!IS_ALIGNED(offset, alignment) ||
805 range_overflows(offset, size, end))
806 return -EINVAL;
807
808 ret = i915_gem_gtt_reserve(vma->vm, ww, &vma->node,
809 size, offset, color,
810 flags);
811 if (ret)
812 return ret;
813 } else {
814 /*
815 * We only support huge gtt pages through the 48b PPGTT,
816 * however we also don't want to force any alignment for
817 * objects which need to be tightly packed into the low 32bits.
818 *
819 * Note that we assume that GGTT are limited to 4GiB for the
820 * foreseeable future. See also i915_ggtt_offset().
821 */
822 if (upper_32_bits(end - 1) &&
823 vma->page_sizes.sg > I915_GTT_PAGE_SIZE) {
824 /*
825 * We can't mix 64K and 4K PTEs in the same page-table
826 * (2M block), and so to avoid the ugliness and
827 * complexity of coloring we opt for just aligning 64K
828 * objects to 2M.
829 */
830 u64 page_alignment =
831 rounddown_pow_of_two(vma->page_sizes.sg |
832 I915_GTT_PAGE_SIZE_2M);
833
834 /*
835 * Check we don't expand for the limited Global GTT
836 * (mappable aperture is even more precious!). This
837 * also checks that we exclude the aliasing-ppgtt.
838 */
839 GEM_BUG_ON(i915_vma_is_ggtt(vma));
840
841 alignment = max(alignment, page_alignment);
842
843 if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K)
844 size = round_up(size, I915_GTT_PAGE_SIZE_2M);
845 }
846
847 ret = i915_gem_gtt_insert(vma->vm, ww, &vma->node,
848 size, alignment, color,
849 start, end, flags);
850 if (ret)
851 return ret;
852
853 GEM_BUG_ON(vma->node.start < start);
854 GEM_BUG_ON(vma->node.start + vma->node.size > end);
855 }
856 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
857 GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, color));
858
859 list_move_tail(&vma->vm_link, &vma->vm->bound_list);
860
861 return 0;
862 }
863
864 static void
865 i915_vma_detach(struct i915_vma *vma)
866 {
867 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
868 GEM_BUG_ON(i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
869
870 /*
871 * And finally now the object is completely decoupled from this
872 * vma, we can drop its hold on the backing storage and allow
873 * it to be reaped by the shrinker.
874 */
875 list_move_tail(&vma->vm_link, &vma->vm->unbound_list);
876 }
877
878 static bool try_qad_pin(struct i915_vma *vma, unsigned int flags)
879 {
880 unsigned int bound;
881
882 bound = atomic_read(&vma->flags);
883
884 if (flags & PIN_VALIDATE) {
885 flags &= I915_VMA_BIND_MASK;
886
887 return (flags & bound) == flags;
888 }
889
890 /* with the lock mandatory for unbind, we don't race here */
891 flags &= I915_VMA_BIND_MASK;
892 do {
893 if (unlikely(flags & ~bound))
894 return false;
895
896 if (unlikely(bound & (I915_VMA_OVERFLOW | I915_VMA_ERROR)))
897 return false;
898
899 GEM_BUG_ON(((bound + 1) & I915_VMA_PIN_MASK) == 0);
900 } while (!atomic_try_cmpxchg(&vma->flags, &bound, bound + 1));
901
902 return true;
903 }
904
905 static struct scatterlist *
906 rotate_pages(struct drm_i915_gem_object *obj, unsigned int offset,
907 unsigned int width, unsigned int height,
908 unsigned int src_stride, unsigned int dst_stride,
909 struct sg_table *st, struct scatterlist *sg)
910 {
911 unsigned int column, row;
912 unsigned int src_idx;
913
914 for (column = 0; column < width; column++) {
915 unsigned int left;
916
917 src_idx = src_stride * (height - 1) + column + offset;
918 for (row = 0; row < height; row++) {
919 st->nents++;
920 /*
921 * We don't need the pages, but need to initialize
922 * the entries so the sg list can be happily traversed.
923 * The only thing we need are DMA addresses.
924 */
925 sg_set_page(sg, NULL, I915_GTT_PAGE_SIZE, 0);
926 sg_dma_address(sg) =
927 i915_gem_object_get_dma_address(obj, src_idx);
928 sg_dma_len(sg) = I915_GTT_PAGE_SIZE;
929 sg = sg_next(sg);
930 src_idx -= src_stride;
931 }
932
933 left = (dst_stride - height) * I915_GTT_PAGE_SIZE;
934
935 if (!left)
936 continue;
937
938 st->nents++;
939
940 /*
941 * The DE ignores the PTEs for the padding tiles, the sg entry
942 * here is just a convenience to indicate how many padding PTEs
943 * to insert at this spot.
944 */
945 sg_set_page(sg, NULL, left, 0);
946 sg_dma_address(sg) = 0;
947 sg_dma_len(sg) = left;
948 sg = sg_next(sg);
949 }
950
951 return sg;
952 }
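/*
 * Worked example for the loop above: a 2x2 tile view with src_stride = 2
 * and offset = 0 emits the source pages column by column starting from
 * the bottom row, i.e. in the order 2, 0, 3, 1, so that the display
 * engine can scan the rotated image out of the GTT linearly.
 */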
953
954 static noinline struct sg_table *
955 intel_rotate_pages(struct intel_rotation_info *rot_info,
956 struct drm_i915_gem_object *obj)
957 {
958 unsigned int size = intel_rotation_info_size(rot_info);
959 struct drm_i915_private *i915 = to_i915(obj->base.dev);
960 struct sg_table *st;
961 struct scatterlist *sg;
962 int ret = -ENOMEM;
963 int i;
964
965 /* Allocate target SG list. */
966 st = kmalloc(sizeof(*st), GFP_KERNEL);
967 if (!st)
968 goto err_st_alloc;
969
970 ret = sg_alloc_table(st, size, GFP_KERNEL);
971 if (ret)
972 goto err_sg_alloc;
973
974 st->nents = 0;
975 sg = st->sgl;
976
977 for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++)
978 sg = rotate_pages(obj, rot_info->plane[i].offset,
979 rot_info->plane[i].width, rot_info->plane[i].height,
980 rot_info->plane[i].src_stride,
981 rot_info->plane[i].dst_stride,
982 st, sg);
983
984 return st;
985
986 err_sg_alloc:
987 kfree(st);
988 err_st_alloc:
989
990 drm_dbg(&i915->drm, "Failed to create rotated mapping for object size %zu! (%ux%u tiles, %u pages)\n",
991 obj->base.size, rot_info->plane[0].width,
992 rot_info->plane[0].height, size);
993
994 return ERR_PTR(ret);
995 }
996
997 static struct scatterlist *
998 add_padding_pages(unsigned int count,
999 struct sg_table *st, struct scatterlist *sg)
1000 {
1001 st->nents++;
1002
1003 /*
1004 * The DE ignores the PTEs for the padding tiles, the sg entry
1005 * here is just a convenience to indicate how many padding PTEs
1006 * to insert at this spot.
1007 */
1008 sg_set_page(sg, NULL, count * I915_GTT_PAGE_SIZE, 0);
1009 sg_dma_address(sg) = 0;
1010 sg_dma_len(sg) = count * I915_GTT_PAGE_SIZE;
1011 sg = sg_next(sg);
1012
1013 return sg;
1014 }
1015
1016 static struct scatterlist *
1017 remap_tiled_color_plane_pages(struct drm_i915_gem_object *obj,
1018 unsigned int offset, unsigned int alignment_pad,
1019 unsigned int width, unsigned int height,
1020 unsigned int src_stride, unsigned int dst_stride,
1021 struct sg_table *st, struct scatterlist *sg,
1022 unsigned int *gtt_offset)
1023 {
1024 unsigned int row;
1025
1026 if (!width || !height)
1027 return sg;
1028
1029 if (alignment_pad)
1030 sg = add_padding_pages(alignment_pad, st, sg);
1031
1032 for (row = 0; row < height; row++) {
1033 unsigned int left = width * I915_GTT_PAGE_SIZE;
1034
1035 while (left) {
1036 dma_addr_t addr;
1037 unsigned int length;
1038
1039 /*
1040 * We don't need the pages, but need to initialize
1041 * the entries so the sg list can be happily traversed.
1042 * The only thing we need are DMA addresses.
1043 */
1044
1045 addr = i915_gem_object_get_dma_address_len(obj, offset, &length);
1046
1047 length = min(left, length);
1048
1049 st->nents++;
1050
1051 sg_set_page(sg, NULL, length, 0);
1052 sg_dma_address(sg) = addr;
1053 sg_dma_len(sg) = length;
1054 sg = sg_next(sg);
1055
1056 offset += length / I915_GTT_PAGE_SIZE;
1057 left -= length;
1058 }
1059
1060 offset += src_stride - width;
1061
1062 left = (dst_stride - width) * I915_GTT_PAGE_SIZE;
1063
1064 if (!left)
1065 continue;
1066
1067 sg = add_padding_pages(left >> PAGE_SHIFT, st, sg);
1068 }
1069
1070 *gtt_offset += alignment_pad + dst_stride * height;
1071
1072 return sg;
1073 }
1074
1075 static struct scatterlist *
1076 remap_contiguous_pages(struct drm_i915_gem_object *obj,
1077 unsigned int obj_offset,
1078 unsigned int count,
1079 struct sg_table *st, struct scatterlist *sg)
1080 {
1081 struct scatterlist *iter;
1082 unsigned int offset;
1083
1084 iter = i915_gem_object_get_sg_dma(obj, obj_offset, &offset);
1085 GEM_BUG_ON(!iter);
1086
1087 do {
1088 unsigned int len;
1089
1090 len = min(sg_dma_len(iter) - (offset << PAGE_SHIFT),
1091 count << PAGE_SHIFT);
1092 sg_set_page(sg, NULL, len, 0);
1093 sg_dma_address(sg) =
1094 sg_dma_address(iter) + (offset << PAGE_SHIFT);
1095 sg_dma_len(sg) = len;
1096
1097 st->nents++;
1098 count -= len >> PAGE_SHIFT;
1099 if (count == 0)
1100 return sg;
1101
1102 sg = __sg_next(sg);
1103 iter = __sg_next(iter);
1104 offset = 0;
1105 } while (1);
1106 }
1107
1108 static struct scatterlist *
1109 remap_linear_color_plane_pages(struct drm_i915_gem_object *obj,
1110 unsigned int obj_offset, unsigned int alignment_pad,
1111 unsigned int size,
1112 struct sg_table *st, struct scatterlist *sg,
1113 unsigned int *gtt_offset)
1114 {
1115 if (!size)
1116 return sg;
1117
1118 if (alignment_pad)
1119 sg = add_padding_pages(alignment_pad, st, sg);
1120
1121 sg = remap_contiguous_pages(obj, obj_offset, size, st, sg);
1122 sg = sg_next(sg);
1123
1124 *gtt_offset += alignment_pad + size;
1125
1126 return sg;
1127 }
1128
1129 static struct scatterlist *
1130 remap_color_plane_pages(const struct intel_remapped_info *rem_info,
1131 struct drm_i915_gem_object *obj,
1132 int color_plane,
1133 struct sg_table *st, struct scatterlist *sg,
1134 unsigned int *gtt_offset)
1135 {
1136 unsigned int alignment_pad = 0;
1137
1138 if (rem_info->plane_alignment)
1139 alignment_pad = ALIGN(*gtt_offset, rem_info->plane_alignment) - *gtt_offset;
1140
1141 if (rem_info->plane[color_plane].linear)
1142 sg = remap_linear_color_plane_pages(obj,
1143 rem_info->plane[color_plane].offset,
1144 alignment_pad,
1145 rem_info->plane[color_plane].size,
1146 st, sg,
1147 gtt_offset);
1148
1149 else
1150 sg = remap_tiled_color_plane_pages(obj,
1151 rem_info->plane[color_plane].offset,
1152 alignment_pad,
1153 rem_info->plane[color_plane].width,
1154 rem_info->plane[color_plane].height,
1155 rem_info->plane[color_plane].src_stride,
1156 rem_info->plane[color_plane].dst_stride,
1157 st, sg,
1158 gtt_offset);
1159
1160 return sg;
1161 }
1162
1163 static noinline struct sg_table *
1164 intel_remap_pages(struct intel_remapped_info *rem_info,
1165 struct drm_i915_gem_object *obj)
1166 {
1167 unsigned int size = intel_remapped_info_size(rem_info);
1168 struct drm_i915_private *i915 = to_i915(obj->base.dev);
1169 struct sg_table *st;
1170 struct scatterlist *sg;
1171 unsigned int gtt_offset = 0;
1172 int ret = -ENOMEM;
1173 int i;
1174
1175 /* Allocate target SG list. */
1176 st = kmalloc(sizeof(*st), GFP_KERNEL);
1177 if (!st)
1178 goto err_st_alloc;
1179
1180 ret = sg_alloc_table(st, size, GFP_KERNEL);
1181 if (ret)
1182 goto err_sg_alloc;
1183
1184 st->nents = 0;
1185 sg = st->sgl;
1186
1187 for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++)
1188 sg = remap_color_plane_pages(rem_info, obj, i, st, sg, &gtt_offset);
1189
1190 i915_sg_trim(st);
1191
1192 return st;
1193
1194 err_sg_alloc:
1195 kfree(st);
1196 err_st_alloc:
1197
1198 drm_dbg(&i915->drm, "Failed to create remapped mapping for object size %zu! (%ux%u tiles, %u pages)\n",
1199 obj->base.size, rem_info->plane[0].width,
1200 rem_info->plane[0].height, size);
1201
1202 return ERR_PTR(ret);
1203 }
1204
1205 static noinline struct sg_table *
1206 intel_partial_pages(const struct i915_gtt_view *view,
1207 struct drm_i915_gem_object *obj)
1208 {
1209 struct sg_table *st;
1210 struct scatterlist *sg;
1211 unsigned int count = view->partial.size;
1212 int ret = -ENOMEM;
1213
1214 st = kmalloc(sizeof(*st), GFP_KERNEL);
1215 if (!st)
1216 goto err_st_alloc;
1217
1218 ret = sg_alloc_table(st, count, GFP_KERNEL);
1219 if (ret)
1220 goto err_sg_alloc;
1221
1222 st->nents = 0;
1223
1224 sg = remap_contiguous_pages(obj, view->partial.offset, count, st, st->sgl);
1225
1226 sg_mark_end(sg);
1227 i915_sg_trim(st); /* Drop any unused tail entries. */
1228
1229 return st;
1230
1231 err_sg_alloc:
1232 kfree(st);
1233 err_st_alloc:
1234 return ERR_PTR(ret);
1235 }
1236
1237 static int
1238 __i915_vma_get_pages(struct i915_vma *vma)
1239 {
1240 struct sg_table *pages;
1241
1242 /*
1243 * The vma->pages are only valid within the lifespan of the borrowed
1244 * obj->mm.pages. When the obj->mm.pages sg_table is regenerated, so
1245 * must be the vma->pages. A simple rule is that vma->pages must only
1246 * be accessed when the obj->mm.pages are pinned.
1247 */
1248 GEM_BUG_ON(!i915_gem_object_has_pinned_pages(vma->obj));
1249
1250 switch (vma->gtt_view.type) {
1251 default:
1252 GEM_BUG_ON(vma->gtt_view.type);
1253 fallthrough;
1254 case I915_GTT_VIEW_NORMAL:
1255 pages = vma->obj->mm.pages;
1256 break;
1257
1258 case I915_GTT_VIEW_ROTATED:
1259 pages =
1260 intel_rotate_pages(&vma->gtt_view.rotated, vma->obj);
1261 break;
1262
1263 case I915_GTT_VIEW_REMAPPED:
1264 pages =
1265 intel_remap_pages(&vma->gtt_view.remapped, vma->obj);
1266 break;
1267
1268 case I915_GTT_VIEW_PARTIAL:
1269 pages = intel_partial_pages(&vma->gtt_view, vma->obj);
1270 break;
1271 }
1272
1273 if (IS_ERR(pages)) {
1274 drm_err(&vma->vm->i915->drm,
1275 "Failed to get pages for VMA view type %u (%ld)!\n",
1276 vma->gtt_view.type, PTR_ERR(pages));
1277 return PTR_ERR(pages);
1278 }
1279
1280 vma->pages = pages;
1281
1282 return 0;
1283 }
1284
1285 I915_SELFTEST_EXPORT int i915_vma_get_pages(struct i915_vma *vma)
1286 {
1287 int err;
1288
1289 if (atomic_add_unless(&vma->pages_count, 1, 0))
1290 return 0;
1291
1292 err = i915_gem_object_pin_pages(vma->obj);
1293 if (err)
1294 return err;
1295
1296 err = __i915_vma_get_pages(vma);
1297 if (err)
1298 goto err_unpin;
1299
1300 vma->page_sizes = vma->obj->mm.page_sizes;
1301 atomic_inc(&vma->pages_count);
1302
1303 return 0;
1304
1305 err_unpin:
1306 __i915_gem_object_unpin_pages(vma->obj);
1307
1308 return err;
1309 }
1310
1311 void vma_invalidate_tlb(struct i915_address_space *vm, u32 *tlb)
1312 {
1313 /*
1314 * Before we release the pages that were bound by this vma, we
1315 * must invalidate all the TLBs that may still have a reference
1316 * back to our physical address. It only needs to be done once,
1317 * so after updating the PTE to point away from the pages, record
1318 * the most recent TLB invalidation seqno, and if we have not yet
1319 * flushed the TLBs upon release, perform a full invalidation.
1320 */
1321 WRITE_ONCE(*tlb, intel_gt_next_invalidate_tlb_full(vm->gt));
1322 }
1323
1324 static void __vma_put_pages(struct i915_vma *vma, unsigned int count)
1325 {
1326 /* We allocate under vma_get_pages, so beware the shrinker */
1327 GEM_BUG_ON(atomic_read(&vma->pages_count) < count);
1328
1329 if (atomic_sub_return(count, &vma->pages_count) == 0) {
1330 if (vma->pages != vma->obj->mm.pages) {
1331 sg_free_table(vma->pages);
1332 kfree(vma->pages);
1333 }
1334 vma->pages = NULL;
1335
1336 i915_gem_object_unpin_pages(vma->obj);
1337 }
1338 }
1339
1340 I915_SELFTEST_EXPORT void i915_vma_put_pages(struct i915_vma *vma)
1341 {
1342 if (atomic_add_unless(&vma->pages_count, -1, 1))
1343 return;
1344
1345 __vma_put_pages(vma, 1);
1346 }
1347
1348 static void vma_unbind_pages(struct i915_vma *vma)
1349 {
1350 unsigned int count;
1351
1352 lockdep_assert_held(&vma->vm->mutex);
1353
1354 /* The upper portion of pages_count is the number of bindings */
1355 count = atomic_read(&vma->pages_count);
1356 count >>= I915_VMA_PAGES_BIAS;
1357 GEM_BUG_ON(!count);
1358
1359 __vma_put_pages(vma, count | count << I915_VMA_PAGES_BIAS);
1360 }
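/*
 * Sketch of the pages_count encoding used above (bit positions assumed
 * from i915_vma.h, not restated here): the low bits, below the
 * I915_VMA_PAGES_BIAS shift, count i915_vma_get_pages() references,
 * while the bits above it count bindings; each binding made by
 * i915_vma_pin_ww() adds I915_VMA_PAGES_ACTIVE, i.e. one binding plus
 * the page reference it holds. Hence
 *
 *	count = atomic_read(&vma->pages_count) >> I915_VMA_PAGES_BIAS;
 *
 * recovers the number of bindings, and passing
 * count | count << I915_VMA_PAGES_BIAS to __vma_put_pages() drops the
 * bindings together with their implied page references in one go.
 */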
1361
1362 int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
1363 u64 size, u64 alignment, u64 flags)
1364 {
1365 struct i915_vma_work *work = NULL;
1366 struct dma_fence *moving = NULL;
1367 struct i915_vma_resource *vma_res = NULL;
1368 intel_wakeref_t wakeref = 0;
1369 unsigned int bound;
1370 int err;
1371
1372 assert_vma_held(vma);
1373 GEM_BUG_ON(!ww);
1374
1375 BUILD_BUG_ON(PIN_GLOBAL != I915_VMA_GLOBAL_BIND);
1376 BUILD_BUG_ON(PIN_USER != I915_VMA_LOCAL_BIND);
1377
1378 GEM_BUG_ON(!(flags & (PIN_USER | PIN_GLOBAL)));
1379
1380 /* First try and grab the pin without rebinding the vma */
1381 if (try_qad_pin(vma, flags))
1382 return 0;
1383
1384 err = i915_vma_get_pages(vma);
1385 if (err)
1386 return err;
1387
1388 if (flags & PIN_GLOBAL)
1389 wakeref = intel_runtime_pm_get(&vma->vm->i915->runtime_pm);
1390
1391 if (flags & vma->vm->bind_async_flags) {
1392 /* lock VM */
1393 err = i915_vm_lock_objects(vma->vm, ww);
1394 if (err)
1395 goto err_rpm;
1396
1397 work = i915_vma_work();
1398 if (!work) {
1399 err = -ENOMEM;
1400 goto err_rpm;
1401 }
1402
1403 work->vm = vma->vm;
1404
1405 err = i915_gem_object_get_moving_fence(vma->obj, &moving);
1406 if (err)
1407 goto err_rpm;
1408
1409 dma_fence_work_chain(&work->base, moving);
1410
1411 /* Allocate enough page directories to cover the PTEs we will use */
1412 if (vma->vm->allocate_va_range) {
1413 err = i915_vm_alloc_pt_stash(vma->vm,
1414 &work->stash,
1415 vma->size);
1416 if (err)
1417 goto err_fence;
1418
1419 err = i915_vm_map_pt_stash(vma->vm, &work->stash);
1420 if (err)
1421 goto err_fence;
1422 }
1423 }
1424
1425 vma_res = i915_vma_resource_alloc();
1426 if (IS_ERR(vma_res)) {
1427 err = PTR_ERR(vma_res);
1428 goto err_fence;
1429 }
1430
1431 /*
1432 * Differentiate between user/kernel vma inside the aliasing-ppgtt.
1433 *
1434 * We conflate the Global GTT with the user's vma when using the
1435 * aliasing-ppgtt, but it is still vitally important to try and
1436 * keep the use cases distinct. For example, userptr objects are
1437 * not allowed inside the Global GTT as that will cause lock
1438 * inversions when we have to evict them the mmu_notifier callbacks -
1439 * but they are allowed to be part of the user ppGTT which can never
1440 * be mapped. As such we try to give the distinct users of the same
1441 * mutex, distinct lockclasses [equivalent to how we keep i915_ggtt
1442 * and i915_ppgtt separate].
1443 *
1444 * NB this may cause us to mask real lock inversions -- while the
1445 * code is safe today, lockdep may not be able to spot future
1446 * transgressions.
1447 */
1448 err = mutex_lock_interruptible_nested(&vma->vm->mutex,
1449 !(flags & PIN_GLOBAL));
1450 if (err)
1451 goto err_vma_res;
1452
1453 /* No more allocations allowed now we hold vm->mutex */
1454
1455 if (unlikely(i915_vma_is_closed(vma))) {
1456 err = -ENOENT;
1457 goto err_unlock;
1458 }
1459
1460 bound = atomic_read(&vma->flags);
1461 if (unlikely(bound & I915_VMA_ERROR)) {
1462 err = -ENOMEM;
1463 goto err_unlock;
1464 }
1465
1466 if (unlikely(!((bound + 1) & I915_VMA_PIN_MASK))) {
1467 err = -EAGAIN; /* pins are meant to be fairly temporary */
1468 goto err_unlock;
1469 }
1470
1471 if (unlikely(!(flags & ~bound & I915_VMA_BIND_MASK))) {
1472 if (!(flags & PIN_VALIDATE))
1473 __i915_vma_pin(vma);
1474 goto err_unlock;
1475 }
1476
1477 err = i915_active_acquire(&vma->active);
1478 if (err)
1479 goto err_unlock;
1480
1481 if (!(bound & I915_VMA_BIND_MASK)) {
1482 err = i915_vma_insert(vma, ww, size, alignment, flags);
1483 if (err)
1484 goto err_active;
1485
1486 if (i915_is_ggtt(vma->vm))
1487 __i915_vma_set_map_and_fenceable(vma);
1488 }
1489
1490 GEM_BUG_ON(!vma->pages);
1491 err = i915_vma_bind(vma,
1492 vma->obj->cache_level,
1493 flags, work, vma_res);
1494 vma_res = NULL;
1495 if (err)
1496 goto err_remove;
1497
1498 /* There should only be at most 2 active bindings (user, global) */
1499 GEM_BUG_ON(bound + I915_VMA_PAGES_ACTIVE < bound);
1500 atomic_add(I915_VMA_PAGES_ACTIVE, &vma->pages_count);
1501 list_move_tail(&vma->vm_link, &vma->vm->bound_list);
1502
1503 if (!(flags & PIN_VALIDATE)) {
1504 __i915_vma_pin(vma);
1505 GEM_BUG_ON(!i915_vma_is_pinned(vma));
1506 }
1507 GEM_BUG_ON(!i915_vma_is_bound(vma, flags));
1508 GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
1509
1510 err_remove:
1511 if (!i915_vma_is_bound(vma, I915_VMA_BIND_MASK)) {
1512 i915_vma_detach(vma);
1513 drm_mm_remove_node(&vma->node);
1514 }
1515 err_active:
1516 i915_active_release(&vma->active);
1517 err_unlock:
1518 mutex_unlock(&vma->vm->mutex);
1519 err_vma_res:
1520 i915_vma_resource_free(vma_res);
1521 err_fence:
1522 if (work)
1523 dma_fence_work_commit_imm(&work->base);
1524 err_rpm:
1525 if (wakeref)
1526 intel_runtime_pm_put(&vma->vm->i915->runtime_pm, wakeref);
1527
1528 if (moving)
1529 dma_fence_put(moving);
1530
1531 i915_vma_put_pages(vma);
1532 return err;
1533 }
1534
1535 static void flush_idle_contexts(struct intel_gt *gt)
1536 {
1537 struct intel_engine_cs *engine;
1538 enum intel_engine_id id;
1539
1540 for_each_engine(engine, gt, id)
1541 intel_engine_flush_barriers(engine);
1542
1543 intel_gt_wait_for_idle(gt, MAX_SCHEDULE_TIMEOUT);
1544 }
1545
1546 static int __i915_ggtt_pin(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
1547 u32 align, unsigned int flags)
1548 {
1549 struct i915_address_space *vm = vma->vm;
1550 int err;
1551
1552 do {
1553 err = i915_vma_pin_ww(vma, ww, 0, align, flags | PIN_GLOBAL);
1554
1555 if (err != -ENOSPC) {
1556 if (!err) {
1557 err = i915_vma_wait_for_bind(vma);
1558 if (err)
1559 i915_vma_unpin(vma);
1560 }
1561 return err;
1562 }
1563
1564 /* Unlike i915_vma_pin, we don't take no for an answer! */
1565 flush_idle_contexts(vm->gt);
1566 if (mutex_lock_interruptible(&vm->mutex) == 0) {
1567 /*
1568 * We pass NULL ww here, as we don't want to unbind
1569 * locked objects when called from execbuf when pinning
1570 * is removed. This would probably regress badly.
1571 */
1572 i915_gem_evict_vm(vm, NULL, NULL);
1573 mutex_unlock(&vm->mutex);
1574 }
1575 } while (1);
1576 }
1577
1578 int i915_ggtt_pin(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
1579 u32 align, unsigned int flags)
1580 {
1581 struct i915_gem_ww_ctx _ww;
1582 int err;
1583
1584 GEM_BUG_ON(!i915_vma_is_ggtt(vma));
1585
1586 if (ww)
1587 return __i915_ggtt_pin(vma, ww, align, flags);
1588
1589 lockdep_assert_not_held(&vma->obj->base.resv->lock.base);
1590
1591 for_i915_gem_ww(&_ww, err, true) {
1592 err = i915_gem_object_lock(vma->obj, &_ww);
1593 if (!err)
1594 err = __i915_ggtt_pin(vma, &_ww, align, flags);
1595 }
1596
1597 return err;
1598 }
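/*
 * Caller sketch (illustrative only): pinning into the GGTT without an
 * outer ww context, in which case i915_ggtt_pin() opens one internally:
 *
 *	err = i915_ggtt_pin(vma, NULL, 0, PIN_MAPPABLE);
 *	if (err)
 *		return err;
 *	... access through vma->node.start ...
 *	i915_vma_unpin(vma);
 */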
1599
1600 static void __vma_close(struct i915_vma *vma, struct intel_gt *gt)
1601 {
1602 /*
1603 * We defer actually closing, unbinding and destroying the VMA until
1604 * the next idle point, or if the object is freed in the meantime. By
1605 * postponing the unbind, we allow for it to be resurrected by the
1606 * client, avoiding the work required to rebind the VMA. This is
1607 * advantageous for DRI, where the client/server pass objects
1608 * between themselves, temporarily opening a local VMA to the
1609 * object, and then closing it again. The same object is then reused
1610 * on the next frame (or two, depending on the depth of the swap queue)
1611 * causing us to rebind the VMA once more. This ends up being a lot
1612 * of wasted work for the steady state.
1613 */
1614 GEM_BUG_ON(i915_vma_is_closed(vma));
1615 list_add(&vma->closed_link, &gt->closed_vma);
1616 }
1617
1618 void i915_vma_close(struct i915_vma *vma)
1619 {
1620 struct intel_gt *gt = vma->vm->gt;
1621 unsigned long flags;
1622
1623 if (i915_vma_is_ggtt(vma))
1624 return;
1625
1626 GEM_BUG_ON(!atomic_read(&vma->open_count));
1627 if (atomic_dec_and_lock_irqsave(&vma->open_count,
1628 &gt->closed_lock,
1629 flags)) {
1630 __vma_close(vma, gt);
1631 spin_unlock_irqrestore(&gt->closed_lock, flags);
1632 }
1633 }
1634
1635 static void __i915_vma_remove_closed(struct i915_vma *vma)
1636 {
1637 list_del_init(&vma->closed_link);
1638 }
1639
1640 void i915_vma_reopen(struct i915_vma *vma)
1641 {
1642 struct intel_gt *gt = vma->vm->gt;
1643
1644 spin_lock_irq(&gt->closed_lock);
1645 if (i915_vma_is_closed(vma))
1646 __i915_vma_remove_closed(vma);
1647 spin_unlock_irq(&gt->closed_lock);
1648 }
1649
1650 static void force_unbind(struct i915_vma *vma)
1651 {
1652 if (!drm_mm_node_allocated(&vma->node))
1653 return;
1654
1655 atomic_and(~I915_VMA_PIN_MASK, &vma->flags);
1656 WARN_ON(__i915_vma_unbind(vma));
1657 GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
1658 }
1659
1660 static void release_references(struct i915_vma *vma, struct intel_gt *gt,
1661 bool vm_ddestroy)
1662 {
1663 struct drm_i915_gem_object *obj = vma->obj;
1664
1665 GEM_BUG_ON(i915_vma_is_active(vma));
1666
1667 spin_lock(&obj->vma.lock);
1668 list_del(&vma->obj_link);
1669 if (!RB_EMPTY_NODE(&vma->obj_node))
1670 rb_erase(&vma->obj_node, &obj->vma.tree);
1671
1672 spin_unlock(&obj->vma.lock);
1673
1674 spin_lock_irq(&gt->closed_lock);
1675 __i915_vma_remove_closed(vma);
1676 spin_unlock_irq(&gt->closed_lock);
1677
1678 if (vm_ddestroy)
1679 i915_vm_resv_put(vma->vm);
1680
1681 i915_active_fini(&vma->active);
1682 GEM_WARN_ON(vma->resource);
1683 i915_vma_free(vma);
1684 }
1685
1686 /**
1687 * i915_vma_destroy_locked - Remove all weak references to the vma and put
1688 * the initial reference.
1689 *
1690 * This function should be called when it's decided the vma isn't needed
1691 * anymore. The caller must ensure that it doesn't race with another lookup
1692 * plus destroy, typically by taking an appropriate reference.
1693 *
1694 * Current callsites are
1695 * - __i915_gem_object_pages_fini()
1696 * - __i915_vm_close() - Blocks the above function by taking a reference on
1697 * the object.
1698 * - __i915_vma_parked() - Blocks the above functions by taking a reference
1699 * on the vm and a reference on the object. Also takes the object lock so
1700 * destruction from __i915_vma_parked() can be blocked by holding the
1701 * object lock. Since the object lock is only allowed from within i915 with
1702 * an object refcount, holding the object lock also implicitly blocks the
1703 * vma freeing from __i915_gem_object_pages_fini().
1704 *
1705 * Because of locks taken during destruction, a vma is also guaranteed to
1706 * stay alive while the following locks are held if it was looked up while
1707 * holding one of the locks:
1708 * - vm->mutex
1709 * - obj->vma.lock
1710 * - gt->closed_lock
1711 */
1712 void i915_vma_destroy_locked(struct i915_vma *vma)
1713 {
1714 lockdep_assert_held(&vma->vm->mutex);
1715
1716 force_unbind(vma);
1717 list_del_init(&vma->vm_link);
1718 release_references(vma, vma->vm->gt, false);
1719 }
1720
1721 void i915_vma_destroy(struct i915_vma *vma)
1722 {
1723 struct intel_gt *gt;
1724 bool vm_ddestroy;
1725
1726 mutex_lock(&vma->vm->mutex);
1727 force_unbind(vma);
1728 list_del_init(&vma->vm_link);
1729 vm_ddestroy = vma->vm_ddestroy;
1730 vma->vm_ddestroy = false;
1731
1732 /* vma->vm may be freed when releasing vma->vm->mutex. */
1733 gt = vma->vm->gt;
1734 mutex_unlock(&vma->vm->mutex);
1735 release_references(vma, gt, vm_ddestroy);
1736 }
1737
1738 void i915_vma_parked(struct intel_gt *gt)
1739 {
1740 struct i915_vma *vma, *next;
1741 LIST_HEAD(closed);
1742
1743 spin_lock_irq(&gt->closed_lock);
1744 list_for_each_entry_safe(vma, next, &gt->closed_vma, closed_link) {
1745 struct drm_i915_gem_object *obj = vma->obj;
1746 struct i915_address_space *vm = vma->vm;
1747
1748 /* XXX All to avoid keeping a reference on i915_vma itself */
1749
1750 if (!kref_get_unless_zero(&obj->base.refcount))
1751 continue;
1752
1753 if (!i915_vm_tryget(vm)) {
1754 i915_gem_object_put(obj);
1755 continue;
1756 }
1757
1758 list_move(&vma->closed_link, &closed);
1759 }
1760 spin_unlock_irq(&gt->closed_lock);
1761
1762 /* As the GT is held idle, no vma can be reopened as we destroy them */
1763 list_for_each_entry_safe(vma, next, &closed, closed_link) {
1764 struct drm_i915_gem_object *obj = vma->obj;
1765 struct i915_address_space *vm = vma->vm;
1766
1767 if (i915_gem_object_trylock(obj, NULL)) {
1768 INIT_LIST_HEAD(&vma->closed_link);
1769 i915_vma_destroy(vma);
1770 i915_gem_object_unlock(obj);
1771 } else {
1772 /* back you go.. */
1773 spin_lock_irq(&gt->closed_lock);
1774 list_add(&vma->closed_link, &gt->closed_vma);
1775 spin_unlock_irq(&gt->closed_lock);
1776 }
1777
1778 i915_gem_object_put(obj);
1779 i915_vm_put(vm);
1780 }
1781 }
1782
1783 static void __i915_vma_iounmap(struct i915_vma *vma)
1784 {
1785 GEM_BUG_ON(i915_vma_is_pinned(vma));
1786
1787 if (vma->iomap == NULL)
1788 return;
1789
1790 if (page_unmask_bits(vma->iomap))
1791 __i915_gem_object_release_map(vma->obj);
1792 else
1793 io_mapping_unmap(vma->iomap);
1794 vma->iomap = NULL;
1795 }
1796
1797 void i915_vma_revoke_mmap(struct i915_vma *vma)
1798 {
1799 struct drm_vma_offset_node *node;
1800 u64 vma_offset;
1801
1802 if (!i915_vma_has_userfault(vma))
1803 return;
1804
1805 GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
1806 GEM_BUG_ON(!vma->obj->userfault_count);
1807
1808 node = &vma->mmo->vma_node;
1809 vma_offset = vma->gtt_view.partial.offset << PAGE_SHIFT;
1810 unmap_mapping_range(vma->vm->i915->drm.anon_inode->i_mapping,
1811 drm_vma_node_offset_addr(node) + vma_offset,
1812 vma->size,
1813 1);
1814
1815 i915_vma_unset_userfault(vma);
1816 if (!--vma->obj->userfault_count)
1817 list_del(&vma->obj->userfault_link);
1818 }
1819
1820 static int
1821 __i915_request_await_bind(struct i915_request *rq, struct i915_vma *vma)
1822 {
1823 return __i915_request_await_exclusive(rq, &vma->active);
1824 }
1825
1826 static int __i915_vma_move_to_active(struct i915_vma *vma, struct i915_request *rq)
1827 {
1828 int err;
1829
1830 /* Wait for the vma to be bound before we start! */
1831 err = __i915_request_await_bind(rq, vma);
1832 if (err)
1833 return err;
1834
1835 return i915_active_add_request(&vma->active, rq);
1836 }
1837
1838 int _i915_vma_move_to_active(struct i915_vma *vma,
1839 struct i915_request *rq,
1840 struct dma_fence *fence,
1841 unsigned int flags)
1842 {
1843 struct drm_i915_gem_object *obj = vma->obj;
1844 int err;
1845
1846 assert_object_held(obj);
1847
1848 GEM_BUG_ON(!vma->pages);
1849
1850 err = __i915_vma_move_to_active(vma, rq);
1851 if (unlikely(err))
1852 return err;
1853
	/*
	 * Reserve fence slots early to avoid an allocation after the
	 * workload has been prepared and fences are being associated
	 * with the dma_resv.
	 */
	if (fence && !(flags & __EXEC_OBJECT_NO_RESERVE)) {
		struct dma_fence *curr;
		int idx;

		dma_fence_array_for_each(curr, idx, fence)
			;
		err = dma_resv_reserve_fences(vma->obj->base.resv, idx);
		if (unlikely(err))
			return err;
	}

	if (flags & EXEC_OBJECT_WRITE) {
		struct intel_frontbuffer *front;

		front = __intel_frontbuffer_get(obj);
		if (unlikely(front)) {
			if (intel_frontbuffer_invalidate(front, ORIGIN_CS))
				i915_active_add_request(&front->write, rq);
			intel_frontbuffer_put(front);
		}
	}

	if (fence) {
		struct dma_fence *curr;
		enum dma_resv_usage usage;
		int idx;

		if (flags & EXEC_OBJECT_WRITE) {
			usage = DMA_RESV_USAGE_WRITE;
			obj->write_domain = I915_GEM_DOMAIN_RENDER;
			obj->read_domains = 0;
		} else {
			usage = DMA_RESV_USAGE_READ;
			obj->write_domain = 0;
		}

		dma_fence_array_for_each(curr, idx, fence)
			dma_resv_add_fence(vma->obj->base.resv, curr, usage);
	}

	if (flags & EXEC_OBJECT_NEEDS_FENCE && vma->fence)
		i915_active_add_request(&vma->fence->active, rq);

	obj->read_domains |= I915_GEM_GPU_DOMAINS;
	obj->mm.dirty = true;

	GEM_BUG_ON(!i915_vma_is_active(vma));
	return 0;
}

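/**
 * __i915_vma_evict - evict the vma's binding from its address space
 * @vma: the vma to evict
 * @async: whether the unbind may complete after this call returns
 *
 * Revokes any CPU access (mmaps, fence register, iomap), tears down the
 * binding and detaches the vma from its object. With @async set, the
 * returned unbind fence must be consumed by the caller (typically added
 * to the object's dma_resv); otherwise the unbind is waited upon here
 * and NULL is returned.
 */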
struct dma_fence *__i915_vma_evict(struct i915_vma *vma, bool async)
{
	struct i915_vma_resource *vma_res = vma->resource;
	struct dma_fence *unbind_fence;

	GEM_BUG_ON(i915_vma_is_pinned(vma));
	assert_vma_held_evict(vma);

	if (i915_vma_is_map_and_fenceable(vma)) {
		/* Force a pagefault for domain tracking on next user access */
		i915_vma_revoke_mmap(vma);

		/*
		 * Check that we have flushed all writes through the GGTT
		 * before the unbind; otherwise, due to the non-strict
		 * nature of those indirect writes, they may end up
		 * referencing the GGTT PTE after the unbind.
		 *
		 * Note that we may be concurrently poking at the GGTT_WRITE
		 * bit from set-domain, as we mark all GGTT vma associated
		 * with an object. We know this is for another vma, as we
		 * are currently unbinding this one -- so if this vma will be
		 * reused, it will be refaulted and have its dirty bit set
		 * before the next write.
		 */
		i915_vma_flush_writes(vma);

		/* release the fence reg _after_ flushing */
		i915_vma_revoke_fence(vma);

		clear_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma));
	}

	__i915_vma_iounmap(vma);

	GEM_BUG_ON(vma->fence);
	GEM_BUG_ON(i915_vma_has_userfault(vma));

	/* Object backend must be async capable. */
	GEM_WARN_ON(async && !vma->resource->bi.pages_rsgt);

	/* If vm is not open, unbind is a nop. */
	vma_res->needs_wakeref = i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND) &&
		kref_read(&vma->vm->ref);
	vma_res->skip_pte_rewrite = !kref_read(&vma->vm->ref) ||
		vma->vm->skip_pte_rewrite;
	trace_i915_vma_unbind(vma);

	if (async)
		unbind_fence = i915_vma_resource_unbind(vma_res,
							&vma->obj->mm.tlb);
	else
		unbind_fence = i915_vma_resource_unbind(vma_res, NULL);

	vma->resource = NULL;

	atomic_and(~(I915_VMA_BIND_MASK | I915_VMA_ERROR | I915_VMA_GGTT_WRITE),
		   &vma->flags);

	i915_vma_detach(vma);

	if (!async) {
		if (unbind_fence) {
			dma_fence_wait(unbind_fence, false);
			dma_fence_put(unbind_fence);
			unbind_fence = NULL;
		}
		vma_invalidate_tlb(vma->vm, &vma->obj->mm.tlb);
	}

	/*
	 * Binding itself may not have completed until the unbind fence signals,
	 * so don't drop the pages until that happens, unless the resource is
	 * async_capable.
	 */

	vma_unbind_pages(vma);
	return unbind_fence;
}

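/*
 * Synchronous unbind with vm->mutex already held by the caller. Fails
 * with -EAGAIN if the vma is still pinned; otherwise waits out any
 * residual activity before evicting and releasing the drm_mm node.
 */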
int __i915_vma_unbind(struct i915_vma *vma)
{
	int ret;

	lockdep_assert_held(&vma->vm->mutex);
	assert_vma_held_evict(vma);

	if (!drm_mm_node_allocated(&vma->node))
		return 0;

	if (i915_vma_is_pinned(vma)) {
		vma_print_allocator(vma, "is pinned");
		return -EAGAIN;
	}

	/*
	 * After confirming that no one else is pinning this vma, wait for
	 * any laggards who may have crept in during the wait (through
	 * a residual pin skipping the vm->mutex) to complete.
	 */
	ret = i915_vma_sync(vma);
	if (ret)
		return ret;

	GEM_BUG_ON(i915_vma_is_active(vma));
	__i915_vma_evict(vma, false);

	drm_mm_remove_node(&vma->node); /* pairs with i915_vma_release() */
	return 0;
}

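/*
 * Asynchronous flavour of the above: instead of waiting for the vma to
 * idle, the unbind is gated behind the vma's outstanding activity and
 * the resulting unbind fence is handed back for the caller to install
 * into the object's dma_resv.
 */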
static struct dma_fence *__i915_vma_unbind_async(struct i915_vma *vma)
{
	struct dma_fence *fence;

	lockdep_assert_held(&vma->vm->mutex);

	if (!drm_mm_node_allocated(&vma->node))
		return NULL;

	if (i915_vma_is_pinned(vma) ||
	    &vma->obj->mm.rsgt->table != vma->resource->bi.pages)
		return ERR_PTR(-EAGAIN);

	/*
	 * We probably need to replace this with awaiting the fences of the
	 * object's dma_resv when the vma active goes away. When doing that
	 * we need to be careful to not add the vma_resource unbind fence
	 * immediately to the object's dma_resv, because then unbinding
	 * the next vma from the object, in case there are many, will
	 * actually await the unbinding of the previous vmas, which is
	 * undesirable.
	 */
	if (i915_sw_fence_await_active(&vma->resource->chain, &vma->active,
				       I915_ACTIVE_AWAIT_EXCL |
				       I915_ACTIVE_AWAIT_ACTIVE) < 0) {
		return ERR_PTR(-EBUSY);
	}

	fence = __i915_vma_evict(vma, true);

	drm_mm_remove_node(&vma->node); /* pairs with i915_vma_release() */

	return fence;
}

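/**
 * i915_vma_unbind - synchronously unbind a vma from its address space
 * @vma: the vma to unbind
 *
 * Waits for the vma to idle, then takes vm->mutex (grabbing a runtime-pm
 * wakeref first if the vma is bound in the GGTT) and removes the binding.
 * A minimal, hypothetical caller sketch:
 *
 *	err = i915_vma_unbind(vma);
 *	if (err == -EAGAIN)
 *		; // vma is still pinned; back off and retry later
 *
 * Returns: 0 on success, -EAGAIN if the vma is still pinned, or another
 * negative error code on failure.
 */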
int i915_vma_unbind(struct i915_vma *vma)
{
	struct i915_address_space *vm = vma->vm;
	intel_wakeref_t wakeref = 0;
	int err;

	assert_object_held_shared(vma->obj);

	/* Optimistic wait before taking the mutex */
	err = i915_vma_sync(vma);
	if (err)
		return err;

	if (!drm_mm_node_allocated(&vma->node))
		return 0;

	if (i915_vma_is_pinned(vma)) {
		vma_print_allocator(vma, "is pinned");
		return -EAGAIN;
	}

	if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND))
		/* XXX not always required: nop_clear_range */
		wakeref = intel_runtime_pm_get(&vm->i915->runtime_pm);

	err = mutex_lock_interruptible_nested(&vma->vm->mutex, !wakeref);
	if (err)
		goto out_rpm;

	err = __i915_vma_unbind(vma);
	mutex_unlock(&vm->mutex);

out_rpm:
	if (wakeref)
		intel_runtime_pm_put(&vm->i915->runtime_pm, wakeref);
	return err;
}

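/**
 * i915_vma_unbind_async - unbind a vma without waiting for the unbind
 * @vma: the vma to unbind
 * @trylock_vm: only trylock vm->mutex, failing with -EBUSY on contention
 * instead of sleeping
 *
 * The object lock must be held, since the unbind fence is published to
 * the object's dma_resv. Fails with -EBUSY if the backing store cannot
 * be unbound asynchronously or if the required locks are contended.
 */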
int i915_vma_unbind_async(struct i915_vma *vma, bool trylock_vm)
{
	struct drm_i915_gem_object *obj = vma->obj;
	struct i915_address_space *vm = vma->vm;
	intel_wakeref_t wakeref = 0;
	struct dma_fence *fence;
	int err;

	/*
	 * We need the dma-resv lock since we add the
	 * unbind fence to the dma-resv object.
	 */
	assert_object_held(obj);

	if (!drm_mm_node_allocated(&vma->node))
		return 0;

	if (i915_vma_is_pinned(vma)) {
		vma_print_allocator(vma, "is pinned");
		return -EAGAIN;
	}

	if (!obj->mm.rsgt)
		return -EBUSY;

	err = dma_resv_reserve_fences(obj->base.resv, 2);
	if (err)
		return -EBUSY;

	/*
	 * It would be great if we could grab this wakeref from the
	 * async unbind work if needed, but we can't because it uses
	 * kmalloc and it's in the dma-fence signalling critical path.
	 */
	if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND))
		wakeref = intel_runtime_pm_get(&vm->i915->runtime_pm);

	if (trylock_vm && !mutex_trylock(&vm->mutex)) {
		err = -EBUSY;
		goto out_rpm;
	} else if (!trylock_vm) {
		err = mutex_lock_interruptible_nested(&vm->mutex, !wakeref);
		if (err)
			goto out_rpm;
	}

	fence = __i915_vma_unbind_async(vma);
	mutex_unlock(&vm->mutex);
	if (IS_ERR_OR_NULL(fence)) {
		err = PTR_ERR_OR_ZERO(fence);
		goto out_rpm;
	}

	dma_resv_add_fence(obj->base.resv, fence, DMA_RESV_USAGE_READ);
	dma_fence_put(fence);

out_rpm:
	if (wakeref)
		intel_runtime_pm_put(&vm->i915->runtime_pm, wakeref);
	return err;
}

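/*
 * Convenience wrapper for callers that do not already hold the object
 * lock: takes the lock around i915_vma_unbind().
 */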
int i915_vma_unbind_unlocked(struct i915_vma *vma)
{
	int err;

	i915_gem_object_lock(vma->obj, NULL);
	err = i915_vma_unbind(vma);
	i915_gem_object_unlock(vma->obj);

	return err;
}

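/*
 * Shrinker hints are tracked on the backing object, not on the vma
 * itself; the helpers below simply forward the hint to the object.
 */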
struct i915_vma *i915_vma_make_unshrinkable(struct i915_vma *vma)
{
	i915_gem_object_make_unshrinkable(vma->obj);
	return vma;
}

void i915_vma_make_shrinkable(struct i915_vma *vma)
{
	i915_gem_object_make_shrinkable(vma->obj);
}

void i915_vma_make_purgeable(struct i915_vma *vma)
{
	i915_gem_object_make_purgeable(vma->obj);
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_vma.c"
#endif

void i915_vma_module_exit(void)
{
	kmem_cache_destroy(slab_vmas);
}

int __init i915_vma_module_init(void)
{
	slab_vmas = KMEM_CACHE(i915_vma, SLAB_HWCACHE_ALIGN);
	if (!slab_vmas)
		return -ENOMEM;

	return 0;
}