1 /*
2 * SPDX-License-Identifier: MIT
3 *
4 * Copyright © 2016 Intel Corporation
5 */
6
7 #include <linux/highmem.h>
8 #include <linux/prime_numbers.h>
9
10 #include "gem/i915_gem_internal.h"
11 #include "gem/i915_gem_lmem.h"
12 #include "gem/i915_gem_region.h"
13 #include "gem/i915_gem_ttm.h"
14 #include "gem/i915_gem_ttm_move.h"
15 #include "gt/intel_engine_pm.h"
16 #include "gt/intel_gpu_commands.h"
17 #include "gt/intel_gt.h"
18 #include "gt/intel_gt_pm.h"
19 #include "gt/intel_migrate.h"
20 #include "i915_reg.h"
21 #include "i915_ttm_buddy_manager.h"
22
23 #include "huge_gem_object.h"
24 #include "i915_selftest.h"
25 #include "selftests/i915_random.h"
26 #include "selftests/igt_flush_test.h"
27 #include "selftests/igt_reset.h"
28 #include "selftests/igt_mmap.h"
29
30 struct tile {
31 unsigned int width;
32 unsigned int height;
33 unsigned int stride;
34 unsigned int size;
35 unsigned int tiling;
36 unsigned int swizzle;
37 };
38
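/*
 * Extract the selected address bit and shift it down to bit 6; XORing the
 * result into an offset models the hardware's bit-6 swizzling.
 */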
39 static u64 swizzle_bit(unsigned int bit, u64 offset)
40 {
41 return (offset & BIT_ULL(bit)) >> (bit - 6);
42 }
43
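/*
 * Translate a linear byte offset into the offset the fence/tiling hardware
 * would use for the given tile geometry, before applying any bit-6 swizzle.
 * Illustrative example (numbers not tied to any particular platform): with
 * X-tiling, 4KiB tiles of width 512 and height 8, and a stride of 2048,
 * linear offset 5000 lands in the second tile of the row, at row 2, byte
 * 392 within the tile, i.e. 1 * 4096 + 2 * 512 + 392 = 5512.
 */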
44 static u64 tiled_offset(const struct tile *tile, u64 v)
45 {
46 u64 x, y;
47
48 if (tile->tiling == I915_TILING_NONE)
49 return v;
50
51 y = div64_u64_rem(v, tile->stride, &x);
52 v = div64_u64_rem(y, tile->height, &y) * tile->stride * tile->height;
53
54 if (tile->tiling == I915_TILING_X) {
55 v += y * tile->width;
56 v += div64_u64_rem(x, tile->width, &x) << tile->size;
57 v += x;
58 } else if (tile->width == 128) {
59 const unsigned int ytile_span = 16;
60 const unsigned int ytile_height = 512;
61
62 v += y * ytile_span;
63 v += div64_u64_rem(x, ytile_span, &x) * ytile_height;
64 v += x;
65 } else {
66 const unsigned int ytile_span = 32;
67 const unsigned int ytile_height = 256;
68
69 v += y * ytile_span;
70 v += div64_u64_rem(x, ytile_span, &x) * ytile_height;
71 v += x;
72 }
73
74 switch (tile->swizzle) {
75 case I915_BIT_6_SWIZZLE_9:
76 v ^= swizzle_bit(9, v);
77 break;
78 case I915_BIT_6_SWIZZLE_9_10:
79 v ^= swizzle_bit(9, v) ^ swizzle_bit(10, v);
80 break;
81 case I915_BIT_6_SWIZZLE_9_11:
82 v ^= swizzle_bit(9, v) ^ swizzle_bit(11, v);
83 break;
84 case I915_BIT_6_SWIZZLE_9_10_11:
85 v ^= swizzle_bit(9, v) ^ swizzle_bit(10, v) ^ swizzle_bit(11, v);
86 break;
87 }
88
89 return v;
90 }
91
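/*
 * Pick a single random page of the object, pin just the partial GGTT view
 * covering it, write the page index through the GTT iomap, then kmap() the
 * backing page at the manually swizzled offset to verify the write landed
 * where the tiling maths says it should.
 */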
92 static int check_partial_mapping(struct drm_i915_gem_object *obj,
93 const struct tile *tile,
94 struct rnd_state *prng)
95 {
96 const unsigned long npages = obj->base.size / PAGE_SIZE;
97 struct drm_i915_private *i915 = to_i915(obj->base.dev);
98 struct i915_gtt_view view;
99 struct i915_vma *vma;
100 unsigned long offset;
101 unsigned long page;
102 u32 __iomem *io;
103 struct page *p;
104 unsigned int n;
105 u32 *cpu;
106 int err;
107
108 err = i915_gem_object_set_tiling(obj, tile->tiling, tile->stride);
109 if (err) {
110 pr_err("Failed to set tiling mode=%u, stride=%u, err=%d\n",
111 tile->tiling, tile->stride, err);
112 return err;
113 }
114
115 GEM_BUG_ON(i915_gem_object_get_tiling(obj) != tile->tiling);
116 GEM_BUG_ON(i915_gem_object_get_stride(obj) != tile->stride);
117
118 i915_gem_object_lock(obj, NULL);
119 err = i915_gem_object_set_to_gtt_domain(obj, true);
120 i915_gem_object_unlock(obj);
121 if (err) {
122 pr_err("Failed to flush to GTT write domain; err=%d\n", err);
123 return err;
124 }
125
126 page = i915_prandom_u32_max_state(npages, prng);
127 view = compute_partial_view(obj, page, MIN_CHUNK_PAGES);
128
129 vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
130 if (IS_ERR(vma)) {
131 pr_err("Failed to pin partial view: offset=%lu; err=%d\n",
132 page, (int)PTR_ERR(vma));
133 return PTR_ERR(vma);
134 }
135
136 n = page - view.partial.offset;
137 GEM_BUG_ON(n >= view.partial.size);
138
139 io = i915_vma_pin_iomap(vma);
140 i915_vma_unpin(vma);
141 if (IS_ERR(io)) {
142 pr_err("Failed to iomap partial view: offset=%lu; err=%d\n",
143 page, (int)PTR_ERR(io));
144 err = PTR_ERR(io);
145 goto out;
146 }
147
148 iowrite32(page, io + n * PAGE_SIZE / sizeof(*io));
149 i915_vma_unpin_iomap(vma);
150
151 offset = tiled_offset(tile, page << PAGE_SHIFT);
152 if (offset >= obj->base.size)
153 goto out;
154
155 intel_gt_flush_ggtt_writes(to_gt(i915));
156
157 p = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT);
158 cpu = kmap(p) + offset_in_page(offset);
159 drm_clflush_virt_range(cpu, sizeof(*cpu));
160 if (*cpu != (u32)page) {
161 pr_err("Partial view for %lu [%u] (offset=%llu, size=%u [%llu, row size %u], fence=%d, tiling=%d, stride=%d) misalignment, expected write to page (%lu + %u [0x%lx]) of 0x%x, found 0x%x\n",
162 page, n,
163 view.partial.offset,
164 view.partial.size,
165 vma->size >> PAGE_SHIFT,
166 tile->tiling ? tile_row_pages(obj) : 0,
167 vma->fence ? vma->fence->id : -1, tile->tiling, tile->stride,
168 offset >> PAGE_SHIFT,
169 (unsigned int)offset_in_page(offset),
170 offset,
171 (u32)page, *cpu);
172 err = -EINVAL;
173 }
174 *cpu = 0;
175 drm_clflush_virt_range(cpu, sizeof(*cpu));
176 kunmap(p);
177
178 out:
179 i915_gem_object_lock(obj, NULL);
180 i915_vma_destroy(vma);
181 i915_gem_object_unlock(obj);
182 return err;
183 }
184
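/*
 * As check_partial_mapping(), but walk the prime-numbered pages of the
 * object with the same write-via-GTT / read-back-via-CPU check until the
 * allotted time expires.
 */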
185 static int check_partial_mappings(struct drm_i915_gem_object *obj,
186 const struct tile *tile,
187 unsigned long end_time)
188 {
189 const unsigned int nreal = obj->scratch / PAGE_SIZE;
190 const unsigned long npages = obj->base.size / PAGE_SIZE;
191 struct drm_i915_private *i915 = to_i915(obj->base.dev);
192 struct i915_vma *vma;
193 unsigned long page;
194 int err;
195
196 err = i915_gem_object_set_tiling(obj, tile->tiling, tile->stride);
197 if (err) {
198 pr_err("Failed to set tiling mode=%u, stride=%u, err=%d\n",
199 tile->tiling, tile->stride, err);
200 return err;
201 }
202
203 GEM_BUG_ON(i915_gem_object_get_tiling(obj) != tile->tiling);
204 GEM_BUG_ON(i915_gem_object_get_stride(obj) != tile->stride);
205
206 i915_gem_object_lock(obj, NULL);
207 err = i915_gem_object_set_to_gtt_domain(obj, true);
208 i915_gem_object_unlock(obj);
209 if (err) {
210 pr_err("Failed to flush to GTT write domain; err=%d\n", err);
211 return err;
212 }
213
214 for_each_prime_number_from(page, 1, npages) {
215 struct i915_gtt_view view =
216 compute_partial_view(obj, page, MIN_CHUNK_PAGES);
217 unsigned long offset;
218 u32 __iomem *io;
219 struct page *p;
220 unsigned int n;
221 u32 *cpu;
222
223 GEM_BUG_ON(view.partial.size > nreal);
224 cond_resched();
225
226 vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
227 if (IS_ERR(vma)) {
228 pr_err("Failed to pin partial view: offset=%lu; err=%d\n",
229 page, (int)PTR_ERR(vma));
230 return PTR_ERR(vma);
231 }
232
233 n = page - view.partial.offset;
234 GEM_BUG_ON(n >= view.partial.size);
235
236 io = i915_vma_pin_iomap(vma);
237 i915_vma_unpin(vma);
238 if (IS_ERR(io)) {
239 pr_err("Failed to iomap partial view: offset=%lu; err=%d\n",
240 page, (int)PTR_ERR(io));
241 return PTR_ERR(io);
242 }
243
244 iowrite32(page, io + n * PAGE_SIZE / sizeof(*io));
245 i915_vma_unpin_iomap(vma);
246
247 offset = tiled_offset(tile, page << PAGE_SHIFT);
248 if (offset >= obj->base.size)
249 continue;
250
251 intel_gt_flush_ggtt_writes(to_gt(i915));
252
253 p = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT);
254 cpu = kmap(p) + offset_in_page(offset);
255 drm_clflush_virt_range(cpu, sizeof(*cpu));
256 if (*cpu != (u32)page) {
257 pr_err("Partial view for %lu [%u] (offset=%llu, size=%u [%llu, row size %u], fence=%d, tiling=%d, stride=%d) misalignment, expected write to page (%lu + %u [0x%lx]) of 0x%x, found 0x%x\n",
258 page, n,
259 view.partial.offset,
260 view.partial.size,
261 vma->size >> PAGE_SHIFT,
262 tile->tiling ? tile_row_pages(obj) : 0,
263 vma->fence ? vma->fence->id : -1, tile->tiling, tile->stride,
264 offset >> PAGE_SHIFT,
265 (unsigned int)offset_in_page(offset),
266 offset,
267 (u32)page, *cpu);
268 err = -EINVAL;
269 }
270 *cpu = 0;
271 drm_clflush_virt_range(cpu, sizeof(*cpu));
272 kunmap(p);
273 if (err)
274 return err;
275
276 i915_gem_object_lock(obj, NULL);
277 i915_vma_destroy(vma);
278 i915_gem_object_unlock(obj);
279
280 if (igt_timeout(end_time,
281 "%s: timed out after tiling=%d stride=%d\n",
282 __func__, tile->tiling, tile->stride))
283 return -EINTR;
284 }
285
286 return 0;
287 }
288
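/*
 * Fill in the platform's tile geometry (width, height and log2 size in
 * bytes) for the selected tiling mode and return the maximum pitch, in
 * tile widths, that a fence register can cover.
 */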
289 static unsigned int
290 setup_tile_size(struct tile *tile, struct drm_i915_private *i915)
291 {
292 if (GRAPHICS_VER(i915) <= 2) {
293 tile->height = 16;
294 tile->width = 128;
295 tile->size = 11;
296 } else if (tile->tiling == I915_TILING_Y &&
297 HAS_128_BYTE_Y_TILING(i915)) {
298 tile->height = 32;
299 tile->width = 128;
300 tile->size = 12;
301 } else {
302 tile->height = 8;
303 tile->width = 512;
304 tile->size = 12;
305 }
306
307 if (GRAPHICS_VER(i915) < 4)
308 return 8192 / tile->width;
309 else if (GRAPHICS_VER(i915) < 7)
310 return 128 * I965_FENCE_MAX_PITCH_VAL / tile->width;
311 else
312 return 128 * GEN7_FENCE_MAX_PITCH_VAL / tile->width;
313 }
314
315 static int igt_partial_tiling(void *arg)
316 {
317 const unsigned int nreal = 1 << 12; /* largest tile row x2 */
318 struct drm_i915_private *i915 = arg;
319 struct drm_i915_gem_object *obj;
320 intel_wakeref_t wakeref;
321 int tiling;
322 int err;
323
324 if (!i915_ggtt_has_aperture(to_gt(i915)->ggtt))
325 return 0;
326
327 /* We want to check the page mapping and fencing of a large object
328 * mmapped through the GTT. The object we create is larger than can
329 * possibly be mmapped as a whole, and so we must use partial GGTT vmas.
330 * We then check that a write through each partial GGTT vma ends up
331 * in the right set of pages within the object, and with the expected
332 * tiling, which we verify by manual swizzling.
333 */
334
335 obj = huge_gem_object(i915,
336 nreal << PAGE_SHIFT,
337 (1 + next_prime_number(to_gt(i915)->ggtt->vm.total >> PAGE_SHIFT)) << PAGE_SHIFT);
338 if (IS_ERR(obj))
339 return PTR_ERR(obj);
340
341 err = i915_gem_object_pin_pages_unlocked(obj);
342 if (err) {
343 pr_err("Failed to allocate %u pages (%lu total), err=%d\n",
344 nreal, obj->base.size / PAGE_SIZE, err);
345 goto out;
346 }
347
348 wakeref = intel_runtime_pm_get(&i915->runtime_pm);
349
350 if (1) {
351 IGT_TIMEOUT(end);
352 struct tile tile;
353
354 tile.height = 1;
355 tile.width = 1;
356 tile.size = 0;
357 tile.stride = 0;
358 tile.swizzle = I915_BIT_6_SWIZZLE_NONE;
359 tile.tiling = I915_TILING_NONE;
360
361 err = check_partial_mappings(obj, &tile, end);
362 if (err && err != -EINTR)
363 goto out_unlock;
364 }
365
366 for (tiling = I915_TILING_X; tiling <= I915_TILING_Y; tiling++) {
367 IGT_TIMEOUT(end);
368 unsigned int max_pitch;
369 unsigned int pitch;
370 struct tile tile;
371
372 if (i915->gem_quirks & GEM_QUIRK_PIN_SWIZZLED_PAGES)
373 /*
374 * The swizzling pattern is actually unknown as it
375 * varies based on physical address of each page.
376 * See i915_gem_detect_bit_6_swizzle().
377 */
378 break;
379
380 tile.tiling = tiling;
381 switch (tiling) {
382 case I915_TILING_X:
383 tile.swizzle = to_gt(i915)->ggtt->bit_6_swizzle_x;
384 break;
385 case I915_TILING_Y:
386 tile.swizzle = to_gt(i915)->ggtt->bit_6_swizzle_y;
387 break;
388 }
389
390 GEM_BUG_ON(tile.swizzle == I915_BIT_6_SWIZZLE_UNKNOWN);
391 if (tile.swizzle == I915_BIT_6_SWIZZLE_9_17 ||
392 tile.swizzle == I915_BIT_6_SWIZZLE_9_10_17)
393 continue;
394
395 max_pitch = setup_tile_size(&tile, i915);
396
397 for (pitch = max_pitch; pitch; pitch >>= 1) {
398 tile.stride = tile.width * pitch;
399 err = check_partial_mappings(obj, &tile, end);
400 if (err == -EINTR)
401 goto next_tiling;
402 if (err)
403 goto out_unlock;
404
405 if (pitch > 2 && GRAPHICS_VER(i915) >= 4) {
406 tile.stride = tile.width * (pitch - 1);
407 err = check_partial_mappings(obj, &tile, end);
408 if (err == -EINTR)
409 goto next_tiling;
410 if (err)
411 goto out_unlock;
412 }
413
414 if (pitch < max_pitch && GRAPHICS_VER(i915) >= 4) {
415 tile.stride = tile.width * (pitch + 1);
416 err = check_partial_mappings(obj, &tile, end);
417 if (err == -EINTR)
418 goto next_tiling;
419 if (err)
420 goto out_unlock;
421 }
422 }
423
424 if (GRAPHICS_VER(i915) >= 4) {
425 for_each_prime_number(pitch, max_pitch) {
426 tile.stride = tile.width * pitch;
427 err = check_partial_mappings(obj, &tile, end);
428 if (err == -EINTR)
429 goto next_tiling;
430 if (err)
431 goto out_unlock;
432 }
433 }
434
435 next_tiling: ;
436 }
437
438 out_unlock:
439 intel_runtime_pm_put(&i915->runtime_pm, wakeref);
440 i915_gem_object_unpin_pages(obj);
441 out:
442 i915_gem_object_put(obj);
443 return err;
444 }
445
446 static int igt_smoke_tiling(void *arg)
447 {
448 const unsigned int nreal = 1 << 12; /* largest tile row x2 */
449 struct drm_i915_private *i915 = arg;
450 struct drm_i915_gem_object *obj;
451 intel_wakeref_t wakeref;
452 I915_RND_STATE(prng);
453 unsigned long count;
454 IGT_TIMEOUT(end);
455 int err;
456
457 if (!i915_ggtt_has_aperture(to_gt(i915)->ggtt))
458 return 0;
459
460 /*
461 * igt_partial_tiling() does an exhaustive check of partial tiling
462 * chunking, but will undoubtedly run out of time. Here, we do a
463 * randomised search and hope over many runs of 1s with different
464 * seeds we will do a thorough check.
465 *
466 * Remember to look at the st_seed if we see a flip-flop in BAT!
467 */
468
469 if (i915->gem_quirks & GEM_QUIRK_PIN_SWIZZLED_PAGES)
470 return 0;
471
472 obj = huge_gem_object(i915,
473 nreal << PAGE_SHIFT,
474 (1 + next_prime_number(to_gt(i915)->ggtt->vm.total >> PAGE_SHIFT)) << PAGE_SHIFT);
475 if (IS_ERR(obj))
476 return PTR_ERR(obj);
477
478 err = i915_gem_object_pin_pages_unlocked(obj);
479 if (err) {
480 pr_err("Failed to allocate %u pages (%lu total), err=%d\n",
481 nreal, obj->base.size / PAGE_SIZE, err);
482 goto out;
483 }
484
485 wakeref = intel_runtime_pm_get(&i915->runtime_pm);
486
487 count = 0;
488 do {
489 struct tile tile;
490
491 tile.tiling =
492 i915_prandom_u32_max_state(I915_TILING_Y + 1, &prng);
493 switch (tile.tiling) {
494 case I915_TILING_NONE:
495 tile.height = 1;
496 tile.width = 1;
497 tile.size = 0;
498 tile.stride = 0;
499 tile.swizzle = I915_BIT_6_SWIZZLE_NONE;
500 break;
501
502 case I915_TILING_X:
503 tile.swizzle = to_gt(i915)->ggtt->bit_6_swizzle_x;
504 break;
505 case I915_TILING_Y:
506 tile.swizzle = to_gt(i915)->ggtt->bit_6_swizzle_y;
507 break;
508 }
509
510 if (tile.swizzle == I915_BIT_6_SWIZZLE_9_17 ||
511 tile.swizzle == I915_BIT_6_SWIZZLE_9_10_17)
512 continue;
513
514 if (tile.tiling != I915_TILING_NONE) {
515 unsigned int max_pitch = setup_tile_size(&tile, i915);
516
517 tile.stride =
518 i915_prandom_u32_max_state(max_pitch, &prng);
519 tile.stride = (1 + tile.stride) * tile.width;
520 if (GRAPHICS_VER(i915) < 4)
521 tile.stride = rounddown_pow_of_two(tile.stride);
522 }
523
524 err = check_partial_mapping(obj, &tile, &prng);
525 if (err)
526 break;
527
528 count++;
529 } while (!__igt_timeout(end, NULL));
530
531 pr_info("%s: Completed %lu trials\n", __func__, count);
532
533 intel_runtime_pm_put(&i915->runtime_pm, wakeref);
534 i915_gem_object_unpin_pages(obj);
535 out:
536 i915_gem_object_put(obj);
537 return err;
538 }
539
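/*
 * Submit a kernel request writing the object on every uabi engine so that
 * it remains busy, then drop our reference: the object stays alive only
 * via its active reference until those requests are retired.
 */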
540 static int make_obj_busy(struct drm_i915_gem_object *obj)
541 {
542 struct drm_i915_private *i915 = to_i915(obj->base.dev);
543 struct intel_engine_cs *engine;
544
545 for_each_uabi_engine(engine, i915) {
546 struct i915_request *rq;
547 struct i915_vma *vma;
548 struct i915_gem_ww_ctx ww;
549 int err;
550
551 vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL);
552 if (IS_ERR(vma))
553 return PTR_ERR(vma);
554
555 i915_gem_ww_ctx_init(&ww, false);
556 retry:
557 err = i915_gem_object_lock(obj, &ww);
558 if (!err)
559 err = i915_vma_pin_ww(vma, &ww, 0, 0, PIN_USER);
560 if (err)
561 goto err;
562
563 rq = intel_engine_create_kernel_request(engine);
564 if (IS_ERR(rq)) {
565 err = PTR_ERR(rq);
566 goto err_unpin;
567 }
568
569 err = i915_vma_move_to_active(vma, rq,
570 EXEC_OBJECT_WRITE);
571
572 i915_request_add(rq);
573 err_unpin:
574 i915_vma_unpin(vma);
575 err:
576 if (err == -EDEADLK) {
577 err = i915_gem_ww_ctx_backoff(&ww);
578 if (!err)
579 goto retry;
580 }
581 i915_gem_ww_ctx_fini(&ww);
582 if (err)
583 return err;
584 }
585
586 i915_gem_object_put(obj); /* leave it only alive via its active ref */
587 return 0;
588 }
589
590 static enum i915_mmap_type default_mapping(struct drm_i915_private *i915)
591 {
592 if (HAS_LMEM(i915))
593 return I915_MMAP_TYPE_FIXED;
594
595 return I915_MMAP_TYPE_GTT;
596 }
597
598 static struct drm_i915_gem_object *
599 create_sys_or_internal(struct drm_i915_private *i915,
600 unsigned long size)
601 {
602 if (HAS_LMEM(i915)) {
603 struct intel_memory_region *sys_region =
604 i915->mm.regions[INTEL_REGION_SMEM];
605
606 return __i915_gem_object_create_user(i915, size, &sys_region, 1);
607 }
608
609 return i915_gem_object_create_internal(i915, size);
610 }
611
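/*
 * Create a throwaway object of the requested size and check that assigning
 * it an mmap offset completes with exactly the expected error code.
 */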
612 static bool assert_mmap_offset(struct drm_i915_private *i915,
613 unsigned long size,
614 int expected)
615 {
616 struct drm_i915_gem_object *obj;
617 u64 offset;
618 int ret;
619
620 obj = create_sys_or_internal(i915, size);
621 if (IS_ERR(obj))
622 return expected && expected == PTR_ERR(obj);
623
624 ret = __assign_mmap_offset(obj, default_mapping(i915), &offset, NULL);
625 i915_gem_object_put(obj);
626
627 return ret == expected;
628 }
629
630 static void disable_retire_worker(struct drm_i915_private *i915)
631 {
632 i915_gem_driver_unregister__shrinker(i915);
633 intel_gt_pm_get(to_gt(i915));
634 cancel_delayed_work_sync(&to_gt(i915)->requests.retire_work);
635 }
636
637 static void restore_retire_worker(struct drm_i915_private *i915)
638 {
639 igt_flush_test(i915);
640 intel_gt_pm_put(to_gt(i915));
641 i915_gem_driver_register__shrinker(i915);
642 }
643
644 static void mmap_offset_lock(struct drm_i915_private *i915)
645 __acquires(&i915->drm.vma_offset_manager->vm_lock)
646 {
647 write_lock(&i915->drm.vma_offset_manager->vm_lock);
648 }
649
650 static void mmap_offset_unlock(struct drm_i915_private *i915)
651 __releases(&i915->drm.vma_offset_manager->vm_lock)
652 {
653 write_unlock(&i915->drm.vma_offset_manager->vm_lock);
654 }
655
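/*
 * Trim the mmap-offset (VMA manager) space down to a single page, check
 * that exactly one page-sized object fits and anything larger or additional
 * is rejected, then queue a few busy-but-dead objects that the reaper is
 * expected to clean up.
 */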
656 static int igt_mmap_offset_exhaustion(void *arg)
657 {
658 struct drm_i915_private *i915 = arg;
659 struct drm_mm *mm = &i915->drm.vma_offset_manager->vm_addr_space_mm;
660 struct drm_i915_gem_object *obj;
661 struct drm_mm_node *hole, *next;
662 int loop, err = 0;
663 u64 offset;
664 int enospc = HAS_LMEM(i915) ? -ENXIO : -ENOSPC;
665
666 /* Disable background reaper */
667 disable_retire_worker(i915);
668 GEM_BUG_ON(!to_gt(i915)->awake);
669 intel_gt_retire_requests(to_gt(i915));
670 i915_gem_drain_freed_objects(i915);
671
672 /* Trim the device mmap space to only a page */
673 mmap_offset_lock(i915);
674 loop = 1; /* PAGE_SIZE units */
675 list_for_each_entry_safe(hole, next, &mm->hole_stack, hole_stack) {
676 struct drm_mm_node *resv;
677
678 resv = kzalloc(sizeof(*resv), GFP_NOWAIT);
679 if (!resv) {
680 err = -ENOMEM;
681 goto out_park;
682 }
683
684 resv->start = drm_mm_hole_node_start(hole) + loop;
685 resv->size = hole->hole_size - loop;
686 resv->color = -1ul;
687 loop = 0;
688
689 if (!resv->size) {
690 kfree(resv);
691 continue;
692 }
693
694 pr_debug("Reserving hole [%llx + %llx]\n",
695 resv->start, resv->size);
696
697 err = drm_mm_reserve_node(mm, resv);
698 if (err) {
699 pr_err("Failed to trim VMA manager, err=%d\n", err);
700 kfree(resv);
701 goto out_park;
702 }
703 }
704 GEM_BUG_ON(!list_is_singular(&mm->hole_stack));
705 mmap_offset_unlock(i915);
706
707 /* Just fits! */
708 if (!assert_mmap_offset(i915, PAGE_SIZE, 0)) {
709 pr_err("Unable to insert object into single page hole\n");
710 err = -EINVAL;
711 goto out;
712 }
713
714 /* Too large */
715 if (!assert_mmap_offset(i915, 2 * PAGE_SIZE, enospc)) {
716 pr_err("Unexpectedly succeeded in inserting too large object into single page hole\n");
717 err = -EINVAL;
718 goto out;
719 }
720
721 /* Fill the hole, further allocation attempts should then fail */
722 obj = create_sys_or_internal(i915, PAGE_SIZE);
723 if (IS_ERR(obj)) {
724 err = PTR_ERR(obj);
725 pr_err("Unable to create object for reclaimed hole\n");
726 goto out;
727 }
728
729 err = __assign_mmap_offset(obj, default_mapping(i915), &offset, NULL);
730 if (err) {
731 pr_err("Unable to insert object into reclaimed hole\n");
732 goto err_obj;
733 }
734
735 if (!assert_mmap_offset(i915, PAGE_SIZE, enospc)) {
736 pr_err("Unexpectedly succeeded in inserting object into no holes!\n");
737 err = -EINVAL;
738 goto err_obj;
739 }
740
741 i915_gem_object_put(obj);
742
743 /* Now fill with busy dead objects that we expect to reap */
744 for (loop = 0; loop < 3; loop++) {
745 if (intel_gt_is_wedged(to_gt(i915)))
746 break;
747
748 obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
749 if (IS_ERR(obj)) {
750 err = PTR_ERR(obj);
751 goto out;
752 }
753
754 err = make_obj_busy(obj);
755 if (err) {
756 pr_err("[loop %d] Failed to busy the object\n", loop);
757 goto err_obj;
758 }
759 }
760
761 out:
762 mmap_offset_lock(i915);
763 out_park:
764 drm_mm_for_each_node_safe(hole, next, mm) {
765 if (hole->color != -1ul)
766 continue;
767
768 drm_mm_remove_node(hole);
769 kfree(hole);
770 }
771 mmap_offset_unlock(i915);
772 restore_retire_worker(i915);
773 return err;
774 err_obj:
775 i915_gem_object_put(obj);
776 goto out;
777 }
778
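/*
 * Poison helpers: gtt_set()/wc_set() fill the object with POISON_INUSE
 * through a mappable GGTT iomap or a WC CPU map; gtt_check()/wc_check()
 * then verify the backing store holds POISON_FREE, i.e. that a later write
 * through the user mmap really landed in the object.
 */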
779 static int gtt_set(struct drm_i915_gem_object *obj)
780 {
781 struct i915_vma *vma;
782 void __iomem *map;
783 int err = 0;
784
785 vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
786 if (IS_ERR(vma))
787 return PTR_ERR(vma);
788
789 intel_gt_pm_get(vma->vm->gt);
790 map = i915_vma_pin_iomap(vma);
791 i915_vma_unpin(vma);
792 if (IS_ERR(map)) {
793 err = PTR_ERR(map);
794 goto out;
795 }
796
797 memset_io(map, POISON_INUSE, obj->base.size);
798 i915_vma_unpin_iomap(vma);
799
800 out:
801 intel_gt_pm_put(vma->vm->gt);
802 return err;
803 }
804
805 static int gtt_check(struct drm_i915_gem_object *obj)
806 {
807 struct i915_vma *vma;
808 void __iomem *map;
809 int err = 0;
810
811 vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
812 if (IS_ERR(vma))
813 return PTR_ERR(vma);
814
815 intel_gt_pm_get(vma->vm->gt);
816 map = i915_vma_pin_iomap(vma);
817 i915_vma_unpin(vma);
818 if (IS_ERR(map)) {
819 err = PTR_ERR(map);
820 goto out;
821 }
822
823 if (memchr_inv((void __force *)map, POISON_FREE, obj->base.size)) {
824 pr_err("%s: Write via mmap did not land in backing store (GTT)\n",
825 obj->mm.region->name);
826 err = -EINVAL;
827 }
828 i915_vma_unpin_iomap(vma);
829
830 out:
831 intel_gt_pm_put(vma->vm->gt);
832 return err;
833 }
834
835 static int wc_set(struct drm_i915_gem_object *obj)
836 {
837 void *vaddr;
838
839 vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
840 if (IS_ERR(vaddr))
841 return PTR_ERR(vaddr);
842
843 memset(vaddr, POISON_INUSE, obj->base.size);
844 i915_gem_object_flush_map(obj);
845 i915_gem_object_unpin_map(obj);
846
847 return 0;
848 }
849
850 static int wc_check(struct drm_i915_gem_object *obj)
851 {
852 void *vaddr;
853 int err = 0;
854
855 vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
856 if (IS_ERR(vaddr))
857 return PTR_ERR(vaddr);
858
859 if (memchr_inv(vaddr, POISON_FREE, obj->base.size)) {
860 pr_err("%s: Write via mmap did not land in backing store (WC)\n",
861 obj->mm.region->name);
862 err = -EINVAL;
863 }
864 i915_gem_object_unpin_map(obj);
865
866 return err;
867 }
868
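/*
 * Does the object support the requested mmap type? Objects providing their
 * own mmap_offset op only accept FIXED, GTT mmaps require a mappable
 * aperture, and the CPU mmap types need struct pages or iomem to point at.
 */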
869 static bool can_mmap(struct drm_i915_gem_object *obj, enum i915_mmap_type type)
870 {
871 struct drm_i915_private *i915 = to_i915(obj->base.dev);
872 bool no_map;
873
874 if (obj->ops->mmap_offset)
875 return type == I915_MMAP_TYPE_FIXED;
876 else if (type == I915_MMAP_TYPE_FIXED)
877 return false;
878
879 if (type == I915_MMAP_TYPE_GTT &&
880 !i915_ggtt_has_aperture(to_gt(i915)->ggtt))
881 return false;
882
883 i915_gem_object_lock(obj, NULL);
884 no_map = (type != I915_MMAP_TYPE_GTT &&
885 !i915_gem_object_has_struct_page(obj) &&
886 !i915_gem_object_has_iomem(obj));
887 i915_gem_object_unlock(obj);
888
889 return !no_map;
890 }
891
892 #define expand32(x) (((x) << 0) | ((x) << 8) | ((x) << 16) | ((x) << 24))
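/*
 * Fill the object with POISON_INUSE (via WC, falling back to a GTT map),
 * mmap it with the requested type, check that userspace reads back the
 * poison, overwrite it with POISON_FREE from userspace and verify the new
 * pattern reached the backing store.
 */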
893 static int __igt_mmap(struct drm_i915_private *i915,
894 struct drm_i915_gem_object *obj,
895 enum i915_mmap_type type)
896 {
897 struct vm_area_struct *area;
898 unsigned long addr;
899 int err, i;
900 u64 offset;
901
902 if (!can_mmap(obj, type))
903 return 0;
904
905 err = wc_set(obj);
906 if (err == -ENXIO)
907 err = gtt_set(obj);
908 if (err)
909 return err;
910
911 err = __assign_mmap_offset(obj, type, &offset, NULL);
912 if (err)
913 return err;
914
915 addr = igt_mmap_offset(i915, offset, obj->base.size, PROT_WRITE, MAP_SHARED);
916 if (IS_ERR_VALUE(addr))
917 return addr;
918
919 pr_debug("igt_mmap(%s, %d) @ %lx\n", obj->mm.region->name, type, addr);
920
921 mmap_read_lock(current->mm);
922 area = vma_lookup(current->mm, addr);
923 mmap_read_unlock(current->mm);
924 if (!area) {
925 pr_err("%s: Did not create a vm_area_struct for the mmap\n",
926 obj->mm.region->name);
927 err = -EINVAL;
928 goto out_unmap;
929 }
930
931 for (i = 0; i < obj->base.size / sizeof(u32); i++) {
932 u32 __user *ux = u64_to_user_ptr((u64)(addr + i * sizeof(*ux)));
933 u32 x;
934
935 if (get_user(x, ux)) {
936 pr_err("%s: Unable to read from mmap, offset:%zd\n",
937 obj->mm.region->name, i * sizeof(x));
938 err = -EFAULT;
939 goto out_unmap;
940 }
941
942 if (x != expand32(POISON_INUSE)) {
943 pr_err("%s: Read incorrect value from mmap, offset:%zd, found:%x, expected:%x\n",
944 obj->mm.region->name,
945 i * sizeof(x), x, expand32(POISON_INUSE));
946 err = -EINVAL;
947 goto out_unmap;
948 }
949
950 x = expand32(POISON_FREE);
951 if (put_user(x, ux)) {
952 pr_err("%s: Unable to write to mmap, offset:%zd\n",
953 obj->mm.region->name, i * sizeof(x));
954 err = -EFAULT;
955 goto out_unmap;
956 }
957 }
958
959 if (type == I915_MMAP_TYPE_GTT)
960 intel_gt_flush_ggtt_writes(to_gt(i915));
961
962 err = wc_check(obj);
963 if (err == -ENXIO)
964 err = gtt_check(obj);
965 out_unmap:
966 vm_munmap(addr, obj->base.size);
967 return err;
968 }
969
970 static int igt_mmap(void *arg)
971 {
972 struct drm_i915_private *i915 = arg;
973 struct intel_memory_region *mr;
974 enum intel_region_id id;
975
976 for_each_memory_region(mr, i915, id) {
977 unsigned long sizes[] = {
978 PAGE_SIZE,
979 mr->min_page_size,
980 SZ_4M,
981 };
982 int i;
983
984 if (mr->private)
985 continue;
986
987 for (i = 0; i < ARRAY_SIZE(sizes); i++) {
988 struct drm_i915_gem_object *obj;
989 int err;
990
991 obj = __i915_gem_object_create_user(i915, sizes[i], &mr, 1);
992 if (obj == ERR_PTR(-ENODEV))
993 continue;
994
995 if (IS_ERR(obj))
996 return PTR_ERR(obj);
997
998 err = __igt_mmap(i915, obj, I915_MMAP_TYPE_GTT);
999 if (err == 0)
1000 err = __igt_mmap(i915, obj, I915_MMAP_TYPE_WC);
1001 if (err == 0)
1002 err = __igt_mmap(i915, obj, I915_MMAP_TYPE_FIXED);
1003
1004 i915_gem_object_put(obj);
1005 if (err)
1006 return err;
1007 }
1008 }
1009
1010 return 0;
1011 }
1012
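/*
 * Housekeeping for the migrate tests: igt_close_objects() unpins, drops the
 * pages of and frees every object on the list so one subtest cannot pollute
 * the memory region for the next; igt_make_evictable() merely unpins them
 * so they can be evicted on demand.
 */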
1013 static void igt_close_objects(struct drm_i915_private *i915,
1014 struct list_head *objects)
1015 {
1016 struct drm_i915_gem_object *obj, *on;
1017
1018 list_for_each_entry_safe(obj, on, objects, st_link) {
1019 i915_gem_object_lock(obj, NULL);
1020 if (i915_gem_object_has_pinned_pages(obj))
1021 i915_gem_object_unpin_pages(obj);
1022 /* No polluting the memory region between tests */
1023 __i915_gem_object_put_pages(obj);
1024 i915_gem_object_unlock(obj);
1025 list_del(&obj->st_link);
1026 i915_gem_object_put(obj);
1027 }
1028
1029 cond_resched();
1030
1031 i915_gem_drain_freed_objects(i915);
1032 }
1033
1034 static void igt_make_evictable(struct list_head *objects)
1035 {
1036 struct drm_i915_gem_object *obj;
1037
1038 list_for_each_entry(obj, objects, st_link) {
1039 i915_gem_object_lock(obj, NULL);
1040 if (i915_gem_object_has_pinned_pages(obj))
1041 i915_gem_object_unpin_pages(obj);
1042 i915_gem_object_unlock(obj);
1043 }
1044
1045 cond_resched();
1046 }
1047
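/*
 * Exhaust the CPU-visible portion of the region: keep allocating and
 * pinning objects, halving the size on -ENXIO/-ENOMEM, until even a
 * min_page_size allocation fails, so that subsequent faults must evict,
 * spill elsewhere or fail.
 */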
1048 static int igt_fill_mappable(struct intel_memory_region *mr,
1049 struct list_head *objects)
1050 {
1051 u64 size, total;
1052 int err;
1053
1054 total = 0;
1055 size = mr->io_size;
1056 do {
1057 struct drm_i915_gem_object *obj;
1058
1059 obj = i915_gem_object_create_region(mr, size, 0, 0);
1060 if (IS_ERR(obj)) {
1061 err = PTR_ERR(obj);
1062 goto err_close;
1063 }
1064
1065 list_add(&obj->st_link, objects);
1066
1067 err = i915_gem_object_pin_pages_unlocked(obj);
1068 if (err) {
1069 if (err != -ENXIO && err != -ENOMEM)
1070 goto err_close;
1071
1072 if (size == mr->min_page_size) {
1073 err = 0;
1074 break;
1075 }
1076
1077 size >>= 1;
1078 continue;
1079 }
1080
1081 total += obj->base.size;
1082 } while (1);
1083
1084 pr_info("%s filled=%lluMiB\n", __func__, total >> 20);
1085 return 0;
1086
1087 err_close:
1088 igt_close_objects(mr->i915, objects);
1089 return err;
1090 }
1091
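/*
 * Userspace half of the migrate test: fault the supplied mapping and expect
 * reads to return the POISON_INUSE pattern the GPU cleared into the object
 * and writes to stick, or, if @unfaultable, expect every access to fault;
 * finally re-check the backing store with wc_check().
 */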
1092 static int ___igt_mmap_migrate(struct drm_i915_private *i915,
1093 struct drm_i915_gem_object *obj,
1094 unsigned long addr,
1095 bool unfaultable)
1096 {
1097 struct vm_area_struct *area;
1098 int err = 0, i;
1099
1100 pr_info("igt_mmap(%s, %d) @ %lx\n",
1101 obj->mm.region->name, I915_MMAP_TYPE_FIXED, addr);
1102
1103 mmap_read_lock(current->mm);
1104 area = vma_lookup(current->mm, addr);
1105 mmap_read_unlock(current->mm);
1106 if (!area) {
1107 pr_err("%s: Did not create a vm_area_struct for the mmap\n",
1108 obj->mm.region->name);
1109 err = -EINVAL;
1110 goto out_unmap;
1111 }
1112
1113 for (i = 0; i < obj->base.size / sizeof(u32); i++) {
1114 u32 __user *ux = u64_to_user_ptr((u64)(addr + i * sizeof(*ux)));
1115 u32 x;
1116
1117 if (get_user(x, ux)) {
1118 err = -EFAULT;
1119 if (!unfaultable) {
1120 pr_err("%s: Unable to read from mmap, offset:%zd\n",
1121 obj->mm.region->name, i * sizeof(x));
1122 goto out_unmap;
1123 }
1124
1125 continue;
1126 }
1127
1128 if (unfaultable) {
1129 pr_err("%s: Faulted unmappable memory\n",
1130 obj->mm.region->name);
1131 err = -EINVAL;
1132 goto out_unmap;
1133 }
1134
1135 if (x != expand32(POISON_INUSE)) {
1136 pr_err("%s: Read incorrect value from mmap, offset:%zd, found:%x, expected:%x\n",
1137 obj->mm.region->name,
1138 i * sizeof(x), x, expand32(POISON_INUSE));
1139 err = -EINVAL;
1140 goto out_unmap;
1141 }
1142
1143 x = expand32(POISON_FREE);
1144 if (put_user(x, ux)) {
1145 pr_err("%s: Unable to write to mmap, offset:%zd\n",
1146 obj->mm.region->name, i * sizeof(x));
1147 err = -EFAULT;
1148 goto out_unmap;
1149 }
1150 }
1151
1152 if (unfaultable) {
1153 if (err == -EFAULT)
1154 err = 0;
1155 } else {
1156 obj->flags &= ~I915_BO_ALLOC_GPU_ONLY;
1157 err = wc_check(obj);
1158 }
1159 out_unmap:
1160 vm_munmap(addr, obj->base.size);
1161 return err;
1162 }
1163
1164 #define IGT_MMAP_MIGRATE_TOPDOWN (1 << 0)
1165 #define IGT_MMAP_MIGRATE_FILL (1 << 1)
1166 #define IGT_MMAP_MIGRATE_EVICTABLE (1 << 2)
1167 #define IGT_MMAP_MIGRATE_UNFAULTABLE (1 << 3)
1168 #define IGT_MMAP_MIGRATE_FAIL_GPU (1 << 4)
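/*
 * Kernel half: create a small object with the given placements (TOPDOWN
 * marks it GPU_ONLY so it starts in the non-mappable portion), optionally
 * fill the mappable portion of placements[0] first, clear the object to
 * POISON_INUSE on the GPU, optionally make the filler evictable or arm a
 * simulated GPU copy failure, then run ___igt_mmap_migrate() and verify
 * the object ended up in @expected_mr.
 */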
1169 static int __igt_mmap_migrate(struct intel_memory_region **placements,
1170 int n_placements,
1171 struct intel_memory_region *expected_mr,
1172 unsigned int flags)
1173 {
1174 struct drm_i915_private *i915 = placements[0]->i915;
1175 struct drm_i915_gem_object *obj;
1176 struct i915_request *rq = NULL;
1177 unsigned long addr;
1178 LIST_HEAD(objects);
1179 u64 offset;
1180 int err;
1181
1182 obj = __i915_gem_object_create_user(i915, PAGE_SIZE,
1183 placements,
1184 n_placements);
1185 if (IS_ERR(obj))
1186 return PTR_ERR(obj);
1187
1188 if (flags & IGT_MMAP_MIGRATE_TOPDOWN)
1189 obj->flags |= I915_BO_ALLOC_GPU_ONLY;
1190
1191 err = __assign_mmap_offset(obj, I915_MMAP_TYPE_FIXED, &offset, NULL);
1192 if (err)
1193 goto out_put;
1194
1195 /*
1196 * This will eventually create a GEM context, due to opening a dummy drm
1197 * file, which needs a tiny amount of mappable device memory for the top
1198 * level paging structures (and perhaps scratch), so make sure we
1199 * allocate early, to avoid tears.
1200 */
1201 addr = igt_mmap_offset(i915, offset, obj->base.size,
1202 PROT_WRITE, MAP_SHARED);
1203 if (IS_ERR_VALUE(addr)) {
1204 err = addr;
1205 goto out_put;
1206 }
1207
1208 if (flags & IGT_MMAP_MIGRATE_FILL) {
1209 err = igt_fill_mappable(placements[0], &objects);
1210 if (err)
1211 goto out_put;
1212 }
1213
1214 err = i915_gem_object_lock(obj, NULL);
1215 if (err)
1216 goto out_put;
1217
1218 err = i915_gem_object_pin_pages(obj);
1219 if (err) {
1220 i915_gem_object_unlock(obj);
1221 goto out_put;
1222 }
1223
1224 err = intel_context_migrate_clear(to_gt(i915)->migrate.context, NULL,
1225 obj->mm.pages->sgl, obj->pat_index,
1226 i915_gem_object_is_lmem(obj),
1227 expand32(POISON_INUSE), &rq);
1228 i915_gem_object_unpin_pages(obj);
1229 if (rq) {
1230 err = dma_resv_reserve_fences(obj->base.resv, 1);
1231 if (!err)
1232 dma_resv_add_fence(obj->base.resv, &rq->fence,
1233 DMA_RESV_USAGE_KERNEL);
1234 i915_request_put(rq);
1235 }
1236 i915_gem_object_unlock(obj);
1237 if (err)
1238 goto out_put;
1239
1240 if (flags & IGT_MMAP_MIGRATE_EVICTABLE)
1241 igt_make_evictable(&objects);
1242
1243 if (flags & IGT_MMAP_MIGRATE_FAIL_GPU) {
1244 err = i915_gem_object_lock(obj, NULL);
1245 if (err)
1246 goto out_put;
1247
1248 /*
1249 * Ensure we only simulate the gpu failure when faulting the
1250 * pages.
1251 */
1252 err = i915_gem_object_wait_moving_fence(obj, true);
1253 i915_gem_object_unlock(obj);
1254 if (err)
1255 goto out_put;
1256 i915_ttm_migrate_set_failure_modes(true, false);
1257 }
1258
1259 err = ___igt_mmap_migrate(i915, obj, addr,
1260 flags & IGT_MMAP_MIGRATE_UNFAULTABLE);
1261
1262 if (!err && obj->mm.region != expected_mr) {
1263 pr_err("%s region mismatch %s\n", __func__, expected_mr->name);
1264 err = -EINVAL;
1265 }
1266
1267 if (flags & IGT_MMAP_MIGRATE_FAIL_GPU) {
1268 struct intel_gt *gt;
1269 unsigned int id;
1270
1271 i915_ttm_migrate_set_failure_modes(false, false);
1272
1273 for_each_gt(gt, i915, id) {
1274 intel_wakeref_t wakeref;
1275 bool wedged;
1276
1277 mutex_lock(&gt->reset.mutex);
1278 wedged = test_bit(I915_WEDGED, &gt->reset.flags);
1279 mutex_unlock(&gt->reset.mutex);
1280 if (!wedged) {
1281 pr_err("gt(%u) not wedged\n", id);
1282 err = -EINVAL;
1283 continue;
1284 }
1285
1286 wakeref = intel_runtime_pm_get(gt->uncore->rpm);
1287 igt_global_reset_lock(gt);
1288 intel_gt_reset(gt, ALL_ENGINES, NULL);
1289 igt_global_reset_unlock(gt);
1290 intel_runtime_pm_put(gt->uncore->rpm, wakeref);
1291 }
1292
1293 if (!i915_gem_object_has_unknown_state(obj)) {
1294 pr_err("object missing unknown_state\n");
1295 err = -EINVAL;
1296 }
1297 }
1298
1299 out_put:
1300 i915_gem_object_put(obj);
1301 igt_close_objects(i915, &objects);
1302 return err;
1303 }
1304
1305 static int igt_mmap_migrate(void *arg)
1306 {
1307 struct drm_i915_private *i915 = arg;
1308 struct intel_memory_region *system = i915->mm.regions[INTEL_REGION_SMEM];
1309 struct intel_memory_region *mr;
1310 enum intel_region_id id;
1311
1312 for_each_memory_region(mr, i915, id) {
1313 struct intel_memory_region *mixed[] = { mr, system };
1314 struct intel_memory_region *single[] = { mr };
1315 struct ttm_resource_manager *man = mr->region_private;
1316 resource_size_t saved_io_size;
1317 int err;
1318
1319 if (mr->private)
1320 continue;
1321
1322 if (!mr->io_size)
1323 continue;
1324
1325 /*
1326 * For testing purposes let's force small BAR, if not already
1327 * present.
1328 */
1329 saved_io_size = mr->io_size;
1330 if (mr->io_size == mr->total) {
1331 resource_size_t io_size = mr->io_size;
1332
1333 io_size = rounddown_pow_of_two(io_size >> 1);
1334 if (io_size < PAGE_SIZE)
1335 continue;
1336
1337 mr->io_size = io_size;
1338 i915_ttm_buddy_man_force_visible_size(man,
1339 io_size >> PAGE_SHIFT);
1340 }
1341
1342 /*
1343 * Allocate in the mappable portion; there should be no surprises here.
1344 */
1345 err = __igt_mmap_migrate(mixed, ARRAY_SIZE(mixed), mr, 0);
1346 if (err)
1347 goto out_io_size;
1348
1349 /*
1350 * Allocate in the non-mappable portion, but force migrating to
1351 * the mappable portion on fault (LMEM -> LMEM)
1352 */
1353 err = __igt_mmap_migrate(single, ARRAY_SIZE(single), mr,
1354 IGT_MMAP_MIGRATE_TOPDOWN |
1355 IGT_MMAP_MIGRATE_FILL |
1356 IGT_MMAP_MIGRATE_EVICTABLE);
1357 if (err)
1358 goto out_io_size;
1359
1360 /*
1361 * Allocate in the non-mappable portion, but force spilling into
1362 * system memory on fault (LMEM -> SMEM)
1363 */
1364 err = __igt_mmap_migrate(mixed, ARRAY_SIZE(mixed), system,
1365 IGT_MMAP_MIGRATE_TOPDOWN |
1366 IGT_MMAP_MIGRATE_FILL);
1367 if (err)
1368 goto out_io_size;
1369
1370 /*
1371 * Allocate in the non-mappable portion, but since the mappable
1372 * portion is already full, and we can't spill to system memory, we
1373 * should expect the fault to fail.
1374 */
1375 err = __igt_mmap_migrate(single, ARRAY_SIZE(single), mr,
1376 IGT_MMAP_MIGRATE_TOPDOWN |
1377 IGT_MMAP_MIGRATE_FILL |
1378 IGT_MMAP_MIGRATE_UNFAULTABLE);
1379 if (err)
1380 goto out_io_size;
1381
1382 /*
1383 * Allocate in the non-mappable portion, but force migrating to
1384 * the mappable portion on fault (LMEM -> LMEM). We then also
1385 * simulate a gpu error when moving the pages when faulting the
1386 * pages, which should result in wedging the gpu and returning
1387 * SIGBUS in the fault handler, since we can't fallback to
1388 * memcpy.
1389 */
1390 err = __igt_mmap_migrate(single, ARRAY_SIZE(single), mr,
1391 IGT_MMAP_MIGRATE_TOPDOWN |
1392 IGT_MMAP_MIGRATE_FILL |
1393 IGT_MMAP_MIGRATE_EVICTABLE |
1394 IGT_MMAP_MIGRATE_FAIL_GPU |
1395 IGT_MMAP_MIGRATE_UNFAULTABLE);
1396 out_io_size:
1397 mr->io_size = saved_io_size;
1398 i915_ttm_buddy_man_force_visible_size(man,
1399 mr->io_size >> PAGE_SHIFT);
1400 if (err)
1401 return err;
1402 }
1403
1404 return 0;
1405 }
1406
1407 static const char *repr_mmap_type(enum i915_mmap_type type)
1408 {
1409 switch (type) {
1410 case I915_MMAP_TYPE_GTT: return "gtt";
1411 case I915_MMAP_TYPE_WB: return "wb";
1412 case I915_MMAP_TYPE_WC: return "wc";
1413 case I915_MMAP_TYPE_UC: return "uc";
1414 case I915_MMAP_TYPE_FIXED: return "fixed";
1415 default: return "unknown";
1416 }
1417 }
1418
1419 static bool can_access(struct drm_i915_gem_object *obj)
1420 {
1421 bool access;
1422
1423 i915_gem_object_lock(obj, NULL);
1424 access = i915_gem_object_has_struct_page(obj) ||
1425 i915_gem_object_has_iomem(obj);
1426 i915_gem_object_unlock(obj);
1427
1428 return access;
1429 }
1430
1431 static int __igt_mmap_access(struct drm_i915_private *i915,
1432 struct drm_i915_gem_object *obj,
1433 enum i915_mmap_type type)
1434 {
1435 unsigned long __user *ptr;
1436 unsigned long A, B;
1437 unsigned long x, y;
1438 unsigned long addr;
1439 int err;
1440 u64 offset;
1441
1442 memset(&A, 0xAA, sizeof(A));
1443 memset(&B, 0xBB, sizeof(B));
1444
1445 if (!can_mmap(obj, type) || !can_access(obj))
1446 return 0;
1447
1448 err = __assign_mmap_offset(obj, type, &offset, NULL);
1449 if (err)
1450 return err;
1451
1452 addr = igt_mmap_offset(i915, offset, obj->base.size, PROT_WRITE, MAP_SHARED);
1453 if (IS_ERR_VALUE(addr))
1454 return addr;
1455 ptr = (unsigned long __user *)addr;
1456
1457 err = __put_user(A, ptr);
1458 if (err) {
1459 pr_err("%s(%s): failed to write into user mmap\n",
1460 obj->mm.region->name, repr_mmap_type(type));
1461 goto out_unmap;
1462 }
1463
1464 intel_gt_flush_ggtt_writes(to_gt(i915));
1465
1466 err = access_process_vm(current, addr, &x, sizeof(x), 0);
1467 if (err != sizeof(x)) {
1468 pr_err("%s(%s): access_process_vm() read failed\n",
1469 obj->mm.region->name, repr_mmap_type(type));
1470 goto out_unmap;
1471 }
1472
1473 err = access_process_vm(current, addr, &B, sizeof(B), FOLL_WRITE);
1474 if (err != sizeof(B)) {
1475 pr_err("%s(%s): access_process_vm() write failed\n",
1476 obj->mm.region->name, repr_mmap_type(type));
1477 goto out_unmap;
1478 }
1479
1480 intel_gt_flush_ggtt_writes(to_gt(i915));
1481
1482 err = __get_user(y, ptr);
1483 if (err) {
1484 pr_err("%s(%s): failed to read from user mmap\n",
1485 obj->mm.region->name, repr_mmap_type(type));
1486 goto out_unmap;
1487 }
1488
1489 if (x != A || y != B) {
1490 pr_err("%s(%s): failed to read/write values, found (%lx, %lx)\n",
1491 obj->mm.region->name, repr_mmap_type(type),
1492 x, y);
1493 err = -EINVAL;
1494 goto out_unmap;
1495 }
1496
1497 out_unmap:
1498 vm_munmap(addr, obj->base.size);
1499 return err;
1500 }
1501
1502 static int igt_mmap_access(void *arg)
1503 {
1504 struct drm_i915_private *i915 = arg;
1505 struct intel_memory_region *mr;
1506 enum intel_region_id id;
1507
1508 for_each_memory_region(mr, i915, id) {
1509 struct drm_i915_gem_object *obj;
1510 int err;
1511
1512 if (mr->private)
1513 continue;
1514
1515 obj = __i915_gem_object_create_user(i915, PAGE_SIZE, &mr, 1);
1516 if (obj == ERR_PTR(-ENODEV))
1517 continue;
1518
1519 if (IS_ERR(obj))
1520 return PTR_ERR(obj);
1521
1522 err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_GTT);
1523 if (err == 0)
1524 err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_WB);
1525 if (err == 0)
1526 err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_WC);
1527 if (err == 0)
1528 err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_UC);
1529 if (err == 0)
1530 err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_FIXED);
1531
1532 i915_gem_object_put(obj);
1533 if (err)
1534 return err;
1535 }
1536
1537 return 0;
1538 }
1539
1540 static int __igt_mmap_gpu(struct drm_i915_private *i915,
1541 struct drm_i915_gem_object *obj,
1542 enum i915_mmap_type type)
1543 {
1544 struct intel_engine_cs *engine;
1545 unsigned long addr;
1546 u32 __user *ux;
1547 u32 bbe;
1548 int err;
1549 u64 offset;
1550
1551 /*
1552 * Verify that the mmap access into the backing store aligns with
1553 * that of the GPU, i.e. that mmap is indeed writing into the same
1554 * page as the one being read by the GPU.
1555 */
1556
1557 if (!can_mmap(obj, type))
1558 return 0;
1559
1560 err = wc_set(obj);
1561 if (err == -ENXIO)
1562 err = gtt_set(obj);
1563 if (err)
1564 return err;
1565
1566 err = __assign_mmap_offset(obj, type, &offset, NULL);
1567 if (err)
1568 return err;
1569
1570 addr = igt_mmap_offset(i915, offset, obj->base.size, PROT_WRITE, MAP_SHARED);
1571 if (IS_ERR_VALUE(addr))
1572 return addr;
1573
1574 ux = u64_to_user_ptr((u64)addr);
1575 bbe = MI_BATCH_BUFFER_END;
1576 if (put_user(bbe, ux)) {
1577 pr_err("%s: Unable to write to mmap\n", obj->mm.region->name);
1578 err = -EFAULT;
1579 goto out_unmap;
1580 }
1581
1582 if (type == I915_MMAP_TYPE_GTT)
1583 intel_gt_flush_ggtt_writes(to_gt(i915));
1584
1585 for_each_uabi_engine(engine, i915) {
1586 struct i915_request *rq;
1587 struct i915_vma *vma;
1588 struct i915_gem_ww_ctx ww;
1589
1590 vma = i915_vma_instance(obj, engine->kernel_context->vm, NULL);
1591 if (IS_ERR(vma)) {
1592 err = PTR_ERR(vma);
1593 goto out_unmap;
1594 }
1595
1596 i915_gem_ww_ctx_init(&ww, false);
1597 retry:
1598 err = i915_gem_object_lock(obj, &ww);
1599 if (!err)
1600 err = i915_vma_pin_ww(vma, &ww, 0, 0, PIN_USER);
1601 if (err)
1602 goto out_ww;
1603
1604 rq = i915_request_create(engine->kernel_context);
1605 if (IS_ERR(rq)) {
1606 err = PTR_ERR(rq);
1607 goto out_unpin;
1608 }
1609
1610 err = i915_vma_move_to_active(vma, rq, 0);
1611
1612 err = engine->emit_bb_start(rq, i915_vma_offset(vma), 0, 0);
1613 i915_request_get(rq);
1614 i915_request_add(rq);
1615
1616 if (i915_request_wait(rq, 0, HZ / 5) < 0) {
1617 struct drm_printer p =
1618 drm_info_printer(engine->i915->drm.dev);
1619
1620 pr_err("%s(%s, %s): Failed to execute batch\n",
1621 __func__, engine->name, obj->mm.region->name);
1622 intel_engine_dump(engine, &p,
1623 "%s\n", engine->name);
1624
1625 intel_gt_set_wedged(engine->gt);
1626 err = -EIO;
1627 }
1628 i915_request_put(rq);
1629
1630 out_unpin:
1631 i915_vma_unpin(vma);
1632 out_ww:
1633 if (err == -EDEADLK) {
1634 err = i915_gem_ww_ctx_backoff(&ww);
1635 if (!err)
1636 goto retry;
1637 }
1638 i915_gem_ww_ctx_fini(&ww);
1639 if (err)
1640 goto out_unmap;
1641 }
1642
1643 out_unmap:
1644 vm_munmap(addr, obj->base.size);
1645 return err;
1646 }
1647
1648 static int igt_mmap_gpu(void *arg)
1649 {
1650 struct drm_i915_private *i915 = arg;
1651 struct intel_memory_region *mr;
1652 enum intel_region_id id;
1653
1654 for_each_memory_region(mr, i915, id) {
1655 struct drm_i915_gem_object *obj;
1656 int err;
1657
1658 if (mr->private)
1659 continue;
1660
1661 obj = __i915_gem_object_create_user(i915, PAGE_SIZE, &mr, 1);
1662 if (obj == ERR_PTR(-ENODEV))
1663 continue;
1664
1665 if (IS_ERR(obj))
1666 return PTR_ERR(obj);
1667
1668 err = __igt_mmap_gpu(i915, obj, I915_MMAP_TYPE_GTT);
1669 if (err == 0)
1670 err = __igt_mmap_gpu(i915, obj, I915_MMAP_TYPE_WC);
1671 if (err == 0)
1672 err = __igt_mmap_gpu(i915, obj, I915_MMAP_TYPE_FIXED);
1673
1674 i915_gem_object_put(obj);
1675 if (err)
1676 return err;
1677 }
1678
1679 return 0;
1680 }
1681
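/*
 * apply_to_page_range() callbacks for __igt_mmap_revoke(): report an error
 * if a PTE of the user mapping is missing (check_present) or is still
 * populated after the mmap should have been revoked (check_absent).
 */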
1682 static int check_present_pte(pte_t *pte, unsigned long addr, void *data)
1683 {
1684 pte_t ptent = ptep_get(pte);
1685
1686 if (!pte_present(ptent) || pte_none(ptent)) {
1687 pr_err("missing PTE:%lx\n",
1688 (addr - (unsigned long)data) >> PAGE_SHIFT);
1689 return -EINVAL;
1690 }
1691
1692 return 0;
1693 }
1694
1695 static int check_absent_pte(pte_t *pte, unsigned long addr, void *data)
1696 {
1697 pte_t ptent = ptep_get(pte);
1698
1699 if (pte_present(ptent) && !pte_none(ptent)) {
1700 pr_err("present PTE:%lx; expected to be revoked\n",
1701 (addr - (unsigned long)data) >> PAGE_SHIFT);
1702 return -EINVAL;
1703 }
1704
1705 return 0;
1706 }
1707
1708 static int check_present(unsigned long addr, unsigned long len)
1709 {
1710 return apply_to_page_range(current->mm, addr, len,
1711 check_present_pte, (void *)addr);
1712 }
1713
1714 static int check_absent(unsigned long addr, unsigned long len)
1715 {
1716 return apply_to_page_range(current->mm, addr, len,
1717 check_absent_pte, (void *)addr);
1718 }
1719
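/*
 * Touch one byte in every page of the user range (including the final
 * byte) so all PTEs are populated before check_present()/check_absent()
 * inspect them.
 */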
1720 static int prefault_range(u64 start, u64 len)
1721 {
1722 const char __user *addr, *end;
1723 char __maybe_unused c;
1724 int err;
1725
1726 addr = u64_to_user_ptr(start);
1727 end = addr + len;
1728
1729 for (; addr < end; addr += PAGE_SIZE) {
1730 err = __get_user(c, addr);
1731 if (err)
1732 return err;
1733 }
1734
1735 return __get_user(c, end - 1);
1736 }
1737
1738 static int __igt_mmap_revoke(struct drm_i915_private *i915,
1739 struct drm_i915_gem_object *obj,
1740 enum i915_mmap_type type)
1741 {
1742 unsigned long addr;
1743 int err;
1744 u64 offset;
1745
1746 if (!can_mmap(obj, type))
1747 return 0;
1748
1749 err = __assign_mmap_offset(obj, type, &offset, NULL);
1750 if (err)
1751 return err;
1752
1753 addr = igt_mmap_offset(i915, offset, obj->base.size, PROT_WRITE, MAP_SHARED);
1754 if (IS_ERR_VALUE(addr))
1755 return addr;
1756
1757 err = prefault_range(addr, obj->base.size);
1758 if (err)
1759 goto out_unmap;
1760
1761 err = check_present(addr, obj->base.size);
1762 if (err) {
1763 pr_err("%s: was not present\n", obj->mm.region->name);
1764 goto out_unmap;
1765 }
1766
1767 /*
1768 * After unbinding the object from the GGTT, its address may be reused
1769 * for other objects. Ergo we have to revoke the previous mmap PTE
1770 * access as it no longer points to the same object.
1771 */
1772 i915_gem_object_lock(obj, NULL);
1773 err = i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_ACTIVE);
1774 i915_gem_object_unlock(obj);
1775 if (err) {
1776 pr_err("Failed to unbind object!\n");
1777 goto out_unmap;
1778 }
1779
1780 if (type != I915_MMAP_TYPE_GTT) {
1781 i915_gem_object_lock(obj, NULL);
1782 __i915_gem_object_put_pages(obj);
1783 i915_gem_object_unlock(obj);
1784 if (i915_gem_object_has_pages(obj)) {
1785 pr_err("Failed to put-pages object!\n");
1786 err = -EINVAL;
1787 goto out_unmap;
1788 }
1789 }
1790
1791 err = check_absent(addr, obj->base.size);
1792 if (err) {
1793 pr_err("%s: was not absent\n", obj->mm.region->name);
1794 goto out_unmap;
1795 }
1796
1797 out_unmap:
1798 vm_munmap(addr, obj->base.size);
1799 return err;
1800 }
1801
1802 static int igt_mmap_revoke(void *arg)
1803 {
1804 struct drm_i915_private *i915 = arg;
1805 struct intel_memory_region *mr;
1806 enum intel_region_id id;
1807
1808 for_each_memory_region(mr, i915, id) {
1809 struct drm_i915_gem_object *obj;
1810 int err;
1811
1812 if (mr->private)
1813 continue;
1814
1815 obj = __i915_gem_object_create_user(i915, PAGE_SIZE, &mr, 1);
1816 if (obj == ERR_PTR(-ENODEV))
1817 continue;
1818
1819 if (IS_ERR(obj))
1820 return PTR_ERR(obj);
1821
1822 err = __igt_mmap_revoke(i915, obj, I915_MMAP_TYPE_GTT);
1823 if (err == 0)
1824 err = __igt_mmap_revoke(i915, obj, I915_MMAP_TYPE_WC);
1825 if (err == 0)
1826 err = __igt_mmap_revoke(i915, obj, I915_MMAP_TYPE_FIXED);
1827
1828 i915_gem_object_put(obj);
1829 if (err)
1830 return err;
1831 }
1832
1833 return 0;
1834 }
1835
1836 int i915_gem_mman_live_selftests(struct drm_i915_private *i915)
1837 {
1838 static const struct i915_subtest tests[] = {
1839 SUBTEST(igt_partial_tiling),
1840 SUBTEST(igt_smoke_tiling),
1841 SUBTEST(igt_mmap_offset_exhaustion),
1842 SUBTEST(igt_mmap),
1843 SUBTEST(igt_mmap_migrate),
1844 SUBTEST(igt_mmap_access),
1845 SUBTEST(igt_mmap_revoke),
1846 SUBTEST(igt_mmap_gpu),
1847 };
1848
1849 return i915_live_subtests(tests, i915);
1850 }
1851