/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2017 Intel Corporation
 */

#include <linux/prime_numbers.h>

#include "gt/intel_engine_pm.h"
#include "gt/intel_gpu_commands.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_ring.h"

#include "i915_selftest.h"
#include "selftests/i915_random.h"

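/* Pair the object under test with the engine used to access it. */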
struct context {
	struct drm_i915_gem_object *obj;
	struct intel_engine_cs *engine;
};

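/*
 * Write a single dword via a kmap'ed CPU mapping, flushing the cacheline
 * before/after the access as directed by the cache-domain API.
 */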
static int cpu_set(struct context *ctx, unsigned long offset, u32 v)
{
	unsigned int needs_clflush;
	struct page *page;
	void *map;
	u32 *cpu;
	int err;

	i915_gem_object_lock(ctx->obj, NULL);
	err = i915_gem_object_prepare_write(ctx->obj, &needs_clflush);
	if (err)
		goto out;

	page = i915_gem_object_get_page(ctx->obj, offset >> PAGE_SHIFT);
	map = kmap_atomic(page);
	cpu = map + offset_in_page(offset);

	if (needs_clflush & CLFLUSH_BEFORE)
		drm_clflush_virt_range(cpu, sizeof(*cpu));

	*cpu = v;

	if (needs_clflush & CLFLUSH_AFTER)
		drm_clflush_virt_range(cpu, sizeof(*cpu));

	kunmap_atomic(map);
	i915_gem_object_finish_access(ctx->obj);

out:
	i915_gem_object_unlock(ctx->obj);
	return err;
}

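/*
 * Read a single dword via a kmap'ed CPU mapping, invalidating the
 * cacheline first if the domain tracking says the CPU cache is stale.
 */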
static int cpu_get(struct context *ctx, unsigned long offset, u32 *v)
{
	unsigned int needs_clflush;
	struct page *page;
	void *map;
	u32 *cpu;
	int err;

	i915_gem_object_lock(ctx->obj, NULL);
	err = i915_gem_object_prepare_read(ctx->obj, &needs_clflush);
	if (err)
		goto out;

	page = i915_gem_object_get_page(ctx->obj, offset >> PAGE_SHIFT);
	map = kmap_atomic(page);
	cpu = map + offset_in_page(offset);

	if (needs_clflush & CLFLUSH_BEFORE)
		drm_clflush_virt_range(cpu, sizeof(*cpu));

	*v = *cpu;

	kunmap_atomic(map);
	i915_gem_object_finish_access(ctx->obj);

out:
	i915_gem_object_unlock(ctx->obj);
	return err;
}

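/*
 * Write a single dword through a GGTT iomap of the object in the
 * mappable aperture, holding a GT wakeref around the MMIO access.
 */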
static int gtt_set(struct context *ctx, unsigned long offset, u32 v)
{
	struct i915_vma *vma;
	u32 __iomem *map;
	int err = 0;

	i915_gem_object_lock(ctx->obj, NULL);
	err = i915_gem_object_set_to_gtt_domain(ctx->obj, true);
	i915_gem_object_unlock(ctx->obj);
	if (err)
		return err;

	vma = i915_gem_object_ggtt_pin(ctx->obj, NULL, 0, 0, PIN_MAPPABLE);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	intel_gt_pm_get(vma->vm->gt);

	map = i915_vma_pin_iomap(vma);
	i915_vma_unpin(vma);
	if (IS_ERR(map)) {
		err = PTR_ERR(map);
		goto out_rpm;
	}

	iowrite32(v, &map[offset / sizeof(*map)]);
	i915_vma_unpin_iomap(vma);

out_rpm:
	intel_gt_pm_put(vma->vm->gt);
	return err;
}

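/* Read a single dword back through a GGTT iomap of the object. */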
static int gtt_get(struct context *ctx, unsigned long offset, u32 *v)
{
	struct i915_vma *vma;
	u32 __iomem *map;
	int err = 0;

	i915_gem_object_lock(ctx->obj, NULL);
	err = i915_gem_object_set_to_gtt_domain(ctx->obj, false);
	i915_gem_object_unlock(ctx->obj);
	if (err)
		return err;

	vma = i915_gem_object_ggtt_pin(ctx->obj, NULL, 0, 0, PIN_MAPPABLE);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	intel_gt_pm_get(vma->vm->gt);

	map = i915_vma_pin_iomap(vma);
	i915_vma_unpin(vma);
	if (IS_ERR(map)) {
		err = PTR_ERR(map);
		goto out_rpm;
	}

	*v = ioread32(&map[offset / sizeof(*map)]);
	i915_vma_unpin_iomap(vma);

out_rpm:
	intel_gt_pm_put(vma->vm->gt);
	return err;
}

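/* Write a single dword through a write-combined (WC) CPU mapping. */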
static int wc_set(struct context *ctx, unsigned long offset, u32 v)
{
	u32 *map;
	int err;

	i915_gem_object_lock(ctx->obj, NULL);
	err = i915_gem_object_set_to_wc_domain(ctx->obj, true);
	i915_gem_object_unlock(ctx->obj);
	if (err)
		return err;

	map = i915_gem_object_pin_map_unlocked(ctx->obj, I915_MAP_WC);
	if (IS_ERR(map))
		return PTR_ERR(map);

	map[offset / sizeof(*map)] = v;

	__i915_gem_object_flush_map(ctx->obj, offset, sizeof(*map));
	i915_gem_object_unpin_map(ctx->obj);

	return 0;
}

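/* Read a single dword through a write-combined (WC) CPU mapping. */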
static int wc_get(struct context *ctx, unsigned long offset, u32 *v)
{
	u32 *map;
	int err;

	i915_gem_object_lock(ctx->obj, NULL);
	err = i915_gem_object_set_to_wc_domain(ctx->obj, false);
	i915_gem_object_unlock(ctx->obj);
	if (err)
		return err;

	map = i915_gem_object_pin_map_unlocked(ctx->obj, I915_MAP_WC);
	if (IS_ERR(map))
		return PTR_ERR(map);

	*v = map[offset / sizeof(*map)];
	i915_gem_object_unpin_map(ctx->obj);

	return 0;
}

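/*
 * Write a single dword from the GPU by emitting MI_STORE_DWORD_IMM on
 * the chosen engine, using the command layout required by each
 * generation.
 */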
static int gpu_set(struct context *ctx, unsigned long offset, u32 v)
{
	struct i915_request *rq;
	struct i915_vma *vma;
	u32 *cs;
	int err;

	vma = i915_gem_object_ggtt_pin(ctx->obj, NULL, 0, 0, 0);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	i915_gem_object_lock(ctx->obj, NULL);
	err = i915_gem_object_set_to_gtt_domain(ctx->obj, true);
	if (err)
		goto out_unlock;

	rq = intel_engine_create_kernel_request(ctx->engine);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto out_unpin;
	}

	cs = intel_ring_begin(rq, 4);
	if (IS_ERR(cs)) {
		err = PTR_ERR(cs);
		goto out_rq;
	}

	if (GRAPHICS_VER(ctx->engine->i915) >= 8) {
		*cs++ = MI_STORE_DWORD_IMM_GEN4 | 1 << 22;
		*cs++ = lower_32_bits(i915_ggtt_offset(vma) + offset);
		*cs++ = upper_32_bits(i915_ggtt_offset(vma) + offset);
		*cs++ = v;
	} else if (GRAPHICS_VER(ctx->engine->i915) >= 4) {
		*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
		*cs++ = 0;
		*cs++ = i915_ggtt_offset(vma) + offset;
		*cs++ = v;
	} else {
		*cs++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
		*cs++ = i915_ggtt_offset(vma) + offset;
		*cs++ = v;
		*cs++ = MI_NOOP;
	}
	intel_ring_advance(rq, cs);

	err = i915_request_await_object(rq, vma->obj, true);
	if (err == 0)
		err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);

out_rq:
	i915_request_add(rq);
out_unpin:
	i915_vma_unpin(vma);
out_unlock:
	i915_gem_object_unlock(ctx->obj);

	return err;
}

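/*
 * Validity checks: skip access modes the device cannot exercise, e.g.
 * when the GT is wedged, when no fence registers are available for
 * GTT access, or when the engine cannot emit MI_STORE_DWORD_IMM.
 */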
static bool always_valid(struct context *ctx)
{
	return true;
}

static bool needs_fence_registers(struct context *ctx)
{
	struct intel_gt *gt = ctx->engine->gt;

	if (intel_gt_is_wedged(gt))
		return false;

	return gt->ggtt->num_fences;
}

static bool needs_mi_store_dword(struct context *ctx)
{
	if (intel_gt_is_wedged(ctx->engine->gt))
		return false;

	return intel_engine_can_store_dword(ctx->engine);
}

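/*
 * Each mode supplies a setter, an optional getter (the GPU path is
 * write-only here) and a validity check; the table is NULL-terminated.
 */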
static const struct igt_coherency_mode {
	const char *name;
	int (*set)(struct context *ctx, unsigned long offset, u32 v);
	int (*get)(struct context *ctx, unsigned long offset, u32 *v);
	bool (*valid)(struct context *ctx);
} igt_coherency_mode[] = {
	{ "cpu", cpu_set, cpu_get, always_valid },
	{ "gtt", gtt_set, gtt_get, needs_fence_registers },
	{ "wc", wc_set, wc_get, always_valid },
	{ "gpu", gpu_set, NULL, needs_mi_store_dword },
	{ },
};

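/* Pick one of the uabi engines at random for this pass of the test. */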
static struct intel_engine_cs *
random_engine(struct drm_i915_private *i915, struct rnd_state *prng)
{
	struct intel_engine_cs *engine;
	unsigned int count;

	count = 0;
	for_each_uabi_engine(engine, i915)
		count++;

	count = i915_prandom_u32_max_state(count, prng);
	for_each_uabi_engine(engine, i915)
		if (count-- == 0)
			return engine;

	return NULL;
}

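/*
 * For every valid (overwrite, write, read) combination of the modes
 * above, scribble stale values over a prime number of cachelines,
 * overwrite them with fresh random values, then read back and verify.
 */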
static int igt_gem_coherency(void *arg)
{
	const unsigned int ncachelines = PAGE_SIZE/64;
	struct drm_i915_private *i915 = arg;
	const struct igt_coherency_mode *read, *write, *over;
	unsigned long count, n;
	u32 *offsets, *values;
	I915_RND_STATE(prng);
	struct context ctx;
	int err = 0;

	/*
	 * We repeatedly write, overwrite and read from a sequence of
	 * cachelines in order to try and detect incoherency (unflushed writes
	 * from either the CPU or GPU). Each setter/getter uses our cache
	 * domain API which should prevent incoherency.
	 */

	offsets = kmalloc_array(ncachelines, 2*sizeof(u32), GFP_KERNEL);
	if (!offsets)
		return -ENOMEM;
	for (count = 0; count < ncachelines; count++)
		offsets[count] = count * 64 + 4 * (count % 16);

	values = offsets + ncachelines;

	ctx.engine = random_engine(i915, &prng);
	if (!ctx.engine) {
		err = -ENODEV;
		goto out_free;
	}
	pr_info("%s: using %s\n", __func__, ctx.engine->name);
	intel_engine_pm_get(ctx.engine);

	for (over = igt_coherency_mode; over->name; over++) {
		if (!over->set)
			continue;

		if (!over->valid(&ctx))
			continue;

		for (write = igt_coherency_mode; write->name; write++) {
			if (!write->set)
				continue;

			if (!write->valid(&ctx))
				continue;

			for (read = igt_coherency_mode; read->name; read++) {
				if (!read->get)
					continue;

				if (!read->valid(&ctx))
					continue;

				for_each_prime_number_from(count, 1, ncachelines) {
					ctx.obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
					if (IS_ERR(ctx.obj)) {
						err = PTR_ERR(ctx.obj);
						goto out_pm;
					}

					i915_random_reorder(offsets, ncachelines, &prng);
					for (n = 0; n < count; n++)
						values[n] = prandom_u32_state(&prng);

					for (n = 0; n < count; n++) {
						err = over->set(&ctx, offsets[n], ~values[n]);
						if (err) {
							pr_err("Failed to set stale value[%ld/%ld] in object using %s, err=%d\n",
							       n, count, over->name, err);
							goto put_object;
						}
					}

					for (n = 0; n < count; n++) {
						err = write->set(&ctx, offsets[n], values[n]);
						if (err) {
							pr_err("Failed to set value[%ld/%ld] in object using %s, err=%d\n",
							       n, count, write->name, err);
							goto put_object;
						}
					}

					for (n = 0; n < count; n++) {
						u32 found;

						err = read->get(&ctx, offsets[n], &found);
						if (err) {
							pr_err("Failed to get value[%ld/%ld] in object using %s, err=%d\n",
							       n, count, read->name, err);
							goto put_object;
						}

						if (found != values[n]) {
							pr_err("Value[%ld/%ld] mismatch, (overwrite with %s) wrote [%s] %x read [%s] %x (inverse %x), at offset %x\n",
							       n, count, over->name,
							       write->name, values[n],
							       read->name, found,
							       ~values[n], offsets[n]);
							err = -EINVAL;
							goto put_object;
						}
					}

					i915_gem_object_put(ctx.obj);
				}
			}
		}
	}
out_pm:
	intel_engine_pm_put(ctx.engine);
out_free:
	kfree(offsets);
	return err;

put_object:
	i915_gem_object_put(ctx.obj);
	goto out_pm;
}

int i915_gem_coherency_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_gem_coherency),
	};

	return i915_live_subtests(tests, i915);
}