/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 */
#include "gt/intel_gpu_commands.h"
#include "gt/intel_gt.h"

#include "gem/i915_gem_internal.h"
#include "gem/selftests/igt_gem_utils.h"

#include "igt_spinner.h"

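/*
 * Allocate the spinner's two backing objects: a page used as a private
 * "HWS" into which the spinning batch writes its seqno, and a page that
 * will hold the batch itself. Nothing is mapped or pinned here; that is
 * deferred to igt_spinner_pin().
 */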
int igt_spinner_init(struct igt_spinner *spin, struct intel_gt *gt)
{
	int err;

	memset(spin, 0, sizeof(*spin));
	spin->gt = gt;

	spin->hws = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
	if (IS_ERR(spin->hws)) {
		err = PTR_ERR(spin->hws);
		goto err;
	}
	i915_gem_object_set_cache_coherency(spin->hws, I915_CACHE_LLC);

	spin->obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
	if (IS_ERR(spin->obj)) {
		err = PTR_ERR(spin->obj);
		goto err_hws;
	}

	return 0;

err_hws:
	i915_gem_object_put(spin->hws);
err:
	return err;
}

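/*
 * Instantiate a vma for @obj in the context's VM, map the object into the
 * kernel with the requested mapping @mode and pin the vma for GPU use.
 * When a ww context is supplied, the object lock is taken as part of that
 * ww transaction and left held for the caller to resolve.
 */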
static void *igt_spinner_pin_obj(struct intel_context *ce,
				 struct i915_gem_ww_ctx *ww,
				 struct drm_i915_gem_object *obj,
				 unsigned int mode, struct i915_vma **vma)
{
	void *vaddr;
	int ret;

	*vma = i915_vma_instance(obj, ce->vm, NULL);
	if (IS_ERR(*vma))
		return ERR_CAST(*vma);

	ret = i915_gem_object_lock(obj, ww);
	if (ret)
		return ERR_PTR(ret);

	vaddr = i915_gem_object_pin_map(obj, mode);

	if (!ww)
		i915_gem_object_unlock(obj);

	if (IS_ERR(vaddr))
		return vaddr;

	if (ww)
		ret = i915_vma_pin_ww(*vma, ww, 0, 0, PIN_USER);
	else
		ret = i915_vma_pin(*vma, 0, 0, PIN_USER);

	if (ret) {
		i915_gem_object_unpin_map(obj);
		return ERR_PTR(ret);
	}

	return vaddr;
}

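/*
 * Lazily map and pin the spinner's pages for use with @ce. The HWS page is
 * poisoned with 0xff so that hws_seqno() does not observe a stale seqno
 * before the batch starts writing, and the batch page is mapped with a
 * mapping type coherent with the GT.
 */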
int igt_spinner_pin(struct igt_spinner *spin,
		    struct intel_context *ce,
		    struct i915_gem_ww_ctx *ww)
{
	void *vaddr;

	if (spin->ce && WARN_ON(spin->ce != ce))
		return -ENODEV;
	spin->ce = ce;

	if (!spin->seqno) {
		vaddr = igt_spinner_pin_obj(ce, ww, spin->hws, I915_MAP_WB, &spin->hws_vma);
		if (IS_ERR(vaddr))
			return PTR_ERR(vaddr);

		spin->seqno = memset(vaddr, 0xff, PAGE_SIZE);
	}

	if (!spin->batch) {
		unsigned int mode;

		mode = intel_gt_coherent_map_type(spin->gt, spin->obj, false);
		vaddr = igt_spinner_pin_obj(ce, ww, spin->obj, mode, &spin->batch_vma);
		if (IS_ERR(vaddr))
			return PTR_ERR(vaddr);

		spin->batch = vaddr;
	}

	return 0;
}

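/*
 * Each fence context is given its own u32 slot within the HWS page; the
 * spinning batch writes the request's seqno into that slot.
 */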
static unsigned int seqno_offset(u64 fence)
{
	return offset_in_page(sizeof(u32) * fence);
}

static u64 hws_address(const struct i915_vma *hws,
		       const struct i915_request *rq)
{
	return i915_vma_offset(hws) + seqno_offset(rq->fence.context);
}

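/*
 * Build a request whose batch writes rq->fence.seqno into the HWS page and
 * then spins by branching back to the start of the batch. The loop runs
 * until igt_spinner_end() replaces the first batch dword with
 * MI_BATCH_BUFFER_END.
 */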
struct i915_request *
igt_spinner_create_request(struct igt_spinner *spin,
			   struct intel_context *ce,
			   u32 arbitration_command)
{
	struct intel_engine_cs *engine = ce->engine;
	struct i915_request *rq = NULL;
	struct i915_vma *hws, *vma;
	unsigned int flags;
	u32 *batch;
	int err;

	GEM_BUG_ON(spin->gt != ce->vm->gt);

	if (!intel_engine_can_store_dword(ce->engine))
		return ERR_PTR(-ENODEV);

	if (!spin->batch) {
		err = igt_spinner_pin(spin, ce, NULL);
		if (err)
			return ERR_PTR(err);
	}

	hws = spin->hws_vma;
	vma = spin->batch_vma;

	rq = intel_context_create_request(ce);
	if (IS_ERR(rq))
		return ERR_CAST(rq);

	err = igt_vma_move_to_active_unlocked(vma, rq, 0);
	if (err)
		goto cancel_rq;

	err = igt_vma_move_to_active_unlocked(hws, rq, 0);
	if (err)
		goto cancel_rq;

	batch = spin->batch;

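	/* Report that the spinner is running: store the seqno into the HWS. */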
	if (GRAPHICS_VER(rq->i915) >= 8) {
		*batch++ = MI_STORE_DWORD_IMM_GEN4;
		*batch++ = lower_32_bits(hws_address(hws, rq));
		*batch++ = upper_32_bits(hws_address(hws, rq));
	} else if (GRAPHICS_VER(rq->i915) >= 6) {
		*batch++ = MI_STORE_DWORD_IMM_GEN4;
		*batch++ = 0;
		*batch++ = hws_address(hws, rq);
	} else if (GRAPHICS_VER(rq->i915) >= 4) {
		*batch++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
		*batch++ = 0;
		*batch++ = hws_address(hws, rq);
	} else {
		*batch++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
		*batch++ = hws_address(hws, rq);
	}
	*batch++ = rq->fence.seqno;

	*batch++ = arbitration_command;

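	/*
	 * Spin: branch back to the start of the batch, repeating until the
	 * first dword is rewritten by igt_spinner_end().
	 */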
	if (GRAPHICS_VER(rq->i915) >= 8)
		*batch++ = MI_BATCH_BUFFER_START | BIT(8) | 1;
	else if (IS_HASWELL(rq->i915))
		*batch++ = MI_BATCH_BUFFER_START | MI_BATCH_PPGTT_HSW;
	else if (GRAPHICS_VER(rq->i915) >= 6)
		*batch++ = MI_BATCH_BUFFER_START;
	else
		*batch++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
	*batch++ = lower_32_bits(i915_vma_offset(vma));
	*batch++ = upper_32_bits(i915_vma_offset(vma));

	*batch++ = MI_BATCH_BUFFER_END; /* not reached */

	intel_gt_chipset_flush(engine->gt);

	if (engine->emit_init_breadcrumb) {
		err = engine->emit_init_breadcrumb(rq);
		if (err)
			goto cancel_rq;
	}

	flags = 0;
	if (GRAPHICS_VER(rq->i915) <= 5)
		flags |= I915_DISPATCH_SECURE;
	err = engine->emit_bb_start(rq, i915_vma_offset(vma), PAGE_SIZE, flags);

cancel_rq:
	if (err) {
		i915_request_set_error_once(rq, err);
		i915_request_add(rq);
	}
	return err ? ERR_PTR(err) : rq;
}

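/* Read back the seqno last written to @rq's slot in the HWS page. */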
static u32
hws_seqno(const struct igt_spinner *spin, const struct i915_request *rq)
{
	u32 *seqno = spin->seqno + seqno_offset(rq->fence.context);

	return READ_ONCE(*seqno);
}

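/*
 * Stop the spinner by overwriting the first batch dword with
 * MI_BATCH_BUFFER_END and flushing the write so the GPU observes it.
 */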
void igt_spinner_end(struct igt_spinner *spin)
{
	if (!spin->batch)
		return;

	*spin->batch = MI_BATCH_BUFFER_END;
	intel_gt_chipset_flush(spin->gt);
}

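/*
 * Tear down the spinner: terminate any active loop, then unpin and release
 * the batch and HWS objects.
 */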
void igt_spinner_fini(struct igt_spinner *spin)
{
	igt_spinner_end(spin);

	if (spin->batch) {
		i915_vma_unpin(spin->batch_vma);
		i915_gem_object_unpin_map(spin->obj);
	}
	i915_gem_object_put(spin->obj);

	if (spin->seqno) {
		i915_vma_unpin(spin->hws_vma);
		i915_gem_object_unpin_map(spin->hws);
	}
	i915_gem_object_put(spin->hws);
}

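/*
 * Returns true once the spinner reports (via its HWS write) that it has
 * started executing on the GPU: first a short 100us poll, then a longer
 * wait of up to 50ms.
 */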
bool igt_wait_for_spinner(struct igt_spinner *spin, struct i915_request *rq)
{
	if (i915_request_is_ready(rq))
		intel_engine_flush_submission(rq->engine);

	return !(wait_for_us(i915_seqno_passed(hws_seqno(spin, rq),
					       rq->fence.seqno),
			     100) &&
		 wait_for(i915_seqno_passed(hws_seqno(spin, rq),
					    rq->fence.seqno),
			  50));
}