/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include "../i915_selftest.h"

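/*
 * Validate a forcewake range table: each entry must have start < end, the
 * entries must be sorted in strictly ascending order and, when is_watertight
 * is set, each range must begin immediately after the previous one ends.
 */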
static int intel_fw_table_check(const struct intel_forcewake_range *ranges,
				unsigned int num_ranges,
				bool is_watertight)
{
	unsigned int i;
	s32 prev;

	for (i = 0, prev = -1; i < num_ranges; i++, ranges++) {
		/* Check that the table is watertight */
		if (is_watertight && (prev + 1) != (s32)ranges->start) {
			pr_err("%s: entry[%d]:(%x, %x) is not watertight to previous (%x)\n",
			       __func__, i, ranges->start, ranges->end, prev);
			return -EINVAL;
		}

		/* Check that the table never goes backwards */
		if (prev >= (s32)ranges->start) {
			pr_err("%s: entry[%d]:(%x, %x) is less than the previous (%x)\n",
			       __func__, i, ranges->start, ranges->end, prev);
			return -EINVAL;
		}

		/* Check that the entry is valid */
		if (ranges->start >= ranges->end) {
			pr_err("%s: entry[%d]:(%x, %x) has negative length\n",
			       __func__, i, ranges->start, ranges->end);
			return -EINVAL;
		}

		prev = ranges->end;
	}

	return 0;
}

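/*
 * Validate the per-generation shadowed register lists: every range must have
 * end >= start, a dword-aligned start, and must sort strictly after the
 * previous range.
 */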
static int intel_shadow_table_check(void)
{
	struct {
		const struct i915_range *regs;
		unsigned int size;
	} range_lists[] = {
		{ gen8_shadowed_regs, ARRAY_SIZE(gen8_shadowed_regs) },
		{ gen11_shadowed_regs, ARRAY_SIZE(gen11_shadowed_regs) },
		{ gen12_shadowed_regs, ARRAY_SIZE(gen12_shadowed_regs) },
		{ dg2_shadowed_regs, ARRAY_SIZE(dg2_shadowed_regs) },
		{ pvc_shadowed_regs, ARRAY_SIZE(pvc_shadowed_regs) },
	};
	const struct i915_range *range;
	unsigned int i, j;
	s32 prev;

	for (j = 0; j < ARRAY_SIZE(range_lists); ++j) {
		range = range_lists[j].regs;
		for (i = 0, prev = -1; i < range_lists[j].size; i++, range++) {
			if (range->end < range->start) {
				pr_err("%s: range[%d]:(%06x-%06x) has end before start\n",
				       __func__, i, range->start, range->end);
				return -EINVAL;
			}

			if (prev >= (s32)range->start) {
				pr_err("%s: range[%d]:(%06x-%06x) is before end of previous (%06x)\n",
				       __func__, i, range->start, range->end, prev);
				return -EINVAL;
			}

			if (range->start % 4) {
				pr_err("%s: range[%d]:(%06x-%06x) has non-dword-aligned start\n",
				       __func__, i, range->start, range->end);
				return -EINVAL;
			}

			prev = range->end;
		}
	}

	return 0;
}

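/*
 * Mock selftests run without touching hardware: sanity-check the static
 * forcewake range tables and the shadowed register lists.
 */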
int intel_uncore_mock_selftests(void)
{
	struct {
		const struct intel_forcewake_range *ranges;
		unsigned int num_ranges;
		bool is_watertight;
	} fw[] = {
		{ __vlv_fw_ranges, ARRAY_SIZE(__vlv_fw_ranges), false },
		{ __chv_fw_ranges, ARRAY_SIZE(__chv_fw_ranges), false },
		{ __gen9_fw_ranges, ARRAY_SIZE(__gen9_fw_ranges), true },
		{ __gen11_fw_ranges, ARRAY_SIZE(__gen11_fw_ranges), true },
		{ __gen12_fw_ranges, ARRAY_SIZE(__gen12_fw_ranges), true },
		{ __xehp_fw_ranges, ARRAY_SIZE(__xehp_fw_ranges), true },
		{ __pvc_fw_ranges, ARRAY_SIZE(__pvc_fw_ranges), true },
	};
	int err, i;

	for (i = 0; i < ARRAY_SIZE(fw); i++) {
		err = intel_fw_table_check(fw[i].ranges,
					   fw[i].num_ranges,
					   fw[i].is_watertight);
		if (err)
			return err;
	}

	err = intel_shadow_table_check();
	if (err)
		return err;

	return 0;
}

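/*
 * Check explicit forcewake handling: while we hold a forcewake reference, a
 * chosen per-engine register must read back non-zero; once every reference
 * is released and the delayed release timers have been flushed, the same
 * read is expected to return zero.
 */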
static int live_forcewake_ops(void *arg)
{
	static const struct reg {
		const char *name;
		u8 min_graphics_ver;
		u8 max_graphics_ver;
		unsigned int offset;
	} registers[] = {
		{
			"RING_START",
			6, 7,
			0x38,
		},
		{
			"RING_MI_MODE",
			8, U8_MAX,
			0x9c,
		},
		{} /* sentinel for the r->name lookup loop below */
	};
	const struct reg *r;
	struct intel_gt *gt = arg;
	struct intel_uncore_forcewake_domain *domain;
	struct intel_uncore *uncore = gt->uncore;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	intel_wakeref_t wakeref;
	unsigned int tmp;
	int err = 0;

	GEM_BUG_ON(gt->awake);

	/* vlv/chv with their pcu behave differently wrt reads */
	if (IS_VALLEYVIEW(gt->i915) || IS_CHERRYVIEW(gt->i915)) {
		pr_debug("PCU fakes forcewake badly; skipping\n");
		return 0;
	}

	/*
	 * Not quite as reliable across the gen as one would hope.
	 *
	 * Either our theory of operation is incorrect, or there remain
	 * external parties interfering with the powerwells.
	 *
	 * https://bugs.freedesktop.org/show_bug.cgi?id=110210
	 */
	if (!IS_ENABLED(CONFIG_DRM_I915_SELFTEST_BROKEN))
		return 0;

	/* We have to pick carefully to get the exact behaviour we need */
	for (r = registers; r->name; r++)
		if (IS_GRAPHICS_VER(gt->i915, r->min_graphics_ver, r->max_graphics_ver))
			break;
	if (!r->name) {
		pr_debug("Forcewaked register not known for %s; skipping\n",
			 intel_platform_name(INTEL_INFO(gt->i915)->platform));
		return 0;
	}

	wakeref = intel_runtime_pm_get(uncore->rpm);

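	/* Flush any pending delayed forcewake releases so every domain starts idle */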
	for_each_fw_domain(domain, uncore, tmp) {
		smp_store_mb(domain->active, false);
		if (!hrtimer_cancel(&domain->timer))
			continue;

		intel_uncore_fw_release_timer(&domain->timer);
	}

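	/* For each engine, read the chosen register with and without forcewake held */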
	for_each_engine(engine, gt, id) {
		i915_reg_t mmio = _MMIO(engine->mmio_base + r->offset);
		u32 __iomem *reg = uncore->regs + engine->mmio_base + r->offset;
		enum forcewake_domains fw_domains;
		u32 val;

		if (!engine->default_state)
			continue;

		fw_domains = intel_uncore_forcewake_for_reg(uncore, mmio,
							    FW_REG_READ);
		if (!fw_domains)
			continue;

		for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
			if (!domain->wake_count)
				continue;

			pr_err("fw_domain %s still active, aborting test!\n",
			       intel_uncore_forcewake_domain_to_str(domain->id));
			err = -EINVAL;
			goto out_rpm;
		}

		intel_uncore_forcewake_get(uncore, fw_domains);
		val = readl(reg);
		intel_uncore_forcewake_put(uncore, fw_domains);

		/* Flush the forcewake release (delayed onto a timer) */
		for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
			smp_store_mb(domain->active, false);
			if (hrtimer_cancel(&domain->timer))
				intel_uncore_fw_release_timer(&domain->timer);

			preempt_disable();
			err = wait_ack_clear(domain, FORCEWAKE_KERNEL);
			preempt_enable();
			if (err) {
				pr_err("Failed to clear fw_domain %s\n",
				       intel_uncore_forcewake_domain_to_str(domain->id));
				goto out_rpm;
			}
		}

		if (!val) {
			pr_err("%s:%s was zero while fw was held!\n",
			       engine->name, r->name);
			err = -EINVAL;
			goto out_rpm;
		}

		/* We then expect the read to return 0 outside of the fw */
		if (wait_for(readl(reg) == 0, 100)) {
			pr_err("%s:%s=%0x, fw_domains 0x%x still up after 100ms!\n",
			       engine->name, r->name, readl(reg), fw_domains);
			err = -ETIMEDOUT;
			goto out_rpm;
		}
	}

out_rpm:
	intel_runtime_pm_put(uncore->rpm, wakeref);
	return err;
}

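/*
 * Walk the low mmio range and use the unclaimed-mmio detection to check that
 * every register readable under full forcewake can still be read cleanly
 * once all forcewake has been released and reset.
 */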
static int live_forcewake_domains(void *arg)
{
#define FW_RANGE 0x40000
	struct intel_gt *gt = arg;
	struct intel_uncore *uncore = gt->uncore;
	unsigned long *valid;
	u32 offset;
	int err;

	if (!HAS_FPGA_DBG_UNCLAIMED(gt->i915) &&
	    !IS_VALLEYVIEW(gt->i915) &&
	    !IS_CHERRYVIEW(gt->i915))
		return 0;

	/*
	 * This test may lock up the machine or cause GPU hangs afterwards.
	 */
	if (!IS_ENABLED(CONFIG_DRM_I915_SELFTEST_BROKEN))
		return 0;

	valid = bitmap_zalloc(FW_RANGE, GFP_KERNEL);
	if (!valid)
		return -ENOMEM;

	intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);

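	/* Record the offsets readable under full forcewake without tripping unclaimed-mmio */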
	check_for_unclaimed_mmio(uncore);
	for (offset = 0; offset < FW_RANGE; offset += 4) {
		i915_reg_t reg = { offset };

		intel_uncore_posting_read_fw(uncore, reg);
		if (!check_for_unclaimed_mmio(uncore))
			set_bit(offset, valid);
	}

	intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);

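	/*
	 * Drop and reset all forcewake, then re-read each offset found above
	 * and flag any read that now reports an unclaimed access.
	 */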
	err = 0;
	for_each_set_bit(offset, valid, FW_RANGE) {
		i915_reg_t reg = { offset };

		iosf_mbi_punit_acquire();
		intel_uncore_forcewake_reset(uncore);
		iosf_mbi_punit_release();

		check_for_unclaimed_mmio(uncore);

		intel_uncore_posting_read(uncore, reg);
		if (check_for_unclaimed_mmio(uncore)) {
			pr_err("Unclaimed mmio read to register 0x%04x\n",
			       offset);
			err = -EINVAL;
		}
	}

	bitmap_free(valid);
	return err;
}

static int live_fw_table(void *arg)
{
	struct intel_gt *gt = arg;

	/* Confirm the table we load is still valid */
	return intel_fw_table_check(gt->uncore->fw_domains_table,
				    gt->uncore->fw_domains_table_entries,
				    GRAPHICS_VER(gt->i915) >= 9);
}

int intel_uncore_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(live_fw_table),
		SUBTEST(live_forcewake_ops),
		SUBTEST(live_forcewake_domains),
	};

	return intel_gt_live_subtests(tests, to_gt(i915));
}