// SPDX-License-Identifier: MIT
/*
 * Copyright © 2008-2018 Intel Corporation
 */

#include <linux/sched/mm.h>
#include <linux/stop_machine.h>
#include <linux/string_helpers.h>

#include "display/intel_display.h"
#include "display/intel_overlay.h"

#include "gem/i915_gem_context.h"

#include "gt/intel_gt_regs.h"

#include "i915_drv.h"
#include "i915_file_private.h"
#include "i915_gpu_error.h"
#include "i915_irq.h"
#include "intel_breadcrumbs.h"
#include "intel_engine_pm.h"
#include "intel_engine_regs.h"
#include "intel_gt.h"
#include "intel_gt_pm.h"
#include "intel_gt_requests.h"
#include "intel_mchbar_regs.h"
#include "intel_pci_config.h"
#include "intel_reset.h"

#include "uc/intel_guc.h"

#define RESET_MAX_RETRIES 3

/* XXX How to handle concurrent GGTT updates using tiling registers? */
#define RESET_UNDER_STOP_MACHINE 0

static void rmw_set_fw(struct intel_uncore *uncore, i915_reg_t reg, u32 set)
{
	intel_uncore_rmw_fw(uncore, reg, 0, set);
}

static void rmw_clear_fw(struct intel_uncore *uncore, i915_reg_t reg, u32 clr)
{
	intel_uncore_rmw_fw(uncore, reg, clr, 0);
}

static void client_mark_guilty(struct i915_gem_context *ctx, bool banned)
{
	struct drm_i915_file_private *file_priv = ctx->file_priv;
	unsigned long prev_hang;
	unsigned int score;

	if (IS_ERR_OR_NULL(file_priv))
		return;

	score = 0;
	if (banned)
		score = I915_CLIENT_SCORE_CONTEXT_BAN;

	prev_hang = xchg(&file_priv->hang_timestamp, jiffies);
	if (time_before(jiffies, prev_hang + I915_CLIENT_FAST_HANG_JIFFIES))
		score += I915_CLIENT_SCORE_HANG_FAST;

	if (score) {
		atomic_add(score, &file_priv->ban_score);

		drm_dbg(&ctx->i915->drm,
			"client %s: gained %u ban score, now %u\n",
			ctx->name, score,
			atomic_read(&file_priv->ban_score));
	}
}

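/*
 * Attribute a hang to the context that submitted the guilty request and
 * decide whether repeated or unrecoverable hangs warrant banning it from
 * further submission.
 */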
static bool mark_guilty(struct i915_request *rq)
{
	struct i915_gem_context *ctx;
	unsigned long prev_hang;
	bool banned;
	int i;

	if (intel_context_is_closed(rq->context))
		return true;

	rcu_read_lock();
	ctx = rcu_dereference(rq->context->gem_context);
	if (ctx && !kref_get_unless_zero(&ctx->ref))
		ctx = NULL;
	rcu_read_unlock();
	if (!ctx)
		return intel_context_is_banned(rq->context);

	atomic_inc(&ctx->guilty_count);

	/* Cool contexts are too cool to be banned! (Used for reset testing.) */
	if (!i915_gem_context_is_bannable(ctx)) {
		banned = false;
		goto out;
	}

	drm_notice(&ctx->i915->drm,
		   "%s context reset due to GPU hang\n",
		   ctx->name);

	/* Record the timestamp for the last N hangs */
	prev_hang = ctx->hang_timestamp[0];
	for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp) - 1; i++)
		ctx->hang_timestamp[i] = ctx->hang_timestamp[i + 1];
	ctx->hang_timestamp[i] = jiffies;

	/* If we have hung N+1 times in rapid succession, we ban the context! */
	banned = !i915_gem_context_is_recoverable(ctx);
	if (time_before(jiffies, prev_hang + CONTEXT_FAST_HANG_JIFFIES))
		banned = true;
	if (banned)
		drm_dbg(&ctx->i915->drm, "context %s: guilty %d, banned\n",
			ctx->name, atomic_read(&ctx->guilty_count));

	client_mark_guilty(ctx, banned);

out:
	i915_gem_context_put(ctx);
	return banned;
}

static void mark_innocent(struct i915_request *rq)
{
	struct i915_gem_context *ctx;

	rcu_read_lock();
	ctx = rcu_dereference(rq->context->gem_context);
	if (ctx)
		atomic_inc(&ctx->active_count);
	rcu_read_unlock();
}

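/*
 * Called during reset handling to mark the request caught up in the hang:
 * guilty requests are skipped with -EIO and may lead to a context ban,
 * innocent requests are flagged with -EAGAIN so they can be resubmitted.
 */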
void __i915_request_reset(struct i915_request *rq, bool guilty)
{
	bool banned = false;

	RQ_TRACE(rq, "guilty? %s\n", str_yes_no(guilty));
	GEM_BUG_ON(__i915_request_is_complete(rq));

	rcu_read_lock(); /* protect the GEM context */
	if (guilty) {
		i915_request_set_error_once(rq, -EIO);
		__i915_request_skip(rq);
		banned = mark_guilty(rq);
	} else {
		i915_request_set_error_once(rq, -EAGAIN);
		mark_innocent(rq);
	}
	rcu_read_unlock();

	if (banned)
		intel_context_ban(rq->context, rq);
}

static bool i915_in_reset(struct pci_dev *pdev)
{
	u8 gdrst;

	pci_read_config_byte(pdev, I915_GDRST, &gdrst);
	return gdrst & GRDOM_RESET_STATUS;
}

static int i915_do_reset(struct intel_gt *gt,
			 intel_engine_mask_t engine_mask,
			 unsigned int retry)
{
	struct pci_dev *pdev = to_pci_dev(gt->i915->drm.dev);
	int err;

	/* Assert reset for at least 20 usec, and wait for acknowledgement. */
	pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
	udelay(50);
	err = wait_for_atomic(i915_in_reset(pdev), 50);

	/* Clear the reset request. */
	pci_write_config_byte(pdev, I915_GDRST, 0);
	udelay(50);
	if (!err)
		err = wait_for_atomic(!i915_in_reset(pdev), 50);

	return err;
}

static bool g4x_reset_complete(struct pci_dev *pdev)
{
	u8 gdrst;

	pci_read_config_byte(pdev, I915_GDRST, &gdrst);
	return (gdrst & GRDOM_RESET_ENABLE) == 0;
}

static int g33_do_reset(struct intel_gt *gt,
			intel_engine_mask_t engine_mask,
			unsigned int retry)
{
	struct pci_dev *pdev = to_pci_dev(gt->i915->drm.dev);

	pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
	return wait_for_atomic(g4x_reset_complete(pdev), 50);
}

static int g4x_do_reset(struct intel_gt *gt,
			intel_engine_mask_t engine_mask,
			unsigned int retry)
{
	struct pci_dev *pdev = to_pci_dev(gt->i915->drm.dev);
	struct intel_uncore *uncore = gt->uncore;
	int ret;

	/* WaVcpClkGateDisableForMediaReset:ctg,elk */
	rmw_set_fw(uncore, VDECCLK_GATE_D, VCP_UNIT_CLOCK_GATE_DISABLE);
	intel_uncore_posting_read_fw(uncore, VDECCLK_GATE_D);

	pci_write_config_byte(pdev, I915_GDRST,
			      GRDOM_MEDIA | GRDOM_RESET_ENABLE);
	ret = wait_for_atomic(g4x_reset_complete(pdev), 50);
	if (ret) {
		GT_TRACE(gt, "Wait for media reset failed\n");
		goto out;
	}

	pci_write_config_byte(pdev, I915_GDRST,
			      GRDOM_RENDER | GRDOM_RESET_ENABLE);
	ret = wait_for_atomic(g4x_reset_complete(pdev), 50);
	if (ret) {
		GT_TRACE(gt, "Wait for render reset failed\n");
		goto out;
	}

out:
	pci_write_config_byte(pdev, I915_GDRST, 0);

	rmw_clear_fw(uncore, VDECCLK_GATE_D, VCP_UNIT_CLOCK_GATE_DISABLE);
	intel_uncore_posting_read_fw(uncore, VDECCLK_GATE_D);

	return ret;
}

static int ilk_do_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask,
			unsigned int retry)
{
	struct intel_uncore *uncore = gt->uncore;
	int ret;

	intel_uncore_write_fw(uncore, ILK_GDSR,
			      ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE);
	ret = __intel_wait_for_register_fw(uncore, ILK_GDSR,
					   ILK_GRDOM_RESET_ENABLE, 0,
					   5000, 0,
					   NULL);
	if (ret) {
		GT_TRACE(gt, "Wait for render reset failed\n");
		goto out;
	}

	intel_uncore_write_fw(uncore, ILK_GDSR,
			      ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE);
	ret = __intel_wait_for_register_fw(uncore, ILK_GDSR,
					   ILK_GRDOM_RESET_ENABLE, 0,
					   5000, 0,
					   NULL);
	if (ret) {
		GT_TRACE(gt, "Wait for media reset failed\n");
		goto out;
	}

out:
	intel_uncore_write_fw(uncore, ILK_GDSR, 0);
	intel_uncore_posting_read_fw(uncore, ILK_GDSR);
	return ret;
}

/* Reset the hardware domains (GENX_GRDOM_*) specified by mask */
static int gen6_hw_domain_reset(struct intel_gt *gt, u32 hw_domain_mask)
{
	struct intel_uncore *uncore = gt->uncore;
	int loops = 2;
	int err;

	/*
	 * GEN6_GDRST is not in the gt power well, no need to check
	 * for fifo space for the write or forcewake the chip for
	 * the read
	 */
	do {
		intel_uncore_write_fw(uncore, GEN6_GDRST, hw_domain_mask);

		/*
		 * Wait for the device to ack the reset requests.
		 *
		 * On some platforms, e.g. Jasperlake, we see that the
		 * engine register state is not cleared until shortly after
		 * GDRST reports completion, causing a failure as we try
		 * to immediately resume while the internal state is still
		 * in flux. If we immediately repeat the reset, the second
		 * reset appears to serialise with the first, and since
		 * it is a no-op, the registers should retain their reset
		 * value. However, there is still a concern that upon
		 * leaving the second reset, the internal engine state
		 * is still in flux and not ready for resuming.
		 */
		err = __intel_wait_for_register_fw(uncore, GEN6_GDRST,
						   hw_domain_mask, 0,
						   2000, 0,
						   NULL);
	} while (err == 0 && --loops);
	if (err)
		GT_TRACE(gt,
			 "Wait for 0x%08x engines reset failed\n",
			 hw_domain_mask);

	/*
	 * As we have observed that the engine state is still volatile
	 * after GDRST is acked, impose a small delay to let everything settle.
	 */
	udelay(50);

	return err;
}

static int __gen6_reset_engines(struct intel_gt *gt,
				intel_engine_mask_t engine_mask,
				unsigned int retry)
{
	struct intel_engine_cs *engine;
	u32 hw_mask;

	if (engine_mask == ALL_ENGINES) {
		hw_mask = GEN6_GRDOM_FULL;
	} else {
		intel_engine_mask_t tmp;

		hw_mask = 0;
		for_each_engine_masked(engine, gt, engine_mask, tmp) {
			hw_mask |= engine->reset_domain;
		}
	}

	return gen6_hw_domain_reset(gt, hw_mask);
}

static int gen6_reset_engines(struct intel_gt *gt,
			      intel_engine_mask_t engine_mask,
			      unsigned int retry)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&gt->uncore->lock, flags);
	ret = __gen6_reset_engines(gt, engine_mask, retry);
	spin_unlock_irqrestore(&gt->uncore->lock, flags);

	return ret;
}

static struct intel_engine_cs *find_sfc_paired_vecs_engine(struct intel_engine_cs *engine)
{
	int vecs_id;

	GEM_BUG_ON(engine->class != VIDEO_DECODE_CLASS);

	vecs_id = _VECS((engine->instance) / 2);

	return engine->gt->engine[vecs_id];
}

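/*
 * Register/bit pairs needed to force-lock an SFC unit before resetting the
 * media engines that may be sharing it (gen11+).
 */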
struct sfc_lock_data {
	i915_reg_t lock_reg;
	i915_reg_t ack_reg;
	i915_reg_t usage_reg;
	u32 lock_bit;
	u32 ack_bit;
	u32 usage_bit;
	u32 reset_bit;
};

static void get_sfc_forced_lock_data(struct intel_engine_cs *engine,
				     struct sfc_lock_data *sfc_lock)
{
	switch (engine->class) {
	default:
		MISSING_CASE(engine->class);
		fallthrough;
	case VIDEO_DECODE_CLASS:
		sfc_lock->lock_reg = GEN11_VCS_SFC_FORCED_LOCK(engine->mmio_base);
		sfc_lock->lock_bit = GEN11_VCS_SFC_FORCED_LOCK_BIT;

		sfc_lock->ack_reg = GEN11_VCS_SFC_LOCK_STATUS(engine->mmio_base);
		sfc_lock->ack_bit = GEN11_VCS_SFC_LOCK_ACK_BIT;

		sfc_lock->usage_reg = GEN11_VCS_SFC_LOCK_STATUS(engine->mmio_base);
		sfc_lock->usage_bit = GEN11_VCS_SFC_USAGE_BIT;
		sfc_lock->reset_bit = GEN11_VCS_SFC_RESET_BIT(engine->instance);

		break;
	case VIDEO_ENHANCEMENT_CLASS:
		sfc_lock->lock_reg = GEN11_VECS_SFC_FORCED_LOCK(engine->mmio_base);
		sfc_lock->lock_bit = GEN11_VECS_SFC_FORCED_LOCK_BIT;

		sfc_lock->ack_reg = GEN11_VECS_SFC_LOCK_ACK(engine->mmio_base);
		sfc_lock->ack_bit = GEN11_VECS_SFC_LOCK_ACK_BIT;

		sfc_lock->usage_reg = GEN11_VECS_SFC_USAGE(engine->mmio_base);
		sfc_lock->usage_bit = GEN11_VECS_SFC_USAGE_BIT;
		sfc_lock->reset_bit = GEN11_VECS_SFC_RESET_BIT(engine->instance);

		break;
	}
}

static int gen11_lock_sfc(struct intel_engine_cs *engine,
			  u32 *reset_mask,
			  u32 *unlock_mask)
{
	struct intel_uncore *uncore = engine->uncore;
	u8 vdbox_sfc_access = engine->gt->info.vdbox_sfc_access;
	struct sfc_lock_data sfc_lock;
	bool lock_obtained, lock_to_other = false;
	int ret;

	switch (engine->class) {
	case VIDEO_DECODE_CLASS:
		if ((BIT(engine->instance) & vdbox_sfc_access) == 0)
			return 0;

		fallthrough;
	case VIDEO_ENHANCEMENT_CLASS:
		get_sfc_forced_lock_data(engine, &sfc_lock);

		break;
	default:
		return 0;
	}

	if (!(intel_uncore_read_fw(uncore, sfc_lock.usage_reg) & sfc_lock.usage_bit)) {
		struct intel_engine_cs *paired_vecs;

		if (engine->class != VIDEO_DECODE_CLASS ||
		    GRAPHICS_VER(engine->i915) != 12)
			return 0;

		/*
		 * Wa_14010733141
		 *
		 * If the VCS-MFX isn't using the SFC, we also need to check
		 * whether VCS-HCP is using it.  If so, we need to issue a *VE*
		 * forced lock on the VE engine that shares the same SFC.
		 */
		if (!(intel_uncore_read_fw(uncore,
					   GEN12_HCP_SFC_LOCK_STATUS(engine->mmio_base)) &
		      GEN12_HCP_SFC_USAGE_BIT))
			return 0;

		paired_vecs = find_sfc_paired_vecs_engine(engine);
		get_sfc_forced_lock_data(paired_vecs, &sfc_lock);
		lock_to_other = true;
		*unlock_mask |= paired_vecs->mask;
	} else {
		*unlock_mask |= engine->mask;
	}

	/*
	 * If the engine is using an SFC, tell the engine that a software reset
	 * is going to happen. The engine will then try to force lock the SFC.
	 * If SFC ends up being locked to the engine we want to reset, we have
	 * to reset it as well (we will unlock it once the reset sequence is
	 * completed).
	 */
	rmw_set_fw(uncore, sfc_lock.lock_reg, sfc_lock.lock_bit);

	ret = __intel_wait_for_register_fw(uncore,
					   sfc_lock.ack_reg,
					   sfc_lock.ack_bit,
					   sfc_lock.ack_bit,
					   1000, 0, NULL);

	/*
	 * Was the SFC released while we were trying to lock it?
	 *
	 * We should reset both the engine and the SFC if:
	 *  - We were locking the SFC to this engine and the lock succeeded
	 *       OR
	 *  - We were locking the SFC to a different engine (Wa_14010733141)
	 *    but the SFC was released before the lock was obtained.
	 *
	 * Otherwise we need only reset the engine by itself and we can
	 * leave the SFC alone.
	 */
	lock_obtained = (intel_uncore_read_fw(uncore, sfc_lock.usage_reg) &
			sfc_lock.usage_bit) != 0;
	if (lock_obtained == lock_to_other)
		return 0;

	if (ret) {
		ENGINE_TRACE(engine, "Wait for SFC forced lock ack failed\n");
		return ret;
	}

	*reset_mask |= sfc_lock.reset_bit;
	return 0;
}

static void gen11_unlock_sfc(struct intel_engine_cs *engine)
{
	struct intel_uncore *uncore = engine->uncore;
	u8 vdbox_sfc_access = engine->gt->info.vdbox_sfc_access;
	struct sfc_lock_data sfc_lock = {};

	if (engine->class != VIDEO_DECODE_CLASS &&
	    engine->class != VIDEO_ENHANCEMENT_CLASS)
		return;

	if (engine->class == VIDEO_DECODE_CLASS &&
	    (BIT(engine->instance) & vdbox_sfc_access) == 0)
		return;

	get_sfc_forced_lock_data(engine, &sfc_lock);

	rmw_clear_fw(uncore, sfc_lock.lock_reg, sfc_lock.lock_bit);
}

static int __gen11_reset_engines(struct intel_gt *gt,
				 intel_engine_mask_t engine_mask,
				 unsigned int retry)
{
	struct intel_engine_cs *engine;
	intel_engine_mask_t tmp;
	u32 reset_mask, unlock_mask = 0;
	int ret;

	if (engine_mask == ALL_ENGINES) {
		reset_mask = GEN11_GRDOM_FULL;
	} else {
		reset_mask = 0;
		for_each_engine_masked(engine, gt, engine_mask, tmp) {
			reset_mask |= engine->reset_domain;
			ret = gen11_lock_sfc(engine, &reset_mask, &unlock_mask);
			if (ret)
				goto sfc_unlock;
		}
	}

	ret = gen6_hw_domain_reset(gt, reset_mask);

sfc_unlock:
	/*
	 * We unlock the SFC based on the lock status and not the result of
	 * gen11_lock_sfc to make sure that we clean up properly if something
	 * went wrong during the lock (e.g. the lock was acquired after timeout
	 * expiration).
	 *
	 * Due to Wa_14010733141, we may have locked an SFC to an engine that
	 * wasn't being reset.  So instead of calling gen11_unlock_sfc() on
	 * engine_mask, we call it on the mask of engines for which our
	 * gen11_lock_sfc() calls actually attempted a lock.
	 */
	for_each_engine_masked(engine, gt, unlock_mask, tmp)
		gen11_unlock_sfc(engine);

	return ret;
}

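/*
 * Request the ready-for-reset handshake via RING_RESET_CTL and wait for the
 * engine to acknowledge, so we do not reset an engine mid-operation.
 * Catastrophic errors bypass the handshake (HAS#396813).
 */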
static int gen8_engine_reset_prepare(struct intel_engine_cs *engine)
{
	struct intel_uncore *uncore = engine->uncore;
	const i915_reg_t reg = RING_RESET_CTL(engine->mmio_base);
	u32 request, mask, ack;
	int ret;

	if (I915_SELFTEST_ONLY(should_fail(&engine->reset_timeout, 1)))
		return -ETIMEDOUT;

	ack = intel_uncore_read_fw(uncore, reg);
	if (ack & RESET_CTL_CAT_ERROR) {
		/*
		 * For catastrophic errors, ready-for-reset sequence
		 * needs to be bypassed: HAS#396813
		 */
		request = RESET_CTL_CAT_ERROR;
		mask = RESET_CTL_CAT_ERROR;

		/* Catastrophic errors need to be cleared by HW */
		ack = 0;
	} else if (!(ack & RESET_CTL_READY_TO_RESET)) {
		request = RESET_CTL_REQUEST_RESET;
		mask = RESET_CTL_READY_TO_RESET;
		ack = RESET_CTL_READY_TO_RESET;
	} else {
		return 0;
	}

	intel_uncore_write_fw(uncore, reg, _MASKED_BIT_ENABLE(request));
	ret = __intel_wait_for_register_fw(uncore, reg, mask, ack,
					   700, 0, NULL);
	if (ret)
		drm_err(&engine->i915->drm,
			"%s reset request timed out: {request: %08x, RESET_CTL: %08x}\n",
			engine->name, request,
			intel_uncore_read_fw(uncore, reg));

	return ret;
}

static void gen8_engine_reset_cancel(struct intel_engine_cs *engine)
{
	intel_uncore_write_fw(engine->uncore,
			      RING_RESET_CTL(engine->mmio_base),
			      _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET));
}

static int gen8_reset_engines(struct intel_gt *gt,
			      intel_engine_mask_t engine_mask,
			      unsigned int retry)
{
	struct intel_engine_cs *engine;
	const bool reset_non_ready = retry >= 1;
	intel_engine_mask_t tmp;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&gt->uncore->lock, flags);

	for_each_engine_masked(engine, gt, engine_mask, tmp) {
		ret = gen8_engine_reset_prepare(engine);
		if (ret && !reset_non_ready)
			goto skip_reset;

		/*
		 * If this is not the first failed attempt to prepare,
		 * we decide to proceed anyway.
		 *
		 * By doing so we risk context corruption and with
		 * some gens (kbl), possible system hang if reset
		 * happens during active bb execution.
		 *
		 * We rather take context corruption instead of
		 * failed reset with a wedged driver/gpu. And
		 * active bb execution case should be covered by
		 * stop_engines() we have before the reset.
		 */
	}

	/*
	 * Wa_22011100796:dg2, whenever Full soft reset is required,
	 * reset all individual engines first, and then do a full soft reset.
	 *
	 * This is best effort, so ignore any error from the initial reset.
	 */
	if (IS_DG2(gt->i915) && engine_mask == ALL_ENGINES)
		__gen11_reset_engines(gt, gt->info.engine_mask, 0);

	if (GRAPHICS_VER(gt->i915) >= 11)
		ret = __gen11_reset_engines(gt, engine_mask, retry);
	else
		ret = __gen6_reset_engines(gt, engine_mask, retry);

skip_reset:
	for_each_engine_masked(engine, gt, engine_mask, tmp)
		gen8_engine_reset_cancel(engine);

	spin_unlock_irqrestore(&gt->uncore->lock, flags);

	return ret;
}

static int mock_reset(struct intel_gt *gt,
		      intel_engine_mask_t mask,
		      unsigned int retry)
{
	return 0;
}

typedef int (*reset_func)(struct intel_gt *,
			  intel_engine_mask_t engine_mask,
			  unsigned int retry);

static reset_func intel_get_gpu_reset(const struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;

	if (is_mock_gt(gt))
		return mock_reset;
	else if (GRAPHICS_VER(i915) >= 8)
		return gen8_reset_engines;
	else if (GRAPHICS_VER(i915) >= 6)
		return gen6_reset_engines;
	else if (GRAPHICS_VER(i915) >= 5)
		return ilk_do_reset;
	else if (IS_G4X(i915))
		return g4x_do_reset;
	else if (IS_G33(i915) || IS_PINEVIEW(i915))
		return g33_do_reset;
	else if (GRAPHICS_VER(i915) >= 3)
		return i915_do_reset;
	else
		return NULL;
}

int __intel_gt_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask)
{
	const int retries = engine_mask == ALL_ENGINES ? RESET_MAX_RETRIES : 1;
	reset_func reset;
	int ret = -ETIMEDOUT;
	int retry;

	reset = intel_get_gpu_reset(gt);
	if (!reset)
		return -ENODEV;

	/*
	 * If the power well sleeps during the reset, the reset
	 * request may be dropped and never completes (causing -EIO).
	 */
	intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
	for (retry = 0; ret == -ETIMEDOUT && retry < retries; retry++) {
		GT_TRACE(gt, "engine_mask=%x\n", engine_mask);
		preempt_disable();
		ret = reset(gt, engine_mask, retry);
		preempt_enable();
	}
	intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);

	return ret;
}

bool intel_has_gpu_reset(const struct intel_gt *gt)
{
	if (!gt->i915->params.reset)
		return false;

	return intel_get_gpu_reset(gt);
}

bool intel_has_reset_engine(const struct intel_gt *gt)
{
	if (gt->i915->params.reset < 2)
		return false;

	return INTEL_INFO(gt->i915)->has_reset_engine;
}

int intel_reset_guc(struct intel_gt *gt)
{
	u32 guc_domain =
		GRAPHICS_VER(gt->i915) >= 11 ? GEN11_GRDOM_GUC : GEN9_GRDOM_GUC;
	int ret;

	GEM_BUG_ON(!HAS_GT_UC(gt->i915));

	intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
	ret = gen6_hw_domain_reset(gt, guc_domain);
	intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);

	return ret;
}

/*
 * Ensure the irq handler finishes and does not run again.
 * Also return the active request so that we only search for it once.
 */
static void reset_prepare_engine(struct intel_engine_cs *engine)
{
	/*
	 * During the reset sequence, we must prevent the engine from
	 * entering RC6. As the context state is undefined until we restart
	 * the engine, if it does enter RC6 during the reset, the state
	 * written to the powercontext is undefined and so we may lose
	 * GPU state upon resume, i.e. fail to restart after a reset.
	 */
	intel_uncore_forcewake_get(engine->uncore, FORCEWAKE_ALL);
	if (engine->reset.prepare)
		engine->reset.prepare(engine);
}

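/*
 * Invalidate userspace CPU mmaps of fenced GGTT objects so that any access
 * after the reset takes a fresh fault and reacquires the fence registers.
 */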
static void revoke_mmaps(struct intel_gt *gt)
{
	int i;

	for (i = 0; i < gt->ggtt->num_fences; i++) {
		struct drm_vma_offset_node *node;
		struct i915_vma *vma;
		u64 vma_offset;

		vma = READ_ONCE(gt->ggtt->fence_regs[i].vma);
		if (!vma)
			continue;

		if (!i915_vma_has_userfault(vma))
			continue;

		GEM_BUG_ON(vma->fence != &gt->ggtt->fence_regs[i]);

		if (!vma->mmo)
			continue;

		node = &vma->mmo->vma_node;
		vma_offset = vma->gtt_view.partial.offset << PAGE_SHIFT;

		unmap_mapping_range(gt->i915->drm.anon_inode->i_mapping,
				    drm_vma_node_offset_addr(node) + vma_offset,
				    vma->size,
				    1);
	}
}

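/*
 * Quiesce submission (including GuC) and hold forcewake on each engine,
 * returning the mask of engines that were awake so they can be released
 * again in reset_finish().
 */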
static intel_engine_mask_t reset_prepare(struct intel_gt *gt)
{
	struct intel_engine_cs *engine;
	intel_engine_mask_t awake = 0;
	enum intel_engine_id id;

	/* For GuC mode, ensure submission is disabled before stopping ring */
	intel_uc_reset_prepare(&gt->uc);

	for_each_engine(engine, gt, id) {
		if (intel_engine_pm_get_if_awake(engine))
			awake |= engine->mask;
		reset_prepare_engine(engine);
	}

	return awake;
}

static void gt_revoke(struct intel_gt *gt)
{
	revoke_mmaps(gt);
}

static int gt_reset(struct intel_gt *gt, intel_engine_mask_t stalled_mask)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int err;

	/*
	 * Everything depends on having the GTT running, so we need to start
	 * there.
	 */
	err = i915_ggtt_enable_hw(gt->i915);
	if (err)
		return err;

	local_bh_disable();
	for_each_engine(engine, gt, id)
		__intel_engine_reset(engine, stalled_mask & engine->mask);
	local_bh_enable();

	intel_uc_reset(&gt->uc, ALL_ENGINES);

	intel_ggtt_restore_fences(gt->ggtt);

	return err;
}

static void reset_finish_engine(struct intel_engine_cs *engine)
{
	if (engine->reset.finish)
		engine->reset.finish(engine);
	intel_uncore_forcewake_put(engine->uncore, FORCEWAKE_ALL);

	intel_engine_signal_breadcrumbs(engine);
}

static void reset_finish(struct intel_gt *gt, intel_engine_mask_t awake)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, gt, id) {
		reset_finish_engine(engine);
		if (awake & engine->mask)
			intel_engine_pm_put(engine);
	}

	intel_uc_reset_finish(&gt->uc);
}

static void nop_submit_request(struct i915_request *request)
{
	RQ_TRACE(request, "-EIO\n");

	request = i915_request_mark_eio(request);
	if (request) {
		i915_request_submit(request);
		intel_engine_signal_breadcrumbs(request->engine);

		i915_request_put(request);
	}
}

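/*
 * Declare the GPU wedged: stop submission, complete all in-flight requests
 * with -EIO and refuse further execution until the wedge is lifted by
 * __intel_gt_unset_wedged().
 */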
static void __intel_gt_set_wedged(struct intel_gt *gt)
{
	struct intel_engine_cs *engine;
	intel_engine_mask_t awake;
	enum intel_engine_id id;

	if (test_bit(I915_WEDGED, &gt->reset.flags))
		return;

	GT_TRACE(gt, "start\n");

	/*
	 * First, stop submission to hw, but do not yet complete requests by
	 * rolling the global seqno forward (since this would complete requests
	 * for which we haven't set the fence error to EIO yet).
	 */
	awake = reset_prepare(gt);

	/* Even if the GPU reset fails, it should still stop the engines */
	if (!INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
		__intel_gt_reset(gt, ALL_ENGINES);

	for_each_engine(engine, gt, id)
		engine->submit_request = nop_submit_request;

	/*
	 * Make sure no request can slip through without getting completed by
	 * either this call here to intel_engine_write_global_seqno, or the one
	 * in nop_submit_request.
	 */
	synchronize_rcu_expedited();
	set_bit(I915_WEDGED, &gt->reset.flags);

	/* Mark all executing requests as skipped */
	local_bh_disable();
	for_each_engine(engine, gt, id)
		if (engine->reset.cancel)
			engine->reset.cancel(engine);
	intel_uc_cancel_requests(&gt->uc);
	local_bh_enable();

	reset_finish(gt, awake);

	GT_TRACE(gt, "end\n");
}

void intel_gt_set_wedged(struct intel_gt *gt)
{
	intel_wakeref_t wakeref;

	if (test_bit(I915_WEDGED, &gt->reset.flags))
		return;

	wakeref = intel_runtime_pm_get(gt->uncore->rpm);
	mutex_lock(&gt->reset.mutex);

	if (GEM_SHOW_DEBUG()) {
		struct drm_printer p = drm_debug_printer(__func__);
		struct intel_engine_cs *engine;
		enum intel_engine_id id;

		drm_printf(&p, "called from %pS\n", (void *)_RET_IP_);
		for_each_engine(engine, gt, id) {
			if (intel_engine_is_idle(engine))
				continue;

			intel_engine_dump(engine, &p, "%s\n", engine->name);
		}
	}

	__intel_gt_set_wedged(gt);

	mutex_unlock(&gt->reset.mutex);
	intel_runtime_pm_put(gt->uncore->rpm, wakeref);
}

static bool __intel_gt_unset_wedged(struct intel_gt *gt)
{
	struct intel_gt_timelines *timelines = &gt->timelines;
	struct intel_timeline *tl;
	bool ok;

	if (!test_bit(I915_WEDGED, &gt->reset.flags))
		return true;

	/* Never fully initialised, recovery impossible */
	if (intel_gt_has_unrecoverable_error(gt))
		return false;

	GT_TRACE(gt, "start\n");

	/*
	 * Before unwedging, make sure that all pending operations
	 * are flushed and errored out - we may have requests waiting upon
	 * third party fences. We marked all inflight requests as EIO, and
	 * every execbuf since returned EIO, for consistency we want all
	 * the currently pending requests to also be marked as EIO, which
	 * is done inside our nop_submit_request - and so we must wait.
	 *
	 * No more can be submitted until we reset the wedged bit.
	 */
	spin_lock(&timelines->lock);
	list_for_each_entry(tl, &timelines->active_list, link) {
		struct dma_fence *fence;

		fence = i915_active_fence_get(&tl->last_request);
		if (!fence)
			continue;

		spin_unlock(&timelines->lock);

		/*
		 * All internal dependencies (i915_requests) will have
		 * been flushed by the set-wedge, but we may be stuck waiting
		 * for external fences. These should all be capped to 10s
		 * (I915_FENCE_TIMEOUT) so this wait should not be unbounded
		 * in the worst case.
		 */
		dma_fence_default_wait(fence, false, MAX_SCHEDULE_TIMEOUT);
		dma_fence_put(fence);

		/* Restart iteration after dropping the lock */
		spin_lock(&timelines->lock);
		tl = list_entry(&timelines->active_list, typeof(*tl), link);
	}
	spin_unlock(&timelines->lock);

	/* We must reset pending GPU events before restoring our submission */
	ok = !HAS_EXECLISTS(gt->i915); /* XXX better agnosticism desired */
	if (!INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
		ok = __intel_gt_reset(gt, ALL_ENGINES) == 0;
	if (!ok) {
		/*
		 * Warn CI about the unrecoverable wedged condition.
		 * Time for a reboot.
		 */
		add_taint_for_CI(gt->i915, TAINT_WARN);
		return false;
	}

	/*
	 * Undo nop_submit_request. We prevent all new i915 requests from
	 * being queued (by disallowing execbuf whilst wedged) so having
	 * waited for all active requests above, we know the system is idle
	 * and do not have to worry about a thread being inside
	 * engine->submit_request() as we swap over. So unlike installing
	 * the nop_submit_request on reset, we can do this from normal
	 * context and do not require stop_machine().
	 */
	intel_engines_reset_default_submission(gt);

	GT_TRACE(gt, "end\n");

	smp_mb__before_atomic(); /* complete takeover before enabling execbuf */
	clear_bit(I915_WEDGED, &gt->reset.flags);

	return true;
}

bool intel_gt_unset_wedged(struct intel_gt *gt)
{
	bool result;

	mutex_lock(&gt->reset.mutex);
	result = __intel_gt_unset_wedged(gt);
	mutex_unlock(&gt->reset.mutex);

	return result;
}

static int do_reset(struct intel_gt *gt, intel_engine_mask_t stalled_mask)
{
	int err, i;

	err = __intel_gt_reset(gt, ALL_ENGINES);
	for (i = 0; err && i < RESET_MAX_RETRIES; i++) {
		msleep(10 * (i + 1));
		err = __intel_gt_reset(gt, ALL_ENGINES);
	}
	if (err)
		return err;

	return gt_reset(gt, stalled_mask);
}

static int resume(struct intel_gt *gt)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int ret;

	for_each_engine(engine, gt, id) {
		ret = intel_engine_resume(engine);
		if (ret)
			return ret;
	}

	return 0;
}

/**
 * intel_gt_reset - reset chip after a hang
 * @gt: #intel_gt to reset
 * @stalled_mask: mask of the stalled engines with the guilty requests
 * @reason: user error message for why we are resetting
 *
 * Reset the chip.  Useful if a hang is detected. Marks the device as wedged
 * on failure.
 *
 * Procedure is fairly simple:
 *   - reset the chip using the reset reg
 *   - re-init context state
 *   - re-init hardware status page
 *   - re-init ring buffer
 *   - re-init interrupt state
 *   - re-init display
 */
void intel_gt_reset(struct intel_gt *gt,
		    intel_engine_mask_t stalled_mask,
		    const char *reason)
{
	intel_engine_mask_t awake;
	int ret;

	GT_TRACE(gt, "flags=%lx\n", gt->reset.flags);

	might_sleep();
	GEM_BUG_ON(!test_bit(I915_RESET_BACKOFF, &gt->reset.flags));

	/*
	 * FIXME: Revoking cpu mmap ptes cannot be done from a dma_fence
	 * critical section like gpu reset.
	 */
	gt_revoke(gt);

	mutex_lock(&gt->reset.mutex);

	/* Clear any previous failed attempts at recovery. Time to try again. */
	if (!__intel_gt_unset_wedged(gt))
		goto unlock;

	if (reason)
		drm_notice(&gt->i915->drm,
			   "Resetting chip for %s\n", reason);
	atomic_inc(&gt->i915->gpu_error.reset_count);

	awake = reset_prepare(gt);

	if (!intel_has_gpu_reset(gt)) {
		if (gt->i915->params.reset)
			drm_err(&gt->i915->drm, "GPU reset not supported\n");
		else
			drm_dbg(&gt->i915->drm, "GPU reset disabled\n");
		goto error;
	}

	if (INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
		intel_runtime_pm_disable_interrupts(gt->i915);

	if (do_reset(gt, stalled_mask)) {
		drm_err(&gt->i915->drm, "Failed to reset chip\n");
		goto taint;
	}

	if (INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
		intel_runtime_pm_enable_interrupts(gt->i915);

	intel_overlay_reset(gt->i915);

	/*
	 * Next we need to restore the context, but we don't use those
	 * yet either...
	 *
	 * Ring buffer needs to be re-initialized in the KMS case, or if X
	 * was running at the time of the reset (i.e. we weren't VT
	 * switched away).
	 */
	ret = intel_gt_init_hw(gt);
	if (ret) {
		drm_err(&gt->i915->drm,
			"Failed to initialise HW following reset (%d)\n",
			ret);
		goto taint;
	}

	ret = resume(gt);
	if (ret)
		goto taint;

finish:
	reset_finish(gt, awake);
unlock:
	mutex_unlock(&gt->reset.mutex);
	return;

taint:
	/*
	 * History tells us that if we cannot reset the GPU now, we
	 * never will. This then impacts everything that is run
	 * subsequently. On failing the reset, we mark the driver
	 * as wedged, preventing further execution on the GPU.
	 * We also want to go one step further and add a taint to the
	 * kernel so that any subsequent faults can be traced back to
	 * this failure. This is important for CI, where if the
	 * GPU/driver fails we would like to reboot and restart testing
	 * rather than continue on into oblivion. For everyone else,
	 * the system should still plod along, but they have been warned!
	 */
	add_taint_for_CI(gt->i915, TAINT_WARN);
error:
	__intel_gt_set_wedged(gt);
	goto finish;
}

static int intel_gt_reset_engine(struct intel_engine_cs *engine)
{
	return __intel_gt_reset(engine->gt, engine->mask);
}

int __intel_engine_reset_bh(struct intel_engine_cs *engine, const char *msg)
{
	struct intel_gt *gt = engine->gt;
	int ret;

	ENGINE_TRACE(engine, "flags=%lx\n", gt->reset.flags);
	GEM_BUG_ON(!test_bit(I915_RESET_ENGINE + engine->id, &gt->reset.flags));

	if (intel_engine_uses_guc(engine))
		return -ENODEV;

	if (!intel_engine_pm_get_if_awake(engine))
		return 0;

	reset_prepare_engine(engine);

	if (msg)
		drm_notice(&engine->i915->drm,
			   "Resetting %s for %s\n", engine->name, msg);
	atomic_inc(&engine->i915->gpu_error.reset_engine_count[engine->uabi_class]);

	ret = intel_gt_reset_engine(engine);
	if (ret) {
		/* If we fail here, we expect to fall back to a global reset */
		ENGINE_TRACE(engine, "Failed to reset %s, err: %d\n", engine->name, ret);
		goto out;
	}

	/*
	 * The request that caused the hang is stuck on elsp, we know the
	 * active request and can drop it, adjust head to skip the offending
	 * request to resume executing remaining requests in the queue.
	 */
	__intel_engine_reset(engine, true);

	/*
	 * The engine and its registers (and workarounds in case of render)
	 * have been reset to their default values. Follow the init_ring
	 * process to program RING_MODE, HWSP and re-enable submission.
	 */
	ret = intel_engine_resume(engine);

out:
	intel_engine_cancel_stop_cs(engine);
	reset_finish_engine(engine);
	intel_engine_pm_put_async(engine);
	return ret;
}

/**
 * intel_engine_reset - reset GPU engine to recover from a hang
 * @engine: engine to reset
 * @msg: reason for GPU reset; or NULL for no drm_notice()
 *
 * Reset a specific GPU engine. Useful if a hang is detected.
 * Returns zero on successful reset or otherwise an error code.
 *
 * Procedure is:
 *  - identifies the request that caused the hang and it is dropped
 *  - reset engine (which will force the engine to idle)
 *  - re-init/configure engine
 */
int intel_engine_reset(struct intel_engine_cs *engine, const char *msg)
{
	int err;

	local_bh_disable();
	err = __intel_engine_reset_bh(engine, msg);
	local_bh_enable();

	return err;
}

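/*
 * Full-chip reset wrapper: notify userspace via uevents, prepare the display
 * around the reset, and use a watchdog (intel_wedge_me) so that a stuck reset
 * eventually wedges the GPU instead of hanging forever.
 */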
static void intel_gt_reset_global(struct intel_gt *gt,
				  u32 engine_mask,
				  const char *reason)
{
	struct kobject *kobj = &gt->i915->drm.primary->kdev->kobj;
	char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
	char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
	char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
	struct intel_wedge_me w;

	kobject_uevent_env(kobj, KOBJ_CHANGE, error_event);

	GT_TRACE(gt, "resetting chip, engines=%x\n", engine_mask);
	kobject_uevent_env(kobj, KOBJ_CHANGE, reset_event);

	/* Use a watchdog to ensure that our reset completes */
	intel_wedge_on_timeout(&w, gt, 5 * HZ) {
		intel_display_prepare_reset(gt->i915);

		intel_gt_reset(gt, engine_mask, reason);

		intel_display_finish_reset(gt->i915);
	}

	if (!test_bit(I915_WEDGED, &gt->reset.flags))
		kobject_uevent_env(kobj, KOBJ_CHANGE, reset_done_event);
}

/**
 * intel_gt_handle_error - handle a gpu error
 * @gt: the intel_gt
 * @engine_mask: mask representing engines that are hung
 * @flags: control flags
 * @fmt: Error message format string
 *
 * Do some basic checking of register state at error time and
 * dump it to the syslog.  Also call i915_capture_error_state() to make
 * sure we get a record and make it available in debugfs.  Fire a uevent
 * so userspace knows something bad happened (should trigger collection
 * of a ring dump etc.).
 */
void intel_gt_handle_error(struct intel_gt *gt,
			   intel_engine_mask_t engine_mask,
			   unsigned long flags,
			   const char *fmt, ...)
{
	struct intel_engine_cs *engine;
	intel_wakeref_t wakeref;
	intel_engine_mask_t tmp;
	char error_msg[80];
	char *msg = NULL;

	if (fmt) {
		va_list args;

		va_start(args, fmt);
		vscnprintf(error_msg, sizeof(error_msg), fmt, args);
		va_end(args);

		msg = error_msg;
	}

	/*
	 * In most cases it's guaranteed that we get here with an RPM
	 * reference held, for example because there is a pending GPU
	 * request that won't finish until the reset is done. This
	 * isn't the case at least when we get here by doing a
	 * simulated reset via debugfs, so get an RPM reference.
	 */
	wakeref = intel_runtime_pm_get(gt->uncore->rpm);

	engine_mask &= gt->info.engine_mask;

	if (flags & I915_ERROR_CAPTURE) {
		i915_capture_error_state(gt, engine_mask, CORE_DUMP_FLAG_NONE);
		intel_gt_clear_error_registers(gt, engine_mask);
	}

	/*
	 * Try engine reset when available. We fall back to full reset if
	 * single reset fails.
	 */
	if (!intel_uc_uses_guc_submission(&gt->uc) &&
	    intel_has_reset_engine(gt) && !intel_gt_is_wedged(gt)) {
		local_bh_disable();
		for_each_engine_masked(engine, gt, engine_mask, tmp) {
			BUILD_BUG_ON(I915_RESET_MODESET >= I915_RESET_ENGINE);
			if (test_and_set_bit(I915_RESET_ENGINE + engine->id,
					     &gt->reset.flags))
				continue;

			if (__intel_engine_reset_bh(engine, msg) == 0)
				engine_mask &= ~engine->mask;

			clear_and_wake_up_bit(I915_RESET_ENGINE + engine->id,
					      &gt->reset.flags);
		}
		local_bh_enable();
	}

	if (!engine_mask)
		goto out;

	/* Full reset needs the mutex, stop any other user trying to do so. */
	if (test_and_set_bit(I915_RESET_BACKOFF, &gt->reset.flags)) {
		wait_event(gt->reset.queue,
			   !test_bit(I915_RESET_BACKOFF, &gt->reset.flags));
		goto out; /* piggy-back on the other reset */
	}

	/* Make sure i915_reset_trylock() sees the I915_RESET_BACKOFF */
	synchronize_rcu_expedited();

	/*
	 * Prevent any other reset-engine attempt. We don't do this for GuC
	 * submission, as the GuC owns the per-engine reset, not the i915.
	 */
	if (!intel_uc_uses_guc_submission(&gt->uc)) {
		for_each_engine(engine, gt, tmp) {
			while (test_and_set_bit(I915_RESET_ENGINE + engine->id,
						&gt->reset.flags))
				wait_on_bit(&gt->reset.flags,
					    I915_RESET_ENGINE + engine->id,
					    TASK_UNINTERRUPTIBLE);
		}
	}

	/* Flush everyone using a resource about to be clobbered */
	synchronize_srcu_expedited(&gt->reset.backoff_srcu);

	intel_gt_reset_global(gt, engine_mask, msg);

	if (!intel_uc_uses_guc_submission(&gt->uc)) {
		for_each_engine(engine, gt, tmp)
			clear_bit_unlock(I915_RESET_ENGINE + engine->id,
					 &gt->reset.flags);
	}
	clear_bit_unlock(I915_RESET_BACKOFF, &gt->reset.flags);
	smp_mb__after_atomic();
	wake_up_all(&gt->reset.queue);

out:
	intel_runtime_pm_put(gt->uncore->rpm, wakeref);
}

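/*
 * Take an SRCU read lock against the reset backoff: callers about to touch
 * hardware state use this to wait out any full reset in progress, and a new
 * full reset is held off until the lock is dropped via intel_gt_reset_unlock().
 */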
int intel_gt_reset_trylock(struct intel_gt *gt, int *srcu)
{
	might_lock(&gt->reset.backoff_srcu);
	might_sleep();

	rcu_read_lock();
	while (test_bit(I915_RESET_BACKOFF, &gt->reset.flags)) {
		rcu_read_unlock();

		if (wait_event_interruptible(gt->reset.queue,
					     !test_bit(I915_RESET_BACKOFF,
						       &gt->reset.flags)))
			return -EINTR;

		rcu_read_lock();
	}
	*srcu = srcu_read_lock(&gt->reset.backoff_srcu);
	rcu_read_unlock();

	return 0;
}

void intel_gt_reset_unlock(struct intel_gt *gt, int tag)
__releases(&gt->reset.backoff_srcu)
{
	srcu_read_unlock(&gt->reset.backoff_srcu, tag);
}

int intel_gt_terminally_wedged(struct intel_gt *gt)
{
	might_sleep();

	if (!intel_gt_is_wedged(gt))
		return 0;

	if (intel_gt_has_unrecoverable_error(gt))
		return -EIO;

	/* Reset still in progress? Maybe we will recover? */
	if (wait_event_interruptible(gt->reset.queue,
				     !test_bit(I915_RESET_BACKOFF,
					       &gt->reset.flags)))
		return -EINTR;

	return intel_gt_is_wedged(gt) ? -EIO : 0;
}

void intel_gt_set_wedged_on_init(struct intel_gt *gt)
{
	BUILD_BUG_ON(I915_RESET_ENGINE + I915_NUM_ENGINES >
		     I915_WEDGED_ON_INIT);
	intel_gt_set_wedged(gt);
	i915_disable_error_state(gt->i915, -ENODEV);
	set_bit(I915_WEDGED_ON_INIT, &gt->reset.flags);

	/* Wedged on init is non-recoverable */
	add_taint_for_CI(gt->i915, TAINT_WARN);
}

void intel_gt_set_wedged_on_fini(struct intel_gt *gt)
{
	intel_gt_set_wedged(gt);
	i915_disable_error_state(gt->i915, -ENODEV);
	set_bit(I915_WEDGED_ON_FINI, &gt->reset.flags);
	intel_gt_retire_requests(gt); /* cleanup any wedged requests */
}

void intel_gt_init_reset(struct intel_gt *gt)
{
	init_waitqueue_head(&gt->reset.queue);
	mutex_init(&gt->reset.mutex);
	init_srcu_struct(&gt->reset.backoff_srcu);

	/*
	 * While undesirable to wait inside the shrinker, complain anyway.
	 *
	 * If we have to wait during shrinking, we guarantee forward progress
	 * by forcing the reset. Therefore during the reset we must not
	 * re-enter the shrinker. By declaring that we take the reset mutex
	 * within the shrinker, we forbid ourselves from performing any
	 * fs-reclaim or taking related locks during reset.
	 */
	i915_gem_shrinker_taints_mutex(gt->i915, &gt->reset.mutex);

	/* no GPU until we are ready! */
	__set_bit(I915_WEDGED, &gt->reset.flags);
}

void intel_gt_fini_reset(struct intel_gt *gt)
{
	cleanup_srcu_struct(&gt->reset.backoff_srcu);
}

static void intel_wedge_me(struct work_struct *work)
{
	struct intel_wedge_me *w = container_of(work, typeof(*w), work.work);

	drm_err(&w->gt->i915->drm,
		"%s timed out, cancelling all in-flight rendering.\n",
		w->name);
	intel_gt_set_wedged(w->gt);
}

void __intel_init_wedge(struct intel_wedge_me *w,
			struct intel_gt *gt,
			long timeout,
			const char *name)
{
	w->gt = gt;
	w->name = name;

	INIT_DELAYED_WORK_ONSTACK(&w->work, intel_wedge_me);
	schedule_delayed_work(&w->work, timeout);
}

void __intel_fini_wedge(struct intel_wedge_me *w)
{
	cancel_delayed_work_sync(&w->work);
	destroy_delayed_work_on_stack(&w->work);
	w->gt = NULL;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_reset.c"
#include "selftest_hangcheck.c"
#endif