1 // SPDX-License-Identifier: MIT
2 /*
3 * Copyright © 2008-2018 Intel Corporation
4 */
5
6 #include <linux/sched/mm.h>
7 #include <linux/stop_machine.h>
8 #include <linux/string_helpers.h>
9
10 #include "display/intel_display_reset.h"
11 #include "display/intel_overlay.h"
12
13 #include "gem/i915_gem_context.h"
14
15 #include "gt/intel_gt_regs.h"
16
17 #include "gt/uc/intel_gsc_fw.h"
18
19 #include "i915_drv.h"
20 #include "i915_file_private.h"
21 #include "i915_gpu_error.h"
22 #include "i915_irq.h"
23 #include "i915_reg.h"
24 #include "intel_breadcrumbs.h"
25 #include "intel_engine_pm.h"
26 #include "intel_engine_regs.h"
27 #include "intel_gt.h"
28 #include "intel_gt_pm.h"
29 #include "intel_gt_requests.h"
30 #include "intel_mchbar_regs.h"
31 #include "intel_pci_config.h"
32 #include "intel_reset.h"
33
34 #include "uc/intel_guc.h"
35
36 #define RESET_MAX_RETRIES 3
37
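/*
 * Per-client ban scoring: a banned context adds I915_CLIENT_SCORE_CONTEXT_BAN
 * to its file's ban_score, and a hang arriving within
 * I915_CLIENT_FAST_HANG_JIFFIES of the client's previous hang adds
 * I915_CLIENT_SCORE_HANG_FAST on top.
 */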
38 static void client_mark_guilty(struct i915_gem_context *ctx, bool banned)
39 {
40 struct drm_i915_file_private *file_priv = ctx->file_priv;
41 unsigned long prev_hang;
42 unsigned int score;
43
44 if (IS_ERR_OR_NULL(file_priv))
45 return;
46
47 score = 0;
48 if (banned)
49 score = I915_CLIENT_SCORE_CONTEXT_BAN;
50
51 prev_hang = xchg(&file_priv->hang_timestamp, jiffies);
52 if (time_before(jiffies, prev_hang + I915_CLIENT_FAST_HANG_JIFFIES))
53 score += I915_CLIENT_SCORE_HANG_FAST;
54
55 if (score) {
56 atomic_add(score, &file_priv->ban_score);
57
58 drm_dbg(&ctx->i915->drm,
59 "client %s: gained %u ban score, now %u\n",
60 ctx->name, score,
61 atomic_read(&file_priv->ban_score));
62 }
63 }
64
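/*
 * Decide whether the context behind a hung request should be banned: contexts
 * flagged as non-bannable (used for reset testing) are never banned, while
 * non-recoverable contexts and contexts hanging again within
 * CONTEXT_FAST_HANG_JIFFIES of their previous hang are.
 */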
65 static bool mark_guilty(struct i915_request *rq)
66 {
67 struct i915_gem_context *ctx;
68 unsigned long prev_hang;
69 bool banned;
70 int i;
71
72 if (intel_context_is_closed(rq->context))
73 return true;
74
75 rcu_read_lock();
76 ctx = rcu_dereference(rq->context->gem_context);
77 if (ctx && !kref_get_unless_zero(&ctx->ref))
78 ctx = NULL;
79 rcu_read_unlock();
80 if (!ctx)
81 return intel_context_is_banned(rq->context);
82
83 atomic_inc(&ctx->guilty_count);
84
85 /* Cool contexts are too cool to be banned! (Used for reset testing.) */
86 if (!i915_gem_context_is_bannable(ctx)) {
87 banned = false;
88 goto out;
89 }
90
91 drm_notice(&ctx->i915->drm,
92 "%s context reset due to GPU hang\n",
93 ctx->name);
94
95 /* Record the timestamp for the last N hangs */
96 prev_hang = ctx->hang_timestamp[0];
97 for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp) - 1; i++)
98 ctx->hang_timestamp[i] = ctx->hang_timestamp[i + 1];
99 ctx->hang_timestamp[i] = jiffies;
100
101 /* If we have hung N+1 times in rapid succession, we ban the context! */
102 banned = !i915_gem_context_is_recoverable(ctx);
103 if (time_before(jiffies, prev_hang + CONTEXT_FAST_HANG_JIFFIES))
104 banned = true;
105 if (banned)
106 drm_dbg(&ctx->i915->drm, "context %s: guilty %d, banned\n",
107 ctx->name, atomic_read(&ctx->guilty_count));
108
109 client_mark_guilty(ctx, banned);
110
111 out:
112 i915_gem_context_put(ctx);
113 return banned;
114 }
115
116 static void mark_innocent(struct i915_request *rq)
117 {
118 struct i915_gem_context *ctx;
119
120 rcu_read_lock();
121 ctx = rcu_dereference(rq->context->gem_context);
122 if (ctx)
123 atomic_inc(&ctx->active_count);
124 rcu_read_unlock();
125 }
126
127 void __i915_request_reset(struct i915_request *rq, bool guilty)
128 {
129 bool banned = false;
130
131 RQ_TRACE(rq, "guilty? %s\n", str_yes_no(guilty));
132 GEM_BUG_ON(__i915_request_is_complete(rq));
133
134 rcu_read_lock(); /* protect the GEM context */
135 if (guilty) {
136 i915_request_set_error_once(rq, -EIO);
137 __i915_request_skip(rq);
138 banned = mark_guilty(rq);
139 } else {
140 i915_request_set_error_once(rq, -EAGAIN);
141 mark_innocent(rq);
142 }
143 rcu_read_unlock();
144
145 if (banned)
146 intel_context_ban(rq->context, rq);
147 }
148
149 static bool i915_in_reset(struct pci_dev *pdev)
150 {
151 u8 gdrst;
152
153 pci_read_config_byte(pdev, I915_GDRST, &gdrst);
154 return gdrst & GRDOM_RESET_STATUS;
155 }
156
157 static int i915_do_reset(struct intel_gt *gt,
158 intel_engine_mask_t engine_mask,
159 unsigned int retry)
160 {
161 struct pci_dev *pdev = to_pci_dev(gt->i915->drm.dev);
162 int err;
163
164 /* Assert reset for at least 20 usec, and wait for acknowledgement. */
165 pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
166 udelay(50);
167 err = wait_for_atomic(i915_in_reset(pdev), 50);
168
169 /* Clear the reset request. */
170 pci_write_config_byte(pdev, I915_GDRST, 0);
171 udelay(50);
172 if (!err)
173 err = wait_for_atomic(!i915_in_reset(pdev), 50);
174
175 return err;
176 }
177
178 static bool g4x_reset_complete(struct pci_dev *pdev)
179 {
180 u8 gdrst;
181
182 pci_read_config_byte(pdev, I915_GDRST, &gdrst);
183 return (gdrst & GRDOM_RESET_ENABLE) == 0;
184 }
185
186 static int g33_do_reset(struct intel_gt *gt,
187 intel_engine_mask_t engine_mask,
188 unsigned int retry)
189 {
190 struct pci_dev *pdev = to_pci_dev(gt->i915->drm.dev);
191
192 pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
193 return wait_for_atomic(g4x_reset_complete(pdev), 50);
194 }
195
196 static int g4x_do_reset(struct intel_gt *gt,
197 intel_engine_mask_t engine_mask,
198 unsigned int retry)
199 {
200 struct pci_dev *pdev = to_pci_dev(gt->i915->drm.dev);
201 struct intel_uncore *uncore = gt->uncore;
202 int ret;
203
204 /* WaVcpClkGateDisableForMediaReset:ctg,elk */
205 intel_uncore_rmw_fw(uncore, VDECCLK_GATE_D, 0, VCP_UNIT_CLOCK_GATE_DISABLE);
206 intel_uncore_posting_read_fw(uncore, VDECCLK_GATE_D);
207
208 pci_write_config_byte(pdev, I915_GDRST,
209 GRDOM_MEDIA | GRDOM_RESET_ENABLE);
210 ret = wait_for_atomic(g4x_reset_complete(pdev), 50);
211 if (ret) {
212 GT_TRACE(gt, "Wait for media reset failed\n");
213 goto out;
214 }
215
216 pci_write_config_byte(pdev, I915_GDRST,
217 GRDOM_RENDER | GRDOM_RESET_ENABLE);
218 ret = wait_for_atomic(g4x_reset_complete(pdev), 50);
219 if (ret) {
220 GT_TRACE(gt, "Wait for render reset failed\n");
221 goto out;
222 }
223
224 out:
225 pci_write_config_byte(pdev, I915_GDRST, 0);
226
227 intel_uncore_rmw_fw(uncore, VDECCLK_GATE_D, VCP_UNIT_CLOCK_GATE_DISABLE, 0);
228 intel_uncore_posting_read_fw(uncore, VDECCLK_GATE_D);
229
230 return ret;
231 }
232
233 static int ilk_do_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask,
234 unsigned int retry)
235 {
236 struct intel_uncore *uncore = gt->uncore;
237 int ret;
238
239 intel_uncore_write_fw(uncore, ILK_GDSR,
240 ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE);
241 ret = __intel_wait_for_register_fw(uncore, ILK_GDSR,
242 ILK_GRDOM_RESET_ENABLE, 0,
243 5000, 0,
244 NULL);
245 if (ret) {
246 GT_TRACE(gt, "Wait for render reset failed\n");
247 goto out;
248 }
249
250 intel_uncore_write_fw(uncore, ILK_GDSR,
251 ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE);
252 ret = __intel_wait_for_register_fw(uncore, ILK_GDSR,
253 ILK_GRDOM_RESET_ENABLE, 0,
254 5000, 0,
255 NULL);
256 if (ret) {
257 GT_TRACE(gt, "Wait for media reset failed\n");
258 goto out;
259 }
260
261 out:
262 intel_uncore_write_fw(uncore, ILK_GDSR, 0);
263 intel_uncore_posting_read_fw(uncore, ILK_GDSR);
264 return ret;
265 }
266
267 /* Reset the hardware domains (GENX_GRDOM_*) specified by mask */
268 static int gen6_hw_domain_reset(struct intel_gt *gt, u32 hw_domain_mask)
269 {
270 struct intel_uncore *uncore = gt->uncore;
271 int loops;
272 int err;
273
274 /*
275 * On some platforms, e.g. Jasperlake, we see that the engine register
276 * state is not cleared until shortly after GDRST reports completion,
277 * causing a failure as we try to immediately resume while the internal
278 * state is still in flux. If we immediately repeat the reset, the
279 * second reset appears to serialise with the first, and since it is a
280 * no-op, the registers should retain their reset value. However, there
281 * is still a concern that upon leaving the second reset, the internal
282 * engine state is still in flux and not ready for resuming.
283 *
284 * Starting on MTL, there are some prep steps that we need to do when
285 * resetting some engines that need to be applied every time we write to
286 * GEN6_GDRST. As those are time consuming (tens of ms), we don't want
287 * to perform that twice, so, since the Jasperlake issue hasn't been
288 * observed on MTL, we avoid repeating the reset on newer platforms.
289 */
290 loops = GRAPHICS_VER_FULL(gt->i915) < IP_VER(12, 70) ? 2 : 1;
291
292 /*
293 * GEN6_GDRST is not in the gt power well, no need to check
294 * for fifo space for the write or forcewake the chip for
295 * the read
296 */
297 do {
298 intel_uncore_write_fw(uncore, GEN6_GDRST, hw_domain_mask);
299
300 /* Wait for the device to ack the reset requests. */
301 err = __intel_wait_for_register_fw(uncore, GEN6_GDRST,
302 hw_domain_mask, 0,
303 2000, 0,
304 NULL);
305 } while (err == 0 && --loops);
306 if (err)
307 GT_TRACE(gt,
308 "Wait for 0x%08x engines reset failed\n",
309 hw_domain_mask);
310
311 /*
312 * As we have observed that the engine state is still volatile
313 * after GDRST is acked, impose a small delay to let everything settle.
314 */
315 udelay(50);
316
317 return err;
318 }
319
320 static int __gen6_reset_engines(struct intel_gt *gt,
321 intel_engine_mask_t engine_mask,
322 unsigned int retry)
323 {
324 struct intel_engine_cs *engine;
325 u32 hw_mask;
326
327 if (engine_mask == ALL_ENGINES) {
328 hw_mask = GEN6_GRDOM_FULL;
329 } else {
330 intel_engine_mask_t tmp;
331
332 hw_mask = 0;
333 for_each_engine_masked(engine, gt, engine_mask, tmp) {
334 hw_mask |= engine->reset_domain;
335 }
336 }
337
338 return gen6_hw_domain_reset(gt, hw_mask);
339 }
340
341 static int gen6_reset_engines(struct intel_gt *gt,
342 intel_engine_mask_t engine_mask,
343 unsigned int retry)
344 {
345 unsigned long flags;
346 int ret;
347
348 spin_lock_irqsave(&gt->uncore->lock, flags);
349 ret = __gen6_reset_engines(gt, engine_mask, retry);
350 spin_unlock_irqrestore(&gt->uncore->lock, flags);
351
352 return ret;
353 }
354
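/*
 * Each SFC is shared between a pair of video decode engines and the video
 * enhancement engine at instance/2, so the VECS that owns the same SFC as a
 * given VCS can be looked up directly from the VCS instance number.
 */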
355 static struct intel_engine_cs *find_sfc_paired_vecs_engine(struct intel_engine_cs *engine)
356 {
357 int vecs_id;
358
359 GEM_BUG_ON(engine->class != VIDEO_DECODE_CLASS);
360
361 vecs_id = _VECS((engine->instance) / 2);
362
363 return engine->gt->engine[vecs_id];
364 }
365
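/*
 * Register/bit description of the SFC forced-lock mechanism for one engine,
 * filled in per engine class by get_sfc_forced_lock_data() and consumed by
 * gen11_lock_sfc()/gen11_unlock_sfc().
 */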
366 struct sfc_lock_data {
367 i915_reg_t lock_reg;
368 i915_reg_t ack_reg;
369 i915_reg_t usage_reg;
370 u32 lock_bit;
371 u32 ack_bit;
372 u32 usage_bit;
373 u32 reset_bit;
374 };
375
376 static void get_sfc_forced_lock_data(struct intel_engine_cs *engine,
377 struct sfc_lock_data *sfc_lock)
378 {
379 switch (engine->class) {
380 default:
381 MISSING_CASE(engine->class);
382 fallthrough;
383 case VIDEO_DECODE_CLASS:
384 sfc_lock->lock_reg = GEN11_VCS_SFC_FORCED_LOCK(engine->mmio_base);
385 sfc_lock->lock_bit = GEN11_VCS_SFC_FORCED_LOCK_BIT;
386
387 sfc_lock->ack_reg = GEN11_VCS_SFC_LOCK_STATUS(engine->mmio_base);
388 sfc_lock->ack_bit = GEN11_VCS_SFC_LOCK_ACK_BIT;
389
390 sfc_lock->usage_reg = GEN11_VCS_SFC_LOCK_STATUS(engine->mmio_base);
391 sfc_lock->usage_bit = GEN11_VCS_SFC_USAGE_BIT;
392 sfc_lock->reset_bit = GEN11_VCS_SFC_RESET_BIT(engine->instance);
393
394 break;
395 case VIDEO_ENHANCEMENT_CLASS:
396 sfc_lock->lock_reg = GEN11_VECS_SFC_FORCED_LOCK(engine->mmio_base);
397 sfc_lock->lock_bit = GEN11_VECS_SFC_FORCED_LOCK_BIT;
398
399 sfc_lock->ack_reg = GEN11_VECS_SFC_LOCK_ACK(engine->mmio_base);
400 sfc_lock->ack_bit = GEN11_VECS_SFC_LOCK_ACK_BIT;
401
402 sfc_lock->usage_reg = GEN11_VECS_SFC_USAGE(engine->mmio_base);
403 sfc_lock->usage_bit = GEN11_VECS_SFC_USAGE_BIT;
404 sfc_lock->reset_bit = GEN11_VECS_SFC_RESET_BIT(engine->instance);
405
406 break;
407 }
408 }
409
410 static int gen11_lock_sfc(struct intel_engine_cs *engine,
411 u32 *reset_mask,
412 u32 *unlock_mask)
413 {
414 struct intel_uncore *uncore = engine->uncore;
415 u8 vdbox_sfc_access = engine->gt->info.vdbox_sfc_access;
416 struct sfc_lock_data sfc_lock;
417 bool lock_obtained, lock_to_other = false;
418 int ret;
419
420 switch (engine->class) {
421 case VIDEO_DECODE_CLASS:
422 if ((BIT(engine->instance) & vdbox_sfc_access) == 0)
423 return 0;
424
425 fallthrough;
426 case VIDEO_ENHANCEMENT_CLASS:
427 get_sfc_forced_lock_data(engine, &sfc_lock);
428
429 break;
430 default:
431 return 0;
432 }
433
434 if (!(intel_uncore_read_fw(uncore, sfc_lock.usage_reg) & sfc_lock.usage_bit)) {
435 struct intel_engine_cs *paired_vecs;
436
437 if (engine->class != VIDEO_DECODE_CLASS ||
438 GRAPHICS_VER(engine->i915) != 12)
439 return 0;
440
441 /*
442 * Wa_14010733141
443 *
444 * If the VCS-MFX isn't using the SFC, we also need to check
445 * whether VCS-HCP is using it. If so, we need to issue a *VE*
446 * forced lock on the VE engine that shares the same SFC.
447 */
448 if (!(intel_uncore_read_fw(uncore,
449 GEN12_HCP_SFC_LOCK_STATUS(engine->mmio_base)) &
450 GEN12_HCP_SFC_USAGE_BIT))
451 return 0;
452
453 paired_vecs = find_sfc_paired_vecs_engine(engine);
454 get_sfc_forced_lock_data(paired_vecs, &sfc_lock);
455 lock_to_other = true;
456 *unlock_mask |= paired_vecs->mask;
457 } else {
458 *unlock_mask |= engine->mask;
459 }
460
461 /*
462 * If the engine is using an SFC, tell the engine that a software reset
463 * is going to happen. The engine will then try to force lock the SFC.
464 * If SFC ends up being locked to the engine we want to reset, we have
465 * to reset it as well (we will unlock it once the reset sequence is
466 * completed).
467 */
468 intel_uncore_rmw_fw(uncore, sfc_lock.lock_reg, 0, sfc_lock.lock_bit);
469
470 ret = __intel_wait_for_register_fw(uncore,
471 sfc_lock.ack_reg,
472 sfc_lock.ack_bit,
473 sfc_lock.ack_bit,
474 1000, 0, NULL);
475
476 /*
477 * Was the SFC released while we were trying to lock it?
478 *
479 * We should reset both the engine and the SFC if:
480 * - We were locking the SFC to this engine and the lock succeeded
481 * OR
482 * - We were locking the SFC to a different engine (Wa_14010733141)
483 * but the SFC was released before the lock was obtained.
484 *
485 * Otherwise we need only reset the engine by itself and we can
486 * leave the SFC alone.
487 */
488 lock_obtained = (intel_uncore_read_fw(uncore, sfc_lock.usage_reg) &
489 sfc_lock.usage_bit) != 0;
490 if (lock_obtained == lock_to_other)
491 return 0;
492
493 if (ret) {
494 ENGINE_TRACE(engine, "Wait for SFC forced lock ack failed\n");
495 return ret;
496 }
497
498 *reset_mask |= sfc_lock.reset_bit;
499 return 0;
500 }
501
502 static void gen11_unlock_sfc(struct intel_engine_cs *engine)
503 {
504 struct intel_uncore *uncore = engine->uncore;
505 u8 vdbox_sfc_access = engine->gt->info.vdbox_sfc_access;
506 struct sfc_lock_data sfc_lock = {};
507
508 if (engine->class != VIDEO_DECODE_CLASS &&
509 engine->class != VIDEO_ENHANCEMENT_CLASS)
510 return;
511
512 if (engine->class == VIDEO_DECODE_CLASS &&
513 (BIT(engine->instance) & vdbox_sfc_access) == 0)
514 return;
515
516 get_sfc_forced_lock_data(engine, &sfc_lock);
517
518 intel_uncore_rmw_fw(uncore, sfc_lock.lock_reg, sfc_lock.lock_bit, 0);
519 }
520
521 static int __gen11_reset_engines(struct intel_gt *gt,
522 intel_engine_mask_t engine_mask,
523 unsigned int retry)
524 {
525 struct intel_engine_cs *engine;
526 intel_engine_mask_t tmp;
527 u32 reset_mask, unlock_mask = 0;
528 int ret;
529
530 if (engine_mask == ALL_ENGINES) {
531 reset_mask = GEN11_GRDOM_FULL;
532 } else {
533 reset_mask = 0;
534 for_each_engine_masked(engine, gt, engine_mask, tmp) {
535 reset_mask |= engine->reset_domain;
536 ret = gen11_lock_sfc(engine, &reset_mask, &unlock_mask);
537 if (ret)
538 goto sfc_unlock;
539 }
540 }
541
542 ret = gen6_hw_domain_reset(gt, reset_mask);
543
544 sfc_unlock:
545 /*
546 * We unlock the SFC based on the lock status and not the result of
547 * gen11_lock_sfc to make sure that we clean up properly if something
548 * went wrong during the lock (e.g. lock acquired after timeout
549 * expiration).
550 *
551 * Due to Wa_14010733141, we may have locked an SFC to an engine that
552 * wasn't being reset. So instead of calling gen11_unlock_sfc()
553 * on engine_mask, we instead call it on the mask of engines that our
554 * gen11_lock_sfc() calls told us actually had locks attempted.
555 */
556 for_each_engine_masked(engine, gt, unlock_mask, tmp)
557 gen11_unlock_sfc(engine);
558
559 return ret;
560 }
561
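/*
 * Ask the engine for permission to reset: request RESET_CTL_REQUEST_RESET and
 * wait for it to report RESET_CTL_READY_TO_RESET. Catastrophic errors skip
 * the handshake and instead wait for the HW to clear RESET_CTL_CAT_ERROR.
 */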
562 static int gen8_engine_reset_prepare(struct intel_engine_cs *engine)
563 {
564 struct intel_uncore *uncore = engine->uncore;
565 const i915_reg_t reg = RING_RESET_CTL(engine->mmio_base);
566 u32 request, mask, ack;
567 int ret;
568
569 if (I915_SELFTEST_ONLY(should_fail(&engine->reset_timeout, 1)))
570 return -ETIMEDOUT;
571
572 ack = intel_uncore_read_fw(uncore, reg);
573 if (ack & RESET_CTL_CAT_ERROR) {
574 /*
575 * For catastrophic errors, ready-for-reset sequence
576 * needs to be bypassed: HAS#396813
577 */
578 request = RESET_CTL_CAT_ERROR;
579 mask = RESET_CTL_CAT_ERROR;
580
581 /* Catastrophic errors need to be cleared by HW */
582 ack = 0;
583 } else if (!(ack & RESET_CTL_READY_TO_RESET)) {
584 request = RESET_CTL_REQUEST_RESET;
585 mask = RESET_CTL_READY_TO_RESET;
586 ack = RESET_CTL_READY_TO_RESET;
587 } else {
588 return 0;
589 }
590
591 intel_uncore_write_fw(uncore, reg, _MASKED_BIT_ENABLE(request));
592 ret = __intel_wait_for_register_fw(uncore, reg, mask, ack,
593 700, 0, NULL);
594 if (ret)
595 drm_err(&engine->i915->drm,
596 "%s reset request timed out: {request: %08x, RESET_CTL: %08x}\n",
597 engine->name, request,
598 intel_uncore_read_fw(uncore, reg));
599
600 return ret;
601 }
602
603 static void gen8_engine_reset_cancel(struct intel_engine_cs *engine)
604 {
605 intel_uncore_write_fw(engine->uncore,
606 RING_RESET_CTL(engine->mmio_base),
607 _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET));
608 }
609
610 static int gen8_reset_engines(struct intel_gt *gt,
611 intel_engine_mask_t engine_mask,
612 unsigned int retry)
613 {
614 struct intel_engine_cs *engine;
615 const bool reset_non_ready = retry >= 1;
616 intel_engine_mask_t tmp;
617 unsigned long flags;
618 int ret;
619
620 spin_lock_irqsave(&gt->uncore->lock, flags);
621
622 for_each_engine_masked(engine, gt, engine_mask, tmp) {
623 ret = gen8_engine_reset_prepare(engine);
624 if (ret && !reset_non_ready)
625 goto skip_reset;
626
627 /*
628 * If this is not the first failed attempt to prepare,
629 * we decide to proceed anyway.
630 *
631 * By doing so we risk context corruption and with
632 * some gens (kbl), possible system hang if reset
633 * happens during active bb execution.
634 *
635 * We would rather take context corruption than a
636 * failed reset with a wedged driver/gpu. And the
637 * active bb execution case should be covered by the
638 * stop_engines() we have before the reset.
639 */
640 }
641
642 /*
643 * Wa_22011100796:dg2, whenever Full soft reset is required,
644 * reset all individual engines firstly, and then do a full soft reset.
645 *
646 * This is best effort, so ignore any error from the initial reset.
647 */
648 if (IS_DG2(gt->i915) && engine_mask == ALL_ENGINES)
649 __gen11_reset_engines(gt, gt->info.engine_mask, 0);
650
651 if (GRAPHICS_VER(gt->i915) >= 11)
652 ret = __gen11_reset_engines(gt, engine_mask, retry);
653 else
654 ret = __gen6_reset_engines(gt, engine_mask, retry);
655
656 skip_reset:
657 for_each_engine_masked(engine, gt, engine_mask, tmp)
658 gen8_engine_reset_cancel(engine);
659
660 spin_unlock_irqrestore(&gt->uncore->lock, flags);
661
662 return ret;
663 }
664
665 static int mock_reset(struct intel_gt *gt,
666 intel_engine_mask_t mask,
667 unsigned int retry)
668 {
669 return 0;
670 }
671
672 typedef int (*reset_func)(struct intel_gt *,
673 intel_engine_mask_t engine_mask,
674 unsigned int retry);
675
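/*
 * Pick the reset backend for this platform, newest first; returns NULL if no
 * reset mechanism is available (graphics version < 3).
 */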
676 static reset_func intel_get_gpu_reset(const struct intel_gt *gt)
677 {
678 struct drm_i915_private *i915 = gt->i915;
679
680 if (is_mock_gt(gt))
681 return mock_reset;
682 else if (GRAPHICS_VER(i915) >= 8)
683 return gen8_reset_engines;
684 else if (GRAPHICS_VER(i915) >= 6)
685 return gen6_reset_engines;
686 else if (GRAPHICS_VER(i915) >= 5)
687 return ilk_do_reset;
688 else if (IS_G4X(i915))
689 return g4x_do_reset;
690 else if (IS_G33(i915) || IS_PINEVIEW(i915))
691 return g33_do_reset;
692 else if (GRAPHICS_VER(i915) >= 3)
693 return i915_do_reset;
694 else
695 return NULL;
696 }
697
698 static int __reset_guc(struct intel_gt *gt)
699 {
700 u32 guc_domain =
701 GRAPHICS_VER(gt->i915) >= 11 ? GEN11_GRDOM_GUC : GEN9_GRDOM_GUC;
702
703 return gen6_hw_domain_reset(gt, guc_domain);
704 }
705
706 static bool needs_wa_14015076503(struct intel_gt *gt, intel_engine_mask_t engine_mask)
707 {
708 if (!IS_METEORLAKE(gt->i915) || !HAS_ENGINE(gt, GSC0))
709 return false;
710
711 if (!__HAS_ENGINE(engine_mask, GSC0))
712 return false;
713
714 return intel_gsc_uc_fw_init_done(&gt->uc.gsc);
715 }
716
717 static intel_engine_mask_t
718 wa_14015076503_start(struct intel_gt *gt, intel_engine_mask_t engine_mask, bool first)
719 {
720 if (!needs_wa_14015076503(gt, engine_mask))
721 return engine_mask;
722
723 /*
724 * wa_14015076503: if the GSC FW is loaded, we need to alert it that
725 * we're going to do a GSC engine reset and then wait for 200ms for the
726 * FW to get ready for it. However, if this is the first ALL_ENGINES
727 * reset attempt and the GSC is not busy, we can try to instead reset
728 * the GuC and all the other engines individually to avoid the 200ms
729 * wait.
730 * Skipping the GSC engine is safe because, differently from other
731 * engines, the GSCCS only role is to forward the commands to the GSC
732 * FW, so it doesn't have any HW outside of the CS itself and therefore
733 * it has no state that we don't explicitly re-init on resume or on
734 * context switch (e.g. LRC or power context). The HW for the GSC uC is
735 * managed by the GSC FW so we don't need to care about that.
736 */
737 if (engine_mask == ALL_ENGINES && first && intel_engine_is_idle(gt->engine[GSC0])) {
738 __reset_guc(gt);
739 engine_mask = gt->info.engine_mask & ~BIT(GSC0);
740 } else {
741 intel_uncore_rmw(gt->uncore,
742 HECI_H_GS1(MTL_GSC_HECI2_BASE),
743 0, HECI_H_GS1_ER_PREP);
744
745 /* make sure the reset bit is clear when writing the CSR reg */
746 intel_uncore_rmw(gt->uncore,
747 HECI_H_CSR(MTL_GSC_HECI2_BASE),
748 HECI_H_CSR_RST, HECI_H_CSR_IG);
749 msleep(200);
750 }
751
752 return engine_mask;
753 }
754
755 static void
756 wa_14015076503_end(struct intel_gt *gt, intel_engine_mask_t engine_mask)
757 {
758 if (!needs_wa_14015076503(gt, engine_mask))
759 return;
760
761 intel_uncore_rmw(gt->uncore,
762 HECI_H_GS1(MTL_GSC_HECI2_BASE),
763 HECI_H_GS1_ER_PREP, 0);
764 }
765
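/*
 * Perform the platform-specific reset while holding forcewake, so the power
 * well cannot sleep and drop the reset request. A full-GT (ALL_ENGINES) reset
 * is retried up to RESET_MAX_RETRIES times on timeout; per-engine resets get
 * a single attempt.
 */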
766 int __intel_gt_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask)
767 {
768 const int retries = engine_mask == ALL_ENGINES ? RESET_MAX_RETRIES : 1;
769 reset_func reset;
770 int ret = -ETIMEDOUT;
771 int retry;
772
773 reset = intel_get_gpu_reset(gt);
774 if (!reset)
775 return -ENODEV;
776
777 /*
778 * If the power well sleeps during the reset, the reset
779 * request may be dropped and never completes (causing -EIO).
780 */
781 intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
782 for (retry = 0; ret == -ETIMEDOUT && retry < retries; retry++) {
783 intel_engine_mask_t reset_mask;
784
785 reset_mask = wa_14015076503_start(gt, engine_mask, !retry);
786
787 GT_TRACE(gt, "engine_mask=%x\n", reset_mask);
788 preempt_disable();
789 ret = reset(gt, reset_mask, retry);
790 preempt_enable();
791
792 wa_14015076503_end(gt, reset_mask);
793 }
794 intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);
795
796 return ret;
797 }
798
799 bool intel_has_gpu_reset(const struct intel_gt *gt)
800 {
801 if (!gt->i915->params.reset)
802 return NULL;
803
804 return intel_get_gpu_reset(gt);
805 }
806
807 bool intel_has_reset_engine(const struct intel_gt *gt)
808 {
809 if (gt->i915->params.reset < 2)
810 return false;
811
812 return INTEL_INFO(gt->i915)->has_reset_engine;
813 }
814
815 int intel_reset_guc(struct intel_gt *gt)
816 {
817 int ret;
818
819 GEM_BUG_ON(!HAS_GT_UC(gt->i915));
820
821 intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
822 ret = __reset_guc(gt);
823 intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);
824
825 return ret;
826 }
827
828 /*
829 * Ensure the irq handler finishes, and is not run again.
830 * Also return the active request so that we only search for it once.
831 */
832 static void reset_prepare_engine(struct intel_engine_cs *engine)
833 {
834 /*
835 * During the reset sequence, we must prevent the engine from
836 * entering RC6. As the context state is undefined until we restart
837 * the engine, if it does enter RC6 during the reset, the state
838 * written to the powercontext is undefined and so we may lose
839 * GPU state upon resume, i.e. fail to restart after a reset.
840 */
841 intel_uncore_forcewake_get(engine->uncore, FORCEWAKE_ALL);
842 if (engine->reset.prepare)
843 engine->reset.prepare(engine);
844 }
845
846 static void revoke_mmaps(struct intel_gt *gt)
847 {
848 int i;
849
850 for (i = 0; i < gt->ggtt->num_fences; i++) {
851 struct drm_vma_offset_node *node;
852 struct i915_vma *vma;
853 u64 vma_offset;
854
855 vma = READ_ONCE(gt->ggtt->fence_regs[i].vma);
856 if (!vma)
857 continue;
858
859 if (!i915_vma_has_userfault(vma))
860 continue;
861
862 GEM_BUG_ON(vma->fence != &gt->ggtt->fence_regs[i]);
863
864 if (!vma->mmo)
865 continue;
866
867 node = &vma->mmo->vma_node;
868 vma_offset = vma->gtt_view.partial.offset << PAGE_SHIFT;
869
870 unmap_mapping_range(gt->i915->drm.anon_inode->i_mapping,
871 drm_vma_node_offset_addr(node) + vma_offset,
872 vma->size,
873 1);
874 }
875 }
876
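/*
 * Quiesce submission on every engine (and in the GuC) before touching the
 * reset registers, returning the mask of engines that were awake so
 * reset_finish() can release the wakerefs taken here.
 */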
877 static intel_engine_mask_t reset_prepare(struct intel_gt *gt)
878 {
879 struct intel_engine_cs *engine;
880 intel_engine_mask_t awake = 0;
881 enum intel_engine_id id;
882
883 /* For GuC mode, ensure submission is disabled before stopping ring */
884 intel_uc_reset_prepare(&gt->uc);
885
886 for_each_engine(engine, gt, id) {
887 if (intel_engine_pm_get_if_awake(engine))
888 awake |= engine->mask;
889 reset_prepare_engine(engine);
890 }
891
892 return awake;
893 }
894
895 static void gt_revoke(struct intel_gt *gt)
896 {
897 revoke_mmaps(gt);
898 }
899
900 static int gt_reset(struct intel_gt *gt, intel_engine_mask_t stalled_mask)
901 {
902 struct intel_engine_cs *engine;
903 enum intel_engine_id id;
904 int err;
905
906 /*
907 * Everything depends on having the GTT running, so we need to start
908 * there.
909 */
910 err = i915_ggtt_enable_hw(gt->i915);
911 if (err)
912 return err;
913
914 local_bh_disable();
915 for_each_engine(engine, gt, id)
916 __intel_engine_reset(engine, stalled_mask & engine->mask);
917 local_bh_enable();
918
919 intel_uc_reset(&gt->uc, ALL_ENGINES);
920
921 intel_ggtt_restore_fences(gt->ggtt);
922
923 return err;
924 }
925
926 static void reset_finish_engine(struct intel_engine_cs *engine)
927 {
928 if (engine->reset.finish)
929 engine->reset.finish(engine);
930 intel_uncore_forcewake_put(engine->uncore, FORCEWAKE_ALL);
931
932 intel_engine_signal_breadcrumbs(engine);
933 }
934
935 static void reset_finish(struct intel_gt *gt, intel_engine_mask_t awake)
936 {
937 struct intel_engine_cs *engine;
938 enum intel_engine_id id;
939
940 for_each_engine(engine, gt, id) {
941 reset_finish_engine(engine);
942 if (awake & engine->mask)
943 intel_engine_pm_put(engine);
944 }
945
946 intel_uc_reset_finish(&gt->uc);
947 }
948
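/*
 * Once the GT is wedged, engine->submit_request is replaced with this stub,
 * which immediately marks each new request as -EIO and signals its
 * breadcrumbs instead of submitting it to the hardware.
 */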
949 static void nop_submit_request(struct i915_request *request)
950 {
951 RQ_TRACE(request, "-EIO\n");
952
953 request = i915_request_mark_eio(request);
954 if (request) {
955 i915_request_submit(request);
956 intel_engine_signal_breadcrumbs(request->engine);
957
958 i915_request_put(request);
959 }
960 }
961
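/*
 * Wedge the GT: stop all submission, complete in-flight and future requests
 * with -EIO, and leave the GPU idle until (and unless) the wedge is undone by
 * __intel_gt_unset_wedged().
 */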
962 static void __intel_gt_set_wedged(struct intel_gt *gt)
963 {
964 struct intel_engine_cs *engine;
965 intel_engine_mask_t awake;
966 enum intel_engine_id id;
967
968 if (test_bit(I915_WEDGED, &gt->reset.flags))
969 return;
970
971 GT_TRACE(gt, "start\n");
972
973 /*
974 * First, stop submission to hw, but do not yet complete requests by
975 * rolling the global seqno forward (since this would complete requests
976 * for which we haven't set the fence error to EIO yet).
977 */
978 awake = reset_prepare(gt);
979
980 /* Even if the GPU reset fails, it should still stop the engines */
981 if (!INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
982 __intel_gt_reset(gt, ALL_ENGINES);
983
984 for_each_engine(engine, gt, id)
985 engine->submit_request = nop_submit_request;
986
987 /*
988 * Make sure no request can slip through without getting completed by
989 * either this call here to intel_engine_write_global_seqno, or the one
990 * in nop_submit_request.
991 */
992 synchronize_rcu_expedited();
993 set_bit(I915_WEDGED, &gt->reset.flags);
994
995 /* Mark all executing requests as skipped */
996 local_bh_disable();
997 for_each_engine(engine, gt, id)
998 if (engine->reset.cancel)
999 engine->reset.cancel(engine);
1000 intel_uc_cancel_requests(&gt->uc);
1001 local_bh_enable();
1002
1003 reset_finish(gt, awake);
1004
1005 GT_TRACE(gt, "end\n");
1006 }
1007
1008 void intel_gt_set_wedged(struct intel_gt *gt)
1009 {
1010 intel_wakeref_t wakeref;
1011
1012 if (test_bit(I915_WEDGED, &gt->reset.flags))
1013 return;
1014
1015 wakeref = intel_runtime_pm_get(gt->uncore->rpm);
1016 mutex_lock(&gt->reset.mutex);
1017
1018 if (GEM_SHOW_DEBUG()) {
1019 struct drm_printer p = drm_debug_printer(__func__);
1020 struct intel_engine_cs *engine;
1021 enum intel_engine_id id;
1022
1023 drm_printf(&p, "called from %pS\n", (void *)_RET_IP_);
1024 for_each_engine(engine, gt, id) {
1025 if (intel_engine_is_idle(engine))
1026 continue;
1027
1028 intel_engine_dump(engine, &p, "%s\n", engine->name);
1029 }
1030 }
1031
1032 __intel_gt_set_wedged(gt);
1033
1034 mutex_unlock(&gt->reset.mutex);
1035 intel_runtime_pm_put(gt->uncore->rpm, wakeref);
1036 }
1037
1038 static bool __intel_gt_unset_wedged(struct intel_gt *gt)
1039 {
1040 struct intel_gt_timelines *timelines = &gt->timelines;
1041 struct intel_timeline *tl;
1042 bool ok;
1043
1044 if (!test_bit(I915_WEDGED, &gt->reset.flags))
1045 return true;
1046
1047 /* Never fully initialised, recovery impossible */
1048 if (intel_gt_has_unrecoverable_error(gt))
1049 return false;
1050
1051 GT_TRACE(gt, "start\n");
1052
1053 /*
1054 * Before unwedging, make sure that all pending operations
1055 * are flushed and errored out - we may have requests waiting upon
1056 * third party fences. We marked all inflight requests as EIO, and
1057 * every execbuf since returned EIO, for consistency we want all
1058 * the currently pending requests to also be marked as EIO, which
1059 * is done inside our nop_submit_request - and so we must wait.
1060 *
1061 * No more can be submitted until we reset the wedged bit.
1062 */
1063 spin_lock(&timelines->lock);
1064 list_for_each_entry(tl, &timelines->active_list, link) {
1065 struct dma_fence *fence;
1066
1067 fence = i915_active_fence_get(&tl->last_request);
1068 if (!fence)
1069 continue;
1070
1071 spin_unlock(&timelines->lock);
1072
1073 /*
1074 * All internal dependencies (i915_requests) will have
1075 * been flushed by the set-wedge, but we may be stuck waiting
1076 * for external fences. These should all be capped to 10s
1077 * (I915_FENCE_TIMEOUT) so this wait should not be unbounded
1078 * in the worst case.
1079 */
1080 dma_fence_default_wait(fence, false, MAX_SCHEDULE_TIMEOUT);
1081 dma_fence_put(fence);
1082
1083 /* Restart iteration after dropping the lock */
1084 spin_lock(&timelines->lock);
1085 tl = list_entry(&timelines->active_list, typeof(*tl), link);
1086 }
1087 spin_unlock(&timelines->lock);
1088
1089 /* We must reset pending GPU events before restoring our submission */
1090 ok = !HAS_EXECLISTS(gt->i915); /* XXX better agnosticism desired */
1091 if (!INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
1092 ok = __intel_gt_reset(gt, ALL_ENGINES) == 0;
1093 if (!ok) {
1094 /*
1095 * Warn CI about the unrecoverable wedged condition.
1096 * Time for a reboot.
1097 */
1098 add_taint_for_CI(gt->i915, TAINT_WARN);
1099 return false;
1100 }
1101
1102 /*
1103 * Undo nop_submit_request. We prevent all new i915 requests from
1104 * being queued (by disallowing execbuf whilst wedged) so having
1105 * waited for all active requests above, we know the system is idle
1106 * and do not have to worry about a thread being inside
1107 * engine->submit_request() as we swap over. So unlike installing
1108 * the nop_submit_request on reset, we can do this from normal
1109 * context and do not require stop_machine().
1110 */
1111 intel_engines_reset_default_submission(gt);
1112
1113 GT_TRACE(gt, "end\n");
1114
1115 smp_mb__before_atomic(); /* complete takeover before enabling execbuf */
1116 clear_bit(I915_WEDGED, &gt->reset.flags);
1117
1118 return true;
1119 }
1120
1121 bool intel_gt_unset_wedged(struct intel_gt *gt)
1122 {
1123 bool result;
1124
1125 mutex_lock(&gt->reset.mutex);
1126 result = __intel_gt_unset_wedged(gt);
1127 mutex_unlock(&gt->reset.mutex);
1128
1129 return result;
1130 }
1131
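/*
 * Issue the full-GT hardware reset, backing off and retrying a few times if
 * the hardware does not ack, then restore software state via gt_reset().
 */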
1132 static int do_reset(struct intel_gt *gt, intel_engine_mask_t stalled_mask)
1133 {
1134 int err, i;
1135
1136 err = __intel_gt_reset(gt, ALL_ENGINES);
1137 for (i = 0; err && i < RESET_MAX_RETRIES; i++) {
1138 msleep(10 * (i + 1));
1139 err = __intel_gt_reset(gt, ALL_ENGINES);
1140 }
1141 if (err)
1142 return err;
1143
1144 return gt_reset(gt, stalled_mask);
1145 }
1146
1147 static int resume(struct intel_gt *gt)
1148 {
1149 struct intel_engine_cs *engine;
1150 enum intel_engine_id id;
1151 int ret;
1152
1153 for_each_engine(engine, gt, id) {
1154 ret = intel_engine_resume(engine);
1155 if (ret)
1156 return ret;
1157 }
1158
1159 return 0;
1160 }
1161
1162 /**
1163 * intel_gt_reset - reset chip after a hang
1164 * @gt: #intel_gt to reset
1165 * @stalled_mask: mask of the stalled engines with the guilty requests
1166 * @reason: user error message for why we are resetting
1167 *
1168 * Reset the chip. Useful if a hang is detected. Marks the device as wedged
1169 * on failure.
1170 *
1171 * Procedure is fairly simple:
1172 * - reset the chip using the reset reg
1173 * - re-init context state
1174 * - re-init hardware status page
1175 * - re-init ring buffer
1176 * - re-init interrupt state
1177 * - re-init display
1178 */
1179 void intel_gt_reset(struct intel_gt *gt,
1180 intel_engine_mask_t stalled_mask,
1181 const char *reason)
1182 {
1183 intel_engine_mask_t awake;
1184 int ret;
1185
1186 GT_TRACE(gt, "flags=%lx\n", gt->reset.flags);
1187
1188 might_sleep();
1189 GEM_BUG_ON(!test_bit(I915_RESET_BACKOFF, &gt->reset.flags));
1190
1191 /*
1192 * FIXME: Revoking cpu mmap ptes cannot be done from a dma_fence
1193 * critical section like gpu reset.
1194 */
1195 gt_revoke(gt);
1196
1197 mutex_lock(&gt->reset.mutex);
1198
1199 /* Clear any previous failed attempts at recovery. Time to try again. */
1200 if (!__intel_gt_unset_wedged(gt))
1201 goto unlock;
1202
1203 if (reason)
1204 drm_notice(&gt->i915->drm,
1205 "Resetting chip for %s\n", reason);
1206 atomic_inc(&gt->i915->gpu_error.reset_count);
1207
1208 awake = reset_prepare(gt);
1209
1210 if (!intel_has_gpu_reset(gt)) {
1211 if (gt->i915->params.reset)
1212 drm_err(&gt->i915->drm, "GPU reset not supported\n");
1213 else
1214 drm_dbg(&gt->i915->drm, "GPU reset disabled\n");
1215 goto error;
1216 }
1217
1218 if (INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
1219 intel_runtime_pm_disable_interrupts(gt->i915);
1220
1221 if (do_reset(gt, stalled_mask)) {
1222 drm_err(&gt->i915->drm, "Failed to reset chip\n");
1223 goto taint;
1224 }
1225
1226 if (INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
1227 intel_runtime_pm_enable_interrupts(gt->i915);
1228
1229 intel_overlay_reset(gt->i915);
1230
1231 /*
1232 * Next we need to restore the context, but we don't use those
1233 * yet either...
1234 *
1235 * Ring buffer needs to be re-initialized in the KMS case, or if X
1236 * was running at the time of the reset (i.e. we weren't VT
1237 * switched away).
1238 */
1239 ret = intel_gt_init_hw(gt);
1240 if (ret) {
1241 drm_err(&gt->i915->drm,
1242 "Failed to initialise HW following reset (%d)\n",
1243 ret);
1244 goto taint;
1245 }
1246
1247 ret = resume(gt);
1248 if (ret)
1249 goto taint;
1250
1251 finish:
1252 reset_finish(gt, awake);
1253 unlock:
1254 mutex_unlock(&gt->reset.mutex);
1255 return;
1256
1257 taint:
1258 /*
1259 * History tells us that if we cannot reset the GPU now, we
1260 * never will. This then impacts everything that is run
1261 * subsequently. On failing the reset, we mark the driver
1262 * as wedged, preventing further execution on the GPU.
1263 * We also want to go one step further and add a taint to the
1264 * kernel so that any subsequent faults can be traced back to
1265 * this failure. This is important for CI, where if the
1266 * GPU/driver fails we would like to reboot and restart testing
1267 * rather than continue on into oblivion. For everyone else,
1268 * the system should still plod along, but they have been warned!
1269 */
1270 add_taint_for_CI(gt->i915, TAINT_WARN);
1271 error:
1272 __intel_gt_set_wedged(gt);
1273 goto finish;
1274 }
1275
1276 static int intel_gt_reset_engine(struct intel_engine_cs *engine)
1277 {
1278 return __intel_gt_reset(engine->gt, engine->mask);
1279 }
1280
1281 int __intel_engine_reset_bh(struct intel_engine_cs *engine, const char *msg)
1282 {
1283 struct intel_gt *gt = engine->gt;
1284 int ret;
1285
1286 ENGINE_TRACE(engine, "flags=%lx\n", gt->reset.flags);
1287 GEM_BUG_ON(!test_bit(I915_RESET_ENGINE + engine->id, &gt->reset.flags));
1288
1289 if (intel_engine_uses_guc(engine))
1290 return -ENODEV;
1291
1292 if (!intel_engine_pm_get_if_awake(engine))
1293 return 0;
1294
1295 reset_prepare_engine(engine);
1296
1297 if (msg)
1298 drm_notice(&engine->i915->drm,
1299 "Resetting %s for %s\n", engine->name, msg);
1300 i915_increase_reset_engine_count(&engine->i915->gpu_error, engine);
1301
1302 ret = intel_gt_reset_engine(engine);
1303 if (ret) {
1304 /* If we fail here, we expect to fallback to a global reset */
1305 ENGINE_TRACE(engine, "Failed to reset %s, err: %d\n", engine->name, ret);
1306 goto out;
1307 }
1308
1309 /*
1310 * The request that caused the hang is stuck on elsp; we know the
1311 * active request and can drop it, then adjust the head to skip the
1312 * offending request and resume executing the remaining requests in the queue.
1313 */
1314 __intel_engine_reset(engine, true);
1315
1316 /*
1317 * The engine and its registers (and workarounds in case of render)
1318 * have been reset to their default values. Follow the init_ring
1319 * process to program RING_MODE, HWSP and re-enable submission.
1320 */
1321 ret = intel_engine_resume(engine);
1322
1323 out:
1324 intel_engine_cancel_stop_cs(engine);
1325 reset_finish_engine(engine);
1326 intel_engine_pm_put_async(engine);
1327 return ret;
1328 }
1329
1330 /**
1331 * intel_engine_reset - reset GPU engine to recover from a hang
1332 * @engine: engine to reset
1333 * @msg: reason for GPU reset; or NULL for no drm_notice()
1334 *
1335 * Reset a specific GPU engine. Useful if a hang is detected.
1336 * Returns zero on successful reset or otherwise an error code.
1337 *
1338 * Procedure is:
1339 * - identify the request that caused the hang and drop it
1340 * - reset engine (which will force the engine to idle)
1341 * - re-init/configure engine
1342 */
1343 int intel_engine_reset(struct intel_engine_cs *engine, const char *msg)
1344 {
1345 int err;
1346
1347 local_bh_disable();
1348 err = __intel_engine_reset_bh(engine, msg);
1349 local_bh_enable();
1350
1351 return err;
1352 }
1353
1354 static void intel_gt_reset_global(struct intel_gt *gt,
1355 u32 engine_mask,
1356 const char *reason)
1357 {
1358 struct kobject *kobj = &gt->i915->drm.primary->kdev->kobj;
1359 char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
1360 char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
1361 char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
1362 struct intel_wedge_me w;
1363
1364 kobject_uevent_env(kobj, KOBJ_CHANGE, error_event);
1365
1366 GT_TRACE(gt, "resetting chip, engines=%x\n", engine_mask);
1367 kobject_uevent_env(kobj, KOBJ_CHANGE, reset_event);
1368
1369 /* Use a watchdog to ensure that our reset completes */
1370 intel_wedge_on_timeout(&w, gt, 60 * HZ) {
1371 intel_display_reset_prepare(gt->i915);
1372
1373 intel_gt_reset(gt, engine_mask, reason);
1374
1375 intel_display_reset_finish(gt->i915);
1376 }
1377
1378 if (!test_bit(I915_WEDGED, &gt->reset.flags))
1379 kobject_uevent_env(kobj, KOBJ_CHANGE, reset_done_event);
1380 }
1381
1382 /**
1383 * intel_gt_handle_error - handle a gpu error
1384 * @gt: the intel_gt
1385 * @engine_mask: mask representing engines that are hung
1386 * @flags: control flags
1387 * @fmt: Error message format string
1388 *
1389 * Do some basic checking of register state at error time and
1390 * dump it to the syslog. Also call i915_capture_error_state() to make
1391 * sure we get a record and make it available in debugfs. Fire a uevent
1392 * so userspace knows something bad happened (should trigger collection
1393 * of a ring dump etc.).
1394 */
1395 void intel_gt_handle_error(struct intel_gt *gt,
1396 intel_engine_mask_t engine_mask,
1397 unsigned long flags,
1398 const char *fmt, ...)
1399 {
1400 struct intel_engine_cs *engine;
1401 intel_wakeref_t wakeref;
1402 intel_engine_mask_t tmp;
1403 char error_msg[80];
1404 char *msg = NULL;
1405
1406 if (fmt) {
1407 va_list args;
1408
1409 va_start(args, fmt);
1410 vscnprintf(error_msg, sizeof(error_msg), fmt, args);
1411 va_end(args);
1412
1413 msg = error_msg;
1414 }
1415
1416 /*
1417 * In most cases it's guaranteed that we get here with an RPM
1418 * reference held, for example because there is a pending GPU
1419 * request that won't finish until the reset is done. This
1420 * isn't the case at least when we get here by doing a
1421 * simulated reset via debugfs, so get an RPM reference.
1422 */
1423 wakeref = intel_runtime_pm_get(gt->uncore->rpm);
1424
1425 engine_mask &= gt->info.engine_mask;
1426
1427 if (flags & I915_ERROR_CAPTURE) {
1428 i915_capture_error_state(gt, engine_mask, CORE_DUMP_FLAG_NONE);
1429 intel_gt_clear_error_registers(gt, engine_mask);
1430 }
1431
1432 /*
1433 * Try engine reset when available. We fall back to full reset if
1434 * single reset fails.
1435 */
1436 if (!intel_uc_uses_guc_submission(&gt->uc) &&
1437 intel_has_reset_engine(gt) && !intel_gt_is_wedged(gt)) {
1438 local_bh_disable();
1439 for_each_engine_masked(engine, gt, engine_mask, tmp) {
1440 BUILD_BUG_ON(I915_RESET_MODESET >= I915_RESET_ENGINE);
1441 if (test_and_set_bit(I915_RESET_ENGINE + engine->id,
1442 &gt->reset.flags))
1443 continue;
1444
1445 if (__intel_engine_reset_bh(engine, msg) == 0)
1446 engine_mask &= ~engine->mask;
1447
1448 clear_and_wake_up_bit(I915_RESET_ENGINE + engine->id,
1449 &gt->reset.flags);
1450 }
1451 local_bh_enable();
1452 }
1453
1454 if (!engine_mask)
1455 goto out;
1456
1457 /* Full reset needs the mutex, stop any other user trying to do so. */
1458 if (test_and_set_bit(I915_RESET_BACKOFF, &gt->reset.flags)) {
1459 wait_event(gt->reset.queue,
1460 !test_bit(I915_RESET_BACKOFF, &gt->reset.flags));
1461 goto out; /* piggy-back on the other reset */
1462 }
1463
1464 /* Make sure i915_reset_trylock() sees the I915_RESET_BACKOFF */
1465 synchronize_rcu_expedited();
1466
1467 /*
1468 * Prevent any other reset-engine attempt. We don't do this for GuC
1469 * submission since the GuC owns the per-engine reset, not the i915.
1470 */
1471 if (!intel_uc_uses_guc_submission(&gt->uc)) {
1472 for_each_engine(engine, gt, tmp) {
1473 while (test_and_set_bit(I915_RESET_ENGINE + engine->id,
1474 &gt->reset.flags))
1475 wait_on_bit(&gt->reset.flags,
1476 I915_RESET_ENGINE + engine->id,
1477 TASK_UNINTERRUPTIBLE);
1478 }
1479 }
1480
1481 /* Flush everyone using a resource about to be clobbered */
1482 synchronize_srcu_expedited(&gt->reset.backoff_srcu);
1483
1484 intel_gt_reset_global(gt, engine_mask, msg);
1485
1486 if (!intel_uc_uses_guc_submission(&gt->uc)) {
1487 for_each_engine(engine, gt, tmp)
1488 clear_bit_unlock(I915_RESET_ENGINE + engine->id,
1489 &gt->reset.flags);
1490 }
1491 clear_bit_unlock(I915_RESET_BACKOFF, &gt->reset.flags);
1492 smp_mb__after_atomic();
1493 wake_up_all(&gt->reset.queue);
1494
1495 out:
1496 intel_runtime_pm_put(gt->uncore->rpm, wakeref);
1497 }
1498
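/*
 * Take a read-side lock on the reset backoff SRCU: if a global reset
 * (I915_RESET_BACKOFF) is in flight, either fail with -EBUSY or, when retry
 * is set, sleep until it completes. The returned cookie must be handed back
 * to intel_gt_reset_unlock().
 */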
1499 static int _intel_gt_reset_lock(struct intel_gt *gt, int *srcu, bool retry)
1500 {
1501 might_lock(&gt->reset.backoff_srcu);
1502 if (retry)
1503 might_sleep();
1504
1505 rcu_read_lock();
1506 while (test_bit(I915_RESET_BACKOFF, &gt->reset.flags)) {
1507 rcu_read_unlock();
1508
1509 if (!retry)
1510 return -EBUSY;
1511
1512 if (wait_event_interruptible(gt->reset.queue,
1513 !test_bit(I915_RESET_BACKOFF,
1514 &gt->reset.flags)))
1515 return -EINTR;
1516
1517 rcu_read_lock();
1518 }
1519 *srcu = srcu_read_lock(&gt->reset.backoff_srcu);
1520 rcu_read_unlock();
1521
1522 return 0;
1523 }
1524
1525 int intel_gt_reset_trylock(struct intel_gt *gt, int *srcu)
1526 {
1527 return _intel_gt_reset_lock(gt, srcu, false);
1528 }
1529
1530 int intel_gt_reset_lock_interruptible(struct intel_gt *gt, int *srcu)
1531 {
1532 return _intel_gt_reset_lock(gt, srcu, true);
1533 }
1534
1535 void intel_gt_reset_unlock(struct intel_gt *gt, int tag)
1536 __releases(&gt->reset.backoff_srcu)
1537 {
1538 srcu_read_unlock(&gt->reset.backoff_srcu, tag);
1539 }
1540
1541 int intel_gt_terminally_wedged(struct intel_gt *gt)
1542 {
1543 might_sleep();
1544
1545 if (!intel_gt_is_wedged(gt))
1546 return 0;
1547
1548 if (intel_gt_has_unrecoverable_error(gt))
1549 return -EIO;
1550
1551 /* Reset still in progress? Maybe we will recover? */
1552 if (wait_event_interruptible(gt->reset.queue,
1553 !test_bit(I915_RESET_BACKOFF,
1554 &gt->reset.flags)))
1555 return -EINTR;
1556
1557 return intel_gt_is_wedged(gt) ? -EIO : 0;
1558 }
1559
1560 void intel_gt_set_wedged_on_init(struct intel_gt *gt)
1561 {
1562 BUILD_BUG_ON(I915_RESET_ENGINE + I915_NUM_ENGINES >
1563 I915_WEDGED_ON_INIT);
1564 intel_gt_set_wedged(gt);
1565 i915_disable_error_state(gt->i915, -ENODEV);
1566 set_bit(I915_WEDGED_ON_INIT, &gt->reset.flags);
1567
1568 /* Wedged on init is non-recoverable */
1569 add_taint_for_CI(gt->i915, TAINT_WARN);
1570 }
1571
1572 void intel_gt_set_wedged_on_fini(struct intel_gt *gt)
1573 {
1574 intel_gt_set_wedged(gt);
1575 i915_disable_error_state(gt->i915, -ENODEV);
1576 set_bit(I915_WEDGED_ON_FINI, &gt->reset.flags);
1577 intel_gt_retire_requests(gt); /* cleanup any wedged requests */
1578 }
1579
1580 void intel_gt_init_reset(struct intel_gt *gt)
1581 {
1582 init_waitqueue_head(&gt->reset.queue);
1583 mutex_init(&gt->reset.mutex);
1584 init_srcu_struct(&gt->reset.backoff_srcu);
1585
1586 /*
1587 * While undesirable to wait inside the shrinker, complain anyway.
1588 *
1589 * If we have to wait during shrinking, we guarantee forward progress
1590 * by forcing the reset. Therefore during the reset we must not
1591 * re-enter the shrinker. By declaring that we take the reset mutex
1592 * within the shrinker, we forbid ourselves from performing any
1593 * fs-reclaim or taking related locks during reset.
1594 */
1595 i915_gem_shrinker_taints_mutex(gt->i915, &gt->reset.mutex);
1596
1597 /* no GPU until we are ready! */
1598 __set_bit(I915_WEDGED, &gt->reset.flags);
1599 }
1600
1601 void intel_gt_fini_reset(struct intel_gt *gt)
1602 {
1603 cleanup_srcu_struct(&gt->reset.backoff_srcu);
1604 }
1605
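/*
 * intel_wedge_me: watchdog for reset paths that must make forward progress.
 * __intel_init_wedge() arms a delayed work; if __intel_fini_wedge() is not
 * reached before the timeout, the work fires and wedges the GT to unblock
 * whoever is stuck.
 */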
1606 static void intel_wedge_me(struct work_struct *work)
1607 {
1608 struct intel_wedge_me *w = container_of(work, typeof(*w), work.work);
1609
1610 drm_err(&w->gt->i915->drm,
1611 "%s timed out, cancelling all in-flight rendering.\n",
1612 w->name);
1613 intel_gt_set_wedged(w->gt);
1614 }
1615
1616 void __intel_init_wedge(struct intel_wedge_me *w,
1617 struct intel_gt *gt,
1618 long timeout,
1619 const char *name)
1620 {
1621 w->gt = gt;
1622 w->name = name;
1623
1624 INIT_DELAYED_WORK_ONSTACK(&w->work, intel_wedge_me);
1625 queue_delayed_work(gt->i915->unordered_wq, &w->work, timeout);
1626 }
1627
1628 void __intel_fini_wedge(struct intel_wedge_me *w)
1629 {
1630 cancel_delayed_work_sync(&w->work);
1631 destroy_delayed_work_on_stack(&w->work);
1632 w->gt = NULL;
1633 }
1634
1635 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
1636 #include "selftest_reset.c"
1637 #include "selftest_hangcheck.c"
1638 #endif
1639