// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include <linux/sched/clock.h>

#include "i915_drv.h"
#include "i915_irq.h"
#include "intel_breadcrumbs.h"
#include "intel_gt.h"
#include "intel_gt_irq.h"
#include "intel_gt_regs.h"
#include "intel_uncore.h"
#include "intel_rps.h"
#include "pxp/intel_pxp_irq.h"

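/*
 * GT interrupt handling: the gen11+ paths below decode interrupts through
 * banked identity registers, while the gen5/6/8 paths decode fixed
 * per-engine bitfields in the GT IIR registers.
 */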
static void guc_irq_handler(struct intel_guc *guc, u16 iir)
{
	if (iir & GUC_INTR_GUC2HOST)
		intel_guc_to_host_event_handler(guc);
}

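/*
 * Selector/identity handshake: write the bank's selector bit, poll the
 * identity register until the hardware marks it valid, then acknowledge
 * by writing the valid bit back.
 */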
static u32
gen11_gt_engine_identity(struct intel_gt *gt,
			 const unsigned int bank, const unsigned int bit)
{
	void __iomem * const regs = gt->uncore->regs;
	u32 timeout_ts;
	u32 ident;

	lockdep_assert_held(&gt->irq_lock);

	raw_reg_write(regs, GEN11_IIR_REG_SELECTOR(bank), BIT(bit));

	/*
	 * NB: Specs do not specify how long to spin wait,
	 * so we do ~100us as an educated guess.
	 */
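	/*
	 * local_clock() returns nanoseconds; shifting right by 10 divides
	 * by 1024, which approximates a microsecond timebase for the
	 * ~100us budget.
	 */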
	timeout_ts = (local_clock() >> 10) + 100;
	do {
		ident = raw_reg_read(regs, GEN11_INTR_IDENTITY_REG(bank));
	} while (!(ident & GEN11_INTR_DATA_VALID) &&
		 !time_after32(local_clock() >> 10, timeout_ts));

	if (unlikely(!(ident & GEN11_INTR_DATA_VALID))) {
		DRM_ERROR("INTR_IDENTITY_REG%u:%u 0x%08x not valid!\n",
			  bank, bit, ident);
		return 0;
	}

	raw_reg_write(regs, GEN11_INTR_IDENTITY_REG(bank),
		      GEN11_INTR_DATA_VALID);

	return ident;
}

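/*
 * OTHER_CLASS interrupts are not tied to an engine; the instance field
 * selects a non-engine unit instead: GuC, GT PM (RPS), KCR (PXP) or GSC.
 */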
static void
gen11_other_irq_handler(struct intel_gt *gt, const u8 instance,
			const u16 iir)
{
	if (instance == OTHER_GUC_INSTANCE)
		return guc_irq_handler(&gt->uc.guc, iir);

	if (instance == OTHER_GTPM_INSTANCE)
		return gen11_rps_irq_handler(&gt->rps, iir);

	if (instance == OTHER_KCR_INSTANCE)
		return intel_pxp_irq_handler(&gt->pxp, iir);

	if (instance == OTHER_GSC_INSTANCE)
		return intel_gsc_irq_handler(gt, iir);

	WARN_ONCE(1, "unhandled other interrupt instance=0x%x, iir=0x%x\n",
		  instance, iir);
}

static void
gen11_engine_irq_handler(struct intel_gt *gt, const u8 class,
			 const u8 instance, const u16 iir)
{
	struct intel_engine_cs *engine;

	if (instance <= MAX_ENGINE_INSTANCE)
		engine = gt->engine_class[class][instance];
	else
		engine = NULL;

	if (likely(engine))
		return intel_engine_cs_irq(engine, iir);

	WARN_ONCE(1, "unhandled engine interrupt class=0x%x, instance=0x%x\n",
		  class, instance);
}

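/*
 * The identity dword packs three fields, extracted by the
 * GEN11_INTR_ENGINE_* macros: the engine class, the instance within that
 * class, and the 16-bit interrupt payload itself.
 */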
static void
gen11_gt_identity_handler(struct intel_gt *gt, const u32 identity)
{
	const u8 class = GEN11_INTR_ENGINE_CLASS(identity);
	const u8 instance = GEN11_INTR_ENGINE_INSTANCE(identity);
	const u16 intr = GEN11_INTR_ENGINE_INTR(identity);

	if (unlikely(!intr))
		return;

	if (class <= COPY_ENGINE_CLASS || class == COMPUTE_CLASS)
		return gen11_engine_irq_handler(gt, class, instance, intr);

	if (class == OTHER_CLASS)
		return gen11_other_irq_handler(gt, instance, intr);

	WARN_ONCE(1, "unknown interrupt class=0x%x, instance=0x%x, intr=0x%x\n",
		  class, instance, intr);
}

static void
gen11_gt_bank_handler(struct intel_gt *gt, const unsigned int bank)
{
	void __iomem * const regs = gt->uncore->regs;
	unsigned long intr_dw;
	unsigned int bit;

	lockdep_assert_held(&gt->irq_lock);

	intr_dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank));

	for_each_set_bit(bit, &intr_dw, 32) {
		const u32 ident = gen11_gt_engine_identity(gt, bank, bit);

		gen11_gt_identity_handler(gt, ident);
	}

	/* Clear must happen after the shared IIRs have been serviced for the engines */
	raw_reg_write(regs, GEN11_GT_INTR_DW(bank), intr_dw);
}

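/*
 * Gen11+ exposes two GT interrupt banks; master_ctl flags which banks
 * have pending interrupts, and each flagged bank is drained in turn
 * under the GT irq lock.
 */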
void gen11_gt_irq_handler(struct intel_gt *gt, const u32 master_ctl)
{
	unsigned int bank;

	spin_lock(&gt->irq_lock);

	for (bank = 0; bank < 2; bank++) {
		if (master_ctl & GEN11_GT_DW_IRQ(bank))
			gen11_gt_bank_handler(gt, bank);
	}

	spin_unlock(&gt->irq_lock);
}

bool gen11_gt_reset_one_iir(struct intel_gt *gt,
			    const unsigned int bank, const unsigned int bit)
{
	void __iomem * const regs = gt->uncore->regs;
	u32 dw;

	lockdep_assert_held(&gt->irq_lock);

	dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank));
	if (dw & BIT(bit)) {
		/*
		 * According to the BSpec, DW_IIR bits cannot be cleared without
		 * first servicing the Selector & Shared IIR registers.
		 */
		gen11_gt_engine_identity(gt, bank, bit);

		/*
		 * We locked GT INT DW by reading it. If we want to (try
		 * to) recover from this successfully, we need to clear
		 * our bit, otherwise we are locking the register for
		 * everybody.
		 */
		raw_reg_write(regs, GEN11_GT_INTR_DW(bank), BIT(bit));

		return true;
	}

	return false;
}

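/*
 * Reset follows a two-step pattern: write 0 to the per-class ENABLE
 * registers to stop interrupt generation, then write ~0 to the MASK
 * registers so nothing reaches the CPU until postinstall re-enables it.
 */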
void gen11_gt_irq_reset(struct intel_gt *gt)
{
	struct intel_uncore *uncore = gt->uncore;

	/* Disable RCS, BCS, VCS and VECS class engines. */
	intel_uncore_write(uncore, GEN11_RENDER_COPY_INTR_ENABLE, 0);
	intel_uncore_write(uncore, GEN11_VCS_VECS_INTR_ENABLE, 0);
	if (CCS_MASK(gt))
		intel_uncore_write(uncore, GEN12_CCS_RSVD_INTR_ENABLE, 0);
	if (HAS_HECI_GSC(gt->i915))
		intel_uncore_write(uncore, GEN11_GUNIT_CSME_INTR_ENABLE, 0);

	/* Mask all irqs on RCS, BCS, VCS and VECS engines. */
	intel_uncore_write(uncore, GEN11_RCS0_RSVD_INTR_MASK, ~0);
	intel_uncore_write(uncore, GEN11_BCS_RSVD_INTR_MASK, ~0);
	intel_uncore_write(uncore, GEN11_VCS0_VCS1_INTR_MASK, ~0);
	intel_uncore_write(uncore, GEN11_VCS2_VCS3_INTR_MASK, ~0);
	if (HAS_ENGINE(gt, VCS4) || HAS_ENGINE(gt, VCS5))
		intel_uncore_write(uncore, GEN12_VCS4_VCS5_INTR_MASK, ~0);
	if (HAS_ENGINE(gt, VCS6) || HAS_ENGINE(gt, VCS7))
		intel_uncore_write(uncore, GEN12_VCS6_VCS7_INTR_MASK, ~0);
	intel_uncore_write(uncore, GEN11_VECS0_VECS1_INTR_MASK, ~0);
	if (HAS_ENGINE(gt, VECS2) || HAS_ENGINE(gt, VECS3))
		intel_uncore_write(uncore, GEN12_VECS2_VECS3_INTR_MASK, ~0);
	if (HAS_ENGINE(gt, CCS0) || HAS_ENGINE(gt, CCS1))
		intel_uncore_write(uncore, GEN12_CCS0_CCS1_INTR_MASK, ~0);
	if (HAS_ENGINE(gt, CCS2) || HAS_ENGINE(gt, CCS3))
		intel_uncore_write(uncore, GEN12_CCS2_CCS3_INTR_MASK, ~0);
	if (HAS_HECI_GSC(gt->i915))
		intel_uncore_write(uncore, GEN11_GUNIT_CSME_INTR_MASK, ~0);

	intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0);
	intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_MASK, ~0);
	intel_uncore_write(uncore, GEN11_GUC_SG_INTR_ENABLE, 0);
	intel_uncore_write(uncore, GEN11_GUC_SG_INTR_MASK, ~0);

	intel_uncore_write(uncore, GEN11_CRYPTO_RSVD_INTR_ENABLE, 0);
	intel_uncore_write(uncore, GEN11_CRYPTO_RSVD_INTR_MASK, ~0);
}

void gen11_gt_irq_postinstall(struct intel_gt *gt)
{
	struct intel_uncore *uncore = gt->uncore;
	u32 irqs = GT_RENDER_USER_INTERRUPT;
	const u32 gsc_mask = GSC_IRQ_INTF(0) | GSC_IRQ_INTF(1);
	u32 dmask;
	u32 smask;

	if (!intel_uc_wants_guc_submission(&gt->uc))
		irqs |= GT_CS_MASTER_ERROR_INTERRUPT |
			GT_CONTEXT_SWITCH_INTERRUPT |
			GT_WAIT_SEMAPHORE_INTERRUPT;

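	/*
	 * Each enable/mask register serves two engines, one per 16-bit
	 * half: dmask programs both halves, smask only the upper one.
	 */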
	dmask = irqs << 16 | irqs;
	smask = irqs << 16;

	BUILD_BUG_ON(irqs & 0xffff0000);

	/* Enable RCS, BCS, VCS and VECS class interrupts. */
	intel_uncore_write(uncore, GEN11_RENDER_COPY_INTR_ENABLE, dmask);
	intel_uncore_write(uncore, GEN11_VCS_VECS_INTR_ENABLE, dmask);
	if (CCS_MASK(gt))
		intel_uncore_write(uncore, GEN12_CCS_RSVD_INTR_ENABLE, smask);
	if (HAS_HECI_GSC(gt->i915))
		intel_uncore_write(uncore, GEN11_GUNIT_CSME_INTR_ENABLE,
				   gsc_mask);

	/* Unmask irqs on RCS, BCS, VCS and VECS engines. */
	intel_uncore_write(uncore, GEN11_RCS0_RSVD_INTR_MASK, ~smask);
	intel_uncore_write(uncore, GEN11_BCS_RSVD_INTR_MASK, ~smask);
	intel_uncore_write(uncore, GEN11_VCS0_VCS1_INTR_MASK, ~dmask);
	intel_uncore_write(uncore, GEN11_VCS2_VCS3_INTR_MASK, ~dmask);
	if (HAS_ENGINE(gt, VCS4) || HAS_ENGINE(gt, VCS5))
		intel_uncore_write(uncore, GEN12_VCS4_VCS5_INTR_MASK, ~dmask);
	if (HAS_ENGINE(gt, VCS6) || HAS_ENGINE(gt, VCS7))
		intel_uncore_write(uncore, GEN12_VCS6_VCS7_INTR_MASK, ~dmask);
	intel_uncore_write(uncore, GEN11_VECS0_VECS1_INTR_MASK, ~dmask);
	if (HAS_ENGINE(gt, VECS2) || HAS_ENGINE(gt, VECS3))
		intel_uncore_write(uncore, GEN12_VECS2_VECS3_INTR_MASK, ~dmask);
	if (HAS_ENGINE(gt, CCS0) || HAS_ENGINE(gt, CCS1))
		intel_uncore_write(uncore, GEN12_CCS0_CCS1_INTR_MASK, ~dmask);
	if (HAS_ENGINE(gt, CCS2) || HAS_ENGINE(gt, CCS3))
		intel_uncore_write(uncore, GEN12_CCS2_CCS3_INTR_MASK, ~dmask);
	if (HAS_HECI_GSC(gt->i915))
		intel_uncore_write(uncore, GEN11_GUNIT_CSME_INTR_MASK, ~gsc_mask);

	/*
	 * RPS interrupts will get enabled/disabled on demand when RPS itself
	 * is enabled/disabled.
	 */
	gt->pm_ier = 0x0;
	gt->pm_imr = ~gt->pm_ier;
	intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0);
	intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_MASK, ~0);

	/* Same thing for GuC interrupts */
	intel_uncore_write(uncore, GEN11_GUC_SG_INTR_ENABLE, 0);
	intel_uncore_write(uncore, GEN11_GUC_SG_INTR_MASK, ~0);
}

void gen5_gt_irq_handler(struct intel_gt *gt, u32 gt_iir)
{
	if (gt_iir & GT_RENDER_USER_INTERRUPT)
		intel_engine_cs_irq(gt->engine_class[RENDER_CLASS][0],
				    gt_iir);

	if (gt_iir & ILK_BSD_USER_INTERRUPT)
		intel_engine_cs_irq(gt->engine_class[VIDEO_DECODE_CLASS][0],
				    gt_iir);
}

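/*
 * L3 parity errors are latched per slice. Parity interrupts are masked
 * here before the slice bits are recorded; the queued error work is then
 * expected to handle the fault and re-arm the interrupt.
 */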
static void gen7_parity_error_irq_handler(struct intel_gt *gt, u32 iir)
{
	if (!HAS_L3_DPF(gt->i915))
		return;

	spin_lock(&gt->irq_lock);
	gen5_gt_disable_irq(gt, GT_PARITY_ERROR(gt->i915));
	spin_unlock(&gt->irq_lock);

	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
		gt->i915->l3_parity.which_slice |= 1 << 1;

	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
		gt->i915->l3_parity.which_slice |= 1 << 0;

	schedule_work(&gt->i915->l3_parity.error_work);
}

void gen6_gt_irq_handler(struct intel_gt *gt, u32 gt_iir)
{
	if (gt_iir & GT_RENDER_USER_INTERRUPT)
		intel_engine_cs_irq(gt->engine_class[RENDER_CLASS][0],
				    gt_iir);

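	/*
	 * Each engine's bits sit at a fixed shift within GTIIR; shifting
	 * the value down moves that engine's user-interrupt bit into the
	 * position intel_engine_cs_irq() tests.
	 */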
	if (gt_iir & GT_BSD_USER_INTERRUPT)
		intel_engine_cs_irq(gt->engine_class[VIDEO_DECODE_CLASS][0],
				    gt_iir >> 12);

	if (gt_iir & GT_BLT_USER_INTERRUPT)
		intel_engine_cs_irq(gt->engine_class[COPY_ENGINE_CLASS][0],
				    gt_iir >> 22);

	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
		      GT_BSD_CS_ERROR_INTERRUPT |
		      GT_CS_MASTER_ERROR_INTERRUPT))
		DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir);

	if (gt_iir & GT_PARITY_ERROR(gt->i915))
		gen7_parity_error_irq_handler(gt, gt_iir);
}

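/*
 * Gen8 spreads engines across four GT IIR registers, up to two engines
 * per register in 16-bit halves at fixed shifts; IIR(2) carries PM bits
 * in the lower half and GuC bits in the upper half.
 */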
void gen8_gt_irq_handler(struct intel_gt *gt, u32 master_ctl)
{
	void __iomem * const regs = gt->uncore->regs;
	u32 iir;

	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
		iir = raw_reg_read(regs, GEN8_GT_IIR(0));
		if (likely(iir)) {
			intel_engine_cs_irq(gt->engine_class[RENDER_CLASS][0],
					    iir >> GEN8_RCS_IRQ_SHIFT);
			intel_engine_cs_irq(gt->engine_class[COPY_ENGINE_CLASS][0],
					    iir >> GEN8_BCS_IRQ_SHIFT);
			raw_reg_write(regs, GEN8_GT_IIR(0), iir);
		}
	}

	if (master_ctl & (GEN8_GT_VCS0_IRQ | GEN8_GT_VCS1_IRQ)) {
		iir = raw_reg_read(regs, GEN8_GT_IIR(1));
		if (likely(iir)) {
			intel_engine_cs_irq(gt->engine_class[VIDEO_DECODE_CLASS][0],
					    iir >> GEN8_VCS0_IRQ_SHIFT);
			intel_engine_cs_irq(gt->engine_class[VIDEO_DECODE_CLASS][1],
					    iir >> GEN8_VCS1_IRQ_SHIFT);
			raw_reg_write(regs, GEN8_GT_IIR(1), iir);
		}
	}

	if (master_ctl & GEN8_GT_VECS_IRQ) {
		iir = raw_reg_read(regs, GEN8_GT_IIR(3));
		if (likely(iir)) {
			intel_engine_cs_irq(gt->engine_class[VIDEO_ENHANCEMENT_CLASS][0],
					    iir >> GEN8_VECS_IRQ_SHIFT);
			raw_reg_write(regs, GEN8_GT_IIR(3), iir);
		}
	}

	if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) {
		iir = raw_reg_read(regs, GEN8_GT_IIR(2));
		if (likely(iir)) {
			gen6_rps_irq_handler(&gt->rps, iir);
			guc_irq_handler(&gt->uc.guc, iir >> 16);
			raw_reg_write(regs, GEN8_GT_IIR(2), iir);
		}
	}
}

void gen8_gt_irq_reset(struct intel_gt *gt)
{
	struct intel_uncore *uncore = gt->uncore;

	GEN8_IRQ_RESET_NDX(uncore, GT, 0);
	GEN8_IRQ_RESET_NDX(uncore, GT, 1);
	GEN8_IRQ_RESET_NDX(uncore, GT, 2);
	GEN8_IRQ_RESET_NDX(uncore, GT, 3);
}

void gen8_gt_irq_postinstall(struct intel_gt *gt)
{
	/* These are interrupts we'll toggle with the ring mask register */
	const u32 irqs =
		GT_CS_MASTER_ERROR_INTERRUPT |
		GT_RENDER_USER_INTERRUPT |
		GT_CONTEXT_SWITCH_INTERRUPT |
		GT_WAIT_SEMAPHORE_INTERRUPT;
	const u32 gt_interrupts[] = {
		irqs << GEN8_RCS_IRQ_SHIFT | irqs << GEN8_BCS_IRQ_SHIFT,
		irqs << GEN8_VCS0_IRQ_SHIFT | irqs << GEN8_VCS1_IRQ_SHIFT,
		0,
		irqs << GEN8_VECS_IRQ_SHIFT,
	};
	struct intel_uncore *uncore = gt->uncore;

	gt->pm_ier = 0x0;
	gt->pm_imr = ~gt->pm_ier;
	GEN8_IRQ_INIT_NDX(uncore, GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
	GEN8_IRQ_INIT_NDX(uncore, GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
	/*
	 * RPS interrupts will get enabled/disabled on demand when RPS itself
	 * is enabled/disabled. The same will be the case for GuC interrupts.
	 */
	GEN8_IRQ_INIT_NDX(uncore, GT, 2, gt->pm_imr, gt->pm_ier);
	GEN8_IRQ_INIT_NDX(uncore, GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
}

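/*
 * GTIMR semantics: a set bit masks (disables) that interrupt, so bits in
 * enabled_irq_mask are cleared in the IMR and the remainder of
 * interrupt_mask is set.
 */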
static void gen5_gt_update_irq(struct intel_gt *gt,
			       u32 interrupt_mask,
			       u32 enabled_irq_mask)
{
	lockdep_assert_held(&gt->irq_lock);

	GEM_BUG_ON(enabled_irq_mask & ~interrupt_mask);

	gt->gt_imr &= ~interrupt_mask;
	gt->gt_imr |= (~enabled_irq_mask & interrupt_mask);
	intel_uncore_write(gt->uncore, GTIMR, gt->gt_imr);
}

void gen5_gt_enable_irq(struct intel_gt *gt, u32 mask)
{
	gen5_gt_update_irq(gt, mask, mask);
	intel_uncore_posting_read_fw(gt->uncore, GTIMR);
}

void gen5_gt_disable_irq(struct intel_gt *gt, u32 mask)
{
	gen5_gt_update_irq(gt, mask, 0);
}

void gen5_gt_irq_reset(struct intel_gt *gt)
{
	struct intel_uncore *uncore = gt->uncore;

	GEN3_IRQ_RESET(uncore, GT);
	if (GRAPHICS_VER(gt->i915) >= 6)
		GEN3_IRQ_RESET(uncore, GEN6_PM);
}

void gen5_gt_irq_postinstall(struct intel_gt *gt)
{
	struct intel_uncore *uncore = gt->uncore;
	u32 pm_irqs = 0;
	u32 gt_irqs = 0;

	gt->gt_imr = ~0;
	if (HAS_L3_DPF(gt->i915)) {
		/* L3 parity interrupt is always unmasked. */
		gt->gt_imr = ~GT_PARITY_ERROR(gt->i915);
		gt_irqs |= GT_PARITY_ERROR(gt->i915);
	}

	gt_irqs |= GT_RENDER_USER_INTERRUPT;
	if (GRAPHICS_VER(gt->i915) == 5)
		gt_irqs |= ILK_BSD_USER_INTERRUPT;
	else
		gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;

	GEN3_IRQ_INIT(uncore, GT, gt->gt_imr, gt_irqs);

	if (GRAPHICS_VER(gt->i915) >= 6) {
		/*
		 * RPS interrupts will get enabled/disabled on demand when RPS
		 * itself is enabled/disabled.
		 */
		if (HAS_ENGINE(gt, VECS0)) {
			pm_irqs |= PM_VEBOX_USER_INTERRUPT;
			gt->pm_ier |= PM_VEBOX_USER_INTERRUPT;
		}

		gt->pm_imr = 0xffffffff;
		GEN3_IRQ_INIT(uncore, GEN6_PM, gt->pm_imr, pm_irqs);
	}
}