/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/circ_buf.h>
#include <linux/slab.h>
#include <linux/sysrq.h>

#include <drm/drm_drv.h>

#include "display/icl_dsi_regs.h"
#include "display/intel_de.h"
#include "display/intel_display_trace.h"
#include "display/intel_display_types.h"
#include "display/intel_fifo_underrun.h"
#include "display/intel_hotplug.h"
#include "display/intel_lpe_audio.h"
#include "display/intel_psr.h"

#include "gt/intel_breadcrumbs.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_irq.h"
#include "gt/intel_gt_pm_irq.h"
#include "gt/intel_gt_regs.h"
#include "gt/intel_rps.h"

#include "i915_driver.h"
#include "i915_drv.h"
#include "i915_irq.h"
#include "intel_pm.h"

/**
 * DOC: interrupt handling
 *
 * These functions provide the basic support for enabling and disabling the
 * interrupt handling support. There's a lot more functionality in i915_irq.c
 * and related files, but that will be described in separate chapters.
 */

/*
 * Interrupt statistic for PMU. Increments the counter only if the
 * interrupt originated from the GPU so interrupts from a device which
 * shares the interrupt line are not accounted.
 */
static inline void pmu_irq_stats(struct drm_i915_private *i915,
				 irqreturn_t res)
{
	if (unlikely(res != IRQ_HANDLED))
		return;

	/*
	 * A clever compiler translates that into INC. A not so clever one
	 * should at least prevent store tearing.
	 */
	WRITE_ONCE(i915->pmu.irq_count, i915->pmu.irq_count + 1);
}
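
/*
 * Minimal sketch of the matching consumer side, assuming the PMU code
 * samples the counter from process context (the real reader presumably
 * lives in i915_pmu.c): pairing the WRITE_ONCE() above with a READ_ONCE()
 * keeps the compiler from tearing or re-fetching the load as well, e.g.:
 *
 *	sum = READ_ONCE(i915->pmu.irq_count);
 */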

typedef bool (*long_pulse_detect_func)(enum hpd_pin pin, u32 val);
typedef u32 (*hotplug_enables_func)(struct drm_i915_private *i915,
				    enum hpd_pin pin);

static const u32 hpd_ilk[HPD_NUM_PINS] = {
	[HPD_PORT_A] = DE_DP_A_HOTPLUG,
};

static const u32 hpd_ivb[HPD_NUM_PINS] = {
	[HPD_PORT_A] = DE_DP_A_HOTPLUG_IVB,
};

static const u32 hpd_bdw[HPD_NUM_PINS] = {
	[HPD_PORT_A] = GEN8_DE_PORT_HOTPLUG(HPD_PORT_A),
};

static const u32 hpd_ibx[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG,
};

static const u32 hpd_cpt[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
};

static const u32 hpd_spt[HPD_NUM_PINS] = {
	[HPD_PORT_A] = SDE_PORTA_HOTPLUG_SPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
	[HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT,
};

static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN,
};

static const u32 hpd_status_g4x[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS,
};

static const u32 hpd_status_i915[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS,
};

static const u32 hpd_bxt[HPD_NUM_PINS] = {
	[HPD_PORT_A] = GEN8_DE_PORT_HOTPLUG(HPD_PORT_A),
	[HPD_PORT_B] = GEN8_DE_PORT_HOTPLUG(HPD_PORT_B),
	[HPD_PORT_C] = GEN8_DE_PORT_HOTPLUG(HPD_PORT_C),
};

static const u32 hpd_gen11[HPD_NUM_PINS] = {
	[HPD_PORT_TC1] = GEN11_TC_HOTPLUG(HPD_PORT_TC1) | GEN11_TBT_HOTPLUG(HPD_PORT_TC1),
	[HPD_PORT_TC2] = GEN11_TC_HOTPLUG(HPD_PORT_TC2) | GEN11_TBT_HOTPLUG(HPD_PORT_TC2),
	[HPD_PORT_TC3] = GEN11_TC_HOTPLUG(HPD_PORT_TC3) | GEN11_TBT_HOTPLUG(HPD_PORT_TC3),
	[HPD_PORT_TC4] = GEN11_TC_HOTPLUG(HPD_PORT_TC4) | GEN11_TBT_HOTPLUG(HPD_PORT_TC4),
	[HPD_PORT_TC5] = GEN11_TC_HOTPLUG(HPD_PORT_TC5) | GEN11_TBT_HOTPLUG(HPD_PORT_TC5),
	[HPD_PORT_TC6] = GEN11_TC_HOTPLUG(HPD_PORT_TC6) | GEN11_TBT_HOTPLUG(HPD_PORT_TC6),
};

static const u32 hpd_icp[HPD_NUM_PINS] = {
	[HPD_PORT_A] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_A),
	[HPD_PORT_B] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_B),
	[HPD_PORT_C] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_C),
	[HPD_PORT_TC1] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC1),
	[HPD_PORT_TC2] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC2),
	[HPD_PORT_TC3] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC3),
	[HPD_PORT_TC4] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC4),
	[HPD_PORT_TC5] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC5),
	[HPD_PORT_TC6] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC6),
};

static const u32 hpd_sde_dg1[HPD_NUM_PINS] = {
	[HPD_PORT_A] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_A),
	[HPD_PORT_B] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_B),
	[HPD_PORT_C] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_C),
	[HPD_PORT_D] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_D),
	[HPD_PORT_TC1] = SDE_TC_HOTPLUG_DG2(HPD_PORT_TC1),
};

static void intel_hpd_init_pins(struct drm_i915_private *dev_priv)
{
	struct intel_hotplug *hpd = &dev_priv->display.hotplug;

	if (HAS_GMCH(dev_priv)) {
		if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
		    IS_CHERRYVIEW(dev_priv))
			hpd->hpd = hpd_status_g4x;
		else
			hpd->hpd = hpd_status_i915;
		return;
	}

	if (DISPLAY_VER(dev_priv) >= 11)
		hpd->hpd = hpd_gen11;
	else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
		hpd->hpd = hpd_bxt;
	else if (DISPLAY_VER(dev_priv) >= 8)
		hpd->hpd = hpd_bdw;
	else if (DISPLAY_VER(dev_priv) >= 7)
		hpd->hpd = hpd_ivb;
	else
		hpd->hpd = hpd_ilk;

	if ((INTEL_PCH_TYPE(dev_priv) < PCH_DG1) &&
	    (!HAS_PCH_SPLIT(dev_priv) || HAS_PCH_NOP(dev_priv)))
		return;

	if (INTEL_PCH_TYPE(dev_priv) >= PCH_DG1)
		hpd->pch_hpd = hpd_sde_dg1;
	else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
		hpd->pch_hpd = hpd_icp;
	else if (HAS_PCH_CNP(dev_priv) || HAS_PCH_SPT(dev_priv))
		hpd->pch_hpd = hpd_spt;
	else if (HAS_PCH_LPT(dev_priv) || HAS_PCH_CPT(dev_priv))
		hpd->pch_hpd = hpd_cpt;
	else if (HAS_PCH_IBX(dev_priv))
		hpd->pch_hpd = hpd_ibx;
	else
		MISSING_CASE(INTEL_PCH_TYPE(dev_priv));
}

static void
intel_handle_vblank(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe);

	drm_crtc_handle_vblank(&crtc->base);
}

void gen3_irq_reset(struct intel_uncore *uncore, i915_reg_t imr,
		    i915_reg_t iir, i915_reg_t ier)
{
	intel_uncore_write(uncore, imr, 0xffffffff);
	intel_uncore_posting_read(uncore, imr);

	intel_uncore_write(uncore, ier, 0);

	/* IIR can theoretically queue up two events. Be paranoid. */
	intel_uncore_write(uncore, iir, 0xffffffff);
	intel_uncore_posting_read(uncore, iir);
	intel_uncore_write(uncore, iir, 0xffffffff);
	intel_uncore_posting_read(uncore, iir);
}

void gen2_irq_reset(struct intel_uncore *uncore)
{
	intel_uncore_write16(uncore, GEN2_IMR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IMR);

	intel_uncore_write16(uncore, GEN2_IER, 0);

	/* IIR can theoretically queue up two events. Be paranoid. */
	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IIR);
	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IIR);
}

/*
 * We should clear IMR at preinstall/uninstall, and just check at postinstall.
 */
static void gen3_assert_iir_is_zero(struct intel_uncore *uncore, i915_reg_t reg)
{
	u32 val = intel_uncore_read(uncore, reg);

	if (val == 0)
		return;

	drm_WARN(&uncore->i915->drm, 1,
		 "Interrupt register 0x%x is not zero: 0x%08x\n",
		 i915_mmio_reg_offset(reg), val);
	intel_uncore_write(uncore, reg, 0xffffffff);
	intel_uncore_posting_read(uncore, reg);
	intel_uncore_write(uncore, reg, 0xffffffff);
	intel_uncore_posting_read(uncore, reg);
}

static void gen2_assert_iir_is_zero(struct intel_uncore *uncore)
{
	u16 val = intel_uncore_read16(uncore, GEN2_IIR);

	if (val == 0)
		return;

	drm_WARN(&uncore->i915->drm, 1,
		 "Interrupt register 0x%x is not zero: 0x%08x\n",
		 i915_mmio_reg_offset(GEN2_IIR), val);
	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IIR);
	intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
	intel_uncore_posting_read16(uncore, GEN2_IIR);
}

void gen3_irq_init(struct intel_uncore *uncore,
		   i915_reg_t imr, u32 imr_val,
		   i915_reg_t ier, u32 ier_val,
		   i915_reg_t iir)
{
	gen3_assert_iir_is_zero(uncore, iir);

	intel_uncore_write(uncore, ier, ier_val);
	intel_uncore_write(uncore, imr, imr_val);
	intel_uncore_posting_read(uncore, imr);
}

void gen2_irq_init(struct intel_uncore *uncore,
		   u32 imr_val, u32 ier_val)
{
	gen2_assert_iir_is_zero(uncore);

	intel_uncore_write16(uncore, GEN2_IER, ier_val);
	intel_uncore_write16(uncore, GEN2_IMR, imr_val);
	intel_uncore_posting_read16(uncore, GEN2_IMR);
}
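
/*
 * Illustrative sketch of how the reset/init pair above is meant to be
 * used (DEIMR is used by ilk_update_display_irq() below; DEIIR/DEIER are
 * assumed here as its companion IIR/IER registers, and enable_mask is
 * hypothetical):
 *
 *	gen3_irq_reset(uncore, DEIMR, DEIIR, DEIER);
 *	...
 *	gen3_irq_init(uncore, DEIMR, ~enable_mask, DEIER, enable_mask, DEIIR);
 *
 * i.e. preinstall/uninstall clears IMR/IER and double-clears IIR, and
 * postinstall re-arms the same triplet, with gen3_assert_iir_is_zero()
 * catching any event that leaked in between.
 */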

/* For display hotplug interrupt */
static inline void
i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
				     u32 mask,
				     u32 bits)
{
	u32 val;

	lockdep_assert_held(&dev_priv->irq_lock);
	drm_WARN_ON(&dev_priv->drm, bits & ~mask);

	val = intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_EN);
	val &= ~mask;
	val |= bits;
	intel_uncore_write(&dev_priv->uncore, PORT_HOTPLUG_EN, val);
}

/**
 * i915_hotplug_interrupt_update - update hotplug interrupt enable
 * @dev_priv: driver private
 * @mask: bits to update
 * @bits: bits to enable
 * NOTE: the HPD enable bits are modified both inside and outside
 * of an interrupt context. To keep the read-modify-write cycles
 * from interfering with each other, these bits are protected by a
 * spinlock. Since this function is usually not called from a context
 * where the lock is held already, this function acquires the lock
 * itself. A non-locking version is also available.
 */
void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
				   u32 mask,
				   u32 bits)
{
	spin_lock_irq(&dev_priv->irq_lock);
	i915_hotplug_interrupt_update_locked(dev_priv, mask, bits);
	spin_unlock_irq(&dev_priv->irq_lock);
}
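
/*
 * Usage sketch (illustrative only, not wired up anywhere): unmask and set
 * the CRT hotplug detect bit from hpd_mask_i915[] above, then clear it
 * again. The helper takes irq_lock internally, so the caller must not
 * hold it.
 */
static void __maybe_unused
example_toggle_crt_hotplug(struct drm_i915_private *i915)
{
	/* update the CRT bit and enable it */
	i915_hotplug_interrupt_update(i915, CRT_HOTPLUG_INT_EN,
				      CRT_HOTPLUG_INT_EN);
	/* update the same bit but leave it disabled */
	i915_hotplug_interrupt_update(i915, CRT_HOTPLUG_INT_EN, 0);
}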

/**
 * ilk_update_display_irq - update DEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ilk_update_display_irq(struct drm_i915_private *dev_priv,
				   u32 interrupt_mask, u32 enabled_irq_mask)
{
	u32 new_val;

	lockdep_assert_held(&dev_priv->irq_lock);
	drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);

	new_val = dev_priv->irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->irq_mask &&
	    !drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv))) {
		dev_priv->irq_mask = new_val;
		intel_uncore_write(&dev_priv->uncore, DEIMR, dev_priv->irq_mask);
		intel_uncore_posting_read(&dev_priv->uncore, DEIMR);
	}
}

void ilk_enable_display_irq(struct drm_i915_private *i915, u32 bits)
{
	ilk_update_display_irq(i915, bits, bits);
}

void ilk_disable_display_irq(struct drm_i915_private *i915, u32 bits)
{
	ilk_update_display_irq(i915, bits, 0);
}
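
/*
 * Usage sketch (illustrative only): unmasking and re-masking the DP A
 * hotplug bit from hpd_ilk[] above. Unlike the hotplug helper earlier,
 * these wrappers expect the caller to hold irq_lock, which
 * ilk_update_display_irq() asserts via lockdep.
 */
static void __maybe_unused
example_ilk_toggle_dp_a_hotplug(struct drm_i915_private *i915)
{
	spin_lock_irq(&i915->irq_lock);
	ilk_enable_display_irq(i915, DE_DP_A_HOTPLUG);
	ilk_disable_display_irq(i915, DE_DP_A_HOTPLUG);
	spin_unlock_irq(&i915->irq_lock);
}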

/**
 * bdw_update_port_irq - update DE port interrupt
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
				u32 interrupt_mask,
				u32 enabled_irq_mask)
{
	u32 new_val;
	u32 old_val;

	lockdep_assert_held(&dev_priv->irq_lock);

	drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);

	if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
		return;

	old_val = intel_uncore_read(&dev_priv->uncore, GEN8_DE_PORT_IMR);

	new_val = old_val;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != old_val) {
		intel_uncore_write(&dev_priv->uncore, GEN8_DE_PORT_IMR, new_val);
		intel_uncore_posting_read(&dev_priv->uncore, GEN8_DE_PORT_IMR);
	}
}

/**
 * bdw_update_pipe_irq - update DE pipe interrupt
 * @dev_priv: driver private
 * @pipe: pipe whose interrupt to update
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
				enum pipe pipe, u32 interrupt_mask,
				u32 enabled_irq_mask)
{
	u32 new_val;

	lockdep_assert_held(&dev_priv->irq_lock);

	drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);

	if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
		return;

	new_val = dev_priv->de_irq_mask[pipe];
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->de_irq_mask[pipe]) {
		dev_priv->de_irq_mask[pipe] = new_val;
		intel_uncore_write(&dev_priv->uncore, GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
		intel_uncore_posting_read(&dev_priv->uncore, GEN8_DE_PIPE_IMR(pipe));
	}
}

void bdw_enable_pipe_irq(struct drm_i915_private *i915,
			 enum pipe pipe, u32 bits)
{
	bdw_update_pipe_irq(i915, pipe, bits, bits);
}

void bdw_disable_pipe_irq(struct drm_i915_private *i915,
			  enum pipe pipe, u32 bits)
{
	bdw_update_pipe_irq(i915, pipe, bits, 0);
}

/**
 * ibx_display_interrupt_update - update SDEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
					 u32 interrupt_mask,
					 u32 enabled_irq_mask)
{
	u32 sdeimr = intel_uncore_read(&dev_priv->uncore, SDEIMR);

	sdeimr &= ~interrupt_mask;
	sdeimr |= (~enabled_irq_mask & interrupt_mask);

	drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);

	lockdep_assert_held(&dev_priv->irq_lock);

	if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
		return;

	intel_uncore_write(&dev_priv->uncore, SDEIMR, sdeimr);
	intel_uncore_posting_read(&dev_priv->uncore, SDEIMR);
}

void ibx_enable_display_interrupt(struct drm_i915_private *i915, u32 bits)
{
	ibx_display_interrupt_update(i915, bits, bits);
}

void ibx_disable_display_interrupt(struct drm_i915_private *i915, u32 bits)
{
	ibx_display_interrupt_update(i915, bits, 0);
}

u32 i915_pipestat_enable_mask(struct drm_i915_private *dev_priv,
			      enum pipe pipe)
{
	u32 status_mask = dev_priv->pipestat_irq_mask[pipe];
	u32 enable_mask = status_mask << 16;

	lockdep_assert_held(&dev_priv->irq_lock);

	if (DISPLAY_VER(dev_priv) < 5)
		goto out;

	/*
	 * On pipe A we don't support the PSR interrupt yet,
	 * on pipe B and C the same bit MBZ.
	 */
	if (drm_WARN_ON_ONCE(&dev_priv->drm,
			     status_mask & PIPE_A_PSR_STATUS_VLV))
		return 0;
	/*
	 * On pipe B and C we don't support the PSR interrupt yet, on pipe
	 * A the same bit is for perf counters which we don't use either.
	 */
	if (drm_WARN_ON_ONCE(&dev_priv->drm,
			     status_mask & PIPE_B_PSR_STATUS_VLV))
		return 0;

	enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
			 SPRITE0_FLIP_DONE_INT_EN_VLV |
			 SPRITE1_FLIP_DONE_INT_EN_VLV);
	if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
	if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;

out:
	drm_WARN_ONCE(&dev_priv->drm,
		      enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask);

	return enable_mask;
}
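
/*
 * Worked example of the layout above: PIPESTAT packs enable bits in the
 * high half and status bits in the low half, so the enable bit for a
 * status bit at position n sits at position n + 16 (hence the
 * "status_mask << 16" starting point). E.g. a status mask of 0x00000004
 * maps to an enable mask of 0x00040000, before the VLV adjustments.
 */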

void i915_enable_pipestat(struct drm_i915_private *dev_priv,
			  enum pipe pipe, u32 status_mask)
{
	i915_reg_t reg = PIPESTAT(pipe);
	u32 enable_mask;

	drm_WARN_ONCE(&dev_priv->drm, status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: status_mask=0x%x\n",
		      pipe_name(pipe), status_mask);

	lockdep_assert_held(&dev_priv->irq_lock);
	drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv));

	if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == status_mask)
		return;

	dev_priv->pipestat_irq_mask[pipe] |= status_mask;
	enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);

	intel_uncore_write(&dev_priv->uncore, reg, enable_mask | status_mask);
	intel_uncore_posting_read(&dev_priv->uncore, reg);
}

void i915_disable_pipestat(struct drm_i915_private *dev_priv,
			   enum pipe pipe, u32 status_mask)
{
	i915_reg_t reg = PIPESTAT(pipe);
	u32 enable_mask;

	drm_WARN_ONCE(&dev_priv->drm, status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: status_mask=0x%x\n",
		      pipe_name(pipe), status_mask);

	lockdep_assert_held(&dev_priv->irq_lock);
	drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv));

	if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == 0)
		return;

	dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;
	enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);

	intel_uncore_write(&dev_priv->uncore, reg, enable_mask | status_mask);
	intel_uncore_posting_read(&dev_priv->uncore, reg);
}

static bool i915_has_asle(struct drm_i915_private *dev_priv)
{
	if (!dev_priv->display.opregion.asle)
		return false;

	return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
}

/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 * @dev_priv: i915 device private
 */
static void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv)
{
	if (!i915_has_asle(dev_priv))
		return;

	spin_lock_irq(&dev_priv->irq_lock);

	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
	if (DISPLAY_VER(dev_priv) >= 4)
		i915_enable_pipestat(dev_priv, PIPE_A,
				     PIPE_LEGACY_BLC_EVENT_STATUS);

	spin_unlock_irq(&dev_priv->irq_lock);
}

/*
 * This timing diagram depicts the video signal in and
 * around the vertical blanking period.
 *
 * Assumptions about the fictitious mode used in this example:
 *  vblank_start >= 3
 *  vsync_start = vblank_start + 1
 *  vsync_end = vblank_start + 2
 *  vtotal = vblank_start + 3
 *
 *           start of vblank:
 *           latch double buffered registers
 *           increment frame counter (ctg+)
 *           generate start of vblank interrupt (gen4+)
 *           |
 *           |          frame start:
 *           |          generate frame start interrupt (aka. vblank interrupt) (gmch)
 *           |          may be shifted forward 1-3 extra lines via PIPECONF
 *           |          |
 *           |          |  start of vsync:
 *           |          |  generate vsync interrupt
 *           |          |  |
 * ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx
 *       .   \hs/   .      \hs/          \hs/          \hs/   .      \hs/
 * ----va---> <-----------------vb--------------------> <--------va-------------
 *       |          |       <----vs----->                     |
 * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
 * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
 * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
 *       |          |                                         |
 *       last visible pixel                                   first visible pixel
 *                  |                                         increment frame counter (gen3/4)
 *                  pixel counter = vblank_start * htotal     pixel counter = 0 (gen3/4)
 *
 * x  = horizontal active
 * _  = horizontal blanking
 * hs = horizontal sync
 * va = vertical active
 * vb = vertical blanking
 * vs = vertical sync
 * vbs = vblank_start (number)
 *
 * Summary:
 * - most events happen at the start of horizontal sync
 * - frame start happens at the start of horizontal blank, 1-4 lines
 *   (depending on PIPECONF settings) after the start of vblank
 * - gen3/4 pixel and frame counter are synchronized with the start
 *   of horizontal active on the first line of vertical active
 */

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
u32 i915_get_vblank_counter(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	struct drm_vblank_crtc *vblank = &dev_priv->drm.vblank[drm_crtc_index(crtc)];
	const struct drm_display_mode *mode = &vblank->hwmode;
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	i915_reg_t high_frame, low_frame;
	u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
	unsigned long irqflags;

	/*
	 * On i965gm TV output the frame counter only works up to
	 * the point when we enable the TV encoder. After that the
	 * frame counter ceases to work and reads zero. We need a
	 * vblank wait before enabling the TV encoder and so we
	 * have to enable vblank interrupts while the frame counter
	 * is still in a working state. However the core vblank code
	 * does not like us returning non-zero frame counter values
	 * when we've told it that we don't have a working frame
	 * counter. Thus we must stop non-zero values leaking out.
	 */
	if (!vblank->max_vblank_count)
		return 0;

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vbl_start = mode->crtc_vblank_start;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vbl_start = DIV_ROUND_UP(vbl_start, 2);

	/* Convert to pixel count */
	vbl_start *= htotal;

	/* Start of vblank event occurs at start of hsync */
	vbl_start -= htotal - hsync_start;

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = intel_de_read_fw(dev_priv, high_frame) & PIPE_FRAME_HIGH_MASK;
		low   = intel_de_read_fw(dev_priv, low_frame);
		high2 = intel_de_read_fw(dev_priv, high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	pixel = low & PIPE_PIXEL_MASK;
	low >>= PIPE_FRAME_LOW_SHIFT;

	/*
	 * The frame counter increments at beginning of active.
	 * Cook up a vblank counter by also checking the pixel
	 * counter against vblank start.
	 */
	return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
}
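
/*
 * Worked example of the stitch above (made-up values): high1 = 0x12 and
 * low = 0x34 combine to (0x12 << 8) | 0x34 = 0x1234; if the pixel counter
 * has already reached vbl_start the result is bumped by one so it acts as
 * a vblank counter rather than a frame counter, and the sum is wrapped to
 * the 24 bits the hardware provides (& 0xffffff).
 */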

u32 g4x_get_vblank_counter(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	struct drm_vblank_crtc *vblank = &dev_priv->drm.vblank[drm_crtc_index(crtc)];
	enum pipe pipe = to_intel_crtc(crtc)->pipe;

	if (!vblank->max_vblank_count)
		return 0;

	return intel_uncore_read(&dev_priv->uncore, PIPE_FRMCOUNT_G4X(pipe));
}

static u32 intel_crtc_scanlines_since_frame_timestamp(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct drm_vblank_crtc *vblank =
		&crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
	const struct drm_display_mode *mode = &vblank->hwmode;
	u32 htotal = mode->crtc_htotal;
	u32 clock = mode->crtc_clock;
	u32 scan_prev_time, scan_curr_time, scan_post_time;

	/*
	 * To avoid the race condition where we might cross into the
	 * next vblank just between the PIPE_FRMTMSTMP and TIMESTAMP_CTR
	 * reads, we make sure we read PIPE_FRMTMSTMP and TIMESTAMP_CTR
	 * during the same frame.
	 */
	do {
		/*
		 * This field provides read back of the display
		 * pipe frame time stamp. The time stamp value
		 * is sampled at every start of vertical blank.
		 */
		scan_prev_time = intel_de_read_fw(dev_priv,
						  PIPE_FRMTMSTMP(crtc->pipe));

		/*
		 * The TIMESTAMP_CTR register has the current
		 * time stamp value.
		 */
		scan_curr_time = intel_de_read_fw(dev_priv, IVB_TIMESTAMP_CTR);

		scan_post_time = intel_de_read_fw(dev_priv,
						  PIPE_FRMTMSTMP(crtc->pipe));
	} while (scan_post_time != scan_prev_time);

	return div_u64(mul_u32_u32(scan_curr_time - scan_prev_time,
				   clock), 1000 * htotal);
}
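
/*
 * Unit check for the formula above, assuming the two timestamp registers
 * tick in microseconds (which is what the 1000 * htotal divisor implies):
 * scanlines = us * kHz / (1000 * htotal). E.g. a 16667 us delta at a
 * 148500 kHz dotclock with htotal = 2200 gives
 * 16667 * 148500 / (1000 * 2200) ~= 1125 scanlines, one full 1080p frame.
 */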

/*
 * On certain encoders on certain platforms, the pipe scanline register
 * will not work to get the scanline, since the timings are driven from
 * the PORT, or there are issues with scanline register updates.
 * This function will use the framestamp and current timestamp registers
 * to calculate the scanline instead.
 */
static u32 __intel_get_crtc_scanline_from_timestamp(struct intel_crtc *crtc)
{
	struct drm_vblank_crtc *vblank =
		&crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
	const struct drm_display_mode *mode = &vblank->hwmode;
	u32 vblank_start = mode->crtc_vblank_start;
	u32 vtotal = mode->crtc_vtotal;
	u32 scanline;

	scanline = intel_crtc_scanlines_since_frame_timestamp(crtc);
	scanline = min(scanline, vtotal - 1);
	scanline = (scanline + vblank_start) % vtotal;

	return scanline;
}

/*
 * intel_de_read_fw(), only for fast reads of display block, no need for
 * forcewake etc.
 */
static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	const struct drm_display_mode *mode;
	struct drm_vblank_crtc *vblank;
	enum pipe pipe = crtc->pipe;
	int position, vtotal;

	if (!crtc->active)
		return 0;

	vblank = &crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
	mode = &vblank->hwmode;

	if (crtc->mode_flags & I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP)
		return __intel_get_crtc_scanline_from_timestamp(crtc);

	vtotal = mode->crtc_vtotal;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vtotal /= 2;

	position = intel_de_read_fw(dev_priv, PIPEDSL(pipe)) & PIPEDSL_LINE_MASK;

	/*
	 * On HSW, the DSL reg (0x70000) appears to return 0 if we
	 * read it just before the start of vblank. So try it again
	 * so we don't accidentally end up spanning a vblank frame
	 * increment, causing the pipe_update_end() code to squawk at us.
	 *
	 * The nature of this problem means we can't simply check the ISR
	 * bit and return the vblank start value; nor can we use the scanline
	 * debug register in the transcoder as it appears to have the same
	 * problem. We may need to extend this to include other platforms,
	 * but so far testing only shows the problem on HSW.
	 */
	if (HAS_DDI(dev_priv) && !position) {
		int i, temp;

		for (i = 0; i < 100; i++) {
			udelay(1);
			temp = intel_de_read_fw(dev_priv, PIPEDSL(pipe)) & PIPEDSL_LINE_MASK;
			if (temp != position) {
				position = temp;
				break;
			}
		}
	}

	/*
	 * See update_scanline_offset() for the details on the
	 * scanline_offset adjustment.
	 */
	return (position + crtc->scanline_offset) % vtotal;
}

static bool i915_get_crtc_scanoutpos(struct drm_crtc *_crtc,
				     bool in_vblank_irq,
				     int *vpos, int *hpos,
				     ktime_t *stime, ktime_t *etime,
				     const struct drm_display_mode *mode)
{
	struct drm_device *dev = _crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *crtc = to_intel_crtc(_crtc);
	enum pipe pipe = crtc->pipe;
	int position;
	int vbl_start, vbl_end, hsync_start, htotal, vtotal;
	unsigned long irqflags;
	bool use_scanline_counter = DISPLAY_VER(dev_priv) >= 5 ||
		IS_G4X(dev_priv) || DISPLAY_VER(dev_priv) == 2 ||
		crtc->mode_flags & I915_MODE_FLAG_USE_SCANLINE_COUNTER;

	if (drm_WARN_ON(&dev_priv->drm, !mode->crtc_clock)) {
		drm_dbg(&dev_priv->drm,
			"trying to get scanoutpos for disabled "
			"pipe %c\n", pipe_name(pipe));
		return false;
	}

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vtotal = mode->crtc_vtotal;
	vbl_start = mode->crtc_vblank_start;
	vbl_end = mode->crtc_vblank_end;

	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
		vbl_start = DIV_ROUND_UP(vbl_start, 2);
		vbl_end /= 2;
		vtotal /= 2;
	}

	/*
	 * Lock uncore.lock, as we will do multiple timing critical raw
	 * register reads, potentially with preemption disabled, so the
	 * following code must not block on uncore.lock.
	 */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */

	/* Get optional system timestamp before query. */
	if (stime)
		*stime = ktime_get();

	if (crtc->mode_flags & I915_MODE_FLAG_VRR) {
		int scanlines = intel_crtc_scanlines_since_frame_timestamp(crtc);

		position = __intel_get_crtc_scanline(crtc);

		/*
		 * Already exiting vblank? If so, shift our position
		 * so it looks like we're already approaching the full
		 * vblank end. This should make the generated timestamp
		 * more or less match when the active portion will start.
		 */
		if (position >= vbl_start && scanlines < position)
			position = min(crtc->vmax_vblank_start + scanlines, vtotal - 1);
	} else if (use_scanline_counter) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		position = __intel_get_crtc_scanline(crtc);
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (intel_de_read_fw(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		/* convert to pixel counts */
		vbl_start *= htotal;
		vbl_end *= htotal;
		vtotal *= htotal;

		/*
		 * In interlaced modes, the pixel counter counts all pixels,
		 * so one field will have htotal more pixels. In order to avoid
		 * the reported position from jumping backwards when the pixel
		 * counter is beyond the length of the shorter field, just
		 * clamp the position to the length of the shorter field. This
		 * matches how the scanline counter based position works since
		 * the scanline counter doesn't count the two half lines.
		 */
		if (position >= vtotal)
			position = vtotal - 1;

		/*
		 * Start of vblank interrupt is triggered at start of hsync,
		 * just prior to the first active line of vblank. However we
		 * consider lines to start at the leading edge of horizontal
		 * active. So, should we get here before we've crossed into
		 * the horizontal active of the first line in vblank, we would
		 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that,
		 * always add htotal-hsync_start to the current pixel position.
		 */
		position = (position + htotal - hsync_start) % vtotal;
	}

	/* Get optional system timestamp after query. */
	if (etime)
		*etime = ktime_get();

	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	/*
	 * While in vblank, position will be negative
	 * counting up towards 0 at vbl_end. And outside
	 * vblank, position will be positive counting
	 * up since vbl_end.
	 */
	if (position >= vbl_start)
		position -= vbl_end;
	else
		position += vtotal - vbl_end;

	if (use_scanline_counter) {
		*vpos = position;
		*hpos = 0;
	} else {
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	return true;
}
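
/*
 * Worked example of the normalization above (made-up mode with
 * vbl_start = 1080 and vbl_end = vtotal = 1125, scanline counting): a raw
 * position of 1100 is inside vblank and becomes 1100 - 1125 = -25,
 * counting up towards 0 at vblank end; a raw position of 100 is in the
 * active area and stays 100 + (1125 - 1125) = 100 lines past vblank end.
 */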

bool intel_crtc_get_vblank_timestamp(struct drm_crtc *crtc, int *max_error,
				     ktime_t *vblank_time, bool in_vblank_irq)
{
	return drm_crtc_vblank_helper_get_vblank_timestamp_internal(
		crtc, max_error, vblank_time, in_vblank_irq,
		i915_get_crtc_scanoutpos);
}

int intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	unsigned long irqflags;
	int position;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	position = __intel_get_crtc_scanline(crtc);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	return position;
}

/**
 * ivb_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows, since statistically
 * the same row is more likely to go bad again.
 */
static void ivb_parity_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), l3_parity.error_work);
	struct intel_gt *gt = to_gt(dev_priv);
	u32 error_status, row, bank, subbank;
	char *parity_event[6];
	u32 misccpctl;
	u8 slice = 0;

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
	 */
	mutex_lock(&dev_priv->drm.struct_mutex);

	/* If we've screwed up tracking, just let the interrupt fire again */
	if (drm_WARN_ON(&dev_priv->drm, !dev_priv->l3_parity.which_slice))
		goto out;

	misccpctl = intel_uncore_read(&dev_priv->uncore, GEN7_MISCCPCTL);
	intel_uncore_write(&dev_priv->uncore, GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	intel_uncore_posting_read(&dev_priv->uncore, GEN7_MISCCPCTL);

	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
		i915_reg_t reg;

		slice--;
		if (drm_WARN_ON_ONCE(&dev_priv->drm,
				     slice >= NUM_L3_SLICES(dev_priv)))
			break;

		dev_priv->l3_parity.which_slice &= ~(1<<slice);

		reg = GEN7_L3CDERRST1(slice);

		error_status = intel_uncore_read(&dev_priv->uncore, reg);
		row = GEN7_PARITY_ERROR_ROW(error_status);
		bank = GEN7_PARITY_ERROR_BANK(error_status);
		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

		intel_uncore_write(&dev_priv->uncore, reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
		intel_uncore_posting_read(&dev_priv->uncore, reg);

		parity_event[0] = I915_L3_PARITY_UEVENT "=1";
		parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
		parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
		parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
		parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
		parity_event[5] = NULL;

		kobject_uevent_env(&dev_priv->drm.primary->kdev->kobj,
				   KOBJ_CHANGE, parity_event);

		DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
			  slice, row, bank, subbank);

		kfree(parity_event[4]);
		kfree(parity_event[3]);
		kfree(parity_event[2]);
		kfree(parity_event[1]);
	}

	intel_uncore_write(&dev_priv->uncore, GEN7_MISCCPCTL, misccpctl);

out:
	drm_WARN_ON(&dev_priv->drm, dev_priv->l3_parity.which_slice);
	spin_lock_irq(gt->irq_lock);
	gen5_gt_enable_irq(gt, GT_PARITY_ERROR(dev_priv));
	spin_unlock_irq(gt->irq_lock);

	mutex_unlock(&dev_priv->drm.struct_mutex);
}
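
/*
 * The resulting uevent, as assembled above, carries an environment along
 * the lines of (values illustrative, first key supplied by the
 * I915_L3_PARITY_UEVENT macro):
 *
 *	L3_PARITY_ERROR=1 ROW=5 BANK=1 SUBBANK=0 SLICE=0
 */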

static bool gen11_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_TC1:
	case HPD_PORT_TC2:
	case HPD_PORT_TC3:
	case HPD_PORT_TC4:
	case HPD_PORT_TC5:
	case HPD_PORT_TC6:
		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(pin);
	default:
		return false;
	}
}

static bool bxt_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_A:
		return val & PORTA_HOTPLUG_LONG_DETECT;
	case HPD_PORT_B:
		return val & PORTB_HOTPLUG_LONG_DETECT;
	case HPD_PORT_C:
		return val & PORTC_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool icp_ddi_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_A:
	case HPD_PORT_B:
	case HPD_PORT_C:
	case HPD_PORT_D:
		return val & SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(pin);
	default:
		return false;
	}
}

static bool icp_tc_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_TC1:
	case HPD_PORT_TC2:
	case HPD_PORT_TC3:
	case HPD_PORT_TC4:
	case HPD_PORT_TC5:
	case HPD_PORT_TC6:
		return val & ICP_TC_HPD_LONG_DETECT(pin);
	default:
		return false;
	}
}

static bool spt_port_hotplug2_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_E:
		return val & PORTE_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool spt_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_A:
		return val & PORTA_HOTPLUG_LONG_DETECT;
	case HPD_PORT_B:
		return val & PORTB_HOTPLUG_LONG_DETECT;
	case HPD_PORT_C:
		return val & PORTC_HOTPLUG_LONG_DETECT;
	case HPD_PORT_D:
		return val & PORTD_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool ilk_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_A:
		return val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool pch_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_B:
		return val & PORTB_HOTPLUG_LONG_DETECT;
	case HPD_PORT_C:
		return val & PORTC_HOTPLUG_LONG_DETECT;
	case HPD_PORT_D:
		return val & PORTD_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool i9xx_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
{
	switch (pin) {
	case HPD_PORT_B:
		return val & PORTB_HOTPLUG_INT_LONG_PULSE;
	case HPD_PORT_C:
		return val & PORTC_HOTPLUG_INT_LONG_PULSE;
	case HPD_PORT_D:
		return val & PORTD_HOTPLUG_INT_LONG_PULSE;
	default:
		return false;
	}
}

/*
 * Get a bit mask of pins that have triggered, and which ones may be long.
 * This can be called multiple times with the same masks to accumulate
 * hotplug detection results from several registers.
 *
 * Note that the caller is expected to zero out the masks initially.
 */
static void intel_get_hpd_pins(struct drm_i915_private *dev_priv,
			       u32 *pin_mask, u32 *long_mask,
			       u32 hotplug_trigger, u32 dig_hotplug_reg,
			       const u32 hpd[HPD_NUM_PINS],
			       bool long_pulse_detect(enum hpd_pin pin, u32 val))
{
	enum hpd_pin pin;

	BUILD_BUG_ON(BITS_PER_TYPE(*pin_mask) < HPD_NUM_PINS);

	for_each_hpd_pin(pin) {
		if ((hpd[pin] & hotplug_trigger) == 0)
			continue;

		*pin_mask |= BIT(pin);

		if (long_pulse_detect(pin, dig_hotplug_reg))
			*long_mask |= BIT(pin);
	}

	drm_dbg(&dev_priv->drm,
		"hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x, long 0x%08x\n",
		hotplug_trigger, dig_hotplug_reg, *pin_mask, *long_mask);
}
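
/*
 * Usage sketch mirroring the real callers further down: zero the masks
 * once, accumulate across one call per trigger register (here the ICP
 * DDI and TC detectors from above), then hand the combined result to
 * intel_hpd_irq_handler(). The trigger/register values are hypothetical
 * parameters.
 */
static void __maybe_unused
example_accumulate_hpd_pins(struct drm_i915_private *i915,
			    u32 ddi_trigger, u32 ddi_hotplug_reg,
			    u32 tc_trigger, u32 tc_hotplug_reg)
{
	u32 pin_mask = 0, long_mask = 0;

	intel_get_hpd_pins(i915, &pin_mask, &long_mask,
			   ddi_trigger, ddi_hotplug_reg,
			   i915->display.hotplug.pch_hpd,
			   icp_ddi_port_hotplug_long_detect);
	intel_get_hpd_pins(i915, &pin_mask, &long_mask,
			   tc_trigger, tc_hotplug_reg,
			   i915->display.hotplug.pch_hpd,
			   icp_tc_port_hotplug_long_detect);

	intel_hpd_irq_handler(i915, pin_mask, long_mask);
}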

static u32 intel_hpd_enabled_irqs(struct drm_i915_private *dev_priv,
				  const u32 hpd[HPD_NUM_PINS])
{
	struct intel_encoder *encoder;
	u32 enabled_irqs = 0;

	for_each_intel_encoder(&dev_priv->drm, encoder)
		if (dev_priv->display.hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED)
			enabled_irqs |= hpd[encoder->hpd_pin];

	return enabled_irqs;
}

static u32 intel_hpd_hotplug_irqs(struct drm_i915_private *dev_priv,
				  const u32 hpd[HPD_NUM_PINS])
{
	struct intel_encoder *encoder;
	u32 hotplug_irqs = 0;

	for_each_intel_encoder(&dev_priv->drm, encoder)
		hotplug_irqs |= hpd[encoder->hpd_pin];

	return hotplug_irqs;
}

static u32 intel_hpd_hotplug_enables(struct drm_i915_private *i915,
				     hotplug_enables_func hotplug_enables)
{
	struct intel_encoder *encoder;
	u32 hotplug = 0;

	for_each_intel_encoder(&i915->drm, encoder)
		hotplug |= hotplug_enables(i915, encoder->hpd_pin);

	return hotplug;
}

static void gmbus_irq_handler(struct drm_i915_private *dev_priv)
{
	wake_up_all(&dev_priv->display.gmbus.wait_queue);
}

static void dp_aux_irq_handler(struct drm_i915_private *dev_priv)
{
	wake_up_all(&dev_priv->display.gmbus.wait_queue);
}

#if defined(CONFIG_DEBUG_FS)
static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
					 enum pipe pipe,
					 u32 crc0, u32 crc1,
					 u32 crc2, u32 crc3,
					 u32 crc4)
{
	struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe);
	struct intel_pipe_crc *pipe_crc = &crtc->pipe_crc;
	u32 crcs[5] = { crc0, crc1, crc2, crc3, crc4 };

	trace_intel_pipe_crc(crtc, crcs);

	spin_lock(&pipe_crc->lock);
	/*
	 * For some not yet identified reason, the first CRC is
	 * bonkers. So let's just wait for the next vblank and read
	 * out the buggy result.
	 *
	 * On GEN8+ sometimes the second CRC is bonkers as well, so
	 * don't trust that one either.
	 */
	if (pipe_crc->skipped <= 0 ||
	    (DISPLAY_VER(dev_priv) >= 8 && pipe_crc->skipped == 1)) {
		pipe_crc->skipped++;
		spin_unlock(&pipe_crc->lock);
		return;
	}
	spin_unlock(&pipe_crc->lock);

	drm_crtc_add_crc_entry(&crtc->base, true,
			       drm_crtc_accurate_vblank_count(&crtc->base),
			       crcs);
}
#else
static inline void
display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
			     enum pipe pipe,
			     u32 crc0, u32 crc1,
			     u32 crc2, u32 crc3,
			     u32 crc4) {}
#endif

static void flip_done_handler(struct drm_i915_private *i915,
			      enum pipe pipe)
{
	struct intel_crtc *crtc = intel_crtc_for_pipe(i915, pipe);
	struct drm_crtc_state *crtc_state = crtc->base.state;
	struct drm_pending_vblank_event *e = crtc_state->event;
	struct drm_device *dev = &i915->drm;
	unsigned long irqflags;

	spin_lock_irqsave(&dev->event_lock, irqflags);

	crtc_state->event = NULL;

	drm_crtc_send_vblank_event(&crtc->base, e);

	spin_unlock_irqrestore(&dev->event_lock, irqflags);
}

static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				     enum pipe pipe)
{
	display_pipe_crc_irq_handler(dev_priv, pipe,
				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_1_IVB(pipe)),
				     0, 0, 0, 0);
}

static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				     enum pipe pipe)
{
	display_pipe_crc_irq_handler(dev_priv, pipe,
				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_1_IVB(pipe)),
				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_2_IVB(pipe)),
				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_3_IVB(pipe)),
				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_4_IVB(pipe)),
				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_5_IVB(pipe)));
}

static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	u32 res1, res2;

	if (DISPLAY_VER(dev_priv) >= 3)
		res1 = intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_RES1_I915(pipe));
	else
		res1 = 0;

	if (DISPLAY_VER(dev_priv) >= 5 || IS_G4X(dev_priv))
		res2 = intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_RES2_G4X(pipe));
	else
		res2 = 0;

	display_pipe_crc_irq_handler(dev_priv, pipe,
				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_RED(pipe)),
				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_GREEN(pipe)),
				     intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_BLUE(pipe)),
				     res1, res2);
}

static void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv)
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		intel_uncore_write(&dev_priv->uncore, PIPESTAT(pipe),
				   PIPESTAT_INT_STATUS_MASK |
				   PIPE_FIFO_UNDERRUN_STATUS);

		dev_priv->pipestat_irq_mask[pipe] = 0;
	}
}

static void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv,
				  u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	enum pipe pipe;

	spin_lock(&dev_priv->irq_lock);

	if (!dev_priv->display_irqs_enabled) {
		spin_unlock(&dev_priv->irq_lock);
		return;
	}

	for_each_pipe(dev_priv, pipe) {
		i915_reg_t reg;
		u32 status_mask, enable_mask, iir_bit = 0;

		/*
		 * PIPESTAT bits get signalled even when the interrupt is
		 * disabled with the mask bits, and some of the status bits do
		 * not generate interrupts at all (like the underrun bit). Hence
		 * we need to be careful that we only handle what we want to
		 * handle.
		 */

		/* fifo underruns are filtered in the underrun handler. */
		status_mask = PIPE_FIFO_UNDERRUN_STATUS;

		switch (pipe) {
		default:
		case PIPE_A:
			iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
			break;
		case PIPE_B:
			iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
			break;
		case PIPE_C:
			iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
			break;
		}
		if (iir & iir_bit)
			status_mask |= dev_priv->pipestat_irq_mask[pipe];

		if (!status_mask)
			continue;

		reg = PIPESTAT(pipe);
		pipe_stats[pipe] = intel_uncore_read(&dev_priv->uncore, reg) & status_mask;
		enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);

		/*
		 * Clear the PIPE*STAT regs before the IIR
		 *
		 * Toggle the enable bits to make sure we get an
		 * edge in the ISR pipe event bit if we don't clear
		 * all the enabled status bits. Otherwise the edge
		 * triggered IIR on i965/g4x wouldn't notice that
		 * an interrupt is still pending.
		 */
		if (pipe_stats[pipe]) {
			intel_uncore_write(&dev_priv->uncore, reg, pipe_stats[pipe]);
			intel_uncore_write(&dev_priv->uncore, reg, enable_mask);
		}
	}
	spin_unlock(&dev_priv->irq_lock);
}

static void i8xx_pipestat_irq_handler(struct drm_i915_private *dev_priv,
				      u16 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
			intel_handle_vblank(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}
}

static void i915_pipestat_irq_handler(struct drm_i915_private *dev_priv,
				      u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	bool blc_event = false;
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
			intel_handle_vblank(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
			blc_event = true;

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}

	if (blc_event || (iir & I915_ASLE_INTERRUPT))
		intel_opregion_asle_intr(dev_priv);
}

static void i965_pipestat_irq_handler(struct drm_i915_private *dev_priv,
				      u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	bool blc_event = false;
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
			intel_handle_vblank(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
			blc_event = true;

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}

	if (blc_event || (iir & I915_ASLE_INTERRUPT))
		intel_opregion_asle_intr(dev_priv);

	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
		gmbus_irq_handler(dev_priv);
}

static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv,
					    u32 pipe_stats[I915_MAX_PIPES])
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
			intel_handle_vblank(dev_priv, pipe);

		if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV)
			flip_done_handler(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}

	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
		gmbus_irq_handler(dev_priv);
}

i9xx_hpd_irq_ack(struct drm_i915_private * dev_priv)1587 static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv)
1588 {
1589 u32 hotplug_status = 0, hotplug_status_mask;
1590 int i;
1591
1592 if (IS_G4X(dev_priv) ||
1593 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
1594 hotplug_status_mask = HOTPLUG_INT_STATUS_G4X |
1595 DP_AUX_CHANNEL_MASK_INT_STATUS_G4X;
1596 else
1597 hotplug_status_mask = HOTPLUG_INT_STATUS_I915;
1598
1599 /*
1600 * We absolutely have to clear all the pending interrupt
1601 * bits in PORT_HOTPLUG_STAT. Otherwise the ISR port
1602 * interrupt bit won't have an edge, and the i965/g4x
1603 * edge triggered IIR will not notice that an interrupt
1604 * is still pending. We can't use PORT_HOTPLUG_EN to
1605 * guarantee the edge as the act of toggling the enable
1606 * bits can itself generate a new hotplug interrupt :(
1607 */
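/*
 * Put differently (an illustrative model mirroring the note above,
 * not taken from the hardware spec):
 *
 *   irq = rising_edge(PORT_HOTPLUG_STAT & hotplug_status_mask)
 *
 * so the loop below keeps sampling and write-back-clearing the status
 * until it reads zero, with 10 iterations as a safety cap.
 */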
1608 for (i = 0; i < 10; i++) {
1609 u32 tmp = intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_STAT) & hotplug_status_mask;
1610
1611 if (tmp == 0)
1612 return hotplug_status;
1613
1614 hotplug_status |= tmp;
1615 intel_uncore_write(&dev_priv->uncore, PORT_HOTPLUG_STAT, hotplug_status);
1616 }
1617
1618 drm_WARN_ONCE(&dev_priv->drm, 1,
1619 "PORT_HOTPLUG_STAT did not clear (0x%08x)\n",
1620 intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_STAT));
1621
1622 return hotplug_status;
1623 }
1624
1625 static void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv,
1626 u32 hotplug_status)
1627 {
1628 u32 pin_mask = 0, long_mask = 0;
1629 u32 hotplug_trigger;
1630
1631 if (IS_G4X(dev_priv) ||
1632 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
1633 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
1634 else
1635 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
1636
1637 if (hotplug_trigger) {
1638 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
1639 hotplug_trigger, hotplug_trigger,
1640 dev_priv->display.hotplug.hpd,
1641 i9xx_port_hotplug_long_detect);
1642
1643 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
1644 }
1645
1646 if ((IS_G4X(dev_priv) ||
1647 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
1648 hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
1649 dp_aux_irq_handler(dev_priv);
1650 }
1651
1652 static irqreturn_t valleyview_irq_handler(int irq, void *arg)
1653 {
1654 struct drm_i915_private *dev_priv = arg;
1655 irqreturn_t ret = IRQ_NONE;
1656
1657 if (!intel_irqs_enabled(dev_priv))
1658 return IRQ_NONE;
1659
1660 /* IRQs are synced during runtime_suspend, we don't require a wakeref */
1661 disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
1662
1663 do {
1664 u32 iir, gt_iir, pm_iir;
1665 u32 pipe_stats[I915_MAX_PIPES] = {};
1666 u32 hotplug_status = 0;
1667 u32 ier = 0;
1668
1669 gt_iir = intel_uncore_read(&dev_priv->uncore, GTIIR);
1670 pm_iir = intel_uncore_read(&dev_priv->uncore, GEN6_PMIIR);
1671 iir = intel_uncore_read(&dev_priv->uncore, VLV_IIR);
1672
1673 if (gt_iir == 0 && pm_iir == 0 && iir == 0)
1674 break;
1675
1676 ret = IRQ_HANDLED;
1677
1678 /*
1679 * Theory on interrupt generation, based on empirical evidence:
1680 *
1681 * x = ((VLV_IIR & VLV_IER) ||
1682 * (((GT_IIR & GT_IER) || (GEN6_PMIIR & GEN6_PMIER)) &&
1683 * (VLV_MASTER_IER & MASTER_INTERRUPT_ENABLE)));
1684 *
1685 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
1686 * Hence we clear MASTER_INTERRUPT_ENABLE and VLV_IER to
1687 * guarantee the CPU interrupt will be raised again even if we
1688 * don't end up clearing all the VLV_IIR, GT_IIR, GEN6_PMIIR
1689 * bits this time around.
1690 */
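/*
 * In code form, the sequence below is roughly (a sketch, assuming the
 * empirical model above holds):
 *
 *   VLV_MASTER_IER = 0; ier = VLV_IER; VLV_IER = 0;   <- force x to 0
 *   write gt_iir to GTIIR, pm_iir to GEN6_PMIIR       <- ack GT/PM
 *   handle sources; write iir to VLV_IIR last         <- it is level-ish
 *   VLV_IER = ier; VLV_MASTER_IER = MASTER_INTERRUPT_ENABLE;  <- re-arm
 */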
1691 intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, 0);
1692 ier = intel_uncore_read(&dev_priv->uncore, VLV_IER);
1693 intel_uncore_write(&dev_priv->uncore, VLV_IER, 0);
1694
1695 if (gt_iir)
1696 intel_uncore_write(&dev_priv->uncore, GTIIR, gt_iir);
1697 if (pm_iir)
1698 intel_uncore_write(&dev_priv->uncore, GEN6_PMIIR, pm_iir);
1699
1700 if (iir & I915_DISPLAY_PORT_INTERRUPT)
1701 hotplug_status = i9xx_hpd_irq_ack(dev_priv);
1702
1703 /* Call regardless, as some status bits might not be
1704 * signalled in iir */
1705 i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
1706
1707 if (iir & (I915_LPE_PIPE_A_INTERRUPT |
1708 I915_LPE_PIPE_B_INTERRUPT))
1709 intel_lpe_audio_irq_handler(dev_priv);
1710
1711 /*
1712 * VLV_IIR is single buffered, and reflects the level
1713 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
1714 */
1715 if (iir)
1716 intel_uncore_write(&dev_priv->uncore, VLV_IIR, iir);
1717
1718 intel_uncore_write(&dev_priv->uncore, VLV_IER, ier);
1719 intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
1720
1721 if (gt_iir)
1722 gen6_gt_irq_handler(to_gt(dev_priv), gt_iir);
1723 if (pm_iir)
1724 gen6_rps_irq_handler(&to_gt(dev_priv)->rps, pm_iir);
1725
1726 if (hotplug_status)
1727 i9xx_hpd_irq_handler(dev_priv, hotplug_status);
1728
1729 valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
1730 } while (0);
1731
1732 pmu_irq_stats(dev_priv, ret);
1733
1734 enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
1735
1736 return ret;
1737 }
1738
1739 static irqreturn_t cherryview_irq_handler(int irq, void *arg)
1740 {
1741 struct drm_i915_private *dev_priv = arg;
1742 irqreturn_t ret = IRQ_NONE;
1743
1744 if (!intel_irqs_enabled(dev_priv))
1745 return IRQ_NONE;
1746
1747 /* IRQs are synced during runtime_suspend, we don't require a wakeref */
1748 disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
1749
1750 do {
1751 u32 master_ctl, iir;
1752 u32 pipe_stats[I915_MAX_PIPES] = {};
1753 u32 hotplug_status = 0;
1754 u32 ier = 0;
1755
1756 master_ctl = intel_uncore_read(&dev_priv->uncore, GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
1757 iir = intel_uncore_read(&dev_priv->uncore, VLV_IIR);
1758
1759 if (master_ctl == 0 && iir == 0)
1760 break;
1761
1762 ret = IRQ_HANDLED;
1763
1764 /*
1765 * Theory on interrupt generation, based on empirical evidence:
1766 *
1767 * x = ((VLV_IIR & VLV_IER) ||
1768 * ((GEN8_MASTER_IRQ & ~GEN8_MASTER_IRQ_CONTROL) &&
1769 * (GEN8_MASTER_IRQ & GEN8_MASTER_IRQ_CONTROL)));
1770 *
1771 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
1772 * Hence we clear GEN8_MASTER_IRQ_CONTROL and VLV_IER to
1773 * guarantee the CPU interrupt will be raised again even if we
1774 * don't end up clearing all the VLV_IIR and GEN8_MASTER_IRQ_CONTROL
1775 * bits this time around.
1776 */
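/*
 * Same re-arm trick as on VLV, sketched against the registers used
 * below:
 *
 *   GEN8_MASTER_IRQ = 0; ier = VLV_IER; VLV_IER = 0;  <- force x to 0
 *   handle GT via master_ctl; write iir to VLV_IIR last
 *   VLV_IER = ier; GEN8_MASTER_IRQ = GEN8_MASTER_IRQ_CONTROL;  <- re-arm
 */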
1777 intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, 0);
1778 ier = intel_uncore_read(&dev_priv->uncore, VLV_IER);
1779 intel_uncore_write(&dev_priv->uncore, VLV_IER, 0);
1780
1781 gen8_gt_irq_handler(to_gt(dev_priv), master_ctl);
1782
1783 if (iir & I915_DISPLAY_PORT_INTERRUPT)
1784 hotplug_status = i9xx_hpd_irq_ack(dev_priv);
1785
1786 /* Call regardless, as some status bits might not be
1787 * signalled in iir */
1788 i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);
1789
1790 if (iir & (I915_LPE_PIPE_A_INTERRUPT |
1791 I915_LPE_PIPE_B_INTERRUPT |
1792 I915_LPE_PIPE_C_INTERRUPT))
1793 intel_lpe_audio_irq_handler(dev_priv);
1794
1795 /*
1796 * VLV_IIR is single buffered, and reflects the level
1797 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
1798 */
1799 if (iir)
1800 intel_uncore_write(&dev_priv->uncore, VLV_IIR, iir);
1801
1802 intel_uncore_write(&dev_priv->uncore, VLV_IER, ier);
1803 intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
1804
1805 if (hotplug_status)
1806 i9xx_hpd_irq_handler(dev_priv, hotplug_status);
1807
1808 valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
1809 } while (0);
1810
1811 pmu_irq_stats(dev_priv, ret);
1812
1813 enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
1814
1815 return ret;
1816 }
1817
1818 static void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv,
1819 u32 hotplug_trigger)
1820 {
1821 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
1822
1823 /*
1824 * Somehow the PCH doesn't seem to really ack the interrupt to the CPU
1825 * unless we touch the hotplug register, even if hotplug_trigger is
1826 * zero. Not acking leads to "The master control interrupt lied (SDE)!"
1827 * errors.
1828 */
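/*
 * Note, inferred from the masking below: PCH_PORT_HOTPLUG mixes
 * enable/control bits with write-1-to-clear status bits, so when the
 * write is purely an ack (hotplug_trigger == 0) we first drop the
 * status bits to avoid eating a hotplug event we haven't processed.
 */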
1829 dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG);
1830 if (!hotplug_trigger) {
1831 u32 mask = PORTA_HOTPLUG_STATUS_MASK |
1832 PORTD_HOTPLUG_STATUS_MASK |
1833 PORTC_HOTPLUG_STATUS_MASK |
1834 PORTB_HOTPLUG_STATUS_MASK;
1835 dig_hotplug_reg &= ~mask;
1836 }
1837
1838 intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG, dig_hotplug_reg);
1839 if (!hotplug_trigger)
1840 return;
1841
1842 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
1843 hotplug_trigger, dig_hotplug_reg,
1844 dev_priv->display.hotplug.pch_hpd,
1845 pch_port_hotplug_long_detect);
1846
1847 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
1848 }
1849
1850 static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
1851 {
1852 enum pipe pipe;
1853 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
1854
1855 ibx_hpd_irq_handler(dev_priv, hotplug_trigger);
1856
1857 if (pch_iir & SDE_AUDIO_POWER_MASK) {
1858 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
1859 SDE_AUDIO_POWER_SHIFT);
1860 drm_dbg(&dev_priv->drm, "PCH audio power change on port %c\n",
1861 port_name(port));
1862 }
1863
1864 if (pch_iir & SDE_AUX_MASK)
1865 dp_aux_irq_handler(dev_priv);
1866
1867 if (pch_iir & SDE_GMBUS)
1868 gmbus_irq_handler(dev_priv);
1869
1870 if (pch_iir & SDE_AUDIO_HDCP_MASK)
1871 drm_dbg(&dev_priv->drm, "PCH HDCP audio interrupt\n");
1872
1873 if (pch_iir & SDE_AUDIO_TRANS_MASK)
1874 drm_dbg(&dev_priv->drm, "PCH transcoder audio interrupt\n");
1875
1876 if (pch_iir & SDE_POISON)
1877 drm_err(&dev_priv->drm, "PCH poison interrupt\n");
1878
1879 if (pch_iir & SDE_FDI_MASK) {
1880 for_each_pipe(dev_priv, pipe)
1881 drm_dbg(&dev_priv->drm, " pipe %c FDI IIR: 0x%08x\n",
1882 pipe_name(pipe),
1883 intel_uncore_read(&dev_priv->uncore, FDI_RX_IIR(pipe)));
1884 }
1885
1886 if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
1887 drm_dbg(&dev_priv->drm, "PCH transcoder CRC done interrupt\n");
1888
1889 if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
1890 drm_dbg(&dev_priv->drm,
1891 "PCH transcoder CRC error interrupt\n");
1892
1893 if (pch_iir & SDE_TRANSA_FIFO_UNDER)
1894 intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_A);
1895
1896 if (pch_iir & SDE_TRANSB_FIFO_UNDER)
1897 intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_B);
1898 }
1899
1900 static void ivb_err_int_handler(struct drm_i915_private *dev_priv)
1901 {
1902 u32 err_int = intel_uncore_read(&dev_priv->uncore, GEN7_ERR_INT);
1903 enum pipe pipe;
1904
1905 if (err_int & ERR_INT_POISON)
1906 drm_err(&dev_priv->drm, "Poison interrupt\n");
1907
1908 for_each_pipe(dev_priv, pipe) {
1909 if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
1910 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1911
1912 if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
1913 if (IS_IVYBRIDGE(dev_priv))
1914 ivb_pipe_crc_irq_handler(dev_priv, pipe);
1915 else
1916 hsw_pipe_crc_irq_handler(dev_priv, pipe);
1917 }
1918 }
1919
1920 intel_uncore_write(&dev_priv->uncore, GEN7_ERR_INT, err_int);
1921 }
1922
1923 static void cpt_serr_int_handler(struct drm_i915_private *dev_priv)
1924 {
1925 u32 serr_int = intel_uncore_read(&dev_priv->uncore, SERR_INT);
1926 enum pipe pipe;
1927
1928 if (serr_int & SERR_INT_POISON)
1929 drm_err(&dev_priv->drm, "PCH poison interrupt\n");
1930
1931 for_each_pipe(dev_priv, pipe)
1932 if (serr_int & SERR_INT_TRANS_FIFO_UNDERRUN(pipe))
1933 intel_pch_fifo_underrun_irq_handler(dev_priv, pipe);
1934
1935 intel_uncore_write(&dev_priv->uncore, SERR_INT, serr_int);
1936 }
1937
1938 static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
1939 {
1940 enum pipe pipe;
1941 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
1942
1943 ibx_hpd_irq_handler(dev_priv, hotplug_trigger);
1944
1945 if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
1946 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
1947 SDE_AUDIO_POWER_SHIFT_CPT);
1948 drm_dbg(&dev_priv->drm, "PCH audio power change on port %c\n",
1949 port_name(port));
1950 }
1951
1952 if (pch_iir & SDE_AUX_MASK_CPT)
1953 dp_aux_irq_handler(dev_priv);
1954
1955 if (pch_iir & SDE_GMBUS_CPT)
1956 gmbus_irq_handler(dev_priv);
1957
1958 if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
1959 drm_dbg(&dev_priv->drm, "Audio CP request interrupt\n");
1960
1961 if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
1962 drm_dbg(&dev_priv->drm, "Audio CP change interrupt\n");
1963
1964 if (pch_iir & SDE_FDI_MASK_CPT) {
1965 for_each_pipe(dev_priv, pipe)
1966 drm_dbg(&dev_priv->drm, " pipe %c FDI IIR: 0x%08x\n",
1967 pipe_name(pipe),
1968 intel_uncore_read(&dev_priv->uncore, FDI_RX_IIR(pipe)));
1969 }
1970
1971 if (pch_iir & SDE_ERROR_CPT)
1972 cpt_serr_int_handler(dev_priv);
1973 }
1974
1975 static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
1976 {
1977 u32 ddi_hotplug_trigger = pch_iir & SDE_DDI_HOTPLUG_MASK_ICP;
1978 u32 tc_hotplug_trigger = pch_iir & SDE_TC_HOTPLUG_MASK_ICP;
1979 u32 pin_mask = 0, long_mask = 0;
1980
1981 if (ddi_hotplug_trigger) {
1982 u32 dig_hotplug_reg;
1983
1984 /* Locking due to DSI native GPIO sequences */
1985 spin_lock(&dev_priv->irq_lock);
1986 dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, SHOTPLUG_CTL_DDI);
1987 intel_uncore_write(&dev_priv->uncore, SHOTPLUG_CTL_DDI, dig_hotplug_reg);
1988 spin_unlock(&dev_priv->irq_lock);
1989
1990 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
1991 ddi_hotplug_trigger, dig_hotplug_reg,
1992 dev_priv->display.hotplug.pch_hpd,
1993 icp_ddi_port_hotplug_long_detect);
1994 }
1995
1996 if (tc_hotplug_trigger) {
1997 u32 dig_hotplug_reg;
1998
1999 dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, SHOTPLUG_CTL_TC);
2000 intel_uncore_write(&dev_priv->uncore, SHOTPLUG_CTL_TC, dig_hotplug_reg);
2001
2002 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2003 tc_hotplug_trigger, dig_hotplug_reg,
2004 dev_priv->display.hotplug.pch_hpd,
2005 icp_tc_port_hotplug_long_detect);
2006 }
2007
2008 if (pin_mask)
2009 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2010
2011 if (pch_iir & SDE_GMBUS_ICP)
2012 gmbus_irq_handler(dev_priv);
2013 }
2014
2015 static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
2016 {
2017 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT &
2018 ~SDE_PORTE_HOTPLUG_SPT;
2019 u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT;
2020 u32 pin_mask = 0, long_mask = 0;
2021
2022 if (hotplug_trigger) {
2023 u32 dig_hotplug_reg;
2024
2025 dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG);
2026 intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG, dig_hotplug_reg);
2027
2028 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2029 hotplug_trigger, dig_hotplug_reg,
2030 dev_priv->display.hotplug.pch_hpd,
2031 spt_port_hotplug_long_detect);
2032 }
2033
2034 if (hotplug2_trigger) {
2035 u32 dig_hotplug_reg;
2036
2037 dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG2);
2038 intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG2, dig_hotplug_reg);
2039
2040 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2041 hotplug2_trigger, dig_hotplug_reg,
2042 dev_priv->display.hotplug.pch_hpd,
2043 spt_port_hotplug2_long_detect);
2044 }
2045
2046 if (pin_mask)
2047 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2048
2049 if (pch_iir & SDE_GMBUS_CPT)
2050 gmbus_irq_handler(dev_priv);
2051 }
2052
2053 static void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv,
2054 u32 hotplug_trigger)
2055 {
2056 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
2057
2058 dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, DIGITAL_PORT_HOTPLUG_CNTRL);
2059 intel_uncore_write(&dev_priv->uncore, DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg);
2060
2061 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2062 hotplug_trigger, dig_hotplug_reg,
2063 dev_priv->display.hotplug.hpd,
2064 ilk_port_hotplug_long_detect);
2065
2066 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2067 }
2068
2069 static void ilk_display_irq_handler(struct drm_i915_private *dev_priv,
2070 u32 de_iir)
2071 {
2072 enum pipe pipe;
2073 u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG;
2074
2075 if (hotplug_trigger)
2076 ilk_hpd_irq_handler(dev_priv, hotplug_trigger);
2077
2078 if (de_iir & DE_AUX_CHANNEL_A)
2079 dp_aux_irq_handler(dev_priv);
2080
2081 if (de_iir & DE_GSE)
2082 intel_opregion_asle_intr(dev_priv);
2083
2084 if (de_iir & DE_POISON)
2085 drm_err(&dev_priv->drm, "Poison interrupt\n");
2086
2087 for_each_pipe(dev_priv, pipe) {
2088 if (de_iir & DE_PIPE_VBLANK(pipe))
2089 intel_handle_vblank(dev_priv, pipe);
2090
2091 if (de_iir & DE_PLANE_FLIP_DONE(pipe))
2092 flip_done_handler(dev_priv, pipe);
2093
2094 if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
2095 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2096
2097 if (de_iir & DE_PIPE_CRC_DONE(pipe))
2098 i9xx_pipe_crc_irq_handler(dev_priv, pipe);
2099 }
2100
2101 /* check event from PCH */
2102 if (de_iir & DE_PCH_EVENT) {
2103 u32 pch_iir = intel_uncore_read(&dev_priv->uncore, SDEIIR);
2104
2105 if (HAS_PCH_CPT(dev_priv))
2106 cpt_irq_handler(dev_priv, pch_iir);
2107 else
2108 ibx_irq_handler(dev_priv, pch_iir);
2109
2110 /* should clear PCH hotplug event before clearing CPU irq */
2111 intel_uncore_write(&dev_priv->uncore, SDEIIR, pch_iir);
2112 }
2113
2114 if (DISPLAY_VER(dev_priv) == 5 && de_iir & DE_PCU_EVENT)
2115 gen5_rps_irq_handler(&to_gt(dev_priv)->rps);
2116 }
2117
2118 static void ivb_display_irq_handler(struct drm_i915_private *dev_priv,
2119 u32 de_iir)
2120 {
2121 enum pipe pipe;
2122 u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB;
2123
2124 if (hotplug_trigger)
2125 ilk_hpd_irq_handler(dev_priv, hotplug_trigger);
2126
2127 if (de_iir & DE_ERR_INT_IVB)
2128 ivb_err_int_handler(dev_priv);
2129
2130 if (de_iir & DE_AUX_CHANNEL_A_IVB)
2131 dp_aux_irq_handler(dev_priv);
2132
2133 if (de_iir & DE_GSE_IVB)
2134 intel_opregion_asle_intr(dev_priv);
2135
2136 for_each_pipe(dev_priv, pipe) {
2137 if (de_iir & DE_PIPE_VBLANK_IVB(pipe))
2138 intel_handle_vblank(dev_priv, pipe);
2139
2140 if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe))
2141 flip_done_handler(dev_priv, pipe);
2142 }
2143
2144 /* check event from PCH */
2145 if (!HAS_PCH_NOP(dev_priv) && (de_iir & DE_PCH_EVENT_IVB)) {
2146 u32 pch_iir = intel_uncore_read(&dev_priv->uncore, SDEIIR);
2147
2148 cpt_irq_handler(dev_priv, pch_iir);
2149
2150 /* clear PCH hotplug event before clearing CPU irq */
2151 intel_uncore_write(&dev_priv->uncore, SDEIIR, pch_iir);
2152 }
2153 }
2154
2155 /*
2156 * To handle irqs with the minimum potential races with fresh interrupts, we:
2157 * 1 - Disable Master Interrupt Control.
2158 * 2 - Find the source(s) of the interrupt.
2159 * 3 - Clear the Interrupt Identity bits (IIR).
2160 * 4 - Process the interrupt(s) that had bits set in the IIRs.
2161 * 5 - Re-enable Master Interrupt Control.
2162 */
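/*
 * On ILK-class hardware those steps map onto roughly this sequence
 * (an illustrative summary of the function below):
 *
 *   1) DEIER &= ~DE_MASTER_IRQ_CONTROL (and SDEIER = 0 for the PCH)
 *   2) read GTIIR / DEIIR / GEN6_PMIIR
 *   3) write each value back to clear it
 *   4) dispatch to the gt/display/rps handlers
 *   5) restore DEIER (and SDEIER)
 */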
2163 static irqreturn_t ilk_irq_handler(int irq, void *arg)
2164 {
2165 struct drm_i915_private *i915 = arg;
2166 void __iomem * const regs = i915->uncore.regs;
2167 u32 de_iir, gt_iir, de_ier, sde_ier = 0;
2168 irqreturn_t ret = IRQ_NONE;
2169
2170 if (unlikely(!intel_irqs_enabled(i915)))
2171 return IRQ_NONE;
2172
2173 /* IRQs are synced during runtime_suspend, we don't require a wakeref */
2174 disable_rpm_wakeref_asserts(&i915->runtime_pm);
2175
2176 /* disable master interrupt before clearing iir */
2177 de_ier = raw_reg_read(regs, DEIER);
2178 raw_reg_write(regs, DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
2179
2180 /* Disable south interrupts. We'll only write to SDEIIR once, so further
2181 * interrupts will be stored on its back queue, and then we'll be
2182 * able to process them after we restore SDEIER (as soon as we restore
2183 * it, we'll get an interrupt if SDEIIR still has something to process
2184 * due to its back queue). */
2185 if (!HAS_PCH_NOP(i915)) {
2186 sde_ier = raw_reg_read(regs, SDEIER);
2187 raw_reg_write(regs, SDEIER, 0);
2188 }
2189
2190 /* Find, clear, then process each source of interrupt */
2191
2192 gt_iir = raw_reg_read(regs, GTIIR);
2193 if (gt_iir) {
2194 raw_reg_write(regs, GTIIR, gt_iir);
2195 if (GRAPHICS_VER(i915) >= 6)
2196 gen6_gt_irq_handler(to_gt(i915), gt_iir);
2197 else
2198 gen5_gt_irq_handler(to_gt(i915), gt_iir);
2199 ret = IRQ_HANDLED;
2200 }
2201
2202 de_iir = raw_reg_read(regs, DEIIR);
2203 if (de_iir) {
2204 raw_reg_write(regs, DEIIR, de_iir);
2205 if (DISPLAY_VER(i915) >= 7)
2206 ivb_display_irq_handler(i915, de_iir);
2207 else
2208 ilk_display_irq_handler(i915, de_iir);
2209 ret = IRQ_HANDLED;
2210 }
2211
2212 if (GRAPHICS_VER(i915) >= 6) {
2213 u32 pm_iir = raw_reg_read(regs, GEN6_PMIIR);
2214 if (pm_iir) {
2215 raw_reg_write(regs, GEN6_PMIIR, pm_iir);
2216 gen6_rps_irq_handler(&to_gt(i915)->rps, pm_iir);
2217 ret = IRQ_HANDLED;
2218 }
2219 }
2220
2221 raw_reg_write(regs, DEIER, de_ier);
2222 if (sde_ier)
2223 raw_reg_write(regs, SDEIER, sde_ier);
2224
2225 pmu_irq_stats(i915, ret);
2226
2227 /* IRQs are synced during runtime_suspend, we don't require a wakeref */
2228 enable_rpm_wakeref_asserts(&i915->runtime_pm);
2229
2230 return ret;
2231 }
2232
2233 static void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv,
2234 u32 hotplug_trigger)
2235 {
2236 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
2237
2238 dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG);
2239 intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG, dig_hotplug_reg);
2240
2241 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2242 hotplug_trigger, dig_hotplug_reg,
2243 dev_priv->display.hotplug.hpd,
2244 bxt_port_hotplug_long_detect);
2245
2246 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2247 }
2248
2249 static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
2250 {
2251 u32 pin_mask = 0, long_mask = 0;
2252 u32 trigger_tc = iir & GEN11_DE_TC_HOTPLUG_MASK;
2253 u32 trigger_tbt = iir & GEN11_DE_TBT_HOTPLUG_MASK;
2254
2255 if (trigger_tc) {
2256 u32 dig_hotplug_reg;
2257
2258 dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, GEN11_TC_HOTPLUG_CTL);
2259 intel_uncore_write(&dev_priv->uncore, GEN11_TC_HOTPLUG_CTL, dig_hotplug_reg);
2260
2261 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2262 trigger_tc, dig_hotplug_reg,
2263 dev_priv->display.hotplug.hpd,
2264 gen11_port_hotplug_long_detect);
2265 }
2266
2267 if (trigger_tbt) {
2268 u32 dig_hotplug_reg;
2269
2270 dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, GEN11_TBT_HOTPLUG_CTL);
2271 intel_uncore_write(&dev_priv->uncore, GEN11_TBT_HOTPLUG_CTL, dig_hotplug_reg);
2272
2273 intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
2274 trigger_tbt, dig_hotplug_reg,
2275 dev_priv->display.hotplug.hpd,
2276 gen11_port_hotplug_long_detect);
2277 }
2278
2279 if (pin_mask)
2280 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2281 else
2282 drm_err(&dev_priv->drm,
2283 "Unexpected DE HPD interrupt 0x%08x\n", iir);
2284 }
2285
2286 static u32 gen8_de_port_aux_mask(struct drm_i915_private *dev_priv)
2287 {
2288 u32 mask;
2289
2290 if (DISPLAY_VER(dev_priv) >= 13)
2291 return TGL_DE_PORT_AUX_DDIA |
2292 TGL_DE_PORT_AUX_DDIB |
2293 TGL_DE_PORT_AUX_DDIC |
2294 XELPD_DE_PORT_AUX_DDID |
2295 XELPD_DE_PORT_AUX_DDIE |
2296 TGL_DE_PORT_AUX_USBC1 |
2297 TGL_DE_PORT_AUX_USBC2 |
2298 TGL_DE_PORT_AUX_USBC3 |
2299 TGL_DE_PORT_AUX_USBC4;
2300 else if (DISPLAY_VER(dev_priv) >= 12)
2301 return TGL_DE_PORT_AUX_DDIA |
2302 TGL_DE_PORT_AUX_DDIB |
2303 TGL_DE_PORT_AUX_DDIC |
2304 TGL_DE_PORT_AUX_USBC1 |
2305 TGL_DE_PORT_AUX_USBC2 |
2306 TGL_DE_PORT_AUX_USBC3 |
2307 TGL_DE_PORT_AUX_USBC4 |
2308 TGL_DE_PORT_AUX_USBC5 |
2309 TGL_DE_PORT_AUX_USBC6;
2310
2312 mask = GEN8_AUX_CHANNEL_A;
2313 if (DISPLAY_VER(dev_priv) >= 9)
2314 mask |= GEN9_AUX_CHANNEL_B |
2315 GEN9_AUX_CHANNEL_C |
2316 GEN9_AUX_CHANNEL_D;
2317
2318 if (DISPLAY_VER(dev_priv) == 11) {
2319 mask |= ICL_AUX_CHANNEL_F;
2320 mask |= ICL_AUX_CHANNEL_E;
2321 }
2322
2323 return mask;
2324 }
2325
2326 static u32 gen8_de_pipe_fault_mask(struct drm_i915_private *dev_priv)
2327 {
2328 if (DISPLAY_VER(dev_priv) >= 13 || HAS_D12_PLANE_MINIMIZATION(dev_priv))
2329 return RKL_DE_PIPE_IRQ_FAULT_ERRORS;
2330 else if (DISPLAY_VER(dev_priv) >= 11)
2331 return GEN11_DE_PIPE_IRQ_FAULT_ERRORS;
2332 else if (DISPLAY_VER(dev_priv) >= 9)
2333 return GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
2334 else
2335 return GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
2336 }
2337
2338 static void
2339 gen8_de_misc_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
2340 {
2341 bool found = false;
2342
2343 if (iir & GEN8_DE_MISC_GSE) {
2344 intel_opregion_asle_intr(dev_priv);
2345 found = true;
2346 }
2347
2348 if (iir & GEN8_DE_EDP_PSR) {
2349 struct intel_encoder *encoder;
2350 u32 psr_iir;
2351 i915_reg_t iir_reg;
2352
2353 for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
2354 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2355
2356 if (DISPLAY_VER(dev_priv) >= 12)
2357 iir_reg = TRANS_PSR_IIR(intel_dp->psr.transcoder);
2358 else
2359 iir_reg = EDP_PSR_IIR;
2360
2361 psr_iir = intel_uncore_read(&dev_priv->uncore, iir_reg);
2362 intel_uncore_write(&dev_priv->uncore, iir_reg, psr_iir);
2363
2364 if (psr_iir)
2365 found = true;
2366
2367 intel_psr_irq_handler(intel_dp, psr_iir);
2368
2369 /* prior to GEN12 there is only one EDP PSR */
2370 if (DISPLAY_VER(dev_priv) < 12)
2371 break;
2372 }
2373 }
2374
2375 if (!found)
2376 drm_err(&dev_priv->drm, "Unexpected DE Misc interrupt\n");
2377 }
2378
2379 static void gen11_dsi_te_interrupt_handler(struct drm_i915_private *dev_priv,
2380 u32 te_trigger)
2381 {
2382 enum pipe pipe = INVALID_PIPE;
2383 enum transcoder dsi_trans;
2384 enum port port;
2385 u32 val, tmp;
2386
2387 /*
2388 * In case of dual link, TE comes from DSI_1;
2389 * this is to check if dual link is enabled
2390 */
2391 val = intel_uncore_read(&dev_priv->uncore, TRANS_DDI_FUNC_CTL2(TRANSCODER_DSI_0));
2392 val &= PORT_SYNC_MODE_ENABLE;
2393
2394 /*
2395 * if dual link is enabled, then read DSI_0
2396 * transcoder registers
2397 */
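/*
 * Decoded as a table (a sketch of the selection below):
 *
 *   te_trigger   dual link   transcoder that is read
 *   DSI0_TE      any         TRANSCODER_DSI_0 (PORT_A)
 *   DSI1_TE      yes         TRANSCODER_DSI_0 (PORT_A)
 *   DSI1_TE      no          TRANSCODER_DSI_1 (PORT_B)
 */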
2398 port = ((te_trigger & DSI1_TE && val) || (te_trigger & DSI0_TE)) ?
2399 PORT_A : PORT_B;
2400 dsi_trans = (port == PORT_A) ? TRANSCODER_DSI_0 : TRANSCODER_DSI_1;
2401
2402 /* Check if DSI configured in command mode */
2403 val = intel_uncore_read(&dev_priv->uncore, DSI_TRANS_FUNC_CONF(dsi_trans));
2404 val = val & OP_MODE_MASK;
2405
2406 if (val != CMD_MODE_NO_GATE && val != CMD_MODE_TE_GATE) {
2407 drm_err(&dev_priv->drm, "DSI transcoder not configured in command mode\n");
2408 return;
2409 }
2410
2411 /* Get PIPE for handling VBLANK event */
2412 val = intel_uncore_read(&dev_priv->uncore, TRANS_DDI_FUNC_CTL(dsi_trans));
2413 switch (val & TRANS_DDI_EDP_INPUT_MASK) {
2414 case TRANS_DDI_EDP_INPUT_A_ON:
2415 pipe = PIPE_A;
2416 break;
2417 case TRANS_DDI_EDP_INPUT_B_ONOFF:
2418 pipe = PIPE_B;
2419 break;
2420 case TRANS_DDI_EDP_INPUT_C_ONOFF:
2421 pipe = PIPE_C;
2422 break;
2423 default:
2424 drm_err(&dev_priv->drm, "Invalid PIPE\n");
2425 return;
2426 }
2427
2428 intel_handle_vblank(dev_priv, pipe);
2429
2430 /* clear TE in dsi IIR */
2431 port = (te_trigger & DSI1_TE) ? PORT_B : PORT_A;
2432 tmp = intel_uncore_read(&dev_priv->uncore, DSI_INTR_IDENT_REG(port));
2433 intel_uncore_write(&dev_priv->uncore, DSI_INTR_IDENT_REG(port), tmp);
2434 }
2435
2436 static u32 gen8_de_pipe_flip_done_mask(struct drm_i915_private *i915)
2437 {
2438 if (DISPLAY_VER(i915) >= 9)
2439 return GEN9_PIPE_PLANE1_FLIP_DONE;
2440 else
2441 return GEN8_PIPE_PRIMARY_FLIP_DONE;
2442 }
2443
2444 u32 gen8_de_pipe_underrun_mask(struct drm_i915_private *dev_priv)
2445 {
2446 u32 mask = GEN8_PIPE_FIFO_UNDERRUN;
2447
2448 if (DISPLAY_VER(dev_priv) >= 13)
2449 mask |= XELPD_PIPE_SOFT_UNDERRUN |
2450 XELPD_PIPE_HARD_UNDERRUN;
2451
2452 return mask;
2453 }
2454
2455 static irqreturn_t
2456 gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
2457 {
2458 irqreturn_t ret = IRQ_NONE;
2459 u32 iir;
2460 enum pipe pipe;
2461
2462 drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_DISPLAY(dev_priv));
2463
2464 if (master_ctl & GEN8_DE_MISC_IRQ) {
2465 iir = intel_uncore_read(&dev_priv->uncore, GEN8_DE_MISC_IIR);
2466 if (iir) {
2467 intel_uncore_write(&dev_priv->uncore, GEN8_DE_MISC_IIR, iir);
2468 ret = IRQ_HANDLED;
2469 gen8_de_misc_irq_handler(dev_priv, iir);
2470 } else {
2471 drm_err(&dev_priv->drm,
2472 "The master control interrupt lied (DE MISC)!\n");
2473 }
2474 }
2475
2476 if (DISPLAY_VER(dev_priv) >= 11 && (master_ctl & GEN11_DE_HPD_IRQ)) {
2477 iir = intel_uncore_read(&dev_priv->uncore, GEN11_DE_HPD_IIR);
2478 if (iir) {
2479 intel_uncore_write(&dev_priv->uncore, GEN11_DE_HPD_IIR, iir);
2480 ret = IRQ_HANDLED;
2481 gen11_hpd_irq_handler(dev_priv, iir);
2482 } else {
2483 drm_err(&dev_priv->drm,
2484 "The master control interrupt lied, (DE HPD)!\n");
2485 }
2486 }
2487
2488 if (master_ctl & GEN8_DE_PORT_IRQ) {
2489 iir = intel_uncore_read(&dev_priv->uncore, GEN8_DE_PORT_IIR);
2490 if (iir) {
2491 bool found = false;
2492
2493 intel_uncore_write(&dev_priv->uncore, GEN8_DE_PORT_IIR, iir);
2494 ret = IRQ_HANDLED;
2495
2496 if (iir & gen8_de_port_aux_mask(dev_priv)) {
2497 dp_aux_irq_handler(dev_priv);
2498 found = true;
2499 }
2500
2501 if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
2502 u32 hotplug_trigger = iir & BXT_DE_PORT_HOTPLUG_MASK;
2503
2504 if (hotplug_trigger) {
2505 bxt_hpd_irq_handler(dev_priv, hotplug_trigger);
2506 found = true;
2507 }
2508 } else if (IS_BROADWELL(dev_priv)) {
2509 u32 hotplug_trigger = iir & BDW_DE_PORT_HOTPLUG_MASK;
2510
2511 if (hotplug_trigger) {
2512 ilk_hpd_irq_handler(dev_priv, hotplug_trigger);
2513 found = true;
2514 }
2515 }
2516
2517 if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) &&
2518 (iir & BXT_DE_PORT_GMBUS)) {
2519 gmbus_irq_handler(dev_priv);
2520 found = true;
2521 }
2522
2523 if (DISPLAY_VER(dev_priv) >= 11) {
2524 u32 te_trigger = iir & (DSI0_TE | DSI1_TE);
2525
2526 if (te_trigger) {
2527 gen11_dsi_te_interrupt_handler(dev_priv, te_trigger);
2528 found = true;
2529 }
2530 }
2531
2532 if (!found)
2533 drm_err(&dev_priv->drm,
2534 "Unexpected DE Port interrupt\n");
2535 } else
2537 drm_err(&dev_priv->drm,
2538 "The master control interrupt lied (DE PORT)!\n");
2539 }
2540
2541 for_each_pipe(dev_priv, pipe) {
2542 u32 fault_errors;
2543
2544 if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
2545 continue;
2546
2547 iir = intel_uncore_read(&dev_priv->uncore, GEN8_DE_PIPE_IIR(pipe));
2548 if (!iir) {
2549 drm_err(&dev_priv->drm,
2550 "The master control interrupt lied (DE PIPE)!\n");
2551 continue;
2552 }
2553
2554 ret = IRQ_HANDLED;
2555 intel_uncore_write(&dev_priv->uncore, GEN8_DE_PIPE_IIR(pipe), iir);
2556
2557 if (iir & GEN8_PIPE_VBLANK)
2558 intel_handle_vblank(dev_priv, pipe);
2559
2560 if (iir & gen8_de_pipe_flip_done_mask(dev_priv))
2561 flip_done_handler(dev_priv, pipe);
2562
2563 if (iir & GEN8_PIPE_CDCLK_CRC_DONE)
2564 hsw_pipe_crc_irq_handler(dev_priv, pipe);
2565
2566 if (iir & gen8_de_pipe_underrun_mask(dev_priv))
2567 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2568
2569 fault_errors = iir & gen8_de_pipe_fault_mask(dev_priv);
2570 if (fault_errors)
2571 drm_err(&dev_priv->drm,
2572 "Fault errors on pipe %c: 0x%08x\n",
2573 pipe_name(pipe),
2574 fault_errors);
2575 }
2576
2577 if (HAS_PCH_SPLIT(dev_priv) && !HAS_PCH_NOP(dev_priv) &&
2578 master_ctl & GEN8_DE_PCH_IRQ) {
2579 /*
2580 * FIXME(BDW): Assume for now that the new interrupt handling
2581 * scheme also closed the SDE interrupt handling race we've seen
2582 * on older pch-split platforms. But this needs testing.
2583 */
2584 iir = intel_uncore_read(&dev_priv->uncore, SDEIIR);
2585 if (iir) {
2586 intel_uncore_write(&dev_priv->uncore, SDEIIR, iir);
2587 ret = IRQ_HANDLED;
2588
2589 if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
2590 icp_irq_handler(dev_priv, iir);
2591 else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
2592 spt_irq_handler(dev_priv, iir);
2593 else
2594 cpt_irq_handler(dev_priv, iir);
2595 } else {
2596 /*
2597 * Like on previous PCH there seems to be something
2598 * fishy going on with forwarding PCH interrupts.
2599 */
2600 drm_dbg(&dev_priv->drm,
2601 "The master control interrupt lied (SDE)!\n");
2602 }
2603 }
2604
2605 return ret;
2606 }
2607
2608 static inline u32 gen8_master_intr_disable(void __iomem * const regs)
2609 {
2610 raw_reg_write(regs, GEN8_MASTER_IRQ, 0);
2611
2612 /*
2613 * Now with master disabled, get a sample of level indications
2614 * for this interrupt. Indications will be cleared on related acks.
2615 * New indications can and will light up during processing,
2616 * and will generate new interrupt after enabling master.
2617 */
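/*
 * Typical caller pattern (see gen8_irq_handler() below):
 *
 *   master_ctl = gen8_master_intr_disable(regs);
 *   ack and handle the sources flagged in master_ctl;
 *   gen8_master_intr_enable(regs);
 */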
2618 return raw_reg_read(regs, GEN8_MASTER_IRQ);
2619 }
2620
2621 static inline void gen8_master_intr_enable(void __iomem * const regs)
2622 {
2623 raw_reg_write(regs, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
2624 }
2625
2626 static irqreturn_t gen8_irq_handler(int irq, void *arg)
2627 {
2628 struct drm_i915_private *dev_priv = arg;
2629 void __iomem * const regs = dev_priv->uncore.regs;
2630 u32 master_ctl;
2631
2632 if (!intel_irqs_enabled(dev_priv))
2633 return IRQ_NONE;
2634
2635 master_ctl = gen8_master_intr_disable(regs);
2636 if (!master_ctl) {
2637 gen8_master_intr_enable(regs);
2638 return IRQ_NONE;
2639 }
2640
2641 /* Find, queue (onto bottom-halves), then clear each source */
2642 gen8_gt_irq_handler(to_gt(dev_priv), master_ctl);
2643
2644 /* IRQs are synced during runtime_suspend, we don't require a wakeref */
2645 if (master_ctl & ~GEN8_GT_IRQS) {
2646 disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
2647 gen8_de_irq_handler(dev_priv, master_ctl);
2648 enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
2649 }
2650
2651 gen8_master_intr_enable(regs);
2652
2653 pmu_irq_stats(dev_priv, IRQ_HANDLED);
2654
2655 return IRQ_HANDLED;
2656 }
2657
2658 static u32
2659 gen11_gu_misc_irq_ack(struct drm_i915_private *i915, const u32 master_ctl)
2660 {
2661 void __iomem * const regs = i915->uncore.regs;
2662 u32 iir;
2663
2664 if (!(master_ctl & GEN11_GU_MISC_IRQ))
2665 return 0;
2666
2667 iir = raw_reg_read(regs, GEN11_GU_MISC_IIR);
2668 if (likely(iir))
2669 raw_reg_write(regs, GEN11_GU_MISC_IIR, iir);
2670
2671 return iir;
2672 }
2673
2674 static void
2675 gen11_gu_misc_irq_handler(struct drm_i915_private *i915, const u32 iir)
2676 {
2677 if (iir & GEN11_GU_MISC_GSE)
2678 intel_opregion_asle_intr(i915);
2679 }
2680
2681 static inline u32 gen11_master_intr_disable(void __iomem * const regs)
2682 {
2683 raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, 0);
2684
2685 /*
2686 * Now with master disabled, get a sample of level indications
2687 * for this interrupt. Indications will be cleared on related acks.
2688 * New indications can and will light up during processing,
2689 * and will generate new interrupt after enabling master.
2690 */
2691 return raw_reg_read(regs, GEN11_GFX_MSTR_IRQ);
2692 }
2693
2694 static inline void gen11_master_intr_enable(void __iomem * const regs)
2695 {
2696 raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ);
2697 }
2698
2699 static void
2700 gen11_display_irq_handler(struct drm_i915_private *i915)
2701 {
2702 void __iomem * const regs = i915->uncore.regs;
2703 const u32 disp_ctl = raw_reg_read(regs, GEN11_DISPLAY_INT_CTL);
2704
2705 disable_rpm_wakeref_asserts(&i915->runtime_pm);
2706 /*
2707 * GEN11_DISPLAY_INT_CTL has same format as GEN8_MASTER_IRQ
2708 * for the display related bits.
2709 */
2710 raw_reg_write(regs, GEN11_DISPLAY_INT_CTL, 0x0);
2711 gen8_de_irq_handler(i915, disp_ctl);
2712 raw_reg_write(regs, GEN11_DISPLAY_INT_CTL,
2713 GEN11_DISPLAY_IRQ_ENABLE);
2714
2715 enable_rpm_wakeref_asserts(&i915->runtime_pm);
2716 }
2717
2718 static irqreturn_t gen11_irq_handler(int irq, void *arg)
2719 {
2720 struct drm_i915_private *i915 = arg;
2721 void __iomem * const regs = i915->uncore.regs;
2722 struct intel_gt *gt = to_gt(i915);
2723 u32 master_ctl;
2724 u32 gu_misc_iir;
2725
2726 if (!intel_irqs_enabled(i915))
2727 return IRQ_NONE;
2728
2729 master_ctl = gen11_master_intr_disable(regs);
2730 if (!master_ctl) {
2731 gen11_master_intr_enable(regs);
2732 return IRQ_NONE;
2733 }
2734
2735 /* Find, queue (onto bottom-halves), then clear each source */
2736 gen11_gt_irq_handler(gt, master_ctl);
2737
2738 /* IRQs are synced during runtime_suspend, we don't require a wakeref */
2739 if (master_ctl & GEN11_DISPLAY_IRQ)
2740 gen11_display_irq_handler(i915);
2741
2742 gu_misc_iir = gen11_gu_misc_irq_ack(i915, master_ctl);
2743
2744 gen11_master_intr_enable(regs);
2745
2746 gen11_gu_misc_irq_handler(i915, gu_misc_iir);
2747
2748 pmu_irq_stats(i915, IRQ_HANDLED);
2749
2750 return IRQ_HANDLED;
2751 }
2752
2753 static inline u32 dg1_master_intr_disable(void __iomem * const regs)
2754 {
2755 u32 val;
2756
2757 /* First disable interrupts */
2758 raw_reg_write(regs, DG1_MSTR_TILE_INTR, 0);
2759
2760 /* Get the indication levels and ack the master unit */
2761 val = raw_reg_read(regs, DG1_MSTR_TILE_INTR);
2762 if (unlikely(!val))
2763 return 0;
2764
2765 raw_reg_write(regs, DG1_MSTR_TILE_INTR, val);
2766
2767 return val;
2768 }
2769
2770 static inline void dg1_master_intr_enable(void __iomem * const regs)
2771 {
2772 raw_reg_write(regs, DG1_MSTR_TILE_INTR, DG1_MSTR_IRQ);
2773 }
2774
2775 static irqreturn_t dg1_irq_handler(int irq, void *arg)
2776 {
2777 struct drm_i915_private * const i915 = arg;
2778 struct intel_gt *gt = to_gt(i915);
2779 void __iomem * const regs = gt->uncore->regs;
2780 u32 master_tile_ctl, master_ctl;
2781 u32 gu_misc_iir;
2782
2783 if (!intel_irqs_enabled(i915))
2784 return IRQ_NONE;
2785
2786 master_tile_ctl = dg1_master_intr_disable(regs);
2787 if (!master_tile_ctl) {
2788 dg1_master_intr_enable(regs);
2789 return IRQ_NONE;
2790 }
2791
2792 /* FIXME: we only support tile 0 for now. */
2793 if (master_tile_ctl & DG1_MSTR_TILE(0)) {
2794 master_ctl = raw_reg_read(regs, GEN11_GFX_MSTR_IRQ);
2795 raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, master_ctl);
2796 } else {
2797 DRM_ERROR("Tile not supported: 0x%08x\n", master_tile_ctl);
2798 dg1_master_intr_enable(regs);
2799 return IRQ_NONE;
2800 }
2801
2802 gen11_gt_irq_handler(gt, master_ctl);
2803
2804 if (master_ctl & GEN11_DISPLAY_IRQ)
2805 gen11_display_irq_handler(i915);
2806
2807 gu_misc_iir = gen11_gu_misc_irq_ack(i915, master_ctl);
2808
2809 dg1_master_intr_enable(regs);
2810
2811 gen11_gu_misc_irq_handler(i915, gu_misc_iir);
2812
2813 pmu_irq_stats(i915, IRQ_HANDLED);
2814
2815 return IRQ_HANDLED;
2816 }
2817
2818 /* Called from drm generic code, passed 'crtc' which
2819 * we use as a pipe index
2820 */
2821 int i8xx_enable_vblank(struct drm_crtc *crtc)
2822 {
2823 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2824 enum pipe pipe = to_intel_crtc(crtc)->pipe;
2825 unsigned long irqflags;
2826
2827 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2828 i915_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
2829 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2830
2831 return 0;
2832 }
2833
2834 int i915gm_enable_vblank(struct drm_crtc *crtc)
2835 {
2836 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2837
2838 /*
2839 * Vblank interrupts fail to wake the device up from C2+.
2840 * Disabling render clock gating during C-states avoids
2841 * the problem. There is a small power cost so we do this
2842 * only when vblank interrupts are actually enabled.
2843 */
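/*
 * vblank_enabled acts as a plain refcount: the first enable (0 -> 1)
 * disables render clock gating via SCPD0, and the last disable in
 * i915gm_disable_vblank() re-enables it.
 */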
2844 if (dev_priv->vblank_enabled++ == 0)
2845 intel_uncore_write(&dev_priv->uncore, SCPD0, _MASKED_BIT_ENABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
2846
2847 return i8xx_enable_vblank(crtc);
2848 }
2849
2850 int i965_enable_vblank(struct drm_crtc *crtc)
2851 {
2852 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2853 enum pipe pipe = to_intel_crtc(crtc)->pipe;
2854 unsigned long irqflags;
2855
2856 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2857 i915_enable_pipestat(dev_priv, pipe,
2858 PIPE_START_VBLANK_INTERRUPT_STATUS);
2859 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2860
2861 return 0;
2862 }
2863
2864 int ilk_enable_vblank(struct drm_crtc *crtc)
2865 {
2866 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2867 enum pipe pipe = to_intel_crtc(crtc)->pipe;
2868 unsigned long irqflags;
2869 u32 bit = DISPLAY_VER(dev_priv) >= 7 ?
2870 DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
2871
2872 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2873 ilk_enable_display_irq(dev_priv, bit);
2874 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2875
2876 /* Even though there is no DMC, frame counter can get stuck when
2877 * PSR is active as no frames are generated.
2878 */
2879 if (HAS_PSR(dev_priv))
2880 drm_crtc_vblank_restore(crtc);
2881
2882 return 0;
2883 }
2884
2885 static bool gen11_dsi_configure_te(struct intel_crtc *intel_crtc,
2886 bool enable)
2887 {
2888 struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
2889 enum port port;
2890 u32 tmp;
2891
2892 if (!(intel_crtc->mode_flags &
2893 (I915_MODE_FLAG_DSI_USE_TE1 | I915_MODE_FLAG_DSI_USE_TE0)))
2894 return false;
2895
2896 /* for dual link cases we consider TE from slave */
2897 if (intel_crtc->mode_flags & I915_MODE_FLAG_DSI_USE_TE1)
2898 port = PORT_B;
2899 else
2900 port = PORT_A;
2901
2902 tmp = intel_uncore_read(&dev_priv->uncore, DSI_INTR_MASK_REG(port));
2903 if (enable)
2904 tmp &= ~DSI_TE_EVENT;
2905 else
2906 tmp |= DSI_TE_EVENT;
2907
2908 intel_uncore_write(&dev_priv->uncore, DSI_INTR_MASK_REG(port), tmp);
2909
2910 tmp = intel_uncore_read(&dev_priv->uncore, DSI_INTR_IDENT_REG(port));
2911 intel_uncore_write(&dev_priv->uncore, DSI_INTR_IDENT_REG(port), tmp);
2912
2913 return true;
2914 }
2915
2916 int bdw_enable_vblank(struct drm_crtc *_crtc)
2917 {
2918 struct intel_crtc *crtc = to_intel_crtc(_crtc);
2919 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2920 enum pipe pipe = crtc->pipe;
2921 unsigned long irqflags;
2922
2923 if (gen11_dsi_configure_te(crtc, true))
2924 return 0;
2925
2926 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2927 bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
2928 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2929
2930 /* Even if there is no DMC, frame counter can get stuck when
2931 * PSR is active as no frames are generated, so check only for PSR.
2932 */
2933 if (HAS_PSR(dev_priv))
2934 drm_crtc_vblank_restore(&crtc->base);
2935
2936 return 0;
2937 }
2938
2939 /* Called from drm generic code, passed 'crtc' which
2940 * we use as a pipe index
2941 */
2942 void i8xx_disable_vblank(struct drm_crtc *crtc)
2943 {
2944 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2945 enum pipe pipe = to_intel_crtc(crtc)->pipe;
2946 unsigned long irqflags;
2947
2948 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2949 i915_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
2950 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2951 }
2952
2953 void i915gm_disable_vblank(struct drm_crtc *crtc)
2954 {
2955 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2956
2957 i8xx_disable_vblank(crtc);
2958
2959 if (--dev_priv->vblank_enabled == 0)
2960 intel_uncore_write(&dev_priv->uncore, SCPD0, _MASKED_BIT_DISABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
2961 }
2962
2963 void i965_disable_vblank(struct drm_crtc *crtc)
2964 {
2965 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2966 enum pipe pipe = to_intel_crtc(crtc)->pipe;
2967 unsigned long irqflags;
2968
2969 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2970 i915_disable_pipestat(dev_priv, pipe,
2971 PIPE_START_VBLANK_INTERRUPT_STATUS);
2972 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2973 }
2974
2975 void ilk_disable_vblank(struct drm_crtc *crtc)
2976 {
2977 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2978 enum pipe pipe = to_intel_crtc(crtc)->pipe;
2979 unsigned long irqflags;
2980 u32 bit = DISPLAY_VER(dev_priv) >= 7 ?
2981 DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);
2982
2983 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2984 ilk_disable_display_irq(dev_priv, bit);
2985 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2986 }
2987
2988 void bdw_disable_vblank(struct drm_crtc *_crtc)
2989 {
2990 struct intel_crtc *crtc = to_intel_crtc(_crtc);
2991 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2992 enum pipe pipe = crtc->pipe;
2993 unsigned long irqflags;
2994
2995 if (gen11_dsi_configure_te(crtc, false))
2996 return;
2997
2998 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2999 bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
3000 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3001 }
3002
3003 static void ibx_irq_reset(struct drm_i915_private *dev_priv)
3004 {
3005 struct intel_uncore *uncore = &dev_priv->uncore;
3006
3007 if (HAS_PCH_NOP(dev_priv))
3008 return;
3009
3010 GEN3_IRQ_RESET(uncore, SDE);
3011
3012 if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
3013 intel_uncore_write(&dev_priv->uncore, SERR_INT, 0xffffffff);
3014 }
3015
3016 static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
3017 {
3018 struct intel_uncore *uncore = &dev_priv->uncore;
3019
3020 if (IS_CHERRYVIEW(dev_priv))
3021 intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
3022 else
3023 intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK_VLV);
3024
3025 i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0);
3026 intel_uncore_write(uncore, PORT_HOTPLUG_STAT, intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_STAT));
3027
3028 i9xx_pipestat_irq_reset(dev_priv);
3029
3030 GEN3_IRQ_RESET(uncore, VLV_);
3031 dev_priv->irq_mask = ~0u;
3032 }
3033
3034 static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
3035 {
3036 struct intel_uncore *uncore = &dev_priv->uncore;
3037
3038 u32 pipestat_mask;
3039 u32 enable_mask;
3040 enum pipe pipe;
3041
3042 pipestat_mask = PIPE_CRC_DONE_INTERRUPT_STATUS;
3043
3044 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
3045 for_each_pipe(dev_priv, pipe)
3046 i915_enable_pipestat(dev_priv, pipe, pipestat_mask);
3047
3048 enable_mask = I915_DISPLAY_PORT_INTERRUPT |
3049 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3050 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3051 I915_LPE_PIPE_A_INTERRUPT |
3052 I915_LPE_PIPE_B_INTERRUPT;
3053
3054 if (IS_CHERRYVIEW(dev_priv))
3055 enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT |
3056 I915_LPE_PIPE_C_INTERRUPT;
3057
3058 drm_WARN_ON(&dev_priv->drm, dev_priv->irq_mask != ~0u);
3059
3060 dev_priv->irq_mask = ~enable_mask;
3061
3062 GEN3_IRQ_INIT(uncore, VLV_, dev_priv->irq_mask, enable_mask);
3063 }
3064
3065 /* drm_dma.h hooks
3066 */
3067 static void ilk_irq_reset(struct drm_i915_private *dev_priv)
3068 {
3069 struct intel_uncore *uncore = &dev_priv->uncore;
3070
3071 GEN3_IRQ_RESET(uncore, DE);
3072 dev_priv->irq_mask = ~0u;
3073
3074 if (GRAPHICS_VER(dev_priv) == 7)
3075 intel_uncore_write(uncore, GEN7_ERR_INT, 0xffffffff);
3076
3077 if (IS_HASWELL(dev_priv)) {
3078 intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
3079 intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
3080 }
3081
3082 gen5_gt_irq_reset(to_gt(dev_priv));
3083
3084 ibx_irq_reset(dev_priv);
3085 }
3086
3087 static void valleyview_irq_reset(struct drm_i915_private *dev_priv)
3088 {
3089 intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, 0);
3090 intel_uncore_posting_read(&dev_priv->uncore, VLV_MASTER_IER);
3091
3092 gen5_gt_irq_reset(to_gt(dev_priv));
3093
3094 spin_lock_irq(&dev_priv->irq_lock);
3095 if (dev_priv->display_irqs_enabled)
3096 vlv_display_irq_reset(dev_priv);
3097 spin_unlock_irq(&dev_priv->irq_lock);
3098 }
3099
3100 static void gen8_display_irq_reset(struct drm_i915_private *dev_priv)
3101 {
3102 struct intel_uncore *uncore = &dev_priv->uncore;
3103 enum pipe pipe;
3104
3105 if (!HAS_DISPLAY(dev_priv))
3106 return;
3107
3108 intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
3109 intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
3110
3111 for_each_pipe(dev_priv, pipe)
3112 if (intel_display_power_is_enabled(dev_priv,
3113 POWER_DOMAIN_PIPE(pipe)))
3114 GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);
3115
3116 GEN3_IRQ_RESET(uncore, GEN8_DE_PORT_);
3117 GEN3_IRQ_RESET(uncore, GEN8_DE_MISC_);
3118 }
3119
3120 static void gen8_irq_reset(struct drm_i915_private *dev_priv)
3121 {
3122 struct intel_uncore *uncore = &dev_priv->uncore;
3123
3124 gen8_master_intr_disable(dev_priv->uncore.regs);
3125
3126 gen8_gt_irq_reset(to_gt(dev_priv));
3127 gen8_display_irq_reset(dev_priv);
3128 GEN3_IRQ_RESET(uncore, GEN8_PCU_);
3129
3130 if (HAS_PCH_SPLIT(dev_priv))
3131 ibx_irq_reset(dev_priv);
3133 }
3134
3135 static void gen11_display_irq_reset(struct drm_i915_private *dev_priv)
3136 {
3137 struct intel_uncore *uncore = &dev_priv->uncore;
3138 enum pipe pipe;
3139 u32 trans_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
3140 BIT(TRANSCODER_C) | BIT(TRANSCODER_D);
3141
3142 if (!HAS_DISPLAY(dev_priv))
3143 return;
3144
3145 intel_uncore_write(uncore, GEN11_DISPLAY_INT_CTL, 0);
3146
3147 if (DISPLAY_VER(dev_priv) >= 12) {
3148 enum transcoder trans;
3149
3150 for_each_cpu_transcoder_masked(dev_priv, trans, trans_mask) {
3151 enum intel_display_power_domain domain;
3152
3153 domain = POWER_DOMAIN_TRANSCODER(trans);
3154 if (!intel_display_power_is_enabled(dev_priv, domain))
3155 continue;
3156
3157 intel_uncore_write(uncore, TRANS_PSR_IMR(trans), 0xffffffff);
3158 intel_uncore_write(uncore, TRANS_PSR_IIR(trans), 0xffffffff);
3159 }
3160 } else {
3161 intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
3162 intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
3163 }
3164
3165 for_each_pipe(dev_priv, pipe)
3166 if (intel_display_power_is_enabled(dev_priv,
3167 POWER_DOMAIN_PIPE(pipe)))
3168 GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);
3169
3170 GEN3_IRQ_RESET(uncore, GEN8_DE_PORT_);
3171 GEN3_IRQ_RESET(uncore, GEN8_DE_MISC_);
3172 GEN3_IRQ_RESET(uncore, GEN11_DE_HPD_);
3173
3174 if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
3175 GEN3_IRQ_RESET(uncore, SDE);
3176 }
3177
3178 static void gen11_irq_reset(struct drm_i915_private *dev_priv)
3179 {
3180 struct intel_gt *gt = to_gt(dev_priv);
3181 struct intel_uncore *uncore = gt->uncore;
3182
3183 gen11_master_intr_disable(dev_priv->uncore.regs);
3184
3185 gen11_gt_irq_reset(gt);
3186 gen11_display_irq_reset(dev_priv);
3187
3188 GEN3_IRQ_RESET(uncore, GEN11_GU_MISC_);
3189 GEN3_IRQ_RESET(uncore, GEN8_PCU_);
3190 }
3191
3192 static void dg1_irq_reset(struct drm_i915_private *dev_priv)
3193 {
3194 struct intel_gt *gt = to_gt(dev_priv);
3195 struct intel_uncore *uncore = gt->uncore;
3196
3197 dg1_master_intr_disable(dev_priv->uncore.regs);
3198
3199 gen11_gt_irq_reset(gt);
3200 gen11_display_irq_reset(dev_priv);
3201
3202 GEN3_IRQ_RESET(uncore, GEN11_GU_MISC_);
3203 GEN3_IRQ_RESET(uncore, GEN8_PCU_);
3204 }
3205
3206 void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
3207 u8 pipe_mask)
3208 {
3209 struct intel_uncore *uncore = &dev_priv->uncore;
3210 u32 extra_ier = GEN8_PIPE_VBLANK |
3211 gen8_de_pipe_underrun_mask(dev_priv) |
3212 gen8_de_pipe_flip_done_mask(dev_priv);
3213 enum pipe pipe;
3214
3215 spin_lock_irq(&dev_priv->irq_lock);
3216
3217 if (!intel_irqs_enabled(dev_priv)) {
3218 spin_unlock_irq(&dev_priv->irq_lock);
3219 return;
3220 }
3221
3222 for_each_pipe_masked(dev_priv, pipe, pipe_mask)
3223 GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe,
3224 dev_priv->de_irq_mask[pipe],
3225 ~dev_priv->de_irq_mask[pipe] | extra_ier);
3226
3227 spin_unlock_irq(&dev_priv->irq_lock);
3228 }
3229
3230 void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
3231 u8 pipe_mask)
3232 {
3233 struct intel_uncore *uncore = &dev_priv->uncore;
3234 enum pipe pipe;
3235
3236 spin_lock_irq(&dev_priv->irq_lock);
3237
3238 if (!intel_irqs_enabled(dev_priv)) {
3239 spin_unlock_irq(&dev_priv->irq_lock);
3240 return;
3241 }
3242
3243 for_each_pipe_masked(dev_priv, pipe, pipe_mask)
3244 GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);
3245
3246 spin_unlock_irq(&dev_priv->irq_lock);
3247
3248 /* make sure we're done processing display irqs */
3249 intel_synchronize_irq(dev_priv);
3250 }
3251
3252 static void cherryview_irq_reset(struct drm_i915_private *dev_priv)
3253 {
3254 struct intel_uncore *uncore = &dev_priv->uncore;
3255
3256 intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, 0);
3257 intel_uncore_posting_read(&dev_priv->uncore, GEN8_MASTER_IRQ);
3258
3259 gen8_gt_irq_reset(to_gt(dev_priv));
3260
3261 GEN3_IRQ_RESET(uncore, GEN8_PCU_);
3262
3263 spin_lock_irq(&dev_priv->irq_lock);
3264 if (dev_priv->display_irqs_enabled)
3265 vlv_display_irq_reset(dev_priv);
3266 spin_unlock_irq(&dev_priv->irq_lock);
3267 }
3268
3269 static u32 ibx_hotplug_enables(struct drm_i915_private *i915,
3270 enum hpd_pin pin)
3271 {
3272 switch (pin) {
3273 case HPD_PORT_A:
3274 /*
3275 * When CPU and PCH are on the same package, port A
3276 * HPD must be enabled in both north and south.
3277 */
3278 return HAS_PCH_LPT_LP(i915) ?
3279 PORTA_HOTPLUG_ENABLE : 0;
3280 case HPD_PORT_B:
3281 return PORTB_HOTPLUG_ENABLE |
3282 PORTB_PULSE_DURATION_2ms;
3283 case HPD_PORT_C:
3284 return PORTC_HOTPLUG_ENABLE |
3285 PORTC_PULSE_DURATION_2ms;
3286 case HPD_PORT_D:
3287 return PORTD_HOTPLUG_ENABLE |
3288 PORTD_PULSE_DURATION_2ms;
3289 default:
3290 return 0;
3291 }
3292 }
3293
3294 static void ibx_hpd_detection_setup(struct drm_i915_private *dev_priv)
3295 {
3296 u32 hotplug;
3297
3298 /*
3299 * Enable digital hotplug on the PCH, and configure the DP short pulse
3300 * duration to 2ms (which is the minimum in the Display Port spec).
3301 * The pulse duration bits are reserved on LPT+.
3302 */
3303 hotplug = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG);
3304 hotplug &= ~(PORTA_HOTPLUG_ENABLE |
3305 PORTB_HOTPLUG_ENABLE |
3306 PORTC_HOTPLUG_ENABLE |
3307 PORTD_HOTPLUG_ENABLE |
3308 PORTB_PULSE_DURATION_MASK |
3309 PORTC_PULSE_DURATION_MASK |
3310 PORTD_PULSE_DURATION_MASK);
3311 hotplug |= intel_hpd_hotplug_enables(dev_priv, ibx_hotplug_enables);
3312 intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG, hotplug);
3313 }
3314
static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_irqs, enabled_irqs;

	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd);
	hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd);

	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);

	ibx_hpd_detection_setup(dev_priv);
}

static u32 icp_ddi_hotplug_enables(struct drm_i915_private *i915,
				   enum hpd_pin pin)
{
	switch (pin) {
	case HPD_PORT_A:
	case HPD_PORT_B:
	case HPD_PORT_C:
	case HPD_PORT_D:
		return SHOTPLUG_CTL_DDI_HPD_ENABLE(pin);
	default:
		return 0;
	}
}

static u32 icp_tc_hotplug_enables(struct drm_i915_private *i915,
				  enum hpd_pin pin)
{
	switch (pin) {
	case HPD_PORT_TC1:
	case HPD_PORT_TC2:
	case HPD_PORT_TC3:
	case HPD_PORT_TC4:
	case HPD_PORT_TC5:
	case HPD_PORT_TC6:
		return ICP_TC_HPD_ENABLE(pin);
	default:
		return 0;
	}
}

static void icp_ddi_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug;

	hotplug = intel_uncore_read(&dev_priv->uncore, SHOTPLUG_CTL_DDI);
	hotplug &= ~(SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_A) |
		     SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_B) |
		     SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_C) |
		     SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_D));
	hotplug |= intel_hpd_hotplug_enables(dev_priv, icp_ddi_hotplug_enables);
	intel_uncore_write(&dev_priv->uncore, SHOTPLUG_CTL_DDI, hotplug);
}

static void icp_tc_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug;

	hotplug = intel_uncore_read(&dev_priv->uncore, SHOTPLUG_CTL_TC);
	hotplug &= ~(ICP_TC_HPD_ENABLE(HPD_PORT_TC1) |
		     ICP_TC_HPD_ENABLE(HPD_PORT_TC2) |
		     ICP_TC_HPD_ENABLE(HPD_PORT_TC3) |
		     ICP_TC_HPD_ENABLE(HPD_PORT_TC4) |
		     ICP_TC_HPD_ENABLE(HPD_PORT_TC5) |
		     ICP_TC_HPD_ENABLE(HPD_PORT_TC6));
	hotplug |= intel_hpd_hotplug_enables(dev_priv, icp_tc_hotplug_enables);
	intel_uncore_write(&dev_priv->uncore, SHOTPLUG_CTL_TC, hotplug);
}

static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_irqs, enabled_irqs;

	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd);
	hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd);

	if (INTEL_PCH_TYPE(dev_priv) <= PCH_TGP)
		intel_uncore_write(&dev_priv->uncore, SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ);

	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);

	icp_ddi_hpd_detection_setup(dev_priv);
	icp_tc_hpd_detection_setup(dev_priv);
}

static u32 gen11_hotplug_enables(struct drm_i915_private *i915,
				 enum hpd_pin pin)
{
	switch (pin) {
	case HPD_PORT_TC1:
	case HPD_PORT_TC2:
	case HPD_PORT_TC3:
	case HPD_PORT_TC4:
	case HPD_PORT_TC5:
	case HPD_PORT_TC6:
		return GEN11_HOTPLUG_CTL_ENABLE(pin);
	default:
		return 0;
	}
}

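/*
 * On DG1 the DDI HPD signals are inverted via SOUTH_CHICKEN1 before
 * handing over to the common ICP hotplug setup.
 */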
static void dg1_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 val;

	val = intel_uncore_read(&dev_priv->uncore, SOUTH_CHICKEN1);
	val |= (INVERT_DDIA_HPD |
		INVERT_DDIB_HPD |
		INVERT_DDIC_HPD |
		INVERT_DDID_HPD);
	intel_uncore_write(&dev_priv->uncore, SOUTH_CHICKEN1, val);

	icp_hpd_irq_setup(dev_priv);
}

static void gen11_tc_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug;

	hotplug = intel_uncore_read(&dev_priv->uncore, GEN11_TC_HOTPLUG_CTL);
	hotplug &= ~(GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC1) |
		     GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC2) |
		     GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC3) |
		     GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC4) |
		     GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC5) |
		     GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC6));
	hotplug |= intel_hpd_hotplug_enables(dev_priv, gen11_hotplug_enables);
	intel_uncore_write(&dev_priv->uncore, GEN11_TC_HOTPLUG_CTL, hotplug);
}

static void gen11_tbt_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug;

	hotplug = intel_uncore_read(&dev_priv->uncore, GEN11_TBT_HOTPLUG_CTL);
	hotplug &= ~(GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC1) |
		     GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC2) |
		     GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC3) |
		     GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC4) |
		     GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC5) |
		     GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC6));
	hotplug |= intel_hpd_hotplug_enables(dev_priv, gen11_hotplug_enables);
	intel_uncore_write(&dev_priv->uncore, GEN11_TBT_HOTPLUG_CTL, hotplug);
}

static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_irqs, enabled_irqs;
	u32 val;

	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.hpd);
	hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.hpd);

	/* Unmask the enabled hotplug irqs, keep the remaining ones masked. */
	val = intel_uncore_read(&dev_priv->uncore, GEN11_DE_HPD_IMR);
	val &= ~hotplug_irqs;
	val |= ~enabled_irqs & hotplug_irqs;
	intel_uncore_write(&dev_priv->uncore, GEN11_DE_HPD_IMR, val);
	intel_uncore_posting_read(&dev_priv->uncore, GEN11_DE_HPD_IMR);

	gen11_tc_hpd_detection_setup(dev_priv);
	gen11_tbt_hpd_detection_setup(dev_priv);

	if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
		icp_hpd_irq_setup(dev_priv);
}

static u32 spt_hotplug_enables(struct drm_i915_private *i915,
			       enum hpd_pin pin)
{
	switch (pin) {
	case HPD_PORT_A:
		return PORTA_HOTPLUG_ENABLE;
	case HPD_PORT_B:
		return PORTB_HOTPLUG_ENABLE;
	case HPD_PORT_C:
		return PORTC_HOTPLUG_ENABLE;
	case HPD_PORT_D:
		return PORTD_HOTPLUG_ENABLE;
	default:
		return 0;
	}
}

static u32 spt_hotplug2_enables(struct drm_i915_private *i915,
				enum hpd_pin pin)
{
	switch (pin) {
	case HPD_PORT_E:
		return PORTE_HOTPLUG_ENABLE;
	default:
		return 0;
	}
}

static void spt_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
	u32 val, hotplug;

	/* Display WA #1179 WaHardHangonHotPlug: cnp */
	if (HAS_PCH_CNP(dev_priv)) {
		val = intel_uncore_read(&dev_priv->uncore, SOUTH_CHICKEN1);
		val &= ~CHASSIS_CLK_REQ_DURATION_MASK;
		val |= CHASSIS_CLK_REQ_DURATION(0xf);
		intel_uncore_write(&dev_priv->uncore, SOUTH_CHICKEN1, val);
	}

	/* Enable digital hotplug on the PCH */
	hotplug = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG);
	hotplug &= ~(PORTA_HOTPLUG_ENABLE |
		     PORTB_HOTPLUG_ENABLE |
		     PORTC_HOTPLUG_ENABLE |
		     PORTD_HOTPLUG_ENABLE);
	hotplug |= intel_hpd_hotplug_enables(dev_priv, spt_hotplug_enables);
	intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG, hotplug);

	hotplug = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG2);
	hotplug &= ~PORTE_HOTPLUG_ENABLE;
	hotplug |= intel_hpd_hotplug_enables(dev_priv, spt_hotplug2_enables);
	intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG2, hotplug);
}

static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_irqs, enabled_irqs;

	if (INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
		intel_uncore_write(&dev_priv->uncore, SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ);

	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd);
	hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd);

	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);

	spt_hpd_detection_setup(dev_priv);
}

static u32 ilk_hotplug_enables(struct drm_i915_private *i915,
			       enum hpd_pin pin)
{
	switch (pin) {
	case HPD_PORT_A:
		return DIGITAL_PORTA_HOTPLUG_ENABLE |
			DIGITAL_PORTA_PULSE_DURATION_2ms;
	default:
		return 0;
	}
}

static void ilk_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug;

	/*
	 * Enable digital hotplug on the CPU, and configure the DP short pulse
	 * duration to 2ms (which is the minimum in the DisplayPort spec).
	 * The pulse duration bits are reserved on HSW+.
	 */
	hotplug = intel_uncore_read(&dev_priv->uncore, DIGITAL_PORT_HOTPLUG_CNTRL);
	hotplug &= ~(DIGITAL_PORTA_HOTPLUG_ENABLE |
		     DIGITAL_PORTA_PULSE_DURATION_MASK);
	hotplug |= intel_hpd_hotplug_enables(dev_priv, ilk_hotplug_enables);
	intel_uncore_write(&dev_priv->uncore, DIGITAL_PORT_HOTPLUG_CNTRL, hotplug);
}

static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_irqs, enabled_irqs;

	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.hpd);
	hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.hpd);

	if (DISPLAY_VER(dev_priv) >= 8)
		bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
	else
		ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);

	ilk_hpd_detection_setup(dev_priv);

	ibx_hpd_irq_setup(dev_priv);
}

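/*
 * On BXT/GLK the per-port HPD polarity may be inverted by the board;
 * honour the VBT hpd-invert flag by folding the invert bit into the
 * enable mask.
 */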
static u32 bxt_hotplug_enables(struct drm_i915_private *i915,
			       enum hpd_pin pin)
{
	u32 hotplug;

	switch (pin) {
	case HPD_PORT_A:
		hotplug = PORTA_HOTPLUG_ENABLE;
		if (intel_bios_is_port_hpd_inverted(i915, PORT_A))
			hotplug |= BXT_DDIA_HPD_INVERT;
		return hotplug;
	case HPD_PORT_B:
		hotplug = PORTB_HOTPLUG_ENABLE;
		if (intel_bios_is_port_hpd_inverted(i915, PORT_B))
			hotplug |= BXT_DDIB_HPD_INVERT;
		return hotplug;
	case HPD_PORT_C:
		hotplug = PORTC_HOTPLUG_ENABLE;
		if (intel_bios_is_port_hpd_inverted(i915, PORT_C))
			hotplug |= BXT_DDIC_HPD_INVERT;
		return hotplug;
	default:
		return 0;
	}
}

static void bxt_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug;

	hotplug = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG);
	hotplug &= ~(PORTA_HOTPLUG_ENABLE |
		     PORTB_HOTPLUG_ENABLE |
		     PORTC_HOTPLUG_ENABLE |
		     BXT_DDIA_HPD_INVERT |
		     BXT_DDIB_HPD_INVERT |
		     BXT_DDIC_HPD_INVERT);
	hotplug |= intel_hpd_hotplug_enables(dev_priv, bxt_hotplug_enables);
	intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG, hotplug);
}

static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_irqs, enabled_irqs;

	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.hpd);
	hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.hpd);

	bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);

	bxt_hpd_detection_setup(dev_priv);
}

/*
 * SDEIER is also touched by the interrupt handler to work around missed PCH
 * interrupts. Hence we can't update it after the interrupt handler is enabled -
 * instead we unconditionally enable all PCH interrupt sources here, but then
 * only unmask them as needed with SDEIMR.
 *
 * Note that we currently do this after installing the interrupt handler,
 * but before we enable the master interrupt. That should be sufficient
 * to avoid races with the irq handler, assuming we have MSI. Shared legacy
 * interrupts could still race.
 */
static void ibx_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	u32 mask;

	if (HAS_PCH_NOP(dev_priv))
		return;

	if (HAS_PCH_IBX(dev_priv))
		mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
	else if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
		mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
	else
		mask = SDE_GMBUS_CPT;

	GEN3_IRQ_INIT(uncore, SDE, ~mask, 0xffffffff);
}

static void ilk_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	u32 display_mask, extra_mask;

	if (GRAPHICS_VER(dev_priv) >= 7) {
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
				DE_PCH_EVENT_IVB | DE_AUX_CHANNEL_A_IVB);
		extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
			      DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB |
			      DE_PLANE_FLIP_DONE_IVB(PLANE_C) |
			      DE_PLANE_FLIP_DONE_IVB(PLANE_B) |
			      DE_PLANE_FLIP_DONE_IVB(PLANE_A) |
			      DE_DP_A_HOTPLUG_IVB);
	} else {
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
				DE_AUX_CHANNEL_A | DE_PIPEB_CRC_DONE |
				DE_PIPEA_CRC_DONE | DE_POISON);
		extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK |
			      DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
			      DE_PLANE_FLIP_DONE(PLANE_A) |
			      DE_PLANE_FLIP_DONE(PLANE_B) |
			      DE_DP_A_HOTPLUG);
	}

	if (IS_HASWELL(dev_priv)) {
		gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR);
		display_mask |= DE_EDP_PSR_INT_HSW;
	}

	if (IS_IRONLAKE_M(dev_priv))
		extra_mask |= DE_PCU_EVENT;

	dev_priv->irq_mask = ~display_mask;

	ibx_irq_postinstall(dev_priv);

	gen5_gt_irq_postinstall(to_gt(dev_priv));

	GEN3_IRQ_INIT(uncore, DE, dev_priv->irq_mask,
		      display_mask | extra_mask);
}

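/*
 * On VLV/CHV the display irq block sits in a power well that may be
 * powered down at runtime, so display irqs are enabled and disabled
 * dynamically, under irq_lock, instead of once at install time.
 */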
void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
{
	lockdep_assert_held(&dev_priv->irq_lock);

	if (dev_priv->display_irqs_enabled)
		return;

	dev_priv->display_irqs_enabled = true;

	if (intel_irqs_enabled(dev_priv)) {
		vlv_display_irq_reset(dev_priv);
		vlv_display_irq_postinstall(dev_priv);
	}
}

void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
{
	lockdep_assert_held(&dev_priv->irq_lock);

	if (!dev_priv->display_irqs_enabled)
		return;

	dev_priv->display_irqs_enabled = false;

	if (intel_irqs_enabled(dev_priv))
		vlv_display_irq_reset(dev_priv);
}

static void valleyview_irq_postinstall(struct drm_i915_private *dev_priv)
{
	gen5_gt_irq_postinstall(to_gt(dev_priv));

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_postinstall(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
	intel_uncore_posting_read(&dev_priv->uncore, VLV_MASTER_IER);
}

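/*
 * Program the display engine (DE) interrupt registers: per-pipe, port,
 * misc and (gen11+) HPD blocks. Pipes whose power wells are currently
 * off are skipped here; they get reprogrammed via
 * gen8_irq_power_well_post_enable() when powered up again.
 */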
static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	u32 de_pipe_masked = gen8_de_pipe_fault_mask(dev_priv) |
		GEN8_PIPE_CDCLK_CRC_DONE;
	u32 de_pipe_enables;
	u32 de_port_masked = gen8_de_port_aux_mask(dev_priv);
	u32 de_port_enables;
	u32 de_misc_masked = GEN8_DE_EDP_PSR;
	u32 trans_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
		BIT(TRANSCODER_C) | BIT(TRANSCODER_D);
	enum pipe pipe;

	if (!HAS_DISPLAY(dev_priv))
		return;

	if (DISPLAY_VER(dev_priv) <= 10)
		de_misc_masked |= GEN8_DE_MISC_GSE;

	if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
		de_port_masked |= BXT_DE_PORT_GMBUS;

	if (DISPLAY_VER(dev_priv) >= 11) {
		enum port port;

		if (intel_bios_is_dsi_present(dev_priv, &port))
			de_port_masked |= DSI0_TE | DSI1_TE;
	}

	de_pipe_enables = de_pipe_masked |
		GEN8_PIPE_VBLANK |
		gen8_de_pipe_underrun_mask(dev_priv) |
		gen8_de_pipe_flip_done_mask(dev_priv);

	de_port_enables = de_port_masked;
	if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
		de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK;
	else if (IS_BROADWELL(dev_priv))
		de_port_enables |= BDW_DE_PORT_HOTPLUG_MASK;

	if (DISPLAY_VER(dev_priv) >= 12) {
		enum transcoder trans;

		for_each_cpu_transcoder_masked(dev_priv, trans, trans_mask) {
			enum intel_display_power_domain domain;

			domain = POWER_DOMAIN_TRANSCODER(trans);
			if (!intel_display_power_is_enabled(dev_priv, domain))
				continue;

			gen3_assert_iir_is_zero(uncore, TRANS_PSR_IIR(trans));
		}
	} else {
		gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR);
	}

	for_each_pipe(dev_priv, pipe) {
		dev_priv->de_irq_mask[pipe] = ~de_pipe_masked;

		if (intel_display_power_is_enabled(dev_priv,
						   POWER_DOMAIN_PIPE(pipe)))
			GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe,
					  dev_priv->de_irq_mask[pipe],
					  de_pipe_enables);
	}

	GEN3_IRQ_INIT(uncore, GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
	GEN3_IRQ_INIT(uncore, GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked);

	if (DISPLAY_VER(dev_priv) >= 11) {
		u32 de_hpd_masked = 0;
		u32 de_hpd_enables = GEN11_DE_TC_HOTPLUG_MASK |
				     GEN11_DE_TBT_HOTPLUG_MASK;

		GEN3_IRQ_INIT(uncore, GEN11_DE_HPD_, ~de_hpd_masked,
			      de_hpd_enables);
	}
}

static void icp_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	u32 mask = SDE_GMBUS_ICP;

	GEN3_IRQ_INIT(uncore, SDE, ~mask, 0xffffffff);
}

static void gen8_irq_postinstall(struct drm_i915_private *dev_priv)
{
	if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
		icp_irq_postinstall(dev_priv);
	else if (HAS_PCH_SPLIT(dev_priv))
		ibx_irq_postinstall(dev_priv);

	gen8_gt_irq_postinstall(to_gt(dev_priv));
	gen8_de_irq_postinstall(dev_priv);

	gen8_master_intr_enable(dev_priv->uncore.regs);
}

static void gen11_de_irq_postinstall(struct drm_i915_private *dev_priv)
{
	if (!HAS_DISPLAY(dev_priv))
		return;

	gen8_de_irq_postinstall(dev_priv);

	intel_uncore_write(&dev_priv->uncore, GEN11_DISPLAY_INT_CTL,
			   GEN11_DISPLAY_IRQ_ENABLE);
}

static void gen11_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_gt *gt = to_gt(dev_priv);
	struct intel_uncore *uncore = gt->uncore;
	u32 gu_misc_masked = GEN11_GU_MISC_GSE;

	if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
		icp_irq_postinstall(dev_priv);

	gen11_gt_irq_postinstall(gt);
	gen11_de_irq_postinstall(dev_priv);

	GEN3_IRQ_INIT(uncore, GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked);

	gen11_master_intr_enable(uncore->regs);
	intel_uncore_posting_read(&dev_priv->uncore, GEN11_GFX_MSTR_IRQ);
}

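/*
 * DG1 adds a tile-level master interrupt register (DG1_MSTR_TILE_INTR)
 * on top of the gen11-style per-tile master; enable it last, after the
 * GT, GU misc and (if present) display blocks are set up.
 */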
static void dg1_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_gt *gt = to_gt(dev_priv);
	struct intel_uncore *uncore = gt->uncore;
	u32 gu_misc_masked = GEN11_GU_MISC_GSE;

	gen11_gt_irq_postinstall(gt);

	GEN3_IRQ_INIT(uncore, GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked);

	if (HAS_DISPLAY(dev_priv)) {
		icp_irq_postinstall(dev_priv);
		gen8_de_irq_postinstall(dev_priv);
		intel_uncore_write(&dev_priv->uncore, GEN11_DISPLAY_INT_CTL,
				   GEN11_DISPLAY_IRQ_ENABLE);
	}

	dg1_master_intr_enable(uncore->regs);
	intel_uncore_posting_read(uncore, DG1_MSTR_TILE_INTR);
}

static void cherryview_irq_postinstall(struct drm_i915_private *dev_priv)
{
	gen8_gt_irq_postinstall(to_gt(dev_priv));

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		vlv_display_irq_postinstall(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
	intel_uncore_posting_read(&dev_priv->uncore, GEN8_MASTER_IRQ);
}

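/*
 * Legacy (GMCH) interrupt support below: gen2 (i8xx) uses the 16-bit
 * register variants, gen3 (i915) and gen4 (i965/g4x) the 32-bit ones,
 * all based on the GEN2_ register block.
 */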
static void i8xx_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	i9xx_pipestat_irq_reset(dev_priv);

	GEN2_IRQ_RESET(uncore);
	dev_priv->irq_mask = ~0u;
}

static void i8xx_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	u16 enable_mask;

	intel_uncore_write16(uncore,
			     EMR,
			     ~(I915_ERROR_PAGE_TABLE |
			       I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_MASTER_ERROR_INTERRUPT);

	enable_mask =
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_MASTER_ERROR_INTERRUPT |
		I915_USER_INTERRUPT;

	GEN2_IRQ_INIT(uncore, dev_priv->irq_mask, enable_mask);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);
}

static void i8xx_error_irq_ack(struct drm_i915_private *i915,
			       u16 *eir, u16 *eir_stuck)
{
	struct intel_uncore *uncore = &i915->uncore;
	u16 emr;

	*eir = intel_uncore_read16(uncore, EIR);

	if (*eir)
		intel_uncore_write16(uncore, EIR, *eir);

	*eir_stuck = intel_uncore_read16(uncore, EIR);
	if (*eir_stuck == 0)
		return;

	/*
	 * Toggle all EMR bits to make sure we get an edge
	 * in the ISR master error bit if we don't clear
	 * all the EIR bits. Otherwise the edge triggered
	 * IIR on i965/g4x wouldn't notice that an interrupt
	 * is still pending. Also some EIR bits can't be
	 * cleared except by handling the underlying error
	 * (or by a GPU reset) so we mask any bit that
	 * remains set.
	 */
	emr = intel_uncore_read16(uncore, EMR);
	intel_uncore_write16(uncore, EMR, 0xffff);
	intel_uncore_write16(uncore, EMR, emr | *eir_stuck);
}

static void i8xx_error_irq_handler(struct drm_i915_private *dev_priv,
				   u16 eir, u16 eir_stuck)
{
	DRM_DEBUG("Master Error: EIR 0x%04x\n", eir);

	if (eir_stuck)
		drm_dbg(&dev_priv->drm, "EIR stuck: 0x%04x, masked\n",
			eir_stuck);
}

static void i9xx_error_irq_ack(struct drm_i915_private *dev_priv,
			       u32 *eir, u32 *eir_stuck)
{
	u32 emr;

	*eir = intel_uncore_read(&dev_priv->uncore, EIR);

	intel_uncore_write(&dev_priv->uncore, EIR, *eir);

	*eir_stuck = intel_uncore_read(&dev_priv->uncore, EIR);
	if (*eir_stuck == 0)
		return;

	/*
	 * Toggle all EMR bits to make sure we get an edge
	 * in the ISR master error bit if we don't clear
	 * all the EIR bits. Otherwise the edge triggered
	 * IIR on i965/g4x wouldn't notice that an interrupt
	 * is still pending. Also some EIR bits can't be
	 * cleared except by handling the underlying error
	 * (or by a GPU reset) so we mask any bit that
	 * remains set.
	 */
	emr = intel_uncore_read(&dev_priv->uncore, EMR);
	intel_uncore_write(&dev_priv->uncore, EMR, 0xffffffff);
	intel_uncore_write(&dev_priv->uncore, EMR, emr | *eir_stuck);
}

static void i9xx_error_irq_handler(struct drm_i915_private *dev_priv,
				   u32 eir, u32 eir_stuck)
{
	DRM_DEBUG("Master Error, EIR 0x%08x\n", eir);

	if (eir_stuck)
		drm_dbg(&dev_priv->drm, "EIR stuck: 0x%08x, masked\n",
			eir_stuck);
}

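/*
 * The legacy interrupt handlers below share a common shape: read IIR,
 * ack the underlying status registers, clear IIR, and only then
 * dispatch the events. Note the ack ordering: status is acked before
 * IIR is cleared, so a still-pending event re-asserts IIR instead of
 * being lost.
 */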
static irqreturn_t i8xx_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	do {
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u16 eir = 0, eir_stuck = 0;
		u16 iir;

		iir = intel_uncore_read16(&dev_priv->uncore, GEN2_IIR);
		if (iir == 0)
			break;

		ret = IRQ_HANDLED;

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i8xx_error_irq_ack(dev_priv, &eir, &eir_stuck);

		intel_uncore_write16(&dev_priv->uncore, GEN2_IIR, iir);

		if (iir & I915_USER_INTERRUPT)
			intel_engine_cs_irq(to_gt(dev_priv)->engine[RCS0], iir);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i8xx_error_irq_handler(dev_priv, eir, eir_stuck);

		i8xx_pipestat_irq_handler(dev_priv, iir, pipe_stats);
	} while (0);

	pmu_irq_stats(dev_priv, ret);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return ret;
}

static void i915_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	if (I915_HAS_HOTPLUG(dev_priv)) {
		i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
		intel_uncore_write(&dev_priv->uncore, PORT_HOTPLUG_STAT,
				   intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_STAT));
	}

	i9xx_pipestat_irq_reset(dev_priv);

	GEN3_IRQ_RESET(uncore, GEN2_);
	dev_priv->irq_mask = ~0u;
}

static void i915_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	u32 enable_mask;

	intel_uncore_write(&dev_priv->uncore, EMR, ~(I915_ERROR_PAGE_TABLE |
						     I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_ASLE_INTERRUPT |
		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_MASTER_ERROR_INTERRUPT);

	enable_mask =
		I915_ASLE_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_MASTER_ERROR_INTERRUPT |
		I915_USER_INTERRUPT;

	if (I915_HAS_HOTPLUG(dev_priv)) {
		/* Enable in IER... */
		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
		/* and unmask in IMR */
		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
	}

	GEN3_IRQ_INIT(uncore, GEN2_, dev_priv->irq_mask, enable_mask);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	i915_enable_asle_pipestat(dev_priv);
}

static irqreturn_t i915_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	do {
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 eir = 0, eir_stuck = 0;
		u32 hotplug_status = 0;
		u32 iir;

		iir = intel_uncore_read(&dev_priv->uncore, GEN2_IIR);
		if (iir == 0)
			break;

		ret = IRQ_HANDLED;

		if (I915_HAS_HOTPLUG(dev_priv) &&
		    iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(dev_priv);

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);

		intel_uncore_write(&dev_priv->uncore, GEN2_IIR, iir);

		if (iir & I915_USER_INTERRUPT)
			intel_engine_cs_irq(to_gt(dev_priv)->engine[RCS0], iir);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i9xx_error_irq_handler(dev_priv, eir, eir_stuck);

		if (hotplug_status)
			i9xx_hpd_irq_handler(dev_priv, hotplug_status);

		i915_pipestat_irq_handler(dev_priv, iir, pipe_stats);
	} while (0);

	pmu_irq_stats(dev_priv, ret);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return ret;
}

static void i965_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
	intel_uncore_write(&dev_priv->uncore, PORT_HOTPLUG_STAT,
			   intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_STAT));

	i9xx_pipestat_irq_reset(dev_priv);

	GEN3_IRQ_RESET(uncore, GEN2_);
	dev_priv->irq_mask = ~0u;
}

static void i965_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	u32 enable_mask;
	u32 error_mask;

	/*
	 * Enable some error detection, note the instruction error mask
	 * bit is reserved, so we leave it masked.
	 */
	if (IS_G4X(dev_priv)) {
		error_mask = ~(GM45_ERROR_PAGE_TABLE |
			       GM45_ERROR_MEM_PRIV |
			       GM45_ERROR_CP_PRIV |
			       I915_ERROR_MEMORY_REFRESH);
	} else {
		error_mask = ~(I915_ERROR_PAGE_TABLE |
			       I915_ERROR_MEMORY_REFRESH);
	}
	intel_uncore_write(&dev_priv->uncore, EMR, error_mask);

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_ASLE_INTERRUPT |
		  I915_DISPLAY_PORT_INTERRUPT |
		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_MASTER_ERROR_INTERRUPT);

	enable_mask =
		I915_ASLE_INTERRUPT |
		I915_DISPLAY_PORT_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_MASTER_ERROR_INTERRUPT |
		I915_USER_INTERRUPT;

	if (IS_G4X(dev_priv))
		enable_mask |= I915_BSD_USER_INTERRUPT;

	GEN3_IRQ_INIT(uncore, GEN2_, dev_priv->irq_mask, enable_mask);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	i915_enable_asle_pipestat(dev_priv);
}

static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_en;

	lockdep_assert_held(&dev_priv->irq_lock);

	/* Note HDMI and DP share hotplug bits */
	/* enable bits are the same for all generations */
	hotplug_en = intel_hpd_enabled_irqs(dev_priv, hpd_mask_i915);
	/*
	 * Programming the CRT detection parameters tends to generate a
	 * spurious hotplug event about three seconds later. So just do
	 * it once.
	 */
	if (IS_G4X(dev_priv))
		hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
	hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;

	/* Ignore TV since it's buggy */
	i915_hotplug_interrupt_update_locked(dev_priv,
					     HOTPLUG_INT_EN_MASK |
					     CRT_HOTPLUG_VOLTAGE_COMPARE_MASK |
					     CRT_HOTPLUG_ACTIVATION_PERIOD_64,
					     hotplug_en);
}

static irqreturn_t i965_irq_handler(int irq, void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	do {
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 eir = 0, eir_stuck = 0;
		u32 hotplug_status = 0;
		u32 iir;

		iir = intel_uncore_read(&dev_priv->uncore, GEN2_IIR);
		if (iir == 0)
			break;

		ret = IRQ_HANDLED;

		if (iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(dev_priv);

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);

		intel_uncore_write(&dev_priv->uncore, GEN2_IIR, iir);

		if (iir & I915_USER_INTERRUPT)
			intel_engine_cs_irq(to_gt(dev_priv)->engine[RCS0],
					    iir);

		if (iir & I915_BSD_USER_INTERRUPT)
			intel_engine_cs_irq(to_gt(dev_priv)->engine[VCS0],
					    iir >> 25);

		if (iir & I915_MASTER_ERROR_INTERRUPT)
			i9xx_error_irq_handler(dev_priv, eir, eir_stuck);

		if (hotplug_status)
			i9xx_hpd_irq_handler(dev_priv, hotplug_status);

		i965_pipestat_irq_handler(dev_priv, iir, pipe_stats);
	} while (0);

	/* Like the other handlers, only count interrupts we actually handled. */
	pmu_irq_stats(dev_priv, ret);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return ret;
}

struct intel_hotplug_funcs {
	void (*hpd_irq_setup)(struct drm_i915_private *i915);
};

#define HPD_FUNCS(platform) \
static const struct intel_hotplug_funcs platform##_hpd_funcs = { \
	.hpd_irq_setup = platform##_hpd_irq_setup, \
}

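/*
 * e.g. HPD_FUNCS(i915) expands to:
 *
 *	static const struct intel_hotplug_funcs i915_hpd_funcs = {
 *		.hpd_irq_setup = i915_hpd_irq_setup,
 *	};
 */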
HPD_FUNCS(i915);
HPD_FUNCS(dg1);
HPD_FUNCS(gen11);
HPD_FUNCS(bxt);
HPD_FUNCS(icp);
HPD_FUNCS(spt);
HPD_FUNCS(ilk);
#undef HPD_FUNCS

void intel_hpd_irq_setup(struct drm_i915_private *i915)
{
	if (i915->display_irqs_enabled && i915->display.funcs.hotplug)
		i915->display.funcs.hotplug->hpd_irq_setup(i915);
}

/**
 * intel_irq_init - initializes irq support
 * @dev_priv: i915 device instance
 *
 * This function initializes all the irq support including work items, timers
 * and all the vtables. It does not set up the interrupt itself though.
 */
void intel_irq_init(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	int i;

	INIT_WORK(&dev_priv->l3_parity.error_work, ivb_parity_work);
	for (i = 0; i < MAX_L3_SLICES; ++i)
		dev_priv->l3_parity.remap_info[i] = NULL;

	/* pre-gen11 the GuC irq bits are in the upper 16 bits of the pm reg */
	if (HAS_GT_UC(dev_priv) && GRAPHICS_VER(dev_priv) < 11)
		to_gt(dev_priv)->pm_guc_events = GUC_INTR_GUC2HOST << 16;

	if (!HAS_DISPLAY(dev_priv))
		return;

	intel_hpd_init_pins(dev_priv);

	intel_hpd_init_work(dev_priv);

	dev->vblank_disable_immediate = true;

	/* Most platforms treat the display irq block as an always-on
	 * power domain. vlv/chv can disable it at runtime and need
	 * special care to avoid writing any of the display block registers
	 * outside of the power domain. We defer setting up the display irqs
	 * in this case to the runtime pm.
	 */
	dev_priv->display_irqs_enabled = true;
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		dev_priv->display_irqs_enabled = false;

	dev_priv->display.hotplug.hpd_storm_threshold = HPD_STORM_DEFAULT_THRESHOLD;
	/* If we have MST support, we want to avoid doing short HPD IRQ storm
	 * detection, as short HPD storms will occur as a natural part of
	 * sideband messaging with MST.
	 * On older platforms however, IRQ storms can occur with both long and
	 * short pulses, as seen on some G4x systems.
	 */
	dev_priv->display.hotplug.hpd_short_storm_enabled = !HAS_DP_MST(dev_priv);

	if (HAS_GMCH(dev_priv)) {
		if (I915_HAS_HOTPLUG(dev_priv))
			dev_priv->display.funcs.hotplug = &i915_hpd_funcs;
	} else {
		if (HAS_PCH_DG2(dev_priv))
			dev_priv->display.funcs.hotplug = &icp_hpd_funcs;
		else if (HAS_PCH_DG1(dev_priv))
			dev_priv->display.funcs.hotplug = &dg1_hpd_funcs;
		else if (DISPLAY_VER(dev_priv) >= 11)
			dev_priv->display.funcs.hotplug = &gen11_hpd_funcs;
		else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
			dev_priv->display.funcs.hotplug = &bxt_hpd_funcs;
		else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
			dev_priv->display.funcs.hotplug = &icp_hpd_funcs;
		else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
			dev_priv->display.funcs.hotplug = &spt_hpd_funcs;
		else
			dev_priv->display.funcs.hotplug = &ilk_hpd_funcs;
	}
}

/**
 * intel_irq_fini - deinitializes IRQ support
 * @i915: i915 device instance
 *
 * This function deinitializes all the IRQ support.
 */
void intel_irq_fini(struct drm_i915_private *i915)
{
	int i;

	for (i = 0; i < MAX_L3_SLICES; ++i)
		kfree(i915->l3_parity.remap_info[i]);
}

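/*
 * Pick the top-level interrupt handler for the platform: legacy handlers
 * for GMCH platforms, otherwise the gen-specific master interrupt
 * handlers. intel_irq_reset() and intel_irq_postinstall() below dispatch
 * along the same platform checks.
 */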
static irq_handler_t intel_irq_handler(struct drm_i915_private *dev_priv)
{
	if (HAS_GMCH(dev_priv)) {
		if (IS_CHERRYVIEW(dev_priv))
			return cherryview_irq_handler;
		else if (IS_VALLEYVIEW(dev_priv))
			return valleyview_irq_handler;
		else if (GRAPHICS_VER(dev_priv) == 4)
			return i965_irq_handler;
		else if (GRAPHICS_VER(dev_priv) == 3)
			return i915_irq_handler;
		else
			return i8xx_irq_handler;
	} else {
		if (GRAPHICS_VER_FULL(dev_priv) >= IP_VER(12, 10))
			return dg1_irq_handler;
		else if (GRAPHICS_VER(dev_priv) >= 11)
			return gen11_irq_handler;
		else if (GRAPHICS_VER(dev_priv) >= 8)
			return gen8_irq_handler;
		else
			return ilk_irq_handler;
	}
}

static void intel_irq_reset(struct drm_i915_private *dev_priv)
{
	if (HAS_GMCH(dev_priv)) {
		if (IS_CHERRYVIEW(dev_priv))
			cherryview_irq_reset(dev_priv);
		else if (IS_VALLEYVIEW(dev_priv))
			valleyview_irq_reset(dev_priv);
		else if (GRAPHICS_VER(dev_priv) == 4)
			i965_irq_reset(dev_priv);
		else if (GRAPHICS_VER(dev_priv) == 3)
			i915_irq_reset(dev_priv);
		else
			i8xx_irq_reset(dev_priv);
	} else {
		if (GRAPHICS_VER_FULL(dev_priv) >= IP_VER(12, 10))
			dg1_irq_reset(dev_priv);
		else if (GRAPHICS_VER(dev_priv) >= 11)
			gen11_irq_reset(dev_priv);
		else if (GRAPHICS_VER(dev_priv) >= 8)
			gen8_irq_reset(dev_priv);
		else
			ilk_irq_reset(dev_priv);
	}
}

static void intel_irq_postinstall(struct drm_i915_private *dev_priv)
{
	if (HAS_GMCH(dev_priv)) {
		if (IS_CHERRYVIEW(dev_priv))
			cherryview_irq_postinstall(dev_priv);
		else if (IS_VALLEYVIEW(dev_priv))
			valleyview_irq_postinstall(dev_priv);
		else if (GRAPHICS_VER(dev_priv) == 4)
			i965_irq_postinstall(dev_priv);
		else if (GRAPHICS_VER(dev_priv) == 3)
			i915_irq_postinstall(dev_priv);
		else
			i8xx_irq_postinstall(dev_priv);
	} else {
		if (GRAPHICS_VER_FULL(dev_priv) >= IP_VER(12, 10))
			dg1_irq_postinstall(dev_priv);
		else if (GRAPHICS_VER(dev_priv) >= 11)
			gen11_irq_postinstall(dev_priv);
		else if (GRAPHICS_VER(dev_priv) >= 8)
			gen8_irq_postinstall(dev_priv);
		else
			ilk_irq_postinstall(dev_priv);
	}
}

/**
 * intel_irq_install - enables the hardware interrupt
 * @dev_priv: i915 device instance
 *
 * This function enables the hardware interrupt handling, but leaves the hotplug
 * handling still disabled. It is called after intel_irq_init().
 *
 * In the driver load and resume code we need working interrupts in a few places
 * but don't want to deal with the hassle of concurrent probe and hotplug
 * workers. Hence the split into this two-stage approach.
 */
int intel_irq_install(struct drm_i915_private *dev_priv)
{
	int irq = to_pci_dev(dev_priv->drm.dev)->irq;
	int ret;

	/*
	 * We enable some interrupt sources in our postinstall hooks, so mark
	 * interrupts as enabled _before_ actually enabling them to avoid
	 * special cases in our ordering checks.
	 */
	dev_priv->runtime_pm.irqs_enabled = true;

	dev_priv->irq_enabled = true;

	intel_irq_reset(dev_priv);

	ret = request_irq(irq, intel_irq_handler(dev_priv),
			  IRQF_SHARED, DRIVER_NAME, dev_priv);
	if (ret < 0) {
		dev_priv->irq_enabled = false;
		return ret;
	}

	intel_irq_postinstall(dev_priv);

	return ret;
}

/**
 * intel_irq_uninstall - finalizes all irq handling
 * @dev_priv: i915 device instance
 *
 * This stops interrupt and hotplug handling and unregisters and frees all
 * resources acquired in the init functions.
 */
void intel_irq_uninstall(struct drm_i915_private *dev_priv)
{
	int irq = to_pci_dev(dev_priv->drm.dev)->irq;

	/*
	 * FIXME we can get called twice during driver probe
	 * error handling as well as during driver remove due to
	 * intel_modeset_driver_remove() calling us out of sequence.
	 * Would be nice if it didn't do that...
	 */
	if (!dev_priv->irq_enabled)
		return;

	dev_priv->irq_enabled = false;

	intel_irq_reset(dev_priv);

	free_irq(irq, dev_priv);

	intel_hpd_cancel_work(dev_priv);
	dev_priv->runtime_pm.irqs_enabled = false;
}

/**
 * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
 * @dev_priv: i915 device instance
 *
 * This function is used to disable interrupts at runtime, both in the runtime
 * pm and the system suspend/resume code.
 */
void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
{
	intel_irq_reset(dev_priv);
	dev_priv->runtime_pm.irqs_enabled = false;
	intel_synchronize_irq(dev_priv);
}

/**
 * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
 * @dev_priv: i915 device instance
 *
 * This function is used to enable interrupts at runtime, both in the runtime
 * pm and the system suspend/resume code.
 */
void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
{
	dev_priv->runtime_pm.irqs_enabled = true;
	intel_irq_reset(dev_priv);
	intel_irq_postinstall(dev_priv);
}

bool intel_irqs_enabled(struct drm_i915_private *dev_priv)
{
	return dev_priv->runtime_pm.irqs_enabled;
}

void intel_synchronize_irq(struct drm_i915_private *i915)
{
	synchronize_irq(to_pci_dev(i915->drm.dev)->irq);
}

void intel_synchronize_hardirq(struct drm_i915_private *i915)
{
	synchronize_hardirq(to_pci_dev(i915->drm.dev)->irq);
}