// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_panel.h"
#include "intel_pch_refclk.h"
#include "intel_sbi.h"

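/*
 * Pulse the FDI mPHY reset: assert via SOUTH_CHICKEN2, wait for the
 * status bit, then de-assert and wait for it to clear.
 */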
static void lpt_fdi_reset_mphy(struct drm_i915_private *dev_priv)
{
	u32 tmp;

	tmp = intel_de_read(dev_priv, SOUTH_CHICKEN2);
	tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
	intel_de_write(dev_priv, SOUTH_CHICKEN2, tmp);

	if (wait_for_us(intel_de_read(dev_priv, SOUTH_CHICKEN2) &
			FDI_MPHY_IOSFSB_RESET_STATUS, 100))
		drm_err(&dev_priv->drm, "FDI mPHY reset assert timeout\n");

	tmp = intel_de_read(dev_priv, SOUTH_CHICKEN2);
	tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
	intel_de_write(dev_priv, SOUTH_CHICKEN2, tmp);

	if (wait_for_us((intel_de_read(dev_priv, SOUTH_CHICKEN2) &
			 FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
		drm_err(&dev_priv->drm, "FDI mPHY reset de-assert timeout\n");
}

/* WaMPhyProgramming:hsw */
static void lpt_fdi_program_mphy(struct drm_i915_private *dev_priv)
{
	u32 tmp;

	lpt_fdi_reset_mphy(dev_priv);

	tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
	tmp &= ~(0xFF << 24);
	tmp |= (0x12 << 24);
	intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
	tmp |= (1 << 11);
	intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
	tmp |= (1 << 11);
	intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
	intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
	intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
	tmp &= ~(7 << 13);
	tmp |= (5 << 13);
	intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
	tmp &= ~(7 << 13);
	tmp |= (5 << 13);
	intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
	tmp &= ~0xFF;
	tmp |= 0x1C;
	intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
	tmp &= ~0xFF;
	tmp |= 0x1C;
	intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
	tmp &= ~(0xFF << 16);
	tmp |= (0x1C << 16);
	intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
	tmp &= ~(0xFF << 16);
	tmp |= (0x1C << 16);
	intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
	tmp |= (1 << 27);
	intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
	tmp |= (1 << 27);
	intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
	tmp &= ~(0xF << 28);
	tmp |= (4 << 28);
	intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
	tmp &= ~(0xF << 28);
	tmp |= (4 << 28);
	intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
}

void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
{
	u32 temp;

	intel_de_write(dev_priv, PIXCLK_GATE, PIXCLK_GATE_GATE);

	mutex_lock(&dev_priv->sb_lock);

	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	temp |= SBI_SSCCTL_DISABLE;
	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);
}

struct iclkip_params {
	u32 iclk_virtual_root_freq;
	u32 iclk_pi_range;
	u32 divsel, phaseinc, auxdiv, phasedir, desired_divisor;
};

static void iclkip_params_init(struct iclkip_params *p)
{
	memset(p, 0, sizeof(*p));

	p->iclk_virtual_root_freq = 172800 * 1000;
	p->iclk_pi_range = 64;
}

static int lpt_iclkip_freq(struct iclkip_params *p)
{
	return DIV_ROUND_CLOSEST(p->iclk_virtual_root_freq,
				 p->desired_divisor << p->auxdiv);
}

static void lpt_compute_iclkip(struct iclkip_params *p, int clock)
{
	iclkip_params_init(p);

	/* The iCLK virtual clock root frequency is in MHz,
	 * but the adjusted_mode->crtc_clock is in KHz. To get the
	 * divisors, it is necessary to divide one by another, so we
	 * convert the virtual clock precision to KHz here for higher
	 * precision.
	 */
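	/*
	 * Worked example (assuming a 108 MHz pixel clock): with auxdiv=0,
	 * desired_divisor = 172800000 / 108000 = 1600, so
	 * divsel = 1600 / 64 - 2 = 23 and phaseinc = 1600 % 64 = 0,
	 * which lpt_iclkip_freq() maps back to exactly 108000 kHz.
	 */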
	for (p->auxdiv = 0; p->auxdiv < 2; p->auxdiv++) {
		p->desired_divisor = DIV_ROUND_CLOSEST(p->iclk_virtual_root_freq,
						       clock << p->auxdiv);
		p->divsel = (p->desired_divisor / p->iclk_pi_range) - 2;
		p->phaseinc = p->desired_divisor % p->iclk_pi_range;

		/*
		 * Near 20MHz is a corner case which is
		 * out of range for the 7-bit divisor
		 */
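		/*
		 * E.g. for a 20000 kHz clock, auxdiv=0 would give
		 * divsel = 8640 / 64 - 2 = 133 (> 0x7f), so the loop
		 * retries with auxdiv=1, giving divsel = 4320 / 64 - 2 = 65.
		 */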
		if (p->divsel <= 0x7f)
			break;
	}
}

int lpt_iclkip(const struct intel_crtc_state *crtc_state)
{
	struct iclkip_params p;

	lpt_compute_iclkip(&p, crtc_state->hw.adjusted_mode.crtc_clock);

	return lpt_iclkip_freq(&p);
}

/* Program iCLKIP clock to the desired frequency */
void lpt_program_iclkip(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	int clock = crtc_state->hw.adjusted_mode.crtc_clock;
	struct iclkip_params p;
	u32 temp;

	lpt_disable_iclkip(dev_priv);

	lpt_compute_iclkip(&p, clock);
	drm_WARN_ON(&dev_priv->drm, lpt_iclkip_freq(&p) != clock);

	/* This should not happen with any sane values */
	drm_WARN_ON(&dev_priv->drm, SBI_SSCDIVINTPHASE_DIVSEL(p.divsel) &
		    ~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
	drm_WARN_ON(&dev_priv->drm, SBI_SSCDIVINTPHASE_DIR(p.phasedir) &
		    ~SBI_SSCDIVINTPHASE_INCVAL_MASK);

	drm_dbg_kms(&dev_priv->drm,
		    "iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
		    clock, p.auxdiv, p.divsel, p.phasedir, p.phaseinc);

	mutex_lock(&dev_priv->sb_lock);

	/* Program SSCDIVINTPHASE6 */
	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
	temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
	temp |= SBI_SSCDIVINTPHASE_DIVSEL(p.divsel);
	temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
	temp |= SBI_SSCDIVINTPHASE_INCVAL(p.phaseinc);
	temp |= SBI_SSCDIVINTPHASE_DIR(p.phasedir);
	temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);

	/* Program SSCAUXDIV */
	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
	temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
	temp |= SBI_SSCAUXDIV_FINALDIV2SEL(p.auxdiv);
	intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);

	/* Enable modulator and associated divider */
	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	temp &= ~SBI_SSCCTL_DISABLE;
	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);

	/* Wait for initialization time */
	udelay(24);

	intel_de_write(dev_priv, PIXCLK_GATE, PIXCLK_GATE_UNGATE);
}

int lpt_get_iclkip(struct drm_i915_private *dev_priv)
{
	struct iclkip_params p;
	u32 temp;

	if ((intel_de_read(dev_priv, PIXCLK_GATE) & PIXCLK_GATE_UNGATE) == 0)
		return 0;

	iclkip_params_init(&p);

	mutex_lock(&dev_priv->sb_lock);

	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	if (temp & SBI_SSCCTL_DISABLE) {
		mutex_unlock(&dev_priv->sb_lock);
		return 0;
	}

	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
	p.divsel = (temp & SBI_SSCDIVINTPHASE_DIVSEL_MASK) >>
		SBI_SSCDIVINTPHASE_DIVSEL_SHIFT;
	p.phaseinc = (temp & SBI_SSCDIVINTPHASE_INCVAL_MASK) >>
		SBI_SSCDIVINTPHASE_INCVAL_SHIFT;

	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
	p.auxdiv = (temp & SBI_SSCAUXDIV_FINALDIV2SEL_MASK) >>
		SBI_SSCAUXDIV_FINALDIV2SEL_SHIFT;

	mutex_unlock(&dev_priv->sb_lock);

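	/* Invert the divsel/phaseinc split done by lpt_compute_iclkip() */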
	p.desired_divisor = (p.divsel + 2) * p.iclk_pi_range + p.phaseinc;

	return lpt_iclkip_freq(&p);
}

/* Implements 3 different sequences from BSpec chapter "Display iCLK
 * Programming" based on the parameters passed:
 * - Sequence to enable CLKOUT_DP
 * - Sequence to enable CLKOUT_DP without spread
 * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O
 */
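/*
 * Parameter mapping (as enforced by the warnings below): with_fdi implies
 * with_spread, so (true, true) selects the FDI sequence, (true, false)
 * enables CLKOUT_DP with spread, and (false, false) enables it without
 * spread.
 */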
static void lpt_enable_clkout_dp(struct drm_i915_private *dev_priv,
				 bool with_spread, bool with_fdi)
{
	u32 reg, tmp;

	if (drm_WARN(&dev_priv->drm, with_fdi && !with_spread,
		     "FDI requires downspread\n"))
		with_spread = true;
	if (drm_WARN(&dev_priv->drm, HAS_PCH_LPT_LP(dev_priv) &&
		     with_fdi, "LP PCH doesn't have FDI\n"))
		with_fdi = false;

	mutex_lock(&dev_priv->sb_lock);

	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
	tmp &= ~SBI_SSCCTL_DISABLE;
	tmp |= SBI_SSCCTL_PATHALT;
	intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);

	udelay(24);

	if (with_spread) {
		tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
		tmp &= ~SBI_SSCCTL_PATHALT;
		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);

		if (with_fdi)
			lpt_fdi_program_mphy(dev_priv);
	}

	reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
	tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);
}

/* Sequence to disable CLKOUT_DP */
void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv)
{
	u32 reg, tmp;

	mutex_lock(&dev_priv->sb_lock);

	reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
	tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);

	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
	if (!(tmp & SBI_SSCCTL_DISABLE)) {
		if (!(tmp & SBI_SSCCTL_PATHALT)) {
			tmp |= SBI_SSCCTL_PATHALT;
			intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
			udelay(32);
		}
		tmp |= SBI_SSCCTL_DISABLE;
		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
	}

	mutex_unlock(&dev_priv->sb_lock);
}

#define BEND_IDX(steps) ((50 + (steps)) / 5)

static const u16 sscdivintphase[] = {
	[BEND_IDX( 50)] = 0x3B23,
	[BEND_IDX( 45)] = 0x3B23,
	[BEND_IDX( 40)] = 0x3C23,
	[BEND_IDX( 35)] = 0x3C23,
	[BEND_IDX( 30)] = 0x3D23,
	[BEND_IDX( 25)] = 0x3D23,
	[BEND_IDX( 20)] = 0x3E23,
	[BEND_IDX( 15)] = 0x3E23,
	[BEND_IDX( 10)] = 0x3F23,
	[BEND_IDX(  5)] = 0x3F23,
	[BEND_IDX(  0)] = 0x0025,
	[BEND_IDX( -5)] = 0x0025,
	[BEND_IDX(-10)] = 0x0125,
	[BEND_IDX(-15)] = 0x0125,
	[BEND_IDX(-20)] = 0x0225,
	[BEND_IDX(-25)] = 0x0225,
	[BEND_IDX(-30)] = 0x0325,
	[BEND_IDX(-35)] = 0x0325,
	[BEND_IDX(-40)] = 0x0425,
	[BEND_IDX(-45)] = 0x0425,
	[BEND_IDX(-50)] = 0x0525,
};

/*
 * Bend CLKOUT_DP
 * steps -50 to 50 inclusive, in steps of 5
 * < 0 slow down the clock, > 0 speed up the clock, 0 == no bend (135MHz)
 * change in clock period = -(steps / 10) * 5.787 ps
 */
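/*
 * E.g. steps = -10 gives idx = BEND_IDX(-10) = (50 - 10) / 5 = 8, so
 * sscdivintphase[8] = 0x0125 is programmed, and since -10 is a multiple
 * of 10 the dither phase below is left at 0.
 */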
static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps)
{
	u32 tmp;
	int idx = BEND_IDX(steps);

	if (drm_WARN_ON(&dev_priv->drm, steps % 5 != 0))
		return;

	if (drm_WARN_ON(&dev_priv->drm, idx >= ARRAY_SIZE(sscdivintphase)))
		return;

	mutex_lock(&dev_priv->sb_lock);

	if (steps % 10 != 0)
		tmp = 0xAAAAAAAB;
	else
		tmp = 0x00000000;
	intel_sbi_write(dev_priv, SBI_SSCDITHPHASE, tmp, SBI_ICLK);

	tmp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE, SBI_ICLK);
	tmp &= 0xffff0000;
	tmp |= sscdivintphase[idx];
	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE, tmp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);
}

#undef BEND_IDX

static bool spll_uses_pch_ssc(struct drm_i915_private *dev_priv)
{
	u32 fuse_strap = intel_de_read(dev_priv, FUSE_STRAP);
	u32 ctl = intel_de_read(dev_priv, SPLL_CTL);

	if ((ctl & SPLL_PLL_ENABLE) == 0)
		return false;

	if ((ctl & SPLL_REF_MASK) == SPLL_REF_MUXED_SSC &&
	    (fuse_strap & HSW_CPU_SSC_ENABLE) == 0)
		return true;

	if (IS_BROADWELL(dev_priv) &&
	    (ctl & SPLL_REF_MASK) == SPLL_REF_PCH_SSC_BDW)
		return true;

	return false;
}

static bool wrpll_uses_pch_ssc(struct drm_i915_private *dev_priv,
			       enum intel_dpll_id id)
{
	u32 fuse_strap = intel_de_read(dev_priv, FUSE_STRAP);
	u32 ctl = intel_de_read(dev_priv, WRPLL_CTL(id));

	if ((ctl & WRPLL_PLL_ENABLE) == 0)
		return false;

	if ((ctl & WRPLL_REF_MASK) == WRPLL_REF_PCH_SSC)
		return true;

	if ((IS_BROADWELL(dev_priv) || IS_HSW_ULT(dev_priv)) &&
	    (ctl & WRPLL_REF_MASK) == WRPLL_REF_MUXED_SSC_BDW &&
	    (fuse_strap & HSW_CPU_SSC_ENABLE) == 0)
		return true;

	return false;
}

static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;
	bool has_fdi = false;

	for_each_intel_encoder(&dev_priv->drm, encoder) {
		switch (encoder->type) {
		case INTEL_OUTPUT_ANALOG:
			has_fdi = true;
			break;
		default:
			break;
		}
	}

	/*
	 * The BIOS may have decided to use the PCH SSC
	 * reference so we must not disable it until the
	 * relevant PLLs have stopped relying on it. We'll
	 * just leave the PCH SSC reference enabled in case
	 * any active PLL is using it. It will get disabled
	 * after runtime suspend if we don't have FDI.
	 *
	 * TODO: Move the whole reference clock handling
	 * to the modeset sequence proper so that we can
	 * actually enable/disable/reconfigure these things
	 * safely. To do that we need to introduce a real
	 * clock hierarchy. That would also allow us to do
	 * clock bending finally.
	 */
	dev_priv->pch_ssc_use = 0;

	if (spll_uses_pch_ssc(dev_priv)) {
		drm_dbg_kms(&dev_priv->drm, "SPLL using PCH SSC\n");
		dev_priv->pch_ssc_use |= BIT(DPLL_ID_SPLL);
	}

	if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL1)) {
		drm_dbg_kms(&dev_priv->drm, "WRPLL1 using PCH SSC\n");
		dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL1);
	}

	if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL2)) {
		drm_dbg_kms(&dev_priv->drm, "WRPLL2 using PCH SSC\n");
		dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL2);
	}

	if (dev_priv->pch_ssc_use)
		return;

	if (has_fdi) {
		lpt_bend_clkout_dp(dev_priv, 0);
		lpt_enable_clkout_dp(dev_priv, true, true);
	} else {
		lpt_disable_clkout_dp(dev_priv);
	}
}

static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;
	int i;
	u32 val, final;
	bool has_lvds = false;
	bool has_cpu_edp = false;
	bool has_panel = false;
	bool has_ck505 = false;
	bool can_ssc = false;
	bool using_ssc_source = false;

	/* We need to take the global config into account */
	for_each_intel_encoder(&dev_priv->drm, encoder) {
		switch (encoder->type) {
		case INTEL_OUTPUT_LVDS:
			has_panel = true;
			has_lvds = true;
			break;
		case INTEL_OUTPUT_EDP:
			has_panel = true;
			if (encoder->port == PORT_A)
				has_cpu_edp = true;
			break;
		default:
			break;
		}
	}

	if (HAS_PCH_IBX(dev_priv)) {
		has_ck505 = dev_priv->display.vbt.display_clock_mode;
		can_ssc = has_ck505;
	} else {
		has_ck505 = false;
		can_ssc = true;
	}

	/* Check if any DPLLs are using the SSC source */
	for (i = 0; i < dev_priv->display.dpll.num_shared_dpll; i++) {
		u32 temp = intel_de_read(dev_priv, PCH_DPLL(i));

		if (!(temp & DPLL_VCO_ENABLE))
			continue;

		if ((temp & PLL_REF_INPUT_MASK) ==
		    PLLB_REF_INPUT_SPREADSPECTRUMIN) {
			using_ssc_source = true;
			break;
		}
	}

	drm_dbg_kms(&dev_priv->drm,
		    "has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n",
		    has_panel, has_lvds, has_ck505, using_ssc_source);

	/* Ironlake: try to setup the display ref clock before DPLL
	 * enabling. This is only under the driver's control after
	 * PCH B stepping; earlier chipset steppings should ignore
	 * this setting.
	 */
	val = intel_de_read(dev_priv, PCH_DREF_CONTROL);

	/* As we must carefully and slowly disable/enable each source in turn,
	 * compute the final state we want first and check if we need to
	 * make any changes at all.
	 */
	final = val;
	final &= ~DREF_NONSPREAD_SOURCE_MASK;
	if (has_ck505)
		final |= DREF_NONSPREAD_CK505_ENABLE;
	else
		final |= DREF_NONSPREAD_SOURCE_ENABLE;

	final &= ~DREF_SSC_SOURCE_MASK;
	final &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
	final &= ~DREF_SSC1_ENABLE;

	if (has_panel) {
		final |= DREF_SSC_SOURCE_ENABLE;

		if (intel_panel_use_ssc(dev_priv) && can_ssc)
			final |= DREF_SSC1_ENABLE;

		if (has_cpu_edp) {
			if (intel_panel_use_ssc(dev_priv) && can_ssc)
				final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
			else
				final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
		} else {
			final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
		}
	} else if (using_ssc_source) {
		final |= DREF_SSC_SOURCE_ENABLE;
		final |= DREF_SSC1_ENABLE;
	}

	if (final == val)
		return;

	/* Always enable nonspread source */
	val &= ~DREF_NONSPREAD_SOURCE_MASK;

	if (has_ck505)
		val |= DREF_NONSPREAD_CK505_ENABLE;
	else
		val |= DREF_NONSPREAD_SOURCE_ENABLE;

	if (has_panel) {
		val &= ~DREF_SSC_SOURCE_MASK;
		val |= DREF_SSC_SOURCE_ENABLE;

		/* SSC must be turned on before enabling the CPU output */
		if (intel_panel_use_ssc(dev_priv) && can_ssc) {
			drm_dbg_kms(&dev_priv->drm, "Using SSC on panel\n");
			val |= DREF_SSC1_ENABLE;
		} else {
			val &= ~DREF_SSC1_ENABLE;
		}

		/* Get SSC going before enabling the outputs */
		intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
		intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
		udelay(200);

		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

		/* Enable CPU source on CPU attached eDP */
		if (has_cpu_edp) {
			if (intel_panel_use_ssc(dev_priv) && can_ssc) {
				drm_dbg_kms(&dev_priv->drm,
					    "Using SSC on eDP\n");
				val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
			} else {
				val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
			}
		} else {
			val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
		}

		intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
		intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
		udelay(200);
	} else {
		drm_dbg_kms(&dev_priv->drm, "Disabling CPU source output\n");

		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

		/* Turn off CPU output */
		val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;

		intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
		intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
		udelay(200);

		if (!using_ssc_source) {
			drm_dbg_kms(&dev_priv->drm, "Disabling SSC source\n");

			/* Turn off the SSC source */
			val &= ~DREF_SSC_SOURCE_MASK;
			val |= DREF_SSC_SOURCE_DISABLE;

			/* Turn off SSC1 */
			val &= ~DREF_SSC1_ENABLE;

			intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
			intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
			udelay(200);
		}
	}

	drm_WARN_ON(&dev_priv->drm, val != final);
}

/*
 * Initialize reference clocks when the driver loads
 */
void intel_init_pch_refclk(struct drm_i915_private *dev_priv)
{
	if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
		ilk_init_pch_refclk(dev_priv);
	else if (HAS_PCH_LPT(dev_priv))
		lpt_init_pch_refclk(dev_priv);
}