// SPDX-License-Identifier: MIT
/*
 * Copyright © 2020 Intel Corporation
 */

#include <linux/string_helpers.h>

#include "intel_atomic.h"
#include "intel_crtc.h"
#include "intel_ddi.h"
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_fdi.h"

struct intel_fdi_funcs {
	void (*fdi_link_train)(struct intel_crtc *crtc,
			       const struct intel_crtc_state *crtc_state);
};

static void assert_fdi_tx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	bool cur_state;

	if (HAS_DDI(dev_priv)) {
		/*
		 * DDI does not have a specific FDI_TX register.
		 *
		 * FDI is never fed from EDP transcoder
		 * so pipe->transcoder cast is fine here.
		 */
		enum transcoder cpu_transcoder = (enum transcoder)pipe;
		cur_state = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder)) & TRANS_DDI_FUNC_ENABLE;
	} else {
		cur_state = intel_de_read(dev_priv, FDI_TX_CTL(pipe)) & FDI_TX_ENABLE;
	}
	I915_STATE_WARN(cur_state != state,
			"FDI TX state assertion failure (expected %s, current %s)\n",
			str_on_off(state), str_on_off(cur_state));
}

void assert_fdi_tx_enabled(struct drm_i915_private *i915, enum pipe pipe)
{
	assert_fdi_tx(i915, pipe, true);
}

void assert_fdi_tx_disabled(struct drm_i915_private *i915, enum pipe pipe)
{
	assert_fdi_tx(i915, pipe, false);
}

static void assert_fdi_rx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	bool cur_state;

	cur_state = intel_de_read(dev_priv, FDI_RX_CTL(pipe)) & FDI_RX_ENABLE;
	I915_STATE_WARN(cur_state != state,
			"FDI RX state assertion failure (expected %s, current %s)\n",
			str_on_off(state), str_on_off(cur_state));
}

void assert_fdi_rx_enabled(struct drm_i915_private *i915, enum pipe pipe)
{
	assert_fdi_rx(i915, pipe, true);
}

void assert_fdi_rx_disabled(struct drm_i915_private *i915, enum pipe pipe)
{
	assert_fdi_rx(i915, pipe, false);
}

void assert_fdi_tx_pll_enabled(struct drm_i915_private *i915,
			       enum pipe pipe)
{
	bool cur_state;

	/* ILK FDI PLL is always enabled */
	if (IS_IRONLAKE(i915))
		return;

	/* On Haswell, DDI ports are responsible for the FDI PLL setup */
	if (HAS_DDI(i915))
		return;

	cur_state = intel_de_read(i915, FDI_TX_CTL(pipe)) & FDI_TX_PLL_ENABLE;
	I915_STATE_WARN(!cur_state, "FDI TX PLL assertion failure, should be active but is disabled\n");
}

static void assert_fdi_rx_pll(struct drm_i915_private *i915,
			      enum pipe pipe, bool state)
{
	bool cur_state;

	cur_state = intel_de_read(i915, FDI_RX_CTL(pipe)) & FDI_RX_PLL_ENABLE;
	I915_STATE_WARN(cur_state != state,
			"FDI RX PLL assertion failure (expected %s, current %s)\n",
			str_on_off(state), str_on_off(cur_state));
}

void assert_fdi_rx_pll_enabled(struct drm_i915_private *i915, enum pipe pipe)
{
	assert_fdi_rx_pll(i915, pipe, true);
}

void assert_fdi_rx_pll_disabled(struct drm_i915_private *i915, enum pipe pipe)
{
	assert_fdi_rx_pll(i915, pipe, false);
}

void intel_fdi_link_train(struct intel_crtc *crtc,
			  const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	dev_priv->fdi_funcs->fdi_link_train(crtc, crtc_state);
}

/* units of 100MHz */
static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state)
{
	if (crtc_state->hw.enable && crtc_state->has_pch_encoder)
		return crtc_state->fdi_lanes;

	return 0;
}

static int ilk_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
			       struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_atomic_state *state = pipe_config->uapi.state;
	struct intel_crtc *other_crtc;
	struct intel_crtc_state *other_crtc_state;

	drm_dbg_kms(&dev_priv->drm,
		    "checking fdi config on pipe %c, lanes %i\n",
		    pipe_name(pipe), pipe_config->fdi_lanes);
	if (pipe_config->fdi_lanes > 4) {
		drm_dbg_kms(&dev_priv->drm,
			    "invalid fdi lane config on pipe %c: %i lanes\n",
			    pipe_name(pipe), pipe_config->fdi_lanes);
		return -EINVAL;
	}

	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		if (pipe_config->fdi_lanes > 2) {
			drm_dbg_kms(&dev_priv->drm,
				    "only 2 lanes on haswell, required: %i lanes\n",
				    pipe_config->fdi_lanes);
			return -EINVAL;
		} else {
			return 0;
		}
	}

	if (INTEL_NUM_PIPES(dev_priv) == 2)
		return 0;

	/* Ivybridge 3 pipe is really complicated */
	switch (pipe) {
	case PIPE_A:
		return 0;
	case PIPE_B:
		if (pipe_config->fdi_lanes <= 2)
			return 0;

		other_crtc = intel_crtc_for_pipe(dev_priv, PIPE_C);
		other_crtc_state =
			intel_atomic_get_crtc_state(state, other_crtc);
		if (IS_ERR(other_crtc_state))
			return PTR_ERR(other_crtc_state);

		if (pipe_required_fdi_lanes(other_crtc_state) > 0) {
			drm_dbg_kms(&dev_priv->drm,
				    "invalid shared fdi lane config on pipe %c: %i lanes\n",
				    pipe_name(pipe), pipe_config->fdi_lanes);
			return -EINVAL;
		}
		return 0;
	case PIPE_C:
		if (pipe_config->fdi_lanes > 2) {
			drm_dbg_kms(&dev_priv->drm,
				    "only 2 lanes on pipe %c: required %i lanes\n",
				    pipe_name(pipe), pipe_config->fdi_lanes);
			return -EINVAL;
		}

		other_crtc = intel_crtc_for_pipe(dev_priv, PIPE_B);
		other_crtc_state =
			intel_atomic_get_crtc_state(state, other_crtc);
		if (IS_ERR(other_crtc_state))
			return PTR_ERR(other_crtc_state);

		if (pipe_required_fdi_lanes(other_crtc_state) > 2) {
			drm_dbg_kms(&dev_priv->drm,
				    "fdi link B uses too many lanes to enable link C\n");
			return -EINVAL;
		}
		return 0;
	default:
		MISSING_CASE(pipe);
		return 0;
	}
}

void intel_fdi_pll_freq_update(struct drm_i915_private *i915)
{
	if (IS_IRONLAKE(i915)) {
		u32 fdi_pll_clk =
			intel_de_read(i915, FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK;

		i915->fdi_pll_freq = (fdi_pll_clk + 2) * 10000;
	} else if (IS_SANDYBRIDGE(i915) || IS_IVYBRIDGE(i915)) {
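		/* SNB/IVB always drive the FDI link at ~2.7 GHz */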
		i915->fdi_pll_freq = 270000;
	} else {
		return;
	}

	drm_dbg(&i915->drm, "FDI PLL freq=%d\n", i915->fdi_pll_freq);
}

int intel_fdi_link_freq(struct drm_i915_private *i915,
			const struct intel_crtc_state *pipe_config)
{
	if (HAS_DDI(i915))
		return pipe_config->port_clock; /* SPLL */
	else
		return i915->fdi_pll_freq;
}

int ilk_fdi_compute_config(struct intel_crtc *crtc,
			   struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *i915 = to_i915(dev);
	const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
	int lane, link_bw, fdi_dotclock, ret;
	bool needs_recompute = false;

retry:
	/* FDI is a binary signal running at ~2.7GHz, encoding
	 * each output octet as 10 bits. The actual frequency
	 * is stored as a divider into a 100MHz clock, and the
	 * mode pixel clock is stored in units of 1KHz.
	 * Hence the bw of each lane in terms of the mode signal
	 * is:
	 */
	link_bw = intel_fdi_link_freq(i915, pipe_config);

	fdi_dotclock = adjusted_mode->crtc_clock;

	lane = ilk_get_lanes_required(fdi_dotclock, link_bw,
				      pipe_config->pipe_bpp);

	pipe_config->fdi_lanes = lane;

	intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
			       link_bw, &pipe_config->fdi_m_n, false, false);

	ret = ilk_check_fdi_lanes(dev, crtc->pipe, pipe_config);
	if (ret == -EDEADLK)
		return ret;

	if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) {
		pipe_config->pipe_bpp -= 2*3;
		drm_dbg_kms(&i915->drm,
			    "fdi link bw constraint, reducing pipe bpp to %i\n",
			    pipe_config->pipe_bpp);
		needs_recompute = true;
		pipe_config->bw_constrained = true;

		goto retry;
	}

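	/* A lower bpp was picked above, so the state needs to be computed again with it */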
	if (needs_recompute)
		return -EAGAIN;

	return ret;
}

static void cpt_set_fdi_bc_bifurcation(struct drm_i915_private *dev_priv, bool enable)
{
	u32 temp;

	temp = intel_de_read(dev_priv, SOUTH_CHICKEN1);
	if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable)
		return;

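	/*
	 * Toggling bifurcation reassigns the FDI lanes shared between
	 * FDI B and FDI C, so both receivers must be disabled here.
	 */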
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, FDI_RX_CTL(PIPE_B)) &
		    FDI_RX_ENABLE);
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, FDI_RX_CTL(PIPE_C)) &
		    FDI_RX_ENABLE);

	temp &= ~FDI_BC_BIFURCATION_SELECT;
	if (enable)
		temp |= FDI_BC_BIFURCATION_SELECT;

	drm_dbg_kms(&dev_priv->drm, "%sabling fdi C rx\n",
		    enable ? "en" : "dis");
	intel_de_write(dev_priv, SOUTH_CHICKEN1, temp);
	intel_de_posting_read(dev_priv, SOUTH_CHICKEN1);
}

static void ivb_update_fdi_bc_bifurcation(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	switch (crtc->pipe) {
	case PIPE_A:
		break;
	case PIPE_B:
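		/*
		 * Pipe B needs all four lanes when fdi_lanes > 2, so
		 * bifurcation must be off; otherwise split the lanes so
		 * pipe C can use FDI as well.
		 */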
		if (crtc_state->fdi_lanes > 2)
			cpt_set_fdi_bc_bifurcation(dev_priv, false);
		else
			cpt_set_fdi_bc_bifurcation(dev_priv, true);

		break;
	case PIPE_C:
		cpt_set_fdi_bc_bifurcation(dev_priv, true);

		break;
	default:
		MISSING_CASE(crtc->pipe);
	}
}

void intel_fdi_normal_train(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* enable normal train */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	if (IS_IVYBRIDGE(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
	}
	intel_de_write(dev_priv, reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE;
	}
	intel_de_write(dev_priv, reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);

	/* wait one idle pattern time */
	intel_de_posting_read(dev_priv, reg);
	udelay(1000);

	/* IVB wants error correction enabled */
	if (IS_IVYBRIDGE(dev_priv))
		intel_de_write(dev_priv, reg,
			       intel_de_read(dev_priv, reg) | FDI_FS_ERRC_ENABLE | FDI_FE_ERRC_ENABLE);
}

/* The FDI link training functions for ILK/Ibexpeak. */
static void ilk_fdi_link_train(struct intel_crtc *crtc,
			       const struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, tries;

	/*
	 * Write the TU size bits before fdi link training, so that error
	 * detection works.
	 */
	intel_de_write(dev_priv, FDI_RX_TUSIZE1(pipe),
		       intel_de_read(dev_priv, PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);

	/* FDI needs bits from pipe first */
	assert_transcoder_enabled(dev_priv, crtc_state->cpu_transcoder);

	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	intel_de_write(dev_priv, reg, temp);
	intel_de_read(dev_priv, reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE);

	intel_de_posting_read(dev_priv, reg);
	udelay(150);

	/* Ironlake workaround, enable clock pointer after FDI enable */
	intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe),
		       FDI_RX_PHASE_SYNC_POINTER_OVR);
	intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe),
		       FDI_RX_PHASE_SYNC_POINTER_OVR | FDI_RX_PHASE_SYNC_POINTER_EN);

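	/* Poll FDI_RX_IIR for bit lock, which signals that training pattern 1 succeeded */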
	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = intel_de_read(dev_priv, reg);
		drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);

		if ((temp & FDI_RX_BIT_LOCK)) {
			drm_dbg_kms(&dev_priv->drm, "FDI train 1 done.\n");
			intel_de_write(dev_priv, reg, temp | FDI_RX_BIT_LOCK);
			break;
		}
	}
	if (tries == 5)
		drm_err(&dev_priv->drm, "FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	intel_de_write(dev_priv, reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	intel_de_write(dev_priv, reg, temp);

	intel_de_posting_read(dev_priv, reg);
	udelay(150);

	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = intel_de_read(dev_priv, reg);
		drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);

		if (temp & FDI_RX_SYMBOL_LOCK) {
			intel_de_write(dev_priv, reg,
				       temp | FDI_RX_SYMBOL_LOCK);
			drm_dbg_kms(&dev_priv->drm, "FDI train 2 done.\n");
			break;
		}
	}
	if (tries == 5)
		drm_err(&dev_priv->drm, "FDI train 2 fail!\n");

	drm_dbg_kms(&dev_priv->drm, "FDI train done\n");
}

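/* Vswing/pre-emphasis levels tried in order during SNB/IVB FDI link training */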
static const int snb_b_fdi_train_param[] = {
	FDI_LINK_TRAIN_400MV_0DB_SNB_B,
	FDI_LINK_TRAIN_400MV_6DB_SNB_B,
	FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
	FDI_LINK_TRAIN_800MV_0DB_SNB_B,
};

/* The FDI link training functions for SNB/Cougarpoint. */
static void gen6_fdi_link_train(struct intel_crtc *crtc,
				const struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, i, retry;

	/*
	 * Write the TU size bits before fdi link training, so that error
	 * detection works.
	 */
	intel_de_write(dev_priv, FDI_RX_TUSIZE1(pipe),
		       intel_de_read(dev_priv, PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);

	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	intel_de_write(dev_priv, reg, temp);

	intel_de_posting_read(dev_priv, reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
	/* SNB-B */
	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE);

	intel_de_write(dev_priv, FDI_RX_MISC(pipe),
		       FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE);

	intel_de_posting_read(dev_priv, reg);
	udelay(150);

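	/* Step through the vswing/pre-emphasis table until the receiver reports bit lock */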
	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = intel_de_read(dev_priv, reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		intel_de_write(dev_priv, reg, temp);

		intel_de_posting_read(dev_priv, reg);
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = intel_de_read(dev_priv, reg);
			drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_BIT_LOCK) {
				intel_de_write(dev_priv, reg,
					       temp | FDI_RX_BIT_LOCK);
				drm_dbg_kms(&dev_priv->drm,
					    "FDI train 1 done.\n");
				break;
			}
			udelay(50);
		}
		if (retry < 5)
			break;
	}
	if (i == 4)
		drm_err(&dev_priv->drm, "FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	if (IS_SANDYBRIDGE(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		/* SNB-B */
		temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	}
	intel_de_write(dev_priv, reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_2;
	}
	intel_de_write(dev_priv, reg, temp);

	intel_de_posting_read(dev_priv, reg);
	udelay(150);

	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = intel_de_read(dev_priv, reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		intel_de_write(dev_priv, reg, temp);

		intel_de_posting_read(dev_priv, reg);
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = intel_de_read(dev_priv, reg);
			drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_SYMBOL_LOCK) {
				intel_de_write(dev_priv, reg,
					       temp | FDI_RX_SYMBOL_LOCK);
				drm_dbg_kms(&dev_priv->drm,
					    "FDI train 2 done.\n");
				break;
			}
			udelay(50);
		}
		if (retry < 5)
			break;
	}
	if (i == 4)
		drm_err(&dev_priv->drm, "FDI train 2 fail!\n");

	drm_dbg_kms(&dev_priv->drm, "FDI train done.\n");
}

/* Manual link training for Ivy Bridge A0 parts */
static void ivb_manual_fdi_link_train(struct intel_crtc *crtc,
				      const struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp, i, j;

	ivb_update_fdi_bc_bifurcation(crtc_state);

	/*
	 * Write the TU size bits before fdi link training, so that error
	 * detection works.
	 */
	intel_de_write(dev_priv, FDI_RX_TUSIZE1(pipe),
		       intel_de_read(dev_priv, PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);

	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	intel_de_write(dev_priv, reg, temp);

	intel_de_posting_read(dev_priv, reg);
	udelay(150);

	drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR before link train 0x%x\n",
		    intel_de_read(dev_priv, FDI_RX_IIR(pipe)));

	/* Try each vswing and preemphasis setting twice before moving on */
	for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
		/* disable first in case we need to retry */
		reg = FDI_TX_CTL(pipe);
		temp = intel_de_read(dev_priv, reg);
		temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
		temp &= ~FDI_TX_ENABLE;
		intel_de_write(dev_priv, reg, temp);

		reg = FDI_RX_CTL(pipe);
		temp = intel_de_read(dev_priv, reg);
		temp &= ~FDI_LINK_TRAIN_AUTO;
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp &= ~FDI_RX_ENABLE;
		intel_de_write(dev_priv, reg, temp);

		/* enable CPU FDI TX and PCH FDI RX */
		reg = FDI_TX_CTL(pipe);
		temp = intel_de_read(dev_priv, reg);
		temp &= ~FDI_DP_PORT_WIDTH_MASK;
		temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
		temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[j/2];
		temp |= FDI_COMPOSITE_SYNC;
		intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE);

		intel_de_write(dev_priv, FDI_RX_MISC(pipe),
			       FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

		reg = FDI_RX_CTL(pipe);
		temp = intel_de_read(dev_priv, reg);
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
		temp |= FDI_COMPOSITE_SYNC;
		intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE);

		intel_de_posting_read(dev_priv, reg);
		udelay(1); /* should be 0.5us */

		for (i = 0; i < 4; i++) {
			reg = FDI_RX_IIR(pipe);
			temp = intel_de_read(dev_priv, reg);
			drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);

			if (temp & FDI_RX_BIT_LOCK ||
			    (intel_de_read(dev_priv, reg) & FDI_RX_BIT_LOCK)) {
				intel_de_write(dev_priv, reg,
					       temp | FDI_RX_BIT_LOCK);
				drm_dbg_kms(&dev_priv->drm,
					    "FDI train 1 done, level %i.\n",
					    i);
				break;
			}
			udelay(1); /* should be 0.5us */
		}
		if (i == 4) {
			drm_dbg_kms(&dev_priv->drm,
				    "FDI train 1 fail on vswing %d\n", j / 2);
			continue;
		}

		/* Train 2 */
		reg = FDI_TX_CTL(pipe);
		temp = intel_de_read(dev_priv, reg);
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
		intel_de_write(dev_priv, reg, temp);

		reg = FDI_RX_CTL(pipe);
		temp = intel_de_read(dev_priv, reg);
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
		intel_de_write(dev_priv, reg, temp);

		intel_de_posting_read(dev_priv, reg);
		udelay(2); /* should be 1.5us */

		for (i = 0; i < 4; i++) {
			reg = FDI_RX_IIR(pipe);
			temp = intel_de_read(dev_priv, reg);
			drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);

			if (temp & FDI_RX_SYMBOL_LOCK ||
			    (intel_de_read(dev_priv, reg) & FDI_RX_SYMBOL_LOCK)) {
				intel_de_write(dev_priv, reg,
					       temp | FDI_RX_SYMBOL_LOCK);
				drm_dbg_kms(&dev_priv->drm,
					    "FDI train 2 done, level %i.\n",
					    i);
				goto train_done;
			}
			udelay(2); /* should be 1.5us */
		}
		if (i == 4)
			drm_dbg_kms(&dev_priv->drm,
				    "FDI train 2 fail on vswing %d\n", j / 2);
	}

train_done:
	drm_dbg_kms(&dev_priv->drm, "FDI train done.\n");
}

/* Starting with Haswell, different DDI ports can work in FDI mode for
 * connection to the PCH-located connectors. For this, it is necessary to train
 * both the DDI port and PCH receiver for the desired DDI buffer settings.
 *
 * The recommended port to work in FDI mode is DDI E, which we use here. Also,
 * please note that when FDI mode is active on DDI E, it shares 2 lines with
 * DDI A (which is used for eDP)
 */
void hsw_fdi_link_train(struct intel_encoder *encoder,
			const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	u32 temp, i, rx_ctl_val;
	int n_entries;

	encoder->get_buf_trans(encoder, crtc_state, &n_entries);

	hsw_prepare_dp_ddi_buffers(encoder, crtc_state);

	/* Set the FDI_RX_MISC pwrdn lanes and the 2 workarounds listed at the
	 * mode set "sequence for CRT port" document:
	 * - TP1 to TP2 time with the default value
	 * - FDI delay to 90h
	 *
	 * WaFDIAutoLinkSetTimingOverrride:hsw
	 */
	intel_de_write(dev_priv, FDI_RX_MISC(PIPE_A),
		       FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2) | FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

	/* Enable the PCH Receiver FDI PLL */
	rx_ctl_val = dev_priv->fdi_rx_config | FDI_RX_ENHANCE_FRAME_ENABLE |
		     FDI_RX_PLL_ENABLE |
		     FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
	intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val);
	intel_de_posting_read(dev_priv, FDI_RX_CTL(PIPE_A));
	udelay(220);

	/* Switch from Rawclk to PCDclk */
	rx_ctl_val |= FDI_PCDCLK;
	intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val);

	/* Configure Port Clock Select */
	drm_WARN_ON(&dev_priv->drm, crtc_state->shared_dpll->info->id != DPLL_ID_SPLL);
	intel_ddi_enable_clock(encoder, crtc_state);

	/* Start the training iterating through available voltages and emphasis,
	 * testing each value twice. */
	for (i = 0; i < n_entries * 2; i++) {
		/* Configure DP_TP_CTL with auto-training */
		intel_de_write(dev_priv, DP_TP_CTL(PORT_E),
			       DP_TP_CTL_FDI_AUTOTRAIN |
			       DP_TP_CTL_ENHANCED_FRAME_ENABLE |
			       DP_TP_CTL_LINK_TRAIN_PAT1 |
			       DP_TP_CTL_ENABLE);

		/* Configure and enable DDI_BUF_CTL for DDI E with next voltage.
		 * DDI E does not support port reversal, the functionality is
		 * achieved on the PCH side in FDI_RX_CTL, so no need to set the
		 * port reversal bit */
		intel_de_write(dev_priv, DDI_BUF_CTL(PORT_E),
			       DDI_BUF_CTL_ENABLE | ((crtc_state->fdi_lanes - 1) << 1) | DDI_BUF_TRANS_SELECT(i / 2));
		intel_de_posting_read(dev_priv, DDI_BUF_CTL(PORT_E));

		udelay(600);

		/* Program PCH FDI Receiver TU */
		intel_de_write(dev_priv, FDI_RX_TUSIZE1(PIPE_A), TU_SIZE(64));

		/* Enable PCH FDI Receiver with auto-training */
		rx_ctl_val |= FDI_RX_ENABLE | FDI_LINK_TRAIN_AUTO;
		intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val);
		intel_de_posting_read(dev_priv, FDI_RX_CTL(PIPE_A));

		/* Wait for FDI receiver lane calibration */
		udelay(30);

		/* Unset FDI_RX_MISC pwrdn lanes */
		temp = intel_de_read(dev_priv, FDI_RX_MISC(PIPE_A));
		temp &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
		intel_de_write(dev_priv, FDI_RX_MISC(PIPE_A), temp);
		intel_de_posting_read(dev_priv, FDI_RX_MISC(PIPE_A));

		/* Wait for FDI auto training time */
		udelay(5);

		temp = intel_de_read(dev_priv, DP_TP_STATUS(PORT_E));
		if (temp & DP_TP_STATUS_AUTOTRAIN_DONE) {
			drm_dbg_kms(&dev_priv->drm,
				    "FDI link training done on step %d\n", i);
			break;
		}

		/*
		 * Leave things enabled even if we failed to train FDI.
		 * Results in less fireworks from the state checker.
		 */
		if (i == n_entries * 2 - 1) {
			drm_err(&dev_priv->drm, "FDI link training failed!\n");
			break;
		}

		rx_ctl_val &= ~FDI_RX_ENABLE;
		intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val);
		intel_de_posting_read(dev_priv, FDI_RX_CTL(PIPE_A));

		temp = intel_de_read(dev_priv, DDI_BUF_CTL(PORT_E));
		temp &= ~DDI_BUF_CTL_ENABLE;
		intel_de_write(dev_priv, DDI_BUF_CTL(PORT_E), temp);
		intel_de_posting_read(dev_priv, DDI_BUF_CTL(PORT_E));

		/* Disable DP_TP_CTL and FDI_RX_CTL and retry */
		temp = intel_de_read(dev_priv, DP_TP_CTL(PORT_E));
		temp &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK);
		temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
		intel_de_write(dev_priv, DP_TP_CTL(PORT_E), temp);
		intel_de_posting_read(dev_priv, DP_TP_CTL(PORT_E));

		intel_wait_ddi_buf_idle(dev_priv, PORT_E);

		/* Reset FDI_RX_MISC pwrdn lanes */
		temp = intel_de_read(dev_priv, FDI_RX_MISC(PIPE_A));
		temp &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
		temp |= FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2);
		intel_de_write(dev_priv, FDI_RX_MISC(PIPE_A), temp);
		intel_de_posting_read(dev_priv, FDI_RX_MISC(PIPE_A));
	}

	/* Enable normal pixel sending for FDI */
	intel_de_write(dev_priv, DP_TP_CTL(PORT_E),
		       DP_TP_CTL_FDI_AUTOTRAIN |
		       DP_TP_CTL_LINK_TRAIN_NORMAL |
		       DP_TP_CTL_ENHANCED_FRAME_ENABLE |
		       DP_TP_CTL_ENABLE);
}

void hsw_fdi_disable(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	u32 val;

	/*
	 * Bspec lists this as both step 13 (before DDI_BUF_CTL disable)
	 * and step 18 (after clearing PORT_CLK_SEL). Based on a BUN,
	 * step 13 is the correct place for it. Step 18 is where it was
	 * originally before the BUN.
	 */
	val = intel_de_read(dev_priv, FDI_RX_CTL(PIPE_A));
	val &= ~FDI_RX_ENABLE;
	intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), val);

	val = intel_de_read(dev_priv, DDI_BUF_CTL(PORT_E));
	val &= ~DDI_BUF_CTL_ENABLE;
	intel_de_write(dev_priv, DDI_BUF_CTL(PORT_E), val);

	intel_wait_ddi_buf_idle(dev_priv, PORT_E);

	intel_ddi_disable_clock(encoder);

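	/* Restore the FDI_RX_MISC pwrdn lane values programmed before training */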
	val = intel_de_read(dev_priv, FDI_RX_MISC(PIPE_A));
	val &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
	val |= FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2);
	intel_de_write(dev_priv, FDI_RX_MISC(PIPE_A), val);

	val = intel_de_read(dev_priv, FDI_RX_CTL(PIPE_A));
	val &= ~FDI_PCDCLK;
	intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), val);

	val = intel_de_read(dev_priv, FDI_RX_CTL(PIPE_A));
	val &= ~FDI_RX_PLL_ENABLE;
	intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), val);
}

void ilk_fdi_pll_enable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
	temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
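	/* BPC in FDI rx must match the bpc selected in PIPECONF */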
	temp |= (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	intel_de_write(dev_priv, reg, temp | FDI_RX_PLL_ENABLE);

	intel_de_posting_read(dev_priv, reg);
	udelay(200);

	/* Switch from Rawclk to PCDclk */
	temp = intel_de_read(dev_priv, reg);
	intel_de_write(dev_priv, reg, temp | FDI_PCDCLK);

	intel_de_posting_read(dev_priv, reg);
	udelay(200);

	/* Enable CPU FDI TX PLL, always on for Ironlake */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	if ((temp & FDI_TX_PLL_ENABLE) == 0) {
		intel_de_write(dev_priv, reg, temp | FDI_TX_PLL_ENABLE);

		intel_de_posting_read(dev_priv, reg);
		udelay(100);
	}
}

void ilk_fdi_pll_disable(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* Switch from PCDclk to Rawclk */
	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	intel_de_write(dev_priv, reg, temp & ~FDI_PCDCLK);

	/* Disable CPU FDI TX PLL */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	intel_de_write(dev_priv, reg, temp & ~FDI_TX_PLL_ENABLE);

	intel_de_posting_read(dev_priv, reg);
	udelay(100);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	intel_de_write(dev_priv, reg, temp & ~FDI_RX_PLL_ENABLE);

	/* Wait for the clocks to turn off. */
	intel_de_posting_read(dev_priv, reg);
	udelay(100);
}

void ilk_fdi_disable(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* disable CPU FDI tx and PCH FDI rx */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	intel_de_write(dev_priv, reg, temp & ~FDI_TX_ENABLE);
	intel_de_posting_read(dev_priv, reg);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~(0x7 << 16);
	temp |= (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	intel_de_write(dev_priv, reg, temp & ~FDI_RX_ENABLE);

	intel_de_posting_read(dev_priv, reg);
	udelay(100);

	/* Ironlake workaround, disable clock pointer after downing FDI */
	if (HAS_PCH_IBX(dev_priv))
		intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe),
			       FDI_RX_PHASE_SYNC_POINTER_OVR);

	/* still set train pattern 1 */
	reg = FDI_TX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	intel_de_write(dev_priv, reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = intel_de_read(dev_priv, reg);
	if (HAS_PCH_CPT(dev_priv)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	/* BPC in FDI rx is consistent with that in PIPECONF */
	temp &= ~(0x07 << 16);
	temp |= (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	intel_de_write(dev_priv, reg, temp);

	intel_de_posting_read(dev_priv, reg);
	udelay(100);
}

static const struct intel_fdi_funcs ilk_funcs = {
	.fdi_link_train = ilk_fdi_link_train,
};

static const struct intel_fdi_funcs gen6_funcs = {
	.fdi_link_train = gen6_fdi_link_train,
};

static const struct intel_fdi_funcs ivb_funcs = {
	.fdi_link_train = ivb_manual_fdi_link_train,
};

void
intel_fdi_init_hook(struct drm_i915_private *dev_priv)
{
	if (IS_IRONLAKE(dev_priv)) {
		dev_priv->fdi_funcs = &ilk_funcs;
	} else if (IS_SANDYBRIDGE(dev_priv)) {
		dev_priv->fdi_funcs = &gen6_funcs;
	} else if (IS_IVYBRIDGE(dev_priv)) {
		/* FIXME: detect B0+ stepping and use auto training */
		dev_priv->fdi_funcs = &ivb_funcs;
	}
}