/*
 * Copyright 2014 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "dp.h"
#include "conn.h"
#include "head.h"
#include "ior.h"

#include <subdev/bios.h>
#include <subdev/bios/init.h>
#include <subdev/gpio.h>
#include <subdev/i2c.h>

#include <nvif/event.h>

/* IED scripts are no longer used by UEFI/RM from Ampere, but have been updated for
 * the x86 option ROM.  However, the relevant VBIOS table versions weren't modified,
 * so we're unable to detect this in a nice way.
 */
#define AMPERE_IED_HACK(disp) ((disp)->engine.subdev.device->card_type >= GA100)

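/* Per-attempt link-training state.  'repeaters' is the number of LTTPRs
 * detected between source and sink, and 'repeater' is the unit currently
 * being trained (0 == the sink itself).  'stat' caches the DPCD lane-status
 * and adjust-request bytes read back after each step, 'conf' the drive
 * settings written to TRAINING_LANEx_SET, and the pc2* fields carry the
 * optional post-cursor2 adjustment state.
 */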
struct lt_state {
	struct nvkm_dp *dp;

	int repeaters;
	int repeater;

	u8  stat[6];
	u8  conf[4];
	bool pc2;
	u8  pc2stat;
	u8  pc2conf[2];
};

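/* Wait out the required AUX read interval, then read back the lane-status
 * and adjust-request registers from whichever device is currently being
 * trained (the LTTPR-specific DPCD addresses when lt->repeater is non-zero,
 * the sink's otherwise), plus the post-cursor2 status when requested.
 */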
static int
nvkm_dp_train_sense(struct lt_state *lt, bool pc, u32 delay)
{
	struct nvkm_dp *dp = lt->dp;
	u32 addr;
	int ret;

	usleep_range(delay, delay * 2);

	if (lt->repeater)
		addr = DPCD_LTTPR_LANE0_1_STATUS(lt->repeater);
	else
		addr = DPCD_LS02;

	ret = nvkm_rdaux(dp->aux, addr, &lt->stat[0], 3);
	if (ret)
		return ret;

	if (lt->repeater)
		addr = DPCD_LTTPR_LANE0_1_ADJUST(lt->repeater);
	else
		addr = DPCD_LS06;

	ret = nvkm_rdaux(dp->aux, addr, &lt->stat[4], 2);
	if (ret)
		return ret;

	if (pc) {
		ret = nvkm_rdaux(dp->aux, DPCD_LS0C, &lt->pc2stat, 1);
		if (ret)
			lt->pc2stat = 0x00;
		OUTP_TRACE(&dp->outp, "status %6ph pc2 %02x",
			   lt->stat, lt->pc2stat);
	} else {
		OUTP_TRACE(&dp->outp, "status %6ph", lt->stat);
	}

	return 0;
}

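/* Translate the sink's adjust requests into new voltage-swing/pre-emphasis
 * (and optionally post-cursor2) settings, clamping at the maximum levels and
 * flagging *_REACHED where appropriate, look up the board-specific drive
 * parameters for that combination in the VBIOS DP tables, program the SOR
 * accordingly, and write the resulting TRAINING_LANEx_SET (and optional
 * post-cursor2) values back over AUX.
 */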
static int
nvkm_dp_train_drive(struct lt_state *lt, bool pc)
{
	struct nvkm_dp *dp = lt->dp;
	struct nvkm_ior *ior = dp->outp.ior;
	struct nvkm_bios *bios = ior->disp->engine.subdev.device->bios;
	struct nvbios_dpout info;
	struct nvbios_dpcfg ocfg;
	u8  ver, hdr, cnt, len;
	u32 addr;
	u32 data;
	int ret, i;

	for (i = 0; i < ior->dp.nr; i++) {
		u8 lane = (lt->stat[4 + (i >> 1)] >> ((i & 1) * 4)) & 0xf;
		u8 lpc2 = (lt->pc2stat >> (i * 2)) & 0x3;
		u8 lpre = (lane & 0x0c) >> 2;
		u8 lvsw = (lane & 0x03) >> 0;
		u8 hivs = 3 - lpre;
		u8 hipe = 3;
		u8 hipc = 3;

		if (lpc2 >= hipc)
			lpc2 = hipc | DPCD_LC0F_LANE0_MAX_POST_CURSOR2_REACHED;
		if (lpre >= hipe) {
			lpre = hipe | DPCD_LC03_MAX_SWING_REACHED; /* yes. */
			lvsw = hivs = 3 - (lpre & 3);
		} else
		if (lvsw >= hivs) {
			lvsw = hivs | DPCD_LC03_MAX_SWING_REACHED;
		}

		lt->conf[i] = (lpre << 3) | lvsw;
		lt->pc2conf[i >> 1] |= lpc2 << ((i & 1) * 4);

		OUTP_TRACE(&dp->outp, "config lane %d %02x %02x",
			   i, lt->conf[i], lpc2);

		if (lt->repeater != lt->repeaters)
			continue;

		data = nvbios_dpout_match(bios, dp->outp.info.hasht,
						dp->outp.info.hashm,
					  &ver, &hdr, &cnt, &len, &info);
		if (!data)
			continue;

		data = nvbios_dpcfg_match(bios, data, lpc2 & 3, lvsw & 3,
					  lpre & 3, &ver, &hdr, &cnt, &len,
					  &ocfg);
		if (!data)
			continue;

		ior->func->dp.drive(ior, i, ocfg.pc, ocfg.dc,
					    ocfg.pe, ocfg.tx_pu);
	}

	if (lt->repeater)
		addr = DPCD_LTTPR_LANE0_SET(lt->repeater);
	else
		addr = DPCD_LC03(0);

	ret = nvkm_wraux(dp->aux, addr, lt->conf, 4);
	if (ret)
		return ret;

	if (pc) {
		ret = nvkm_wraux(dp->aux, DPCD_LC0F, lt->pc2conf, 2);
		if (ret)
			return ret;
	}

	return 0;
}

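/* Select a training pattern on both ends of the link: program the source
 * SOR, then update TRAINING_PATTERN_SET on the sink (or the LTTPR-specific
 * register).  TPS4 is encoded as 7 in the DPCD, and scrambling is disabled
 * for every pattern other than "none".
 */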
static void
nvkm_dp_train_pattern(struct lt_state *lt, u8 pattern)
{
	struct nvkm_dp *dp = lt->dp;
	u32 addr;
	u8 sink_tp;

	OUTP_TRACE(&dp->outp, "training pattern %d", pattern);
	dp->outp.ior->func->dp.pattern(dp->outp.ior, pattern);

	if (lt->repeater)
		addr = DPCD_LTTPR_PATTERN_SET(lt->repeater);
	else
		addr = DPCD_LC02;

	nvkm_rdaux(dp->aux, addr, &sink_tp, 1);
	sink_tp &= ~DPCD_LC02_TRAINING_PATTERN_SET;
	sink_tp |= (pattern != 4) ? pattern : 7;

	if (pattern != 0)
		sink_tp |=  DPCD_LC02_SCRAMBLING_DISABLE;
	else
		sink_tp &= ~DPCD_LC02_SCRAMBLING_DISABLE;
	nvkm_wraux(dp->aux, addr, &sink_tp, 1);
}

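/* Channel-equalisation phase: pick the best training pattern supported by
 * both ends (TPS4/TPS3/TPS2), then loop adjusting drive settings and polling
 * status until EQ, symbol-lock and inter-lane alignment are reported on all
 * active lanes, giving up after a bounded number of attempts or if clock
 * recovery is lost along the way.
 */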
static int
nvkm_dp_train_eq(struct lt_state *lt)
{
	struct nvkm_i2c_aux *aux = lt->dp->aux;
	bool eq_done = false, cr_done = true;
	int tries = 0, usec = 0, i;
	u8 data;

	if (lt->repeater) {
		if (!nvkm_rdaux(aux, DPCD_LTTPR_AUX_RD_INTERVAL(lt->repeater), &data, sizeof(data)))
			usec = (data & DPCD_RC0E_AUX_RD_INTERVAL) * 4000;

		nvkm_dp_train_pattern(lt, 4);
	} else {
		if (lt->dp->dpcd[DPCD_RC00_DPCD_REV] >= 0x14 &&
		    lt->dp->dpcd[DPCD_RC03] & DPCD_RC03_TPS4_SUPPORTED)
			nvkm_dp_train_pattern(lt, 4);
		else
		if (lt->dp->dpcd[DPCD_RC00_DPCD_REV] >= 0x12 &&
		    lt->dp->dpcd[DPCD_RC02] & DPCD_RC02_TPS3_SUPPORTED)
			nvkm_dp_train_pattern(lt, 3);
		else
			nvkm_dp_train_pattern(lt, 2);

		usec = (lt->dp->dpcd[DPCD_RC0E] & DPCD_RC0E_AUX_RD_INTERVAL) * 4000;
	}

	do {
		if ((tries &&
		    nvkm_dp_train_drive(lt, lt->pc2)) ||
		    nvkm_dp_train_sense(lt, lt->pc2, usec ? usec : 400))
			break;

		eq_done = !!(lt->stat[2] & DPCD_LS04_INTERLANE_ALIGN_DONE);
		for (i = 0; i < lt->dp->outp.ior->dp.nr && eq_done; i++) {
			u8 lane = (lt->stat[i >> 1] >> ((i & 1) * 4)) & 0xf;
			if (!(lane & DPCD_LS02_LANE0_CR_DONE))
				cr_done = false;
			if (!(lane & DPCD_LS02_LANE0_CHANNEL_EQ_DONE) ||
			    !(lane & DPCD_LS02_LANE0_SYMBOL_LOCKED))
				eq_done = false;
		}
	} while (!eq_done && cr_done && ++tries <= 5);

	return eq_done ? 0 : -1;
}

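/* Clock-recovery phase: drive TPS1 and keep adjusting voltage swing and
 * pre-emphasis from the sink's feedback until every active lane reports
 * CR_DONE, aborting if the maximum swing is reached without lock.  The
 * retry counter is reset whenever the requested voltage level changes.
 */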
static int
nvkm_dp_train_cr(struct lt_state *lt)
{
	bool cr_done = false, abort = false;
	int voltage = lt->conf[0] & DPCD_LC03_VOLTAGE_SWING_SET;
	int tries = 0, usec = 0, i;

	nvkm_dp_train_pattern(lt, 1);

	if (lt->dp->dpcd[DPCD_RC00_DPCD_REV] < 0x14 && !lt->repeater)
		usec = (lt->dp->dpcd[DPCD_RC0E] & DPCD_RC0E_AUX_RD_INTERVAL) * 4000;

	do {
		if (nvkm_dp_train_drive(lt, false) ||
		    nvkm_dp_train_sense(lt, false, usec ? usec : 100))
			break;

		cr_done = true;
		for (i = 0; i < lt->dp->outp.ior->dp.nr; i++) {
			u8 lane = (lt->stat[i >> 1] >> ((i & 1) * 4)) & 0xf;
			if (!(lane & DPCD_LS02_LANE0_CR_DONE)) {
				cr_done = false;
				if (lt->conf[i] & DPCD_LC03_MAX_SWING_REACHED)
					abort = true;
				break;
			}
		}

		if ((lt->conf[0] & DPCD_LC03_VOLTAGE_SWING_SET) != voltage) {
			voltage = lt->conf[0] & DPCD_LC03_VOLTAGE_SWING_SET;
			tries = 0;
		}
	} while (!cr_done && !abort && ++tries < 5);

	return cr_done ? 0 : -1;
}

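/* Attempt to bring the link up at one specific lane-count/rate combination:
 * run the relevant VBIOS scripts, program the source, select LTTPR
 * transparent or non-transparent mode, write LINK_BW_SET/LANE_COUNT_SET
 * (or LINK_RATE_SET for eDP table rates) to the sink, then run clock
 * recovery followed by channel equalisation for each LTTPR in turn and
 * finally for the sink itself.
 */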
static int
nvkm_dp_train_links(struct nvkm_dp *dp, int rate)
{
	struct nvkm_ior *ior = dp->outp.ior;
	struct nvkm_disp *disp = dp->outp.disp;
	struct nvkm_subdev *subdev = &disp->engine.subdev;
	struct nvkm_bios *bios = subdev->device->bios;
	struct lt_state lt = {
		.dp = dp,
	};
	u32 lnkcmp;
	u8 sink[2], data;
	int ret;

	OUTP_DBG(&dp->outp, "training %d x %d MB/s",
		 ior->dp.nr, ior->dp.bw * 27);

	/* Intersect misc. capabilities of the OR and sink. */
	if (disp->engine.subdev.device->chipset < 0x110)
		dp->dpcd[DPCD_RC03] &= ~DPCD_RC03_TPS4_SUPPORTED;
	if (disp->engine.subdev.device->chipset < 0xd0)
		dp->dpcd[DPCD_RC02] &= ~DPCD_RC02_TPS3_SUPPORTED;
	lt.pc2 = dp->dpcd[DPCD_RC02] & DPCD_RC02_TPS3_SUPPORTED;

	if (AMPERE_IED_HACK(disp) && (lnkcmp = lt.dp->info.script[0])) {
		/* Execute BeforeLinkTraining script from DP Info table. */
		while (ior->dp.bw < nvbios_rd08(bios, lnkcmp))
			lnkcmp += 3;
		lnkcmp = nvbios_rd16(bios, lnkcmp + 1);

		nvbios_init(&dp->outp.disp->engine.subdev, lnkcmp,
			init.outp = &dp->outp.info;
			init.or   = ior->id;
			init.link = ior->asy.link;
		);
	}

	/* Set desired link configuration on the source. */
	if ((lnkcmp = lt.dp->info.lnkcmp)) {
		if (dp->version < 0x30) {
			while ((ior->dp.bw * 2700) < nvbios_rd16(bios, lnkcmp))
				lnkcmp += 4;
			lnkcmp = nvbios_rd16(bios, lnkcmp + 2);
		} else {
			while (ior->dp.bw < nvbios_rd08(bios, lnkcmp))
				lnkcmp += 3;
			lnkcmp = nvbios_rd16(bios, lnkcmp + 1);
		}

		nvbios_init(subdev, lnkcmp,
			init.outp = &dp->outp.info;
			init.or   = ior->id;
			init.link = ior->asy.link;
		);
	}

	ret = ior->func->dp.links(ior, dp->aux);
	if (ret) {
		if (ret < 0) {
			OUTP_ERR(&dp->outp, "train failed with %d", ret);
			return ret;
		}
		return 0;
	}

	ior->func->dp.power(ior, ior->dp.nr);

	/* Select LTTPR non-transparent mode if we have a valid configuration,
	 * use transparent mode otherwise.
	 */
	if (dp->lttpr[0] >= 0x14) {
		data = DPCD_LTTPR_MODE_TRANSPARENT;
		nvkm_wraux(dp->aux, DPCD_LTTPR_MODE, &data, sizeof(data));

		if (dp->lttprs) {
			data = DPCD_LTTPR_MODE_NON_TRANSPARENT;
			nvkm_wraux(dp->aux, DPCD_LTTPR_MODE, &data, sizeof(data));
			lt.repeaters = dp->lttprs;
		}
	}

	/* Set desired link configuration on the sink. */
	sink[0] = (dp->rate[rate].dpcd < 0) ? ior->dp.bw : 0;
	sink[1] = ior->dp.nr;
	if (ior->dp.ef)
		sink[1] |= DPCD_LC01_ENHANCED_FRAME_EN;

	ret = nvkm_wraux(dp->aux, DPCD_LC00_LINK_BW_SET, sink, 2);
	if (ret)
		return ret;

	if (dp->rate[rate].dpcd >= 0) {
		ret = nvkm_rdaux(dp->aux, DPCD_LC15_LINK_RATE_SET, &sink[0], sizeof(sink[0]));
		if (ret)
			return ret;

		sink[0] &= ~DPCD_LC15_LINK_RATE_SET_MASK;
		sink[0] |= dp->rate[rate].dpcd;

		ret = nvkm_wraux(dp->aux, DPCD_LC15_LINK_RATE_SET, &sink[0], sizeof(sink[0]));
		if (ret)
			return ret;
	}

	/* Attempt to train the link in this configuration. */
	for (lt.repeater = lt.repeaters; lt.repeater >= 0; lt.repeater--) {
		if (lt.repeater)
			OUTP_DBG(&dp->outp, "training LTTPR%d", lt.repeater);
		else
			OUTP_DBG(&dp->outp, "training sink");

		memset(lt.stat, 0x00, sizeof(lt.stat));
		ret = nvkm_dp_train_cr(&lt);
		if (ret == 0)
			ret = nvkm_dp_train_eq(&lt);
		nvkm_dp_train_pattern(&lt, 0);
	}

	return ret;
}

static void
nvkm_dp_train_fini(struct nvkm_dp *dp)
{
	/* Execute AfterLinkTraining script from DP Info table. */
	nvbios_init(&dp->outp.disp->engine.subdev, dp->info.script[1],
		init.outp = &dp->outp.info;
		init.or   = dp->outp.ior->id;
		init.link = dp->outp.ior->asy.link;
	);
}

static void
nvkm_dp_train_init(struct nvkm_dp *dp)
{
	/* Execute EnableSpread/DisableSpread script from DP Info table. */
	if (dp->dpcd[DPCD_RC03] & DPCD_RC03_MAX_DOWNSPREAD) {
		nvbios_init(&dp->outp.disp->engine.subdev, dp->info.script[2],
			init.outp = &dp->outp.info;
			init.or   = dp->outp.ior->id;
			init.link = dp->outp.ior->asy.link;
		);
	} else {
		nvbios_init(&dp->outp.disp->engine.subdev, dp->info.script[3],
			init.outp = &dp->outp.info;
			init.or   = dp->outp.ior->id;
			init.link = dp->outp.ior->asy.link;
		);
	}

	if (!AMPERE_IED_HACK(dp->outp.disp)) {
		/* Execute BeforeLinkTraining script from DP Info table. */
		nvbios_init(&dp->outp.disp->engine.subdev, dp->info.script[0],
			init.outp = &dp->outp.info;
			init.or   = dp->outp.ior->id;
			init.link = dp->outp.ior->asy.link;
		);
	}
}

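/* Top-level training entry point: wake the sink from a low-power state,
 * run the DP Info table init scripts, then walk lane counts from highest
 * to lowest and, for each, every advertised link rate (fastest first) until
 * a configuration that carries the requested payload (dataKBps) trains
 * successfully.
 */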
static int
nvkm_dp_train(struct nvkm_dp *dp, u32 dataKBps)
{
	struct nvkm_ior *ior = dp->outp.ior;
	int ret = -EINVAL, nr, rate;
	u8  pwr;

	/* Ensure sink is not in a low-power state. */
	if (!nvkm_rdaux(dp->aux, DPCD_SC00, &pwr, 1)) {
		if ((pwr & DPCD_SC00_SET_POWER) != DPCD_SC00_SET_POWER_D0) {
			pwr &= ~DPCD_SC00_SET_POWER;
			pwr |=  DPCD_SC00_SET_POWER_D0;
			nvkm_wraux(dp->aux, DPCD_SC00, &pwr, 1);
		}
	}

	ior->dp.mst = dp->lt.mst;
	ior->dp.ef = dp->dpcd[DPCD_RC02] & DPCD_RC02_ENHANCED_FRAME_CAP;
	ior->dp.nr = 0;

	/* Link training. */
	OUTP_DBG(&dp->outp, "training");
	nvkm_dp_train_init(dp);
	for (nr = dp->links; ret < 0 && nr; nr >>= 1) {
		for (rate = 0; ret < 0 && rate < dp->rates; rate++) {
			if (dp->rate[rate].rate * nr >= dataKBps || WARN_ON(!ior->dp.nr)) {
				/* Program selected link configuration. */
				ior->dp.bw = dp->rate[rate].rate / 27000;
				ior->dp.nr = nr;
				ret = nvkm_dp_train_links(dp, rate);
			}
		}
	}
	nvkm_dp_train_fini(dp);
	if (ret < 0)
		OUTP_ERR(&dp->outp, "training failed");
	else
		OUTP_DBG(&dp->outp, "training done");
	atomic_set(&dp->lt.done, 1);
	return ret;
}

void
nvkm_dp_disable(struct nvkm_outp *outp, struct nvkm_ior *ior)
{
	struct nvkm_dp *dp = nvkm_dp(outp);

	/* Execute DisableLT script from DP Info Table. */
	nvbios_init(&ior->disp->engine.subdev, dp->info.script[4],
		init.outp = &dp->outp.info;
		init.or   = ior->id;
		init.link = ior->arm.link;
	);
}

static void
nvkm_dp_release(struct nvkm_outp *outp)
{
	struct nvkm_dp *dp = nvkm_dp(outp);

	/* Prevent link from being retrained if sink sends an IRQ. */
	atomic_set(&dp->lt.done, 0);
	dp->outp.ior->dp.nr = 0;
}

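/* Called when the output is acquired for scanout: compare the bandwidth
 * required by the attached head(s) and the requested MST state against the
 * currently-trained link, verify via the DPCD that the link is still locked,
 * and retrain if either check fails or no successful training is recorded.
 */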
static int
nvkm_dp_acquire(struct nvkm_outp *outp)
{
	struct nvkm_dp *dp = nvkm_dp(outp);
	struct nvkm_ior *ior = dp->outp.ior;
	struct nvkm_head *head;
	bool retrain = true;
	u32 datakbps = 0;
	u32 dataKBps;
	u32 linkKBps;
	u8  stat[3];
	int ret, i;

	mutex_lock(&dp->mutex);

	/* Check that link configuration meets current requirements. */
	list_for_each_entry(head, &outp->disp->head, head) {
		if (ior->asy.head & (1 << head->id)) {
			u32 khz = (head->asy.hz >> ior->asy.rgdiv) / 1000;
			datakbps += khz * head->asy.or.depth;
		}
	}

	linkKBps = ior->dp.bw * 27000 * ior->dp.nr;
	dataKBps = DIV_ROUND_UP(datakbps, 8);
	OUTP_DBG(&dp->outp, "data %d KB/s link %d KB/s mst %d->%d",
		 dataKBps, linkKBps, ior->dp.mst, dp->lt.mst);
	if (linkKBps < dataKBps || ior->dp.mst != dp->lt.mst) {
		OUTP_DBG(&dp->outp, "link requirements changed");
		goto done;
	}

	/* Check that link is still trained. */
	ret = nvkm_rdaux(dp->aux, DPCD_LS02, stat, 3);
	if (ret) {
		OUTP_DBG(&dp->outp,
			 "failed to read link status, assuming no sink");
		goto done;
	}

	if (stat[2] & DPCD_LS04_INTERLANE_ALIGN_DONE) {
		for (i = 0; i < ior->dp.nr; i++) {
			u8 lane = (stat[i >> 1] >> ((i & 1) * 4)) & 0x0f;
			if (!(lane & DPCD_LS02_LANE0_CR_DONE) ||
			    !(lane & DPCD_LS02_LANE0_CHANNEL_EQ_DONE) ||
			    !(lane & DPCD_LS02_LANE0_SYMBOL_LOCKED)) {
				OUTP_DBG(&dp->outp,
					 "lane %d not equalised", i);
				goto done;
			}
		}
		retrain = false;
	} else {
		OUTP_DBG(&dp->outp, "no inter-lane alignment");
	}

done:
	if (retrain || !atomic_read(&dp->lt.done))
		ret = nvkm_dp_train(dp, dataKBps);
	mutex_unlock(&dp->mutex);
	return ret;
}

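/* eDP 1.4+ sinks may advertise additional link rates in the DPCD
 * SUPPORTED_LINK_RATES table (16-bit little-endian entries in units of
 * 200kHz).  Convert each entry into the same units used elsewhere in the
 * driver (i.e. comparable with dpconf.link_bw * 27000), drop rates the OR
 * can't drive, and insert the rest into dp->rate[] sorted fastest-first,
 * remembering the table index for LINK_RATE_SET.
 */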
static bool
nvkm_dp_enable_supported_link_rates(struct nvkm_dp *dp)
{
	u8 sink_rates[DPCD_RC10_SUPPORTED_LINK_RATES__SIZE];
	int i, j, k;

	if (dp->outp.conn->info.type != DCB_CONNECTOR_eDP ||
	    dp->dpcd[DPCD_RC00_DPCD_REV] < 0x13 ||
	    nvkm_rdaux(dp->aux, DPCD_RC10_SUPPORTED_LINK_RATES(0), sink_rates, sizeof(sink_rates)))
		return false;

	for (i = 0; i < ARRAY_SIZE(sink_rates); i += 2) {
		const u32 rate = ((sink_rates[i + 1] << 8) | sink_rates[i]) * 200 / 10;

		if (!rate || WARN_ON(dp->rates == ARRAY_SIZE(dp->rate)))
			break;

		if (rate > dp->outp.info.dpconf.link_bw * 27000) {
			OUTP_DBG(&dp->outp, "rate %d !outp", rate);
			continue;
		}

		for (j = 0; j < dp->rates; j++) {
			if (rate > dp->rate[j].rate) {
				for (k = dp->rates; k > j; k--)
					dp->rate[k] = dp->rate[k - 1];
				break;
			}
		}

		dp->rate[j].dpcd = i / 2;
		dp->rate[j].rate = rate;
		dp->rates++;
	}

	for (i = 0; i < dp->rates; i++)
		OUTP_DBG(&dp->outp, "link_rate[%d] = %d", dp->rate[i].dpcd, dp->rate[i].rate);

	return dp->rates != 0;
}

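/* Power the AUX channel up (or back down) and refresh our view of what's on
 * the other end of the link: probe for LTTPRs before touching the receiver
 * caps, read the DPCD, and build the list of usable lane counts and link
 * rates, capped by the board's DCB limits and any LTTPR restrictions.  If
 * the sink has no eDP rate table, fall back to the standard RBR/HBR/HBR2/
 * HBR3 set.
 */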
static bool
nvkm_dp_enable(struct nvkm_dp *dp, bool enable)
{
	struct nvkm_i2c_aux *aux = dp->aux;

	if (enable) {
		if (!dp->present) {
			OUTP_DBG(&dp->outp, "aux power -> always");
			nvkm_i2c_aux_monitor(aux, true);
			dp->present = true;
		}

		/* Detect any LTTPRs before reading DPCD receiver caps. */
		if (!nvkm_rdaux(aux, DPCD_LTTPR_REV, dp->lttpr, sizeof(dp->lttpr)) &&
		    dp->lttpr[0] >= 0x14 && dp->lttpr[2]) {
			switch (dp->lttpr[2]) {
			case 0x80: dp->lttprs = 1; break;
			case 0x40: dp->lttprs = 2; break;
			case 0x20: dp->lttprs = 3; break;
			case 0x10: dp->lttprs = 4; break;
			case 0x08: dp->lttprs = 5; break;
			case 0x04: dp->lttprs = 6; break;
			case 0x02: dp->lttprs = 7; break;
			case 0x01: dp->lttprs = 8; break;
			default:
				/* Unknown LTTPR count, we'll switch to transparent mode. */
				WARN_ON(1);
				dp->lttprs = 0;
				break;
			}
		} else {
			/* No LTTPR support, or zero LTTPR count - don't touch it at all. */
			memset(dp->lttpr, 0x00, sizeof(dp->lttpr));
		}

		if (!nvkm_rdaux(aux, DPCD_RC00_DPCD_REV, dp->dpcd, sizeof(dp->dpcd))) {
			const u8 rates[] = { 0x1e, 0x14, 0x0a, 0x06, 0 };
			const u8 *rate;
			int rate_max;

			dp->rates = 0;
			dp->links = dp->dpcd[DPCD_RC02] & DPCD_RC02_MAX_LANE_COUNT;
			dp->links = min(dp->links, dp->outp.info.dpconf.link_nr);
			if (dp->lttprs && dp->lttpr[4])
				dp->links = min_t(int, dp->links, dp->lttpr[4]);

			rate_max = dp->dpcd[DPCD_RC01_MAX_LINK_RATE];
			rate_max = min(rate_max, dp->outp.info.dpconf.link_bw);
			if (dp->lttprs && dp->lttpr[1])
				rate_max = min_t(int, rate_max, dp->lttpr[1]);

			if (!nvkm_dp_enable_supported_link_rates(dp)) {
				for (rate = rates; *rate; rate++) {
					if (*rate <= rate_max) {
						if (WARN_ON(dp->rates == ARRAY_SIZE(dp->rate)))
							break;

						dp->rate[dp->rates].dpcd = -1;
						dp->rate[dp->rates].rate = *rate * 27000;
						dp->rates++;
					}
				}
			}

			return true;
		}
	}

	if (dp->present) {
		OUTP_DBG(&dp->outp, "aux power -> demand");
		nvkm_i2c_aux_monitor(aux, false);
		dp->present = false;
	}

	atomic_set(&dp->lt.done, 0);
	return false;
}

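/* AUX event handler: a sink IRQ on an already-trained link triggers a
 * re-acquire (and potential retrain), while other events re-probe the sink.
 * The event is then forwarded to userspace as a connector notification.
 */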
static int
nvkm_dp_hpd(struct nvkm_notify *notify)
{
	const struct nvkm_i2c_ntfy_rep *line = notify->data;
	struct nvkm_dp *dp = container_of(notify, typeof(*dp), hpd);
	struct nvkm_conn *conn = dp->outp.conn;
	struct nvkm_disp *disp = dp->outp.disp;
	struct nvif_notify_conn_rep_v0 rep = {};

	OUTP_DBG(&dp->outp, "HPD: %d", line->mask);
	if (line->mask & NVKM_I2C_IRQ) {
		if (atomic_read(&dp->lt.done))
			dp->outp.func->acquire(&dp->outp);
		rep.mask |= NVIF_NOTIFY_CONN_V0_IRQ;
	} else {
		nvkm_dp_enable(dp, true);
	}

	if (line->mask & NVKM_I2C_UNPLUG)
		rep.mask |= NVIF_NOTIFY_CONN_V0_UNPLUG;
	if (line->mask & NVKM_I2C_PLUG)
		rep.mask |= NVIF_NOTIFY_CONN_V0_PLUG;

	nvkm_event_send(&disp->hpd, rep.mask, conn->index, &rep, sizeof(rep));
	return NVKM_NOTIFY_KEEP;
}

static void
nvkm_dp_fini(struct nvkm_outp *outp)
{
	struct nvkm_dp *dp = nvkm_dp(outp);
	nvkm_notify_put(&dp->hpd);
	nvkm_dp_enable(dp, false);
}

static void
nvkm_dp_init(struct nvkm_outp *outp)
{
	struct nvkm_gpio *gpio = outp->disp->engine.subdev.device->gpio;
	struct nvkm_dp *dp = nvkm_dp(outp);

	nvkm_notify_put(&dp->outp.conn->hpd);

	/* eDP panels need powering on by us (if the VBIOS doesn't default it
	 * to on) before doing any AUX channel transactions.  LVDS panel power
	 * is handled by the SOR itself, and not required for LVDS DDC.
	 */
	if (dp->outp.conn->info.type == DCB_CONNECTOR_eDP) {
		int power = nvkm_gpio_get(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff);
		if (power == 0)
			nvkm_gpio_set(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff, 1);

		/* We delay here unconditionally, even if already powered,
		 * because some laptop panels have a significant resume
		 * delay before the panel begins responding.
		 *
		 * This is likely a bit of a hack, but we have no better idea
		 * for handling this at the moment.
		 */
		msleep(300);

		/* If the eDP panel can't be detected, we need to restore
		 * the panel power GPIO to avoid breaking another output.
		 */
		if (!nvkm_dp_enable(dp, true) && power == 0)
			nvkm_gpio_set(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff, 0);
	} else {
		nvkm_dp_enable(dp, true);
	}

	nvkm_notify_get(&dp->hpd);
}

static void *
nvkm_dp_dtor(struct nvkm_outp *outp)
{
	struct nvkm_dp *dp = nvkm_dp(outp);
	nvkm_notify_fini(&dp->hpd);
	return dp;
}

static const struct nvkm_outp_func
nvkm_dp_func = {
	.dtor = nvkm_dp_dtor,
	.init = nvkm_dp_init,
	.fini = nvkm_dp_fini,
	.acquire = nvkm_dp_acquire,
	.release = nvkm_dp_release,
	.disable = nvkm_dp_disable,
};

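/* Common constructor: validate the AUX channel, look up this output's entry
 * in the VBIOS DP table, and register for AUX-based hotplug/IRQ events.
 */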
static int
nvkm_dp_ctor(struct nvkm_disp *disp, int index, struct dcb_output *dcbE,
	     struct nvkm_i2c_aux *aux, struct nvkm_dp *dp)
{
	struct nvkm_device *device = disp->engine.subdev.device;
	struct nvkm_bios *bios = device->bios;
	struct nvkm_i2c *i2c = device->i2c;
	u8  hdr, cnt, len;
	u32 data;
	int ret;

	ret = nvkm_outp_ctor(&nvkm_dp_func, disp, index, dcbE, &dp->outp);
	if (ret)
		return ret;

	dp->aux = aux;
	if (!dp->aux) {
		OUTP_ERR(&dp->outp, "no aux");
		return -EINVAL;
	}

	/* bios data is not optional */
	data = nvbios_dpout_match(bios, dp->outp.info.hasht,
				  dp->outp.info.hashm, &dp->version,
				  &hdr, &cnt, &len, &dp->info);
	if (!data) {
		OUTP_ERR(&dp->outp, "no bios dp data");
		return -EINVAL;
	}

	OUTP_DBG(&dp->outp, "bios dp %02x %02x %02x %02x",
		 dp->version, hdr, cnt, len);

	/* hotplug detect, replaces gpio-based mechanism with aux events */
	ret = nvkm_notify_init(NULL, &i2c->event, nvkm_dp_hpd, true,
			       &(struct nvkm_i2c_ntfy_req) {
				.mask = NVKM_I2C_PLUG | NVKM_I2C_UNPLUG |
					NVKM_I2C_IRQ,
				.port = dp->aux->id,
			       },
			       sizeof(struct nvkm_i2c_ntfy_req),
			       sizeof(struct nvkm_i2c_ntfy_rep),
			       &dp->hpd);
	if (ret) {
		OUTP_ERR(&dp->outp, "error monitoring aux hpd: %d", ret);
		return ret;
	}

	mutex_init(&dp->mutex);
	atomic_set(&dp->lt.done, 0);
	return 0;
}

int
nvkm_dp_new(struct nvkm_disp *disp, int index, struct dcb_output *dcbE,
	    struct nvkm_outp **poutp)
{
	struct nvkm_i2c *i2c = disp->engine.subdev.device->i2c;
	struct nvkm_i2c_aux *aux;
	struct nvkm_dp *dp;

	if (dcbE->location == 0)
		aux = nvkm_i2c_aux_find(i2c, NVKM_I2C_AUX_CCB(dcbE->i2c_index));
	else
		aux = nvkm_i2c_aux_find(i2c, NVKM_I2C_AUX_EXT(dcbE->extdev));

	if (!(dp = kzalloc(sizeof(*dp), GFP_KERNEL)))
		return -ENOMEM;
	*poutp = &dp->outp;

	return nvkm_dp_ctor(disp, index, dcbE, aux, dp);
}