1 /*
2  * Copyright 2014 Red Hat Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: Ben Skeggs
23  */
24 #include "dp.h"
25 #include "conn.h"
26 #include "head.h"
27 #include "ior.h"
28 
29 #include <subdev/bios.h>
30 #include <subdev/bios/init.h>
31 #include <subdev/gpio.h>
32 #include <subdev/i2c.h>
33 
34 #include <nvif/event.h>
35 
/* IED scripts are no longer used by UEFI/RM from Ampere, but have been updated for
 * the x86 option ROM.  However, the relevant VBIOS table versions weren't modified,
 * so we're unable to detect this in a nice way.
 */
#define AMPERE_IED_HACK(disp) ((disp)->engine.subdev.device->card_type >= GA100)
41 
/* Link-training state, shared by the clock-recovery and channel-EQ
 * phases.  When LTTPRs (link-training-tunable PHY repeaters) are
 * present, the same state is reused to train each repeater segment in
 * turn before the sink itself (repeater == 0 selects the sink).
 */
struct lt_state {
	struct nvkm_outp *outp;

	int repeaters;	/* number of LTTPRs detected on the link */
	int repeater;	/* device currently being trained; 0 == the sink */

	u8  stat[6];	/* DPCD lane/align status ([0..2]) + adjust requests ([4..5]) */
	u8  conf[4];	/* per-lane swing/pre-emphasis written to DPCD_LC03.. */
	bool pc2;	/* whether post-cursor2 status/config is exchanged */
	u8  pc2stat;	/* post-cursor2 adjust requests (DPCD_LS0C) */
	u8  pc2conf[2];	/* per-lane post-cursor2 config (DPCD_LC0F) */
};
54 
55 static int
nvkm_dp_train_sense(struct lt_state * lt,bool pc,u32 delay)56 nvkm_dp_train_sense(struct lt_state *lt, bool pc, u32 delay)
57 {
58 	struct nvkm_outp *outp = lt->outp;
59 	u32 addr;
60 	int ret;
61 
62 	usleep_range(delay, delay * 2);
63 
64 	if (lt->repeater)
65 		addr = DPCD_LTTPR_LANE0_1_STATUS(lt->repeater);
66 	else
67 		addr = DPCD_LS02;
68 
69 	ret = nvkm_rdaux(outp->dp.aux, addr, &lt->stat[0], 3);
70 	if (ret)
71 		return ret;
72 
73 	if (lt->repeater)
74 		addr = DPCD_LTTPR_LANE0_1_ADJUST(lt->repeater);
75 	else
76 		addr = DPCD_LS06;
77 
78 	ret = nvkm_rdaux(outp->dp.aux, addr, &lt->stat[4], 2);
79 	if (ret)
80 		return ret;
81 
82 	if (pc) {
83 		ret = nvkm_rdaux(outp->dp.aux, DPCD_LS0C, &lt->pc2stat, 1);
84 		if (ret)
85 			lt->pc2stat = 0x00;
86 
87 		OUTP_TRACE(outp, "status %6ph pc2 %02x", lt->stat, lt->pc2stat);
88 	} else {
89 		OUTP_TRACE(outp, "status %6ph", lt->stat);
90 	}
91 
92 	return 0;
93 }
94 
/* Apply the drive-parameter adjustments requested by the device being
 * trained (previously read into lt->stat[4..5] / lt->pc2stat by
 * nvkm_dp_train_sense()), clamped to maximum levels, program them on
 * the source OR, and mirror the applied settings back over AUX.
 */
static int
nvkm_dp_train_drive(struct lt_state *lt, bool pc)
{
	struct nvkm_outp *outp = lt->outp;
	struct nvkm_ior *ior = outp->ior;
	struct nvkm_bios *bios = ior->disp->engine.subdev.device->bios;
	struct nvbios_dpout info;
	struct nvbios_dpcfg ocfg;
	u8  ver, hdr, cnt, len;
	u32 addr;
	u32 data;
	int ret, i;

	for (i = 0; i < ior->dp.nr; i++) {
		/* Unpack this lane's requests: swing/pre-emphasis share a
		 * nibble of stat[4..5], post-cursor2 two bits of pc2stat.
		 */
		u8 lane = (lt->stat[4 + (i >> 1)] >> ((i & 1) * 4)) & 0xf;
		u8 lpc2 = (lt->pc2stat >> (i * 2)) & 0x3;
		u8 lpre = (lane & 0x0c) >> 2;
		u8 lvsw = (lane & 0x03) >> 0;
		u8 hivs = 3 - lpre;
		u8 hipe = 3;
		u8 hipc = 3;

		/* Clamp to the maximum levels, setting the *_REACHED flags
		 * so the sink stops requesting further increases.
		 */
		if (lpc2 >= hipc)
			lpc2 = hipc | DPCD_LC0F_LANE0_MAX_POST_CURSOR2_REACHED;
		if (lpre >= hipe) {
			lpre = hipe | DPCD_LC03_MAX_SWING_REACHED; /* yes. */
			lvsw = hivs = 3 - (lpre & 3);
		} else
		if (lvsw >= hivs) {
			lvsw = hivs | DPCD_LC03_MAX_SWING_REACHED;
		}

		lt->conf[i] = (lpre << 3) | lvsw;
		lt->pc2conf[i >> 1] |= lpc2 << ((i & 1) * 4);

		OUTP_TRACE(outp, "config lane %d %02x %02x", i, lt->conf[i], lpc2);

		/* Source-side (OR) drive levels are only programmed while
		 * training the first segment (repeater == repeaters); the
		 * remaining segments only get the DPCD writes below.
		 */
		if (lt->repeater != lt->repeaters)
			continue;

		/* Look up board-specific drive levels for this
		 * swing/pre-emphasis/post-cursor2 combination in the VBIOS.
		 */
		data = nvbios_dpout_match(bios, outp->info.hasht, outp->info.hashm,
					  &ver, &hdr, &cnt, &len, &info);
		if (!data)
			continue;

		data = nvbios_dpcfg_match(bios, data, lpc2 & 3, lvsw & 3, lpre & 3,
					  &ver, &hdr, &cnt, &len, &ocfg);
		if (!data)
			continue;

		ior->func->dp->drive(ior, i, ocfg.pc, ocfg.dc, ocfg.pe, ocfg.tx_pu);
	}

	/* Tell the device being trained what was actually applied. */
	if (lt->repeater)
		addr = DPCD_LTTPR_LANE0_SET(lt->repeater);
	else
		addr = DPCD_LC03(0);

	ret = nvkm_wraux(outp->dp.aux, addr, lt->conf, 4);
	if (ret)
		return ret;

	if (pc) {
		ret = nvkm_wraux(outp->dp.aux, DPCD_LC0F, lt->pc2conf, 2);
		if (ret)
			return ret;
	}

	return 0;
}
165 
166 static void
nvkm_dp_train_pattern(struct lt_state * lt,u8 pattern)167 nvkm_dp_train_pattern(struct lt_state *lt, u8 pattern)
168 {
169 	struct nvkm_outp *outp = lt->outp;
170 	u32 addr;
171 	u8 sink_tp;
172 
173 	OUTP_TRACE(outp, "training pattern %d", pattern);
174 	outp->ior->func->dp->pattern(outp->ior, pattern);
175 
176 	if (lt->repeater)
177 		addr = DPCD_LTTPR_PATTERN_SET(lt->repeater);
178 	else
179 		addr = DPCD_LC02;
180 
181 	nvkm_rdaux(outp->dp.aux, addr, &sink_tp, 1);
182 	sink_tp &= ~DPCD_LC02_TRAINING_PATTERN_SET;
183 	sink_tp |= (pattern != 4) ? pattern : 7;
184 
185 	if (pattern != 0)
186 		sink_tp |=  DPCD_LC02_SCRAMBLING_DISABLE;
187 	else
188 		sink_tp &= ~DPCD_LC02_SCRAMBLING_DISABLE;
189 	nvkm_wraux(outp->dp.aux, addr, &sink_tp, 1);
190 }
191 
/* Channel-equalisation phase of link training.  Returns 0 once every
 * active lane reports EQ done + symbol lock and inter-lane alignment
 * is achieved, -1 otherwise.
 */
static int
nvkm_dp_train_eq(struct lt_state *lt)
{
	struct nvkm_i2c_aux *aux = lt->outp->dp.aux;
	bool eq_done = false, cr_done = true;
	int tries = 0, usec = 0, i;
	u8 data;

	if (lt->repeater) {
		/* LTTPRs have their own AUX read-interval register, and
		 * are trained with TPS4.
		 */
		if (!nvkm_rdaux(aux, DPCD_LTTPR_AUX_RD_INTERVAL(lt->repeater), &data, sizeof(data)))
			usec = (data & DPCD_RC0E_AUX_RD_INTERVAL) * 4000;

		nvkm_dp_train_pattern(lt, 4);
	} else {
		/* Pick the best training pattern the sink supports. */
		if (lt->outp->dp.dpcd[DPCD_RC00_DPCD_REV] >= 0x14 &&
		    lt->outp->dp.dpcd[DPCD_RC03] & DPCD_RC03_TPS4_SUPPORTED)
			nvkm_dp_train_pattern(lt, 4);
		else
		if (lt->outp->dp.dpcd[DPCD_RC00_DPCD_REV] >= 0x12 &&
		    lt->outp->dp.dpcd[DPCD_RC02] & DPCD_RC02_TPS3_SUPPORTED)
			nvkm_dp_train_pattern(lt, 3);
		else
			nvkm_dp_train_pattern(lt, 2);

		usec = (lt->outp->dp.dpcd[DPCD_RC0E] & DPCD_RC0E_AUX_RD_INTERVAL) * 4000;
	}

	/* Adjust drive settings (after the first pass) and re-read status
	 * until EQ completes, CR is lost, or the attempts run out.
	 */
	do {
		if ((tries &&
		    nvkm_dp_train_drive(lt, lt->pc2)) ||
		    nvkm_dp_train_sense(lt, lt->pc2, usec ? usec : 400))
			break;

		eq_done = !!(lt->stat[2] & DPCD_LS04_INTERLANE_ALIGN_DONE);
		for (i = 0; i < lt->outp->ior->dp.nr && eq_done; i++) {
			u8 lane = (lt->stat[i >> 1] >> ((i & 1) * 4)) & 0xf;
			if (!(lane & DPCD_LS02_LANE0_CR_DONE))
				cr_done = false;
			if (!(lane & DPCD_LS02_LANE0_CHANNEL_EQ_DONE) ||
			    !(lane & DPCD_LS02_LANE0_SYMBOL_LOCKED))
				eq_done = false;
		}
	} while (!eq_done && cr_done && ++tries <= 5);

	return eq_done ? 0 : -1;
}
238 
/* Clock-recovery phase of link training.  Returns 0 once every active
 * lane reports CR done, -1 if a lane fails while already at maximum
 * swing or the retry budget is exhausted.
 */
static int
nvkm_dp_train_cr(struct lt_state *lt)
{
	bool cr_done = false, abort = false;
	int voltage = lt->conf[0] & DPCD_LC03_VOLTAGE_SWING_SET;
	int tries = 0, usec = 0, i;

	nvkm_dp_train_pattern(lt, 1);

	/* Pre-DP1.4 sinks advertise their CR AUX read interval in DPCD. */
	if (lt->outp->dp.dpcd[DPCD_RC00_DPCD_REV] < 0x14 && !lt->repeater)
		usec = (lt->outp->dp.dpcd[DPCD_RC0E] & DPCD_RC0E_AUX_RD_INTERVAL) * 4000;

	do {
		if (nvkm_dp_train_drive(lt, false) ||
		    nvkm_dp_train_sense(lt, false, usec ? usec : 100))
			break;

		cr_done = true;
		for (i = 0; i < lt->outp->ior->dp.nr; i++) {
			u8 lane = (lt->stat[i >> 1] >> ((i & 1) * 4)) & 0xf;
			if (!(lane & DPCD_LS02_LANE0_CR_DONE)) {
				cr_done = false;
				/* Give up if we already hit maximum swing. */
				if (lt->conf[i] & DPCD_LC03_MAX_SWING_REACHED)
					abort = true;
				break;
			}
		}

		/* A voltage-swing change resets the retry budget. */
		if ((lt->conf[0] & DPCD_LC03_VOLTAGE_SWING_SET) != voltage) {
			voltage = lt->conf[0] & DPCD_LC03_VOLTAGE_SWING_SET;
			tries = 0;
		}
	} while (!cr_done && !abort && ++tries < 5);

	return cr_done ? 0 : -1;
}
275 
/* Program the currently-selected lane-count/bandwidth combination on
 * both source (OR) and sink, then run CR + EQ training for each LTTPR
 * segment in turn and finally for the sink itself.  Returns 0 on
 * success, negative on AUX or training failure.
 */
static int
nvkm_dp_train_links(struct nvkm_outp *outp, int rate)
{
	struct nvkm_ior *ior = outp->ior;
	struct nvkm_disp *disp = outp->disp;
	struct nvkm_subdev *subdev = &disp->engine.subdev;
	struct nvkm_bios *bios = subdev->device->bios;
	struct lt_state lt = {
		.outp = outp,
	};
	u32 lnkcmp;
	u8 sink[2], data;
	int ret;

	OUTP_DBG(outp, "training %d x %d MB/s", ior->dp.nr, ior->dp.bw * 27);

	/* Intersect misc. capabilities of the OR and sink. */
	if (disp->engine.subdev.device->chipset < 0x110)
		outp->dp.dpcd[DPCD_RC03] &= ~DPCD_RC03_TPS4_SUPPORTED;
	if (disp->engine.subdev.device->chipset < 0xd0)
		outp->dp.dpcd[DPCD_RC02] &= ~DPCD_RC02_TPS3_SUPPORTED;
	lt.pc2 = outp->dp.dpcd[DPCD_RC02] & DPCD_RC02_TPS3_SUPPORTED;

	if (AMPERE_IED_HACK(disp) && (lnkcmp = lt.outp->dp.info.script[0])) {
		/* Execute BeforeLinkTraining script from DP Info table. */
		while (ior->dp.bw < nvbios_rd08(bios, lnkcmp))
			lnkcmp += 3;
		lnkcmp = nvbios_rd16(bios, lnkcmp + 1);

		nvbios_init(&outp->disp->engine.subdev, lnkcmp,
			init.outp = &outp->info;
			init.or   = ior->id;
			init.link = ior->asy.link;
		);
	}

	/* Set desired link configuration on the source.  The script
	 * pointer is selected by link bandwidth; the table layout (and
	 * bandwidth field width) differs before/after DP table v0x30.
	 */
	if ((lnkcmp = lt.outp->dp.info.lnkcmp)) {
		if (outp->dp.version < 0x30) {
			while ((ior->dp.bw * 2700) < nvbios_rd16(bios, lnkcmp))
				lnkcmp += 4;
			lnkcmp = nvbios_rd16(bios, lnkcmp + 2);
		} else {
			while (ior->dp.bw < nvbios_rd08(bios, lnkcmp))
				lnkcmp += 3;
			lnkcmp = nvbios_rd16(bios, lnkcmp + 1);
		}

		nvbios_init(subdev, lnkcmp,
			init.outp = &outp->info;
			init.or   = ior->id;
			init.link = ior->asy.link;
		);
	}

	/* A positive return is treated as success without any further
	 * programming here; only negative values are training failures.
	 */
	ret = ior->func->dp->links(ior, outp->dp.aux);
	if (ret) {
		if (ret < 0) {
			OUTP_ERR(outp, "train failed with %d", ret);
			return ret;
		}
		return 0;
	}

	ior->func->dp->power(ior, ior->dp.nr);

	/* Select LTTPR non-transparent mode if we have a valid configuration,
	 * use transparent mode otherwise.
	 */
	if (outp->dp.lttpr[0] >= 0x14) {
		data = DPCD_LTTPR_MODE_TRANSPARENT;
		nvkm_wraux(outp->dp.aux, DPCD_LTTPR_MODE, &data, sizeof(data));

		if (outp->dp.lttprs) {
			data = DPCD_LTTPR_MODE_NON_TRANSPARENT;
			nvkm_wraux(outp->dp.aux, DPCD_LTTPR_MODE, &data, sizeof(data));
			lt.repeaters = outp->dp.lttprs;
		}
	}

	/* Set desired link configuration on the sink.  rate[].dpcd < 0
	 * selects the standard LINK_BW_SET encoding; otherwise the rate
	 * is programmed via LINK_RATE_SET below (eDP table index).
	 */
	sink[0] = (outp->dp.rate[rate].dpcd < 0) ? ior->dp.bw : 0;
	sink[1] = ior->dp.nr;
	if (ior->dp.ef)
		sink[1] |= DPCD_LC01_ENHANCED_FRAME_EN;

	ret = nvkm_wraux(outp->dp.aux, DPCD_LC00_LINK_BW_SET, sink, 2);
	if (ret)
		return ret;

	if (outp->dp.rate[rate].dpcd >= 0) {
		ret = nvkm_rdaux(outp->dp.aux, DPCD_LC15_LINK_RATE_SET, &sink[0], sizeof(sink[0]));
		if (ret)
			return ret;

		sink[0] &= ~DPCD_LC15_LINK_RATE_SET_MASK;
		sink[0] |= outp->dp.rate[rate].dpcd;

		ret = nvkm_wraux(outp->dp.aux, DPCD_LC15_LINK_RATE_SET, &sink[0], sizeof(sink[0]));
		if (ret)
			return ret;
	}

	/* Attempt to train the link in this configuration, training each
	 * LTTPR segment before finally the sink (repeater == 0).
	 */
	for (lt.repeater = lt.repeaters; lt.repeater >= 0; lt.repeater--) {
		if (lt.repeater)
			OUTP_DBG(outp, "training LTTPR%d", lt.repeater);
		else
			OUTP_DBG(outp, "training sink");

		memset(lt.stat, 0x00, sizeof(lt.stat));
		ret = nvkm_dp_train_cr(&lt);
		if (ret == 0)
			ret = nvkm_dp_train_eq(&lt);
		nvkm_dp_train_pattern(&lt, 0);
	}

	return ret;
}
395 
/* Post-training VBIOS housekeeping. */
static void
nvkm_dp_train_fini(struct nvkm_outp *outp)
{
	/* Execute AfterLinkTraining script from DP Info table. */
	nvbios_init(&outp->disp->engine.subdev, outp->dp.info.script[1],
		init.outp = &outp->info;
		init.or   = outp->ior->id;
		init.link = outp->ior->asy.link;
	);
}
406 
/* Pre-training VBIOS housekeeping. */
static void
nvkm_dp_train_init(struct nvkm_outp *outp)
{
	/* Execute EnableSpread/DisableSpread script from DP Info table,
	 * depending on whether the sink supports downspread.
	 */
	if (outp->dp.dpcd[DPCD_RC03] & DPCD_RC03_MAX_DOWNSPREAD) {
		nvbios_init(&outp->disp->engine.subdev, outp->dp.info.script[2],
			init.outp = &outp->info;
			init.or   = outp->ior->id;
			init.link = outp->ior->asy.link;
		);
	} else {
		nvbios_init(&outp->disp->engine.subdev, outp->dp.info.script[3],
			init.outp = &outp->info;
			init.or   = outp->ior->id;
			init.link = outp->ior->asy.link;
		);
	}

	if (!AMPERE_IED_HACK(outp->disp)) {
		/* Execute BeforeLinkTraining script from DP Info table.
		 * (On Ampere+ this is done per-configuration inside
		 * nvkm_dp_train_links() instead.)
		 */
		nvbios_init(&outp->disp->engine.subdev, outp->dp.info.script[0],
			init.outp = &outp->info;
			init.or   = outp->ior->id;
			init.link = outp->ior->asy.link;
		);
	}
}
434 
/* (Re)train the link to carry a payload of dataKBps.  Lane counts are
 * tried from the maximum downwards and rates in rate[] order; the
 * first configuration with sufficient bandwidth is trained, moving to
 * lower configurations only while training fails.
 */
static int
nvkm_dp_train(struct nvkm_outp *outp, u32 dataKBps)
{
	struct nvkm_ior *ior = outp->ior;
	int ret = -EINVAL, nr, rate;
	u8  pwr;

	/* Ensure sink is not in a low-power state. */
	if (!nvkm_rdaux(outp->dp.aux, DPCD_SC00, &pwr, 1)) {
		if ((pwr & DPCD_SC00_SET_POWER) != DPCD_SC00_SET_POWER_D0) {
			pwr &= ~DPCD_SC00_SET_POWER;
			pwr |=  DPCD_SC00_SET_POWER_D0;
			nvkm_wraux(outp->dp.aux, DPCD_SC00, &pwr, 1);
		}
	}

	ior->dp.mst = outp->dp.lt.mst;
	ior->dp.ef = outp->dp.dpcd[DPCD_RC02] & DPCD_RC02_ENHANCED_FRAME_CAP;
	ior->dp.nr = 0;

	/* Link training.  The WARN_ON forces an attempt anyway when no
	 * configuration has been trained yet and the bandwidth check
	 * cannot be met.
	 */
	OUTP_DBG(outp, "training");
	nvkm_dp_train_init(outp);
	for (nr = outp->dp.links; ret < 0 && nr; nr >>= 1) {
		for (rate = 0; ret < 0 && rate < outp->dp.rates; rate++) {
			if (outp->dp.rate[rate].rate * nr >= dataKBps || WARN_ON(!ior->dp.nr)) {
				/* Program selected link configuration. */
				ior->dp.bw = outp->dp.rate[rate].rate / 27000;
				ior->dp.nr = nr;
				ret = nvkm_dp_train_links(outp, rate);
			}
		}
	}
	nvkm_dp_train_fini(outp);
	if (ret < 0)
		OUTP_ERR(outp, "training failed");
	else
		OUTP_DBG(outp, "training done");
	atomic_set(&outp->dp.lt.done, 1);
	return ret;
}
476 
/* Disable the link via the VBIOS DisableLT script. */
void
nvkm_dp_disable(struct nvkm_outp *outp, struct nvkm_ior *ior)
{
	/* Execute DisableLT script from DP Info Table. */
	nvbios_init(&ior->disp->engine.subdev, outp->dp.info.script[4],
		init.outp = &outp->info;
		init.or   = ior->id;
		init.link = ior->arm.link;
	);
}
487 
/* Output released by its head: invalidate the training state. */
static void
nvkm_dp_release(struct nvkm_outp *outp)
{
	/* Prevent link from being retrained if sink sends an IRQ. */
	atomic_set(&outp->dp.lt.done, 0);
	outp->ior->dp.nr = 0;	/* forget the trained lane count */
}
495 
/* Acquire the output for scanout: verify the existing link still meets
 * the attached heads' bandwidth/MST requirements and is still trained
 * according to the sink's DPCD status, retraining when it is not.
 */
static int
nvkm_dp_acquire(struct nvkm_outp *outp)
{
	struct nvkm_ior *ior = outp->ior;
	struct nvkm_head *head;
	bool retrain = true;
	u32 datakbps = 0;
	u32 dataKBps;
	u32 linkKBps;
	u8  stat[3];
	int ret, i;

	mutex_lock(&outp->dp.mutex);

	/* Check that link configuration meets current requirements. */
	list_for_each_entry(head, &outp->disp->heads, head) {
		if (ior->asy.head & (1 << head->id)) {
			/* Payload for this head: pixel clock * bpp (kbps). */
			u32 khz = (head->asy.hz >> ior->asy.rgdiv) / 1000;
			datakbps += khz * head->asy.or.depth;
		}
	}

	linkKBps = ior->dp.bw * 27000 * ior->dp.nr;
	dataKBps = DIV_ROUND_UP(datakbps, 8);
	OUTP_DBG(outp, "data %d KB/s link %d KB/s mst %d->%d",
		 dataKBps, linkKBps, ior->dp.mst, outp->dp.lt.mst);
	if (linkKBps < dataKBps || ior->dp.mst != outp->dp.lt.mst) {
		OUTP_DBG(outp, "link requirements changed");
		goto done;
	}

	/* Check that link is still trained. */
	ret = nvkm_rdaux(outp->dp.aux, DPCD_LS02, stat, 3);
	if (ret) {
		OUTP_DBG(outp, "failed to read link status, assuming no sink");
		goto done;
	}

	if (stat[2] & DPCD_LS04_INTERLANE_ALIGN_DONE) {
		for (i = 0; i < ior->dp.nr; i++) {
			u8 lane = (stat[i >> 1] >> ((i & 1) * 4)) & 0x0f;
			if (!(lane & DPCD_LS02_LANE0_CR_DONE) ||
			    !(lane & DPCD_LS02_LANE0_CHANNEL_EQ_DONE) ||
			    !(lane & DPCD_LS02_LANE0_SYMBOL_LOCKED)) {
				OUTP_DBG(outp, "lane %d not equalised", lane);
				goto done;
			}
		}
		retrain = false;
	} else {
		OUTP_DBG(outp, "no inter-lane alignment");
	}

done:
	/* Retrain when required, or if training never completed. */
	if (retrain || !atomic_read(&outp->dp.lt.done))
		ret = nvkm_dp_train(outp, dataKBps);
	mutex_unlock(&outp->dp.mutex);
	return ret;
}
555 
/* For eDP 1.3+ panels, read the SUPPORTED_LINK_RATES table from DPCD
 * and populate outp->dp.rate[] (sorted fastest-first), recording each
 * rate's table index for later use with LINK_RATE_SET.  Returns true
 * if at least one usable rate was found.
 */
static bool
nvkm_dp_enable_supported_link_rates(struct nvkm_outp *outp)
{
	u8 sink_rates[DPCD_RC10_SUPPORTED_LINK_RATES__SIZE];
	int i, j, k;

	if (outp->conn->info.type != DCB_CONNECTOR_eDP ||
	    outp->dp.dpcd[DPCD_RC00_DPCD_REV] < 0x13 ||
	    nvkm_rdaux(outp->dp.aux, DPCD_RC10_SUPPORTED_LINK_RATES(0),
		       sink_rates, sizeof(sink_rates)))
		return false;

	for (i = 0; i < ARRAY_SIZE(sink_rates); i += 2) {
		/* Little-endian 16-bit entries, scaled here to the same
		 * units as the fallback table (bw-value * 27000).
		 */
		const u32 rate = ((sink_rates[i + 1] << 8) | sink_rates[i]) * 200 / 10;

		/* A zero entry terminates the table. */
		if (!rate || WARN_ON(outp->dp.rates == ARRAY_SIZE(outp->dp.rate)))
			break;

		/* Skip rates beyond what the board's wiring supports. */
		if (rate > outp->info.dpconf.link_bw * 27000) {
			OUTP_DBG(outp, "rate %d !outp", rate);
			continue;
		}

		/* Insertion sort, fastest rate first; falls through with
		 * j == outp->dp.rates to append at the end.
		 */
		for (j = 0; j < outp->dp.rates; j++) {
			if (rate > outp->dp.rate[j].rate) {
				for (k = outp->dp.rates; k > j; k--)
					outp->dp.rate[k] = outp->dp.rate[k - 1];
				break;
			}
		}

		outp->dp.rate[j].dpcd = i / 2;	/* index for LINK_RATE_SET */
		outp->dp.rate[j].rate = rate;
		outp->dp.rates++;
	}

	for (i = 0; i < outp->dp.rates; i++)
		OUTP_DBG(outp, "link_rate[%d] = %d", outp->dp.rate[i].dpcd, outp->dp.rate[i].rate);

	return outp->dp.rates != 0;
}
597 
/* Power the AUX channel up or down and (when enabling) re-read LTTPR
 * and sink capabilities, building the list of candidate link rates.
 * Returns true only if a sink responded and its DPCD was read.
 */
static bool
nvkm_dp_enable(struct nvkm_outp *outp, bool enable)
{
	struct nvkm_i2c_aux *aux = outp->dp.aux;

	if (enable) {
		if (!outp->dp.present) {
			OUTP_DBG(outp, "aux power -> always");
			nvkm_i2c_aux_monitor(aux, true);
			outp->dp.present = true;
		}

		/* Detect any LTTPRs before reading DPCD receiver caps.
		 * lttpr[0] is the LTTPR DPCD revision, lttpr[2] the
		 * repeater count, encoded as 0x80 >> (count - 1).
		 */
		if (!nvkm_rdaux(aux, DPCD_LTTPR_REV, outp->dp.lttpr, sizeof(outp->dp.lttpr)) &&
		    outp->dp.lttpr[0] >= 0x14 && outp->dp.lttpr[2]) {
			switch (outp->dp.lttpr[2]) {
			case 0x80: outp->dp.lttprs = 1; break;
			case 0x40: outp->dp.lttprs = 2; break;
			case 0x20: outp->dp.lttprs = 3; break;
			case 0x10: outp->dp.lttprs = 4; break;
			case 0x08: outp->dp.lttprs = 5; break;
			case 0x04: outp->dp.lttprs = 6; break;
			case 0x02: outp->dp.lttprs = 7; break;
			case 0x01: outp->dp.lttprs = 8; break;
			default:
				/* Unknown LTTPR count, we'll switch to transparent mode. */
				WARN_ON(1);
				outp->dp.lttprs = 0;
				break;
			}
		} else {
			/* No LTTPR support, or zero LTTPR count - don't touch it at all. */
			memset(outp->dp.lttpr, 0x00, sizeof(outp->dp.lttpr));
		}

		if (!nvkm_rdaux(aux, DPCD_RC00_DPCD_REV, outp->dp.dpcd, sizeof(outp->dp.dpcd))) {
			/* Fallback RBR/HBR/HBR2/HBR3 bandwidth values. */
			const u8 rates[] = { 0x1e, 0x14, 0x0a, 0x06, 0 };
			const u8 *rate;
			int rate_max;

			/* Lane count: min of sink, board wiring, and (if
			 * present) LTTPR capability (lttpr[4]).
			 */
			outp->dp.rates = 0;
			outp->dp.links = outp->dp.dpcd[DPCD_RC02] & DPCD_RC02_MAX_LANE_COUNT;
			outp->dp.links = min(outp->dp.links, outp->info.dpconf.link_nr);
			if (outp->dp.lttprs && outp->dp.lttpr[4])
				outp->dp.links = min_t(int, outp->dp.links, outp->dp.lttpr[4]);

			/* Max rate: min of sink, board wiring, and (if
			 * present) LTTPR capability (lttpr[1]).
			 */
			rate_max = outp->dp.dpcd[DPCD_RC01_MAX_LINK_RATE];
			rate_max = min(rate_max, outp->info.dpconf.link_bw);
			if (outp->dp.lttprs && outp->dp.lttpr[1])
				rate_max = min_t(int, rate_max, outp->dp.lttpr[1]);

			/* Prefer the eDP SUPPORTED_LINK_RATES table; fall
			 * back to the standard rates up to rate_max.
			 */
			if (!nvkm_dp_enable_supported_link_rates(outp)) {
				for (rate = rates; *rate; rate++) {
					if (*rate > rate_max)
						continue;

					if (WARN_ON(outp->dp.rates == ARRAY_SIZE(outp->dp.rate)))
						break;

					outp->dp.rate[outp->dp.rates].dpcd = -1;
					outp->dp.rate[outp->dp.rates].rate = *rate * 27000;
					outp->dp.rates++;
				}
			}

			return true;
		}
	}

	if (outp->dp.present) {
		OUTP_DBG(outp, "aux power -> demand");
		nvkm_i2c_aux_monitor(aux, false);
		outp->dp.present = false;
	}

	atomic_set(&outp->dp.lt.done, 0);
	return false;
}
676 
/* AUX-channel notifier: handles hotplug/unplug and sink IRQs,
 * forwarding the event to the display's HPD event queue.
 */
static int
nvkm_dp_hpd(struct nvkm_notify *notify)
{
	const struct nvkm_i2c_ntfy_rep *line = notify->data;
	struct nvkm_outp *outp = container_of(notify, typeof(*outp), dp.hpd);
	struct nvkm_conn *conn = outp->conn;
	struct nvkm_disp *disp = outp->disp;
	struct nvif_notify_conn_rep_v0 rep = {};

	OUTP_DBG(outp, "HPD: %d", line->mask);
	if (line->mask & NVKM_I2C_IRQ) {
		/* Sink IRQ: if training completed before, re-run acquire
		 * so the link is re-validated (and retrained if needed).
		 */
		if (atomic_read(&outp->dp.lt.done))
			outp->func->acquire(outp);
		rep.mask |= NVIF_NOTIFY_CONN_V0_IRQ;
	} else {
		/* Plug/unplug: refresh sink capabilities over AUX. */
		nvkm_dp_enable(outp, true);
	}

	if (line->mask & NVKM_I2C_UNPLUG)
		rep.mask |= NVIF_NOTIFY_CONN_V0_UNPLUG;
	if (line->mask & NVKM_I2C_PLUG)
		rep.mask |= NVIF_NOTIFY_CONN_V0_PLUG;

	nvkm_event_send(&disp->hpd, rep.mask, conn->index, &rep, sizeof(rep));
	return NVKM_NOTIFY_KEEP;
}
703 
/* Stop listening for AUX hotplug/IRQ events and release AUX power. */
static void
nvkm_dp_fini(struct nvkm_outp *outp)
{
	nvkm_notify_put(&outp->dp.hpd);
	nvkm_dp_enable(outp, false);
}
710 
/* Bring the output up: power eDP panels if needed, probe the sink over
 * AUX, and start listening for AUX hotplug/IRQ events.
 */
static void
nvkm_dp_init(struct nvkm_outp *outp)
{
	struct nvkm_gpio *gpio = outp->disp->engine.subdev.device->gpio;

	/* AUX events replace the connector's GPIO-based HPD notifier. */
	nvkm_notify_put(&outp->conn->hpd);

	/* eDP panels need powering on by us (if the VBIOS doesn't default it
	 * to on) before doing any AUX channel transactions.  LVDS panel power
	 * is handled by the SOR itself, and not required for LVDS DDC.
	 */
	if (outp->conn->info.type == DCB_CONNECTOR_eDP) {
		int power = nvkm_gpio_get(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff);
		if (power == 0)
			nvkm_gpio_set(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff, 1);

		/* We delay here unconditionally, even if already powered,
		 * because some laptop panels having a significant resume
		 * delay before the panel begins responding.
		 *
		 * This is likely a bit of a hack, but no better idea for
		 * handling this at the moment.
		 */
		msleep(300);

		/* If the eDP panel can't be detected, we need to restore
		 * the panel power GPIO to avoid breaking another output.
		 */
		if (!nvkm_dp_enable(outp, true) && power == 0)
			nvkm_gpio_set(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff, 0);
	} else {
		nvkm_dp_enable(outp, true);
	}

	nvkm_notify_get(&outp->dp.hpd);
}
747 
/* Destructor: tear down the AUX event notifier set up in nvkm_dp_new(). */
static void *
nvkm_dp_dtor(struct nvkm_outp *outp)
{
	nvkm_notify_fini(&outp->dp.hpd);
	return outp;
}
754 
/* Method table for DisplayPort outputs. */
static const struct nvkm_outp_func
nvkm_dp_func = {
	.dtor = nvkm_dp_dtor,
	.init = nvkm_dp_init,
	.fini = nvkm_dp_fini,
	.acquire = nvkm_dp_acquire,
	.release = nvkm_dp_release,
	.disable = nvkm_dp_disable,
};
764 
/* Construct a DisplayPort output from its DCB entry: locate the AUX
 * channel and VBIOS DP Info table data, and register an AUX event
 * notifier for hotplug/IRQ handling.  Returns 0 on success or a
 * negative error code.
 */
int
nvkm_dp_new(struct nvkm_disp *disp, int index, struct dcb_output *dcbE, struct nvkm_outp **poutp)
{
	struct nvkm_device *device = disp->engine.subdev.device;
	struct nvkm_bios *bios = device->bios;
	struct nvkm_i2c *i2c = device->i2c;
	struct nvkm_outp *outp;
	u8  hdr, cnt, len;
	u32 data;
	int ret;

	ret = nvkm_outp_new_(&nvkm_dp_func, disp, index, dcbE, poutp);
	outp = *poutp;
	if (ret)
		return ret;

	/* On-board outputs use CCB-indexed AUX, external ones extdev. */
	if (dcbE->location == 0)
		outp->dp.aux = nvkm_i2c_aux_find(i2c, NVKM_I2C_AUX_CCB(dcbE->i2c_index));
	else
		outp->dp.aux = nvkm_i2c_aux_find(i2c, NVKM_I2C_AUX_EXT(dcbE->extdev));
	if (!outp->dp.aux) {
		OUTP_ERR(outp, "no aux");
		return -EINVAL;
	}

	/* bios data is not optional */
	data = nvbios_dpout_match(bios, outp->info.hasht, outp->info.hashm,
				  &outp->dp.version, &hdr, &cnt, &len, &outp->dp.info);
	if (!data) {
		OUTP_ERR(outp, "no bios dp data");
		return -EINVAL;
	}

	OUTP_DBG(outp, "bios dp %02x %02x %02x %02x", outp->dp.version, hdr, cnt, len);

	/* hotplug detect, replaces gpio-based mechanism with aux events */
	ret = nvkm_notify_init(NULL, &i2c->event, nvkm_dp_hpd, true,
			       &(struct nvkm_i2c_ntfy_req) {
				.mask = NVKM_I2C_PLUG | NVKM_I2C_UNPLUG |
					NVKM_I2C_IRQ,
				.port = outp->dp.aux->id,
			       },
			       sizeof(struct nvkm_i2c_ntfy_req),
			       sizeof(struct nvkm_i2c_ntfy_rep),
			       &outp->dp.hpd);
	if (ret) {
		OUTP_ERR(outp, "error monitoring aux hpd: %d", ret);
		return ret;
	}

	mutex_init(&outp->dp.mutex);
	atomic_set(&outp->dp.lt.done, 0);
	return 0;
}
819