1 /*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24 #include "nv50.h"
25 #include "head.h"
26 #include "ior.h"
27 #include "channv50.h"
28 #include "rootnv50.h"
29
30 #include <core/client.h>
31 #include <core/ramht.h>
32 #include <subdev/bios.h>
33 #include <subdev/bios/disp.h>
34 #include <subdev/bios/init.h>
35 #include <subdev/bios/pll.h>
36 #include <subdev/devinit.h>
37 #include <subdev/timer.h>
38
39 static const struct nvkm_disp_oclass *
nv50_disp_root_(struct nvkm_disp * base)40 nv50_disp_root_(struct nvkm_disp *base)
41 {
42 return nv50_disp(base)->func->root;
43 }
44
45 static void
nv50_disp_intr_(struct nvkm_disp * base)46 nv50_disp_intr_(struct nvkm_disp *base)
47 {
48 struct nv50_disp *disp = nv50_disp(base);
49 disp->func->intr(disp);
50 }
51
52 static void
nv50_disp_fini_(struct nvkm_disp * base)53 nv50_disp_fini_(struct nvkm_disp *base)
54 {
55 struct nv50_disp *disp = nv50_disp(base);
56 disp->func->fini(disp);
57 }
58
59 static int
nv50_disp_init_(struct nvkm_disp * base)60 nv50_disp_init_(struct nvkm_disp *base)
61 {
62 struct nv50_disp *disp = nv50_disp(base);
63 return disp->func->init(disp);
64 }
65
66 static void *
nv50_disp_dtor_(struct nvkm_disp * base)67 nv50_disp_dtor_(struct nvkm_disp *base)
68 {
69 struct nv50_disp *disp = nv50_disp(base);
70
71 nvkm_ramht_del(&disp->ramht);
72 nvkm_gpuobj_del(&disp->inst);
73
74 nvkm_event_fini(&disp->uevent);
75 if (disp->wq)
76 destroy_workqueue(disp->wq);
77
78 return disp;
79 }
80
/* One-time setup: enumerate heads and output resources (DACs, PIORs,
 * SORs, and on newer chipsets windows), then allocate the instance
 * memory and hash table used by the display engine.
 */
static int
nv50_disp_oneinit_(struct nvkm_disp *base)
{
	struct nv50_disp *disp = nv50_disp(base);
	const struct nv50_disp_func *func = disp->func;
	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	int ret, i;

	/* Windows only exist on chipsets providing a wndw.cnt hook. */
	if (func->wndw.cnt) {
		disp->wndw.nr = func->wndw.cnt(&disp->base, &disp->wndw.mask);
		nvkm_debug(subdev, "Window(s): %d (%08lx)\n",
			   disp->wndw.nr, disp->wndw.mask);
	}

	/* Heads are mandatory; construct one object per set mask bit. */
	disp->head.nr = func->head.cnt(&disp->base, &disp->head.mask);
	nvkm_debug(subdev, "  Head(s): %d (%02lx)\n",
		   disp->head.nr, disp->head.mask);
	for_each_set_bit(i, &disp->head.mask, disp->head.nr) {
		ret = func->head.new(&disp->base, i);
		if (ret)
			return ret;
	}

	/* DACs are optional (not present on all chipsets). */
	if (func->dac.cnt) {
		disp->dac.nr = func->dac.cnt(&disp->base, &disp->dac.mask);
		nvkm_debug(subdev, "   DAC(s): %d (%02lx)\n",
			   disp->dac.nr, disp->dac.mask);
		for_each_set_bit(i, &disp->dac.mask, disp->dac.nr) {
			ret = func->dac.new(&disp->base, i);
			if (ret)
				return ret;
		}
	}

	/* PIORs are optional as well. */
	if (func->pior.cnt) {
		disp->pior.nr = func->pior.cnt(&disp->base, &disp->pior.mask);
		nvkm_debug(subdev, "  PIOR(s): %d (%02lx)\n",
			   disp->pior.nr, disp->pior.mask);
		for_each_set_bit(i, &disp->pior.mask, disp->pior.nr) {
			ret = func->pior.new(&disp->base, i);
			if (ret)
				return ret;
		}
	}

	/* SORs are mandatory on every chipset handled here. */
	disp->sor.nr = func->sor.cnt(&disp->base, &disp->sor.mask);
	nvkm_debug(subdev, "   SOR(s): %d (%02lx)\n",
		   disp->sor.nr, disp->sor.mask);
	for_each_set_bit(i, &disp->sor.mask, disp->sor.nr) {
		ret = func->sor.new(&disp->base, i);
		if (ret)
			return ret;
	}

	/* Instance memory backing the display hash table and objects. */
	ret = nvkm_gpuobj_new(device, 0x10000, 0x10000, false, NULL,
			      &disp->inst);
	if (ret)
		return ret;

	/* Hash table size defaults to 0x1000 unless the chipset overrides. */
	return nvkm_ramht_new(device, func->ramht_size ? func->ramht_size :
			      0x1000, 0, disp->inst, &disp->ramht);
}
144
/* Base-class hooks shared by every nv50-family display implementation;
 * each simply forwards to the chipset-specific nv50_disp_func table.
 */
static const struct nvkm_disp_func
nv50_disp_ = {
	.dtor = nv50_disp_dtor_,
	.oneinit = nv50_disp_oneinit_,
	.init = nv50_disp_init_,
	.fini = nv50_disp_fini_,
	.intr = nv50_disp_intr_,
	.root = nv50_disp_root_,
};
154
/* Common constructor for nv50-family display engines.
 *
 * func:  chipset-specific hook table.
 * pdisp: receives the base object; set before construction completes.
 *
 * Returns 0 on success, negative errno on failure.
 */
int
nv50_disp_new_(const struct nv50_disp_func *func, struct nvkm_device *device,
	       enum nvkm_subdev_type type, int inst, struct nvkm_disp **pdisp)
{
	struct nv50_disp *disp;
	int ret;

	if (!(disp = kzalloc(sizeof(*disp), GFP_KERNEL)))
		return -ENOMEM;
	disp->func = func;
	/* NOTE(review): *pdisp is published before construction finishes —
	 * presumably so the caller's error path can destroy the partially
	 * constructed object (the dtor tolerates a NULL wq); confirm against
	 * callers.
	 */
	*pdisp = &disp->base;

	ret = nvkm_disp_ctor(&nv50_disp_, device, type, inst, &disp->base);
	if (ret)
		return ret;

	/* Supervisor handling runs from process context on this queue. */
	disp->wq = create_singlethread_workqueue("nvkm-disp");
	if (!disp->wq)
		return -ENOMEM;

	INIT_WORK(&disp->supervisor, func->super);

	/* Channel completion-notifier event, one bit per channel slot. */
	return nvkm_event_init(func->uevent, 1, ARRAY_SIZE(disp->chan),
			       &disp->uevent);
}
180
181 static u32
nv50_disp_super_iedt(struct nvkm_head * head,struct nvkm_outp * outp,u8 * ver,u8 * hdr,u8 * cnt,u8 * len,struct nvbios_outp * iedt)182 nv50_disp_super_iedt(struct nvkm_head *head, struct nvkm_outp *outp,
183 u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
184 struct nvbios_outp *iedt)
185 {
186 struct nvkm_bios *bios = head->disp->engine.subdev.device->bios;
187 const u8 l = ffs(outp->info.link);
188 const u16 t = outp->info.hasht;
189 const u16 m = (0x0100 << head->id) | (l << 6) | outp->info.or;
190 u32 data = nvbios_outp_match(bios, t, m, ver, hdr, cnt, len, iedt);
191 if (!data)
192 OUTP_DBG(outp, "missing IEDT for %04x:%04x", t, m);
193 return data;
194 }
195
/* Execute the VBIOS "OnInt" IED script for the output an IOR is being
 * attached to: look up the device's IED table, then the runtime-settings
 * entry matching the current EVO protocol/flags, then the clock-compare
 * entry for the given frequency, and run the resulting init script.
 *
 * id:  clock-compare slot (0 = OnInt2, 1 = OnInt3).
 * khz: assembly-state pixel clock in kHz.
 */
static void
nv50_disp_super_ied_on(struct nvkm_head *head,
		       struct nvkm_ior *ior, int id, u32 khz)
{
	struct nvkm_subdev *subdev = &head->disp->engine.subdev;
	struct nvkm_bios *bios = subdev->device->bios;
	struct nvkm_outp *outp = ior->asy.outp;
	struct nvbios_ocfg iedtrs;
	struct nvbios_outp iedt;
	u8 ver, hdr, cnt, len, flags = 0x00;
	u32 data;

	if (!outp) {
		IOR_DBG(ior, "nothing to attach");
		return;
	}

	/* Lookup IED table for the device. */
	data = nv50_disp_super_iedt(head, outp, &ver, &hdr, &cnt, &len, &iedt);
	if (!data)
		return;

	/* Lookup IEDT runtime settings for the current configuration. */
	if (ior->type == SOR) {
		/* Flag 0x02: 24bpp LVDS; flag 0x01: dual-link (link == 3). */
		if (ior->asy.proto == LVDS) {
			if (head->asy.or.depth == 24)
				flags |= 0x02;
		}
		if (ior->asy.link == 3)
			flags |= 0x01;
	}

	data = nvbios_ocfg_match(bios, data, ior->asy.proto_evo, flags,
				 &ver, &hdr, &cnt, &len, &iedtrs);
	if (!data) {
		OUTP_DBG(outp, "missing IEDT RS for %02x:%02x",
			 ior->asy.proto_evo, flags);
		return;
	}

	/* Execute the OnInt[23] script for the current frequency. */
	data = nvbios_oclk_match(bios, iedtrs.clkcmp[id], khz);
	if (!data) {
		OUTP_DBG(outp, "missing IEDT RSS %d for %02x:%02x %d khz",
			 id, ior->asy.proto_evo, flags, khz);
		return;
	}

	/* nvbios_init() is a statement macro; the assignments configure
	 * the script's execution context (output, OR, link, head).
	 */
	nvbios_init(subdev, data,
		init.outp = &outp->info;
		init.or   = ior->id;
		init.link = ior->asy.link;
		init.head = head->id;
	);
}
251
/* Execute the VBIOS "OffInt" IED script (iedt.script[id]) for the output
 * currently armed on an IOR, if any.
 */
static void
nv50_disp_super_ied_off(struct nvkm_head *head, struct nvkm_ior *ior, int id)
{
	struct nvkm_outp *outp = ior->arm.outp;
	struct nvbios_outp iedt;
	u8 ver, hdr, cnt, len;
	u32 data;

	if (!outp) {
		IOR_DBG(ior, "nothing attached");
		return;
	}

	/* Lookup IED table for the device; bail if it has no entry. */
	data = nv50_disp_super_iedt(head, outp, &ver, &hdr, &cnt, &len, &iedt);
	if (!data)
		return;

	/* Note: uses the armed (current) link state, unlike the on-path. */
	nvbios_init(&head->disp->engine.subdev, iedt.script[id],
		init.outp = &outp->info;
		init.or   = ior->id;
		init.link = ior->arm.link;
		init.head = head->id;
	);
}
276
277 static struct nvkm_ior *
nv50_disp_super_ior_asy(struct nvkm_head * head)278 nv50_disp_super_ior_asy(struct nvkm_head *head)
279 {
280 struct nvkm_ior *ior;
281 list_for_each_entry(ior, &head->disp->ior, head) {
282 if (ior->asy.head & (1 << head->id)) {
283 HEAD_DBG(head, "to %s", ior->name);
284 return ior;
285 }
286 }
287 HEAD_DBG(head, "nothing to attach");
288 return NULL;
289 }
290
291 static struct nvkm_ior *
nv50_disp_super_ior_arm(struct nvkm_head * head)292 nv50_disp_super_ior_arm(struct nvkm_head *head)
293 {
294 struct nvkm_ior *ior;
295 list_for_each_entry(ior, &head->disp->ior, head) {
296 if (ior->arm.head & (1 << head->id)) {
297 HEAD_DBG(head, "on %s", ior->name);
298 return ior;
299 }
300 }
301 HEAD_DBG(head, "nothing attached");
302 return NULL;
303 }
304
305 void
nv50_disp_super_3_0(struct nv50_disp * disp,struct nvkm_head * head)306 nv50_disp_super_3_0(struct nv50_disp *disp, struct nvkm_head *head)
307 {
308 struct nvkm_ior *ior;
309
310 /* Determine which OR, if any, we're attaching to the head. */
311 HEAD_DBG(head, "supervisor 3.0");
312 ior = nv50_disp_super_ior_asy(head);
313 if (!ior)
314 return;
315
316 /* Execute OnInt3 IED script. */
317 nv50_disp_super_ied_on(head, ior, 1, head->asy.hz / 1000);
318
319 /* OR-specific handling. */
320 if (ior->func->war_3)
321 ior->func->war_3(ior);
322 }
323
/* Program DisplayPort link parameters for a head: audio symbol counts
 * for hblank/vblank, the transfer-unit (TU) active-symbol fraction, and
 * the watermark.  The fixed-point search below represents the ratio of
 * data rate to link symbol rate as VTUi + 1/VTUf (+VTUa), scaled by
 * `symbol` (100000) to stay in integer arithmetic.
 */
static void
nv50_disp_super_2_2_dp(struct nvkm_head *head, struct nvkm_ior *ior)
{
	struct nvkm_subdev *subdev = &head->disp->engine.subdev;
	const u32 khz = head->asy.hz / 1000;
	/* dp.bw is in 27MHz units; linkKBps is per-lane link rate in kB/s. */
	const u32 linkKBps = ior->dp.bw * 27000;
	const u32 symbol = 100000;
	int bestTU = 0, bestVTUi = 0, bestVTUf = 0, bestVTUa = 0;
	int TU, VTUi, VTUf, VTUa;
	u64 link_data_rate, link_ratio, unk;
	u32 best_diff = 64 * symbol;
	u64 h, v;

	/* symbols/hblank - algorithm taken from comments in tegra driver */
	h = head->asy.hblanke + head->asy.htotal - head->asy.hblanks - 7;
	h = h * linkKBps;
	do_div(h, khz);
	h = h - (3 * ior->dp.ef) - (12 / ior->dp.nr);

	/* symbols/vblank - algorithm taken from comments in tegra driver */
	v = head->asy.vblanks - head->asy.vblanke - 25;
	v = v * linkKBps;
	do_div(v, khz);
	v = v - ((36 / ior->dp.nr) + 3) - 1;

	ior->func->dp.audio_sym(ior, head->id, h, v);

	/* watermark / activesym */
	link_data_rate = (khz * head->asy.or.depth / 8) / ior->dp.nr;

	/* calculate ratio of packed data rate to link symbol rate */
	link_ratio = link_data_rate * symbol;
	do_div(link_ratio, linkKBps);

	/* Search TU sizes 64 down to 32 for the closest fractional fit;
	 * skipped entirely when the OR has no activesym hook.
	 */
	for (TU = 64; ior->func->dp.activesym && TU >= 32; TU--) {
		/* calculate average number of valid symbols in each TU */
		u32 tu_valid = link_ratio * TU;
		u32 calc, diff;

		/* find a hw representation for the fraction.. */
		VTUi = tu_valid / symbol;
		calc = VTUi * symbol;
		diff = tu_valid - calc;
		if (diff) {
			if (diff >= (symbol / 2)) {
				VTUf = symbol / (symbol - diff);
				if (symbol - (VTUf * diff))
					VTUf++;

				/* VTUf is a 4-bit field in hardware (<= 15). */
				if (VTUf <= 15) {
					VTUa  = 1;
					calc += symbol - (symbol / VTUf);
				} else {
					VTUa  = 0;
					VTUf  = 1;
					calc += symbol;
				}
			} else {
				VTUa  = 0;
				VTUf  = min((int)(symbol / diff), 15);
				calc += symbol / VTUf;
			}

			diff = calc - tu_valid;
		} else {
			/* no remainder, but the hw doesn't like the fractional
			 * part to be zero. decrement the integer part and
			 * have the fraction add a whole symbol back
			 */
			VTUa = 0;
			VTUf = 1;
			VTUi--;
		}

		if (diff < best_diff) {
			best_diff = diff;
			bestTU = TU;
			bestVTUa = VTUa;
			bestVTUf = VTUf;
			bestVTUi = VTUi;
			/* Exact fit; no better result possible. */
			if (diff == 0)
				break;
		}
	}

	if (ior->func->dp.activesym) {
		if (!bestTU) {
			nvkm_error(subdev, "unable to determine dp config\n");
			return;
		}
		ior->func->dp.activesym(ior, head->id, bestTU,
					bestVTUa, bestVTUf, bestVTUi);
	} else {
		bestTU = 64;
	}

	/* XXX close to vbios numbers, but not right */
	unk  = (symbol - link_ratio) * bestTU;
	unk *= link_ratio;
	do_div(unk, symbol);
	do_div(unk, symbol);
	unk += 6;

	ior->func->dp.watermark(ior, head->id, unk);
}
429
/* Supervisor 2.2: attach and clock an OR for one head — LVDS config
 * override, output acquire (link training), OnInt2 IED script, RG clock
 * divider, DP link parameters, then OR clock programming.
 */
void
nv50_disp_super_2_2(struct nv50_disp *disp, struct nvkm_head *head)
{
	const u32 khz = head->asy.hz / 1000;
	struct nvkm_outp *outp;
	struct nvkm_ior *ior;

	/* Determine which OR, if any, we're attaching from the head. */
	HEAD_DBG(head, "supervisor 2.2");
	ior = nv50_disp_super_ior_asy(head);
	if (!ior)
		return;

	/* For some reason, NVIDIA decided not to:
	 *
	 * A) Give dual-link LVDS a separate EVO protocol, like for TMDS.
	 *  and
	 * B) Use SetControlOutputResource.PixelDepth on LVDS.
	 *
	 * Override the values we usually read from HW with the same
	 * data we pass though an ioctl instead.
	 */
	if (ior->type == SOR && ior->asy.proto == LVDS) {
		head->asy.or.depth = (disp->sor.lvdsconf & 0x0200) ? 24 : 18;
		ior->asy.link      = (disp->sor.lvdsconf & 0x0100) ? 3  : 1;
	}

	/* Handle any link training, etc. */
	if ((outp = ior->asy.outp) && outp->func->acquire)
		outp->func->acquire(outp);

	/* Execute OnInt2 IED script. */
	nv50_disp_super_ied_on(head, ior, 0, khz);

	/* Program RG clock divider. */
	head->func->rgclk(head, ior->asy.rgdiv);

	/* Mode-specific internal DP configuration. */
	if (ior->type == SOR && ior->asy.proto == DP)
		nv50_disp_super_2_2_dp(head, ior);

	/* OR-specific handling. */
	ior->func->clock(ior);
	if (ior->func->war_2)
		ior->func->war_2(ior);
}
476
477 void
nv50_disp_super_2_1(struct nv50_disp * disp,struct nvkm_head * head)478 nv50_disp_super_2_1(struct nv50_disp *disp, struct nvkm_head *head)
479 {
480 struct nvkm_devinit *devinit = disp->base.engine.subdev.device->devinit;
481 const u32 khz = head->asy.hz / 1000;
482 HEAD_DBG(head, "supervisor 2.1 - %d khz", khz);
483 if (khz)
484 nvkm_devinit_pll_set(devinit, PLL_VPLL0 + head->id, khz);
485 }
486
487 void
nv50_disp_super_2_0(struct nv50_disp * disp,struct nvkm_head * head)488 nv50_disp_super_2_0(struct nv50_disp *disp, struct nvkm_head *head)
489 {
490 struct nvkm_outp *outp;
491 struct nvkm_ior *ior;
492
493 /* Determine which OR, if any, we're detaching from the head. */
494 HEAD_DBG(head, "supervisor 2.0");
495 ior = nv50_disp_super_ior_arm(head);
496 if (!ior)
497 return;
498
499 /* Execute OffInt2 IED script. */
500 nv50_disp_super_ied_off(head, ior, 2);
501
502 /* If we're shutting down the OR's only active head, execute
503 * the output path's disable function.
504 */
505 if (ior->arm.head == (1 << head->id)) {
506 if ((outp = ior->arm.outp) && outp->func->disable)
507 outp->func->disable(outp, ior);
508 }
509 }
510
/* Supervisor 1.0: run the OffInt1 IED script for a head being shut down. */
void
nv50_disp_super_1_0(struct nv50_disp *disp, struct nvkm_head *head)
{
	struct nvkm_ior *ior;

	HEAD_DBG(head, "supervisor 1.0");

	/* No attached OR means there's no script to execute. */
	ior = nv50_disp_super_ior_arm(head);
	if (ior)
		nv50_disp_super_ied_off(head, ior, 1);
}
525
/* Supervisor 1: snapshot current (arm) and pending (asy) hardware state
 * for every head and output resource, for use by the later stages.
 */
void
nv50_disp_super_1(struct nv50_disp *disp)
{
	struct nvkm_head *head;
	struct nvkm_ior *ior;

	list_for_each_entry(head, &disp->base.head, head) {
		head->func->state(head, &head->arm);
		head->func->state(head, &head->asy);
	}

	list_for_each_entry(ior, &disp->base.ior, head) {
		ior->func->state(ior, &ior->arm);
		ior->func->state(ior, &ior->asy);
	}
}
542
/* Supervisor work handler, queued from the interrupt handler.
 *
 * disp->super holds the stage bits latched at IRQ time (0x10/0x20/0x40
 * for stages 1/2/3); 0x610030 indicates which heads each sub-stage
 * applies to.  Writing 0x80000000 to 0x610030 at the end signals the
 * hardware that supervisor processing is complete.
 */
void
nv50_disp_super(struct work_struct *work)
{
	struct nv50_disp *disp =
		container_of(work, struct nv50_disp, supervisor);
	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_head *head;
	u32 super = nvkm_rd32(device, 0x610030);

	nvkm_debug(subdev, "supervisor %08x %08x\n", disp->super, super);

	/* Stage 1: snapshot state, run OffInt1 for affected heads. */
	if (disp->super & 0x00000010) {
		nv50_disp_chan_mthd(disp->chan[0], NV_DBG_DEBUG);
		nv50_disp_super_1(disp);
		list_for_each_entry(head, &disp->base.head, head) {
			if (!(super & (0x00000020 << head->id)))
				continue;
			if (!(super & (0x00000080 << head->id)))
				continue;
			nv50_disp_super_1_0(disp, head);
		}
	} else
	/* Stage 2: detach, reroute outputs, set PLLs, attach. */
	if (disp->super & 0x00000020) {
		list_for_each_entry(head, &disp->base.head, head) {
			if (!(super & (0x00000080 << head->id)))
				continue;
			nv50_disp_super_2_0(disp, head);
		}
		nvkm_outp_route(&disp->base);
		list_for_each_entry(head, &disp->base.head, head) {
			if (!(super & (0x00000200 << head->id)))
				continue;
			nv50_disp_super_2_1(disp, head);
		}
		list_for_each_entry(head, &disp->base.head, head) {
			if (!(super & (0x00000080 << head->id)))
				continue;
			nv50_disp_super_2_2(disp, head);
		}
	} else
	/* Stage 3: final per-head scripts/workarounds. */
	if (disp->super & 0x00000040) {
		list_for_each_entry(head, &disp->base.head, head) {
			if (!(super & (0x00000080 << head->id)))
				continue;
			nv50_disp_super_3_0(disp, head);
		}
	}

	nvkm_wr32(device, 0x610030, 0x80000000);
}
594
/* Human-readable names for the error TYPE field of 0x610080 (bits 12:14);
 * value 6 intentionally has no name and falls through to "".
 */
const struct nvkm_enum
nv50_disp_intr_error_type[] = {
	{ 0, "NONE" },
	{ 1, "PUSHBUFFER_ERR" },
	{ 2, "TRAP" },
	{ 3, "RESERVED_METHOD" },
	{ 4, "INVALID_ARG" },
	{ 5, "INVALID_STATE" },
	{ 7, "UNRESOLVABLE_HANDLE" },
	{}
};
606
/* Error CODE field of 0x610080 (bits 16:23); only 0x00 is decoded here. */
static const struct nvkm_enum
nv50_disp_intr_error_code[] = {
	{ 0x00, "" },
	{}
};
612
/* Decode and log a per-channel EVO error interrupt, then acknowledge it.
 *
 * 0x610080+chid*8 packs code (16:23), type (12:14) and method (2:11);
 * 0x610084+chid*8 holds the offending method's data word.
 */
static void
nv50_disp_intr_error(struct nv50_disp *disp, int chid)
{
	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 data = nvkm_rd32(device, 0x610084 + (chid * 0x08));
	u32 addr = nvkm_rd32(device, 0x610080 + (chid * 0x08));
	u32 code = (addr & 0x00ff0000) >> 16;
	u32 type = (addr & 0x00007000) >> 12;
	u32 mthd = (addr & 0x00000ffc);
	const struct nvkm_enum *ec, *et;

	et = nvkm_enum_find(nv50_disp_intr_error_type, type);
	ec = nvkm_enum_find(nv50_disp_intr_error_code, code);

	nvkm_error(subdev,
		   "ERROR %d [%s] %02x [%s] chid %d mthd %04x data %08x\n",
		   type, et ? et->name : "", code, ec ? ec->name : "",
		   chid, mthd, data);

	/* On a method-0x0080 (update) error, dump the channel's methods. */
	if (chid < ARRAY_SIZE(disp->chan)) {
		switch (mthd) {
		case 0x0080:
			nv50_disp_chan_mthd(disp->chan[chid], NV_DBG_ERROR);
			break;
		default:
			break;
		}
	}

	/* Acknowledge: clear the interrupt bit and reset the error state. */
	nvkm_wr32(device, 0x610020, 0x00010000 << chid);
	nvkm_wr32(device, 0x610080 + (chid * 0x08), 0x90000000);
}
646
/* Top-level display interrupt handler.
 *
 * 0x610020 (intr0): bits 16:20 per-channel errors, bits 0:4 per-channel
 * completion notifications.  0x610024 (intr1): bits 2/3 head vblank,
 * bits 4:6 supervisor stages (deferred to the workqueue).
 */
void
nv50_disp_intr(struct nv50_disp *disp)
{
	struct nvkm_device *device = disp->base.engine.subdev.device;
	u32 intr0 = nvkm_rd32(device, 0x610020);
	u32 intr1 = nvkm_rd32(device, 0x610024);

	/* Per-channel errors (acknowledged inside the handler). */
	while (intr0 & 0x001f0000) {
		u32 chid = __ffs(intr0 & 0x001f0000) - 16;
		nv50_disp_intr_error(disp, chid);
		intr0 &= ~(0x00010000 << chid);
	}

	/* Per-channel completion notifier events. */
	while (intr0 & 0x0000001f) {
		u32 chid = __ffs(intr0 & 0x0000001f);
		nv50_disp_chan_uevent_send(disp, chid);
		intr0 &= ~(0x00000001 << chid);
	}

	if (intr1 & 0x00000004) {
		nvkm_disp_vblank(&disp->base, 0);
		nvkm_wr32(device, 0x610024, 0x00000004);
	}

	if (intr1 & 0x00000008) {
		nvkm_disp_vblank(&disp->base, 1);
		nvkm_wr32(device, 0x610024, 0x00000008);
	}

	/* Latch the supervisor stage bits and process them in the
	 * workqueue; acknowledge only after queuing.
	 */
	if (intr1 & 0x00000070) {
		disp->super = (intr1 & 0x00000070);
		queue_work(disp->wq, &disp->supervisor);
		nvkm_wr32(device, 0x610024, disp->super);
	}
}
682
/* Shut the display engine down by masking both interrupt registers. */
void
nv50_disp_fini(struct nv50_disp *disp)
{
	struct nvkm_device *device = disp->base.engine.subdev.device;
	/* disable all interrupts */
	nvkm_wr32(device, 0x610024, 0x00000000);
	nvkm_wr32(device, 0x610020, 0x00000000);
}
691
/* Bring the display engine up: mirror capability registers into the
 * EVO-visible range, take ownership of the display from the VBIOS,
 * point the engine at its instance memory, and enable supervisor
 * interrupts.  Returns -EBUSY if the VBIOS handoff times out.
 */
int
nv50_disp_init(struct nv50_disp *disp)
{
	struct nvkm_device *device = disp->base.engine.subdev.device;
	struct nvkm_head *head;
	u32 tmp;
	int i;

	/* The below segments of code copying values from one register to
	 * another appear to inform EVO of the display capabilities or
	 * something similar.  NFI what the 0x614004 caps are for..
	 */
	tmp = nvkm_rd32(device, 0x614004);
	nvkm_wr32(device, 0x610184, tmp);

	/* ... CRTC caps */
	list_for_each_entry(head, &disp->base.head, head) {
		tmp = nvkm_rd32(device, 0x616100 + (head->id * 0x800));
		nvkm_wr32(device, 0x610190 + (head->id * 0x10), tmp);
		tmp = nvkm_rd32(device, 0x616104 + (head->id * 0x800));
		nvkm_wr32(device, 0x610194 + (head->id * 0x10), tmp);
		tmp = nvkm_rd32(device, 0x616108 + (head->id * 0x800));
		nvkm_wr32(device, 0x610198 + (head->id * 0x10), tmp);
		tmp = nvkm_rd32(device, 0x61610c + (head->id * 0x800));
		nvkm_wr32(device, 0x61019c + (head->id * 0x10), tmp);
	}

	/* ... DAC caps */
	for (i = 0; i < disp->dac.nr; i++) {
		tmp = nvkm_rd32(device, 0x61a000 + (i * 0x800));
		nvkm_wr32(device, 0x6101d0 + (i * 0x04), tmp);
	}

	/* ... SOR caps */
	for (i = 0; i < disp->sor.nr; i++) {
		tmp = nvkm_rd32(device, 0x61c000 + (i * 0x800));
		nvkm_wr32(device, 0x6101e0 + (i * 0x04), tmp);
	}

	/* ... PIOR caps */
	for (i = 0; i < disp->pior.nr; i++) {
		tmp = nvkm_rd32(device, 0x61e000 + (i * 0x800));
		nvkm_wr32(device, 0x6101f0 + (i * 0x04), tmp);
	}

	/* steal display away from vbios, or something like that */
	if (nvkm_rd32(device, 0x610024) & 0x00000100) {
		nvkm_wr32(device, 0x610024, 0x00000100);
		nvkm_mask(device, 0x6194e8, 0x00000001, 0x00000000);
		/* Wait up to 2ms for the VBIOS to release the engine. */
		if (nvkm_msec(device, 2000,
			if (!(nvkm_rd32(device, 0x6194e8) & 0x00000002))
				break;
		) < 0)
			return -EBUSY;
	}

	/* point at display engine memory area (hash table, objects) */
	nvkm_wr32(device, 0x610010, (disp->inst->addr >> 8) | 9);

	/* enable supervisor interrupts, disable everything else */
	nvkm_wr32(device, 0x61002c, 0x00000370);
	nvkm_wr32(device, 0x610028, 0x00000000);
	return 0;
}
756
/* NV50-chipset hook table: interrupt/init/fini handlers, supervisor
 * worker, root object class, and per-OR-type enumerators/constructors.
 */
static const struct nv50_disp_func
nv50_disp = {
	.init = nv50_disp_init,
	.fini = nv50_disp_fini,
	.intr = nv50_disp_intr,
	.uevent = &nv50_disp_chan_uevent,
	.super = nv50_disp_super,
	.root = &nv50_disp_root_oclass,
	.head = { .cnt = nv50_head_cnt, .new = nv50_head_new },
	.dac = { .cnt = nv50_dac_cnt, .new = nv50_dac_new },
	.sor = { .cnt = nv50_sor_cnt, .new = nv50_sor_new },
	.pior = { .cnt = nv50_pior_cnt, .new = nv50_pior_new },
};
770
/* Construct an NV50 display engine using the nv50_disp hook table. */
int
nv50_disp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
	      struct nvkm_disp **pdisp)
{
	return nv50_disp_new_(&nv50_disp, device, type, inst, pdisp);
}
777