1 /*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24 #include "priv.h"
25 #include "chan.h"
26 #include "hdmi.h"
27 #include "head.h"
28 #include "ior.h"
29 #include "outp.h"
30
31 #include <core/ramht.h>
32 #include <subdev/timer.h>
33
34 #include <nvif/class.h>
35
36 static void
gf119_sor_hda_device_entry(struct nvkm_ior * ior,int head)37 gf119_sor_hda_device_entry(struct nvkm_ior *ior, int head)
38 {
39 struct nvkm_device *device = ior->disp->engine.subdev.device;
40 const u32 hoff = 0x800 * head;
41
42 nvkm_mask(device, 0x616548 + hoff, 0x00000070, head << 4);
43 }
44
45 void
gf119_sor_hda_eld(struct nvkm_ior * ior,int head,u8 * data,u8 size)46 gf119_sor_hda_eld(struct nvkm_ior *ior, int head, u8 *data, u8 size)
47 {
48 struct nvkm_device *device = ior->disp->engine.subdev.device;
49 const u32 soff = 0x030 * ior->id + (head * 0x04);
50 int i;
51
52 for (i = 0; i < size; i++)
53 nvkm_wr32(device, 0x10ec00 + soff, (i << 8) | data[i]);
54 for (; i < 0x60; i++)
55 nvkm_wr32(device, 0x10ec00 + soff, (i << 8));
56 nvkm_mask(device, 0x10ec10 + soff, 0x80000002, 0x80000002);
57 }
58
59 void
gf119_sor_hda_hpd(struct nvkm_ior * ior,int head,bool present)60 gf119_sor_hda_hpd(struct nvkm_ior *ior, int head, bool present)
61 {
62 struct nvkm_device *device = ior->disp->engine.subdev.device;
63 const u32 soff = 0x030 * ior->id + (head * 0x04);
64 u32 data = 0x80000000;
65 u32 mask = 0x80000001;
66
67 if (present) {
68 ior->func->hda->device_entry(ior, head);
69 data |= 0x00000001;
70 } else {
71 mask |= 0x00000002;
72 }
73
74 nvkm_mask(device, 0x10ec10 + soff, mask, data);
75 }
76
/* HD-Audio hooks for GF119 SORs. */
const struct nvkm_ior_func_hda
gf119_sor_hda = {
	.hpd = gf119_sor_hda_hpd,
	.eld = gf119_sor_hda_eld,
	.device_entry = gf119_sor_hda_device_entry,
};
83
84 void
gf119_sor_dp_watermark(struct nvkm_ior * sor,int head,u8 watermark)85 gf119_sor_dp_watermark(struct nvkm_ior *sor, int head, u8 watermark)
86 {
87 struct nvkm_device *device = sor->disp->engine.subdev.device;
88 const u32 hoff = head * 0x800;
89
90 nvkm_mask(device, 0x616610 + hoff, 0x0800003f, 0x08000000 | watermark);
91 }
92
93 void
gf119_sor_dp_audio_sym(struct nvkm_ior * sor,int head,u16 h,u32 v)94 gf119_sor_dp_audio_sym(struct nvkm_ior *sor, int head, u16 h, u32 v)
95 {
96 struct nvkm_device *device = sor->disp->engine.subdev.device;
97 const u32 hoff = head * 0x800;
98
99 nvkm_mask(device, 0x616620 + hoff, 0x0000ffff, h);
100 nvkm_mask(device, 0x616624 + hoff, 0x00ffffff, v);
101 }
102
/* Enable/disable DP audio for the given head, then wait for the update
 * to take effect. */
void
gf119_sor_dp_audio(struct nvkm_ior *sor, int head, bool enable)
{
	struct nvkm_device *device = sor->disp->engine.subdev.device;
	const u32 hoff = 0x800 * head;
	/* Bit 31 triggers the update; low bit carries the enable state. */
	const u32 data = 0x80000000 | (0x00000001 * enable);
	const u32 mask = 0x8000000d;

	nvkm_mask(device, 0x616618 + hoff, mask, data);
	/* Hardware clears bit 31 once the update has been applied. */
	nvkm_msec(device, 2000,
		if (!(nvkm_rd32(device, 0x616618 + hoff) & 0x80000000))
			break;
	);
}
117
118 void
gf119_sor_dp_vcpi(struct nvkm_ior * sor,int head,u8 slot,u8 slot_nr,u16 pbn,u16 aligned)119 gf119_sor_dp_vcpi(struct nvkm_ior *sor, int head, u8 slot, u8 slot_nr, u16 pbn, u16 aligned)
120 {
121 struct nvkm_device *device = sor->disp->engine.subdev.device;
122 const u32 hoff = head * 0x800;
123
124 nvkm_mask(device, 0x616588 + hoff, 0x00003f3f, (slot_nr << 8) | slot);
125 nvkm_mask(device, 0x61658c + hoff, 0xffffffff, (aligned << 16) | pbn);
126 }
127
/* Program per-lane DP drive parameters: dc (drive current/voltage swing),
 * pe (pre-emphasis), pc (post-cursor), pu (power/current scale shared by
 * all lanes).  The logical lane index is translated to a physical lane via
 * the dp->lanes[] mapping. */
void
gf119_sor_dp_drive(struct nvkm_ior *sor, int ln, int pc, int dc, int pe, int pu)
{
	struct nvkm_device *device = sor->disp->engine.subdev.device;
	const u32 loff = nv50_sor_link(sor);
	const u32 shift = sor->func->dp->lanes[ln] * 8;
	u32 data[4];

	/* Read-modify-write: clear this lane's byte, preserve the others. */
	data[0] = nvkm_rd32(device, 0x61c118 + loff) & ~(0x000000ff << shift);
	data[1] = nvkm_rd32(device, 0x61c120 + loff) & ~(0x000000ff << shift);
	data[2] = nvkm_rd32(device, 0x61c130 + loff);
	/* pu is a single shared field: only raise it (or force it on lane 0). */
	if ((data[2] & 0x0000ff00) < (pu << 8) || ln == 0)
		data[2] = (data[2] & ~0x0000ff00) | (pu << 8);

	nvkm_wr32(device, 0x61c118 + loff, data[0] | (dc << shift));
	nvkm_wr32(device, 0x61c120 + loff, data[1] | (pe << shift));
	nvkm_wr32(device, 0x61c130 + loff, data[2]);

	data[3] = nvkm_rd32(device, 0x61c13c + loff) & ~(0x000000ff << shift);
	nvkm_wr32(device, 0x61c13c + loff, data[3] | (pc << shift));
}
149
150 static void
gf119_sor_dp_pattern(struct nvkm_ior * sor,int pattern)151 gf119_sor_dp_pattern(struct nvkm_ior *sor, int pattern)
152 {
153 struct nvkm_device *device = sor->disp->engine.subdev.device;
154 const u32 soff = nv50_ior_base(sor);
155 u32 data;
156
157 switch (pattern) {
158 case 0: data = 0x10101010; break;
159 case 1: data = 0x01010101; break;
160 case 2: data = 0x02020202; break;
161 case 3: data = 0x03030303; break;
162 default:
163 WARN_ON(1);
164 return;
165 }
166
167 nvkm_mask(device, 0x61c110 + soff, 0x1f1f1f1f, data);
168 }
169
170 int
gf119_sor_dp_links(struct nvkm_ior * sor,struct nvkm_i2c_aux * aux)171 gf119_sor_dp_links(struct nvkm_ior *sor, struct nvkm_i2c_aux *aux)
172 {
173 struct nvkm_device *device = sor->disp->engine.subdev.device;
174 const u32 soff = nv50_ior_base(sor);
175 const u32 loff = nv50_sor_link(sor);
176 u32 dpctrl = 0x00000000;
177 u32 clksor = 0x00000000;
178
179 clksor |= sor->dp.bw << 18;
180 dpctrl |= ((1 << sor->dp.nr) - 1) << 16;
181 if (sor->dp.mst)
182 dpctrl |= 0x40000000;
183 if (sor->dp.ef)
184 dpctrl |= 0x00004000;
185
186 nvkm_mask(device, 0x612300 + soff, 0x007c0000, clksor);
187 nvkm_mask(device, 0x61c10c + loff, 0x401f4000, dpctrl);
188 return 0;
189 }
190
/* DisplayPort hooks for GF119 SORs; lanes[] maps logical to physical lanes. */
const struct nvkm_ior_func_dp
gf119_sor_dp = {
	.lanes = { 2, 1, 0, 3 },
	.links = gf119_sor_dp_links,
	.power = g94_sor_dp_power,
	.pattern = gf119_sor_dp_pattern,
	.drive = gf119_sor_dp_drive,
	.vcpi = gf119_sor_dp_vcpi,
	.audio = gf119_sor_dp_audio,
	.audio_sym = gf119_sor_dp_audio_sym,
	.watermark = gf119_sor_dp_watermark,
};
203
/* Enable/disable HDMI on the given head and program AVI/vendor infoframes.
 * When disabling, all infoframes and HDMI control are torn down and the
 * function returns early. */
static void
gf119_sor_hdmi_ctrl(struct nvkm_ior *ior, int head, bool enable, u8 max_ac_packet,
		    u8 rekey, u8 *avi, u8 avi_size, u8 *vendor, u8 vendor_size)
{
	struct nvkm_device *device = ior->disp->engine.subdev.device;
	const u32 ctrl = 0x40000000 * enable |
			 max_ac_packet << 16 |
			 rekey;
	const u32 hoff = head * 0x800;
	struct packed_hdmi_infoframe avi_infoframe;
	struct packed_hdmi_infoframe vendor_infoframe;

	pack_hdmi_infoframe(&avi_infoframe, avi, avi_size);
	pack_hdmi_infoframe(&vendor_infoframe, vendor, vendor_size);

	if (!(ctrl & 0x40000000)) {
		/* Disable: HDMI control, then each infoframe enable bit. */
		nvkm_mask(device, 0x616798 + hoff, 0x40000000, 0x00000000);
		nvkm_mask(device, 0x616730 + hoff, 0x00000001, 0x00000000);
		nvkm_mask(device, 0x6167a4 + hoff, 0x00000001, 0x00000000);
		nvkm_mask(device, 0x616714 + hoff, 0x00000001, 0x00000000);
		return;
	}

	/* AVI InfoFrame: disable while updating, then re-enable if present. */
	nvkm_mask(device, 0x616714 + hoff, 0x00000001, 0x00000000);
	if (avi_size) {
		nvkm_wr32(device, 0x61671c + hoff, avi_infoframe.header);
		nvkm_wr32(device, 0x616720 + hoff, avi_infoframe.subpack0_low);
		nvkm_wr32(device, 0x616724 + hoff, avi_infoframe.subpack0_high);
		nvkm_wr32(device, 0x616728 + hoff, avi_infoframe.subpack1_low);
		nvkm_wr32(device, 0x61672c + hoff, avi_infoframe.subpack1_high);
		nvkm_mask(device, 0x616714 + hoff, 0x00000001, 0x00000001);
	}

	/* GENERIC(?) / Vendor InfoFrame? */
	nvkm_mask(device, 0x616730 + hoff, 0x00010001, 0x00010000);
	if (vendor_size) {
		/*
		 * These appear to be the audio infoframe registers,
		 * but no other set of infoframe registers has yet
		 * been found.
		 */
		nvkm_wr32(device, 0x616738 + hoff, vendor_infoframe.header);
		nvkm_wr32(device, 0x61673c + hoff, vendor_infoframe.subpack0_low);
		nvkm_wr32(device, 0x616740 + hoff, vendor_infoframe.subpack0_high);
		/* Is there a second (or further?) set of subpack registers here? */
		nvkm_mask(device, 0x616730 + hoff, 0x00000001, 0x00000001);
	}

	/* ??? InfoFrame? */
	nvkm_mask(device, 0x6167a4 + hoff, 0x00000001, 0x00000000);
	nvkm_wr32(device, 0x6167ac + hoff, 0x00000010);
	nvkm_mask(device, 0x6167a4 + hoff, 0x00000001, 0x00000001);

	/* HDMI_CTRL: enable + max_ac_packet + rekey. */
	nvkm_mask(device, 0x616798 + hoff, 0x401f007f, ctrl);
}
261
/* Program the SOR clock dividers; link 3 and high-speed TMDS adjust them. */
void
gf119_sor_clock(struct nvkm_ior *sor)
{
	struct nvkm_device *device = sor->disp->engine.subdev.device;
	const u32 soff = nv50_ior_base(sor);
	u32 div1 = sor->asy.link == 3;
	u32 div2 = sor->asy.link == 3;

	if (sor->asy.proto == TMDS) {
		/* 0x14 presumably selects the >165MHz (high-speed) clock. */
		const u32 speed = sor->tmds.high_speed ? 0x14 : 0x0a;
		nvkm_mask(device, 0x612300 + soff, 0x007c0000, speed << 18);
		if (sor->tmds.high_speed)
			div2 = 1;
	}

	nvkm_mask(device, 0x612300 + soff, 0x00000707, (div2 << 8) | div1);
}
279
/* Decode armed/asserted SOR state (protocol, link, head mask) from the
 * control register; &sor->asy selects the assembly (pending) state copy. */
void
gf119_sor_state(struct nvkm_ior *sor, struct nvkm_ior_state *state)
{
	struct nvkm_device *device = sor->disp->engine.subdev.device;
	const u32 coff = (state == &sor->asy) * 0x20000 + sor->id * 0x20;
	u32 ctrl = nvkm_rd32(device, 0x640200 + coff);

	state->proto_evo = (ctrl & 0x00000f00) >> 8;
	switch (state->proto_evo) {
	case 0: state->proto = LVDS; state->link = 1; break;
	case 1: state->proto = TMDS; state->link = 1; break;
	case 2: state->proto = TMDS; state->link = 2; break;
	case 5: state->proto = TMDS; state->link = 3; break;
	case 8: state->proto = DP; state->link = 1; break;
	case 9: state->proto = DP; state->link = 2; break;
	default:
		state->proto = UNKNOWN;
		break;
	}

	/* Low nibble is the mask of heads driving this SOR. */
	state->head = ctrl & 0x0000000f;
}
302
/* GF119 SOR implementation. */
static const struct nvkm_ior_func
gf119_sor = {
	.state = gf119_sor_state,
	.power = nv50_sor_power,
	.clock = gf119_sor_clock,
	.hdmi = {
		.ctrl = gf119_sor_hdmi_ctrl,
	},
	.dp = &gf119_sor_dp,
	.hda = &gf119_sor_hda,
};
314
/* Create a GF119 SOR object with the given id. */
static int
gf119_sor_new(struct nvkm_disp *disp, int id)
{
	return nvkm_ior_new_(&gf119_sor, disp, SOR, id, true);
}
320
321 int
gf119_sor_cnt(struct nvkm_disp * disp,unsigned long * pmask)322 gf119_sor_cnt(struct nvkm_disp *disp, unsigned long *pmask)
323 {
324 struct nvkm_device *device = disp->engine.subdev.device;
325 *pmask = (nvkm_rd32(device, 0x612004) & 0x0000ff00) >> 8;
326 return 8;
327 }
328
329 static void
gf119_dac_clock(struct nvkm_ior * dac)330 gf119_dac_clock(struct nvkm_ior *dac)
331 {
332 struct nvkm_device *device = dac->disp->engine.subdev.device;
333 const u32 doff = nv50_ior_base(dac);
334 nvkm_mask(device, 0x612280 + doff, 0x07070707, 0x00000000);
335 }
336
/* Decode armed/asserted DAC state (protocol, head mask) from the control
 * register; &dac->asy selects the assembly (pending) state copy. */
static void
gf119_dac_state(struct nvkm_ior *dac, struct nvkm_ior_state *state)
{
	struct nvkm_device *device = dac->disp->engine.subdev.device;
	const u32 coff = (state == &dac->asy) * 0x20000 + dac->id * 0x20;
	u32 ctrl = nvkm_rd32(device, 0x640180 + coff);

	state->proto_evo = (ctrl & 0x00000f00) >> 8;
	switch (state->proto_evo) {
	case 0: state->proto = CRT; break;
	default:
		state->proto = UNKNOWN;
		break;
	}

	/* Low nibble is the mask of heads driving this DAC. */
	state->head = ctrl & 0x0000000f;
}
354
/* GF119 DAC implementation. */
static const struct nvkm_ior_func
gf119_dac = {
	.state = gf119_dac_state,
	.power = nv50_dac_power,
	.sense = nv50_dac_sense,
	.clock = gf119_dac_clock,
};
362
/* Create a GF119 DAC object with the given id. */
int
gf119_dac_new(struct nvkm_disp *disp, int id)
{
	return nvkm_ior_new_(&gf119_dac, disp, DAC, id, false);
}
368
369 int
gf119_dac_cnt(struct nvkm_disp * disp,unsigned long * pmask)370 gf119_dac_cnt(struct nvkm_disp *disp, unsigned long *pmask)
371 {
372 struct nvkm_device *device = disp->engine.subdev.device;
373 *pmask = (nvkm_rd32(device, 0x612004) & 0x000000f0) >> 4;
374 return 4;
375 }
376
377 static void
gf119_head_vblank_put(struct nvkm_head * head)378 gf119_head_vblank_put(struct nvkm_head *head)
379 {
380 struct nvkm_device *device = head->disp->engine.subdev.device;
381 const u32 hoff = head->id * 0x800;
382 nvkm_mask(device, 0x6100c0 + hoff, 0x00000001, 0x00000000);
383 }
384
385 static void
gf119_head_vblank_get(struct nvkm_head * head)386 gf119_head_vblank_get(struct nvkm_head *head)
387 {
388 struct nvkm_device *device = head->disp->engine.subdev.device;
389 const u32 hoff = head->id * 0x800;
390 nvkm_mask(device, 0x6100c0 + hoff, 0x00000001, 0x00000001);
391 }
392
/* Program the raster generator clock divider for this head. */
void
gf119_head_rgclk(struct nvkm_head *head, int div)
{
	struct nvkm_device *device = head->disp->engine.subdev.device;
	nvkm_mask(device, 0x612200 + (head->id * 0x800), 0x0000000f, div);
}
399
/* Read back head timings (totals, sync, blanking, pixel clock) and output
 * depth; &head->asy selects the assembly (pending) state copy. */
static void
gf119_head_state(struct nvkm_head *head, struct nvkm_head_state *state)
{
	struct nvkm_device *device = head->disp->engine.subdev.device;
	const u32 hoff = (state == &head->asy) * 0x20000 + head->id * 0x300;
	u32 data;

	/* Each register packs vertical in the high half, horizontal low. */
	data = nvkm_rd32(device, 0x640414 + hoff);
	state->vtotal = (data & 0xffff0000) >> 16;
	state->htotal = (data & 0x0000ffff);
	data = nvkm_rd32(device, 0x640418 + hoff);
	state->vsynce = (data & 0xffff0000) >> 16;
	state->hsynce = (data & 0x0000ffff);
	data = nvkm_rd32(device, 0x64041c + hoff);
	state->vblanke = (data & 0xffff0000) >> 16;
	state->hblanke = (data & 0x0000ffff);
	data = nvkm_rd32(device, 0x640420 + hoff);
	state->vblanks = (data & 0xffff0000) >> 16;
	state->hblanks = (data & 0x0000ffff);
	state->hz = nvkm_rd32(device, 0x640450 + hoff);

	/* Output depth encoded in bits 6..9 of the head config register. */
	data = nvkm_rd32(device, 0x640404 + hoff);
	switch ((data & 0x000003c0) >> 6) {
	case 6: state->or.depth = 30; break;
	case 5: state->or.depth = 24; break;
	case 2: state->or.depth = 18; break;
	case 0: state->or.depth = 18; break; /*XXX: "default" */
	default:
		state->or.depth = 18;
		WARN_ON(1);
		break;
	}
}
433
/* GF119 head implementation. */
static const struct nvkm_head_func
gf119_head = {
	.state = gf119_head_state,
	.rgpos = nv50_head_rgpos,
	.rgclk = gf119_head_rgclk,
	.vblank_get = gf119_head_vblank_get,
	.vblank_put = gf119_head_vblank_put,
};
442
/* Create a GF119 head object with the given id. */
int
gf119_head_new(struct nvkm_disp *disp, int id)
{
	return nvkm_head_new_(&gf119_head, disp, id);
}
448
/* Fill *pmask with the present heads; the returned maximum head count is
 * itself read from hardware (0x022448). */
int
gf119_head_cnt(struct nvkm_disp *disp, unsigned long *pmask)
{
	struct nvkm_device *device = disp->engine.subdev.device;
	*pmask = nvkm_rd32(device, 0x612004) & 0x0000000f;
	return nvkm_rd32(device, 0x022448);
}
456
457 static void
gf119_disp_chan_uevent_fini(struct nvkm_event * event,int type,int index)458 gf119_disp_chan_uevent_fini(struct nvkm_event *event, int type, int index)
459 {
460 struct nvkm_disp *disp = container_of(event, typeof(*disp), uevent);
461 struct nvkm_device *device = disp->engine.subdev.device;
462 nvkm_mask(device, 0x610090, 0x00000001 << index, 0x00000000 << index);
463 nvkm_wr32(device, 0x61008c, 0x00000001 << index);
464 }
465
466 static void
gf119_disp_chan_uevent_init(struct nvkm_event * event,int types,int index)467 gf119_disp_chan_uevent_init(struct nvkm_event *event, int types, int index)
468 {
469 struct nvkm_disp *disp = container_of(event, typeof(*disp), uevent);
470 struct nvkm_device *device = disp->engine.subdev.device;
471 nvkm_wr32(device, 0x61008c, 0x00000001 << index);
472 nvkm_mask(device, 0x610090, 0x00000001 << index, 0x00000001 << index);
473 }
474
/* User-event (channel completion notification) hooks. */
const struct nvkm_event_func
gf119_disp_chan_uevent = {
	.init = gf119_disp_chan_uevent_init,
	.fini = gf119_disp_chan_uevent_fini,
};
480
/* Enable/disable interrupts for a display channel.  Disabling also clears
 * the completion-event enable (0x610090); enabling only touches the error
 * interrupt enable (0x6100a0) - completion events are managed by uevent. */
void
gf119_disp_chan_intr(struct nvkm_disp_chan *chan, bool en)
{
	struct nvkm_device *device = chan->disp->engine.subdev.device;
	const u32 mask = 0x00000001 << chan->chid.user;
	if (!en) {
		nvkm_mask(device, 0x610090, mask, 0x00000000);
		nvkm_mask(device, 0x6100a0, mask, 0x00000000);
	} else {
		nvkm_mask(device, 0x6100a0, mask, mask);
	}
}
493
/* Deactivate a PIO channel and wait (up to 2s) for it to go idle. */
static void
gf119_disp_pioc_fini(struct nvkm_disp_chan *chan)
{
	struct nvkm_disp *disp = chan->disp;
	struct nvkm_subdev *subdev = &disp->engine.subdev;
	struct nvkm_device *device = subdev->device;
	int ctrl = chan->chid.ctrl;
	int user = chan->chid.user;

	nvkm_mask(device, 0x610490 + (ctrl * 0x10), 0x00000001, 0x00000000);
	/* Bits 16-17 hold the channel state; wait for them to clear. */
	if (nvkm_msec(device, 2000,
		if (!(nvkm_rd32(device, 0x610490 + (ctrl * 0x10)) & 0x00030000))
			break;
	) < 0) {
		nvkm_error(subdev, "ch %d fini: %08x\n", user,
			   nvkm_rd32(device, 0x610490 + (ctrl * 0x10)));
	}
}
512
/* Activate a PIO channel and wait (up to 2s) for it to report ready.
 * Returns 0 on success, -EBUSY if the channel fails to come up. */
static int
gf119_disp_pioc_init(struct nvkm_disp_chan *chan)
{
	struct nvkm_disp *disp = chan->disp;
	struct nvkm_subdev *subdev = &disp->engine.subdev;
	struct nvkm_device *device = subdev->device;
	int ctrl = chan->chid.ctrl;
	int user = chan->chid.user;

	/* activate channel */
	nvkm_wr32(device, 0x610490 + (ctrl * 0x10), 0x00000001);
	/* State field (bits 16-17) must reach 0x1 (active). */
	if (nvkm_msec(device, 2000,
		u32 tmp = nvkm_rd32(device, 0x610490 + (ctrl * 0x10));
		if ((tmp & 0x00030000) == 0x00010000)
			break;
	) < 0) {
		nvkm_error(subdev, "ch %d init: %08x\n", user,
			   nvkm_rd32(device, 0x610490 + (ctrl * 0x10)));
		return -EBUSY;
	}

	return 0;
}
536
/* PIO (cursor/overlay-immediate) channel hooks. */
const struct nvkm_disp_chan_func
gf119_disp_pioc_func = {
	.init = gf119_disp_pioc_init,
	.fini = gf119_disp_pioc_fini,
	.intr = gf119_disp_chan_intr,
	.user = nv50_disp_chan_user,
};
544
/* Bind a DMA object into the display RAMHT so the channel can reference it
 * by handle; context value encodes the channel id. */
int
gf119_disp_dmac_bind(struct nvkm_disp_chan *chan, struct nvkm_object *object, u32 handle)
{
	return nvkm_ramht_insert(chan->disp->ramht, object, chan->chid.user, -9, handle,
				 chan->chid.user << 27 | 0x00000001);
}
551
/* Deactivate a DMA channel, wait for idle, and save its PUT pointer so it
 * can be restored across suspend/resume. */
void
gf119_disp_dmac_fini(struct nvkm_disp_chan *chan)
{
	struct nvkm_subdev *subdev = &chan->disp->engine.subdev;
	struct nvkm_device *device = subdev->device;
	int ctrl = chan->chid.ctrl;
	int user = chan->chid.user;

	/* deactivate channel */
	nvkm_mask(device, 0x610490 + (ctrl * 0x0010), 0x00001010, 0x00001000);
	nvkm_mask(device, 0x610490 + (ctrl * 0x0010), 0x00000003, 0x00000000);
	if (nvkm_msec(device, 2000,
		if (!(nvkm_rd32(device, 0x610490 + (ctrl * 0x10)) & 0x001e0000))
			break;
	) < 0) {
		nvkm_error(subdev, "ch %d fini: %08x\n", user,
			   nvkm_rd32(device, 0x610490 + (ctrl * 0x10)));
	}

	/* Remember PUT for restore on next init. */
	chan->suspend_put = nvkm_rd32(device, 0x640000 + (ctrl * 0x1000));
}
573
/* Initialise a DMA channel for command submission: program the pushbuffer,
 * restore the saved PUT pointer, activate, and wait for it to come up.
 * Returns 0 on success, -EBUSY on timeout. */
static int
gf119_disp_dmac_init(struct nvkm_disp_chan *chan)
{
	struct nvkm_subdev *subdev = &chan->disp->engine.subdev;
	struct nvkm_device *device = subdev->device;
	int ctrl = chan->chid.ctrl;
	int user = chan->chid.user;

	/* initialise channel for dma command submission */
	nvkm_wr32(device, 0x610494 + (ctrl * 0x0010), chan->push);
	nvkm_wr32(device, 0x610498 + (ctrl * 0x0010), 0x00010000);
	nvkm_wr32(device, 0x61049c + (ctrl * 0x0010), 0x00000001);
	nvkm_mask(device, 0x610490 + (ctrl * 0x0010), 0x00000010, 0x00000010);
	nvkm_wr32(device, 0x640000 + (ctrl * 0x1000), chan->suspend_put);
	nvkm_wr32(device, 0x610490 + (ctrl * 0x0010), 0x00000013);

	/* wait for it to go inactive */
	if (nvkm_msec(device, 2000,
		if (!(nvkm_rd32(device, 0x610490 + (ctrl * 0x10)) & 0x80000000))
			break;
	) < 0) {
		nvkm_error(subdev, "ch %d init: %08x\n", user,
			   nvkm_rd32(device, 0x610490 + (ctrl * 0x10)));
		return -EBUSY;
	}

	return 0;
}
602
/* DMA (base/overlay) channel hooks. */
const struct nvkm_disp_chan_func
gf119_disp_dmac_func = {
	.push = nv50_disp_dmac_push,
	.init = gf119_disp_dmac_init,
	.fini = gf119_disp_dmac_fini,
	.intr = gf119_disp_chan_intr,
	.user = nv50_disp_chan_user,
	.bind = gf119_disp_dmac_bind,
};
612
/* Cursor (PIO) channel description. */
const struct nvkm_disp_chan_user
gf119_disp_curs = {
	.func = &gf119_disp_pioc_func,
	.ctrl = 13,
	.user = 13,
};
619
/* Overlay-immediate (PIO) channel description. */
const struct nvkm_disp_chan_user
gf119_disp_oimm = {
	.func = &gf119_disp_pioc_func,
	.ctrl = 9,
	.user = 9,
};
626
/* Overlay channel method -> register mapping, used for debug dumping. */
static const struct nvkm_disp_mthd_list
gf119_disp_ovly_mthd_base = {
	.mthd = 0x0000,
	.data = {
		{ 0x0080, 0x665080 },
		{ 0x0084, 0x665084 },
		{ 0x0088, 0x665088 },
		{ 0x008c, 0x66508c },
		{ 0x0090, 0x665090 },
		{ 0x0094, 0x665094 },
		{ 0x00a0, 0x6650a0 },
		{ 0x00a4, 0x6650a4 },
		{ 0x00b0, 0x6650b0 },
		{ 0x00b4, 0x6650b4 },
		{ 0x00b8, 0x6650b8 },
		{ 0x00c0, 0x6650c0 },
		{ 0x00e0, 0x6650e0 },
		{ 0x00e4, 0x6650e4 },
		{ 0x00e8, 0x6650e8 },
		{ 0x0100, 0x665100 },
		{ 0x0104, 0x665104 },
		{ 0x0108, 0x665108 },
		{ 0x010c, 0x66510c },
		{ 0x0110, 0x665110 },
		{ 0x0118, 0x665118 },
		{ 0x011c, 0x66511c },
		{ 0x0120, 0x665120 },
		{ 0x0124, 0x665124 },
		{ 0x0130, 0x665130 },
		{ 0x0134, 0x665134 },
		{ 0x0138, 0x665138 },
		{ 0x013c, 0x66513c },
		{ 0x0140, 0x665140 },
		{ 0x0144, 0x665144 },
		{ 0x0148, 0x665148 },
		{ 0x014c, 0x66514c },
		{ 0x0150, 0x665150 },
		{ 0x0154, 0x665154 },
		{ 0x0158, 0x665158 },
		{ 0x015c, 0x66515c },
		{ 0x0160, 0x665160 },
		{ 0x0164, 0x665164 },
		{ 0x0168, 0x665168 },
		{ 0x016c, 0x66516c },
		{ 0x0400, 0x665400 },
		{ 0x0408, 0x665408 },
		{ 0x040c, 0x66540c },
		{ 0x0410, 0x665410 },
		{}
	}
};
678
/* Overlay channel method tables (one global group). */
static const struct nvkm_disp_chan_mthd
gf119_disp_ovly_mthd = {
	.name = "Overlay",
	.addr = 0x001000,
	.prev = -0x020000,
	.data = {
		{ "Global", 1, &gf119_disp_ovly_mthd_base },
		{}
	}
};
689
/* Overlay (DMA) channel description. */
static const struct nvkm_disp_chan_user
gf119_disp_ovly = {
	.func = &gf119_disp_dmac_func,
	.ctrl = 5,
	.user = 5,
	.mthd = &gf119_disp_ovly_mthd,
};
697
/* Base channel method -> register mapping (global group). */
static const struct nvkm_disp_mthd_list
gf119_disp_base_mthd_base = {
	.mthd = 0x0000,
	.addr = 0x000000,
	.data = {
		{ 0x0080, 0x661080 },
		{ 0x0084, 0x661084 },
		{ 0x0088, 0x661088 },
		{ 0x008c, 0x66108c },
		{ 0x0090, 0x661090 },
		{ 0x0094, 0x661094 },
		{ 0x00a0, 0x6610a0 },
		{ 0x00a4, 0x6610a4 },
		{ 0x00c0, 0x6610c0 },
		{ 0x00c4, 0x6610c4 },
		{ 0x00c8, 0x6610c8 },
		{ 0x00cc, 0x6610cc },
		{ 0x00e0, 0x6610e0 },
		{ 0x00e4, 0x6610e4 },
		{ 0x00e8, 0x6610e8 },
		{ 0x00ec, 0x6610ec },
		{ 0x00fc, 0x6610fc },
		{ 0x0100, 0x661100 },
		{ 0x0104, 0x661104 },
		{ 0x0108, 0x661108 },
		{ 0x010c, 0x66110c },
		{ 0x0110, 0x661110 },
		{ 0x0114, 0x661114 },
		{ 0x0118, 0x661118 },
		{ 0x011c, 0x66111c },
		{ 0x0130, 0x661130 },
		{ 0x0134, 0x661134 },
		{ 0x0138, 0x661138 },
		{ 0x013c, 0x66113c },
		{ 0x0140, 0x661140 },
		{ 0x0144, 0x661144 },
		{ 0x0148, 0x661148 },
		{ 0x014c, 0x66114c },
		{ 0x0150, 0x661150 },
		{ 0x0154, 0x661154 },
		{ 0x0158, 0x661158 },
		{ 0x015c, 0x66115c },
		{ 0x0160, 0x661160 },
		{ 0x0164, 0x661164 },
		{ 0x0168, 0x661168 },
		{ 0x016c, 0x66116c },
		{}
	}
};
747
/* Base channel method -> register mapping (per-image group). */
static const struct nvkm_disp_mthd_list
gf119_disp_base_mthd_image = {
	.mthd = 0x0020,
	.addr = 0x000020,
	.data = {
		{ 0x0400, 0x661400 },
		{ 0x0404, 0x661404 },
		{ 0x0408, 0x661408 },
		{ 0x040c, 0x66140c },
		{ 0x0410, 0x661410 },
		{}
	}
};
761
/* Base channel method tables. */
const struct nvkm_disp_chan_mthd
gf119_disp_base_mthd = {
	.name = "Base",
	.addr = 0x001000,
	.prev = -0x020000,
	.data = {
		{ "Global", 1, &gf119_disp_base_mthd_base },
		{ "Image", 2, &gf119_disp_base_mthd_image },
		{}
	}
};
773
/* Base (DMA) channel description. */
const struct nvkm_disp_chan_user
gf119_disp_base = {
	.func = &gf119_disp_dmac_func,
	.ctrl = 1,
	.user = 1,
	.mthd = &gf119_disp_base_mthd,
};
781
/* Core channel method -> register mapping (global group). */
const struct nvkm_disp_mthd_list
gf119_disp_core_mthd_base = {
	.mthd = 0x0000,
	.addr = 0x000000,
	.data = {
		{ 0x0080, 0x660080 },
		{ 0x0084, 0x660084 },
		{ 0x0088, 0x660088 },
		{ 0x008c, 0x000000 },
		{}
	}
};
794
/* Core channel method -> register mapping (per-DAC group). */
const struct nvkm_disp_mthd_list
gf119_disp_core_mthd_dac = {
	.mthd = 0x0020,
	.addr = 0x000020,
	.data = {
		{ 0x0180, 0x660180 },
		{ 0x0184, 0x660184 },
		{ 0x0188, 0x660188 },
		{ 0x0190, 0x660190 },
		{}
	}
};
807
/* Core channel method -> register mapping (per-SOR group). */
const struct nvkm_disp_mthd_list
gf119_disp_core_mthd_sor = {
	.mthd = 0x0020,
	.addr = 0x000020,
	.data = {
		{ 0x0200, 0x660200 },
		{ 0x0204, 0x660204 },
		{ 0x0208, 0x660208 },
		{ 0x0210, 0x660210 },
		{}
	}
};
820
/* Core channel method -> register mapping (per-PIOR group). */
const struct nvkm_disp_mthd_list
gf119_disp_core_mthd_pior = {
	.mthd = 0x0020,
	.addr = 0x000020,
	.data = {
		{ 0x0300, 0x660300 },
		{ 0x0304, 0x660304 },
		{ 0x0308, 0x660308 },
		{ 0x0310, 0x660310 },
		{}
	}
};
833
/* Core channel method -> register mapping (per-head group). */
static const struct nvkm_disp_mthd_list
gf119_disp_core_mthd_head = {
	.mthd = 0x0300,
	.addr = 0x000300,
	.data = {
		{ 0x0400, 0x660400 },
		{ 0x0404, 0x660404 },
		{ 0x0408, 0x660408 },
		{ 0x040c, 0x66040c },
		{ 0x0410, 0x660410 },
		{ 0x0414, 0x660414 },
		{ 0x0418, 0x660418 },
		{ 0x041c, 0x66041c },
		{ 0x0420, 0x660420 },
		{ 0x0424, 0x660424 },
		{ 0x0428, 0x660428 },
		{ 0x042c, 0x66042c },
		{ 0x0430, 0x660430 },
		{ 0x0434, 0x660434 },
		{ 0x0438, 0x660438 },
		{ 0x0440, 0x660440 },
		{ 0x0444, 0x660444 },
		{ 0x0448, 0x660448 },
		{ 0x044c, 0x66044c },
		{ 0x0450, 0x660450 },
		{ 0x0454, 0x660454 },
		{ 0x0458, 0x660458 },
		{ 0x045c, 0x66045c },
		{ 0x0460, 0x660460 },
		{ 0x0468, 0x660468 },
		{ 0x046c, 0x66046c },
		{ 0x0470, 0x660470 },
		{ 0x0474, 0x660474 },
		{ 0x0480, 0x660480 },
		{ 0x0484, 0x660484 },
		{ 0x048c, 0x66048c },
		{ 0x0490, 0x660490 },
		{ 0x0494, 0x660494 },
		{ 0x0498, 0x660498 },
		{ 0x04b0, 0x6604b0 },
		{ 0x04b8, 0x6604b8 },
		{ 0x04bc, 0x6604bc },
		{ 0x04c0, 0x6604c0 },
		{ 0x04c4, 0x6604c4 },
		{ 0x04c8, 0x6604c8 },
		{ 0x04d0, 0x6604d0 },
		{ 0x04d4, 0x6604d4 },
		{ 0x04e0, 0x6604e0 },
		{ 0x04e4, 0x6604e4 },
		{ 0x04e8, 0x6604e8 },
		{ 0x04ec, 0x6604ec },
		{ 0x04f0, 0x6604f0 },
		{ 0x04f4, 0x6604f4 },
		{ 0x04f8, 0x6604f8 },
		{ 0x04fc, 0x6604fc },
		{ 0x0500, 0x660500 },
		{ 0x0504, 0x660504 },
		{ 0x0508, 0x660508 },
		{ 0x050c, 0x66050c },
		{ 0x0510, 0x660510 },
		{ 0x0514, 0x660514 },
		{ 0x0518, 0x660518 },
		{ 0x051c, 0x66051c },
		{ 0x052c, 0x66052c },
		{ 0x0530, 0x660530 },
		{ 0x054c, 0x66054c },
		{ 0x0550, 0x660550 },
		{ 0x0554, 0x660554 },
		{ 0x0558, 0x660558 },
		{ 0x055c, 0x66055c },
		{}
	}
};
907
/* Core channel method tables (global + per-OR + per-head groups). */
static const struct nvkm_disp_chan_mthd
gf119_disp_core_mthd = {
	.name = "Core",
	.addr = 0x000000,
	.prev = -0x020000,
	.data = {
		{ "Global", 1, &gf119_disp_core_mthd_base },
		{ "DAC", 3, &gf119_disp_core_mthd_dac },
		{ "SOR", 8, &gf119_disp_core_mthd_sor },
		{ "PIOR", 4, &gf119_disp_core_mthd_pior },
		{ "HEAD", 4, &gf119_disp_core_mthd_head },
		{}
	}
};
922
/* Deactivate the core channel, wait for idle, and save its PUT pointer
 * for restore on the next init. */
void
gf119_disp_core_fini(struct nvkm_disp_chan *chan)
{
	struct nvkm_subdev *subdev = &chan->disp->engine.subdev;
	struct nvkm_device *device = subdev->device;

	/* deactivate channel */
	nvkm_mask(device, 0x610490, 0x00000010, 0x00000000);
	nvkm_mask(device, 0x610490, 0x00000003, 0x00000000);
	if (nvkm_msec(device, 2000,
		if (!(nvkm_rd32(device, 0x610490) & 0x001e0000))
			break;
	) < 0) {
		nvkm_error(subdev, "core fini: %08x\n",
			   nvkm_rd32(device, 0x610490));
	}

	chan->suspend_put = nvkm_rd32(device, 0x640000);
}
942
/* Initialise the core channel for DMA command submission, restoring the
 * saved PUT pointer.  Returns 0 on success, -EBUSY on timeout. */
static int
gf119_disp_core_init(struct nvkm_disp_chan *chan)
{
	struct nvkm_subdev *subdev = &chan->disp->engine.subdev;
	struct nvkm_device *device = subdev->device;

	/* initialise channel for dma command submission */
	nvkm_wr32(device, 0x610494, chan->push);
	nvkm_wr32(device, 0x610498, 0x00010000);
	nvkm_wr32(device, 0x61049c, 0x00000001);
	nvkm_mask(device, 0x610490, 0x00000010, 0x00000010);
	nvkm_wr32(device, 0x640000, chan->suspend_put);
	nvkm_wr32(device, 0x610490, 0x01000013);

	/* wait for it to go inactive */
	if (nvkm_msec(device, 2000,
		if (!(nvkm_rd32(device, 0x610490) & 0x80000000))
			break;
	) < 0) {
		nvkm_error(subdev, "core init: %08x\n",
			   nvkm_rd32(device, 0x610490));
		return -EBUSY;
	}

	return 0;
}
969
/* Core channel hooks. */
const struct nvkm_disp_chan_func
gf119_disp_core_func = {
	.push = nv50_disp_dmac_push,
	.init = gf119_disp_core_init,
	.fini = gf119_disp_core_fini,
	.intr = gf119_disp_chan_intr,
	.user = nv50_disp_chan_user,
	.bind = gf119_disp_dmac_bind,
};
979
/* Core channel description. */
static const struct nvkm_disp_chan_user
gf119_disp_core = {
	.func = &gf119_disp_core_func,
	.ctrl = 0,
	.user = 0,
	.mthd = &gf119_disp_core_mthd,
};
987
/* Supervisor interrupt worker: runs the three-stage modeset sequence
 * (1: disable, 2: reconfigure/route, 3: enable) for each head flagged in
 * its per-head supervisor status register, then acknowledges the intr. */
void
gf119_disp_super(struct work_struct *work)
{
	struct nvkm_disp *disp = container_of(work, struct nvkm_disp, super.work);
	struct nvkm_subdev *subdev = &disp->engine.subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_head *head;
	/* Indexed by head->id; GF119 exposes at most 4 heads. */
	u32 mask[4];

	nvkm_debug(subdev, "supervisor %d\n", ffs(disp->super.pending));
	mutex_lock(&disp->super.mutex);

	/* Latch each head's supervisor status before handling any stage. */
	list_for_each_entry(head, &disp->heads, head) {
		mask[head->id] = nvkm_rd32(device, 0x6101d4 + (head->id * 0x800));
		HEAD_DBG(head, "%08x", mask[head->id]);
	}

	if (disp->super.pending & 0x00000001) {
		nv50_disp_chan_mthd(disp->chan[0], NV_DBG_DEBUG);
		nv50_disp_super_1(disp);
		list_for_each_entry(head, &disp->heads, head) {
			if (!(mask[head->id] & 0x00001000))
				continue;
			nv50_disp_super_1_0(disp, head);
		}
	} else
	if (disp->super.pending & 0x00000002) {
		list_for_each_entry(head, &disp->heads, head) {
			if (!(mask[head->id] & 0x00001000))
				continue;
			nv50_disp_super_2_0(disp, head);
		}
		/* Route outputs before per-head stage 2.1/2.2. */
		nvkm_outp_route(disp);
		list_for_each_entry(head, &disp->heads, head) {
			if (!(mask[head->id] & 0x00010000))
				continue;
			nv50_disp_super_2_1(disp, head);
		}
		list_for_each_entry(head, &disp->heads, head) {
			if (!(mask[head->id] & 0x00001000))
				continue;
			nv50_disp_super_2_2(disp, head);
		}
	} else
	if (disp->super.pending & 0x00000004) {
		list_for_each_entry(head, &disp->heads, head) {
			if (!(mask[head->id] & 0x00001000))
				continue;
			nv50_disp_super_3_0(disp, head);
		}
	}

	/* Clear per-head status, then signal supervisor completion. */
	list_for_each_entry(head, &disp->heads, head)
		nvkm_wr32(device, 0x6101d4 + (head->id * 0x800), 0x00000000);

	nvkm_wr32(device, 0x6101d0, 0x80000000);
	mutex_unlock(&disp->super.mutex);
}
1046
1047 void
gf119_disp_intr_error(struct nvkm_disp * disp,int chid)1048 gf119_disp_intr_error(struct nvkm_disp *disp, int chid)
1049 {
1050 struct nvkm_subdev *subdev = &disp->engine.subdev;
1051 struct nvkm_device *device = subdev->device;
1052 u32 stat = nvkm_rd32(device, 0x6101f0 + (chid * 12));
1053 u32 type = (stat & 0x00007000) >> 12;
1054 u32 mthd = (stat & 0x00000ffc);
1055 u32 data = nvkm_rd32(device, 0x6101f4 + (chid * 12));
1056 u32 code = nvkm_rd32(device, 0x6101f8 + (chid * 12));
1057 const struct nvkm_enum *reason =
1058 nvkm_enum_find(nv50_disp_intr_error_type, type);
1059
1060 nvkm_error(subdev, "chid %d stat %08x reason %d [%s] mthd %04x "
1061 "data %08x code %08x\n",
1062 chid, stat, type, reason ? reason->name : "",
1063 mthd, data, code);
1064
1065 if (chid < ARRAY_SIZE(disp->chan)) {
1066 switch (mthd) {
1067 case 0x0080:
1068 nv50_disp_chan_mthd(disp->chan[chid], NV_DBG_ERROR);
1069 break;
1070 default:
1071 break;
1072 }
1073 }
1074
1075 nvkm_wr32(device, 0x61009c, (1 << chid));
1076 nvkm_wr32(device, 0x6101f0 + (chid * 12), 0x90000000);
1077 }
1078
1079 void
gf119_disp_intr(struct nvkm_disp * disp)1080 gf119_disp_intr(struct nvkm_disp *disp)
1081 {
1082 struct nvkm_subdev *subdev = &disp->engine.subdev;
1083 struct nvkm_device *device = subdev->device;
1084 struct nvkm_head *head;
1085 u32 intr = nvkm_rd32(device, 0x610088);
1086
1087 if (intr & 0x00000001) {
1088 u32 stat = nvkm_rd32(device, 0x61008c);
1089 while (stat) {
1090 int chid = __ffs(stat); stat &= ~(1 << chid);
1091 nv50_disp_chan_uevent_send(disp, chid);
1092 nvkm_wr32(device, 0x61008c, 1 << chid);
1093 }
1094 intr &= ~0x00000001;
1095 }
1096
1097 if (intr & 0x00000002) {
1098 u32 stat = nvkm_rd32(device, 0x61009c);
1099 int chid = ffs(stat) - 1;
1100 if (chid >= 0)
1101 disp->func->intr_error(disp, chid);
1102 intr &= ~0x00000002;
1103 }
1104
1105 if (intr & 0x00100000) {
1106 u32 stat = nvkm_rd32(device, 0x6100ac);
1107 if (stat & 0x00000007) {
1108 disp->super.pending = (stat & 0x00000007);
1109 queue_work(disp->super.wq, &disp->super.work);
1110 nvkm_wr32(device, 0x6100ac, disp->super.pending);
1111 stat &= ~0x00000007;
1112 }
1113
1114 if (stat) {
1115 nvkm_warn(subdev, "intr24 %08x\n", stat);
1116 nvkm_wr32(device, 0x6100ac, stat);
1117 }
1118
1119 intr &= ~0x00100000;
1120 }
1121
1122 list_for_each_entry(head, &disp->heads, head) {
1123 const u32 hoff = head->id * 0x800;
1124 u32 mask = 0x01000000 << head->id;
1125 if (mask & intr) {
1126 u32 stat = nvkm_rd32(device, 0x6100bc + hoff);
1127 if (stat & 0x00000001)
1128 nvkm_disp_vblank(disp, head->id);
1129 nvkm_mask(device, 0x6100bc + hoff, 0, 0);
1130 nvkm_rd32(device, 0x6100c0 + hoff);
1131 }
1132 }
1133 }
1134
1135 void
gf119_disp_fini(struct nvkm_disp * disp)1136 gf119_disp_fini(struct nvkm_disp *disp)
1137 {
1138 struct nvkm_device *device = disp->engine.subdev.device;
1139 /* disable all interrupts */
1140 nvkm_wr32(device, 0x6100b0, 0x00000000);
1141 }
1142
/* Bring the display engine up.
 *
 * Mirrors capability registers into the region the EVO channels appear
 * to read, takes ownership of the display away from the VBIOS, points
 * the hardware at the display instance memory, and configures which
 * interrupts are enabled.
 *
 * Returns 0 on success, or -EBUSY if the VBIOS handoff times out.
 */
int
gf119_disp_init(struct nvkm_disp *disp)
{
	struct nvkm_device *device = disp->engine.subdev.device;
	struct nvkm_head *head;
	u32 tmp;
	int i;

	/* The below segments of code copying values from one register to
	 * another appear to inform EVO of the display capabilities or
	 * something similar.
	 */

	/* ... CRTC caps */
	list_for_each_entry(head, &disp->heads, head) {
		const u32 hoff = head->id * 0x800;
		tmp = nvkm_rd32(device, 0x616104 + hoff);
		nvkm_wr32(device, 0x6101b4 + hoff, tmp);
		tmp = nvkm_rd32(device, 0x616108 + hoff);
		nvkm_wr32(device, 0x6101b8 + hoff, tmp);
		tmp = nvkm_rd32(device, 0x61610c + hoff);
		nvkm_wr32(device, 0x6101bc + hoff, tmp);
	}

	/* ... DAC caps */
	for (i = 0; i < disp->dac.nr; i++) {
		tmp = nvkm_rd32(device, 0x61a000 + (i * 0x800));
		nvkm_wr32(device, 0x6101c0 + (i * 0x800), tmp);
	}

	/* ... SOR caps */
	for (i = 0; i < disp->sor.nr; i++) {
		tmp = nvkm_rd32(device, 0x61c000 + (i * 0x800));
		nvkm_wr32(device, 0x6301c4 + (i * 0x800), tmp);
	}

	/* steal display away from vbios, or something like that:
	 * request the handoff, then poll (up to 2s) for the hardware
	 * to report the VBIOS has released it.
	 */
	if (nvkm_rd32(device, 0x6100ac) & 0x00000100) {
		nvkm_wr32(device, 0x6100ac, 0x00000100);
		nvkm_mask(device, 0x6194e8, 0x00000001, 0x00000000);
		if (nvkm_msec(device, 2000,
			if (!(nvkm_rd32(device, 0x6194e8) & 0x00000002))
				break;
		) < 0)
			return -EBUSY;
	}

	/* point at display engine memory area (hash table, objects) */
	nvkm_wr32(device, 0x610010, (disp->inst->addr >> 8) | 9);

	/* enable supervisor interrupts, disable everything else */
	nvkm_wr32(device, 0x610090, 0x00000000);
	nvkm_wr32(device, 0x6100a0, 0x00000000);
	nvkm_wr32(device, 0x6100b0, 0x00000307);

	/* disable underflow reporting, preventing an intermittent issue
	 * on some gk104 boards where the production vbios left this
	 * setting enabled by default.
	 *
	 * ftp://download.nvidia.com/open-gpu-doc/gk104-disable-underflow-reporting/1/gk104-disable-underflow-reporting.txt
	 */
	list_for_each_entry(head, &disp->heads, head) {
		const u32 hoff = head->id * 0x800;
		nvkm_mask(device, 0x616308 + hoff, 0x00000111, 0x00000010);
	}

	return 0;
}
1211
/* Top-level GF119 display engine description: lifecycle and interrupt
 * hooks, output-path (head/DAC/SOR) enumeration and constructors, and
 * the user-visible object classes (root plus the five channel types).
 */
static const struct nvkm_disp_func
gf119_disp = {
	.oneinit = nv50_disp_oneinit,
	.init = gf119_disp_init,
	.fini = gf119_disp_fini,
	.intr = gf119_disp_intr,
	.intr_error = gf119_disp_intr_error,
	.super = gf119_disp_super,
	.uevent = &gf119_disp_chan_uevent,
	.head = { .cnt = gf119_head_cnt, .new = gf119_head_new },
	.dac = { .cnt = gf119_dac_cnt, .new = gf119_dac_new },
	.sor = { .cnt = gf119_sor_cnt, .new = gf119_sor_new },
	.root = { 0,0,GF110_DISP },
	.user = {
		{{0,0,GF110_DISP_CURSOR             }, nvkm_disp_chan_new, &gf119_disp_curs },
		{{0,0,GF110_DISP_OVERLAY            }, nvkm_disp_chan_new, &gf119_disp_oimm },
		{{0,0,GF110_DISP_BASE_CHANNEL_DMA   }, nvkm_disp_chan_new, &gf119_disp_base },
		{{0,0,GF110_DISP_CORE_CHANNEL_DMA   }, nvkm_disp_core_new, &gf119_disp_core },
		{{0,0,GF110_DISP_OVERLAY_CONTROL_DMA}, nvkm_disp_chan_new, &gf119_disp_ovly },
		{}
	},
};
1234
/* Constructor entry point: instantiate a GF119 display engine using the
 * function table above.  Returns 0 on success or a negative errno.
 */
int
gf119_disp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
	       struct nvkm_disp **pdisp)
{
	return nvkm_disp_new_(&gf119_disp, device, type, inst, pdisp);
}
1241