/*
 * Copyright 2018 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "nv50.h"
#include "head.h"
#include "ior.h"
#include "channv50.h"
#include "rootnv50.h"

#include <core/gpuobj.h>
#include <subdev/timer.h>

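/* Window channel enumeration: *pmask receives a bitmask of the windows
 * present (register 0x610064); the return value is the window count,
 * taken from bits 25:20 of 0x610074.
 */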
int
gv100_disp_wndw_cnt(struct nvkm_disp *disp, unsigned long *pmask)
{
	struct nvkm_device *device = disp->engine.subdev.device;
	*pmask = nvkm_rd32(device, 0x610064);
	return (nvkm_rd32(device, 0x610074) & 0x03f00000) >> 20;
}

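/* Supervisor bottom half, run from the disp workqueue.  disp->super holds
 * the pending supervisor stage (1, 2 or 3); each stage walks the heads and
 * applies the relevant nv50_disp_super_*() steps to the heads flagged in
 * the per-head status words at 0x6107ac+.
 */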
void
gv100_disp_super(struct work_struct *work)
{
	struct nv50_disp *disp =
		container_of(work, struct nv50_disp, supervisor);
	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_head *head;
	u32 stat = nvkm_rd32(device, 0x6107a8);
	u32 mask[4];

	nvkm_debug(subdev, "supervisor %d: %08x\n", ffs(disp->super), stat);
	list_for_each_entry(head, &disp->base.head, head) {
		mask[head->id] = nvkm_rd32(device, 0x6107ac + (head->id * 4));
		HEAD_DBG(head, "%08x", mask[head->id]);
	}

	if (disp->super & 0x00000001) {
		nv50_disp_chan_mthd(disp->chan[0], NV_DBG_DEBUG);
		nv50_disp_super_1(disp);
		list_for_each_entry(head, &disp->base.head, head) {
			if (!(mask[head->id] & 0x00001000))
				continue;
			nv50_disp_super_1_0(disp, head);
		}
	} else
	if (disp->super & 0x00000002) {
		list_for_each_entry(head, &disp->base.head, head) {
			if (!(mask[head->id] & 0x00001000))
				continue;
			nv50_disp_super_2_0(disp, head);
		}
		nvkm_outp_route(&disp->base);
		list_for_each_entry(head, &disp->base.head, head) {
			if (!(mask[head->id] & 0x00010000))
				continue;
			nv50_disp_super_2_1(disp, head);
		}
		list_for_each_entry(head, &disp->base.head, head) {
			if (!(mask[head->id] & 0x00001000))
				continue;
			nv50_disp_super_2_2(disp, head);
		}
	} else
	if (disp->super & 0x00000004) {
		list_for_each_entry(head, &disp->base.head, head) {
			if (!(mask[head->id] & 0x00001000))
				continue;
			nv50_disp_super_3_0(disp, head);
		}
	}

	list_for_each_entry(head, &disp->base.head, head)
		nvkm_wr32(device, 0x6107ac + (head->id * 4), 0x00000000);
	nvkm_wr32(device, 0x6107a8, 0x80000000);
}

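/* Decode and log a channel exception.  The status word packs an error
 * type (bits 14:12) and the failing method offset (bits 11:0, in dwords);
 * CHIDs 0..32 (CORE+WIN) also have per-channel data/code registers.
 * Writing 0x90000000 back acknowledges the exception.
 */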
static void
gv100_disp_exception(struct nv50_disp *disp, int chid)
{
	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 stat = nvkm_rd32(device, 0x611020 + (chid * 12));
	u32 type = (stat & 0x00007000) >> 12;
	u32 mthd = (stat & 0x00000fff) << 2;
	const struct nvkm_enum *reason =
		nvkm_enum_find(nv50_disp_intr_error_type, type);

	/*TODO: Suspect 33->41 are for WRBK channel exceptions, but we
	 *      don't support those currently.
	 *
	 *      CORE+WIN CHIDs map directly to the FE_EXCEPT() slots.
	 */
	if (chid <= 32) {
		u32 data = nvkm_rd32(device, 0x611024 + (chid * 12));
		u32 code = nvkm_rd32(device, 0x611028 + (chid * 12));
		nvkm_error(subdev, "chid %d stat %08x reason %d [%s] "
				   "mthd %04x data %08x code %08x\n",
			   chid, stat, type, reason ? reason->name : "",
			   mthd, data, code);
	} else {
		nvkm_error(subdev, "chid %d stat %08x reason %d [%s] "
				   "mthd %04x\n",
			   chid, stat, type, reason ? reason->name : "", mthd);
	}

	if (chid < ARRAY_SIZE(disp->chan) && disp->chan[chid]) {
		switch (mthd) {
		case 0x0200:
			nv50_disp_chan_mthd(disp->chan[chid], NV_DBG_ERROR);
			break;
		default:
			break;
		}
	}

	nvkm_wr32(device, 0x611020 + (chid * 12), 0x90000000);
}

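/* CTRL_DISP interrupt: supervisor requests (bits 2:0) are handed off to
 * the workqueue, and AWAKEN notifications (bit 8) are forwarded to the
 * core/window channel event handlers.
 */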
static void
gv100_disp_intr_ctrl_disp(struct nv50_disp *disp)
{
	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 stat = nvkm_rd32(device, 0x611c30);

	if (stat & 0x00000007) {
		disp->super = (stat & 0x00000007);
		queue_work(disp->wq, &disp->supervisor);
		nvkm_wr32(device, 0x611860, disp->super);
		stat &= ~0x00000007;
	}

	/*TODO: I would guess this is VBIOS_RELEASE, however, NFI how to
	 *      ACK it, nor does RM appear to bother.
	 */
	if (stat & 0x00000008)
		stat &= ~0x00000008;

	if (stat & 0x00000080) {
		/* Mask-with-zero: reads back the error register unchanged. */
		u32 error = nvkm_mask(device, 0x611848, 0x00000000, 0x00000000);
		nvkm_warn(subdev, "error %08x\n", error);
		stat &= ~0x00000080;
	}

	if (stat & 0x00000100) {
		unsigned long wndws = nvkm_rd32(device, 0x611858);
		unsigned long other = nvkm_rd32(device, 0x61185c);
		int wndw;

		nvkm_wr32(device, 0x611858, wndws);
		nvkm_wr32(device, 0x61185c, other);

		/* AWAKEN_OTHER_CORE. */
		if (other & 0x00000001)
			nv50_disp_chan_uevent_send(disp, 0);

		/* AWAKEN_WIN_CH(n). */
		for_each_set_bit(wndw, &wndws, disp->wndw.nr) {
			nv50_disp_chan_uevent_send(disp, 1 + wndw);
		}
	}

	if (stat)
		nvkm_warn(subdev, "ctrl %08x\n", stat);
}

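/* EXC_OTHER exceptions: bit 0 is the core channel (CHID 0); bits 23:16
 * are per-head cursor channels, mapped to CHIDs from 73 onward.
 */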
static void
gv100_disp_intr_exc_other(struct nv50_disp *disp)
{
	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 stat = nvkm_rd32(device, 0x611854);
	unsigned long mask;
	int head;

	if (stat & 0x00000001) {
		nvkm_wr32(device, 0x611854, 0x00000001);
		gv100_disp_exception(disp, 0);
		stat &= ~0x00000001;
	}

	if ((mask = (stat & 0x00ff0000) >> 16)) {
		for_each_set_bit(head, &mask, disp->wndw.nr) {
			nvkm_wr32(device, 0x611854, 0x00010000 << head);
			gv100_disp_exception(disp, 73 + head);
			stat &= ~(0x00010000 << head);
		}
	}

	if (stat) {
		nvkm_warn(subdev, "exception %08x\n", stat);
		nvkm_wr32(device, 0x611854, stat);
	}
}

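/* Window immediate (WINIM) channel exceptions, reported here as CHIDs
 * from 33 onward.
 */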
static void
gv100_disp_intr_exc_winim(struct nv50_disp *disp)
{
	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	unsigned long stat = nvkm_rd32(device, 0x611850);
	int wndw;

	for_each_set_bit(wndw, &stat, disp->wndw.nr) {
		nvkm_wr32(device, 0x611850, BIT(wndw));
		gv100_disp_exception(disp, 33 + wndw);
		stat &= ~BIT(wndw);
	}

	if (stat) {
		nvkm_warn(subdev, "wimm %08x\n", (u32)stat);
		nvkm_wr32(device, 0x611850, stat);
	}
}

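/* Window (WIN) channel exceptions, mapped to CHIDs from 1 onward,
 * directly after the core channel.
 */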
static void
gv100_disp_intr_exc_win(struct nv50_disp *disp)
{
	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	unsigned long stat = nvkm_rd32(device, 0x61184c);
	int wndw;

	for_each_set_bit(wndw, &stat, disp->wndw.nr) {
		nvkm_wr32(device, 0x61184c, BIT(wndw));
		gv100_disp_exception(disp, 1 + wndw);
		stat &= ~BIT(wndw);
	}

	if (stat) {
		nvkm_warn(subdev, "wndw %08x\n", (u32)stat);
		nvkm_wr32(device, 0x61184c, stat);
	}
}

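/* Per-head timing interrupts: LAST_DATA/LOADV (bits 1:0) are simply
 * acknowledged, while bit 2 signals vblank and is forwarded to the
 * common vblank handler.
 */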
static void
gv100_disp_intr_head_timing(struct nv50_disp *disp, int head)
{
	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 stat = nvkm_rd32(device, 0x611800 + (head * 0x04));

	/* LAST_DATA, LOADV. */
	if (stat & 0x00000003) {
		nvkm_wr32(device, 0x611800 + (head * 0x04), stat & 0x00000003);
		stat &= ~0x00000003;
	}

	if (stat & 0x00000004) {
		nvkm_disp_vblank(&disp->base, head);
		nvkm_wr32(device, 0x611800 + (head * 0x04), 0x00000004);
		stat &= ~0x00000004;
	}

	if (stat) {
		nvkm_warn(subdev, "head %08x\n", stat);
		nvkm_wr32(device, 0x611800 + (head * 0x04), stat);
	}
}

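/* Top-level interrupt dispatch: 0x611ec0 reports pending head timing
 * interrupts (bits 7:0) plus the WIN, WINIM, OTHER and CTRL_DISP groups.
 */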
void
gv100_disp_intr(struct nv50_disp *disp)
{
	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 stat = nvkm_rd32(device, 0x611ec0);
	unsigned long mask;
	int head;

	if ((mask = (stat & 0x000000ff))) {
		for_each_set_bit(head, &mask, 8) {
			gv100_disp_intr_head_timing(disp, head);
			stat &= ~BIT(head);
		}
	}

	if (stat & 0x00000200) {
		gv100_disp_intr_exc_win(disp);
		stat &= ~0x00000200;
	}

	if (stat & 0x00000400) {
		gv100_disp_intr_exc_winim(disp);
		stat &= ~0x00000400;
	}

	if (stat & 0x00000800) {
		gv100_disp_intr_exc_other(disp);
		stat &= ~0x00000800;
	}

	if (stat & 0x00001000) {
		gv100_disp_intr_ctrl_disp(disp);
		stat &= ~0x00001000;
	}

	if (stat)
		nvkm_warn(subdev, "intr %08x\n", stat);
}

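/* Disable the CTRL_DISP interrupt sources on teardown. */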
void
gv100_disp_fini(struct nv50_disp *disp)
{
	struct nvkm_device *device = disp->base.engine.subdev.device;
	nvkm_wr32(device, 0x611db0, 0x00000000);
}

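/* One-time hardware setup: take ownership of the display (likely from
 * firmware scanout), mirror the various capability registers into the
 * 0x640000 capability area, point the core at its instance memory, and
 * then configure the interrupt sources handled above.
 */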
static int
gv100_disp_init(struct nv50_disp *disp)
{
	struct nvkm_device *device = disp->base.engine.subdev.device;
	struct nvkm_head *head;
	int i, j;
	u32 tmp;

	/* Claim ownership of display. */
	if (nvkm_rd32(device, 0x6254e8) & 0x00000002) {
		nvkm_mask(device, 0x6254e8, 0x00000001, 0x00000000);
		if (nvkm_msec(device, 2000,
			if (!(nvkm_rd32(device, 0x6254e8) & 0x00000002))
				break;
		) < 0)
			return -EBUSY;
	}

	/* Lock pin capabilities. */
	tmp = nvkm_rd32(device, 0x610068);
	nvkm_wr32(device, 0x640008, tmp);

	/* SOR capabilities. */
	for (i = 0; i < disp->sor.nr; i++) {
		tmp = nvkm_rd32(device, 0x61c000 + (i * 0x800));
		nvkm_mask(device, 0x640000, 0x00000100 << i, 0x00000100 << i);
		nvkm_wr32(device, 0x640144 + (i * 0x08), tmp);
	}

	/* Head capabilities. */
	list_for_each_entry(head, &disp->base.head, head) {
		const int id = head->id;

		/* RG. */
		tmp = nvkm_rd32(device, 0x616300 + (id * 0x800));
		nvkm_wr32(device, 0x640048 + (id * 0x020), tmp);

		/* POSTCOMP. */
		for (j = 0; j < 6 * 4; j += 4) {
			tmp = nvkm_rd32(device, 0x616100 + (id * 0x800) + j);
			nvkm_wr32(device, 0x640030 + (id * 0x20) + j, tmp);
		}
	}

	/* Window capabilities. */
	for (i = 0; i < disp->wndw.nr; i++) {
		nvkm_mask(device, 0x640004, 1 << i, 1 << i);
		for (j = 0; j < 6 * 4; j += 4) {
			tmp = nvkm_rd32(device, 0x630050 + (i * 0x800) + j);
			nvkm_wr32(device, 0x6401e4 + (i * 0x20) + j, tmp);
		}
	}

	/* IHUB capabilities. */
	for (i = 0; i < 4; i++) {
		tmp = nvkm_rd32(device, 0x62e000 + (i * 0x04));
		nvkm_wr32(device, 0x640010 + (i * 0x04), tmp);
	}

	nvkm_mask(device, 0x610078, 0x00000001, 0x00000001);

	/* Setup instance memory. */
	switch (nvkm_memory_target(disp->inst->memory)) {
	case NVKM_MEM_TARGET_VRAM: tmp = 0x00000001; break;
	case NVKM_MEM_TARGET_NCOH: tmp = 0x00000002; break;
	case NVKM_MEM_TARGET_HOST: tmp = 0x00000003; break;
	default:
		/* Unknown target: bail out rather than program a stale value. */
		return -EINVAL;
	}
	nvkm_wr32(device, 0x610010, 0x00000008 | tmp);
	nvkm_wr32(device, 0x610014, disp->inst->addr >> 16);

	/* CTRL_DISP: AWAKEN, ERROR, SUPERVISOR[1-3]. */
	nvkm_wr32(device, 0x611cf0, 0x00000187); /* MSK. */
	nvkm_wr32(device, 0x611db0, 0x00000187); /* EN. */

	/* EXC_OTHER: CURSn, CORE. */
	nvkm_wr32(device, 0x611cec, disp->head.mask << 16 |
				    0x00000001); /* MSK. */
	nvkm_wr32(device, 0x611dac, 0x00000000); /* EN. */

	/* EXC_WINIM. */
	nvkm_wr32(device, 0x611ce8, disp->wndw.mask); /* MSK. */
	nvkm_wr32(device, 0x611da8, 0x00000000); /* EN. */

	/* EXC_WIN. */
	nvkm_wr32(device, 0x611ce4, disp->wndw.mask); /* MSK. */
	nvkm_wr32(device, 0x611da4, 0x00000000); /* EN. */

	/* HEAD_TIMING(n): VBLANK. */
	list_for_each_entry(head, &disp->base.head, head) {
		const u32 hoff = head->id * 4;
		nvkm_wr32(device, 0x611cc0 + hoff, 0x00000004); /* MSK. */
		nvkm_wr32(device, 0x611d80 + hoff, 0x00000000); /* EN. */
	}

	/* OR. */
	nvkm_wr32(device, 0x611cf4, 0x00000000); /* MSK. */
	nvkm_wr32(device, 0x611db4, 0x00000000); /* EN. */
	return 0;
}

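/* Implementation hooks tying the shared nv50 display core to GV100. */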
static const struct nv50_disp_func
gv100_disp = {
	.init = gv100_disp_init,
	.fini = gv100_disp_fini,
	.intr = gv100_disp_intr,
	.uevent = &gv100_disp_chan_uevent,
	.super = gv100_disp_super,
	.root = &gv100_disp_root_oclass,
	.wndw = { .cnt = gv100_disp_wndw_cnt },
	.head = { .cnt = gv100_head_cnt, .new = gv100_head_new },
	.sor = { .cnt = gv100_sor_cnt, .new = gv100_sor_new },
	.ramht_size = 0x2000,
};

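/* Constructor: instantiate the shared nv50 display core with GV100 hooks. */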
int
gv100_disp_new(struct nvkm_device *device, enum nvkm_subdev_type type, int inst,
	       struct nvkm_disp **pdisp)
{
	return nv50_disp_new_(&gv100_disp, device, type, inst, pdisp);
}