// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 Broadcom
 */

/**
 * DOC: VC4 HVS module.
 *
 * The Hardware Video Scaler (HVS) is the piece of hardware that does
 * translation, scaling, colorspace conversion, and compositing of
 * pixels stored in framebuffers into a FIFO of pixels going out to
 * the Pixel Valve (CRTC). It operates at the system clock rate (the
 * system audio clock gate, specifically), which is much higher than
 * the pixel clock rate.
 *
 * There is a single global HVS, with multiple output FIFOs that can
 * be consumed by the PVs. This file just manages the resources for
 * the HVS, while the vc4_crtc.c code actually drives HVS setup for
 * each CRTC.
 */

#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/platform_device.h>

#include <drm/drm_atomic_helper.h>
#include <drm/drm_vblank.h>

#include "vc4_drv.h"
#include "vc4_regs.h"

static const struct debugfs_reg32 hvs_regs[] = {
	VC4_REG32(SCALER_DISPCTRL),
	VC4_REG32(SCALER_DISPSTAT),
	VC4_REG32(SCALER_DISPID),
	VC4_REG32(SCALER_DISPECTRL),
	VC4_REG32(SCALER_DISPPROF),
	VC4_REG32(SCALER_DISPDITHER),
	VC4_REG32(SCALER_DISPEOLN),
	VC4_REG32(SCALER_DISPLIST0),
	VC4_REG32(SCALER_DISPLIST1),
	VC4_REG32(SCALER_DISPLIST2),
	VC4_REG32(SCALER_DISPLSTAT),
	VC4_REG32(SCALER_DISPLACT0),
	VC4_REG32(SCALER_DISPLACT1),
	VC4_REG32(SCALER_DISPLACT2),
	VC4_REG32(SCALER_DISPCTRL0),
	VC4_REG32(SCALER_DISPBKGND0),
	VC4_REG32(SCALER_DISPSTAT0),
	VC4_REG32(SCALER_DISPBASE0),
	VC4_REG32(SCALER_DISPCTRL1),
	VC4_REG32(SCALER_DISPBKGND1),
	VC4_REG32(SCALER_DISPSTAT1),
	VC4_REG32(SCALER_DISPBASE1),
	VC4_REG32(SCALER_DISPCTRL2),
	VC4_REG32(SCALER_DISPBKGND2),
	VC4_REG32(SCALER_DISPSTAT2),
	VC4_REG32(SCALER_DISPBASE2),
	VC4_REG32(SCALER_DISPALPHA2),
	VC4_REG32(SCALER_OLEDOFFS),
	VC4_REG32(SCALER_OLEDCOEF0),
	VC4_REG32(SCALER_OLEDCOEF1),
	VC4_REG32(SCALER_OLEDCOEF2),
};

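/*
 * Dump the HVS register set and the first 64 words of display list
 * memory to the kernel log. Words below HVS_BOOTLOADER_DLIST_END are
 * tagged "B" (bootloader dlist area), the rest "D".
 */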
void vc4_hvs_dump_state(struct vc4_hvs *hvs)
{
	struct drm_printer p = drm_info_printer(&hvs->pdev->dev);
	int i;

	drm_print_regset32(&p, &hvs->regset);

	DRM_INFO("HVS ctx:\n");
	for (i = 0; i < 64; i += 4) {
		DRM_INFO("0x%08x (%s): 0x%08x 0x%08x 0x%08x 0x%08x\n",
			 i * 4, i < HVS_BOOTLOADER_DLIST_END ? "B" : "D",
			 readl((u32 __iomem *)hvs->dlist + i + 0),
			 readl((u32 __iomem *)hvs->dlist + i + 1),
			 readl((u32 __iomem *)hvs->dlist + i + 2),
			 readl((u32 __iomem *)hvs->dlist + i + 3));
	}
}

static int vc4_hvs_debugfs_underrun(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct drm_printer p = drm_seq_file_printer(m);

	drm_printf(&p, "%d\n", atomic_read(&vc4->underrun));

	return 0;
}

/* The filter kernel is composed of dwords each containing 3 9-bit
 * signed integers packed next to each other.
 */
#define VC4_INT_TO_COEFF(coeff) (coeff & 0x1ff)
#define VC4_PPF_FILTER_WORD(c0, c1, c2)				\
	((((c0) & 0x1ff) << 0) |				\
	 (((c1) & 0x1ff) << 9) |				\
	 (((c2) & 0x1ff) << 18))
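
/* Worked example of the packing above: VC4_PPF_FILTER_WORD(1, 2, 3)
 * evaluates to (1 << 0) | (2 << 9) | (3 << 18) == 0x000c0401.
 */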

/* The whole filter kernel is arranged as the coefficients 0-16 going
 * up, then a pad, then 17-31 going down and reversed within the
 * dwords. This means that a linear phase kernel (where it's
 * symmetrical at the boundary between 15 and 16) has the last 5
 * dwords matching the first 5, but reversed.
 */
#define VC4_LINEAR_PHASE_KERNEL(c0, c1, c2, c3, c4, c5, c6, c7, c8,	\
				c9, c10, c11, c12, c13, c14, c15)	\
	{VC4_PPF_FILTER_WORD(c0, c1, c2),				\
	 VC4_PPF_FILTER_WORD(c3, c4, c5),				\
	 VC4_PPF_FILTER_WORD(c6, c7, c8),				\
	 VC4_PPF_FILTER_WORD(c9, c10, c11),				\
	 VC4_PPF_FILTER_WORD(c12, c13, c14),				\
	 VC4_PPF_FILTER_WORD(c15, c15, 0)}

#define VC4_LINEAR_PHASE_KERNEL_DWORDS 6
#define VC4_KERNEL_DWORDS (VC4_LINEAR_PHASE_KERNEL_DWORDS * 2 - 1)

/* Recommended B=1/3, C=1/3 filter choice from Mitchell/Netravali.
 * http://www.cs.utexas.edu/~fussell/courses/cs384g/lectures/mitchell/Mitchell.pdf
 */
static const u32 mitchell_netravali_1_3_1_3_kernel[] =
	VC4_LINEAR_PHASE_KERNEL(0, -2, -6, -8, -10, -8, -3, 2, 18,
				50, 82, 119, 155, 187, 213, 227);

static int vc4_hvs_upload_linear_kernel(struct vc4_hvs *hvs,
					struct drm_mm_node *space,
					const u32 *kernel)
{
	int ret, i;
	u32 __iomem *dst_kernel;

	ret = drm_mm_insert_node(&hvs->dlist_mm, space, VC4_KERNEL_DWORDS);
	if (ret) {
		DRM_ERROR("Failed to allocate space for filter kernel: %d\n",
			  ret);
		return ret;
	}

	dst_kernel = hvs->dlist + space->start;

	for (i = 0; i < VC4_KERNEL_DWORDS; i++) {
		if (i < VC4_LINEAR_PHASE_KERNEL_DWORDS)
			writel(kernel[i], &dst_kernel[i]);
		else {
			writel(kernel[VC4_KERNEL_DWORDS - i - 1],
			       &dst_kernel[i]);
		}
	}

	return 0;
}

static void vc4_hvs_lut_load(struct vc4_hvs *hvs,
			     struct vc4_crtc *vc4_crtc)
{
	struct drm_crtc *crtc = &vc4_crtc->base;
	struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc->state);
	u32 i;

	/* The LUT memory is laid out with each HVS channel in order,
	 * each of which takes 256 writes for R, 256 for G, then 256
	 * for B.
	 */
	HVS_WRITE(SCALER_GAMADDR,
		  SCALER_GAMADDR_AUTOINC |
		  (vc4_state->assigned_channel * 3 * crtc->gamma_size));

	for (i = 0; i < crtc->gamma_size; i++)
		HVS_WRITE(SCALER_GAMDATA, vc4_crtc->lut_r[i]);
	for (i = 0; i < crtc->gamma_size; i++)
		HVS_WRITE(SCALER_GAMDATA, vc4_crtc->lut_g[i]);
	for (i = 0; i < crtc->gamma_size; i++)
		HVS_WRITE(SCALER_GAMDATA, vc4_crtc->lut_b[i]);
}

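/*
 * Convert the 16-bit-per-channel gamma LUT attached to the CRTC state
 * into the 8-bit entries the HVS uses, then load it into the gamma
 * SRAM.
 */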
static void vc4_hvs_update_gamma_lut(struct vc4_hvs *hvs,
				     struct vc4_crtc *vc4_crtc)
{
	struct drm_crtc_state *crtc_state = vc4_crtc->base.state;
	struct drm_color_lut *lut = crtc_state->gamma_lut->data;
	u32 length = drm_color_lut_size(crtc_state->gamma_lut);
	u32 i;

	for (i = 0; i < length; i++) {
		vc4_crtc->lut_r[i] = drm_color_lut_extract(lut[i].red, 8);
		vc4_crtc->lut_g[i] = drm_color_lut_extract(lut[i].green, 8);
		vc4_crtc->lut_b[i] = drm_color_lut_extract(lut[i].blue, 8);
	}

	vc4_hvs_lut_load(hvs, vc4_crtc);
}

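/*
 * Read back the frame counter for an output FIFO from the
 * SCALER_DISPSTAT1/2 registers.
 */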
u8 vc4_hvs_get_fifo_frame_count(struct vc4_hvs *hvs, unsigned int fifo)
{
	u8 field = 0;

	switch (fifo) {
	case 0:
		field = VC4_GET_FIELD(HVS_READ(SCALER_DISPSTAT1),
				      SCALER_DISPSTAT1_FRCNT0);
		break;
	case 1:
		field = VC4_GET_FIELD(HVS_READ(SCALER_DISPSTAT1),
				      SCALER_DISPSTAT1_FRCNT1);
		break;
	case 2:
		field = VC4_GET_FIELD(HVS_READ(SCALER_DISPSTAT2),
				      SCALER_DISPSTAT2_FRCNT2);
		break;
	}

	return field;
}

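/*
 * Map an HVS output to the FIFO feeding it. On VC4 the mapping is a
 * fixed 1:1; on VC5 outputs 2-5 sit behind muxes, so the relevant mux
 * field is read back, and -EPIPE is returned when the mux setting
 * means no FIFO is driving that output.
 */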
int vc4_hvs_get_fifo_from_output(struct vc4_hvs *hvs, unsigned int output)
{
	struct vc4_dev *vc4 = hvs->vc4;
	u32 reg;
	int ret;

	if (!vc4->is_vc5)
		return output;

	switch (output) {
	case 0:
		return 0;

	case 1:
		return 1;

	case 2:
		reg = HVS_READ(SCALER_DISPECTRL);
		ret = FIELD_GET(SCALER_DISPECTRL_DSP2_MUX_MASK, reg);
		if (ret == 0)
			return 2;

		return 0;

	case 3:
		reg = HVS_READ(SCALER_DISPCTRL);
		ret = FIELD_GET(SCALER_DISPCTRL_DSP3_MUX_MASK, reg);
		if (ret == 3)
			return -EPIPE;

		return ret;

	case 4:
		reg = HVS_READ(SCALER_DISPEOLN);
		ret = FIELD_GET(SCALER_DISPEOLN_DSP4_MUX_MASK, reg);
		if (ret == 3)
			return -EPIPE;

		return ret;

	case 5:
		reg = HVS_READ(SCALER_DISPDITHER);
		ret = FIELD_GET(SCALER_DISPDITHER_DSP5_MUX_MASK, reg);
		if (ret == 3)
			return -EPIPE;

		return ret;

	default:
		return -EPIPE;
	}
}

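/*
 * Reset and enable an HVS channel for the given mode: program the
 * display size, select oneshot mode when feeding the transposer, set
 * the background/interlace flags and reload the gamma LUT.
 */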
static int vc4_hvs_init_channel(struct vc4_hvs *hvs, struct drm_crtc *crtc,
				struct drm_display_mode *mode, bool oneshot)
{
	struct vc4_dev *vc4 = hvs->vc4;
	struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
	struct vc4_crtc_state *vc4_crtc_state = to_vc4_crtc_state(crtc->state);
	unsigned int chan = vc4_crtc_state->assigned_channel;
	bool interlace = mode->flags & DRM_MODE_FLAG_INTERLACE;
	u32 dispbkgndx;
	u32 dispctrl;

	HVS_WRITE(SCALER_DISPCTRLX(chan), 0);
	HVS_WRITE(SCALER_DISPCTRLX(chan), SCALER_DISPCTRLX_RESET);
	HVS_WRITE(SCALER_DISPCTRLX(chan), 0);

	/* Turn on the scaler, which will wait for vstart to start
	 * compositing.
	 * When feeding the transposer, we should operate in oneshot
	 * mode.
	 */
	dispctrl = SCALER_DISPCTRLX_ENABLE;

	if (!vc4->is_vc5)
		dispctrl |= VC4_SET_FIELD(mode->hdisplay,
					  SCALER_DISPCTRLX_WIDTH) |
			    VC4_SET_FIELD(mode->vdisplay,
					  SCALER_DISPCTRLX_HEIGHT) |
			    (oneshot ? SCALER_DISPCTRLX_ONESHOT : 0);
	else
		dispctrl |= VC4_SET_FIELD(mode->hdisplay,
					  SCALER5_DISPCTRLX_WIDTH) |
			    VC4_SET_FIELD(mode->vdisplay,
					  SCALER5_DISPCTRLX_HEIGHT) |
			    (oneshot ? SCALER5_DISPCTRLX_ONESHOT : 0);

	HVS_WRITE(SCALER_DISPCTRLX(chan), dispctrl);

	dispbkgndx = HVS_READ(SCALER_DISPBKGNDX(chan));
	dispbkgndx &= ~SCALER_DISPBKGND_GAMMA;
	dispbkgndx &= ~SCALER_DISPBKGND_INTERLACE;

	HVS_WRITE(SCALER_DISPBKGNDX(chan), dispbkgndx |
		  SCALER_DISPBKGND_AUTOHS |
		  ((!vc4->is_vc5) ? SCALER_DISPBKGND_GAMMA : 0) |
		  (interlace ? SCALER_DISPBKGND_INTERLACE : 0));

	/* Reload the LUT, since the SRAMs would have been disabled if
	 * all CRTCs had SCALER_DISPBKGND_GAMMA unset at once.
	 */
	vc4_hvs_lut_load(hvs, vc4_crtc);

	return 0;
}

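/*
 * Disable an HVS channel: if it is still enabled, reset it, clear its
 * enable bit, and check that it ended up disabled with an empty FIFO.
 */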
void vc4_hvs_stop_channel(struct vc4_hvs *hvs, unsigned int chan)
{
	if (!(HVS_READ(SCALER_DISPCTRLX(chan)) & SCALER_DISPCTRLX_ENABLE))
		return;

	HVS_WRITE(SCALER_DISPCTRLX(chan),
		  HVS_READ(SCALER_DISPCTRLX(chan)) | SCALER_DISPCTRLX_RESET);
	HVS_WRITE(SCALER_DISPCTRLX(chan),
		  HVS_READ(SCALER_DISPCTRLX(chan)) & ~SCALER_DISPCTRLX_ENABLE);

	/* Once we leave, the scaler should be disabled and its fifo empty. */
	WARN_ON_ONCE(HVS_READ(SCALER_DISPCTRLX(chan)) & SCALER_DISPCTRLX_RESET);

	WARN_ON_ONCE(VC4_GET_FIELD(HVS_READ(SCALER_DISPSTATX(chan)),
				   SCALER_DISPSTATX_MODE) !=
		     SCALER_DISPSTATX_MODE_DISABLED);

	WARN_ON_ONCE((HVS_READ(SCALER_DISPSTATX(chan)) &
		      (SCALER_DISPSTATX_FULL | SCALER_DISPSTATX_EMPTY)) !=
		     SCALER_DISPSTATX_EMPTY);
}

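/*
 * Validate the CRTC state: reject configurations feeding more than one
 * encoder and reserve enough display list memory for every active
 * plane plus the terminating SCALER_CTL0_END word.
 */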
int vc4_hvs_atomic_check(struct drm_crtc *crtc, struct drm_atomic_state *state)
{
	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
	struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc_state);
	struct drm_device *dev = crtc->dev;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct drm_plane *plane;
	unsigned long flags;
	const struct drm_plane_state *plane_state;
	u32 dlist_count = 0;
	int ret;

	/* The pixelvalve can only feed one encoder (and encoders are
	 * 1:1 with connectors.)
	 */
	if (hweight32(crtc_state->connector_mask) > 1)
		return -EINVAL;

	drm_atomic_crtc_state_for_each_plane_state(plane, plane_state, crtc_state)
		dlist_count += vc4_plane_dlist_size(plane_state);

	dlist_count++; /* Account for SCALER_CTL0_END. */

	spin_lock_irqsave(&vc4->hvs->mm_lock, flags);
	ret = drm_mm_insert_node(&vc4->hvs->dlist_mm, &vc4_state->mm,
				 dlist_count);
	spin_unlock_irqrestore(&vc4->hvs->mm_lock, flags);
	if (ret)
		return ret;

	return 0;
}

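/* Point the channel's display list register at the new list. */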
static void vc4_hvs_install_dlist(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_hvs *hvs = vc4->hvs;
	struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc->state);

	HVS_WRITE(SCALER_DISPLISTX(vc4_state->assigned_channel),
		  vc4_state->mm.start);
}

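/*
 * Record the new display list as current and, if a pageflip event is
 * pending, hand it over so it can be completed from the vblank
 * interrupt once the hardware has latched the new list.
 */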
static void vc4_hvs_update_dlist(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
	struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc->state);
	unsigned long flags;

	if (crtc->state->event) {
		crtc->state->event->pipe = drm_crtc_index(crtc);

		WARN_ON(drm_crtc_vblank_get(crtc) != 0);

		spin_lock_irqsave(&dev->event_lock, flags);

		if (!vc4_crtc->feeds_txp || vc4_state->txp_armed) {
			vc4_crtc->event = crtc->state->event;
			crtc->state->event = NULL;
		}

		spin_unlock_irqrestore(&dev->event_lock, flags);
	}

	spin_lock_irqsave(&vc4_crtc->irq_lock, flags);
	vc4_crtc->current_dlist = vc4_state->mm.start;
	spin_unlock_irqrestore(&vc4_crtc->irq_lock, flags);
}

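/*
 * Record the channel assigned to this CRTC under the irq lock, so that
 * code running in interrupt context sees a channel/dlist pair that is
 * consistent with the state being committed.
 */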
void vc4_hvs_atomic_begin(struct drm_crtc *crtc,
			  struct drm_atomic_state *state)
{
	struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
	struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc->state);
	unsigned long flags;

	spin_lock_irqsave(&vc4_crtc->irq_lock, flags);
	vc4_crtc->current_hvs_channel = vc4_state->assigned_channel;
	spin_unlock_irqrestore(&vc4_crtc->irq_lock, flags);
}

void vc4_hvs_atomic_enable(struct drm_crtc *crtc,
			   struct drm_atomic_state *state)
{
	struct drm_device *dev = crtc->dev;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct drm_display_mode *mode = &crtc->state->adjusted_mode;
	struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
	bool oneshot = vc4_crtc->feeds_txp;

	vc4_hvs_install_dlist(crtc);
	vc4_hvs_update_dlist(crtc);
	vc4_hvs_init_channel(vc4->hvs, crtc, mode, oneshot);
}

void vc4_hvs_atomic_disable(struct drm_crtc *crtc,
			    struct drm_atomic_state *state)
{
	struct drm_device *dev = crtc->dev;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct drm_crtc_state *old_state = drm_atomic_get_old_crtc_state(state, crtc);
	struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(old_state);
	unsigned int chan = vc4_state->assigned_channel;

	vc4_hvs_stop_channel(vc4->hvs, chan);
}

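/*
 * Write the active planes' display list entries into the slot reserved
 * at atomic_check time, terminate the list, update background fill and
 * gamma state, and switch the channel over to the new list if the CRTC
 * stays active.
 */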
void vc4_hvs_atomic_flush(struct drm_crtc *crtc,
			  struct drm_atomic_state *state)
{
	struct drm_crtc_state *old_state = drm_atomic_get_old_crtc_state(state,
									 crtc);
	struct drm_device *dev = crtc->dev;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_hvs *hvs = vc4->hvs;
	struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
	struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc->state);
	unsigned int channel = vc4_state->assigned_channel;
	struct drm_plane *plane;
	struct vc4_plane_state *vc4_plane_state;
	bool debug_dump_regs = false;
	bool enable_bg_fill = false;
	u32 __iomem *dlist_start = vc4->hvs->dlist + vc4_state->mm.start;
	u32 __iomem *dlist_next = dlist_start;

	if (debug_dump_regs) {
		DRM_INFO("CRTC %d HVS before:\n", drm_crtc_index(crtc));
		vc4_hvs_dump_state(hvs);
	}

	/* Copy all the active planes' dlist contents to the hardware dlist. */
	drm_atomic_crtc_for_each_plane(plane, crtc) {
		/* Is this the first active plane? */
		if (dlist_next == dlist_start) {
			/* We need to enable background fill when a plane
			 * could be alpha blending from the background, i.e.
			 * where no other plane is underneath. It suffices to
			 * consider the first active plane here since we set
			 * needs_bg_fill such that either the first plane
			 * already needs it or all planes on top blend from
			 * the first or a lower plane.
			 */
			vc4_plane_state = to_vc4_plane_state(plane->state);
			enable_bg_fill = vc4_plane_state->needs_bg_fill;
		}

		dlist_next += vc4_plane_write_dlist(plane, dlist_next);
	}

	writel(SCALER_CTL0_END, dlist_next);
	dlist_next++;

	WARN_ON_ONCE(dlist_next - dlist_start != vc4_state->mm.size);

	if (enable_bg_fill)
		/* This sets a black background color fill, as is the case
		 * with other DRM drivers.
		 */
		HVS_WRITE(SCALER_DISPBKGNDX(channel),
			  HVS_READ(SCALER_DISPBKGNDX(channel)) |
			  SCALER_DISPBKGND_FILL);

	/* Only update DISPLIST if the CRTC was already running and is not
	 * being disabled.
	 * vc4_crtc_enable() takes care of updating the dlist just after
	 * re-enabling VBLANK interrupts and before enabling the engine.
	 * If the CRTC is being disabled, there's no point in updating this
	 * information.
	 */
	if (crtc->state->active && old_state->active) {
		vc4_hvs_install_dlist(crtc);
		vc4_hvs_update_dlist(crtc);
	}

	if (crtc->state->color_mgmt_changed) {
		u32 dispbkgndx = HVS_READ(SCALER_DISPBKGNDX(channel));

		if (crtc->state->gamma_lut) {
			vc4_hvs_update_gamma_lut(hvs, vc4_crtc);
			dispbkgndx |= SCALER_DISPBKGND_GAMMA;
		} else {
			/* Unsetting DISPBKGND_GAMMA skips the gamma lut step
			 * in hardware, which is the same as a linear lut that
			 * DRM expects us to use in absence of a user lut.
			 */
			dispbkgndx &= ~SCALER_DISPBKGND_GAMMA;
		}
		HVS_WRITE(SCALER_DISPBKGNDX(channel), dispbkgndx);
	}

	if (debug_dump_regs) {
		DRM_INFO("CRTC %d HVS after:\n", drm_crtc_index(crtc));
		vc4_hvs_dump_state(hvs);
	}
}

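/* Mask the underrun interrupt for a channel in SCALER_DISPCTRL. */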
void vc4_hvs_mask_underrun(struct vc4_hvs *hvs, int channel)
{
	u32 dispctrl = HVS_READ(SCALER_DISPCTRL);

	dispctrl &= ~SCALER_DISPCTRL_DSPEISLUR(channel);

	HVS_WRITE(SCALER_DISPCTRL, dispctrl);
}

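/*
 * Re-enable the underrun interrupt for a channel, clearing any stale
 * underrun status first.
 */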
void vc4_hvs_unmask_underrun(struct vc4_hvs *hvs, int channel)
{
	u32 dispctrl = HVS_READ(SCALER_DISPCTRL);

	dispctrl |= SCALER_DISPCTRL_DSPEISLUR(channel);

	HVS_WRITE(SCALER_DISPSTAT,
		  SCALER_DISPSTAT_EUFLOW(channel));
	HVS_WRITE(SCALER_DISPCTRL, dispctrl);
}

static void vc4_hvs_report_underrun(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	atomic_inc(&vc4->underrun);
	DRM_DEV_ERROR(dev->dev, "HVS underrun\n");
}

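/*
 * The HVS has a single interrupt line; scan every channel for
 * underruns that are both flagged and currently unmasked, then ack the
 * per-channel status bits.
 */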
static irqreturn_t vc4_hvs_irq_handler(int irq, void *data)
{
	struct drm_device *dev = data;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_hvs *hvs = vc4->hvs;
	irqreturn_t irqret = IRQ_NONE;
	int channel;
	u32 control;
	u32 status;

	status = HVS_READ(SCALER_DISPSTAT);
	control = HVS_READ(SCALER_DISPCTRL);

	for (channel = 0; channel < SCALER_CHANNELS_COUNT; channel++) {
		/* Interrupt masking is not always honored, so check it here. */
		if (status & SCALER_DISPSTAT_EUFLOW(channel) &&
		    control & SCALER_DISPCTRL_DSPEISLUR(channel)) {
			vc4_hvs_mask_underrun(hvs, channel);
			vc4_hvs_report_underrun(dev);

			irqret = IRQ_HANDLED;
		}
	}

	/* Clear every per-channel interrupt flag. */
	HVS_WRITE(SCALER_DISPSTAT, SCALER_DISPSTAT_IRQMASK(0) |
				   SCALER_DISPSTAT_IRQMASK(1) |
				   SCALER_DISPSTAT_IRQMASK(2));

	return irqret;
}

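/*
 * Component bind: map the register space, enable the core clock on
 * VC5, set up the dlist and LBM allocators, upload the filter kernel,
 * program the output muxes to their initial routing and enable the
 * scaler with underrun interrupts.
 */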
static int vc4_hvs_bind(struct device *dev, struct device *master, void *data)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct drm_device *drm = dev_get_drvdata(master);
	struct vc4_dev *vc4 = to_vc4_dev(drm);
	struct vc4_hvs *hvs = NULL;
	int ret;
	u32 dispctrl;
	u32 reg;

	hvs = devm_kzalloc(&pdev->dev, sizeof(*hvs), GFP_KERNEL);
	if (!hvs)
		return -ENOMEM;

	hvs->vc4 = vc4;
	hvs->pdev = pdev;

	hvs->regs = vc4_ioremap_regs(pdev, 0);
	if (IS_ERR(hvs->regs))
		return PTR_ERR(hvs->regs);

	hvs->regset.base = hvs->regs;
	hvs->regset.regs = hvs_regs;
	hvs->regset.nregs = ARRAY_SIZE(hvs_regs);

	if (vc4->is_vc5) {
		hvs->core_clk = devm_clk_get(&pdev->dev, NULL);
		if (IS_ERR(hvs->core_clk)) {
			dev_err(&pdev->dev, "Couldn't get core clock\n");
			return PTR_ERR(hvs->core_clk);
		}

		ret = clk_prepare_enable(hvs->core_clk);
		if (ret) {
			dev_err(&pdev->dev, "Couldn't enable the core clock\n");
			return ret;
		}
	}

	if (!vc4->is_vc5)
		hvs->dlist = hvs->regs + SCALER_DLIST_START;
	else
		hvs->dlist = hvs->regs + SCALER5_DLIST_START;

	spin_lock_init(&hvs->mm_lock);

	/* Set up the HVS display list memory manager. We never
	 * overwrite the setup from the bootloader (just 128b out of
	 * our 16K), since we don't want to scramble the screen when
	 * transitioning from the firmware's boot setup to runtime.
	 */
	drm_mm_init(&hvs->dlist_mm,
		    HVS_BOOTLOADER_DLIST_END,
		    (SCALER_DLIST_SIZE >> 2) - HVS_BOOTLOADER_DLIST_END);

	/* Set up the HVS LBM memory manager. We could have some more
	 * complicated data structure that allowed reuse of LBM areas
	 * between planes when they don't overlap on the screen, but
	 * for now we just allocate globally.
	 */
	if (!vc4->is_vc5)
		/* 48k words of 2x12-bit pixels */
		drm_mm_init(&hvs->lbm_mm, 0, 48 * 1024);
	else
		/* 60k words of 4x12-bit pixels */
		drm_mm_init(&hvs->lbm_mm, 0, 60 * 1024);

	/* Upload filter kernels. We only have the one for now, so we
	 * keep it around for the lifetime of the driver.
	 */
	ret = vc4_hvs_upload_linear_kernel(hvs,
					   &hvs->mitchell_netravali_filter,
					   mitchell_netravali_1_3_1_3_kernel);
	if (ret)
		return ret;

	vc4->hvs = hvs;

	reg = HVS_READ(SCALER_DISPECTRL);
	reg &= ~SCALER_DISPECTRL_DSP2_MUX_MASK;
	HVS_WRITE(SCALER_DISPECTRL,
		  reg | VC4_SET_FIELD(0, SCALER_DISPECTRL_DSP2_MUX));

	reg = HVS_READ(SCALER_DISPCTRL);
	reg &= ~SCALER_DISPCTRL_DSP3_MUX_MASK;
	HVS_WRITE(SCALER_DISPCTRL,
		  reg | VC4_SET_FIELD(3, SCALER_DISPCTRL_DSP3_MUX));

	reg = HVS_READ(SCALER_DISPEOLN);
	reg &= ~SCALER_DISPEOLN_DSP4_MUX_MASK;
	HVS_WRITE(SCALER_DISPEOLN,
		  reg | VC4_SET_FIELD(3, SCALER_DISPEOLN_DSP4_MUX));

	reg = HVS_READ(SCALER_DISPDITHER);
	reg &= ~SCALER_DISPDITHER_DSP5_MUX_MASK;
	HVS_WRITE(SCALER_DISPDITHER,
		  reg | VC4_SET_FIELD(3, SCALER_DISPDITHER_DSP5_MUX));

	dispctrl = HVS_READ(SCALER_DISPCTRL);

	dispctrl |= SCALER_DISPCTRL_ENABLE;
	dispctrl |= SCALER_DISPCTRL_DISPEIRQ(0) |
		    SCALER_DISPCTRL_DISPEIRQ(1) |
		    SCALER_DISPCTRL_DISPEIRQ(2);

	dispctrl &= ~(SCALER_DISPCTRL_DMAEIRQ |
		      SCALER_DISPCTRL_SLVWREIRQ |
		      SCALER_DISPCTRL_SLVRDEIRQ |
		      SCALER_DISPCTRL_DSPEIEOF(0) |
		      SCALER_DISPCTRL_DSPEIEOF(1) |
		      SCALER_DISPCTRL_DSPEIEOF(2) |
		      SCALER_DISPCTRL_DSPEIEOLN(0) |
		      SCALER_DISPCTRL_DSPEIEOLN(1) |
		      SCALER_DISPCTRL_DSPEIEOLN(2) |
		      SCALER_DISPCTRL_DSPEISLUR(0) |
		      SCALER_DISPCTRL_DSPEISLUR(1) |
		      SCALER_DISPCTRL_DSPEISLUR(2) |
		      SCALER_DISPCTRL_SCLEIRQ);

	HVS_WRITE(SCALER_DISPCTRL, dispctrl);

	ret = devm_request_irq(dev, platform_get_irq(pdev, 0),
			       vc4_hvs_irq_handler, 0, "vc4 hvs", drm);
	if (ret)
		return ret;

	vc4_debugfs_add_regset32(drm, "hvs_regs", &hvs->regset);
	vc4_debugfs_add_file(drm, "hvs_underrun", vc4_hvs_debugfs_underrun,
			     NULL);

	return 0;
}

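/*
 * Component unbind: release the filter kernel allocation, tear down
 * the allocators and disable the core clock.
 */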
static void vc4_hvs_unbind(struct device *dev, struct device *master,
			   void *data)
{
	struct drm_device *drm = dev_get_drvdata(master);
	struct vc4_dev *vc4 = to_vc4_dev(drm);
	struct vc4_hvs *hvs = vc4->hvs;

	if (drm_mm_node_allocated(&vc4->hvs->mitchell_netravali_filter))
		drm_mm_remove_node(&vc4->hvs->mitchell_netravali_filter);

	drm_mm_takedown(&vc4->hvs->dlist_mm);
	drm_mm_takedown(&vc4->hvs->lbm_mm);

	clk_disable_unprepare(hvs->core_clk);

	vc4->hvs = NULL;
}

static const struct component_ops vc4_hvs_ops = {
	.bind = vc4_hvs_bind,
	.unbind = vc4_hvs_unbind,
};

static int vc4_hvs_dev_probe(struct platform_device *pdev)
{
	return component_add(&pdev->dev, &vc4_hvs_ops);
}

static int vc4_hvs_dev_remove(struct platform_device *pdev)
{
	component_del(&pdev->dev, &vc4_hvs_ops);
	return 0;
}

static const struct of_device_id vc4_hvs_dt_match[] = {
	{ .compatible = "brcm,bcm2711-hvs" },
	{ .compatible = "brcm,bcm2835-hvs" },
	{}
};

struct platform_driver vc4_hvs_driver = {
	.probe = vc4_hvs_dev_probe,
	.remove = vc4_hvs_dev_remove,
	.driver = {
		.name = "vc4_hvs",
		.of_match_table = vc4_hvs_dt_match,
	},
};