// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2015 Free Electrons
 * Copyright (C) 2015 NextThing Co
 *
 * Maxime Ripard <maxime.ripard@free-electrons.com>
 */

#include <linux/component.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of_graph.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/reset.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>

#include "sun4i_backend.h"
#include "sun4i_drv.h"
#include "sun4i_frontend.h"
#include "sun4i_layer.h"
#include "sunxi_engine.h"

struct sun4i_backend_quirks {
	/* backend <-> TCON muxing selection done in backend */
	bool needs_output_muxing;

	/* alpha at the lowest z position is not always supported */
	bool supports_lowest_plane_alpha;
};

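/*
 * Output color-correction (RGB -> YUV) matrix coefficients, written to the
 * SUN4I_BACKEND_OCRCOEF registers below. The values are presumably encoded
 * in the hardware's fixed-point format, with the 0x3exx/0x3fxx entries
 * standing for negative coefficients.
 */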
static const u32 sunxi_rgb2yuv_coef[12] = {
	0x00000107, 0x00000204, 0x00000064, 0x00000108,
	0x00003f69, 0x00003ed6, 0x000001c1, 0x00000808,
	0x000001c1, 0x00003e88, 0x00003fb8, 0x00000808
};

static void sun4i_backend_apply_color_correction(struct sunxi_engine *engine)
{
	int i;

	DRM_DEBUG_DRIVER("Applying RGB to YUV color correction\n");

	/* Set color correction */
	regmap_write(engine->regs, SUN4I_BACKEND_OCCTL_REG,
		     SUN4I_BACKEND_OCCTL_ENABLE);

	for (i = 0; i < 12; i++)
		regmap_write(engine->regs, SUN4I_BACKEND_OCRCOEF_REG(i),
			     sunxi_rgb2yuv_coef[i]);
}

static void sun4i_backend_disable_color_correction(struct sunxi_engine *engine)
{
	DRM_DEBUG_DRIVER("Disabling color correction\n");

	/* Disable color correction */
	regmap_update_bits(engine->regs, SUN4I_BACKEND_OCCTL_REG,
			   SUN4I_BACKEND_OCCTL_ENABLE, 0);
}

static void sun4i_backend_commit(struct sunxi_engine *engine)
{
	DRM_DEBUG_DRIVER("Committing changes\n");

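	/*
	 * Writing LOADCTL (with automatic reloading kept disabled) asks the
	 * hardware to latch the pending shadow register update; the bit is
	 * presumably cleared again once the new configuration has been taken
	 * into account, which is what sun4i_backend_atomic_begin() polls for.
	 */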
	regmap_write(engine->regs, SUN4I_BACKEND_REGBUFFCTL_REG,
		     SUN4I_BACKEND_REGBUFFCTL_AUTOLOAD_DIS |
		     SUN4I_BACKEND_REGBUFFCTL_LOADCTL);
}

void sun4i_backend_layer_enable(struct sun4i_backend *backend,
				int layer, bool enable)
{
	u32 val;

	DRM_DEBUG_DRIVER("%sabling layer %d\n", enable ? "En" : "Dis",
			 layer);

	if (enable)
		val = SUN4I_BACKEND_MODCTL_LAY_EN(layer);
	else
		val = 0;

	regmap_update_bits(backend->engine.regs, SUN4I_BACKEND_MODCTL_REG,
			   SUN4I_BACKEND_MODCTL_LAY_EN(layer), val);
}

static int sun4i_backend_drm_format_to_layer(u32 format, u32 *mode)
{
	switch (format) {
	case DRM_FORMAT_ARGB8888:
		*mode = SUN4I_BACKEND_LAY_FBFMT_ARGB8888;
		break;

	case DRM_FORMAT_ARGB4444:
		*mode = SUN4I_BACKEND_LAY_FBFMT_ARGB4444;
		break;

	case DRM_FORMAT_ARGB1555:
		*mode = SUN4I_BACKEND_LAY_FBFMT_ARGB1555;
		break;

	case DRM_FORMAT_RGBA5551:
		*mode = SUN4I_BACKEND_LAY_FBFMT_RGBA5551;
		break;

	case DRM_FORMAT_RGBA4444:
		*mode = SUN4I_BACKEND_LAY_FBFMT_RGBA4444;
		break;

	case DRM_FORMAT_XRGB8888:
		*mode = SUN4I_BACKEND_LAY_FBFMT_XRGB8888;
		break;

	case DRM_FORMAT_RGB888:
		*mode = SUN4I_BACKEND_LAY_FBFMT_RGB888;
		break;

	case DRM_FORMAT_RGB565:
		*mode = SUN4I_BACKEND_LAY_FBFMT_RGB565;
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

static const uint32_t sun4i_backend_formats[] = {
	DRM_FORMAT_ARGB1555,
	DRM_FORMAT_ARGB4444,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_RGB888,
	DRM_FORMAT_RGBA4444,
	DRM_FORMAT_RGBA5551,
	DRM_FORMAT_UYVY,
	DRM_FORMAT_VYUY,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_YUYV,
	DRM_FORMAT_YVYU,
};

bool sun4i_backend_format_is_supported(uint32_t fmt, uint64_t modifier)
{
	unsigned int i;

	if (modifier != DRM_FORMAT_MOD_LINEAR)
		return false;

	for (i = 0; i < ARRAY_SIZE(sun4i_backend_formats); i++)
		if (sun4i_backend_formats[i] == fmt)
			return true;

	return false;
}

int sun4i_backend_update_layer_coord(struct sun4i_backend *backend,
				     int layer, struct drm_plane *plane)
{
	struct drm_plane_state *state = plane->state;

	DRM_DEBUG_DRIVER("Updating layer %d\n", layer);

	/* Set height and width */
	DRM_DEBUG_DRIVER("Layer size W: %u H: %u\n",
			 state->crtc_w, state->crtc_h);
	regmap_write(backend->engine.regs, SUN4I_BACKEND_LAYSIZE_REG(layer),
		     SUN4I_BACKEND_LAYSIZE(state->crtc_w,
					   state->crtc_h));

	/* Set base coordinates */
	DRM_DEBUG_DRIVER("Layer coordinates X: %d Y: %d\n",
			 state->crtc_x, state->crtc_y);
	regmap_write(backend->engine.regs, SUN4I_BACKEND_LAYCOOR_REG(layer),
		     SUN4I_BACKEND_LAYCOOR(state->crtc_x,
					   state->crtc_y));

	return 0;
}

static int sun4i_backend_update_yuv_format(struct sun4i_backend *backend,
					   int layer, struct drm_plane *plane)
{
	struct drm_plane_state *state = plane->state;
	struct drm_framebuffer *fb = state->fb;
	const struct drm_format_info *format = fb->format;
	const uint32_t fmt = format->format;
	u32 val = SUN4I_BACKEND_IYUVCTL_EN;
	int i;

	for (i = 0; i < ARRAY_SIZE(sunxi_bt601_yuv2rgb_coef); i++)
		regmap_write(backend->engine.regs,
			     SUN4I_BACKEND_YGCOEF_REG(i),
			     sunxi_bt601_yuv2rgb_coef[i]);

	/*
	 * We should do that only for a single plane, but the
	 * framebuffer's atomic_check has our back on this.
	 */
	regmap_update_bits(backend->engine.regs, SUN4I_BACKEND_ATTCTL_REG0(layer),
			   SUN4I_BACKEND_ATTCTL_REG0_LAY_YUVEN,
			   SUN4I_BACKEND_ATTCTL_REG0_LAY_YUVEN);

	/* TODO: Add support for the multi-planar YUV formats */
	if (drm_format_info_is_yuv_packed(format) &&
	    drm_format_info_is_yuv_sampling_422(format))
		val |= SUN4I_BACKEND_IYUVCTL_FBFMT_PACKED_YUV422;
	else
		DRM_DEBUG_DRIVER("Unsupported YUV format (0x%x)\n", fmt);

	/*
	 * Allwinner seems to list the pixel sequence from right to left, while
	 * DRM lists it from left to right.
	 */
	switch (fmt) {
	case DRM_FORMAT_YUYV:
		val |= SUN4I_BACKEND_IYUVCTL_FBPS_VYUY;
		break;
	case DRM_FORMAT_YVYU:
		val |= SUN4I_BACKEND_IYUVCTL_FBPS_UYVY;
		break;
	case DRM_FORMAT_UYVY:
		val |= SUN4I_BACKEND_IYUVCTL_FBPS_YVYU;
		break;
	case DRM_FORMAT_VYUY:
		val |= SUN4I_BACKEND_IYUVCTL_FBPS_YUYV;
		break;
	default:
		DRM_DEBUG_DRIVER("Unsupported YUV pixel sequence (0x%x)\n",
				 fmt);
	}

	regmap_write(backend->engine.regs, SUN4I_BACKEND_IYUVCTL_REG, val);

	return 0;
}

int sun4i_backend_update_layer_formats(struct sun4i_backend *backend,
				       int layer, struct drm_plane *plane)
{
	struct drm_plane_state *state = plane->state;
	struct drm_framebuffer *fb = state->fb;
	u32 val;
	int ret;

	/* Clear the YUV mode */
	regmap_update_bits(backend->engine.regs, SUN4I_BACKEND_ATTCTL_REG0(layer),
			   SUN4I_BACKEND_ATTCTL_REG0_LAY_YUVEN, 0);

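	/*
	 * The DRM plane alpha is a 16-bit value (DRM_BLEND_ALPHA_OPAQUE is
	 * 0xffff), while the backend's global alpha field appears to be
	 * 8 bits wide, hence the shift. Global alpha is only enabled when
	 * the plane is not fully opaque.
	 */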
	val = SUN4I_BACKEND_ATTCTL_REG0_LAY_GLBALPHA(state->alpha >> 8);
	if (state->alpha != DRM_BLEND_ALPHA_OPAQUE)
		val |= SUN4I_BACKEND_ATTCTL_REG0_LAY_GLBALPHA_EN;
	regmap_update_bits(backend->engine.regs,
			   SUN4I_BACKEND_ATTCTL_REG0(layer),
			   SUN4I_BACKEND_ATTCTL_REG0_LAY_GLBALPHA_MASK |
			   SUN4I_BACKEND_ATTCTL_REG0_LAY_GLBALPHA_EN,
			   val);

	if (fb->format->is_yuv)
		return sun4i_backend_update_yuv_format(backend, layer, plane);

	ret = sun4i_backend_drm_format_to_layer(fb->format->format, &val);
	if (ret) {
		DRM_DEBUG_DRIVER("Invalid format\n");
		return ret;
	}

	regmap_update_bits(backend->engine.regs,
			   SUN4I_BACKEND_ATTCTL_REG1(layer),
			   SUN4I_BACKEND_ATTCTL_REG1_LAY_FBFMT, val);

	return 0;
}

int sun4i_backend_update_layer_frontend(struct sun4i_backend *backend,
					int layer, uint32_t fmt)
{
	u32 val;
	int ret;

	ret = sun4i_backend_drm_format_to_layer(fmt, &val);
	if (ret) {
		DRM_DEBUG_DRIVER("Invalid format\n");
		return ret;
	}

	regmap_update_bits(backend->engine.regs,
			   SUN4I_BACKEND_ATTCTL_REG0(layer),
			   SUN4I_BACKEND_ATTCTL_REG0_LAY_VDOEN,
			   SUN4I_BACKEND_ATTCTL_REG0_LAY_VDOEN);

	regmap_update_bits(backend->engine.regs,
			   SUN4I_BACKEND_ATTCTL_REG1(layer),
			   SUN4I_BACKEND_ATTCTL_REG1_LAY_FBFMT, val);

	return 0;
}

static int sun4i_backend_update_yuv_buffer(struct sun4i_backend *backend,
					   struct drm_framebuffer *fb,
					   dma_addr_t paddr)
{
	/* TODO: Add support for the multi-planar YUV formats */
	DRM_DEBUG_DRIVER("Setting packed YUV buffer address to %pad\n", &paddr);
	regmap_write(backend->engine.regs, SUN4I_BACKEND_IYUVADD_REG(0), paddr);

	DRM_DEBUG_DRIVER("Layer line width: %d bits\n", fb->pitches[0] * 8);
	regmap_write(backend->engine.regs, SUN4I_BACKEND_IYUVLINEWIDTH_REG(0),
		     fb->pitches[0] * 8);

	return 0;
}

int sun4i_backend_update_layer_buffer(struct sun4i_backend *backend,
				      int layer, struct drm_plane *plane)
{
	struct drm_plane_state *state = plane->state;
	struct drm_framebuffer *fb = state->fb;
	u32 lo_paddr, hi_paddr;
	dma_addr_t paddr;

	/* Set the line width */
	DRM_DEBUG_DRIVER("Layer line width: %d bits\n", fb->pitches[0] * 8);
	regmap_write(backend->engine.regs,
		     SUN4I_BACKEND_LAYLINEWIDTH_REG(layer),
		     fb->pitches[0] * 8);

	/* Get the start of the displayed memory */
	paddr = drm_fb_cma_get_gem_addr(fb, state, 0);
	DRM_DEBUG_DRIVER("Setting buffer address to %pad\n", &paddr);

	if (fb->format->is_yuv)
		return sun4i_backend_update_yuv_buffer(backend, fb, paddr);

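	/*
	 * The layer framebuffer address registers take the address expressed
	 * in bits: the low 32 bits of (paddr << 3) go into the L32ADD
	 * register, and what remains (paddr >> 29) goes into this layer's
	 * nibble of the shared H4ADD register.
	 */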
	/* Write the 32 lower bits of the address (in bits) */
	lo_paddr = paddr << 3;
	DRM_DEBUG_DRIVER("Setting address lower bits to 0x%x\n", lo_paddr);
	regmap_write(backend->engine.regs,
		     SUN4I_BACKEND_LAYFB_L32ADD_REG(layer),
		     lo_paddr);

	/* And the upper bits */
	hi_paddr = paddr >> 29;
	DRM_DEBUG_DRIVER("Setting address high bits to 0x%x\n", hi_paddr);
	regmap_update_bits(backend->engine.regs, SUN4I_BACKEND_LAYFB_H4ADD_REG,
			   SUN4I_BACKEND_LAYFB_H4ADD_MSK(layer),
			   SUN4I_BACKEND_LAYFB_H4ADD(layer, hi_paddr));

	return 0;
}

int sun4i_backend_update_layer_zpos(struct sun4i_backend *backend, int layer,
				    struct drm_plane *plane)
{
	struct drm_plane_state *state = plane->state;
	struct sun4i_layer_state *p_state = state_to_sun4i_layer_state(state);
	unsigned int priority = state->normalized_zpos;
	unsigned int pipe = p_state->pipe;

	DRM_DEBUG_DRIVER("Setting layer %d's priority to %d and pipe %d\n",
			 layer, priority, pipe);
	regmap_update_bits(backend->engine.regs, SUN4I_BACKEND_ATTCTL_REG0(layer),
			   SUN4I_BACKEND_ATTCTL_REG0_LAY_PIPESEL_MASK |
			   SUN4I_BACKEND_ATTCTL_REG0_LAY_PRISEL_MASK,
			   SUN4I_BACKEND_ATTCTL_REG0_LAY_PIPESEL(p_state->pipe) |
			   SUN4I_BACKEND_ATTCTL_REG0_LAY_PRISEL(priority));

	return 0;
}

void sun4i_backend_cleanup_layer(struct sun4i_backend *backend,
				 int layer)
{
	regmap_update_bits(backend->engine.regs,
			   SUN4I_BACKEND_ATTCTL_REG0(layer),
			   SUN4I_BACKEND_ATTCTL_REG0_LAY_VDOEN |
			   SUN4I_BACKEND_ATTCTL_REG0_LAY_YUVEN, 0);
}

static bool sun4i_backend_plane_uses_scaler(struct drm_plane_state *state)
{
	u16 src_h = state->src_h >> 16;
	u16 src_w = state->src_w >> 16;

	DRM_DEBUG_DRIVER("Input size %dx%d, output size %dx%d\n",
			 src_w, src_h, state->crtc_w, state->crtc_h);

	if ((state->crtc_h != src_h) || (state->crtc_w != src_w))
		return true;

	return false;
}

static bool sun4i_backend_plane_uses_frontend(struct drm_plane_state *state)
{
	struct sun4i_layer *layer = plane_to_sun4i_layer(state->plane);
	struct sun4i_backend *backend = layer->backend;
	uint32_t format = state->fb->format->format;
	uint64_t modifier = state->fb->modifier;

	if (IS_ERR(backend->frontend))
		return false;

	if (!sun4i_frontend_format_is_supported(format, modifier))
		return false;

	if (!sun4i_backend_format_is_supported(format, modifier))
		return true;

	/*
	 * TODO: The backend alone allows 2x and 4x integer scaling, including
	 * support for an alpha component (which the frontend doesn't support).
	 * Use the backend directly instead of the frontend in this case, with
	 * another test to return false.
	 */

	if (sun4i_backend_plane_uses_scaler(state))
		return true;

	/*
	 * Here the format is supported by both the frontend and the backend
	 * and no frontend scaling is required, so use the backend directly.
	 */
	return false;
}

static bool sun4i_backend_plane_is_supported(struct drm_plane_state *state,
					     bool *uses_frontend)
{
	if (sun4i_backend_plane_uses_frontend(state)) {
		*uses_frontend = true;
		return true;
	}

	*uses_frontend = false;

	/* Scaling is not supported without the frontend. */
	if (sun4i_backend_plane_uses_scaler(state))
		return false;

	return true;
}

static void sun4i_backend_atomic_begin(struct sunxi_engine *engine,
				       struct drm_crtc_state *old_state)
{
	u32 val;

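	/*
	 * Wait for the previous commit to be fully applied: the LOADCTL bit
	 * is expected to be cleared by the hardware once it has consumed the
	 * pending register update, so poll it before letting the new atomic
	 * state touch the shadow registers.
	 */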
	WARN_ON(regmap_read_poll_timeout(engine->regs,
					 SUN4I_BACKEND_REGBUFFCTL_REG,
					 val, !(val & SUN4I_BACKEND_REGBUFFCTL_LOADCTL),
					 100, 50000));
}

static int sun4i_backend_atomic_check(struct sunxi_engine *engine,
				      struct drm_crtc_state *crtc_state)
{
	struct drm_plane_state *plane_states[SUN4I_BACKEND_NUM_LAYERS] = { 0 };
	struct sun4i_backend *backend = engine_to_sun4i_backend(engine);
	struct drm_atomic_state *state = crtc_state->state;
	struct drm_device *drm = state->dev;
	struct drm_plane *plane;
	unsigned int num_planes = 0;
	unsigned int num_alpha_planes = 0;
	unsigned int num_frontend_planes = 0;
	unsigned int num_alpha_planes_max = 1;
	unsigned int num_yuv_planes = 0;
	unsigned int current_pipe = 0;
	unsigned int i;

	DRM_DEBUG_DRIVER("Starting checking our planes\n");

	if (!crtc_state->planes_changed)
		return 0;

	drm_for_each_plane_mask(plane, drm, crtc_state->plane_mask) {
		struct drm_plane_state *plane_state =
			drm_atomic_get_plane_state(state, plane);
		struct sun4i_layer_state *layer_state =
			state_to_sun4i_layer_state(plane_state);
		struct drm_framebuffer *fb = plane_state->fb;

		if (!sun4i_backend_plane_is_supported(plane_state,
						      &layer_state->uses_frontend))
			return -EINVAL;

		if (layer_state->uses_frontend) {
			DRM_DEBUG_DRIVER("Using the frontend for plane %d\n",
					 plane->index);
			num_frontend_planes++;
		} else {
			if (fb->format->is_yuv) {
				DRM_DEBUG_DRIVER("Plane FB format is YUV\n");
				num_yuv_planes++;
			}
		}

		DRM_DEBUG_DRIVER("Plane FB format is %p4cc\n",
				 &fb->format->format);
		if (fb->format->has_alpha || (plane_state->alpha != DRM_BLEND_ALPHA_OPAQUE))
			num_alpha_planes++;

		DRM_DEBUG_DRIVER("Plane zpos is %d\n",
				 plane_state->normalized_zpos);

		/* Sort our planes by Zpos */
		plane_states[plane_state->normalized_zpos] = plane_state;

		num_planes++;
	}

	/* All our planes were disabled, bail out */
	if (!num_planes)
		return 0;

	/*
	 * The hardware is a bit unusual here.
	 *
	 * Even though it supports 4 layers, it does the composition
	 * in two separate steps.
	 *
	 * The first one is assigning a layer to one of its two
	 * pipes. If more than 1 layer is assigned to the same pipe,
	 * and if pixels overlap, the pipe will take the pixel from
	 * the layer with the highest priority.
	 *
	 * The second step is the actual alpha blending, which takes
	 * the two pipes as input, and uses the potential alpha
	 * component to do the transparency between the two.
	 *
	 * This two-step scenario makes us unable to guarantee a
	 * robust alpha blending between the 4 layers in all
	 * situations, since this means that we need to have one layer
	 * with alpha at the lowest position of our two pipes.
	 *
	 * However, we cannot even do that on every platform, since
	 * the hardware has a bug where the lowest plane of the lowest
	 * pipe (pipe 0, priority 0), if it has any alpha, will
	 * discard the pixel data entirely and just display the pixels
	 * in the background color (black by default).
	 *
	 * This means that on the affected platforms, we effectively
	 * have only three valid configurations with alpha, all of
	 * them with the alpha being on pipe1 with the lowest
	 * position, which can be 1, 2 or 3 depending on the number of
	 * planes and their zpos.
	 */

	/* For platforms that are not affected by the issue described above. */
	if (backend->quirks->supports_lowest_plane_alpha)
		num_alpha_planes_max++;

	if (num_alpha_planes > num_alpha_planes_max) {
		DRM_DEBUG_DRIVER("Too many planes with alpha, rejecting...\n");
		return -EINVAL;
	}

	/* We can't have an alpha plane at the lowest position */
	if (!backend->quirks->supports_lowest_plane_alpha &&
	    (plane_states[0]->alpha != DRM_BLEND_ALPHA_OPAQUE))
		return -EINVAL;

	for (i = 1; i < num_planes; i++) {
		struct drm_plane_state *p_state = plane_states[i];
		struct drm_framebuffer *fb = p_state->fb;
		struct sun4i_layer_state *s_state = state_to_sun4i_layer_state(p_state);

		/*
		 * The only alpha position is the lowest plane of the
		 * second pipe.
		 */
		if (fb->format->has_alpha || (p_state->alpha != DRM_BLEND_ALPHA_OPAQUE))
			current_pipe++;

		s_state->pipe = current_pipe;
	}

	/* We can only have a single YUV plane at a time */
	if (num_yuv_planes > SUN4I_BACKEND_NUM_YUV_PLANES) {
		DRM_DEBUG_DRIVER("Too many planes with YUV, rejecting...\n");
		return -EINVAL;
	}

	if (num_frontend_planes > SUN4I_BACKEND_NUM_FRONTEND_LAYERS) {
		DRM_DEBUG_DRIVER("Too many planes going through the frontend, rejecting\n");
		return -EINVAL;
	}

	DRM_DEBUG_DRIVER("State valid with %u planes, %u alpha, %u video, %u YUV\n",
			 num_planes, num_alpha_planes, num_frontend_planes,
			 num_yuv_planes);

	return 0;
}

static void sun4i_backend_vblank_quirk(struct sunxi_engine *engine)
{
	struct sun4i_backend *backend = engine_to_sun4i_backend(engine);
	struct sun4i_frontend *frontend = backend->frontend;

	if (!frontend)
		return;

	/*
	 * In a teardown scenario with the frontend involved, we have
	 * to keep the frontend enabled until the next vblank, and
	 * only then disable it.
	 *
	 * This is due to the fact that the backend will not take into
	 * account the new configuration (with the plane that used to
	 * be fed by the frontend now disabled) until we write to the
	 * commit bit and the hardware fetches the new configuration
	 * during the next vblank.
	 *
	 * So we keep the frontend around in order to prevent any
	 * visual artifacts.
	 */
	spin_lock(&backend->frontend_lock);
	if (backend->frontend_teardown) {
		sun4i_frontend_exit(frontend);
		backend->frontend_teardown = false;
	}
	spin_unlock(&backend->frontend_lock);
}

static void sun4i_backend_mode_set(struct sunxi_engine *engine,
				   const struct drm_display_mode *mode)
{
	bool interlaced = !!(mode->flags & DRM_MODE_FLAG_INTERLACE);

	DRM_DEBUG_DRIVER("Updating global size W: %u H: %u\n",
			 mode->hdisplay, mode->vdisplay);

	regmap_write(engine->regs, SUN4I_BACKEND_DISSIZE_REG,
		     SUN4I_BACKEND_DISSIZE(mode->hdisplay, mode->vdisplay));

	regmap_update_bits(engine->regs, SUN4I_BACKEND_MODCTL_REG,
			   SUN4I_BACKEND_MODCTL_ITLMOD_EN,
			   interlaced ? SUN4I_BACKEND_MODCTL_ITLMOD_EN : 0);

	DRM_DEBUG_DRIVER("Switching display backend interlaced mode %s\n",
			 interlaced ? "on" : "off");
}

static int sun4i_backend_init_sat(struct device *dev)
{
	struct sun4i_backend *backend = dev_get_drvdata(dev);
	int ret;

	backend->sat_reset = devm_reset_control_get(dev, "sat");
	if (IS_ERR(backend->sat_reset)) {
		dev_err(dev, "Couldn't get the SAT reset line\n");
		return PTR_ERR(backend->sat_reset);
	}

	ret = reset_control_deassert(backend->sat_reset);
	if (ret) {
		dev_err(dev, "Couldn't deassert the SAT reset line\n");
		return ret;
	}

	backend->sat_clk = devm_clk_get(dev, "sat");
	if (IS_ERR(backend->sat_clk)) {
		dev_err(dev, "Couldn't get our SAT clock\n");
		ret = PTR_ERR(backend->sat_clk);
		goto err_assert_reset;
	}

	ret = clk_prepare_enable(backend->sat_clk);
	if (ret) {
		dev_err(dev, "Couldn't enable the SAT clock\n");
		/* Re-assert the reset line instead of leaking it. */
		goto err_assert_reset;
	}

	return 0;

err_assert_reset:
	reset_control_assert(backend->sat_reset);
	return ret;
}

static int sun4i_backend_free_sat(struct device *dev)
{
	struct sun4i_backend *backend = dev_get_drvdata(dev);

	clk_disable_unprepare(backend->sat_clk);
	reset_control_assert(backend->sat_reset);

	return 0;
}

/*
 * The display backend can take video output from the display frontend, or
 * the display enhancement unit on the A80, as input for one of its layers.
 * This relationship within the display pipeline is encoded in the device
 * tree with of_graph, and we use it here to figure out which backend, if
 * there are 2 or more, we are currently probing. The number would be in
 * the "reg" property of the upstream output port endpoint.
 */
static int sun4i_backend_of_get_id(struct device_node *node)
{
	struct device_node *ep, *remote;
	struct of_endpoint of_ep;

	/* Input port is 0, and we want the first endpoint. */
	ep = of_graph_get_endpoint_by_regs(node, 0, -1);
	if (!ep)
		return -EINVAL;

	remote = of_graph_get_remote_endpoint(ep);
	of_node_put(ep);
	if (!remote)
		return -EINVAL;

	of_graph_parse_endpoint(remote, &of_ep);
	of_node_put(remote);
	return of_ep.id;
}

/* TODO: This needs to take multiple pipelines into account */
static struct sun4i_frontend *sun4i_backend_find_frontend(struct sun4i_drv *drv,
							   struct device_node *node)
{
	struct device_node *port, *ep, *remote;
	struct sun4i_frontend *frontend;

	port = of_graph_get_port_by_id(node, 0);
	if (!port)
		return ERR_PTR(-EINVAL);

	for_each_available_child_of_node(port, ep) {
		remote = of_graph_get_remote_port_parent(ep);
		if (!remote)
			continue;
		of_node_put(remote);

		/* does this node match any registered engines? */
		list_for_each_entry(frontend, &drv->frontend_list, list) {
			if (remote == frontend->node) {
				of_node_put(port);
				of_node_put(ep);
				return frontend;
			}
		}
	}
	of_node_put(port);
	return ERR_PTR(-EINVAL);
}

static const struct sunxi_engine_ops sun4i_backend_engine_ops = {
	.atomic_begin = sun4i_backend_atomic_begin,
	.atomic_check = sun4i_backend_atomic_check,
	.commit = sun4i_backend_commit,
	.layers_init = sun4i_layers_init,
	.apply_color_correction = sun4i_backend_apply_color_correction,
	.disable_color_correction = sun4i_backend_disable_color_correction,
	.vblank_quirk = sun4i_backend_vblank_quirk,
	.mode_set = sun4i_backend_mode_set,
};

static const struct regmap_config sun4i_backend_regmap_config = {
	.reg_bits = 32,
	.val_bits = 32,
	.reg_stride = 4,
	.max_register = 0x5800,
};

static int sun4i_backend_bind(struct device *dev, struct device *master,
			      void *data)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct drm_device *drm = data;
	struct sun4i_drv *drv = drm->dev_private;
	struct sun4i_backend *backend;
	const struct sun4i_backend_quirks *quirks;
	void __iomem *regs;
	int i, ret;

	backend = devm_kzalloc(dev, sizeof(*backend), GFP_KERNEL);
	if (!backend)
		return -ENOMEM;
	dev_set_drvdata(dev, backend);
	spin_lock_init(&backend->frontend_lock);

	if (of_find_property(dev->of_node, "interconnects", NULL)) {
		/*
		 * This assumes we have the same DMA constraints for all the
		 * devices in our pipeline (all the backends, but also the
		 * frontends). This sounds bad, but it has always been the case
		 * for us, and DRM doesn't do per-device allocation either, so
		 * we would need to fix DRM first...
		 */
		ret = of_dma_configure(drm->dev, dev->of_node, true);
		if (ret)
			return ret;
	}

	backend->engine.node = dev->of_node;
	backend->engine.ops = &sun4i_backend_engine_ops;
	backend->engine.id = sun4i_backend_of_get_id(dev->of_node);
	if (backend->engine.id < 0)
		return backend->engine.id;

	backend->frontend = sun4i_backend_find_frontend(drv, dev->of_node);
	if (IS_ERR(backend->frontend))
		dev_warn(dev, "Couldn't find matching frontend, frontend features disabled\n");

	regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(regs))
		return PTR_ERR(regs);

	backend->reset = devm_reset_control_get(dev, NULL);
	if (IS_ERR(backend->reset)) {
		dev_err(dev, "Couldn't get our reset line\n");
		return PTR_ERR(backend->reset);
	}

	ret = reset_control_deassert(backend->reset);
	if (ret) {
		dev_err(dev, "Couldn't deassert our reset line\n");
		return ret;
	}

	backend->bus_clk = devm_clk_get(dev, "ahb");
	if (IS_ERR(backend->bus_clk)) {
		dev_err(dev, "Couldn't get the backend bus clock\n");
		ret = PTR_ERR(backend->bus_clk);
		goto err_assert_reset;
	}
	clk_prepare_enable(backend->bus_clk);

	backend->mod_clk = devm_clk_get(dev, "mod");
	if (IS_ERR(backend->mod_clk)) {
		dev_err(dev, "Couldn't get the backend module clock\n");
		ret = PTR_ERR(backend->mod_clk);
		goto err_disable_bus_clk;
	}

	ret = clk_set_rate_exclusive(backend->mod_clk, 300000000);
	if (ret) {
		dev_err(dev, "Couldn't set the module clock frequency\n");
		goto err_disable_bus_clk;
	}

	clk_prepare_enable(backend->mod_clk);

	backend->ram_clk = devm_clk_get(dev, "ram");
	if (IS_ERR(backend->ram_clk)) {
		dev_err(dev, "Couldn't get the backend RAM clock\n");
		ret = PTR_ERR(backend->ram_clk);
		goto err_disable_mod_clk;
	}
	clk_prepare_enable(backend->ram_clk);

	if (of_device_is_compatible(dev->of_node,
				    "allwinner,sun8i-a33-display-backend")) {
		ret = sun4i_backend_init_sat(dev);
		if (ret) {
			dev_err(dev, "Couldn't init SAT resources\n");
			goto err_disable_ram_clk;
		}
	}

	backend->engine.regs = devm_regmap_init_mmio(dev, regs,
						     &sun4i_backend_regmap_config);
	if (IS_ERR(backend->engine.regs)) {
		dev_err(dev, "Couldn't create the backend regmap\n");
		return PTR_ERR(backend->engine.regs);
	}

	list_add_tail(&backend->engine.list, &drv->engine_list);

	/*
	 * Many of the backend's layer configuration registers have
	 * undefined default values. This poses a risk as we use
	 * regmap_update_bits in some places, and don't overwrite
	 * the whole register.
	 *
	 * Clear the registers here to have something predictable.
	 */
	for (i = 0x800; i < 0x1000; i += 4)
		regmap_write(backend->engine.regs, i, 0);

	/* Disable registers autoloading */
	regmap_write(backend->engine.regs, SUN4I_BACKEND_REGBUFFCTL_REG,
		     SUN4I_BACKEND_REGBUFFCTL_AUTOLOAD_DIS);

	/* Enable the backend */
	regmap_write(backend->engine.regs, SUN4I_BACKEND_MODCTL_REG,
		     SUN4I_BACKEND_MODCTL_DEBE_EN |
		     SUN4I_BACKEND_MODCTL_START_CTL);

	/* Set output selection if needed */
	quirks = of_device_get_match_data(dev);
	if (quirks->needs_output_muxing) {
		/*
		 * We assume there is no dynamic muxing of backends
		 * and TCONs, so we select the backend with the same ID.
		 *
		 * While dynamic selection might be interesting, since
		 * the CRTC is tied to the TCON, while the layers are
		 * tied to the backends, this means we would need to
		 * switch between groups of layers. There might not be
		 * a way to represent this constraint in DRM.
		 */
		regmap_update_bits(backend->engine.regs,
				   SUN4I_BACKEND_MODCTL_REG,
				   SUN4I_BACKEND_MODCTL_OUT_SEL,
				   (backend->engine.id
				    ? SUN4I_BACKEND_MODCTL_OUT_LCD1
				    : SUN4I_BACKEND_MODCTL_OUT_LCD0));
	}

	backend->quirks = quirks;

	return 0;

err_disable_ram_clk:
	clk_disable_unprepare(backend->ram_clk);
err_disable_mod_clk:
	clk_rate_exclusive_put(backend->mod_clk);
	clk_disable_unprepare(backend->mod_clk);
err_disable_bus_clk:
	clk_disable_unprepare(backend->bus_clk);
err_assert_reset:
	reset_control_assert(backend->reset);
	return ret;
}

static void sun4i_backend_unbind(struct device *dev, struct device *master,
				 void *data)
{
	struct sun4i_backend *backend = dev_get_drvdata(dev);

	list_del(&backend->engine.list);

	if (of_device_is_compatible(dev->of_node,
				    "allwinner,sun8i-a33-display-backend"))
		sun4i_backend_free_sat(dev);

	clk_disable_unprepare(backend->ram_clk);
	clk_rate_exclusive_put(backend->mod_clk);
	clk_disable_unprepare(backend->mod_clk);
	clk_disable_unprepare(backend->bus_clk);
	reset_control_assert(backend->reset);
}

static const struct component_ops sun4i_backend_ops = {
	.bind = sun4i_backend_bind,
	.unbind = sun4i_backend_unbind,
};

static int sun4i_backend_probe(struct platform_device *pdev)
{
	return component_add(&pdev->dev, &sun4i_backend_ops);
}

static int sun4i_backend_remove(struct platform_device *pdev)
{
	component_del(&pdev->dev, &sun4i_backend_ops);

	return 0;
}

static const struct sun4i_backend_quirks sun4i_backend_quirks = {
	.needs_output_muxing = true,
};

static const struct sun4i_backend_quirks sun5i_backend_quirks = {
};

static const struct sun4i_backend_quirks sun6i_backend_quirks = {
};

static const struct sun4i_backend_quirks sun7i_backend_quirks = {
	.needs_output_muxing = true,
};

static const struct sun4i_backend_quirks sun8i_a33_backend_quirks = {
	.supports_lowest_plane_alpha = true,
};

static const struct sun4i_backend_quirks sun9i_backend_quirks = {
};

static const struct of_device_id sun4i_backend_of_table[] = {
	{
		.compatible = "allwinner,sun4i-a10-display-backend",
		.data = &sun4i_backend_quirks,
	},
	{
		.compatible = "allwinner,sun5i-a13-display-backend",
		.data = &sun5i_backend_quirks,
	},
	{
		.compatible = "allwinner,sun6i-a31-display-backend",
		.data = &sun6i_backend_quirks,
	},
	{
		.compatible = "allwinner,sun7i-a20-display-backend",
		.data = &sun7i_backend_quirks,
	},
	{
		.compatible = "allwinner,sun8i-a23-display-backend",
		.data = &sun8i_a33_backend_quirks,
	},
	{
		.compatible = "allwinner,sun8i-a33-display-backend",
		.data = &sun8i_a33_backend_quirks,
	},
	{
		.compatible = "allwinner,sun9i-a80-display-backend",
		.data = &sun9i_backend_quirks,
	},
	{ }
};
MODULE_DEVICE_TABLE(of, sun4i_backend_of_table);

static struct platform_driver sun4i_backend_platform_driver = {
	.probe = sun4i_backend_probe,
	.remove = sun4i_backend_remove,
	.driver = {
		.name = "sun4i-backend",
		.of_match_table = sun4i_backend_of_table,
	},
};
module_platform_driver(sun4i_backend_platform_driver);

MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
MODULE_DESCRIPTION("Allwinner A10 Display Backend Driver");
MODULE_LICENSE("GPL");