// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015 MediaTek Inc.
 */

#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/mailbox_controller.h>
#include <linux/pm_runtime.h>
#include <linux/soc/mediatek/mtk-cmdq.h>
#include <linux/soc/mediatek/mtk-mmsys.h>
#include <linux/soc/mediatek/mtk-mutex.h>

#include <asm/barrier.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>

#include "mtk_drm_drv.h"
#include "mtk_drm_crtc.h"
#include "mtk_drm_ddp_comp.h"
#include "mtk_drm_gem.h"
#include "mtk_drm_plane.h"

/**
 * struct mtk_drm_crtc - MediaTek specific crtc structure.
 * @base: crtc object.
 * @enabled: records whether crtc_enable succeeded
 * @pending_needs_vblank: whether a vblank event must be sent once the
 *                        pending update has been applied
 * @event: pending vblank event, sent from the vblank interrupt handler
 * @planes: array of layer_nr drm_plane structures, one for each hardware layer
 * @layer_nr: number of hardware layers (planes) exposed by this crtc
 * @pending_planes: whether any plane has pending changes to be applied
 * @pending_async_planes: whether any plane has pending asynchronous changes
 * @cmdq_client: mailbox client used to send command packets to the GCE
 * @cmdq_handle: command packet rebuilt and sent on every configuration update
 * @cmdq_event: GCE event the command packet waits for before writing registers
 * @cmdq_vblank_cnt: vblank countdown used to detect CMDQ execution timeouts
 * @cb_blocking_queue: wait queue woken from the CMDQ completion callback
 * @mmsys_dev: pointer to the mmsys device for configuration registers
 * @mutex: handle to one of the ten disp_mutex streams
 * @ddp_comp_nr: number of components in ddp_comp
 * @ddp_comp: array of pointers to the mtk_ddp_comp structures used by this crtc
 * @hw_lock: lock protecting access to the display hardware
 * @config_updating: whether a configuration update is in progress
 */
struct mtk_drm_crtc {
	struct drm_crtc base;
	bool enabled;

	bool pending_needs_vblank;
	struct drm_pending_vblank_event *event;

	struct drm_plane *planes;
	unsigned int layer_nr;
	bool pending_planes;
	bool pending_async_planes;

#if IS_REACHABLE(CONFIG_MTK_CMDQ)
	struct cmdq_client cmdq_client;
	struct cmdq_pkt cmdq_handle;
	u32 cmdq_event;
	u32 cmdq_vblank_cnt;
	wait_queue_head_t cb_blocking_queue;
#endif

	struct device *mmsys_dev;
	struct mtk_mutex *mutex;
	unsigned int ddp_comp_nr;
	struct mtk_ddp_comp **ddp_comp;

	/* lock for display hardware access */
	struct mutex hw_lock;
	bool config_updating;
};

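/*
 * struct mtk_crtc_state - MediaTek specific crtc state. The pending_* fields
 * buffer the new mode parameters until they can be committed to the hardware.
 */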
struct mtk_crtc_state {
	struct drm_crtc_state base;

	bool pending_config;
	unsigned int pending_width;
	unsigned int pending_height;
	unsigned int pending_vrefresh;
};

static inline struct mtk_drm_crtc *to_mtk_crtc(struct drm_crtc *c)
{
	return container_of(c, struct mtk_drm_crtc, base);
}

static inline struct mtk_crtc_state *to_mtk_crtc_state(struct drm_crtc_state *s)
{
	return container_of(s, struct mtk_crtc_state, base);
}

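/*
 * Send the pending vblank event to userspace and drop the vblank reference
 * taken by mtk_drm_crtc_atomic_begin().
 */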
static void mtk_drm_crtc_finish_page_flip(struct mtk_drm_crtc *mtk_crtc)
{
	struct drm_crtc *crtc = &mtk_crtc->base;
	unsigned long flags;

	spin_lock_irqsave(&crtc->dev->event_lock, flags);
	drm_crtc_send_vblank_event(crtc, mtk_crtc->event);
	drm_crtc_vblank_put(crtc);
	mtk_crtc->event = NULL;
	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
}

static void mtk_drm_finish_page_flip(struct mtk_drm_crtc *mtk_crtc)
{
	drm_crtc_handle_vblank(&mtk_crtc->base);
	if (!mtk_crtc->config_updating && mtk_crtc->pending_needs_vblank) {
		mtk_drm_crtc_finish_page_flip(mtk_crtc);
		mtk_crtc->pending_needs_vblank = false;
	}
}

#if IS_REACHABLE(CONFIG_MTK_CMDQ)
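/*
 * Allocate and DMA-map a command buffer for the packet. The packet itself is
 * embedded in struct mtk_drm_crtc, so only its buffer is managed here.
 */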
static int mtk_drm_cmdq_pkt_create(struct cmdq_client *client, struct cmdq_pkt *pkt,
				   size_t size)
{
	struct device *dev;
	dma_addr_t dma_addr;

	pkt->va_base = kzalloc(size, GFP_KERNEL);
	if (!pkt->va_base)
		return -ENOMEM;

	pkt->buf_size = size;
	pkt->cl = (void *)client;

	dev = client->chan->mbox->dev;
	dma_addr = dma_map_single(dev, pkt->va_base, pkt->buf_size,
				  DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma_addr)) {
		dev_err(dev, "dma map failed, size=%zu\n", size);
		kfree(pkt->va_base);
		return -ENOMEM;
	}

	pkt->pa_base = dma_addr;

	return 0;
}

static void mtk_drm_cmdq_pkt_destroy(struct cmdq_pkt *pkt)
{
	struct cmdq_client *client = (struct cmdq_client *)pkt->cl;

	dma_unmap_single(client->chan->mbox->dev, pkt->pa_base, pkt->buf_size,
			 DMA_TO_DEVICE);
	kfree(pkt->va_base);
}
#endif

static void mtk_drm_crtc_destroy(struct drm_crtc *crtc)
{
	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
	int i;

	mtk_mutex_put(mtk_crtc->mutex);
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
	/* The command packet only exists if a mailbox channel was acquired. */
	if (mtk_crtc->cmdq_client.chan) {
		mtk_drm_cmdq_pkt_destroy(&mtk_crtc->cmdq_handle);
		mbox_free_channel(mtk_crtc->cmdq_client.chan);
		mtk_crtc->cmdq_client.chan = NULL;
	}
#endif

	for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
		struct mtk_ddp_comp *comp;

		comp = mtk_crtc->ddp_comp[i];
		mtk_ddp_comp_unregister_vblank_cb(comp);
	}

	drm_crtc_cleanup(crtc);
}

static void mtk_drm_crtc_reset(struct drm_crtc *crtc)
{
	struct mtk_crtc_state *state;

	if (crtc->state)
		__drm_atomic_helper_crtc_destroy_state(crtc->state);

	kfree(to_mtk_crtc_state(crtc->state));
	crtc->state = NULL;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (state)
		__drm_atomic_helper_crtc_reset(crtc, &state->base);
}

static struct drm_crtc_state *mtk_drm_crtc_duplicate_state(struct drm_crtc *crtc)
{
	struct mtk_crtc_state *state;

	state = kmalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);

	WARN_ON(state->base.crtc != crtc);
	state->base.crtc = crtc;
	state->pending_config = false;

	return &state->base;
}

static void mtk_drm_crtc_destroy_state(struct drm_crtc *crtc,
				       struct drm_crtc_state *state)
{
	__drm_atomic_helper_crtc_destroy_state(state);
	kfree(to_mtk_crtc_state(state));
}

static bool mtk_drm_crtc_mode_fixup(struct drm_crtc *crtc,
				    const struct drm_display_mode *mode,
				    struct drm_display_mode *adjusted_mode)
{
	/* Nothing to do here, but this callback is mandatory. */
	return true;
}

static void mtk_drm_crtc_mode_set_nofb(struct drm_crtc *crtc)
{
	struct mtk_crtc_state *state = to_mtk_crtc_state(crtc->state);

	state->pending_width = crtc->mode.hdisplay;
	state->pending_height = crtc->mode.vdisplay;
	state->pending_vrefresh = drm_mode_vrefresh(&crtc->mode);
	wmb();	/* Make sure the above parameters are set before update */
	state->pending_config = true;
}

static int mtk_crtc_ddp_clk_enable(struct mtk_drm_crtc *mtk_crtc)
{
	int ret;
	int i;

	for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
		ret = mtk_ddp_comp_clk_enable(mtk_crtc->ddp_comp[i]);
		if (ret) {
			DRM_ERROR("Failed to enable clock %d: %d\n", i, ret);
			goto err;
		}
	}

	return 0;
err:
	while (--i >= 0)
		mtk_ddp_comp_clk_disable(mtk_crtc->ddp_comp[i]);
	return ret;
}

static void mtk_crtc_ddp_clk_disable(struct mtk_drm_crtc *mtk_crtc)
{
	int i;

	for (i = 0; i < mtk_crtc->ddp_comp_nr; i++)
		mtk_ddp_comp_clk_disable(mtk_crtc->ddp_comp[i]);
}

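/*
 * Map a crtc-wide plane index to the component that owns the corresponding
 * hardware layer, returning the layer index local to that component.
 */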
static
struct mtk_ddp_comp *mtk_drm_ddp_comp_for_plane(struct drm_crtc *crtc,
						struct drm_plane *plane,
						unsigned int *local_layer)
{
	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
	struct mtk_ddp_comp *comp;
	int i, count = 0;
	unsigned int local_index = plane - mtk_crtc->planes;

	for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
		comp = mtk_crtc->ddp_comp[i];
		if (local_index < (count + mtk_ddp_comp_layer_nr(comp))) {
			*local_layer = local_index - count;
			return comp;
		}
		count += mtk_ddp_comp_layer_nr(comp);
	}

	WARN(1, "Failed to find component for plane %d\n", plane->index);
	return NULL;
}

#if IS_REACHABLE(CONFIG_MTK_CMDQ)
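/*
 * Mailbox completion callback, called once the GCE has executed the command
 * packet: the pending state is now in hardware, so clear the pending flags
 * and wake up any waiter in the disable path.
 */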
static void ddp_cmdq_cb(struct mbox_client *cl, void *mssg)
{
	struct cmdq_cb_data *data = mssg;
	struct cmdq_client *cmdq_cl = container_of(cl, struct cmdq_client, client);
	struct mtk_drm_crtc *mtk_crtc = container_of(cmdq_cl, struct mtk_drm_crtc, cmdq_client);
	struct mtk_crtc_state *state;
	unsigned int i;

	if (data->sta < 0)
		return;

	state = to_mtk_crtc_state(mtk_crtc->base.state);

	state->pending_config = false;

	if (mtk_crtc->pending_planes) {
		for (i = 0; i < mtk_crtc->layer_nr; i++) {
			struct drm_plane *plane = &mtk_crtc->planes[i];
			struct mtk_plane_state *plane_state;

			plane_state = to_mtk_plane_state(plane->state);

			plane_state->pending.config = false;
		}
		mtk_crtc->pending_planes = false;
	}

	if (mtk_crtc->pending_async_planes) {
		for (i = 0; i < mtk_crtc->layer_nr; i++) {
			struct drm_plane *plane = &mtk_crtc->planes[i];
			struct mtk_plane_state *plane_state;

			plane_state = to_mtk_plane_state(plane->state);

			plane_state->pending.async_config = false;
		}
		mtk_crtc->pending_async_planes = false;
	}

	mtk_crtc->cmdq_vblank_cnt = 0;
	wake_up(&mtk_crtc->cb_blocking_queue);
}
#endif

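/*
 * Bring up the display path: resume the power domain, enable the mutex and
 * component clocks, connect the components through mmsys, then configure and
 * start each component and apply the initial plane configuration.
 */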
static int mtk_crtc_ddp_hw_init(struct mtk_drm_crtc *mtk_crtc)
{
	struct drm_crtc *crtc = &mtk_crtc->base;
	struct drm_connector *connector;
	struct drm_encoder *encoder;
	struct drm_connector_list_iter conn_iter;
	unsigned int width, height, vrefresh, bpc = MTK_MAX_BPC;
	int ret;
	int i;

	if (WARN_ON(!crtc->state))
		return -EINVAL;

	width = crtc->state->adjusted_mode.hdisplay;
	height = crtc->state->adjusted_mode.vdisplay;
	vrefresh = drm_mode_vrefresh(&crtc->state->adjusted_mode);

	drm_for_each_encoder(encoder, crtc->dev) {
		if (encoder->crtc != crtc)
			continue;

		drm_connector_list_iter_begin(crtc->dev, &conn_iter);
		drm_for_each_connector_iter(connector, &conn_iter) {
			if (connector->encoder != encoder)
				continue;
			if (connector->display_info.bpc != 0 &&
			    bpc > connector->display_info.bpc)
				bpc = connector->display_info.bpc;
		}
		drm_connector_list_iter_end(&conn_iter);
	}

	ret = pm_runtime_resume_and_get(crtc->dev->dev);
	if (ret < 0) {
		DRM_ERROR("Failed to enable power domain: %d\n", ret);
		return ret;
	}

	ret = mtk_mutex_prepare(mtk_crtc->mutex);
	if (ret < 0) {
		DRM_ERROR("Failed to enable mutex clock: %d\n", ret);
		goto err_pm_runtime_put;
	}

	ret = mtk_crtc_ddp_clk_enable(mtk_crtc);
	if (ret < 0) {
		DRM_ERROR("Failed to enable component clocks: %d\n", ret);
		goto err_mutex_unprepare;
	}

	for (i = 0; i < mtk_crtc->ddp_comp_nr - 1; i++) {
		mtk_mmsys_ddp_connect(mtk_crtc->mmsys_dev,
				      mtk_crtc->ddp_comp[i]->id,
				      mtk_crtc->ddp_comp[i + 1]->id);
		mtk_mutex_add_comp(mtk_crtc->mutex,
				   mtk_crtc->ddp_comp[i]->id);
	}
	mtk_mutex_add_comp(mtk_crtc->mutex, mtk_crtc->ddp_comp[i]->id);
	mtk_mutex_enable(mtk_crtc->mutex);

	for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
		struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[i];

		if (i == 1)
			mtk_ddp_comp_bgclr_in_on(comp);

		mtk_ddp_comp_config(comp, width, height, vrefresh, bpc, NULL);
		mtk_ddp_comp_start(comp);
	}

	/* Initially configure all planes */
	for (i = 0; i < mtk_crtc->layer_nr; i++) {
		struct drm_plane *plane = &mtk_crtc->planes[i];
		struct mtk_plane_state *plane_state;
		struct mtk_ddp_comp *comp;
		unsigned int local_layer;

		plane_state = to_mtk_plane_state(plane->state);
		comp = mtk_drm_ddp_comp_for_plane(crtc, plane, &local_layer);
		if (comp)
			mtk_ddp_comp_layer_config(comp, local_layer,
						  plane_state, NULL);
	}

	return 0;

err_mutex_unprepare:
	mtk_mutex_unprepare(mtk_crtc->mutex);
err_pm_runtime_put:
	pm_runtime_put(crtc->dev->dev);
	return ret;
}

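/* Tear down the display path, reversing the steps of mtk_crtc_ddp_hw_init(). */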
static void mtk_crtc_ddp_hw_fini(struct mtk_drm_crtc *mtk_crtc)
{
	struct drm_device *drm = mtk_crtc->base.dev;
	struct drm_crtc *crtc = &mtk_crtc->base;
	int i;

	for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
		mtk_ddp_comp_stop(mtk_crtc->ddp_comp[i]);
		if (i == 1)
			mtk_ddp_comp_bgclr_in_off(mtk_crtc->ddp_comp[i]);
	}

	for (i = 0; i < mtk_crtc->ddp_comp_nr; i++)
		mtk_mutex_remove_comp(mtk_crtc->mutex,
				      mtk_crtc->ddp_comp[i]->id);
	mtk_mutex_disable(mtk_crtc->mutex);
	for (i = 0; i < mtk_crtc->ddp_comp_nr - 1; i++) {
		mtk_mmsys_ddp_disconnect(mtk_crtc->mmsys_dev,
					 mtk_crtc->ddp_comp[i]->id,
					 mtk_crtc->ddp_comp[i + 1]->id);
		mtk_mutex_remove_comp(mtk_crtc->mutex,
				      mtk_crtc->ddp_comp[i]->id);
	}
	mtk_mutex_remove_comp(mtk_crtc->mutex, mtk_crtc->ddp_comp[i]->id);
	mtk_crtc_ddp_clk_disable(mtk_crtc);
	mtk_mutex_unprepare(mtk_crtc->mutex);

	pm_runtime_put(drm->dev);

	if (crtc->state->event && !crtc->state->active) {
		spin_lock_irq(&crtc->dev->event_lock);
		drm_crtc_send_vblank_event(crtc, crtc->state->event);
		crtc->state->event = NULL;
		spin_unlock_irq(&crtc->dev->event_lock);
	}
}

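/*
 * Apply the pending crtc and plane configuration, either directly by CPU
 * when cmdq_handle is NULL, or by recording the register writes into the
 * command packet for later execution by the GCE.
 */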
static void mtk_crtc_ddp_config(struct drm_crtc *crtc,
				struct cmdq_pkt *cmdq_handle)
{
	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
	struct mtk_crtc_state *state = to_mtk_crtc_state(mtk_crtc->base.state);
	struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0];
	unsigned int i;
	unsigned int local_layer;

	/*
	 * TODO: instead of updating the registers here, we should prepare
	 * working registers in atomic_commit and let the hardware command
	 * queue update module registers on vblank.
	 */
	if (state->pending_config) {
		mtk_ddp_comp_config(comp, state->pending_width,
				    state->pending_height,
				    state->pending_vrefresh, 0,
				    cmdq_handle);

		if (!cmdq_handle)
			state->pending_config = false;
	}

	if (mtk_crtc->pending_planes) {
		for (i = 0; i < mtk_crtc->layer_nr; i++) {
			struct drm_plane *plane = &mtk_crtc->planes[i];
			struct mtk_plane_state *plane_state;

			plane_state = to_mtk_plane_state(plane->state);

			if (!plane_state->pending.config)
				continue;

			comp = mtk_drm_ddp_comp_for_plane(crtc, plane,
							  &local_layer);

			if (comp)
				mtk_ddp_comp_layer_config(comp, local_layer,
							  plane_state,
							  cmdq_handle);
			if (!cmdq_handle)
				plane_state->pending.config = false;
		}

		if (!cmdq_handle)
			mtk_crtc->pending_planes = false;
	}

	if (mtk_crtc->pending_async_planes) {
		for (i = 0; i < mtk_crtc->layer_nr; i++) {
			struct drm_plane *plane = &mtk_crtc->planes[i];
			struct mtk_plane_state *plane_state;

			plane_state = to_mtk_plane_state(plane->state);

			if (!plane_state->pending.async_config)
				continue;

			comp = mtk_drm_ddp_comp_for_plane(crtc, plane,
							  &local_layer);

			if (comp)
				mtk_ddp_comp_layer_config(comp, local_layer,
							  plane_state,
							  cmdq_handle);
			if (!cmdq_handle)
				plane_state->pending.async_config = false;
		}

		if (!cmdq_handle)
			mtk_crtc->pending_async_planes = false;
	}
}

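/*
 * Collect dirty plane state and flush the update to hardware, through the
 * mutex shadow registers and/or a CMDQ command packet, depending on what
 * the SoC provides.
 */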
static void mtk_drm_crtc_update_config(struct mtk_drm_crtc *mtk_crtc,
				       bool needs_vblank)
{
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
	struct cmdq_pkt *cmdq_handle = &mtk_crtc->cmdq_handle;
#endif
	struct drm_crtc *crtc = &mtk_crtc->base;
	struct mtk_drm_private *priv = crtc->dev->dev_private;
	unsigned int pending_planes = 0, pending_async_planes = 0;
	int i;

	mutex_lock(&mtk_crtc->hw_lock);
	mtk_crtc->config_updating = true;
	if (needs_vblank)
		mtk_crtc->pending_needs_vblank = true;

	for (i = 0; i < mtk_crtc->layer_nr; i++) {
		struct drm_plane *plane = &mtk_crtc->planes[i];
		struct mtk_plane_state *plane_state;

		plane_state = to_mtk_plane_state(plane->state);
		if (plane_state->pending.dirty) {
			plane_state->pending.config = true;
			plane_state->pending.dirty = false;
			pending_planes |= BIT(i);
		} else if (plane_state->pending.async_dirty) {
			plane_state->pending.async_config = true;
			plane_state->pending.async_dirty = false;
			pending_async_planes |= BIT(i);
		}
	}
	if (pending_planes)
		mtk_crtc->pending_planes = true;
	if (pending_async_planes)
		mtk_crtc->pending_async_planes = true;

	if (priv->data->shadow_register) {
		mtk_mutex_acquire(mtk_crtc->mutex);
		mtk_crtc_ddp_config(crtc, NULL);
		mtk_mutex_release(mtk_crtc->mutex);
	}
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
	if (mtk_crtc->cmdq_client.chan) {
		mbox_flush(mtk_crtc->cmdq_client.chan, 2000);
		cmdq_handle->cmd_buf_size = 0;
		cmdq_pkt_clear_event(cmdq_handle, mtk_crtc->cmdq_event);
		cmdq_pkt_wfe(cmdq_handle, mtk_crtc->cmdq_event, false);
		mtk_crtc_ddp_config(crtc, cmdq_handle);
		cmdq_pkt_finalize(cmdq_handle);
		dma_sync_single_for_device(mtk_crtc->cmdq_client.chan->mbox->dev,
					   cmdq_handle->pa_base,
					   cmdq_handle->cmd_buf_size,
					   DMA_TO_DEVICE);
		/*
		 * The CMDQ command should execute within the next 3 vblanks:
		 * there may occasionally be one vblank interrupt before the
		 * message is sent, and there is one more after the command
		 * completes, so if the command has not finished after 3
		 * vblank interrupts, treat it as a timeout.
		 */
		mtk_crtc->cmdq_vblank_cnt = 3;

		mbox_send_message(mtk_crtc->cmdq_client.chan, cmdq_handle);
		mbox_client_txdone(mtk_crtc->cmdq_client.chan, 0);
	}
#endif
	mtk_crtc->config_updating = false;
	mutex_unlock(&mtk_crtc->hw_lock);
}

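/*
 * Vblank callback registered on the path's components: applies the pending
 * configuration (unless shadow registers or CMDQ take care of it) and
 * finishes any pending page flip.
 */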
static void mtk_crtc_ddp_irq(void *data)
{
	struct drm_crtc *crtc = data;
	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
	struct mtk_drm_private *priv = crtc->dev->dev_private;

#if IS_REACHABLE(CONFIG_MTK_CMDQ)
	if (!priv->data->shadow_register && !mtk_crtc->cmdq_client.chan)
		mtk_crtc_ddp_config(crtc, NULL);
	else if (mtk_crtc->cmdq_vblank_cnt > 0 && --mtk_crtc->cmdq_vblank_cnt == 0)
		DRM_ERROR("mtk_crtc %d CMDQ execute command timeout!\n",
			  drm_crtc_index(&mtk_crtc->base));
#else
	if (!priv->data->shadow_register)
		mtk_crtc_ddp_config(crtc, NULL);
#endif
	mtk_drm_finish_page_flip(mtk_crtc);
}

static int mtk_drm_crtc_enable_vblank(struct drm_crtc *crtc)
{
	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
	struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0];

	mtk_ddp_comp_enable_vblank(comp);

	return 0;
}

static void mtk_drm_crtc_disable_vblank(struct drm_crtc *crtc)
{
	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
	struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0];

	mtk_ddp_comp_disable_vblank(comp);
}

int mtk_drm_crtc_plane_check(struct drm_crtc *crtc, struct drm_plane *plane,
			     struct mtk_plane_state *state)
{
	unsigned int local_layer;
	struct mtk_ddp_comp *comp;

	comp = mtk_drm_ddp_comp_for_plane(crtc, plane, &local_layer);
	if (comp)
		return mtk_ddp_comp_layer_check(comp, local_layer, state);
	return 0;
}

void mtk_drm_crtc_async_update(struct drm_crtc *crtc, struct drm_plane *plane,
			       struct drm_atomic_state *state)
{
	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);

	if (!mtk_crtc->enabled)
		return;

	mtk_drm_crtc_update_config(mtk_crtc, false);
}

static void mtk_drm_crtc_atomic_enable(struct drm_crtc *crtc,
				       struct drm_atomic_state *state)
{
	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
	struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0];
	int ret;

	DRM_DEBUG_DRIVER("%s %d\n", __func__, crtc->base.id);

	ret = pm_runtime_resume_and_get(comp->dev);
	if (ret < 0) {
		DRM_DEV_ERROR(comp->dev, "Failed to enable power domain: %d\n", ret);
		return;
	}

	ret = mtk_crtc_ddp_hw_init(mtk_crtc);
	if (ret) {
		pm_runtime_put(comp->dev);
		return;
	}

	drm_crtc_vblank_on(crtc);
	mtk_crtc->enabled = true;
}

static void mtk_drm_crtc_atomic_disable(struct drm_crtc *crtc,
					struct drm_atomic_state *state)
{
	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
	struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0];
	int i, ret;

	DRM_DEBUG_DRIVER("%s %d\n", __func__, crtc->base.id);
	if (!mtk_crtc->enabled)
		return;

	/* Set all pending plane state to disabled */
	for (i = 0; i < mtk_crtc->layer_nr; i++) {
		struct drm_plane *plane = &mtk_crtc->planes[i];
		struct mtk_plane_state *plane_state;

		plane_state = to_mtk_plane_state(plane->state);
		plane_state->pending.enable = false;
		plane_state->pending.config = true;
	}
	mtk_crtc->pending_planes = true;

	mtk_drm_crtc_update_config(mtk_crtc, false);
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
	/* Wait for planes to be disabled by cmdq */
	if (mtk_crtc->cmdq_client.chan)
		wait_event_timeout(mtk_crtc->cb_blocking_queue,
				   mtk_crtc->cmdq_vblank_cnt == 0,
				   msecs_to_jiffies(500));
#endif
	/* Wait for planes to be disabled */
	drm_crtc_wait_one_vblank(crtc);

	drm_crtc_vblank_off(crtc);
	mtk_crtc_ddp_hw_fini(mtk_crtc);
	ret = pm_runtime_put(comp->dev);
	if (ret < 0)
		DRM_DEV_ERROR(comp->dev, "Failed to disable power domain: %d\n", ret);

	mtk_crtc->enabled = false;
}

static void mtk_drm_crtc_atomic_begin(struct drm_crtc *crtc,
				      struct drm_atomic_state *state)
{
	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
									  crtc);
	struct mtk_crtc_state *mtk_crtc_state = to_mtk_crtc_state(crtc_state);
	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);

	if (mtk_crtc->event && mtk_crtc_state->base.event)
		DRM_ERROR("new event while there is still a pending event\n");

	if (mtk_crtc_state->base.event) {
		mtk_crtc_state->base.event->pipe = drm_crtc_index(crtc);
		WARN_ON(drm_crtc_vblank_get(crtc) != 0);
		mtk_crtc->event = mtk_crtc_state->base.event;
		mtk_crtc_state->base.event = NULL;
	}
}

static void mtk_drm_crtc_atomic_flush(struct drm_crtc *crtc,
				      struct drm_atomic_state *state)
{
	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
	int i;

	if (crtc->state->color_mgmt_changed)
		for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
			mtk_ddp_gamma_set(mtk_crtc->ddp_comp[i], crtc->state);
			mtk_ddp_ctm_set(mtk_crtc->ddp_comp[i], crtc->state);
		}
	mtk_drm_crtc_update_config(mtk_crtc, !!mtk_crtc->event);
}

static const struct drm_crtc_funcs mtk_crtc_funcs = {
	.set_config = drm_atomic_helper_set_config,
	.page_flip = drm_atomic_helper_page_flip,
	.destroy = mtk_drm_crtc_destroy,
	.reset = mtk_drm_crtc_reset,
	.atomic_duplicate_state = mtk_drm_crtc_duplicate_state,
	.atomic_destroy_state = mtk_drm_crtc_destroy_state,
	.enable_vblank = mtk_drm_crtc_enable_vblank,
	.disable_vblank = mtk_drm_crtc_disable_vblank,
};

static const struct drm_crtc_helper_funcs mtk_crtc_helper_funcs = {
	.mode_fixup = mtk_drm_crtc_mode_fixup,
	.mode_set_nofb = mtk_drm_crtc_mode_set_nofb,
	.atomic_begin = mtk_drm_crtc_atomic_begin,
	.atomic_flush = mtk_drm_crtc_atomic_flush,
	.atomic_enable = mtk_drm_crtc_atomic_enable,
	.atomic_disable = mtk_drm_crtc_atomic_disable,
};

static int mtk_drm_crtc_init(struct drm_device *drm,
			     struct mtk_drm_crtc *mtk_crtc,
			     unsigned int pipe)
{
	struct drm_plane *primary = NULL;
	struct drm_plane *cursor = NULL;
	int i, ret;

	for (i = 0; i < mtk_crtc->layer_nr; i++) {
		if (mtk_crtc->planes[i].type == DRM_PLANE_TYPE_PRIMARY)
			primary = &mtk_crtc->planes[i];
		else if (mtk_crtc->planes[i].type == DRM_PLANE_TYPE_CURSOR)
			cursor = &mtk_crtc->planes[i];
	}

	ret = drm_crtc_init_with_planes(drm, &mtk_crtc->base, primary, cursor,
					&mtk_crtc_funcs, NULL);
	if (ret)
		goto err_cleanup_crtc;

	drm_crtc_helper_add(&mtk_crtc->base, &mtk_crtc_helper_funcs);

	return 0;

err_cleanup_crtc:
	drm_crtc_cleanup(&mtk_crtc->base);
	return ret;
}

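/*
 * Only the first two components of a path can contribute planes; the second
 * one does so only if it supports background color input, which allows its
 * layers to be blended with the output of the first.
 */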
static int mtk_drm_crtc_num_comp_planes(struct mtk_drm_crtc *mtk_crtc,
					int comp_idx)
{
	struct mtk_ddp_comp *comp;

	if (comp_idx > 1)
		return 0;

	comp = mtk_crtc->ddp_comp[comp_idx];
	if (!comp->funcs)
		return 0;

	if (comp_idx == 1 && !comp->funcs->bgclr_in_on)
		return 0;

	return mtk_ddp_comp_layer_nr(comp);
}

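/* Plane 0 is the primary plane, the last one is the cursor, the rest are overlays. */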
static inline
enum drm_plane_type mtk_drm_crtc_plane_type(unsigned int plane_idx,
					    unsigned int num_planes)
{
	if (plane_idx == 0)
		return DRM_PLANE_TYPE_PRIMARY;
	else if (plane_idx == (num_planes - 1))
		return DRM_PLANE_TYPE_CURSOR;
	else
		return DRM_PLANE_TYPE_OVERLAY;
}

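/*
 * Register one drm_plane for each hardware layer of the given component,
 * continuing the crtc-wide numbering kept in mtk_crtc->layer_nr.
 */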
static int mtk_drm_crtc_init_comp_planes(struct drm_device *drm_dev,
					 struct mtk_drm_crtc *mtk_crtc,
					 int comp_idx, int pipe)
{
	int num_planes = mtk_drm_crtc_num_comp_planes(mtk_crtc, comp_idx);
	struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[comp_idx];
	int i, ret;

	for (i = 0; i < num_planes; i++) {
		ret = mtk_plane_init(drm_dev,
				     &mtk_crtc->planes[mtk_crtc->layer_nr],
				     BIT(pipe),
				     mtk_drm_crtc_plane_type(mtk_crtc->layer_nr,
							     num_planes),
				     mtk_ddp_comp_supported_rotations(comp));
		if (ret)
			return ret;

		mtk_crtc->layer_nr++;
	}
	return 0;
}

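/*
 * Create a crtc for the given component path. If any component in the path
 * is disabled or missing, the crtc is silently not created; if a component
 * exists but has not been initialized yet, -ENODEV is returned.
 */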
int mtk_drm_crtc_create(struct drm_device *drm_dev,
			const enum mtk_ddp_comp_id *path, unsigned int path_len)
{
	struct mtk_drm_private *priv = drm_dev->dev_private;
	struct device *dev = drm_dev->dev;
	struct mtk_drm_crtc *mtk_crtc;
	unsigned int num_comp_planes = 0;
	int pipe = priv->num_pipes;
	int ret;
	int i;
	bool has_ctm = false;
	unsigned int gamma_lut_size = 0;

	if (!path)
		return 0;

	for (i = 0; i < path_len; i++) {
		enum mtk_ddp_comp_id comp_id = path[i];
		struct device_node *node;
		struct mtk_ddp_comp *comp;

		node = priv->comp_node[comp_id];
		comp = &priv->ddp_comp[comp_id];

		if (!node) {
			dev_info(dev,
				 "Not creating crtc %d because component %d is disabled or missing\n",
				 pipe, comp_id);
			return 0;
		}

		if (!comp->dev) {
			dev_err(dev, "Component %pOF not initialized\n", node);
			return -ENODEV;
		}
	}

	mtk_crtc = devm_kzalloc(dev, sizeof(*mtk_crtc), GFP_KERNEL);
	if (!mtk_crtc)
		return -ENOMEM;

	mtk_crtc->mmsys_dev = priv->mmsys_dev;
	mtk_crtc->ddp_comp_nr = path_len;
	mtk_crtc->ddp_comp = devm_kmalloc_array(dev, mtk_crtc->ddp_comp_nr,
						sizeof(*mtk_crtc->ddp_comp),
						GFP_KERNEL);
	if (!mtk_crtc->ddp_comp)
		return -ENOMEM;

	mtk_crtc->mutex = mtk_mutex_get(priv->mutex_dev);
	if (IS_ERR(mtk_crtc->mutex)) {
		ret = PTR_ERR(mtk_crtc->mutex);
		dev_err(dev, "Failed to get mutex: %d\n", ret);
		return ret;
	}

	for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
		enum mtk_ddp_comp_id comp_id = path[i];
		struct mtk_ddp_comp *comp;

		comp = &priv->ddp_comp[comp_id];
		mtk_crtc->ddp_comp[i] = comp;

		if (comp->funcs) {
			if (comp->funcs->gamma_set)
				gamma_lut_size = MTK_LUT_SIZE;

			if (comp->funcs->ctm_set)
				has_ctm = true;
		}

		mtk_ddp_comp_register_vblank_cb(comp, mtk_crtc_ddp_irq,
						&mtk_crtc->base);
	}

	for (i = 0; i < mtk_crtc->ddp_comp_nr; i++)
		num_comp_planes += mtk_drm_crtc_num_comp_planes(mtk_crtc, i);

	mtk_crtc->planes = devm_kcalloc(dev, num_comp_planes,
					sizeof(struct drm_plane), GFP_KERNEL);
	if (!mtk_crtc->planes)
		return -ENOMEM;

	for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
		ret = mtk_drm_crtc_init_comp_planes(drm_dev, mtk_crtc, i,
						    pipe);
		if (ret)
			return ret;
	}

	ret = mtk_drm_crtc_init(drm_dev, mtk_crtc, pipe);
	if (ret < 0)
		return ret;

	if (gamma_lut_size)
		drm_mode_crtc_set_gamma_size(&mtk_crtc->base, gamma_lut_size);
	drm_crtc_enable_color_mgmt(&mtk_crtc->base, 0, has_ctm, gamma_lut_size);
	priv->num_pipes++;
	mutex_init(&mtk_crtc->hw_lock);

#if IS_REACHABLE(CONFIG_MTK_CMDQ)
	mtk_crtc->cmdq_client.client.dev = mtk_crtc->mmsys_dev;
	mtk_crtc->cmdq_client.client.tx_block = false;
	mtk_crtc->cmdq_client.client.knows_txdone = true;
	mtk_crtc->cmdq_client.client.rx_callback = ddp_cmdq_cb;
	mtk_crtc->cmdq_client.chan =
			mbox_request_channel(&mtk_crtc->cmdq_client.client,
					     drm_crtc_index(&mtk_crtc->base));
	if (IS_ERR(mtk_crtc->cmdq_client.chan)) {
		dev_dbg(dev, "mtk_crtc %d failed to create mailbox client, writing register by CPU now\n",
			drm_crtc_index(&mtk_crtc->base));
		mtk_crtc->cmdq_client.chan = NULL;
	}

	if (mtk_crtc->cmdq_client.chan) {
		ret = of_property_read_u32_index(priv->mutex_node,
						 "mediatek,gce-events",
						 drm_crtc_index(&mtk_crtc->base),
						 &mtk_crtc->cmdq_event);
		if (ret) {
			dev_dbg(dev, "mtk_crtc %d failed to get mediatek,gce-events property\n",
				drm_crtc_index(&mtk_crtc->base));
			mbox_free_channel(mtk_crtc->cmdq_client.chan);
			mtk_crtc->cmdq_client.chan = NULL;
		} else {
			ret = mtk_drm_cmdq_pkt_create(&mtk_crtc->cmdq_client,
						      &mtk_crtc->cmdq_handle,
						      PAGE_SIZE);
			if (ret) {
				dev_dbg(dev, "mtk_crtc %d failed to create cmdq packet\n",
					drm_crtc_index(&mtk_crtc->base));
				mbox_free_channel(mtk_crtc->cmdq_client.chan);
				mtk_crtc->cmdq_client.chan = NULL;
			}
		}

		/* for sending blocking cmd in crtc disable */
		init_waitqueue_head(&mtk_crtc->cb_blocking_queue);
	}
#endif
	return 0;
}