// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2014-2021 The Linux Foundation. All rights reserved.
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__
#include <linux/sort.h>
#include <linux/debugfs.h>
#include <linux/ktime.h>
#include <linux/bits.h>

#include <drm/drm_atomic.h>
#include <drm/drm_crtc.h>
#include <drm/drm_flip_work.h>
#include <drm/drm_mode.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_rect.h>
#include <drm/drm_vblank.h>

#include "dpu_kms.h"
#include "dpu_hw_lm.h"
#include "dpu_hw_ctl.h"
#include "dpu_hw_dspp.h"
#include "dpu_crtc.h"
#include "dpu_plane.h"
#include "dpu_encoder.h"
#include "dpu_vbif.h"
#include "dpu_core_perf.h"
#include "dpu_trace.h"

/* layer mixer index on dpu_crtc */
#define LEFT_MIXER 0
#define RIGHT_MIXER 1

/* timeout in ms waiting for frame done */
#define DPU_CRTC_FRAME_DONE_TIMEOUT_MS	60

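/*
 * drm_color_ctm coefficients are S31.32 sign-magnitude fixed point; drop the
 * sign bit and truncate to the 18-bit (3.15) value expected by the DSPP PCC
 * block.
 */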
#define CONVERT_S3_15(val) \
	(((((u64)val) & ~BIT_ULL(63)) >> 17) & GENMASK_ULL(17, 0))

static struct dpu_kms *_dpu_crtc_get_kms(struct drm_crtc *crtc)
{
	struct msm_drm_private *priv = crtc->dev->dev_private;

	return to_dpu_kms(priv->kms);
}

static void dpu_crtc_destroy(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);

	if (!crtc)
		return;

	drm_crtc_cleanup(crtc);
	kfree(dpu_crtc);
}

static struct drm_encoder *get_encoder_from_crtc(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_encoder *encoder;

	drm_for_each_encoder(encoder, dev)
		if (encoder->crtc == crtc)
			return encoder;

	return NULL;
}

static enum dpu_crtc_crc_source dpu_crtc_parse_crc_source(const char *src_name)
{
	if (!src_name ||
	    !strcmp(src_name, "none"))
		return DPU_CRTC_CRC_SOURCE_NONE;
	if (!strcmp(src_name, "auto") ||
	    !strcmp(src_name, "lm"))
		return DPU_CRTC_CRC_SOURCE_LAYER_MIXER;

	return DPU_CRTC_CRC_SOURCE_INVALID;
}

static int dpu_crtc_verify_crc_source(struct drm_crtc *crtc,
		const char *src_name, size_t *values_cnt)
{
	enum dpu_crtc_crc_source source = dpu_crtc_parse_crc_source(src_name);
	struct dpu_crtc_state *crtc_state = to_dpu_crtc_state(crtc->state);

	if (source < 0) {
		DRM_DEBUG_DRIVER("Invalid source %s for CRTC%d\n", src_name, crtc->index);
		return -EINVAL;
	}

	if (source == DPU_CRTC_CRC_SOURCE_LAYER_MIXER)
		*values_cnt = crtc_state->num_mixers;

	return 0;
}

static int dpu_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name)
{
	enum dpu_crtc_crc_source source = dpu_crtc_parse_crc_source(src_name);
	enum dpu_crtc_crc_source current_source;
	struct dpu_crtc_state *crtc_state;
	struct drm_device *drm_dev = crtc->dev;
	struct dpu_crtc_mixer *m;

	bool was_enabled;
	bool enable = false;
	int i, ret = 0;

	if (source < 0) {
		DRM_DEBUG_DRIVER("Invalid CRC source %s for CRTC%d\n", src_name, crtc->index);
		return -EINVAL;
	}

	ret = drm_modeset_lock(&crtc->mutex, NULL);
	if (ret)
		return ret;

	enable = (source != DPU_CRTC_CRC_SOURCE_NONE);
	crtc_state = to_dpu_crtc_state(crtc->state);

	spin_lock_irq(&drm_dev->event_lock);
	current_source = crtc_state->crc_source;
	spin_unlock_irq(&drm_dev->event_lock);

	was_enabled = (current_source != DPU_CRTC_CRC_SOURCE_NONE);

	if (!was_enabled && enable) {
		ret = drm_crtc_vblank_get(crtc);
		if (ret)
			goto cleanup;
	} else if (was_enabled && !enable) {
		drm_crtc_vblank_put(crtc);
	}

	spin_lock_irq(&drm_dev->event_lock);
	crtc_state->crc_source = source;
	spin_unlock_irq(&drm_dev->event_lock);

	crtc_state->crc_frame_skip_count = 0;

	for (i = 0; i < crtc_state->num_mixers; ++i) {
		m = &crtc_state->mixers[i];

		if (!m->hw_lm || !m->hw_lm->ops.setup_misr)
			continue;

		/* Calculate MISR over 1 frame */
		m->hw_lm->ops.setup_misr(m->hw_lm, true, 1);
	}

cleanup:
	drm_modeset_unlock(&crtc->mutex);

	return ret;
}

static u32 dpu_crtc_get_vblank_counter(struct drm_crtc *crtc)
{
	struct drm_encoder *encoder = get_encoder_from_crtc(crtc);

	if (!encoder) {
		DRM_ERROR("no encoder found for crtc %d\n", crtc->index);
		return 0;
	}

	return dpu_encoder_get_vsync_count(encoder);
}

static int dpu_crtc_get_crc(struct drm_crtc *crtc)
{
	struct dpu_crtc_state *crtc_state;
	struct dpu_crtc_mixer *m;
	u32 crcs[CRTC_DUAL_MIXERS];

	int i = 0;
	int rc = 0;

	crtc_state = to_dpu_crtc_state(crtc->state);

	BUILD_BUG_ON(ARRAY_SIZE(crcs) != ARRAY_SIZE(crtc_state->mixers));

	/* Skip first 2 frames in case of "uncooked" CRCs */
	if (crtc_state->crc_frame_skip_count < 2) {
		crtc_state->crc_frame_skip_count++;
		return 0;
	}

	for (i = 0; i < crtc_state->num_mixers; ++i) {
		m = &crtc_state->mixers[i];

		if (!m->hw_lm || !m->hw_lm->ops.collect_misr)
			continue;

		rc = m->hw_lm->ops.collect_misr(m->hw_lm, &crcs[i]);
		if (rc) {
			if (rc != -ENODATA)
				DRM_DEBUG_DRIVER("MISR read failed\n");
			return rc;
		}
	}

	return drm_crtc_add_crc_entry(crtc, true,
			drm_crtc_accurate_vblank_count(crtc), crcs);
}

static bool dpu_crtc_get_scanout_position(struct drm_crtc *crtc,
					  bool in_vblank_irq,
					  int *vpos, int *hpos,
					  ktime_t *stime, ktime_t *etime,
					  const struct drm_display_mode *mode)
{
	unsigned int pipe = crtc->index;
	struct drm_encoder *encoder;
	int line, vsw, vbp, vactive_start, vactive_end, vfp_end;

	encoder = get_encoder_from_crtc(crtc);
	if (!encoder) {
		DRM_ERROR("no encoder found for crtc %d\n", pipe);
		return false;
	}

	vsw = mode->crtc_vsync_end - mode->crtc_vsync_start;
	vbp = mode->crtc_vtotal - mode->crtc_vsync_end;

	/*
	 * the line counter is 1 at the start of the VSYNC pulse and VTOTAL at
	 * the end of VFP. Translate the porch values relative to the line
	 * counter positions.
	 */
	vactive_start = vsw + vbp + 1;
	vactive_end = vactive_start + mode->crtc_vdisplay;

	/* last scan line before VSYNC */
	vfp_end = mode->crtc_vtotal;

	if (stime)
		*stime = ktime_get();

	line = dpu_encoder_get_linecount(encoder);

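	/*
	 * Translate the raw line count: inside the active region the result
	 * is 0..vdisplay - 1; in the blanking regions it is the negative
	 * distance to the start of active video.
	 */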
	if (line < vactive_start)
		line -= vactive_start;
	else if (line > vactive_end)
		line = line - vfp_end - vactive_start;
	else
		line -= vactive_start;

	*vpos = line;
	*hpos = 0;

	if (etime)
		*etime = ktime_get();

	return true;
}

static void _dpu_crtc_setup_blend_cfg(struct dpu_crtc_mixer *mixer,
		struct dpu_plane_state *pstate, struct dpu_format *format)
{
	struct dpu_hw_mixer *lm = mixer->hw_lm;
	uint32_t blend_op;
	uint32_t fg_alpha, bg_alpha;

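	/* DRM plane alpha is 16 bits wide (0xffff == opaque); the mixer takes an 8-bit value */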
	fg_alpha = pstate->base.alpha >> 8;
	bg_alpha = 0xff - fg_alpha;

	/* default to opaque blending */
	if (pstate->base.pixel_blend_mode == DRM_MODE_BLEND_PIXEL_NONE ||
	    !format->alpha_enable) {
		blend_op = DPU_BLEND_FG_ALPHA_FG_CONST |
			DPU_BLEND_BG_ALPHA_BG_CONST;
	} else if (pstate->base.pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
		blend_op = DPU_BLEND_FG_ALPHA_FG_CONST |
			DPU_BLEND_BG_ALPHA_FG_PIXEL;
		if (fg_alpha != 0xff) {
			bg_alpha = fg_alpha;
			blend_op |= DPU_BLEND_BG_MOD_ALPHA |
				DPU_BLEND_BG_INV_MOD_ALPHA;
		} else {
			blend_op |= DPU_BLEND_BG_INV_ALPHA;
		}
	} else {
		/* coverage blending */
		blend_op = DPU_BLEND_FG_ALPHA_FG_PIXEL |
			DPU_BLEND_BG_ALPHA_FG_PIXEL;
		if (fg_alpha != 0xff) {
			bg_alpha = fg_alpha;
			blend_op |= DPU_BLEND_FG_MOD_ALPHA |
				DPU_BLEND_FG_INV_MOD_ALPHA |
				DPU_BLEND_BG_MOD_ALPHA |
				DPU_BLEND_BG_INV_MOD_ALPHA;
		} else {
			blend_op |= DPU_BLEND_BG_INV_ALPHA;
		}
	}

	lm->ops.setup_blend_config(lm, pstate->stage,
				fg_alpha, bg_alpha, blend_op);

	DRM_DEBUG_ATOMIC("format:%p4cc, alpha_en:%u blend_op:0x%x\n",
			 &format->base.pixel_format, format->alpha_enable, blend_op);
}

static void _dpu_crtc_program_lm_output_roi(struct drm_crtc *crtc)
{
	struct dpu_crtc_state *crtc_state;
	int lm_idx, lm_horiz_position;

	crtc_state = to_dpu_crtc_state(crtc->state);

	lm_horiz_position = 0;
	for (lm_idx = 0; lm_idx < crtc_state->num_mixers; lm_idx++) {
		const struct drm_rect *lm_roi = &crtc_state->lm_bounds[lm_idx];
		struct dpu_hw_mixer *hw_lm = crtc_state->mixers[lm_idx].hw_lm;
		struct dpu_hw_mixer_cfg cfg;

		if (!lm_roi || !drm_rect_visible(lm_roi))
			continue;

		cfg.out_width = drm_rect_width(lm_roi);
		cfg.out_height = drm_rect_height(lm_roi);
		cfg.right_mixer = lm_horiz_position++;
		cfg.flags = 0;
		hw_lm->ops.setup_mixer_out(hw_lm, &cfg);
	}
}

static void _dpu_crtc_blend_setup_mixer(struct drm_crtc *crtc,
	struct dpu_crtc *dpu_crtc, struct dpu_crtc_mixer *mixer,
	struct dpu_hw_stage_cfg *stage_cfg)
{
	struct drm_plane *plane;
	struct drm_framebuffer *fb;
	struct drm_plane_state *state;
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
	struct dpu_plane_state *pstate = NULL;
	struct dpu_format *format;
	struct dpu_hw_ctl *ctl = mixer->lm_ctl;

	u32 flush_mask;
	uint32_t stage_idx, lm_idx;
	int zpos_cnt[DPU_STAGE_MAX + 1] = { 0 };
	bool bg_alpha_enable = false;
	DECLARE_BITMAP(fetch_active, SSPP_MAX);

	memset(fetch_active, 0, sizeof(fetch_active));
	drm_atomic_crtc_for_each_plane(plane, crtc) {
		state = plane->state;
		if (!state)
			continue;

		if (!state->visible)
			continue;

		pstate = to_dpu_plane_state(state);
		fb = state->fb;

		dpu_plane_get_ctl_flush(plane, ctl, &flush_mask);
		set_bit(dpu_plane_pipe(plane), fetch_active);

		DRM_DEBUG_ATOMIC("crtc %d stage:%d - plane %d sspp %d fb %d\n",
				crtc->base.id,
				pstate->stage,
				plane->base.id,
				dpu_plane_pipe(plane) - SSPP_VIG0,
				state->fb ? state->fb->base.id : -1);

		format = to_dpu_format(msm_framebuffer_format(pstate->base.fb));

		if (pstate->stage == DPU_STAGE_BASE && format->alpha_enable)
			bg_alpha_enable = true;

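		/*
		 * zpos_cnt counts how many planes have already been placed at
		 * this blend stage; each additional plane takes the next slot
		 * in stage_cfg.
		 */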
		stage_idx = zpos_cnt[pstate->stage]++;
		stage_cfg->stage[pstate->stage][stage_idx] =
					dpu_plane_pipe(plane);
		stage_cfg->multirect_index[pstate->stage][stage_idx] =
					pstate->multirect_index;

		trace_dpu_crtc_setup_mixer(DRMID(crtc), DRMID(plane),
					   state, pstate, stage_idx,
					   dpu_plane_pipe(plane) - SSPP_VIG0,
					   format->base.pixel_format,
					   fb ? fb->modifier : 0);

		/* blend config update */
		for (lm_idx = 0; lm_idx < cstate->num_mixers; lm_idx++) {
			_dpu_crtc_setup_blend_cfg(mixer + lm_idx,
						pstate, format);

			mixer[lm_idx].flush_mask |= flush_mask;

			if (bg_alpha_enable && !format->alpha_enable)
				mixer[lm_idx].mixer_op_mode = 0;
			else
				mixer[lm_idx].mixer_op_mode |=
						1 << pstate->stage;
		}
	}

	if (ctl->ops.set_active_pipes)
		ctl->ops.set_active_pipes(ctl, fetch_active);

	_dpu_crtc_program_lm_output_roi(crtc);
}

/**
 * _dpu_crtc_blend_setup - configure crtc mixers
 * @crtc: Pointer to drm crtc structure
 */
static void _dpu_crtc_blend_setup(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
	struct dpu_crtc_mixer *mixer = cstate->mixers;
	struct dpu_hw_ctl *ctl;
	struct dpu_hw_mixer *lm;
	struct dpu_hw_stage_cfg stage_cfg;
	int i;

	DRM_DEBUG_ATOMIC("%s\n", dpu_crtc->name);

	for (i = 0; i < cstate->num_mixers; i++) {
		mixer[i].mixer_op_mode = 0;
		mixer[i].flush_mask = 0;
		if (mixer[i].lm_ctl->ops.clear_all_blendstages)
			mixer[i].lm_ctl->ops.clear_all_blendstages(
					mixer[i].lm_ctl);
	}

	/* initialize stage cfg */
	memset(&stage_cfg, 0, sizeof(struct dpu_hw_stage_cfg));

	_dpu_crtc_blend_setup_mixer(crtc, dpu_crtc, mixer, &stage_cfg);

	for (i = 0; i < cstate->num_mixers; i++) {
		ctl = mixer[i].lm_ctl;
		lm = mixer[i].hw_lm;

		lm->ops.setup_alpha_out(lm, mixer[i].mixer_op_mode);

		mixer[i].flush_mask |= ctl->ops.get_bitmask_mixer(ctl,
			mixer[i].hw_lm->idx);

		/* stage config flush mask */
		ctl->ops.update_pending_flush(ctl, mixer[i].flush_mask);

		DRM_DEBUG_ATOMIC("lm %d, op_mode 0x%X, ctl %d, flush mask 0x%x\n",
			mixer[i].hw_lm->idx - LM_0,
			mixer[i].mixer_op_mode,
			ctl->idx - CTL_0,
			mixer[i].flush_mask);

		ctl->ops.setup_blendstage(ctl, mixer[i].hw_lm->idx,
			&stage_cfg);
	}
}

/**
 * _dpu_crtc_complete_flip - signal pending page_flip events
 * Any pending vblank events are added to the vblank_event_list
 * so that the next vblank interrupt will signal them.
 * However, PAGE_FLIP events are not handled through the vblank_event_list.
 * This API signals any pending PAGE_FLIP events requested through
 * DRM_IOCTL_MODE_PAGE_FLIP and cached in dpu_crtc->event.
 * @crtc: Pointer to drm crtc structure
 */
static void _dpu_crtc_complete_flip(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	unsigned long flags;

	spin_lock_irqsave(&dev->event_lock, flags);
	if (dpu_crtc->event) {
		DRM_DEBUG_VBL("%s: send event: %pK\n", dpu_crtc->name,
			      dpu_crtc->event);
		trace_dpu_crtc_complete_flip(DRMID(crtc));
		drm_crtc_send_vblank_event(crtc, dpu_crtc->event);
		dpu_crtc->event = NULL;
	}
	spin_unlock_irqrestore(&dev->event_lock, flags);
}

enum dpu_intf_mode dpu_crtc_get_intf_mode(struct drm_crtc *crtc)
{
	struct drm_encoder *encoder;

	/*
	 * TODO: This function is called from dpu debugfs and as part of atomic
	 * check. When called from debugfs, the crtc->mutex must be held to
	 * read crtc->state. However reading crtc->state from atomic check isn't
	 * allowed (unless you have a good reason, a big comment, and a deep
	 * understanding of how the atomic/modeset locks work (<- and this is
	 * probably not possible)). So we'll keep the WARN_ON here for now, but
	 * really we need to figure out a better way to track our operating mode
	 */
	WARN_ON(!drm_modeset_is_locked(&crtc->mutex));

	/* TODO: Returns the first INTF_MODE, could there be multiple values? */
	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
		return dpu_encoder_get_intf_mode(encoder);

	return INTF_MODE_NONE;
}

void dpu_crtc_vblank_callback(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);

	/* keep statistics on vblank callback - with auto reset via debugfs */
	if (ktime_compare(dpu_crtc->vblank_cb_time, ktime_set(0, 0)) == 0)
		dpu_crtc->vblank_cb_time = ktime_get();
	else
		dpu_crtc->vblank_cb_count++;

	dpu_crtc_get_crc(crtc);

	drm_crtc_handle_vblank(crtc);
	trace_dpu_crtc_vblank_cb(DRMID(crtc));
}

static void dpu_crtc_frame_event_work(struct kthread_work *work)
{
	struct dpu_crtc_frame_event *fevent = container_of(work,
			struct dpu_crtc_frame_event, work);
	struct drm_crtc *crtc = fevent->crtc;
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	unsigned long flags;
	bool frame_done = false;

	DPU_ATRACE_BEGIN("crtc_frame_event");

	DRM_DEBUG_ATOMIC("crtc%d event:%u ts:%lld\n", crtc->base.id, fevent->event,
			ktime_to_ns(fevent->ts));

	if (fevent->event & (DPU_ENCODER_FRAME_EVENT_DONE
				| DPU_ENCODER_FRAME_EVENT_ERROR
				| DPU_ENCODER_FRAME_EVENT_PANEL_DEAD)) {

		if (atomic_read(&dpu_crtc->frame_pending) < 1) {
			/* ignore vblank when not pending */
		} else if (atomic_dec_return(&dpu_crtc->frame_pending) == 0) {
			/* release bandwidth and other resources */
			trace_dpu_crtc_frame_event_done(DRMID(crtc),
							fevent->event);
			dpu_core_perf_crtc_release_bw(crtc);
		} else {
			trace_dpu_crtc_frame_event_more_pending(DRMID(crtc),
								fevent->event);
		}

		if (fevent->event & (DPU_ENCODER_FRAME_EVENT_DONE
					| DPU_ENCODER_FRAME_EVENT_ERROR))
			frame_done = true;
	}

	if (fevent->event & DPU_ENCODER_FRAME_EVENT_PANEL_DEAD)
		DPU_ERROR("crtc%d ts:%lld received panel dead event\n",
				crtc->base.id, ktime_to_ns(fevent->ts));

	if (frame_done)
		complete_all(&dpu_crtc->frame_done_comp);

	spin_lock_irqsave(&dpu_crtc->spin_lock, flags);
	list_add_tail(&fevent->list, &dpu_crtc->frame_event_list);
	spin_unlock_irqrestore(&dpu_crtc->spin_lock, flags);
	DPU_ATRACE_END("crtc_frame_event");
}

/*
 * dpu_crtc_frame_event_cb - crtc frame event callback API. CRTC module
 * registers this API with the encoder for all frame event callbacks like
 * frame_error, frame_done, idle_timeout, etc. The encoder may invoke these
 * callbacks from different contexts - IRQ, user thread, commit_thread, etc.
 * Each event should be carefully reviewed and should be processed in the
 * proper task context to avoid scheduling delay or to properly manage the
 * irq context's bottom-half processing.
 */
static void dpu_crtc_frame_event_cb(void *data, u32 event)
{
	struct drm_crtc *crtc = (struct drm_crtc *)data;
	struct dpu_crtc *dpu_crtc;
	struct msm_drm_private *priv;
	struct dpu_crtc_frame_event *fevent;
	unsigned long flags;
	u32 crtc_id;

	/* Nothing to do on idle event */
	if (event & DPU_ENCODER_FRAME_EVENT_IDLE)
		return;

	dpu_crtc = to_dpu_crtc(crtc);
	priv = crtc->dev->dev_private;
	crtc_id = drm_crtc_index(crtc);

	trace_dpu_crtc_frame_event_cb(DRMID(crtc), event);

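	/*
	 * Grab a pre-allocated event from the free list;
	 * dpu_crtc_frame_event_work() puts it back once the event has been
	 * processed.
	 */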
	spin_lock_irqsave(&dpu_crtc->spin_lock, flags);
	fevent = list_first_entry_or_null(&dpu_crtc->frame_event_list,
			struct dpu_crtc_frame_event, list);
	if (fevent)
		list_del_init(&fevent->list);
	spin_unlock_irqrestore(&dpu_crtc->spin_lock, flags);

	if (!fevent) {
		DRM_ERROR_RATELIMITED("crtc%d event %d overflow\n", crtc->base.id, event);
		return;
	}

	fevent->event = event;
	fevent->crtc = crtc;
	fevent->ts = ktime_get();
	kthread_queue_work(priv->event_thread[crtc_id].worker, &fevent->work);
}

void dpu_crtc_complete_commit(struct drm_crtc *crtc)
{
	trace_dpu_crtc_complete_commit(DRMID(crtc));
	dpu_core_perf_crtc_update(crtc, 0, false);
	_dpu_crtc_complete_flip(crtc);
}

static void _dpu_crtc_setup_lm_bounds(struct drm_crtc *crtc,
		struct drm_crtc_state *state)
{
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(state);
	struct drm_display_mode *adj_mode = &state->adjusted_mode;
	u32 crtc_split_width = adj_mode->hdisplay / cstate->num_mixers;
	int i;

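	/* split the adjusted mode into equal-width, full-height slices, one per layer mixer */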
	for (i = 0; i < cstate->num_mixers; i++) {
		struct drm_rect *r = &cstate->lm_bounds[i];
		r->x1 = crtc_split_width * i;
		r->y1 = 0;
		r->x2 = r->x1 + crtc_split_width;
		r->y2 = adj_mode->vdisplay;

		trace_dpu_crtc_setup_lm_bounds(DRMID(crtc), i, r);
	}
}

static void _dpu_crtc_get_pcc_coeff(struct drm_crtc_state *state,
		struct dpu_hw_pcc_cfg *cfg)
{
	struct drm_color_ctm *ctm;

	memset(cfg, 0, sizeof(struct dpu_hw_pcc_cfg));

	ctm = (struct drm_color_ctm *)state->ctm->data;

	if (!ctm)
		return;

	cfg->r.r = CONVERT_S3_15(ctm->matrix[0]);
	cfg->g.r = CONVERT_S3_15(ctm->matrix[1]);
	cfg->b.r = CONVERT_S3_15(ctm->matrix[2]);

	cfg->r.g = CONVERT_S3_15(ctm->matrix[3]);
	cfg->g.g = CONVERT_S3_15(ctm->matrix[4]);
	cfg->b.g = CONVERT_S3_15(ctm->matrix[5]);

	cfg->r.b = CONVERT_S3_15(ctm->matrix[6]);
	cfg->g.b = CONVERT_S3_15(ctm->matrix[7]);
	cfg->b.b = CONVERT_S3_15(ctm->matrix[8]);
}

static void _dpu_crtc_setup_cp_blocks(struct drm_crtc *crtc)
{
	struct drm_crtc_state *state = crtc->state;
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
	struct dpu_crtc_mixer *mixer = cstate->mixers;
	struct dpu_hw_pcc_cfg cfg;
	struct dpu_hw_ctl *ctl;
	struct dpu_hw_dspp *dspp;
	int i;

	if (!state->color_mgmt_changed)
		return;

	for (i = 0; i < cstate->num_mixers; i++) {
		ctl = mixer[i].lm_ctl;
		dspp = mixer[i].hw_dspp;

		if (!dspp || !dspp->ops.setup_pcc)
			continue;

		if (!state->ctm) {
			dspp->ops.setup_pcc(dspp, NULL);
		} else {
			_dpu_crtc_get_pcc_coeff(state, &cfg);
			dspp->ops.setup_pcc(dspp, &cfg);
		}

		mixer[i].flush_mask |= ctl->ops.get_bitmask_dspp(ctl,
			mixer[i].hw_dspp->idx);

		/* stage config flush mask */
		ctl->ops.update_pending_flush(ctl, mixer[i].flush_mask);

		DRM_DEBUG_ATOMIC("lm %d, ctl %d, flush mask 0x%x\n",
			mixer[i].hw_lm->idx - DSPP_0,
			ctl->idx - CTL_0,
			mixer[i].flush_mask);
	}
}

static void dpu_crtc_atomic_begin(struct drm_crtc *crtc,
		struct drm_atomic_state *state)
{
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
	struct drm_encoder *encoder;

	if (!crtc->state->enable) {
		DRM_DEBUG_ATOMIC("crtc%d -> enable %d, skip atomic_begin\n",
				crtc->base.id, crtc->state->enable);
		return;
	}

	DRM_DEBUG_ATOMIC("crtc%d\n", crtc->base.id);

	_dpu_crtc_setup_lm_bounds(crtc, crtc->state);

	/* encoder will trigger pending mask now */
	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
		dpu_encoder_trigger_kickoff_pending(encoder);

	/*
	 * If no mixers have been allocated in dpu_crtc_atomic_check(),
	 * it means we are trying to flush a CRTC whose state is disabled:
	 * nothing else needs to be done.
	 */
	if (unlikely(!cstate->num_mixers))
		return;

	_dpu_crtc_blend_setup(crtc);

	_dpu_crtc_setup_cp_blocks(crtc);

	/*
	 * PP_DONE irq is only used by command mode for now.
	 * It is better to request pending before the FLUSH and START triggers
	 * to make sure no pp_done irq is missed.
	 * This is safe because no pp_done will happen before the SW trigger
	 * in command mode.
	 */
}

static void dpu_crtc_atomic_flush(struct drm_crtc *crtc,
		struct drm_atomic_state *state)
{
	struct dpu_crtc *dpu_crtc;
	struct drm_device *dev;
	struct drm_plane *plane;
	struct msm_drm_private *priv;
	unsigned long flags;
	struct dpu_crtc_state *cstate;

	if (!crtc->state->enable) {
		DRM_DEBUG_ATOMIC("crtc%d -> enable %d, skip atomic_flush\n",
				crtc->base.id, crtc->state->enable);
		return;
	}

	DRM_DEBUG_ATOMIC("crtc%d\n", crtc->base.id);

	dpu_crtc = to_dpu_crtc(crtc);
	cstate = to_dpu_crtc_state(crtc->state);
	dev = crtc->dev;
	priv = dev->dev_private;

	if (crtc->index >= ARRAY_SIZE(priv->event_thread)) {
		DPU_ERROR("invalid crtc index[%d]\n", crtc->index);
		return;
	}

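	/*
	 * Take over the pending page-flip event; it is signalled later from
	 * _dpu_crtc_complete_flip() once the commit has completed.
	 */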
	WARN_ON(dpu_crtc->event);
	spin_lock_irqsave(&dev->event_lock, flags);
	dpu_crtc->event = crtc->state->event;
	crtc->state->event = NULL;
	spin_unlock_irqrestore(&dev->event_lock, flags);

	/*
	 * If no mixers have been allocated in dpu_crtc_atomic_check(),
	 * it means we are trying to flush a CRTC whose state is disabled:
	 * nothing else needs to be done.
	 */
	if (unlikely(!cstate->num_mixers))
		return;

	/* update performance setting before crtc kickoff */
	dpu_core_perf_crtc_update(crtc, 1, false);

	/*
	 * Final plane updates: Give each plane a chance to complete all
	 * required writes/flushing before crtc's "flush
	 * everything" call below.
	 */
	drm_atomic_crtc_for_each_plane(plane, crtc) {
		if (dpu_crtc->smmu_state.transition_error)
			dpu_plane_set_error(plane, true);
		dpu_plane_flush(plane);
	}

	/* Kickoff will be scheduled by outer layer */
}

/**
 * dpu_crtc_destroy_state - state destroy hook
 * @crtc: drm CRTC
 * @state: CRTC state object to release
 */
static void dpu_crtc_destroy_state(struct drm_crtc *crtc,
		struct drm_crtc_state *state)
{
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(state);

	DRM_DEBUG_ATOMIC("crtc%d\n", crtc->base.id);

	__drm_atomic_helper_crtc_destroy_state(state);

	kfree(cstate);
}

static int _dpu_crtc_wait_for_frame_done(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	int ret, rc = 0;

	if (!atomic_read(&dpu_crtc->frame_pending)) {
		DRM_DEBUG_ATOMIC("no frames pending\n");
		return 0;
	}

	DPU_ATRACE_BEGIN("frame done completion wait");
	ret = wait_for_completion_timeout(&dpu_crtc->frame_done_comp,
			msecs_to_jiffies(DPU_CRTC_FRAME_DONE_TIMEOUT_MS));
	if (!ret) {
		DRM_ERROR("frame done wait timed out, ret:%d\n", ret);
		rc = -ETIMEDOUT;
	}
	DPU_ATRACE_END("frame done completion wait");

	return rc;
}

void dpu_crtc_commit_kickoff(struct drm_crtc *crtc)
{
	struct drm_encoder *encoder;
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	struct dpu_kms *dpu_kms = _dpu_crtc_get_kms(crtc);
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);

	/*
	 * If no mixers have been allocated in dpu_crtc_atomic_check(),
	 * it means we are trying to start a CRTC whose state is disabled:
	 * nothing else needs to be done.
	 */
	if (unlikely(!cstate->num_mixers))
		return;

	DPU_ATRACE_BEGIN("crtc_commit");

	drm_for_each_encoder_mask(encoder, crtc->dev,
			crtc->state->encoder_mask) {
		if (!dpu_encoder_is_valid_for_commit(encoder)) {
			DRM_DEBUG_ATOMIC("invalid FB not kicking off crtc\n");
			goto end;
		}
	}
	/*
	 * Encoder will flush/start now, unless it has a tx pending. If so, it
	 * may delay and flush at an irq event (e.g. ppdone)
	 */
	drm_for_each_encoder_mask(encoder, crtc->dev,
				  crtc->state->encoder_mask)
		dpu_encoder_prepare_for_kickoff(encoder);

	if (atomic_inc_return(&dpu_crtc->frame_pending) == 1) {
		/* acquire bandwidth and other resources */
		DRM_DEBUG_ATOMIC("crtc%d first commit\n", crtc->base.id);
	} else
		DRM_DEBUG_ATOMIC("crtc%d commit\n", crtc->base.id);

	dpu_crtc->play_count++;

	dpu_vbif_clear_errors(dpu_kms);

	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
		dpu_encoder_kickoff(encoder);

	reinit_completion(&dpu_crtc->frame_done_comp);

end:
	DPU_ATRACE_END("crtc_commit");
}

static void dpu_crtc_reset(struct drm_crtc *crtc)
{
	struct dpu_crtc_state *cstate = kzalloc(sizeof(*cstate), GFP_KERNEL);

	if (crtc->state)
		dpu_crtc_destroy_state(crtc, crtc->state);

	__drm_atomic_helper_crtc_reset(crtc, &cstate->base);
}

/**
 * dpu_crtc_duplicate_state - state duplicate hook
 * @crtc: Pointer to drm crtc structure
 */
static struct drm_crtc_state *dpu_crtc_duplicate_state(struct drm_crtc *crtc)
{
	struct dpu_crtc_state *cstate, *old_cstate = to_dpu_crtc_state(crtc->state);

	cstate = kmemdup(old_cstate, sizeof(*old_cstate), GFP_KERNEL);
	if (!cstate) {
		DPU_ERROR("failed to allocate state\n");
		return NULL;
	}

	/* duplicate base helper */
	__drm_atomic_helper_crtc_duplicate_state(crtc, &cstate->base);

	return &cstate->base;
}

static void dpu_crtc_atomic_print_state(struct drm_printer *p,
					const struct drm_crtc_state *state)
{
	const struct dpu_crtc_state *cstate = to_dpu_crtc_state(state);
	int i;

	for (i = 0; i < cstate->num_mixers; i++) {
		drm_printf(p, "\tlm[%d]=%d\n", i, cstate->mixers[i].hw_lm->idx - LM_0);
		drm_printf(p, "\tctl[%d]=%d\n", i, cstate->mixers[i].lm_ctl->idx - CTL_0);
		if (cstate->mixers[i].hw_dspp)
			drm_printf(p, "\tdspp[%d]=%d\n", i, cstate->mixers[i].hw_dspp->idx - DSPP_0);
	}
}

static void dpu_crtc_disable(struct drm_crtc *crtc,
			     struct drm_atomic_state *state)
{
	struct drm_crtc_state *old_crtc_state = drm_atomic_get_old_crtc_state(state,
									      crtc);
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
	struct drm_encoder *encoder;
	unsigned long flags;
	bool release_bandwidth = false;

	DRM_DEBUG_KMS("crtc%d\n", crtc->base.id);

	/* Disable/save vblank irq handling */
	drm_crtc_vblank_off(crtc);

	drm_for_each_encoder_mask(encoder, crtc->dev,
				  old_crtc_state->encoder_mask) {
		/* in video mode, we hold an extra bandwidth reference
		 * as we cannot drop bandwidth at frame-done if any
		 * crtc is being used in video mode.
		 */
		if (dpu_encoder_get_intf_mode(encoder) == INTF_MODE_VIDEO)
			release_bandwidth = true;
		dpu_encoder_assign_crtc(encoder, NULL);
	}

	/* wait for frame_event_done completion */
	if (_dpu_crtc_wait_for_frame_done(crtc))
		DPU_ERROR("crtc%d wait for frame done failed;frame_pending%d\n",
				crtc->base.id,
				atomic_read(&dpu_crtc->frame_pending));

	trace_dpu_crtc_disable(DRMID(crtc), false, dpu_crtc);
	dpu_crtc->enabled = false;

	if (atomic_read(&dpu_crtc->frame_pending)) {
		trace_dpu_crtc_disable_frame_pending(DRMID(crtc),
				atomic_read(&dpu_crtc->frame_pending));
		if (release_bandwidth)
			dpu_core_perf_crtc_release_bw(crtc);
		atomic_set(&dpu_crtc->frame_pending, 0);
	}

	dpu_core_perf_crtc_update(crtc, 0, true);

	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
		dpu_encoder_register_frame_event_callback(encoder, NULL, NULL);

	memset(cstate->mixers, 0, sizeof(cstate->mixers));
	cstate->num_mixers = 0;

	/* disable clk & bw control until clk & bw properties are set */
	cstate->bw_control = false;
	cstate->bw_split_vote = false;

	if (crtc->state->event && !crtc->state->active) {
		spin_lock_irqsave(&crtc->dev->event_lock, flags);
		drm_crtc_send_vblank_event(crtc, crtc->state->event);
		crtc->state->event = NULL;
		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
	}

	pm_runtime_put_sync(crtc->dev->dev);
}

static void dpu_crtc_enable(struct drm_crtc *crtc,
			    struct drm_atomic_state *state)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	struct drm_encoder *encoder;
	bool request_bandwidth = false;

	pm_runtime_get_sync(crtc->dev->dev);

	DRM_DEBUG_KMS("crtc%d\n", crtc->base.id);

	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask) {
		/* in video mode, we hold an extra bandwidth reference
		 * as we cannot drop bandwidth at frame-done if any
		 * crtc is being used in video mode.
		 */
		if (dpu_encoder_get_intf_mode(encoder) == INTF_MODE_VIDEO)
			request_bandwidth = true;
		dpu_encoder_register_frame_event_callback(encoder,
				dpu_crtc_frame_event_cb, (void *)crtc);
	}

	if (request_bandwidth)
		atomic_inc(&_dpu_crtc_get_kms(crtc)->bandwidth_ref);

	trace_dpu_crtc_enable(DRMID(crtc), true, dpu_crtc);
	dpu_crtc->enabled = true;

	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
		dpu_encoder_assign_crtc(encoder, crtc);

	/* Enable/restore vblank irq handling */
	drm_crtc_vblank_on(crtc);
}

struct plane_state {
	struct dpu_plane_state *dpu_pstate;
	const struct drm_plane_state *drm_pstate;
	int stage;
	u32 pipe_id;
};

static bool dpu_crtc_needs_dirtyfb(struct drm_crtc_state *cstate)
{
	struct drm_crtc *crtc = cstate->crtc;
	struct drm_encoder *encoder;

	drm_for_each_encoder_mask (encoder, crtc->dev, cstate->encoder_mask) {
		if (dpu_encoder_get_intf_mode(encoder) == INTF_MODE_CMD) {
			return true;
		}
	}

	return false;
}

static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
		struct drm_atomic_state *state)
{
	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
									  crtc);
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc_state);
	struct plane_state *pstates;

	const struct drm_plane_state *pstate;
	struct drm_plane *plane;
	struct drm_display_mode *mode;

	int cnt = 0, rc = 0, mixer_width = 0, i, z_pos;

	struct dpu_multirect_plane_states multirect_plane[DPU_STAGE_MAX * 2];
	int multirect_count = 0;
	const struct drm_plane_state *pipe_staged[SSPP_MAX];
	int left_zpos_cnt = 0, right_zpos_cnt = 0;
	struct drm_rect crtc_rect = { 0 };
	bool needs_dirtyfb = dpu_crtc_needs_dirtyfb(crtc_state);

	pstates = kzalloc(sizeof(*pstates) * DPU_STAGE_MAX * 4, GFP_KERNEL);

	if (!crtc_state->enable || !crtc_state->active) {
		DRM_DEBUG_ATOMIC("crtc%d -> enable %d, active %d, skip atomic_check\n",
				crtc->base.id, crtc_state->enable,
				crtc_state->active);
		memset(&cstate->new_perf, 0, sizeof(cstate->new_perf));
		goto end;
	}

	mode = &crtc_state->adjusted_mode;
	DRM_DEBUG_ATOMIC("%s: check\n", dpu_crtc->name);

	/* force a full mode set if active state changed */
	if (crtc_state->active_changed)
		crtc_state->mode_changed = true;

	memset(pipe_staged, 0, sizeof(pipe_staged));

	if (cstate->num_mixers) {
		mixer_width = mode->hdisplay / cstate->num_mixers;

		_dpu_crtc_setup_lm_bounds(crtc, crtc_state);
	}

	crtc_rect.x2 = mode->hdisplay;
	crtc_rect.y2 = mode->vdisplay;

	/* get plane state for all drm planes associated with crtc state */
	drm_atomic_crtc_state_for_each_plane_state(plane, pstate, crtc_state) {
		struct dpu_plane_state *dpu_pstate = to_dpu_plane_state(pstate);
		struct drm_rect dst, clip = crtc_rect;

		if (IS_ERR_OR_NULL(pstate)) {
			rc = PTR_ERR(pstate);
			DPU_ERROR("%s: failed to get plane%d state, %d\n",
					dpu_crtc->name, plane->base.id, rc);
			goto end;
		}
		if (cnt >= DPU_STAGE_MAX * 4)
			continue;

		if (!pstate->visible)
			continue;

		pstates[cnt].dpu_pstate = dpu_pstate;
		pstates[cnt].drm_pstate = pstate;
		pstates[cnt].stage = pstate->normalized_zpos;
		pstates[cnt].pipe_id = dpu_plane_pipe(plane);

		dpu_pstate->needs_dirtyfb = needs_dirtyfb;

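		/*
		 * The same SSPP staged twice on this crtc forms a multirect
		 * pair: remember the first plane state in pipe_staged[] and
		 * pair it with the second one.
		 */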
		if (pipe_staged[pstates[cnt].pipe_id]) {
			multirect_plane[multirect_count].r0 =
				pipe_staged[pstates[cnt].pipe_id];
			multirect_plane[multirect_count].r1 = pstate;
			multirect_count++;

			pipe_staged[pstates[cnt].pipe_id] = NULL;
		} else {
			pipe_staged[pstates[cnt].pipe_id] = pstate;
		}

		cnt++;

		dst = drm_plane_state_dest(pstate);
		if (!drm_rect_intersect(&clip, &dst)) {
			DPU_ERROR("invalid vertical/horizontal destination\n");
			DPU_ERROR("display: " DRM_RECT_FMT " plane: "
				  DRM_RECT_FMT "\n", DRM_RECT_ARG(&crtc_rect),
				  DRM_RECT_ARG(&dst));
			rc = -E2BIG;
			goto end;
		}
	}

	for (i = 1; i < SSPP_MAX; i++) {
		if (pipe_staged[i]) {
			dpu_plane_clear_multirect(pipe_staged[i]);

			if (is_dpu_plane_virtual(pipe_staged[i]->plane)) {
				DPU_ERROR(
					"r1 only virt plane:%d not supported\n",
					pipe_staged[i]->plane->base.id);
				rc = -EINVAL;
				goto end;
			}
		}
	}

	z_pos = -1;
	for (i = 0; i < cnt; i++) {
		/* reset counts at every new blend stage */
		if (pstates[i].stage != z_pos) {
			left_zpos_cnt = 0;
			right_zpos_cnt = 0;
			z_pos = pstates[i].stage;
		}

		/* verify z_pos setting before using it */
		if (z_pos >= DPU_STAGE_MAX - DPU_STAGE_0) {
			DPU_ERROR("> %d plane stages assigned\n",
				  DPU_STAGE_MAX - DPU_STAGE_0);
			rc = -EINVAL;
			goto end;
		} else if (pstates[i].drm_pstate->crtc_x < mixer_width) {
			if (left_zpos_cnt == 2) {
				DPU_ERROR("> 2 planes @ stage %d on left\n",
					z_pos);
				rc = -EINVAL;
				goto end;
			}
			left_zpos_cnt++;

		} else {
			if (right_zpos_cnt == 2) {
				DPU_ERROR("> 2 planes @ stage %d on right\n",
					z_pos);
				rc = -EINVAL;
				goto end;
			}
			right_zpos_cnt++;
		}

		pstates[i].dpu_pstate->stage = z_pos + DPU_STAGE_0;
		DRM_DEBUG_ATOMIC("%s: zpos %d\n", dpu_crtc->name, z_pos);
	}

	for (i = 0; i < multirect_count; i++) {
		if (dpu_plane_validate_multirect_v2(&multirect_plane[i])) {
			DPU_ERROR(
			"multirect validation failed for planes (%d - %d)\n",
					multirect_plane[i].r0->plane->base.id,
					multirect_plane[i].r1->plane->base.id);
			rc = -EINVAL;
			goto end;
		}
	}

	atomic_inc(&_dpu_crtc_get_kms(crtc)->bandwidth_ref);

	rc = dpu_core_perf_crtc_check(crtc, crtc_state);
	if (rc) {
		DPU_ERROR("crtc%d failed performance check %d\n",
				crtc->base.id, rc);
		goto end;
	}

	/* validate source split:
	 * use pstates sorted by stage to check planes on same stage
	 * we assume that all pipes are in source split so it's valid to compare
	 * without taking into account left/right mixer placement
	 */
	for (i = 1; i < cnt; i++) {
		struct plane_state *prv_pstate, *cur_pstate;
		struct drm_rect left_rect, right_rect;
		int32_t left_pid, right_pid;
		int32_t stage;

		prv_pstate = &pstates[i - 1];
		cur_pstate = &pstates[i];
		if (prv_pstate->stage != cur_pstate->stage)
			continue;

		stage = cur_pstate->stage;

		left_pid = prv_pstate->dpu_pstate->base.plane->base.id;
		left_rect = drm_plane_state_dest(prv_pstate->drm_pstate);

		right_pid = cur_pstate->dpu_pstate->base.plane->base.id;
		right_rect = drm_plane_state_dest(cur_pstate->drm_pstate);

		if (right_rect.x1 < left_rect.x1) {
			swap(left_pid, right_pid);
			swap(left_rect, right_rect);
		}

		/**
		 * - planes are enumerated in pipe-priority order such that
		 *   planes with lower drm_id must be left-most in a shared
		 *   blend-stage when using source split.
		 * - planes in source split must be contiguous in width
		 * - planes in source split must have same dest yoff and height
		 */
		if (right_pid < left_pid) {
			DPU_ERROR(
				"invalid src split cfg. priority mismatch. stage: %d left: %d right: %d\n",
				stage, left_pid, right_pid);
			rc = -EINVAL;
			goto end;
		} else if (right_rect.x1 != drm_rect_width(&left_rect)) {
			DPU_ERROR("non-contiguous coordinates for src split. "
				  "stage: %d left: " DRM_RECT_FMT " right: "
				  DRM_RECT_FMT "\n", stage,
				  DRM_RECT_ARG(&left_rect),
				  DRM_RECT_ARG(&right_rect));
			rc = -EINVAL;
			goto end;
		} else if (left_rect.y1 != right_rect.y1 ||
			   drm_rect_height(&left_rect) != drm_rect_height(&right_rect)) {
			DPU_ERROR("source split at stage: %d. invalid "
				  "yoff/height: left: " DRM_RECT_FMT " right: "
				  DRM_RECT_FMT "\n", stage,
				  DRM_RECT_ARG(&left_rect),
				  DRM_RECT_ARG(&right_rect));
			rc = -EINVAL;
			goto end;
		}
	}

end:
	kfree(pstates);
	return rc;
}

int dpu_crtc_vblank(struct drm_crtc *crtc, bool en)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	struct drm_encoder *enc;

	trace_dpu_crtc_vblank(DRMID(&dpu_crtc->base), en, dpu_crtc);

	/*
	 * Normally we would iterate through encoder_mask in crtc state to find
	 * attached encoders. In this case, we might be disabling vblank _after_
	 * encoder_mask has been cleared.
	 *
	 * Instead, we "assign" a crtc to the encoder in enable and clear it in
	 * disable (which is also after encoder_mask is cleared). So instead of
	 * using encoder mask, we'll ask the encoder to toggle itself iff it's
	 * currently assigned to our crtc.
	 *
	 * Note also that this function cannot be called while crtc is disabled
	 * since we use drm_crtc_vblank_on/off. So we don't need to worry
	 * about the assigned crtcs being inconsistent with the current state
	 * (which means no need to worry about modeset locks).
	 */
	list_for_each_entry(enc, &crtc->dev->mode_config.encoder_list, head) {
		trace_dpu_crtc_vblank_enable(DRMID(crtc), DRMID(enc), en,
					     dpu_crtc);

		dpu_encoder_toggle_vblank_for_crtc(enc, crtc, en);
	}

	return 0;
}

#ifdef CONFIG_DEBUG_FS
static int _dpu_debugfs_status_show(struct seq_file *s, void *data)
{
	struct dpu_crtc *dpu_crtc;
	struct dpu_plane_state *pstate = NULL;
	struct dpu_crtc_mixer *m;

	struct drm_crtc *crtc;
	struct drm_plane *plane;
	struct drm_display_mode *mode;
	struct drm_framebuffer *fb;
	struct drm_plane_state *state;
	struct dpu_crtc_state *cstate;

	int i, out_width;

	dpu_crtc = s->private;
	crtc = &dpu_crtc->base;

	drm_modeset_lock_all(crtc->dev);
	cstate = to_dpu_crtc_state(crtc->state);

	mode = &crtc->state->adjusted_mode;
	out_width = mode->hdisplay / cstate->num_mixers;

	seq_printf(s, "crtc:%d width:%d height:%d\n", crtc->base.id,
				mode->hdisplay, mode->vdisplay);

	seq_puts(s, "\n");

	for (i = 0; i < cstate->num_mixers; ++i) {
		m = &cstate->mixers[i];
		seq_printf(s, "\tmixer:%d ctl:%d width:%d height:%d\n",
			m->hw_lm->idx - LM_0, m->lm_ctl->idx - CTL_0,
			out_width, mode->vdisplay);
	}

	seq_puts(s, "\n");

	drm_atomic_crtc_for_each_plane(plane, crtc) {
		pstate = to_dpu_plane_state(plane->state);
		state = plane->state;

		if (!pstate || !state)
			continue;

		seq_printf(s, "\tplane:%u stage:%d\n", plane->base.id,
			pstate->stage);

		if (plane->state->fb) {
			fb = plane->state->fb;

			seq_printf(s, "\tfb:%d image format:%4.4s wxh:%ux%u ",
				fb->base.id, (char *) &fb->format->format,
				fb->width, fb->height);
			for (i = 0; i < ARRAY_SIZE(fb->format->cpp); ++i)
				seq_printf(s, "cpp[%d]:%u ",
						i, fb->format->cpp[i]);
			seq_puts(s, "\n\t");

			seq_printf(s, "modifier:%8llu ", fb->modifier);
			seq_puts(s, "\n");

			seq_puts(s, "\t");
			for (i = 0; i < ARRAY_SIZE(fb->pitches); i++)
				seq_printf(s, "pitches[%d]:%8u ", i,
							fb->pitches[i]);
			seq_puts(s, "\n");

			seq_puts(s, "\t");
			for (i = 0; i < ARRAY_SIZE(fb->offsets); i++)
				seq_printf(s, "offsets[%d]:%8u ", i,
							fb->offsets[i]);
			seq_puts(s, "\n");
		}

		seq_printf(s, "\tsrc_x:%4d src_y:%4d src_w:%4d src_h:%4d\n",
			state->src_x, state->src_y, state->src_w, state->src_h);

		seq_printf(s, "\tdst x:%4d dst_y:%4d dst_w:%4d dst_h:%4d\n",
			state->crtc_x, state->crtc_y, state->crtc_w,
			state->crtc_h);
		seq_printf(s, "\tmultirect: mode: %d index: %d\n",
			pstate->multirect_mode, pstate->multirect_index);

		seq_puts(s, "\n");
	}
	if (dpu_crtc->vblank_cb_count) {
		ktime_t diff = ktime_sub(ktime_get(), dpu_crtc->vblank_cb_time);
		s64 diff_ms = ktime_to_ms(diff);
		s64 fps = diff_ms ? div_s64(
				dpu_crtc->vblank_cb_count * 1000, diff_ms) : 0;

		seq_printf(s,
			"vblank fps:%lld count:%u total:%llums total_framecount:%llu\n",
				fps, dpu_crtc->vblank_cb_count,
				ktime_to_ms(diff), dpu_crtc->play_count);

		/* reset time & count for next measurement */
		dpu_crtc->vblank_cb_count = 0;
		dpu_crtc->vblank_cb_time = ktime_set(0, 0);
	}

	drm_modeset_unlock_all(crtc->dev);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(_dpu_debugfs_status);

static int dpu_crtc_debugfs_state_show(struct seq_file *s, void *v)
{
	struct drm_crtc *crtc = (struct drm_crtc *) s->private;
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);

	seq_printf(s, "client type: %d\n", dpu_crtc_get_client_type(crtc));
	seq_printf(s, "intf_mode: %d\n", dpu_crtc_get_intf_mode(crtc));
	seq_printf(s, "core_clk_rate: %llu\n",
			dpu_crtc->cur_perf.core_clk_rate);
	seq_printf(s, "bw_ctl: %llu\n", dpu_crtc->cur_perf.bw_ctl);
	seq_printf(s, "max_per_pipe_ib: %llu\n",
				dpu_crtc->cur_perf.max_per_pipe_ib);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(dpu_crtc_debugfs_state);

static int _dpu_crtc_init_debugfs(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
	struct dentry *debugfs_root;

	debugfs_root = debugfs_create_dir(dpu_crtc->name,
			crtc->dev->primary->debugfs_root);

	debugfs_create_file("status", 0400,
			debugfs_root,
			dpu_crtc, &_dpu_debugfs_status_fops);
	debugfs_create_file("state", 0600,
			debugfs_root,
			&dpu_crtc->base,
			&dpu_crtc_debugfs_state_fops);

	return 0;
}
#else
static int _dpu_crtc_init_debugfs(struct drm_crtc *crtc)
{
	return 0;
}
#endif /* CONFIG_DEBUG_FS */

static int dpu_crtc_late_register(struct drm_crtc *crtc)
{
	return _dpu_crtc_init_debugfs(crtc);
}

static const struct drm_crtc_funcs dpu_crtc_funcs = {
	.set_config = drm_atomic_helper_set_config,
	.destroy = dpu_crtc_destroy,
	.page_flip = drm_atomic_helper_page_flip,
	.reset = dpu_crtc_reset,
	.atomic_duplicate_state = dpu_crtc_duplicate_state,
	.atomic_destroy_state = dpu_crtc_destroy_state,
	.atomic_print_state = dpu_crtc_atomic_print_state,
	.late_register = dpu_crtc_late_register,
	.verify_crc_source = dpu_crtc_verify_crc_source,
	.set_crc_source = dpu_crtc_set_crc_source,
	.enable_vblank  = msm_crtc_enable_vblank,
	.disable_vblank = msm_crtc_disable_vblank,
	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
	.get_vblank_counter = dpu_crtc_get_vblank_counter,
};

static const struct drm_crtc_helper_funcs dpu_crtc_helper_funcs = {
	.atomic_disable = dpu_crtc_disable,
	.atomic_enable = dpu_crtc_enable,
	.atomic_check = dpu_crtc_atomic_check,
	.atomic_begin = dpu_crtc_atomic_begin,
	.atomic_flush = dpu_crtc_atomic_flush,
	.get_scanout_position = dpu_crtc_get_scanout_position,
};

/* initialize crtc */
struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane,
				struct drm_plane *cursor)
{
	struct drm_crtc *crtc = NULL;
	struct dpu_crtc *dpu_crtc = NULL;
	int i;

	dpu_crtc = kzalloc(sizeof(*dpu_crtc), GFP_KERNEL);
	if (!dpu_crtc)
		return ERR_PTR(-ENOMEM);

	crtc = &dpu_crtc->base;
	crtc->dev = dev;

	spin_lock_init(&dpu_crtc->spin_lock);
	atomic_set(&dpu_crtc->frame_pending, 0);

	init_completion(&dpu_crtc->frame_done_comp);

	INIT_LIST_HEAD(&dpu_crtc->frame_event_list);

	for (i = 0; i < ARRAY_SIZE(dpu_crtc->frame_events); i++) {
		INIT_LIST_HEAD(&dpu_crtc->frame_events[i].list);
		list_add(&dpu_crtc->frame_events[i].list,
				&dpu_crtc->frame_event_list);
		kthread_init_work(&dpu_crtc->frame_events[i].work,
				dpu_crtc_frame_event_work);
	}

	drm_crtc_init_with_planes(dev, crtc, plane, cursor, &dpu_crtc_funcs,
				NULL);

	drm_crtc_helper_add(crtc, &dpu_crtc_helper_funcs);

	drm_crtc_enable_color_mgmt(crtc, 0, true, 0);

	/* save user friendly CRTC name for later */
	snprintf(dpu_crtc->name, DPU_CRTC_NAME_SIZE, "crtc%u", crtc->base.id);

	/* initialize event handling */
	spin_lock_init(&dpu_crtc->event_lock);

	DRM_DEBUG_KMS("%s: successfully initialized crtc\n", dpu_crtc->name);
	return crtc;
}