// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2014-2015 The Linux Foundation. All rights reserved.
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include <drm/drm_atomic.h>
#include <drm/drm_blend.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_print.h>

#include "mdp5_kms.h"

struct mdp5_plane {
	struct drm_plane base;

	uint32_t nformats;
	uint32_t formats[32];
};
#define to_mdp5_plane(x) container_of(x, struct mdp5_plane, base)

static int mdp5_plane_mode_set(struct drm_plane *plane,
		struct drm_crtc *crtc, struct drm_framebuffer *fb,
		struct drm_rect *src, struct drm_rect *dest);

static struct mdp5_kms *get_kms(struct drm_plane *plane)
{
	struct msm_drm_private *priv = plane->dev->dev_private;
	return to_mdp5_kms(to_mdp_kms(priv->kms));
}

static bool plane_enabled(struct drm_plane_state *state)
{
	return state->visible;
}

static void mdp5_plane_destroy(struct drm_plane *plane)
{
	struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);

	drm_plane_cleanup(plane);

	kfree(mdp5_plane);
}

/* helper to install properties which are common to planes and crtcs */
static void mdp5_plane_install_properties(struct drm_plane *plane,
		struct drm_mode_object *obj)
{
	unsigned int zpos;

	drm_plane_create_rotation_property(plane,
			DRM_MODE_ROTATE_0,
			DRM_MODE_ROTATE_0 |
			DRM_MODE_ROTATE_180 |
			DRM_MODE_REFLECT_X |
			DRM_MODE_REFLECT_Y);
	drm_plane_create_alpha_property(plane);
	drm_plane_create_blend_mode_property(plane,
			BIT(DRM_MODE_BLEND_PIXEL_NONE) |
			BIT(DRM_MODE_BLEND_PREMULTI) |
			BIT(DRM_MODE_BLEND_COVERAGE));

	if (plane->type == DRM_PLANE_TYPE_PRIMARY)
		zpos = STAGE_BASE;
	else
		zpos = STAGE0 + drm_plane_index(plane);
	drm_plane_create_zpos_property(plane, zpos, 1, 255);
}

static void
mdp5_plane_atomic_print_state(struct drm_printer *p,
		const struct drm_plane_state *state)
{
	struct mdp5_plane_state *pstate = to_mdp5_plane_state(state);
	struct mdp5_kms *mdp5_kms = get_kms(state->plane);

	drm_printf(p, "\thwpipe=%s\n", pstate->hwpipe ?
			pstate->hwpipe->name : "(null)");
	if (mdp5_kms->caps & MDP_CAP_SRC_SPLIT)
		drm_printf(p, "\tright-hwpipe=%s\n",
			   pstate->r_hwpipe ? pstate->r_hwpipe->name :
					      "(null)");
	drm_printf(p, "\tblend_mode=%u\n", pstate->base.pixel_blend_mode);
	drm_printf(p, "\tzpos=%u\n", pstate->base.zpos);
	drm_printf(p, "\tnormalized_zpos=%u\n", pstate->base.normalized_zpos);
	drm_printf(p, "\talpha=%u\n", pstate->base.alpha);
	drm_printf(p, "\tstage=%s\n", stage2name(pstate->stage));
}

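/*
 * Free any existing plane state and install a freshly zeroed
 * mdp5_plane_state, so the driver-specific fields (hwpipe, r_hwpipe, stage)
 * start out clear.
 */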
static void mdp5_plane_reset(struct drm_plane *plane)
{
	struct mdp5_plane_state *mdp5_state;

	if (plane->state)
		__drm_atomic_helper_plane_destroy_state(plane->state);

	kfree(to_mdp5_plane_state(plane->state));
	plane->state = NULL;
	mdp5_state = kzalloc(sizeof(*mdp5_state), GFP_KERNEL);
	if (!mdp5_state)
		return;
	__drm_atomic_helper_plane_reset(plane, &mdp5_state->base);
}

static struct drm_plane_state *
mdp5_plane_duplicate_state(struct drm_plane *plane)
{
	struct mdp5_plane_state *mdp5_state;

	if (WARN_ON(!plane->state))
		return NULL;

	mdp5_state = kmemdup(to_mdp5_plane_state(plane->state),
			sizeof(*mdp5_state), GFP_KERNEL);
	if (!mdp5_state)
		return NULL;

	__drm_atomic_helper_plane_duplicate_state(plane, &mdp5_state->base);

	return &mdp5_state->base;
}

static void mdp5_plane_destroy_state(struct drm_plane *plane,
		struct drm_plane_state *state)
{
	struct mdp5_plane_state *pstate = to_mdp5_plane_state(state);

	__drm_atomic_helper_plane_destroy_state(state);

	kfree(pstate);
}

static const struct drm_plane_funcs mdp5_plane_funcs = {
		.update_plane = drm_atomic_helper_update_plane,
		.disable_plane = drm_atomic_helper_disable_plane,
		.destroy = mdp5_plane_destroy,
		.reset = mdp5_plane_reset,
		.atomic_duplicate_state = mdp5_plane_duplicate_state,
		.atomic_destroy_state = mdp5_plane_destroy_state,
		.atomic_print_state = mdp5_plane_atomic_print_state,
};

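/*
 * Set up the new framebuffer for scanout: attach the implicit fence via
 * drm_gem_plane_helper_prepare_fb() and prepare the backing buffer through
 * msm_framebuffer_prepare(), honouring the dirtyfb requirement recorded in
 * the plane state.
 */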
static int mdp5_plane_prepare_fb(struct drm_plane *plane,
				 struct drm_plane_state *new_state)
{
	struct msm_drm_private *priv = plane->dev->dev_private;
	struct msm_kms *kms = priv->kms;
	bool needs_dirtyfb = to_mdp5_plane_state(new_state)->needs_dirtyfb;

	if (!new_state->fb)
		return 0;

	drm_gem_plane_helper_prepare_fb(plane, new_state);

	return msm_framebuffer_prepare(new_state->fb, kms->aspace, needs_dirtyfb);
}

static void mdp5_plane_cleanup_fb(struct drm_plane *plane,
				  struct drm_plane_state *old_state)
{
	struct mdp5_kms *mdp5_kms = get_kms(plane);
	struct msm_kms *kms = &mdp5_kms->base.base;
	struct drm_framebuffer *fb = old_state->fb;
	bool needed_dirtyfb = to_mdp5_plane_state(old_state)->needs_dirtyfb;

	if (!fb)
		return;

	DBG("%s: cleanup: FB[%u]", plane->name, fb->base.id);
	msm_framebuffer_cleanup(fb, kms->aspace, needed_dirtyfb);
}

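/*
 * Validate a plane state against its CRTC state and (re)assign hardware
 * pipes: source dimensions are checked against the Layer Mixer limits
 * (optionally spilling into a second "right" hwpipe when source split is
 * supported), scaling is bounded to 1/8x..8x, and a hwpipe providing the
 * required caps (scale/CSC/flip/cursor) and SMP block configuration is
 * (re)allocated when the current one no longer fits.
 */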
static int mdp5_plane_atomic_check_with_state(struct drm_crtc_state *crtc_state,
					      struct drm_plane_state *state)
{
	struct mdp5_plane_state *mdp5_state = to_mdp5_plane_state(state);
	struct drm_plane *plane = state->plane;
	struct drm_plane_state *old_state = plane->state;
	struct mdp5_cfg *config = mdp5_cfg_get_config(get_kms(plane)->cfg);
	bool new_hwpipe = false;
	bool need_right_hwpipe = false;
	uint32_t max_width, max_height;
	bool out_of_bounds = false;
	uint32_t caps = 0;
	int min_scale, max_scale;
	int ret;

	DBG("%s: check (%d -> %d)", plane->name,
			plane_enabled(old_state), plane_enabled(state));

	max_width = config->hw->lm.max_width << 16;
	max_height = config->hw->lm.max_height << 16;

	/* Make sure source dimensions are within bounds. */
	if (state->src_h > max_height)
		out_of_bounds = true;

	if (state->src_w > max_width) {
		/* If source split is supported, we can go up to 2x
		 * the max LM width, but we'd need to stage another
		 * hwpipe to the right LM. So, the drm_plane would
		 * consist of 2 hwpipes.
		 */
		if (config->hw->mdp.caps & MDP_CAP_SRC_SPLIT &&
		    (state->src_w <= 2 * max_width))
			need_right_hwpipe = true;
		else
			out_of_bounds = true;
	}

	if (out_of_bounds) {
		struct drm_rect src = drm_plane_state_src(state);
		DBG("Invalid source size "DRM_RECT_FP_FMT,
				DRM_RECT_FP_ARG(&src));
		return -ERANGE;
	}

	min_scale = FRAC_16_16(1, 8);
	max_scale = FRAC_16_16(8, 1);

	ret = drm_atomic_helper_check_plane_state(state, crtc_state,
						  min_scale, max_scale,
						  true, true);
	if (ret)
		return ret;

	if (plane_enabled(state)) {
		unsigned int rotation;
		const struct mdp_format *format;
		struct mdp5_kms *mdp5_kms = get_kms(plane);
		uint32_t blkcfg = 0;

		format = to_mdp_format(msm_framebuffer_format(state->fb));
		if (MDP_FORMAT_IS_YUV(format))
			caps |= MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_CSC;

		if (((state->src_w >> 16) != state->crtc_w) ||
		    ((state->src_h >> 16) != state->crtc_h))
			caps |= MDP_PIPE_CAP_SCALE;

		rotation = drm_rotation_simplify(state->rotation,
						 DRM_MODE_ROTATE_0 |
						 DRM_MODE_REFLECT_X |
						 DRM_MODE_REFLECT_Y);

		if (rotation & DRM_MODE_REFLECT_X)
			caps |= MDP_PIPE_CAP_HFLIP;

		if (rotation & DRM_MODE_REFLECT_Y)
			caps |= MDP_PIPE_CAP_VFLIP;

		if (plane->type == DRM_PLANE_TYPE_CURSOR)
			caps |= MDP_PIPE_CAP_CURSOR;

		/* (re)allocate hw pipe if we don't have one or caps-mismatch: */
		if (!mdp5_state->hwpipe || (caps & ~mdp5_state->hwpipe->caps))
			new_hwpipe = true;

		/*
		 * (re)allocate hw pipes if we're either requesting 2 hw pipes
		 * or we're switching from 2 hw pipes to 1 hw pipe because the
		 * new src_w can be supported by 1 hw pipe itself.
		 */
		if ((need_right_hwpipe && !mdp5_state->r_hwpipe) ||
		    (!need_right_hwpipe && mdp5_state->r_hwpipe))
			new_hwpipe = true;

		if (mdp5_kms->smp) {
			const struct mdp_format *format =
				to_mdp_format(msm_framebuffer_format(state->fb));

			blkcfg = mdp5_smp_calculate(mdp5_kms->smp, format,
					state->src_w >> 16, false);

			if (mdp5_state->hwpipe && (mdp5_state->hwpipe->blkcfg != blkcfg))
				new_hwpipe = true;
		}

		/* (re)assign hwpipe if needed, otherwise keep old one: */
		if (new_hwpipe) {
			/* TODO maybe we want to re-assign hwpipe sometimes
			 * in cases when we no longer need some caps, to make
			 * it available for other planes?
			 */
			struct mdp5_hw_pipe *old_hwpipe = mdp5_state->hwpipe;
			struct mdp5_hw_pipe *old_right_hwpipe =
							  mdp5_state->r_hwpipe;
			struct mdp5_hw_pipe *new_hwpipe = NULL;
			struct mdp5_hw_pipe *new_right_hwpipe = NULL;

			ret = mdp5_pipe_assign(state->state, plane, caps,
					       blkcfg, &new_hwpipe,
					       need_right_hwpipe ?
					       &new_right_hwpipe : NULL);
			if (ret) {
				DBG("%s: failed to assign hwpipe(s)!",
				    plane->name);
				return ret;
			}

			mdp5_state->hwpipe = new_hwpipe;
			if (need_right_hwpipe)
				mdp5_state->r_hwpipe = new_right_hwpipe;
			else
				/*
				 * set it to NULL so that the driver knows we
				 * don't have a right hwpipe when committing a
				 * new state
				 */
				mdp5_state->r_hwpipe = NULL;

			ret = mdp5_pipe_release(state->state, old_hwpipe);
			if (ret)
				return ret;

			ret = mdp5_pipe_release(state->state, old_right_hwpipe);
			if (ret)
				return ret;
		}
	} else {
		ret = mdp5_pipe_release(state->state, mdp5_state->hwpipe);
		if (ret)
			return ret;

		ret = mdp5_pipe_release(state->state, mdp5_state->r_hwpipe);
		if (ret)
			return ret;

		mdp5_state->hwpipe = mdp5_state->r_hwpipe = NULL;
	}

	return 0;
}

static int mdp5_plane_atomic_check(struct drm_plane *plane,
				   struct drm_atomic_state *state)
{
	struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(state,
										 plane);
	struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
										  plane);
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;

	crtc = new_plane_state->crtc ? new_plane_state->crtc : old_plane_state->crtc;
	if (!crtc)
		return 0;

	crtc_state = drm_atomic_get_existing_crtc_state(state, crtc);
	if (WARN_ON(!crtc_state))
		return -EINVAL;

	return mdp5_plane_atomic_check_with_state(crtc_state, new_plane_state);
}

static void mdp5_plane_atomic_update(struct drm_plane *plane,
				     struct drm_atomic_state *state)
{
	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
									   plane);

	DBG("%s: update", plane->name);

	if (plane_enabled(new_state)) {
		int ret;

		ret = mdp5_plane_mode_set(plane,
				new_state->crtc, new_state->fb,
				&new_state->src, &new_state->dst);
		/* atomic_check should have ensured that this doesn't fail */
		WARN_ON(ret < 0);
	}
}

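/*
 * Fast-path (async) check: only plain position updates (crtc x/y, src x/y)
 * of an already visible plane with an assigned hwpipe and an unchanged
 * framebuffer qualify; anything else must go through a full atomic commit.
 */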
static int mdp5_plane_atomic_async_check(struct drm_plane *plane,
					 struct drm_atomic_state *state)
{
	struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
										  plane);
	struct mdp5_plane_state *mdp5_state = to_mdp5_plane_state(new_plane_state);
	struct drm_crtc_state *crtc_state;
	int min_scale, max_scale;
	int ret;

	crtc_state = drm_atomic_get_existing_crtc_state(state,
							new_plane_state->crtc);
	if (WARN_ON(!crtc_state))
		return -EINVAL;

	if (!crtc_state->active)
		return -EINVAL;

	/* don't use fast path if we don't have a hwpipe allocated yet */
	if (!mdp5_state->hwpipe)
		return -EINVAL;

	/* only allow changing of position (crtc x/y or src x/y) in fast path */
	if (plane->state->crtc != new_plane_state->crtc ||
	    plane->state->src_w != new_plane_state->src_w ||
	    plane->state->src_h != new_plane_state->src_h ||
	    plane->state->crtc_w != new_plane_state->crtc_w ||
	    plane->state->crtc_h != new_plane_state->crtc_h ||
	    !plane->state->fb ||
	    plane->state->fb != new_plane_state->fb)
		return -EINVAL;

	min_scale = FRAC_16_16(1, 8);
	max_scale = FRAC_16_16(8, 1);

	ret = drm_atomic_helper_check_plane_state(new_plane_state, crtc_state,
						  min_scale, max_scale,
						  true, true);
	if (ret)
		return ret;

	/*
	 * If the visibility of the plane changes (i.e. if the cursor is
	 * clipped out completely), we can't take the async path, because
	 * we need to stage/unstage the plane from the Layer Mixer(s), and
	 * to assign/unassign the hwpipe(s) tied to the plane. We avoid
	 * taking the fast path for both these reasons.
	 */
	if (new_plane_state->visible != plane->state->visible)
		return -EINVAL;

	return 0;
}

static void mdp5_plane_atomic_async_update(struct drm_plane *plane,
					   struct drm_atomic_state *state)
{
	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
									    plane);
	struct drm_framebuffer *old_fb = plane->state->fb;

	plane->state->src_x = new_state->src_x;
	plane->state->src_y = new_state->src_y;
	plane->state->crtc_x = new_state->crtc_x;
	plane->state->crtc_y = new_state->crtc_y;

	if (plane_enabled(new_state)) {
		struct mdp5_ctl *ctl;
		struct mdp5_pipeline *pipeline =
					mdp5_crtc_get_pipeline(new_state->crtc);
		int ret;

		ret = mdp5_plane_mode_set(plane, new_state->crtc, new_state->fb,
					  &new_state->src, &new_state->dst);
		WARN_ON(ret < 0);

		ctl = mdp5_crtc_get_ctl(new_state->crtc);

		mdp5_ctl_commit(ctl, pipeline, mdp5_plane_get_flush(plane), true);
	}

	*to_mdp5_plane_state(plane->state) =
		*to_mdp5_plane_state(new_state);

	new_state->fb = old_fb;
}

static const struct drm_plane_helper_funcs mdp5_plane_helper_funcs = {
		.prepare_fb = mdp5_plane_prepare_fb,
		.cleanup_fb = mdp5_plane_cleanup_fb,
		.atomic_check = mdp5_plane_atomic_check,
		.atomic_update = mdp5_plane_atomic_update,
		.atomic_async_check = mdp5_plane_atomic_async_check,
		.atomic_async_update = mdp5_plane_atomic_async_update,
};

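/* Program the per-plane strides and scanout addresses (up to 4 fb planes). */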
static void set_scanout_locked(struct mdp5_kms *mdp5_kms,
			       enum mdp5_pipe pipe,
			       struct drm_framebuffer *fb)
{
	struct msm_kms *kms = &mdp5_kms->base.base;

	mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_STRIDE_A(pipe),
			MDP5_PIPE_SRC_STRIDE_A_P0(fb->pitches[0]) |
			MDP5_PIPE_SRC_STRIDE_A_P1(fb->pitches[1]));

	mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_STRIDE_B(pipe),
			MDP5_PIPE_SRC_STRIDE_B_P2(fb->pitches[2]) |
			MDP5_PIPE_SRC_STRIDE_B_P3(fb->pitches[3]));

	mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC0_ADDR(pipe),
			msm_framebuffer_iova(fb, kms->aspace, 0));
	mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC1_ADDR(pipe),
			msm_framebuffer_iova(fb, kms->aspace, 1));
	mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC2_ADDR(pipe),
			msm_framebuffer_iova(fb, kms->aspace, 2));
	mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC3_ADDR(pipe),
			msm_framebuffer_iova(fb, kms->aspace, 3));
}

/* Note: mdp5_plane->pipe_lock must be locked */
static void csc_disable(struct mdp5_kms *mdp5_kms, enum mdp5_pipe pipe)
{
	uint32_t value = mdp5_read(mdp5_kms, REG_MDP5_PIPE_OP_MODE(pipe)) &
			 ~MDP5_PIPE_OP_MODE_CSC_1_EN;

	mdp5_write(mdp5_kms, REG_MDP5_PIPE_OP_MODE(pipe), value);
}

/* Note: mdp5_plane->pipe_lock must be locked */
static void csc_enable(struct mdp5_kms *mdp5_kms, enum mdp5_pipe pipe,
		struct csc_cfg *csc)
{
	uint32_t i, mode = 0; /* RGB, no CSC */
	uint32_t *matrix;

	if (unlikely(!csc))
		return;

	if ((csc->type == CSC_YUV2RGB) || (CSC_YUV2YUV == csc->type))
		mode |= MDP5_PIPE_OP_MODE_CSC_SRC_DATA_FORMAT(DATA_FORMAT_YUV);
	if ((csc->type == CSC_RGB2YUV) || (CSC_YUV2YUV == csc->type))
		mode |= MDP5_PIPE_OP_MODE_CSC_DST_DATA_FORMAT(DATA_FORMAT_YUV);
	mode |= MDP5_PIPE_OP_MODE_CSC_1_EN;
	mdp5_write(mdp5_kms, REG_MDP5_PIPE_OP_MODE(pipe), mode);

	matrix = csc->matrix;
	mdp5_write(mdp5_kms, REG_MDP5_PIPE_CSC_1_MATRIX_COEFF_0(pipe),
			MDP5_PIPE_CSC_1_MATRIX_COEFF_0_COEFF_11(matrix[0]) |
			MDP5_PIPE_CSC_1_MATRIX_COEFF_0_COEFF_12(matrix[1]));
	mdp5_write(mdp5_kms, REG_MDP5_PIPE_CSC_1_MATRIX_COEFF_1(pipe),
			MDP5_PIPE_CSC_1_MATRIX_COEFF_1_COEFF_13(matrix[2]) |
			MDP5_PIPE_CSC_1_MATRIX_COEFF_1_COEFF_21(matrix[3]));
	mdp5_write(mdp5_kms, REG_MDP5_PIPE_CSC_1_MATRIX_COEFF_2(pipe),
			MDP5_PIPE_CSC_1_MATRIX_COEFF_2_COEFF_22(matrix[4]) |
			MDP5_PIPE_CSC_1_MATRIX_COEFF_2_COEFF_23(matrix[5]));
	mdp5_write(mdp5_kms, REG_MDP5_PIPE_CSC_1_MATRIX_COEFF_3(pipe),
			MDP5_PIPE_CSC_1_MATRIX_COEFF_3_COEFF_31(matrix[6]) |
			MDP5_PIPE_CSC_1_MATRIX_COEFF_3_COEFF_32(matrix[7]));
	mdp5_write(mdp5_kms, REG_MDP5_PIPE_CSC_1_MATRIX_COEFF_4(pipe),
			MDP5_PIPE_CSC_1_MATRIX_COEFF_4_COEFF_33(matrix[8]));

	for (i = 0; i < ARRAY_SIZE(csc->pre_bias); i++) {
		uint32_t *pre_clamp = csc->pre_clamp;
		uint32_t *post_clamp = csc->post_clamp;

		mdp5_write(mdp5_kms, REG_MDP5_PIPE_CSC_1_PRE_CLAMP(pipe, i),
				MDP5_PIPE_CSC_1_PRE_CLAMP_REG_HIGH(pre_clamp[2*i+1]) |
				MDP5_PIPE_CSC_1_PRE_CLAMP_REG_LOW(pre_clamp[2*i]));

		mdp5_write(mdp5_kms, REG_MDP5_PIPE_CSC_1_POST_CLAMP(pipe, i),
				MDP5_PIPE_CSC_1_POST_CLAMP_REG_HIGH(post_clamp[2*i+1]) |
				MDP5_PIPE_CSC_1_POST_CLAMP_REG_LOW(post_clamp[2*i]));

		mdp5_write(mdp5_kms, REG_MDP5_PIPE_CSC_1_PRE_BIAS(pipe, i),
				MDP5_PIPE_CSC_1_PRE_BIAS_REG_VALUE(csc->pre_bias[i]));

		mdp5_write(mdp5_kms, REG_MDP5_PIPE_CSC_1_POST_BIAS(pipe, i),
				MDP5_PIPE_CSC_1_POST_BIAS_REG_VALUE(csc->post_bias[i]));
	}
}

#define PHASE_STEP_SHIFT	21
#define DOWN_SCALE_RATIO_MAX	32	/* 2^(26-21) */

static int calc_phase_step(uint32_t src, uint32_t dst, uint32_t *out_phase)
{
	uint32_t unit;

	if (src == 0 || dst == 0)
		return -EINVAL;

	/*
	 * PHASE_STEP_X/Y is coded on 26 bits (25:0),
	 * where 2^21 represents the unity "1" in fixed-point hardware design.
	 * This leaves 5 bits for the integer part (downscale case):
	 * -> maximum downscale ratio = 0b1_1111 = 31
	 */
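	/*
	 * Illustrative example: for a clean 2x downscale (src == 2 * dst),
	 * mult_frac(1 << 21, src, dst) below yields 2 << 21 = 0x400000,
	 * i.e. integer part 2, fractional part 0.
	 */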
	if (src > (dst * DOWN_SCALE_RATIO_MAX))
		return -EOVERFLOW;

	unit = 1 << PHASE_STEP_SHIFT;
	*out_phase = mult_frac(unit, src, dst);

	return 0;
}

static int calc_scalex_steps(struct drm_plane *plane,
		uint32_t pixel_format, uint32_t src, uint32_t dest,
		uint32_t phasex_steps[COMP_MAX])
{
	const struct drm_format_info *info = drm_format_info(pixel_format);
	struct mdp5_kms *mdp5_kms = get_kms(plane);
	struct device *dev = mdp5_kms->dev->dev;
	uint32_t phasex_step;
	int ret;

	ret = calc_phase_step(src, dest, &phasex_step);
	if (ret) {
		DRM_DEV_ERROR(dev, "X scaling (%d->%d) failed: %d\n", src, dest, ret);
		return ret;
	}

	phasex_steps[COMP_0]   = phasex_step;
	phasex_steps[COMP_3]   = phasex_step;
	phasex_steps[COMP_1_2] = phasex_step / info->hsub;

	return 0;
}

static int calc_scaley_steps(struct drm_plane *plane,
		uint32_t pixel_format, uint32_t src, uint32_t dest,
		uint32_t phasey_steps[COMP_MAX])
{
	const struct drm_format_info *info = drm_format_info(pixel_format);
	struct mdp5_kms *mdp5_kms = get_kms(plane);
	struct device *dev = mdp5_kms->dev->dev;
	uint32_t phasey_step;
	int ret;

	ret = calc_phase_step(src, dest, &phasey_step);
	if (ret) {
		DRM_DEV_ERROR(dev, "Y scaling (%d->%d) failed: %d\n", src, dest, ret);
		return ret;
	}

	phasey_steps[COMP_0]   = phasey_step;
	phasey_steps[COMP_3]   = phasey_step;
	phasey_steps[COMP_1_2] = phasey_step / info->vsub;

	return 0;
}

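/*
 * Build the SCALE_CONFIG value for one direction. Scaling is always enabled
 * for YUV formats (the chroma components need up-sampling even at 1:1);
 * bilinear filtering is used for upscale and PCMN for downscale, chosen per
 * component group.
 */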
static uint32_t get_scale_config(const struct mdp_format *format,
		uint32_t src, uint32_t dst, bool horz)
{
	const struct drm_format_info *info = drm_format_info(format->base.pixel_format);
	bool scaling = format->is_yuv ? true : (src != dst);
	uint32_t sub;
	uint32_t ya_filter, uv_filter;
	bool yuv = format->is_yuv;

	if (!scaling)
		return 0;

	if (yuv) {
		sub = horz ? info->hsub : info->vsub;
		uv_filter = ((src / sub) <= dst) ?
				   SCALE_FILTER_BIL : SCALE_FILTER_PCMN;
	}
	ya_filter = (src <= dst) ? SCALE_FILTER_BIL : SCALE_FILTER_PCMN;

	if (horz)
		return  MDP5_PIPE_SCALE_CONFIG_SCALEX_EN |
			MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_0(ya_filter) |
			MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_3(ya_filter) |
			COND(yuv, MDP5_PIPE_SCALE_CONFIG_SCALEX_FILTER_COMP_1_2(uv_filter));
	else
		return  MDP5_PIPE_SCALE_CONFIG_SCALEY_EN |
			MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_0(ya_filter) |
			MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_3(ya_filter) |
			COND(yuv, MDP5_PIPE_SCALE_CONFIG_SCALEY_FILTER_COMP_1_2(uv_filter));
}

static void calc_pixel_ext(const struct mdp_format *format,
		uint32_t src, uint32_t dst, uint32_t phase_step[2],
		int pix_ext_edge1[COMP_MAX], int pix_ext_edge2[COMP_MAX],
		bool horz)
{
	bool scaling = format->is_yuv ? true : (src != dst);
	int i;

	/*
	 * Note:
	 * We assume here that:
	 *     1. PCMN filter is used for downscale
	 *     2. bilinear filter is used for upscale
	 *     3. we are in a single pipe configuration
	 */

	for (i = 0; i < COMP_MAX; i++) {
		pix_ext_edge1[i] = 0;
		pix_ext_edge2[i] = scaling ? 1 : 0;
	}
}

static void mdp5_write_pixel_ext(struct mdp5_kms *mdp5_kms, enum mdp5_pipe pipe,
	const struct mdp_format *format,
	uint32_t src_w, int pe_left[COMP_MAX], int pe_right[COMP_MAX],
	uint32_t src_h, int pe_top[COMP_MAX], int pe_bottom[COMP_MAX])
{
	const struct drm_format_info *info = drm_format_info(format->base.pixel_format);
	uint32_t lr, tb, req;
	int i;

	for (i = 0; i < COMP_MAX; i++) {
		uint32_t roi_w = src_w;
		uint32_t roi_h = src_h;

		if (format->is_yuv && i == COMP_1_2) {
			roi_w /= info->hsub;
			roi_h /= info->vsub;
		}

		lr  = (pe_left[i] >= 0) ?
			MDP5_PIPE_SW_PIX_EXT_LR_LEFT_RPT(pe_left[i]) :
			MDP5_PIPE_SW_PIX_EXT_LR_LEFT_OVF(pe_left[i]);

		lr |= (pe_right[i] >= 0) ?
			MDP5_PIPE_SW_PIX_EXT_LR_RIGHT_RPT(pe_right[i]) :
			MDP5_PIPE_SW_PIX_EXT_LR_RIGHT_OVF(pe_right[i]);

		tb  = (pe_top[i] >= 0) ?
			MDP5_PIPE_SW_PIX_EXT_TB_TOP_RPT(pe_top[i]) :
			MDP5_PIPE_SW_PIX_EXT_TB_TOP_OVF(pe_top[i]);

		tb |= (pe_bottom[i] >= 0) ?
			MDP5_PIPE_SW_PIX_EXT_TB_BOTTOM_RPT(pe_bottom[i]) :
			MDP5_PIPE_SW_PIX_EXT_TB_BOTTOM_OVF(pe_bottom[i]);

		req  = MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS_LEFT_RIGHT(roi_w +
				pe_left[i] + pe_right[i]);

		req |= MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS_TOP_BOTTOM(roi_h +
				pe_top[i] + pe_bottom[i]);

		mdp5_write(mdp5_kms, REG_MDP5_PIPE_SW_PIX_EXT_LR(pipe, i), lr);
		mdp5_write(mdp5_kms, REG_MDP5_PIPE_SW_PIX_EXT_TB(pipe, i), tb);
		mdp5_write(mdp5_kms, REG_MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS(pipe, i), req);

		DBG("comp-%d (L/R): rpt=%d/%d, ovf=%d/%d, req=%d", i,
			FIELD(lr,  MDP5_PIPE_SW_PIX_EXT_LR_LEFT_RPT),
			FIELD(lr,  MDP5_PIPE_SW_PIX_EXT_LR_RIGHT_RPT),
			FIELD(lr,  MDP5_PIPE_SW_PIX_EXT_LR_LEFT_OVF),
			FIELD(lr,  MDP5_PIPE_SW_PIX_EXT_LR_RIGHT_OVF),
			FIELD(req, MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS_LEFT_RIGHT));

		DBG("comp-%d (T/B): rpt=%d/%d, ovf=%d/%d, req=%d", i,
			FIELD(tb,  MDP5_PIPE_SW_PIX_EXT_TB_TOP_RPT),
			FIELD(tb,  MDP5_PIPE_SW_PIX_EXT_TB_BOTTOM_RPT),
			FIELD(tb,  MDP5_PIPE_SW_PIX_EXT_TB_TOP_OVF),
			FIELD(tb,  MDP5_PIPE_SW_PIX_EXT_TB_BOTTOM_OVF),
			FIELD(req, MDP5_PIPE_SW_PIX_EXT_REQ_PIXELS_TOP_BOTTOM));
	}
}

struct pixel_ext {
	int left[COMP_MAX];
	int right[COMP_MAX];
	int top[COMP_MAX];
	int bottom[COMP_MAX];
};

struct phase_step {
	u32 x[COMP_MAX];
	u32 y[COMP_MAX];
};

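/*
 * Program a single hardware pipe: source/destination geometry, source
 * format and unpack order, flips, optional SW pixel extension, scaler
 * phase steps and decimation, CSC (enabled only for YUV sources) and,
 * finally, the scanout strides/addresses.
 */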
static void mdp5_hwpipe_mode_set(struct mdp5_kms *mdp5_kms,
				 struct mdp5_hw_pipe *hwpipe,
				 struct drm_framebuffer *fb,
				 struct phase_step *step,
				 struct pixel_ext *pe,
				 u32 scale_config, u32 hdecm, u32 vdecm,
				 bool hflip, bool vflip,
				 int crtc_x, int crtc_y,
				 unsigned int crtc_w, unsigned int crtc_h,
				 u32 src_img_w, u32 src_img_h,
				 u32 src_x, u32 src_y,
				 u32 src_w, u32 src_h)
{
	enum mdp5_pipe pipe = hwpipe->pipe;
	bool has_pe = hwpipe->caps & MDP_PIPE_CAP_SW_PIX_EXT;
	const struct mdp_format *format =
			to_mdp_format(msm_framebuffer_format(fb));

	mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_IMG_SIZE(pipe),
			MDP5_PIPE_SRC_IMG_SIZE_WIDTH(src_img_w) |
			MDP5_PIPE_SRC_IMG_SIZE_HEIGHT(src_img_h));

	mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_SIZE(pipe),
			MDP5_PIPE_SRC_SIZE_WIDTH(src_w) |
			MDP5_PIPE_SRC_SIZE_HEIGHT(src_h));

	mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_XY(pipe),
			MDP5_PIPE_SRC_XY_X(src_x) |
			MDP5_PIPE_SRC_XY_Y(src_y));

	mdp5_write(mdp5_kms, REG_MDP5_PIPE_OUT_SIZE(pipe),
			MDP5_PIPE_OUT_SIZE_WIDTH(crtc_w) |
			MDP5_PIPE_OUT_SIZE_HEIGHT(crtc_h));

	mdp5_write(mdp5_kms, REG_MDP5_PIPE_OUT_XY(pipe),
			MDP5_PIPE_OUT_XY_X(crtc_x) |
			MDP5_PIPE_OUT_XY_Y(crtc_y));

	mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_FORMAT(pipe),
			MDP5_PIPE_SRC_FORMAT_A_BPC(format->bpc_a) |
			MDP5_PIPE_SRC_FORMAT_R_BPC(format->bpc_r) |
			MDP5_PIPE_SRC_FORMAT_G_BPC(format->bpc_g) |
			MDP5_PIPE_SRC_FORMAT_B_BPC(format->bpc_b) |
			COND(format->alpha_enable, MDP5_PIPE_SRC_FORMAT_ALPHA_ENABLE) |
			MDP5_PIPE_SRC_FORMAT_CPP(format->cpp - 1) |
			MDP5_PIPE_SRC_FORMAT_UNPACK_COUNT(format->unpack_count - 1) |
			COND(format->unpack_tight, MDP5_PIPE_SRC_FORMAT_UNPACK_TIGHT) |
			MDP5_PIPE_SRC_FORMAT_FETCH_TYPE(format->fetch_type) |
			MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP(format->chroma_sample));

	mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_UNPACK(pipe),
			MDP5_PIPE_SRC_UNPACK_ELEM0(format->unpack[0]) |
			MDP5_PIPE_SRC_UNPACK_ELEM1(format->unpack[1]) |
			MDP5_PIPE_SRC_UNPACK_ELEM2(format->unpack[2]) |
			MDP5_PIPE_SRC_UNPACK_ELEM3(format->unpack[3]));

	mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_OP_MODE(pipe),
			(hflip ? MDP5_PIPE_SRC_OP_MODE_FLIP_LR : 0) |
			(vflip ? MDP5_PIPE_SRC_OP_MODE_FLIP_UD : 0) |
			COND(has_pe, MDP5_PIPE_SRC_OP_MODE_SW_PIX_EXT_OVERRIDE) |
			MDP5_PIPE_SRC_OP_MODE_BWC(BWC_LOSSLESS));

	/* not using secure mode: */
	mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_ADDR_SW_STATUS(pipe), 0);

	if (hwpipe->caps & MDP_PIPE_CAP_SW_PIX_EXT)
		mdp5_write_pixel_ext(mdp5_kms, pipe, format,
				src_w, pe->left, pe->right,
				src_h, pe->top, pe->bottom);

	if (hwpipe->caps & MDP_PIPE_CAP_SCALE) {
		mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_PHASE_STEP_X(pipe),
				step->x[COMP_0]);
		mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_PHASE_STEP_Y(pipe),
				step->y[COMP_0]);
		mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_CR_PHASE_STEP_X(pipe),
				step->x[COMP_1_2]);
		mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_CR_PHASE_STEP_Y(pipe),
				step->y[COMP_1_2]);
		mdp5_write(mdp5_kms, REG_MDP5_PIPE_DECIMATION(pipe),
				MDP5_PIPE_DECIMATION_VERT(vdecm) |
				MDP5_PIPE_DECIMATION_HORZ(hdecm));
		mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_CONFIG(pipe),
				scale_config);
	}

	if (hwpipe->caps & MDP_PIPE_CAP_CSC) {
		if (MDP_FORMAT_IS_YUV(format))
			csc_enable(mdp5_kms, pipe,
					mdp_get_default_csc_cfg(CSC_YUV2RGB));
		else
			csc_disable(mdp5_kms, pipe);
	}

	set_scanout_locked(mdp5_kms, pipe, fb);
}

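/*
 * Translate the plane state (fb plus src/dest rectangles, Q16 source
 * coordinates) into hardware pipe programming. If a right hwpipe is
 * assigned (source split), the width is split evenly: each pipe gets half
 * of src_w/crtc_w, with the right pipe offset by the half-width in both
 * source and CRTC coordinates.
 */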
static int mdp5_plane_mode_set(struct drm_plane *plane,
		struct drm_crtc *crtc, struct drm_framebuffer *fb,
		struct drm_rect *src, struct drm_rect *dest)
{
	struct drm_plane_state *pstate = plane->state;
	struct mdp5_hw_pipe *hwpipe = to_mdp5_plane_state(pstate)->hwpipe;
	struct mdp5_kms *mdp5_kms = get_kms(plane);
	enum mdp5_pipe pipe = hwpipe->pipe;
	struct mdp5_hw_pipe *right_hwpipe;
	const struct mdp_format *format;
	uint32_t nplanes, config = 0;
	struct phase_step step = { { 0 } };
	struct pixel_ext pe = { { 0 } };
	uint32_t hdecm = 0, vdecm = 0;
	uint32_t pix_format;
	unsigned int rotation;
	bool vflip, hflip;
	int crtc_x, crtc_y;
	unsigned int crtc_w, crtc_h;
	uint32_t src_x, src_y;
	uint32_t src_w, src_h;
	uint32_t src_img_w, src_img_h;
	int ret;

	nplanes = fb->format->num_planes;

	/* bad formats should already be rejected: */
	if (WARN_ON(nplanes > pipe2nclients(pipe)))
		return -EINVAL;

	format = to_mdp_format(msm_framebuffer_format(fb));
	pix_format = format->base.pixel_format;

	src_x = src->x1;
	src_y = src->y1;
	src_w = drm_rect_width(src);
	src_h = drm_rect_height(src);

	crtc_x = dest->x1;
	crtc_y = dest->y1;
	crtc_w = drm_rect_width(dest);
	crtc_h = drm_rect_height(dest);

	/* src values are in Q16 fixed point, convert to integer: */
	src_x = src_x >> 16;
	src_y = src_y >> 16;
	src_w = src_w >> 16;
	src_h = src_h >> 16;

	src_img_w = min(fb->width, src_w);
	src_img_h = min(fb->height, src_h);

	DBG("%s: FB[%u] %u,%u,%u,%u -> CRTC[%u] %d,%d,%u,%u", plane->name,
			fb->base.id, src_x, src_y, src_w, src_h,
			crtc->base.id, crtc_x, crtc_y, crtc_w, crtc_h);

	right_hwpipe = to_mdp5_plane_state(pstate)->r_hwpipe;
	if (right_hwpipe) {
		/*
		 * if the plane consists of 2 hw pipes, assume that the width
		 * is split equally across them. The only parameters that vary
		 * between the 2 pipes are src_x and crtc_x
		 */
		crtc_w /= 2;
		src_w /= 2;
		src_img_w /= 2;
	}

	ret = calc_scalex_steps(plane, pix_format, src_w, crtc_w, step.x);
	if (ret)
		return ret;

	ret = calc_scaley_steps(plane, pix_format, src_h, crtc_h, step.y);
	if (ret)
		return ret;

	if (hwpipe->caps & MDP_PIPE_CAP_SW_PIX_EXT) {
		calc_pixel_ext(format, src_w, crtc_w, step.x,
			       pe.left, pe.right, true);
		calc_pixel_ext(format, src_h, crtc_h, step.y,
			       pe.top, pe.bottom, false);
	}

	/* TODO calc hdecm, vdecm */

	/* SCALE is used to both scale and up-sample chroma components */
	config |= get_scale_config(format, src_w, crtc_w, true);
	config |= get_scale_config(format, src_h, crtc_h, false);
	DBG("scale config = %x", config);

	rotation = drm_rotation_simplify(pstate->rotation,
					 DRM_MODE_ROTATE_0 |
					 DRM_MODE_REFLECT_X |
					 DRM_MODE_REFLECT_Y);
	hflip = !!(rotation & DRM_MODE_REFLECT_X);
	vflip = !!(rotation & DRM_MODE_REFLECT_Y);

	mdp5_hwpipe_mode_set(mdp5_kms, hwpipe, fb, &step, &pe,
			     config, hdecm, vdecm, hflip, vflip,
			     crtc_x, crtc_y, crtc_w, crtc_h,
			     src_img_w, src_img_h,
			     src_x, src_y, src_w, src_h);
	if (right_hwpipe)
		mdp5_hwpipe_mode_set(mdp5_kms, right_hwpipe, fb, &step, &pe,
				     config, hdecm, vdecm, hflip, vflip,
				     crtc_x + crtc_w, crtc_y, crtc_w, crtc_h,
				     src_img_w, src_img_h,
				     src_x + src_w, src_y, src_w, src_h);

	return ret;
}

/*
 * Use this func and the one below only after the atomic state has been
 * successfully swapped
 */
enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane)
{
	struct mdp5_plane_state *pstate = to_mdp5_plane_state(plane->state);

	if (WARN_ON(!pstate->hwpipe))
		return SSPP_NONE;

	return pstate->hwpipe->pipe;
}

enum mdp5_pipe mdp5_plane_right_pipe(struct drm_plane *plane)
{
	struct mdp5_plane_state *pstate = to_mdp5_plane_state(plane->state);

	if (!pstate->r_hwpipe)
		return SSPP_NONE;

	return pstate->r_hwpipe->pipe;
}

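/* Combined CTL flush mask for the hwpipe(s) currently assigned to the plane. */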
uint32_t mdp5_plane_get_flush(struct drm_plane *plane)
{
	struct mdp5_plane_state *pstate = to_mdp5_plane_state(plane->state);
	u32 mask;

	if (WARN_ON(!pstate->hwpipe))
		return 0;

	mask = pstate->hwpipe->flush_mask;

	if (pstate->r_hwpipe)
		mask |= pstate->r_hwpipe->flush_mask;

	return mask;
}

/* initialize plane */
struct drm_plane *mdp5_plane_init(struct drm_device *dev,
				  enum drm_plane_type type)
{
	struct drm_plane *plane = NULL;
	struct mdp5_plane *mdp5_plane;
	int ret;

	mdp5_plane = kzalloc(sizeof(*mdp5_plane), GFP_KERNEL);
	if (!mdp5_plane) {
		ret = -ENOMEM;
		goto fail;
	}

	plane = &mdp5_plane->base;

	mdp5_plane->nformats = mdp_get_formats(mdp5_plane->formats,
		ARRAY_SIZE(mdp5_plane->formats), false);

	ret = drm_universal_plane_init(dev, plane, 0xff, &mdp5_plane_funcs,
			mdp5_plane->formats, mdp5_plane->nformats,
			NULL, type, NULL);
	if (ret)
		goto fail;

	drm_plane_helper_add(plane, &mdp5_plane_helper_funcs);

	mdp5_plane_install_properties(plane, &plane->base);

	drm_plane_enable_fb_damage_clips(plane);

	return plane;

fail:
	if (plane)
		mdp5_plane_destroy(plane);

	return ERR_PTR(ret);
}