1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Copyright (C) 2013 Red Hat
4 * Copyright (c) 2014-2018, 2020-2021 The Linux Foundation. All rights reserved.
5 * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
6 *
7 * Author: Rob Clark <robdclark@gmail.com>
8 */
9
10 #define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
11 #include <linux/debugfs.h>
12 #include <linux/kthread.h>
13 #include <linux/seq_file.h>
14
15 #include <drm/drm_atomic.h>
16 #include <drm/drm_crtc.h>
17 #include <drm/drm_file.h>
18 #include <drm/drm_probe_helper.h>
19
20 #include "msm_drv.h"
21 #include "dpu_kms.h"
22 #include "dpu_hwio.h"
23 #include "dpu_hw_catalog.h"
24 #include "dpu_hw_intf.h"
25 #include "dpu_hw_ctl.h"
26 #include "dpu_hw_dspp.h"
27 #include "dpu_hw_dsc.h"
28 #include "dpu_hw_merge3d.h"
29 #include "dpu_formats.h"
30 #include "dpu_encoder_phys.h"
31 #include "dpu_crtc.h"
32 #include "dpu_trace.h"
33 #include "dpu_core_irq.h"
34 #include "disp/msm_disp_snapshot.h"
35
36 #define DPU_DEBUG_ENC(e, fmt, ...) DRM_DEBUG_ATOMIC("enc%d " fmt,\
37 (e) ? (e)->base.base.id : -1, ##__VA_ARGS__)
38
39 #define DPU_ERROR_ENC(e, fmt, ...) DPU_ERROR("enc%d " fmt,\
40 (e) ? (e)->base.base.id : -1, ##__VA_ARGS__)
41
42 #define DPU_ERROR_ENC_RATELIMITED(e, fmt, ...) DPU_ERROR_RATELIMITED("enc%d " fmt,\
43 (e) ? (e)->base.base.id : -1, ##__VA_ARGS__)
44
45 /*
46 * Two to anticipate panels that can do cmd/vid dynamic switching;
47 * the plan is to create all possible physical encoder types and switch
48 * between them at runtime
49 */
50 #define NUM_PHYS_ENCODER_TYPES 2
51
52 #define MAX_PHYS_ENCODERS_PER_VIRTUAL \
53 (MAX_H_TILES_PER_DISPLAY * NUM_PHYS_ENCODER_TYPES)
54
55 #define MAX_CHANNELS_PER_ENC 2
56
57 #define IDLE_SHORT_TIMEOUT 1
58
59 #define MAX_HDISPLAY_SPLIT 1080
60
61 /* timeout in frames waiting for frame done */
62 #define DPU_ENCODER_FRAME_DONE_TIMEOUT_FRAMES 5
63
64 /**
65 * enum dpu_enc_rc_events - events for resource control state machine
66 * @DPU_ENC_RC_EVENT_KICKOFF:
67 * This event happens at NORMAL priority.
68 * Event that signals the start of the transfer. When this event is
69 * received, enable MDP/DSI core clocks. Regardless of the previous
70 * state, the resource should be in ON state at the end of this event.
71 * @DPU_ENC_RC_EVENT_FRAME_DONE:
72 * This event happens at INTERRUPT level.
73 * Event signals the end of the data transfer after the PP FRAME_DONE
74 * event. At the end of this event, a delayed work is scheduled to go to
75 * IDLE_PC state after IDLE_TIMEOUT time.
76 * @DPU_ENC_RC_EVENT_PRE_STOP:
77 * This event happens at NORMAL priority.
78 * This event, when received during the ON state, leaves the RC state
79 * in the PRE_OFF state. It should be followed by the STOP event as
80 * part of encoder disable.
81 * If received during IDLE or OFF states, it will do nothing.
82 * @DPU_ENC_RC_EVENT_STOP:
83 * This event happens at NORMAL priority.
84 * When this event is received, disable all the MDP/DSI core clocks, and
85 * disable IRQs. It should be called from the PRE_OFF or IDLE states.
86 * IDLE is expected when IDLE_PC has run, and PRE_OFF did nothing.
87 * PRE_OFF is expected when PRE_STOP was executed during the ON state.
88 * Resource state should be in OFF at the end of the event.
89 * @DPU_ENC_RC_EVENT_ENTER_IDLE:
90 * This event happens at NORMAL priority from a work item.
91 * Event signals that there were no frame updates for IDLE_TIMEOUT time.
92 * This would disable MDP/DSI core clocks and change the resource state
93 * to IDLE.
94 */
95 enum dpu_enc_rc_events {
96 DPU_ENC_RC_EVENT_KICKOFF = 1,
97 DPU_ENC_RC_EVENT_FRAME_DONE,
98 DPU_ENC_RC_EVENT_PRE_STOP,
99 DPU_ENC_RC_EVENT_STOP,
100 DPU_ENC_RC_EVENT_ENTER_IDLE
101 };
102
103 /**
104 * enum dpu_enc_rc_states - states that the resource control maintains
105 * @DPU_ENC_RC_STATE_OFF: Resource is in OFF state
106 * @DPU_ENC_RC_STATE_PRE_OFF: Resource is transitioning to OFF state
107 * @DPU_ENC_RC_STATE_ON: Resource is in ON state
109 * @DPU_ENC_RC_STATE_IDLE: Resource is in IDLE state
110 */
111 enum dpu_enc_rc_states {
112 DPU_ENC_RC_STATE_OFF,
113 DPU_ENC_RC_STATE_PRE_OFF,
114 DPU_ENC_RC_STATE_ON,
115 DPU_ENC_RC_STATE_IDLE
116 };
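/*
 * Illustrative walk of the state machine above for a command-mode panel with
 * idle power collapse enabled (dpu_encoder_resource_control() below holds the
 * authoritative transition logic):
 *
 *	OFF     --KICKOFF-->    ON       core clocks and IRQs enabled
 *	ON      --FRAME_DONE--> ON       delayed off work scheduled
 *	ON      --ENTER_IDLE--> IDLE     clocks/IRQs dropped after idle_timeout
 *	IDLE    --KICKOFF-->    ON       resources restored for the next frame
 *	ON      --PRE_STOP-->   PRE_OFF  encoder disable in progress
 *	PRE_OFF --STOP-->       OFF      everything released
 */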
117
118 /**
119 * struct dpu_encoder_virt - virtual encoder. Container of one or more physical
120 * encoders. Virtual encoder manages one "logical" display. Physical
121 * encoders manage one intf block, tied to a specific panel/sub-panel.
122 * Virtual encoder defers as much as possible to the physical encoders.
123 * Virtual encoder registers itself with the DRM Framework as the encoder.
124 * @base: drm_encoder base class for registration with DRM
125 * @enc_spinlock: Virtual-Encoder-Wide Spin Lock for IRQ purposes
126 * @enabled: True if the encoder is active, protected by enc_lock
127 * @num_phys_encs: Actual number of physical encoders contained.
128 * @phys_encs: Container of physical encoders managed.
129 * @cur_master: Pointer to the current master in this mode; an optimization
130 * only valid after enable, cleared at disable.
131 * @cur_slave: As above but for the slave encoder.
132 * @hw_pp: Handle to the pingpong blocks used for the display. The
133 * number of pingpong blocks can differ from num_phys_encs.
134 * @hw_dsc: Handle to the DSC blocks used for the display.
135 * @dsc_mask: Bitmask of used DSC blocks.
136 * @intfs_swapped: Whether or not the phys_enc interfaces have been swapped
137 * for partial update right-only cases, such as pingpong
138 * split where virtual pingpong does not generate IRQs
139 * @crtc: Pointer to the currently assigned crtc. Normally you
140 * would use crtc->state->encoder_mask to determine the
141 * link between encoder/crtc. However in this case we need
142 * to track crtc in the disable() hook which is called
143 * _after_ encoder_mask is cleared.
144 * @connector: If a mode is set, cached pointer to the active connector
145 * @crtc_kickoff_cb: Callback into CRTC that will flush & start
146 * all CTL paths
147 * @crtc_kickoff_cb_data: Opaque user data given to crtc_kickoff_cb
148 * @debugfs_root: Debug file system root file node
149 * @enc_lock: Lock around physical encoder
150 * create/destroy/enable/disable
151 * @frame_busy_mask: Bitmask tracking which phys_encs are still busy
152 * processing the current command.
153 * Bit0 = phys_encs[0] etc.
154 * @crtc_frame_event_cb: callback handler for frame event
155 * @crtc_frame_event_cb_data: callback handler private data
156 * @frame_done_timeout_ms: frame done timeout in ms
157 * @frame_done_timer: watchdog timer for frame done event
158 * @disp_info: local copy of msm_display_info struct
159 * @idle_pc_supported: indicates whether idle power collapse is supported
160 * @rc_lock: resource control mutex lock to protect
161 * virt encoder over various state changes
162 * @rc_state: resource controller state
163 * @delayed_off_work: delayed worker to schedule disabling of
164 * clks and resources after IDLE_TIMEOUT time.
165 * @topology: topology of the display
166 * @idle_timeout: idle timeout duration in milliseconds
167 * @wide_bus_en: wide bus is enabled on this interface
168 * @dsc: drm_dsc_config pointer, for DSC-enabled encoders
169 */
170 struct dpu_encoder_virt {
171 struct drm_encoder base;
172 spinlock_t enc_spinlock;
173
174 bool enabled;
175
176 unsigned int num_phys_encs;
177 struct dpu_encoder_phys *phys_encs[MAX_PHYS_ENCODERS_PER_VIRTUAL];
178 struct dpu_encoder_phys *cur_master;
179 struct dpu_encoder_phys *cur_slave;
180 struct dpu_hw_pingpong *hw_pp[MAX_CHANNELS_PER_ENC];
181 struct dpu_hw_dsc *hw_dsc[MAX_CHANNELS_PER_ENC];
182
183 unsigned int dsc_mask;
184
185 bool intfs_swapped;
186
187 struct drm_crtc *crtc;
188 struct drm_connector *connector;
189
190 struct dentry *debugfs_root;
191 struct mutex enc_lock;
192 DECLARE_BITMAP(frame_busy_mask, MAX_PHYS_ENCODERS_PER_VIRTUAL);
193 void (*crtc_frame_event_cb)(void *, u32 event);
194 void *crtc_frame_event_cb_data;
195
196 atomic_t frame_done_timeout_ms;
197 struct timer_list frame_done_timer;
198
199 struct msm_display_info disp_info;
200
201 bool idle_pc_supported;
202 struct mutex rc_lock;
203 enum dpu_enc_rc_states rc_state;
204 struct delayed_work delayed_off_work;
205 struct msm_display_topology topology;
206
207 u32 idle_timeout;
208
209 bool wide_bus_en;
210
211 /* DSC configuration */
212 struct drm_dsc_config *dsc;
213 };
214
215 #define to_dpu_encoder_virt(x) container_of(x, struct dpu_encoder_virt, base)
216
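/*
 * 4x4 spatial dither matrix (threshold values 0..15) that
 * _dpu_encoder_setup_dither() programs into the pingpong dither block when a
 * DSI panel reports 6 bpc.
 */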
217 static u32 dither_matrix[DITHER_MATRIX_SZ] = {
218 15, 7, 13, 5, 3, 11, 1, 9, 12, 4, 14, 6, 0, 8, 2, 10
219 };
220
221
222 bool dpu_encoder_is_widebus_enabled(const struct drm_encoder *drm_enc)
223 {
224 const struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
225
226 return dpu_enc->wide_bus_en;
227 }
228
229 int dpu_encoder_get_crc_values_cnt(const struct drm_encoder *drm_enc)
230 {
231 struct dpu_encoder_virt *dpu_enc;
232 int i, num_intf = 0;
233
234 dpu_enc = to_dpu_encoder_virt(drm_enc);
235
236 for (i = 0; i < dpu_enc->num_phys_encs; i++) {
237 struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
238
239 if (phys->hw_intf && phys->hw_intf->ops.setup_misr
240 && phys->hw_intf->ops.collect_misr)
241 num_intf++;
242 }
243
244 return num_intf;
245 }
246
247 void dpu_encoder_setup_misr(const struct drm_encoder *drm_enc)
248 {
249 struct dpu_encoder_virt *dpu_enc;
250
251 int i;
252
253 dpu_enc = to_dpu_encoder_virt(drm_enc);
254
255 for (i = 0; i < dpu_enc->num_phys_encs; i++) {
256 struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
257
258 if (!phys->hw_intf || !phys->hw_intf->ops.setup_misr)
259 continue;
260
261 phys->hw_intf->ops.setup_misr(phys->hw_intf);
262 }
263 }
264
265 int dpu_encoder_get_crc(const struct drm_encoder *drm_enc, u32 *crcs, int pos)
266 {
267 struct dpu_encoder_virt *dpu_enc;
268
269 int i, rc = 0, entries_added = 0;
270
271 if (!drm_enc->crtc) {
272 DRM_ERROR("no crtc found for encoder %d\n", drm_enc->index);
273 return -EINVAL;
274 }
275
276 dpu_enc = to_dpu_encoder_virt(drm_enc);
277
278 for (i = 0; i < dpu_enc->num_phys_encs; i++) {
279 struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
280
281 if (!phys->hw_intf || !phys->hw_intf->ops.collect_misr)
282 continue;
283
284 rc = phys->hw_intf->ops.collect_misr(phys->hw_intf, &crcs[pos + entries_added]);
285 if (rc)
286 return rc;
287 entries_added++;
288 }
289
290 return entries_added;
291 }
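/*
 * Sketch of how the three MISR helpers above fit together (illustrative only;
 * "num" and "crcs" are local placeholders, the real consumer is the CRTC CRC
 * code):
 *
 *	int num = dpu_encoder_get_crc_values_cnt(drm_enc);
 *	u32 *crcs = kcalloc(num, sizeof(*crcs), GFP_KERNEL);
 *
 *	dpu_encoder_setup_misr(drm_enc);	// arm MISR on each capable intf
 *	// ...after at least one frame has gone through...
 *	dpu_encoder_get_crc(drm_enc, crcs, 0);	// one CRC value per intf
 */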
292
293 static void _dpu_encoder_setup_dither(struct dpu_hw_pingpong *hw_pp, unsigned bpc)
294 {
295 struct dpu_hw_dither_cfg dither_cfg = { 0 };
296
297 if (!hw_pp->ops.setup_dither)
298 return;
299
300 switch (bpc) {
301 case 6:
302 dither_cfg.c0_bitdepth = 6;
303 dither_cfg.c1_bitdepth = 6;
304 dither_cfg.c2_bitdepth = 6;
305 dither_cfg.c3_bitdepth = 6;
306 dither_cfg.temporal_en = 0;
307 break;
308 default:
309 hw_pp->ops.setup_dither(hw_pp, NULL);
310 return;
311 }
312
313 memcpy(&dither_cfg.matrix, dither_matrix,
314 sizeof(u32) * DITHER_MATRIX_SZ);
315
316 hw_pp->ops.setup_dither(hw_pp, &dither_cfg);
317 }
318
319 static char *dpu_encoder_helper_get_intf_type(enum dpu_intf_mode intf_mode)
320 {
321 switch (intf_mode) {
322 case INTF_MODE_VIDEO:
323 return "INTF_MODE_VIDEO";
324 case INTF_MODE_CMD:
325 return "INTF_MODE_CMD";
326 case INTF_MODE_WB_BLOCK:
327 return "INTF_MODE_WB_BLOCK";
328 case INTF_MODE_WB_LINE:
329 return "INTF_MODE_WB_LINE";
330 default:
331 return "INTF_MODE_UNKNOWN";
332 }
333 }
334
335 void dpu_encoder_helper_report_irq_timeout(struct dpu_encoder_phys *phys_enc,
336 enum dpu_intr_idx intr_idx)
337 {
338 DRM_ERROR("irq timeout id=%u, intf_mode=%s intf=%d wb=%d, pp=%d, intr=%d\n",
339 DRMID(phys_enc->parent),
340 dpu_encoder_helper_get_intf_type(phys_enc->intf_mode),
341 phys_enc->hw_intf ? phys_enc->hw_intf->idx - INTF_0 : -1,
342 phys_enc->hw_wb ? phys_enc->hw_wb->idx - WB_0 : -1,
343 phys_enc->hw_pp->idx - PINGPONG_0, intr_idx);
344
345 dpu_encoder_frame_done_callback(phys_enc->parent, phys_enc,
346 DPU_ENCODER_FRAME_EVENT_ERROR);
347 }
348
349 static int dpu_encoder_helper_wait_event_timeout(int32_t drm_id,
350 u32 irq_idx, struct dpu_encoder_wait_info *info);
351
352 int dpu_encoder_helper_wait_for_irq(struct dpu_encoder_phys *phys_enc,
353 int irq,
354 void (*func)(void *arg, int irq_idx),
355 struct dpu_encoder_wait_info *wait_info)
356 {
357 u32 irq_status;
358 int ret;
359
360 if (!wait_info) {
361 DPU_ERROR("invalid params\n");
362 return -EINVAL;
363 }
364 /* note: do master / slave checking outside */
365
366 /* return EWOULDBLOCK since we know the wait isn't necessary */
367 if (phys_enc->enable_state == DPU_ENC_DISABLED) {
368 DRM_ERROR("encoder is disabled id=%u, callback=%ps, irq=%d\n",
369 DRMID(phys_enc->parent), func,
370 irq);
371 return -EWOULDBLOCK;
372 }
373
374 if (irq < 0) {
375 DRM_DEBUG_KMS("skip irq wait id=%u, callback=%ps\n",
376 DRMID(phys_enc->parent), func);
377 return 0;
378 }
379
380 DRM_DEBUG_KMS("id=%u, callback=%ps, irq=%d, pp=%d, pending_cnt=%d\n",
381 DRMID(phys_enc->parent), func,
382 irq, phys_enc->hw_pp->idx - PINGPONG_0,
383 atomic_read(wait_info->atomic_cnt));
384
385 ret = dpu_encoder_helper_wait_event_timeout(
386 DRMID(phys_enc->parent),
387 irq,
388 wait_info);
389
390 if (ret <= 0) {
391 irq_status = dpu_core_irq_read(phys_enc->dpu_kms, irq);
392 if (irq_status) {
393 unsigned long flags;
394
395 DRM_DEBUG_KMS("irq not triggered id=%u, callback=%ps, irq=%d, pp=%d, atomic_cnt=%d\n",
396 DRMID(phys_enc->parent), func,
397 irq,
398 phys_enc->hw_pp->idx - PINGPONG_0,
399 atomic_read(wait_info->atomic_cnt));
400 local_irq_save(flags);
401 func(phys_enc, irq);
402 local_irq_restore(flags);
403 ret = 0;
404 } else {
405 ret = -ETIMEDOUT;
406 DRM_DEBUG_KMS("irq timeout id=%u, callback=%ps, irq=%d, pp=%d, atomic_cnt=%d\n",
407 DRMID(phys_enc->parent), func,
408 irq,
409 phys_enc->hw_pp->idx - PINGPONG_0,
410 atomic_read(wait_info->atomic_cnt));
411 }
412 } else {
413 ret = 0;
414 trace_dpu_enc_irq_wait_success(DRMID(phys_enc->parent),
415 func, irq,
416 phys_enc->hw_pp->idx - PINGPONG_0,
417 atomic_read(wait_info->atomic_cnt));
418 }
419
420 return ret;
421 }
422
423 int dpu_encoder_get_vsync_count(struct drm_encoder *drm_enc)
424 {
425 struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
426 struct dpu_encoder_phys *phys = dpu_enc ? dpu_enc->cur_master : NULL;
427 return phys ? atomic_read(&phys->vsync_cnt) : 0;
428 }
429
430 int dpu_encoder_get_linecount(struct drm_encoder *drm_enc)
431 {
432 struct dpu_encoder_virt *dpu_enc;
433 struct dpu_encoder_phys *phys;
434 int linecount = 0;
435
436 dpu_enc = to_dpu_encoder_virt(drm_enc);
437 phys = dpu_enc ? dpu_enc->cur_master : NULL;
438
439 if (phys && phys->ops.get_line_count)
440 linecount = phys->ops.get_line_count(phys);
441
442 return linecount;
443 }
444
445 static void dpu_encoder_destroy(struct drm_encoder *drm_enc)
446 {
447 struct dpu_encoder_virt *dpu_enc = NULL;
448 int i = 0;
449
450 if (!drm_enc) {
451 DPU_ERROR("invalid encoder\n");
452 return;
453 }
454
455 dpu_enc = to_dpu_encoder_virt(drm_enc);
456 DPU_DEBUG_ENC(dpu_enc, "\n");
457
458 mutex_lock(&dpu_enc->enc_lock);
459
460 for (i = 0; i < dpu_enc->num_phys_encs; i++) {
461 struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
462
463 if (phys->ops.destroy) {
464 phys->ops.destroy(phys);
465 --dpu_enc->num_phys_encs;
466 dpu_enc->phys_encs[i] = NULL;
467 }
468 }
469
470 if (dpu_enc->num_phys_encs)
471 DPU_ERROR_ENC(dpu_enc, "expected 0 num_phys_encs not %d\n",
472 dpu_enc->num_phys_encs);
473 dpu_enc->num_phys_encs = 0;
474 mutex_unlock(&dpu_enc->enc_lock);
475
476 drm_encoder_cleanup(drm_enc);
477 mutex_destroy(&dpu_enc->enc_lock);
478 }
479
480 void dpu_encoder_helper_split_config(
481 struct dpu_encoder_phys *phys_enc,
482 enum dpu_intf interface)
483 {
484 struct dpu_encoder_virt *dpu_enc;
485 struct split_pipe_cfg cfg = { 0 };
486 struct dpu_hw_mdp *hw_mdptop;
487 struct msm_display_info *disp_info;
488
489 if (!phys_enc->hw_mdptop || !phys_enc->parent) {
490 DPU_ERROR("invalid arg(s), encoder %d\n", phys_enc != NULL);
491 return;
492 }
493
494 dpu_enc = to_dpu_encoder_virt(phys_enc->parent);
495 hw_mdptop = phys_enc->hw_mdptop;
496 disp_info = &dpu_enc->disp_info;
497
498 if (disp_info->intf_type != INTF_DSI)
499 return;
500
501 /*
502 * disable split modes since the encoder will be operating as the only
503 * encoder, either for the entire use case in the case of, for example,
504 * single DSI, or for this frame in the case of left/right only partial
505 * update.
506 */
507 if (phys_enc->split_role == ENC_ROLE_SOLO) {
508 if (hw_mdptop->ops.setup_split_pipe)
509 hw_mdptop->ops.setup_split_pipe(hw_mdptop, &cfg);
510 return;
511 }
512
513 cfg.en = true;
514 cfg.mode = phys_enc->intf_mode;
515 cfg.intf = interface;
516
517 if (cfg.en && phys_enc->ops.needs_single_flush &&
518 phys_enc->ops.needs_single_flush(phys_enc))
519 cfg.split_flush_en = true;
520
521 if (phys_enc->split_role == ENC_ROLE_MASTER) {
522 DPU_DEBUG_ENC(dpu_enc, "enable %d\n", cfg.en);
523
524 if (hw_mdptop->ops.setup_split_pipe)
525 hw_mdptop->ops.setup_split_pipe(hw_mdptop, &cfg);
526 }
527 }
528
529 bool dpu_encoder_use_dsc_merge(struct drm_encoder *drm_enc)
530 {
531 struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
532 int i, intf_count = 0, num_dsc = 0;
533
534 for (i = 0; i < MAX_PHYS_ENCODERS_PER_VIRTUAL; i++)
535 if (dpu_enc->phys_encs[i])
536 intf_count++;
537
538 /* See dpu_encoder_get_topology, we only support 2:2:1 topology */
539 if (dpu_enc->dsc)
540 num_dsc = 2;
541
542 return (num_dsc > 0) && (num_dsc > intf_count);
543 }
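/*
 * With the fixed 2:2:1 DSC topology picked by dpu_encoder_get_topology(), the
 * check above reads "two DSC encoders feeding fewer interfaces": a single-DSI
 * DSC panel has intf_count == 1, so 2 > 1 and DSC merge is used, while a
 * hypothetical dual-interface DSC setup would evaluate 2 > 2 == false and
 * skip the merge path.
 */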
544
545 static struct drm_dsc_config *dpu_encoder_get_dsc_config(struct drm_encoder *drm_enc)
546 {
547 struct msm_drm_private *priv = drm_enc->dev->dev_private;
548 struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
549 int index = dpu_enc->disp_info.h_tile_instance[0];
550
551 if (dpu_enc->disp_info.intf_type == INTF_DSI)
552 return msm_dsi_get_dsc_config(priv->dsi[index]);
553
554 return NULL;
555 }
556
557 static struct msm_display_topology dpu_encoder_get_topology(
558 struct dpu_encoder_virt *dpu_enc,
559 struct dpu_kms *dpu_kms,
560 struct drm_display_mode *mode,
561 struct drm_crtc_state *crtc_state,
562 struct drm_dsc_config *dsc)
563 {
564 struct msm_display_topology topology = {0};
565 int i, intf_count = 0;
566
567 for (i = 0; i < MAX_PHYS_ENCODERS_PER_VIRTUAL; i++)
568 if (dpu_enc->phys_encs[i])
569 intf_count++;
570
571 /* Datapath topology selection
572 *
573 * Dual display
574 * 2 LM, 2 INTF ( Split display using 2 interfaces)
575 *
576 * Single display
577 * 1 LM, 1 INTF
578 * 2 LM, 1 INTF (stream merge to support high resolution interfaces)
579 *
580 * Add dspps to the reservation requirements if ctm is requested
581 */
582 if (intf_count == 2)
583 topology.num_lm = 2;
584 else if (!dpu_kms->catalog->caps->has_3d_merge)
585 topology.num_lm = 1;
586 else
587 topology.num_lm = (mode->hdisplay > MAX_HDISPLAY_SPLIT) ? 2 : 1;
588
589 if (crtc_state->ctm)
590 topology.num_dspp = topology.num_lm;
591
592 topology.num_intf = intf_count;
593
594 if (dsc) {
595 /*
596 * In case of Display Stream Compression (DSC), we would use
597 * 2 DSC encoders, 2 layer mixers and 1 interface;
598 * this is power optimal and can drive up to (and including) 4k
599 * screens
600 */
601 topology.num_dsc = 2;
602 topology.num_lm = 2;
603 topology.num_intf = 1;
604 }
605
606 return topology;
607 }
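/*
 * Worked examples of the selection above (illustrative panel timings): a
 * 1080x2400 single-DSI panel stays at 1 LM : 1 INTF since hdisplay does not
 * exceed MAX_HDISPLAY_SPLIT; a 1440x3200 single-DSI panel gets 2 LM : 1 INTF
 * when the catalog has 3D merge; a dual-DSI split display is always
 * 2 LM : 2 INTF; and whenever the CRTC carries a CTM, num_dspp follows
 * num_lm.
 */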
608
609 static int dpu_encoder_virt_atomic_check(
610 struct drm_encoder *drm_enc,
611 struct drm_crtc_state *crtc_state,
612 struct drm_connector_state *conn_state)
613 {
614 struct dpu_encoder_virt *dpu_enc;
615 struct msm_drm_private *priv;
616 struct dpu_kms *dpu_kms;
617 struct drm_display_mode *adj_mode;
618 struct msm_display_topology topology;
619 struct dpu_global_state *global_state;
620 struct drm_dsc_config *dsc;
621 int i = 0;
622 int ret = 0;
623
624 if (!drm_enc || !crtc_state || !conn_state) {
625 DPU_ERROR("invalid arg(s), drm_enc %d, crtc/conn state %d/%d\n",
626 drm_enc != NULL, crtc_state != NULL, conn_state != NULL);
627 return -EINVAL;
628 }
629
630 dpu_enc = to_dpu_encoder_virt(drm_enc);
631 DPU_DEBUG_ENC(dpu_enc, "\n");
632
633 priv = drm_enc->dev->dev_private;
634 dpu_kms = to_dpu_kms(priv->kms);
635 adj_mode = &crtc_state->adjusted_mode;
636 global_state = dpu_kms_get_global_state(crtc_state->state);
637 if (IS_ERR(global_state))
638 return PTR_ERR(global_state);
639
640 trace_dpu_enc_atomic_check(DRMID(drm_enc));
641
642 /* perform atomic check on the first physical encoder (master) */
643 for (i = 0; i < dpu_enc->num_phys_encs; i++) {
644 struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
645
646 if (phys->ops.atomic_check)
647 ret = phys->ops.atomic_check(phys, crtc_state,
648 conn_state);
649 if (ret) {
650 DPU_ERROR_ENC(dpu_enc,
651 "mode unsupported, phys idx %d\n", i);
652 return ret;
653 }
654 }
655
656 dsc = dpu_encoder_get_dsc_config(drm_enc);
657
658 topology = dpu_encoder_get_topology(dpu_enc, dpu_kms, adj_mode, crtc_state, dsc);
659
660 /*
661 * Release and allocate resources on every modeset.
662 * Don't allocate when active is false.
663 */
664 if (drm_atomic_crtc_needs_modeset(crtc_state)) {
665 dpu_rm_release(global_state, drm_enc);
666
667 if (!crtc_state->active_changed || crtc_state->enable)
668 ret = dpu_rm_reserve(&dpu_kms->rm, global_state,
669 drm_enc, crtc_state, topology);
670 }
671
672 trace_dpu_enc_atomic_check_flags(DRMID(drm_enc), adj_mode->flags);
673
674 return ret;
675 }
676
677 static void _dpu_encoder_update_vsync_source(struct dpu_encoder_virt *dpu_enc,
678 struct msm_display_info *disp_info)
679 {
680 struct dpu_vsync_source_cfg vsync_cfg = { 0 };
681 struct msm_drm_private *priv;
682 struct dpu_kms *dpu_kms;
683 struct dpu_hw_mdp *hw_mdptop;
684 struct drm_encoder *drm_enc;
685 struct dpu_encoder_phys *phys_enc;
686 int i;
687
688 if (!dpu_enc || !disp_info) {
689 DPU_ERROR("invalid param dpu_enc:%d or disp_info:%d\n",
690 dpu_enc != NULL, disp_info != NULL);
691 return;
692 } else if (dpu_enc->num_phys_encs > ARRAY_SIZE(dpu_enc->hw_pp)) {
693 DPU_ERROR("invalid num phys enc %d/%d\n",
694 dpu_enc->num_phys_encs,
695 (int) ARRAY_SIZE(dpu_enc->hw_pp));
696 return;
697 }
698
699 drm_enc = &dpu_enc->base;
700 /* these pointers are checked in virt_enable_helper */
701 priv = drm_enc->dev->dev_private;
702
703 dpu_kms = to_dpu_kms(priv->kms);
704 hw_mdptop = dpu_kms->hw_mdp;
705 if (!hw_mdptop) {
706 DPU_ERROR("invalid mdptop\n");
707 return;
708 }
709
710 if (hw_mdptop->ops.setup_vsync_source &&
711 disp_info->is_cmd_mode) {
712 for (i = 0; i < dpu_enc->num_phys_encs; i++)
713 vsync_cfg.ppnumber[i] = dpu_enc->hw_pp[i]->idx;
714
715 vsync_cfg.pp_count = dpu_enc->num_phys_encs;
716 vsync_cfg.frame_rate = drm_mode_vrefresh(&dpu_enc->base.crtc->state->adjusted_mode);
717
718 if (disp_info->is_te_using_watchdog_timer)
719 vsync_cfg.vsync_source = DPU_VSYNC_SOURCE_WD_TIMER_0;
720 else
721 vsync_cfg.vsync_source = DPU_VSYNC0_SOURCE_GPIO;
722
723 hw_mdptop->ops.setup_vsync_source(hw_mdptop, &vsync_cfg);
724
725 for (i = 0; i < dpu_enc->num_phys_encs; i++) {
726 phys_enc = dpu_enc->phys_encs[i];
727
728 if (phys_enc->has_intf_te && phys_enc->hw_intf->ops.vsync_sel)
729 phys_enc->hw_intf->ops.vsync_sel(phys_enc->hw_intf,
730 vsync_cfg.vsync_source);
731 }
732 }
733 }
734
735 static void _dpu_encoder_irq_control(struct drm_encoder *drm_enc, bool enable)
736 {
737 struct dpu_encoder_virt *dpu_enc;
738 int i;
739
740 if (!drm_enc) {
741 DPU_ERROR("invalid encoder\n");
742 return;
743 }
744
745 dpu_enc = to_dpu_encoder_virt(drm_enc);
746
747 DPU_DEBUG_ENC(dpu_enc, "enable:%d\n", enable);
748 for (i = 0; i < dpu_enc->num_phys_encs; i++) {
749 struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
750
751 if (phys->ops.irq_control)
752 phys->ops.irq_control(phys, enable);
753 }
754
755 }
756
757 static void _dpu_encoder_resource_control_helper(struct drm_encoder *drm_enc,
758 bool enable)
759 {
760 struct msm_drm_private *priv;
761 struct dpu_kms *dpu_kms;
762 struct dpu_encoder_virt *dpu_enc;
763
764 dpu_enc = to_dpu_encoder_virt(drm_enc);
765 priv = drm_enc->dev->dev_private;
766 dpu_kms = to_dpu_kms(priv->kms);
767
768 trace_dpu_enc_rc_helper(DRMID(drm_enc), enable);
769
770 if (!dpu_enc->cur_master) {
771 DPU_ERROR("encoder master not set\n");
772 return;
773 }
774
775 if (enable) {
776 /* enable DPU core clks */
777 pm_runtime_get_sync(&dpu_kms->pdev->dev);
778
779 /* enable all the irq */
780 _dpu_encoder_irq_control(drm_enc, true);
781
782 } else {
783 /* disable all the irq */
784 _dpu_encoder_irq_control(drm_enc, false);
785
786 /* disable DPU core clks */
787 pm_runtime_put_sync(&dpu_kms->pdev->dev);
788 }
789
790 }
791
792 static int dpu_encoder_resource_control(struct drm_encoder *drm_enc,
793 u32 sw_event)
794 {
795 struct dpu_encoder_virt *dpu_enc;
796 struct msm_drm_private *priv;
797 bool is_vid_mode = false;
798
799 if (!drm_enc || !drm_enc->dev || !drm_enc->crtc) {
800 DPU_ERROR("invalid parameters\n");
801 return -EINVAL;
802 }
803 dpu_enc = to_dpu_encoder_virt(drm_enc);
804 priv = drm_enc->dev->dev_private;
805 is_vid_mode = !dpu_enc->disp_info.is_cmd_mode;
806
807 /*
808 * when idle_pc is not supported, process only KICKOFF, PRE_STOP and
809 * STOP events and return early for other events (i.e. wb display).
810 */
811 if (!dpu_enc->idle_pc_supported &&
812 (sw_event != DPU_ENC_RC_EVENT_KICKOFF &&
813 sw_event != DPU_ENC_RC_EVENT_STOP &&
814 sw_event != DPU_ENC_RC_EVENT_PRE_STOP))
815 return 0;
816
817 trace_dpu_enc_rc(DRMID(drm_enc), sw_event, dpu_enc->idle_pc_supported,
818 dpu_enc->rc_state, "begin");
819
820 switch (sw_event) {
821 case DPU_ENC_RC_EVENT_KICKOFF:
822 /* cancel delayed off work, if any */
823 if (cancel_delayed_work_sync(&dpu_enc->delayed_off_work))
824 DPU_DEBUG_ENC(dpu_enc, "sw_event:%d, work cancelled\n",
825 sw_event);
826
827 mutex_lock(&dpu_enc->rc_lock);
828
829 /* return if the resource control is already in ON state */
830 if (dpu_enc->rc_state == DPU_ENC_RC_STATE_ON) {
831 DRM_DEBUG_ATOMIC("id:%u, sw_event:%d, rc in ON state\n",
832 DRMID(drm_enc), sw_event);
833 mutex_unlock(&dpu_enc->rc_lock);
834 return 0;
835 } else if (dpu_enc->rc_state != DPU_ENC_RC_STATE_OFF &&
836 dpu_enc->rc_state != DPU_ENC_RC_STATE_IDLE) {
837 DRM_DEBUG_ATOMIC("id:%u, sw_event:%d, rc in state %d\n",
838 DRMID(drm_enc), sw_event,
839 dpu_enc->rc_state);
840 mutex_unlock(&dpu_enc->rc_lock);
841 return -EINVAL;
842 }
843
844 if (is_vid_mode && dpu_enc->rc_state == DPU_ENC_RC_STATE_IDLE)
845 _dpu_encoder_irq_control(drm_enc, true);
846 else
847 _dpu_encoder_resource_control_helper(drm_enc, true);
848
849 dpu_enc->rc_state = DPU_ENC_RC_STATE_ON;
850
851 trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
852 dpu_enc->idle_pc_supported, dpu_enc->rc_state,
853 "kickoff");
854
855 mutex_unlock(&dpu_enc->rc_lock);
856 break;
857
858 case DPU_ENC_RC_EVENT_FRAME_DONE:
859 /*
860 * the mutex is not taken as this event happens in interrupt
861 * context, and locking is not required since the other events
862 * like KICKOFF and STOP do a wait-for-idle before executing
863 * the resource_control
864 */
865 if (dpu_enc->rc_state != DPU_ENC_RC_STATE_ON) {
866 DRM_DEBUG_KMS("id:%d, sw_event:%d,rc:%d-unexpected\n",
867 DRMID(drm_enc), sw_event,
868 dpu_enc->rc_state);
869 return -EINVAL;
870 }
871
872 /*
873 * schedule off work item only when there are no
874 * frames pending
875 */
876 if (dpu_crtc_frame_pending(drm_enc->crtc) > 1) {
877 DRM_DEBUG_KMS("id:%d skip schedule work\n",
878 DRMID(drm_enc));
879 return 0;
880 }
881
882 queue_delayed_work(priv->wq, &dpu_enc->delayed_off_work,
883 msecs_to_jiffies(dpu_enc->idle_timeout));
884
885 trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
886 dpu_enc->idle_pc_supported, dpu_enc->rc_state,
887 "frame done");
888 break;
889
890 case DPU_ENC_RC_EVENT_PRE_STOP:
891 /* cancel delayed off work, if any */
892 if (cancel_delayed_work_sync(&dpu_enc->delayed_off_work))
893 DPU_DEBUG_ENC(dpu_enc, "sw_event:%d, work cancelled\n",
894 sw_event);
895
896 mutex_lock(&dpu_enc->rc_lock);
897
898 if (is_vid_mode &&
899 dpu_enc->rc_state == DPU_ENC_RC_STATE_IDLE) {
900 _dpu_encoder_irq_control(drm_enc, true);
901 }
902 /* skip if is already OFF or IDLE, resources are off already */
903 else if (dpu_enc->rc_state == DPU_ENC_RC_STATE_OFF ||
904 dpu_enc->rc_state == DPU_ENC_RC_STATE_IDLE) {
905 DRM_DEBUG_KMS("id:%u, sw_event:%d, rc in %d state\n",
906 DRMID(drm_enc), sw_event,
907 dpu_enc->rc_state);
908 mutex_unlock(&dpu_enc->rc_lock);
909 return 0;
910 }
911
912 dpu_enc->rc_state = DPU_ENC_RC_STATE_PRE_OFF;
913
914 trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
915 dpu_enc->idle_pc_supported, dpu_enc->rc_state,
916 "pre stop");
917
918 mutex_unlock(&dpu_enc->rc_lock);
919 break;
920
921 case DPU_ENC_RC_EVENT_STOP:
922 mutex_lock(&dpu_enc->rc_lock);
923
924 /* return if the resource control is already in OFF state */
925 if (dpu_enc->rc_state == DPU_ENC_RC_STATE_OFF) {
926 DRM_DEBUG_KMS("id: %u, sw_event:%d, rc in OFF state\n",
927 DRMID(drm_enc), sw_event);
928 mutex_unlock(&dpu_enc->rc_lock);
929 return 0;
930 } else if (dpu_enc->rc_state == DPU_ENC_RC_STATE_ON) {
931 DRM_ERROR("id: %u, sw_event:%d, rc in state %d\n",
932 DRMID(drm_enc), sw_event, dpu_enc->rc_state);
933 mutex_unlock(&dpu_enc->rc_lock);
934 return -EINVAL;
935 }
936
937 /*
938 * expect to arrive here only from the IDLE or PRE_OFF state;
939 * in the IDLE state the resources are already disabled
940 */
941 if (dpu_enc->rc_state == DPU_ENC_RC_STATE_PRE_OFF)
942 _dpu_encoder_resource_control_helper(drm_enc, false);
943
944 dpu_enc->rc_state = DPU_ENC_RC_STATE_OFF;
945
946 trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
947 dpu_enc->idle_pc_supported, dpu_enc->rc_state,
948 "stop");
949
950 mutex_unlock(&dpu_enc->rc_lock);
951 break;
952
953 case DPU_ENC_RC_EVENT_ENTER_IDLE:
954 mutex_lock(&dpu_enc->rc_lock);
955
956 if (dpu_enc->rc_state != DPU_ENC_RC_STATE_ON) {
957 DRM_ERROR("id: %u, sw_event:%d, rc:%d !ON state\n",
958 DRMID(drm_enc), sw_event, dpu_enc->rc_state);
959 mutex_unlock(&dpu_enc->rc_lock);
960 return 0;
961 }
962
963 /*
964 * if we are in ON but a frame was just kicked off,
965 * ignore the IDLE event, it's probably a stale timer event
966 */
967 if (dpu_enc->frame_busy_mask[0]) {
968 DRM_ERROR("id:%u, sw_event:%d, rc:%d frame pending\n",
969 DRMID(drm_enc), sw_event, dpu_enc->rc_state);
970 mutex_unlock(&dpu_enc->rc_lock);
971 return 0;
972 }
973
974 if (is_vid_mode)
975 _dpu_encoder_irq_control(drm_enc, false);
976 else
977 _dpu_encoder_resource_control_helper(drm_enc, false);
978
979 dpu_enc->rc_state = DPU_ENC_RC_STATE_IDLE;
980
981 trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
982 dpu_enc->idle_pc_supported, dpu_enc->rc_state,
983 "idle");
984
985 mutex_unlock(&dpu_enc->rc_lock);
986 break;
987
988 default:
989 DRM_ERROR("id:%u, unexpected sw_event: %d\n", DRMID(drm_enc),
990 sw_event);
991 trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
992 dpu_enc->idle_pc_supported, dpu_enc->rc_state,
993 "error");
994 break;
995 }
996
997 trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
998 dpu_enc->idle_pc_supported, dpu_enc->rc_state,
999 "end");
1000 return 0;
1001 }
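/*
 * For one full enable/disable cycle the handler above typically sees the
 * events in this order (sketch only; "enc" is a placeholder and the callers
 * are wired up elsewhere in this file):
 *
 *	dpu_encoder_resource_control(enc, DPU_ENC_RC_EVENT_KICKOFF);	// atomic_enable
 *	dpu_encoder_resource_control(enc, DPU_ENC_RC_EVENT_FRAME_DONE);	// frame_done_callback
 *	dpu_encoder_resource_control(enc, DPU_ENC_RC_EVENT_ENTER_IDLE);	// delayed off work
 *	dpu_encoder_resource_control(enc, DPU_ENC_RC_EVENT_PRE_STOP);	// atomic_disable
 *	dpu_encoder_resource_control(enc, DPU_ENC_RC_EVENT_STOP);	// atomic_disable
 */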
1002
1003 void dpu_encoder_prepare_wb_job(struct drm_encoder *drm_enc,
1004 struct drm_writeback_job *job)
1005 {
1006 struct dpu_encoder_virt *dpu_enc;
1007 int i;
1008
1009 dpu_enc = to_dpu_encoder_virt(drm_enc);
1010
1011 for (i = 0; i < dpu_enc->num_phys_encs; i++) {
1012 struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
1013
1014 if (phys->ops.prepare_wb_job)
1015 phys->ops.prepare_wb_job(phys, job);
1016
1017 }
1018 }
1019
1020 void dpu_encoder_cleanup_wb_job(struct drm_encoder *drm_enc,
1021 struct drm_writeback_job *job)
1022 {
1023 struct dpu_encoder_virt *dpu_enc;
1024 int i;
1025
1026 dpu_enc = to_dpu_encoder_virt(drm_enc);
1027
1028 for (i = 0; i < dpu_enc->num_phys_encs; i++) {
1029 struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
1030
1031 if (phys->ops.cleanup_wb_job)
1032 phys->ops.cleanup_wb_job(phys, job);
1033
1034 }
1035 }
1036
1037 static void dpu_encoder_virt_atomic_mode_set(struct drm_encoder *drm_enc,
1038 struct drm_crtc_state *crtc_state,
1039 struct drm_connector_state *conn_state)
1040 {
1041 struct dpu_encoder_virt *dpu_enc;
1042 struct msm_drm_private *priv;
1043 struct dpu_kms *dpu_kms;
1044 struct dpu_crtc_state *cstate;
1045 struct dpu_global_state *global_state;
1046 struct dpu_hw_blk *hw_pp[MAX_CHANNELS_PER_ENC];
1047 struct dpu_hw_blk *hw_ctl[MAX_CHANNELS_PER_ENC];
1048 struct dpu_hw_blk *hw_lm[MAX_CHANNELS_PER_ENC];
1049 struct dpu_hw_blk *hw_dspp[MAX_CHANNELS_PER_ENC] = { NULL };
1050 struct dpu_hw_blk *hw_dsc[MAX_CHANNELS_PER_ENC];
1051 int num_lm, num_ctl, num_pp, num_dsc;
1052 unsigned int dsc_mask = 0;
1053 int i;
1054
1055 if (!drm_enc) {
1056 DPU_ERROR("invalid encoder\n");
1057 return;
1058 }
1059
1060 dpu_enc = to_dpu_encoder_virt(drm_enc);
1061 DPU_DEBUG_ENC(dpu_enc, "\n");
1062
1063 priv = drm_enc->dev->dev_private;
1064 dpu_kms = to_dpu_kms(priv->kms);
1065
1066 global_state = dpu_kms_get_existing_global_state(dpu_kms);
1067 if (IS_ERR_OR_NULL(global_state)) {
1068 DPU_ERROR("Failed to get global state");
1069 return;
1070 }
1071
1072 trace_dpu_enc_mode_set(DRMID(drm_enc));
1073
1074 /* Query resource that have been reserved in atomic check step. */
1075 num_pp = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
1076 drm_enc->base.id, DPU_HW_BLK_PINGPONG, hw_pp,
1077 ARRAY_SIZE(hw_pp));
1078 num_ctl = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
1079 drm_enc->base.id, DPU_HW_BLK_CTL, hw_ctl, ARRAY_SIZE(hw_ctl));
1080 num_lm = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
1081 drm_enc->base.id, DPU_HW_BLK_LM, hw_lm, ARRAY_SIZE(hw_lm));
1082 dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
1083 drm_enc->base.id, DPU_HW_BLK_DSPP, hw_dspp,
1084 ARRAY_SIZE(hw_dspp));
1085
1086 for (i = 0; i < MAX_CHANNELS_PER_ENC; i++)
1087 dpu_enc->hw_pp[i] = i < num_pp ? to_dpu_hw_pingpong(hw_pp[i])
1088 : NULL;
1089
1090 num_dsc = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
1091 drm_enc->base.id, DPU_HW_BLK_DSC,
1092 hw_dsc, ARRAY_SIZE(hw_dsc));
1093 for (i = 0; i < num_dsc; i++) {
1094 dpu_enc->hw_dsc[i] = to_dpu_hw_dsc(hw_dsc[i]);
1095 dsc_mask |= BIT(dpu_enc->hw_dsc[i]->idx - DSC_0);
1096 }
1097
1098 dpu_enc->dsc_mask = dsc_mask;
1099
1100 cstate = to_dpu_crtc_state(crtc_state);
1101
1102 for (i = 0; i < num_lm; i++) {
1103 int ctl_idx = (i < num_ctl) ? i : (num_ctl-1);
1104
1105 cstate->mixers[i].hw_lm = to_dpu_hw_mixer(hw_lm[i]);
1106 cstate->mixers[i].lm_ctl = to_dpu_hw_ctl(hw_ctl[ctl_idx]);
1107 cstate->mixers[i].hw_dspp = to_dpu_hw_dspp(hw_dspp[i]);
1108 }
1109
1110 cstate->num_mixers = num_lm;
1111
1112 dpu_enc->connector = conn_state->connector;
1113
1114 for (i = 0; i < dpu_enc->num_phys_encs; i++) {
1115 struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
1116
1117 if (!dpu_enc->hw_pp[i]) {
1118 DPU_ERROR_ENC(dpu_enc,
1119 "no pp block assigned at idx: %d\n", i);
1120 return;
1121 }
1122
1123 if (!hw_ctl[i]) {
1124 DPU_ERROR_ENC(dpu_enc,
1125 "no ctl block assigned at idx: %d\n", i);
1126 return;
1127 }
1128
1129 phys->hw_pp = dpu_enc->hw_pp[i];
1130 phys->hw_ctl = to_dpu_hw_ctl(hw_ctl[i]);
1131
1132 phys->cached_mode = crtc_state->adjusted_mode;
1133 if (phys->ops.atomic_mode_set)
1134 phys->ops.atomic_mode_set(phys, crtc_state, conn_state);
1135 }
1136 }
1137
1138 static void _dpu_encoder_virt_enable_helper(struct drm_encoder *drm_enc)
1139 {
1140 struct dpu_encoder_virt *dpu_enc = NULL;
1141 int i;
1142
1143 if (!drm_enc || !drm_enc->dev) {
1144 DPU_ERROR("invalid parameters\n");
1145 return;
1146 }
1147
1148 dpu_enc = to_dpu_encoder_virt(drm_enc);
1149 if (!dpu_enc || !dpu_enc->cur_master) {
1150 DPU_ERROR("invalid dpu encoder/master\n");
1151 return;
1152 }
1153
1154
1155 if (dpu_enc->disp_info.intf_type == INTF_DP &&
1156 dpu_enc->cur_master->hw_mdptop &&
1157 dpu_enc->cur_master->hw_mdptop->ops.intf_audio_select)
1158 dpu_enc->cur_master->hw_mdptop->ops.intf_audio_select(
1159 dpu_enc->cur_master->hw_mdptop);
1160
1161 _dpu_encoder_update_vsync_source(dpu_enc, &dpu_enc->disp_info);
1162
1163 if (dpu_enc->disp_info.intf_type == INTF_DSI &&
1164 !WARN_ON(dpu_enc->num_phys_encs == 0)) {
1165 unsigned bpc = dpu_enc->connector->display_info.bpc;
1166 for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
1167 if (!dpu_enc->hw_pp[i])
1168 continue;
1169 _dpu_encoder_setup_dither(dpu_enc->hw_pp[i], bpc);
1170 }
1171 }
1172 }
1173
1174 void dpu_encoder_virt_runtime_resume(struct drm_encoder *drm_enc)
1175 {
1176 struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
1177
1178 mutex_lock(&dpu_enc->enc_lock);
1179
1180 if (!dpu_enc->enabled)
1181 goto out;
1182
1183 if (dpu_enc->cur_slave && dpu_enc->cur_slave->ops.restore)
1184 dpu_enc->cur_slave->ops.restore(dpu_enc->cur_slave);
1185 if (dpu_enc->cur_master && dpu_enc->cur_master->ops.restore)
1186 dpu_enc->cur_master->ops.restore(dpu_enc->cur_master);
1187
1188 _dpu_encoder_virt_enable_helper(drm_enc);
1189
1190 out:
1191 mutex_unlock(&dpu_enc->enc_lock);
1192 }
1193
1194 static void dpu_encoder_virt_atomic_enable(struct drm_encoder *drm_enc,
1195 struct drm_atomic_state *state)
1196 {
1197 struct dpu_encoder_virt *dpu_enc = NULL;
1198 int ret = 0;
1199 struct drm_display_mode *cur_mode = NULL;
1200
1201 dpu_enc = to_dpu_encoder_virt(drm_enc);
1202
1203 dpu_enc->dsc = dpu_encoder_get_dsc_config(drm_enc);
1204
1205 mutex_lock(&dpu_enc->enc_lock);
1206 cur_mode = &dpu_enc->base.crtc->state->adjusted_mode;
1207
1208 trace_dpu_enc_enable(DRMID(drm_enc), cur_mode->hdisplay,
1209 cur_mode->vdisplay);
1210
1211 /* always enable slave encoder before master */
1212 if (dpu_enc->cur_slave && dpu_enc->cur_slave->ops.enable)
1213 dpu_enc->cur_slave->ops.enable(dpu_enc->cur_slave);
1214
1215 if (dpu_enc->cur_master && dpu_enc->cur_master->ops.enable)
1216 dpu_enc->cur_master->ops.enable(dpu_enc->cur_master);
1217
1218 ret = dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_KICKOFF);
1219 if (ret) {
1220 DPU_ERROR_ENC(dpu_enc, "dpu resource control failed: %d\n",
1221 ret);
1222 goto out;
1223 }
1224
1225 _dpu_encoder_virt_enable_helper(drm_enc);
1226
1227 dpu_enc->enabled = true;
1228
1229 out:
1230 mutex_unlock(&dpu_enc->enc_lock);
1231 }
1232
1233 static void dpu_encoder_virt_atomic_disable(struct drm_encoder *drm_enc,
1234 struct drm_atomic_state *state)
1235 {
1236 struct dpu_encoder_virt *dpu_enc = NULL;
1237 struct drm_crtc *crtc;
1238 struct drm_crtc_state *old_state = NULL;
1239 int i = 0;
1240
1241 dpu_enc = to_dpu_encoder_virt(drm_enc);
1242 DPU_DEBUG_ENC(dpu_enc, "\n");
1243
1244 crtc = drm_atomic_get_old_crtc_for_encoder(state, drm_enc);
1245 if (crtc)
1246 old_state = drm_atomic_get_old_crtc_state(state, crtc);
1247
1248 /*
1249 * The encoder is already disabled if self refresh mode was set earlier,
1250 * in the old_state for the corresponding crtc.
1251 */
1252 if (old_state && old_state->self_refresh_active)
1253 return;
1254
1255 mutex_lock(&dpu_enc->enc_lock);
1256 dpu_enc->enabled = false;
1257
1258 trace_dpu_enc_disable(DRMID(drm_enc));
1259
1260 /* wait for idle */
1261 dpu_encoder_wait_for_event(drm_enc, MSM_ENC_TX_COMPLETE);
1262
1263 dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_PRE_STOP);
1264
1265 for (i = 0; i < dpu_enc->num_phys_encs; i++) {
1266 struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
1267
1268 if (phys->ops.disable)
1269 phys->ops.disable(phys);
1270 }
1271
1272
1273 /* after phys waits for frame-done, should be no more frames pending */
1274 if (atomic_xchg(&dpu_enc->frame_done_timeout_ms, 0)) {
1275 DPU_ERROR("enc%d timeout pending\n", drm_enc->base.id);
1276 del_timer_sync(&dpu_enc->frame_done_timer);
1277 }
1278
1279 dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_STOP);
1280
1281 dpu_enc->connector = NULL;
1282
1283 DPU_DEBUG_ENC(dpu_enc, "encoder disabled\n");
1284
1285 mutex_unlock(&dpu_enc->enc_lock);
1286 }
1287
1288 static struct dpu_hw_intf *dpu_encoder_get_intf(const struct dpu_mdss_cfg *catalog,
1289 struct dpu_rm *dpu_rm,
1290 enum dpu_intf_type type, u32 controller_id)
1291 {
1292 int i = 0;
1293
1294 if (type == INTF_WB)
1295 return NULL;
1296
1297 for (i = 0; i < catalog->intf_count; i++) {
1298 if (catalog->intf[i].type == type
1299 && catalog->intf[i].controller_id == controller_id) {
1300 return dpu_rm_get_intf(dpu_rm, catalog->intf[i].id);
1301 }
1302 }
1303
1304 return NULL;
1305 }
1306
1307 void dpu_encoder_vblank_callback(struct drm_encoder *drm_enc,
1308 struct dpu_encoder_phys *phy_enc)
1309 {
1310 struct dpu_encoder_virt *dpu_enc = NULL;
1311 unsigned long lock_flags;
1312
1313 if (!drm_enc || !phy_enc)
1314 return;
1315
1316 DPU_ATRACE_BEGIN("encoder_vblank_callback");
1317 dpu_enc = to_dpu_encoder_virt(drm_enc);
1318
1319 atomic_inc(&phy_enc->vsync_cnt);
1320
1321 spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
1322 if (dpu_enc->crtc)
1323 dpu_crtc_vblank_callback(dpu_enc->crtc);
1324 spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
1325
1326 DPU_ATRACE_END("encoder_vblank_callback");
1327 }
1328
1329 void dpu_encoder_underrun_callback(struct drm_encoder *drm_enc,
1330 struct dpu_encoder_phys *phy_enc)
1331 {
1332 if (!phy_enc)
1333 return;
1334
1335 DPU_ATRACE_BEGIN("encoder_underrun_callback");
1336 atomic_inc(&phy_enc->underrun_cnt);
1337
1338 /* trigger dump only on the first underrun */
1339 if (atomic_read(&phy_enc->underrun_cnt) == 1)
1340 msm_disp_snapshot_state(drm_enc->dev);
1341
1342 trace_dpu_enc_underrun_cb(DRMID(drm_enc),
1343 atomic_read(&phy_enc->underrun_cnt));
1344 DPU_ATRACE_END("encoder_underrun_callback");
1345 }
1346
1347 void dpu_encoder_assign_crtc(struct drm_encoder *drm_enc, struct drm_crtc *crtc)
1348 {
1349 struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
1350 unsigned long lock_flags;
1351
1352 spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
1353 /* crtc should always be cleared before re-assigning */
1354 WARN_ON(crtc && dpu_enc->crtc);
1355 dpu_enc->crtc = crtc;
1356 spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
1357 }
1358
1359 void dpu_encoder_toggle_vblank_for_crtc(struct drm_encoder *drm_enc,
1360 struct drm_crtc *crtc, bool enable)
1361 {
1362 struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
1363 unsigned long lock_flags;
1364 int i;
1365
1366 trace_dpu_enc_vblank_cb(DRMID(drm_enc), enable);
1367
1368 spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
1369 if (dpu_enc->crtc != crtc) {
1370 spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
1371 return;
1372 }
1373 spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
1374
1375 for (i = 0; i < dpu_enc->num_phys_encs; i++) {
1376 struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
1377
1378 if (phys->ops.control_vblank_irq)
1379 phys->ops.control_vblank_irq(phys, enable);
1380 }
1381 }
1382
1383 void dpu_encoder_register_frame_event_callback(struct drm_encoder *drm_enc,
1384 void (*frame_event_cb)(void *, u32 event),
1385 void *frame_event_cb_data)
1386 {
1387 struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
1388 unsigned long lock_flags;
1389 bool enable;
1390
1391 enable = frame_event_cb ? true : false;
1392
1393 if (!drm_enc) {
1394 DPU_ERROR("invalid encoder\n");
1395 return;
1396 }
1397 trace_dpu_enc_frame_event_cb(DRMID(drm_enc), enable);
1398
1399 spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
1400 dpu_enc->crtc_frame_event_cb = frame_event_cb;
1401 dpu_enc->crtc_frame_event_cb_data = frame_event_cb_data;
1402 spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
1403 }
1404
1405 void dpu_encoder_frame_done_callback(
1406 struct drm_encoder *drm_enc,
1407 struct dpu_encoder_phys *ready_phys, u32 event)
1408 {
1409 struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
1410 unsigned int i;
1411
1412 if (event & (DPU_ENCODER_FRAME_EVENT_DONE
1413 | DPU_ENCODER_FRAME_EVENT_ERROR
1414 | DPU_ENCODER_FRAME_EVENT_PANEL_DEAD)) {
1415
1416 if (!dpu_enc->frame_busy_mask[0]) {
1417 /*
1418 * suppress frame_done without waiter,
1419 * likely autorefresh
1420 */
1421 trace_dpu_enc_frame_done_cb_not_busy(DRMID(drm_enc), event,
1422 dpu_encoder_helper_get_intf_type(ready_phys->intf_mode),
1423 ready_phys->hw_intf ? ready_phys->hw_intf->idx : -1,
1424 ready_phys->hw_wb ? ready_phys->hw_wb->idx : -1);
1425 return;
1426 }
1427
1428 /* One of the physical encoders has become idle */
1429 for (i = 0; i < dpu_enc->num_phys_encs; i++) {
1430 if (dpu_enc->phys_encs[i] == ready_phys) {
1431 trace_dpu_enc_frame_done_cb(DRMID(drm_enc), i,
1432 dpu_enc->frame_busy_mask[0]);
1433 clear_bit(i, dpu_enc->frame_busy_mask);
1434 }
1435 }
1436
1437 if (!dpu_enc->frame_busy_mask[0]) {
1438 atomic_set(&dpu_enc->frame_done_timeout_ms, 0);
1439 del_timer(&dpu_enc->frame_done_timer);
1440
1441 dpu_encoder_resource_control(drm_enc,
1442 DPU_ENC_RC_EVENT_FRAME_DONE);
1443
1444 if (dpu_enc->crtc_frame_event_cb)
1445 dpu_enc->crtc_frame_event_cb(
1446 dpu_enc->crtc_frame_event_cb_data,
1447 event);
1448 }
1449 } else {
1450 if (dpu_enc->crtc_frame_event_cb)
1451 dpu_enc->crtc_frame_event_cb(
1452 dpu_enc->crtc_frame_event_cb_data, event);
1453 }
1454 }
1455
1456 static void dpu_encoder_off_work(struct work_struct *work)
1457 {
1458 struct dpu_encoder_virt *dpu_enc = container_of(work,
1459 struct dpu_encoder_virt, delayed_off_work.work);
1460
1461 dpu_encoder_resource_control(&dpu_enc->base,
1462 DPU_ENC_RC_EVENT_ENTER_IDLE);
1463
1464 dpu_encoder_frame_done_callback(&dpu_enc->base, NULL,
1465 DPU_ENCODER_FRAME_EVENT_IDLE);
1466 }
1467
1468 /**
1469 * _dpu_encoder_trigger_flush - trigger flush for a physical encoder
1470 * @drm_enc: Pointer to drm encoder structure
1471 * @phys: Pointer to physical encoder structure
1472 * @extra_flush_bits: Additional bit mask to include in flush trigger
1473 */
1474 static void _dpu_encoder_trigger_flush(struct drm_encoder *drm_enc,
1475 struct dpu_encoder_phys *phys, uint32_t extra_flush_bits)
1476 {
1477 struct dpu_hw_ctl *ctl;
1478 int pending_kickoff_cnt;
1479 u32 ret = UINT_MAX;
1480
1481 if (!phys->hw_pp) {
1482 DPU_ERROR("invalid pingpong hw\n");
1483 return;
1484 }
1485
1486 ctl = phys->hw_ctl;
1487 if (!ctl->ops.trigger_flush) {
1488 DPU_ERROR("missing trigger cb\n");
1489 return;
1490 }
1491
1492 pending_kickoff_cnt = dpu_encoder_phys_inc_pending(phys);
1493
1494 if (extra_flush_bits && ctl->ops.update_pending_flush)
1495 ctl->ops.update_pending_flush(ctl, extra_flush_bits);
1496
1497 ctl->ops.trigger_flush(ctl);
1498
1499 if (ctl->ops.get_pending_flush)
1500 ret = ctl->ops.get_pending_flush(ctl);
1501
1502 trace_dpu_enc_trigger_flush(DRMID(drm_enc),
1503 dpu_encoder_helper_get_intf_type(phys->intf_mode),
1504 phys->hw_intf ? phys->hw_intf->idx : -1,
1505 phys->hw_wb ? phys->hw_wb->idx : -1,
1506 pending_kickoff_cnt, ctl->idx,
1507 extra_flush_bits, ret);
1508 }
1509
1510 /**
1511 * _dpu_encoder_trigger_start - trigger start for a physical encoder
1512 * @phys: Pointer to physical encoder structure
1513 */
1514 static void _dpu_encoder_trigger_start(struct dpu_encoder_phys *phys)
1515 {
1516 if (!phys) {
1517 DPU_ERROR("invalid argument(s)\n");
1518 return;
1519 }
1520
1521 if (!phys->hw_pp) {
1522 DPU_ERROR("invalid pingpong hw\n");
1523 return;
1524 }
1525
1526 if (phys->ops.trigger_start && phys->enable_state != DPU_ENC_DISABLED)
1527 phys->ops.trigger_start(phys);
1528 }
1529
1530 void dpu_encoder_helper_trigger_start(struct dpu_encoder_phys *phys_enc)
1531 {
1532 struct dpu_hw_ctl *ctl;
1533
1534 ctl = phys_enc->hw_ctl;
1535 if (ctl->ops.trigger_start) {
1536 ctl->ops.trigger_start(ctl);
1537 trace_dpu_enc_trigger_start(DRMID(phys_enc->parent), ctl->idx);
1538 }
1539 }
1540
1541 static int dpu_encoder_helper_wait_event_timeout(
1542 int32_t drm_id,
1543 u32 irq_idx,
1544 struct dpu_encoder_wait_info *info)
1545 {
1546 int rc = 0;
1547 s64 expected_time = ktime_to_ms(ktime_get()) + info->timeout_ms;
1548 s64 jiffies = msecs_to_jiffies(info->timeout_ms);
1549 s64 time;
1550
1551 do {
1552 rc = wait_event_timeout(*(info->wq),
1553 atomic_read(info->atomic_cnt) == 0, jiffies);
1554 time = ktime_to_ms(ktime_get());
1555
1556 trace_dpu_enc_wait_event_timeout(drm_id, irq_idx, rc, time,
1557 expected_time,
1558 atomic_read(info->atomic_cnt));
1559 /* If we timed out but the counter is still pending and the deadline has not passed, wait again */
1560 } while (atomic_read(info->atomic_cnt) && (rc == 0) &&
1561 (time < expected_time));
1562
1563 return rc;
1564 }
1565
1566 static void dpu_encoder_helper_hw_reset(struct dpu_encoder_phys *phys_enc)
1567 {
1568 struct dpu_encoder_virt *dpu_enc;
1569 struct dpu_hw_ctl *ctl;
1570 int rc;
1571 struct drm_encoder *drm_enc;
1572
1573 dpu_enc = to_dpu_encoder_virt(phys_enc->parent);
1574 ctl = phys_enc->hw_ctl;
1575 drm_enc = phys_enc->parent;
1576
1577 if (!ctl->ops.reset)
1578 return;
1579
1580 DRM_DEBUG_KMS("id:%u ctl %d reset\n", DRMID(drm_enc),
1581 ctl->idx);
1582
1583 rc = ctl->ops.reset(ctl);
1584 if (rc) {
1585 DPU_ERROR_ENC(dpu_enc, "ctl %d reset failure\n", ctl->idx);
1586 msm_disp_snapshot_state(drm_enc->dev);
1587 }
1588
1589 phys_enc->enable_state = DPU_ENC_ENABLED;
1590 }
1591
1592 /**
1593 * _dpu_encoder_kickoff_phys - handle physical encoder kickoff
1594 * Iterate through the physical encoders and perform consolidated flush
1595 * and/or control start triggering as needed. This is done in the virtual
1596 * encoder rather than the individual physical ones in order to handle
1597 * use cases that require visibility into multiple physical encoders at
1598 * a time.
1599 * @dpu_enc: Pointer to virtual encoder structure
1600 */
1601 static void _dpu_encoder_kickoff_phys(struct dpu_encoder_virt *dpu_enc)
1602 {
1603 struct dpu_hw_ctl *ctl;
1604 uint32_t i, pending_flush;
1605 unsigned long lock_flags;
1606
1607 pending_flush = 0x0;
1608
1609 /* update pending counts and trigger kickoff ctl flush atomically */
1610 spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
1611
1612 /* don't perform flush/start operations for slave encoders */
1613 for (i = 0; i < dpu_enc->num_phys_encs; i++) {
1614 struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
1615
1616 if (phys->enable_state == DPU_ENC_DISABLED)
1617 continue;
1618
1619 ctl = phys->hw_ctl;
1620
1621 /*
1622 * This is cleared in frame_done worker, which isn't invoked
1623 * for async commits. So don't set this for async, since it'll
1624 * roll over to the next commit.
1625 */
1626 if (phys->split_role != ENC_ROLE_SLAVE)
1627 set_bit(i, dpu_enc->frame_busy_mask);
1628
1629 if (!phys->ops.needs_single_flush ||
1630 !phys->ops.needs_single_flush(phys))
1631 _dpu_encoder_trigger_flush(&dpu_enc->base, phys, 0x0);
1632 else if (ctl->ops.get_pending_flush)
1633 pending_flush |= ctl->ops.get_pending_flush(ctl);
1634 }
1635
1636 /* for split flush, combine pending flush masks and send to master */
1637 if (pending_flush && dpu_enc->cur_master) {
1638 _dpu_encoder_trigger_flush(
1639 &dpu_enc->base,
1640 dpu_enc->cur_master,
1641 pending_flush);
1642 }
1643
1644 _dpu_encoder_trigger_start(dpu_enc->cur_master);
1645
1646 spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
1647 }
1648
1649 void dpu_encoder_trigger_kickoff_pending(struct drm_encoder *drm_enc)
1650 {
1651 struct dpu_encoder_virt *dpu_enc;
1652 struct dpu_encoder_phys *phys;
1653 unsigned int i;
1654 struct dpu_hw_ctl *ctl;
1655 struct msm_display_info *disp_info;
1656
1657 if (!drm_enc) {
1658 DPU_ERROR("invalid encoder\n");
1659 return;
1660 }
1661 dpu_enc = to_dpu_encoder_virt(drm_enc);
1662 disp_info = &dpu_enc->disp_info;
1663
1664 for (i = 0; i < dpu_enc->num_phys_encs; i++) {
1665 phys = dpu_enc->phys_encs[i];
1666
1667 ctl = phys->hw_ctl;
1668 if (ctl->ops.clear_pending_flush)
1669 ctl->ops.clear_pending_flush(ctl);
1670
1671 /* update only for command mode primary ctl */
1672 if ((phys == dpu_enc->cur_master) &&
1673 disp_info->is_cmd_mode
1674 && ctl->ops.trigger_pending)
1675 ctl->ops.trigger_pending(ctl);
1676 }
1677 }
1678
1679 static u32 _dpu_encoder_calculate_linetime(struct dpu_encoder_virt *dpu_enc,
1680 struct drm_display_mode *mode)
1681 {
1682 u64 pclk_rate;
1683 u32 pclk_period;
1684 u32 line_time;
1685
1686 /*
1687 * For linetime calculation, only operate on master encoder.
1688 */
1689 if (!dpu_enc->cur_master)
1690 return 0;
1691
1692 if (!dpu_enc->cur_master->ops.get_line_count) {
1693 DPU_ERROR("get_line_count function not defined\n");
1694 return 0;
1695 }
1696
1697 pclk_rate = mode->clock; /* pixel clock in kHz */
1698 if (pclk_rate == 0) {
1699 DPU_ERROR("pclk is 0, cannot calculate line time\n");
1700 return 0;
1701 }
1702
1703 pclk_period = DIV_ROUND_UP_ULL(1000000000ull, pclk_rate);
1704 if (pclk_period == 0) {
1705 DPU_ERROR("pclk period is 0\n");
1706 return 0;
1707 }
1708
1709 /*
1710 * Line time calculation based on Pixel clock and HTOTAL.
1711 * Final unit is in ns.
1712 */
1713 line_time = (pclk_period * mode->htotal) / 1000;
1714 if (line_time == 0) {
1715 DPU_ERROR("line time calculation is 0\n");
1716 return 0;
1717 }
1718
1719 DPU_DEBUG_ENC(dpu_enc,
1720 "clk_rate=%lldkHz, clk_period=%d, linetime=%dns\n",
1721 pclk_rate, pclk_period, line_time);
1722
1723 return line_time;
1724 }
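/*
 * Worked example of the math above with an illustrative 1080p60-like timing:
 * mode->clock = 148500 kHz and htotal = 2200 give
 * pclk_period = DIV_ROUND_UP(10^9, 148500) = 6735 (effectively picoseconds,
 * since the clock is in kHz), so line_time = 6735 * 2200 / 1000 = 14817 ns,
 * i.e. roughly 14.8 us per line.
 */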
1725
1726 int dpu_encoder_vsync_time(struct drm_encoder *drm_enc, ktime_t *wakeup_time)
1727 {
1728 struct drm_display_mode *mode;
1729 struct dpu_encoder_virt *dpu_enc;
1730 u32 cur_line;
1731 u32 line_time;
1732 u32 vtotal, time_to_vsync;
1733 ktime_t cur_time;
1734
1735 dpu_enc = to_dpu_encoder_virt(drm_enc);
1736
1737 if (!drm_enc->crtc || !drm_enc->crtc->state) {
1738 DPU_ERROR("crtc/crtc state object is NULL\n");
1739 return -EINVAL;
1740 }
1741 mode = &drm_enc->crtc->state->adjusted_mode;
1742
1743 line_time = _dpu_encoder_calculate_linetime(dpu_enc, mode);
1744 if (!line_time)
1745 return -EINVAL;
1746
1747 cur_line = dpu_enc->cur_master->ops.get_line_count(dpu_enc->cur_master);
1748
1749 vtotal = mode->vtotal;
1750 if (cur_line >= vtotal)
1751 time_to_vsync = line_time * vtotal;
1752 else
1753 time_to_vsync = line_time * (vtotal - cur_line);
1754
1755 if (time_to_vsync == 0) {
1756 DPU_ERROR("time to vsync should not be zero, vtotal=%d\n",
1757 vtotal);
1758 return -EINVAL;
1759 }
1760
1761 cur_time = ktime_get();
1762 *wakeup_time = ktime_add_ns(cur_time, time_to_vsync);
1763
1764 DPU_DEBUG_ENC(dpu_enc,
1765 "cur_line=%u vtotal=%u time_to_vsync=%u, cur_time=%lld, wakeup_time=%lld\n",
1766 cur_line, vtotal, time_to_vsync,
1767 ktime_to_ms(cur_time),
1768 ktime_to_ms(*wakeup_time));
1769 return 0;
1770 }
1771
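/*
 * Compute the DSC initial_lines programming value from the per-encoder
 * input width. Purely illustrative arithmetic (the real values come from
 * the negotiated DSC configuration): with 8 bpc, one soft slice per
 * encoder, initial_xmit_delay = 512 and slice_width = 540, total_pixels =
 * 84 * 3 + 512 + 47 = 811 and initial_lines = DIV_ROUND_UP(811, 540) = 2.
 */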
1772 static u32
1773 dpu_encoder_dsc_initial_line_calc(struct drm_dsc_config *dsc,
1774 u32 enc_ip_width)
1775 {
1776 int ssm_delay, total_pixels, soft_slice_per_enc;
1777
1778 soft_slice_per_enc = enc_ip_width / dsc->slice_width;
1779
1780 /*
1781 * minimum number of initial line pixels is a sum of:
1782 * 1. sub-stream multiplexer delay (83 groups for 8bpc,
1783 * 91 for 10 bpc) * 3
1784 * 2. for two soft slice cases, add extra sub-stream multiplexer * 3
1785 * 3. the initial xmit delay
1786 * 4. total pipeline delay through the "lock step" of encoder (47)
1787 * 5. 6 additional pixels as the output of the rate buffer is
1788 * 48 bits wide
1789 */
1790 ssm_delay = ((dsc->bits_per_component < 10) ? 84 : 92);
1791 total_pixels = ssm_delay * 3 + dsc->initial_xmit_delay + 47;
1792 if (soft_slice_per_enc > 1)
1793 total_pixels += (ssm_delay * 3);
1794 return DIV_ROUND_UP(total_pixels, dsc->slice_width);
1795 }
1796
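/*
 * Program one DSC hardware block and its pingpong pair: core DSC config and
 * rate-control thresholds, bind the DSC to the pingpong, enable DSC at the
 * pingpong, and mark the DSC flush as pending in the CTL.
 */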
1797 static void dpu_encoder_dsc_pipe_cfg(struct dpu_hw_ctl *ctl,
1798 struct dpu_hw_dsc *hw_dsc,
1799 struct dpu_hw_pingpong *hw_pp,
1800 struct drm_dsc_config *dsc,
1801 u32 common_mode,
1802 u32 initial_lines)
1803 {
1804 if (hw_dsc->ops.dsc_config)
1805 hw_dsc->ops.dsc_config(hw_dsc, dsc, common_mode, initial_lines);
1806
1807 if (hw_dsc->ops.dsc_config_thresh)
1808 hw_dsc->ops.dsc_config_thresh(hw_dsc, dsc);
1809
1810 if (hw_pp->ops.setup_dsc)
1811 hw_pp->ops.setup_dsc(hw_pp);
1812
1813 if (hw_dsc->ops.dsc_bind_pingpong_blk)
1814 hw_dsc->ops.dsc_bind_pingpong_blk(hw_dsc, hw_pp->idx);
1815
1816 if (hw_pp->ops.enable_dsc)
1817 hw_pp->ops.enable_dsc(hw_pp);
1818
1819 if (ctl->ops.update_pending_flush_dsc)
1820 ctl->ops.update_pending_flush_dsc(ctl, hw_dsc->idx);
1821 }
1822
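/*
 * Split the compressed stream across the two hardware DSC encoders of the
 * 2LM/2enc/1 DSC topology. Illustrative numbers only: with pic_width = 1080
 * and slice_width = 540, this_frame_slices = 2, intf_ip_w = 1080, and each
 * encoder compresses enc_ip_w = 540 pixels per line.
 */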
1823 static void dpu_encoder_prep_dsc(struct dpu_encoder_virt *dpu_enc,
1824 struct drm_dsc_config *dsc)
1825 {
1826 /* coding only for 2LM, 2enc, 1 dsc config */
1827 struct dpu_encoder_phys *enc_master = dpu_enc->cur_master;
1828 struct dpu_hw_ctl *ctl = enc_master->hw_ctl;
1829 struct dpu_hw_dsc *hw_dsc[MAX_CHANNELS_PER_ENC];
1830 struct dpu_hw_pingpong *hw_pp[MAX_CHANNELS_PER_ENC];
1831 int this_frame_slices;
1832 int intf_ip_w, enc_ip_w;
1833 int dsc_common_mode;
1834 int pic_width;
1835 u32 initial_lines;
1836 int i;
1837
1838 for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
1839 hw_pp[i] = dpu_enc->hw_pp[i];
1840 hw_dsc[i] = dpu_enc->hw_dsc[i];
1841
1842 if (!hw_pp[i] || !hw_dsc[i]) {
1843 DPU_ERROR_ENC(dpu_enc, "invalid params for DSC\n");
1844 return;
1845 }
1846 }
1847
1848 dsc_common_mode = 0;
1849 pic_width = dsc->pic_width;
1850
1851 dsc_common_mode = DSC_MODE_MULTIPLEX | DSC_MODE_SPLIT_PANEL;
1852 if (enc_master->intf_mode == INTF_MODE_VIDEO)
1853 dsc_common_mode |= DSC_MODE_VIDEO;
1854
1855 this_frame_slices = pic_width / dsc->slice_width;
1856 intf_ip_w = this_frame_slices * dsc->slice_width;
1857
1858 /*
1859 * DSC merge case: when using two encoders for the same stream,
1860 * the number of slices must be the same on both encoders.
1861 */
1862 enc_ip_w = intf_ip_w / 2;
1863 initial_lines = dpu_encoder_dsc_initial_line_calc(dsc, enc_ip_w);
1864
1865 for (i = 0; i < MAX_CHANNELS_PER_ENC; i++)
1866 dpu_encoder_dsc_pipe_cfg(ctl, hw_dsc[i], hw_pp[i],
1867 dsc, dsc_common_mode, initial_lines);
1868 }
1869
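/*
 * Kickoff preparation: let every phys encoder finish (or wait out) its
 * previous kickoff, bring the resource-control state machine to ON, reset
 * all phys encoders in order if any of them flagged a hardware reset, and
 * reprogram DSC when the topology uses it.
 */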
1870 void dpu_encoder_prepare_for_kickoff(struct drm_encoder *drm_enc)
1871 {
1872 struct dpu_encoder_virt *dpu_enc;
1873 struct dpu_encoder_phys *phys;
1874 bool needs_hw_reset = false;
1875 unsigned int i;
1876
1877 dpu_enc = to_dpu_encoder_virt(drm_enc);
1878
1879 trace_dpu_enc_prepare_kickoff(DRMID(drm_enc));
1880
1881 /* prepare for next kickoff, may include waiting on previous kickoff */
1882 DPU_ATRACE_BEGIN("enc_prepare_for_kickoff");
1883 for (i = 0; i < dpu_enc->num_phys_encs; i++) {
1884 phys = dpu_enc->phys_encs[i];
1885 if (phys->ops.prepare_for_kickoff)
1886 phys->ops.prepare_for_kickoff(phys);
1887 if (phys->enable_state == DPU_ENC_ERR_NEEDS_HW_RESET)
1888 needs_hw_reset = true;
1889 }
1890 DPU_ATRACE_END("enc_prepare_for_kickoff");
1891
1892 dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_KICKOFF);
1893
1894 /* if any phys needs reset, reset all phys, in-order */
1895 if (needs_hw_reset) {
1896 trace_dpu_enc_prepare_kickoff_reset(DRMID(drm_enc));
1897 for (i = 0; i < dpu_enc->num_phys_encs; i++) {
1898 dpu_encoder_helper_hw_reset(dpu_enc->phys_encs[i]);
1899 }
1900 }
1901
1902 if (dpu_enc->dsc)
1903 dpu_encoder_prep_dsc(dpu_enc, dpu_enc->dsc);
1904 }
1905
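/*
 * For virtual (writeback) encoders, ask every phys encoder whether its
 * output buffer is still valid for the commit; all other encoder types are
 * always considered valid.
 */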
1906 bool dpu_encoder_is_valid_for_commit(struct drm_encoder *drm_enc)
1907 {
1908 struct dpu_encoder_virt *dpu_enc;
1909 unsigned int i;
1910 struct dpu_encoder_phys *phys;
1911
1912 dpu_enc = to_dpu_encoder_virt(drm_enc);
1913
1914 if (drm_enc->encoder_type == DRM_MODE_ENCODER_VIRTUAL) {
1915 for (i = 0; i < dpu_enc->num_phys_encs; i++) {
1916 phys = dpu_enc->phys_encs[i];
1917 if (phys->ops.is_valid_for_commit && !phys->ops.is_valid_for_commit(phys)) {
1918 DPU_DEBUG("invalid FB not kicking off\n");
1919 return false;
1920 }
1921 }
1922 }
1923
1924 return true;
1925 }
1926
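/*
 * Arm the frame-done watchdog and trigger the actual flush/start. As an
 * example of the timeout arithmetic (assuming a 60 Hz mode), timeout_ms =
 * 5 * 1000 / 60 = 83 ms before a frame-done timeout is reported.
 */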
1927 void dpu_encoder_kickoff(struct drm_encoder *drm_enc)
1928 {
1929 struct dpu_encoder_virt *dpu_enc;
1930 struct dpu_encoder_phys *phys;
1931 unsigned long timeout_ms;
1932 unsigned int i;
1933
1934 DPU_ATRACE_BEGIN("encoder_kickoff");
1935 dpu_enc = to_dpu_encoder_virt(drm_enc);
1936
1937 trace_dpu_enc_kickoff(DRMID(drm_enc));
1938
1939 timeout_ms = DPU_ENCODER_FRAME_DONE_TIMEOUT_FRAMES * 1000 /
1940 drm_mode_vrefresh(&drm_enc->crtc->state->adjusted_mode);
1941
1942 atomic_set(&dpu_enc->frame_done_timeout_ms, timeout_ms);
1943 mod_timer(&dpu_enc->frame_done_timer,
1944 jiffies + msecs_to_jiffies(timeout_ms));
1945
1946 /* All phys encs are ready to go, trigger the kickoff */
1947 _dpu_encoder_kickoff_phys(dpu_enc);
1948
1949 /* allow phys encs to handle any post-kickoff business */
1950 for (i = 0; i < dpu_enc->num_phys_encs; i++) {
1951 phys = dpu_enc->phys_encs[i];
1952 if (phys->ops.handle_post_kickoff)
1953 phys->ops.handle_post_kickoff(phys);
1954 }
1955
1956 DPU_ATRACE_END("encoder_kickoff");
1957 }
1958
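/*
 * Clear every blend stage on the layer mixers assigned to this encoder and
 * mark the corresponding mixer flush bits as pending in the CTL.
 */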
1959 static void dpu_encoder_helper_reset_mixers(struct dpu_encoder_phys *phys_enc)
1960 {
1961 struct dpu_hw_mixer_cfg mixer;
1962 int i, num_lm;
1963 struct dpu_global_state *global_state;
1964 struct dpu_hw_blk *hw_lm[2];
1965 struct dpu_hw_mixer *hw_mixer[2];
1966 struct dpu_hw_ctl *ctl = phys_enc->hw_ctl;
1967
1968 memset(&mixer, 0, sizeof(mixer));
1969
1970 /* reset all mixers for this encoder */
1971 if (phys_enc->hw_ctl->ops.clear_all_blendstages)
1972 phys_enc->hw_ctl->ops.clear_all_blendstages(phys_enc->hw_ctl);
1973
1974 global_state = dpu_kms_get_existing_global_state(phys_enc->dpu_kms);
1975
1976 num_lm = dpu_rm_get_assigned_resources(&phys_enc->dpu_kms->rm, global_state,
1977 phys_enc->parent->base.id, DPU_HW_BLK_LM, hw_lm, ARRAY_SIZE(hw_lm));
1978
1979 for (i = 0; i < num_lm; i++) {
1980 hw_mixer[i] = to_dpu_hw_mixer(hw_lm[i]);
1981 if (phys_enc->hw_ctl->ops.update_pending_flush_mixer)
1982 phys_enc->hw_ctl->ops.update_pending_flush_mixer(ctl, hw_mixer[i]->idx);
1983
1984 /* clear all blendstages */
1985 if (phys_enc->hw_ctl->ops.setup_blendstage)
1986 phys_enc->hw_ctl->ops.setup_blendstage(ctl, hw_mixer[i]->idx, NULL);
1987 }
1988 }
1989
1990 static void dpu_encoder_dsc_pipe_clr(struct dpu_hw_ctl *ctl,
1991 struct dpu_hw_dsc *hw_dsc,
1992 struct dpu_hw_pingpong *hw_pp)
1993 {
1994 if (hw_dsc->ops.dsc_disable)
1995 hw_dsc->ops.dsc_disable(hw_dsc);
1996
1997 if (hw_pp->ops.disable_dsc)
1998 hw_pp->ops.disable_dsc(hw_pp);
1999
2000 if (hw_dsc->ops.dsc_bind_pingpong_blk)
2001 hw_dsc->ops.dsc_bind_pingpong_blk(hw_dsc, PINGPONG_NONE);
2002
2003 if (ctl->ops.update_pending_flush_dsc)
2004 ctl->ops.update_pending_flush_dsc(ctl, hw_dsc->idx);
2005 }
2006
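/*
 * Tear down the DSC pipes set up by dpu_encoder_prep_dsc(): disable each
 * DSC block, detach it from its pingpong and flag the DSC flush as pending.
 */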
2007 static void dpu_encoder_unprep_dsc(struct dpu_encoder_virt *dpu_enc)
2008 {
2009 /* coding only for 2LM, 2enc, 1 dsc config */
2010 struct dpu_encoder_phys *enc_master = dpu_enc->cur_master;
2011 struct dpu_hw_ctl *ctl = enc_master->hw_ctl;
2012 struct dpu_hw_dsc *hw_dsc[MAX_CHANNELS_PER_ENC];
2013 struct dpu_hw_pingpong *hw_pp[MAX_CHANNELS_PER_ENC];
2014 int i;
2015
2016 for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
2017 hw_pp[i] = dpu_enc->hw_pp[i];
2018 hw_dsc[i] = dpu_enc->hw_dsc[i];
2019
2020 if (hw_pp[i] && hw_dsc[i])
2021 dpu_encoder_dsc_pipe_clr(ctl, hw_dsc[i], hw_pp[i]);
2022 }
2023 }
2024
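/*
 * Bring the hardware owned by this phys encoder back to a clean state on
 * disable: reset the CTL, clear the mixers, detach WB/INTF blocks from
 * their pingpongs, reset merge-3D and DSC, and issue a final flush + start
 * so the cleanup actually reaches the hardware.
 */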
2025 void dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc)
2026 {
2027 struct dpu_hw_ctl *ctl = phys_enc->hw_ctl;
2028 struct dpu_hw_intf_cfg intf_cfg = { 0 };
2029 int i;
2030 struct dpu_encoder_virt *dpu_enc;
2031
2032 dpu_enc = to_dpu_encoder_virt(phys_enc->parent);
2033
2034 phys_enc->hw_ctl->ops.reset(ctl);
2035
2036 dpu_encoder_helper_reset_mixers(phys_enc);
2037
2038 /*
2039 * TODO: move the once-only operation like CTL flush/trigger
2040 * into dpu_encoder_virt_disable() and all operations which need
2041 * to be done per phys encoder into the phys_disable() op.
2042 */
2043 if (phys_enc->hw_wb) {
2044 /* disable the PP block */
2045 if (phys_enc->hw_wb->ops.bind_pingpong_blk)
2046 phys_enc->hw_wb->ops.bind_pingpong_blk(phys_enc->hw_wb, PINGPONG_NONE);
2047
2048 /* mark WB flush as pending */
2049 if (phys_enc->hw_ctl->ops.update_pending_flush_wb)
2050 phys_enc->hw_ctl->ops.update_pending_flush_wb(ctl, phys_enc->hw_wb->idx);
2051 } else {
2052 for (i = 0; i < dpu_enc->num_phys_encs; i++) {
2053 if (dpu_enc->phys_encs[i] && phys_enc->hw_intf->ops.bind_pingpong_blk)
2054 phys_enc->hw_intf->ops.bind_pingpong_blk(
2055 dpu_enc->phys_encs[i]->hw_intf,
2056 PINGPONG_NONE);
2057
2058 /* mark INTF flush as pending */
2059 if (phys_enc->hw_ctl->ops.update_pending_flush_intf)
2060 phys_enc->hw_ctl->ops.update_pending_flush_intf(phys_enc->hw_ctl,
2061 dpu_enc->phys_encs[i]->hw_intf->idx);
2062 }
2063 }
2064
2065 /* reset the merge 3D HW block */
2066 if (phys_enc->hw_pp && phys_enc->hw_pp->merge_3d) {
2067 phys_enc->hw_pp->merge_3d->ops.setup_3d_mode(phys_enc->hw_pp->merge_3d,
2068 BLEND_3D_NONE);
2069 if (phys_enc->hw_ctl->ops.update_pending_flush_merge_3d)
2070 phys_enc->hw_ctl->ops.update_pending_flush_merge_3d(ctl,
2071 phys_enc->hw_pp->merge_3d->idx);
2072 }
2073
2074 if (dpu_enc->dsc) {
2075 dpu_encoder_unprep_dsc(dpu_enc);
2076 dpu_enc->dsc = NULL;
2077 }
2078
2079 intf_cfg.stream_sel = 0; /* Don't care value for video mode */
2080 intf_cfg.mode_3d = dpu_encoder_helper_get_3d_blend_mode(phys_enc);
2081 intf_cfg.dsc = dpu_encoder_helper_get_dsc(phys_enc);
2082
2083 if (phys_enc->hw_intf)
2084 intf_cfg.intf = phys_enc->hw_intf->idx;
2085 if (phys_enc->hw_wb)
2086 intf_cfg.wb = phys_enc->hw_wb->idx;
2087
2088 if (phys_enc->hw_pp && phys_enc->hw_pp->merge_3d)
2089 intf_cfg.merge_3d = phys_enc->hw_pp->merge_3d->idx;
2090
2091 if (ctl->ops.reset_intf_cfg)
2092 ctl->ops.reset_intf_cfg(ctl, &intf_cfg);
2093
2094 ctl->ops.trigger_flush(ctl);
2095 ctl->ops.trigger_start(ctl);
2096 ctl->ops.clear_pending_flush(ctl);
2097 }
2098
2099 #ifdef CONFIG_DEBUG_FS
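/*
 * debugfs "status" file: one line per phys encoder showing its INTF/WB
 * index plus the vsync and underrun counters. Illustrative path only (the
 * DRM minor and encoder id vary per system):
 *   cat /sys/kernel/debug/dri/0/encoder34/status
 */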
2100 static int _dpu_encoder_status_show(struct seq_file *s, void *data)
2101 {
2102 struct dpu_encoder_virt *dpu_enc = s->private;
2103 int i;
2104
2105 mutex_lock(&dpu_enc->enc_lock);
2106 for (i = 0; i < dpu_enc->num_phys_encs; i++) {
2107 struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
2108
2109 seq_printf(s, "intf:%d wb:%d vsync:%8d underrun:%8d ",
2110 phys->hw_intf ? phys->hw_intf->idx - INTF_0 : -1,
2111 phys->hw_wb ? phys->hw_wb->idx - WB_0 : -1,
2112 atomic_read(&phys->vsync_cnt),
2113 atomic_read(&phys->underrun_cnt));
2114
2115 seq_printf(s, "mode: %s\n", dpu_encoder_helper_get_intf_type(phys->intf_mode));
2116 }
2117 mutex_unlock(&dpu_enc->enc_lock);
2118
2119 return 0;
2120 }
2121
2122 DEFINE_SHOW_ATTRIBUTE(_dpu_encoder_status);
2123
2124 static int _dpu_encoder_init_debugfs(struct drm_encoder *drm_enc)
2125 {
2126 struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
2127
2128 char name[12];
2129
2130 if (!drm_enc->dev) {
2131 DPU_ERROR("invalid encoder or kms\n");
2132 return -EINVAL;
2133 }
2134
2135 snprintf(name, sizeof(name), "encoder%u", drm_enc->base.id);
2136
2137 /* create overall sub-directory for the encoder */
2138 dpu_enc->debugfs_root = debugfs_create_dir(name,
2139 drm_enc->dev->primary->debugfs_root);
2140
2141 /* don't error check these */
2142 debugfs_create_file("status", 0600,
2143 dpu_enc->debugfs_root, dpu_enc, &_dpu_encoder_status_fops);
2144
2145 return 0;
2146 }
2147 #else
2148 static int _dpu_encoder_init_debugfs(struct drm_encoder *drm_enc)
2149 {
2150 return 0;
2151 }
2152 #endif
2153
2154 static int dpu_encoder_late_register(struct drm_encoder *encoder)
2155 {
2156 return _dpu_encoder_init_debugfs(encoder);
2157 }
2158
2159 static void dpu_encoder_early_unregister(struct drm_encoder *encoder)
2160 {
2161 struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(encoder);
2162
2163 debugfs_remove_recursive(dpu_enc->debugfs_root);
2164 }
2165
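/*
 * Create the phys encoder backing one horizontal tile: writeback,
 * command-mode or video-mode depending on the display info, and record it
 * as the current master or slave based on the split role.
 */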
2166 static int dpu_encoder_virt_add_phys_encs(
2167 struct msm_display_info *disp_info,
2168 struct dpu_encoder_virt *dpu_enc,
2169 struct dpu_enc_phys_init_params *params)
2170 {
2171 struct dpu_encoder_phys *enc = NULL;
2172
2173 DPU_DEBUG_ENC(dpu_enc, "\n");
2174
2175 /*
2176 * We may create up to NUM_PHYS_ENCODER_TYPES physical encoder types
2177 * in this function, check up-front.
2178 */
2179 if (dpu_enc->num_phys_encs + NUM_PHYS_ENCODER_TYPES >=
2180 ARRAY_SIZE(dpu_enc->phys_encs)) {
2181 DPU_ERROR_ENC(dpu_enc, "too many physical encoders %d\n",
2182 dpu_enc->num_phys_encs);
2183 return -EINVAL;
2184 }
2185
2186
2187 if (disp_info->intf_type == INTF_WB) {
2188 enc = dpu_encoder_phys_wb_init(params);
2189
2190 if (IS_ERR(enc)) {
2191 DPU_ERROR_ENC(dpu_enc, "failed to init wb enc: %ld\n",
2192 PTR_ERR(enc));
2193 return PTR_ERR(enc);
2194 }
2195
2196 dpu_enc->phys_encs[dpu_enc->num_phys_encs] = enc;
2197 ++dpu_enc->num_phys_encs;
2198 } else if (disp_info->is_cmd_mode) {
2199 enc = dpu_encoder_phys_cmd_init(params);
2200
2201 if (IS_ERR(enc)) {
2202 DPU_ERROR_ENC(dpu_enc, "failed to init cmd enc: %ld\n",
2203 PTR_ERR(enc));
2204 return PTR_ERR(enc);
2205 }
2206
2207 dpu_enc->phys_encs[dpu_enc->num_phys_encs] = enc;
2208 ++dpu_enc->num_phys_encs;
2209 } else {
2210 enc = dpu_encoder_phys_vid_init(params);
2211
2212 if (IS_ERR(enc)) {
2213 DPU_ERROR_ENC(dpu_enc, "failed to init vid enc: %ld\n",
2214 PTR_ERR(enc));
2215 return PTR_ERR(enc);
2216 }
2217
2218 dpu_enc->phys_encs[dpu_enc->num_phys_encs] = enc;
2219 ++dpu_enc->num_phys_encs;
2220 }
2221
2222 if (params->split_role == ENC_ROLE_SLAVE)
2223 dpu_enc->cur_slave = enc;
2224 else
2225 dpu_enc->cur_master = enc;
2226
2227 return 0;
2228 }
2229
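/*
 * Walk the horizontal tiles of the display and create one phys encoder per
 * tile, resolving the INTF or WB hardware block and the master/slave split
 * role for each tile.
 */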
2230 static int dpu_encoder_setup_display(struct dpu_encoder_virt *dpu_enc,
2231 struct dpu_kms *dpu_kms,
2232 struct msm_display_info *disp_info)
2233 {
2234 int ret = 0;
2235 int i = 0;
2236 struct dpu_enc_phys_init_params phys_params;
2237
2238 if (!dpu_enc) {
2239 DPU_ERROR("invalid arg(s), enc %d\n", dpu_enc != NULL);
2240 return -EINVAL;
2241 }
2242
2243 dpu_enc->cur_master = NULL;
2244
2245 memset(&phys_params, 0, sizeof(phys_params));
2246 phys_params.dpu_kms = dpu_kms;
2247 phys_params.parent = &dpu_enc->base;
2248 phys_params.enc_spinlock = &dpu_enc->enc_spinlock;
2249
2250 WARN_ON(disp_info->num_of_h_tiles < 1);
2251
2252 DPU_DEBUG("dsi_info->num_of_h_tiles %d\n", disp_info->num_of_h_tiles);
2253
2254 if (disp_info->intf_type != INTF_WB)
2255 dpu_enc->idle_pc_supported =
2256 dpu_kms->catalog->caps->has_idle_pc;
2257
2258 mutex_lock(&dpu_enc->enc_lock);
2259 for (i = 0; i < disp_info->num_of_h_tiles && !ret; i++) {
2260 /*
2261 * Left-most tile is at index 0, content is controller id
2262 * h_tile_instance_ids[2] = {0, 1}; DSI0 = left, DSI1 = right
2263 * h_tile_instance_ids[2] = {1, 0}; DSI1 = left, DSI0 = right
2264 */
2265 u32 controller_id = disp_info->h_tile_instance[i];
2266
2267 if (disp_info->num_of_h_tiles > 1) {
2268 if (i == 0)
2269 phys_params.split_role = ENC_ROLE_MASTER;
2270 else
2271 phys_params.split_role = ENC_ROLE_SLAVE;
2272 } else {
2273 phys_params.split_role = ENC_ROLE_SOLO;
2274 }
2275
2276 DPU_DEBUG("h_tile_instance %d = %d, split_role %d\n",
2277 i, controller_id, phys_params.split_role);
2278
2279 phys_params.hw_intf = dpu_encoder_get_intf(dpu_kms->catalog, &dpu_kms->rm,
2280 disp_info->intf_type,
2281 controller_id);
2282
2283 if (disp_info->intf_type == INTF_WB && controller_id < WB_MAX)
2284 phys_params.hw_wb = dpu_rm_get_wb(&dpu_kms->rm, controller_id);
2285
2286 if (!phys_params.hw_intf && !phys_params.hw_wb) {
2287 DPU_ERROR_ENC(dpu_enc, "no intf or wb block assigned at idx: %d\n", i);
2288 ret = -EINVAL;
2289 break;
2290 }
2291
2292 if (phys_params.hw_intf && phys_params.hw_wb) {
2293 DPU_ERROR_ENC(dpu_enc,
2294 "invalid phys both intf and wb block at idx: %d\n", i);
2295 ret = -EINVAL;
2296 break;
2297 }
2298
2299 ret = dpu_encoder_virt_add_phys_encs(disp_info,
2300 dpu_enc, &phys_params);
2301 if (ret) {
2302 DPU_ERROR_ENC(dpu_enc, "failed to add phys encs\n");
2303 break;
2304 }
2305 }
2306
2307 mutex_unlock(&dpu_enc->enc_lock);
2308
2309 return ret;
2310 }
2311
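/*
 * Frame-done watchdog: if the timer fires while frame_busy_mask is still
 * set and the timeout was armed, report a frame event error to the CRTC
 * callback.
 */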
2312 static void dpu_encoder_frame_done_timeout(struct timer_list *t)
2313 {
2314 struct dpu_encoder_virt *dpu_enc = from_timer(dpu_enc, t,
2315 frame_done_timer);
2316 struct drm_encoder *drm_enc = &dpu_enc->base;
2317 u32 event;
2318
2319 if (!drm_enc->dev) {
2320 DPU_ERROR("invalid parameters\n");
2321 return;
2322 }
2323
2324 if (!dpu_enc->frame_busy_mask[0] || !dpu_enc->crtc_frame_event_cb) {
2325 DRM_DEBUG_KMS("id:%u invalid timeout frame_busy_mask=%lu\n",
2326 DRMID(drm_enc), dpu_enc->frame_busy_mask[0]);
2327 return;
2328 } else if (!atomic_xchg(&dpu_enc->frame_done_timeout_ms, 0)) {
2329 DRM_DEBUG_KMS("id:%u invalid timeout\n", DRMID(drm_enc));
2330 return;
2331 }
2332
2333 DPU_ERROR_ENC_RATELIMITED(dpu_enc, "frame done timeout\n");
2334
2335 event = DPU_ENCODER_FRAME_EVENT_ERROR;
2336 trace_dpu_enc_frame_done_timeout(DRMID(drm_enc), event);
2337 dpu_enc->crtc_frame_event_cb(dpu_enc->crtc_frame_event_cb_data, event);
2338 }
2339
2340 static const struct drm_encoder_helper_funcs dpu_encoder_helper_funcs = {
2341 .atomic_mode_set = dpu_encoder_virt_atomic_mode_set,
2342 .atomic_disable = dpu_encoder_virt_atomic_disable,
2343 .atomic_enable = dpu_encoder_virt_atomic_enable,
2344 .atomic_check = dpu_encoder_virt_atomic_check,
2345 };
2346
2347 static const struct drm_encoder_funcs dpu_encoder_funcs = {
2348 .destroy = dpu_encoder_destroy,
2349 .late_register = dpu_encoder_late_register,
2350 .early_unregister = dpu_encoder_early_unregister,
2351 };
2352
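/*
 * Allocate and register the virtual encoder, then build its phys encoders
 * from the display info. The drm_encoder is embedded in dpu_encoder_virt
 * and is torn down through dpu_encoder_destroy().
 */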
2353 struct drm_encoder *dpu_encoder_init(struct drm_device *dev,
2354 int drm_enc_mode,
2355 struct msm_display_info *disp_info)
2356 {
2357 struct msm_drm_private *priv = dev->dev_private;
2358 struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms);
2359 struct drm_encoder *drm_enc = NULL;
2360 struct dpu_encoder_virt *dpu_enc = NULL;
2361 int ret = 0;
2362
2363 dpu_enc = devm_kzalloc(dev->dev, sizeof(*dpu_enc), GFP_KERNEL);
2364 if (!dpu_enc)
2365 return ERR_PTR(-ENOMEM);
2366
2367 ret = drm_encoder_init(dev, &dpu_enc->base, &dpu_encoder_funcs,
2368 drm_enc_mode, NULL);
2369 if (ret) {
2370 devm_kfree(dev->dev, dpu_enc);
2371 return ERR_PTR(ret);
2372 }
2373
2374 drm_encoder_helper_add(&dpu_enc->base, &dpu_encoder_helper_funcs);
2375
2376 spin_lock_init(&dpu_enc->enc_spinlock);
2377 dpu_enc->enabled = false;
2378 mutex_init(&dpu_enc->enc_lock);
2379 mutex_init(&dpu_enc->rc_lock);
2380
2381 ret = dpu_encoder_setup_display(dpu_enc, dpu_kms, disp_info);
2382 if (ret)
2383 goto fail;
2384
2385 atomic_set(&dpu_enc->frame_done_timeout_ms, 0);
2386 timer_setup(&dpu_enc->frame_done_timer,
2387 dpu_encoder_frame_done_timeout, 0);
2388
2389 if (disp_info->intf_type == INTF_DP)
2390 dpu_enc->wide_bus_en = msm_dp_wide_bus_available(
2391 priv->dp[disp_info->h_tile_instance[0]]);
2392
2393 INIT_DELAYED_WORK(&dpu_enc->delayed_off_work,
2394 dpu_encoder_off_work);
2395 dpu_enc->idle_timeout = IDLE_TIMEOUT;
2396
2397 memcpy(&dpu_enc->disp_info, disp_info, sizeof(*disp_info));
2398
2399 DPU_DEBUG_ENC(dpu_enc, "created\n");
2400
2401 return &dpu_enc->base;
2402
2403 fail:
2404 DPU_ERROR("failed to create encoder\n");
2405 if (drm_enc)
2406 dpu_encoder_destroy(drm_enc);
2407
2408 return ERR_PTR(ret);
2409 }
2410
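/*
 * Block until the requested event (commit done, TX complete or vblank) has
 * been observed on every phys encoder that implements the matching wait op.
 */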
2411 int dpu_encoder_wait_for_event(struct drm_encoder *drm_enc,
2412 enum msm_event_wait event)
2413 {
2414 int (*fn_wait)(struct dpu_encoder_phys *phys_enc) = NULL;
2415 struct dpu_encoder_virt *dpu_enc = NULL;
2416 int i, ret = 0;
2417
2418 if (!drm_enc) {
2419 DPU_ERROR("invalid encoder\n");
2420 return -EINVAL;
2421 }
2422 dpu_enc = to_dpu_encoder_virt(drm_enc);
2423 DPU_DEBUG_ENC(dpu_enc, "\n");
2424
2425 for (i = 0; i < dpu_enc->num_phys_encs; i++) {
2426 struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
2427
2428 switch (event) {
2429 case MSM_ENC_COMMIT_DONE:
2430 fn_wait = phys->ops.wait_for_commit_done;
2431 break;
2432 case MSM_ENC_TX_COMPLETE:
2433 fn_wait = phys->ops.wait_for_tx_complete;
2434 break;
2435 case MSM_ENC_VBLANK:
2436 fn_wait = phys->ops.wait_for_vblank;
2437 break;
2438 default:
2439 DPU_ERROR_ENC(dpu_enc, "unknown wait event %d\n",
2440 event);
2441 return -EINVAL;
2442 }
2443
2444 if (fn_wait) {
2445 DPU_ATRACE_BEGIN("wait_for_completion_event");
2446 ret = fn_wait(phys);
2447 DPU_ATRACE_END("wait_for_completion_event");
2448 if (ret)
2449 return ret;
2450 }
2451 }
2452
2453 return ret;
2454 }
2455
2456 enum dpu_intf_mode dpu_encoder_get_intf_mode(struct drm_encoder *encoder)
2457 {
2458 struct dpu_encoder_virt *dpu_enc = NULL;
2459
2460 if (!encoder) {
2461 DPU_ERROR("invalid encoder\n");
2462 return INTF_MODE_NONE;
2463 }
2464 dpu_enc = to_dpu_encoder_virt(encoder);
2465
2466 if (dpu_enc->cur_master)
2467 return dpu_enc->cur_master->intf_mode;
2468
2469 if (dpu_enc->num_phys_encs)
2470 return dpu_enc->phys_encs[0]->intf_mode;
2471
2472 return INTF_MODE_NONE;
2473 }
2474
2475 unsigned int dpu_encoder_helper_get_dsc(struct dpu_encoder_phys *phys_enc)
2476 {
2477 struct drm_encoder *encoder = phys_enc->parent;
2478 struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(encoder);
2479
2480 return dpu_enc->dsc_mask;
2481 }
2482
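/*
 * Common initialisation shared by all phys encoder types: copy the init
 * parameters, invalidate the IRQ table and zero the per-encoder counters
 * and wait queues.
 */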
2483 void dpu_encoder_phys_init(struct dpu_encoder_phys *phys_enc,
2484 struct dpu_enc_phys_init_params *p)
2485 {
2486 int i;
2487
2488 phys_enc->hw_mdptop = p->dpu_kms->hw_mdp;
2489 phys_enc->hw_intf = p->hw_intf;
2490 phys_enc->hw_wb = p->hw_wb;
2491 phys_enc->parent = p->parent;
2492 phys_enc->dpu_kms = p->dpu_kms;
2493 phys_enc->split_role = p->split_role;
2494 phys_enc->enc_spinlock = p->enc_spinlock;
2495 phys_enc->enable_state = DPU_ENC_DISABLED;
2496
2497 for (i = 0; i < ARRAY_SIZE(phys_enc->irq); i++)
2498 phys_enc->irq[i] = -EINVAL;
2499
2500 atomic_set(&phys_enc->vblank_refcount, 0);
2501 atomic_set(&phys_enc->pending_kickoff_cnt, 0);
2502 atomic_set(&phys_enc->pending_ctlstart_cnt, 0);
2503
2504 atomic_set(&phys_enc->vsync_cnt, 0);
2505 atomic_set(&phys_enc->underrun_cnt, 0);
2506
2507 init_waitqueue_head(&phys_enc->pending_kickoff_wq);
2508 }
2509