1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Copyright (C) 2013 Red Hat
4 * Copyright (c) 2014-2018, 2020-2021 The Linux Foundation. All rights reserved.
5 * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
6 *
7 * Author: Rob Clark <robdclark@gmail.com>
8 */
9
10 #define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
11 #include <linux/debugfs.h>
12 #include <linux/kthread.h>
13 #include <linux/seq_file.h>
14
15 #include <drm/drm_crtc.h>
16 #include <drm/drm_file.h>
17 #include <drm/drm_probe_helper.h>
18
19 #include "msm_drv.h"
20 #include "dpu_kms.h"
21 #include "dpu_hwio.h"
22 #include "dpu_hw_catalog.h"
23 #include "dpu_hw_intf.h"
24 #include "dpu_hw_ctl.h"
25 #include "dpu_hw_dspp.h"
26 #include "dpu_hw_dsc.h"
27 #include "dpu_hw_merge3d.h"
28 #include "dpu_formats.h"
29 #include "dpu_encoder_phys.h"
30 #include "dpu_crtc.h"
31 #include "dpu_trace.h"
32 #include "dpu_core_irq.h"
33 #include "disp/msm_disp_snapshot.h"
34
35 #define DPU_DEBUG_ENC(e, fmt, ...) DRM_DEBUG_ATOMIC("enc%d " fmt,\
36 (e) ? (e)->base.base.id : -1, ##__VA_ARGS__)
37
38 #define DPU_ERROR_ENC(e, fmt, ...) DPU_ERROR("enc%d " fmt,\
39 (e) ? (e)->base.base.id : -1, ##__VA_ARGS__)
40
41 /*
* Two, to anticipate panels that can do cmd/vid dynamic switching;
* the plan is to create all possible physical encoder types and switch
* between them at runtime.
45 */
46 #define NUM_PHYS_ENCODER_TYPES 2
47
48 #define MAX_PHYS_ENCODERS_PER_VIRTUAL \
49 (MAX_H_TILES_PER_DISPLAY * NUM_PHYS_ENCODER_TYPES)
50
51 #define MAX_CHANNELS_PER_ENC 2
52
53 #define IDLE_SHORT_TIMEOUT 1
54
55 #define MAX_HDISPLAY_SPLIT 1080
56
57 /* timeout in frames waiting for frame done */
58 #define DPU_ENCODER_FRAME_DONE_TIMEOUT_FRAMES 5
59
60 /**
61 * enum dpu_enc_rc_events - events for resource control state machine
62 * @DPU_ENC_RC_EVENT_KICKOFF:
63 * This event happens at NORMAL priority.
64 * Event that signals the start of the transfer. When this event is
65 * received, enable MDP/DSI core clocks. Regardless of the previous
66 * state, the resource should be in ON state at the end of this event.
67 * @DPU_ENC_RC_EVENT_FRAME_DONE:
68 * This event happens at INTERRUPT level.
69 * Event signals the end of the data transfer after the PP FRAME_DONE
70 * event. At the end of this event, a delayed work is scheduled to go to
71 * IDLE_PC state after IDLE_TIMEOUT time.
72 * @DPU_ENC_RC_EVENT_PRE_STOP:
73 * This event happens at NORMAL priority.
* This event, when received during the ON state, leaves the RC state
75 * in the PRE_OFF state. It should be followed by the STOP event as
76 * part of encoder disable.
77 * If received during IDLE or OFF states, it will do nothing.
78 * @DPU_ENC_RC_EVENT_STOP:
79 * This event happens at NORMAL priority.
80 * When this event is received, disable all the MDP/DSI core clocks, and
81 * disable IRQs. It should be called from the PRE_OFF or IDLE states.
82 * IDLE is expected when IDLE_PC has run, and PRE_OFF did nothing.
83 * PRE_OFF is expected when PRE_STOP was executed during the ON state.
84 * Resource state should be in OFF at the end of the event.
85 * @DPU_ENC_RC_EVENT_ENTER_IDLE:
86 * This event happens at NORMAL priority from a work item.
87 * Event signals that there were no frame updates for IDLE_TIMEOUT time.
88 * This would disable MDP/DSI core clocks and change the resource state
89 * to IDLE.
90 */
91 enum dpu_enc_rc_events {
92 DPU_ENC_RC_EVENT_KICKOFF = 1,
93 DPU_ENC_RC_EVENT_FRAME_DONE,
94 DPU_ENC_RC_EVENT_PRE_STOP,
95 DPU_ENC_RC_EVENT_STOP,
96 DPU_ENC_RC_EVENT_ENTER_IDLE
97 };
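
/*
 * Typical flow, per the documentation above: KICKOFF moves the state machine
 * to ON; FRAME_DONE schedules delayed work that later raises ENTER_IDLE
 * (state IDLE); encoder disable sends PRE_STOP (state PRE_OFF) followed by
 * STOP (state OFF).
 */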
98
99 /*
100 * enum dpu_enc_rc_states - states that the resource control maintains
101 * @DPU_ENC_RC_STATE_OFF: Resource is in OFF state
102 * @DPU_ENC_RC_STATE_PRE_OFF: Resource is transitioning to OFF state
103 * @DPU_ENC_RC_STATE_ON: Resource is in ON state
105 * @DPU_ENC_RC_STATE_IDLE: Resource is in IDLE state
106 */
107 enum dpu_enc_rc_states {
108 DPU_ENC_RC_STATE_OFF,
109 DPU_ENC_RC_STATE_PRE_OFF,
110 DPU_ENC_RC_STATE_ON,
111 DPU_ENC_RC_STATE_IDLE
112 };
113
114 /**
115 * struct dpu_encoder_virt - virtual encoder. Container of one or more physical
116 * encoders. Virtual encoder manages one "logical" display. Physical
117 * encoders manage one intf block, tied to a specific panel/sub-panel.
118 * Virtual encoder defers as much as possible to the physical encoders.
119 * Virtual encoder registers itself with the DRM Framework as the encoder.
120 * @base: drm_encoder base class for registration with DRM
121 * @enc_spinlock: Virtual-Encoder-Wide Spin Lock for IRQ purposes
122 * @enabled: True if the encoder is active, protected by enc_lock
123 * @num_phys_encs: Actual number of physical encoders contained.
124 * @phys_encs: Container of physical encoders managed.
* @cur_master: Pointer to the current master in this mode. Optimization:
* only valid after enable; cleared at disable.
127 * @cur_slave: As above but for the slave encoder.
* @hw_pp: Handle to the pingpong blocks used for the display. The number
* of pingpong blocks can differ from num_phys_encs.
130 * @hw_dsc: Handle to the DSC blocks used for the display.
131 * @dsc_mask: Bitmask of used DSC blocks.
132 * @intfs_swapped: Whether or not the phys_enc interfaces have been swapped
133 * for partial update right-only cases, such as pingpong
134 * split where virtual pingpong does not generate IRQs
135 * @crtc: Pointer to the currently assigned crtc. Normally you
136 * would use crtc->state->encoder_mask to determine the
137 * link between encoder/crtc. However in this case we need
138 * to track crtc in the disable() hook which is called
139 * _after_ encoder_mask is cleared.
140 * @connector: If a mode is set, cached pointer to the active connector
141 * @crtc_kickoff_cb: Callback into CRTC that will flush & start
142 * all CTL paths
143 * @crtc_kickoff_cb_data: Opaque user data given to crtc_kickoff_cb
144 * @debugfs_root: Debug file system root file node
145 * @enc_lock: Lock around physical encoder
146 * create/destroy/enable/disable
* @frame_busy_mask: Bitmask tracking which phys_encs are still busy
* processing the current command;
* Bit0 = phys_encs[0] etc.
150 * @crtc_frame_event_cb: callback handler for frame event
151 * @crtc_frame_event_cb_data: callback handler private data
152 * @frame_done_timeout_ms: frame done timeout in ms
153 * @frame_done_timer: watchdog timer for frame done event
154 * @vsync_event_timer: vsync timer
155 * @disp_info: local copy of msm_display_info struct
* @idle_pc_supported: indicates whether idle power collapse is supported
157 * @rc_lock: resource control mutex lock to protect
158 * virt encoder over various state changes
159 * @rc_state: resource controller state
160 * @delayed_off_work: delayed worker to schedule disabling of
161 * clks and resources after IDLE_TIMEOUT time.
162 * @vsync_event_work: worker to handle vsync event for autorefresh
163 * @topology: topology of the display
164 * @idle_timeout: idle timeout duration in milliseconds
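* @wide_bus_en: whether wide bus mode is enabled for this encoder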
165 * @dsc: msm_display_dsc_config pointer, for DSC-enabled encoders
166 */
167 struct dpu_encoder_virt {
168 struct drm_encoder base;
169 spinlock_t enc_spinlock;
170
171 bool enabled;
172
173 unsigned int num_phys_encs;
174 struct dpu_encoder_phys *phys_encs[MAX_PHYS_ENCODERS_PER_VIRTUAL];
175 struct dpu_encoder_phys *cur_master;
176 struct dpu_encoder_phys *cur_slave;
177 struct dpu_hw_pingpong *hw_pp[MAX_CHANNELS_PER_ENC];
178 struct dpu_hw_dsc *hw_dsc[MAX_CHANNELS_PER_ENC];
179
180 unsigned int dsc_mask;
181
182 bool intfs_swapped;
183
184 struct drm_crtc *crtc;
185 struct drm_connector *connector;
186
187 struct dentry *debugfs_root;
188 struct mutex enc_lock;
189 DECLARE_BITMAP(frame_busy_mask, MAX_PHYS_ENCODERS_PER_VIRTUAL);
190 void (*crtc_frame_event_cb)(void *, u32 event);
191 void *crtc_frame_event_cb_data;
192
193 atomic_t frame_done_timeout_ms;
194 struct timer_list frame_done_timer;
195 struct timer_list vsync_event_timer;
196
197 struct msm_display_info disp_info;
198
199 bool idle_pc_supported;
200 struct mutex rc_lock;
201 enum dpu_enc_rc_states rc_state;
202 struct delayed_work delayed_off_work;
203 struct kthread_work vsync_event_work;
204 struct msm_display_topology topology;
205
206 u32 idle_timeout;
207
208 bool wide_bus_en;
209
210 /* DSC configuration */
211 struct msm_display_dsc_config *dsc;
212 };
213
214 #define to_dpu_encoder_virt(x) container_of(x, struct dpu_encoder_virt, base)
215
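/* 4x4 ordered-dither (Bayer-style) threshold matrix used by the pingpong dither block */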
216 static u32 dither_matrix[DITHER_MATRIX_SZ] = {
217 15, 7, 13, 5, 3, 11, 1, 9, 12, 4, 14, 6, 0, 8, 2, 10
218 };
219
220
bool dpu_encoder_is_widebus_enabled(const struct drm_encoder *drm_enc)
222 {
223 const struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
224
225 return dpu_enc->wide_bus_en;
226 }
227
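/*
 * Program the pingpong dither block: 6 bpc panels get a fixed 4x4 dither
 * configuration, any other depth disables dithering.
 */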
static void _dpu_encoder_setup_dither(struct dpu_hw_pingpong *hw_pp, unsigned bpc)
229 {
230 struct dpu_hw_dither_cfg dither_cfg = { 0 };
231
232 if (!hw_pp->ops.setup_dither)
233 return;
234
235 switch (bpc) {
236 case 6:
237 dither_cfg.c0_bitdepth = 6;
238 dither_cfg.c1_bitdepth = 6;
239 dither_cfg.c2_bitdepth = 6;
240 dither_cfg.c3_bitdepth = 6;
241 dither_cfg.temporal_en = 0;
242 break;
243 default:
244 hw_pp->ops.setup_dither(hw_pp, NULL);
245 return;
246 }
247
248 memcpy(&dither_cfg.matrix, dither_matrix,
249 sizeof(u32) * DITHER_MATRIX_SZ);
250
251 hw_pp->ops.setup_dither(hw_pp, &dither_cfg);
252 }
253
static char *dpu_encoder_helper_get_intf_type(enum dpu_intf_mode intf_mode)
255 {
256 switch (intf_mode) {
257 case INTF_MODE_VIDEO:
258 return "INTF_MODE_VIDEO";
259 case INTF_MODE_CMD:
260 return "INTF_MODE_CMD";
261 case INTF_MODE_WB_BLOCK:
262 return "INTF_MODE_WB_BLOCK";
263 case INTF_MODE_WB_LINE:
264 return "INTF_MODE_WB_LINE";
265 default:
266 return "INTF_MODE_UNKNOWN";
267 }
268 }
269
void dpu_encoder_helper_report_irq_timeout(struct dpu_encoder_phys *phys_enc,
271 enum dpu_intr_idx intr_idx)
272 {
273 DRM_ERROR("irq timeout id=%u, intf_mode=%s intf=%d wb=%d, pp=%d, intr=%d\n",
274 DRMID(phys_enc->parent),
275 dpu_encoder_helper_get_intf_type(phys_enc->intf_mode),
276 phys_enc->intf_idx - INTF_0, phys_enc->wb_idx - WB_0,
277 phys_enc->hw_pp->idx - PINGPONG_0, intr_idx);
278
279 if (phys_enc->parent_ops->handle_frame_done)
280 phys_enc->parent_ops->handle_frame_done(
281 phys_enc->parent, phys_enc,
282 DPU_ENCODER_FRAME_EVENT_ERROR);
283 }
284
285 static int dpu_encoder_helper_wait_event_timeout(int32_t drm_id,
286 u32 irq_idx, struct dpu_encoder_wait_info *info);
287
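/*
 * Wait for the interrupt described by @wait_info to fire. Returns 0 on
 * success (including the case where the IRQ status shows it already fired
 * and the handler is invoked directly), -ETIMEDOUT on timeout and
 * -EWOULDBLOCK if the physical encoder is disabled.
 */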
int dpu_encoder_helper_wait_for_irq(struct dpu_encoder_phys *phys_enc,
289 int irq,
290 void (*func)(void *arg, int irq_idx),
291 struct dpu_encoder_wait_info *wait_info)
292 {
293 u32 irq_status;
294 int ret;
295
296 if (!wait_info) {
297 DPU_ERROR("invalid params\n");
298 return -EINVAL;
299 }
300 /* note: do master / slave checking outside */
301
302 /* return EWOULDBLOCK since we know the wait isn't necessary */
303 if (phys_enc->enable_state == DPU_ENC_DISABLED) {
304 DRM_ERROR("encoder is disabled id=%u, callback=%ps, irq=%d\n",
305 DRMID(phys_enc->parent), func,
306 irq);
307 return -EWOULDBLOCK;
308 }
309
310 if (irq < 0) {
311 DRM_DEBUG_KMS("skip irq wait id=%u, callback=%ps\n",
312 DRMID(phys_enc->parent), func);
313 return 0;
314 }
315
316 DRM_DEBUG_KMS("id=%u, callback=%ps, irq=%d, pp=%d, pending_cnt=%d\n",
317 DRMID(phys_enc->parent), func,
318 irq, phys_enc->hw_pp->idx - PINGPONG_0,
319 atomic_read(wait_info->atomic_cnt));
320
321 ret = dpu_encoder_helper_wait_event_timeout(
322 DRMID(phys_enc->parent),
323 irq,
324 wait_info);
325
326 if (ret <= 0) {
327 irq_status = dpu_core_irq_read(phys_enc->dpu_kms, irq);
328 if (irq_status) {
329 unsigned long flags;
330
331 DRM_DEBUG_KMS("irq not triggered id=%u, callback=%ps, irq=%d, pp=%d, atomic_cnt=%d\n",
332 DRMID(phys_enc->parent), func,
333 irq,
334 phys_enc->hw_pp->idx - PINGPONG_0,
335 atomic_read(wait_info->atomic_cnt));
336 local_irq_save(flags);
337 func(phys_enc, irq);
338 local_irq_restore(flags);
339 ret = 0;
340 } else {
341 ret = -ETIMEDOUT;
342 DRM_DEBUG_KMS("irq timeout id=%u, callback=%ps, irq=%d, pp=%d, atomic_cnt=%d\n",
343 DRMID(phys_enc->parent), func,
344 irq,
345 phys_enc->hw_pp->idx - PINGPONG_0,
346 atomic_read(wait_info->atomic_cnt));
347 }
348 } else {
349 ret = 0;
350 trace_dpu_enc_irq_wait_success(DRMID(phys_enc->parent),
351 func, irq,
352 phys_enc->hw_pp->idx - PINGPONG_0,
353 atomic_read(wait_info->atomic_cnt));
354 }
355
356 return ret;
357 }
358
int dpu_encoder_get_vsync_count(struct drm_encoder *drm_enc)
360 {
361 struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
362 struct dpu_encoder_phys *phys = dpu_enc ? dpu_enc->cur_master : NULL;
363 return phys ? atomic_read(&phys->vsync_cnt) : 0;
364 }
365
int dpu_encoder_get_linecount(struct drm_encoder *drm_enc)
367 {
368 struct dpu_encoder_virt *dpu_enc;
369 struct dpu_encoder_phys *phys;
370 int linecount = 0;
371
372 dpu_enc = to_dpu_encoder_virt(drm_enc);
373 phys = dpu_enc ? dpu_enc->cur_master : NULL;
374
375 if (phys && phys->ops.get_line_count)
376 linecount = phys->ops.get_line_count(phys);
377
378 return linecount;
379 }
380
static void dpu_encoder_destroy(struct drm_encoder *drm_enc)
382 {
383 struct dpu_encoder_virt *dpu_enc = NULL;
384 int i = 0;
385
386 if (!drm_enc) {
387 DPU_ERROR("invalid encoder\n");
388 return;
389 }
390
391 dpu_enc = to_dpu_encoder_virt(drm_enc);
392 DPU_DEBUG_ENC(dpu_enc, "\n");
393
394 mutex_lock(&dpu_enc->enc_lock);
395
396 for (i = 0; i < dpu_enc->num_phys_encs; i++) {
397 struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
398
399 if (phys->ops.destroy) {
400 phys->ops.destroy(phys);
401 --dpu_enc->num_phys_encs;
402 dpu_enc->phys_encs[i] = NULL;
403 }
404 }
405
406 if (dpu_enc->num_phys_encs)
407 DPU_ERROR_ENC(dpu_enc, "expected 0 num_phys_encs not %d\n",
408 dpu_enc->num_phys_encs);
409 dpu_enc->num_phys_encs = 0;
410 mutex_unlock(&dpu_enc->enc_lock);
411
412 drm_encoder_cleanup(drm_enc);
413 mutex_destroy(&dpu_enc->enc_lock);
414 }
415
void dpu_encoder_helper_split_config(
417 struct dpu_encoder_phys *phys_enc,
418 enum dpu_intf interface)
419 {
420 struct dpu_encoder_virt *dpu_enc;
421 struct split_pipe_cfg cfg = { 0 };
422 struct dpu_hw_mdp *hw_mdptop;
423 struct msm_display_info *disp_info;
424
425 if (!phys_enc->hw_mdptop || !phys_enc->parent) {
426 DPU_ERROR("invalid arg(s), encoder %d\n", phys_enc != NULL);
427 return;
428 }
429
430 dpu_enc = to_dpu_encoder_virt(phys_enc->parent);
431 hw_mdptop = phys_enc->hw_mdptop;
432 disp_info = &dpu_enc->disp_info;
433
434 if (disp_info->intf_type != DRM_MODE_ENCODER_DSI)
435 return;
436
/*
* disable split modes since the encoder will be operating as the only
* encoder, either for the entire use case (for example, single DSI),
* or for this frame in the case of left/right-only partial update.
*/
443 if (phys_enc->split_role == ENC_ROLE_SOLO) {
444 if (hw_mdptop->ops.setup_split_pipe)
445 hw_mdptop->ops.setup_split_pipe(hw_mdptop, &cfg);
446 return;
447 }
448
449 cfg.en = true;
450 cfg.mode = phys_enc->intf_mode;
451 cfg.intf = interface;
452
453 if (cfg.en && phys_enc->ops.needs_single_flush &&
454 phys_enc->ops.needs_single_flush(phys_enc))
455 cfg.split_flush_en = true;
456
457 if (phys_enc->split_role == ENC_ROLE_MASTER) {
458 DPU_DEBUG_ENC(dpu_enc, "enable %d\n", cfg.en);
459
460 if (hw_mdptop->ops.setup_split_pipe)
461 hw_mdptop->ops.setup_split_pipe(hw_mdptop, &cfg);
462 }
463 }
464
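/*
 * DSC merge is needed when the number of DSC blocks exceeds the number of
 * interfaces driving the display, i.e. two DSC encoders feed one interface.
 */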
bool dpu_encoder_use_dsc_merge(struct drm_encoder *drm_enc)
466 {
467 struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
468 int i, intf_count = 0, num_dsc = 0;
469
470 for (i = 0; i < MAX_PHYS_ENCODERS_PER_VIRTUAL; i++)
471 if (dpu_enc->phys_encs[i])
472 intf_count++;
473
474 /* See dpu_encoder_get_topology, we only support 2:2:1 topology */
475 if (dpu_enc->dsc)
476 num_dsc = 2;
477
478 return (num_dsc > 0) && (num_dsc > intf_count);
479 }
480
static struct msm_display_topology dpu_encoder_get_topology(
482 struct dpu_encoder_virt *dpu_enc,
483 struct dpu_kms *dpu_kms,
484 struct drm_display_mode *mode)
485 {
486 struct msm_display_topology topology = {0};
487 int i, intf_count = 0;
488
489 for (i = 0; i < MAX_PHYS_ENCODERS_PER_VIRTUAL; i++)
490 if (dpu_enc->phys_encs[i])
491 intf_count++;
492
/* Datapath topology selection
*
* Dual display
* 2 LM, 2 INTF (split display using 2 interfaces)
*
* Single display
* 1 LM, 1 INTF
* 2 LM, 1 INTF (stream merge to support high resolution interfaces)
*
* Add DSPP (color processing) blocks only to the primary interface,
* and only if they are available in sufficient number.
*/
505 if (intf_count == 2)
506 topology.num_lm = 2;
507 else if (!dpu_kms->catalog->caps->has_3d_merge)
508 topology.num_lm = 1;
509 else
510 topology.num_lm = (mode->hdisplay > MAX_HDISPLAY_SPLIT) ? 2 : 1;
511
512 if (dpu_enc->disp_info.intf_type == DRM_MODE_ENCODER_DSI) {
513 if (dpu_kms->catalog->dspp &&
514 (dpu_kms->catalog->dspp_count >= topology.num_lm))
515 topology.num_dspp = topology.num_lm;
516 }
517
518 topology.num_enc = 0;
519 topology.num_intf = intf_count;
520
521 if (dpu_enc->dsc) {
/* In case of Display Stream Compression (DSC), we would use
* 2 encoders, 2 layer mixers and 1 interface.
* This is power-optimal and can drive displays up to
* (and including) 4k resolution.
*/
527 topology.num_enc = 2;
528 topology.num_dsc = 2;
529 topology.num_intf = 1;
530 topology.num_lm = 2;
531 }
532
533 return topology;
534 }
535
static int dpu_encoder_virt_atomic_check(
537 struct drm_encoder *drm_enc,
538 struct drm_crtc_state *crtc_state,
539 struct drm_connector_state *conn_state)
540 {
541 struct dpu_encoder_virt *dpu_enc;
542 struct msm_drm_private *priv;
543 struct dpu_kms *dpu_kms;
544 struct drm_display_mode *adj_mode;
545 struct msm_display_topology topology;
546 struct dpu_global_state *global_state;
547 int i = 0;
548 int ret = 0;
549
550 if (!drm_enc || !crtc_state || !conn_state) {
551 DPU_ERROR("invalid arg(s), drm_enc %d, crtc/conn state %d/%d\n",
552 drm_enc != NULL, crtc_state != NULL, conn_state != NULL);
553 return -EINVAL;
554 }
555
556 dpu_enc = to_dpu_encoder_virt(drm_enc);
557 DPU_DEBUG_ENC(dpu_enc, "\n");
558
559 priv = drm_enc->dev->dev_private;
560 dpu_kms = to_dpu_kms(priv->kms);
561 adj_mode = &crtc_state->adjusted_mode;
562 global_state = dpu_kms_get_global_state(crtc_state->state);
563 if (IS_ERR(global_state))
564 return PTR_ERR(global_state);
565
566 trace_dpu_enc_atomic_check(DRMID(drm_enc));
567
/* perform atomic check on each physical encoder */
569 for (i = 0; i < dpu_enc->num_phys_encs; i++) {
570 struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
571
572 if (phys->ops.atomic_check)
573 ret = phys->ops.atomic_check(phys, crtc_state,
574 conn_state);
575 if (ret) {
576 DPU_ERROR_ENC(dpu_enc,
577 "mode unsupported, phys idx %d\n", i);
578 break;
579 }
580 }
581
582 topology = dpu_encoder_get_topology(dpu_enc, dpu_kms, adj_mode);
583
584 /* Reserve dynamic resources now. */
585 if (!ret) {
586 /*
* Release and allocate resources on every modeset.
* Don't allocate when active is false.
589 */
590 if (drm_atomic_crtc_needs_modeset(crtc_state)) {
591 dpu_rm_release(global_state, drm_enc);
592
593 if (!crtc_state->active_changed || crtc_state->active)
594 ret = dpu_rm_reserve(&dpu_kms->rm, global_state,
595 drm_enc, crtc_state, topology);
596 }
597 }
598
599 trace_dpu_enc_atomic_check_flags(DRMID(drm_enc), adj_mode->flags);
600
601 return ret;
602 }
603
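/*
 * Select the TE/vsync source for command-mode panels: a watchdog timer when
 * the panel TE line is not used, otherwise the dedicated vsync GPIO.
 */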
static void _dpu_encoder_update_vsync_source(struct dpu_encoder_virt *dpu_enc,
605 struct msm_display_info *disp_info)
606 {
607 struct dpu_vsync_source_cfg vsync_cfg = { 0 };
608 struct msm_drm_private *priv;
609 struct dpu_kms *dpu_kms;
610 struct dpu_hw_mdp *hw_mdptop;
611 struct drm_encoder *drm_enc;
612 int i;
613
614 if (!dpu_enc || !disp_info) {
615 DPU_ERROR("invalid param dpu_enc:%d or disp_info:%d\n",
616 dpu_enc != NULL, disp_info != NULL);
617 return;
618 } else if (dpu_enc->num_phys_encs > ARRAY_SIZE(dpu_enc->hw_pp)) {
619 DPU_ERROR("invalid num phys enc %d/%d\n",
620 dpu_enc->num_phys_encs,
621 (int) ARRAY_SIZE(dpu_enc->hw_pp));
622 return;
623 }
624
625 drm_enc = &dpu_enc->base;
/* these pointers are checked in virt_enable_helper */
627 priv = drm_enc->dev->dev_private;
628
629 dpu_kms = to_dpu_kms(priv->kms);
630 hw_mdptop = dpu_kms->hw_mdp;
631 if (!hw_mdptop) {
632 DPU_ERROR("invalid mdptop\n");
633 return;
634 }
635
636 if (hw_mdptop->ops.setup_vsync_source &&
637 disp_info->capabilities & MSM_DISPLAY_CAP_CMD_MODE) {
638 for (i = 0; i < dpu_enc->num_phys_encs; i++)
639 vsync_cfg.ppnumber[i] = dpu_enc->hw_pp[i]->idx;
640
641 vsync_cfg.pp_count = dpu_enc->num_phys_encs;
642 if (disp_info->is_te_using_watchdog_timer)
643 vsync_cfg.vsync_source = DPU_VSYNC_SOURCE_WD_TIMER_0;
644 else
645 vsync_cfg.vsync_source = DPU_VSYNC0_SOURCE_GPIO;
646
647 hw_mdptop->ops.setup_vsync_source(hw_mdptop, &vsync_cfg);
648 }
649 }
650
static void _dpu_encoder_irq_control(struct drm_encoder *drm_enc, bool enable)
652 {
653 struct dpu_encoder_virt *dpu_enc;
654 int i;
655
656 if (!drm_enc) {
657 DPU_ERROR("invalid encoder\n");
658 return;
659 }
660
661 dpu_enc = to_dpu_encoder_virt(drm_enc);
662
663 DPU_DEBUG_ENC(dpu_enc, "enable:%d\n", enable);
664 for (i = 0; i < dpu_enc->num_phys_encs; i++) {
665 struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
666
667 if (phys->ops.irq_control)
668 phys->ops.irq_control(phys, enable);
669 }
670
671 }
672
static void _dpu_encoder_resource_control_helper(struct drm_encoder *drm_enc,
674 bool enable)
675 {
676 struct msm_drm_private *priv;
677 struct dpu_kms *dpu_kms;
678 struct dpu_encoder_virt *dpu_enc;
679
680 dpu_enc = to_dpu_encoder_virt(drm_enc);
681 priv = drm_enc->dev->dev_private;
682 dpu_kms = to_dpu_kms(priv->kms);
683
684 trace_dpu_enc_rc_helper(DRMID(drm_enc), enable);
685
686 if (!dpu_enc->cur_master) {
687 DPU_ERROR("encoder master not set\n");
688 return;
689 }
690
691 if (enable) {
692 /* enable DPU core clks */
693 pm_runtime_get_sync(&dpu_kms->pdev->dev);
694
695 /* enable all the irq */
696 _dpu_encoder_irq_control(drm_enc, true);
697
698 } else {
699 /* disable all the irq */
700 _dpu_encoder_irq_control(drm_enc, false);
701
702 /* disable DPU core clks */
703 pm_runtime_put_sync(&dpu_kms->pdev->dev);
704 }
705
706 }
707
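/*
 * Resource control state machine entry point. Serializes clock/IRQ control
 * against the sw_event transitions documented in enum dpu_enc_rc_events;
 * FRAME_DONE is handled without taking rc_lock since it arrives from
 * interrupt context.
 */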
static int dpu_encoder_resource_control(struct drm_encoder *drm_enc,
709 u32 sw_event)
710 {
711 struct dpu_encoder_virt *dpu_enc;
712 struct msm_drm_private *priv;
713 bool is_vid_mode = false;
714
715 if (!drm_enc || !drm_enc->dev || !drm_enc->crtc) {
716 DPU_ERROR("invalid parameters\n");
717 return -EINVAL;
718 }
719 dpu_enc = to_dpu_encoder_virt(drm_enc);
720 priv = drm_enc->dev->dev_private;
721 is_vid_mode = dpu_enc->disp_info.capabilities &
722 MSM_DISPLAY_CAP_VID_MODE;
723
724 /*
* when idle_pc is not supported, process only KICKOFF, PRE_STOP and STOP
* events and return early for other events (i.e. writeback display).
727 */
728 if (!dpu_enc->idle_pc_supported &&
729 (sw_event != DPU_ENC_RC_EVENT_KICKOFF &&
730 sw_event != DPU_ENC_RC_EVENT_STOP &&
731 sw_event != DPU_ENC_RC_EVENT_PRE_STOP))
732 return 0;
733
734 trace_dpu_enc_rc(DRMID(drm_enc), sw_event, dpu_enc->idle_pc_supported,
735 dpu_enc->rc_state, "begin");
736
737 switch (sw_event) {
738 case DPU_ENC_RC_EVENT_KICKOFF:
739 /* cancel delayed off work, if any */
740 if (cancel_delayed_work_sync(&dpu_enc->delayed_off_work))
741 DPU_DEBUG_ENC(dpu_enc, "sw_event:%d, work cancelled\n",
742 sw_event);
743
744 mutex_lock(&dpu_enc->rc_lock);
745
746 /* return if the resource control is already in ON state */
747 if (dpu_enc->rc_state == DPU_ENC_RC_STATE_ON) {
DRM_DEBUG_ATOMIC("id:%u, sw_event:%d, rc in ON state\n",
749 DRMID(drm_enc), sw_event);
750 mutex_unlock(&dpu_enc->rc_lock);
751 return 0;
752 } else if (dpu_enc->rc_state != DPU_ENC_RC_STATE_OFF &&
753 dpu_enc->rc_state != DPU_ENC_RC_STATE_IDLE) {
DRM_DEBUG_ATOMIC("id:%u, sw_event:%d, rc in state %d\n",
755 DRMID(drm_enc), sw_event,
756 dpu_enc->rc_state);
757 mutex_unlock(&dpu_enc->rc_lock);
758 return -EINVAL;
759 }
760
761 if (is_vid_mode && dpu_enc->rc_state == DPU_ENC_RC_STATE_IDLE)
762 _dpu_encoder_irq_control(drm_enc, true);
763 else
764 _dpu_encoder_resource_control_helper(drm_enc, true);
765
766 dpu_enc->rc_state = DPU_ENC_RC_STATE_ON;
767
768 trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
769 dpu_enc->idle_pc_supported, dpu_enc->rc_state,
770 "kickoff");
771
772 mutex_unlock(&dpu_enc->rc_lock);
773 break;
774
775 case DPU_ENC_RC_EVENT_FRAME_DONE:
776 /*
* mutex lock is not used as this event happens in interrupt
* context. Locking is also not required because the other events,
* like KICKOFF and STOP, do a wait-for-idle before executing
* the resource_control.
781 */
782 if (dpu_enc->rc_state != DPU_ENC_RC_STATE_ON) {
783 DRM_DEBUG_KMS("id:%d, sw_event:%d,rc:%d-unexpected\n",
784 DRMID(drm_enc), sw_event,
785 dpu_enc->rc_state);
786 return -EINVAL;
787 }
788
789 /*
790 * schedule off work item only when there are no
791 * frames pending
792 */
793 if (dpu_crtc_frame_pending(drm_enc->crtc) > 1) {
794 DRM_DEBUG_KMS("id:%d skip schedule work\n",
795 DRMID(drm_enc));
796 return 0;
797 }
798
799 queue_delayed_work(priv->wq, &dpu_enc->delayed_off_work,
800 msecs_to_jiffies(dpu_enc->idle_timeout));
801
802 trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
803 dpu_enc->idle_pc_supported, dpu_enc->rc_state,
804 "frame done");
805 break;
806
807 case DPU_ENC_RC_EVENT_PRE_STOP:
808 /* cancel delayed off work, if any */
809 if (cancel_delayed_work_sync(&dpu_enc->delayed_off_work))
810 DPU_DEBUG_ENC(dpu_enc, "sw_event:%d, work cancelled\n",
811 sw_event);
812
813 mutex_lock(&dpu_enc->rc_lock);
814
815 if (is_vid_mode &&
816 dpu_enc->rc_state == DPU_ENC_RC_STATE_IDLE) {
817 _dpu_encoder_irq_control(drm_enc, true);
818 }
/* skip if it is already OFF or IDLE; resources are off already */
820 else if (dpu_enc->rc_state == DPU_ENC_RC_STATE_OFF ||
821 dpu_enc->rc_state == DPU_ENC_RC_STATE_IDLE) {
822 DRM_DEBUG_KMS("id:%u, sw_event:%d, rc in %d state\n",
823 DRMID(drm_enc), sw_event,
824 dpu_enc->rc_state);
825 mutex_unlock(&dpu_enc->rc_lock);
826 return 0;
827 }
828
829 dpu_enc->rc_state = DPU_ENC_RC_STATE_PRE_OFF;
830
831 trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
832 dpu_enc->idle_pc_supported, dpu_enc->rc_state,
833 "pre stop");
834
835 mutex_unlock(&dpu_enc->rc_lock);
836 break;
837
838 case DPU_ENC_RC_EVENT_STOP:
839 mutex_lock(&dpu_enc->rc_lock);
840
841 /* return if the resource control is already in OFF state */
842 if (dpu_enc->rc_state == DPU_ENC_RC_STATE_OFF) {
843 DRM_DEBUG_KMS("id: %u, sw_event:%d, rc in OFF state\n",
844 DRMID(drm_enc), sw_event);
845 mutex_unlock(&dpu_enc->rc_lock);
846 return 0;
847 } else if (dpu_enc->rc_state == DPU_ENC_RC_STATE_ON) {
848 DRM_ERROR("id: %u, sw_event:%d, rc in state %d\n",
849 DRMID(drm_enc), sw_event, dpu_enc->rc_state);
850 mutex_unlock(&dpu_enc->rc_lock);
851 return -EINVAL;
852 }
853
/*
* we expect to arrive here only from the PRE_OFF or IDLE state;
* in the IDLE state the resources are already disabled.
*/
858 if (dpu_enc->rc_state == DPU_ENC_RC_STATE_PRE_OFF)
859 _dpu_encoder_resource_control_helper(drm_enc, false);
860
861 dpu_enc->rc_state = DPU_ENC_RC_STATE_OFF;
862
863 trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
864 dpu_enc->idle_pc_supported, dpu_enc->rc_state,
865 "stop");
866
867 mutex_unlock(&dpu_enc->rc_lock);
868 break;
869
870 case DPU_ENC_RC_EVENT_ENTER_IDLE:
871 mutex_lock(&dpu_enc->rc_lock);
872
873 if (dpu_enc->rc_state != DPU_ENC_RC_STATE_ON) {
874 DRM_ERROR("id: %u, sw_event:%d, rc:%d !ON state\n",
875 DRMID(drm_enc), sw_event, dpu_enc->rc_state);
876 mutex_unlock(&dpu_enc->rc_lock);
877 return 0;
878 }
879
880 /*
881 * if we are in ON but a frame was just kicked off,
882 * ignore the IDLE event, it's probably a stale timer event
883 */
884 if (dpu_enc->frame_busy_mask[0]) {
885 DRM_ERROR("id:%u, sw_event:%d, rc:%d frame pending\n",
886 DRMID(drm_enc), sw_event, dpu_enc->rc_state);
887 mutex_unlock(&dpu_enc->rc_lock);
888 return 0;
889 }
890
891 if (is_vid_mode)
892 _dpu_encoder_irq_control(drm_enc, false);
893 else
894 _dpu_encoder_resource_control_helper(drm_enc, false);
895
896 dpu_enc->rc_state = DPU_ENC_RC_STATE_IDLE;
897
898 trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
899 dpu_enc->idle_pc_supported, dpu_enc->rc_state,
900 "idle");
901
902 mutex_unlock(&dpu_enc->rc_lock);
903 break;
904
905 default:
906 DRM_ERROR("id:%u, unexpected sw_event: %d\n", DRMID(drm_enc),
907 sw_event);
908 trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
909 dpu_enc->idle_pc_supported, dpu_enc->rc_state,
910 "error");
911 break;
912 }
913
914 trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
915 dpu_enc->idle_pc_supported, dpu_enc->rc_state,
916 "end");
917 return 0;
918 }
919
void dpu_encoder_prepare_wb_job(struct drm_encoder *drm_enc,
921 struct drm_writeback_job *job)
922 {
923 struct dpu_encoder_virt *dpu_enc;
924 int i;
925
926 dpu_enc = to_dpu_encoder_virt(drm_enc);
927
928 for (i = 0; i < dpu_enc->num_phys_encs; i++) {
929 struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
930
931 if (phys->ops.prepare_wb_job)
932 phys->ops.prepare_wb_job(phys, job);
933
934 }
935 }
936
void dpu_encoder_cleanup_wb_job(struct drm_encoder *drm_enc,
938 struct drm_writeback_job *job)
939 {
940 struct dpu_encoder_virt *dpu_enc;
941 int i;
942
943 dpu_enc = to_dpu_encoder_virt(drm_enc);
944
945 for (i = 0; i < dpu_enc->num_phys_encs; i++) {
946 struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
947
948 if (phys->ops.cleanup_wb_job)
949 phys->ops.cleanup_wb_job(phys, job);
950
951 }
952 }
953
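/*
 * Bind the hardware blocks (pingpong, CTL, LM, DSPP, DSC) reserved during
 * atomic check to this encoder and its physical encoders, then let each
 * physical encoder apply the adjusted mode.
 */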
static void dpu_encoder_virt_atomic_mode_set(struct drm_encoder *drm_enc,
955 struct drm_crtc_state *crtc_state,
956 struct drm_connector_state *conn_state)
957 {
958 struct dpu_encoder_virt *dpu_enc;
959 struct msm_drm_private *priv;
960 struct dpu_kms *dpu_kms;
961 struct dpu_crtc_state *cstate;
962 struct dpu_global_state *global_state;
963 struct dpu_hw_blk *hw_pp[MAX_CHANNELS_PER_ENC];
964 struct dpu_hw_blk *hw_ctl[MAX_CHANNELS_PER_ENC];
965 struct dpu_hw_blk *hw_lm[MAX_CHANNELS_PER_ENC];
966 struct dpu_hw_blk *hw_dspp[MAX_CHANNELS_PER_ENC] = { NULL };
967 struct dpu_hw_blk *hw_dsc[MAX_CHANNELS_PER_ENC];
968 int num_lm, num_ctl, num_pp, num_dsc;
969 unsigned int dsc_mask = 0;
970 int i;
971
972 if (!drm_enc) {
973 DPU_ERROR("invalid encoder\n");
974 return;
975 }
976
977 dpu_enc = to_dpu_encoder_virt(drm_enc);
978 DPU_DEBUG_ENC(dpu_enc, "\n");
979
980 priv = drm_enc->dev->dev_private;
981 dpu_kms = to_dpu_kms(priv->kms);
982
983 global_state = dpu_kms_get_existing_global_state(dpu_kms);
984 if (IS_ERR_OR_NULL(global_state)) {
985 DPU_ERROR("Failed to get global state");
986 return;
987 }
988
989 trace_dpu_enc_mode_set(DRMID(drm_enc));
990
/* Query resources that have been reserved in the atomic check step. */
992 num_pp = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
993 drm_enc->base.id, DPU_HW_BLK_PINGPONG, hw_pp,
994 ARRAY_SIZE(hw_pp));
995 num_ctl = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
996 drm_enc->base.id, DPU_HW_BLK_CTL, hw_ctl, ARRAY_SIZE(hw_ctl));
997 num_lm = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
998 drm_enc->base.id, DPU_HW_BLK_LM, hw_lm, ARRAY_SIZE(hw_lm));
999 dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
1000 drm_enc->base.id, DPU_HW_BLK_DSPP, hw_dspp,
1001 ARRAY_SIZE(hw_dspp));
1002
1003 for (i = 0; i < MAX_CHANNELS_PER_ENC; i++)
1004 dpu_enc->hw_pp[i] = i < num_pp ? to_dpu_hw_pingpong(hw_pp[i])
1005 : NULL;
1006
1007 if (dpu_enc->dsc) {
1008 num_dsc = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
1009 drm_enc->base.id, DPU_HW_BLK_DSC,
1010 hw_dsc, ARRAY_SIZE(hw_dsc));
1011 for (i = 0; i < num_dsc; i++) {
1012 dpu_enc->hw_dsc[i] = to_dpu_hw_dsc(hw_dsc[i]);
1013 dsc_mask |= BIT(dpu_enc->hw_dsc[i]->idx - DSC_0);
1014 }
1015 }
1016
1017 dpu_enc->dsc_mask = dsc_mask;
1018
1019 cstate = to_dpu_crtc_state(crtc_state);
1020
1021 for (i = 0; i < num_lm; i++) {
1022 int ctl_idx = (i < num_ctl) ? i : (num_ctl-1);
1023
1024 cstate->mixers[i].hw_lm = to_dpu_hw_mixer(hw_lm[i]);
1025 cstate->mixers[i].lm_ctl = to_dpu_hw_ctl(hw_ctl[ctl_idx]);
1026 cstate->mixers[i].hw_dspp = to_dpu_hw_dspp(hw_dspp[i]);
1027 }
1028
1029 cstate->num_mixers = num_lm;
1030
1031 dpu_enc->connector = conn_state->connector;
1032
1033 for (i = 0; i < dpu_enc->num_phys_encs; i++) {
1034 struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
1035
1036 if (!dpu_enc->hw_pp[i]) {
1037 DPU_ERROR_ENC(dpu_enc,
1038 "no pp block assigned at idx: %d\n", i);
1039 return;
1040 }
1041
1042 if (!hw_ctl[i]) {
1043 DPU_ERROR_ENC(dpu_enc,
1044 "no ctl block assigned at idx: %d\n", i);
1045 return;
1046 }
1047
1048 phys->hw_pp = dpu_enc->hw_pp[i];
1049 phys->hw_ctl = to_dpu_hw_ctl(hw_ctl[i]);
1050
1051 phys->cached_mode = crtc_state->adjusted_mode;
1052 if (phys->ops.atomic_mode_set)
1053 phys->ops.atomic_mode_set(phys, crtc_state, conn_state);
1054 }
1055 }
1056
static void _dpu_encoder_virt_enable_helper(struct drm_encoder *drm_enc)
1058 {
1059 struct dpu_encoder_virt *dpu_enc = NULL;
1060 int i;
1061
1062 if (!drm_enc || !drm_enc->dev) {
1063 DPU_ERROR("invalid parameters\n");
1064 return;
1065 }
1066
1067 dpu_enc = to_dpu_encoder_virt(drm_enc);
1068 if (!dpu_enc || !dpu_enc->cur_master) {
1069 DPU_ERROR("invalid dpu encoder/master\n");
1070 return;
1071 }
1072
1073
1074 if (dpu_enc->disp_info.intf_type == DRM_MODE_ENCODER_TMDS &&
1075 dpu_enc->cur_master->hw_mdptop &&
1076 dpu_enc->cur_master->hw_mdptop->ops.intf_audio_select)
1077 dpu_enc->cur_master->hw_mdptop->ops.intf_audio_select(
1078 dpu_enc->cur_master->hw_mdptop);
1079
1080 _dpu_encoder_update_vsync_source(dpu_enc, &dpu_enc->disp_info);
1081
1082 if (dpu_enc->disp_info.intf_type == DRM_MODE_ENCODER_DSI &&
1083 !WARN_ON(dpu_enc->num_phys_encs == 0)) {
1084 unsigned bpc = dpu_enc->connector->display_info.bpc;
1085 for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
1086 if (!dpu_enc->hw_pp[i])
1087 continue;
1088 _dpu_encoder_setup_dither(dpu_enc->hw_pp[i], bpc);
1089 }
1090 }
1091 }
1092
void dpu_encoder_virt_runtime_resume(struct drm_encoder *drm_enc)
1094 {
1095 struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
1096
1097 mutex_lock(&dpu_enc->enc_lock);
1098
1099 if (!dpu_enc->enabled)
1100 goto out;
1101
1102 if (dpu_enc->cur_slave && dpu_enc->cur_slave->ops.restore)
1103 dpu_enc->cur_slave->ops.restore(dpu_enc->cur_slave);
1104 if (dpu_enc->cur_master && dpu_enc->cur_master->ops.restore)
1105 dpu_enc->cur_master->ops.restore(dpu_enc->cur_master);
1106
1107 _dpu_encoder_virt_enable_helper(drm_enc);
1108
1109 out:
1110 mutex_unlock(&dpu_enc->enc_lock);
1111 }
1112
static void dpu_encoder_virt_enable(struct drm_encoder *drm_enc)
1114 {
1115 struct dpu_encoder_virt *dpu_enc = NULL;
1116 int ret = 0;
1117 struct drm_display_mode *cur_mode = NULL;
1118
1119 dpu_enc = to_dpu_encoder_virt(drm_enc);
1120
1121 mutex_lock(&dpu_enc->enc_lock);
1122 cur_mode = &dpu_enc->base.crtc->state->adjusted_mode;
1123
1124 trace_dpu_enc_enable(DRMID(drm_enc), cur_mode->hdisplay,
1125 cur_mode->vdisplay);
1126
1127 /* always enable slave encoder before master */
1128 if (dpu_enc->cur_slave && dpu_enc->cur_slave->ops.enable)
1129 dpu_enc->cur_slave->ops.enable(dpu_enc->cur_slave);
1130
1131 if (dpu_enc->cur_master && dpu_enc->cur_master->ops.enable)
1132 dpu_enc->cur_master->ops.enable(dpu_enc->cur_master);
1133
1134 ret = dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_KICKOFF);
1135 if (ret) {
1136 DPU_ERROR_ENC(dpu_enc, "dpu resource control failed: %d\n",
1137 ret);
1138 goto out;
1139 }
1140
1141 _dpu_encoder_virt_enable_helper(drm_enc);
1142
1143 dpu_enc->enabled = true;
1144
1145 out:
1146 mutex_unlock(&dpu_enc->enc_lock);
1147 }
1148
static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc)
1150 {
1151 struct dpu_encoder_virt *dpu_enc = NULL;
1152 int i = 0;
1153
1154 dpu_enc = to_dpu_encoder_virt(drm_enc);
1155 DPU_DEBUG_ENC(dpu_enc, "\n");
1156
1157 mutex_lock(&dpu_enc->enc_lock);
1158 dpu_enc->enabled = false;
1159
1160 trace_dpu_enc_disable(DRMID(drm_enc));
1161
1162 /* wait for idle */
1163 dpu_encoder_wait_for_event(drm_enc, MSM_ENC_TX_COMPLETE);
1164
1165 dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_PRE_STOP);
1166
1167 for (i = 0; i < dpu_enc->num_phys_encs; i++) {
1168 struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
1169
1170 if (phys->ops.disable)
1171 phys->ops.disable(phys);
1172 }
1173
1174
1175 /* after phys waits for frame-done, should be no more frames pending */
1176 if (atomic_xchg(&dpu_enc->frame_done_timeout_ms, 0)) {
1177 DPU_ERROR("enc%d timeout pending\n", drm_enc->base.id);
1178 del_timer_sync(&dpu_enc->frame_done_timer);
1179 }
1180
1181 dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_STOP);
1182
1183 dpu_enc->connector = NULL;
1184
1185 DPU_DEBUG_ENC(dpu_enc, "encoder disabled\n");
1186
1187 mutex_unlock(&dpu_enc->enc_lock);
1188 }
1189
static enum dpu_intf dpu_encoder_get_intf(struct dpu_mdss_cfg *catalog,
1191 enum dpu_intf_type type, u32 controller_id)
1192 {
1193 int i = 0;
1194
1195 if (type != INTF_WB) {
1196 for (i = 0; i < catalog->intf_count; i++) {
1197 if (catalog->intf[i].type == type
1198 && catalog->intf[i].controller_id == controller_id) {
1199 return catalog->intf[i].id;
1200 }
1201 }
1202 }
1203
1204 return INTF_MAX;
1205 }
1206
static enum dpu_wb dpu_encoder_get_wb(struct dpu_mdss_cfg *catalog,
1208 enum dpu_intf_type type, u32 controller_id)
1209 {
1210 int i = 0;
1211
1212 if (type != INTF_WB)
1213 goto end;
1214
1215 for (i = 0; i < catalog->wb_count; i++) {
1216 if (catalog->wb[i].id == controller_id)
1217 return catalog->wb[i].id;
1218 }
1219
1220 end:
1221 return WB_MAX;
1222 }
1223
static void dpu_encoder_vblank_callback(struct drm_encoder *drm_enc,
1225 struct dpu_encoder_phys *phy_enc)
1226 {
1227 struct dpu_encoder_virt *dpu_enc = NULL;
1228 unsigned long lock_flags;
1229
1230 if (!drm_enc || !phy_enc)
1231 return;
1232
1233 DPU_ATRACE_BEGIN("encoder_vblank_callback");
1234 dpu_enc = to_dpu_encoder_virt(drm_enc);
1235
1236 atomic_inc(&phy_enc->vsync_cnt);
1237
1238 spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
1239 if (dpu_enc->crtc)
1240 dpu_crtc_vblank_callback(dpu_enc->crtc);
1241 spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
1242
1243 DPU_ATRACE_END("encoder_vblank_callback");
1244 }
1245
static void dpu_encoder_underrun_callback(struct drm_encoder *drm_enc,
1247 struct dpu_encoder_phys *phy_enc)
1248 {
1249 if (!phy_enc)
1250 return;
1251
1252 DPU_ATRACE_BEGIN("encoder_underrun_callback");
1253 atomic_inc(&phy_enc->underrun_cnt);
1254
1255 /* trigger dump only on the first underrun */
1256 if (atomic_read(&phy_enc->underrun_cnt) == 1)
1257 msm_disp_snapshot_state(drm_enc->dev);
1258
1259 trace_dpu_enc_underrun_cb(DRMID(drm_enc),
1260 atomic_read(&phy_enc->underrun_cnt));
1261 DPU_ATRACE_END("encoder_underrun_callback");
1262 }
1263
void dpu_encoder_assign_crtc(struct drm_encoder *drm_enc, struct drm_crtc *crtc)
1265 {
1266 struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
1267 unsigned long lock_flags;
1268
1269 spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
1270 /* crtc should always be cleared before re-assigning */
1271 WARN_ON(crtc && dpu_enc->crtc);
1272 dpu_enc->crtc = crtc;
1273 spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
1274 }
1275
void dpu_encoder_toggle_vblank_for_crtc(struct drm_encoder *drm_enc,
1277 struct drm_crtc *crtc, bool enable)
1278 {
1279 struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
1280 unsigned long lock_flags;
1281 int i;
1282
1283 trace_dpu_enc_vblank_cb(DRMID(drm_enc), enable);
1284
1285 spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
1286 if (dpu_enc->crtc != crtc) {
1287 spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
1288 return;
1289 }
1290 spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
1291
1292 for (i = 0; i < dpu_enc->num_phys_encs; i++) {
1293 struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
1294
1295 if (phys->ops.control_vblank_irq)
1296 phys->ops.control_vblank_irq(phys, enable);
1297 }
1298 }
1299
void dpu_encoder_register_frame_event_callback(struct drm_encoder *drm_enc,
1301 void (*frame_event_cb)(void *, u32 event),
1302 void *frame_event_cb_data)
1303 {
1304 struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
1305 unsigned long lock_flags;
1306 bool enable;
1307
1308 enable = frame_event_cb ? true : false;
1309
1310 if (!drm_enc) {
1311 DPU_ERROR("invalid encoder\n");
1312 return;
1313 }
1314 trace_dpu_enc_frame_event_cb(DRMID(drm_enc), enable);
1315
1316 spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
1317 dpu_enc->crtc_frame_event_cb = frame_event_cb;
1318 dpu_enc->crtc_frame_event_cb_data = frame_event_cb_data;
1319 spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
1320 }
1321
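/*
 * Called when a physical encoder has finished its part of the frame. Once
 * all busy physical encoders have reported done, forward the event to the
 * CRTC and feed FRAME_DONE into resource control.
 */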
static void dpu_encoder_frame_done_callback(
1323 struct drm_encoder *drm_enc,
1324 struct dpu_encoder_phys *ready_phys, u32 event)
1325 {
1326 struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
1327 unsigned int i;
1328
1329 if (event & (DPU_ENCODER_FRAME_EVENT_DONE
1330 | DPU_ENCODER_FRAME_EVENT_ERROR
1331 | DPU_ENCODER_FRAME_EVENT_PANEL_DEAD)) {
1332
1333 if (!dpu_enc->frame_busy_mask[0]) {
/*
* suppress frame_done without waiter,
* likely autorefresh
*/
1338 trace_dpu_enc_frame_done_cb_not_busy(DRMID(drm_enc), event,
1339 dpu_encoder_helper_get_intf_type(ready_phys->intf_mode),
1340 ready_phys->intf_idx, ready_phys->wb_idx);
1341 return;
1342 }
1343
1344 /* One of the physical encoders has become idle */
1345 for (i = 0; i < dpu_enc->num_phys_encs; i++) {
1346 if (dpu_enc->phys_encs[i] == ready_phys) {
1347 trace_dpu_enc_frame_done_cb(DRMID(drm_enc), i,
1348 dpu_enc->frame_busy_mask[0]);
1349 clear_bit(i, dpu_enc->frame_busy_mask);
1350 }
1351 }
1352
1353 if (!dpu_enc->frame_busy_mask[0]) {
1354 atomic_set(&dpu_enc->frame_done_timeout_ms, 0);
1355 del_timer(&dpu_enc->frame_done_timer);
1356
1357 dpu_encoder_resource_control(drm_enc,
1358 DPU_ENC_RC_EVENT_FRAME_DONE);
1359
1360 if (dpu_enc->crtc_frame_event_cb)
1361 dpu_enc->crtc_frame_event_cb(
1362 dpu_enc->crtc_frame_event_cb_data,
1363 event);
1364 }
1365 } else {
1366 if (dpu_enc->crtc_frame_event_cb)
1367 dpu_enc->crtc_frame_event_cb(
1368 dpu_enc->crtc_frame_event_cb_data, event);
1369 }
1370 }
1371
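/* Delayed work that requests idle power collapse once idle_timeout has expired */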
static void dpu_encoder_off_work(struct work_struct *work)
1373 {
1374 struct dpu_encoder_virt *dpu_enc = container_of(work,
1375 struct dpu_encoder_virt, delayed_off_work.work);
1376
1377 dpu_encoder_resource_control(&dpu_enc->base,
1378 DPU_ENC_RC_EVENT_ENTER_IDLE);
1379
1380 dpu_encoder_frame_done_callback(&dpu_enc->base, NULL,
1381 DPU_ENCODER_FRAME_EVENT_IDLE);
1382 }
1383
1384 /**
1385 * _dpu_encoder_trigger_flush - trigger flush for a physical encoder
1386 * @drm_enc: Pointer to drm encoder structure
1387 * @phys: Pointer to physical encoder structure
1388 * @extra_flush_bits: Additional bit mask to include in flush trigger
1389 */
static void _dpu_encoder_trigger_flush(struct drm_encoder *drm_enc,
1391 struct dpu_encoder_phys *phys, uint32_t extra_flush_bits)
1392 {
1393 struct dpu_hw_ctl *ctl;
1394 int pending_kickoff_cnt;
1395 u32 ret = UINT_MAX;
1396
1397 if (!phys->hw_pp) {
1398 DPU_ERROR("invalid pingpong hw\n");
1399 return;
1400 }
1401
1402 ctl = phys->hw_ctl;
1403 if (!ctl->ops.trigger_flush) {
1404 DPU_ERROR("missing trigger cb\n");
1405 return;
1406 }
1407
1408 pending_kickoff_cnt = dpu_encoder_phys_inc_pending(phys);
1409
1410 if (extra_flush_bits && ctl->ops.update_pending_flush)
1411 ctl->ops.update_pending_flush(ctl, extra_flush_bits);
1412
1413 ctl->ops.trigger_flush(ctl);
1414
1415 if (ctl->ops.get_pending_flush)
1416 ret = ctl->ops.get_pending_flush(ctl);
1417
1418 trace_dpu_enc_trigger_flush(DRMID(drm_enc),
1419 dpu_encoder_helper_get_intf_type(phys->intf_mode),
1420 phys->intf_idx, phys->wb_idx,
1421 pending_kickoff_cnt, ctl->idx,
1422 extra_flush_bits, ret);
1423 }
1424
1425 /**
1426 * _dpu_encoder_trigger_start - trigger start for a physical encoder
1427 * @phys: Pointer to physical encoder structure
1428 */
static void _dpu_encoder_trigger_start(struct dpu_encoder_phys *phys)
1430 {
1431 if (!phys) {
1432 DPU_ERROR("invalid argument(s)\n");
1433 return;
1434 }
1435
1436 if (!phys->hw_pp) {
1437 DPU_ERROR("invalid pingpong hw\n");
1438 return;
1439 }
1440
1441 if (phys->ops.trigger_start && phys->enable_state != DPU_ENC_DISABLED)
1442 phys->ops.trigger_start(phys);
1443 }
1444
void dpu_encoder_helper_trigger_start(struct dpu_encoder_phys *phys_enc)
1446 {
1447 struct dpu_hw_ctl *ctl;
1448
1449 ctl = phys_enc->hw_ctl;
1450 if (ctl->ops.trigger_start) {
1451 ctl->ops.trigger_start(ctl);
1452 trace_dpu_enc_trigger_start(DRMID(phys_enc->parent), ctl->idx);
1453 }
1454 }
1455
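/*
 * Wait on info->wq until info->atomic_cnt drops to zero or the timeout
 * expires; spurious wakeups restart the wait until the deadline has passed.
 */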
static int dpu_encoder_helper_wait_event_timeout(
1457 int32_t drm_id,
1458 u32 irq_idx,
1459 struct dpu_encoder_wait_info *info)
1460 {
1461 int rc = 0;
1462 s64 expected_time = ktime_to_ms(ktime_get()) + info->timeout_ms;
1463 s64 jiffies = msecs_to_jiffies(info->timeout_ms);
1464 s64 time;
1465
1466 do {
1467 rc = wait_event_timeout(*(info->wq),
1468 atomic_read(info->atomic_cnt) == 0, jiffies);
1469 time = ktime_to_ms(ktime_get());
1470
1471 trace_dpu_enc_wait_event_timeout(drm_id, irq_idx, rc, time,
1472 expected_time,
1473 atomic_read(info->atomic_cnt));
/* If we timed out, the counter is still non-zero and the deadline has not passed, so wait again */
1475 } while (atomic_read(info->atomic_cnt) && (rc == 0) &&
1476 (time < expected_time));
1477
1478 return rc;
1479 }
1480
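/* Reset the CTL path of this physical encoder; capture a snapshot on failure */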
static void dpu_encoder_helper_hw_reset(struct dpu_encoder_phys *phys_enc)
1482 {
1483 struct dpu_encoder_virt *dpu_enc;
1484 struct dpu_hw_ctl *ctl;
1485 int rc;
1486 struct drm_encoder *drm_enc;
1487
1488 dpu_enc = to_dpu_encoder_virt(phys_enc->parent);
1489 ctl = phys_enc->hw_ctl;
1490 drm_enc = phys_enc->parent;
1491
1492 if (!ctl->ops.reset)
1493 return;
1494
1495 DRM_DEBUG_KMS("id:%u ctl %d reset\n", DRMID(drm_enc),
1496 ctl->idx);
1497
1498 rc = ctl->ops.reset(ctl);
1499 if (rc) {
1500 DPU_ERROR_ENC(dpu_enc, "ctl %d reset failure\n", ctl->idx);
1501 msm_disp_snapshot_state(drm_enc->dev);
1502 }
1503
1504 phys_enc->enable_state = DPU_ENC_ENABLED;
1505 }
1506
/**
* _dpu_encoder_kickoff_phys - handle physical encoder kickoff
* @dpu_enc: Pointer to virtual encoder structure
*
* Iterate through the physical encoders and perform consolidated flush
* and/or control start triggering as needed. This is done in the virtual
* encoder rather than the individual physical ones in order to handle
* use cases that require visibility into multiple physical encoders at
* a time.
*/
static void _dpu_encoder_kickoff_phys(struct dpu_encoder_virt *dpu_enc)
1517 {
1518 struct dpu_hw_ctl *ctl;
1519 uint32_t i, pending_flush;
1520 unsigned long lock_flags;
1521
1522 pending_flush = 0x0;
1523
1524 /* update pending counts and trigger kickoff ctl flush atomically */
1525 spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
1526
1527 /* don't perform flush/start operations for slave encoders */
1528 for (i = 0; i < dpu_enc->num_phys_encs; i++) {
1529 struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
1530
1531 if (phys->enable_state == DPU_ENC_DISABLED)
1532 continue;
1533
1534 ctl = phys->hw_ctl;
1535
1536 /*
1537 * This is cleared in frame_done worker, which isn't invoked
1538 * for async commits. So don't set this for async, since it'll
1539 * roll over to the next commit.
1540 */
1541 if (phys->split_role != ENC_ROLE_SLAVE)
1542 set_bit(i, dpu_enc->frame_busy_mask);
1543
1544 if (!phys->ops.needs_single_flush ||
1545 !phys->ops.needs_single_flush(phys))
1546 _dpu_encoder_trigger_flush(&dpu_enc->base, phys, 0x0);
1547 else if (ctl->ops.get_pending_flush)
1548 pending_flush |= ctl->ops.get_pending_flush(ctl);
1549 }
1550
1551 /* for split flush, combine pending flush masks and send to master */
1552 if (pending_flush && dpu_enc->cur_master) {
1553 _dpu_encoder_trigger_flush(
1554 &dpu_enc->base,
1555 dpu_enc->cur_master,
1556 pending_flush);
1557 }
1558
1559 _dpu_encoder_trigger_start(dpu_enc->cur_master);
1560
1561 spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
1562 }
1563
void dpu_encoder_trigger_kickoff_pending(struct drm_encoder *drm_enc)
1565 {
1566 struct dpu_encoder_virt *dpu_enc;
1567 struct dpu_encoder_phys *phys;
1568 unsigned int i;
1569 struct dpu_hw_ctl *ctl;
1570 struct msm_display_info *disp_info;
1571
1572 if (!drm_enc) {
1573 DPU_ERROR("invalid encoder\n");
1574 return;
1575 }
1576 dpu_enc = to_dpu_encoder_virt(drm_enc);
1577 disp_info = &dpu_enc->disp_info;
1578
1579 for (i = 0; i < dpu_enc->num_phys_encs; i++) {
1580 phys = dpu_enc->phys_encs[i];
1581
1582 ctl = phys->hw_ctl;
1583 if (ctl->ops.clear_pending_flush)
1584 ctl->ops.clear_pending_flush(ctl);
1585
1586 /* update only for command mode primary ctl */
1587 if ((phys == dpu_enc->cur_master) &&
1588 (disp_info->capabilities & MSM_DISPLAY_CAP_CMD_MODE)
1589 && ctl->ops.trigger_pending)
1590 ctl->ops.trigger_pending(ctl);
1591 }
1592 }
1593
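/*
 * Compute the duration of one display line in nanoseconds from the pixel
 * clock and horizontal total of the mode. For example, a 148500 kHz pixel
 * clock with htotal 2200 works out to roughly 14.8 us per line.
 */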
static u32 _dpu_encoder_calculate_linetime(struct dpu_encoder_virt *dpu_enc,
1595 struct drm_display_mode *mode)
1596 {
1597 u64 pclk_rate;
1598 u32 pclk_period;
1599 u32 line_time;
1600
1601 /*
1602 * For linetime calculation, only operate on master encoder.
1603 */
1604 if (!dpu_enc->cur_master)
1605 return 0;
1606
1607 if (!dpu_enc->cur_master->ops.get_line_count) {
1608 DPU_ERROR("get_line_count function not defined\n");
1609 return 0;
1610 }
1611
1612 pclk_rate = mode->clock; /* pixel clock in kHz */
1613 if (pclk_rate == 0) {
1614 DPU_ERROR("pclk is 0, cannot calculate line time\n");
1615 return 0;
1616 }
1617
1618 pclk_period = DIV_ROUND_UP_ULL(1000000000ull, pclk_rate);
1619 if (pclk_period == 0) {
1620 DPU_ERROR("pclk period is 0\n");
1621 return 0;
1622 }
1623
1624 /*
1625 * Line time calculation based on Pixel clock and HTOTAL.
1626 * Final unit is in ns.
1627 */
1628 line_time = (pclk_period * mode->htotal) / 1000;
1629 if (line_time == 0) {
1630 DPU_ERROR("line time calculation is 0\n");
1631 return 0;
1632 }
1633
1634 DPU_DEBUG_ENC(dpu_enc,
1635 "clk_rate=%lldkHz, clk_period=%d, linetime=%dns\n",
1636 pclk_rate, pclk_period, line_time);
1637
1638 return line_time;
1639 }
1640
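/*
 * Estimate the next vsync wakeup time from the current scanline, the line
 * time and the vtotal of the active mode.
 */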
int dpu_encoder_vsync_time(struct drm_encoder *drm_enc, ktime_t *wakeup_time)
1642 {
1643 struct drm_display_mode *mode;
1644 struct dpu_encoder_virt *dpu_enc;
1645 u32 cur_line;
1646 u32 line_time;
1647 u32 vtotal, time_to_vsync;
1648 ktime_t cur_time;
1649
1650 dpu_enc = to_dpu_encoder_virt(drm_enc);
1651
1652 if (!drm_enc->crtc || !drm_enc->crtc->state) {
1653 DPU_ERROR("crtc/crtc state object is NULL\n");
1654 return -EINVAL;
1655 }
1656 mode = &drm_enc->crtc->state->adjusted_mode;
1657
1658 line_time = _dpu_encoder_calculate_linetime(dpu_enc, mode);
1659 if (!line_time)
1660 return -EINVAL;
1661
1662 cur_line = dpu_enc->cur_master->ops.get_line_count(dpu_enc->cur_master);
1663
1664 vtotal = mode->vtotal;
1665 if (cur_line >= vtotal)
1666 time_to_vsync = line_time * vtotal;
1667 else
1668 time_to_vsync = line_time * (vtotal - cur_line);
1669
1670 if (time_to_vsync == 0) {
1671 DPU_ERROR("time to vsync should not be zero, vtotal=%d\n",
1672 vtotal);
1673 return -EINVAL;
1674 }
1675
1676 cur_time = ktime_get();
1677 *wakeup_time = ktime_add_ns(cur_time, time_to_vsync);
1678
1679 DPU_DEBUG_ENC(dpu_enc,
1680 "cur_line=%u vtotal=%u time_to_vsync=%u, cur_time=%lld, wakeup_time=%lld\n",
1681 cur_line, vtotal, time_to_vsync,
1682 ktime_to_ms(cur_time),
1683 ktime_to_ms(*wakeup_time));
1684 return 0;
1685 }
1686
static void dpu_encoder_vsync_event_handler(struct timer_list *t)
1688 {
1689 struct dpu_encoder_virt *dpu_enc = from_timer(dpu_enc, t,
1690 vsync_event_timer);
1691 struct drm_encoder *drm_enc = &dpu_enc->base;
1692 struct msm_drm_private *priv;
1693 struct msm_drm_thread *event_thread;
1694
1695 if (!drm_enc->dev || !drm_enc->crtc) {
1696 DPU_ERROR("invalid parameters\n");
1697 return;
1698 }
1699
1700 priv = drm_enc->dev->dev_private;
1701
1702 if (drm_enc->crtc->index >= ARRAY_SIZE(priv->event_thread)) {
1703 DPU_ERROR("invalid crtc index\n");
1704 return;
1705 }
1706 event_thread = &priv->event_thread[drm_enc->crtc->index];
1707 if (!event_thread) {
1708 DPU_ERROR("event_thread not found for crtc:%d\n",
1709 drm_enc->crtc->index);
1710 return;
1711 }
1712
1713 del_timer(&dpu_enc->vsync_event_timer);
1714 }
1715
static void dpu_encoder_vsync_event_work_handler(struct kthread_work *work)
1717 {
1718 struct dpu_encoder_virt *dpu_enc = container_of(work,
1719 struct dpu_encoder_virt, vsync_event_work);
1720 ktime_t wakeup_time;
1721
1722 if (dpu_encoder_vsync_time(&dpu_enc->base, &wakeup_time))
1723 return;
1724
1725 trace_dpu_enc_vsync_event_work(DRMID(&dpu_enc->base), wakeup_time);
1726 mod_timer(&dpu_enc->vsync_event_timer,
1727 nsecs_to_jiffies(ktime_to_ns(wakeup_time)));
1728 }
1729
1730 static u32
dpu_encoder_dsc_initial_line_calc(struct msm_display_dsc_config *dsc,
1732 u32 enc_ip_width)
1733 {
1734 int ssm_delay, total_pixels, soft_slice_per_enc;
1735
1736 soft_slice_per_enc = enc_ip_width / dsc->drm->slice_width;
1737
1738 /*
1739 * minimum number of initial line pixels is a sum of:
1740 * 1. sub-stream multiplexer delay (83 groups for 8bpc,
1741 * 91 for 10 bpc) * 3
1742 * 2. for two soft slice cases, add extra sub-stream multiplexer * 3
1743 * 3. the initial xmit delay
1744 * 4. total pipeline delay through the "lock step" of encoder (47)
1745 * 5. 6 additional pixels as the output of the rate buffer is
1746 * 48 bits wide
1747 */
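	/*
	 * Worked example with assumed values: for 8 bpc (ssm_delay = 84), a
	 * single soft slice, initial_xmit_delay = 512 and slice_width = 540,
	 * total_pixels = 84 * 3 + 512 + 47 = 811 and
	 * DIV_ROUND_UP(811, 540) = 2 initial lines.
	 */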
	ssm_delay = ((dsc->drm->bits_per_component < 10) ? 84 : 92);
	total_pixels = ssm_delay * 3 + dsc->drm->initial_xmit_delay + 47;
	if (soft_slice_per_enc > 1)
		total_pixels += (ssm_delay * 3);
	return DIV_ROUND_UP(total_pixels, dsc->drm->slice_width);
}

static void dpu_encoder_dsc_pipe_cfg(struct dpu_hw_dsc *hw_dsc,
				     struct dpu_hw_pingpong *hw_pp,
				     struct msm_display_dsc_config *dsc,
				     u32 common_mode,
				     u32 initial_lines)
{
	if (hw_dsc->ops.dsc_config)
		hw_dsc->ops.dsc_config(hw_dsc, dsc, common_mode, initial_lines);

	if (hw_dsc->ops.dsc_config_thresh)
		hw_dsc->ops.dsc_config_thresh(hw_dsc, dsc);

	if (hw_pp->ops.setup_dsc)
		hw_pp->ops.setup_dsc(hw_pp);

	if (hw_pp->ops.enable_dsc)
		hw_pp->ops.enable_dsc(hw_pp);
}

static void dpu_encoder_prep_dsc(struct dpu_encoder_virt *dpu_enc,
				 struct msm_display_dsc_config *dsc)
{
	/* coding only for 2LM, 2enc, 1 dsc config */
	struct dpu_encoder_phys *enc_master = dpu_enc->cur_master;
	struct dpu_hw_dsc *hw_dsc[MAX_CHANNELS_PER_ENC];
	struct dpu_hw_pingpong *hw_pp[MAX_CHANNELS_PER_ENC];
	int this_frame_slices;
	int intf_ip_w, enc_ip_w;
	int dsc_common_mode;
	int pic_width;
	u32 initial_lines;
	int i;

	for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
		hw_pp[i] = dpu_enc->hw_pp[i];
		hw_dsc[i] = dpu_enc->hw_dsc[i];

		if (!hw_pp[i] || !hw_dsc[i]) {
			DPU_ERROR_ENC(dpu_enc, "invalid params for DSC\n");
			return;
		}
	}

	pic_width = dsc->drm->pic_width;

	dsc_common_mode = DSC_MODE_MULTIPLEX | DSC_MODE_SPLIT_PANEL;
	if (enc_master->intf_mode == INTF_MODE_VIDEO)
		dsc_common_mode |= DSC_MODE_VIDEO;

	this_frame_slices = pic_width / dsc->drm->slice_width;
	intf_ip_w = this_frame_slices * dsc->drm->slice_width;

	/*
	 * dsc merge case: when using 2 encoders for the same stream,
	 * the number of slices needs to be the same on both encoders.
	 */
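	/*
	 * For instance (values assumed): a 1080 pixel wide picture with
	 * 540 pixel slices gives this_frame_slices = 2 and intf_ip_w = 1080,
	 * so each of the two DSC encoders is fed enc_ip_w = 540 pixels.
	 */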
	enc_ip_w = intf_ip_w / 2;
	initial_lines = dpu_encoder_dsc_initial_line_calc(dsc, enc_ip_w);

	for (i = 0; i < MAX_CHANNELS_PER_ENC; i++)
		dpu_encoder_dsc_pipe_cfg(hw_dsc[i], hw_pp[i], dsc, dsc_common_mode, initial_lines);
}

void dpu_encoder_prepare_for_kickoff(struct drm_encoder *drm_enc)
{
	struct dpu_encoder_virt *dpu_enc;
	struct dpu_encoder_phys *phys;
	bool needs_hw_reset = false;
	unsigned int i;

	dpu_enc = to_dpu_encoder_virt(drm_enc);

	trace_dpu_enc_prepare_kickoff(DRMID(drm_enc));

	/* prepare for next kickoff, may include waiting on previous kickoff */
	DPU_ATRACE_BEGIN("enc_prepare_for_kickoff");
	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
		phys = dpu_enc->phys_encs[i];
		if (phys->ops.prepare_for_kickoff)
			phys->ops.prepare_for_kickoff(phys);
		if (phys->enable_state == DPU_ENC_ERR_NEEDS_HW_RESET)
			needs_hw_reset = true;
	}
	DPU_ATRACE_END("enc_prepare_for_kickoff");

	dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_KICKOFF);

	/* if any phys needs reset, reset all phys, in-order */
	if (needs_hw_reset) {
		trace_dpu_enc_prepare_kickoff_reset(DRMID(drm_enc));
		for (i = 0; i < dpu_enc->num_phys_encs; i++) {
			dpu_encoder_helper_hw_reset(dpu_enc->phys_encs[i]);
		}
	}

	if (dpu_enc->dsc)
		dpu_encoder_prep_dsc(dpu_enc, dpu_enc->dsc);
}

bool dpu_encoder_is_valid_for_commit(struct drm_encoder *drm_enc)
{
	struct dpu_encoder_virt *dpu_enc;
	unsigned int i;
	struct dpu_encoder_phys *phys;

	dpu_enc = to_dpu_encoder_virt(drm_enc);

	if (drm_enc->encoder_type == DRM_MODE_ENCODER_VIRTUAL) {
		for (i = 0; i < dpu_enc->num_phys_encs; i++) {
			phys = dpu_enc->phys_encs[i];
			if (phys->ops.is_valid_for_commit && !phys->ops.is_valid_for_commit(phys)) {
				DPU_DEBUG("invalid FB not kicking off\n");
				return false;
			}
		}
	}

	return true;
}

void dpu_encoder_kickoff(struct drm_encoder *drm_enc)
{
	struct dpu_encoder_virt *dpu_enc;
	struct dpu_encoder_phys *phys;
	ktime_t wakeup_time;
	unsigned long timeout_ms;
	unsigned int i;

	DPU_ATRACE_BEGIN("encoder_kickoff");
	dpu_enc = to_dpu_encoder_virt(drm_enc);

	trace_dpu_enc_kickoff(DRMID(drm_enc));

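	/*
	 * Arm the frame done watchdog for DPU_ENCODER_FRAME_DONE_TIMEOUT_FRAMES
	 * frames; at a 60 Hz refresh rate, for example, this works out to
	 * 5 * 1000 / 60 = 83 ms.
	 */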
	timeout_ms = DPU_ENCODER_FRAME_DONE_TIMEOUT_FRAMES * 1000 /
			drm_mode_vrefresh(&drm_enc->crtc->state->adjusted_mode);

	atomic_set(&dpu_enc->frame_done_timeout_ms, timeout_ms);
	mod_timer(&dpu_enc->frame_done_timer,
			jiffies + msecs_to_jiffies(timeout_ms));

	/* All phys encs are ready to go, trigger the kickoff */
	_dpu_encoder_kickoff_phys(dpu_enc);

	/* allow phys encs to handle any post-kickoff business */
	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
		phys = dpu_enc->phys_encs[i];
		if (phys->ops.handle_post_kickoff)
			phys->ops.handle_post_kickoff(phys);
	}

	if (dpu_enc->disp_info.intf_type == DRM_MODE_ENCODER_DSI &&
			!dpu_encoder_vsync_time(drm_enc, &wakeup_time)) {
		trace_dpu_enc_early_kickoff(DRMID(drm_enc),
				ktime_to_ms(wakeup_time));
		mod_timer(&dpu_enc->vsync_event_timer,
				nsecs_to_jiffies(ktime_to_ns(wakeup_time)));
	}

	DPU_ATRACE_END("encoder_kickoff");
}

static void dpu_encoder_helper_reset_mixers(struct dpu_encoder_phys *phys_enc)
{
	struct dpu_hw_mixer_cfg mixer;
	int i, num_lm;
	u32 flush_mask = 0;
	struct dpu_global_state *global_state;
	struct dpu_hw_blk *hw_lm[2];
	struct dpu_hw_mixer *hw_mixer[2];
	struct dpu_hw_ctl *ctl = phys_enc->hw_ctl;

	memset(&mixer, 0, sizeof(mixer));

	/* reset all mixers for this encoder */
	if (phys_enc->hw_ctl->ops.clear_all_blendstages)
		phys_enc->hw_ctl->ops.clear_all_blendstages(phys_enc->hw_ctl);

	global_state = dpu_kms_get_existing_global_state(phys_enc->dpu_kms);

	num_lm = dpu_rm_get_assigned_resources(&phys_enc->dpu_kms->rm, global_state,
			phys_enc->parent->base.id, DPU_HW_BLK_LM, hw_lm, ARRAY_SIZE(hw_lm));

	for (i = 0; i < num_lm; i++) {
		hw_mixer[i] = to_dpu_hw_mixer(hw_lm[i]);
		flush_mask = phys_enc->hw_ctl->ops.get_bitmask_mixer(ctl, hw_mixer[i]->idx);
		if (phys_enc->hw_ctl->ops.update_pending_flush)
			phys_enc->hw_ctl->ops.update_pending_flush(ctl, flush_mask);

		/* clear all blendstages */
		if (phys_enc->hw_ctl->ops.setup_blendstage)
			phys_enc->hw_ctl->ops.setup_blendstage(ctl, hw_mixer[i]->idx, NULL);
	}
}

void dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc)
{
	struct dpu_hw_ctl *ctl = phys_enc->hw_ctl;
	struct dpu_hw_intf_cfg intf_cfg = { 0 };
	int i;
	struct dpu_encoder_virt *dpu_enc;

	dpu_enc = to_dpu_encoder_virt(phys_enc->parent);

	phys_enc->hw_ctl->ops.reset(ctl);

	dpu_encoder_helper_reset_mixers(phys_enc);

	/*
	 * TODO: move the once-only operation like CTL flush/trigger
	 * into dpu_encoder_virt_disable() and all operations which need
	 * to be done per phys encoder into the phys_disable() op.
	 */
	if (phys_enc->hw_wb) {
		/* disable the PP block */
		if (phys_enc->hw_wb->ops.bind_pingpong_blk)
			phys_enc->hw_wb->ops.bind_pingpong_blk(phys_enc->hw_wb, false,
					phys_enc->hw_pp->idx);

		/* mark WB flush as pending */
		if (phys_enc->hw_ctl->ops.update_pending_flush_wb)
			phys_enc->hw_ctl->ops.update_pending_flush_wb(ctl, phys_enc->hw_wb->idx);
	} else {
		for (i = 0; i < dpu_enc->num_phys_encs; i++) {
			if (dpu_enc->phys_encs[i] && phys_enc->hw_intf->ops.bind_pingpong_blk)
				phys_enc->hw_intf->ops.bind_pingpong_blk(
						dpu_enc->phys_encs[i]->hw_intf, false,
						dpu_enc->phys_encs[i]->hw_pp->idx);

			/* mark INTF flush as pending */
			if (phys_enc->hw_ctl->ops.update_pending_flush_intf)
				phys_enc->hw_ctl->ops.update_pending_flush_intf(phys_enc->hw_ctl,
						dpu_enc->phys_encs[i]->hw_intf->idx);
		}
	}

	/* reset the merge 3D HW block */
	if (phys_enc->hw_pp->merge_3d) {
		phys_enc->hw_pp->merge_3d->ops.setup_3d_mode(phys_enc->hw_pp->merge_3d,
				BLEND_3D_NONE);
		if (phys_enc->hw_ctl->ops.update_pending_flush_merge_3d)
			phys_enc->hw_ctl->ops.update_pending_flush_merge_3d(ctl,
					phys_enc->hw_pp->merge_3d->idx);
	}

	intf_cfg.stream_sel = 0; /* Don't care value for video mode */
	intf_cfg.mode_3d = dpu_encoder_helper_get_3d_blend_mode(phys_enc);

	if (phys_enc->hw_intf)
		intf_cfg.intf = phys_enc->hw_intf->idx;
	if (phys_enc->hw_wb)
		intf_cfg.wb = phys_enc->hw_wb->idx;

	if (phys_enc->hw_pp->merge_3d)
		intf_cfg.merge_3d = phys_enc->hw_pp->merge_3d->idx;

	if (ctl->ops.reset_intf_cfg)
		ctl->ops.reset_intf_cfg(ctl, &intf_cfg);

	ctl->ops.trigger_flush(ctl);
	ctl->ops.trigger_start(ctl);
	ctl->ops.clear_pending_flush(ctl);
}

void dpu_encoder_prepare_commit(struct drm_encoder *drm_enc)
{
	struct dpu_encoder_virt *dpu_enc;
	struct dpu_encoder_phys *phys;
	int i;

	if (!drm_enc) {
		DPU_ERROR("invalid encoder\n");
		return;
	}
	dpu_enc = to_dpu_encoder_virt(drm_enc);

	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
		phys = dpu_enc->phys_encs[i];
		if (phys->ops.prepare_commit)
			phys->ops.prepare_commit(phys);
	}
}

#ifdef CONFIG_DEBUG_FS
static int _dpu_encoder_status_show(struct seq_file *s, void *data)
{
	struct dpu_encoder_virt *dpu_enc = s->private;
	int i;

	mutex_lock(&dpu_enc->enc_lock);
	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];

		seq_printf(s, "intf:%d wb:%d vsync:%8d underrun:%8d ",
				phys->intf_idx - INTF_0, phys->wb_idx - WB_0,
				atomic_read(&phys->vsync_cnt),
				atomic_read(&phys->underrun_cnt));

		seq_printf(s, "mode: %s\n", dpu_encoder_helper_get_intf_type(phys->intf_mode));
	}
	mutex_unlock(&dpu_enc->enc_lock);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(_dpu_encoder_status);

static int _dpu_encoder_init_debugfs(struct drm_encoder *drm_enc)
{
	struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
	int i;

	char name[DPU_NAME_SIZE];

	if (!drm_enc->dev) {
		DPU_ERROR("invalid encoder or kms\n");
		return -EINVAL;
	}

	snprintf(name, DPU_NAME_SIZE, "encoder%u", drm_enc->base.id);

	/* create overall sub-directory for the encoder */
	dpu_enc->debugfs_root = debugfs_create_dir(name,
			drm_enc->dev->primary->debugfs_root);

	/* don't error check these */
	debugfs_create_file("status", 0600,
			dpu_enc->debugfs_root, dpu_enc, &_dpu_encoder_status_fops);

	for (i = 0; i < dpu_enc->num_phys_encs; i++)
		if (dpu_enc->phys_encs[i]->ops.late_register)
			dpu_enc->phys_encs[i]->ops.late_register(
					dpu_enc->phys_encs[i],
					dpu_enc->debugfs_root);

	return 0;
}
#else
static int _dpu_encoder_init_debugfs(struct drm_encoder *drm_enc)
{
	return 0;
}
#endif

static int dpu_encoder_late_register(struct drm_encoder *encoder)
{
	return _dpu_encoder_init_debugfs(encoder);
}

static void dpu_encoder_early_unregister(struct drm_encoder *encoder)
{
	struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(encoder);

	debugfs_remove_recursive(dpu_enc->debugfs_root);
}

static int dpu_encoder_virt_add_phys_encs(
		struct msm_display_info *disp_info,
		struct dpu_encoder_virt *dpu_enc,
		struct dpu_enc_phys_init_params *params)
{
	struct dpu_encoder_phys *enc = NULL;

	DPU_DEBUG_ENC(dpu_enc, "\n");

	/*
	 * We may create up to NUM_PHYS_ENCODER_TYPES physical encoder types
	 * in this function, check up-front.
	 */
	if (dpu_enc->num_phys_encs + NUM_PHYS_ENCODER_TYPES >=
			ARRAY_SIZE(dpu_enc->phys_encs)) {
		DPU_ERROR_ENC(dpu_enc, "too many physical encoders %d\n",
			  dpu_enc->num_phys_encs);
		return -EINVAL;
	}

	if (disp_info->capabilities & MSM_DISPLAY_CAP_VID_MODE) {
		enc = dpu_encoder_phys_vid_init(params);

		if (IS_ERR_OR_NULL(enc)) {
			DPU_ERROR_ENC(dpu_enc, "failed to init vid enc: %ld\n",
				PTR_ERR(enc));
			return enc == NULL ? -EINVAL : PTR_ERR(enc);
		}

		dpu_enc->phys_encs[dpu_enc->num_phys_encs] = enc;
		++dpu_enc->num_phys_encs;
	}

	if (disp_info->capabilities & MSM_DISPLAY_CAP_CMD_MODE) {
		enc = dpu_encoder_phys_cmd_init(params);

		if (IS_ERR_OR_NULL(enc)) {
			DPU_ERROR_ENC(dpu_enc, "failed to init cmd enc: %ld\n",
				PTR_ERR(enc));
			return enc == NULL ? -EINVAL : PTR_ERR(enc);
		}

		dpu_enc->phys_encs[dpu_enc->num_phys_encs] = enc;
		++dpu_enc->num_phys_encs;
	}

	if (disp_info->intf_type == DRM_MODE_ENCODER_VIRTUAL) {
		enc = dpu_encoder_phys_wb_init(params);

		if (IS_ERR_OR_NULL(enc)) {
			DPU_ERROR_ENC(dpu_enc, "failed to init wb enc: %ld\n",
				PTR_ERR(enc));
			return enc == NULL ? -EINVAL : PTR_ERR(enc);
		}

		dpu_enc->phys_encs[dpu_enc->num_phys_encs] = enc;
		++dpu_enc->num_phys_encs;
	}

	if (params->split_role == ENC_ROLE_SLAVE)
		dpu_enc->cur_slave = enc;
	else
		dpu_enc->cur_master = enc;

	return 0;
}

static const struct dpu_encoder_virt_ops dpu_encoder_parent_ops = {
	.handle_vblank_virt = dpu_encoder_vblank_callback,
	.handle_underrun_virt = dpu_encoder_underrun_callback,
	.handle_frame_done = dpu_encoder_frame_done_callback,
};

static int dpu_encoder_setup_display(struct dpu_encoder_virt *dpu_enc,
		struct dpu_kms *dpu_kms,
		struct msm_display_info *disp_info)
{
	int ret = 0;
	int i = 0;
	enum dpu_intf_type intf_type = INTF_NONE;
	struct dpu_enc_phys_init_params phys_params;

	if (!dpu_enc) {
		DPU_ERROR("invalid arg(s), enc %d\n", dpu_enc != NULL);
		return -EINVAL;
	}

	dpu_enc->cur_master = NULL;

	memset(&phys_params, 0, sizeof(phys_params));
	phys_params.dpu_kms = dpu_kms;
	phys_params.parent = &dpu_enc->base;
	phys_params.parent_ops = &dpu_encoder_parent_ops;
	phys_params.enc_spinlock = &dpu_enc->enc_spinlock;

	switch (disp_info->intf_type) {
	case DRM_MODE_ENCODER_DSI:
		intf_type = INTF_DSI;
		break;
	case DRM_MODE_ENCODER_TMDS:
		intf_type = INTF_DP;
		break;
	case DRM_MODE_ENCODER_VIRTUAL:
		intf_type = INTF_WB;
		break;
	}

	WARN_ON(disp_info->num_of_h_tiles < 1);

	DPU_DEBUG("dsi_info->num_of_h_tiles %d\n", disp_info->num_of_h_tiles);

	if ((disp_info->capabilities & MSM_DISPLAY_CAP_CMD_MODE) ||
	    (disp_info->capabilities & MSM_DISPLAY_CAP_VID_MODE))
		dpu_enc->idle_pc_supported =
				dpu_kms->catalog->caps->has_idle_pc;

	dpu_enc->dsc = disp_info->dsc;

	mutex_lock(&dpu_enc->enc_lock);
	for (i = 0; i < disp_info->num_of_h_tiles && !ret; i++) {
		/*
		 * Left-most tile is at index 0, content is controller id
		 * h_tile_instance_ids[2] = {0, 1}; DSI0 = left, DSI1 = right
		 * h_tile_instance_ids[2] = {1, 0}; DSI1 = left, DSI0 = right
		 */
		u32 controller_id = disp_info->h_tile_instance[i];

		if (disp_info->num_of_h_tiles > 1) {
			if (i == 0)
				phys_params.split_role = ENC_ROLE_MASTER;
			else
				phys_params.split_role = ENC_ROLE_SLAVE;
		} else {
			phys_params.split_role = ENC_ROLE_SOLO;
		}

		DPU_DEBUG("h_tile_instance %d = %d, split_role %d\n",
				i, controller_id, phys_params.split_role);

		phys_params.intf_idx = dpu_encoder_get_intf(dpu_kms->catalog,
							    intf_type,
							    controller_id);

		phys_params.wb_idx = dpu_encoder_get_wb(dpu_kms->catalog,
				intf_type, controller_id);
		/*
		 * The phys_params might represent either an INTF or a WB unit, but not
		 * both of them at the same time.
		 */
		if ((phys_params.intf_idx == INTF_MAX) &&
		    (phys_params.wb_idx == WB_MAX)) {
			DPU_ERROR_ENC(dpu_enc, "could not get intf or wb: type %d, id %d\n",
				      intf_type, controller_id);
			ret = -EINVAL;
		}

		if ((phys_params.intf_idx != INTF_MAX) &&
		    (phys_params.wb_idx != WB_MAX)) {
			DPU_ERROR_ENC(dpu_enc, "both intf and wb present: type %d, id %d\n",
				      intf_type, controller_id);
			ret = -EINVAL;
		}

		if (!ret) {
			ret = dpu_encoder_virt_add_phys_encs(disp_info,
					dpu_enc, &phys_params);
			if (ret)
				DPU_ERROR_ENC(dpu_enc, "failed to add phys encs\n");
		}
	}

	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
		atomic_set(&phys->vsync_cnt, 0);
		atomic_set(&phys->underrun_cnt, 0);

		if (phys->intf_idx >= INTF_0 && phys->intf_idx < INTF_MAX)
			phys->hw_intf = dpu_rm_get_intf(&dpu_kms->rm, phys->intf_idx);

		if (phys->wb_idx >= WB_0 && phys->wb_idx < WB_MAX)
			phys->hw_wb = dpu_rm_get_wb(&dpu_kms->rm, phys->wb_idx);

		if (!phys->hw_intf && !phys->hw_wb) {
			DPU_ERROR_ENC(dpu_enc, "no intf or wb block assigned at idx: %d\n", i);
			ret = -EINVAL;
		}

		if (phys->hw_intf && phys->hw_wb) {
			DPU_ERROR_ENC(dpu_enc,
					"invalid phys both intf and wb block at idx: %d\n", i);
			ret = -EINVAL;
		}
	}

	mutex_unlock(&dpu_enc->enc_lock);

	return ret;
}

static void dpu_encoder_frame_done_timeout(struct timer_list *t)
{
	struct dpu_encoder_virt *dpu_enc = from_timer(dpu_enc, t,
			frame_done_timer);
	struct drm_encoder *drm_enc = &dpu_enc->base;
	u32 event;

	if (!drm_enc->dev) {
		DPU_ERROR("invalid parameters\n");
		return;
	}

	if (!dpu_enc->frame_busy_mask[0] || !dpu_enc->crtc_frame_event_cb) {
		DRM_DEBUG_KMS("id:%u invalid timeout frame_busy_mask=%lu\n",
			      DRMID(drm_enc), dpu_enc->frame_busy_mask[0]);
		return;
	} else if (!atomic_xchg(&dpu_enc->frame_done_timeout_ms, 0)) {
		DRM_DEBUG_KMS("id:%u invalid timeout\n", DRMID(drm_enc));
		return;
	}

	DPU_ERROR_ENC(dpu_enc, "frame done timeout\n");

	event = DPU_ENCODER_FRAME_EVENT_ERROR;
	trace_dpu_enc_frame_done_timeout(DRMID(drm_enc), event);
	dpu_enc->crtc_frame_event_cb(dpu_enc->crtc_frame_event_cb_data, event);
}

static const struct drm_encoder_helper_funcs dpu_encoder_helper_funcs = {
	.atomic_mode_set = dpu_encoder_virt_atomic_mode_set,
	.disable = dpu_encoder_virt_disable,
	.enable = dpu_encoder_virt_enable,
	.atomic_check = dpu_encoder_virt_atomic_check,
};

static const struct drm_encoder_funcs dpu_encoder_funcs = {
	.destroy = dpu_encoder_destroy,
	.late_register = dpu_encoder_late_register,
	.early_unregister = dpu_encoder_early_unregister,
};

int dpu_encoder_setup(struct drm_device *dev, struct drm_encoder *enc,
		struct msm_display_info *disp_info)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms);
	struct drm_encoder *drm_enc = NULL;
	struct dpu_encoder_virt *dpu_enc = NULL;
	int ret = 0;

	dpu_enc = to_dpu_encoder_virt(enc);

	ret = dpu_encoder_setup_display(dpu_enc, dpu_kms, disp_info);
	if (ret)
		goto fail;

	atomic_set(&dpu_enc->frame_done_timeout_ms, 0);
	timer_setup(&dpu_enc->frame_done_timer,
			dpu_encoder_frame_done_timeout, 0);

	if (disp_info->intf_type == DRM_MODE_ENCODER_DSI)
		timer_setup(&dpu_enc->vsync_event_timer,
				dpu_encoder_vsync_event_handler,
				0);
	else if (disp_info->intf_type == DRM_MODE_ENCODER_TMDS)
		dpu_enc->wide_bus_en = msm_dp_wide_bus_available(
				priv->dp[disp_info->h_tile_instance[0]]);

	INIT_DELAYED_WORK(&dpu_enc->delayed_off_work,
			dpu_encoder_off_work);
	dpu_enc->idle_timeout = IDLE_TIMEOUT;

	kthread_init_work(&dpu_enc->vsync_event_work,
			dpu_encoder_vsync_event_work_handler);

	memcpy(&dpu_enc->disp_info, disp_info, sizeof(*disp_info));

	DPU_DEBUG_ENC(dpu_enc, "created\n");

	return ret;

fail:
	DPU_ERROR("failed to create encoder\n");
	if (drm_enc)
		dpu_encoder_destroy(drm_enc);

	return ret;
}

struct drm_encoder *dpu_encoder_init(struct drm_device *dev,
		int drm_enc_mode)
{
	struct dpu_encoder_virt *dpu_enc = NULL;
	int rc = 0;

	dpu_enc = devm_kzalloc(dev->dev, sizeof(*dpu_enc), GFP_KERNEL);
	if (!dpu_enc)
		return ERR_PTR(-ENOMEM);

	rc = drm_encoder_init(dev, &dpu_enc->base, &dpu_encoder_funcs,
			      drm_enc_mode, NULL);
	if (rc) {
		devm_kfree(dev->dev, dpu_enc);
		return ERR_PTR(rc);
	}

	drm_encoder_helper_add(&dpu_enc->base, &dpu_encoder_helper_funcs);

	spin_lock_init(&dpu_enc->enc_spinlock);
	dpu_enc->enabled = false;
	mutex_init(&dpu_enc->enc_lock);
	mutex_init(&dpu_enc->rc_lock);

	return &dpu_enc->base;
}

int dpu_encoder_wait_for_event(struct drm_encoder *drm_enc,
		enum msm_event_wait event)
{
	int (*fn_wait)(struct dpu_encoder_phys *phys_enc) = NULL;
	struct dpu_encoder_virt *dpu_enc = NULL;
	int i, ret = 0;

	if (!drm_enc) {
		DPU_ERROR("invalid encoder\n");
		return -EINVAL;
	}
	dpu_enc = to_dpu_encoder_virt(drm_enc);
	DPU_DEBUG_ENC(dpu_enc, "\n");

	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];

		switch (event) {
		case MSM_ENC_COMMIT_DONE:
			fn_wait = phys->ops.wait_for_commit_done;
			break;
		case MSM_ENC_TX_COMPLETE:
			fn_wait = phys->ops.wait_for_tx_complete;
			break;
		case MSM_ENC_VBLANK:
			fn_wait = phys->ops.wait_for_vblank;
			break;
		default:
			DPU_ERROR_ENC(dpu_enc, "unknown wait event %d\n",
					event);
			return -EINVAL;
		}

		if (fn_wait) {
			DPU_ATRACE_BEGIN("wait_for_completion_event");
			ret = fn_wait(phys);
			DPU_ATRACE_END("wait_for_completion_event");
			if (ret)
				return ret;
		}
	}

	return ret;
}

enum dpu_intf_mode dpu_encoder_get_intf_mode(struct drm_encoder *encoder)
{
	struct dpu_encoder_virt *dpu_enc = NULL;

	if (!encoder) {
		DPU_ERROR("invalid encoder\n");
		return INTF_MODE_NONE;
	}
	dpu_enc = to_dpu_encoder_virt(encoder);

	if (dpu_enc->cur_master)
		return dpu_enc->cur_master->intf_mode;

	if (dpu_enc->num_phys_encs)
		return dpu_enc->phys_encs[0]->intf_mode;

	return INTF_MODE_NONE;
}

unsigned int dpu_encoder_helper_get_dsc(struct dpu_encoder_phys *phys_enc)
{
	struct drm_encoder *encoder = phys_enc->parent;
	struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(encoder);

	return dpu_enc->dsc_mask;
}