1 /*
2 * Copyright 2016 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26 #include <linux/delay.h>
27 #include "dm_services.h"
28 #include "basics/dc_common.h"
29 #include "core_types.h"
30 #include "resource.h"
31 #include "custom_float.h"
32 #include "dcn10_hw_sequencer.h"
33 #include "dcn10_hw_sequencer_debug.h"
34 #include "dce/dce_hwseq.h"
35 #include "abm.h"
36 #include "dmcu.h"
37 #include "dcn10_optc.h"
38 #include "dcn10_dpp.h"
39 #include "dcn10_mpc.h"
40 #include "timing_generator.h"
41 #include "opp.h"
42 #include "ipp.h"
43 #include "mpc.h"
44 #include "reg_helper.h"
45 #include "dcn10_hubp.h"
46 #include "dcn10_hubbub.h"
47 #include "dcn10_cm_common.h"
48 #include "dccg.h"
49 #include "clk_mgr.h"
50 #include "link_hwss.h"
51 #include "dpcd_defs.h"
52 #include "dsc.h"
53 #include "dce/dmub_psr.h"
54 #include "dc_dmub_srv.h"
55 #include "dce/dmub_hw_lock_mgr.h"
56 #include "dc_trace.h"
57 #include "dce/dmub_outbox.h"
58 #include "link.h"
59
60 #define DC_LOGGER_INIT(logger)
61
62 #define CTX \
63 hws->ctx
64 #define REG(reg)\
65 hws->regs->reg
66
67 #undef FN
68 #define FN(reg_name, field_name) \
69 hws->shifts->field_name, hws->masks->field_name
70
71 /*print is 17 wide, first two characters are spaces*/
72 #define DTN_INFO_MICRO_SEC(ref_cycle) \
73 print_microsec(dc_ctx, log_ctx, ref_cycle)
74
75 #define GAMMA_HW_POINTS_NUM 256
76
77 #define PGFSM_POWER_ON 0
78 #define PGFSM_POWER_OFF 2
79
/*
 * Convert a DCHUB reference-clock cycle count to microseconds and log it
 * as one fixed-width column: 17 characters, the first two of which are
 * spaces, with three fractional digits.
 */
static void print_microsec(struct dc_context *dc_ctx,
	struct dc_log_buffer_ctx *log_ctx,
	uint32_t ref_cycle)
{
	/* 10^3: number of fractional microsecond digits printed */
	static const unsigned int scale = 1000;
	const uint32_t ref_clk_mhz =
		dc_ctx->dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000;
	uint32_t us_scaled = (ref_cycle * scale) / ref_clk_mhz;
	uint32_t us_whole = us_scaled / scale;
	uint32_t us_frac = us_scaled % scale;

	DTN_INFO("  %11d.%03d", us_whole, us_frac);
}
92
/*
 * dcn10_lock_all_pipes - (Un)lock the timing generator of every active
 * top pipe in @context.
 *
 * @dc: display core instance
 * @context: state whose pipes are to be locked/unlocked
 * @lock: true to lock, false to unlock
 *
 * Only top pipes are touched (child pipes share the top pipe's TG, so
 * locking them again would be redundant). Pipes without a stream, pipes
 * with no plane in either the new or current state, pipes whose TG is
 * disabled, and SubVP phantom pipes are skipped.
 */
void dcn10_lock_all_pipes(struct dc *dc,
	struct dc_state *context,
	bool lock)
{
	struct pipe_ctx *pipe_ctx;
	struct pipe_ctx *old_pipe_ctx;
	struct timing_generator *tg;
	int i;

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
		pipe_ctx = &context->res_ctx.pipe_ctx[i];
		tg = pipe_ctx->stream_res.tg;

		/*
		 * Only lock the top pipe's tg to prevent redundant
		 * (un)locking. Also skip if pipe is disabled.
		 */
		if (pipe_ctx->top_pipe ||
		    !pipe_ctx->stream ||
		    (!pipe_ctx->plane_state && !old_pipe_ctx->plane_state) ||
		    !tg->funcs->is_tg_enabled(tg) ||
		    pipe_ctx->stream->mall_stream_config.type == SUBVP_PHANTOM)
			continue;

		/* Both branches called the same hook; pass @lock straight through. */
		dc->hwss.pipe_control_lock(dc, pipe_ctx, lock);
	}
}
124
/*
 * Dump the MPC and DPP CRC result registers to the debug log.
 *
 * Each read is guarded by a REG() lookup so the registers are only
 * accessed on ASICs whose register map actually defines them.
 */
static void log_mpc_crc(struct dc *dc,
	struct dc_log_buffer_ctx *log_ctx)
{
	struct dc_context *dc_ctx = dc->ctx;
	struct dce_hwseq *hws = dc->hwseq;

	if (REG(MPC_CRC_RESULT_GB))
		DTN_INFO("MPC_CRC_RESULT_GB:%d MPC_CRC_RESULT_C:%d MPC_CRC_RESULT_AR:%d\n",
			REG_READ(MPC_CRC_RESULT_GB), REG_READ(MPC_CRC_RESULT_C), REG_READ(MPC_CRC_RESULT_AR));
	if (REG(DPP_TOP0_DPP_CRC_VAL_B_A))
		DTN_INFO("DPP_TOP0_DPP_CRC_VAL_B_A:%d DPP_TOP0_DPP_CRC_VAL_R_G:%d\n",
			REG_READ(DPP_TOP0_DPP_CRC_VAL_B_A), REG_READ(DPP_TOP0_DPP_CRC_VAL_R_G));
}
138
/*
 * Log the four HUBBUB watermark sets (urgent data, PTE/meta urgent,
 * self-refresh enter/exit, DRAM clock change), converting each
 * ref-clock cycle value to microseconds via DTN_INFO_MICRO_SEC.
 */
static void dcn10_log_hubbub_state(struct dc *dc,
	struct dc_log_buffer_ctx *log_ctx)
{
	struct dc_context *dc_ctx = dc->ctx;
	struct dcn_hubbub_wm wm;
	int i;

	memset(&wm, 0, sizeof(struct dcn_hubbub_wm));
	/* Snapshot the currently programmed watermarks from hardware. */
	dc->res_pool->hubbub->funcs->wm_read_state(dc->res_pool->hubbub, &wm);

	DTN_INFO("HUBBUB WM: data_urgent pte_meta_urgent"
		" sr_enter sr_exit dram_clk_change\n");

	for (i = 0; i < 4; i++) {
		struct dcn_hubbub_wm_set *s;

		s = &wm.sets[i];
		DTN_INFO("WM_Set[%d]:", s->wm_set);
		DTN_INFO_MICRO_SEC(s->data_urgent);
		DTN_INFO_MICRO_SEC(s->pte_meta_urgent);
		DTN_INFO_MICRO_SEC(s->sr_enter);
		DTN_INFO_MICRO_SEC(s->sr_exit);
		DTN_INFO_MICRO_SEC(s->dram_clk_change);
		DTN_INFO("\n");
	}

	DTN_INFO("\n");
}
167
/*
 * Dump per-pipe HUBP state to the debug log: surface/viewport summary,
 * then the RQ, DLG and TTU register groups. Pipes whose HUBP is blanked
 * are skipped in every section.
 */
static void dcn10_log_hubp_states(struct dc *dc, void *log_ctx)
{
	struct dc_context *dc_ctx = dc->ctx;
	struct resource_pool *pool = dc->res_pool;
	int i;

	DTN_INFO(
		"HUBP: format addr_hi width height rot mir sw_mode dcc_en blank_en clock_en ttu_dis underflow min_ttu_vblank qos_low_wm qos_high_wm\n");
	for (i = 0; i < pool->pipe_count; i++) {
		struct hubp *hubp = pool->hubps[i];
		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(hubp)->state);

		/* Refresh the cached register snapshot before logging it. */
		hubp->funcs->hubp_read_state(hubp);

		if (!s->blank_en) {
			DTN_INFO("[%2d]: %5xh %6xh %5d %6d %2xh %2xh %6xh %6d %8d %8d %7d %8xh",
					hubp->inst,
					s->pixel_format,
					s->inuse_addr_hi,
					s->viewport_width,
					s->viewport_height,
					s->rotation_angle,
					s->h_mirror_en,
					s->sw_mode,
					s->dcc_en,
					s->blank_en,
					s->clock_en,
					s->ttu_disable,
					s->underflow_status);
			/* TTU watermarks are cycle counts; print as microseconds. */
			DTN_INFO_MICRO_SEC(s->min_ttu_vblank);
			DTN_INFO_MICRO_SEC(s->qos_level_low_wm);
			DTN_INFO_MICRO_SEC(s->qos_level_high_wm);
			DTN_INFO("\n");
		}
	}

	/* Request queue (RQ) registers: luma (L:) and chroma (C:) halves. */
	DTN_INFO("\n=========RQ========\n");
	DTN_INFO("HUBP: drq_exp_m prq_exp_m mrq_exp_m crq_exp_m plane1_ba L:chunk_s min_chu_s meta_ch_s"
		" min_m_c_s dpte_gr_s mpte_gr_s swath_hei pte_row_h C:chunk_s min_chu_s meta_ch_s"
		" min_m_c_s dpte_gr_s mpte_gr_s swath_hei pte_row_h\n");
	for (i = 0; i < pool->pipe_count; i++) {
		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
		struct _vcs_dpi_display_rq_regs_st *rq_regs = &s->rq_regs;

		if (!s->blank_en)
			DTN_INFO("[%2d]: %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh\n",
				pool->hubps[i]->inst, rq_regs->drq_expansion_mode, rq_regs->prq_expansion_mode, rq_regs->mrq_expansion_mode,
				rq_regs->crq_expansion_mode, rq_regs->plane1_base_address, rq_regs->rq_regs_l.chunk_size,
				rq_regs->rq_regs_l.min_chunk_size, rq_regs->rq_regs_l.meta_chunk_size,
				rq_regs->rq_regs_l.min_meta_chunk_size, rq_regs->rq_regs_l.dpte_group_size,
				rq_regs->rq_regs_l.mpte_group_size, rq_regs->rq_regs_l.swath_height,
				rq_regs->rq_regs_l.pte_row_height_linear, rq_regs->rq_regs_c.chunk_size, rq_regs->rq_regs_c.min_chunk_size,
				rq_regs->rq_regs_c.meta_chunk_size, rq_regs->rq_regs_c.min_meta_chunk_size,
				rq_regs->rq_regs_c.dpte_group_size, rq_regs->rq_regs_c.mpte_group_size,
				rq_regs->rq_regs_c.swath_height, rq_regs->rq_regs_c.pte_row_height_linear);
	}

	/* Display latency group (DLG) attribute registers. */
	DTN_INFO("========DLG========\n");
	DTN_INFO("HUBP: rc_hbe dlg_vbe min_d_y_n rc_per_ht rc_x_a_s "
		" dst_y_a_s dst_y_pf dst_y_vvb dst_y_rvb dst_y_vfl dst_y_rfl rf_pix_fq"
		" vratio_pf vrat_pf_c rc_pg_vbl rc_pg_vbc rc_mc_vbl rc_mc_vbc rc_pg_fll"
		" rc_pg_flc rc_mc_fll rc_mc_flc pr_nom_l pr_nom_c rc_pg_nl rc_pg_nc "
		" mr_nom_l mr_nom_c rc_mc_nl rc_mc_nc rc_ld_pl rc_ld_pc rc_ld_l "
		" rc_ld_c cha_cur0 ofst_cur1 cha_cur1 vr_af_vc0 ddrq_limt x_rt_dlay"
		" x_rp_dlay x_rr_sfl\n");
	for (i = 0; i < pool->pipe_count; i++) {
		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
		struct _vcs_dpi_display_dlg_regs_st *dlg_regs = &s->dlg_attr;

		if (!s->blank_en)
			DTN_INFO("[%2d]: %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh"
				" %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh"
				" %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh\n",
				pool->hubps[i]->inst, dlg_regs->refcyc_h_blank_end, dlg_regs->dlg_vblank_end, dlg_regs->min_dst_y_next_start,
				dlg_regs->refcyc_per_htotal, dlg_regs->refcyc_x_after_scaler, dlg_regs->dst_y_after_scaler,
				dlg_regs->dst_y_prefetch, dlg_regs->dst_y_per_vm_vblank, dlg_regs->dst_y_per_row_vblank,
				dlg_regs->dst_y_per_vm_flip, dlg_regs->dst_y_per_row_flip, dlg_regs->ref_freq_to_pix_freq,
				dlg_regs->vratio_prefetch, dlg_regs->vratio_prefetch_c, dlg_regs->refcyc_per_pte_group_vblank_l,
				dlg_regs->refcyc_per_pte_group_vblank_c, dlg_regs->refcyc_per_meta_chunk_vblank_l,
				dlg_regs->refcyc_per_meta_chunk_vblank_c, dlg_regs->refcyc_per_pte_group_flip_l,
				dlg_regs->refcyc_per_pte_group_flip_c, dlg_regs->refcyc_per_meta_chunk_flip_l,
				dlg_regs->refcyc_per_meta_chunk_flip_c, dlg_regs->dst_y_per_pte_row_nom_l,
				dlg_regs->dst_y_per_pte_row_nom_c, dlg_regs->refcyc_per_pte_group_nom_l,
				dlg_regs->refcyc_per_pte_group_nom_c, dlg_regs->dst_y_per_meta_row_nom_l,
				dlg_regs->dst_y_per_meta_row_nom_c, dlg_regs->refcyc_per_meta_chunk_nom_l,
				dlg_regs->refcyc_per_meta_chunk_nom_c, dlg_regs->refcyc_per_line_delivery_pre_l,
				dlg_regs->refcyc_per_line_delivery_pre_c, dlg_regs->refcyc_per_line_delivery_l,
				dlg_regs->refcyc_per_line_delivery_c, dlg_regs->chunk_hdl_adjust_cur0, dlg_regs->dst_y_offset_cur1,
				dlg_regs->chunk_hdl_adjust_cur1, dlg_regs->vready_after_vcount0, dlg_regs->dst_y_delta_drq_limit,
				dlg_regs->xfc_reg_transfer_delay, dlg_regs->xfc_reg_precharge_delay,
				dlg_regs->xfc_reg_remote_surface_flip_latency);
	}

	/* Time-to-underflow (TTU) QoS / delivery registers. */
	DTN_INFO("========TTU========\n");
	DTN_INFO("HUBP: qos_ll_wm qos_lh_wm mn_ttu_vb qos_l_flp rc_rd_p_l rc_rd_l rc_rd_p_c"
		" rc_rd_c rc_rd_c0 rc_rd_pc0 rc_rd_c1 rc_rd_pc1 qos_lf_l qos_rds_l"
		" qos_lf_c qos_rds_c qos_lf_c0 qos_rds_c0 qos_lf_c1 qos_rds_c1\n");
	for (i = 0; i < pool->pipe_count; i++) {
		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
		struct _vcs_dpi_display_ttu_regs_st *ttu_regs = &s->ttu_attr;

		if (!s->blank_en)
			DTN_INFO("[%2d]: %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh %8xh\n",
				pool->hubps[i]->inst, ttu_regs->qos_level_low_wm, ttu_regs->qos_level_high_wm, ttu_regs->min_ttu_vblank,
				ttu_regs->qos_level_flip, ttu_regs->refcyc_per_req_delivery_pre_l, ttu_regs->refcyc_per_req_delivery_l,
				ttu_regs->refcyc_per_req_delivery_pre_c, ttu_regs->refcyc_per_req_delivery_c, ttu_regs->refcyc_per_req_delivery_cur0,
				ttu_regs->refcyc_per_req_delivery_pre_cur0, ttu_regs->refcyc_per_req_delivery_cur1,
				ttu_regs->refcyc_per_req_delivery_pre_cur1, ttu_regs->qos_level_fixed_l, ttu_regs->qos_ramp_disable_l,
				ttu_regs->qos_level_fixed_c, ttu_regs->qos_ramp_disable_c, ttu_regs->qos_level_fixed_cur0,
				ttu_regs->qos_ramp_disable_cur0, ttu_regs->qos_level_fixed_cur1, ttu_regs->qos_ramp_disable_cur1);
	}
	DTN_INFO("\n");
}
281
/*
 * dcn10_log_hw_state - Dump a full DCN hardware state snapshot.
 *
 * Logs, in order: HUBBUB watermarks, HUBP state, DPP gamma/gamut state,
 * the MPCC blending tree, enabled OTG timings, DSC state, stream and
 * link encoder state, calculated clocks, MPC/DPP CRC results, and (when
 * present) the DP HPO stream/link encoders.
 */
void dcn10_log_hw_state(struct dc *dc,
	struct dc_log_buffer_ctx *log_ctx)
{
	struct dc_context *dc_ctx = dc->ctx;
	struct resource_pool *pool = dc->res_pool;
	int i;

	DTN_INFO_BEGIN();

	dcn10_log_hubbub_state(dc, log_ctx);

	dcn10_log_hubp_states(dc, log_ctx);

	/* DPP: LUT modes and the 3x4 gamut remap matrix, enabled DPPs only. */
	DTN_INFO("DPP: IGAM format IGAM mode DGAM mode RGAM mode"
		" GAMUT mode C11 C12 C13 C14 C21 C22 C23 C24 "
		"C31 C32 C33 C34\n");
	for (i = 0; i < pool->pipe_count; i++) {
		struct dpp *dpp = pool->dpps[i];
		struct dcn_dpp_state s = {0};

		dpp->funcs->dpp_read_state(dpp, &s);

		if (!s.is_enabled)
			continue;

		/* Decode the raw LUT-mode enum values into readable strings. */
		DTN_INFO("[%2d]: %11xh %-11s %-11s %-11s"
			"%8x %08xh %08xh %08xh %08xh %08xh %08xh",
			dpp->inst,
			s.igam_input_format,
			(s.igam_lut_mode == 0) ? "BypassFixed" :
				((s.igam_lut_mode == 1) ? "BypassFloat" :
				((s.igam_lut_mode == 2) ? "RAM" :
				((s.igam_lut_mode == 3) ? "RAM" :
					"Unknown"))),
			(s.dgam_lut_mode == 0) ? "Bypass" :
				((s.dgam_lut_mode == 1) ? "sRGB" :
				((s.dgam_lut_mode == 2) ? "Ycc" :
				((s.dgam_lut_mode == 3) ? "RAM" :
				((s.dgam_lut_mode == 4) ? "RAM" :
					"Unknown")))),
			(s.rgam_lut_mode == 0) ? "Bypass" :
				((s.rgam_lut_mode == 1) ? "sRGB" :
				((s.rgam_lut_mode == 2) ? "Ycc" :
				((s.rgam_lut_mode == 3) ? "RAM" :
				((s.rgam_lut_mode == 4) ? "RAM" :
					"Unknown")))),
			s.gamut_remap_mode,
			s.gamut_remap_c11_c12,
			s.gamut_remap_c13_c14,
			s.gamut_remap_c21_c22,
			s.gamut_remap_c23_c24,
			s.gamut_remap_c31_c32,
			s.gamut_remap_c33_c34);
		DTN_INFO("\n");
	}
	DTN_INFO("\n");

	/* MPCC tree; an opp_id of 0xf marks an unconnected MPCC, skip it. */
	DTN_INFO("MPCC: OPP DPP MPCCBOT MODE ALPHA_MODE PREMULT OVERLAP_ONLY IDLE\n");
	for (i = 0; i < pool->pipe_count; i++) {
		struct mpcc_state s = {0};

		pool->mpc->funcs->read_mpcc_state(pool->mpc, i, &s);
		if (s.opp_id != 0xf)
			DTN_INFO("[%2d]: %2xh %2xh %6xh %4d %10d %7d %12d %4d\n",
				i, s.opp_id, s.dpp_id, s.bot_mpcc_id,
				s.mode, s.alpha_mode, s.pre_multiplied_alpha, s.overlap_only,
				s.idle);
	}
	DTN_INFO("\n");

	DTN_INFO("OTG: v_bs v_be v_ss v_se vpol vmax vmin vmax_sel vmin_sel h_bs h_be h_ss h_se hpol htot vtot underflow blank_en\n");

	for (i = 0; i < pool->timing_generator_count; i++) {
		struct timing_generator *tg = pool->timing_generators[i];
		struct dcn_otg_state s = {0};
		/* Read shared OTG state registers for all DCNx */
		optc1_read_otg_state(DCN10TG_FROM_TG(tg), &s);

		/*
		 * For DCN2 and greater, a register on the OPP is used to
		 * determine if the CRTC is blanked instead of the OTG. So use
		 * dpg_is_blanked() if exists, otherwise fallback on otg.
		 *
		 * TODO: Implement DCN-specific read_otg_state hooks.
		 */
		if (pool->opps[i]->funcs->dpg_is_blanked)
			s.blank_enabled = pool->opps[i]->funcs->dpg_is_blanked(pool->opps[i]);
		else
			s.blank_enabled = tg->funcs->is_blanked(tg);

		//only print if OTG master is enabled
		if ((s.otg_enabled & 1) == 0)
			continue;

		DTN_INFO("[%d]: %5d %5d %5d %5d %5d %5d %5d %9d %9d %5d %5d %5d %5d %5d %5d %5d %9d %8d\n",
			tg->inst,
			s.v_blank_start,
			s.v_blank_end,
			s.v_sync_a_start,
			s.v_sync_a_end,
			s.v_sync_a_pol,
			s.v_total_max,
			s.v_total_min,
			s.v_total_max_sel,
			s.v_total_min_sel,
			s.h_blank_start,
			s.h_blank_end,
			s.h_sync_a_start,
			s.h_sync_a_end,
			s.h_sync_a_pol,
			s.h_total,
			s.v_total,
			s.underflow_occurred_status,
			s.blank_enabled);

		// Clear underflow for debug purposes
		// We want to keep underflow sticky bit on for the longevity tests outside of test environment.
		// This function is called only from Windows or Diags test environment, hence it's safe to clear
		// it from here without affecting the original intent.
		tg->funcs->clear_optc_underflow(tg);
	}
	DTN_INFO("\n");

	// dcn_dsc_state struct field bytes_per_pixel was renamed to bits_per_pixel
	// TODO: Update golden log header to reflect this name change
	DTN_INFO("DSC: CLOCK_EN SLICE_WIDTH Bytes_pp\n");
	for (i = 0; i < pool->res_cap->num_dsc; i++) {
		struct display_stream_compressor *dsc = pool->dscs[i];
		struct dcn_dsc_state s = {0};

		dsc->funcs->dsc_read_state(dsc, &s);
		DTN_INFO("[%d]: %-9d %-12d %-10d\n",
		dsc->inst,
			s.dsc_clock_en,
			s.dsc_slice_width,
			s.dsc_bits_per_pixel);
		DTN_INFO("\n");
	}
	DTN_INFO("\n");

	/* Stream encoders that implement enc_read_state (others are skipped). */
	DTN_INFO("S_ENC: DSC_MODE SEC_GSP7_LINE_NUM"
			" VBID6_LINE_REFERENCE VBID6_LINE_NUM SEC_GSP7_ENABLE SEC_STREAM_ENABLE\n");
	for (i = 0; i < pool->stream_enc_count; i++) {
		struct stream_encoder *enc = pool->stream_enc[i];
		struct enc_state s = {0};

		if (enc->funcs->enc_read_state) {
			enc->funcs->enc_read_state(enc, &s);
			DTN_INFO("[%-3d]: %-9d %-18d %-21d %-15d %-16d %-17d\n",
				enc->id,
				s.dsc_mode,
				s.sec_gsp_pps_line_num,
				s.vbid6_line_reference,
				s.vbid6_line_num,
				s.sec_gsp_pps_enable,
				s.sec_stream_enable);
			DTN_INFO("\n");
		}
	}
	DTN_INFO("\n");

	/* Link encoder FEC / training state, per link. */
	DTN_INFO("L_ENC: DPHY_FEC_EN DPHY_FEC_READY_SHADOW DPHY_FEC_ACTIVE_STATUS DP_LINK_TRAINING_COMPLETE\n");
	for (i = 0; i < dc->link_count; i++) {
		struct link_encoder *lenc = dc->links[i]->link_enc;

		struct link_enc_state s = {0};

		if (lenc && lenc->funcs->read_state) {
			lenc->funcs->read_state(lenc, &s);
			DTN_INFO("[%-3d]: %-12d %-22d %-22d %-25d\n",
				i,
				s.dphy_fec_en,
				s.dphy_fec_ready_shadow,
				s.dphy_fec_active_status,
				s.dp_link_training_complete);
			DTN_INFO("\n");
		}
	}
	DTN_INFO("\n");

	DTN_INFO("\nCALCULATED Clocks: dcfclk_khz:%d dcfclk_deep_sleep_khz:%d dispclk_khz:%d\n"
		"dppclk_khz:%d max_supported_dppclk_khz:%d fclk_khz:%d socclk_khz:%d\n\n",
			dc->current_state->bw_ctx.bw.dcn.clk.dcfclk_khz,
			dc->current_state->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz,
			dc->current_state->bw_ctx.bw.dcn.clk.dispclk_khz,
			dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz,
			dc->current_state->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz,
			dc->current_state->bw_ctx.bw.dcn.clk.fclk_khz,
			dc->current_state->bw_ctx.bw.dcn.clk.socclk_khz);

	log_mpc_crc(dc, log_ctx);

	{
		/* DP HPO (128b/132b) stream encoders, only when the pool has any. */
		if (pool->hpo_dp_stream_enc_count > 0) {
			DTN_INFO("DP HPO S_ENC: Enabled OTG Format Depth Vid SDP Compressed Link\n");
			for (i = 0; i < pool->hpo_dp_stream_enc_count; i++) {
				struct hpo_dp_stream_encoder_state hpo_dp_se_state = {0};
				struct hpo_dp_stream_encoder *hpo_dp_stream_enc = pool->hpo_dp_stream_enc[i];

				if (hpo_dp_stream_enc && hpo_dp_stream_enc->funcs->read_state) {
					hpo_dp_stream_enc->funcs->read_state(hpo_dp_stream_enc, &hpo_dp_se_state);

					/* Decode pixel encoding and component depth enums. */
					DTN_INFO("[%d]: %d %d %6s %d %d %d %d %d\n",
						hpo_dp_stream_enc->id - ENGINE_ID_HPO_DP_0,
						hpo_dp_se_state.stream_enc_enabled,
						hpo_dp_se_state.otg_inst,
						(hpo_dp_se_state.pixel_encoding == 0) ? "4:4:4" :
							((hpo_dp_se_state.pixel_encoding == 1) ? "4:2:2" :
							(hpo_dp_se_state.pixel_encoding == 2) ? "4:2:0" : "Y-Only"),
						(hpo_dp_se_state.component_depth == 0) ? 6 :
							((hpo_dp_se_state.component_depth == 1) ? 8 :
							(hpo_dp_se_state.component_depth == 2) ? 10 : 12),
						hpo_dp_se_state.vid_stream_enabled,
						hpo_dp_se_state.sdp_enabled,
						hpo_dp_se_state.compressed_format,
						hpo_dp_se_state.mapped_to_link_enc);
				}
			}

			DTN_INFO("\n");
		}

		/* log DP HPO L_ENC section if any hpo_dp_link_enc exists */
		if (pool->hpo_dp_link_enc_count) {
			DTN_INFO("DP HPO L_ENC: Enabled Mode Lanes Stream Slots VC Rate X VC Rate Y\n");

			for (i = 0; i < pool->hpo_dp_link_enc_count; i++) {
				struct hpo_dp_link_encoder *hpo_dp_link_enc = pool->hpo_dp_link_enc[i];
				struct hpo_dp_link_enc_state hpo_dp_le_state = {0};

				if (hpo_dp_link_enc->funcs->read_state) {
					hpo_dp_link_enc->funcs->read_state(hpo_dp_link_enc, &hpo_dp_le_state);
					DTN_INFO("[%d]: %d %6s %d %d %d %d %d\n",
						hpo_dp_link_enc->inst,
						hpo_dp_le_state.link_enc_enabled,
						(hpo_dp_le_state.link_mode == 0) ? "TPS1" :
							(hpo_dp_le_state.link_mode == 1) ? "TPS2" :
							(hpo_dp_le_state.link_mode == 2) ? "ACTIVE" : "TEST",
						hpo_dp_le_state.lane_count,
						hpo_dp_le_state.stream_src[0],
						hpo_dp_le_state.slot_count[0],
						hpo_dp_le_state.vc_rate_x[0],
						hpo_dp_le_state.vc_rate_y[0]);
					DTN_INFO("\n");
				}
			}

			DTN_INFO("\n");
		}
	}

	DTN_INFO_END();
}
535
dcn10_did_underflow_occur(struct dc * dc,struct pipe_ctx * pipe_ctx)536 bool dcn10_did_underflow_occur(struct dc *dc, struct pipe_ctx *pipe_ctx)
537 {
538 struct hubp *hubp = pipe_ctx->plane_res.hubp;
539 struct timing_generator *tg = pipe_ctx->stream_res.tg;
540
541 if (tg->funcs->is_optc_underflow_occurred(tg)) {
542 tg->funcs->clear_optc_underflow(tg);
543 return true;
544 }
545
546 if (hubp->funcs->hubp_get_underflow_status(hubp)) {
547 hubp->funcs->hubp_clear_underflow(hubp);
548 return true;
549 }
550 return false;
551 }
552
/*
 * dcn10_enable_power_gating_plane - Allow or forbid power gating of the
 * plane (HUBP/DPP) power domains.
 *
 * @hws: hardware sequencer context used for register access.
 * @enable: true to allow the domains to be power gated, false to force
 *          every domain to stay powered on.
 */
void dcn10_enable_power_gating_plane(
	struct dce_hwseq *hws,
	bool enable)
{
	/* FORCEON disables gating, so it is simply the inverse of @enable
	 * (replaces the original assign-true-then-conditionally-clear dance).
	 */
	bool force_on = !enable;

	/* DCHUBP0/1/2/3 */
	REG_UPDATE(DOMAIN0_PG_CONFIG, DOMAIN0_POWER_FORCEON, force_on);
	REG_UPDATE(DOMAIN2_PG_CONFIG, DOMAIN2_POWER_FORCEON, force_on);
	REG_UPDATE(DOMAIN4_PG_CONFIG, DOMAIN4_POWER_FORCEON, force_on);
	REG_UPDATE(DOMAIN6_PG_CONFIG, DOMAIN6_POWER_FORCEON, force_on);

	/* DPP0/1/2/3 */
	REG_UPDATE(DOMAIN1_PG_CONFIG, DOMAIN1_POWER_FORCEON, force_on);
	REG_UPDATE(DOMAIN3_PG_CONFIG, DOMAIN3_POWER_FORCEON, force_on);
	REG_UPDATE(DOMAIN5_PG_CONFIG, DOMAIN5_POWER_FORCEON, force_on);
	REG_UPDATE(DOMAIN7_PG_CONFIG, DOMAIN7_POWER_FORCEON, force_on);
}
574
/*
 * Take all four display controllers out of legacy VGA mode.
 *
 * Returns immediately when none of D1-D4 reports VGA mode enabled.
 * Otherwise all DxVGA_CONTROL registers are cleared and the VGA_TEST
 * render sequence is kicked off (see the HW engineer's note below) so
 * the DCHUBP timing updates correctly on the vga->extended switch.
 */
void dcn10_disable_vga(
	struct dce_hwseq *hws)
{
	unsigned int in_vga1_mode = 0;
	unsigned int in_vga2_mode = 0;
	unsigned int in_vga3_mode = 0;
	unsigned int in_vga4_mode = 0;

	REG_GET(D1VGA_CONTROL, D1VGA_MODE_ENABLE, &in_vga1_mode);
	REG_GET(D2VGA_CONTROL, D2VGA_MODE_ENABLE, &in_vga2_mode);
	REG_GET(D3VGA_CONTROL, D3VGA_MODE_ENABLE, &in_vga3_mode);
	REG_GET(D4VGA_CONTROL, D4VGA_MODE_ENABLE, &in_vga4_mode);

	/* Nothing to do when no controller is in VGA mode. */
	if (in_vga1_mode == 0 && in_vga2_mode == 0 &&
			in_vga3_mode == 0 && in_vga4_mode == 0)
		return;

	REG_WRITE(D1VGA_CONTROL, 0);
	REG_WRITE(D2VGA_CONTROL, 0);
	REG_WRITE(D3VGA_CONTROL, 0);
	REG_WRITE(D4VGA_CONTROL, 0);

	/* HW Engineer's Notes:
	 *  During switch from vga->extended, if we set the VGA_TEST_ENABLE and
	 *  then hit the VGA_TEST_RENDER_START, then the DCHUBP timing gets updated correctly.
	 *
	 *  Then vBIOS will have it poll for the VGA_TEST_RENDER_DONE and unset
	 *  VGA_TEST_ENABLE, to leave it in the same state as before.
	 */
	REG_UPDATE(VGA_TEST_CONTROL, VGA_TEST_ENABLE, 1);
	REG_UPDATE(VGA_TEST_CONTROL, VGA_TEST_RENDER_START, 1);
}
607
/**
 * dcn10_dpp_pg_control - DPP power gate control.
 *
 * @hws: dce_hwseq reference.
 * @dpp_inst: DPP instance reference.
 * @power_on: true to power the DPP domain on (un-gate it), false to
 *            power gate it off.
 *
 * Program the power-gate bit for the given DPP instance and wait for
 * the domain's PGFSM status to report the requested power state.
 * No-ops when DPP power gating is disabled by debug option or this
 * ASIC's register map has no DOMAIN1_PG_CONFIG.
 */
void dcn10_dpp_pg_control(
	struct dce_hwseq *hws,
	unsigned int dpp_inst,
	bool power_on)
{
	uint32_t power_gate = power_on ? 0 : 1;
	uint32_t pwr_status = power_on ? PGFSM_POWER_ON : PGFSM_POWER_OFF;

	if (hws->ctx->dc->debug.disable_dpp_power_gate)
		return;
	if (REG(DOMAIN1_PG_CONFIG) == 0)
		return;

	/* DPP n maps to power domain 2n+1 (DOMAIN1/3/5/7). */
	switch (dpp_inst) {
	case 0: /* DPP0 */
		REG_UPDATE(DOMAIN1_PG_CONFIG,
				DOMAIN1_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN1_PG_STATUS,
				DOMAIN1_PGFSM_PWR_STATUS, pwr_status,
				1, 1000);
		break;
	case 1: /* DPP1 */
		REG_UPDATE(DOMAIN3_PG_CONFIG,
				DOMAIN3_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN3_PG_STATUS,
				DOMAIN3_PGFSM_PWR_STATUS, pwr_status,
				1, 1000);
		break;
	case 2: /* DPP2 */
		REG_UPDATE(DOMAIN5_PG_CONFIG,
				DOMAIN5_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN5_PG_STATUS,
				DOMAIN5_PGFSM_PWR_STATUS, pwr_status,
				1, 1000);
		break;
	case 3: /* DPP3 */
		REG_UPDATE(DOMAIN7_PG_CONFIG,
				DOMAIN7_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN7_PG_STATUS,
				DOMAIN7_PGFSM_PWR_STATUS, pwr_status,
				1, 1000);
		break;
	default:
		BREAK_TO_DEBUGGER();
		break;
	}
}
668
/**
 * dcn10_hubp_pg_control - HUBP power gate control.
 *
 * @hws: dce_hwseq reference.
 * @hubp_inst: HUBP instance reference.
 * @power_on: true to power the HUBP domain on (un-gate it), false to
 *            power gate it off.
 *
 * Program the power-gate bit for the given HUBP instance and wait for
 * the domain's PGFSM status to report the requested power state.
 * No-ops when HUBP power gating is disabled by debug option or this
 * ASIC's register map has no DOMAIN0_PG_CONFIG.
 */
void dcn10_hubp_pg_control(
	struct dce_hwseq *hws,
	unsigned int hubp_inst,
	bool power_on)
{
	uint32_t power_gate = power_on ? 0 : 1;
	uint32_t pwr_status = power_on ? PGFSM_POWER_ON : PGFSM_POWER_OFF;

	if (hws->ctx->dc->debug.disable_hubp_power_gate)
		return;
	if (REG(DOMAIN0_PG_CONFIG) == 0)
		return;

	/* HUBP n maps to power domain 2n (DOMAIN0/2/4/6). */
	switch (hubp_inst) {
	case 0: /* DCHUBP0 */
		REG_UPDATE(DOMAIN0_PG_CONFIG,
				DOMAIN0_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN0_PG_STATUS,
				DOMAIN0_PGFSM_PWR_STATUS, pwr_status,
				1, 1000);
		break;
	case 1: /* DCHUBP1 */
		REG_UPDATE(DOMAIN2_PG_CONFIG,
				DOMAIN2_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN2_PG_STATUS,
				DOMAIN2_PGFSM_PWR_STATUS, pwr_status,
				1, 1000);
		break;
	case 2: /* DCHUBP2 */
		REG_UPDATE(DOMAIN4_PG_CONFIG,
				DOMAIN4_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN4_PG_STATUS,
				DOMAIN4_PGFSM_PWR_STATUS, pwr_status,
				1, 1000);
		break;
	case 3: /* DCHUBP3 */
		REG_UPDATE(DOMAIN6_PG_CONFIG,
				DOMAIN6_POWER_GATE, power_gate);

		REG_WAIT(DOMAIN6_PG_STATUS,
				DOMAIN6_PGFSM_PWR_STATUS, pwr_status,
				1, 1000);
		break;
	default:
		BREAK_TO_DEBUGGER();
		break;
	}
}
729
/*
 * Power up (un-gate) the front-end resources for one plane.
 *
 * Enables the DPP root clock first (if the hook exists), then — when
 * this ASIC exposes DC_IP_REQUEST_CNTL — opens the IP request window,
 * powers on the DPP and HUBP power domains for @plane_id, and closes
 * the window again.
 */
static void power_on_plane_resources(
	struct dce_hwseq *hws,
	int plane_id)
{
	DC_LOGGER_INIT(hws->ctx->logger);

	if (hws->funcs.dpp_root_clock_control)
		hws->funcs.dpp_root_clock_control(hws, plane_id, true);

	if (REG(DC_IP_REQUEST_CNTL)) {
		/* IP_REQUEST_EN is asserted around every PG change in this file. */
		REG_SET(DC_IP_REQUEST_CNTL, 0,
				IP_REQUEST_EN, 1);

		if (hws->funcs.dpp_pg_control)
			hws->funcs.dpp_pg_control(hws, plane_id, true);

		if (hws->funcs.hubp_pg_control)
			hws->funcs.hubp_pg_control(hws, plane_id, true);

		REG_SET(DC_IP_REQUEST_CNTL, 0,
				IP_REQUEST_EN, 0);
		DC_LOG_DEBUG(
				"Un-gated front end for pipe %d\n", plane_id);
	}
}
755
/*
 * Revert the DEGVIDCN10_253 workaround (see apply_DEGVIDCN10_253_wa):
 * blank HUBP0, power gate it again inside an IP_REQUEST_EN window, and
 * clear the wa_state flag. No-op when the WA was never applied.
 */
static void undo_DEGVIDCN10_253_wa(struct dc *dc)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct hubp *hubp = dc->res_pool->hubps[0];

	if (!hws->wa_state.DEGVIDCN10_253_applied)
		return;

	hubp->funcs->set_blank(hubp, true);

	REG_SET(DC_IP_REQUEST_CNTL, 0,
			IP_REQUEST_EN, 1);

	/* false = power gate HUBP0 back off */
	hws->funcs.hubp_pg_control(hws, 0, false);
	REG_SET(DC_IP_REQUEST_CNTL, 0,
			IP_REQUEST_EN, 0);

	hws->wa_state.DEGVIDCN10_253_applied = false;
}
775
/*
 * DEGVIDCN10_253 workaround: when every HUBP in the pool is power
 * gated, power HUBP0 back on (inside an IP_REQUEST_EN window) and
 * de-assert its blank enable so that stutter can be enabled. Skipped
 * when stutter is disabled by debug option or the WA is not flagged
 * for this ASIC. Undone by undo_DEGVIDCN10_253_wa().
 */
static void apply_DEGVIDCN10_253_wa(struct dc *dc)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct hubp *hubp = dc->res_pool->hubps[0];
	int i;

	if (dc->debug.disable_stutter)
		return;

	if (!hws->wa.DEGVIDCN10_253)
		return;

	/* WA only applies when every HUBP is power gated. */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		if (!dc->res_pool->hubps[i]->power_gated)
			return;
	}

	/* all pipe power gated, apply work around to enable stutter. */

	REG_SET(DC_IP_REQUEST_CNTL, 0,
			IP_REQUEST_EN, 1);

	/* true = power HUBP0 on (un-gate) */
	hws->funcs.hubp_pg_control(hws, 0, true);
	REG_SET(DC_IP_REQUEST_CNTL, 0,
			IP_REQUEST_EN, 0);

	hubp->funcs->set_hubp_blank_en(hubp, false);
	hws->wa_state.DEGVIDCN10_253_applied = true;
}
805
/*
 * Run the VBIOS "golden init" display power-gating sequence: global DCN
 * init followed by a per-pipe disable. If the command table flipped
 * DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE from 0 to 1, it is put
 * back in line with the stutter debug option (see WA comment below).
 */
void dcn10_bios_golden_init(struct dc *dc)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct dc_bios *bp = dc->ctx->dc_bios;
	int i;
	bool allow_self_fresh_force_enable = true;

	/* Some platforms handle golden init in the S0i3 WA path instead. */
	if (hws->funcs.s0i3_golden_init_wa && hws->funcs.s0i3_golden_init_wa(dc))
		return;

	/* Sample the self-refresh force-enable state before the command table runs. */
	if (dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled)
		allow_self_fresh_force_enable =
				dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled(dc->res_pool->hubbub);


	/* WA for making DF sleep when idle after resume from S0i3.
	 * DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE is set to 1 by
	 * command table, if DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE = 0
	 * before calling command table and it changed to 1 after,
	 * it should be set back to 0.
	 */

	/* initialize dcn global */
	bp->funcs->enable_disp_power_gating(bp,
			CONTROLLER_ID_D0, ASIC_PIPE_INIT);

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		/* initialize dcn per pipe */
		bp->funcs->enable_disp_power_gating(bp,
				CONTROLLER_ID_D0 + i, ASIC_PIPE_DISABLE);
	}

	if (dc->res_pool->hubbub->funcs->allow_self_refresh_control)
		if (allow_self_fresh_force_enable == false &&
				dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled(dc->res_pool->hubbub))
			dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub,
										!dc->res_pool->hubbub->ctx->dc->debug.disable_stutter);

}
845
/*
 * Workaround for a spurious OPTC underflow indication.
 *
 * Waits for the MPCC of every pipe still attached to @stream to
 * disconnect and re-enables blank-data double buffering. The underflow
 * status is cleared only if it was NOT already set on entry, so a real
 * pre-existing underflow is preserved while one raised by this sequence
 * itself is discarded.
 */
static void false_optc_underflow_wa(
		struct dc *dc,
		const struct dc_stream_state *stream,
		struct timing_generator *tg)
{
	int i;
	bool underflow;

	if (!dc->hwseq->wa.false_optc_underflow)
		return;

	/* Remember whether underflow was already pending before we start. */
	underflow = tg->funcs->is_optc_underflow_occurred(tg);

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];

		if (old_pipe_ctx->stream != stream)
			continue;

		dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, old_pipe_ctx);
	}

	if (tg->funcs->set_blank_data_double_buffer)
		tg->funcs->set_blank_data_double_buffer(tg, true);

	/* Only clear an underflow that appeared during this sequence. */
	if (tg->funcs->is_optc_underflow_occurred(tg) && !underflow)
		tg->funcs->clear_optc_underflow(tg);
}
874
calculate_vready_offset_for_group(struct pipe_ctx * pipe)875 static int calculate_vready_offset_for_group(struct pipe_ctx *pipe)
876 {
877 struct pipe_ctx *other_pipe;
878 int vready_offset = pipe->pipe_dlg_param.vready_offset;
879
880 /* Always use the largest vready_offset of all connected pipes */
881 for (other_pipe = pipe->bottom_pipe; other_pipe != NULL; other_pipe = other_pipe->bottom_pipe) {
882 if (other_pipe->pipe_dlg_param.vready_offset > vready_offset)
883 vready_offset = other_pipe->pipe_dlg_param.vready_offset;
884 }
885 for (other_pipe = pipe->top_pipe; other_pipe != NULL; other_pipe = other_pipe->top_pipe) {
886 if (other_pipe->pipe_dlg_param.vready_offset > vready_offset)
887 vready_offset = other_pipe->pipe_dlg_param.vready_offset;
888 }
889 for (other_pipe = pipe->next_odm_pipe; other_pipe != NULL; other_pipe = other_pipe->next_odm_pipe) {
890 if (other_pipe->pipe_dlg_param.vready_offset > vready_offset)
891 vready_offset = other_pipe->pipe_dlg_param.vready_offset;
892 }
893 for (other_pipe = pipe->prev_odm_pipe; other_pipe != NULL; other_pipe = other_pipe->prev_odm_pipe) {
894 if (other_pipe->pipe_dlg_param.vready_offset > vready_offset)
895 vready_offset = other_pipe->pipe_dlg_param.vready_offset;
896 }
897
898 return vready_offset;
899 }
900
/*
 * Program the shared back end (pixel clock, OTG timing, blank color) for a
 * stream and start the CRTC. Only called for the parent (top) pipe; child
 * pipes of an MPC combine share the same back end and return immediately.
 *
 * Returns DC_OK on success, DC_ERROR_UNEXPECTED if pixel clock programming
 * or CRTC enable fails.
 */
enum dc_status dcn10_enable_stream_timing(
		struct pipe_ctx *pipe_ctx,
		struct dc_state *context,
		struct dc *dc)
{
	struct dc_stream_state *stream = pipe_ctx->stream;
	enum dc_color_space color_space;
	struct tg_color black_color = {0};

	/* by upper caller loop, pipe0 is parent pipe and be called first.
	 * back end is set up by for pipe0. Other children pipe share back end
	 * with pipe 0. No program is needed.
	 */
	if (pipe_ctx->top_pipe != NULL)
		return DC_OK;

	/* TODO check if timing_changed, disable stream if timing changed */

	/* HW program guide assume display already disable
	 * by unplug sequence. OTG assume stop.
	 */
	pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, true);

	/* Pixel clock must be running before timing can be programmed. */
	if (false == pipe_ctx->clock_source->funcs->program_pix_clk(
			pipe_ctx->clock_source,
			&pipe_ctx->stream_res.pix_clk_params,
			dc->link_srv->dp_get_encoding_format(&pipe_ctx->link_config.dp_link_settings),
			&pipe_ctx->pll_settings)) {
		BREAK_TO_DEBUGGER();
		return DC_ERROR_UNEXPECTED;
	}

	/* Track OTG's symclk reference for HDMI so PHY clock state can be
	 * managed across enable/disable sequences.
	 */
	if (dc_is_hdmi_tmds_signal(stream->signal)) {
		stream->link->phy_state.symclk_ref_cnts.otg = 1;
		if (stream->link->phy_state.symclk_state == SYMCLK_OFF_TX_OFF)
			stream->link->phy_state.symclk_state = SYMCLK_ON_TX_OFF;
		else
			stream->link->phy_state.symclk_state = SYMCLK_ON_TX_ON;
	}

	pipe_ctx->stream_res.tg->funcs->program_timing(
			pipe_ctx->stream_res.tg,
			&stream->timing,
			calculate_vready_offset_for_group(pipe_ctx),
			pipe_ctx->pipe_dlg_param.vstartup_start,
			pipe_ctx->pipe_dlg_param.vupdate_offset,
			pipe_ctx->pipe_dlg_param.vupdate_width,
			pipe_ctx->stream->signal,
			true);

#if 0 /* move to after enable_crtc */
	/* TODO: OPP FMT, ABM. etc. should be done here. */
	/* or FPGA now. instance 0 only. TODO: move to opp.c */

	inst_offset = reg_offsets[pipe_ctx->stream_res.tg->inst].fmt;

	pipe_ctx->stream_res.opp->funcs->opp_program_fmt(
				pipe_ctx->stream_res.opp,
				&stream->bit_depth_params,
				&stream->clamping);
#endif
	/* program otg blank color */
	color_space = stream->output_color_space;
	color_space_to_black_color(dc, color_space, &black_color);

	/*
	 * The way 420 is packed, 2 channels carry Y component, 1 channel
	 * alternate between Cb and Cr, so both channels need the pixel
	 * value for Y
	 */
	if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
		black_color.color_r_cr = black_color.color_g_y;

	if (pipe_ctx->stream_res.tg->funcs->set_blank_color)
		pipe_ctx->stream_res.tg->funcs->set_blank_color(
				pipe_ctx->stream_res.tg,
				&black_color);

	/* Blank the OTG before enabling the CRTC and apply the false-underflow
	 * workaround while MPCC disconnects drain.
	 */
	if (pipe_ctx->stream_res.tg->funcs->is_blanked &&
			!pipe_ctx->stream_res.tg->funcs->is_blanked(pipe_ctx->stream_res.tg)) {
		pipe_ctx->stream_res.tg->funcs->set_blank(pipe_ctx->stream_res.tg, true);
		hwss_wait_for_blank_complete(pipe_ctx->stream_res.tg);
		false_optc_underflow_wa(dc, pipe_ctx->stream, pipe_ctx->stream_res.tg);
	}

	/* VTG is within DCHUB command block. DCFCLK is always on */
	if (false == pipe_ctx->stream_res.tg->funcs->enable_crtc(pipe_ctx->stream_res.tg)) {
		BREAK_TO_DEBUGGER();
		return DC_ERROR_UNEXPECTED;
	}

	/* TODO program crtc source select for non-virtual signal*/
	/* TODO program FMT */
	/* TODO setup link_enc */
	/* TODO set stream attributes */
	/* TODO program audio */
	/* TODO enable stream if timing changed */
	/* TODO unblank stream if DP */

	return DC_OK;
}
1002
/*
 * Tear down the back end for one pipe: disable DPMS/audio, then (for the
 * parent pipe only) stop the CRTC and OPTC clock. The back end is shared
 * across an MPC combine, so only the top pipe releases it; the caller
 * iterates pipes in reverse so the parent is reset last.
 */
static void dcn10_reset_back_end_for_pipe(
		struct dc *dc,
		struct pipe_ctx *pipe_ctx,
		struct dc_state *context)
{
	int i;
	struct dc_link *link;
	DC_LOGGER_INIT(dc->ctx->logger);
	if (pipe_ctx->stream_res.stream_enc == NULL) {
		/* No stream encoder assigned: nothing was enabled on this pipe. */
		pipe_ctx->stream = NULL;
		return;
	}

	link = pipe_ctx->stream->link;
	/* DPMS may already disable or */
	/* dpms_off status is incorrect due to fastboot
	 * feature. When system resume from S4 with second
	 * screen only, the dpms_off would be true but
	 * VBIOS lit up eDP, so check link status too.
	 */
	if (!pipe_ctx->stream->dpms_off || link->link_status.link_active)
		dc->link_srv->set_dpms_off(pipe_ctx);
	else if (pipe_ctx->stream_res.audio)
		dc->hwss.disable_audio_stream(pipe_ctx);

	if (pipe_ctx->stream_res.audio) {
		/*disable az_endpoint*/
		pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);

		/*free audio*/
		if (dc->caps.dynamic_audio == true) {
			/*we have to dynamic arbitrate the audio endpoints*/
			/*we free the resource, need reset is_audio_acquired*/
			update_audio_usage(&dc->current_state->res_ctx, dc->res_pool,
					pipe_ctx->stream_res.audio, false);
			pipe_ctx->stream_res.audio = NULL;
		}
	}

	/* by upper caller loop, parent pipe: pipe0, will be reset last.
	 * back end share by all pipes and will be disable only when disable
	 * parent pipe.
	 */
	if (pipe_ctx->top_pipe == NULL) {

		if (pipe_ctx->stream_res.abm)
			dc->hwss.set_abm_immediate_disable(pipe_ctx);

		pipe_ctx->stream_res.tg->funcs->disable_crtc(pipe_ctx->stream_res.tg);

		pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, false);
		/* Clear any DRR (variable refresh) configuration on the OTG. */
		if (pipe_ctx->stream_res.tg->funcs->set_drr)
			pipe_ctx->stream_res.tg->funcs->set_drr(
					pipe_ctx->stream_res.tg, NULL);
		/* Drop the OTG's symclk reference taken at stream enable. */
		if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal))
			pipe_ctx->stream->link->phy_state.symclk_ref_cnts.otg = 0;
	}

	/* Only clear the stream pointer if this pipe_ctx belongs to the
	 * current state (the teardown above applies either way).
	 */
	for (i = 0; i < dc->res_pool->pipe_count; i++)
		if (&dc->current_state->res_ctx.pipe_ctx[i] == pipe_ctx)
			break;

	if (i == dc->res_pool->pipe_count)
		return;

	pipe_ctx->stream = NULL;
	DC_LOG_DEBUG("Reset back end for pipe %d, tg:%d\n",
					pipe_ctx->pipe_idx, pipe_ctx->stream_res.tg->inst);
}
1072
dcn10_hw_wa_force_recovery(struct dc * dc)1073 static bool dcn10_hw_wa_force_recovery(struct dc *dc)
1074 {
1075 struct hubp *hubp ;
1076 unsigned int i;
1077 bool need_recover = true;
1078
1079 if (!dc->debug.recovery_enabled)
1080 return false;
1081
1082 for (i = 0; i < dc->res_pool->pipe_count; i++) {
1083 struct pipe_ctx *pipe_ctx =
1084 &dc->current_state->res_ctx.pipe_ctx[i];
1085 if (pipe_ctx != NULL) {
1086 hubp = pipe_ctx->plane_res.hubp;
1087 if (hubp != NULL && hubp->funcs->hubp_get_underflow_status) {
1088 if (hubp->funcs->hubp_get_underflow_status(hubp) != 0) {
1089 /* one pipe underflow, we will reset all the pipes*/
1090 need_recover = true;
1091 }
1092 }
1093 }
1094 }
1095 if (!need_recover)
1096 return false;
1097 /*
1098 DCHUBP_CNTL:HUBP_BLANK_EN=1
1099 DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=1
1100 DCHUBP_CNTL:HUBP_DISABLE=1
1101 DCHUBP_CNTL:HUBP_DISABLE=0
1102 DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=0
1103 DCSURF_PRIMARY_SURFACE_ADDRESS
1104 DCHUBP_CNTL:HUBP_BLANK_EN=0
1105 */
1106
1107 for (i = 0; i < dc->res_pool->pipe_count; i++) {
1108 struct pipe_ctx *pipe_ctx =
1109 &dc->current_state->res_ctx.pipe_ctx[i];
1110 if (pipe_ctx != NULL) {
1111 hubp = pipe_ctx->plane_res.hubp;
1112 /*DCHUBP_CNTL:HUBP_BLANK_EN=1*/
1113 if (hubp != NULL && hubp->funcs->set_hubp_blank_en)
1114 hubp->funcs->set_hubp_blank_en(hubp, true);
1115 }
1116 }
1117 /*DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=1*/
1118 hubbub1_soft_reset(dc->res_pool->hubbub, true);
1119
1120 for (i = 0; i < dc->res_pool->pipe_count; i++) {
1121 struct pipe_ctx *pipe_ctx =
1122 &dc->current_state->res_ctx.pipe_ctx[i];
1123 if (pipe_ctx != NULL) {
1124 hubp = pipe_ctx->plane_res.hubp;
1125 /*DCHUBP_CNTL:HUBP_DISABLE=1*/
1126 if (hubp != NULL && hubp->funcs->hubp_disable_control)
1127 hubp->funcs->hubp_disable_control(hubp, true);
1128 }
1129 }
1130 for (i = 0; i < dc->res_pool->pipe_count; i++) {
1131 struct pipe_ctx *pipe_ctx =
1132 &dc->current_state->res_ctx.pipe_ctx[i];
1133 if (pipe_ctx != NULL) {
1134 hubp = pipe_ctx->plane_res.hubp;
1135 /*DCHUBP_CNTL:HUBP_DISABLE=0*/
1136 if (hubp != NULL && hubp->funcs->hubp_disable_control)
1137 hubp->funcs->hubp_disable_control(hubp, true);
1138 }
1139 }
1140 /*DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=0*/
1141 hubbub1_soft_reset(dc->res_pool->hubbub, false);
1142 for (i = 0; i < dc->res_pool->pipe_count; i++) {
1143 struct pipe_ctx *pipe_ctx =
1144 &dc->current_state->res_ctx.pipe_ctx[i];
1145 if (pipe_ctx != NULL) {
1146 hubp = pipe_ctx->plane_res.hubp;
1147 /*DCHUBP_CNTL:HUBP_BLANK_EN=0*/
1148 if (hubp != NULL && hubp->funcs->set_hubp_blank_en)
1149 hubp->funcs->set_hubp_blank_en(hubp, true);
1150 }
1151 }
1152 return true;
1153
1154 }
1155
/*
 * Sanity check that DCHUB currently allows p-state change. On failure:
 * optionally dump HW state, trace pipe state, break to the debugger, and
 * attempt the forced recovery workaround once, re-verifying afterwards.
 */
void dcn10_verify_allow_pstate_change_high(struct dc *dc)
{
	struct hubbub *hubbub = dc->res_pool->hubbub;
	/* Flip to true from a debugger to enable the HW state dump below. */
	static bool should_log_hw_state; /* prevent hw state log by default */

	if (!hubbub->funcs->verify_allow_pstate_change_high)
		return;

	if (!hubbub->funcs->verify_allow_pstate_change_high(hubbub)) {
		int i = 0;

		if (should_log_hw_state)
			dcn10_log_hw_state(dc, NULL);

		/* NOTE(review): pipe_ctx is not declared in this scope; this
		 * relies on the TRACE_DC_PIPE_STATE macro's expansion — verify.
		 */
		TRACE_DC_PIPE_STATE(pipe_ctx, i, MAX_PIPES);
		BREAK_TO_DEBUGGER();
		if (dcn10_hw_wa_force_recovery(dc)) {
			/*check again*/
			if (!hubbub->funcs->verify_allow_pstate_change_high(hubbub))
				BREAK_TO_DEBUGGER();
		}
	}
}
1179
1180 /* trigger HW to start disconnect plane from stream on the next vsync */
dcn10_plane_atomic_disconnect(struct dc * dc,struct pipe_ctx * pipe_ctx)1181 void dcn10_plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx)
1182 {
1183 struct dce_hwseq *hws = dc->hwseq;
1184 struct hubp *hubp = pipe_ctx->plane_res.hubp;
1185 int dpp_id = pipe_ctx->plane_res.dpp->inst;
1186 struct mpc *mpc = dc->res_pool->mpc;
1187 struct mpc_tree *mpc_tree_params;
1188 struct mpcc *mpcc_to_remove = NULL;
1189 struct output_pixel_processor *opp = pipe_ctx->stream_res.opp;
1190
1191 mpc_tree_params = &(opp->mpc_tree_params);
1192 mpcc_to_remove = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, dpp_id);
1193
1194 /*Already reset*/
1195 if (mpcc_to_remove == NULL)
1196 return;
1197
1198 mpc->funcs->remove_mpcc(mpc, mpc_tree_params, mpcc_to_remove);
1199 // Phantom pipes have OTG disabled by default, so MPCC_STATUS will never assert idle,
1200 // so don't wait for MPCC_IDLE in the programming sequence
1201 if (opp != NULL && !pipe_ctx->plane_state->is_phantom)
1202 opp->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;
1203
1204 dc->optimized_required = true;
1205
1206 if (hubp->funcs->hubp_disconnect)
1207 hubp->funcs->hubp_disconnect(hubp);
1208
1209 if (dc->debug.sanity_checks)
1210 hws->funcs.verify_allow_pstate_change_high(dc);
1211 }
1212
1213 /**
1214 * dcn10_plane_atomic_power_down - Power down plane components.
1215 *
1216 * @dc: dc struct reference. used for grab hwseq.
1217 * @dpp: dpp struct reference.
1218 * @hubp: hubp struct reference.
1219 *
1220 * Keep in mind that this operation requires a power gate configuration;
1221 * however, requests for switch power gate are precisely controlled to avoid
1222 * problems. For this reason, power gate request is usually disabled. This
1223 * function first needs to enable the power gate request before disabling DPP
1224 * and HUBP. Finally, it disables the power gate request again.
1225 */
void dcn10_plane_atomic_power_down(struct dc *dc,
		struct dpp *dpp,
		struct hubp *hubp)
{
	struct dce_hwseq *hws = dc->hwseq;
	DC_LOGGER_INIT(dc->ctx->logger);

	if (REG(DC_IP_REQUEST_CNTL)) {
		/* Open the power-gate request window. */
		REG_SET(DC_IP_REQUEST_CNTL, 0,
				IP_REQUEST_EN, 1);

		/* Gate DPP, then HUBP (when the ASIC provides the hooks). */
		if (hws->funcs.dpp_pg_control)
			hws->funcs.dpp_pg_control(hws, dpp->inst, false);

		if (hws->funcs.hubp_pg_control)
			hws->funcs.hubp_pg_control(hws, hubp->inst, false);

		/* Reset DPP state so it starts clean on the next power-up. */
		dpp->funcs->dpp_reset(dpp);

		/* Close the power-gate request window again. */
		REG_SET(DC_IP_REQUEST_CNTL, 0,
				IP_REQUEST_EN, 0);
		DC_LOG_DEBUG(
				"Power gated front end %d\n", hubp->inst);
	}

	/* Also stop the DPP root clock where supported. */
	if (hws->funcs.dpp_root_clock_control)
		hws->funcs.dpp_root_clock_control(hws, dpp->inst, false);
}
1254
1255 /* disable HW used by plane.
1256 * note: cannot disable until disconnect is complete
1257 */
void dcn10_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct hubp *hubp = pipe_ctx->plane_res.hubp;
	struct dpp *dpp = pipe_ctx->plane_res.dpp;
	int opp_id = hubp->opp_id;

	/* Must not gate clocks until the MPCC disconnect (requested earlier)
	 * has completed in hardware.
	 */
	dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe_ctx);

	hubp->funcs->hubp_clk_cntl(hubp, false);

	dpp->funcs->dpp_dppclk_control(dpp, false, false);

	/* Gate the OPP pipe clock only when the OPP is valid and no MPCC is
	 * left attached to it.
	 */
	if (opp_id != 0xf && pipe_ctx->stream_res.opp->mpc_tree_params.opp_list == NULL)
		pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control(
				pipe_ctx->stream_res.opp,
				false);

	hubp->power_gated = true;
	dc->optimized_required = false; /* We're powering off, no need to optimize */

	hws->funcs.plane_atomic_power_down(dc,
			pipe_ctx->plane_res.dpp,
			pipe_ctx->plane_res.hubp);

	/* Scrub the pipe context so the pipe reads as free. */
	pipe_ctx->stream = NULL;
	memset(&pipe_ctx->stream_res, 0, sizeof(pipe_ctx->stream_res));
	memset(&pipe_ctx->plane_res, 0, sizeof(pipe_ctx->plane_res));
	pipe_ctx->top_pipe = NULL;
	pipe_ctx->bottom_pipe = NULL;
	pipe_ctx->plane_state = NULL;
}
1290
dcn10_disable_plane(struct dc * dc,struct pipe_ctx * pipe_ctx)1291 void dcn10_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx)
1292 {
1293 struct dce_hwseq *hws = dc->hwseq;
1294 DC_LOGGER_INIT(dc->ctx->logger);
1295
1296 if (!pipe_ctx->plane_res.hubp || pipe_ctx->plane_res.hubp->power_gated)
1297 return;
1298
1299 hws->funcs.plane_atomic_disable(dc, pipe_ctx);
1300
1301 apply_DEGVIDCN10_253_wa(dc);
1302
1303 DC_LOG_DC("Power down front end %d\n",
1304 pipe_ctx->pipe_idx);
1305 }
1306
/*
 * Bring all pipes to a known disabled state at init/resume: blank any
 * enabled OTGs, reset DET allocation and MPC muxing, disconnect and power
 * down every front end, and power-gate unused DSCs. Pipes carrying a
 * seamless-boot stream are left untouched so the VBIOS-lit display stays up.
 */
void dcn10_init_pipes(struct dc *dc, struct dc_state *context)
{
	int i;
	struct dce_hwseq *hws = dc->hwseq;
	struct hubbub *hubbub = dc->res_pool->hubbub;
	bool can_apply_seamless_boot = false;

	/* Seamless boot applies if any stream requests the optimization. */
	for (i = 0; i < context->stream_count; i++) {
		if (context->streams[i]->apply_seamless_boot_optimization) {
			can_apply_seamless_boot = true;
			break;
		}
	}

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct timing_generator *tg = dc->res_pool->timing_generators[i];
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

		/* There is assumption that pipe_ctx is not mapping irregularly
		 * to non-preferred front end. If pipe_ctx->stream is not NULL,
		 * we will use the pipe, so don't disable
		 */
		if (pipe_ctx->stream != NULL && can_apply_seamless_boot)
			continue;

		/* Blank controller using driver code instead of
		 * command table.
		 */
		if (tg->funcs->is_tg_enabled(tg)) {
			if (hws->funcs.init_blank != NULL) {
				hws->funcs.init_blank(dc, tg);
				tg->funcs->lock(tg);
			} else {
				tg->funcs->lock(tg);
				tg->funcs->set_blank(tg, true);
				hwss_wait_for_blank_complete(tg);
			}
		}
	}

	/* Reset det size */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
		struct hubp *hubp = dc->res_pool->hubps[i];

		/* Do not need to reset for seamless boot */
		if (pipe_ctx->stream != NULL && can_apply_seamless_boot)
			continue;

		if (hubbub && hubp) {
			if (hubbub->funcs->program_det_size)
				hubbub->funcs->program_det_size(hubbub, hubp->inst, 0);
		}
	}

	/* num_opp will be equal to number of mpcc */
	for (i = 0; i < dc->res_pool->res_cap->num_opp; i++) {
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

		/* Cannot reset the MPC mux if seamless boot */
		if (pipe_ctx->stream != NULL && can_apply_seamless_boot)
			continue;

		dc->res_pool->mpc->funcs->mpc_init_single_inst(
				dc->res_pool->mpc, i);
	}

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct timing_generator *tg = dc->res_pool->timing_generators[i];
		struct hubp *hubp = dc->res_pool->hubps[i];
		struct dpp *dpp = dc->res_pool->dpps[i];
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

		/* There is assumption that pipe_ctx is not mapping irregularly
		 * to non-preferred front end. If pipe_ctx->stream is not NULL,
		 * we will use the pipe, so don't disable
		 */
		if (can_apply_seamless_boot &&
			pipe_ctx->stream != NULL &&
			pipe_ctx->stream_res.tg->funcs->is_tg_enabled(
				pipe_ctx->stream_res.tg)) {
			// Enable double buffering for OTG_BLANK no matter if
			// seamless boot is enabled or not to suppress global sync
			// signals when OTG blanked. This is to prevent pipe from
			// requesting data while in PSR.
			tg->funcs->tg_init(tg);
			hubp->power_gated = true;
			continue;
		}

		/* Disable on the current state so the new one isn't cleared. */
		pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];

		dpp->funcs->dpp_reset(dpp);

		/* Temporarily stitch HW resources into the pipe_ctx so the
		 * standard disconnect/disable path below can operate on it.
		 */
		pipe_ctx->stream_res.tg = tg;
		pipe_ctx->pipe_idx = i;

		pipe_ctx->plane_res.hubp = hubp;
		pipe_ctx->plane_res.dpp = dpp;
		pipe_ctx->plane_res.mpcc_inst = dpp->inst;
		hubp->mpcc_id = dpp->inst;
		hubp->opp_id = OPP_ID_INVALID;
		hubp->power_gated = false;

		dc->res_pool->opps[i]->mpc_tree_params.opp_id = dc->res_pool->opps[i]->inst;
		dc->res_pool->opps[i]->mpc_tree_params.opp_list = NULL;
		dc->res_pool->opps[i]->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;
		pipe_ctx->stream_res.opp = dc->res_pool->opps[i];

		hws->funcs.plane_atomic_disconnect(dc, pipe_ctx);

		/* Unlock the OTG (locked in the blanking pass above). */
		if (tg->funcs->is_tg_enabled(tg))
			tg->funcs->unlock(tg);

		dc->hwss.disable_plane(dc, pipe_ctx);

		pipe_ctx->stream_res.tg = NULL;
		pipe_ctx->plane_res.hubp = NULL;

		if (tg->funcs->is_tg_enabled(tg)) {
			if (tg->funcs->init_odm)
				tg->funcs->init_odm(tg);
		}

		tg->funcs->tg_init(tg);
	}

	/* Power gate DSCs */
	if (hws->funcs.dsc_pg_control != NULL) {
		uint32_t num_opps = 0;
		uint32_t opp_id_src0 = OPP_ID_INVALID;
		uint32_t opp_id_src1 = OPP_ID_INVALID;

		// Step 1: To find out which OPTC is running & OPTC DSC is ON
		// We can't use res_pool->res_cap->num_timing_generator to check
		// Because it records display pipes default setting built in driver,
		// not display pipes of the current chip.
		// Some ASICs would be fused display pipes less than the default setting.
		// In dcnxx_resource_construct function, driver would obatin real information.
		for (i = 0; i < dc->res_pool->timing_generator_count; i++) {
			uint32_t optc_dsc_state = 0;
			struct timing_generator *tg = dc->res_pool->timing_generators[i];

			if (tg->funcs->is_tg_enabled(tg)) {
				if (tg->funcs->get_dsc_status)
					tg->funcs->get_dsc_status(tg, &optc_dsc_state);
				// Only one OPTC with DSC is ON, so if we got one result, we would exit this block.
				// non-zero value is DSC enabled
				if (optc_dsc_state != 0) {
					tg->funcs->get_optc_source(tg, &num_opps, &opp_id_src0, &opp_id_src1);
					break;
				}
			}
		}

		// Step 2: To power down DSC but skip DSC of running OPTC
		for (i = 0; i < dc->res_pool->res_cap->num_dsc; i++) {
			struct dcn_dsc_state s  = {0};

			dc->res_pool->dscs[i]->funcs->dsc_read_state(dc->res_pool->dscs[i], &s);

			if ((s.dsc_opp_source == opp_id_src0 || s.dsc_opp_source == opp_id_src1) &&
				s.dsc_clock_en && s.dsc_fw_en)
				continue;

			hws->funcs.dsc_pg_control(hws, dc->res_pool->dscs[i]->inst, false);
		}
	}
}
1477
/*
 * One-time hardware init for DCN10: clocks, DCCG, VGA disable, golden
 * settings, reference clock discovery, link encoders, pipe teardown (or
 * seamless-boot preservation), audio/backlight/ABM/DMCU bring-up, memory
 * power and clock gating defaults, and watermark range notification.
 */
void dcn10_init_hw(struct dc *dc)
{
	int i;
	struct abm *abm = dc->res_pool->abm;
	struct dmcu *dmcu = dc->res_pool->dmcu;
	struct dce_hwseq *hws = dc->hwseq;
	struct dc_bios *dcb = dc->ctx->dc_bios;
	struct resource_pool *res_pool = dc->res_pool;
	uint32_t backlight = MAX_BACKLIGHT_LEVEL;
	bool is_optimized_init_done = false;

	if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks)
		dc->clk_mgr->funcs->init_clocks(dc->clk_mgr);

	/* Align bw context with hw config when system resume. */
	/* NOTE(review): dc->clk_mgr is NULL-checked above but dereferenced
	 * unconditionally here — confirm clk_mgr is guaranteed non-NULL on DCN.
	 */
	if (dc->clk_mgr->clks.dispclk_khz != 0 && dc->clk_mgr->clks.dppclk_khz != 0) {
		dc->current_state->bw_ctx.bw.dcn.clk.dispclk_khz = dc->clk_mgr->clks.dispclk_khz;
		dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz = dc->clk_mgr->clks.dppclk_khz;
	}

	// Initialize the dccg
	if (dc->res_pool->dccg && dc->res_pool->dccg->funcs->dccg_init)
		dc->res_pool->dccg->funcs->dccg_init(res_pool->dccg);

	if (!dcb->funcs->is_accelerated_mode(dcb))
		hws->funcs.disable_vga(dc->hwseq);

	if (!dc_dmub_srv_optimized_init_done(dc->ctx->dmub_srv))
		hws->funcs.bios_golden_init(dc);


	/* Derive DCCG/DCHUB reference clocks from the VBIOS crystal frequency. */
	if (dc->ctx->dc_bios->fw_info_valid) {
		res_pool->ref_clocks.xtalin_clock_inKhz =
				dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency;

		if (res_pool->dccg && res_pool->hubbub) {

			(res_pool->dccg->funcs->get_dccg_ref_freq)(res_pool->dccg,
					dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency,
					&res_pool->ref_clocks.dccg_ref_clock_inKhz);

			(res_pool->hubbub->funcs->get_dchub_ref_freq)(res_pool->hubbub,
					res_pool->ref_clocks.dccg_ref_clock_inKhz,
					&res_pool->ref_clocks.dchub_ref_clock_inKhz);
		} else {
			// Not all ASICs have DCCG sw component
			res_pool->ref_clocks.dccg_ref_clock_inKhz =
					res_pool->ref_clocks.xtalin_clock_inKhz;
			res_pool->ref_clocks.dchub_ref_clock_inKhz =
					res_pool->ref_clocks.xtalin_clock_inKhz;
		}
	} else
		ASSERT_CRITICAL(false);

	for (i = 0; i < dc->link_count; i++) {
		/* Power up AND update implementation according to the
		 * required signal (which may be different from the
		 * default signal on connector).
		 */
		struct dc_link *link = dc->links[i];

		if (!is_optimized_init_done)
			link->link_enc->funcs->hw_init(link->link_enc);

		/* Check for enabled DIG to identify enabled display */
		if (link->link_enc->funcs->is_dig_enabled &&
			link->link_enc->funcs->is_dig_enabled(link->link_enc)) {
			link->link_status.link_active = true;
			if (link->link_enc->funcs->fec_is_active &&
					link->link_enc->funcs->fec_is_active(link->link_enc))
				link->fec_state = dc_link_fec_enabled;
		}
	}

	/* we want to turn off all dp displays before doing detection */
	dc->link_srv->blank_all_dp_displays(dc);

	if (hws->funcs.enable_power_gating_plane)
		hws->funcs.enable_power_gating_plane(dc->hwseq, true);

	/* If taking control over from VBIOS, we may want to optimize our first
	 * mode set, so we need to skip powering down pipes until we know which
	 * pipes we want to use.
	 * Otherwise, if taking control is not possible, we need to power
	 * everything down.
	 */
	if (dcb->funcs->is_accelerated_mode(dcb) || !dc->config.seamless_boot_edp_requested) {
		if (!is_optimized_init_done) {
			hws->funcs.init_pipes(dc, dc->current_state);
			if (dc->res_pool->hubbub->funcs->allow_self_refresh_control)
				dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub,
						!dc->res_pool->hubbub->ctx->dc->debug.disable_stutter);
		}
	}

	if (!is_optimized_init_done) {

		for (i = 0; i < res_pool->audio_count; i++) {
			struct audio *audio = res_pool->audios[i];

			audio->funcs->hw_init(audio);
		}

		/* Pick up the current backlight level from the panel control. */
		for (i = 0; i < dc->link_count; i++) {
			struct dc_link *link = dc->links[i];

			if (link->panel_cntl)
				backlight = link->panel_cntl->funcs->hw_init(link->panel_cntl);
		}

		if (abm != NULL)
			abm->funcs->abm_init(abm, backlight);

		if (dmcu != NULL && !dmcu->auto_load_dmcu)
			dmcu->funcs->dmcu_init(dmcu);
	}

	if (abm != NULL && dmcu != NULL)
		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);

	/* power AFMT HDMI memory TODO: may move to dis/en output save power*/
	if (!is_optimized_init_done)
		REG_WRITE(DIO_MEM_PWR_CTRL, 0);

	if (!dc->debug.disable_clock_gate) {
		/* enable all DCN clock gating */
		REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0);

		REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0);

		REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
	}

	if (dc->clk_mgr->funcs->notify_wm_ranges)
		dc->clk_mgr->funcs->notify_wm_ranges(dc->clk_mgr);
}
1614
1615 /* In headless boot cases, DIG may be turned
1616 * on which causes HW/SW discrepancies.
1617 * To avoid this, power down hardware on boot
1618 * if DIG is turned on
1619 */
void dcn10_power_down_on_boot(struct dc *dc)
{
	struct dc_link *edp_links[MAX_NUM_EDP];
	struct dc_link *edp_link = NULL;
	int edp_num;
	int i = 0;

	dc_get_edp_links(dc, edp_links, &edp_num);
	if (edp_num)
		edp_link = edp_links[0];

	/* Prefer the eDP path: turn backlight off before powering down HW and
	 * cutting eDP panel power, so the panel does not flash.
	 */
	if (edp_link && edp_link->link_enc->funcs->is_dig_enabled &&
			edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc) &&
			dc->hwseq->funcs.edp_backlight_control &&
			dc->hwss.power_down &&
			dc->hwss.edp_power_control) {
		dc->hwseq->funcs.edp_backlight_control(edp_link, false);
		dc->hwss.power_down(dc);
		dc->hwss.edp_power_control(edp_link, false);
	} else {
		/* No eligible eDP: power down once if any DIG is enabled. */
		for (i = 0; i < dc->link_count; i++) {
			struct dc_link *link = dc->links[i];

			if (link->link_enc && link->link_enc->funcs->is_dig_enabled &&
					link->link_enc->funcs->is_dig_enabled(link->link_enc) &&
					dc->hwss.power_down) {
				dc->hwss.power_down(dc);
				break;
			}

		}
	}

	/*
	 * Call update_clocks with empty context
	 * to send DISPLAY_OFF
	 * Otherwise DISPLAY_OFF may not be asserted
	 */
	if (dc->clk_mgr->funcs->set_low_power_state)
		dc->clk_mgr->funcs->set_low_power_state(dc->clk_mgr);
}
1661
/*
 * Reset back ends for pipes that are active in the current state but are
 * either gone from, or need reprogramming in, the new state. Iterates in
 * reverse so the parent (top) pipe, which owns the shared back end, is
 * reset last.
 */
void dcn10_reset_hw_ctx_wrap(
		struct dc *dc,
		struct dc_state *context)
{
	int i;
	struct dce_hwseq *hws = dc->hwseq;

	/* Reset Back End*/
	for (i = dc->res_pool->pipe_count - 1; i >= 0 ; i--) {
		struct pipe_ctx *pipe_ctx_old =
			&dc->current_state->res_ctx.pipe_ctx[i];
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

		if (!pipe_ctx_old->stream)
			continue;

		/* Child pipes share the parent's back end — skip them. */
		if (pipe_ctx_old->top_pipe)
			continue;

		if (!pipe_ctx->stream ||
				pipe_need_reprogram(pipe_ctx_old, pipe_ctx)) {
			struct clock_source *old_clk = pipe_ctx_old->clock_source;

			dcn10_reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state);
			if (hws->funcs.enable_stream_gating)
				hws->funcs.enable_stream_gating(dc, pipe_ctx_old);
			/* Power down the now-unused clock source. */
			if (old_clk)
				old_clk->funcs->cs_power_down(old_clk);
		}
	}
}
1693
patch_address_for_sbs_tb_stereo(struct pipe_ctx * pipe_ctx,PHYSICAL_ADDRESS_LOC * addr)1694 static bool patch_address_for_sbs_tb_stereo(
1695 struct pipe_ctx *pipe_ctx, PHYSICAL_ADDRESS_LOC *addr)
1696 {
1697 struct dc_plane_state *plane_state = pipe_ctx->plane_state;
1698 bool sec_split = pipe_ctx->top_pipe &&
1699 pipe_ctx->top_pipe->plane_state == pipe_ctx->plane_state;
1700 if (sec_split && plane_state->address.type == PLN_ADDR_TYPE_GRPH_STEREO &&
1701 (pipe_ctx->stream->timing.timing_3d_format ==
1702 TIMING_3D_FORMAT_SIDE_BY_SIDE ||
1703 pipe_ctx->stream->timing.timing_3d_format ==
1704 TIMING_3D_FORMAT_TOP_AND_BOTTOM)) {
1705 *addr = plane_state->address.grph_stereo.left_addr;
1706 plane_state->address.grph_stereo.left_addr =
1707 plane_state->address.grph_stereo.right_addr;
1708 return true;
1709 } else {
1710 if (pipe_ctx->stream->view_format != VIEW_3D_FORMAT_NONE &&
1711 plane_state->address.type != PLN_ADDR_TYPE_GRPH_STEREO) {
1712 plane_state->address.type = PLN_ADDR_TYPE_GRPH_STEREO;
1713 plane_state->address.grph_stereo.right_addr =
1714 plane_state->address.grph_stereo.left_addr;
1715 plane_state->address.grph_stereo.right_meta_addr =
1716 plane_state->address.grph_stereo.left_meta_addr;
1717 }
1718 }
1719 return false;
1720 }
1721
/*
 * Program a new surface address into HUBP for this pipe, applying (and then
 * undoing) the SBS/TAB stereo address patch around the flip.
 */
void dcn10_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_ctx)
{
	bool addr_patched = false;
	PHYSICAL_ADDRESS_LOC addr; /* saved left-eye address; only valid when addr_patched */
	struct dc_plane_state *plane_state = pipe_ctx->plane_state;

	if (plane_state == NULL)
		return;

	addr_patched = patch_address_for_sbs_tb_stereo(pipe_ctx, &addr);

	pipe_ctx->plane_res.hubp->funcs->hubp_program_surface_flip_and_addr(
			pipe_ctx->plane_res.hubp,
			&plane_state->address,
			plane_state->flip_immediate);

	plane_state->status.requested_address = plane_state->address;

	/* Immediate flips take effect right away; non-immediate current address
	 * is updated elsewhere once the flip completes.
	 */
	if (plane_state->flip_immediate)
		plane_state->status.current_address = plane_state->address;

	/* Restore the left-eye address patched out above. */
	if (addr_patched)
		pipe_ctx->plane_state->address.grph_stereo.left_addr = addr;
}
1746
dcn10_set_input_transfer_func(struct dc * dc,struct pipe_ctx * pipe_ctx,const struct dc_plane_state * plane_state)1747 bool dcn10_set_input_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
1748 const struct dc_plane_state *plane_state)
1749 {
1750 struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
1751 const struct dc_transfer_func *tf = NULL;
1752 bool result = true;
1753
1754 if (dpp_base == NULL)
1755 return false;
1756
1757 if (plane_state->in_transfer_func)
1758 tf = plane_state->in_transfer_func;
1759
1760 if (plane_state->gamma_correction &&
1761 !dpp_base->ctx->dc->debug.always_use_regamma
1762 && !plane_state->gamma_correction->is_identity
1763 && dce_use_lut(plane_state->format))
1764 dpp_base->funcs->dpp_program_input_lut(dpp_base, plane_state->gamma_correction);
1765
1766 if (tf == NULL)
1767 dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
1768 else if (tf->type == TF_TYPE_PREDEFINED) {
1769 switch (tf->tf) {
1770 case TRANSFER_FUNCTION_SRGB:
1771 dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_HW_sRGB);
1772 break;
1773 case TRANSFER_FUNCTION_BT709:
1774 dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_HW_xvYCC);
1775 break;
1776 case TRANSFER_FUNCTION_LINEAR:
1777 dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
1778 break;
1779 case TRANSFER_FUNCTION_PQ:
1780 dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_USER_PWL);
1781 cm_helper_translate_curve_to_degamma_hw_format(tf, &dpp_base->degamma_params);
1782 dpp_base->funcs->dpp_program_degamma_pwl(dpp_base, &dpp_base->degamma_params);
1783 result = true;
1784 break;
1785 default:
1786 result = false;
1787 break;
1788 }
1789 } else if (tf->type == TF_TYPE_BYPASS) {
1790 dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
1791 } else {
1792 cm_helper_translate_curve_to_degamma_hw_format(tf,
1793 &dpp_base->degamma_params);
1794 dpp_base->funcs->dpp_program_degamma_pwl(dpp_base,
1795 &dpp_base->degamma_params);
1796 result = true;
1797 }
1798
1799 return result;
1800 }
1801
1802 #define MAX_NUM_HW_POINTS 0x200
1803
/*
 * Dump a transfer function's red/green/blue points to the gamma logs.
 * Hardware points go to DC_LOG_GAMMA (red) and DC_LOG_ALL_TF_CHANNELS
 * (green/blue); the remaining points up to MAX_NUM_HW_POINTS go to the
 * "all points" variants.
 */
static void log_tf(struct dc_context *ctx,
		struct dc_transfer_func *tf, uint32_t hw_points_num)
{
	// DC_LOG_GAMMA is default logging of all hw points
	// DC_LOG_ALL_GAMMA logs all points, not only hw points
	// DC_LOG_ALL_TF_POINTS logs all channels of the tf
	int pt;

	DC_LOGGER_INIT(ctx->logger);
	DC_LOG_GAMMA("Gamma Correction TF");
	DC_LOG_ALL_GAMMA("Logging all tf points...");
	DC_LOG_ALL_TF_CHANNELS("Logging all channels...");

	/* Hardware points. */
	for (pt = 0; pt < hw_points_num; pt++) {
		DC_LOG_GAMMA("R\t%d\t%llu", pt, tf->tf_pts.red[pt].value);
		DC_LOG_ALL_TF_CHANNELS("G\t%d\t%llu", pt, tf->tf_pts.green[pt].value);
		DC_LOG_ALL_TF_CHANNELS("B\t%d\t%llu", pt, tf->tf_pts.blue[pt].value);
	}

	/* Remaining (non-hardware) points. */
	for (pt = hw_points_num; pt < MAX_NUM_HW_POINTS; pt++) {
		DC_LOG_ALL_GAMMA("R\t%d\t%llu", pt, tf->tf_pts.red[pt].value);
		DC_LOG_ALL_TF_CHANNELS("G\t%d\t%llu", pt, tf->tf_pts.green[pt].value);
		DC_LOG_ALL_TF_CHANNELS("B\t%d\t%llu", pt, tf->tf_pts.blue[pt].value);
	}
}
1829
dcn10_set_output_transfer_func(struct dc * dc,struct pipe_ctx * pipe_ctx,const struct dc_stream_state * stream)1830 bool dcn10_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
1831 const struct dc_stream_state *stream)
1832 {
1833 struct dpp *dpp = pipe_ctx->plane_res.dpp;
1834
1835 if (dpp == NULL)
1836 return false;
1837
1838 dpp->regamma_params.hw_points_num = GAMMA_HW_POINTS_NUM;
1839
1840 if (stream->out_transfer_func &&
1841 stream->out_transfer_func->type == TF_TYPE_PREDEFINED &&
1842 stream->out_transfer_func->tf == TRANSFER_FUNCTION_SRGB)
1843 dpp->funcs->dpp_program_regamma_pwl(dpp, NULL, OPP_REGAMMA_SRGB);
1844
1845 /* dcn10_translate_regamma_to_hw_format takes 750us, only do it when full
1846 * update.
1847 */
1848 else if (cm_helper_translate_curve_to_hw_format(dc->ctx,
1849 stream->out_transfer_func,
1850 &dpp->regamma_params, false)) {
1851 dpp->funcs->dpp_program_regamma_pwl(
1852 dpp,
1853 &dpp->regamma_params, OPP_REGAMMA_USER);
1854 } else
1855 dpp->funcs->dpp_program_regamma_pwl(dpp, NULL, OPP_REGAMMA_BYPASS);
1856
1857 if (stream != NULL && stream->ctx != NULL &&
1858 stream->out_transfer_func != NULL) {
1859 log_tf(stream->ctx,
1860 stream->out_transfer_func,
1861 dpp->regamma_params.hw_points_num);
1862 }
1863
1864 return true;
1865 }
1866
dcn10_pipe_control_lock(struct dc * dc,struct pipe_ctx * pipe,bool lock)1867 void dcn10_pipe_control_lock(
1868 struct dc *dc,
1869 struct pipe_ctx *pipe,
1870 bool lock)
1871 {
1872 struct dce_hwseq *hws = dc->hwseq;
1873
1874 /* use TG master update lock to lock everything on the TG
1875 * therefore only top pipe need to lock
1876 */
1877 if (!pipe || pipe->top_pipe)
1878 return;
1879
1880 if (dc->debug.sanity_checks)
1881 hws->funcs.verify_allow_pstate_change_high(dc);
1882
1883 if (lock)
1884 pipe->stream_res.tg->funcs->lock(pipe->stream_res.tg);
1885 else
1886 pipe->stream_res.tg->funcs->unlock(pipe->stream_res.tg);
1887
1888 if (dc->debug.sanity_checks)
1889 hws->funcs.verify_allow_pstate_change_high(dc);
1890 }
1891
1892 /**
1893 * delay_cursor_until_vupdate() - Delay cursor update if too close to VUPDATE.
1894 *
1895 * Software keepout workaround to prevent cursor update locking from stalling
1896 * out cursor updates indefinitely or from old values from being retained in
1897 * the case where the viewport changes in the same frame as the cursor.
1898 *
1899 * The idea is to calculate the remaining time from VPOS to VUPDATE. If it's
1900 * too close to VUPDATE, then stall out until VUPDATE finishes.
1901 *
1902 * TODO: Optimize cursor programming to be once per frame before VUPDATE
1903 * to avoid the need for this workaround.
1904 *
1905 * @dc: Current DC state
1906 * @pipe_ctx: Pipe_ctx pointer for delayed cursor update
1907 *
1908 * Return: void
1909 */
static void delay_cursor_until_vupdate(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
	struct dc_stream_state *stream = pipe_ctx->stream;
	struct crtc_position position;
	uint32_t vupdate_start, vupdate_end;
	unsigned int lines_to_vupdate, us_to_vupdate, vpos;
	unsigned int us_per_line, us_vupdate;

	/* Both hooks are needed to locate VUPDATE and the current scan position. */
	if (!dc->hwss.calc_vupdate_position || !dc->hwss.get_position)
		return;

	/* No active encoder/TG means there is nothing to keep out of. */
	if (!pipe_ctx->stream_res.stream_enc || !pipe_ctx->stream_res.tg)
		return;

	dc->hwss.calc_vupdate_position(dc, pipe_ctx, &vupdate_start,
				       &vupdate_end);

	dc->hwss.get_position(&pipe_ctx, 1, &position);
	vpos = position.vertical_count;

	/* Avoid wraparound calculation issues */
	vupdate_start += stream->timing.v_total;
	vupdate_end += stream->timing.v_total;
	vpos += stream->timing.v_total;

	if (vpos <= vupdate_start) {
		/* VPOS is in VACTIVE or back porch. */
		lines_to_vupdate = vupdate_start - vpos;
	} else if (vpos > vupdate_end) {
		/* VPOS is in the front porch. */
		return;
	} else {
		/* VPOS is in VUPDATE. */
		lines_to_vupdate = 0;
	}

	/* Calculate time until VUPDATE in microseconds.
	 * pix_clk_100hz is in 100Hz units, hence the 10000u scale factor.
	 */
	us_per_line =
		stream->timing.h_total * 10000u / stream->timing.pix_clk_100hz;
	us_to_vupdate = lines_to_vupdate * us_per_line;

	/* 70 us is a conservative estimate of cursor update time*/
	if (us_to_vupdate > 70)
		return;

	/* Stall out until the cursor update completes.
	 * Delay is time-to-VUPDATE plus the full VUPDATE window length.
	 */
	if (vupdate_end < vupdate_start)
		vupdate_end += stream->timing.v_total;
	us_vupdate = (vupdate_end - vupdate_start + 1) * us_per_line;
	udelay(us_to_vupdate + us_vupdate);
}
1961
dcn10_cursor_lock(struct dc * dc,struct pipe_ctx * pipe,bool lock)1962 void dcn10_cursor_lock(struct dc *dc, struct pipe_ctx *pipe, bool lock)
1963 {
1964 /* cursor lock is per MPCC tree, so only need to lock one pipe per stream */
1965 if (!pipe || pipe->top_pipe)
1966 return;
1967
1968 /* Prevent cursor lock from stalling out cursor updates. */
1969 if (lock)
1970 delay_cursor_until_vupdate(dc, pipe);
1971
1972 if (pipe->stream && should_use_dmub_lock(pipe->stream->link)) {
1973 union dmub_hw_lock_flags hw_locks = { 0 };
1974 struct dmub_hw_lock_inst_flags inst_flags = { 0 };
1975
1976 hw_locks.bits.lock_cursor = 1;
1977 inst_flags.opp_inst = pipe->stream_res.opp->inst;
1978
1979 dmub_hw_lock_mgr_cmd(dc->ctx->dmub_srv,
1980 lock,
1981 &hw_locks,
1982 &inst_flags);
1983 } else
1984 dc->res_pool->mpc->funcs->cursor_lock(dc->res_pool->mpc,
1985 pipe->stream_res.opp->inst, lock);
1986 }
1987
wait_for_reset_trigger_to_occur(struct dc_context * dc_ctx,struct timing_generator * tg)1988 static bool wait_for_reset_trigger_to_occur(
1989 struct dc_context *dc_ctx,
1990 struct timing_generator *tg)
1991 {
1992 bool rc = false;
1993
1994 /* To avoid endless loop we wait at most
1995 * frames_to_wait_on_triggered_reset frames for the reset to occur. */
1996 const uint32_t frames_to_wait_on_triggered_reset = 10;
1997 int i;
1998
1999 for (i = 0; i < frames_to_wait_on_triggered_reset; i++) {
2000
2001 if (!tg->funcs->is_counter_moving(tg)) {
2002 DC_ERROR("TG counter is not moving!\n");
2003 break;
2004 }
2005
2006 if (tg->funcs->did_triggered_reset_occur(tg)) {
2007 rc = true;
2008 /* usually occurs at i=1 */
2009 DC_SYNC_INFO("GSL: reset occurred at wait count: %d\n",
2010 i);
2011 break;
2012 }
2013
2014 /* Wait for one frame. */
2015 tg->funcs->wait_for_state(tg, CRTC_STATE_VACTIVE);
2016 tg->funcs->wait_for_state(tg, CRTC_STATE_VBLANK);
2017 }
2018
2019 if (false == rc)
2020 DC_ERROR("GSL: Timeout on reset trigger!\n");
2021
2022 return rc;
2023 }
2024
/*
 * Strip common prime factors from *numerator / *denominator using a table
 * of all primes below 1000.
 *
 * When check_uint32_boundary is true, reduction stops as soon as both
 * values fit into 32 bits and the return value reports whether that was
 * achieved; otherwise the full table is applied and true is returned.
 *
 * Fix: the function was declared to return uint64_t although it returns a
 * boolean flag (callers compare the result with false); the return type is
 * now bool.  The misspelled parameter name was also corrected.
 */
static bool reduceSizeAndFraction(uint64_t *numerator,
				  uint64_t *denominator,
				  bool check_uint32_boundary)
{
	int i;
	/* With no 32-bit requirement, reduction always "succeeds". */
	bool ret = check_uint32_boundary == false;
	uint64_t max_int32 = 0xffffffff;
	uint64_t num, denom;
	static const uint16_t prime_numbers[] = {
		2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43,
		47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103,
		107, 109, 113, 127, 131, 137, 139, 149, 151, 157, 163,
		167, 173, 179, 181, 191, 193, 197, 199, 211, 223, 227,
		229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281,
		283, 293, 307, 311, 313, 317, 331, 337, 347, 349, 353,
		359, 367, 373, 379, 383, 389, 397, 401, 409, 419, 421,
		431, 433, 439, 443, 449, 457, 461, 463, 467, 479, 487,
		491, 499, 503, 509, 521, 523, 541, 547, 557, 563, 569,
		571, 577, 587, 593, 599, 601, 607, 613, 617, 619, 631,
		641, 643, 647, 653, 659, 661, 673, 677, 683, 691, 701,
		709, 719, 727, 733, 739, 743, 751, 757, 761, 769, 773,
		787, 797, 809, 811, 821, 823, 827, 829, 839, 853, 857,
		859, 863, 877, 881, 883, 887, 907, 911, 919, 929, 937,
		941, 947, 953, 967, 971, 977, 983, 991, 997};
	int count = ARRAY_SIZE(prime_numbers);

	num = *numerator;
	denom = *denominator;
	for (i = 0; i < count; i++) {
		uint32_t num_remainder, denom_remainder;
		uint64_t num_result, denom_result;

		/* Stop early once both values fit in 32 bits. */
		if (check_uint32_boundary &&
		    num <= max_int32 && denom <= max_int32) {
			ret = true;
			break;
		}
		/* Divide this prime out as long as it divides both values. */
		do {
			num_result = div_u64_rem(num, prime_numbers[i], &num_remainder);
			denom_result = div_u64_rem(denom, prime_numbers[i], &denom_remainder);
			if (num_remainder == 0 && denom_remainder == 0) {
				num = num_result;
				denom = denom_result;
			}
		} while (num_remainder == 0 && denom_remainder == 0);
	}
	*numerator = num;
	*denominator = denom;
	return ret;
}
2074
is_low_refresh_rate(struct pipe_ctx * pipe)2075 static bool is_low_refresh_rate(struct pipe_ctx *pipe)
2076 {
2077 uint32_t master_pipe_refresh_rate =
2078 pipe->stream->timing.pix_clk_100hz * 100 /
2079 pipe->stream->timing.h_total /
2080 pipe->stream->timing.v_total;
2081 return master_pipe_refresh_rate <= 30;
2082 }
2083
get_clock_divider(struct pipe_ctx * pipe,bool account_low_refresh_rate)2084 static uint8_t get_clock_divider(struct pipe_ctx *pipe,
2085 bool account_low_refresh_rate)
2086 {
2087 uint32_t clock_divider = 1;
2088 uint32_t numpipes = 1;
2089
2090 if (account_low_refresh_rate && is_low_refresh_rate(pipe))
2091 clock_divider *= 2;
2092
2093 if (pipe->stream_res.pix_clk_params.pixel_encoding == PIXEL_ENCODING_YCBCR420)
2094 clock_divider *= 2;
2095
2096 while (pipe->next_odm_pipe) {
2097 pipe = pipe->next_odm_pipe;
2098 numpipes++;
2099 }
2100 clock_divider *= numpipes;
2101
2102 return clock_divider;
2103 }
2104
/*
 * Align the DP DTO phase/modulo of each non-embedded pipe in the group to
 * the embedded panel's pixel clock so their vblanks can later be
 * synchronized.
 *
 * Returns the index of the chosen master pipe (the embedded pipe when
 * present, else the first successfully overridden pipe), or -1 when no
 * master was selected or allocation failed.
 */
static int dcn10_align_pixel_clocks(struct dc *dc, int group_size,
		struct pipe_ctx *grouped_pipes[])
{
	struct dc_context *dc_ctx = dc->ctx;
	int i, master = -1, embedded = -1;
	struct dc_crtc_timing *hw_crtc_timing;
	uint64_t phase[MAX_PIPES];
	uint64_t modulo[MAX_PIPES];
	unsigned int pclk;

	uint32_t embedded_pix_clk_100hz;
	uint16_t embedded_h_total;
	uint16_t embedded_v_total;
	uint32_t dp_ref_clk_100hz =
		dc->res_pool->dp_clock_source->ctx->dc->clk_mgr->dprefclk_khz*10;

	hw_crtc_timing = kcalloc(MAX_PIPES, sizeof(*hw_crtc_timing), GFP_KERNEL);
	if (!hw_crtc_timing)
		return master;

	/* Only proceed when DTO override parameters were provided and the
	 * clock source supports overriding the DP pixel clock.
	 */
	if (dc->config.vblank_alignment_dto_params &&
		dc->res_pool->dp_clock_source->funcs->override_dp_pix_clk) {
		/* vblank_alignment_dto_params packs the embedded panel's
		 * h_total (bits 32..46), v_total (bits 48..62) and pixel
		 * clock in 100Hz units (bits 0..31).
		 */
		embedded_h_total =
			(dc->config.vblank_alignment_dto_params >> 32) & 0x7FFF;
		embedded_v_total =
			(dc->config.vblank_alignment_dto_params >> 48) & 0x7FFF;
		embedded_pix_clk_100hz =
			dc->config.vblank_alignment_dto_params & 0xFFFFFFFF;

		/* First pass: read back HW timing/pclk for each pipe and
		 * compute DTO phase/modulo relative to the embedded timing.
		 */
		for (i = 0; i < group_size; i++) {
			grouped_pipes[i]->stream_res.tg->funcs->get_hw_timing(
					grouped_pipes[i]->stream_res.tg,
					&hw_crtc_timing[i]);
			dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
				dc->res_pool->dp_clock_source,
				grouped_pipes[i]->stream_res.tg->inst,
				&pclk);
			hw_crtc_timing[i].pix_clk_100hz = pclk;
			if (dc_is_embedded_signal(
					grouped_pipes[i]->stream->signal)) {
				/* The embedded panel is always the master. */
				embedded = i;
				master = i;
				phase[i] = embedded_pix_clk_100hz*100;
				modulo[i] = dp_ref_clk_100hz*100;
			} else {

				phase[i] = (uint64_t)embedded_pix_clk_100hz*
					hw_crtc_timing[i].h_total*
					hw_crtc_timing[i].v_total;
				phase[i] = div_u64(phase[i], get_clock_divider(grouped_pipes[i], true));
				modulo[i] = (uint64_t)dp_ref_clk_100hz*
					embedded_h_total*
					embedded_v_total;

				if (reduceSizeAndFraction(&phase[i],
						&modulo[i], true) == false) {
					/*
					 * this will help to stop reporting
					 * this timing synchronizable
					 */
					DC_SYNC_INFO("Failed to reduce DTO parameters\n");
					grouped_pipes[i]->stream->has_non_synchronizable_pclk = true;
				}
			}
		}

		/* Second pass: apply the override to every synchronizable
		 * non-embedded pipe and record the resulting pixel clock.
		 */
		for (i = 0; i < group_size; i++) {
			if (i != embedded && !grouped_pipes[i]->stream->has_non_synchronizable_pclk) {
				dc->res_pool->dp_clock_source->funcs->override_dp_pix_clk(
					dc->res_pool->dp_clock_source,
					grouped_pipes[i]->stream_res.tg->inst,
					phase[i], modulo[i]);
				dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
					dc->res_pool->dp_clock_source,
					grouped_pipes[i]->stream_res.tg->inst, &pclk);
				grouped_pipes[i]->stream->timing.pix_clk_100hz =
					pclk*get_clock_divider(grouped_pipes[i], false);
				/* Fall back to the first overridden pipe as
				 * master when no embedded pipe exists.
				 */
				if (master == -1)
					master = i;
			}
		}

	}

	kfree(hw_crtc_timing);
	return master;
}
2192
/*
 * Synchronize the vblanks of a group of pipes: align their DP DTOs to a
 * master pipe, then align each TG's vblank to the master's.  The DPG is
 * temporarily reprogrammed to an oversized area around the alignment and
 * restored afterwards.
 */
void dcn10_enable_vblanks_synchronization(
	struct dc *dc,
	int group_index,
	int group_size,
	struct pipe_ctx *grouped_pipes[])
{
	struct dc_context *dc_ctx = dc->ctx;
	struct output_pixel_processor *opp;
	struct timing_generator *tg;
	int i, width, height, master;

	/* Abort entirely if any follower OTG is disabled; enlarge the DPG
	 * area on the others while alignment is in progress.
	 */
	for (i = 1; i < group_size; i++) {
		opp = grouped_pipes[i]->stream_res.opp;
		tg = grouped_pipes[i]->stream_res.tg;
		tg->funcs->get_otg_active_size(tg, &width, &height);

		if (!tg->funcs->is_tg_enabled(tg)) {
			DC_SYNC_INFO("Skipping timing sync on disabled OTG\n");
			return;
		}

		if (opp->funcs->opp_program_dpg_dimensions)
			opp->funcs->opp_program_dpg_dimensions(opp, width, 2*(height) + 1);
	}

	/* Reset per-stream sync state before alignment. */
	for (i = 0; i < group_size; i++) {
		if (grouped_pipes[i]->stream == NULL)
			continue;
		grouped_pipes[i]->stream->vblank_synchronized = false;
		grouped_pipes[i]->stream->has_non_synchronizable_pclk = false;
	}

	DC_SYNC_INFO("Aligning DP DTOs\n");

	master = dcn10_align_pixel_clocks(dc, group_size, grouped_pipes);

	DC_SYNC_INFO("Synchronizing VBlanks\n");

	if (master >= 0) {
		/* NOTE(review): streams may be NULL (the reset loop above
		 * skips NULL streams) yet are dereferenced unguarded here;
		 * also vblank_synchronized is set for every pipe in the loop,
		 * not only those that passed the if — confirm both are
		 * intentional.
		 */
		for (i = 0; i < group_size; i++) {
			if (i != master && !grouped_pipes[i]->stream->has_non_synchronizable_pclk)
				grouped_pipes[i]->stream_res.tg->funcs->align_vblanks(
					grouped_pipes[master]->stream_res.tg,
					grouped_pipes[i]->stream_res.tg,
					grouped_pipes[master]->stream->timing.pix_clk_100hz,
					grouped_pipes[i]->stream->timing.pix_clk_100hz,
					get_clock_divider(grouped_pipes[master], false),
					get_clock_divider(grouped_pipes[i], false));
			grouped_pipes[i]->stream->vblank_synchronized = true;
		}
		grouped_pipes[master]->stream->vblank_synchronized = true;
		DC_SYNC_INFO("Sync complete\n");
	}

	/* Restore the original DPG dimensions on the follower pipes. */
	for (i = 1; i < group_size; i++) {
		opp = grouped_pipes[i]->stream_res.opp;
		tg = grouped_pipes[i]->stream_res.tg;
		tg->funcs->get_otg_active_size(tg, &width, &height);
		if (opp->funcs->opp_program_dpg_dimensions)
			opp->funcs->opp_program_dpg_dimensions(opp, width, height);
	}
}
2255
/*
 * Synchronize the timing generators of a group of pipes by arming an OTG
 * reset trigger on every follower against pipe 0's TG, waiting for the
 * reset to occur, then disarming.  SubVP phantom pipes are excluded
 * throughout.  The DPG area is temporarily enlarged around the operation
 * and restored afterwards.
 */
void dcn10_enable_timing_synchronization(
	struct dc *dc,
	int group_index,
	int group_size,
	struct pipe_ctx *grouped_pipes[])
{
	struct dc_context *dc_ctx = dc->ctx;
	struct output_pixel_processor *opp;
	struct timing_generator *tg;
	int i, width, height;

	DC_SYNC_INFO("Setting up OTG reset trigger\n");

	/* Abort if any follower OTG is disabled; enlarge the DPG area on
	 * the rest while the sync is in flight.
	 */
	for (i = 1; i < group_size; i++) {
		if (grouped_pipes[i]->stream && grouped_pipes[i]->stream->mall_stream_config.type == SUBVP_PHANTOM)
			continue;

		opp = grouped_pipes[i]->stream_res.opp;
		tg = grouped_pipes[i]->stream_res.tg;
		tg->funcs->get_otg_active_size(tg, &width, &height);

		if (!tg->funcs->is_tg_enabled(tg)) {
			DC_SYNC_INFO("Skipping timing sync on disabled OTG\n");
			return;
		}

		if (opp->funcs->opp_program_dpg_dimensions)
			opp->funcs->opp_program_dpg_dimensions(opp, width, 2*(height) + 1);
	}

	/* Clear the vblank-synchronized state on all real streams. */
	for (i = 0; i < group_size; i++) {
		if (grouped_pipes[i]->stream == NULL)
			continue;

		if (grouped_pipes[i]->stream && grouped_pipes[i]->stream->mall_stream_config.type == SUBVP_PHANTOM)
			continue;

		grouped_pipes[i]->stream->vblank_synchronized = false;
	}

	/* Arm each follower's reset trigger against pipe 0's TG. */
	for (i = 1; i < group_size; i++) {
		if (grouped_pipes[i]->stream && grouped_pipes[i]->stream->mall_stream_config.type == SUBVP_PHANTOM)
			continue;

		grouped_pipes[i]->stream_res.tg->funcs->enable_reset_trigger(
			grouped_pipes[i]->stream_res.tg,
			grouped_pipes[0]->stream_res.tg->inst);
	}

	DC_SYNC_INFO("Waiting for trigger\n");

	/* Need to get only check 1 pipe for having reset as all the others are
	 * synchronized. Look at last pipe programmed to reset.
	 */

	if (grouped_pipes[1]->stream && grouped_pipes[1]->stream->mall_stream_config.type != SUBVP_PHANTOM)
		wait_for_reset_trigger_to_occur(dc_ctx, grouped_pipes[1]->stream_res.tg);

	/* Disarm the triggers now that the reset has been observed. */
	for (i = 1; i < group_size; i++) {
		if (grouped_pipes[i]->stream && grouped_pipes[i]->stream->mall_stream_config.type == SUBVP_PHANTOM)
			continue;

		grouped_pipes[i]->stream_res.tg->funcs->disable_reset_trigger(
			grouped_pipes[i]->stream_res.tg);
	}

	/* Restore the original DPG dimensions. */
	for (i = 1; i < group_size; i++) {
		if (grouped_pipes[i]->stream && grouped_pipes[i]->stream->mall_stream_config.type == SUBVP_PHANTOM)
			continue;

		opp = grouped_pipes[i]->stream_res.opp;
		tg = grouped_pipes[i]->stream_res.tg;
		tg->funcs->get_otg_active_size(tg, &width, &height);
		if (opp->funcs->opp_program_dpg_dimensions)
			opp->funcs->opp_program_dpg_dimensions(opp, width, height);
	}

	DC_SYNC_INFO("Sync complete\n");
}
2335
dcn10_enable_per_frame_crtc_position_reset(struct dc * dc,int group_size,struct pipe_ctx * grouped_pipes[])2336 void dcn10_enable_per_frame_crtc_position_reset(
2337 struct dc *dc,
2338 int group_size,
2339 struct pipe_ctx *grouped_pipes[])
2340 {
2341 struct dc_context *dc_ctx = dc->ctx;
2342 int i;
2343
2344 DC_SYNC_INFO("Setting up\n");
2345 for (i = 0; i < group_size; i++)
2346 if (grouped_pipes[i]->stream_res.tg->funcs->enable_crtc_reset)
2347 grouped_pipes[i]->stream_res.tg->funcs->enable_crtc_reset(
2348 grouped_pipes[i]->stream_res.tg,
2349 0,
2350 &grouped_pipes[i]->stream->triggered_crtc_reset);
2351
2352 DC_SYNC_INFO("Waiting for trigger\n");
2353
2354 for (i = 0; i < group_size; i++)
2355 wait_for_reset_trigger_to_occur(dc_ctx, grouped_pipes[i]->stream_res.tg);
2356
2357 DC_SYNC_INFO("Multi-display sync is complete\n");
2358 }
2359
/*
 * Read the MMHUB system aperture registers and convert them into the
 * byte-address form expected by the HUBP aperture programming.
 */
static void mmhub_read_vm_system_aperture_settings(struct dcn10_hubp *hubp1,
		struct vm_system_aperture_param *apt,
		struct dce_hwseq *hws)
{
	PHYSICAL_ADDRESS_LOC physical_page_number;
	uint32_t logical_addr_low;
	uint32_t logical_addr_high;

	REG_GET(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
			PHYSICAL_PAGE_NUMBER_MSB, &physical_page_number.high_part);
	REG_GET(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
			PHYSICAL_PAGE_NUMBER_LSB, &physical_page_number.low_part);

	REG_GET(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
			LOGICAL_ADDR, &logical_addr_low);

	REG_GET(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
			LOGICAL_ADDR, &logical_addr_high);

	/* Page number -> byte address (4KB pages, hence << 12). */
	apt->sys_default.quad_part = physical_page_number.quad_part << 12;
	/* NOTE(review): the low/high aperture bounds are shifted by 18 —
	 * presumably the register holds the address in 256KB granularity;
	 * confirm against the register spec.
	 */
	apt->sys_low.quad_part = (int64_t)logical_addr_low << 18;
	apt->sys_high.quad_part = (int64_t)logical_addr_high << 18;
}
2383
2384 /* Temporary read settings, future will get values from kmd directly */
/*
 * Read the VM context 0 page-table registers from MMHUB and rebase the
 * page-table base address from UMA space into the DCN address space.
 */
static void mmhub_read_vm_context0_settings(struct dcn10_hubp *hubp1,
		struct vm_context0_param *vm0,
		struct dce_hwseq *hws)
{
	PHYSICAL_ADDRESS_LOC fb_base;
	PHYSICAL_ADDRESS_LOC fb_offset;
	uint32_t fb_base_value;
	uint32_t fb_offset_value;

	REG_GET(DCHUBBUB_SDPIF_FB_BASE, SDPIF_FB_BASE, &fb_base_value);
	REG_GET(DCHUBBUB_SDPIF_FB_OFFSET, SDPIF_FB_OFFSET, &fb_offset_value);

	/* Page-table base, start, end and fault-default addresses are each
	 * split across a HI/LO register pair.
	 */
	REG_GET(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
			PAGE_DIRECTORY_ENTRY_HI32, &vm0->pte_base.high_part);
	REG_GET(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
			PAGE_DIRECTORY_ENTRY_LO32, &vm0->pte_base.low_part);

	REG_GET(VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
			LOGICAL_PAGE_NUMBER_HI4, &vm0->pte_start.high_part);
	REG_GET(VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
			LOGICAL_PAGE_NUMBER_LO32, &vm0->pte_start.low_part);

	REG_GET(VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
			LOGICAL_PAGE_NUMBER_HI4, &vm0->pte_end.high_part);
	REG_GET(VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
			LOGICAL_PAGE_NUMBER_LO32, &vm0->pte_end.low_part);

	REG_GET(VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32,
			PHYSICAL_PAGE_ADDR_HI4, &vm0->fault_default.high_part);
	REG_GET(VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32,
			PHYSICAL_PAGE_ADDR_LO32, &vm0->fault_default.low_part);

	/*
	 * The values in VM_CONTEXT0_PAGE_TABLE_BASE_ADDR is in UMA space.
	 * Therefore we need to do
	 * DCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR = VM_CONTEXT0_PAGE_TABLE_BASE_ADDR
	 * - DCHUBBUB_SDPIF_FB_OFFSET + DCHUBBUB_SDPIF_FB_BASE
	 */
	fb_base.quad_part = (uint64_t)fb_base_value << 24;
	fb_offset.quad_part = (uint64_t)fb_offset_value << 24;
	vm0->pte_base.quad_part += fb_base.quad_part;
	vm0->pte_base.quad_part -= fb_offset.quad_part;
}
2428
2429
dcn10_program_pte_vm(struct dce_hwseq * hws,struct hubp * hubp)2430 static void dcn10_program_pte_vm(struct dce_hwseq *hws, struct hubp *hubp)
2431 {
2432 struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
2433 struct vm_system_aperture_param apt = {0};
2434 struct vm_context0_param vm0 = {0};
2435
2436 mmhub_read_vm_system_aperture_settings(hubp1, &apt, hws);
2437 mmhub_read_vm_context0_settings(hubp1, &vm0, hws);
2438
2439 hubp->funcs->hubp_set_vm_system_aperture_settings(hubp, &apt);
2440 hubp->funcs->hubp_set_vm_context0_settings(hubp, &vm0);
2441 }
2442
/*
 * Power up and clock-enable the resources a plane needs before programming:
 * plane power gating, HUBP DCFCLK, OPP pipe clock, optional VM aperture
 * programming, and the flip interrupt.
 */
static void dcn10_enable_plane(
	struct dc *dc,
	struct pipe_ctx *pipe_ctx,
	struct dc_state *context)
{
	struct dce_hwseq *hws = dc->hwseq;

	if (dc->debug.sanity_checks) {
		hws->funcs.verify_allow_pstate_change_high(dc);
	}

	/* Revert the DEGVIDCN10-253 power-gating workaround before use. */
	undo_DEGVIDCN10_253_wa(dc);

	power_on_plane_resources(dc->hwseq,
		pipe_ctx->plane_res.hubp->inst);

	/* enable DCFCLK current DCHUB */
	pipe_ctx->plane_res.hubp->funcs->hubp_clk_cntl(pipe_ctx->plane_res.hubp, true);

	/* make sure OPP_PIPE_CLOCK_EN = 1 */
	pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control(
			pipe_ctx->stream_res.opp,
			true);

	/* Mirror MMHUB VM settings into the HUBP when GPU VM is in use. */
	if (dc->config.gpu_vm_support)
		dcn10_program_pte_vm(hws, pipe_ctx->plane_res.hubp);

	if (dc->debug.sanity_checks) {
		hws->funcs.verify_allow_pstate_change_high(dc);
	}

	/* Only the top pipe owns the flip interrupt; enable it if requested. */
	if (!pipe_ctx->top_pipe
		&& pipe_ctx->plane_state
		&& pipe_ctx->plane_state->flip_int_enabled
		&& pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_int)
		pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_int(pipe_ctx->plane_res.hubp);

}
2481
dcn10_program_gamut_remap(struct pipe_ctx * pipe_ctx)2482 void dcn10_program_gamut_remap(struct pipe_ctx *pipe_ctx)
2483 {
2484 int i = 0;
2485 struct dpp_grph_csc_adjustment adjust;
2486 memset(&adjust, 0, sizeof(adjust));
2487 adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS;
2488
2489
2490 if (pipe_ctx->stream->gamut_remap_matrix.enable_remap == true) {
2491 adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
2492 for (i = 0; i < CSC_TEMPERATURE_MATRIX_SIZE; i++)
2493 adjust.temperature_matrix[i] =
2494 pipe_ctx->stream->gamut_remap_matrix.matrix[i];
2495 } else if (pipe_ctx->plane_state &&
2496 pipe_ctx->plane_state->gamut_remap_matrix.enable_remap == true) {
2497 adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
2498 for (i = 0; i < CSC_TEMPERATURE_MATRIX_SIZE; i++)
2499 adjust.temperature_matrix[i] =
2500 pipe_ctx->plane_state->gamut_remap_matrix.matrix[i];
2501 }
2502
2503 pipe_ctx->plane_res.dpp->funcs->dpp_set_gamut_remap(pipe_ctx->plane_res.dpp, &adjust);
2504 }
2505
2506
dcn10_is_rear_mpo_fix_required(struct pipe_ctx * pipe_ctx,enum dc_color_space colorspace)2507 static bool dcn10_is_rear_mpo_fix_required(struct pipe_ctx *pipe_ctx, enum dc_color_space colorspace)
2508 {
2509 if (pipe_ctx->plane_state && pipe_ctx->plane_state->layer_index > 0 && is_rgb_cspace(colorspace)) {
2510 if (pipe_ctx->top_pipe) {
2511 struct pipe_ctx *top = pipe_ctx->top_pipe;
2512
2513 while (top->top_pipe)
2514 top = top->top_pipe; // Traverse to top pipe_ctx
2515 if (top->plane_state && top->plane_state->layer_index == 0)
2516 return true; // Front MPO plane not hidden
2517 }
2518 }
2519 return false;
2520 }
2521
dcn10_set_csc_adjustment_rgb_mpo_fix(struct pipe_ctx * pipe_ctx,uint16_t * matrix)2522 static void dcn10_set_csc_adjustment_rgb_mpo_fix(struct pipe_ctx *pipe_ctx, uint16_t *matrix)
2523 {
2524 // Override rear plane RGB bias to fix MPO brightness
2525 uint16_t rgb_bias = matrix[3];
2526
2527 matrix[3] = 0;
2528 matrix[7] = 0;
2529 matrix[11] = 0;
2530 pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment(pipe_ctx->plane_res.dpp, matrix);
2531 matrix[3] = rgb_bias;
2532 matrix[7] = rgb_bias;
2533 matrix[11] = rgb_bias;
2534 }
2535
dcn10_program_output_csc(struct dc * dc,struct pipe_ctx * pipe_ctx,enum dc_color_space colorspace,uint16_t * matrix,int opp_id)2536 void dcn10_program_output_csc(struct dc *dc,
2537 struct pipe_ctx *pipe_ctx,
2538 enum dc_color_space colorspace,
2539 uint16_t *matrix,
2540 int opp_id)
2541 {
2542 if (pipe_ctx->stream->csc_color_matrix.enable_adjustment == true) {
2543 if (pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment != NULL) {
2544
2545 /* MPO is broken with RGB colorspaces when OCSC matrix
2546 * brightness offset >= 0 on DCN1 due to OCSC before MPC
2547 * Blending adds offsets from front + rear to rear plane
2548 *
2549 * Fix is to set RGB bias to 0 on rear plane, top plane
2550 * black value pixels add offset instead of rear + front
2551 */
2552
2553 int16_t rgb_bias = matrix[3];
2554 // matrix[3/7/11] are all the same offset value
2555
2556 if (rgb_bias > 0 && dcn10_is_rear_mpo_fix_required(pipe_ctx, colorspace)) {
2557 dcn10_set_csc_adjustment_rgb_mpo_fix(pipe_ctx, matrix);
2558 } else {
2559 pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment(pipe_ctx->plane_res.dpp, matrix);
2560 }
2561 }
2562 } else {
2563 if (pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_default != NULL)
2564 pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_default(pipe_ctx->plane_res.dpp, colorspace);
2565 }
2566 }
2567
dcn10_update_dpp(struct dpp * dpp,struct dc_plane_state * plane_state)2568 static void dcn10_update_dpp(struct dpp *dpp, struct dc_plane_state *plane_state)
2569 {
2570 struct dc_bias_and_scale bns_params = {0};
2571
2572 // program the input csc
2573 dpp->funcs->dpp_setup(dpp,
2574 plane_state->format,
2575 EXPANSION_MODE_ZERO,
2576 plane_state->input_csc_color_matrix,
2577 plane_state->color_space,
2578 NULL);
2579
2580 //set scale and bias registers
2581 build_prescale_params(&bns_params, plane_state);
2582 if (dpp->funcs->dpp_program_bias_and_scale)
2583 dpp->funcs->dpp_program_bias_and_scale(dpp, &bns_params);
2584 }
2585
dcn10_update_visual_confirm_color(struct dc * dc,struct pipe_ctx * pipe_ctx,int mpcc_id)2586 void dcn10_update_visual_confirm_color(struct dc *dc,
2587 struct pipe_ctx *pipe_ctx,
2588 int mpcc_id)
2589 {
2590 struct mpc *mpc = dc->res_pool->mpc;
2591
2592 if (mpc->funcs->set_bg_color) {
2593 memcpy(&pipe_ctx->plane_state->visual_confirm_color, &(pipe_ctx->visual_confirm_color), sizeof(struct tg_color));
2594 mpc->funcs->set_bg_color(mpc, &(pipe_ctx->visual_confirm_color), mpcc_id);
2595 }
2596 }
2597
/*
 * Update the MPCC blending configuration for the pipe's plane and, on a
 * full update, rebuild its position in the MPC tree.
 */
void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
	struct hubp *hubp = pipe_ctx->plane_res.hubp;
	struct mpcc_blnd_cfg blnd_cfg = {0};
	/* Per-pixel alpha only applies when something is blended below. */
	bool per_pixel_alpha = pipe_ctx->plane_state->per_pixel_alpha && pipe_ctx->bottom_pipe;
	int mpcc_id;
	struct mpcc *new_mpcc;
	struct mpc *mpc = dc->res_pool->mpc;
	struct mpc_tree *mpc_tree_params = &(pipe_ctx->stream_res.opp->mpc_tree_params);

	blnd_cfg.overlap_only = false;
	blnd_cfg.global_gain = 0xff;

	if (per_pixel_alpha) {
		/* DCN1.0 has output CM before MPC which seems to screw with
		 * pre-multiplied alpha.
		 */
		blnd_cfg.pre_multiplied_alpha = (is_rgb_cspace(
				pipe_ctx->stream->output_color_space)
						&& pipe_ctx->plane_state->pre_multiplied_alpha);
		/* Combine per-pixel alpha with the global alpha as a gain
		 * when the plane also requests global alpha.
		 */
		if (pipe_ctx->plane_state->global_alpha) {
			blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA_COMBINED_GLOBAL_GAIN;
			blnd_cfg.global_gain = pipe_ctx->plane_state->global_alpha_value;
		} else {
			blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
		}
	} else {
		blnd_cfg.pre_multiplied_alpha = false;
		blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA;
	}

	if (pipe_ctx->plane_state->global_alpha)
		blnd_cfg.global_alpha = pipe_ctx->plane_state->global_alpha_value;
	else
		blnd_cfg.global_alpha = 0xff;

	/*
	 * TODO: remove hack
	 * Note: currently there is a bug in init_hw such that
	 * on resume from hibernate, BIOS sets up MPCC0, and
	 * we do mpcc_remove but the mpcc cannot go to idle
	 * after remove. This cause us to pick mpcc1 here,
	 * which causes a pstate hang for yet unknown reason.
	 */
	mpcc_id = hubp->inst;

	/* If there is no full update, don't need to touch MPC tree*/
	if (!pipe_ctx->plane_state->update_flags.bits.full_update) {
		mpc->funcs->update_blending(mpc, &blnd_cfg, mpcc_id);
		dc->hwss.update_visual_confirm_color(dc, pipe_ctx, mpcc_id);
		return;
	}

	/* check if this MPCC is already being used */
	new_mpcc = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, mpcc_id);
	/* remove MPCC if being used */
	if (new_mpcc != NULL)
		mpc->funcs->remove_mpcc(mpc, mpc_tree_params, new_mpcc);
	else
		if (dc->debug.sanity_checks)
			mpc->funcs->assert_mpcc_idle_before_connect(
					dc->res_pool->mpc, mpcc_id);

	/* Call MPC to insert new plane */
	new_mpcc = mpc->funcs->insert_plane(dc->res_pool->mpc,
			mpc_tree_params,
			&blnd_cfg,
			NULL,
			NULL,
			hubp->inst,
			mpcc_id);
	dc->hwss.update_visual_confirm_color(dc, pipe_ctx, mpcc_id);

	ASSERT(new_mpcc != NULL);
	/* Record the OPP/MPCC the HUBP now feeds. */
	hubp->opp_id = pipe_ctx->stream_res.opp->inst;
	hubp->mpcc_id = mpcc_id;
}
2675
update_scaler(struct pipe_ctx * pipe_ctx)2676 static void update_scaler(struct pipe_ctx *pipe_ctx)
2677 {
2678 bool per_pixel_alpha =
2679 pipe_ctx->plane_state->per_pixel_alpha && pipe_ctx->bottom_pipe;
2680
2681 pipe_ctx->plane_res.scl_data.lb_params.alpha_en = per_pixel_alpha;
2682 pipe_ctx->plane_res.scl_data.lb_params.depth = LB_PIXEL_DEPTH_36BPP;
2683 /* scaler configuration */
2684 pipe_ctx->plane_res.dpp->funcs->dpp_set_scaler(
2685 pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data);
2686 }
2687
/*
 * Program the DCHUB pipe (HUBP) and DPP for one pipe_ctx, applying only the
 * pieces flagged dirty in plane_state->update_flags. On a full update this
 * covers DPP clock selection, VTG selection, DLG/TTU/RQ registers, blending
 * (MPCC), scaler, viewport, cursor, gamut remap/output CSC and the surface
 * configuration; partial updates program only the affected subset.
 */
static void dcn10_update_dchubp_dpp(
	struct dc *dc,
	struct pipe_ctx *pipe_ctx,
	struct dc_state *context)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct hubp *hubp = pipe_ctx->plane_res.hubp;
	struct dpp *dpp = pipe_ctx->plane_res.dpp;
	struct dc_plane_state *plane_state = pipe_ctx->plane_state;
	struct plane_size size = plane_state->plane_size;
	unsigned int compat_level = 0;
	bool should_divided_by_2 = false;

	/* depends on DML calculation, DPP clock value may change dynamically */
	/* If request max dpp clk is lower than current dispclk, no need to
	 * divided by 2
	 */
	if (plane_state->update_flags.bits.full_update) {

		/* new calculated dispclk, dppclk are stored in
		 * context->bw_ctx.bw.dcn.clk.dispclk_khz / dppclk_khz. current
		 * dispclk, dppclk are from dc->clk_mgr->clks.dispclk_khz.
		 * dcn10_validate_bandwidth compute new dispclk, dppclk.
		 * dispclk will put in use after optimize_bandwidth when
		 * ramp_up_dispclk_with_dpp is called.
		 * there are two places for dppclk be put in use. One location
		 * is the same as the location as dispclk. Another is within
		 * update_dchubp_dpp which happens between pre_bandwidth and
		 * optimize_bandwidth.
		 * dppclk updated within update_dchubp_dpp will cause new
		 * clock values of dispclk and dppclk not be in use at the same
		 * time. when clocks are decreased, this may cause dppclk is
		 * lower than previous configuration and let pipe stuck.
		 * for example, eDP + external dp, change resolution of DP from
		 * 1920x1080x144hz to 1280x960x60hz.
		 * before change: dispclk = 337889 dppclk = 337889
		 * change mode, dcn10_validate_bandwidth calculate
		 * dispclk = 143122 dppclk = 143122
		 * update_dchubp_dpp be executed before dispclk be updated,
		 * dispclk = 337889, but dppclk use new value dispclk /2 =
		 * 168944. this will cause pipe pstate warning issue.
		 * solution: between pre_bandwidth and optimize_bandwidth, while
		 * dispclk is going to be decreased, keep dppclk = dispclk
		 **/
		if (context->bw_ctx.bw.dcn.clk.dispclk_khz <
				dc->clk_mgr->clks.dispclk_khz)
			should_divided_by_2 = false;
		else
			should_divided_by_2 =
					context->bw_ctx.bw.dcn.clk.dppclk_khz <=
					dc->clk_mgr->clks.dispclk_khz / 2;

		dpp->funcs->dpp_dppclk_control(
				dpp,
				should_divided_by_2,
				true);

		/* With a DCCG the DPP DTO is programmed per-pipe; without one,
		 * only the global bookkeeping value in clk_mgr is updated. */
		if (dc->res_pool->dccg)
			dc->res_pool->dccg->funcs->update_dpp_dto(
					dc->res_pool->dccg,
					dpp->inst,
					pipe_ctx->plane_res.bw.dppclk_khz);
		else
			dc->clk_mgr->clks.dppclk_khz = should_divided_by_2 ?
						dc->clk_mgr->clks.dispclk_khz / 2 :
							dc->clk_mgr->clks.dispclk_khz;
	}

	/* TODO: Need input parameter to tell current DCHUB pipe tie to which OTG
	 * VTG is within DCHUBBUB which is commond block share by each pipe HUBP.
	 * VTG is 1:1 mapping with OTG. Each pipe HUBP will select which VTG
	 */
	if (plane_state->update_flags.bits.full_update) {
		hubp->funcs->hubp_vtg_sel(hubp, pipe_ctx->stream_res.tg->inst);

		/* Program DLG/TTU/RQ watermark and request registers computed
		 * by DML for this pipe. */
		hubp->funcs->hubp_setup(
			hubp,
			&pipe_ctx->dlg_regs,
			&pipe_ctx->ttu_regs,
			&pipe_ctx->rq_regs,
			&pipe_ctx->pipe_dlg_param);
		hubp->funcs->hubp_setup_interdependent(
			hubp,
			&pipe_ctx->dlg_regs,
			&pipe_ctx->ttu_regs);
	}

	/* Surface size programmed below is the post-scaler viewport. */
	size.surface_size = pipe_ctx->plane_res.scl_data.viewport;

	if (plane_state->update_flags.bits.full_update ||
			plane_state->update_flags.bits.bpp_change)
		dcn10_update_dpp(dpp, plane_state);

	/* Re-program MPCC blending when any alpha-related state changed. */
	if (plane_state->update_flags.bits.full_update ||
			plane_state->update_flags.bits.per_pixel_alpha_change ||
			plane_state->update_flags.bits.global_alpha_change)
		hws->funcs.update_mpcc(dc, pipe_ctx);

	if (plane_state->update_flags.bits.full_update ||
			plane_state->update_flags.bits.per_pixel_alpha_change ||
			plane_state->update_flags.bits.global_alpha_change ||
			plane_state->update_flags.bits.scaling_change ||
			plane_state->update_flags.bits.position_change) {
		update_scaler(pipe_ctx);
	}

	if (plane_state->update_flags.bits.full_update ||
			plane_state->update_flags.bits.scaling_change ||
			plane_state->update_flags.bits.position_change) {
		hubp->funcs->mem_program_viewport(
			hubp,
			&pipe_ctx->plane_res.scl_data.viewport,
			&pipe_ctx->plane_res.scl_data.viewport_c);
	}

	/* A non-zero cursor address means a cursor is attached to this stream;
	 * re-apply position/attributes so the cursor survives the pipe update. */
	if (pipe_ctx->stream->cursor_attributes.address.quad_part != 0) {
		dc->hwss.set_cursor_position(pipe_ctx);
		dc->hwss.set_cursor_attribute(pipe_ctx);

		if (dc->hwss.set_cursor_sdr_white_level)
			dc->hwss.set_cursor_sdr_white_level(pipe_ctx);
	}

	if (plane_state->update_flags.bits.full_update) {
		/*gamut remap*/
		dc->hwss.program_gamut_remap(pipe_ctx);

		dc->hwss.program_output_csc(dc,
				pipe_ctx,
				pipe_ctx->stream->output_color_space,
				pipe_ctx->stream->csc_color_matrix.matrix,
				pipe_ctx->stream_res.opp->inst);
	}

	/* Any change affecting how the surface is fetched from memory forces a
	 * full surface-config reprogram. */
	if (plane_state->update_flags.bits.full_update ||
		plane_state->update_flags.bits.pixel_format_change ||
		plane_state->update_flags.bits.horizontal_mirror_change ||
		plane_state->update_flags.bits.rotation_change ||
		plane_state->update_flags.bits.swizzle_change ||
		plane_state->update_flags.bits.dcc_change ||
		plane_state->update_flags.bits.bpp_change ||
		plane_state->update_flags.bits.scaling_change ||
		plane_state->update_flags.bits.plane_size_change) {
		hubp->funcs->hubp_program_surface_config(
			hubp,
			plane_state->format,
			&plane_state->tiling_info,
			&size,
			plane_state->rotation,
			&plane_state->dcc,
			plane_state->horizontal_mirror,
			compat_level);
	}

	hubp->power_gated = false;

	hws->funcs.update_plane_addr(dc, pipe_ctx);

	/* Unblank the HUBP only if something in this pipe tree is visible. */
	if (is_pipe_tree_visible(pipe_ctx))
		hubp->funcs->set_blank(hubp, false);
}
2849
/*
 * Blank or unblank pixel data at the OTG for the given pipe, programming the
 * blank color for the stream's color space and keeping ABM state consistent:
 * ABM is (re)enabled on unblank and immediately disabled before blanking.
 */
void dcn10_blank_pixel_data(
		struct dc *dc,
		struct pipe_ctx *pipe_ctx,
		bool blank)
{
	enum dc_color_space color_space;
	struct tg_color black_color = {0};
	struct stream_resource *stream_res = &pipe_ctx->stream_res;
	struct dc_stream_state *stream = pipe_ctx->stream;

	/* program otg blank color */
	color_space = stream->output_color_space;
	color_space_to_black_color(dc, color_space, &black_color);

	/*
	 * The way 420 is packed, 2 channels carry Y component, 1 channel
	 * alternate between Cb and Cr, so both channels need the pixel
	 * value for Y
	 */
	if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
		black_color.color_r_cr = black_color.color_g_y;


	if (stream_res->tg->funcs->set_blank_color)
		stream_res->tg->funcs->set_blank_color(
				stream_res->tg,
				&black_color);

	if (!blank) {
		/* Unblank first, then restore the stream's ABM level. */
		if (stream_res->tg->funcs->set_blank)
			stream_res->tg->funcs->set_blank(stream_res->tg, blank);
		if (stream_res->abm) {
			dc->hwss.set_pipe(pipe_ctx);
			stream_res->abm->funcs->set_abm_level(stream_res->abm, stream->abm_level);
		}
	} else {
		/* Disable ABM before blanking; wait for VBLANK so the blank
		 * takes effect outside active video. */
		dc->hwss.set_abm_immediate_disable(pipe_ctx);
		if (stream_res->tg->funcs->set_blank) {
			stream_res->tg->funcs->wait_for_state(stream_res->tg, CRTC_STATE_VBLANK);
			stream_res->tg->funcs->set_blank(stream_res->tg, blank);
		}
	}
}
2893
dcn10_set_hdr_multiplier(struct pipe_ctx * pipe_ctx)2894 void dcn10_set_hdr_multiplier(struct pipe_ctx *pipe_ctx)
2895 {
2896 struct fixed31_32 multiplier = pipe_ctx->plane_state->hdr_mult;
2897 uint32_t hw_mult = 0x1f000; // 1.0 default multiplier
2898 struct custom_float_format fmt;
2899
2900 fmt.exponenta_bits = 6;
2901 fmt.mantissa_bits = 12;
2902 fmt.sign = true;
2903
2904
2905 if (!dc_fixpt_eq(multiplier, dc_fixpt_from_int(0))) // check != 0
2906 convert_to_custom_float_format(multiplier, &fmt, &hw_mult);
2907
2908 pipe_ctx->plane_res.dpp->funcs->dpp_set_hdr_multiplier(
2909 pipe_ctx->plane_res.dpp, hw_mult);
2910 }
2911
/*
 * Program one front-end pipe: for the top pipe of a tree this sets up global
 * sync, VTG parameters, the VUPDATE interrupt and blank state; then (for all
 * pipes) enables the plane on full update, programs HUBP/DPP, and applies the
 * HDR multiplier plus input/output transfer functions as flagged.
 */
void dcn10_program_pipe(
	struct dc *dc,
	struct pipe_ctx *pipe_ctx,
	struct dc_state *context)
{
	struct dce_hwseq *hws = dc->hwseq;

	/* Only the top pipe of a tree owns the OTG-level programming. */
	if (pipe_ctx->top_pipe == NULL) {
		bool blank = !is_pipe_tree_visible(pipe_ctx);

		pipe_ctx->stream_res.tg->funcs->program_global_sync(
				pipe_ctx->stream_res.tg,
				calculate_vready_offset_for_group(pipe_ctx),
				pipe_ctx->pipe_dlg_param.vstartup_start,
				pipe_ctx->pipe_dlg_param.vupdate_offset,
				pipe_ctx->pipe_dlg_param.vupdate_width);

		pipe_ctx->stream_res.tg->funcs->set_vtg_params(
				pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing, true);

		if (hws->funcs.setup_vupdate_interrupt)
			hws->funcs.setup_vupdate_interrupt(dc, pipe_ctx);

		hws->funcs.blank_pixel_data(dc, pipe_ctx, blank);
	}

	if (pipe_ctx->plane_state->update_flags.bits.full_update)
		dcn10_enable_plane(dc, pipe_ctx, context);

	dcn10_update_dchubp_dpp(dc, pipe_ctx, context);

	hws->funcs.set_hdr_multiplier(pipe_ctx);

	if (pipe_ctx->plane_state->update_flags.bits.full_update ||
			pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change ||
			pipe_ctx->plane_state->update_flags.bits.gamma_change)
		hws->funcs.set_input_transfer_func(dc, pipe_ctx, pipe_ctx->plane_state);

	/* dcn10_translate_regamma_to_hw_format takes 750us to finish
	 * only do gamma programming for full update.
	 * TODO: This can be further optimized/cleaned up
	 * Always call this for now since it does memcmp inside before
	 * doing heavy calculation and programming
	 */
	if (pipe_ctx->plane_state->update_flags.bits.full_update)
		hws->funcs.set_output_transfer_func(dc, pipe_ctx, pipe_ctx->stream);
}
2959
dcn10_wait_for_pending_cleared(struct dc * dc,struct dc_state * context)2960 void dcn10_wait_for_pending_cleared(struct dc *dc,
2961 struct dc_state *context)
2962 {
2963 struct pipe_ctx *pipe_ctx;
2964 struct timing_generator *tg;
2965 int i;
2966
2967 for (i = 0; i < dc->res_pool->pipe_count; i++) {
2968 pipe_ctx = &context->res_ctx.pipe_ctx[i];
2969 tg = pipe_ctx->stream_res.tg;
2970
2971 /*
2972 * Only wait for top pipe's tg penindg bit
2973 * Also skip if pipe is disabled.
2974 */
2975 if (pipe_ctx->top_pipe ||
2976 !pipe_ctx->stream || !pipe_ctx->plane_state ||
2977 !tg->funcs->is_tg_enabled(tg))
2978 continue;
2979
2980 /*
2981 * Wait for VBLANK then VACTIVE to ensure we get VUPDATE.
2982 * For some reason waiting for OTG_UPDATE_PENDING cleared
2983 * seems to not trigger the update right away, and if we
2984 * lock again before VUPDATE then we don't get a separated
2985 * operation.
2986 */
2987 pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VBLANK);
2988 pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);
2989 }
2990 }
2991
/*
 * Front-end programming steps that must run after the pipe locks are
 * released: apply the OPTC-underflow workaround on planeless streams,
 * disable pipes flagged for removal, optimize bandwidth if anything was
 * disabled, and apply the DEGVIDCN10-254 watermark workaround if needed.
 */
void dcn10_post_unlock_program_front_end(
		struct dc *dc,
		struct dc_state *context)
{
	int i;

	DC_LOGGER_INIT(dc->ctx->logger);

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

		/* Only the head pipe of each stream (no top pipe, no previous
		 * ODM pipe) is considered for the underflow workaround. */
		if (!pipe_ctx->top_pipe &&
			!pipe_ctx->prev_odm_pipe &&
			pipe_ctx->stream) {
			struct timing_generator *tg = pipe_ctx->stream_res.tg;

			if (context->stream_status[i].plane_count == 0)
				false_optc_underflow_wa(dc, pipe_ctx->stream, tg);
		}
	}

	/* NOTE(review): the disable flag is read from the new context but the
	 * pipe passed to disable_plane comes from dc->current_state at the
	 * same index — assumes index correspondence between the two states. */
	for (i = 0; i < dc->res_pool->pipe_count; i++)
		if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable)
			dc->hwss.disable_plane(dc, &dc->current_state->res_ctx.pipe_ctx[i]);

	/* Optimize bandwidth once if at least one pipe was disabled. */
	for (i = 0; i < dc->res_pool->pipe_count; i++)
		if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable) {
			dc->hwss.optimize_bandwidth(dc, context);
			break;
		}

	if (dc->hwseq->wa.DEGVIDCN10_254)
		hubbub1_wm_change_req_wa(dc->res_pool->hubbub);
}
3026
dcn10_stereo_hw_frame_pack_wa(struct dc * dc,struct dc_state * context)3027 static void dcn10_stereo_hw_frame_pack_wa(struct dc *dc, struct dc_state *context)
3028 {
3029 uint8_t i;
3030
3031 for (i = 0; i < context->stream_count; i++) {
3032 if (context->streams[i]->timing.timing_3d_format
3033 == TIMING_3D_FORMAT_HW_FRAME_PACKING) {
3034 /*
3035 * Disable stutter
3036 */
3037 hubbub1_allow_self_refresh_control(dc->res_pool->hubbub, false);
3038 break;
3039 }
3040 }
3041 }
3042
/*
 * Raise clocks and program watermarks BEFORE the new surface configuration
 * is applied, so bandwidth is guaranteed during the transition. Clocks are
 * updated with lowering disallowed here; lowering happens later in
 * dcn10_optimize_bandwidth().
 */
void dcn10_prepare_bandwidth(
		struct dc *dc,
		struct dc_state *context)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct hubbub *hubbub = dc->res_pool->hubbub;
	int min_fclk_khz, min_dcfclk_khz, socclk_khz;

	if (dc->debug.sanity_checks)
		hws->funcs.verify_allow_pstate_change_high(dc);

	/* No streams: PHY clock requirement drops to zero. */
	if (context->stream_count == 0)
		context->bw_ctx.bw.dcn.clk.phyclk_khz = 0;

	/* false = not safe to lower clocks yet (pre-update). */
	dc->clk_mgr->funcs->update_clocks(
			dc->clk_mgr,
			context,
			false);

	/* Remember whether watermarks changed so a later optimize pass knows
	 * it still has work to do. */
	dc->wm_optimized_required = hubbub->funcs->program_watermarks(hubbub,
			&context->bw_ctx.bw.dcn.watermarks,
			dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
			true);
	dcn10_stereo_hw_frame_pack_wa(dc, context);

	if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE) {
		DC_FP_START();
		dcn_get_soc_clks(
			dc, &min_fclk_khz, &min_dcfclk_khz, &socclk_khz);
		DC_FP_END();
		dcn_bw_notify_pplib_of_wm_ranges(
			dc, min_fclk_khz, min_dcfclk_khz, socclk_khz);
	}

	if (dc->debug.sanity_checks)
		hws->funcs.verify_allow_pstate_change_high(dc);
}
3080
/*
 * Counterpart to dcn10_prepare_bandwidth(): runs AFTER the new configuration
 * is committed, so clocks may now be lowered to the final computed values
 * and watermarks re-programmed accordingly.
 */
void dcn10_optimize_bandwidth(
		struct dc *dc,
		struct dc_state *context)
{
	struct dce_hwseq *hws = dc->hwseq;
	struct hubbub *hubbub = dc->res_pool->hubbub;
	int min_fclk_khz, min_dcfclk_khz, socclk_khz;

	if (dc->debug.sanity_checks)
		hws->funcs.verify_allow_pstate_change_high(dc);

	/* No streams: PHY clock requirement drops to zero. */
	if (context->stream_count == 0)
		context->bw_ctx.bw.dcn.clk.phyclk_khz = 0;

	/* true = safe to lower clocks now (post-update). */
	dc->clk_mgr->funcs->update_clocks(
			dc->clk_mgr,
			context,
			true);

	hubbub->funcs->program_watermarks(hubbub,
			&context->bw_ctx.bw.dcn.watermarks,
			dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
			true);

	dcn10_stereo_hw_frame_pack_wa(dc, context);

	if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE) {
		DC_FP_START();
		dcn_get_soc_clks(
			dc, &min_fclk_khz, &min_dcfclk_khz, &socclk_khz);
		DC_FP_END();
		dcn_bw_notify_pplib_of_wm_ranges(
			dc, min_fclk_khz, min_dcfclk_khz, socclk_khz);
	}

	if (dc->debug.sanity_checks)
		hws->funcs.verify_allow_pstate_change_high(dc);
}
3119
dcn10_set_drr(struct pipe_ctx ** pipe_ctx,int num_pipes,struct dc_crtc_timing_adjust adjust)3120 void dcn10_set_drr(struct pipe_ctx **pipe_ctx,
3121 int num_pipes, struct dc_crtc_timing_adjust adjust)
3122 {
3123 int i = 0;
3124 struct drr_params params = {0};
3125 // DRR set trigger event mapped to OTG_TRIG_A (bit 11) for manual control flow
3126 unsigned int event_triggers = 0x800;
3127 // Note DRR trigger events are generated regardless of whether num frames met.
3128 unsigned int num_frames = 2;
3129
3130 params.vertical_total_max = adjust.v_total_max;
3131 params.vertical_total_min = adjust.v_total_min;
3132 params.vertical_total_mid = adjust.v_total_mid;
3133 params.vertical_total_mid_frame_num = adjust.v_total_mid_frame_num;
3134 /* TODO: If multiple pipes are to be supported, you need
3135 * some GSL stuff. Static screen triggers may be programmed differently
3136 * as well.
3137 */
3138 for (i = 0; i < num_pipes; i++) {
3139 if ((pipe_ctx[i]->stream_res.tg != NULL) && pipe_ctx[i]->stream_res.tg->funcs) {
3140 if (pipe_ctx[i]->stream_res.tg->funcs->set_drr)
3141 pipe_ctx[i]->stream_res.tg->funcs->set_drr(
3142 pipe_ctx[i]->stream_res.tg, ¶ms);
3143 if (adjust.v_total_max != 0 && adjust.v_total_min != 0)
3144 if (pipe_ctx[i]->stream_res.tg->funcs->set_static_screen_control)
3145 pipe_ctx[i]->stream_res.tg->funcs->set_static_screen_control(
3146 pipe_ctx[i]->stream_res.tg,
3147 event_triggers, num_frames);
3148 }
3149 }
3150 }
3151
dcn10_get_position(struct pipe_ctx ** pipe_ctx,int num_pipes,struct crtc_position * position)3152 void dcn10_get_position(struct pipe_ctx **pipe_ctx,
3153 int num_pipes,
3154 struct crtc_position *position)
3155 {
3156 int i = 0;
3157
3158 /* TODO: handle pipes > 1
3159 */
3160 for (i = 0; i < num_pipes; i++)
3161 pipe_ctx[i]->stream_res.tg->funcs->get_position(pipe_ctx[i]->stream_res.tg, position);
3162 }
3163
dcn10_set_static_screen_control(struct pipe_ctx ** pipe_ctx,int num_pipes,const struct dc_static_screen_params * params)3164 void dcn10_set_static_screen_control(struct pipe_ctx **pipe_ctx,
3165 int num_pipes, const struct dc_static_screen_params *params)
3166 {
3167 unsigned int i;
3168 unsigned int triggers = 0;
3169
3170 if (params->triggers.surface_update)
3171 triggers |= 0x80;
3172 if (params->triggers.cursor_update)
3173 triggers |= 0x2;
3174 if (params->triggers.force_trigger)
3175 triggers |= 0x1;
3176
3177 for (i = 0; i < num_pipes; i++)
3178 pipe_ctx[i]->stream_res.tg->funcs->
3179 set_static_screen_control(pipe_ctx[i]->stream_res.tg,
3180 triggers, params->num_frames);
3181 }
3182
dcn10_config_stereo_parameters(struct dc_stream_state * stream,struct crtc_stereo_flags * flags)3183 static void dcn10_config_stereo_parameters(
3184 struct dc_stream_state *stream, struct crtc_stereo_flags *flags)
3185 {
3186 enum view_3d_format view_format = stream->view_format;
3187 enum dc_timing_3d_format timing_3d_format =\
3188 stream->timing.timing_3d_format;
3189 bool non_stereo_timing = false;
3190
3191 if (timing_3d_format == TIMING_3D_FORMAT_NONE ||
3192 timing_3d_format == TIMING_3D_FORMAT_SIDE_BY_SIDE ||
3193 timing_3d_format == TIMING_3D_FORMAT_TOP_AND_BOTTOM)
3194 non_stereo_timing = true;
3195
3196 if (non_stereo_timing == false &&
3197 view_format == VIEW_3D_FORMAT_FRAME_SEQUENTIAL) {
3198
3199 flags->PROGRAM_STEREO = 1;
3200 flags->PROGRAM_POLARITY = 1;
3201 if (timing_3d_format == TIMING_3D_FORMAT_FRAME_ALTERNATE ||
3202 timing_3d_format == TIMING_3D_FORMAT_INBAND_FA ||
3203 timing_3d_format == TIMING_3D_FORMAT_DP_HDMI_INBAND_FA ||
3204 timing_3d_format == TIMING_3D_FORMAT_SIDEBAND_FA) {
3205
3206 if (stream->link && stream->link->ddc) {
3207 enum display_dongle_type dongle = \
3208 stream->link->ddc->dongle_type;
3209
3210 if (dongle == DISPLAY_DONGLE_DP_VGA_CONVERTER ||
3211 dongle == DISPLAY_DONGLE_DP_DVI_CONVERTER ||
3212 dongle == DISPLAY_DONGLE_DP_HDMI_CONVERTER)
3213 flags->DISABLE_STEREO_DP_SYNC = 1;
3214 }
3215 }
3216 flags->RIGHT_EYE_POLARITY =\
3217 stream->timing.flags.RIGHT_EYE_3D_POLARITY;
3218 if (timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING)
3219 flags->FRAME_PACKED = 1;
3220 }
3221
3222 return;
3223 }
3224
dcn10_setup_stereo(struct pipe_ctx * pipe_ctx,struct dc * dc)3225 void dcn10_setup_stereo(struct pipe_ctx *pipe_ctx, struct dc *dc)
3226 {
3227 struct crtc_stereo_flags flags = { 0 };
3228 struct dc_stream_state *stream = pipe_ctx->stream;
3229
3230 dcn10_config_stereo_parameters(stream, &flags);
3231
3232 if (stream->timing.timing_3d_format == TIMING_3D_FORMAT_SIDEBAND_FA) {
3233 if (!dc_set_generic_gpio_for_stereo(true, dc->ctx->gpio_service))
3234 dc_set_generic_gpio_for_stereo(false, dc->ctx->gpio_service);
3235 } else {
3236 dc_set_generic_gpio_for_stereo(false, dc->ctx->gpio_service);
3237 }
3238
3239 pipe_ctx->stream_res.opp->funcs->opp_program_stereo(
3240 pipe_ctx->stream_res.opp,
3241 flags.PROGRAM_STEREO == 1,
3242 &stream->timing);
3243
3244 pipe_ctx->stream_res.tg->funcs->program_stereo(
3245 pipe_ctx->stream_res.tg,
3246 &stream->timing,
3247 &flags);
3248
3249 return;
3250 }
3251
get_hubp_by_inst(struct resource_pool * res_pool,int mpcc_inst)3252 static struct hubp *get_hubp_by_inst(struct resource_pool *res_pool, int mpcc_inst)
3253 {
3254 int i;
3255
3256 for (i = 0; i < res_pool->pipe_count; i++) {
3257 if (res_pool->hubps[i]->inst == mpcc_inst)
3258 return res_pool->hubps[i];
3259 }
3260 ASSERT(false);
3261 return NULL;
3262 }
3263
dcn10_wait_for_mpcc_disconnect(struct dc * dc,struct resource_pool * res_pool,struct pipe_ctx * pipe_ctx)3264 void dcn10_wait_for_mpcc_disconnect(
3265 struct dc *dc,
3266 struct resource_pool *res_pool,
3267 struct pipe_ctx *pipe_ctx)
3268 {
3269 struct dce_hwseq *hws = dc->hwseq;
3270 int mpcc_inst;
3271
3272 if (dc->debug.sanity_checks) {
3273 hws->funcs.verify_allow_pstate_change_high(dc);
3274 }
3275
3276 if (!pipe_ctx->stream_res.opp)
3277 return;
3278
3279 for (mpcc_inst = 0; mpcc_inst < MAX_PIPES; mpcc_inst++) {
3280 if (pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst]) {
3281 struct hubp *hubp = get_hubp_by_inst(res_pool, mpcc_inst);
3282
3283 if (pipe_ctx->stream_res.tg &&
3284 pipe_ctx->stream_res.tg->funcs->is_tg_enabled(pipe_ctx->stream_res.tg))
3285 res_pool->mpc->funcs->wait_for_idle(res_pool->mpc, mpcc_inst);
3286 pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst] = false;
3287 hubp->funcs->set_blank(hubp, true);
3288 }
3289 }
3290
3291 if (dc->debug.sanity_checks) {
3292 hws->funcs.verify_allow_pstate_change_high(dc);
3293 }
3294
3295 }
3296
dcn10_dummy_display_power_gating(struct dc * dc,uint8_t controller_id,struct dc_bios * dcb,enum pipe_gating_control power_gating)3297 bool dcn10_dummy_display_power_gating(
3298 struct dc *dc,
3299 uint8_t controller_id,
3300 struct dc_bios *dcb,
3301 enum pipe_gating_control power_gating)
3302 {
3303 return true;
3304 }
3305
dcn10_update_pending_status(struct pipe_ctx * pipe_ctx)3306 void dcn10_update_pending_status(struct pipe_ctx *pipe_ctx)
3307 {
3308 struct dc_plane_state *plane_state = pipe_ctx->plane_state;
3309 struct timing_generator *tg = pipe_ctx->stream_res.tg;
3310 bool flip_pending;
3311 struct dc *dc = pipe_ctx->stream->ctx->dc;
3312
3313 if (plane_state == NULL)
3314 return;
3315
3316 flip_pending = pipe_ctx->plane_res.hubp->funcs->hubp_is_flip_pending(
3317 pipe_ctx->plane_res.hubp);
3318
3319 plane_state->status.is_flip_pending = plane_state->status.is_flip_pending || flip_pending;
3320
3321 if (!flip_pending)
3322 plane_state->status.current_address = plane_state->status.requested_address;
3323
3324 if (plane_state->status.current_address.type == PLN_ADDR_TYPE_GRPH_STEREO &&
3325 tg->funcs->is_stereo_left_eye) {
3326 plane_state->status.is_right_eye =
3327 !tg->funcs->is_stereo_left_eye(pipe_ctx->stream_res.tg);
3328 }
3329
3330 if (dc->hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied) {
3331 struct dce_hwseq *hwseq = dc->hwseq;
3332 struct timing_generator *tg = dc->res_pool->timing_generators[0];
3333 unsigned int cur_frame = tg->funcs->get_frame_count(tg);
3334
3335 if (cur_frame != hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied_on_frame) {
3336 struct hubbub *hubbub = dc->res_pool->hubbub;
3337
3338 hubbub->funcs->allow_self_refresh_control(hubbub, !dc->debug.disable_stutter);
3339 hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied = false;
3340 }
3341 }
3342 }
3343
dcn10_update_dchub(struct dce_hwseq * hws,struct dchub_init_data * dh_data)3344 void dcn10_update_dchub(struct dce_hwseq *hws, struct dchub_init_data *dh_data)
3345 {
3346 struct hubbub *hubbub = hws->ctx->dc->res_pool->hubbub;
3347
3348 /* In DCN, this programming sequence is owned by the hubbub */
3349 hubbub->funcs->update_dchub(hubbub, dh_data);
3350 }
3351
/*
 * Return true when the HW cursor on this pipe can be disabled because a
 * visible plane on a pipe above fully contains this pipe's recout — drawing
 * the cursor here would produce a double cursor / wrong scaling.
 */
static bool dcn10_can_pipe_disable_cursor(struct pipe_ctx *pipe_ctx)
{
	struct pipe_ctx *test_pipe, *split_pipe;
	const struct scaler_data *scl_data = &pipe_ctx->plane_res.scl_data;
	/* r1 = this pipe's recout; r1_r/r1_b are its right/bottom edges. */
	struct rect r1 = scl_data->recout, r2, r2_half;
	int r1_r = r1.x + r1.width, r1_b = r1.y + r1.height, r2_r, r2_b;
	int cur_layer = pipe_ctx->plane_state->layer_index;

	/**
	 * Disable the cursor if there's another pipe above this with a
	 * plane that contains this pipe's viewport to prevent double cursor
	 * and incorrect scaling artifacts.
	 */
	for (test_pipe = pipe_ctx->top_pipe; test_pipe;
	     test_pipe = test_pipe->top_pipe) {
		// Skip invisible layer and pipe-split plane on same layer
		if (!test_pipe->plane_state ||
		    !test_pipe->plane_state->visible ||
		    test_pipe->plane_state->layer_index == cur_layer)
			continue;

		r2 = test_pipe->plane_res.scl_data.recout;
		r2_r = r2.x + r2.width;
		r2_b = r2.y + r2.height;
		split_pipe = test_pipe;

		/**
		 * There is another half plane on same layer because of
		 * pipe-split, merge together per same height.
		 *
		 * NOTE(review): this inner scan restarts at pipe_ctx->top_pipe,
		 * so it can match test_pipe itself before reaching its split
		 * sibling, and it dereferences split_pipe->plane_state without
		 * the NULL check the outer loop performs — confirm both against
		 * upstream before relying on the merged rectangle.
		 */
		for (split_pipe = pipe_ctx->top_pipe; split_pipe;
		     split_pipe = split_pipe->top_pipe)
			if (split_pipe->plane_state->layer_index == test_pipe->plane_state->layer_index) {
				/* Union the two half-recouts horizontally. */
				r2_half = split_pipe->plane_res.scl_data.recout;
				r2.x = (r2_half.x < r2.x) ? r2_half.x : r2.x;
				r2.width = r2.width + r2_half.width;
				r2_r = r2.x + r2.width;
				break;
			}

		/* Containment test: r1 entirely inside (merged) r2. */
		if (r1.x >= r2.x && r1.y >= r2.y && r1_r <= r2_r && r1_b <= r2_b)
			return true;
	}

	return false;
}
3398
dcn10_set_cursor_position(struct pipe_ctx * pipe_ctx)3399 void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
3400 {
3401 struct dc_cursor_position pos_cpy = pipe_ctx->stream->cursor_position;
3402 struct hubp *hubp = pipe_ctx->plane_res.hubp;
3403 struct dpp *dpp = pipe_ctx->plane_res.dpp;
3404 struct dc_cursor_mi_param param = {
3405 .pixel_clk_khz = pipe_ctx->stream->timing.pix_clk_100hz / 10,
3406 .ref_clk_khz = pipe_ctx->stream->ctx->dc->res_pool->ref_clocks.dchub_ref_clock_inKhz,
3407 .viewport = pipe_ctx->plane_res.scl_data.viewport,
3408 .h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz,
3409 .v_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.vert,
3410 .rotation = pipe_ctx->plane_state->rotation,
3411 .mirror = pipe_ctx->plane_state->horizontal_mirror,
3412 .stream = pipe_ctx->stream,
3413 };
3414 bool pipe_split_on = false;
3415 bool odm_combine_on = (pipe_ctx->next_odm_pipe != NULL) ||
3416 (pipe_ctx->prev_odm_pipe != NULL);
3417
3418 int x_plane = pipe_ctx->plane_state->dst_rect.x;
3419 int y_plane = pipe_ctx->plane_state->dst_rect.y;
3420 int x_pos = pos_cpy.x;
3421 int y_pos = pos_cpy.y;
3422
3423 if ((pipe_ctx->top_pipe != NULL) || (pipe_ctx->bottom_pipe != NULL)) {
3424 if ((pipe_ctx->plane_state->src_rect.width != pipe_ctx->plane_res.scl_data.viewport.width) ||
3425 (pipe_ctx->plane_state->src_rect.height != pipe_ctx->plane_res.scl_data.viewport.height)) {
3426 pipe_split_on = true;
3427 }
3428 }
3429
3430 /**
3431 * DC cursor is stream space, HW cursor is plane space and drawn
3432 * as part of the framebuffer.
3433 *
3434 * Cursor position can't be negative, but hotspot can be used to
3435 * shift cursor out of the plane bounds. Hotspot must be smaller
3436 * than the cursor size.
3437 */
3438
3439 /**
3440 * Translate cursor from stream space to plane space.
3441 *
3442 * If the cursor is scaled then we need to scale the position
3443 * to be in the approximately correct place. We can't do anything
3444 * about the actual size being incorrect, that's a limitation of
3445 * the hardware.
3446 */
3447 if (param.rotation == ROTATION_ANGLE_90 || param.rotation == ROTATION_ANGLE_270) {
3448 x_pos = (x_pos - x_plane) * pipe_ctx->plane_state->src_rect.height /
3449 pipe_ctx->plane_state->dst_rect.width;
3450 y_pos = (y_pos - y_plane) * pipe_ctx->plane_state->src_rect.width /
3451 pipe_ctx->plane_state->dst_rect.height;
3452 } else {
3453 x_pos = (x_pos - x_plane) * pipe_ctx->plane_state->src_rect.width /
3454 pipe_ctx->plane_state->dst_rect.width;
3455 y_pos = (y_pos - y_plane) * pipe_ctx->plane_state->src_rect.height /
3456 pipe_ctx->plane_state->dst_rect.height;
3457 }
3458
3459 /**
3460 * If the cursor's source viewport is clipped then we need to
3461 * translate the cursor to appear in the correct position on
3462 * the screen.
3463 *
3464 * This translation isn't affected by scaling so it needs to be
3465 * done *after* we adjust the position for the scale factor.
3466 *
3467 * This is only done by opt-in for now since there are still
3468 * some usecases like tiled display that might enable the
3469 * cursor on both streams while expecting dc to clip it.
3470 */
3471 if (pos_cpy.translate_by_source) {
3472 x_pos += pipe_ctx->plane_state->src_rect.x;
3473 y_pos += pipe_ctx->plane_state->src_rect.y;
3474 }
3475
3476 /**
3477 * If the position is negative then we need to add to the hotspot
3478 * to shift the cursor outside the plane.
3479 */
3480
3481 if (x_pos < 0) {
3482 pos_cpy.x_hotspot -= x_pos;
3483 x_pos = 0;
3484 }
3485
3486 if (y_pos < 0) {
3487 pos_cpy.y_hotspot -= y_pos;
3488 y_pos = 0;
3489 }
3490
3491 pos_cpy.x = (uint32_t)x_pos;
3492 pos_cpy.y = (uint32_t)y_pos;
3493
3494 if (pipe_ctx->plane_state->address.type
3495 == PLN_ADDR_TYPE_VIDEO_PROGRESSIVE)
3496 pos_cpy.enable = false;
3497
3498 if (pos_cpy.enable && dcn10_can_pipe_disable_cursor(pipe_ctx))
3499 pos_cpy.enable = false;
3500
3501
3502 if (param.rotation == ROTATION_ANGLE_0) {
3503 int viewport_width =
3504 pipe_ctx->plane_res.scl_data.viewport.width;
3505 int viewport_x =
3506 pipe_ctx->plane_res.scl_data.viewport.x;
3507
3508 if (param.mirror) {
3509 if (pipe_split_on || odm_combine_on) {
3510 if (pos_cpy.x >= viewport_width + viewport_x) {
3511 pos_cpy.x = 2 * viewport_width
3512 - pos_cpy.x + 2 * viewport_x;
3513 } else {
3514 uint32_t temp_x = pos_cpy.x;
3515
3516 pos_cpy.x = 2 * viewport_x - pos_cpy.x;
3517 if (temp_x >= viewport_x +
3518 (int)hubp->curs_attr.width || pos_cpy.x
3519 <= (int)hubp->curs_attr.width +
3520 pipe_ctx->plane_state->src_rect.x) {
3521 pos_cpy.x = temp_x + viewport_width;
3522 }
3523 }
3524 } else {
3525 pos_cpy.x = viewport_width - pos_cpy.x + 2 * viewport_x;
3526 }
3527 }
3528 }
3529 // Swap axis and mirror horizontally
3530 else if (param.rotation == ROTATION_ANGLE_90) {
3531 uint32_t temp_x = pos_cpy.x;
3532
3533 pos_cpy.x = pipe_ctx->plane_res.scl_data.viewport.width -
3534 (pos_cpy.y - pipe_ctx->plane_res.scl_data.viewport.x) + pipe_ctx->plane_res.scl_data.viewport.x;
3535 pos_cpy.y = temp_x;
3536 }
3537 // Swap axis and mirror vertically
3538 else if (param.rotation == ROTATION_ANGLE_270) {
3539 uint32_t temp_y = pos_cpy.y;
3540 int viewport_height =
3541 pipe_ctx->plane_res.scl_data.viewport.height;
3542 int viewport_y =
3543 pipe_ctx->plane_res.scl_data.viewport.y;
3544
3545 /**
3546 * Display groups that are 1xnY, have pos_cpy.x > 2 * viewport.height
3547 * For pipe split cases:
3548 * - apply offset of viewport.y to normalize pos_cpy.x
3549 * - calculate the pos_cpy.y as before
3550 * - shift pos_cpy.y back by same offset to get final value
3551 * - since we iterate through both pipes, use the lower
3552 * viewport.y for offset
3553 * For non pipe split cases, use the same calculation for
3554 * pos_cpy.y as the 180 degree rotation case below,
3555 * but use pos_cpy.x as our input because we are rotating
3556 * 270 degrees
3557 */
3558 if (pipe_split_on || odm_combine_on) {
3559 int pos_cpy_x_offset;
3560 int other_pipe_viewport_y;
3561
3562 if (pipe_split_on) {
3563 if (pipe_ctx->bottom_pipe) {
3564 other_pipe_viewport_y =
3565 pipe_ctx->bottom_pipe->plane_res.scl_data.viewport.y;
3566 } else {
3567 other_pipe_viewport_y =
3568 pipe_ctx->top_pipe->plane_res.scl_data.viewport.y;
3569 }
3570 } else {
3571 if (pipe_ctx->next_odm_pipe) {
3572 other_pipe_viewport_y =
3573 pipe_ctx->next_odm_pipe->plane_res.scl_data.viewport.y;
3574 } else {
3575 other_pipe_viewport_y =
3576 pipe_ctx->prev_odm_pipe->plane_res.scl_data.viewport.y;
3577 }
3578 }
3579 pos_cpy_x_offset = (viewport_y > other_pipe_viewport_y) ?
3580 other_pipe_viewport_y : viewport_y;
3581 pos_cpy.x -= pos_cpy_x_offset;
3582 if (pos_cpy.x > viewport_height) {
3583 pos_cpy.x = pos_cpy.x - viewport_height;
3584 pos_cpy.y = viewport_height - pos_cpy.x;
3585 } else {
3586 pos_cpy.y = 2 * viewport_height - pos_cpy.x;
3587 }
3588 pos_cpy.y += pos_cpy_x_offset;
3589 } else {
3590 pos_cpy.y = (2 * viewport_y) + viewport_height - pos_cpy.x;
3591 }
3592 pos_cpy.x = temp_y;
3593 }
3594 // Mirror horizontally and vertically
3595 else if (param.rotation == ROTATION_ANGLE_180) {
3596 int viewport_width =
3597 pipe_ctx->plane_res.scl_data.viewport.width;
3598 int viewport_x =
3599 pipe_ctx->plane_res.scl_data.viewport.x;
3600
3601 if (!param.mirror) {
3602 if (pipe_split_on || odm_combine_on) {
3603 if (pos_cpy.x >= viewport_width + viewport_x) {
3604 pos_cpy.x = 2 * viewport_width
3605 - pos_cpy.x + 2 * viewport_x;
3606 } else {
3607 uint32_t temp_x = pos_cpy.x;
3608
3609 pos_cpy.x = 2 * viewport_x - pos_cpy.x;
3610 if (temp_x >= viewport_x +
3611 (int)hubp->curs_attr.width || pos_cpy.x
3612 <= (int)hubp->curs_attr.width +
3613 pipe_ctx->plane_state->src_rect.x) {
3614 pos_cpy.x = 2 * viewport_width - temp_x;
3615 }
3616 }
3617 } else {
3618 pos_cpy.x = viewport_width - pos_cpy.x + 2 * viewport_x;
3619 }
3620 }
3621
3622 /**
3623 * Display groups that are 1xnY, have pos_cpy.y > viewport.height
3624 * Calculation:
3625 * delta_from_bottom = viewport.y + viewport.height - pos_cpy.y
3626 * pos_cpy.y_new = viewport.y + delta_from_bottom
3627 * Simplify it as:
3628 * pos_cpy.y = viewport.y * 2 + viewport.height - pos_cpy.y
3629 */
3630 pos_cpy.y = (2 * pipe_ctx->plane_res.scl_data.viewport.y) +
3631 pipe_ctx->plane_res.scl_data.viewport.height - pos_cpy.y;
3632 }
3633
3634 hubp->funcs->set_cursor_position(hubp, &pos_cpy, ¶m);
3635 dpp->funcs->set_cursor_position(dpp, &pos_cpy, ¶m, hubp->curs_attr.width, hubp->curs_attr.height);
3636 }
3637
dcn10_set_cursor_attribute(struct pipe_ctx * pipe_ctx)3638 void dcn10_set_cursor_attribute(struct pipe_ctx *pipe_ctx)
3639 {
3640 struct dc_cursor_attributes *attributes = &pipe_ctx->stream->cursor_attributes;
3641
3642 pipe_ctx->plane_res.hubp->funcs->set_cursor_attributes(
3643 pipe_ctx->plane_res.hubp, attributes);
3644 pipe_ctx->plane_res.dpp->funcs->set_cursor_attributes(
3645 pipe_ctx->plane_res.dpp, attributes);
3646 }
3647
dcn10_set_cursor_sdr_white_level(struct pipe_ctx * pipe_ctx)3648 void dcn10_set_cursor_sdr_white_level(struct pipe_ctx *pipe_ctx)
3649 {
3650 uint32_t sdr_white_level = pipe_ctx->stream->cursor_attributes.sdr_white_level;
3651 struct fixed31_32 multiplier;
3652 struct dpp_cursor_attributes opt_attr = { 0 };
3653 uint32_t hw_scale = 0x3c00; // 1.0 default multiplier
3654 struct custom_float_format fmt;
3655
3656 if (!pipe_ctx->plane_res.dpp->funcs->set_optional_cursor_attributes)
3657 return;
3658
3659 fmt.exponenta_bits = 5;
3660 fmt.mantissa_bits = 10;
3661 fmt.sign = true;
3662
3663 if (sdr_white_level > 80) {
3664 multiplier = dc_fixpt_from_fraction(sdr_white_level, 80);
3665 convert_to_custom_float_format(multiplier, &fmt, &hw_scale);
3666 }
3667
3668 opt_attr.scale = hw_scale;
3669 opt_attr.bias = 0;
3670
3671 pipe_ctx->plane_res.dpp->funcs->set_optional_cursor_attributes(
3672 pipe_ctx->plane_res.dpp, &opt_attr);
3673 }
3674
3675 /*
3676 * apply_front_porch_workaround TODO FPGA still need?
3677 *
 * This is a workaround for a bug that has existed since R5xx and has not been
 * fixed: keep the front porch at a minimum of 2 for interlaced mode, or 1 for
 * progressive.
3680 */
apply_front_porch_workaround(struct dc_crtc_timing * timing)3681 static void apply_front_porch_workaround(
3682 struct dc_crtc_timing *timing)
3683 {
3684 if (timing->flags.INTERLACE == 1) {
3685 if (timing->v_front_porch < 2)
3686 timing->v_front_porch = 2;
3687 } else {
3688 if (timing->v_front_porch < 1)
3689 timing->v_front_porch = 1;
3690 }
3691 }
3692
dcn10_get_vupdate_offset_from_vsync(struct pipe_ctx * pipe_ctx)3693 int dcn10_get_vupdate_offset_from_vsync(struct pipe_ctx *pipe_ctx)
3694 {
3695 const struct dc_crtc_timing *dc_crtc_timing = &pipe_ctx->stream->timing;
3696 struct dc_crtc_timing patched_crtc_timing;
3697 int vesa_sync_start;
3698 int asic_blank_end;
3699 int interlace_factor;
3700
3701 patched_crtc_timing = *dc_crtc_timing;
3702 apply_front_porch_workaround(&patched_crtc_timing);
3703
3704 interlace_factor = patched_crtc_timing.flags.INTERLACE ? 2 : 1;
3705
3706 vesa_sync_start = patched_crtc_timing.v_addressable +
3707 patched_crtc_timing.v_border_bottom +
3708 patched_crtc_timing.v_front_porch;
3709
3710 asic_blank_end = (patched_crtc_timing.v_total -
3711 vesa_sync_start -
3712 patched_crtc_timing.v_border_top)
3713 * interlace_factor;
3714
3715 return asic_blank_end -
3716 pipe_ctx->pipe_dlg_param.vstartup_start + 1;
3717 }
3718
/*
 * Resolve the VUPDATE interrupt window: wrap the (possibly negative)
 * VUPDATE offset into [0, v_total) and report a two-line window.
 */
void dcn10_calc_vupdate_position(
		struct dc *dc,
		struct pipe_ctx *pipe_ctx,
		uint32_t *start_line,
		uint32_t *end_line)
{
	int v_total = pipe_ctx->stream->timing.v_total;
	int vupdate_pos = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);

	if (vupdate_pos >= 0)
		*start_line = vupdate_pos % v_total;
	else
		*start_line = vupdate_pos + (-vupdate_pos / v_total + 1) * v_total - 1;

	*end_line = (*start_line + 2) % v_total;
}
3734
/*
 * Compute the start/end scanlines for the stream's periodic interrupt.
 * The requested line offset is interpreted relative to either VUPDATE or
 * VSYNC, per periodic_interrupt.ref_point; the result is wrapped into
 * [0, v_total) with a two-line window.
 */
static void dcn10_cal_vline_position(
		struct dc *dc,
		struct pipe_ctx *pipe_ctx,
		uint32_t *start_line,
		uint32_t *end_line)
{
	const struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
	int vline_pos = pipe_ctx->stream->periodic_interrupt.lines_offset;

	if (pipe_ctx->stream->periodic_interrupt.ref_point == START_V_UPDATE) {
		/* Nudge the offset one line toward zero, presumably to
		 * account for VUPDATE itself occupying a line — TODO confirm */
		if (vline_pos > 0)
			vline_pos--;
		else if (vline_pos < 0)
			vline_pos++;

		/* Rebase from VUPDATE-relative to VSYNC-relative */
		vline_pos += dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);
		/* Wrap the (possibly negative) position into [0, v_total) */
		if (vline_pos >= 0)
			*start_line = vline_pos - ((vline_pos / timing->v_total) * timing->v_total);
		else
			*start_line = vline_pos + ((-vline_pos / timing->v_total) + 1) * timing->v_total - 1;
		*end_line = (*start_line + 2) % timing->v_total;
	} else if (pipe_ctx->stream->periodic_interrupt.ref_point == START_V_SYNC) {
		// vsync is line 0 so start_line is just the requested line offset
		*start_line = vline_pos;
		*end_line = (*start_line + 2) % timing->v_total;
	} else
		ASSERT(0);
}
3763
dcn10_setup_periodic_interrupt(struct dc * dc,struct pipe_ctx * pipe_ctx)3764 void dcn10_setup_periodic_interrupt(
3765 struct dc *dc,
3766 struct pipe_ctx *pipe_ctx)
3767 {
3768 struct timing_generator *tg = pipe_ctx->stream_res.tg;
3769 uint32_t start_line = 0;
3770 uint32_t end_line = 0;
3771
3772 dcn10_cal_vline_position(dc, pipe_ctx, &start_line, &end_line);
3773
3774 tg->funcs->setup_vertical_interrupt0(tg, start_line, end_line);
3775 }
3776
dcn10_setup_vupdate_interrupt(struct dc * dc,struct pipe_ctx * pipe_ctx)3777 void dcn10_setup_vupdate_interrupt(struct dc *dc, struct pipe_ctx *pipe_ctx)
3778 {
3779 struct timing_generator *tg = pipe_ctx->stream_res.tg;
3780 int start_line = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);
3781
3782 if (start_line < 0) {
3783 ASSERT(0);
3784 start_line = 0;
3785 }
3786
3787 if (tg->funcs->setup_vertical_interrupt2)
3788 tg->funcs->setup_vertical_interrupt2(tg, start_line);
3789 }
3790
dcn10_unblank_stream(struct pipe_ctx * pipe_ctx,struct dc_link_settings * link_settings)3791 void dcn10_unblank_stream(struct pipe_ctx *pipe_ctx,
3792 struct dc_link_settings *link_settings)
3793 {
3794 struct encoder_unblank_param params = {0};
3795 struct dc_stream_state *stream = pipe_ctx->stream;
3796 struct dc_link *link = stream->link;
3797 struct dce_hwseq *hws = link->dc->hwseq;
3798
3799 /* only 3 items below are used by unblank */
3800 params.timing = pipe_ctx->stream->timing;
3801
3802 params.link_settings.link_rate = link_settings->link_rate;
3803
3804 if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
3805 if (params.timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
3806 params.timing.pix_clk_100hz /= 2;
3807 pipe_ctx->stream_res.stream_enc->funcs->dp_unblank(link, pipe_ctx->stream_res.stream_enc, ¶ms);
3808 }
3809
3810 if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
3811 hws->funcs.edp_backlight_control(link, true);
3812 }
3813 }
3814
dcn10_send_immediate_sdp_message(struct pipe_ctx * pipe_ctx,const uint8_t * custom_sdp_message,unsigned int sdp_message_size)3815 void dcn10_send_immediate_sdp_message(struct pipe_ctx *pipe_ctx,
3816 const uint8_t *custom_sdp_message,
3817 unsigned int sdp_message_size)
3818 {
3819 if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
3820 pipe_ctx->stream_res.stream_enc->funcs->send_immediate_sdp_message(
3821 pipe_ctx->stream_res.stream_enc,
3822 custom_sdp_message,
3823 sdp_message_size);
3824 }
3825 }
/*
 * Validate a requested clock against the clock manager's limits, record it
 * in the current context, and ask the clock manager to apply it.
 * Returns a DC_FAIL_* status when the request is out of range or the clock
 * type is not DISPCLK/DPPCLK.
 */
enum dc_status dcn10_set_clock(struct dc *dc,
		enum dc_clock_type clock_type,
		uint32_t clk_khz,
		uint32_t stepping)
{
	struct dc_state *context = dc->current_state;
	struct dc_clock_config clock_cfg = {0};
	struct dc_clocks *current_clocks = &context->bw_ctx.bw.dcn.clk;

	if (!dc->clk_mgr || !dc->clk_mgr->funcs->get_clock)
		return DC_FAIL_UNSUPPORTED_1;

	dc->clk_mgr->funcs->get_clock(dc->clk_mgr,
			context, clock_type, &clock_cfg);

	/* Reject requests outside the clock manager's reported limits */
	if (clk_khz > clock_cfg.max_clock_khz)
		return DC_FAIL_CLK_EXCEED_MAX;

	if (clk_khz < clock_cfg.min_clock_khz)
		return DC_FAIL_CLK_BELOW_MIN;

	if (clk_khz < clock_cfg.bw_requirequired_clock_khz)
		return DC_FAIL_CLK_BELOW_CFG_REQUIRED;

	/* Record the request so the subsequent clock update applies it */
	switch (clock_type) {
	case DC_CLOCK_TYPE_DISPCLK:
		current_clocks->dispclk_khz = clk_khz;
		break;
	case DC_CLOCK_TYPE_DPPCLK:
		current_clocks->dppclk_khz = clk_khz;
		break;
	default:
		return DC_ERROR_UNEXPECTED;
	}

	if (dc->clk_mgr->funcs->update_clocks)
		dc->clk_mgr->funcs->update_clocks(dc->clk_mgr,
				context, true);

	return DC_OK;
}
3864
dcn10_get_clock(struct dc * dc,enum dc_clock_type clock_type,struct dc_clock_config * clock_cfg)3865 void dcn10_get_clock(struct dc *dc,
3866 enum dc_clock_type clock_type,
3867 struct dc_clock_config *clock_cfg)
3868 {
3869 struct dc_state *context = dc->current_state;
3870
3871 if (dc->clk_mgr && dc->clk_mgr->funcs->get_clock)
3872 dc->clk_mgr->funcs->get_clock(dc->clk_mgr, context, clock_type, clock_cfg);
3873
3874 }
3875
dcn10_get_dcc_en_bits(struct dc * dc,int * dcc_en_bits)3876 void dcn10_get_dcc_en_bits(struct dc *dc, int *dcc_en_bits)
3877 {
3878 struct resource_pool *pool = dc->res_pool;
3879 int i;
3880
3881 for (i = 0; i < pool->pipe_count; i++) {
3882 struct hubp *hubp = pool->hubps[i];
3883 struct dcn_hubp_state *s = &(TO_DCN10_HUBP(hubp)->state);
3884
3885 hubp->funcs->hubp_read_state(hubp);
3886
3887 if (!s->blank_en)
3888 dcc_en_bits[i] = s->dcc_en ? 1 : 0;
3889 }
3890 }
3891