1 /*
2  * Copyright 2016 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  *
24  */
25 
26 #include <linux/delay.h>
27 #include "dm_services.h"
28 #include "basics/dc_common.h"
29 #include "core_types.h"
30 #include "resource.h"
31 #include "custom_float.h"
32 #include "dcn10_hw_sequencer.h"
33 #include "dcn10_hw_sequencer_debug.h"
34 #include "dce/dce_hwseq.h"
35 #include "abm.h"
36 #include "dmcu.h"
37 #include "dcn10_optc.h"
38 #include "dcn10_dpp.h"
39 #include "dcn10_mpc.h"
40 #include "timing_generator.h"
41 #include "opp.h"
42 #include "ipp.h"
43 #include "mpc.h"
44 #include "reg_helper.h"
45 #include "dcn10_hubp.h"
46 #include "dcn10_hubbub.h"
47 #include "dcn10_cm_common.h"
48 #include "dc_link_dp.h"
49 #include "dccg.h"
50 #include "clk_mgr.h"
51 #include "link_hwss.h"
52 #include "dpcd_defs.h"
53 #include "dsc.h"
54 #include "dce/dmub_psr.h"
55 #include "dc_dmub_srv.h"
56 #include "dce/dmub_hw_lock_mgr.h"
57 #include "dc_trace.h"
58 #include "dce/dmub_outbox.h"
59 #include "inc/dc_link_dp.h"
60 #include "inc/link_dpcd.h"
61 
62 #define DC_LOGGER_INIT(logger)
63 
64 #define CTX \
65 	hws->ctx
66 #define REG(reg)\
67 	hws->regs->reg
68 
69 #undef FN
70 #define FN(reg_name, field_name) \
71 	hws->shifts->field_name, hws->masks->field_name
72 
73 /*print is 17 wide, first two characters are spaces*/
74 #define DTN_INFO_MICRO_SEC(ref_cycle) \
75 	print_microsec(dc_ctx, log_ctx, ref_cycle)
76 
77 #define GAMMA_HW_POINTS_NUM 256
78 
79 #define PGFSM_POWER_ON 0
80 #define PGFSM_POWER_OFF 2
81 
static void print_microsec(struct dc_context *dc_ctx,
83 			   struct dc_log_buffer_ctx *log_ctx,
84 			   uint32_t ref_cycle)
85 {
86 	const uint32_t ref_clk_mhz = dc_ctx->dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000;
87 	static const unsigned int frac = 1000;
88 	uint32_t us_x10 = (ref_cycle * frac) / ref_clk_mhz;
89 
90 	DTN_INFO("  %11d.%03d",
91 			us_x10 / frac,
92 			us_x10 % frac);
93 }
94 
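/*
 * dcn10_lock_all_pipes - Lock or unlock the timing generator of every active
 * top pipe in the given state, depending on @lock.
 */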
void dcn10_lock_all_pipes(struct dc *dc,
96 	struct dc_state *context,
97 	bool lock)
98 {
99 	struct pipe_ctx *pipe_ctx;
100 	struct timing_generator *tg;
101 	int i;
102 
103 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
104 		pipe_ctx = &context->res_ctx.pipe_ctx[i];
105 		tg = pipe_ctx->stream_res.tg;
106 
107 		/*
108 		 * Only lock the top pipe's tg to prevent redundant
109 		 * (un)locking. Also skip if pipe is disabled.
110 		 */
111 		if (pipe_ctx->top_pipe ||
112 		    !pipe_ctx->stream ||
113 		    !pipe_ctx->plane_state ||
114 		    !tg->funcs->is_tg_enabled(tg))
115 			continue;
116 
117 		if (lock)
118 			dc->hwss.pipe_control_lock(dc, pipe_ctx, true);
119 		else
120 			dc->hwss.pipe_control_lock(dc, pipe_ctx, false);
121 	}
122 }
123 
static void log_mpc_crc(struct dc *dc,
125 	struct dc_log_buffer_ctx *log_ctx)
126 {
127 	struct dc_context *dc_ctx = dc->ctx;
128 	struct dce_hwseq *hws = dc->hwseq;
129 
130 	if (REG(MPC_CRC_RESULT_GB))
131 		DTN_INFO("MPC_CRC_RESULT_GB:%d MPC_CRC_RESULT_C:%d MPC_CRC_RESULT_AR:%d\n",
132 		REG_READ(MPC_CRC_RESULT_GB), REG_READ(MPC_CRC_RESULT_C), REG_READ(MPC_CRC_RESULT_AR));
133 	if (REG(DPP_TOP0_DPP_CRC_VAL_B_A))
134 		DTN_INFO("DPP_TOP0_DPP_CRC_VAL_B_A:%d DPP_TOP0_DPP_CRC_VAL_R_G:%d\n",
135 		REG_READ(DPP_TOP0_DPP_CRC_VAL_B_A), REG_READ(DPP_TOP0_DPP_CRC_VAL_R_G));
136 }
137 
static void dcn10_log_hubbub_state(struct dc *dc,
139 				   struct dc_log_buffer_ctx *log_ctx)
140 {
141 	struct dc_context *dc_ctx = dc->ctx;
142 	struct dcn_hubbub_wm wm;
143 	int i;
144 
145 	memset(&wm, 0, sizeof(struct dcn_hubbub_wm));
146 	dc->res_pool->hubbub->funcs->wm_read_state(dc->res_pool->hubbub, &wm);
147 
148 	DTN_INFO("HUBBUB WM:      data_urgent  pte_meta_urgent"
149 			"         sr_enter          sr_exit  dram_clk_change\n");
150 
151 	for (i = 0; i < 4; i++) {
152 		struct dcn_hubbub_wm_set *s;
153 
154 		s = &wm.sets[i];
155 		DTN_INFO("WM_Set[%d]:", s->wm_set);
156 		DTN_INFO_MICRO_SEC(s->data_urgent);
157 		DTN_INFO_MICRO_SEC(s->pte_meta_urgent);
158 		DTN_INFO_MICRO_SEC(s->sr_enter);
159 		DTN_INFO_MICRO_SEC(s->sr_exit);
160 		DTN_INFO_MICRO_SEC(s->dram_clk_chanage);
161 		DTN_INFO("\n");
162 	}
163 
164 	DTN_INFO("\n");
165 }
166 
static void dcn10_log_hubp_states(struct dc *dc, void *log_ctx)
168 {
169 	struct dc_context *dc_ctx = dc->ctx;
170 	struct resource_pool *pool = dc->res_pool;
171 	int i;
172 
173 	DTN_INFO(
174 		"HUBP:  format  addr_hi  width  height  rot  mir  sw_mode  dcc_en  blank_en  clock_en  ttu_dis  underflow   min_ttu_vblank       qos_low_wm      qos_high_wm\n");
175 	for (i = 0; i < pool->pipe_count; i++) {
176 		struct hubp *hubp = pool->hubps[i];
177 		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(hubp)->state);
178 
179 		hubp->funcs->hubp_read_state(hubp);
180 
181 		if (!s->blank_en) {
182 			DTN_INFO("[%2d]:  %5xh  %6xh  %5d  %6d  %2xh  %2xh  %6xh  %6d  %8d  %8d  %7d  %8xh",
183 					hubp->inst,
184 					s->pixel_format,
185 					s->inuse_addr_hi,
186 					s->viewport_width,
187 					s->viewport_height,
188 					s->rotation_angle,
189 					s->h_mirror_en,
190 					s->sw_mode,
191 					s->dcc_en,
192 					s->blank_en,
193 					s->clock_en,
194 					s->ttu_disable,
195 					s->underflow_status);
196 			DTN_INFO_MICRO_SEC(s->min_ttu_vblank);
197 			DTN_INFO_MICRO_SEC(s->qos_level_low_wm);
198 			DTN_INFO_MICRO_SEC(s->qos_level_high_wm);
199 			DTN_INFO("\n");
200 		}
201 	}
202 
203 	DTN_INFO("\n=========RQ========\n");
204 	DTN_INFO("HUBP:  drq_exp_m  prq_exp_m  mrq_exp_m  crq_exp_m  plane1_ba  L:chunk_s  min_chu_s  meta_ch_s"
205 		"  min_m_c_s  dpte_gr_s  mpte_gr_s  swath_hei  pte_row_h  C:chunk_s  min_chu_s  meta_ch_s"
206 		"  min_m_c_s  dpte_gr_s  mpte_gr_s  swath_hei  pte_row_h\n");
207 	for (i = 0; i < pool->pipe_count; i++) {
208 		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
209 		struct _vcs_dpi_display_rq_regs_st *rq_regs = &s->rq_regs;
210 
211 		if (!s->blank_en)
212 			DTN_INFO("[%2d]:  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh\n",
213 				pool->hubps[i]->inst, rq_regs->drq_expansion_mode, rq_regs->prq_expansion_mode, rq_regs->mrq_expansion_mode,
214 				rq_regs->crq_expansion_mode, rq_regs->plane1_base_address, rq_regs->rq_regs_l.chunk_size,
215 				rq_regs->rq_regs_l.min_chunk_size, rq_regs->rq_regs_l.meta_chunk_size,
216 				rq_regs->rq_regs_l.min_meta_chunk_size, rq_regs->rq_regs_l.dpte_group_size,
217 				rq_regs->rq_regs_l.mpte_group_size, rq_regs->rq_regs_l.swath_height,
218 				rq_regs->rq_regs_l.pte_row_height_linear, rq_regs->rq_regs_c.chunk_size, rq_regs->rq_regs_c.min_chunk_size,
219 				rq_regs->rq_regs_c.meta_chunk_size, rq_regs->rq_regs_c.min_meta_chunk_size,
220 				rq_regs->rq_regs_c.dpte_group_size, rq_regs->rq_regs_c.mpte_group_size,
221 				rq_regs->rq_regs_c.swath_height, rq_regs->rq_regs_c.pte_row_height_linear);
222 	}
223 
224 	DTN_INFO("========DLG========\n");
225 	DTN_INFO("HUBP:  rc_hbe     dlg_vbe    min_d_y_n  rc_per_ht  rc_x_a_s "
226 			"  dst_y_a_s  dst_y_pf   dst_y_vvb  dst_y_rvb  dst_y_vfl  dst_y_rfl  rf_pix_fq"
227 			"  vratio_pf  vrat_pf_c  rc_pg_vbl  rc_pg_vbc  rc_mc_vbl  rc_mc_vbc  rc_pg_fll"
228 			"  rc_pg_flc  rc_mc_fll  rc_mc_flc  pr_nom_l   pr_nom_c   rc_pg_nl   rc_pg_nc "
229 			"  mr_nom_l   mr_nom_c   rc_mc_nl   rc_mc_nc   rc_ld_pl   rc_ld_pc   rc_ld_l  "
230 			"  rc_ld_c    cha_cur0   ofst_cur1  cha_cur1   vr_af_vc0  ddrq_limt  x_rt_dlay"
231 			"  x_rp_dlay  x_rr_sfl\n");
232 	for (i = 0; i < pool->pipe_count; i++) {
233 		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
234 		struct _vcs_dpi_display_dlg_regs_st *dlg_regs = &s->dlg_attr;
235 
236 		if (!s->blank_en)
237 			DTN_INFO("[%2d]:  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh"
238 				"  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh"
239 				"  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh\n",
240 				pool->hubps[i]->inst, dlg_regs->refcyc_h_blank_end, dlg_regs->dlg_vblank_end, dlg_regs->min_dst_y_next_start,
241 				dlg_regs->refcyc_per_htotal, dlg_regs->refcyc_x_after_scaler, dlg_regs->dst_y_after_scaler,
242 				dlg_regs->dst_y_prefetch, dlg_regs->dst_y_per_vm_vblank, dlg_regs->dst_y_per_row_vblank,
243 				dlg_regs->dst_y_per_vm_flip, dlg_regs->dst_y_per_row_flip, dlg_regs->ref_freq_to_pix_freq,
244 				dlg_regs->vratio_prefetch, dlg_regs->vratio_prefetch_c, dlg_regs->refcyc_per_pte_group_vblank_l,
245 				dlg_regs->refcyc_per_pte_group_vblank_c, dlg_regs->refcyc_per_meta_chunk_vblank_l,
246 				dlg_regs->refcyc_per_meta_chunk_vblank_c, dlg_regs->refcyc_per_pte_group_flip_l,
247 				dlg_regs->refcyc_per_pte_group_flip_c, dlg_regs->refcyc_per_meta_chunk_flip_l,
248 				dlg_regs->refcyc_per_meta_chunk_flip_c, dlg_regs->dst_y_per_pte_row_nom_l,
249 				dlg_regs->dst_y_per_pte_row_nom_c, dlg_regs->refcyc_per_pte_group_nom_l,
250 				dlg_regs->refcyc_per_pte_group_nom_c, dlg_regs->dst_y_per_meta_row_nom_l,
251 				dlg_regs->dst_y_per_meta_row_nom_c, dlg_regs->refcyc_per_meta_chunk_nom_l,
252 				dlg_regs->refcyc_per_meta_chunk_nom_c, dlg_regs->refcyc_per_line_delivery_pre_l,
253 				dlg_regs->refcyc_per_line_delivery_pre_c, dlg_regs->refcyc_per_line_delivery_l,
254 				dlg_regs->refcyc_per_line_delivery_c, dlg_regs->chunk_hdl_adjust_cur0, dlg_regs->dst_y_offset_cur1,
255 				dlg_regs->chunk_hdl_adjust_cur1, dlg_regs->vready_after_vcount0, dlg_regs->dst_y_delta_drq_limit,
256 				dlg_regs->xfc_reg_transfer_delay, dlg_regs->xfc_reg_precharge_delay,
257 				dlg_regs->xfc_reg_remote_surface_flip_latency);
258 	}
259 
260 	DTN_INFO("========TTU========\n");
261 	DTN_INFO("HUBP:  qos_ll_wm  qos_lh_wm  mn_ttu_vb  qos_l_flp  rc_rd_p_l  rc_rd_l    rc_rd_p_c"
262 			"  rc_rd_c    rc_rd_c0   rc_rd_pc0  rc_rd_c1   rc_rd_pc1  qos_lf_l   qos_rds_l"
263 			"  qos_lf_c   qos_rds_c  qos_lf_c0  qos_rds_c0 qos_lf_c1  qos_rds_c1\n");
264 	for (i = 0; i < pool->pipe_count; i++) {
265 		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
266 		struct _vcs_dpi_display_ttu_regs_st *ttu_regs = &s->ttu_attr;
267 
268 		if (!s->blank_en)
269 			DTN_INFO("[%2d]:  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh\n",
270 				pool->hubps[i]->inst, ttu_regs->qos_level_low_wm, ttu_regs->qos_level_high_wm, ttu_regs->min_ttu_vblank,
271 				ttu_regs->qos_level_flip, ttu_regs->refcyc_per_req_delivery_pre_l, ttu_regs->refcyc_per_req_delivery_l,
272 				ttu_regs->refcyc_per_req_delivery_pre_c, ttu_regs->refcyc_per_req_delivery_c, ttu_regs->refcyc_per_req_delivery_cur0,
273 				ttu_regs->refcyc_per_req_delivery_pre_cur0, ttu_regs->refcyc_per_req_delivery_cur1,
274 				ttu_regs->refcyc_per_req_delivery_pre_cur1, ttu_regs->qos_level_fixed_l, ttu_regs->qos_ramp_disable_l,
275 				ttu_regs->qos_level_fixed_c, ttu_regs->qos_ramp_disable_c, ttu_regs->qos_level_fixed_cur0,
276 				ttu_regs->qos_ramp_disable_cur0, ttu_regs->qos_level_fixed_cur1, ttu_regs->qos_ramp_disable_cur1);
277 	}
278 	DTN_INFO("\n");
279 }
280 
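/*
 * dcn10_log_hw_state - Dump the current HUBBUB, HUBP, DPP, MPCC, OTG, DSC,
 * stream/link encoder and calculated clock state into the DTN log buffer.
 */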
void dcn10_log_hw_state(struct dc *dc,
282 	struct dc_log_buffer_ctx *log_ctx)
283 {
284 	struct dc_context *dc_ctx = dc->ctx;
285 	struct resource_pool *pool = dc->res_pool;
286 	int i;
287 
288 	DTN_INFO_BEGIN();
289 
290 	dcn10_log_hubbub_state(dc, log_ctx);
291 
292 	dcn10_log_hubp_states(dc, log_ctx);
293 
294 	DTN_INFO("DPP:    IGAM format  IGAM mode    DGAM mode    RGAM mode"
295 			"  GAMUT mode  C11 C12   C13 C14   C21 C22   C23 C24   "
296 			"C31 C32   C33 C34\n");
297 	for (i = 0; i < pool->pipe_count; i++) {
298 		struct dpp *dpp = pool->dpps[i];
299 		struct dcn_dpp_state s = {0};
300 
301 		dpp->funcs->dpp_read_state(dpp, &s);
302 
303 		if (!s.is_enabled)
304 			continue;
305 
306 		DTN_INFO("[%2d]:  %11xh  %-11s  %-11s  %-11s"
307 				"%8x    %08xh %08xh %08xh %08xh %08xh %08xh",
308 				dpp->inst,
309 				s.igam_input_format,
310 				(s.igam_lut_mode == 0) ? "BypassFixed" :
311 					((s.igam_lut_mode == 1) ? "BypassFloat" :
312 					((s.igam_lut_mode == 2) ? "RAM" :
313 					((s.igam_lut_mode == 3) ? "RAM" :
314 								 "Unknown"))),
315 				(s.dgam_lut_mode == 0) ? "Bypass" :
316 					((s.dgam_lut_mode == 1) ? "sRGB" :
317 					((s.dgam_lut_mode == 2) ? "Ycc" :
318 					((s.dgam_lut_mode == 3) ? "RAM" :
319 					((s.dgam_lut_mode == 4) ? "RAM" :
320 								 "Unknown")))),
321 				(s.rgam_lut_mode == 0) ? "Bypass" :
322 					((s.rgam_lut_mode == 1) ? "sRGB" :
323 					((s.rgam_lut_mode == 2) ? "Ycc" :
324 					((s.rgam_lut_mode == 3) ? "RAM" :
325 					((s.rgam_lut_mode == 4) ? "RAM" :
326 								 "Unknown")))),
327 				s.gamut_remap_mode,
328 				s.gamut_remap_c11_c12,
329 				s.gamut_remap_c13_c14,
330 				s.gamut_remap_c21_c22,
331 				s.gamut_remap_c23_c24,
332 				s.gamut_remap_c31_c32,
333 				s.gamut_remap_c33_c34);
334 		DTN_INFO("\n");
335 	}
336 	DTN_INFO("\n");
337 
338 	DTN_INFO("MPCC:  OPP  DPP  MPCCBOT  MODE  ALPHA_MODE  PREMULT  OVERLAP_ONLY  IDLE\n");
339 	for (i = 0; i < pool->pipe_count; i++) {
340 		struct mpcc_state s = {0};
341 
342 		pool->mpc->funcs->read_mpcc_state(pool->mpc, i, &s);
343 		if (s.opp_id != 0xf)
344 			DTN_INFO("[%2d]:  %2xh  %2xh  %6xh  %4d  %10d  %7d  %12d  %4d\n",
345 				i, s.opp_id, s.dpp_id, s.bot_mpcc_id,
346 				s.mode, s.alpha_mode, s.pre_multiplied_alpha, s.overlap_only,
347 				s.idle);
348 	}
349 	DTN_INFO("\n");
350 
351 	DTN_INFO("OTG:  v_bs  v_be  v_ss  v_se  vpol  vmax  vmin  vmax_sel  vmin_sel  h_bs  h_be  h_ss  h_se  hpol  htot  vtot  underflow blank_en\n");
352 
353 	for (i = 0; i < pool->timing_generator_count; i++) {
354 		struct timing_generator *tg = pool->timing_generators[i];
355 		struct dcn_otg_state s = {0};
356 		/* Read shared OTG state registers for all DCNx */
357 		optc1_read_otg_state(DCN10TG_FROM_TG(tg), &s);
358 
359 		/*
360 		 * For DCN2 and greater, a register on the OPP is used to
361 		 * determine if the CRTC is blanked instead of the OTG. So use
362 		 * dpg_is_blanked() if exists, otherwise fallback on otg.
363 		 *
364 		 * TODO: Implement DCN-specific read_otg_state hooks.
365 		 */
366 		if (pool->opps[i]->funcs->dpg_is_blanked)
367 			s.blank_enabled = pool->opps[i]->funcs->dpg_is_blanked(pool->opps[i]);
368 		else
369 			s.blank_enabled = tg->funcs->is_blanked(tg);
370 
371 		//only print if OTG master is enabled
372 		if ((s.otg_enabled & 1) == 0)
373 			continue;
374 
375 		DTN_INFO("[%d]: %5d %5d %5d %5d %5d %5d %5d %9d %9d %5d %5d %5d %5d %5d %5d %5d  %9d %8d\n",
376 				tg->inst,
377 				s.v_blank_start,
378 				s.v_blank_end,
379 				s.v_sync_a_start,
380 				s.v_sync_a_end,
381 				s.v_sync_a_pol,
382 				s.v_total_max,
383 				s.v_total_min,
384 				s.v_total_max_sel,
385 				s.v_total_min_sel,
386 				s.h_blank_start,
387 				s.h_blank_end,
388 				s.h_sync_a_start,
389 				s.h_sync_a_end,
390 				s.h_sync_a_pol,
391 				s.h_total,
392 				s.v_total,
393 				s.underflow_occurred_status,
394 				s.blank_enabled);
395 
396 		// Clear underflow for debug purposes
397 		// We want to keep underflow sticky bit on for the longevity tests outside of test environment.
398 		// This function is called only from Windows or Diags test environment, hence it's safe to clear
399 		// it from here without affecting the original intent.
400 		tg->funcs->clear_optc_underflow(tg);
401 	}
402 	DTN_INFO("\n");
403 
404 	// dcn_dsc_state struct field bytes_per_pixel was renamed to bits_per_pixel
405 	// TODO: Update golden log header to reflect this name change
406 	DTN_INFO("DSC: CLOCK_EN  SLICE_WIDTH  Bytes_pp\n");
407 	for (i = 0; i < pool->res_cap->num_dsc; i++) {
408 		struct display_stream_compressor *dsc = pool->dscs[i];
409 		struct dcn_dsc_state s = {0};
410 
411 		dsc->funcs->dsc_read_state(dsc, &s);
412 		DTN_INFO("[%d]: %-9d %-12d %-10d\n",
413 		dsc->inst,
414 			s.dsc_clock_en,
415 			s.dsc_slice_width,
416 			s.dsc_bits_per_pixel);
417 		DTN_INFO("\n");
418 	}
419 	DTN_INFO("\n");
420 
421 	DTN_INFO("S_ENC: DSC_MODE  SEC_GSP7_LINE_NUM"
422 			"  VBID6_LINE_REFERENCE  VBID6_LINE_NUM  SEC_GSP7_ENABLE  SEC_STREAM_ENABLE\n");
423 	for (i = 0; i < pool->stream_enc_count; i++) {
424 		struct stream_encoder *enc = pool->stream_enc[i];
425 		struct enc_state s = {0};
426 
427 		if (enc->funcs->enc_read_state) {
428 			enc->funcs->enc_read_state(enc, &s);
429 			DTN_INFO("[%-3d]: %-9d %-18d %-21d %-15d %-16d %-17d\n",
430 				enc->id,
431 				s.dsc_mode,
432 				s.sec_gsp_pps_line_num,
433 				s.vbid6_line_reference,
434 				s.vbid6_line_num,
435 				s.sec_gsp_pps_enable,
436 				s.sec_stream_enable);
437 			DTN_INFO("\n");
438 		}
439 	}
440 	DTN_INFO("\n");
441 
442 	DTN_INFO("L_ENC: DPHY_FEC_EN  DPHY_FEC_READY_SHADOW  DPHY_FEC_ACTIVE_STATUS  DP_LINK_TRAINING_COMPLETE\n");
443 	for (i = 0; i < dc->link_count; i++) {
444 		struct link_encoder *lenc = dc->links[i]->link_enc;
445 
446 		struct link_enc_state s = {0};
447 
448 		if (lenc && lenc->funcs->read_state) {
449 			lenc->funcs->read_state(lenc, &s);
450 			DTN_INFO("[%-3d]: %-12d %-22d %-22d %-25d\n",
451 				i,
452 				s.dphy_fec_en,
453 				s.dphy_fec_ready_shadow,
454 				s.dphy_fec_active_status,
455 				s.dp_link_training_complete);
456 			DTN_INFO("\n");
457 		}
458 	}
459 	DTN_INFO("\n");
460 
461 	DTN_INFO("\nCALCULATED Clocks: dcfclk_khz:%d  dcfclk_deep_sleep_khz:%d  dispclk_khz:%d\n"
462 		"dppclk_khz:%d  max_supported_dppclk_khz:%d  fclk_khz:%d  socclk_khz:%d\n\n",
463 			dc->current_state->bw_ctx.bw.dcn.clk.dcfclk_khz,
464 			dc->current_state->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz,
465 			dc->current_state->bw_ctx.bw.dcn.clk.dispclk_khz,
466 			dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz,
467 			dc->current_state->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz,
468 			dc->current_state->bw_ctx.bw.dcn.clk.fclk_khz,
469 			dc->current_state->bw_ctx.bw.dcn.clk.socclk_khz);
470 
471 	log_mpc_crc(dc, log_ctx);
472 
473 	{
474 		if (pool->hpo_dp_stream_enc_count > 0) {
475 			DTN_INFO("DP HPO S_ENC:  Enabled  OTG   Format   Depth   Vid   SDP   Compressed  Link\n");
476 			for (i = 0; i < pool->hpo_dp_stream_enc_count; i++) {
477 				struct hpo_dp_stream_encoder_state hpo_dp_se_state = {0};
478 				struct hpo_dp_stream_encoder *hpo_dp_stream_enc = pool->hpo_dp_stream_enc[i];
479 
480 				if (hpo_dp_stream_enc && hpo_dp_stream_enc->funcs->read_state) {
481 					hpo_dp_stream_enc->funcs->read_state(hpo_dp_stream_enc, &hpo_dp_se_state);
482 
483 					DTN_INFO("[%d]:                 %d    %d   %6s       %d     %d     %d            %d     %d\n",
484 							hpo_dp_stream_enc->id - ENGINE_ID_HPO_DP_0,
485 							hpo_dp_se_state.stream_enc_enabled,
486 							hpo_dp_se_state.otg_inst,
487 							(hpo_dp_se_state.pixel_encoding == 0) ? "4:4:4" :
488 									((hpo_dp_se_state.pixel_encoding == 1) ? "4:2:2" :
489 									(hpo_dp_se_state.pixel_encoding == 2) ? "4:2:0" : "Y-Only"),
490 							(hpo_dp_se_state.component_depth == 0) ? 6 :
491 									((hpo_dp_se_state.component_depth == 1) ? 8 :
492 									(hpo_dp_se_state.component_depth == 2) ? 10 : 12),
493 							hpo_dp_se_state.vid_stream_enabled,
494 							hpo_dp_se_state.sdp_enabled,
495 							hpo_dp_se_state.compressed_format,
496 							hpo_dp_se_state.mapped_to_link_enc);
497 				}
498 			}
499 
500 			DTN_INFO("\n");
501 		}
502 
503 		/* log DP HPO L_ENC section if any hpo_dp_link_enc exists */
504 		if (pool->hpo_dp_link_enc_count) {
505 			DTN_INFO("DP HPO L_ENC:  Enabled  Mode   Lanes   Stream  Slots   VC Rate X    VC Rate Y\n");
506 
507 			for (i = 0; i < pool->hpo_dp_link_enc_count; i++) {
508 				struct hpo_dp_link_encoder *hpo_dp_link_enc = pool->hpo_dp_link_enc[i];
509 				struct hpo_dp_link_enc_state hpo_dp_le_state = {0};
510 
511 				if (hpo_dp_link_enc->funcs->read_state) {
512 					hpo_dp_link_enc->funcs->read_state(hpo_dp_link_enc, &hpo_dp_le_state);
513 					DTN_INFO("[%d]:                 %d  %6s     %d        %d      %d     %d     %d\n",
514 							hpo_dp_link_enc->inst,
515 							hpo_dp_le_state.link_enc_enabled,
516 							(hpo_dp_le_state.link_mode == 0) ? "TPS1" :
517 									(hpo_dp_le_state.link_mode == 1) ? "TPS2" :
518 									(hpo_dp_le_state.link_mode == 2) ? "ACTIVE" : "TEST",
519 							hpo_dp_le_state.lane_count,
520 							hpo_dp_le_state.stream_src[0],
521 							hpo_dp_le_state.slot_count[0],
522 							hpo_dp_le_state.vc_rate_x[0],
523 							hpo_dp_le_state.vc_rate_y[0]);
524 					DTN_INFO("\n");
525 				}
526 			}
527 
528 			DTN_INFO("\n");
529 		}
530 	}
531 
532 	DTN_INFO_END();
533 }
534 
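/*
 * dcn10_did_underflow_occur - Check and clear the OPTC and HUBP underflow
 * status for the given pipe. Returns true if either block reported underflow.
 */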
bool dcn10_did_underflow_occur(struct dc *dc, struct pipe_ctx *pipe_ctx)
536 {
537 	struct hubp *hubp = pipe_ctx->plane_res.hubp;
538 	struct timing_generator *tg = pipe_ctx->stream_res.tg;
539 
540 	if (tg->funcs->is_optc_underflow_occurred(tg)) {
541 		tg->funcs->clear_optc_underflow(tg);
542 		return true;
543 	}
544 
545 	if (hubp->funcs->hubp_get_underflow_status(hubp)) {
546 		hubp->funcs->hubp_clear_underflow(hubp);
547 		return true;
548 	}
549 	return false;
550 }
551 
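/*
 * dcn10_enable_power_gating_plane - Allow (@enable true) or forbid power
 * gating of the DCHUBP and DPP domains; when forbidden the domains are
 * forced on.
 */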
void dcn10_enable_power_gating_plane(
553 	struct dce_hwseq *hws,
554 	bool enable)
555 {
556 	bool force_on = true; /* disable power gating */
557 
558 	if (enable)
559 		force_on = false;
560 
561 	/* DCHUBP0/1/2/3 */
562 	REG_UPDATE(DOMAIN0_PG_CONFIG, DOMAIN0_POWER_FORCEON, force_on);
563 	REG_UPDATE(DOMAIN2_PG_CONFIG, DOMAIN2_POWER_FORCEON, force_on);
564 	REG_UPDATE(DOMAIN4_PG_CONFIG, DOMAIN4_POWER_FORCEON, force_on);
565 	REG_UPDATE(DOMAIN6_PG_CONFIG, DOMAIN6_POWER_FORCEON, force_on);
566 
567 	/* DPP0/1/2/3 */
568 	REG_UPDATE(DOMAIN1_PG_CONFIG, DOMAIN1_POWER_FORCEON, force_on);
569 	REG_UPDATE(DOMAIN3_PG_CONFIG, DOMAIN3_POWER_FORCEON, force_on);
570 	REG_UPDATE(DOMAIN5_PG_CONFIG, DOMAIN5_POWER_FORCEON, force_on);
571 	REG_UPDATE(DOMAIN7_PG_CONFIG, DOMAIN7_POWER_FORCEON, force_on);
572 }
573 
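/*
 * dcn10_disable_vga - Take all display controllers out of VGA mode and kick
 * off the VGA_TEST sequence so the DCHUBP timing is updated correctly.
 */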
void dcn10_disable_vga(
575 	struct dce_hwseq *hws)
576 {
577 	unsigned int in_vga1_mode = 0;
578 	unsigned int in_vga2_mode = 0;
579 	unsigned int in_vga3_mode = 0;
580 	unsigned int in_vga4_mode = 0;
581 
582 	REG_GET(D1VGA_CONTROL, D1VGA_MODE_ENABLE, &in_vga1_mode);
583 	REG_GET(D2VGA_CONTROL, D2VGA_MODE_ENABLE, &in_vga2_mode);
584 	REG_GET(D3VGA_CONTROL, D3VGA_MODE_ENABLE, &in_vga3_mode);
585 	REG_GET(D4VGA_CONTROL, D4VGA_MODE_ENABLE, &in_vga4_mode);
586 
587 	if (in_vga1_mode == 0 && in_vga2_mode == 0 &&
588 			in_vga3_mode == 0 && in_vga4_mode == 0)
589 		return;
590 
591 	REG_WRITE(D1VGA_CONTROL, 0);
592 	REG_WRITE(D2VGA_CONTROL, 0);
593 	REG_WRITE(D3VGA_CONTROL, 0);
594 	REG_WRITE(D4VGA_CONTROL, 0);
595 
596 	/* HW Engineer's Notes:
597 	 *  During switch from vga->extended, if we set the VGA_TEST_ENABLE and
598 	 *  then hit the VGA_TEST_RENDER_START, then the DCHUBP timing gets updated correctly.
599 	 *
600 	 *  Then vBIOS will have it poll for the VGA_TEST_RENDER_DONE and unset
601 	 *  VGA_TEST_ENABLE, to leave it in the same state as before.
602 	 */
603 	REG_UPDATE(VGA_TEST_CONTROL, VGA_TEST_ENABLE, 1);
604 	REG_UPDATE(VGA_TEST_CONTROL, VGA_TEST_RENDER_START, 1);
605 }
606 
607 /**
608  * dcn10_dpp_pg_control - DPP power gate control.
609  *
610  * @hws: dce_hwseq reference.
611  * @dpp_inst: DPP instance reference.
 * @power_on: true to power the DPP on (un-gate it), false to power gate it.
613  *
614  * Enable or disable power gate in the specific DPP instance.
615  */
void dcn10_dpp_pg_control(
617 		struct dce_hwseq *hws,
618 		unsigned int dpp_inst,
619 		bool power_on)
620 {
621 	uint32_t power_gate = power_on ? 0 : 1;
622 	uint32_t pwr_status = power_on ? PGFSM_POWER_ON : PGFSM_POWER_OFF;
623 
624 	if (hws->ctx->dc->debug.disable_dpp_power_gate)
625 		return;
626 	if (REG(DOMAIN1_PG_CONFIG) == 0)
627 		return;
628 
629 	switch (dpp_inst) {
630 	case 0: /* DPP0 */
631 		REG_UPDATE(DOMAIN1_PG_CONFIG,
632 				DOMAIN1_POWER_GATE, power_gate);
633 
634 		REG_WAIT(DOMAIN1_PG_STATUS,
635 				DOMAIN1_PGFSM_PWR_STATUS, pwr_status,
636 				1, 1000);
637 		break;
638 	case 1: /* DPP1 */
639 		REG_UPDATE(DOMAIN3_PG_CONFIG,
640 				DOMAIN3_POWER_GATE, power_gate);
641 
642 		REG_WAIT(DOMAIN3_PG_STATUS,
643 				DOMAIN3_PGFSM_PWR_STATUS, pwr_status,
644 				1, 1000);
645 		break;
646 	case 2: /* DPP2 */
647 		REG_UPDATE(DOMAIN5_PG_CONFIG,
648 				DOMAIN5_POWER_GATE, power_gate);
649 
650 		REG_WAIT(DOMAIN5_PG_STATUS,
651 				DOMAIN5_PGFSM_PWR_STATUS, pwr_status,
652 				1, 1000);
653 		break;
654 	case 3: /* DPP3 */
655 		REG_UPDATE(DOMAIN7_PG_CONFIG,
656 				DOMAIN7_POWER_GATE, power_gate);
657 
658 		REG_WAIT(DOMAIN7_PG_STATUS,
659 				DOMAIN7_PGFSM_PWR_STATUS, pwr_status,
660 				1, 1000);
661 		break;
662 	default:
663 		BREAK_TO_DEBUGGER();
664 		break;
665 	}
666 }
667 
668 /**
669  * dcn10_hubp_pg_control - HUBP power gate control.
670  *
671  * @hws: dce_hwseq reference.
 * @hubp_inst: HUBP instance reference.
 * @power_on: true to power the HUBP on (un-gate it), false to power gate it.
674  *
675  * Enable or disable power gate in the specific HUBP instance.
676  */
void dcn10_hubp_pg_control(
678 		struct dce_hwseq *hws,
679 		unsigned int hubp_inst,
680 		bool power_on)
681 {
682 	uint32_t power_gate = power_on ? 0 : 1;
683 	uint32_t pwr_status = power_on ? PGFSM_POWER_ON : PGFSM_POWER_OFF;
684 
685 	if (hws->ctx->dc->debug.disable_hubp_power_gate)
686 		return;
687 	if (REG(DOMAIN0_PG_CONFIG) == 0)
688 		return;
689 
690 	switch (hubp_inst) {
691 	case 0: /* DCHUBP0 */
692 		REG_UPDATE(DOMAIN0_PG_CONFIG,
693 				DOMAIN0_POWER_GATE, power_gate);
694 
695 		REG_WAIT(DOMAIN0_PG_STATUS,
696 				DOMAIN0_PGFSM_PWR_STATUS, pwr_status,
697 				1, 1000);
698 		break;
699 	case 1: /* DCHUBP1 */
700 		REG_UPDATE(DOMAIN2_PG_CONFIG,
701 				DOMAIN2_POWER_GATE, power_gate);
702 
703 		REG_WAIT(DOMAIN2_PG_STATUS,
704 				DOMAIN2_PGFSM_PWR_STATUS, pwr_status,
705 				1, 1000);
706 		break;
707 	case 2: /* DCHUBP2 */
708 		REG_UPDATE(DOMAIN4_PG_CONFIG,
709 				DOMAIN4_POWER_GATE, power_gate);
710 
711 		REG_WAIT(DOMAIN4_PG_STATUS,
712 				DOMAIN4_PGFSM_PWR_STATUS, pwr_status,
713 				1, 1000);
714 		break;
715 	case 3: /* DCHUBP3 */
716 		REG_UPDATE(DOMAIN6_PG_CONFIG,
717 				DOMAIN6_POWER_GATE, power_gate);
718 
719 		REG_WAIT(DOMAIN6_PG_STATUS,
720 				DOMAIN6_PGFSM_PWR_STATUS, pwr_status,
721 				1, 1000);
722 		break;
723 	default:
724 		BREAK_TO_DEBUGGER();
725 		break;
726 	}
727 }
728 
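/*
 * Un-gate the DPP and HUBP power domains for the given plane/pipe index by
 * temporarily enabling the IP request interface.
 */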
static void power_on_plane(
730 	struct dce_hwseq *hws,
731 	int plane_id)
732 {
733 	DC_LOGGER_INIT(hws->ctx->logger);
734 	if (REG(DC_IP_REQUEST_CNTL)) {
735 		REG_SET(DC_IP_REQUEST_CNTL, 0,
736 				IP_REQUEST_EN, 1);
737 
738 		if (hws->funcs.dpp_pg_control)
739 			hws->funcs.dpp_pg_control(hws, plane_id, true);
740 
741 		if (hws->funcs.hubp_pg_control)
742 			hws->funcs.hubp_pg_control(hws, plane_id, true);
743 
744 		REG_SET(DC_IP_REQUEST_CNTL, 0,
745 				IP_REQUEST_EN, 0);
746 		DC_LOG_DEBUG(
747 				"Un-gated front end for pipe %d\n", plane_id);
748 	}
749 }
750 
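/* Revert the DEGVIDCN10_253 workaround: blank HUBP0 and power gate it again. */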
static void undo_DEGVIDCN10_253_wa(struct dc *dc)
752 {
753 	struct dce_hwseq *hws = dc->hwseq;
754 	struct hubp *hubp = dc->res_pool->hubps[0];
755 
756 	if (!hws->wa_state.DEGVIDCN10_253_applied)
757 		return;
758 
759 	hubp->funcs->set_blank(hubp, true);
760 
761 	REG_SET(DC_IP_REQUEST_CNTL, 0,
762 			IP_REQUEST_EN, 1);
763 
764 	hws->funcs.hubp_pg_control(hws, 0, false);
765 	REG_SET(DC_IP_REQUEST_CNTL, 0,
766 			IP_REQUEST_EN, 0);
767 
768 	hws->wa_state.DEGVIDCN10_253_applied = false;
769 }
770 
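/*
 * DEGVIDCN10_253 workaround: once every HUBP is power gated, power HUBP0
 * back on so stutter can still be enabled. Reversed by
 * undo_DEGVIDCN10_253_wa().
 */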
static void apply_DEGVIDCN10_253_wa(struct dc *dc)
772 {
773 	struct dce_hwseq *hws = dc->hwseq;
774 	struct hubp *hubp = dc->res_pool->hubps[0];
775 	int i;
776 
777 	if (dc->debug.disable_stutter)
778 		return;
779 
780 	if (!hws->wa.DEGVIDCN10_253)
781 		return;
782 
783 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
784 		if (!dc->res_pool->hubps[i]->power_gated)
785 			return;
786 	}
787 
	/* All pipes are power gated; apply the workaround to enable stutter. */
789 
790 	REG_SET(DC_IP_REQUEST_CNTL, 0,
791 			IP_REQUEST_EN, 1);
792 
793 	hws->funcs.hubp_pg_control(hws, 0, true);
794 	REG_SET(DC_IP_REQUEST_CNTL, 0,
795 			IP_REQUEST_EN, 0);
796 
797 	hubp->funcs->set_hubp_blank_en(hubp, false);
798 	hws->wa_state.DEGVIDCN10_253_applied = true;
799 }
800 
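/*
 * dcn10_bios_golden_init - Put DCN into its VBIOS golden state via the
 * enable_disp_power_gating command table and restore the self-refresh allow
 * setting if the command table changed it.
 */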
void dcn10_bios_golden_init(struct dc *dc)
802 {
803 	struct dce_hwseq *hws = dc->hwseq;
804 	struct dc_bios *bp = dc->ctx->dc_bios;
805 	int i;
806 	bool allow_self_fresh_force_enable = true;
807 
808 	if (hws->funcs.s0i3_golden_init_wa && hws->funcs.s0i3_golden_init_wa(dc))
809 		return;
810 
811 	if (dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled)
812 		allow_self_fresh_force_enable =
813 				dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled(dc->res_pool->hubbub);
814 
815 
	/* WA to let DF sleep when idle after resuming from S0i3.
	 * The command table sets DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE
	 * to 1; if it was 0 before the command table call and changed to 1
	 * afterwards, it should be set back to 0.
	 */
822 
823 	/* initialize dcn global */
824 	bp->funcs->enable_disp_power_gating(bp,
825 			CONTROLLER_ID_D0, ASIC_PIPE_INIT);
826 
827 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
828 		/* initialize dcn per pipe */
829 		bp->funcs->enable_disp_power_gating(bp,
830 				CONTROLLER_ID_D0 + i, ASIC_PIPE_DISABLE);
831 	}
832 
833 	if (dc->res_pool->hubbub->funcs->allow_self_refresh_control)
834 		if (allow_self_fresh_force_enable == false &&
835 				dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled(dc->res_pool->hubbub))
836 			dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub,
837 										!dc->res_pool->hubbub->ctx->dc->debug.disable_stutter);
838 
839 }
840 
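/*
 * Workaround for a false OPTC underflow that can be raised while MPCCs are
 * still disconnecting: wait for the disconnects on this stream, re-enable
 * blank data double buffering, and clear the underflow bit if it was not
 * already set on entry.
 */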
static void false_optc_underflow_wa(
842 		struct dc *dc,
843 		const struct dc_stream_state *stream,
844 		struct timing_generator *tg)
845 {
846 	int i;
847 	bool underflow;
848 
849 	if (!dc->hwseq->wa.false_optc_underflow)
850 		return;
851 
852 	underflow = tg->funcs->is_optc_underflow_occurred(tg);
853 
854 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
855 		struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
856 
857 		if (old_pipe_ctx->stream != stream)
858 			continue;
859 
860 		dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, old_pipe_ctx);
861 	}
862 
863 	if (tg->funcs->set_blank_data_double_buffer)
864 		tg->funcs->set_blank_data_double_buffer(tg, true);
865 
866 	if (tg->funcs->is_optc_underflow_occurred(tg) && !underflow)
867 		tg->funcs->clear_optc_underflow(tg);
868 }
869 
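/*
 * Return the largest vready_offset among this pipe and all pipes it is
 * blended or ODM-combined with, so the whole group uses one VREADY offset.
 */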
static int calculate_vready_offset_for_group(struct pipe_ctx *pipe)
871 {
872 	struct pipe_ctx *other_pipe;
873 	int vready_offset = pipe->pipe_dlg_param.vready_offset;
874 
875 	/* Always use the largest vready_offset of all connected pipes */
876 	for (other_pipe = pipe->bottom_pipe; other_pipe != NULL; other_pipe = other_pipe->bottom_pipe) {
877 		if (other_pipe->pipe_dlg_param.vready_offset > vready_offset)
878 			vready_offset = other_pipe->pipe_dlg_param.vready_offset;
879 	}
880 	for (other_pipe = pipe->top_pipe; other_pipe != NULL; other_pipe = other_pipe->top_pipe) {
881 		if (other_pipe->pipe_dlg_param.vready_offset > vready_offset)
882 			vready_offset = other_pipe->pipe_dlg_param.vready_offset;
883 	}
884 	for (other_pipe = pipe->next_odm_pipe; other_pipe != NULL; other_pipe = other_pipe->next_odm_pipe) {
885 		if (other_pipe->pipe_dlg_param.vready_offset > vready_offset)
886 			vready_offset = other_pipe->pipe_dlg_param.vready_offset;
887 	}
888 	for (other_pipe = pipe->prev_odm_pipe; other_pipe != NULL; other_pipe = other_pipe->prev_odm_pipe) {
889 		if (other_pipe->pipe_dlg_param.vready_offset > vready_offset)
890 			vready_offset = other_pipe->pipe_dlg_param.vready_offset;
891 	}
892 
893 	return vready_offset;
894 }
895 
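/*
 * dcn10_enable_stream_timing - Program the pixel clock and OTG timing for
 * the top pipe, set the OTG blank color and enable the CRTC.
 */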
enum dc_status dcn10_enable_stream_timing(
897 		struct pipe_ctx *pipe_ctx,
898 		struct dc_state *context,
899 		struct dc *dc)
900 {
901 	struct dc_stream_state *stream = pipe_ctx->stream;
902 	enum dc_color_space color_space;
903 	struct tg_color black_color = {0};
904 
	/* The upper-level caller loop processes pipe0 (the parent pipe) first.
	 * The back end is set up for pipe0; child pipes share that back end
	 * with pipe0, so no additional programming is needed for them.
	 */
909 	if (pipe_ctx->top_pipe != NULL)
910 		return DC_OK;
911 
912 	/* TODO check if timing_changed, disable stream if timing changed */
913 
	/* The HW programming guide assumes the display has already been
	 * disabled by the unplug sequence and that the OTG is stopped.
	 */
917 	pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, true);
918 
919 	if (false == pipe_ctx->clock_source->funcs->program_pix_clk(
920 			pipe_ctx->clock_source,
921 			&pipe_ctx->stream_res.pix_clk_params,
922 			dp_get_link_encoding_format(&pipe_ctx->link_config.dp_link_settings),
923 			&pipe_ctx->pll_settings)) {
924 		BREAK_TO_DEBUGGER();
925 		return DC_ERROR_UNEXPECTED;
926 	}
927 
928 	if (dc_is_hdmi_tmds_signal(stream->signal)) {
929 		stream->link->phy_state.symclk_ref_cnts.otg = 1;
930 		if (stream->link->phy_state.symclk_state == SYMCLK_OFF_TX_OFF)
931 			stream->link->phy_state.symclk_state = SYMCLK_ON_TX_OFF;
932 		else
933 			stream->link->phy_state.symclk_state = SYMCLK_ON_TX_ON;
934 	}
935 
936 	pipe_ctx->stream_res.tg->funcs->program_timing(
937 			pipe_ctx->stream_res.tg,
938 			&stream->timing,
939 			calculate_vready_offset_for_group(pipe_ctx),
940 			pipe_ctx->pipe_dlg_param.vstartup_start,
941 			pipe_ctx->pipe_dlg_param.vupdate_offset,
942 			pipe_ctx->pipe_dlg_param.vupdate_width,
943 			pipe_ctx->stream->signal,
944 			true);
945 
946 #if 0 /* move to after enable_crtc */
947 	/* TODO: OPP FMT, ABM. etc. should be done here. */
948 	/* or FPGA now. instance 0 only. TODO: move to opp.c */
949 
950 	inst_offset = reg_offsets[pipe_ctx->stream_res.tg->inst].fmt;
951 
952 	pipe_ctx->stream_res.opp->funcs->opp_program_fmt(
953 				pipe_ctx->stream_res.opp,
954 				&stream->bit_depth_params,
955 				&stream->clamping);
956 #endif
957 	/* program otg blank color */
958 	color_space = stream->output_color_space;
959 	color_space_to_black_color(dc, color_space, &black_color);
960 
961 	/*
962 	 * The way 420 is packed, 2 channels carry Y component, 1 channel
963 	 * alternate between Cb and Cr, so both channels need the pixel
964 	 * value for Y
965 	 */
966 	if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
967 		black_color.color_r_cr = black_color.color_g_y;
968 
969 	if (pipe_ctx->stream_res.tg->funcs->set_blank_color)
970 		pipe_ctx->stream_res.tg->funcs->set_blank_color(
971 				pipe_ctx->stream_res.tg,
972 				&black_color);
973 
974 	if (pipe_ctx->stream_res.tg->funcs->is_blanked &&
975 			!pipe_ctx->stream_res.tg->funcs->is_blanked(pipe_ctx->stream_res.tg)) {
976 		pipe_ctx->stream_res.tg->funcs->set_blank(pipe_ctx->stream_res.tg, true);
977 		hwss_wait_for_blank_complete(pipe_ctx->stream_res.tg);
978 		false_optc_underflow_wa(dc, pipe_ctx->stream, pipe_ctx->stream_res.tg);
979 	}
980 
981 	/* VTG is  within DCHUB command block. DCFCLK is always on */
982 	if (false == pipe_ctx->stream_res.tg->funcs->enable_crtc(pipe_ctx->stream_res.tg)) {
983 		BREAK_TO_DEBUGGER();
984 		return DC_ERROR_UNEXPECTED;
985 	}
986 
987 	/* TODO program crtc source select for non-virtual signal*/
988 	/* TODO program FMT */
989 	/* TODO setup link_enc */
990 	/* TODO set stream attributes */
991 	/* TODO program audio */
992 	/* TODO enable stream if timing changed */
993 	/* TODO unblank stream if DP */
994 
995 	return DC_OK;
996 }
997 
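/*
 * Disable the stream and audio for this pipe and, for the top pipe, disable
 * the OTG as well, then detach the stream from the pipe context.
 */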
static void dcn10_reset_back_end_for_pipe(
999 		struct dc *dc,
1000 		struct pipe_ctx *pipe_ctx,
1001 		struct dc_state *context)
1002 {
1003 	int i;
1004 	struct dc_link *link;
1005 	DC_LOGGER_INIT(dc->ctx->logger);
1006 	if (pipe_ctx->stream_res.stream_enc == NULL) {
1007 		pipe_ctx->stream = NULL;
1008 		return;
1009 	}
1010 
1011 	if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
1012 		link = pipe_ctx->stream->link;
		/* DPMS may already have disabled the stream, or the dpms_off
		 * status may be incorrect due to the fastboot feature: when the
		 * system resumes from S4 with only the second screen active,
		 * dpms_off is true even though VBIOS lit up eDP, so check the
		 * link status too.
		 */
1019 		if (!pipe_ctx->stream->dpms_off || link->link_status.link_active)
1020 			core_link_disable_stream(pipe_ctx);
1021 		else if (pipe_ctx->stream_res.audio)
1022 			dc->hwss.disable_audio_stream(pipe_ctx);
1023 
1024 		if (pipe_ctx->stream_res.audio) {
1025 			/*disable az_endpoint*/
1026 			pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);
1027 
1028 			/*free audio*/
1029 			if (dc->caps.dynamic_audio == true) {
				/* we have to dynamically arbitrate the audio endpoints */
				/* free the resource; is_audio_acquired needs to be reset */
1032 				update_audio_usage(&dc->current_state->res_ctx, dc->res_pool,
1033 						pipe_ctx->stream_res.audio, false);
1034 				pipe_ctx->stream_res.audio = NULL;
1035 			}
1036 		}
1037 	}
1038 
	/* The upper-level caller loop resets the parent pipe (pipe0) last.
	 * The back end is shared by all pipes and is disabled only when the
	 * parent pipe is disabled.
	 */
1043 	if (pipe_ctx->top_pipe == NULL) {
1044 
1045 		if (pipe_ctx->stream_res.abm)
1046 			dc->hwss.set_abm_immediate_disable(pipe_ctx);
1047 
1048 		pipe_ctx->stream_res.tg->funcs->disable_crtc(pipe_ctx->stream_res.tg);
1049 
1050 		pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, false);
1051 		if (pipe_ctx->stream_res.tg->funcs->set_drr)
1052 			pipe_ctx->stream_res.tg->funcs->set_drr(
1053 					pipe_ctx->stream_res.tg, NULL);
1054 		pipe_ctx->stream->link->phy_state.symclk_ref_cnts.otg = 0;
1055 	}
1056 
1057 	for (i = 0; i < dc->res_pool->pipe_count; i++)
1058 		if (&dc->current_state->res_ctx.pipe_ctx[i] == pipe_ctx)
1059 			break;
1060 
1061 	if (i == dc->res_pool->pipe_count)
1062 		return;
1063 
1064 	pipe_ctx->stream = NULL;
1065 	DC_LOG_DEBUG("Reset back end for pipe %d, tg:%d\n",
1066 					pipe_ctx->pipe_idx, pipe_ctx->stream_res.tg->inst);
1067 }
1068 
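/*
 * Forced HUBP/HUBBUB recovery sequence used when a pipe reports underflow:
 * blank all HUBPs, soft reset HUBBUB, toggle HUBP_DISABLE, release the reset
 * and unblank again. Returns true if the recovery sequence was executed.
 */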
static bool dcn10_hw_wa_force_recovery(struct dc *dc)
1070 {
	struct hubp *hubp;
	unsigned int i;
	bool need_recover = false;
1074 
1075 	if (!dc->debug.recovery_enabled)
1076 		return false;
1077 
1078 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
1079 		struct pipe_ctx *pipe_ctx =
1080 			&dc->current_state->res_ctx.pipe_ctx[i];
1081 		if (pipe_ctx != NULL) {
1082 			hubp = pipe_ctx->plane_res.hubp;
1083 			if (hubp != NULL && hubp->funcs->hubp_get_underflow_status) {
1084 				if (hubp->funcs->hubp_get_underflow_status(hubp) != 0) {
1085 					/* one pipe underflow, we will reset all the pipes*/
1086 					need_recover = true;
1087 				}
1088 			}
1089 		}
1090 	}
1091 	if (!need_recover)
1092 		return false;
1093 	/*
1094 	DCHUBP_CNTL:HUBP_BLANK_EN=1
1095 	DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=1
1096 	DCHUBP_CNTL:HUBP_DISABLE=1
1097 	DCHUBP_CNTL:HUBP_DISABLE=0
1098 	DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=0
1099 	DCSURF_PRIMARY_SURFACE_ADDRESS
1100 	DCHUBP_CNTL:HUBP_BLANK_EN=0
1101 	*/
1102 
1103 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
1104 		struct pipe_ctx *pipe_ctx =
1105 			&dc->current_state->res_ctx.pipe_ctx[i];
1106 		if (pipe_ctx != NULL) {
1107 			hubp = pipe_ctx->plane_res.hubp;
1108 			/*DCHUBP_CNTL:HUBP_BLANK_EN=1*/
1109 			if (hubp != NULL && hubp->funcs->set_hubp_blank_en)
1110 				hubp->funcs->set_hubp_blank_en(hubp, true);
1111 		}
1112 	}
1113 	/*DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=1*/
1114 	hubbub1_soft_reset(dc->res_pool->hubbub, true);
1115 
1116 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
1117 		struct pipe_ctx *pipe_ctx =
1118 			&dc->current_state->res_ctx.pipe_ctx[i];
1119 		if (pipe_ctx != NULL) {
1120 			hubp = pipe_ctx->plane_res.hubp;
1121 			/*DCHUBP_CNTL:HUBP_DISABLE=1*/
1122 			if (hubp != NULL && hubp->funcs->hubp_disable_control)
1123 				hubp->funcs->hubp_disable_control(hubp, true);
1124 		}
1125 	}
1126 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
1127 		struct pipe_ctx *pipe_ctx =
1128 			&dc->current_state->res_ctx.pipe_ctx[i];
1129 		if (pipe_ctx != NULL) {
1130 			hubp = pipe_ctx->plane_res.hubp;
1131 			/*DCHUBP_CNTL:HUBP_DISABLE=0*/
1132 			if (hubp != NULL && hubp->funcs->hubp_disable_control)
				hubp->funcs->hubp_disable_control(hubp, false);
1134 		}
1135 	}
1136 	/*DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=0*/
1137 	hubbub1_soft_reset(dc->res_pool->hubbub, false);
1138 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
1139 		struct pipe_ctx *pipe_ctx =
1140 			&dc->current_state->res_ctx.pipe_ctx[i];
1141 		if (pipe_ctx != NULL) {
1142 			hubp = pipe_ctx->plane_res.hubp;
1143 			/*DCHUBP_CNTL:HUBP_BLANK_EN=0*/
1144 			if (hubp != NULL && hubp->funcs->set_hubp_blank_en)
				hubp->funcs->set_hubp_blank_en(hubp, false);
1146 		}
1147 	}
1148 	return true;
1149 
1150 }
1151 
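/*
 * dcn10_verify_allow_pstate_change_high - Sanity check that HUBBUB still
 * allows p-state changes; on failure optionally log the HW state, break to
 * the debugger and attempt the forced recovery sequence, then re-check.
 */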
void dcn10_verify_allow_pstate_change_high(struct dc *dc)
1153 {
1154 	struct hubbub *hubbub = dc->res_pool->hubbub;
1155 	static bool should_log_hw_state; /* prevent hw state log by default */
1156 
1157 	if (!hubbub->funcs->verify_allow_pstate_change_high)
1158 		return;
1159 
1160 	if (!hubbub->funcs->verify_allow_pstate_change_high(hubbub)) {
1161 		int i = 0;
1162 
1163 		if (should_log_hw_state)
1164 			dcn10_log_hw_state(dc, NULL);
1165 
1166 		TRACE_DC_PIPE_STATE(pipe_ctx, i, MAX_PIPES);
1167 		BREAK_TO_DEBUGGER();
1168 		if (dcn10_hw_wa_force_recovery(dc)) {
1169 			/*check again*/
1170 			if (!hubbub->funcs->verify_allow_pstate_change_high(hubbub))
1171 				BREAK_TO_DEBUGGER();
1172 		}
1173 	}
1174 }
1175 
/* Trigger HW to start disconnecting the plane from the stream on the next vsync. */
void dcn10_plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx)
1178 {
1179 	struct dce_hwseq *hws = dc->hwseq;
1180 	struct hubp *hubp = pipe_ctx->plane_res.hubp;
1181 	int dpp_id = pipe_ctx->plane_res.dpp->inst;
1182 	struct mpc *mpc = dc->res_pool->mpc;
1183 	struct mpc_tree *mpc_tree_params;
1184 	struct mpcc *mpcc_to_remove = NULL;
1185 	struct output_pixel_processor *opp = pipe_ctx->stream_res.opp;
1186 
1187 	mpc_tree_params = &(opp->mpc_tree_params);
1188 	mpcc_to_remove = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, dpp_id);
1189 
1190 	/*Already reset*/
1191 	if (mpcc_to_remove == NULL)
1192 		return;
1193 
1194 	mpc->funcs->remove_mpcc(mpc, mpc_tree_params, mpcc_to_remove);
1195 	// Phantom pipes have OTG disabled by default, so MPCC_STATUS will never assert idle,
1196 	// so don't wait for MPCC_IDLE in the programming sequence
1197 	if (opp != NULL && !pipe_ctx->plane_state->is_phantom)
1198 		opp->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;
1199 
1200 	dc->optimized_required = true;
1201 
1202 	if (hubp->funcs->hubp_disconnect)
1203 		hubp->funcs->hubp_disconnect(hubp);
1204 
1205 	if (dc->debug.sanity_checks)
1206 		hws->funcs.verify_allow_pstate_change_high(dc);
1207 }
1208 
1209 /**
1210  * dcn10_plane_atomic_power_down - Power down plane components.
1211  *
 * @dc: dc struct reference, used to grab the hwseq.
1213  * @dpp: dpp struct reference.
1214  * @hubp: hubp struct reference.
1215  *
 * Keep in mind that this operation goes through the IP request interface:
 * power gate requests are normally kept disabled, so this function first
 * enables the power gate request, then power gates the DPP and HUBP, and
 * finally disables the request again.
1221  */
void dcn10_plane_atomic_power_down(struct dc *dc,
1223 		struct dpp *dpp,
1224 		struct hubp *hubp)
1225 {
1226 	struct dce_hwseq *hws = dc->hwseq;
1227 	DC_LOGGER_INIT(dc->ctx->logger);
1228 
1229 	if (REG(DC_IP_REQUEST_CNTL)) {
1230 		REG_SET(DC_IP_REQUEST_CNTL, 0,
1231 				IP_REQUEST_EN, 1);
1232 
1233 		if (hws->funcs.dpp_pg_control)
1234 			hws->funcs.dpp_pg_control(hws, dpp->inst, false);
1235 
1236 		if (hws->funcs.hubp_pg_control)
1237 			hws->funcs.hubp_pg_control(hws, hubp->inst, false);
1238 
1239 		dpp->funcs->dpp_reset(dpp);
1240 		REG_SET(DC_IP_REQUEST_CNTL, 0,
1241 				IP_REQUEST_EN, 0);
1242 		DC_LOG_DEBUG(
1243 				"Power gated front end %d\n", hubp->inst);
1244 	}
1245 }
1246 
1247 /* disable HW used by plane.
1248  * note:  cannot disable until disconnect is complete
1249  */
void dcn10_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
1251 {
1252 	struct dce_hwseq *hws = dc->hwseq;
1253 	struct hubp *hubp = pipe_ctx->plane_res.hubp;
1254 	struct dpp *dpp = pipe_ctx->plane_res.dpp;
1255 	int opp_id = hubp->opp_id;
1256 
1257 	dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe_ctx);
1258 
1259 	hubp->funcs->hubp_clk_cntl(hubp, false);
1260 
1261 	dpp->funcs->dpp_dppclk_control(dpp, false, false);
1262 
1263 	if (opp_id != 0xf && pipe_ctx->stream_res.opp->mpc_tree_params.opp_list == NULL)
1264 		pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control(
1265 				pipe_ctx->stream_res.opp,
1266 				false);
1267 
1268 	hubp->power_gated = true;
1269 	dc->optimized_required = false; /* We're powering off, no need to optimize */
1270 
1271 	hws->funcs.plane_atomic_power_down(dc,
1272 			pipe_ctx->plane_res.dpp,
1273 			pipe_ctx->plane_res.hubp);
1274 
1275 	pipe_ctx->stream = NULL;
1276 	memset(&pipe_ctx->stream_res, 0, sizeof(pipe_ctx->stream_res));
1277 	memset(&pipe_ctx->plane_res, 0, sizeof(pipe_ctx->plane_res));
1278 	pipe_ctx->top_pipe = NULL;
1279 	pipe_ctx->bottom_pipe = NULL;
1280 	pipe_ctx->plane_state = NULL;
1281 }
1282 
void dcn10_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx)
1284 {
1285 	struct dce_hwseq *hws = dc->hwseq;
1286 	DC_LOGGER_INIT(dc->ctx->logger);
1287 
1288 	if (!pipe_ctx->plane_res.hubp || pipe_ctx->plane_res.hubp->power_gated)
1289 		return;
1290 
1291 	hws->funcs.plane_atomic_disable(dc, pipe_ctx);
1292 
1293 	apply_DEGVIDCN10_253_wa(dc);
1294 
1295 	DC_LOG_DC("Power down front end %d\n",
1296 					pipe_ctx->pipe_idx);
1297 }
1298 
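/*
 * dcn10_init_pipes - Bring all pipes into a known disabled state: blank
 * enabled TGs, reset DET sizes, MPC muxes and plane resources, and power
 * gate unused DSCs, skipping pipes kept alive for seamless boot.
 */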
void dcn10_init_pipes(struct dc *dc, struct dc_state *context)
1300 {
1301 	int i;
1302 	struct dce_hwseq *hws = dc->hwseq;
1303 	struct hubbub *hubbub = dc->res_pool->hubbub;
1304 	bool can_apply_seamless_boot = false;
1305 
1306 	for (i = 0; i < context->stream_count; i++) {
1307 		if (context->streams[i]->apply_seamless_boot_optimization) {
1308 			can_apply_seamless_boot = true;
1309 			break;
1310 		}
1311 	}
1312 
1313 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
1314 		struct timing_generator *tg = dc->res_pool->timing_generators[i];
1315 		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
1316 
		/* We assume pipe_ctx does not map irregularly to a non-preferred
		 * front end. If pipe_ctx->stream is not NULL, the pipe will be
		 * used, so don't disable it.
		 */
1321 		if (pipe_ctx->stream != NULL && can_apply_seamless_boot)
1322 			continue;
1323 
1324 		/* Blank controller using driver code instead of
1325 		 * command table.
1326 		 */
1327 		if (tg->funcs->is_tg_enabled(tg)) {
1328 			if (hws->funcs.init_blank != NULL) {
1329 				hws->funcs.init_blank(dc, tg);
1330 				tg->funcs->lock(tg);
1331 			} else {
1332 				tg->funcs->lock(tg);
1333 				tg->funcs->set_blank(tg, true);
1334 				hwss_wait_for_blank_complete(tg);
1335 			}
1336 		}
1337 	}
1338 
1339 	/* Reset det size */
1340 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
1341 		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
1342 		struct hubp *hubp = dc->res_pool->hubps[i];
1343 
1344 		/* Do not need to reset for seamless boot */
1345 		if (pipe_ctx->stream != NULL && can_apply_seamless_boot)
1346 			continue;
1347 
1348 		if (hubbub && hubp) {
1349 			if (hubbub->funcs->program_det_size)
1350 				hubbub->funcs->program_det_size(hubbub, hubp->inst, 0);
1351 		}
1352 	}
1353 
1354 	/* num_opp will be equal to number of mpcc */
1355 	for (i = 0; i < dc->res_pool->res_cap->num_opp; i++) {
1356 		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
1357 
1358 		/* Cannot reset the MPC mux if seamless boot */
1359 		if (pipe_ctx->stream != NULL && can_apply_seamless_boot)
1360 			continue;
1361 
1362 		dc->res_pool->mpc->funcs->mpc_init_single_inst(
1363 				dc->res_pool->mpc, i);
1364 	}
1365 
1366 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
1367 		struct timing_generator *tg = dc->res_pool->timing_generators[i];
1368 		struct hubp *hubp = dc->res_pool->hubps[i];
1369 		struct dpp *dpp = dc->res_pool->dpps[i];
1370 		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
1371 
		/* We assume pipe_ctx does not map irregularly to a non-preferred
		 * front end. If pipe_ctx->stream is not NULL, the pipe will be
		 * used, so don't disable it.
		 */
1376 		if (can_apply_seamless_boot &&
1377 			pipe_ctx->stream != NULL &&
1378 			pipe_ctx->stream_res.tg->funcs->is_tg_enabled(
1379 				pipe_ctx->stream_res.tg)) {
1380 			// Enable double buffering for OTG_BLANK no matter if
1381 			// seamless boot is enabled or not to suppress global sync
1382 			// signals when OTG blanked. This is to prevent pipe from
1383 			// requesting data while in PSR.
1384 			tg->funcs->tg_init(tg);
1385 			hubp->power_gated = true;
1386 			continue;
1387 		}
1388 
1389 		/* Disable on the current state so the new one isn't cleared. */
1390 		pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
1391 
1392 		dpp->funcs->dpp_reset(dpp);
1393 
1394 		pipe_ctx->stream_res.tg = tg;
1395 		pipe_ctx->pipe_idx = i;
1396 
1397 		pipe_ctx->plane_res.hubp = hubp;
1398 		pipe_ctx->plane_res.dpp = dpp;
1399 		pipe_ctx->plane_res.mpcc_inst = dpp->inst;
1400 		hubp->mpcc_id = dpp->inst;
1401 		hubp->opp_id = OPP_ID_INVALID;
1402 		hubp->power_gated = false;
1403 
1404 		dc->res_pool->opps[i]->mpc_tree_params.opp_id = dc->res_pool->opps[i]->inst;
1405 		dc->res_pool->opps[i]->mpc_tree_params.opp_list = NULL;
1406 		dc->res_pool->opps[i]->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;
1407 		pipe_ctx->stream_res.opp = dc->res_pool->opps[i];
1408 
1409 		hws->funcs.plane_atomic_disconnect(dc, pipe_ctx);
1410 
1411 		if (tg->funcs->is_tg_enabled(tg))
1412 			tg->funcs->unlock(tg);
1413 
1414 		dc->hwss.disable_plane(dc, pipe_ctx);
1415 
1416 		pipe_ctx->stream_res.tg = NULL;
1417 		pipe_ctx->plane_res.hubp = NULL;
1418 
1419 		if (tg->funcs->is_tg_enabled(tg)) {
1420 			if (tg->funcs->init_odm)
1421 				tg->funcs->init_odm(tg);
1422 		}
1423 
1424 		tg->funcs->tg_init(tg);
1425 	}
1426 
1427 	/* Power gate DSCs */
1428 	if (hws->funcs.dsc_pg_control != NULL) {
1429 		uint32_t num_opps = 0;
1430 		uint32_t opp_id_src0 = OPP_ID_INVALID;
1431 		uint32_t opp_id_src1 = OPP_ID_INVALID;
1432 
		// Step 1: Find out which OPTC is running and has OPTC DSC enabled.
		// We can't use res_pool->res_cap->num_timing_generator to check,
		// because it records the default display-pipe configuration built
		// into the driver, not the display pipes of the current chip;
		// some ASICs have fewer display pipes fused than the default.
		// The dcnxx_resource_construct function obtains the real information.
1439 		for (i = 0; i < dc->res_pool->timing_generator_count; i++) {
1440 			uint32_t optc_dsc_state = 0;
1441 			struct timing_generator *tg = dc->res_pool->timing_generators[i];
1442 
1443 			if (tg->funcs->is_tg_enabled(tg)) {
1444 				if (tg->funcs->get_dsc_status)
1445 					tg->funcs->get_dsc_status(tg, &optc_dsc_state);
				// Only one OPTC can have DSC enabled, so once we find it we exit this loop.
				// A non-zero value means DSC is enabled.
1448 				if (optc_dsc_state != 0) {
1449 					tg->funcs->get_optc_source(tg, &num_opps, &opp_id_src0, &opp_id_src1);
1450 					break;
1451 				}
1452 			}
1453 		}
1454 
		// Step 2: Power down every DSC instance except the DSC of the running OPTC.
1456 		for (i = 0; i < dc->res_pool->res_cap->num_dsc; i++) {
1457 			struct dcn_dsc_state s  = {0};
1458 
1459 			dc->res_pool->dscs[i]->funcs->dsc_read_state(dc->res_pool->dscs[i], &s);
1460 
1461 			if ((s.dsc_opp_source == opp_id_src0 || s.dsc_opp_source == opp_id_src1) &&
1462 				s.dsc_clock_en && s.dsc_fw_en)
1463 				continue;
1464 
1465 			hws->funcs.dsc_pg_control(hws, dc->res_pool->dscs[i]->inst, false);
1466 		}
1467 	}
1468 }
1469 
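/*
 * dcn10_init_hw - One-time hardware init: clocks, DCCG, VGA disable, BIOS
 * golden init, link encoders, pipe init, audio and backlight/ABM/DMCU setup.
 */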
void dcn10_init_hw(struct dc *dc)
1471 {
1472 	int i;
1473 	struct abm *abm = dc->res_pool->abm;
1474 	struct dmcu *dmcu = dc->res_pool->dmcu;
1475 	struct dce_hwseq *hws = dc->hwseq;
1476 	struct dc_bios *dcb = dc->ctx->dc_bios;
1477 	struct resource_pool *res_pool = dc->res_pool;
1478 	uint32_t backlight = MAX_BACKLIGHT_LEVEL;
1479 	bool   is_optimized_init_done = false;
1480 
1481 	if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks)
1482 		dc->clk_mgr->funcs->init_clocks(dc->clk_mgr);
1483 
1484 	/* Align bw context with hw config when system resume. */
1485 	if (dc->clk_mgr->clks.dispclk_khz != 0 && dc->clk_mgr->clks.dppclk_khz != 0) {
1486 		dc->current_state->bw_ctx.bw.dcn.clk.dispclk_khz = dc->clk_mgr->clks.dispclk_khz;
1487 		dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz = dc->clk_mgr->clks.dppclk_khz;
1488 	}
1489 
1490 	// Initialize the dccg
1491 	if (dc->res_pool->dccg && dc->res_pool->dccg->funcs->dccg_init)
1492 		dc->res_pool->dccg->funcs->dccg_init(res_pool->dccg);
1493 
1494 	if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
1495 
1496 		REG_WRITE(REFCLK_CNTL, 0);
1497 		REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, 1);
1498 		REG_WRITE(DIO_MEM_PWR_CTRL, 0);
1499 
1500 		if (!dc->debug.disable_clock_gate) {
1501 			/* enable all DCN clock gating */
1502 			REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0);
1503 
1504 			REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0);
1505 
1506 			REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
1507 		}
1508 
1509 		//Enable ability to power gate / don't force power on permanently
1510 		if (hws->funcs.enable_power_gating_plane)
1511 			hws->funcs.enable_power_gating_plane(hws, true);
1512 
1513 		return;
1514 	}
1515 
1516 	if (!dcb->funcs->is_accelerated_mode(dcb))
1517 		hws->funcs.disable_vga(dc->hwseq);
1518 
1519 	hws->funcs.bios_golden_init(dc);
1520 
1521 	if (dc->ctx->dc_bios->fw_info_valid) {
1522 		res_pool->ref_clocks.xtalin_clock_inKhz =
1523 				dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency;
1524 
1525 		if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
1526 			if (res_pool->dccg && res_pool->hubbub) {
1527 
1528 				(res_pool->dccg->funcs->get_dccg_ref_freq)(res_pool->dccg,
1529 						dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency,
1530 						&res_pool->ref_clocks.dccg_ref_clock_inKhz);
1531 
1532 				(res_pool->hubbub->funcs->get_dchub_ref_freq)(res_pool->hubbub,
1533 						res_pool->ref_clocks.dccg_ref_clock_inKhz,
1534 						&res_pool->ref_clocks.dchub_ref_clock_inKhz);
1535 			} else {
1536 				// Not all ASICs have DCCG sw component
1537 				res_pool->ref_clocks.dccg_ref_clock_inKhz =
1538 						res_pool->ref_clocks.xtalin_clock_inKhz;
1539 				res_pool->ref_clocks.dchub_ref_clock_inKhz =
1540 						res_pool->ref_clocks.xtalin_clock_inKhz;
1541 			}
1542 		}
1543 	} else
1544 		ASSERT_CRITICAL(false);
1545 
1546 	for (i = 0; i < dc->link_count; i++) {
1547 		/* Power up AND update implementation according to the
1548 		 * required signal (which may be different from the
1549 		 * default signal on connector).
1550 		 */
1551 		struct dc_link *link = dc->links[i];
1552 
1553 		if (!is_optimized_init_done)
1554 			link->link_enc->funcs->hw_init(link->link_enc);
1555 
1556 		/* Check for enabled DIG to identify enabled display */
1557 		if (link->link_enc->funcs->is_dig_enabled &&
1558 			link->link_enc->funcs->is_dig_enabled(link->link_enc)) {
1559 			link->link_status.link_active = true;
1560 			if (link->link_enc->funcs->fec_is_active &&
1561 					link->link_enc->funcs->fec_is_active(link->link_enc))
1562 				link->fec_state = dc_link_fec_enabled;
1563 		}
1564 	}
1565 
1566 	/* we want to turn off all dp displays before doing detection */
1567 	dc_link_blank_all_dp_displays(dc);
1568 
1569 	if (hws->funcs.enable_power_gating_plane)
1570 		hws->funcs.enable_power_gating_plane(dc->hwseq, true);
1571 
1572 	/* If taking control over from VBIOS, we may want to optimize our first
1573 	 * mode set, so we need to skip powering down pipes until we know which
1574 	 * pipes we want to use.
1575 	 * Otherwise, if taking control is not possible, we need to power
1576 	 * everything down.
1577 	 */
1578 	if (dcb->funcs->is_accelerated_mode(dcb) || !dc->config.seamless_boot_edp_requested) {
1579 		if (!is_optimized_init_done) {
1580 			hws->funcs.init_pipes(dc, dc->current_state);
1581 			if (dc->res_pool->hubbub->funcs->allow_self_refresh_control)
1582 				dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub,
1583 						!dc->res_pool->hubbub->ctx->dc->debug.disable_stutter);
1584 		}
1585 	}
1586 
1587 	if (!is_optimized_init_done) {
1588 
1589 		for (i = 0; i < res_pool->audio_count; i++) {
1590 			struct audio *audio = res_pool->audios[i];
1591 
1592 			audio->funcs->hw_init(audio);
1593 		}
1594 
1595 		for (i = 0; i < dc->link_count; i++) {
1596 			struct dc_link *link = dc->links[i];
1597 
1598 			if (link->panel_cntl)
1599 				backlight = link->panel_cntl->funcs->hw_init(link->panel_cntl);
1600 		}
1601 
1602 		if (abm != NULL)
1603 			abm->funcs->abm_init(abm, backlight);
1604 
1605 		if (dmcu != NULL && !dmcu->auto_load_dmcu)
1606 			dmcu->funcs->dmcu_init(dmcu);
1607 	}
1608 
1609 	if (abm != NULL && dmcu != NULL)
1610 		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
1611 
1612 	/* Power AFMT HDMI memory. TODO: may move to output enable/disable to save power. */
1613 	if (!is_optimized_init_done)
1614 		REG_WRITE(DIO_MEM_PWR_CTRL, 0);
1615 
1616 	if (!dc->debug.disable_clock_gate) {
1617 		/* enable all DCN clock gating */
1618 		REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0);
1619 
1620 		REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0);
1621 
1622 		REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
1623 	}
1624 
1625 	if (dc->clk_mgr->funcs->notify_wm_ranges)
1626 		dc->clk_mgr->funcs->notify_wm_ranges(dc->clk_mgr);
1627 }
1628 
1629 /* In headless boot cases, DIG may be turned
1630  * on, which causes HW/SW discrepancies.
1631  * To avoid this, power down the hardware on
1632  * boot if a DIG is turned on.
1633  */
1634 void dcn10_power_down_on_boot(struct dc *dc)
1635 {
1636 	struct dc_link *edp_links[MAX_NUM_EDP];
1637 	struct dc_link *edp_link = NULL;
1638 	int edp_num;
1639 	int i = 0;
1640 
1641 	get_edp_links(dc, edp_links, &edp_num);
1642 	if (edp_num)
1643 		edp_link = edp_links[0];
1644 
1645 	if (edp_link && edp_link->link_enc->funcs->is_dig_enabled &&
1646 			edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc) &&
1647 			dc->hwseq->funcs.edp_backlight_control &&
1648 			dc->hwss.power_down &&
1649 			dc->hwss.edp_power_control) {
1650 		dc->hwseq->funcs.edp_backlight_control(edp_link, false);
1651 		dc->hwss.power_down(dc);
1652 		dc->hwss.edp_power_control(edp_link, false);
1653 	} else {
1654 		for (i = 0; i < dc->link_count; i++) {
1655 			struct dc_link *link = dc->links[i];
1656 
1657 			if (link->link_enc && link->link_enc->funcs->is_dig_enabled &&
1658 					link->link_enc->funcs->is_dig_enabled(link->link_enc) &&
1659 					dc->hwss.power_down) {
1660 				dc->hwss.power_down(dc);
1661 				break;
1662 			}
1663 
1664 		}
1665 	}
1666 
1667 	/*
1668 	 * Call update_clocks with empty context
1669 	 * to send DISPLAY_OFF
1670 	 * Otherwise DISPLAY_OFF may not be asserted
1671 	 */
1672 	if (dc->clk_mgr->funcs->set_low_power_state)
1673 		dc->clk_mgr->funcs->set_low_power_state(dc->clk_mgr);
1674 }
1675 
1676 void dcn10_reset_hw_ctx_wrap(
1677 		struct dc *dc,
1678 		struct dc_state *context)
1679 {
1680 	int i;
1681 	struct dce_hwseq *hws = dc->hwseq;
1682 
1683 	/* Reset Back End*/
1684 	for (i = dc->res_pool->pipe_count - 1; i >= 0 ; i--) {
1685 		struct pipe_ctx *pipe_ctx_old =
1686 			&dc->current_state->res_ctx.pipe_ctx[i];
1687 		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
1688 
1689 		if (!pipe_ctx_old->stream)
1690 			continue;
1691 
1692 		if (pipe_ctx_old->top_pipe)
1693 			continue;
1694 
1695 		if (!pipe_ctx->stream ||
1696 				pipe_need_reprogram(pipe_ctx_old, pipe_ctx)) {
1697 			struct clock_source *old_clk = pipe_ctx_old->clock_source;
1698 
1699 			dcn10_reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state);
1700 			if (hws->funcs.enable_stream_gating)
1701 				hws->funcs.enable_stream_gating(dc, pipe_ctx_old);
1702 			if (old_clk)
1703 				old_clk->funcs->cs_power_down(old_clk);
1704 		}
1705 	}
1706 }
1707 
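/*
 * For side-by-side / top-and-bottom stereo the two pipes of a split share one
 * plane_state, and the secondary split pipe has to scan out the right-eye
 * surface.  Temporarily swap the right address into left_addr, return the
 * original left address through *addr so the caller can restore it, and
 * return true.  For other 3D view formats just promote the address to
 * GRPH_STEREO with right == left and return false.
 */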
1708 static bool patch_address_for_sbs_tb_stereo(
1709 		struct pipe_ctx *pipe_ctx, PHYSICAL_ADDRESS_LOC *addr)
1710 {
1711 	struct dc_plane_state *plane_state = pipe_ctx->plane_state;
1712 	bool sec_split = pipe_ctx->top_pipe &&
1713 			pipe_ctx->top_pipe->plane_state == pipe_ctx->plane_state;
1714 	if (sec_split && plane_state->address.type == PLN_ADDR_TYPE_GRPH_STEREO &&
1715 		(pipe_ctx->stream->timing.timing_3d_format ==
1716 		 TIMING_3D_FORMAT_SIDE_BY_SIDE ||
1717 		 pipe_ctx->stream->timing.timing_3d_format ==
1718 		 TIMING_3D_FORMAT_TOP_AND_BOTTOM)) {
1719 		*addr = plane_state->address.grph_stereo.left_addr;
1720 		plane_state->address.grph_stereo.left_addr =
1721 		plane_state->address.grph_stereo.right_addr;
1722 		return true;
1723 	} else {
1724 		if (pipe_ctx->stream->view_format != VIEW_3D_FORMAT_NONE &&
1725 			plane_state->address.type != PLN_ADDR_TYPE_GRPH_STEREO) {
1726 			plane_state->address.type = PLN_ADDR_TYPE_GRPH_STEREO;
1727 			plane_state->address.grph_stereo.right_addr =
1728 			plane_state->address.grph_stereo.left_addr;
1729 			plane_state->address.grph_stereo.right_meta_addr =
1730 			plane_state->address.grph_stereo.left_meta_addr;
1731 		}
1732 	}
1733 	return false;
1734 }
1735 
1736 void dcn10_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_ctx)
1737 {
1738 	bool addr_patched = false;
1739 	PHYSICAL_ADDRESS_LOC addr;
1740 	struct dc_plane_state *plane_state = pipe_ctx->plane_state;
1741 
1742 	if (plane_state == NULL)
1743 		return;
1744 
1745 	addr_patched = patch_address_for_sbs_tb_stereo(pipe_ctx, &addr);
1746 
1747 	pipe_ctx->plane_res.hubp->funcs->hubp_program_surface_flip_and_addr(
1748 			pipe_ctx->plane_res.hubp,
1749 			&plane_state->address,
1750 			plane_state->flip_immediate);
1751 
1752 	plane_state->status.requested_address = plane_state->address;
1753 
1754 	if (plane_state->flip_immediate)
1755 		plane_state->status.current_address = plane_state->address;
1756 
1757 	if (addr_patched)
1758 		pipe_ctx->plane_state->address.grph_stereo.left_addr = addr;
1759 }
1760 
1761 bool dcn10_set_input_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
1762 			const struct dc_plane_state *plane_state)
1763 {
1764 	struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
1765 	const struct dc_transfer_func *tf = NULL;
1766 	bool result = true;
1767 
1768 	if (dpp_base == NULL)
1769 		return false;
1770 
1771 	if (plane_state->in_transfer_func)
1772 		tf = plane_state->in_transfer_func;
1773 
1774 	if (plane_state->gamma_correction &&
1775 		!dpp_base->ctx->dc->debug.always_use_regamma
1776 		&& !plane_state->gamma_correction->is_identity
1777 			&& dce_use_lut(plane_state->format))
1778 		dpp_base->funcs->dpp_program_input_lut(dpp_base, plane_state->gamma_correction);
1779 
1780 	if (tf == NULL)
1781 		dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
1782 	else if (tf->type == TF_TYPE_PREDEFINED) {
1783 		switch (tf->tf) {
1784 		case TRANSFER_FUNCTION_SRGB:
1785 			dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_HW_sRGB);
1786 			break;
1787 		case TRANSFER_FUNCTION_BT709:
1788 			dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_HW_xvYCC);
1789 			break;
1790 		case TRANSFER_FUNCTION_LINEAR:
1791 			dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
1792 			break;
1793 		case TRANSFER_FUNCTION_PQ:
1794 			dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_USER_PWL);
1795 			cm_helper_translate_curve_to_degamma_hw_format(tf, &dpp_base->degamma_params);
1796 			dpp_base->funcs->dpp_program_degamma_pwl(dpp_base, &dpp_base->degamma_params);
1797 			result = true;
1798 			break;
1799 		default:
1800 			result = false;
1801 			break;
1802 		}
1803 	} else if (tf->type == TF_TYPE_BYPASS) {
1804 		dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
1805 	} else {
1806 		cm_helper_translate_curve_to_degamma_hw_format(tf,
1807 					&dpp_base->degamma_params);
1808 		dpp_base->funcs->dpp_program_degamma_pwl(dpp_base,
1809 				&dpp_base->degamma_params);
1810 		result = true;
1811 	}
1812 
1813 	return result;
1814 }
1815 
1816 #define MAX_NUM_HW_POINTS 0x200
1817 
1818 static void log_tf(struct dc_context *ctx,
1819 				struct dc_transfer_func *tf, uint32_t hw_points_num)
1820 {
1821 	// DC_LOG_GAMMA is the default logging and covers only the hw points
1822 	// DC_LOG_ALL_GAMMA logs all points, not only hw points
1823 	// DC_LOG_ALL_TF_CHANNELS logs all channels of the tf
1824 	int i = 0;
1825 
1826 	DC_LOGGER_INIT(ctx->logger);
1827 	DC_LOG_GAMMA("Gamma Correction TF");
1828 	DC_LOG_ALL_GAMMA("Logging all tf points...");
1829 	DC_LOG_ALL_TF_CHANNELS("Logging all channels...");
1830 
1831 	for (i = 0; i < hw_points_num; i++) {
1832 		DC_LOG_GAMMA("R\t%d\t%llu", i, tf->tf_pts.red[i].value);
1833 		DC_LOG_ALL_TF_CHANNELS("G\t%d\t%llu", i, tf->tf_pts.green[i].value);
1834 		DC_LOG_ALL_TF_CHANNELS("B\t%d\t%llu", i, tf->tf_pts.blue[i].value);
1835 	}
1836 
1837 	for (i = hw_points_num; i < MAX_NUM_HW_POINTS; i++) {
1838 		DC_LOG_ALL_GAMMA("R\t%d\t%llu", i, tf->tf_pts.red[i].value);
1839 		DC_LOG_ALL_TF_CHANNELS("G\t%d\t%llu", i, tf->tf_pts.green[i].value);
1840 		DC_LOG_ALL_TF_CHANNELS("B\t%d\t%llu", i, tf->tf_pts.blue[i].value);
1841 	}
1842 }
1843 
1844 bool dcn10_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
1845 				const struct dc_stream_state *stream)
1846 {
1847 	struct dpp *dpp = pipe_ctx->plane_res.dpp;
1848 
1849 	if (dpp == NULL)
1850 		return false;
1851 
1852 	dpp->regamma_params.hw_points_num = GAMMA_HW_POINTS_NUM;
1853 
1854 	if (stream->out_transfer_func &&
1855 	    stream->out_transfer_func->type == TF_TYPE_PREDEFINED &&
1856 	    stream->out_transfer_func->tf == TRANSFER_FUNCTION_SRGB)
1857 		dpp->funcs->dpp_program_regamma_pwl(dpp, NULL, OPP_REGAMMA_SRGB);
1858 
1859 	/* dcn10_translate_regamma_to_hw_format takes 750us; only do it on a full
1860 	 * update.
1861 	 */
1862 	else if (cm_helper_translate_curve_to_hw_format(
1863 			stream->out_transfer_func,
1864 			&dpp->regamma_params, false)) {
1865 		dpp->funcs->dpp_program_regamma_pwl(
1866 				dpp,
1867 				&dpp->regamma_params, OPP_REGAMMA_USER);
1868 	} else
1869 		dpp->funcs->dpp_program_regamma_pwl(dpp, NULL, OPP_REGAMMA_BYPASS);
1870 
1871 	if (stream != NULL && stream->ctx != NULL &&
1872 			stream->out_transfer_func != NULL) {
1873 		log_tf(stream->ctx,
1874 				stream->out_transfer_func,
1875 				dpp->regamma_params.hw_points_num);
1876 	}
1877 
1878 	return true;
1879 }
1880 
1881 void dcn10_pipe_control_lock(
1882 	struct dc *dc,
1883 	struct pipe_ctx *pipe,
1884 	bool lock)
1885 {
1886 	struct dce_hwseq *hws = dc->hwseq;
1887 
1888 	/* Use the TG master update lock to lock everything on the TG;
1889 	 * therefore only the top pipe needs to be locked.
1890 	 */
1891 	if (!pipe || pipe->top_pipe)
1892 		return;
1893 
1894 	if (dc->debug.sanity_checks)
1895 		hws->funcs.verify_allow_pstate_change_high(dc);
1896 
1897 	if (lock)
1898 		pipe->stream_res.tg->funcs->lock(pipe->stream_res.tg);
1899 	else
1900 		pipe->stream_res.tg->funcs->unlock(pipe->stream_res.tg);
1901 
1902 	if (dc->debug.sanity_checks)
1903 		hws->funcs.verify_allow_pstate_change_high(dc);
1904 }
1905 
1906 /**
1907  * delay_cursor_until_vupdate() - Delay cursor update if too close to VUPDATE.
1908  *
1909  * Software keepout workaround to prevent cursor update locking from stalling
1910  * out cursor updates indefinitely, or from old values being retained when
1911  * the viewport changes in the same frame as the cursor.
1912  *
1913  * The idea is to calculate the remaining time from VPOS to VUPDATE. If it's
1914  * too close to VUPDATE, then stall out until VUPDATE finishes.
1915  *
1916  * TODO: Optimize cursor programming to be once per frame before VUPDATE
1917  *       to avoid the need for this workaround.
1918  */
1919 static void delay_cursor_until_vupdate(struct dc *dc, struct pipe_ctx *pipe_ctx)
1920 {
1921 	struct dc_stream_state *stream = pipe_ctx->stream;
1922 	struct crtc_position position;
1923 	uint32_t vupdate_start, vupdate_end;
1924 	unsigned int lines_to_vupdate, us_to_vupdate, vpos;
1925 	unsigned int us_per_line, us_vupdate;
1926 
1927 	if (!dc->hwss.calc_vupdate_position || !dc->hwss.get_position)
1928 		return;
1929 
1930 	if (!pipe_ctx->stream_res.stream_enc || !pipe_ctx->stream_res.tg)
1931 		return;
1932 
1933 	dc->hwss.calc_vupdate_position(dc, pipe_ctx, &vupdate_start,
1934 				       &vupdate_end);
1935 
1936 	dc->hwss.get_position(&pipe_ctx, 1, &position);
1937 	vpos = position.vertical_count;
1938 
1939 	/* Avoid wraparound calculation issues */
1940 	vupdate_start += stream->timing.v_total;
1941 	vupdate_end += stream->timing.v_total;
1942 	vpos += stream->timing.v_total;
1943 
1944 	if (vpos <= vupdate_start) {
1945 		/* VPOS is in VACTIVE or back porch. */
1946 		lines_to_vupdate = vupdate_start - vpos;
1947 	} else if (vpos > vupdate_end) {
1948 		/* VPOS is in the front porch. */
1949 		return;
1950 	} else {
1951 		/* VPOS is in VUPDATE. */
1952 		lines_to_vupdate = 0;
1953 	}
1954 
1955 	/* Calculate time until VUPDATE in microseconds. */
1956 	us_per_line =
1957 		stream->timing.h_total * 10000u / stream->timing.pix_clk_100hz;
1958 	us_to_vupdate = lines_to_vupdate * us_per_line;
1959 
1960 	/* 70 us is a conservative estimate of cursor update time. */
1961 	if (us_to_vupdate > 70)
1962 		return;
1963 
1964 	/* Stall out until the cursor update completes. */
1965 	if (vupdate_end < vupdate_start)
1966 		vupdate_end += stream->timing.v_total;
1967 	us_vupdate = (vupdate_end - vupdate_start + 1) * us_per_line;
1968 	udelay(us_to_vupdate + us_vupdate);
1969 }
1970 
1971 void dcn10_cursor_lock(struct dc *dc, struct pipe_ctx *pipe, bool lock)
1972 {
1973 	/* cursor lock is per MPCC tree, so only need to lock one pipe per stream */
1974 	if (!pipe || pipe->top_pipe)
1975 		return;
1976 
1977 	/* Prevent cursor lock from stalling out cursor updates. */
1978 	if (lock)
1979 		delay_cursor_until_vupdate(dc, pipe);
1980 
1981 	if (pipe->stream && should_use_dmub_lock(pipe->stream->link)) {
1982 		union dmub_hw_lock_flags hw_locks = { 0 };
1983 		struct dmub_hw_lock_inst_flags inst_flags = { 0 };
1984 
1985 		hw_locks.bits.lock_cursor = 1;
1986 		inst_flags.opp_inst = pipe->stream_res.opp->inst;
1987 
1988 		dmub_hw_lock_mgr_cmd(dc->ctx->dmub_srv,
1989 					lock,
1990 					&hw_locks,
1991 					&inst_flags);
1992 	} else
1993 		dc->res_pool->mpc->funcs->cursor_lock(dc->res_pool->mpc,
1994 				pipe->stream_res.opp->inst, lock);
1995 }
1996 
1997 static bool wait_for_reset_trigger_to_occur(
1998 	struct dc_context *dc_ctx,
1999 	struct timing_generator *tg)
2000 {
2001 	bool rc = false;
2002 
2003 	/* To avoid an endless loop, we wait at most
2004 	 * frames_to_wait_on_triggered_reset frames for the reset to occur. */
2005 	const uint32_t frames_to_wait_on_triggered_reset = 10;
2006 	int i;
2007 
2008 	for (i = 0; i < frames_to_wait_on_triggered_reset; i++) {
2009 
2010 		if (!tg->funcs->is_counter_moving(tg)) {
2011 			DC_ERROR("TG counter is not moving!\n");
2012 			break;
2013 		}
2014 
2015 		if (tg->funcs->did_triggered_reset_occur(tg)) {
2016 			rc = true;
2017 			/* usually occurs at i=1 */
2018 			DC_SYNC_INFO("GSL: reset occurred at wait count: %d\n",
2019 					i);
2020 			break;
2021 		}
2022 
2023 		/* Wait for one frame. */
2024 		tg->funcs->wait_for_state(tg, CRTC_STATE_VACTIVE);
2025 		tg->funcs->wait_for_state(tg, CRTC_STATE_VBLANK);
2026 	}
2027 
2028 	if (!rc)
2029 		DC_ERROR("GSL: Timeout on reset trigger!\n");
2030 
2031 	return rc;
2032 }
2033 
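/*
 * Reduce the fraction *numerator / *denominator by repeatedly dividing out
 * small prime factors (up to 997).  When checkUint32Bounary is true, stop as
 * soon as both values fit into 32 bits and return whether that was achieved;
 * otherwise always return success.
 */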
2034 static uint64_t reduceSizeAndFraction(uint64_t *numerator,
2035 				      uint64_t *denominator,
2036 				      bool checkUint32Bounary)
2037 {
2038 	int i;
2039 	bool ret = checkUint32Bounary == false;
2040 	uint64_t max_int32 = 0xffffffff;
2041 	uint64_t num, denom;
2042 	static const uint16_t prime_numbers[] = {
2043 		2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43,
2044 		47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103,
2045 		107, 109, 113, 127, 131, 137, 139, 149, 151, 157, 163,
2046 		167, 173, 179, 181, 191, 193, 197, 199, 211, 223, 227,
2047 		229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281,
2048 		283, 293, 307, 311, 313, 317, 331, 337, 347, 349, 353,
2049 		359, 367, 373, 379, 383, 389, 397, 401, 409, 419, 421,
2050 		431, 433, 439, 443, 449, 457, 461, 463, 467, 479, 487,
2051 		491, 499, 503, 509, 521, 523, 541, 547, 557, 563, 569,
2052 		571, 577, 587, 593, 599, 601, 607, 613, 617, 619, 631,
2053 		641, 643, 647, 653, 659, 661, 673, 677, 683, 691, 701,
2054 		709, 719, 727, 733, 739, 743, 751, 757, 761, 769, 773,
2055 		787, 797, 809, 811, 821, 823, 827, 829, 839, 853, 857,
2056 		859, 863, 877, 881, 883, 887, 907, 911, 919, 929, 937,
2057 		941, 947, 953, 967, 971, 977, 983, 991, 997};
2058 	int count = ARRAY_SIZE(prime_numbers);
2059 
2060 	num = *numerator;
2061 	denom = *denominator;
2062 	for (i = 0; i < count; i++) {
2063 		uint32_t num_remainder, denom_remainder;
2064 		uint64_t num_result, denom_result;
2065 		if (checkUint32Bounary &&
2066 			num <= max_int32 && denom <= max_int32) {
2067 			ret = true;
2068 			break;
2069 		}
2070 		do {
2071 			num_result = div_u64_rem(num, prime_numbers[i], &num_remainder);
2072 			denom_result = div_u64_rem(denom, prime_numbers[i], &denom_remainder);
2073 			if (num_remainder == 0 && denom_remainder == 0) {
2074 				num = num_result;
2075 				denom = denom_result;
2076 			}
2077 		} while (num_remainder == 0 && denom_remainder == 0);
2078 	}
2079 	*numerator = num;
2080 	*denominator = denom;
2081 	return ret;
2082 }
2083 
2084 static bool is_low_refresh_rate(struct pipe_ctx *pipe)
2085 {
2086 	uint32_t master_pipe_refresh_rate =
2087 		pipe->stream->timing.pix_clk_100hz * 100 /
2088 		pipe->stream->timing.h_total /
2089 		pipe->stream->timing.v_total;
2090 	return master_pipe_refresh_rate <= 30;
2091 }
2092 
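/*
 * Effective pixel clock divider used for DTO programming: doubled for low
 * (<= 30 Hz) refresh rates when requested, doubled again for YCbCr 4:2:0
 * pixel encoding, and multiplied by the number of ODM-combined pipes.
 */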
2093 static uint8_t get_clock_divider(struct pipe_ctx *pipe,
2094 				 bool account_low_refresh_rate)
2095 {
2096 	uint32_t clock_divider = 1;
2097 	uint32_t numpipes = 1;
2098 
2099 	if (account_low_refresh_rate && is_low_refresh_rate(pipe))
2100 		clock_divider *= 2;
2101 
2102 	if (pipe->stream_res.pix_clk_params.pixel_encoding == PIXEL_ENCODING_YCBCR420)
2103 		clock_divider *= 2;
2104 
2105 	while (pipe->next_odm_pipe) {
2106 		pipe = pipe->next_odm_pipe;
2107 		numpipes++;
2108 	}
2109 	clock_divider *= numpipes;
2110 
2111 	return clock_divider;
2112 }
2113 
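/*
 * Override the DP DTO of every non-embedded pipe in the group so that its
 * frame rate tracks the embedded (eDP) timing described by
 * dc->config.vblank_alignment_dto_params.  Returns the index of the pipe to
 * use as the vblank-alignment master, or -1 if no DTO could be set up.
 */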
2114 static int dcn10_align_pixel_clocks(struct dc *dc, int group_size,
2115 				    struct pipe_ctx *grouped_pipes[])
2116 {
2117 	struct dc_context *dc_ctx = dc->ctx;
2118 	int i, master = -1, embedded = -1;
2119 	struct dc_crtc_timing *hw_crtc_timing;
2120 	uint64_t phase[MAX_PIPES];
2121 	uint64_t modulo[MAX_PIPES];
2122 	unsigned int pclk;
2123 
2124 	uint32_t embedded_pix_clk_100hz;
2125 	uint16_t embedded_h_total;
2126 	uint16_t embedded_v_total;
2127 	uint32_t dp_ref_clk_100hz =
2128 		dc->res_pool->dp_clock_source->ctx->dc->clk_mgr->dprefclk_khz*10;
2129 
2130 	hw_crtc_timing = kcalloc(MAX_PIPES, sizeof(*hw_crtc_timing), GFP_KERNEL);
2131 	if (!hw_crtc_timing)
2132 		return master;
2133 
2134 	if (dc->config.vblank_alignment_dto_params &&
2135 		dc->res_pool->dp_clock_source->funcs->override_dp_pix_clk) {
2136 		embedded_h_total =
2137 			(dc->config.vblank_alignment_dto_params >> 32) & 0x7FFF;
2138 		embedded_v_total =
2139 			(dc->config.vblank_alignment_dto_params >> 48) & 0x7FFF;
2140 		embedded_pix_clk_100hz =
2141 			dc->config.vblank_alignment_dto_params & 0xFFFFFFFF;
2142 
2143 		for (i = 0; i < group_size; i++) {
2144 			grouped_pipes[i]->stream_res.tg->funcs->get_hw_timing(
2145 					grouped_pipes[i]->stream_res.tg,
2146 					&hw_crtc_timing[i]);
2147 			dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
2148 				dc->res_pool->dp_clock_source,
2149 				grouped_pipes[i]->stream_res.tg->inst,
2150 				&pclk);
2151 			hw_crtc_timing[i].pix_clk_100hz = pclk;
2152 			if (dc_is_embedded_signal(
2153 					grouped_pipes[i]->stream->signal)) {
2154 				embedded = i;
2155 				master = i;
2156 				phase[i] = embedded_pix_clk_100hz*100;
2157 				modulo[i] = dp_ref_clk_100hz*100;
2158 			} else {
2159 
2160 				phase[i] = (uint64_t)embedded_pix_clk_100hz*
2161 					hw_crtc_timing[i].h_total*
2162 					hw_crtc_timing[i].v_total;
2163 				phase[i] = div_u64(phase[i], get_clock_divider(grouped_pipes[i], true));
2164 				modulo[i] = (uint64_t)dp_ref_clk_100hz*
2165 					embedded_h_total*
2166 					embedded_v_total;
2167 
2168 				if (reduceSizeAndFraction(&phase[i],
2169 						&modulo[i], true) == false) {
2170 					/*
2171 					 * This stops this timing from being
2172 					 * reported as synchronizable.
2173 					 */
2174 					DC_SYNC_INFO("Failed to reduce DTO parameters\n");
2175 					grouped_pipes[i]->stream->has_non_synchronizable_pclk = true;
2176 				}
2177 			}
2178 		}
2179 
2180 		for (i = 0; i < group_size; i++) {
2181 			if (i != embedded && !grouped_pipes[i]->stream->has_non_synchronizable_pclk) {
2182 				dc->res_pool->dp_clock_source->funcs->override_dp_pix_clk(
2183 					dc->res_pool->dp_clock_source,
2184 					grouped_pipes[i]->stream_res.tg->inst,
2185 					phase[i], modulo[i]);
2186 				dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
2187 					dc->res_pool->dp_clock_source,
2188 					grouped_pipes[i]->stream_res.tg->inst, &pclk);
2189 				grouped_pipes[i]->stream->timing.pix_clk_100hz =
2190 					pclk*get_clock_divider(grouped_pipes[i], false);
2191 				if (master == -1)
2192 					master = i;
2193 			}
2194 		}
2195 
2196 	}
2197 
2198 	kfree(hw_crtc_timing);
2199 	return master;
2200 }
2201 
2202 void dcn10_enable_vblanks_synchronization(
2203 	struct dc *dc,
2204 	int group_index,
2205 	int group_size,
2206 	struct pipe_ctx *grouped_pipes[])
2207 {
2208 	struct dc_context *dc_ctx = dc->ctx;
2209 	struct output_pixel_processor *opp;
2210 	struct timing_generator *tg;
2211 	int i, width, height, master;
2212 
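	/*
	 * Temporarily program the OPP DPG dimensions of every pipe in the
	 * group except the first to roughly double the OTG active height
	 * while the DTOs and vblanks are re-aligned; the real active size is
	 * restored at the end of this function.
	 */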
2213 	for (i = 1; i < group_size; i++) {
2214 		opp = grouped_pipes[i]->stream_res.opp;
2215 		tg = grouped_pipes[i]->stream_res.tg;
2216 		tg->funcs->get_otg_active_size(tg, &width, &height);
2217 		if (opp->funcs->opp_program_dpg_dimensions)
2218 			opp->funcs->opp_program_dpg_dimensions(opp, width, 2*(height) + 1);
2219 	}
2220 
2221 	for (i = 0; i < group_size; i++) {
2222 		if (grouped_pipes[i]->stream == NULL)
2223 			continue;
2224 		grouped_pipes[i]->stream->vblank_synchronized = false;
2225 		grouped_pipes[i]->stream->has_non_synchronizable_pclk = false;
2226 	}
2227 
2228 	DC_SYNC_INFO("Aligning DP DTOs\n");
2229 
2230 	master = dcn10_align_pixel_clocks(dc, group_size, grouped_pipes);
2231 
2232 	DC_SYNC_INFO("Synchronizing VBlanks\n");
2233 
2234 	if (master >= 0) {
2235 		for (i = 0; i < group_size; i++) {
2236 			if (i != master && !grouped_pipes[i]->stream->has_non_synchronizable_pclk)
2237 				grouped_pipes[i]->stream_res.tg->funcs->align_vblanks(
2238 					grouped_pipes[master]->stream_res.tg,
2239 					grouped_pipes[i]->stream_res.tg,
2240 					grouped_pipes[master]->stream->timing.pix_clk_100hz,
2241 					grouped_pipes[i]->stream->timing.pix_clk_100hz,
2242 					get_clock_divider(grouped_pipes[master], false),
2243 					get_clock_divider(grouped_pipes[i], false));
2244 			grouped_pipes[i]->stream->vblank_synchronized = true;
2245 		}
2246 		grouped_pipes[master]->stream->vblank_synchronized = true;
2247 		DC_SYNC_INFO("Sync complete\n");
2248 	}
2249 
2250 	for (i = 1; i < group_size; i++) {
2251 		opp = grouped_pipes[i]->stream_res.opp;
2252 		tg = grouped_pipes[i]->stream_res.tg;
2253 		tg->funcs->get_otg_active_size(tg, &width, &height);
2254 		if (opp->funcs->opp_program_dpg_dimensions)
2255 			opp->funcs->opp_program_dpg_dimensions(opp, width, height);
2256 	}
2257 }
2258 
2259 void dcn10_enable_timing_synchronization(
2260 	struct dc *dc,
2261 	int group_index,
2262 	int group_size,
2263 	struct pipe_ctx *grouped_pipes[])
2264 {
2265 	struct dc_context *dc_ctx = dc->ctx;
2266 	struct output_pixel_processor *opp;
2267 	struct timing_generator *tg;
2268 	int i, width, height;
2269 
2270 	DC_SYNC_INFO("Setting up OTG reset trigger\n");
2271 
2272 	for (i = 1; i < group_size; i++) {
2273 		if (grouped_pipes[i]->stream && grouped_pipes[i]->stream->mall_stream_config.type == SUBVP_PHANTOM)
2274 			continue;
2275 
2276 		opp = grouped_pipes[i]->stream_res.opp;
2277 		tg = grouped_pipes[i]->stream_res.tg;
2278 		tg->funcs->get_otg_active_size(tg, &width, &height);
2279 		if (opp->funcs->opp_program_dpg_dimensions)
2280 			opp->funcs->opp_program_dpg_dimensions(opp, width, 2*(height) + 1);
2281 	}
2282 
2283 	for (i = 0; i < group_size; i++) {
2284 		if (grouped_pipes[i]->stream == NULL)
2285 			continue;
2286 
2287 		if (grouped_pipes[i]->stream && grouped_pipes[i]->stream->mall_stream_config.type == SUBVP_PHANTOM)
2288 			continue;
2289 
2290 		grouped_pipes[i]->stream->vblank_synchronized = false;
2291 	}
2292 
2293 	for (i = 1; i < group_size; i++) {
2294 		if (grouped_pipes[i]->stream && grouped_pipes[i]->stream->mall_stream_config.type == SUBVP_PHANTOM)
2295 			continue;
2296 
2297 		grouped_pipes[i]->stream_res.tg->funcs->enable_reset_trigger(
2298 				grouped_pipes[i]->stream_res.tg,
2299 				grouped_pipes[0]->stream_res.tg->inst);
2300 	}
2301 
2302 	DC_SYNC_INFO("Waiting for trigger\n");
2303 
2304 	/* We only need to check one pipe for the reset, as all the others are
2305 	 * synchronized to it. Look at the last pipe programmed to reset.
2306 	 */
2307 
2308 	if (grouped_pipes[1]->stream && grouped_pipes[1]->stream->mall_stream_config.type != SUBVP_PHANTOM)
2309 		wait_for_reset_trigger_to_occur(dc_ctx, grouped_pipes[1]->stream_res.tg);
2310 
2311 	for (i = 1; i < group_size; i++) {
2312 		if (grouped_pipes[i]->stream && grouped_pipes[i]->stream->mall_stream_config.type == SUBVP_PHANTOM)
2313 			continue;
2314 
2315 		grouped_pipes[i]->stream_res.tg->funcs->disable_reset_trigger(
2316 				grouped_pipes[i]->stream_res.tg);
2317 	}
2318 
2319 	for (i = 1; i < group_size; i++) {
2320 		if (grouped_pipes[i]->stream && grouped_pipes[i]->stream->mall_stream_config.type == SUBVP_PHANTOM)
2321 			continue;
2322 
2323 		opp = grouped_pipes[i]->stream_res.opp;
2324 		tg = grouped_pipes[i]->stream_res.tg;
2325 		tg->funcs->get_otg_active_size(tg, &width, &height);
2326 		if (opp->funcs->opp_program_dpg_dimensions)
2327 			opp->funcs->opp_program_dpg_dimensions(opp, width, height);
2328 	}
2329 
2330 	DC_SYNC_INFO("Sync complete\n");
2331 }
2332 
2333 void dcn10_enable_per_frame_crtc_position_reset(
2334 	struct dc *dc,
2335 	int group_size,
2336 	struct pipe_ctx *grouped_pipes[])
2337 {
2338 	struct dc_context *dc_ctx = dc->ctx;
2339 	int i;
2340 
2341 	DC_SYNC_INFO("Setting up\n");
2342 	for (i = 0; i < group_size; i++)
2343 		if (grouped_pipes[i]->stream_res.tg->funcs->enable_crtc_reset)
2344 			grouped_pipes[i]->stream_res.tg->funcs->enable_crtc_reset(
2345 					grouped_pipes[i]->stream_res.tg,
2346 					0,
2347 					&grouped_pipes[i]->stream->triggered_crtc_reset);
2348 
2349 	DC_SYNC_INFO("Waiting for trigger\n");
2350 
2351 	for (i = 0; i < group_size; i++)
2352 		wait_for_reset_trigger_to_occur(dc_ctx, grouped_pipes[i]->stream_res.tg);
2353 
2354 	DC_SYNC_INFO("Multi-display sync is complete\n");
2355 }
2356 
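/*
 * Read back the system aperture currently programmed in MMHUB (default, low
 * and high addresses) and convert the raw page/logical-address fields into
 * the byte addresses HUBP expects.
 */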
2357 static void mmhub_read_vm_system_aperture_settings(struct dcn10_hubp *hubp1,
2358 		struct vm_system_aperture_param *apt,
2359 		struct dce_hwseq *hws)
2360 {
2361 	PHYSICAL_ADDRESS_LOC physical_page_number;
2362 	uint32_t logical_addr_low;
2363 	uint32_t logical_addr_high;
2364 
2365 	REG_GET(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
2366 			PHYSICAL_PAGE_NUMBER_MSB, &physical_page_number.high_part);
2367 	REG_GET(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
2368 			PHYSICAL_PAGE_NUMBER_LSB, &physical_page_number.low_part);
2369 
2370 	REG_GET(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
2371 			LOGICAL_ADDR, &logical_addr_low);
2372 
2373 	REG_GET(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
2374 			LOGICAL_ADDR, &logical_addr_high);
2375 
2376 	apt->sys_default.quad_part =  physical_page_number.quad_part << 12;
2377 	apt->sys_low.quad_part =  (int64_t)logical_addr_low << 18;
2378 	apt->sys_high.quad_part =  (int64_t)logical_addr_high << 18;
2379 }
2380 
2381 /* Temporarily read the settings back from registers; in the future the values will come from KMD directly. */
2382 static void mmhub_read_vm_context0_settings(struct dcn10_hubp *hubp1,
2383 		struct vm_context0_param *vm0,
2384 		struct dce_hwseq *hws)
2385 {
2386 	PHYSICAL_ADDRESS_LOC fb_base;
2387 	PHYSICAL_ADDRESS_LOC fb_offset;
2388 	uint32_t fb_base_value;
2389 	uint32_t fb_offset_value;
2390 
2391 	REG_GET(DCHUBBUB_SDPIF_FB_BASE, SDPIF_FB_BASE, &fb_base_value);
2392 	REG_GET(DCHUBBUB_SDPIF_FB_OFFSET, SDPIF_FB_OFFSET, &fb_offset_value);
2393 
2394 	REG_GET(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
2395 			PAGE_DIRECTORY_ENTRY_HI32, &vm0->pte_base.high_part);
2396 	REG_GET(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
2397 			PAGE_DIRECTORY_ENTRY_LO32, &vm0->pte_base.low_part);
2398 
2399 	REG_GET(VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
2400 			LOGICAL_PAGE_NUMBER_HI4, &vm0->pte_start.high_part);
2401 	REG_GET(VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
2402 			LOGICAL_PAGE_NUMBER_LO32, &vm0->pte_start.low_part);
2403 
2404 	REG_GET(VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
2405 			LOGICAL_PAGE_NUMBER_HI4, &vm0->pte_end.high_part);
2406 	REG_GET(VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
2407 			LOGICAL_PAGE_NUMBER_LO32, &vm0->pte_end.low_part);
2408 
2409 	REG_GET(VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32,
2410 			PHYSICAL_PAGE_ADDR_HI4, &vm0->fault_default.high_part);
2411 	REG_GET(VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32,
2412 			PHYSICAL_PAGE_ADDR_LO32, &vm0->fault_default.low_part);
2413 
2414 	/*
2415 	 * The values in VM_CONTEXT0_PAGE_TABLE_BASE_ADDR are in UMA space.
2416 	 * Therefore we need to do
2417 	 * DCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR = VM_CONTEXT0_PAGE_TABLE_BASE_ADDR
2418 	 * - DCHUBBUB_SDPIF_FB_OFFSET + DCHUBBUB_SDPIF_FB_BASE
2419 	 */
2420 	fb_base.quad_part = (uint64_t)fb_base_value << 24;
2421 	fb_offset.quad_part = (uint64_t)fb_offset_value << 24;
2422 	vm0->pte_base.quad_part += fb_base.quad_part;
2423 	vm0->pte_base.quad_part -= fb_offset.quad_part;
2424 }
2425 
2426 
2427 static void dcn10_program_pte_vm(struct dce_hwseq *hws, struct hubp *hubp)
2428 {
2429 	struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
2430 	struct vm_system_aperture_param apt = {0};
2431 	struct vm_context0_param vm0 = {0};
2432 
2433 	mmhub_read_vm_system_aperture_settings(hubp1, &apt, hws);
2434 	mmhub_read_vm_context0_settings(hubp1, &vm0, hws);
2435 
2436 	hubp->funcs->hubp_set_vm_system_aperture_settings(hubp, &apt);
2437 	hubp->funcs->hubp_set_vm_context0_settings(hubp, &vm0);
2438 }
2439 
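/*
 * Bring the plane's HUBP out of power gating and enable the clocks it needs:
 * DCFCLK to the DCHUB, the OPP pipe clock, the VM apertures when GPU VM is
 * supported, and the flip interrupt for a top pipe that requests it.
 */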
2440 static void dcn10_enable_plane(
2441 	struct dc *dc,
2442 	struct pipe_ctx *pipe_ctx,
2443 	struct dc_state *context)
2444 {
2445 	struct dce_hwseq *hws = dc->hwseq;
2446 
2447 	if (dc->debug.sanity_checks) {
2448 		hws->funcs.verify_allow_pstate_change_high(dc);
2449 	}
2450 
2451 	undo_DEGVIDCN10_253_wa(dc);
2452 
2453 	power_on_plane(dc->hwseq,
2454 		pipe_ctx->plane_res.hubp->inst);
2455 
2456 	/* enable DCFCLK current DCHUB */
2457 	pipe_ctx->plane_res.hubp->funcs->hubp_clk_cntl(pipe_ctx->plane_res.hubp, true);
2458 
2459 	/* make sure OPP_PIPE_CLOCK_EN = 1 */
2460 	pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control(
2461 			pipe_ctx->stream_res.opp,
2462 			true);
2463 
2464 	if (dc->config.gpu_vm_support)
2465 		dcn10_program_pte_vm(hws, pipe_ctx->plane_res.hubp);
2466 
2467 	if (dc->debug.sanity_checks) {
2468 		hws->funcs.verify_allow_pstate_change_high(dc);
2469 	}
2470 
2471 	if (!pipe_ctx->top_pipe
2472 		&& pipe_ctx->plane_state
2473 		&& pipe_ctx->plane_state->flip_int_enabled
2474 		&& pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_int)
2475 			pipe_ctx->plane_res.hubp->funcs->hubp_set_flip_int(pipe_ctx->plane_res.hubp);
2476 
2477 }
2478 
2479 void dcn10_program_gamut_remap(struct pipe_ctx *pipe_ctx)
2480 {
2481 	int i = 0;
2482 	struct dpp_grph_csc_adjustment adjust;
2483 	memset(&adjust, 0, sizeof(adjust));
2484 	adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS;
2485 
2486 
2487 	if (pipe_ctx->stream->gamut_remap_matrix.enable_remap == true) {
2488 		adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
2489 		for (i = 0; i < CSC_TEMPERATURE_MATRIX_SIZE; i++)
2490 			adjust.temperature_matrix[i] =
2491 				pipe_ctx->stream->gamut_remap_matrix.matrix[i];
2492 	} else if (pipe_ctx->plane_state &&
2493 		   pipe_ctx->plane_state->gamut_remap_matrix.enable_remap == true) {
2494 		adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
2495 		for (i = 0; i < CSC_TEMPERATURE_MATRIX_SIZE; i++)
2496 			adjust.temperature_matrix[i] =
2497 				pipe_ctx->plane_state->gamut_remap_matrix.matrix[i];
2498 	}
2499 
2500 	pipe_ctx->plane_res.dpp->funcs->dpp_set_gamut_remap(pipe_ctx->plane_res.dpp, &adjust);
2501 }
2502 
2503 
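/*
 * The RGB bias workaround is only required for a rear MPO plane
 * (layer_index > 0) in an RGB output colorspace while the front plane
 * (layer_index 0) at the top of the pipe chain is still visible.
 */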
2504 static bool dcn10_is_rear_mpo_fix_required(struct pipe_ctx *pipe_ctx, enum dc_color_space colorspace)
2505 {
2506 	if (pipe_ctx->plane_state && pipe_ctx->plane_state->layer_index > 0 && is_rgb_cspace(colorspace)) {
2507 		if (pipe_ctx->top_pipe) {
2508 			struct pipe_ctx *top = pipe_ctx->top_pipe;
2509 
2510 			while (top->top_pipe)
2511 				top = top->top_pipe; // Traverse to top pipe_ctx
2512 			if (top->plane_state && top->plane_state->layer_index == 0)
2513 				return true; // Front MPO plane not hidden
2514 		}
2515 	}
2516 	return false;
2517 }
2518 
2519 static void dcn10_set_csc_adjustment_rgb_mpo_fix(struct pipe_ctx *pipe_ctx, uint16_t *matrix)
2520 {
2521 	// Override rear plane RGB bias to fix MPO brightness
2522 	uint16_t rgb_bias = matrix[3];
2523 
2524 	matrix[3] = 0;
2525 	matrix[7] = 0;
2526 	matrix[11] = 0;
2527 	pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment(pipe_ctx->plane_res.dpp, matrix);
2528 	matrix[3] = rgb_bias;
2529 	matrix[7] = rgb_bias;
2530 	matrix[11] = rgb_bias;
2531 }
2532 
2533 void dcn10_program_output_csc(struct dc *dc,
2534 		struct pipe_ctx *pipe_ctx,
2535 		enum dc_color_space colorspace,
2536 		uint16_t *matrix,
2537 		int opp_id)
2538 {
2539 	if (pipe_ctx->stream->csc_color_matrix.enable_adjustment == true) {
2540 		if (pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment != NULL) {
2541 
2542 			/* MPO is broken with RGB colorspaces when the OCSC matrix
2543 			 * brightness offset is >= 0 on DCN1, because the OCSC sits before
2544 			 * the MPC: blending adds the front and rear offsets to the rear plane.
2545 			 *
2546 			 * The fix is to set the RGB bias to 0 on the rear plane; the top
2547 			 * plane's black pixels then add the offset once instead of rear + front.
2548 			 */
2549 
2550 			int16_t rgb_bias = matrix[3];
2551 			// matrix[3/7/11] are all the same offset value
2552 
2553 			if (rgb_bias > 0 && dcn10_is_rear_mpo_fix_required(pipe_ctx, colorspace)) {
2554 				dcn10_set_csc_adjustment_rgb_mpo_fix(pipe_ctx, matrix);
2555 			} else {
2556 				pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment(pipe_ctx->plane_res.dpp, matrix);
2557 			}
2558 		}
2559 	} else {
2560 		if (pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_default != NULL)
2561 			pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_default(pipe_ctx->plane_res.dpp, colorspace);
2562 	}
2563 }
2564 
2565 static void dcn10_update_dpp(struct dpp *dpp, struct dc_plane_state *plane_state)
2566 {
2567 	struct dc_bias_and_scale bns_params = {0};
2568 
2569 	// program the input csc
2570 	dpp->funcs->dpp_setup(dpp,
2571 			plane_state->format,
2572 			EXPANSION_MODE_ZERO,
2573 			plane_state->input_csc_color_matrix,
2574 			plane_state->color_space,
2575 			NULL);
2576 
2577 	//set scale and bias registers
2578 	build_prescale_params(&bns_params, plane_state);
2579 	if (dpp->funcs->dpp_program_bias_and_scale)
2580 		dpp->funcs->dpp_program_bias_and_scale(dpp, &bns_params);
2581 }
2582 
2583 void dcn10_update_visual_confirm_color(struct dc *dc, struct pipe_ctx *pipe_ctx, struct tg_color *color, int mpcc_id)
2584 {
2585 	struct mpc *mpc = dc->res_pool->mpc;
2586 
2587 	if (dc->debug.visual_confirm == VISUAL_CONFIRM_HDR)
2588 		get_hdr_visual_confirm_color(pipe_ctx, color);
2589 	else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SURFACE)
2590 		get_surface_visual_confirm_color(pipe_ctx, color);
2591 	else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SWIZZLE)
2592 		get_surface_tile_visual_confirm_color(pipe_ctx, color);
2593 	else
2594 		color_space_to_black_color(
2595 				dc, pipe_ctx->stream->output_color_space, color);
2596 
2597 	if (mpc->funcs->set_bg_color) {
2598 		memcpy(&pipe_ctx->plane_state->visual_confirm_color, color, sizeof(struct tg_color));
2599 		mpc->funcs->set_bg_color(mpc, color, mpcc_id);
2600 	}
2601 }
2602 
2603 void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
2604 {
2605 	struct hubp *hubp = pipe_ctx->plane_res.hubp;
2606 	struct mpcc_blnd_cfg blnd_cfg = {0};
2607 	bool per_pixel_alpha = pipe_ctx->plane_state->per_pixel_alpha && pipe_ctx->bottom_pipe;
2608 	int mpcc_id;
2609 	struct mpcc *new_mpcc;
2610 	struct mpc *mpc = dc->res_pool->mpc;
2611 	struct mpc_tree *mpc_tree_params = &(pipe_ctx->stream_res.opp->mpc_tree_params);
2612 
2613 	blnd_cfg.overlap_only = false;
2614 	blnd_cfg.global_gain = 0xff;
2615 
2616 	if (per_pixel_alpha) {
2617 		/* DCN1.0 has output CM before MPC which seems to screw with
2618 		 * pre-multiplied alpha.
2619 		 */
2620 		blnd_cfg.pre_multiplied_alpha = (is_rgb_cspace(
2621 				pipe_ctx->stream->output_color_space)
2622 						&& pipe_ctx->plane_state->pre_multiplied_alpha);
2623 		if (pipe_ctx->plane_state->global_alpha) {
2624 			blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA_COMBINED_GLOBAL_GAIN;
2625 			blnd_cfg.global_gain = pipe_ctx->plane_state->global_alpha_value;
2626 		} else {
2627 			blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
2628 		}
2629 	} else {
2630 		blnd_cfg.pre_multiplied_alpha = false;
2631 		blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA;
2632 	}
2633 
2634 	if (pipe_ctx->plane_state->global_alpha)
2635 		blnd_cfg.global_alpha = pipe_ctx->plane_state->global_alpha_value;
2636 	else
2637 		blnd_cfg.global_alpha = 0xff;
2638 
2639 	/*
2640 	 * TODO: remove hack
2641 	 * Note: currently there is a bug in init_hw such that
2642 	 * on resume from hibernate, BIOS sets up MPCC0, and
2643 	 * we do mpcc_remove but the mpcc cannot go to idle
2644 	 * after remove. This causes us to pick mpcc1 here,
2645 	 * which causes a pstate hang for yet unknown reason.
2646 	 */
2647 	mpcc_id = hubp->inst;
2648 
2649 	/* If there is no full update, we don't need to touch the MPC tree. */
2650 	if (!pipe_ctx->plane_state->update_flags.bits.full_update) {
2651 		mpc->funcs->update_blending(mpc, &blnd_cfg, mpcc_id);
2652 		dc->hwss.update_visual_confirm_color(dc, pipe_ctx, &blnd_cfg.black_color, mpcc_id);
2653 		return;
2654 	}
2655 
2656 	/* check if this MPCC is already being used */
2657 	new_mpcc = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, mpcc_id);
2658 	/* remove MPCC if being used */
2659 	if (new_mpcc != NULL)
2660 		mpc->funcs->remove_mpcc(mpc, mpc_tree_params, new_mpcc);
2661 	else
2662 		if (dc->debug.sanity_checks)
2663 			mpc->funcs->assert_mpcc_idle_before_connect(
2664 					dc->res_pool->mpc, mpcc_id);
2665 
2666 	/* Call MPC to insert new plane */
2667 	new_mpcc = mpc->funcs->insert_plane(dc->res_pool->mpc,
2668 			mpc_tree_params,
2669 			&blnd_cfg,
2670 			NULL,
2671 			NULL,
2672 			hubp->inst,
2673 			mpcc_id);
2674 	dc->hwss.update_visual_confirm_color(dc, pipe_ctx, &blnd_cfg.black_color, mpcc_id);
2675 
2676 	ASSERT(new_mpcc != NULL);
2677 	hubp->opp_id = pipe_ctx->stream_res.opp->inst;
2678 	hubp->mpcc_id = mpcc_id;
2679 }
2680 
2681 static void update_scaler(struct pipe_ctx *pipe_ctx)
2682 {
2683 	bool per_pixel_alpha =
2684 			pipe_ctx->plane_state->per_pixel_alpha && pipe_ctx->bottom_pipe;
2685 
2686 	pipe_ctx->plane_res.scl_data.lb_params.alpha_en = per_pixel_alpha;
2687 	pipe_ctx->plane_res.scl_data.lb_params.depth = LB_PIXEL_DEPTH_36BPP;
2688 	/* scaler configuration */
2689 	pipe_ctx->plane_res.dpp->funcs->dpp_set_scaler(
2690 			pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data);
2691 }
2692 
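/*
 * Program HUBP and DPP for one pipe according to the plane update flags:
 * DPP clock dividers on a full update, VTG selection and DLG/TTU/RQ
 * registers, input CSC and bias/scale, MPCC blending, scaler and viewport,
 * cursor, gamut remap and output CSC, the surface config and finally the
 * surface address.
 */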
2693 static void dcn10_update_dchubp_dpp(
2694 	struct dc *dc,
2695 	struct pipe_ctx *pipe_ctx,
2696 	struct dc_state *context)
2697 {
2698 	struct dce_hwseq *hws = dc->hwseq;
2699 	struct hubp *hubp = pipe_ctx->plane_res.hubp;
2700 	struct dpp *dpp = pipe_ctx->plane_res.dpp;
2701 	struct dc_plane_state *plane_state = pipe_ctx->plane_state;
2702 	struct plane_size size = plane_state->plane_size;
2703 	unsigned int compat_level = 0;
2704 	bool should_divided_by_2 = false;
2705 
2706 	/* Depending on the DML calculation, the DPP clock value may change dynamically. */
2707 	/* If the requested max dpp clk is lower than the current dispclk, there is
2708 	 * no need to divide by 2.
2709 	 */
2710 	if (plane_state->update_flags.bits.full_update) {
2711 
2712 		/* The newly calculated dispclk and dppclk are stored in
2713 		 * context->bw_ctx.bw.dcn.clk.dispclk_khz / dppclk_khz, while the
2714 		 * current dispclk and dppclk come from dc->clk_mgr->clks.dispclk_khz.
2715 		 * dcn10_validate_bandwidth computes the new dispclk and dppclk.
2716 		 * dispclk is put into use after optimize_bandwidth, when
2717 		 * ramp_up_dispclk_with_dpp is called.
2718 		 * There are two places where dppclk is put into use. One is the
2719 		 * same location as dispclk. The other is within
2720 		 * update_dchubp_dpp, which happens between prepare_bandwidth and
2721 		 * optimize_bandwidth.
2722 		 * A dppclk update within update_dchubp_dpp means the new dispclk
2723 		 * and dppclk values are not in use at the same time. When the
2724 		 * clocks are being decreased, this can leave dppclk lower than in
2725 		 * the previous configuration and get the pipe stuck.
2726 		 * For example, with eDP + external DP, change the DP resolution
2727 		 * from 1920x1080x144hz to 1280x960x60hz.
2728 		 * before change: dispclk = 337889 dppclk = 337889
2729 		 * change mode, dcn10_validate_bandwidth calculates
2730 		 *                dispclk = 143122 dppclk = 143122
2731 		 * update_dchubp_dpp is executed before dispclk is updated, so
2732 		 * dispclk = 337889, but dppclk uses the new value dispclk / 2 =
2733 		 * 168944. This causes a pipe pstate warning.
2734 		 * Solution: between prepare_bandwidth and optimize_bandwidth, while
2735 		 * dispclk is going to be decreased, keep dppclk = dispclk.
2736 		 **/
2737 		if (context->bw_ctx.bw.dcn.clk.dispclk_khz <
2738 				dc->clk_mgr->clks.dispclk_khz)
2739 			should_divided_by_2 = false;
2740 		else
2741 			should_divided_by_2 =
2742 					context->bw_ctx.bw.dcn.clk.dppclk_khz <=
2743 					dc->clk_mgr->clks.dispclk_khz / 2;
2744 
2745 		dpp->funcs->dpp_dppclk_control(
2746 				dpp,
2747 				should_divided_by_2,
2748 				true);
2749 
2750 		if (dc->res_pool->dccg)
2751 			dc->res_pool->dccg->funcs->update_dpp_dto(
2752 					dc->res_pool->dccg,
2753 					dpp->inst,
2754 					pipe_ctx->plane_res.bw.dppclk_khz);
2755 		else
2756 			dc->clk_mgr->clks.dppclk_khz = should_divided_by_2 ?
2757 						dc->clk_mgr->clks.dispclk_khz / 2 :
2758 							dc->clk_mgr->clks.dispclk_khz;
2759 	}
2760 
2761 	/* TODO: Need an input parameter to tell which OTG the current DCHUB pipe is
2762 	 * tied to. VTG is within DCHUBBUB, which is a common block shared by each
2763 	 * pipe HUBP. VTG has a 1:1 mapping with OTG; each pipe HUBP selects which VTG to use.
2764 	 */
2765 	if (plane_state->update_flags.bits.full_update) {
2766 		hubp->funcs->hubp_vtg_sel(hubp, pipe_ctx->stream_res.tg->inst);
2767 
2768 		hubp->funcs->hubp_setup(
2769 			hubp,
2770 			&pipe_ctx->dlg_regs,
2771 			&pipe_ctx->ttu_regs,
2772 			&pipe_ctx->rq_regs,
2773 			&pipe_ctx->pipe_dlg_param);
2774 		hubp->funcs->hubp_setup_interdependent(
2775 			hubp,
2776 			&pipe_ctx->dlg_regs,
2777 			&pipe_ctx->ttu_regs);
2778 	}
2779 
2780 	size.surface_size = pipe_ctx->plane_res.scl_data.viewport;
2781 
2782 	if (plane_state->update_flags.bits.full_update ||
2783 		plane_state->update_flags.bits.bpp_change)
2784 		dcn10_update_dpp(dpp, plane_state);
2785 
2786 	if (plane_state->update_flags.bits.full_update ||
2787 		plane_state->update_flags.bits.per_pixel_alpha_change ||
2788 		plane_state->update_flags.bits.global_alpha_change)
2789 		hws->funcs.update_mpcc(dc, pipe_ctx);
2790 
2791 	if (plane_state->update_flags.bits.full_update ||
2792 		plane_state->update_flags.bits.per_pixel_alpha_change ||
2793 		plane_state->update_flags.bits.global_alpha_change ||
2794 		plane_state->update_flags.bits.scaling_change ||
2795 		plane_state->update_flags.bits.position_change) {
2796 		update_scaler(pipe_ctx);
2797 	}
2798 
2799 	if (plane_state->update_flags.bits.full_update ||
2800 		plane_state->update_flags.bits.scaling_change ||
2801 		plane_state->update_flags.bits.position_change) {
2802 		hubp->funcs->mem_program_viewport(
2803 			hubp,
2804 			&pipe_ctx->plane_res.scl_data.viewport,
2805 			&pipe_ctx->plane_res.scl_data.viewport_c);
2806 	}
2807 
2808 	if (pipe_ctx->stream->cursor_attributes.address.quad_part != 0) {
2809 		dc->hwss.set_cursor_position(pipe_ctx);
2810 		dc->hwss.set_cursor_attribute(pipe_ctx);
2811 
2812 		if (dc->hwss.set_cursor_sdr_white_level)
2813 			dc->hwss.set_cursor_sdr_white_level(pipe_ctx);
2814 	}
2815 
2816 	if (plane_state->update_flags.bits.full_update) {
2817 		/*gamut remap*/
2818 		dc->hwss.program_gamut_remap(pipe_ctx);
2819 
2820 		dc->hwss.program_output_csc(dc,
2821 				pipe_ctx,
2822 				pipe_ctx->stream->output_color_space,
2823 				pipe_ctx->stream->csc_color_matrix.matrix,
2824 				pipe_ctx->stream_res.opp->inst);
2825 	}
2826 
2827 	if (plane_state->update_flags.bits.full_update ||
2828 		plane_state->update_flags.bits.pixel_format_change ||
2829 		plane_state->update_flags.bits.horizontal_mirror_change ||
2830 		plane_state->update_flags.bits.rotation_change ||
2831 		plane_state->update_flags.bits.swizzle_change ||
2832 		plane_state->update_flags.bits.dcc_change ||
2833 		plane_state->update_flags.bits.bpp_change ||
2834 		plane_state->update_flags.bits.scaling_change ||
2835 		plane_state->update_flags.bits.plane_size_change) {
2836 		hubp->funcs->hubp_program_surface_config(
2837 			hubp,
2838 			plane_state->format,
2839 			&plane_state->tiling_info,
2840 			&size,
2841 			plane_state->rotation,
2842 			&plane_state->dcc,
2843 			plane_state->horizontal_mirror,
2844 			compat_level);
2845 	}
2846 
2847 	hubp->power_gated = false;
2848 
2849 	hws->funcs.update_plane_addr(dc, pipe_ctx);
2850 
2851 	if (is_pipe_tree_visible(pipe_ctx))
2852 		hubp->funcs->set_blank(hubp, false);
2853 }
2854 
2855 void dcn10_blank_pixel_data(
2856 		struct dc *dc,
2857 		struct pipe_ctx *pipe_ctx,
2858 		bool blank)
2859 {
2860 	enum dc_color_space color_space;
2861 	struct tg_color black_color = {0};
2862 	struct stream_resource *stream_res = &pipe_ctx->stream_res;
2863 	struct dc_stream_state *stream = pipe_ctx->stream;
2864 
2865 	/* program otg blank color */
2866 	color_space = stream->output_color_space;
2867 	color_space_to_black_color(dc, color_space, &black_color);
2868 
2869 	/*
2870 	 * The way 4:2:0 is packed, 2 channels carry the Y component and 1 channel
2871 	 * alternates between Cb and Cr, so both channels need the pixel
2872 	 * value for Y.
2873 	 */
2874 	if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
2875 		black_color.color_r_cr = black_color.color_g_y;
2876 
2877 
2878 	if (stream_res->tg->funcs->set_blank_color)
2879 		stream_res->tg->funcs->set_blank_color(
2880 				stream_res->tg,
2881 				&black_color);
2882 
2883 	if (!blank) {
2884 		if (stream_res->tg->funcs->set_blank)
2885 			stream_res->tg->funcs->set_blank(stream_res->tg, blank);
2886 		if (stream_res->abm) {
2887 			dc->hwss.set_pipe(pipe_ctx);
2888 			stream_res->abm->funcs->set_abm_level(stream_res->abm, stream->abm_level);
2889 		}
2890 	} else if (blank) {
2891 		dc->hwss.set_abm_immediate_disable(pipe_ctx);
2892 		if (stream_res->tg->funcs->set_blank) {
2893 			stream_res->tg->funcs->wait_for_state(stream_res->tg, CRTC_STATE_VBLANK);
2894 			stream_res->tg->funcs->set_blank(stream_res->tg, blank);
2895 		}
2896 	}
2897 }
2898 
2899 void dcn10_set_hdr_multiplier(struct pipe_ctx *pipe_ctx)
2900 {
2901 	struct fixed31_32 multiplier = pipe_ctx->plane_state->hdr_mult;
2902 	uint32_t hw_mult = 0x1f000; // 1.0 default multiplier
2903 	struct custom_float_format fmt;
2904 
2905 	fmt.exponenta_bits = 6;
2906 	fmt.mantissa_bits = 12;
2907 	fmt.sign = true;
2908 
2909 
2910 	if (!dc_fixpt_eq(multiplier, dc_fixpt_from_int(0))) // check != 0
2911 		convert_to_custom_float_format(multiplier, &fmt, &hw_mult);
2912 
2913 	pipe_ctx->plane_res.dpp->funcs->dpp_set_hdr_multiplier(
2914 			pipe_ctx->plane_res.dpp, hw_mult);
2915 }
2916 
2917 void dcn10_program_pipe(
2918 		struct dc *dc,
2919 		struct pipe_ctx *pipe_ctx,
2920 		struct dc_state *context)
2921 {
2922 	struct dce_hwseq *hws = dc->hwseq;
2923 
2924 	if (pipe_ctx->top_pipe == NULL) {
2925 		bool blank = !is_pipe_tree_visible(pipe_ctx);
2926 
2927 		pipe_ctx->stream_res.tg->funcs->program_global_sync(
2928 				pipe_ctx->stream_res.tg,
2929 				calculate_vready_offset_for_group(pipe_ctx),
2930 				pipe_ctx->pipe_dlg_param.vstartup_start,
2931 				pipe_ctx->pipe_dlg_param.vupdate_offset,
2932 				pipe_ctx->pipe_dlg_param.vupdate_width);
2933 
2934 		pipe_ctx->stream_res.tg->funcs->set_vtg_params(
2935 				pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing, true);
2936 
2937 		if (hws->funcs.setup_vupdate_interrupt)
2938 			hws->funcs.setup_vupdate_interrupt(dc, pipe_ctx);
2939 
2940 		hws->funcs.blank_pixel_data(dc, pipe_ctx, blank);
2941 	}
2942 
2943 	if (pipe_ctx->plane_state->update_flags.bits.full_update)
2944 		dcn10_enable_plane(dc, pipe_ctx, context);
2945 
2946 	dcn10_update_dchubp_dpp(dc, pipe_ctx, context);
2947 
2948 	hws->funcs.set_hdr_multiplier(pipe_ctx);
2949 
2950 	if (pipe_ctx->plane_state->update_flags.bits.full_update ||
2951 			pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change ||
2952 			pipe_ctx->plane_state->update_flags.bits.gamma_change)
2953 		hws->funcs.set_input_transfer_func(dc, pipe_ctx, pipe_ctx->plane_state);
2954 
2955 	/* dcn10_translate_regamma_to_hw_format takes 750us to finish, so
2956 	 * only do gamma programming for a full update.
2957 	 * TODO: This can be further optimized/cleaned up.
2958 	 * Always call this for now, since it does a memcmp inside before
2959 	 * doing the heavy calculation and programming.
2960 	 */
2961 	if (pipe_ctx->plane_state->update_flags.bits.full_update)
2962 		hws->funcs.set_output_transfer_func(dc, pipe_ctx, pipe_ctx->stream);
2963 }
2964 
2965 void dcn10_wait_for_pending_cleared(struct dc *dc,
2966 		struct dc_state *context)
2967 {
2968 		struct pipe_ctx *pipe_ctx;
2969 		struct timing_generator *tg;
2970 		int i;
2971 
2972 		for (i = 0; i < dc->res_pool->pipe_count; i++) {
2973 			pipe_ctx = &context->res_ctx.pipe_ctx[i];
2974 			tg = pipe_ctx->stream_res.tg;
2975 
2976 			/*
2977 			 * Only wait for the top pipe's tg pending bit.
2978 			 * Also skip if pipe is disabled.
2979 			 */
2980 			if (pipe_ctx->top_pipe ||
2981 			    !pipe_ctx->stream || !pipe_ctx->plane_state ||
2982 			    !tg->funcs->is_tg_enabled(tg))
2983 				continue;
2984 
2985 			/*
2986 			 * Wait for VBLANK then VACTIVE to ensure we get VUPDATE.
2987 			 * For some reason waiting for OTG_UPDATE_PENDING cleared
2988 			 * seems to not trigger the update right away, and if we
2989 			 * lock again before VUPDATE then we don't get a separate
2990 			 * operation.
2991 			 */
2992 			pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VBLANK);
2993 			pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);
2994 		}
2995 }
2996 
2997 void dcn10_post_unlock_program_front_end(
2998 		struct dc *dc,
2999 		struct dc_state *context)
3000 {
3001 	int i;
3002 
3003 	DC_LOGGER_INIT(dc->ctx->logger);
3004 
3005 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
3006 		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
3007 
3008 		if (!pipe_ctx->top_pipe &&
3009 			!pipe_ctx->prev_odm_pipe &&
3010 			pipe_ctx->stream) {
3011 			struct timing_generator *tg = pipe_ctx->stream_res.tg;
3012 
3013 			if (context->stream_status[i].plane_count == 0)
3014 				false_optc_underflow_wa(dc, pipe_ctx->stream, tg);
3015 		}
3016 	}
3017 
3018 	for (i = 0; i < dc->res_pool->pipe_count; i++)
3019 		if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable)
3020 			dc->hwss.disable_plane(dc, &dc->current_state->res_ctx.pipe_ctx[i]);
3021 
3022 	for (i = 0; i < dc->res_pool->pipe_count; i++)
3023 		if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable) {
3024 			dc->hwss.optimize_bandwidth(dc, context);
3025 			break;
3026 		}
3027 
3028 	if (dc->hwseq->wa.DEGVIDCN10_254)
3029 		hubbub1_wm_change_req_wa(dc->res_pool->hubbub);
3030 }
3031 
3032 static void dcn10_stereo_hw_frame_pack_wa(struct dc *dc, struct dc_state *context)
3033 {
3034 	uint8_t i;
3035 
3036 	for (i = 0; i < context->stream_count; i++) {
3037 		if (context->streams[i]->timing.timing_3d_format
3038 				== TIMING_3D_FORMAT_HW_FRAME_PACKING) {
3039 			/*
3040 			 * Disable stutter
3041 			 */
3042 			hubbub1_allow_self_refresh_control(dc->res_pool->hubbub, false);
3043 			break;
3044 		}
3045 	}
3046 }
3047 
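/*
 * Note: dcn10_prepare_bandwidth runs before the new state is programmed and
 * calls update_clocks with its safe_to_lower argument false, while
 * dcn10_optimize_bandwidth below runs afterwards with safe_to_lower true,
 * allowing clocks and watermarks to drop once the transition is complete.
 */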
3048 void dcn10_prepare_bandwidth(
3049 		struct dc *dc,
3050 		struct dc_state *context)
3051 {
3052 	struct dce_hwseq *hws = dc->hwseq;
3053 	struct hubbub *hubbub = dc->res_pool->hubbub;
3054 	int min_fclk_khz, min_dcfclk_khz, socclk_khz;
3055 
3056 	if (dc->debug.sanity_checks)
3057 		hws->funcs.verify_allow_pstate_change_high(dc);
3058 
3059 	if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
3060 		if (context->stream_count == 0)
3061 			context->bw_ctx.bw.dcn.clk.phyclk_khz = 0;
3062 
3063 		dc->clk_mgr->funcs->update_clocks(
3064 				dc->clk_mgr,
3065 				context,
3066 				false);
3067 	}
3068 
3069 	dc->wm_optimized_required = hubbub->funcs->program_watermarks(hubbub,
3070 			&context->bw_ctx.bw.dcn.watermarks,
3071 			dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
3072 			true);
3073 	dcn10_stereo_hw_frame_pack_wa(dc, context);
3074 
3075 	if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE) {
3076 		DC_FP_START();
3077 		dcn_get_soc_clks(
3078 			dc, &min_fclk_khz, &min_dcfclk_khz, &socclk_khz);
3079 		DC_FP_END();
3080 		dcn_bw_notify_pplib_of_wm_ranges(
3081 			dc, min_fclk_khz, min_dcfclk_khz, socclk_khz);
3082 	}
3083 
3084 	if (dc->debug.sanity_checks)
3085 		hws->funcs.verify_allow_pstate_change_high(dc);
3086 }
3087 
3088 void dcn10_optimize_bandwidth(
3089 		struct dc *dc,
3090 		struct dc_state *context)
3091 {
3092 	struct dce_hwseq *hws = dc->hwseq;
3093 	struct hubbub *hubbub = dc->res_pool->hubbub;
3094 	int min_fclk_khz, min_dcfclk_khz, socclk_khz;
3095 
3096 	if (dc->debug.sanity_checks)
3097 		hws->funcs.verify_allow_pstate_change_high(dc);
3098 
3099 	if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
3100 		if (context->stream_count == 0)
3101 			context->bw_ctx.bw.dcn.clk.phyclk_khz = 0;
3102 
3103 		dc->clk_mgr->funcs->update_clocks(
3104 				dc->clk_mgr,
3105 				context,
3106 				true);
3107 	}
3108 
3109 	hubbub->funcs->program_watermarks(hubbub,
3110 			&context->bw_ctx.bw.dcn.watermarks,
3111 			dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
3112 			true);
3113 
3114 	dcn10_stereo_hw_frame_pack_wa(dc, context);
3115 
3116 	if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE) {
3117 		DC_FP_START();
3118 		dcn_get_soc_clks(
3119 			dc, &min_fclk_khz, &min_dcfclk_khz, &socclk_khz);
3120 		DC_FP_END();
3121 		dcn_bw_notify_pplib_of_wm_ranges(
3122 			dc, min_fclk_khz, min_dcfclk_khz, socclk_khz);
3123 	}
3124 
3125 	if (dc->debug.sanity_checks)
3126 		hws->funcs.verify_allow_pstate_change_high(dc);
3127 }
3128 
3129 void dcn10_set_drr(struct pipe_ctx **pipe_ctx,
3130 		int num_pipes, struct dc_crtc_timing_adjust adjust)
3131 {
3132 	int i = 0;
3133 	struct drr_params params = {0};
3134 	// DRR set trigger event mapped to OTG_TRIG_A (bit 11) for manual control flow
3135 	unsigned int event_triggers = 0x800;
3136 	// Note: DRR trigger events are generated regardless of whether the frame count is met.
3137 	unsigned int num_frames = 2;
3138 
3139 	params.vertical_total_max = adjust.v_total_max;
3140 	params.vertical_total_min = adjust.v_total_min;
3141 	params.vertical_total_mid = adjust.v_total_mid;
3142 	params.vertical_total_mid_frame_num = adjust.v_total_mid_frame_num;
3143 	/* TODO: If multiple pipes are to be supported, GSL programming is
3144 	 * needed here.  Static screen triggers may be programmed differently
3145 	 * as well.
3146 	 */
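	/*
	 * Static screen control is only re-armed when DRR is actually
	 * enabled, i.e. both v_total_max and v_total_min are non-zero.
	 */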
3147 	for (i = 0; i < num_pipes; i++) {
3148 		if ((pipe_ctx[i]->stream_res.tg != NULL) && pipe_ctx[i]->stream_res.tg->funcs) {
3149 			if (pipe_ctx[i]->stream_res.tg->funcs->set_drr)
3150 				pipe_ctx[i]->stream_res.tg->funcs->set_drr(
3151 					pipe_ctx[i]->stream_res.tg, &params);
3152 			if (adjust.v_total_max != 0 && adjust.v_total_min != 0)
3153 				if (pipe_ctx[i]->stream_res.tg->funcs->set_static_screen_control)
3154 					pipe_ctx[i]->stream_res.tg->funcs->set_static_screen_control(
3155 						pipe_ctx[i]->stream_res.tg,
3156 						event_triggers, num_frames);
3157 		}
3158 	}
3159 }
3160 
3161 void dcn10_get_position(struct pipe_ctx **pipe_ctx,
3162 		int num_pipes,
3163 		struct crtc_position *position)
3164 {
3165 	int i = 0;
3166 
3167 	/* TODO: handle pipes > 1
3168 	 */
3169 	for (i = 0; i < num_pipes; i++)
3170 		pipe_ctx[i]->stream_res.tg->funcs->get_position(pipe_ctx[i]->stream_res.tg, position);
3171 }
3172 
3173 void dcn10_set_static_screen_control(struct pipe_ctx **pipe_ctx,
3174 		int num_pipes, const struct dc_static_screen_params *params)
3175 {
3176 	unsigned int i;
3177 	unsigned int triggers = 0;
3178 
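	/*
	 * Build the event trigger mask passed to the OTG's
	 * set_static_screen_control(); each bit below selects one static
	 * screen event source (surface update, cursor update, force trigger).
	 */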
3179 	if (params->triggers.surface_update)
3180 		triggers |= 0x80;
3181 	if (params->triggers.cursor_update)
3182 		triggers |= 0x2;
3183 	if (params->triggers.force_trigger)
3184 		triggers |= 0x1;
3185 
3186 	for (i = 0; i < num_pipes; i++)
3187 		pipe_ctx[i]->stream_res.tg->funcs->
3188 			set_static_screen_control(pipe_ctx[i]->stream_res.tg,
3189 					triggers, params->num_frames);
3190 }
3191 
3192 static void dcn10_config_stereo_parameters(
3193 		struct dc_stream_state *stream, struct crtc_stereo_flags *flags)
3194 {
3195 	enum view_3d_format view_format = stream->view_format;
3196 	enum dc_timing_3d_format timing_3d_format =\
3197 			stream->timing.timing_3d_format;
3198 	bool non_stereo_timing = false;
3199 
3200 	if (timing_3d_format == TIMING_3D_FORMAT_NONE ||
3201 		timing_3d_format == TIMING_3D_FORMAT_SIDE_BY_SIDE ||
3202 		timing_3d_format == TIMING_3D_FORMAT_TOP_AND_BOTTOM)
3203 		non_stereo_timing = true;
3204 
3205 	if (non_stereo_timing == false &&
3206 		view_format == VIEW_3D_FORMAT_FRAME_SEQUENTIAL) {
3207 
3208 		flags->PROGRAM_STEREO         = 1;
3209 		flags->PROGRAM_POLARITY       = 1;
3210 		if (timing_3d_format == TIMING_3D_FORMAT_FRAME_ALTERNATE ||
3211 			timing_3d_format == TIMING_3D_FORMAT_INBAND_FA ||
3212 			timing_3d_format == TIMING_3D_FORMAT_DP_HDMI_INBAND_FA ||
3213 			timing_3d_format == TIMING_3D_FORMAT_SIDEBAND_FA) {
3214 			enum display_dongle_type dongle = \
3215 					stream->link->ddc->dongle_type;
3216 			if (dongle == DISPLAY_DONGLE_DP_VGA_CONVERTER ||
3217 				dongle == DISPLAY_DONGLE_DP_DVI_CONVERTER ||
3218 				dongle == DISPLAY_DONGLE_DP_HDMI_CONVERTER)
3219 				flags->DISABLE_STEREO_DP_SYNC = 1;
3220 		}
3221 		flags->RIGHT_EYE_POLARITY =\
3222 				stream->timing.flags.RIGHT_EYE_3D_POLARITY;
3223 		if (timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING)
3224 			flags->FRAME_PACKED = 1;
3225 	}
3226 
3227 	return;
3228 }
3229 
3230 void dcn10_setup_stereo(struct pipe_ctx *pipe_ctx, struct dc *dc)
3231 {
3232 	struct crtc_stereo_flags flags = { 0 };
3233 	struct dc_stream_state *stream = pipe_ctx->stream;
3234 
3235 	dcn10_config_stereo_parameters(stream, &flags);
3236 
3237 	if (stream->timing.timing_3d_format == TIMING_3D_FORMAT_SIDEBAND_FA) {
3238 		if (!dc_set_generic_gpio_for_stereo(true, dc->ctx->gpio_service))
3239 			dc_set_generic_gpio_for_stereo(false, dc->ctx->gpio_service);
3240 	} else {
3241 		dc_set_generic_gpio_for_stereo(false, dc->ctx->gpio_service);
3242 	}
3243 
3244 	pipe_ctx->stream_res.opp->funcs->opp_program_stereo(
3245 		pipe_ctx->stream_res.opp,
3246 		flags.PROGRAM_STEREO == 1,
3247 		&stream->timing);
3248 
3249 	pipe_ctx->stream_res.tg->funcs->program_stereo(
3250 		pipe_ctx->stream_res.tg,
3251 		&stream->timing,
3252 		&flags);
3253 
3254 	return;
3255 }
3256 
3257 static struct hubp *get_hubp_by_inst(struct resource_pool *res_pool, int mpcc_inst)
3258 {
3259 	int i;
3260 
3261 	for (i = 0; i < res_pool->pipe_count; i++) {
3262 		if (res_pool->hubps[i]->inst == mpcc_inst)
3263 			return res_pool->hubps[i];
3264 	}
3265 	ASSERT(false);
3266 	return NULL;
3267 }
3268 
3269 void dcn10_wait_for_mpcc_disconnect(
3270 		struct dc *dc,
3271 		struct resource_pool *res_pool,
3272 		struct pipe_ctx *pipe_ctx)
3273 {
3274 	struct dce_hwseq *hws = dc->hwseq;
3275 	int mpcc_inst;
3276 
3277 	if (dc->debug.sanity_checks) {
3278 		hws->funcs.verify_allow_pstate_change_high(dc);
3279 	}
3280 
3281 	if (!pipe_ctx->stream_res.opp)
3282 		return;
3283 
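	/*
	 * For each MPCC instance with a disconnect pending on this OPP: wait
	 * for the MPC to go idle (only meaningful while the OTG is running),
	 * clear the pending flag and blank the matching HUBP.
	 */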
3284 	for (mpcc_inst = 0; mpcc_inst < MAX_PIPES; mpcc_inst++) {
3285 		if (pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst]) {
3286 			struct hubp *hubp = get_hubp_by_inst(res_pool, mpcc_inst);
3287 
3288 			if (pipe_ctx->stream_res.tg->funcs->is_tg_enabled(pipe_ctx->stream_res.tg))
3289 				res_pool->mpc->funcs->wait_for_idle(res_pool->mpc, mpcc_inst);
3290 			pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst] = false;
3291 			hubp->funcs->set_blank(hubp, true);
3292 		}
3293 	}
3294 
3295 	if (dc->debug.sanity_checks) {
3296 		hws->funcs.verify_allow_pstate_change_high(dc);
3297 	}
3298 
3299 }
3300 
3301 bool dcn10_dummy_display_power_gating(
3302 	struct dc *dc,
3303 	uint8_t controller_id,
3304 	struct dc_bios *dcb,
3305 	enum pipe_gating_control power_gating)
3306 {
3307 	return true;
3308 }
3309 
3310 void dcn10_update_pending_status(struct pipe_ctx *pipe_ctx)
3311 {
3312 	struct dc_plane_state *plane_state = pipe_ctx->plane_state;
3313 	struct timing_generator *tg = pipe_ctx->stream_res.tg;
3314 	bool flip_pending;
3315 	struct dc *dc = pipe_ctx->stream->ctx->dc;
3316 
3317 	if (plane_state == NULL)
3318 		return;
3319 
3320 	flip_pending = pipe_ctx->plane_res.hubp->funcs->hubp_is_flip_pending(
3321 					pipe_ctx->plane_res.hubp);
3322 
3323 	plane_state->status.is_flip_pending = plane_state->status.is_flip_pending || flip_pending;
3324 
3325 	if (!flip_pending)
3326 		plane_state->status.current_address = plane_state->status.requested_address;
3327 
3328 	if (plane_state->status.current_address.type == PLN_ADDR_TYPE_GRPH_STEREO &&
3329 			tg->funcs->is_stereo_left_eye) {
3330 		plane_state->status.is_right_eye =
3331 				!tg->funcs->is_stereo_left_eye(pipe_ctx->stream_res.tg);
3332 	}
3333 
3334 	if (dc->hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied) {
3335 		struct dce_hwseq *hwseq = dc->hwseq;
3336 		struct timing_generator *tg = dc->res_pool->timing_generators[0];
3337 		unsigned int cur_frame = tg->funcs->get_frame_count(tg);
3338 
3339 		if (cur_frame != hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied_on_frame) {
3340 			struct hubbub *hubbub = dc->res_pool->hubbub;
3341 
3342 			hubbub->funcs->allow_self_refresh_control(hubbub, !dc->debug.disable_stutter);
3343 			hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied = false;
3344 		}
3345 	}
3346 }
3347 
3348 void dcn10_update_dchub(struct dce_hwseq *hws, struct dchub_init_data *dh_data)
3349 {
3350 	struct hubbub *hubbub = hws->ctx->dc->res_pool->hubbub;
3351 
3352 	/* In DCN, this programming sequence is owned by the hubbub */
3353 	hubbub->funcs->update_dchub(hubbub, dh_data);
3354 }
3355 
3356 static bool dcn10_can_pipe_disable_cursor(struct pipe_ctx *pipe_ctx)
3357 {
3358 	struct pipe_ctx *test_pipe, *split_pipe;
3359 	const struct scaler_data *scl_data = &pipe_ctx->plane_res.scl_data;
3360 	struct rect r1 = scl_data->recout, r2, r2_half;
3361 	int r1_r = r1.x + r1.width, r1_b = r1.y + r1.height, r2_r, r2_b;
3362 	int cur_layer = pipe_ctx->plane_state->layer_index;
3363 
3364 	/**
3365 	 * Disable the cursor if there's another pipe above this with a
3366 	 * plane that contains this pipe's viewport to prevent double cursor
3367 	 * and incorrect scaling artifacts.
3368 	 */
3369 	for (test_pipe = pipe_ctx->top_pipe; test_pipe;
3370 	     test_pipe = test_pipe->top_pipe) {
3371 		// Skip invisible layer and pipe-split plane on same layer
3372 		if (!test_pipe->plane_state->visible || test_pipe->plane_state->layer_index == cur_layer)
3373 			continue;
3374 
3375 		r2 = test_pipe->plane_res.scl_data.recout;
3376 		r2_r = r2.x + r2.width;
3377 		r2_b = r2.y + r2.height;
3378 		split_pipe = test_pipe;
3379 
3380 		/**
3381 		 * There may be another half plane on the same layer because of
3382 		 * pipe split; merge the two recouts (same height) into one.
3383 		 */
3384 		for (split_pipe = pipe_ctx->top_pipe; split_pipe;
3385 		     split_pipe = split_pipe->top_pipe)
3386 			if (split_pipe->plane_state->layer_index == test_pipe->plane_state->layer_index) {
3387 				r2_half = split_pipe->plane_res.scl_data.recout;
3388 				r2.x = (r2_half.x < r2.x) ? r2_half.x : r2.x;
3389 				r2.width = r2.width + r2_half.width;
3390 				r2_r = r2.x + r2.width;
3391 				break;
3392 			}
3393 
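		/*
		 * If this pipe's recout (r1) is fully contained in the merged
		 * recout of the overlapping plane (r2), the upper plane covers
		 * the whole viewport and the cursor on this pipe can be
		 * disabled.
		 */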
3394 		if (r1.x >= r2.x && r1.y >= r2.y && r1_r <= r2_r && r1_b <= r2_b)
3395 			return true;
3396 	}
3397 
3398 	return false;
3399 }
3400 
3401 void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
3402 {
3403 	struct dc_cursor_position pos_cpy = pipe_ctx->stream->cursor_position;
3404 	struct hubp *hubp = pipe_ctx->plane_res.hubp;
3405 	struct dpp *dpp = pipe_ctx->plane_res.dpp;
3406 	struct dc_cursor_mi_param param = {
3407 		.pixel_clk_khz = pipe_ctx->stream->timing.pix_clk_100hz / 10,
3408 		.ref_clk_khz = pipe_ctx->stream->ctx->dc->res_pool->ref_clocks.dchub_ref_clock_inKhz,
3409 		.viewport = pipe_ctx->plane_res.scl_data.viewport,
3410 		.h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz,
3411 		.v_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.vert,
3412 		.rotation = pipe_ctx->plane_state->rotation,
3413 		.mirror = pipe_ctx->plane_state->horizontal_mirror
3414 	};
3415 	bool pipe_split_on = false;
3416 	bool odm_combine_on = (pipe_ctx->next_odm_pipe != NULL) ||
3417 		(pipe_ctx->prev_odm_pipe != NULL);
3418 
3419 	int x_plane = pipe_ctx->plane_state->dst_rect.x;
3420 	int y_plane = pipe_ctx->plane_state->dst_rect.y;
3421 	int x_pos = pos_cpy.x;
3422 	int y_pos = pos_cpy.y;
3423 
3424 	if ((pipe_ctx->top_pipe != NULL) || (pipe_ctx->bottom_pipe != NULL)) {
3425 		if ((pipe_ctx->plane_state->src_rect.width != pipe_ctx->plane_res.scl_data.viewport.width) ||
3426 			(pipe_ctx->plane_state->src_rect.height != pipe_ctx->plane_res.scl_data.viewport.height)) {
3427 			pipe_split_on = true;
3428 		}
3429 	}
3430 
3431 	/**
3432 	 * DC cursor is stream space, HW cursor is plane space and drawn
3433 	 * as part of the framebuffer.
3434 	 *
3435 	 * Cursor position can't be negative, but hotspot can be used to
3436 	 * shift cursor out of the plane bounds. Hotspot must be smaller
3437 	 * than the cursor size.
3438 	 */
3439 
3440 	/**
3441 	 * Translate cursor from stream space to plane space.
3442 	 *
3443 	 * If the cursor is scaled then we need to scale the position
3444 	 * to be in the approximately correct place. We can't do anything
3445 	 * about the actual size being incorrect, that's a limitation of
3446 	 * the hardware.
3447 	 */
3448 	if (param.rotation == ROTATION_ANGLE_90 || param.rotation == ROTATION_ANGLE_270) {
3449 		x_pos = (x_pos - x_plane) * pipe_ctx->plane_state->src_rect.height /
3450 				pipe_ctx->plane_state->dst_rect.width;
3451 		y_pos = (y_pos - y_plane) * pipe_ctx->plane_state->src_rect.width /
3452 				pipe_ctx->plane_state->dst_rect.height;
3453 	} else {
3454 		x_pos = (x_pos - x_plane) * pipe_ctx->plane_state->src_rect.width /
3455 				pipe_ctx->plane_state->dst_rect.width;
3456 		y_pos = (y_pos - y_plane) * pipe_ctx->plane_state->src_rect.height /
3457 				pipe_ctx->plane_state->dst_rect.height;
3458 	}
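	/*
	 * Illustrative example (hypothetical numbers): a 1920 pixel wide
	 * src_rect scaled into a 960 pixel wide dst_rect at x_plane = 0 maps
	 * a stream-space x_pos of 480 to plane-space 960; only the position
	 * is scaled, not the cursor bitmap itself.
	 */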
3459 
3460 	/**
3461 	 * If the cursor's source viewport is clipped then we need to
3462 	 * translate the cursor to appear in the correct position on
3463 	 * the screen.
3464 	 *
3465 	 * This translation isn't affected by scaling so it needs to be
3466 	 * done *after* we adjust the position for the scale factor.
3467 	 *
3468 	 * This is only done by opt-in for now since there are still
3469 	 * some usecases like tiled display that might enable the
3470 	 * cursor on both streams while expecting dc to clip it.
3471 	 */
3472 	if (pos_cpy.translate_by_source) {
3473 		x_pos += pipe_ctx->plane_state->src_rect.x;
3474 		y_pos += pipe_ctx->plane_state->src_rect.y;
3475 	}
3476 
3477 	/**
3478 	 * If the position is negative then we need to add to the hotspot
3479 	 * to shift the cursor outside the plane.
3480 	 */
3481 
3482 	if (x_pos < 0) {
3483 		pos_cpy.x_hotspot -= x_pos;
3484 		x_pos = 0;
3485 	}
3486 
3487 	if (y_pos < 0) {
3488 		pos_cpy.y_hotspot -= y_pos;
3489 		y_pos = 0;
3490 	}
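	/*
	 * Example (hypothetical numbers): x_pos = -8 becomes x_pos = 0 with
	 * x_hotspot increased by 8, so the cursor still appears shifted past
	 * the left edge of the plane.
	 */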
3491 
3492 	pos_cpy.x = (uint32_t)x_pos;
3493 	pos_cpy.y = (uint32_t)y_pos;
3494 
3495 	if (pipe_ctx->plane_state->address.type
3496 			== PLN_ADDR_TYPE_VIDEO_PROGRESSIVE)
3497 		pos_cpy.enable = false;
3498 
3499 	if (pos_cpy.enable && dcn10_can_pipe_disable_cursor(pipe_ctx))
3500 		pos_cpy.enable = false;
3501 
3502 
3503 	if (param.rotation == ROTATION_ANGLE_0) {
3504 		int viewport_width =
3505 			pipe_ctx->plane_res.scl_data.viewport.width;
3506 		int viewport_x =
3507 			pipe_ctx->plane_res.scl_data.viewport.x;
3508 
3509 		if (param.mirror) {
3510 			if (pipe_split_on || odm_combine_on) {
3511 				if (pos_cpy.x >= viewport_width + viewport_x) {
3512 					pos_cpy.x = 2 * viewport_width
3513 							- pos_cpy.x + 2 * viewport_x;
3514 				} else {
3515 					uint32_t temp_x = pos_cpy.x;
3516 
3517 					pos_cpy.x = 2 * viewport_x - pos_cpy.x;
3518 					if (temp_x >= viewport_x +
3519 						(int)hubp->curs_attr.width || pos_cpy.x
3520 						<= (int)hubp->curs_attr.width +
3521 						pipe_ctx->plane_state->src_rect.x) {
3522 						pos_cpy.x = temp_x + viewport_width;
3523 					}
3524 				}
3525 			} else {
3526 				pos_cpy.x = viewport_width - pos_cpy.x + 2 * viewport_x;
3527 			}
3528 		}
3529 	}
3530 	// Swap axis and mirror horizontally
3531 	else if (param.rotation == ROTATION_ANGLE_90) {
3532 		uint32_t temp_x = pos_cpy.x;
3533 
3534 		pos_cpy.x = pipe_ctx->plane_res.scl_data.viewport.width -
3535 				(pos_cpy.y - pipe_ctx->plane_res.scl_data.viewport.x) + pipe_ctx->plane_res.scl_data.viewport.x;
3536 		pos_cpy.y = temp_x;
3537 	}
3538 	// Swap axis and mirror vertically
3539 	else if (param.rotation == ROTATION_ANGLE_270) {
3540 		uint32_t temp_y = pos_cpy.y;
3541 		int viewport_height =
3542 			pipe_ctx->plane_res.scl_data.viewport.height;
3543 		int viewport_y =
3544 			pipe_ctx->plane_res.scl_data.viewport.y;
3545 
3546 		/**
3547 		 * Display groups that are 1xnY have pos_cpy.x > 2 * viewport.height
3548 		 * For pipe split cases:
3549 		 * - apply offset of viewport.y to normalize pos_cpy.x
3550 		 * - calculate the pos_cpy.y as before
3551 		 * - shift pos_cpy.y back by same offset to get final value
3552 		 * - since we iterate through both pipes, use the lower
3553 		 *   viewport.y for offset
3554 		 * For non pipe split cases, use the same calculation for
3555 		 *  pos_cpy.y as the 180 degree rotation case below,
3556 		 *  but use pos_cpy.x as our input because we are rotating
3557 		 *  270 degrees
3558 		 */
3559 		if (pipe_split_on || odm_combine_on) {
3560 			int pos_cpy_x_offset;
3561 			int other_pipe_viewport_y;
3562 
3563 			if (pipe_split_on) {
3564 				if (pipe_ctx->bottom_pipe) {
3565 					other_pipe_viewport_y =
3566 						pipe_ctx->bottom_pipe->plane_res.scl_data.viewport.y;
3567 				} else {
3568 					other_pipe_viewport_y =
3569 						pipe_ctx->top_pipe->plane_res.scl_data.viewport.y;
3570 				}
3571 			} else {
3572 				if (pipe_ctx->next_odm_pipe) {
3573 					other_pipe_viewport_y =
3574 						pipe_ctx->next_odm_pipe->plane_res.scl_data.viewport.y;
3575 				} else {
3576 					other_pipe_viewport_y =
3577 						pipe_ctx->prev_odm_pipe->plane_res.scl_data.viewport.y;
3578 				}
3579 			}
3580 			pos_cpy_x_offset = (viewport_y > other_pipe_viewport_y) ?
3581 				other_pipe_viewport_y : viewport_y;
3582 			pos_cpy.x -= pos_cpy_x_offset;
3583 			if (pos_cpy.x > viewport_height) {
3584 				pos_cpy.x = pos_cpy.x - viewport_height;
3585 				pos_cpy.y = viewport_height - pos_cpy.x;
3586 			} else {
3587 				pos_cpy.y = 2 * viewport_height - pos_cpy.x;
3588 			}
3589 			pos_cpy.y += pos_cpy_x_offset;
3590 		} else {
3591 			pos_cpy.y = (2 * viewport_y) + viewport_height - pos_cpy.x;
3592 		}
3593 		pos_cpy.x = temp_y;
3594 	}
3595 	// Mirror horizontally and vertically
3596 	else if (param.rotation == ROTATION_ANGLE_180) {
3597 		int viewport_width =
3598 			pipe_ctx->plane_res.scl_data.viewport.width;
3599 		int viewport_x =
3600 			pipe_ctx->plane_res.scl_data.viewport.x;
3601 
3602 		if (!param.mirror) {
3603 			if (pipe_split_on || odm_combine_on) {
3604 				if (pos_cpy.x >= viewport_width + viewport_x) {
3605 					pos_cpy.x = 2 * viewport_width
3606 							- pos_cpy.x + 2 * viewport_x;
3607 				} else {
3608 					uint32_t temp_x = pos_cpy.x;
3609 
3610 					pos_cpy.x = 2 * viewport_x - pos_cpy.x;
3611 					if (temp_x >= viewport_x +
3612 						(int)hubp->curs_attr.width || pos_cpy.x
3613 						<= (int)hubp->curs_attr.width +
3614 						pipe_ctx->plane_state->src_rect.x) {
3615 						pos_cpy.x = temp_x + viewport_width;
3616 					}
3617 				}
3618 			} else {
3619 				pos_cpy.x = viewport_width - pos_cpy.x + 2 * viewport_x;
3620 			}
3621 		}
3622 
3623 		/**
3624 		 * Display groups that are 1xnY have pos_cpy.y > viewport.height
3625 		 * Calculation:
3626 		 *   delta_from_bottom = viewport.y + viewport.height - pos_cpy.y
3627 		 *   pos_cpy.y_new = viewport.y + delta_from_bottom
3628 		 * Simplify it as:
3629 		 *   pos_cpy.y = viewport.y * 2 + viewport.height - pos_cpy.y
3630 		 */
3631 		pos_cpy.y = (2 * pipe_ctx->plane_res.scl_data.viewport.y) +
3632 			pipe_ctx->plane_res.scl_data.viewport.height - pos_cpy.y;
3633 	}
3634 
3635 	hubp->funcs->set_cursor_position(hubp, &pos_cpy, &param);
3636 	dpp->funcs->set_cursor_position(dpp, &pos_cpy, &param, hubp->curs_attr.width, hubp->curs_attr.height);
3637 }
3638 
3639 void dcn10_set_cursor_attribute(struct pipe_ctx *pipe_ctx)
3640 {
3641 	struct dc_cursor_attributes *attributes = &pipe_ctx->stream->cursor_attributes;
3642 
3643 	pipe_ctx->plane_res.hubp->funcs->set_cursor_attributes(
3644 			pipe_ctx->plane_res.hubp, attributes);
3645 	pipe_ctx->plane_res.dpp->funcs->set_cursor_attributes(
3646 		pipe_ctx->plane_res.dpp, attributes);
3647 }
3648 
3649 void dcn10_set_cursor_sdr_white_level(struct pipe_ctx *pipe_ctx)
3650 {
3651 	uint32_t sdr_white_level = pipe_ctx->stream->cursor_attributes.sdr_white_level;
3652 	struct fixed31_32 multiplier;
3653 	struct dpp_cursor_attributes opt_attr = { 0 };
3654 	uint32_t hw_scale = 0x3c00; // 1.0 default multiplier
3655 	struct custom_float_format fmt;
3656 
3657 	if (!pipe_ctx->plane_res.dpp->funcs->set_optional_cursor_attributes)
3658 		return;
3659 
3660 	fmt.exponenta_bits = 5;
3661 	fmt.mantissa_bits = 10;
3662 	fmt.sign = true;
3663 
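	/*
	 * The scale is an fp16-style custom float (sign, 5-bit exponent,
	 * 10-bit mantissa), so 0x3c00 above encodes 1.0.  SDR white levels
	 * above the 80 nit reference scale the cursor by sdr_white_level / 80,
	 * e.g. 160 nits programs 2.0 (0x4000).
	 */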
3664 	if (sdr_white_level > 80) {
3665 		multiplier = dc_fixpt_from_fraction(sdr_white_level, 80);
3666 		convert_to_custom_float_format(multiplier, &fmt, &hw_scale);
3667 	}
3668 
3669 	opt_attr.scale = hw_scale;
3670 	opt_attr.bias = 0;
3671 
3672 	pipe_ctx->plane_res.dpp->funcs->set_optional_cursor_attributes(
3673 			pipe_ctx->plane_res.dpp, &opt_attr);
3674 }
3675 
3676 /*
3677  * apply_front_porch_workaround  TODO: is this still needed on FPGA?
3678  *
3679  * This is a workaround for a bug that has existed since R5xx and has not been
3680  * fixed: keep the front porch at a minimum of 2 for interlaced modes or 1 for progressive.
3681  */
3682 static void apply_front_porch_workaround(
3683 	struct dc_crtc_timing *timing)
3684 {
3685 	if (timing->flags.INTERLACE == 1) {
3686 		if (timing->v_front_porch < 2)
3687 			timing->v_front_porch = 2;
3688 	} else {
3689 		if (timing->v_front_porch < 1)
3690 			timing->v_front_porch = 1;
3691 	}
3692 }
3693 
3694 int dcn10_get_vupdate_offset_from_vsync(struct pipe_ctx *pipe_ctx)
3695 {
3696 	const struct dc_crtc_timing *dc_crtc_timing = &pipe_ctx->stream->timing;
3697 	struct dc_crtc_timing patched_crtc_timing;
3698 	int vesa_sync_start;
3699 	int asic_blank_end;
3700 	int interlace_factor;
3701 
3702 	patched_crtc_timing = *dc_crtc_timing;
3703 	apply_front_porch_workaround(&patched_crtc_timing);
3704 
3705 	interlace_factor = patched_crtc_timing.flags.INTERLACE ? 2 : 1;
3706 
3707 	vesa_sync_start = patched_crtc_timing.v_addressable +
3708 			patched_crtc_timing.v_border_bottom +
3709 			patched_crtc_timing.v_front_porch;
3710 
3711 	asic_blank_end = (patched_crtc_timing.v_total -
3712 			vesa_sync_start -
3713 			patched_crtc_timing.v_border_top)
3714 			* interlace_factor;
3715 
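	/*
	 * asic_blank_end is the number of lines from the start of vertical
	 * sync to the start of active video.  Illustrative numbers: 1080p
	 * with v_total 1125, v_addressable 1080, v_front_porch 4 and no
	 * borders gives asic_blank_end = 1125 - 1084 = 41; with a
	 * hypothetical vstartup_start of 26 the returned offset is
	 * 41 - 26 + 1 = 16 lines.
	 */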
3716 	return asic_blank_end -
3717 			pipe_ctx->pipe_dlg_param.vstartup_start + 1;
3718 }
3719 
3720 void dcn10_calc_vupdate_position(
3721 		struct dc *dc,
3722 		struct pipe_ctx *pipe_ctx,
3723 		uint32_t *start_line,
3724 		uint32_t *end_line)
3725 {
3726 	const struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
3727 	int vupdate_pos = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);
3728 
3729 	if (vupdate_pos >= 0)
3730 		*start_line = vupdate_pos - ((vupdate_pos / timing->v_total) * timing->v_total);
3731 	else
3732 		*start_line = vupdate_pos + ((-vupdate_pos / timing->v_total) + 1) * timing->v_total - 1;
3733 	*end_line = (*start_line + 2) % timing->v_total;
3734 }
3735 
3736 static void dcn10_cal_vline_position(
3737 		struct dc *dc,
3738 		struct pipe_ctx *pipe_ctx,
3739 		uint32_t *start_line,
3740 		uint32_t *end_line)
3741 {
3742 	const struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
3743 	int vline_pos = pipe_ctx->stream->periodic_interrupt.lines_offset;
3744 
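	/*
	 * The requested line offset is interpreted relative to either
	 * VUPDATE or VSYNC; in both cases the resulting window is two lines
	 * wide and wrapped at v_total.
	 */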
3745 	if (pipe_ctx->stream->periodic_interrupt.ref_point == START_V_UPDATE) {
3746 		if (vline_pos > 0)
3747 			vline_pos--;
3748 		else if (vline_pos < 0)
3749 			vline_pos++;
3750 
3751 		vline_pos += dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);
3752 		if (vline_pos >= 0)
3753 			*start_line = vline_pos - ((vline_pos / timing->v_total) * timing->v_total);
3754 		else
3755 			*start_line = vline_pos + ((-vline_pos / timing->v_total) + 1) * timing->v_total - 1;
3756 		*end_line = (*start_line + 2) % timing->v_total;
3757 	} else if (pipe_ctx->stream->periodic_interrupt.ref_point == START_V_SYNC) {
3758 		// vsync is line 0 so start_line is just the requested line offset
3759 		*start_line = vline_pos;
3760 		*end_line = (*start_line + 2) % timing->v_total;
3761 	} else
3762 		ASSERT(0);
3763 }
3764 
3765 void dcn10_setup_periodic_interrupt(
3766 		struct dc *dc,
3767 		struct pipe_ctx *pipe_ctx)
3768 {
3769 	struct timing_generator *tg = pipe_ctx->stream_res.tg;
3770 	uint32_t start_line = 0;
3771 	uint32_t end_line = 0;
3772 
3773 	dcn10_cal_vline_position(dc, pipe_ctx, &start_line, &end_line);
3774 
3775 	tg->funcs->setup_vertical_interrupt0(tg, start_line, end_line);
3776 }
3777 
3778 void dcn10_setup_vupdate_interrupt(struct dc *dc, struct pipe_ctx *pipe_ctx)
3779 {
3780 	struct timing_generator *tg = pipe_ctx->stream_res.tg;
3781 	int start_line = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);
3782 
3783 	if (start_line < 0) {
3784 		ASSERT(0);
3785 		start_line = 0;
3786 	}
3787 
3788 	if (tg->funcs->setup_vertical_interrupt2)
3789 		tg->funcs->setup_vertical_interrupt2(tg, start_line);
3790 }
3791 
3792 void dcn10_unblank_stream(struct pipe_ctx *pipe_ctx,
3793 		struct dc_link_settings *link_settings)
3794 {
3795 	struct encoder_unblank_param params = {0};
3796 	struct dc_stream_state *stream = pipe_ctx->stream;
3797 	struct dc_link *link = stream->link;
3798 	struct dce_hwseq *hws = link->dc->hwseq;
3799 
3800 	/* only 3 items below are used by unblank */
3801 	params.timing = pipe_ctx->stream->timing;
3802 
3803 	params.link_settings.link_rate = link_settings->link_rate;
3804 
3805 	if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
3806 		if (params.timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
3807 			params.timing.pix_clk_100hz /= 2;
3808 		pipe_ctx->stream_res.stream_enc->funcs->dp_unblank(link, pipe_ctx->stream_res.stream_enc, &params);
3809 	}
3810 
3811 	if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
3812 		hws->funcs.edp_backlight_control(link, true);
3813 	}
3814 }
3815 
3816 void dcn10_send_immediate_sdp_message(struct pipe_ctx *pipe_ctx,
3817 				const uint8_t *custom_sdp_message,
3818 				unsigned int sdp_message_size)
3819 {
3820 	if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
3821 		pipe_ctx->stream_res.stream_enc->funcs->send_immediate_sdp_message(
3822 				pipe_ctx->stream_res.stream_enc,
3823 				custom_sdp_message,
3824 				sdp_message_size);
3825 	}
3826 }
3827 enum dc_status dcn10_set_clock(struct dc *dc,
3828 			enum dc_clock_type clock_type,
3829 			uint32_t clk_khz,
3830 			uint32_t stepping)
3831 {
3832 	struct dc_state *context = dc->current_state;
3833 	struct dc_clock_config clock_cfg = {0};
3834 	struct dc_clocks *current_clocks = &context->bw_ctx.bw.dcn.clk;
3835 
3836 	if (!dc->clk_mgr || !dc->clk_mgr->funcs->get_clock)
3837 		return DC_FAIL_UNSUPPORTED_1;
3838 
3839 	dc->clk_mgr->funcs->get_clock(dc->clk_mgr,
3840 		context, clock_type, &clock_cfg);
3841 
3842 	if (clk_khz > clock_cfg.max_clock_khz)
3843 		return DC_FAIL_CLK_EXCEED_MAX;
3844 
3845 	if (clk_khz < clock_cfg.min_clock_khz)
3846 		return DC_FAIL_CLK_BELOW_MIN;
3847 
3848 	if (clk_khz < clock_cfg.bw_requirequired_clock_khz)
3849 		return DC_FAIL_CLK_BELOW_CFG_REQUIRED;
3850 
3851 	/* update the internally requested clock for update_clocks use */
3852 	if (clock_type == DC_CLOCK_TYPE_DISPCLK)
3853 		current_clocks->dispclk_khz = clk_khz;
3854 	else if (clock_type == DC_CLOCK_TYPE_DPPCLK)
3855 		current_clocks->dppclk_khz = clk_khz;
3856 	else
3857 		return DC_ERROR_UNEXPECTED;
3858 
3859 	if (dc->clk_mgr->funcs->update_clocks)
3860 				dc->clk_mgr->funcs->update_clocks(dc->clk_mgr,
3861 				context, true);
3862 	return DC_OK;
3863 
3864 }
3865 
3866 void dcn10_get_clock(struct dc *dc,
3867 			enum dc_clock_type clock_type,
3868 			struct dc_clock_config *clock_cfg)
3869 {
3870 	struct dc_state *context = dc->current_state;
3871 
3872 	if (dc->clk_mgr && dc->clk_mgr->funcs->get_clock)
3873 				dc->clk_mgr->funcs->get_clock(dc->clk_mgr, context, clock_type, clock_cfg);
3874 
3875 }
3876 
3877 void dcn10_get_dcc_en_bits(struct dc *dc, int *dcc_en_bits)
3878 {
3879 	struct resource_pool *pool = dc->res_pool;
3880 	int i;
3881 
3882 	for (i = 0; i < pool->pipe_count; i++) {
3883 		struct hubp *hubp = pool->hubps[i];
3884 		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(hubp)->state);
3885 
3886 		hubp->funcs->hubp_read_state(hubp);
3887 
3888 		if (!s->blank_en)
3889 			dcc_en_bits[i] = s->dcc_en ? 1 : 0;
3890 	}
3891 }
3892