/*
 * Copyright 2017 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include "dml_wrapper.h"
#include "resource.h"
#include "core_types.h"
#include "dsc.h"
#include "clk_mgr.h"

#ifndef DC_LOGGER_INIT
#define DC_LOGGER_INIT
#undef DC_LOG_WARNING
#define DC_LOG_WARNING
#endif

#define DML_WRAPPER_TRANSLATION_
#include "dml_wrapper_translation.c"
#undef DML_WRAPPER_TRANSLATION_

static bool is_dual_plane(enum surface_pixel_format format)
{
	return format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN || format == SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA;
}

static void build_clamping_params(struct dc_stream_state *stream)
{
	stream->clamping.clamping_level = CLAMPING_FULL_RANGE;
	stream->clamping.c_depth = stream->timing.display_color_depth;
	stream->clamping.pixel_encoding = stream->timing.pixel_encoding;
}

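/*
 * Derive the pixel clock parameters for a pipe from its stream timing: the
 * requested pixel clock is halved for YCbCr 4:2:0, doubled for HW frame
 * packing, and YCbCr 4:2:2 is always treated as 8 bpc.
 */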
static void get_pixel_clock_parameters(
	const struct pipe_ctx *pipe_ctx,
	struct pixel_clk_params *pixel_clk_params)
{
	const struct dc_stream_state *stream = pipe_ctx->stream;

	/* TODO: is this halved for YCbCr 420? in that case we might want to move
	 * the pixel clock normalization for hdmi up to here instead of doing it
	 * in pll_adjust_pix_clk
	 */
	pixel_clk_params->requested_pix_clk_100hz = stream->timing.pix_clk_100hz;
	pixel_clk_params->encoder_object_id = stream->link->link_enc->id;
	pixel_clk_params->signal_type = pipe_ctx->stream->signal;
	pixel_clk_params->controller_id = pipe_ctx->stream_res.tg->inst + 1;
	/* TODO: un-hardcode */
	pixel_clk_params->requested_sym_clk = LINK_RATE_LOW *
						LINK_RATE_REF_FREQ_IN_KHZ;
	pixel_clk_params->flags.ENABLE_SS = 0;
	pixel_clk_params->color_depth =
		stream->timing.display_color_depth;
	pixel_clk_params->flags.DISPLAY_BLANKED = 1;
	pixel_clk_params->flags.SUPPORT_YCBCR420 = (stream->timing.pixel_encoding ==
			PIXEL_ENCODING_YCBCR420);
	pixel_clk_params->pixel_encoding = stream->timing.pixel_encoding;
	if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR422) {
		pixel_clk_params->color_depth = COLOR_DEPTH_888;
	}
	if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420) {
		pixel_clk_params->requested_pix_clk_100hz = pixel_clk_params->requested_pix_clk_100hz / 2;
	}
	if (stream->timing.timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING)
		pixel_clk_params->requested_pix_clk_100hz *= 2;

}

static enum dc_status build_pipe_hw_param(struct pipe_ctx *pipe_ctx)
{
	get_pixel_clock_parameters(pipe_ctx, &pipe_ctx->stream_res.pix_clk_params);

	if (pipe_ctx->clock_source)
		pipe_ctx->clock_source->funcs->get_pix_clk_dividers(
			pipe_ctx->clock_source,
			&pipe_ctx->stream_res.pix_clk_params,
			&pipe_ctx->pll_settings);

	pipe_ctx->stream->clamping.pixel_encoding = pipe_ctx->stream->timing.pixel_encoding;

	resource_build_bit_depth_reduction_params(pipe_ctx->stream,
					&pipe_ctx->stream->bit_depth_params);
	build_clamping_params(pipe_ctx->stream);

	return DC_OK;
}

static void resource_build_bit_depth_reduction_params(struct dc_stream_state *stream,
		struct bit_depth_reduction_params *fmt_bit_depth)
{
	enum dc_dither_option option = stream->dither_option;
	enum dc_pixel_encoding pixel_encoding =
			stream->timing.pixel_encoding;

	memset(fmt_bit_depth, 0, sizeof(*fmt_bit_depth));

	if (option == DITHER_OPTION_DEFAULT) {
		switch (stream->timing.display_color_depth) {
		case COLOR_DEPTH_666:
			option = DITHER_OPTION_SPATIAL6;
			break;
		case COLOR_DEPTH_888:
			option = DITHER_OPTION_SPATIAL8;
			break;
		case COLOR_DEPTH_101010:
			option = DITHER_OPTION_SPATIAL10;
			break;
		default:
			option = DITHER_OPTION_DISABLE;
		}
	}

	if (option == DITHER_OPTION_DISABLE)
		return;

	if (option == DITHER_OPTION_TRUN6) {
		fmt_bit_depth->flags.TRUNCATE_ENABLED = 1;
		fmt_bit_depth->flags.TRUNCATE_DEPTH = 0;
	} else if (option == DITHER_OPTION_TRUN8 ||
			option == DITHER_OPTION_TRUN8_SPATIAL6 ||
			option == DITHER_OPTION_TRUN8_FM6) {
		fmt_bit_depth->flags.TRUNCATE_ENABLED = 1;
		fmt_bit_depth->flags.TRUNCATE_DEPTH = 1;
	} else if (option == DITHER_OPTION_TRUN10        ||
			option == DITHER_OPTION_TRUN10_SPATIAL6   ||
			option == DITHER_OPTION_TRUN10_SPATIAL8   ||
			option == DITHER_OPTION_TRUN10_FM8     ||
			option == DITHER_OPTION_TRUN10_FM6     ||
			option == DITHER_OPTION_TRUN10_SPATIAL8_FM6) {
		fmt_bit_depth->flags.TRUNCATE_ENABLED = 1;
		fmt_bit_depth->flags.TRUNCATE_DEPTH = 2;
	}

	/* special case - Formatter can only reduce by 4 bits at most.
	 * When reducing from 12 to 6 bits,
	 * HW recommends we use trunc with round mode
	 * (if we did nothing, trunc to 10 bits would be used)
	 * note that any 12->10 bit reduction is ignored prior to DCE8,
	 * as the input was 10 bits.
	 */
	if (option == DITHER_OPTION_SPATIAL6_FRAME_RANDOM ||
			option == DITHER_OPTION_SPATIAL6 ||
			option == DITHER_OPTION_FM6) {
		fmt_bit_depth->flags.TRUNCATE_ENABLED = 1;
		fmt_bit_depth->flags.TRUNCATE_DEPTH = 2;
		fmt_bit_depth->flags.TRUNCATE_MODE = 1;
	}

	/* spatial dither
	 * note that spatial modes 1-3 are never used
	 */
	if (option == DITHER_OPTION_SPATIAL6_FRAME_RANDOM            ||
			option == DITHER_OPTION_SPATIAL6 ||
			option == DITHER_OPTION_TRUN10_SPATIAL6      ||
			option == DITHER_OPTION_TRUN8_SPATIAL6) {
		fmt_bit_depth->flags.SPATIAL_DITHER_ENABLED = 1;
		fmt_bit_depth->flags.SPATIAL_DITHER_DEPTH = 0;
		fmt_bit_depth->flags.HIGHPASS_RANDOM = 1;
		fmt_bit_depth->flags.RGB_RANDOM =
				(pixel_encoding == PIXEL_ENCODING_RGB) ? 1 : 0;
	} else if (option == DITHER_OPTION_SPATIAL8_FRAME_RANDOM            ||
			option == DITHER_OPTION_SPATIAL8 ||
			option == DITHER_OPTION_SPATIAL8_FM6        ||
			option == DITHER_OPTION_TRUN10_SPATIAL8      ||
			option == DITHER_OPTION_TRUN10_SPATIAL8_FM6) {
		fmt_bit_depth->flags.SPATIAL_DITHER_ENABLED = 1;
		fmt_bit_depth->flags.SPATIAL_DITHER_DEPTH = 1;
		fmt_bit_depth->flags.HIGHPASS_RANDOM = 1;
		fmt_bit_depth->flags.RGB_RANDOM =
				(pixel_encoding == PIXEL_ENCODING_RGB) ? 1 : 0;
	} else if (option == DITHER_OPTION_SPATIAL10_FRAME_RANDOM ||
			option == DITHER_OPTION_SPATIAL10 ||
			option == DITHER_OPTION_SPATIAL10_FM8 ||
			option == DITHER_OPTION_SPATIAL10_FM6) {
		fmt_bit_depth->flags.SPATIAL_DITHER_ENABLED = 1;
		fmt_bit_depth->flags.SPATIAL_DITHER_DEPTH = 2;
		fmt_bit_depth->flags.HIGHPASS_RANDOM = 1;
		fmt_bit_depth->flags.RGB_RANDOM =
				(pixel_encoding == PIXEL_ENCODING_RGB) ? 1 : 0;
	}

	if (option == DITHER_OPTION_SPATIAL6 ||
			option == DITHER_OPTION_SPATIAL8 ||
			option == DITHER_OPTION_SPATIAL10) {
		fmt_bit_depth->flags.FRAME_RANDOM = 0;
	} else {
		fmt_bit_depth->flags.FRAME_RANDOM = 1;
	}

	//////////////////////
	//// temporal dither
	//////////////////////
	if (option == DITHER_OPTION_FM6           ||
			option == DITHER_OPTION_SPATIAL8_FM6     ||
			option == DITHER_OPTION_SPATIAL10_FM6     ||
			option == DITHER_OPTION_TRUN10_FM6     ||
			option == DITHER_OPTION_TRUN8_FM6      ||
			option == DITHER_OPTION_TRUN10_SPATIAL8_FM6) {
		fmt_bit_depth->flags.FRAME_MODULATION_ENABLED = 1;
		fmt_bit_depth->flags.FRAME_MODULATION_DEPTH = 0;
	} else if (option == DITHER_OPTION_FM8        ||
			option == DITHER_OPTION_SPATIAL10_FM8  ||
			option == DITHER_OPTION_TRUN10_FM8) {
		fmt_bit_depth->flags.FRAME_MODULATION_ENABLED = 1;
		fmt_bit_depth->flags.FRAME_MODULATION_DEPTH = 1;
	} else if (option == DITHER_OPTION_FM10) {
		fmt_bit_depth->flags.FRAME_MODULATION_ENABLED = 1;
		fmt_bit_depth->flags.FRAME_MODULATION_DEPTH = 2;
	}

	fmt_bit_depth->pixel_encoding = pixel_encoding;
}

bool dml_validate_dsc(struct dc *dc, struct dc_state *new_ctx)
{
	int i;

	/* Validate DSC config, dsc count validation is already done */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe_ctx = &new_ctx->res_ctx.pipe_ctx[i];
		struct dc_stream_state *stream = pipe_ctx->stream;
		struct dsc_config dsc_cfg;
		struct pipe_ctx *odm_pipe;
		int opp_cnt = 1;

		for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
			opp_cnt++;

		/* Only need to validate top pipe */
		if (pipe_ctx->top_pipe || pipe_ctx->prev_odm_pipe || !stream || !stream->timing.flags.DSC)
			continue;

		dsc_cfg.pic_width = (stream->timing.h_addressable + stream->timing.h_border_left
				+ stream->timing.h_border_right) / opp_cnt;
		dsc_cfg.pic_height = stream->timing.v_addressable + stream->timing.v_border_top
				+ stream->timing.v_border_bottom;
		dsc_cfg.pixel_encoding = stream->timing.pixel_encoding;
		dsc_cfg.color_depth = stream->timing.display_color_depth;
		dsc_cfg.is_odm = pipe_ctx->next_odm_pipe ? true : false;
		dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg;
		dsc_cfg.dc_dsc_cfg.num_slices_h /= opp_cnt;

		if (pipe_ctx->stream_res.dsc && !pipe_ctx->stream_res.dsc->funcs->dsc_validate_stream(pipe_ctx->stream_res.dsc, &dsc_cfg))
			return false;
	}
	return true;
}

enum dc_status dml_build_mapped_resource(const struct dc *dc, struct dc_state *context, struct dc_stream_state *stream)
{
	enum dc_status status = DC_OK;
	struct pipe_ctx *pipe_ctx = resource_get_head_pipe_for_stream(&context->res_ctx, stream);

	if (!pipe_ctx)
		return DC_ERROR_UNEXPECTED;


	status = build_pipe_hw_param(pipe_ctx);

	return status;
}

void dml_acquire_dsc(const struct dc *dc,
			struct resource_context *res_ctx,
			struct display_stream_compressor **dsc,
			int pipe_idx)
{
	int i;
	const struct resource_pool *pool = dc->res_pool;
	struct display_stream_compressor *dsc_old = dc->current_state->res_ctx.pipe_ctx[pipe_idx].stream_res.dsc;

	ASSERT(*dsc == NULL); /* If this ASSERT fails, dsc was not released properly */
	*dsc = NULL;

	/* Always do 1-to-1 mapping when number of DSCs is same as number of pipes */
	if (pool->res_cap->num_dsc == pool->res_cap->num_opp) {
		*dsc = pool->dscs[pipe_idx];
		res_ctx->is_dsc_acquired[pipe_idx] = true;
		return;
	}

	/* Return the old DSC to avoid having to redo the assignment */
	if (dsc_old && !res_ctx->is_dsc_acquired[dsc_old->inst]) {
		*dsc = dsc_old;
		res_ctx->is_dsc_acquired[dsc_old->inst] = true;
		return;
	}

	/* Find first free DSC */
	for (i = 0; i < pool->res_cap->num_dsc; i++)
		if (!res_ctx->is_dsc_acquired[i]) {
			*dsc = pool->dscs[i];
			res_ctx->is_dsc_acquired[i] = true;
			break;
		}
}

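/*
 * Initialize sec_pipe as a copy of pri_pipe and link it into either the ODM
 * chain (odm == true) or the MPC blending tree (odm == false). For ODM, a
 * DSC instance is also acquired when the stream has DSC enabled.
 */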
static bool dml_split_stream_for_mpc_or_odm(
		const struct dc *dc,
		struct resource_context *res_ctx,
		struct pipe_ctx *pri_pipe,
		struct pipe_ctx *sec_pipe,
		bool odm)
{
	int pipe_idx = sec_pipe->pipe_idx;
	const struct resource_pool *pool = dc->res_pool;

	*sec_pipe = *pri_pipe;

	sec_pipe->pipe_idx = pipe_idx;
	sec_pipe->plane_res.mi = pool->mis[pipe_idx];
	sec_pipe->plane_res.hubp = pool->hubps[pipe_idx];
	sec_pipe->plane_res.ipp = pool->ipps[pipe_idx];
	sec_pipe->plane_res.xfm = pool->transforms[pipe_idx];
	sec_pipe->plane_res.dpp = pool->dpps[pipe_idx];
	sec_pipe->plane_res.mpcc_inst = pool->dpps[pipe_idx]->inst;
	sec_pipe->stream_res.dsc = NULL;
	if (odm) {
		if (pri_pipe->next_odm_pipe) {
			ASSERT(pri_pipe->next_odm_pipe != sec_pipe);
			sec_pipe->next_odm_pipe = pri_pipe->next_odm_pipe;
			sec_pipe->next_odm_pipe->prev_odm_pipe = sec_pipe;
		}
		if (pri_pipe->top_pipe && pri_pipe->top_pipe->next_odm_pipe) {
			pri_pipe->top_pipe->next_odm_pipe->bottom_pipe = sec_pipe;
			sec_pipe->top_pipe = pri_pipe->top_pipe->next_odm_pipe;
		}
		if (pri_pipe->bottom_pipe && pri_pipe->bottom_pipe->next_odm_pipe) {
			pri_pipe->bottom_pipe->next_odm_pipe->top_pipe = sec_pipe;
			sec_pipe->bottom_pipe = pri_pipe->bottom_pipe->next_odm_pipe;
		}
		pri_pipe->next_odm_pipe = sec_pipe;
		sec_pipe->prev_odm_pipe = pri_pipe;
		ASSERT(sec_pipe->top_pipe == NULL);

		if (!sec_pipe->top_pipe)
			sec_pipe->stream_res.opp = pool->opps[pipe_idx];
		else
			sec_pipe->stream_res.opp = sec_pipe->top_pipe->stream_res.opp;
		if (sec_pipe->stream->timing.flags.DSC == 1) {
			dml_acquire_dsc(dc, res_ctx, &sec_pipe->stream_res.dsc, pipe_idx);
			ASSERT(sec_pipe->stream_res.dsc);
			if (sec_pipe->stream_res.dsc == NULL)
				return false;
		}
	} else {
		if (pri_pipe->bottom_pipe) {
			ASSERT(pri_pipe->bottom_pipe != sec_pipe);
			sec_pipe->bottom_pipe = pri_pipe->bottom_pipe;
			sec_pipe->bottom_pipe->top_pipe = sec_pipe;
		}
		pri_pipe->bottom_pipe = sec_pipe;
		sec_pipe->top_pipe = pri_pipe;

		ASSERT(pri_pipe->plane_state);
	}

	return true;
}

static struct pipe_ctx *dml_find_split_pipe(
		struct dc *dc,
		struct dc_state *context,
		int old_index)
{
	struct pipe_ctx *pipe = NULL;
	int i;

	if (old_index >= 0 && context->res_ctx.pipe_ctx[old_index].stream == NULL) {
		pipe = &context->res_ctx.pipe_ctx[old_index];
		pipe->pipe_idx = old_index;
	}

	if (!pipe)
		for (i = dc->res_pool->pipe_count - 1; i >= 0; i--) {
			if (dc->current_state->res_ctx.pipe_ctx[i].top_pipe == NULL
					&& dc->current_state->res_ctx.pipe_ctx[i].prev_odm_pipe == NULL) {
				if (context->res_ctx.pipe_ctx[i].stream == NULL) {
					pipe = &context->res_ctx.pipe_ctx[i];
					pipe->pipe_idx = i;
					break;
				}
			}
		}

	/*
	 * May need to fix pipes getting tossed from 1 opp to another on flip
	 * Add for debugging transient underflow during topology updates:
	 * ASSERT(pipe);
	 */
	if (!pipe)
		for (i = dc->res_pool->pipe_count - 1; i >= 0; i--) {
			if (context->res_ctx.pipe_ctx[i].stream == NULL) {
				pipe = &context->res_ctx.pipe_ctx[i];
				pipe->pipe_idx = i;
				break;
			}
		}

	return pipe;
}

static void dml_release_dsc(struct resource_context *res_ctx,
			const struct resource_pool *pool,
			struct display_stream_compressor **dsc)
{
	int i;

	for (i = 0; i < pool->res_cap->num_dsc; i++)
		if (pool->dscs[i] == *dsc) {
			res_ctx->is_dsc_acquired[i] = false;
			*dsc = NULL;
			break;
		}
}

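/* Count how many other pipes above and below share this pipe's plane, i.e.
 * the number of MPC splits already applied to the plane.
 */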
static int dml_get_num_mpc_splits(struct pipe_ctx *pipe)
{
	int mpc_split_count = 0;
	struct pipe_ctx *other_pipe = pipe->bottom_pipe;

	while (other_pipe && other_pipe->plane_state == pipe->plane_state) {
		mpc_split_count++;
		other_pipe = other_pipe->bottom_pipe;
	}
	other_pipe = pipe->top_pipe;
	while (other_pipe && other_pipe->plane_state == pipe->plane_state) {
		mpc_split_count++;
		other_pipe = other_pipe->top_pipe;
	}

	return mpc_split_count;
}

static bool dml_enough_pipes_for_subvp(struct dc *dc,
		struct dc_state *context)
{
	int i = 0;
	int num_pipes = 0;

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];

		if (pipe->stream && pipe->plane_state)
			num_pipes++;
	}

	// Sub-VP only possible if the number of "real" pipes is
	// less than or equal to half the number of available pipes
	if (num_pipes * 2 > dc->res_pool->pipe_count)
		return false;

	return true;
}

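/*
 * Walk the DML output and DC debug flags to decide, per pipe, whether a
 * 2-way or 4-way split (or a merge) is required, honoring the various
 * avoid/force split policies. Returns the (possibly adjusted) voltage level.
 */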
static int dml_validate_apply_pipe_split_flags(
		struct dc *dc,
		struct dc_state *context,
		int vlevel,
		int *split,
		bool *merge)
{
	int i, pipe_idx, vlevel_split;
	int plane_count = 0;
	bool force_split = false;
	bool avoid_split = dc->debug.pipe_split_policy == MPC_SPLIT_AVOID;
	struct vba_vars_st *v = &context->bw_ctx.dml.vba;
	int max_mpc_comb = v->maxMpcComb;

	if (context->stream_count > 1) {
		if (dc->debug.pipe_split_policy == MPC_SPLIT_AVOID_MULT_DISP)
			avoid_split = true;
	} else if (dc->debug.force_single_disp_pipe_split)
			force_split = true;

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];

		/**
		 * Workaround for avoiding pipe-split in cases where we'd split
		 * planes that are too small, resulting in splits that aren't
		 * valid for the scaler.
		 */
		if (pipe->plane_state &&
		    (pipe->plane_state->dst_rect.width <= 16 ||
		     pipe->plane_state->dst_rect.height <= 16 ||
		     pipe->plane_state->src_rect.width <= 16 ||
		     pipe->plane_state->src_rect.height <= 16))
			avoid_split = true;

		/* TODO: fix dc bugs and remove this split threshold thing */
		if (pipe->stream && !pipe->prev_odm_pipe &&
				(!pipe->top_pipe || pipe->top_pipe->plane_state != pipe->plane_state))
			++plane_count;
	}
	if (plane_count > dc->res_pool->pipe_count / 2)
		avoid_split = true;

	/* W/A: Mode timing with borders may not work well with pipe split, avoid for this corner case */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
		struct dc_crtc_timing timing;

		if (!pipe->stream)
			continue;
		else {
			timing = pipe->stream->timing;
			if (timing.h_border_left + timing.h_border_right
					+ timing.v_border_top + timing.v_border_bottom > 0) {
				avoid_split = true;
				break;
			}
		}
	}

	/* Avoid split loop looks for lowest voltage level that allows most unsplit pipes possible */
	if (avoid_split) {
		for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
			if (!context->res_ctx.pipe_ctx[i].stream)
				continue;

			for (vlevel_split = vlevel; vlevel <= context->bw_ctx.dml.soc.num_states; vlevel++)
				if (v->NoOfDPP[vlevel][0][pipe_idx] == 1 &&
						v->ModeSupport[vlevel][0])
					break;
			/* Impossible to not split this pipe */
			if (vlevel > context->bw_ctx.dml.soc.num_states)
				vlevel = vlevel_split;
			else
				max_mpc_comb = 0;
			pipe_idx++;
		}
		v->maxMpcComb = max_mpc_comb;
	}

	/* Split loop sets which pipe should be split based on dml outputs and dc flags */
	for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
		int pipe_plane = v->pipe_plane[pipe_idx];
		bool split4mpc = context->stream_count == 1 && plane_count == 1
				&& dc->config.enable_4to1MPC && dc->res_pool->pipe_count >= 4;

		if (!context->res_ctx.pipe_ctx[i].stream)
			continue;

		if (split4mpc || v->NoOfDPP[vlevel][max_mpc_comb][pipe_plane] == 4)
			split[i] = 4;
		else if (force_split || v->NoOfDPP[vlevel][max_mpc_comb][pipe_plane] == 2)
				split[i] = 2;

		if ((pipe->stream->view_format ==
				VIEW_3D_FORMAT_SIDE_BY_SIDE ||
				pipe->stream->view_format ==
				VIEW_3D_FORMAT_TOP_AND_BOTTOM) &&
				(pipe->stream->timing.timing_3d_format ==
				TIMING_3D_FORMAT_TOP_AND_BOTTOM ||
				 pipe->stream->timing.timing_3d_format ==
				TIMING_3D_FORMAT_SIDE_BY_SIDE))
			split[i] = 2;
		if (dc->debug.force_odm_combine & (1 << pipe->stream_res.tg->inst)) {
			split[i] = 2;
			v->ODMCombineEnablePerState[vlevel][pipe_plane] = dm_odm_combine_mode_2to1;
		}
		if (dc->debug.force_odm_combine_4to1 & (1 << pipe->stream_res.tg->inst)) {
			split[i] = 4;
			v->ODMCombineEnablePerState[vlevel][pipe_plane] = dm_odm_combine_mode_4to1;
		}
		/*420 format workaround*/
		if (pipe->stream->timing.h_addressable > 7680 &&
				pipe->stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420) {
			split[i] = 4;
		}

		v->ODMCombineEnabled[pipe_plane] =
			v->ODMCombineEnablePerState[vlevel][pipe_plane];

		if (v->ODMCombineEnabled[pipe_plane] == dm_odm_combine_mode_disabled) {
			if (dml_get_num_mpc_splits(pipe) == 1) {
				/*If need split for mpc but 2 way split already*/
				if (split[i] == 4)
					split[i] = 2; /* 2 -> 4 MPC */
				else if (split[i] == 2)
					split[i] = 0; /* 2 -> 2 MPC */
				else if (pipe->top_pipe && pipe->top_pipe->plane_state == pipe->plane_state)
					merge[i] = true; /* 2 -> 1 MPC */
			} else if (dml_get_num_mpc_splits(pipe) == 3) {
				/*If need split for mpc but 4 way split already*/
				if (split[i] == 2 && ((pipe->top_pipe && !pipe->top_pipe->top_pipe)
						|| !pipe->bottom_pipe)) {
					merge[i] = true; /* 4 -> 2 MPC */
				} else if (split[i] == 0 && pipe->top_pipe &&
						pipe->top_pipe->plane_state == pipe->plane_state)
					merge[i] = true; /* 4 -> 1 MPC */
				split[i] = 0;
			} else if (dml_get_num_mpc_splits(pipe)) {
				/* ODM -> MPC transition */
				if (pipe->prev_odm_pipe) {
					split[i] = 0;
					merge[i] = true;
				}
			}
		} else {
			if (dml_get_num_mpc_splits(pipe) == 1) {
				/*If need split for odm but 2 way split already*/
				if (split[i] == 4)
					split[i] = 2; /* 2 -> 4 ODM */
				else if (split[i] == 2)
					split[i] = 0; /* 2 -> 2 ODM */
				else if (pipe->prev_odm_pipe) {
					ASSERT(0); /* NOT expected yet */
					merge[i] = true; /* exit ODM */
				}
			} else if (dml_get_num_mpc_splits(pipe) == 3) {
				/*If need split for odm but 4 way split already*/
				if (split[i] == 2 && ((pipe->prev_odm_pipe && !pipe->prev_odm_pipe->prev_odm_pipe)
						|| !pipe->next_odm_pipe)) {
					ASSERT(0); /* NOT expected yet */
					merge[i] = true; /* 4 -> 2 ODM */
				} else if (split[i] == 0 && pipe->prev_odm_pipe) {
					ASSERT(0); /* NOT expected yet */
					merge[i] = true; /* exit ODM */
				}
				split[i] = 0;
			} else if (dml_get_num_mpc_splits(pipe)) {
				/* MPC -> ODM transition */
				ASSERT(0); /* NOT expected yet */
				if (pipe->top_pipe && pipe->top_pipe->plane_state == pipe->plane_state) {
					split[i] = 0;
					merge[i] = true;
				}
			}
		}

		/* Adjust dppclk when split is forced, do not bother with dispclk */
		if (split[i] != 0 && v->NoOfDPP[vlevel][max_mpc_comb][pipe_idx] == 1)
			v->RequiredDPPCLK[vlevel][max_mpc_comb][pipe_idx] /= 2;
		pipe_idx++;
	}

	return vlevel;
}

static void dml_set_phantom_stream_timing(struct dc *dc,
		struct dc_state *context,
		struct pipe_ctx *ref_pipe,
		struct dc_stream_state *phantom_stream)
{
	// phantom_vactive = blackout (latency + margin) + fw_processing_delays + pstate allow width
	uint32_t phantom_vactive_us = context->bw_ctx.dml.soc.dram_clock_change_latency_us + 60 +
					dc->caps.subvp_fw_processing_delay_us +
					dc->caps.subvp_pstate_allow_width_us;
	uint32_t phantom_vactive = ((double)phantom_vactive_us/1000000) *
					(ref_pipe->stream->timing.pix_clk_100hz * 100) /
					(double)ref_pipe->stream->timing.h_total;
	uint32_t phantom_bp = ref_pipe->pipe_dlg_param.vstartup_start;

	phantom_stream->dst.y = 0;
	phantom_stream->dst.height = phantom_vactive;
	phantom_stream->src.y = 0;
	phantom_stream->src.height = phantom_vactive;

	phantom_stream->timing.v_addressable = phantom_vactive;
	phantom_stream->timing.v_front_porch = 1;
	phantom_stream->timing.v_total = phantom_stream->timing.v_addressable +
						phantom_stream->timing.v_front_porch +
						phantom_stream->timing.v_sync_width +
						phantom_bp;
}

static struct dc_stream_state *dml_enable_phantom_stream(struct dc *dc,
		struct dc_state *context,
		struct pipe_ctx *ref_pipe)
{
	struct dc_stream_state *phantom_stream = NULL;

	phantom_stream = dc_create_stream_for_sink(ref_pipe->stream->sink);
	phantom_stream->signal = SIGNAL_TYPE_VIRTUAL;
	phantom_stream->dpms_off = true;
	phantom_stream->mall_stream_config.type = SUBVP_PHANTOM;
	phantom_stream->mall_stream_config.paired_stream = ref_pipe->stream;
	ref_pipe->stream->mall_stream_config.type = SUBVP_MAIN;
	ref_pipe->stream->mall_stream_config.paired_stream = phantom_stream;

	/* stream has limited viewport and small timing */
	memcpy(&phantom_stream->timing, &ref_pipe->stream->timing, sizeof(phantom_stream->timing));
	memcpy(&phantom_stream->src, &ref_pipe->stream->src, sizeof(phantom_stream->src));
	memcpy(&phantom_stream->dst, &ref_pipe->stream->dst, sizeof(phantom_stream->dst));
	dml_set_phantom_stream_timing(dc, context, ref_pipe, phantom_stream);

	dc_add_stream_to_ctx(dc, context, phantom_stream);
	dc->hwss.apply_ctx_to_hw(dc, context);
	return phantom_stream;
}

static void dml_enable_phantom_plane(struct dc *dc,
		struct dc_state *context,
		struct dc_stream_state *phantom_stream,
		struct pipe_ctx *main_pipe)
{
	struct dc_plane_state *phantom_plane = NULL;
	struct dc_plane_state *prev_phantom_plane = NULL;
	struct pipe_ctx *curr_pipe = main_pipe;

	while (curr_pipe) {
		if (curr_pipe->top_pipe && curr_pipe->top_pipe->plane_state == curr_pipe->plane_state)
			phantom_plane = prev_phantom_plane;
		else
			phantom_plane = dc_create_plane_state(dc);

		memcpy(&phantom_plane->address, &curr_pipe->plane_state->address, sizeof(phantom_plane->address));
		memcpy(&phantom_plane->scaling_quality, &curr_pipe->plane_state->scaling_quality,
				sizeof(phantom_plane->scaling_quality));
		memcpy(&phantom_plane->src_rect, &curr_pipe->plane_state->src_rect, sizeof(phantom_plane->src_rect));
		memcpy(&phantom_plane->dst_rect, &curr_pipe->plane_state->dst_rect, sizeof(phantom_plane->dst_rect));
		memcpy(&phantom_plane->clip_rect, &curr_pipe->plane_state->clip_rect, sizeof(phantom_plane->clip_rect));
		memcpy(&phantom_plane->plane_size, &curr_pipe->plane_state->plane_size,
				sizeof(phantom_plane->plane_size));
		memcpy(&phantom_plane->tiling_info, &curr_pipe->plane_state->tiling_info,
				sizeof(phantom_plane->tiling_info));
		memcpy(&phantom_plane->dcc, &curr_pipe->plane_state->dcc, sizeof(phantom_plane->dcc));
		/* Currently compat_level is undefined in dc_state
		 * phantom_plane->compat_level = curr_pipe->plane_state->compat_level;
		 */
		phantom_plane->format = curr_pipe->plane_state->format;
		phantom_plane->rotation = curr_pipe->plane_state->rotation;
		phantom_plane->visible = curr_pipe->plane_state->visible;

		/* Shadow pipe has small viewport. */
		phantom_plane->clip_rect.y = 0;
		phantom_plane->clip_rect.height = phantom_stream->timing.v_addressable;

		dc_add_plane_to_context(dc, phantom_stream, phantom_plane, context);

		curr_pipe = curr_pipe->bottom_pipe;
		prev_phantom_plane = phantom_plane;
	}
}

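/*
 * For every enabled top pipe that is not already part of a SubVP
 * configuration, create a paired phantom stream and phantom plane(s), then
 * rebuild scaling parameters for the resulting phantom pipes.
 */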
static void dml_add_phantom_pipes(struct dc *dc, struct dc_state *context)
{
	int i = 0;

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
		struct dc_stream_state *ref_stream = pipe->stream;
		// Only construct phantom stream for top pipes that have plane enabled
		if (!pipe->top_pipe && pipe->plane_state && pipe->stream &&
				pipe->stream->mall_stream_config.type == SUBVP_NONE) {
			struct dc_stream_state *phantom_stream = NULL;

			phantom_stream = dml_enable_phantom_stream(dc, context, pipe);
			dml_enable_phantom_plane(dc, context, phantom_stream, pipe);
		}
	}

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];

		if (pipe->plane_state && pipe->stream &&
				pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
			pipe->stream->use_dynamic_meta = false;
			pipe->plane_state->flip_immediate = false;
			if (!resource_build_scaling_params(pipe)) {
				// Log / remove phantom pipes since failed to build scaling params
			}
		}
	}
}

static void dml_remove_phantom_pipes(struct dc *dc, struct dc_state *context)
{
	int i;
	bool removed_pipe = false;

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
		// Remove the planes and stream for any phantom pipe
		if (pipe->plane_state && pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
			dc_rem_all_planes_for_stream(dc, pipe->stream, context);
			dc_remove_stream_from_ctx(dc, context, pipe->stream);
			removed_pipe = true;
		}

		// Clear all phantom stream info
		if (pipe->stream) {
			pipe->stream->mall_stream_config.type = SUBVP_NONE;
			pipe->stream->mall_stream_config.paired_stream = NULL;
		}
	}
	if (removed_pipe)
		dc->hwss.apply_ctx_to_hw(dc, context);
}

/*
 * If the input state contains no upstream planes for a particular pipe (i.e. only timing)
 * we need to populate some "conservative" plane information as DML cannot handle "no planes"
 */
static void populate_default_plane_from_timing(const struct dc_crtc_timing *timing, struct _vcs_dpi_display_pipe_params_st *pipe)
{
	pipe->src.is_hsplit = pipe->dest.odm_combine != dm_odm_combine_mode_disabled;
	pipe->src.source_scan = dm_horz;
	pipe->src.sw_mode = dm_sw_4kb_s;
	pipe->src.macro_tile_size = dm_64k_tile;
	pipe->src.viewport_width = timing->h_addressable;
	if (pipe->src.viewport_width > 1920)
		pipe->src.viewport_width = 1920;
	pipe->src.viewport_height = timing->v_addressable;
	if (pipe->src.viewport_height > 1080)
		pipe->src.viewport_height = 1080;
	pipe->src.surface_height_y = pipe->src.viewport_height;
	pipe->src.surface_width_y = pipe->src.viewport_width;
	pipe->src.surface_height_c = pipe->src.viewport_height;
	pipe->src.surface_width_c = pipe->src.viewport_width;
	pipe->src.data_pitch = ((pipe->src.viewport_width + 255) / 256) * 256;
	pipe->src.source_format = dm_444_32;
	pipe->dest.recout_width = pipe->src.viewport_width;
	pipe->dest.recout_height = pipe->src.viewport_height;
	pipe->dest.full_recout_width = pipe->dest.recout_width;
	pipe->dest.full_recout_height = pipe->dest.recout_height;
	pipe->scale_ratio_depth.lb_depth = dm_lb_16;
	pipe->scale_ratio_depth.hscl_ratio = 1.0;
	pipe->scale_ratio_depth.vscl_ratio = 1.0;
	pipe->scale_ratio_depth.scl_enable = 0;
	pipe->scale_taps.htaps = 1;
	pipe->scale_taps.vtaps = 1;
	pipe->dest.vtotal_min = timing->v_total;
	pipe->dest.vtotal_max = timing->v_total;

	if (pipe->dest.odm_combine == dm_odm_combine_mode_2to1) {
		pipe->src.viewport_width /= 2;
		pipe->dest.recout_width /= 2;
	} else if (pipe->dest.odm_combine == dm_odm_combine_mode_4to1) {
		pipe->src.viewport_width /= 4;
		pipe->dest.recout_width /= 4;
	}

	pipe->src.dcc = false;
	pipe->src.dcc_rate = 1;
}

/*
 * If the pipe is not blending (i.e. pipe_ctx->top_pipe == NULL) then its
 * hsplit group is equal to its own pipe ID.
 * Otherwise, all pipes that are part of the same blending tree have the same
 * hsplit group ID as the topmost pipe.
 *
 * If the pipe ctx is ODM combined, similar logic follows.
 */
static void populate_hsplit_group_from_dc_pipe_ctx (const struct pipe_ctx *dc_pipe_ctx, struct _vcs_dpi_display_e2e_pipe_params_st *e2e_pipe)
{
	e2e_pipe->pipe.src.hsplit_grp = dc_pipe_ctx->pipe_idx;

	if (dc_pipe_ctx->top_pipe && dc_pipe_ctx->top_pipe->plane_state
			== dc_pipe_ctx->plane_state) {
		struct pipe_ctx *first_pipe = dc_pipe_ctx->top_pipe;
		int split_idx = 0;

		while (first_pipe->top_pipe && first_pipe->top_pipe->plane_state
				== dc_pipe_ctx->plane_state) {
			first_pipe = first_pipe->top_pipe;
			split_idx++;
		}

		/* Treat 4to1 mpc combine as an mpo of 2 2-to-1 combines */
		if (split_idx == 0)
			e2e_pipe->pipe.src.hsplit_grp = first_pipe->pipe_idx;
		else if (split_idx == 1)
			e2e_pipe->pipe.src.hsplit_grp = dc_pipe_ctx->pipe_idx;
		else if (split_idx == 2)
			e2e_pipe->pipe.src.hsplit_grp = dc_pipe_ctx->top_pipe->pipe_idx;

	} else if (dc_pipe_ctx->prev_odm_pipe) {
		struct pipe_ctx *first_pipe = dc_pipe_ctx->prev_odm_pipe;

		while (first_pipe->prev_odm_pipe)
			first_pipe = first_pipe->prev_odm_pipe;
		e2e_pipe->pipe.src.hsplit_grp = first_pipe->pipe_idx;
	}
}

static void populate_dml_from_dc_pipe_ctx (const struct pipe_ctx *dc_pipe_ctx, struct _vcs_dpi_display_e2e_pipe_params_st *e2e_pipe, int always_scale)
{
	const struct dc_plane_state *pln = dc_pipe_ctx->plane_state;
	const struct scaler_data *scl = &dc_pipe_ctx->plane_res.scl_data;

	e2e_pipe->pipe.src.immediate_flip = pln->flip_immediate;
	e2e_pipe->pipe.src.is_hsplit = (dc_pipe_ctx->bottom_pipe && dc_pipe_ctx->bottom_pipe->plane_state == pln)
			|| (dc_pipe_ctx->top_pipe && dc_pipe_ctx->top_pipe->plane_state == pln)
			|| e2e_pipe->pipe.dest.odm_combine != dm_odm_combine_mode_disabled;

	/* stereo is not split */
	if (pln->stereo_format == PLANE_STEREO_FORMAT_SIDE_BY_SIDE ||
		pln->stereo_format == PLANE_STEREO_FORMAT_TOP_AND_BOTTOM) {
		e2e_pipe->pipe.src.is_hsplit = false;
		e2e_pipe->pipe.src.hsplit_grp = dc_pipe_ctx->pipe_idx;
	}

	e2e_pipe->pipe.src.source_scan = pln->rotation == ROTATION_ANGLE_90
			|| pln->rotation == ROTATION_ANGLE_270 ? dm_vert : dm_horz;
	e2e_pipe->pipe.src.viewport_y_y = scl->viewport.y;
	e2e_pipe->pipe.src.viewport_y_c = scl->viewport_c.y;
	e2e_pipe->pipe.src.viewport_width = scl->viewport.width;
	e2e_pipe->pipe.src.viewport_width_c = scl->viewport_c.width;
	e2e_pipe->pipe.src.viewport_height = scl->viewport.height;
	e2e_pipe->pipe.src.viewport_height_c = scl->viewport_c.height;
	e2e_pipe->pipe.src.viewport_width_max = pln->src_rect.width;
	e2e_pipe->pipe.src.viewport_height_max = pln->src_rect.height;
	e2e_pipe->pipe.src.surface_width_y = pln->plane_size.surface_size.width;
	e2e_pipe->pipe.src.surface_height_y = pln->plane_size.surface_size.height;
	e2e_pipe->pipe.src.surface_width_c = pln->plane_size.chroma_size.width;
	e2e_pipe->pipe.src.surface_height_c = pln->plane_size.chroma_size.height;

	if (pln->format == SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA
			|| pln->format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
		e2e_pipe->pipe.src.data_pitch = pln->plane_size.surface_pitch;
		e2e_pipe->pipe.src.data_pitch_c = pln->plane_size.chroma_pitch;
		e2e_pipe->pipe.src.meta_pitch = pln->dcc.meta_pitch;
		e2e_pipe->pipe.src.meta_pitch_c = pln->dcc.meta_pitch_c;
	} else {
		e2e_pipe->pipe.src.data_pitch = pln->plane_size.surface_pitch;
		e2e_pipe->pipe.src.meta_pitch = pln->dcc.meta_pitch;
	}
	e2e_pipe->pipe.src.dcc = pln->dcc.enable;
	e2e_pipe->pipe.src.dcc_rate = 1;
	e2e_pipe->pipe.dest.recout_width = scl->recout.width;
	e2e_pipe->pipe.dest.recout_height = scl->recout.height;
	e2e_pipe->pipe.dest.full_recout_height = scl->recout.height;
	e2e_pipe->pipe.dest.full_recout_width = scl->recout.width;
	if (e2e_pipe->pipe.dest.odm_combine == dm_odm_combine_mode_2to1)
		e2e_pipe->pipe.dest.full_recout_width *= 2;
	else if (e2e_pipe->pipe.dest.odm_combine == dm_odm_combine_mode_4to1)
		e2e_pipe->pipe.dest.full_recout_width *= 4;
	else {
		struct pipe_ctx *split_pipe = dc_pipe_ctx->bottom_pipe;

		while (split_pipe && split_pipe->plane_state == pln) {
			e2e_pipe->pipe.dest.full_recout_width += split_pipe->plane_res.scl_data.recout.width;
			split_pipe = split_pipe->bottom_pipe;
		}
		split_pipe = dc_pipe_ctx->top_pipe;
		while (split_pipe && split_pipe->plane_state == pln) {
			e2e_pipe->pipe.dest.full_recout_width += split_pipe->plane_res.scl_data.recout.width;
			split_pipe = split_pipe->top_pipe;
		}
	}

	e2e_pipe->pipe.scale_ratio_depth.lb_depth = dm_lb_16;
	e2e_pipe->pipe.scale_ratio_depth.hscl_ratio = (double) scl->ratios.horz.value / (1ULL<<32);
	e2e_pipe->pipe.scale_ratio_depth.hscl_ratio_c = (double) scl->ratios.horz_c.value / (1ULL<<32);
	e2e_pipe->pipe.scale_ratio_depth.vscl_ratio = (double) scl->ratios.vert.value / (1ULL<<32);
	e2e_pipe->pipe.scale_ratio_depth.vscl_ratio_c = (double) scl->ratios.vert_c.value / (1ULL<<32);
	e2e_pipe->pipe.scale_ratio_depth.scl_enable =
			scl->ratios.vert.value != dc_fixpt_one.value
			|| scl->ratios.horz.value != dc_fixpt_one.value
			|| scl->ratios.vert_c.value != dc_fixpt_one.value
			|| scl->ratios.horz_c.value != dc_fixpt_one.value /*Lb only or Full scl*/
			|| always_scale; /*support always scale*/
	e2e_pipe->pipe.scale_taps.htaps = scl->taps.h_taps;
	e2e_pipe->pipe.scale_taps.htaps_c = scl->taps.h_taps_c;
	e2e_pipe->pipe.scale_taps.vtaps = scl->taps.v_taps;
	e2e_pipe->pipe.scale_taps.vtaps_c = scl->taps.v_taps_c;

	/* Currently compat_level is not defined. Commenting it until further resolution
	 * if (pln->compat_level == DC_LEGACY_TILING_ADDR_GEN_TWO) {
		swizzle_to_dml_params(pln->tiling_info.gfx9.swizzle,
				&e2e_pipe->pipe.src.sw_mode);
		e2e_pipe->pipe.src.macro_tile_size =
				swizzle_mode_to_macro_tile_size(pln->tiling_info.gfx9.swizzle);
	} else {
		gfx10array_mode_to_dml_params(pln->tiling_info.gfx10compatible.array_mode,
				pln->compat_level,
				&e2e_pipe->pipe.src.sw_mode);
		e2e_pipe->pipe.src.macro_tile_size = dm_4k_tile;
	}*/

	e2e_pipe->pipe.src.source_format = dc_source_format_to_dml_source_format(pln->format);
}

static void populate_dml_cursor_parameters_from_dc_pipe_ctx (const struct pipe_ctx *dc_pipe_ctx, struct _vcs_dpi_display_e2e_pipe_params_st *e2e_pipe)
{
	/*
	 * For a graphics plane the cursor count is 1; for video (NV12) planes
	 * and SubVP phantom pipes it is 0, so the bandwidth calculations
	 * account for the cursor being on or off.
	 */
	if (dc_pipe_ctx->plane_state &&
			(dc_pipe_ctx->plane_state->address.type == PLN_ADDR_TYPE_VIDEO_PROGRESSIVE ||
			dc_pipe_ctx->stream->mall_stream_config.type == SUBVP_PHANTOM))
		e2e_pipe->pipe.src.num_cursors = 0;
	else
		e2e_pipe->pipe.src.num_cursors = 1;

	e2e_pipe->pipe.src.cur0_src_width = 256;
	e2e_pipe->pipe.src.cur0_bpp = dm_cur_32bit;
}

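/*
 * Translate each enabled stream (and its plane state, if any) into a DML
 * e2e pipe descriptor. synchronized_vblank is cleared as soon as two streams
 * are found that cannot be timing- or vblank-synchronized.
 */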
static int populate_dml_pipes_from_context_base(
		struct dc *dc,
		struct dc_state *context,
		display_e2e_pipe_params_st *pipes,
		bool fast_validate)
{
	int pipe_cnt, i;
	bool synchronized_vblank = true;
	struct resource_context *res_ctx = &context->res_ctx;

	for (i = 0, pipe_cnt = -1; i < dc->res_pool->pipe_count; i++) {
		if (!res_ctx->pipe_ctx[i].stream)
			continue;

		if (pipe_cnt < 0) {
			pipe_cnt = i;
			continue;
		}

		if (res_ctx->pipe_ctx[pipe_cnt].stream == res_ctx->pipe_ctx[i].stream)
			continue;

		if (dc->debug.disable_timing_sync ||
			(!resource_are_streams_timing_synchronizable(
				res_ctx->pipe_ctx[pipe_cnt].stream,
				res_ctx->pipe_ctx[i].stream) &&
			!resource_are_vblanks_synchronizable(
				res_ctx->pipe_ctx[pipe_cnt].stream,
				res_ctx->pipe_ctx[i].stream))) {
			synchronized_vblank = false;
			break;
		}
	}

	for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
		struct dc_crtc_timing *timing = &res_ctx->pipe_ctx[i].stream->timing;

		struct audio_check aud_check = {0};
		if (!res_ctx->pipe_ctx[i].stream)
			continue;

		/* todo:
		pipes[pipe_cnt].pipe.src.dynamic_metadata_enable = 0;
		pipes[pipe_cnt].pipe.src.dcc = 0;
		pipes[pipe_cnt].pipe.src.vm = 0;*/

		pipes[pipe_cnt].clks_cfg.refclk_mhz = dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000.0;

		pipes[pipe_cnt].dout.dsc_enable = res_ctx->pipe_ctx[i].stream->timing.flags.DSC;
		/* todo: rotation?*/
		pipes[pipe_cnt].dout.dsc_slices = res_ctx->pipe_ctx[i].stream->timing.dsc_cfg.num_slices_h;
		if (res_ctx->pipe_ctx[i].stream->use_dynamic_meta) {
			pipes[pipe_cnt].pipe.src.dynamic_metadata_enable = true;
			/* 1/2 vblank */
			pipes[pipe_cnt].pipe.src.dynamic_metadata_lines_before_active =
				(timing->v_total - timing->v_addressable
					- timing->v_border_top - timing->v_border_bottom) / 2;
			/* 36 bytes dp, 32 hdmi */
			pipes[pipe_cnt].pipe.src.dynamic_metadata_xmit_bytes =
				dc_is_dp_signal(res_ctx->pipe_ctx[i].stream->signal) ? 36 : 32;
		}
		pipes[pipe_cnt].pipe.dest.synchronized_vblank_all_planes = synchronized_vblank;

		dc_timing_to_dml_timing(timing, &pipes[pipe_cnt].pipe.dest);
		pipes[pipe_cnt].pipe.dest.vtotal_min = res_ctx->pipe_ctx[i].stream->adjust.v_total_min;
		pipes[pipe_cnt].pipe.dest.vtotal_max = res_ctx->pipe_ctx[i].stream->adjust.v_total_max;

		pipes[pipe_cnt].pipe.dest.otg_inst = res_ctx->pipe_ctx[i].stream_res.tg->inst;

		pipes[pipe_cnt].pipe.dest.odm_combine = get_dml_odm_combine(&res_ctx->pipe_ctx[i]);

		populate_hsplit_group_from_dc_pipe_ctx(&res_ctx->pipe_ctx[i], &pipes[pipe_cnt]);

		pipes[pipe_cnt].dout.dp_lanes = 4;
		pipes[pipe_cnt].dout.is_virtual = 0;
		pipes[pipe_cnt].dout.output_type = get_dml_output_type(res_ctx->pipe_ctx[i].stream->signal);
		if (pipes[pipe_cnt].dout.output_type < 0) {
			pipes[pipe_cnt].dout.output_type = dm_dp;
			pipes[pipe_cnt].dout.is_virtual = 1;
		}

		populate_color_depth_and_encoding_from_timing(&res_ctx->pipe_ctx[i].stream->timing, &pipes[pipe_cnt].dout);

		if (res_ctx->pipe_ctx[i].stream->timing.flags.DSC)
			pipes[pipe_cnt].dout.output_bpp = res_ctx->pipe_ctx[i].stream->timing.dsc_cfg.bits_per_pixel / 16.0;

		/* todo: default max for now, until there is logic reflecting this in dc*/
		pipes[pipe_cnt].dout.dsc_input_bpc = 12;
		/*fill up the audio sample rate (unit in kHz)*/
		get_audio_check(&res_ctx->pipe_ctx[i].stream->audio_info, &aud_check);
		pipes[pipe_cnt].dout.max_audio_sample_rate = aud_check.max_audiosample_rate / 1000;

		populate_dml_cursor_parameters_from_dc_pipe_ctx(&res_ctx->pipe_ctx[i], &pipes[pipe_cnt]);

		if (!res_ctx->pipe_ctx[i].plane_state) {
			populate_default_plane_from_timing(timing, &pipes[pipe_cnt].pipe);
		} else {
			populate_dml_from_dc_pipe_ctx(&res_ctx->pipe_ctx[i], &pipes[pipe_cnt], dc->debug.always_scale);
		}

		pipe_cnt++;
	}

	/* populate writeback information */
	if (dc->res_pool)
		dc->res_pool->funcs->populate_dml_writeback_from_context(dc, res_ctx, pipes);

	return pipe_cnt;
}

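/*
 * DML-wrapper specific pipe population: run the base helper, then fill in
 * GPUVM, front porch and DSC input bpc per pipe, and enable 4:1 MPC or
 * unbounded requesting for the single-pipe, single-plane case.
 */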
static int dml_populate_dml_pipes_from_context(
	struct dc *dc, struct dc_state *context,
	display_e2e_pipe_params_st *pipes,
	bool fast_validate)
{
	int i, pipe_cnt;
	struct resource_context *res_ctx = &context->res_ctx;
	struct pipe_ctx *pipe;

	populate_dml_pipes_from_context_base(dc, context, pipes, fast_validate);

	for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
		struct dc_crtc_timing *timing;

		if (!res_ctx->pipe_ctx[i].stream)
			continue;
		pipe = &res_ctx->pipe_ctx[i];
		timing = &pipe->stream->timing;

		pipes[pipe_cnt].pipe.src.gpuvm = true;
		pipes[pipe_cnt].pipe.src.dcc_fraction_of_zs_req_luma = 0;
		pipes[pipe_cnt].pipe.src.dcc_fraction_of_zs_req_chroma = 0;
		pipes[pipe_cnt].pipe.dest.vfront_porch = timing->v_front_porch;

		pipes[pipe_cnt].dout.dsc_input_bpc = 0;
		if (pipes[pipe_cnt].dout.dsc_enable) {
			switch (timing->display_color_depth) {
			case COLOR_DEPTH_888:
				pipes[pipe_cnt].dout.dsc_input_bpc = 8;
				break;
			case COLOR_DEPTH_101010:
				pipes[pipe_cnt].dout.dsc_input_bpc = 10;
				break;
			case COLOR_DEPTH_121212:
				pipes[pipe_cnt].dout.dsc_input_bpc = 12;
				break;
			default:
				ASSERT(0);
				break;
			}
		}
		pipe_cnt++;
	}
	dc->config.enable_4to1MPC = false;
	if (pipe_cnt == 1 && pipe->plane_state && !dc->debug.disable_z9_mpc) {
		if (is_dual_plane(pipe->plane_state->format)
				&& pipe->plane_state->src_rect.width <= 1920 && pipe->plane_state->src_rect.height <= 1080) {
			dc->config.enable_4to1MPC = true;
		} else if (!is_dual_plane(pipe->plane_state->format)) {
			context->bw_ctx.dml.ip.det_buffer_size_kbytes = 192;
			pipes[0].pipe.src.unbounded_req_mode = true;
		}
	}

	return pipe_cnt;
}

static void dml_full_validate_bw_helper(struct dc *dc,
		struct dc_state *context,
		display_e2e_pipe_params_st *pipes,
		int *vlevel,
		int *split,
		bool *merge,
		int *pipe_cnt)
{
	struct vba_vars_st *vba = &context->bw_ctx.dml.vba;

	/*
	 * DML favors voltage over p-state, but we're more interested in
	 * supporting p-state over voltage. We can't support p-state in
	 * prefetch mode > 0 so try capping the prefetch mode to start.
	 */
	context->bw_ctx.dml.soc.allow_dram_self_refresh_or_dram_clock_change_in_vblank =
		dm_allow_self_refresh_and_mclk_switch;
	*vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, *pipe_cnt);
	/* This may adjust vlevel and maxMpcComb */
	if (*vlevel < context->bw_ctx.dml.soc.num_states)
		*vlevel = dml_validate_apply_pipe_split_flags(dc, context, *vlevel, split, merge);

	/* Conditions for setting up phantom pipes for SubVP:
	 * 1. Not force disable SubVP
	 * 2. Full update (i.e. !fast_validate)
	 * 3. Enough pipes are available to support SubVP (TODO: Which pipes will use VACTIVE / VBLANK / SUBVP?)
	 * 4. Display configuration passes validation
	 * 5. (Config doesn't support MCLK in VACTIVE/VBLANK || dc->debug.force_subvp_mclk_switch)
	 */
	if (!dc->debug.force_disable_subvp &&
			dml_enough_pipes_for_subvp(dc, context) &&
			*vlevel < context->bw_ctx.dml.soc.num_states &&
			(vba->DRAMClockChangeSupport[*vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported ||
			dc->debug.force_subvp_mclk_switch)) {

		dml_add_phantom_pipes(dc, context);

		/* Create input to DML based on new context which includes phantom pipes
		 * TODO: Input to DML should mark which pipes are phantom
		 */
		*pipe_cnt = dml_populate_dml_pipes_from_context(dc, context, pipes, false);
		*vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, *pipe_cnt);
		if (*vlevel < context->bw_ctx.dml.soc.num_states) {
			memset(split, 0, MAX_PIPES * sizeof(*split));
			memset(merge, 0, MAX_PIPES * sizeof(*merge));
			*vlevel = dml_validate_apply_pipe_split_flags(dc, context, *vlevel, split, merge);
		}

		// If SubVP pipe config is unsupported (or cannot be used for UCLK switching)
		// remove phantom pipes and repopulate dml pipes
		if (*vlevel == context->bw_ctx.dml.soc.num_states ||
				vba->DRAMClockChangeSupport[*vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported) {
			dml_remove_phantom_pipes(dc, context);
			*pipe_cnt = dml_populate_dml_pipes_from_context(dc, context, pipes, false);
		}
	}
}

static void dcn20_adjust_adaptive_sync_v_startup(
		const struct dc_crtc_timing *dc_crtc_timing, int *vstartup_start)
{
	struct dc_crtc_timing patched_crtc_timing;
	uint32_t asic_blank_end   = 0;
	uint32_t asic_blank_start = 0;
	uint32_t newVstartup	  = 0;

	patched_crtc_timing = *dc_crtc_timing;

	if (patched_crtc_timing.flags.INTERLACE == 1) {
		if (patched_crtc_timing.v_front_porch < 2)
			patched_crtc_timing.v_front_porch = 2;
	} else {
		if (patched_crtc_timing.v_front_porch < 1)
			patched_crtc_timing.v_front_porch = 1;
	}

	/* blank_start = frame end - front porch */
	asic_blank_start = patched_crtc_timing.v_total -
					patched_crtc_timing.v_front_porch;

	/* blank_end = blank_start - active */
	asic_blank_end = asic_blank_start -
					patched_crtc_timing.v_border_bottom -
					patched_crtc_timing.v_addressable -
					patched_crtc_timing.v_border_top;

	newVstartup = asic_blank_end + (patched_crtc_timing.v_total - asic_blank_start);

	*vstartup_start = ((newVstartup > *vstartup_start) ? newVstartup : *vstartup_start);
}

static bool is_dp_128b_132b_signal(struct pipe_ctx *pipe_ctx)
{
	return (pipe_ctx->stream_res.hpo_dp_stream_enc &&
			pipe_ctx->link_res.hpo_dp_link_enc &&
			dc_is_dp_signal(pipe_ctx->stream->signal));
}

static bool is_dtbclk_required(struct dc *dc, struct dc_state *context)
{
	int i;

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		if (!context->res_ctx.pipe_ctx[i].stream)
			continue;
		if (is_dp_128b_132b_signal(&context->res_ctx.pipe_ctx[i]))
			return true;
	}
	return false;
}

static void dml_update_soc_for_wm_a(struct dc *dc, struct dc_state *context)
{
	if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].valid) {
		context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us;
		context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.sr_enter_plus_exit_time_us;
		context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.sr_exit_time_us;
	}
}

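/*
 * Core DML validation path: remove stale phantom pipes, repopulate the DML
 * pipe descriptors, evaluate the voltage level and pipe-split flags (with a
 * SubVP/phantom-pipe attempt on full validation), then merge or split the
 * hardware pipes so the context matches the DML decision.
 */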
dml_internal_validate(struct dc * dc,struct dc_state * context,display_e2e_pipe_params_st * pipes,int * pipe_cnt_out,int * vlevel_out,bool fast_validate)1302 static bool dml_internal_validate(
1303 		struct dc *dc,
1304 		struct dc_state *context,
1305 		display_e2e_pipe_params_st *pipes,
1306 		int *pipe_cnt_out,
1307 		int *vlevel_out,
1308 		bool fast_validate)
1309 {
1310 	bool out = false;
1311 	bool repopulate_pipes = false;
1312 	int split[MAX_PIPES] = { 0 };
1313 	bool merge[MAX_PIPES] = { false };
1314 	bool newly_split[MAX_PIPES] = { false };
1315 	int pipe_cnt, i, pipe_idx, vlevel;
1316 	struct vba_vars_st *vba = &context->bw_ctx.dml.vba;
1317 
1318 	ASSERT(pipes);
1319 	if (!pipes)
1320 		return false;
1321 
1322 	// For each full update, remove all existing phantom pipes first
1323 	dml_remove_phantom_pipes(dc, context);
1324 
1325 	dml_update_soc_for_wm_a(dc, context);
1326 
1327 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
1328 		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
1329 
1330 		if (pipe->plane_state) {
1331 			// On initial pass through DML, we intend to use MALL for SS on all
1332 			// (non-PSR) surfaces with none using MALL for P-State
1333 			// 'mall_plane_config': is not a member of 'dc_plane_state' - commenting it out till mall_plane_config gets supported in dc_plant_state
1334 			//if (pipe->stream && pipe->stream->link->psr_settings.psr_version == DC_PSR_VERSION_UNSUPPORTED)
1335 			//	pipe->plane_state->mall_plane_config.use_mall_for_ss = true;
1336 		}
1337 	}
1338 	pipe_cnt = dml_populate_dml_pipes_from_context(dc, context, pipes, fast_validate);
1339 
1340 	if (!pipe_cnt) {
1341 		out = true;
1342 		goto validate_out;
1343 	}
1344 
1345 	dml_log_pipe_params(&context->bw_ctx.dml, pipes, pipe_cnt);
1346 
1347 	if (!fast_validate) {
1348 		dml_full_validate_bw_helper(dc, context, pipes, &vlevel, split, merge, &pipe_cnt);
1349 	}
1350 
1351 	if (fast_validate || vlevel == context->bw_ctx.dml.soc.num_states ||
1352 			vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported) {
1353 		/*
1354 		 * If mode is unsupported or there's still no p-state support then
1355 		 * fall back to favoring voltage.
1356 		 *
1357 		 * We don't actually support prefetch mode 2, so require that we
1358 		 * at least support prefetch mode 1.
1359 		 */
1360 		context->bw_ctx.dml.soc.allow_dram_self_refresh_or_dram_clock_change_in_vblank =
1361 			dm_allow_self_refresh;
1362 
1363 		vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt);
1364 		if (vlevel < context->bw_ctx.dml.soc.num_states) {
1365 			memset(split, 0, sizeof(split));
1366 			memset(merge, 0, sizeof(merge));
1367 			vlevel = dml_validate_apply_pipe_split_flags(dc, context, vlevel, split, merge);
1368 		}
1369 	}
1370 
1371 	dml_log_mode_support_params(&context->bw_ctx.dml);
1372 
1373 	if (vlevel == context->bw_ctx.dml.soc.num_states)
1374 		goto validate_fail;
1375 
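	/* Reject configurations that combine ODM with partial-screen MPO; the ODM+MPO path is
	 * only validated for full-screen overlays (matching recouts).
	 */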
1376 	for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
1377 		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
1378 		struct pipe_ctx *mpo_pipe = pipe->bottom_pipe;
1379 
1380 		if (!pipe->stream)
1381 			continue;
1382 
1383 		/* We only support full screen mpo with ODM */
1384 		if (vba->ODMCombineEnabled[vba->pipe_plane[pipe_idx]] != dm_odm_combine_mode_disabled
1385 				&& pipe->plane_state && mpo_pipe
1386 				&& memcmp(&mpo_pipe->plane_res.scl_data.recout,
1387 						&pipe->plane_res.scl_data.recout,
1388 						sizeof(struct rect)) != 0) {
1389 			ASSERT(mpo_pipe->plane_state != pipe->plane_state);
1390 			goto validate_fail;
1391 		}
1392 		pipe_idx++;
1393 	}
1394 
1395 	/* merge pipes if necessary */
1396 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
1397 		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
1398 
1399 		/*skip pipes that don't need merging*/
1400 		if (!merge[i])
1401 			continue;
1402 
1403 		/* if ODM merge we ignore mpc tree, mpo pipes will have their own flags */
1404 		if (pipe->prev_odm_pipe) {
1405 			/*split off odm pipe*/
1406 			pipe->prev_odm_pipe->next_odm_pipe = pipe->next_odm_pipe;
1407 			if (pipe->next_odm_pipe)
1408 				pipe->next_odm_pipe->prev_odm_pipe = pipe->prev_odm_pipe;
1409 
1410 			pipe->bottom_pipe = NULL;
1411 			pipe->next_odm_pipe = NULL;
1412 			pipe->plane_state = NULL;
1413 			pipe->stream = NULL;
1414 			pipe->top_pipe = NULL;
1415 			pipe->prev_odm_pipe = NULL;
1416 			if (pipe->stream_res.dsc)
1417 				dml_release_dsc(&context->res_ctx, dc->res_pool, &pipe->stream_res.dsc);
1418 			memset(&pipe->plane_res, 0, sizeof(pipe->plane_res));
1419 			memset(&pipe->stream_res, 0, sizeof(pipe->stream_res));
1420 			repopulate_pipes = true;
1421 		} else if (pipe->top_pipe && pipe->top_pipe->plane_state == pipe->plane_state) {
1422 			struct pipe_ctx *top_pipe = pipe->top_pipe;
1423 			struct pipe_ctx *bottom_pipe = pipe->bottom_pipe;
1424 
1425 			top_pipe->bottom_pipe = bottom_pipe;
1426 			if (bottom_pipe)
1427 				bottom_pipe->top_pipe = top_pipe;
1428 
1429 			pipe->top_pipe = NULL;
1430 			pipe->bottom_pipe = NULL;
1431 			pipe->plane_state = NULL;
1432 			pipe->stream = NULL;
1433 			memset(&pipe->plane_res, 0, sizeof(pipe->plane_res));
1434 			memset(&pipe->stream_res, 0, sizeof(pipe->stream_res));
1435 			repopulate_pipes = true;
1436 		} else
1437 			ASSERT(0); /* Should never try to merge master pipe */
1438 
1439 	}
1440 
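	/* Apply the DML split decisions: allocate free pipes for 2:1 and 4:1 MPC/ODM splits,
	 * preferring the pipe indices used by the previous state so the same secondary pipes
	 * are reused where possible.
	 */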
1441 	for (i = 0, pipe_idx = -1; i < dc->res_pool->pipe_count; i++) {
1442 		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
1443 		struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
1444 		struct pipe_ctx *hsplit_pipe = NULL;
1445 		bool odm;
1446 		int old_index = -1;
1447 
1448 		if (!pipe->stream || newly_split[i])
1449 			continue;
1450 
1451 		pipe_idx++;
1452 		odm = vba->ODMCombineEnabled[vba->pipe_plane[pipe_idx]] != dm_odm_combine_mode_disabled;
1453 
1454 		if (!pipe->plane_state && !odm)
1455 			continue;
1456 
1457 		if (split[i]) {
1458 			if (odm) {
1459 				if (split[i] == 4 && old_pipe->next_odm_pipe && old_pipe->next_odm_pipe->next_odm_pipe)
1460 					old_index = old_pipe->next_odm_pipe->next_odm_pipe->pipe_idx;
1461 				else if (old_pipe->next_odm_pipe)
1462 					old_index = old_pipe->next_odm_pipe->pipe_idx;
1463 			} else {
1464 				if (split[i] == 4 && old_pipe->bottom_pipe && old_pipe->bottom_pipe->bottom_pipe &&
1465 						old_pipe->bottom_pipe->bottom_pipe->plane_state == old_pipe->plane_state)
1466 					old_index = old_pipe->bottom_pipe->bottom_pipe->pipe_idx;
1467 				else if (old_pipe->bottom_pipe &&
1468 						old_pipe->bottom_pipe->plane_state == old_pipe->plane_state)
1469 					old_index = old_pipe->bottom_pipe->pipe_idx;
1470 			}
1471 			hsplit_pipe = dml_find_split_pipe(dc, context, old_index);
1472 			ASSERT(hsplit_pipe);
1473 			if (!hsplit_pipe)
1474 				goto validate_fail;
1475 
1476 			if (!dml_split_stream_for_mpc_or_odm(
1477 					dc, &context->res_ctx,
1478 					pipe, hsplit_pipe, odm))
1479 				goto validate_fail;
1480 
1481 			newly_split[hsplit_pipe->pipe_idx] = true;
1482 			repopulate_pipes = true;
1483 		}
1484 		if (split[i] == 4) {
1485 			struct pipe_ctx *pipe_4to1;
1486 
1487 			if (odm && old_pipe->next_odm_pipe)
1488 				old_index = old_pipe->next_odm_pipe->pipe_idx;
1489 			else if (!odm && old_pipe->bottom_pipe &&
1490 						old_pipe->bottom_pipe->plane_state == old_pipe->plane_state)
1491 				old_index = old_pipe->bottom_pipe->pipe_idx;
1492 			else
1493 				old_index = -1;
1494 			pipe_4to1 = dml_find_split_pipe(dc, context, old_index);
1495 			ASSERT(pipe_4to1);
1496 			if (!pipe_4to1)
1497 				goto validate_fail;
1498 			if (!dml_split_stream_for_mpc_or_odm(
1499 					dc, &context->res_ctx,
1500 					pipe, pipe_4to1, odm))
1501 				goto validate_fail;
1502 			newly_split[pipe_4to1->pipe_idx] = true;
1503 
1504 			if (odm && old_pipe->next_odm_pipe && old_pipe->next_odm_pipe->next_odm_pipe
1505 					&& old_pipe->next_odm_pipe->next_odm_pipe->next_odm_pipe)
1506 				old_index = old_pipe->next_odm_pipe->next_odm_pipe->next_odm_pipe->pipe_idx;
1507 			else if (!odm && old_pipe->bottom_pipe && old_pipe->bottom_pipe->bottom_pipe &&
1508 					old_pipe->bottom_pipe->bottom_pipe->bottom_pipe &&
1509 					old_pipe->bottom_pipe->bottom_pipe->bottom_pipe->plane_state == old_pipe->plane_state)
1510 				old_index = old_pipe->bottom_pipe->bottom_pipe->bottom_pipe->pipe_idx;
1511 			else
1512 				old_index = -1;
1513 			pipe_4to1 = dml_find_split_pipe(dc, context, old_index);
1514 			ASSERT(pipe_4to1);
1515 			if (!pipe_4to1)
1516 				goto validate_fail;
1517 			if (!dml_split_stream_for_mpc_or_odm(
1518 					dc, &context->res_ctx,
1519 					hsplit_pipe, pipe_4to1, odm))
1520 				goto validate_fail;
1521 			newly_split[pipe_4to1->pipe_idx] = true;
1522 		}
1523 		if (odm)
1524 			dml_build_mapped_resource(dc, context, pipe->stream);
1525 	}
1526 
1527 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
1528 		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
1529 
1530 		if (pipe->plane_state) {
1531 			if (!resource_build_scaling_params(pipe))
1532 				goto validate_fail;
1533 		}
1534 	}
1535 
1536 	/* Validate the actual DSC count against each stream's DSC resources */
1537 	if (!dml_validate_dsc(dc, context)) {
1538 		vba->ValidationStatus[vba->soc.num_states] = DML_FAIL_DSC_VALIDATION_FAILURE;
1539 		goto validate_fail;
1540 	}
1541 
1542 	if (repopulate_pipes)
1543 		pipe_cnt = dml_populate_dml_pipes_from_context(dc, context, pipes, fast_validate);
1544 	*vlevel_out = vlevel;
1545 	*pipe_cnt_out = pipe_cnt;
1546 
1547 	out = true;
1548 	goto validate_out;
1549 
1550 validate_fail:
1551 	out = false;
1552 
1553 validate_out:
1554 	return out;
1555 }
1556 
1557 static void dml_calculate_dlg_params(
1558 		struct dc *dc, struct dc_state *context,
1559 		display_e2e_pipe_params_st *pipes,
1560 		int pipe_cnt,
1561 		int vlevel)
1562 {
1563 	int i, pipe_idx;
1564 	int plane_count;
1565 
1566 	/* Writeback MCIF_WB arbitration parameters */
1567 	if (dc->res_pool)
1568 		dc->res_pool->funcs->set_mcif_arb_params(dc, context, pipes, pipe_cnt);
1569 
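	/* Copy the validated clock requirements out of the DML VBA results into the context
	 * clocks (converted to kHz).
	 */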
1570 	context->bw_ctx.bw.dcn.clk.dispclk_khz = context->bw_ctx.dml.vba.DISPCLK * 1000;
1571 	context->bw_ctx.bw.dcn.clk.dcfclk_khz = context->bw_ctx.dml.vba.DCFCLK * 1000;
1572 	context->bw_ctx.bw.dcn.clk.socclk_khz = context->bw_ctx.dml.vba.SOCCLK * 1000;
1573 	context->bw_ctx.bw.dcn.clk.dramclk_khz = context->bw_ctx.dml.vba.DRAMSpeed * 1000 / 16;
1574 	context->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz = context->bw_ctx.dml.vba.DCFCLKDeepSleep * 1000;
1575 	context->bw_ctx.bw.dcn.clk.fclk_khz = context->bw_ctx.dml.vba.FabricClock * 1000;
1576 	context->bw_ctx.bw.dcn.clk.p_state_change_support =
1577 		context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb]
1578 							!= dm_dram_clock_change_unsupported;
1579 
1580 	context->bw_ctx.bw.dcn.clk.dppclk_khz = 0;
1581 	/* 'z9_support' is not a member of 'dc_clocks'; commented out until dc_clocks supports it:
1582 	 * context->bw_ctx.bw.dcn.clk.z9_support = (context->bw_ctx.dml.vba.StutterPeriod > 5000.0) ?
1583 	 *		DCN_Z9_SUPPORT_ALLOW : DCN_Z9_SUPPORT_DISALLOW;
1584 	 */
1585 	plane_count = 0;
1586 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
1587 		if (context->res_ctx.pipe_ctx[i].plane_state)
1588 			plane_count++;
1589 	}
1590 
1591 	/* Commented out as per above error for now.
1592 	if (plane_count == 0)
1593 		context->bw_ctx.bw.dcn.clk.z9_support = DCN_Z9_SUPPORT_ALLOW;
1594 	*/
1595 	context->bw_ctx.bw.dcn.clk.dtbclk_en = is_dtbclk_required(dc, context);
1596 	/* TODO : Uncomment the below line and make changes
1597 	 * as per DML nomenclature once it is available.
1598 	 * context->bw_ctx.bw.dcn.clk.fclk_p_state_change_support = context->bw_ctx.dml.vba.fclk_pstate_support;
1599 	 */
1600 
1601 	if (context->bw_ctx.bw.dcn.clk.dispclk_khz < dc->debug.min_disp_clk_khz)
1602 		context->bw_ctx.bw.dcn.clk.dispclk_khz = dc->debug.min_disp_clk_khz;
1603 
1604 	for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
1605 		if (!context->res_ctx.pipe_ctx[i].stream)
1606 			continue;
1607 		pipes[pipe_idx].pipe.dest.vstartup_start = get_vstartup(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
1608 		pipes[pipe_idx].pipe.dest.vupdate_offset = get_vupdate_offset(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
1609 		pipes[pipe_idx].pipe.dest.vupdate_width = get_vupdate_width(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
1610 		pipes[pipe_idx].pipe.dest.vready_offset = get_vready_offset(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
1611 		if (context->res_ctx.pipe_ctx[i].stream->mall_stream_config.type == SUBVP_PHANTOM) {
1612 			// Phantom pipe requires that DET_SIZE = 0 and no unbounded requests
1613 			context->res_ctx.pipe_ctx[i].det_buffer_size_kb = 0;
1614 			context->res_ctx.pipe_ctx[i].unbounded_req = false;
1615 		} else {
1616 			context->res_ctx.pipe_ctx[i].det_buffer_size_kb = context->bw_ctx.dml.ip.det_buffer_size_kbytes;
1617 			context->res_ctx.pipe_ctx[i].unbounded_req = pipes[pipe_idx].pipe.src.unbounded_req_mode;
1618 		}
1619 
1620 		if (context->bw_ctx.bw.dcn.clk.dppclk_khz < pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000)
1621 			context->bw_ctx.bw.dcn.clk.dppclk_khz = pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000;
1622 		context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz =
1623 						pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000;
1624 		context->res_ctx.pipe_ctx[i].pipe_dlg_param = pipes[pipe_idx].pipe.dest;
1625 		pipe_idx++;
1626 	}
1627 	/* save an original dppclk copy */
1628 	context->bw_ctx.bw.dcn.clk.bw_dppclk_khz = context->bw_ctx.bw.dcn.clk.dppclk_khz;
1629 	context->bw_ctx.bw.dcn.clk.bw_dispclk_khz = context->bw_ctx.bw.dcn.clk.dispclk_khz;
1630 	context->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz = context->bw_ctx.dml.soc.clock_limits[vlevel].dppclk_mhz * 1000;
1631 	context->bw_ctx.bw.dcn.clk.max_supported_dispclk_khz = context->bw_ctx.dml.soc.clock_limits[vlevel].dispclk_mhz * 1000;
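	/* Whatever remains of the config return buffer after the per-pipe DET allocations is
	 * used as compressed buffer (compbuf) space.
	 */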
1632 	context->bw_ctx.bw.dcn.compbuf_size_kb = context->bw_ctx.dml.ip.config_return_buffer_size_in_kbytes
1633 						- context->bw_ctx.dml.ip.det_buffer_size_kbytes * pipe_idx;
1634 
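	/* Generate the per-pipe DLG/TTU and RQ register values from the validated DML state. */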
1635 	for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
1636 		bool cstate_en = context->bw_ctx.dml.vba.PrefetchMode[vlevel][context->bw_ctx.dml.vba.maxMpcComb] != 2;
1637 
1638 		if (!context->res_ctx.pipe_ctx[i].stream)
1639 			continue;
1640 
1641 		context->bw_ctx.dml.funcs.rq_dlg_get_dlg_reg(&context->bw_ctx.dml,
1642 				&context->res_ctx.pipe_ctx[i].dlg_regs,
1643 				&context->res_ctx.pipe_ctx[i].ttu_regs,
1644 				pipes,
1645 				pipe_cnt,
1646 				pipe_idx,
1647 				cstate_en,
1648 				context->bw_ctx.bw.dcn.clk.p_state_change_support,
1649 				false, false, true);
1650 
1651 		context->bw_ctx.dml.funcs.rq_dlg_get_rq_reg(&context->bw_ctx.dml,
1652 				&context->res_ctx.pipe_ctx[i].rq_regs,
1653 				&pipes[pipe_idx].pipe);
1654 		pipe_idx++;
1655 	}
1656 }
1657 
1658 static void dml_calculate_wm_and_dlg(
1659 		struct dc *dc, struct dc_state *context,
1660 		display_e2e_pipe_params_st *pipes,
1661 		int pipe_cnt,
1662 		int vlevel)
1663 {
1664 	int i, pipe_idx, vlevel_temp = 0;
1665 
1666 	double dcfclk = context->bw_ctx.dml.soc.clock_limits[0].dcfclk_mhz;
1667 	double dcfclk_from_validation = context->bw_ctx.dml.vba.DCFCLKState[vlevel][context->bw_ctx.dml.vba.maxMpcComb];
1668 	unsigned int min_dram_speed_mts = context->bw_ctx.dml.vba.DRAMSpeed;
1669 	bool pstate_en = context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb] !=
1670 			dm_dram_clock_change_unsupported;
1671 
1672 	/* Set B:
1673 	 * For Set B calculations use clocks from clock_limits[2] when available, i.e. when SMU is
1674 	 * present; otherwise use an arbitrary low DCFCLK value from the spreadsheet, since lower is
1675 	 * safer for watermark calculations and covers boot-up clocks.
1676 	 * DCFCLK: soc.clock_limits[2] when available
1677 	 * UCLK: soc.clock_limits[2] when available
1678 	 */
1679 	if (context->bw_ctx.dml.soc.num_states > 2) {
1680 		vlevel_temp = 2;
1681 		dcfclk = context->bw_ctx.dml.soc.clock_limits[2].dcfclk_mhz;
1682 	} else
1683 		dcfclk = 615; //DCFCLK Vmin_lv
1684 
1685 	pipes[0].clks_cfg.voltage = vlevel_temp;
1686 	pipes[0].clks_cfg.dcfclk_mhz = dcfclk;
1687 	pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel_temp].socclk_mhz;
1688 
1689 	if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].valid) {
1690 		context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.pstate_latency_us;
1691 		context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.sr_enter_plus_exit_time_us;
1692 		context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.sr_exit_time_us;
1693 	}
1694 	context->bw_ctx.bw.dcn.watermarks.b.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
1695 	context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
1696 	context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
1697 	context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
1698 	context->bw_ctx.bw.dcn.watermarks.b.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
1699 	context->bw_ctx.bw.dcn.watermarks.b.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
1700 	context->bw_ctx.bw.dcn.watermarks.b.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
1701 	context->bw_ctx.bw.dcn.watermarks.b.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
1702 	//context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.fclk_pstate_change_ns = get_wm_fclk_pstate(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
1703 	//context->bw_ctx.bw.dcn.watermarks.b.usr_retraining_ns = get_wm_usr_retraining(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
1704 
1705 	/* Temporary, to have some fclk_pstate_change_ns and usr_retraining_ns wm values until DML is implemented */
1706 	context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.fclk_pstate_change_ns = context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.pstate_change_ns / 4;
1707 	context->bw_ctx.bw.dcn.watermarks.b.usr_retraining_ns = context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.pstate_change_ns / 8;
1708 
1709 	/* Set D:
1710 	 * All clocks min.
1711 	 * DCFCLK: Min, as reported by PM FW, when available
1712 	 * UCLK  : Min, as reported by PM FW, when available
1713 	 * sr_enter_exit/sr_exit should be lower than the values used for DRAM (TBD after bring-up; use the values decided in Clk Mgr)
1714 	 */
1715 
1716 	if (context->bw_ctx.dml.soc.num_states > 2) {
1717 		vlevel_temp = 0;
1718 		dcfclk = dc->clk_mgr->bw_params->clk_table.entries[0].dcfclk_mhz;
1719 	} else
1720 		dcfclk = 615; //DCFCLK Vmin_lv
1721 
1722 	pipes[0].clks_cfg.voltage = vlevel_temp;
1723 	pipes[0].clks_cfg.dcfclk_mhz = dcfclk;
1724 	pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel_temp].socclk_mhz;
1725 
1726 	if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].valid) {
1727 		context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.pstate_latency_us;
1728 		context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.sr_enter_plus_exit_time_us;
1729 		context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.sr_exit_time_us;
1730 	}
1731 	context->bw_ctx.bw.dcn.watermarks.d.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
1732 	context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
1733 	context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
1734 	context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
1735 	context->bw_ctx.bw.dcn.watermarks.d.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
1736 	context->bw_ctx.bw.dcn.watermarks.d.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
1737 	context->bw_ctx.bw.dcn.watermarks.d.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
1738 	context->bw_ctx.bw.dcn.watermarks.d.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
1739 	//context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.fclk_pstate_change_ns = get_wm_fclk_pstate(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
1740 	//context->bw_ctx.bw.dcn.watermarks.d.usr_retraining_ns = get_wm_usr_retraining(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
1741 
1742 	/* Temporary, to have some fclk_pstate_change_ns and usr_retraining_ns wm values until DML is implemented */
1743 	context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.fclk_pstate_change_ns = context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.pstate_change_ns / 4;
1744 	context->bw_ctx.bw.dcn.watermarks.d.usr_retraining_ns = context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.pstate_change_ns / 8;
1745 
1746 	/* Set C, for Dummy P-State:
1747 	 * All clocks min.
1748 	 * DCFCLK: Min, as reported by PM FW, when available
1749 	 * UCLK  : Min,  as reported by PM FW, when available
1750 	 * pstate latency as per UCLK state dummy pstate latency
1751 	 */
1752 	if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].valid) {
1753 		unsigned int min_dram_speed_mts_margin = 160;
1754 
1755 		if ((!pstate_en))
1756 			min_dram_speed_mts = dc->clk_mgr->bw_params->clk_table.entries[dc->clk_mgr->bw_params->clk_table.num_entries - 1].memclk_mhz * 16;
1757 
1758 		/* Find the largest dummy p-state table entry that is below the DRAM speed; anything below DPM0 still uses DPM0 */
1759 		for (i = 3; i > 0; i--)
1760 			if (min_dram_speed_mts + min_dram_speed_mts_margin > dc->clk_mgr->bw_params->dummy_pstate_table[i].dram_speed_mts)
1761 				break;
1762 
1763 		context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->dummy_pstate_table[i].dummy_pstate_latency_us;
1764 		context->bw_ctx.dml.soc.dummy_pstate_latency_us = dc->clk_mgr->bw_params->dummy_pstate_table[i].dummy_pstate_latency_us;
1765 		context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.sr_enter_plus_exit_time_us;
1766 		context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.sr_exit_time_us;
1767 	}
1768 	context->bw_ctx.bw.dcn.watermarks.c.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
1769 	context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
1770 	context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
1771 	context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
1772 	context->bw_ctx.bw.dcn.watermarks.c.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
1773 	context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
1774 	context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
1775 	context->bw_ctx.bw.dcn.watermarks.c.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
1776 	//context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.fclk_pstate_change_ns = get_wm_fclk_pstate(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
1777 	//context->bw_ctx.bw.dcn.watermarks.c.usr_retraining_ns = get_wm_usr_retraining(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
1778 
1779 	/* Temporary, to have some fclk_pstate_change_ns and usr_retraining_ns wm values until DML is implemented */
1780 	context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.fclk_pstate_change_ns = context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.pstate_change_ns / 4;
1781 	context->bw_ctx.bw.dcn.watermarks.c.usr_retraining_ns = context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.pstate_change_ns / 8;
1782 
1783 	if ((!pstate_en) && (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].valid)) {
1784 		/* The only difference between A and C is p-state latency. If p-state is not supported
1785 		 * with the full p-state latency, calculate DLG based on the dummy p-state latency instead.
1786 		 * Set A's p-state watermark was previously set to 0 when p-state is unsupported; keep that behavior for now.
1787 		 */
1788 		context->bw_ctx.bw.dcn.watermarks.a = context->bw_ctx.bw.dcn.watermarks.c;
1789 		context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = 0;
1790 	} else {
1791 		/* Set A:
1792 		 * All clocks min.
1793 		 * DCFCLK: Min, as reported by PM FW, when available
1794 		 * UCLK: Min, as reported by PM FW, when available
1795 		 */
1796 		dml_update_soc_for_wm_a(dc, context);
1797 		context->bw_ctx.bw.dcn.watermarks.a.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
1798 		context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
1799 		context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
1800 		context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
1801 		context->bw_ctx.bw.dcn.watermarks.a.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
1802 		context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
1803 		context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
1804 		context->bw_ctx.bw.dcn.watermarks.a.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
1805 	}
1806 
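	/* Switch back to the clocks of the validated voltage level before deriving the final
	 * per-pipe clocks and DLG parameters.
	 */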
1807 	pipes[0].clks_cfg.voltage = vlevel;
1808 	pipes[0].clks_cfg.dcfclk_mhz = dcfclk_from_validation;
1809 	pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel].socclk_mhz;
1810 
1811 	for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
1812 		if (!context->res_ctx.pipe_ctx[i].stream)
1813 			continue;
1814 
1815 		pipes[pipe_idx].clks_cfg.dispclk_mhz = get_dispclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt);
1816 		pipes[pipe_idx].clks_cfg.dppclk_mhz = get_dppclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
1817 
1818 		if (dc->config.forced_clocks) {
1819 			pipes[pipe_idx].clks_cfg.dispclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dispclk_mhz;
1820 			pipes[pipe_idx].clks_cfg.dppclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dppclk_mhz;
1821 		}
1822 		if (dc->debug.min_disp_clk_khz > pipes[pipe_idx].clks_cfg.dispclk_mhz * 1000)
1823 			pipes[pipe_idx].clks_cfg.dispclk_mhz = dc->debug.min_disp_clk_khz / 1000.0;
1824 		if (dc->debug.min_dpp_clk_khz > pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000)
1825 			pipes[pipe_idx].clks_cfg.dppclk_mhz = dc->debug.min_dpp_clk_khz / 1000.0;
1826 
1827 		pipe_idx++;
1828 	}
1829 
1830 	context->perf_params.stutter_period_us = context->bw_ctx.dml.vba.StutterPeriod;
1831 
1832 	dml_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel);
1833 
1834 	if (!pstate_en)
1835 		/* Restore full p-state latency */
1836 		context->bw_ctx.dml.soc.dram_clock_change_latency_us =
1837 				dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us;
1838 }
1839 
1840 bool dml_validate(struct dc *dc,
1841 		struct dc_state *context,
1842 		bool fast_validate)
1843 {
1844 	bool out = false;
1845 
1846 	BW_VAL_TRACE_SETUP();
1847 
1848 	int vlevel = 0;
1849 	int pipe_cnt = 0;
1850 	display_e2e_pipe_params_st *pipes = context->bw_ctx.dml.dml_pipe_state;
1851 	DC_LOGGER_INIT(dc->ctx->logger);
1852 
1853 	BW_VAL_TRACE_COUNT();
1854 
1855 	out = dml_internal_validate(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate);
1856 
1857 	if (pipe_cnt == 0)
1858 		goto validate_out;
1859 
1860 	if (!out)
1861 		goto validate_fail;
1862 
1863 	BW_VAL_TRACE_END_VOLTAGE_LEVEL();
1864 
1865 	if (fast_validate) {
1866 		BW_VAL_TRACE_SKIP(fast);
1867 		goto validate_out;
1868 	}
1869 
1870 	dml_calculate_wm_and_dlg(dc, context, pipes, pipe_cnt, vlevel);
1871 
1872 	BW_VAL_TRACE_END_WATERMARKS();
1873 
1874 	goto validate_out;
1875 
1876 validate_fail:
1877 	DC_LOG_WARNING("Mode Validation Warning: %s failed validation.\n",
1878 		dml_get_status_message(context->bw_ctx.dml.vba.ValidationStatus[context->bw_ctx.dml.vba.soc.num_states]));
1879 
1880 	BW_VAL_TRACE_SKIP(fail);
1881 	out = false;
1882 
1883 validate_out:
1884 	BW_VAL_TRACE_FINISH();
1885 
1886 	return out;
1887 }
1888