/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 */

#include <linux/slab.h>
#include <linux/mm.h>

#include "dm_services.h"

#include "dc.h"

#include "core_status.h"
#include "core_types.h"
#include "hw_sequencer.h"
#include "dce/dce_hwseq.h"

#include "resource.h"

#include "clk_mgr.h"
#include "clock_source.h"
#include "dc_bios_types.h"

#include "bios_parser_interface.h"
#include "bios/bios_parser_helper.h"
#include "include/irq_service_interface.h"
#include "transform.h"
#include "dmcu.h"
#include "dpp.h"
#include "timing_generator.h"
#include "abm.h"
#include "virtual/virtual_link_encoder.h"
#include "hubp.h"

#include "link_hwss.h"
#include "link_encoder.h"
#include "link_enc_cfg.h"

#include "dc_link.h"
#include "dc_link_ddc.h"
#include "dm_helpers.h"
#include "mem_input.h"

#include "dc_link_dp.h"
#include "dc_dmub_srv.h"

#include "dsc.h"

#include "vm_helper.h"

#include "dce/dce_i2c.h"

#include "dmub/dmub_srv.h"

#include "i2caux_interface.h"
#include "dce/dmub_hw_lock_mgr.h"

#include "dc_trace.h"

#include "dce/dmub_outbox.h"

#define CTX \
	dc->ctx

#define DC_LOGGER \
	dc->ctx->logger

static const char DC_BUILD_ID[] = "production-build";
/**
 * DOC: Overview
 *
 * DC is the OS-agnostic component of the amdgpu DC driver.
 *
 * DC maintains and validates a set of structs representing the state of the
 * driver and writes that state to AMD hardware.
 *
 * Main DC HW structs:
 *
 * struct dc - The central struct.  One per driver.  Created on driver load,
 * destroyed on driver unload.
 *
 * struct dc_context - One per driver.
 * Used as a backpointer by most other structs in dc.
 *
 * struct dc_link - One per connector (the physical DP, HDMI, miniDP, or eDP
 * plugpoints).  Created on driver load, destroyed on driver unload.
 *
 * struct dc_sink - One per display.  Created on boot or hotplug.
 * Destroyed on shutdown or hotunplug.  A dc_link can have a local sink
 * (the display directly attached).  It may also have one or more remote
 * sinks (in the Multi-Stream Transport case).
 *
 * struct resource_pool - One per driver.  Represents the hw blocks not in the
 * main pipeline.  Not directly accessible by dm.
 *
 * Main dc state structs:
 *
 * These structs can be created and destroyed as needed.  There is a full set of
 * these structs in dc->current_state representing the currently programmed state.
 *
 * struct dc_state - The global DC state to track global state information,
 * such as bandwidth values.
 *
 * struct dc_stream_state - Represents the hw configuration for the pipeline from
 * a framebuffer to a display.  Maps one-to-one with dc_sink.
 *
 * struct dc_plane_state - Represents a framebuffer.  Each stream has at least one,
 * and may have more in the Multi-Plane Overlay case.
 *
 * struct resource_context - Represents the programmable state of everything in
 * the resource_pool.  Not directly accessible by dm.
 *
 * struct pipe_ctx - A member of struct resource_context.  Represents the
 * internal hardware pipeline components.  Each dc_plane_state has either
 * one or two (in the pipe-split case).
 */
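
/*
 * To make the containment relationships above concrete, the snippet below
 * sketches how a DM might walk the currently programmed state.  It is an
 * illustrative sketch only, not driver code; it assumes the caller already
 * holds a valid struct dc pointer obtained from dc_create().
 *
 *	struct dc_state *state = dc->current_state;
 *	int i;
 *
 *	for (i = 0; i < state->stream_count; i++) {
 *		// Each stream maps 1:1 to a dc_sink and drives one pipeline;
 *		// its planes (MPO) are tracked in the matching stream_status.
 *		DC_LOG_DC("stream %d has %d plane(s)", i,
 *			  state->stream_status[i].plane_count);
 *	}
 */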

/*******************************************************************************
 * Private functions
 ******************************************************************************/

static inline void elevate_update_type(enum surface_update_type *original, enum surface_update_type new)
{
	if (new > *original)
		*original = new;
}

static void destroy_links(struct dc *dc)
{
	uint32_t i;

	for (i = 0; i < dc->link_count; i++) {
		if (NULL != dc->links[i])
			link_destroy(&dc->links[i]);
	}
}

static uint32_t get_num_of_internal_disp(struct dc_link **links, uint32_t num_links)
{
	int i;
	uint32_t count = 0;

	for (i = 0; i < num_links; i++) {
		if (links[i]->connector_signal == SIGNAL_TYPE_EDP ||
				links[i]->is_internal_display)
			count++;
	}

	return count;
}

static int get_seamless_boot_stream_count(struct dc_state *ctx)
{
	uint8_t i;
	uint8_t seamless_boot_stream_count = 0;

	for (i = 0; i < ctx->stream_count; i++)
		if (ctx->streams[i]->apply_seamless_boot_optimization)
			seamless_boot_stream_count++;

	return seamless_boot_stream_count;
}

static bool create_links(
		struct dc *dc,
		uint32_t num_virtual_links)
{
	int i;
	int connectors_num;
	struct dc_bios *bios = dc->ctx->dc_bios;

	dc->link_count = 0;

	connectors_num = bios->funcs->get_connectors_number(bios);

	DC_LOG_DC("BIOS object table - number of connectors: %d", connectors_num);

	if (connectors_num > ENUM_ID_COUNT) {
		dm_error(
			"DC: Number of connectors %d exceeds maximum of %d!\n",
			connectors_num,
			ENUM_ID_COUNT);
		return false;
	}

	dm_output_to_console(
		"DC: %s: connectors_num: physical:%d, virtual:%d\n",
		__func__,
		connectors_num,
		num_virtual_links);

	for (i = 0; i < connectors_num; i++) {
		struct link_init_data link_init_params = {0};
		struct dc_link *link;

		DC_LOG_DC("BIOS object table - printing link object info for connector number: %d, link_index: %d", i, dc->link_count);

		link_init_params.ctx = dc->ctx;
		/* next BIOS object table connector */
		link_init_params.connector_index = i;
		link_init_params.link_index = dc->link_count;
		link_init_params.dc = dc;
		link = link_create(&link_init_params);

		if (link) {
			dc->links[dc->link_count] = link;
			link->dc = dc;
			++dc->link_count;
		}
	}

	DC_LOG_DC("BIOS object table - end");

	/* Create a link for each usb4 dpia port */
	for (i = 0; i < dc->res_pool->usb4_dpia_count; i++) {
		struct link_init_data link_init_params = {0};
		struct dc_link *link;

		link_init_params.ctx = dc->ctx;
		link_init_params.connector_index = i;
		link_init_params.link_index = dc->link_count;
		link_init_params.dc = dc;
		link_init_params.is_dpia_link = true;

		link = link_create(&link_init_params);
		if (link) {
			dc->links[dc->link_count] = link;
			link->dc = dc;
			++dc->link_count;
		}
	}

	for (i = 0; i < num_virtual_links; i++) {
		struct dc_link *link = kzalloc(sizeof(*link), GFP_KERNEL);
		struct encoder_init_data enc_init = {0};

		if (link == NULL) {
			BREAK_TO_DEBUGGER();
			goto failed_alloc;
		}

		link->link_index = dc->link_count;
		dc->links[dc->link_count] = link;
		dc->link_count++;

		link->ctx = dc->ctx;
		link->dc = dc;
		link->connector_signal = SIGNAL_TYPE_VIRTUAL;
		link->link_id.type = OBJECT_TYPE_CONNECTOR;
		link->link_id.id = CONNECTOR_ID_VIRTUAL;
		link->link_id.enum_id = ENUM_ID_1;
		link->link_enc = kzalloc(sizeof(*link->link_enc), GFP_KERNEL);

		if (!link->link_enc) {
			BREAK_TO_DEBUGGER();
			goto failed_alloc;
		}

		link->link_status.dpcd_caps = &link->dpcd_caps;

		enc_init.ctx = dc->ctx;
		enc_init.channel = CHANNEL_ID_UNKNOWN;
		enc_init.hpd_source = HPD_SOURCEID_UNKNOWN;
		enc_init.transmitter = TRANSMITTER_UNKNOWN;
		enc_init.connector = link->link_id;
		enc_init.encoder.type = OBJECT_TYPE_ENCODER;
		enc_init.encoder.id = ENCODER_ID_INTERNAL_VIRTUAL;
		enc_init.encoder.enum_id = ENUM_ID_1;
		virtual_link_encoder_construct(link->link_enc, &enc_init);
	}

	dc->caps.num_of_internal_disp = get_num_of_internal_disp(dc->links, dc->link_count);

	return true;

failed_alloc:
	return false;
}

/* Create additional DIG link encoder objects if fewer than the platform
 * supports were created during link construction. This can happen if the
 * number of physical connectors is less than the number of DIGs.
 */
static bool create_link_encoders(struct dc *dc)
{
	bool res = true;
	unsigned int num_usb4_dpia = dc->res_pool->res_cap->num_usb4_dpia;
	unsigned int num_dig_link_enc = dc->res_pool->res_cap->num_dig_link_enc;
	int i;

	/* A platform without USB4 DPIA endpoints has a fixed mapping between DIG
	 * link encoders and physical display endpoints and does not require
	 * additional link encoder objects.
	 */
	if (num_usb4_dpia == 0)
		return res;

	/* Create as many link encoder objects as the platform supports. DPIA
	 * endpoints can be programmably mapped to any DIG.
	 */
	if (num_dig_link_enc > dc->res_pool->dig_link_enc_count) {
		for (i = 0; i < num_dig_link_enc; i++) {
			struct link_encoder *link_enc = dc->res_pool->link_encoders[i];

			if (!link_enc && dc->res_pool->funcs->link_enc_create_minimal) {
				link_enc = dc->res_pool->funcs->link_enc_create_minimal(dc->ctx,
						(enum engine_id)(ENGINE_ID_DIGA + i));
				if (link_enc) {
					dc->res_pool->link_encoders[i] = link_enc;
					dc->res_pool->dig_link_enc_count++;
				} else {
					res = false;
				}
			}
		}
	}

	return res;
}

/* Destroy any additional DIG link encoder objects created by
 * create_link_encoders().
 * NB: Must only be called after destroy_links().
 */
static void destroy_link_encoders(struct dc *dc)
{
	unsigned int num_usb4_dpia = dc->res_pool->res_cap->num_usb4_dpia;
	unsigned int num_dig_link_enc = dc->res_pool->res_cap->num_dig_link_enc;
	int i;

	/* A platform without USB4 DPIA endpoints has a fixed mapping between DIG
	 * link encoders and physical display endpoints and does not require
	 * additional link encoder objects.
	 */
	if (num_usb4_dpia == 0)
		return;

	for (i = 0; i < num_dig_link_enc; i++) {
		struct link_encoder *link_enc = dc->res_pool->link_encoders[i];

		if (link_enc) {
			link_enc->funcs->destroy(&link_enc);
			dc->res_pool->link_encoders[i] = NULL;
			dc->res_pool->dig_link_enc_count--;
		}
	}
}

static struct dc_perf_trace *dc_perf_trace_create(void)
{
	return kzalloc(sizeof(struct dc_perf_trace), GFP_KERNEL);
}

static void dc_perf_trace_destroy(struct dc_perf_trace **perf_trace)
{
	kfree(*perf_trace);
	*perf_trace = NULL;
}

/**
 * dc_stream_adjust_vmin_vmax - Adjust the DRR vertical blanking range.
 * @dc:     dc reference
 * @stream: Initial dc stream state
 * @adjust: Updated parameters for vertical_total_min and vertical_total_max
 *
 * Looks up the pipe context of dc_stream_state and updates the
 * vertical_total_min and vertical_total_max of DRR (Dynamic Refresh Rate),
 * a power-saving feature that targets reducing panel refresh rate while
 * the screen is static.
 *
 * Return: true if a matching pipe was found and programmed, false otherwise.
 */
bool dc_stream_adjust_vmin_vmax(struct dc *dc,
		struct dc_stream_state *stream,
		struct dc_crtc_timing_adjust *adjust)
{
	int i;
	bool ret = false;

	stream->adjust.v_total_max = adjust->v_total_max;
	stream->adjust.v_total_mid = adjust->v_total_mid;
	stream->adjust.v_total_mid_frame_num = adjust->v_total_mid_frame_num;
	stream->adjust.v_total_min = adjust->v_total_min;

	for (i = 0; i < MAX_PIPES; i++) {
		struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];

		if (pipe->stream == stream && pipe->stream_res.tg) {
			dc->hwss.set_drr(&pipe,
					1,
					*adjust);

			ret = true;
		}
	}
	return ret;
}
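
/*
 * Illustrative sketch (not driver code): a DM that wants to let a VRR panel
 * range between its nominal refresh rate and half of it could call the helper
 * above roughly like this.  The v_total values here are hypothetical; in
 * practice they come from mod_freesync's VRR calculations.
 *
 *	struct dc_crtc_timing_adjust adjust = { 0 };
 *
 *	adjust.v_total_min = stream->timing.v_total;      // fastest refresh
 *	adjust.v_total_max = 2 * stream->timing.v_total;  // slowest refresh
 *
 *	if (!dc_stream_adjust_vmin_vmax(dc, stream, &adjust))
 *		DC_LOG_WARNING("no active pipe found for stream");
 */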

/**
 * dc_stream_get_last_used_drr_vtotal - Get the last VTOTAL used by DRR.
 * @dc:           dc reference
 * @stream:       Initial dc stream state
 * @refresh_rate: Filled with the last VTOTAL used by DRR (Dynamic Refresh Rate)
 *
 * Looks up the pipe context of dc_stream_state and reads back the last
 * VTOTAL used by DRR (Dynamic Refresh Rate).
 *
 * Return: true if the value was read back, false otherwise.
 */
bool dc_stream_get_last_used_drr_vtotal(struct dc *dc,
		struct dc_stream_state *stream,
		uint32_t *refresh_rate)
{
	bool status = false;

	int i = 0;

	for (i = 0; i < MAX_PIPES; i++) {
		struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];

		if (pipe->stream == stream && pipe->stream_res.tg) {
			/* Only execute if a function pointer has been defined for
			 * the DC version in question
			 */
			if (pipe->stream_res.tg->funcs->get_last_used_drr_vtotal) {
				pipe->stream_res.tg->funcs->get_last_used_drr_vtotal(pipe->stream_res.tg, refresh_rate);

				status = true;

				break;
			}
		}
	}

	return status;
}

bool dc_stream_get_crtc_position(struct dc *dc,
		struct dc_stream_state **streams, int num_streams,
		unsigned int *v_pos, unsigned int *nom_v_pos)
{
	/* TODO: Support multiple streams */
	const struct dc_stream_state *stream = streams[0];
	int i;
	bool ret = false;
	struct crtc_position position;

	for (i = 0; i < MAX_PIPES; i++) {
		struct pipe_ctx *pipe =
				&dc->current_state->res_ctx.pipe_ctx[i];

		if (pipe->stream == stream && pipe->stream_res.stream_enc) {
			dc->hwss.get_position(&pipe, 1, &position);

			*v_pos = position.vertical_count;
			*nom_v_pos = position.nominal_vcount;
			ret = true;
		}
	}
	return ret;
}

#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
bool dc_stream_forward_dmcu_crc_window(struct dc *dc, struct dc_stream_state *stream,
			     struct crc_params *crc_window)
{
	int i;
	struct dmcu *dmcu = dc->res_pool->dmcu;
	struct pipe_ctx *pipe;
	struct crc_region tmp_win, *crc_win;
	struct otg_phy_mux mapping_tmp, *mux_mapping;

	/* crc window can't be null */
	if (!crc_window)
		return false;

	if ((dmcu != NULL && dmcu->funcs->is_dmcu_initialized(dmcu))) {
		crc_win = &tmp_win;
		mux_mapping = &mapping_tmp;
		/* set crc window */
		tmp_win.x_start = crc_window->windowa_x_start;
		tmp_win.y_start = crc_window->windowa_y_start;
		tmp_win.x_end = crc_window->windowa_x_end;
		tmp_win.y_end = crc_window->windowa_y_end;

		for (i = 0; i < MAX_PIPES; i++) {
			pipe = &dc->current_state->res_ctx.pipe_ctx[i];
			if (pipe->stream == stream && !pipe->top_pipe && !pipe->prev_odm_pipe)
				break;
		}

		/* Stream not found */
		if (i == MAX_PIPES)
			return false;

		/* set mux routing info */
		mapping_tmp.phy_output_num = stream->link->link_enc_hw_inst;
		mapping_tmp.otg_output_num = pipe->stream_res.tg->inst;

		dmcu->funcs->forward_crc_window(dmcu, crc_win, mux_mapping);
	} else {
		DC_LOG_DC("dmcu is not initialized");
		return false;
	}

	return true;
}

bool dc_stream_stop_dmcu_crc_win_update(struct dc *dc, struct dc_stream_state *stream)
{
	int i;
	struct dmcu *dmcu = dc->res_pool->dmcu;
	struct pipe_ctx *pipe;
	struct otg_phy_mux mapping_tmp, *mux_mapping;

	if ((dmcu != NULL && dmcu->funcs->is_dmcu_initialized(dmcu))) {
		mux_mapping = &mapping_tmp;

		for (i = 0; i < MAX_PIPES; i++) {
			pipe = &dc->current_state->res_ctx.pipe_ctx[i];
			if (pipe->stream == stream && !pipe->top_pipe && !pipe->prev_odm_pipe)
				break;
		}

		/* Stream not found */
		if (i == MAX_PIPES)
			return false;

		/* set mux routing info */
		mapping_tmp.phy_output_num = stream->link->link_enc_hw_inst;
		mapping_tmp.otg_output_num = pipe->stream_res.tg->inst;

		dmcu->funcs->stop_crc_win_update(dmcu, mux_mapping);
	} else {
		DC_LOG_DC("dmcu is not initialized");
		return false;
	}

	return true;
}
#endif

/**
 * dc_stream_configure_crc() - Configure CRC capture for the given stream.
 * @dc: DC Object
 * @stream: The stream to configure CRC on.
 * @enable: Enable CRC if true, disable otherwise.
 * @crc_window: CRC window (x/y start/end) information
 * @continuous: Capture CRC on every frame if true. Otherwise, only capture
 *              once.
 *
 * By default, only CRC0 is configured, and the entire frame is used to
 * calculate the CRC.
 */
bool dc_stream_configure_crc(struct dc *dc, struct dc_stream_state *stream,
			     struct crc_params *crc_window, bool enable, bool continuous)
{
	int i;
	struct pipe_ctx *pipe;
	struct crc_params param;
	struct timing_generator *tg;

	for (i = 0; i < MAX_PIPES; i++) {
		pipe = &dc->current_state->res_ctx.pipe_ctx[i];
		if (pipe->stream == stream && !pipe->top_pipe && !pipe->prev_odm_pipe)
			break;
	}
	/* Stream not found */
	if (i == MAX_PIPES)
		return false;

	/* By default, capture the full frame */
	param.windowa_x_start = 0;
	param.windowa_y_start = 0;
	param.windowa_x_end = pipe->stream->timing.h_addressable;
	param.windowa_y_end = pipe->stream->timing.v_addressable;
	param.windowb_x_start = 0;
	param.windowb_y_start = 0;
	param.windowb_x_end = pipe->stream->timing.h_addressable;
	param.windowb_y_end = pipe->stream->timing.v_addressable;

	if (crc_window) {
		param.windowa_x_start = crc_window->windowa_x_start;
		param.windowa_y_start = crc_window->windowa_y_start;
		param.windowa_x_end = crc_window->windowa_x_end;
		param.windowa_y_end = crc_window->windowa_y_end;
		param.windowb_x_start = crc_window->windowb_x_start;
		param.windowb_y_start = crc_window->windowb_y_start;
		param.windowb_x_end = crc_window->windowb_x_end;
		param.windowb_y_end = crc_window->windowb_y_end;
	}

	param.dsc_mode = pipe->stream->timing.flags.DSC ? 1 : 0;
	param.odm_mode = pipe->next_odm_pipe ? 1 : 0;

	/* Default to the union of both windows */
	param.selection = UNION_WINDOW_A_B;
	param.continuous_mode = continuous;
	param.enable = enable;

	tg = pipe->stream_res.tg;

	/* Only call if supported */
	if (tg->funcs->configure_crc)
		return tg->funcs->configure_crc(tg, &param);
	DC_LOG_WARNING("CRC capture not supported.");
	return false;
}

/**
 * dc_stream_get_crc() - Get CRC values for the given stream.
 * @dc: DC object
 * @stream: The DC stream state of the stream to get CRCs from.
 * @r_cr: CRC value for the first of the 3 channels stored here.
 * @g_y:  CRC value for the second of the 3 channels stored here.
 * @b_cb: CRC value for the third of the 3 channels stored here.
 *
 * dc_stream_configure_crc needs to be called beforehand to enable CRCs.
 * Return false if stream is not found, or if CRCs are not enabled.
 */
bool dc_stream_get_crc(struct dc *dc, struct dc_stream_state *stream,
		       uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb)
{
	int i;
	struct pipe_ctx *pipe;
	struct timing_generator *tg;

	for (i = 0; i < MAX_PIPES; i++) {
		pipe = &dc->current_state->res_ctx.pipe_ctx[i];
		if (pipe->stream == stream)
			break;
	}
	/* Stream not found */
	if (i == MAX_PIPES)
		return false;

	tg = pipe->stream_res.tg;

	if (tg->funcs->get_crc)
		return tg->funcs->get_crc(tg, r_cr, g_y, b_cb);
	DC_LOG_WARNING("CRC capture not supported.");
	return false;
}
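
/*
 * Illustrative sketch (not driver code) of the two-step CRC flow implemented
 * above: enable continuous full-frame capture on CRC0, then read the three
 * channel CRCs back.  Error handling is elided.
 *
 *	uint32_t r_cr, g_y, b_cb;
 *
 *	// NULL crc_window selects the default full-frame windows.
 *	if (!dc_stream_configure_crc(dc, stream, NULL, true, true))
 *		return;
 *
 *	if (dc_stream_get_crc(dc, stream, &r_cr, &g_y, &b_cb))
 *		DC_LOG_DC("CRC: %08x %08x %08x", r_cr, g_y, b_cb);
 */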

void dc_stream_set_dyn_expansion(struct dc *dc, struct dc_stream_state *stream,
		enum dc_dynamic_expansion option)
{
	/* OPP FMT dyn expansion updates */
	int i;
	struct pipe_ctx *pipe_ctx;

	for (i = 0; i < MAX_PIPES; i++) {
		if (dc->current_state->res_ctx.pipe_ctx[i].stream
				== stream) {
			pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
			pipe_ctx->stream_res.opp->dyn_expansion = option;
			pipe_ctx->stream_res.opp->funcs->opp_set_dyn_expansion(
					pipe_ctx->stream_res.opp,
					COLOR_SPACE_YCBCR601,
					stream->timing.display_color_depth,
					stream->signal);
		}
	}
}

void dc_stream_set_dither_option(struct dc_stream_state *stream,
		enum dc_dither_option option)
{
	struct bit_depth_reduction_params params;
	struct dc_link *link = stream->link;
	struct pipe_ctx *pipes = NULL;
	int i;

	for (i = 0; i < MAX_PIPES; i++) {
		if (link->dc->current_state->res_ctx.pipe_ctx[i].stream ==
				stream) {
			pipes = &link->dc->current_state->res_ctx.pipe_ctx[i];
			break;
		}
	}

	if (!pipes)
		return;
	if (option > DITHER_OPTION_MAX)
		return;

	stream->dither_option = option;

	memset(&params, 0, sizeof(params));
	resource_build_bit_depth_reduction_params(stream, &params);
	stream->bit_depth_params = params;

	if (pipes->plane_res.xfm &&
	    pipes->plane_res.xfm->funcs->transform_set_pixel_storage_depth) {
		pipes->plane_res.xfm->funcs->transform_set_pixel_storage_depth(
			pipes->plane_res.xfm,
			pipes->plane_res.scl_data.lb_params.depth,
			&stream->bit_depth_params);
	}

	pipes->stream_res.opp->funcs->
		opp_program_bit_depth_reduction(pipes->stream_res.opp, &params);
}

bool dc_stream_set_gamut_remap(struct dc *dc, const struct dc_stream_state *stream)
{
	int i;
	bool ret = false;
	struct pipe_ctx *pipes;

	for (i = 0; i < MAX_PIPES; i++) {
		if (dc->current_state->res_ctx.pipe_ctx[i].stream == stream) {
			pipes = &dc->current_state->res_ctx.pipe_ctx[i];
			dc->hwss.program_gamut_remap(pipes);
			ret = true;
		}
	}

	return ret;
}

bool dc_stream_program_csc_matrix(struct dc *dc, struct dc_stream_state *stream)
{
	int i;
	bool ret = false;
	struct pipe_ctx *pipes;

	for (i = 0; i < MAX_PIPES; i++) {
		if (dc->current_state->res_ctx.pipe_ctx[i].stream
				== stream) {

			pipes = &dc->current_state->res_ctx.pipe_ctx[i];
			dc->hwss.program_output_csc(dc,
					pipes,
					stream->output_color_space,
					stream->csc_color_matrix.matrix,
					pipes->stream_res.opp->inst);
			ret = true;
		}
	}

	return ret;
}

void dc_stream_set_static_screen_params(struct dc *dc,
		struct dc_stream_state **streams,
		int num_streams,
		const struct dc_static_screen_params *params)
{
	int i, j;
	struct pipe_ctx *pipes_affected[MAX_PIPES];
	int num_pipes_affected = 0;

	for (i = 0; i < num_streams; i++) {
		struct dc_stream_state *stream = streams[i];

		for (j = 0; j < MAX_PIPES; j++) {
			if (dc->current_state->res_ctx.pipe_ctx[j].stream
					== stream) {
				pipes_affected[num_pipes_affected++] =
						&dc->current_state->res_ctx.pipe_ctx[j];
			}
		}
	}

	dc->hwss.set_static_screen_control(pipes_affected, num_pipes_affected, params);
}

static void dc_destruct(struct dc *dc)
{
	// reset link encoder assignment table on destruct
	if (dc->res_pool && dc->res_pool->funcs->link_encs_assign)
		link_enc_cfg_init(dc, dc->current_state);

	if (dc->current_state) {
		dc_release_state(dc->current_state);
		dc->current_state = NULL;
	}

	destroy_links(dc);

	destroy_link_encoders(dc);

	if (dc->clk_mgr) {
		dc_destroy_clk_mgr(dc->clk_mgr);
		dc->clk_mgr = NULL;
	}

	dc_destroy_resource_pool(dc);

	if (dc->ctx->gpio_service)
		dal_gpio_service_destroy(&dc->ctx->gpio_service);

	if (dc->ctx->created_bios)
		dal_bios_parser_destroy(&dc->ctx->dc_bios);

	dc_perf_trace_destroy(&dc->ctx->perf_trace);

	kfree(dc->ctx);
	dc->ctx = NULL;

	kfree(dc->bw_vbios);
	dc->bw_vbios = NULL;

	kfree(dc->bw_dceip);
	dc->bw_dceip = NULL;

	kfree(dc->dcn_soc);
	dc->dcn_soc = NULL;

	kfree(dc->dcn_ip);
	dc->dcn_ip = NULL;

	kfree(dc->vm_helper);
	dc->vm_helper = NULL;
}

static bool dc_construct_ctx(struct dc *dc,
		const struct dc_init_data *init_params)
{
	struct dc_context *dc_ctx;
	enum dce_version dc_version = DCE_VERSION_UNKNOWN;

	dc_ctx = kzalloc(sizeof(*dc_ctx), GFP_KERNEL);
	if (!dc_ctx)
		return false;

	dc_ctx->cgs_device = init_params->cgs_device;
	dc_ctx->driver_context = init_params->driver;
	dc_ctx->dc = dc;
	dc_ctx->asic_id = init_params->asic_id;
	dc_ctx->dc_sink_id_count = 0;
	dc_ctx->dc_stream_id_count = 0;
	dc_ctx->dce_environment = init_params->dce_environment;

	/* Create logger */

	dc_version = resource_parse_asic_id(init_params->asic_id);
	dc_ctx->dce_version = dc_version;

	dc_ctx->perf_trace = dc_perf_trace_create();
	if (!dc_ctx->perf_trace) {
		ASSERT_CRITICAL(false);
		return false;
	}

	dc->ctx = dc_ctx;

	return true;
}

static bool dc_construct(struct dc *dc,
		const struct dc_init_data *init_params)
{
	struct dc_context *dc_ctx;
	struct bw_calcs_dceip *dc_dceip;
	struct bw_calcs_vbios *dc_vbios;
	struct dcn_soc_bounding_box *dcn_soc;
	struct dcn_ip_params *dcn_ip;

	dc->config = init_params->flags;

	// Allocate memory for the vm_helper
	dc->vm_helper = kzalloc(sizeof(struct vm_helper), GFP_KERNEL);
	if (!dc->vm_helper) {
		dm_error("%s: failed to create dc->vm_helper\n", __func__);
		goto fail;
	}

	memcpy(&dc->bb_overrides, &init_params->bb_overrides, sizeof(dc->bb_overrides));

	dc_dceip = kzalloc(sizeof(*dc_dceip), GFP_KERNEL);
	if (!dc_dceip) {
		dm_error("%s: failed to create dceip\n", __func__);
		goto fail;
	}

	dc->bw_dceip = dc_dceip;

	dc_vbios = kzalloc(sizeof(*dc_vbios), GFP_KERNEL);
	if (!dc_vbios) {
		dm_error("%s: failed to create vbios\n", __func__);
		goto fail;
	}

	dc->bw_vbios = dc_vbios;
	dcn_soc = kzalloc(sizeof(*dcn_soc), GFP_KERNEL);
	if (!dcn_soc) {
		dm_error("%s: failed to create dcn_soc\n", __func__);
		goto fail;
	}

	dc->dcn_soc = dcn_soc;

	dcn_ip = kzalloc(sizeof(*dcn_ip), GFP_KERNEL);
	if (!dcn_ip) {
		dm_error("%s: failed to create dcn_ip\n", __func__);
		goto fail;
	}

	dc->dcn_ip = dcn_ip;

	if (!dc_construct_ctx(dc, init_params)) {
		dm_error("%s: failed to create ctx\n", __func__);
		goto fail;
	}

	dc_ctx = dc->ctx;

	/* Resource should construct all asic specific resources.
	 * This should be the only place where we need to parse the asic id
	 */
	if (init_params->vbios_override)
		dc_ctx->dc_bios = init_params->vbios_override;
	else {
		/* Create BIOS parser */
		struct bp_init_data bp_init_data;

		bp_init_data.ctx = dc_ctx;
		bp_init_data.bios = init_params->asic_id.atombios_base_address;

		dc_ctx->dc_bios = dal_bios_parser_create(
				&bp_init_data, dc_ctx->dce_version);

		if (!dc_ctx->dc_bios) {
			ASSERT_CRITICAL(false);
			goto fail;
		}

		dc_ctx->created_bios = true;
	}

	dc->vendor_signature = init_params->vendor_signature;

	/* Create GPIO service */
	dc_ctx->gpio_service = dal_gpio_service_create(
			dc_ctx->dce_version,
			dc_ctx->dce_environment,
			dc_ctx);

	if (!dc_ctx->gpio_service) {
		ASSERT_CRITICAL(false);
		goto fail;
	}

	dc->res_pool = dc_create_resource_pool(dc, init_params, dc_ctx->dce_version);
	if (!dc->res_pool)
		goto fail;

	/* set i2c speed if not done by the respective dcnxxx__resource.c */
	if (dc->caps.i2c_speed_in_khz_hdcp == 0)
		dc->caps.i2c_speed_in_khz_hdcp = dc->caps.i2c_speed_in_khz;

	dc->clk_mgr = dc_clk_mgr_create(dc->ctx, dc->res_pool->pp_smu, dc->res_pool->dccg);
	if (!dc->clk_mgr)
		goto fail;
#ifdef CONFIG_DRM_AMD_DC_DCN
	dc->clk_mgr->force_smu_not_present = init_params->force_smu_not_present;

	if (dc->res_pool->funcs->update_bw_bounding_box) {
		DC_FP_START();
		dc->res_pool->funcs->update_bw_bounding_box(dc, dc->clk_mgr->bw_params);
		DC_FP_END();
	}
#endif

	/* Creation of current_state must occur after dc->dml
	 * is initialized in dc_create_resource_pool because
	 * on creation it copies the contents of dc->dml
	 */

	dc->current_state = dc_create_state(dc);

	if (!dc->current_state) {
		dm_error("%s: failed to create validate ctx\n", __func__);
		goto fail;
	}

	if (!create_links(dc, init_params->num_virtual_links))
		goto fail;

	/* Create additional DIG link encoder objects if fewer than the platform
	 * supports were created during link construction.
	 */
	if (!create_link_encoders(dc))
		goto fail;

	dc_resource_state_construct(dc, dc->current_state);

	return true;

fail:
	return false;
}

static void disable_all_writeback_pipes_for_stream(
		const struct dc *dc,
		struct dc_stream_state *stream,
		struct dc_state *context)
{
	int i;

	for (i = 0; i < stream->num_wb_info; i++)
		stream->writeback_info[i].wb_enabled = false;
}

static void apply_ctx_interdependent_lock(struct dc *dc, struct dc_state *context,
					  struct dc_stream_state *stream, bool lock)
{
	int i;

	/* Checks if interdependent update function pointer is NULL or not, takes care of DCE110 case */
	if (dc->hwss.interdependent_update_lock)
		dc->hwss.interdependent_update_lock(dc, context, lock);
	else {
		for (i = 0; i < dc->res_pool->pipe_count; i++) {
			struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
			struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];

			// Copied conditions that were previously in dce110_apply_ctx_for_surface
			if (stream == pipe_ctx->stream) {
				if (!pipe_ctx->top_pipe &&
					(pipe_ctx->plane_state || old_pipe_ctx->plane_state))
					dc->hwss.pipe_control_lock(dc, pipe_ctx, lock);
			}
		}
	}
}

static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
{
	int i, j;
	struct dc_state *dangling_context = dc_create_state(dc);
	struct dc_state *current_ctx;

	if (dangling_context == NULL)
		return;

	dc_resource_state_copy_construct(dc->current_state, dangling_context);

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct dc_stream_state *old_stream =
				dc->current_state->res_ctx.pipe_ctx[i].stream;
		bool should_disable = true;
		bool pipe_split_change = false;

		if ((context->res_ctx.pipe_ctx[i].top_pipe) &&
			(dc->current_state->res_ctx.pipe_ctx[i].top_pipe))
			pipe_split_change = context->res_ctx.pipe_ctx[i].top_pipe->pipe_idx !=
				dc->current_state->res_ctx.pipe_ctx[i].top_pipe->pipe_idx;
		else
			pipe_split_change = context->res_ctx.pipe_ctx[i].top_pipe !=
				dc->current_state->res_ctx.pipe_ctx[i].top_pipe;

		for (j = 0; j < context->stream_count; j++) {
			if (old_stream == context->streams[j]) {
				should_disable = false;
				break;
			}
		}
		if (!should_disable && pipe_split_change &&
				dc->current_state->stream_count != context->stream_count)
			should_disable = true;

		if (should_disable && old_stream) {
			dc_rem_all_planes_for_stream(dc, old_stream, dangling_context);
			disable_all_writeback_pipes_for_stream(dc, old_stream, dangling_context);

			if (dc->hwss.apply_ctx_for_surface) {
				apply_ctx_interdependent_lock(dc, dc->current_state, old_stream, true);
				dc->hwss.apply_ctx_for_surface(dc, old_stream, 0, dangling_context);
				apply_ctx_interdependent_lock(dc, dc->current_state, old_stream, false);
				dc->hwss.post_unlock_program_front_end(dc, dangling_context);
			}
			if (dc->hwss.program_front_end_for_ctx) {
				dc->hwss.interdependent_update_lock(dc, dc->current_state, true);
				dc->hwss.program_front_end_for_ctx(dc, dangling_context);
				dc->hwss.interdependent_update_lock(dc, dc->current_state, false);
				dc->hwss.post_unlock_program_front_end(dc, dangling_context);
			}
		}
	}

	current_ctx = dc->current_state;
	dc->current_state = dangling_context;
	dc_release_state(current_ctx);
}

static void disable_vbios_mode_if_required(
		struct dc *dc,
		struct dc_state *context)
{
	unsigned int i, j;

	/* check if the timing changed; if so, disable the stream */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct dc_stream_state *stream = NULL;
		struct dc_link *link = NULL;
		struct pipe_ctx *pipe = NULL;

		pipe = &context->res_ctx.pipe_ctx[i];
		stream = pipe->stream;
		if (stream == NULL)
			continue;

		// only looking for first odm pipe
		if (pipe->prev_odm_pipe)
			continue;

		if (stream->link->local_sink &&
			stream->link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
			link = stream->link;
		}

		if (link != NULL && link->link_enc->funcs->is_dig_enabled(link->link_enc)) {
			unsigned int enc_inst, tg_inst = 0;
			unsigned int pix_clk_100hz;

			enc_inst = link->link_enc->funcs->get_dig_frontend(link->link_enc);
			if (enc_inst != ENGINE_ID_UNKNOWN) {
				for (j = 0; j < dc->res_pool->stream_enc_count; j++) {
					if (dc->res_pool->stream_enc[j]->id == enc_inst) {
						tg_inst = dc->res_pool->stream_enc[j]->funcs->dig_source_otg(
							dc->res_pool->stream_enc[j]);
						break;
					}
				}

				dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
					dc->res_pool->dp_clock_source,
					tg_inst, &pix_clk_100hz);

				if (link->link_status.link_active) {
					uint32_t requested_pix_clk_100hz =
						pipe->stream_res.pix_clk_params.requested_pix_clk_100hz;

					if (pix_clk_100hz != requested_pix_clk_100hz) {
						core_link_disable_stream(pipe);
						pipe->stream->dpms_off = false;
					}
				}
			}
		}
	}
}

static void wait_for_no_pipes_pending(struct dc *dc, struct dc_state *context)
{
	int i;
	PERF_TRACE();
	for (i = 0; i < MAX_PIPES; i++) {
		int count = 0;
		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];

		if (!pipe->plane_state)
			continue;

		/* Timeout 100 ms */
		while (count < 100000) {
			/* Must set to false to start with, due to OR in update function */
			pipe->plane_state->status.is_flip_pending = false;
			dc->hwss.update_pending_status(pipe);
			if (!pipe->plane_state->status.is_flip_pending)
				break;
			udelay(1);
			count++;
		}
		ASSERT(!pipe->plane_state->status.is_flip_pending);
	}
	PERF_TRACE();
}

/*******************************************************************************
 * Public functions
 ******************************************************************************/

struct dc *dc_create(const struct dc_init_data *init_params)
{
	struct dc *dc = kzalloc(sizeof(*dc), GFP_KERNEL);
	unsigned int full_pipe_count;

	if (!dc)
		return NULL;

	if (init_params->dce_environment == DCE_ENV_VIRTUAL_HW) {
		if (!dc_construct_ctx(dc, init_params))
			goto destruct_dc;
	} else {
		if (!dc_construct(dc, init_params))
			goto destruct_dc;

		full_pipe_count = dc->res_pool->pipe_count;
		if (dc->res_pool->underlay_pipe_index != NO_UNDERLAY_PIPE)
			full_pipe_count--;
		dc->caps.max_streams = min(
				full_pipe_count,
				dc->res_pool->stream_enc_count);

		dc->caps.max_links = dc->link_count;
		dc->caps.max_audios = dc->res_pool->audio_count;
		dc->caps.linear_pitch_alignment = 64;

		dc->caps.max_dp_protocol_version = DP_VERSION_1_4;

		dc->caps.max_otg_num = dc->res_pool->res_cap->num_timing_generator;

		if (dc->res_pool->dmcu != NULL)
			dc->versions.dmcu_version = dc->res_pool->dmcu->dmcu_version;
	}

	/* Populate versioning information */
	dc->versions.dc_ver = DC_VER;

	dc->build_id = DC_BUILD_ID;

	DC_LOG_DC("Display Core initialized\n");

	return dc;

destruct_dc:
	dc_destruct(dc);
	kfree(dc);
	return NULL;
}

static void detect_edp_presence(struct dc *dc)
{
	struct dc_link *edp_links[MAX_NUM_EDP];
	struct dc_link *edp_link = NULL;
	enum dc_connection_type type;
	int i;
	int edp_num;

	get_edp_links(dc, edp_links, &edp_num);
	if (!edp_num)
		return;

	for (i = 0; i < edp_num; i++) {
		edp_link = edp_links[i];
		if (dc->config.edp_not_connected) {
			edp_link->edp_sink_present = false;
		} else {
			dc_link_detect_sink(edp_link, &type);
			edp_link->edp_sink_present = (type != dc_connection_none);
		}
	}
}

void dc_hardware_init(struct dc *dc)
{
	detect_edp_presence(dc);
	if (dc->ctx->dce_environment != DCE_ENV_VIRTUAL_HW)
		dc->hwss.init_hw(dc);
}

void dc_init_callbacks(struct dc *dc,
		const struct dc_callback_init *init_params)
{
#ifdef CONFIG_DRM_AMD_DC_HDCP
	dc->ctx->cp_psp = init_params->cp_psp;
#endif
}

void dc_deinit_callbacks(struct dc *dc)
{
#ifdef CONFIG_DRM_AMD_DC_HDCP
	memset(&dc->ctx->cp_psp, 0, sizeof(dc->ctx->cp_psp));
#endif
}

void dc_destroy(struct dc **dc)
{
	dc_destruct(*dc);
	kfree(*dc);
	*dc = NULL;
}
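
/*
 * Illustrative sketch (not driver code) of the create/init/destroy lifecycle
 * exposed above.  A DM would populate struct dc_init_data from its probe
 * path; init_data is assumed to be already filled in here.
 *
 *	struct dc *dc = dc_create(&init_data);
 *
 *	if (!dc)
 *		return -ENOMEM;
 *
 *	dc_hardware_init(dc);    // programs HW unless running on virtual HW
 *	...
 *	dc_destroy(&dc);         // frees everything and NULLs the pointer
 */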

static void enable_timing_multisync(
		struct dc *dc,
		struct dc_state *ctx)
{
	int i, multisync_count = 0;
	int pipe_count = dc->res_pool->pipe_count;
	struct pipe_ctx *multisync_pipes[MAX_PIPES] = { NULL };

	for (i = 0; i < pipe_count; i++) {
		if (!ctx->res_ctx.pipe_ctx[i].stream ||
				!ctx->res_ctx.pipe_ctx[i].stream->triggered_crtc_reset.enabled)
			continue;
		if (ctx->res_ctx.pipe_ctx[i].stream == ctx->res_ctx.pipe_ctx[i].stream->triggered_crtc_reset.event_source)
			continue;
		multisync_pipes[multisync_count] = &ctx->res_ctx.pipe_ctx[i];
		multisync_count++;
	}

	if (multisync_count > 0) {
		dc->hwss.enable_per_frame_crtc_position_reset(
			dc, multisync_count, multisync_pipes);
	}
}

static void program_timing_sync(
		struct dc *dc,
		struct dc_state *ctx)
{
	int i, j, k;
	int group_index = 0;
	int num_group = 0;
	int pipe_count = dc->res_pool->pipe_count;
	struct pipe_ctx *unsynced_pipes[MAX_PIPES] = { NULL };

	for (i = 0; i < pipe_count; i++) {
		if (!ctx->res_ctx.pipe_ctx[i].stream || ctx->res_ctx.pipe_ctx[i].top_pipe)
			continue;

		unsynced_pipes[i] = &ctx->res_ctx.pipe_ctx[i];
	}

	for (i = 0; i < pipe_count; i++) {
		int group_size = 1;
		enum timing_synchronization_type sync_type = NOT_SYNCHRONIZABLE;
		struct pipe_ctx *pipe_set[MAX_PIPES];

		if (!unsynced_pipes[i])
			continue;

		pipe_set[0] = unsynced_pipes[i];
		unsynced_pipes[i] = NULL;

		/* Add tg to the set, search the rest of the tgs for ones with
		 * the same timing, and add all tgs with the same timing to the group
		 */
		for (j = i + 1; j < pipe_count; j++) {
			if (!unsynced_pipes[j])
				continue;
			if (sync_type != TIMING_SYNCHRONIZABLE &&
				dc->hwss.enable_vblanks_synchronization &&
				unsynced_pipes[j]->stream_res.tg->funcs->align_vblanks &&
				resource_are_vblanks_synchronizable(
					unsynced_pipes[j]->stream,
					pipe_set[0]->stream)) {
				sync_type = VBLANK_SYNCHRONIZABLE;
				pipe_set[group_size] = unsynced_pipes[j];
				unsynced_pipes[j] = NULL;
				group_size++;
			} else
			if (sync_type != VBLANK_SYNCHRONIZABLE &&
				resource_are_streams_timing_synchronizable(
					unsynced_pipes[j]->stream,
					pipe_set[0]->stream)) {
				sync_type = TIMING_SYNCHRONIZABLE;
				pipe_set[group_size] = unsynced_pipes[j];
				unsynced_pipes[j] = NULL;
				group_size++;
			}
		}

		/* set first unblanked pipe as master */
		for (j = 0; j < group_size; j++) {
			bool is_blanked;

			if (pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked)
				is_blanked =
					pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked(pipe_set[j]->stream_res.opp);
			else
				is_blanked =
					pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg);
			if (!is_blanked) {
				if (j == 0)
					break;

				swap(pipe_set[0], pipe_set[j]);
				break;
			}
		}

		for (k = 0; k < group_size; k++) {
			struct dc_stream_status *status = dc_stream_get_status_from_state(ctx, pipe_set[k]->stream);

			status->timing_sync_info.group_id = num_group;
			status->timing_sync_info.group_size = group_size;
			if (k == 0)
				status->timing_sync_info.master = true;
			else
				status->timing_sync_info.master = false;
		}

		/* remove any other pipes that have already been synced */
		if (dc->config.use_pipe_ctx_sync_logic) {
			/* check pipe's syncd to decide which pipe to be removed */
			for (j = 1; j < group_size; j++) {
				if (pipe_set[j]->pipe_idx_syncd == pipe_set[0]->pipe_idx_syncd) {
					group_size--;
					pipe_set[j] = pipe_set[group_size];
					j--;
				} else
					/* link slave pipe's syncd with master pipe */
					pipe_set[j]->pipe_idx_syncd = pipe_set[0]->pipe_idx_syncd;
			}
		} else {
			/* continue past the master found above; entries before it are blanked */
			for (j = j + 1; j < group_size; j++) {
				bool is_blanked;

				if (pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked)
					is_blanked =
						pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked(pipe_set[j]->stream_res.opp);
				else
					is_blanked =
						pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg);
				if (!is_blanked) {
					group_size--;
					pipe_set[j] = pipe_set[group_size];
					j--;
				}
			}
		}

		if (group_size > 1) {
			if (sync_type == TIMING_SYNCHRONIZABLE) {
				dc->hwss.enable_timing_synchronization(
					dc, group_index, group_size, pipe_set);
			} else
				if (sync_type == VBLANK_SYNCHRONIZABLE) {
				dc->hwss.enable_vblanks_synchronization(
					dc, group_index, group_size, pipe_set);
				}
			group_index++;
		}
		num_group++;
	}
}

static bool context_changed(
		struct dc *dc,
		struct dc_state *context)
{
	uint8_t i;

	if (context->stream_count != dc->current_state->stream_count)
		return true;

	for (i = 0; i < dc->current_state->stream_count; i++) {
		if (dc->current_state->streams[i] != context->streams[i])
			return true;
	}

	return false;
}

bool dc_validate_boot_timing(const struct dc *dc,
				const struct dc_sink *sink,
				struct dc_crtc_timing *crtc_timing)
{
	struct timing_generator *tg;
	struct stream_encoder *se = NULL;

	struct dc_crtc_timing hw_crtc_timing = {0};

	struct dc_link *link = sink->link;
	unsigned int i, enc_inst, tg_inst = 0;

	/* Support seamless boot on eDP displays only */
	if (sink->sink_signal != SIGNAL_TYPE_EDP)
		return false;

	/* Check for enabled DIG to identify enabled display */
	if (!link->link_enc->funcs->is_dig_enabled(link->link_enc))
		return false;

	enc_inst = link->link_enc->funcs->get_dig_frontend(link->link_enc);

	if (enc_inst == ENGINE_ID_UNKNOWN)
		return false;

	for (i = 0; i < dc->res_pool->stream_enc_count; i++) {
		if (dc->res_pool->stream_enc[i]->id == enc_inst) {

			se = dc->res_pool->stream_enc[i];

			tg_inst = dc->res_pool->stream_enc[i]->funcs->dig_source_otg(
				dc->res_pool->stream_enc[i]);
			break;
		}
	}

	// tg_inst not found
	if (i == dc->res_pool->stream_enc_count)
		return false;

	if (tg_inst >= dc->res_pool->timing_generator_count)
		return false;

	tg = dc->res_pool->timing_generators[tg_inst];

	if (!tg->funcs->get_hw_timing)
		return false;

	if (!tg->funcs->get_hw_timing(tg, &hw_crtc_timing))
		return false;

	if (crtc_timing->h_total != hw_crtc_timing.h_total)
		return false;

	if (crtc_timing->h_border_left != hw_crtc_timing.h_border_left)
		return false;

	if (crtc_timing->h_addressable != hw_crtc_timing.h_addressable)
		return false;

	if (crtc_timing->h_border_right != hw_crtc_timing.h_border_right)
		return false;

	if (crtc_timing->h_front_porch != hw_crtc_timing.h_front_porch)
		return false;

	if (crtc_timing->h_sync_width != hw_crtc_timing.h_sync_width)
		return false;

	if (crtc_timing->v_total != hw_crtc_timing.v_total)
		return false;

	if (crtc_timing->v_border_top != hw_crtc_timing.v_border_top)
		return false;

	if (crtc_timing->v_addressable != hw_crtc_timing.v_addressable)
		return false;

	if (crtc_timing->v_border_bottom != hw_crtc_timing.v_border_bottom)
		return false;

	if (crtc_timing->v_front_porch != hw_crtc_timing.v_front_porch)
		return false;

	if (crtc_timing->v_sync_width != hw_crtc_timing.v_sync_width)
		return false;

	/* block DSC for now, as VBIOS does not currently support DSC timings */
	if (crtc_timing->flags.DSC)
		return false;

	if (dc_is_dp_signal(link->connector_signal)) {
		unsigned int pix_clk_100hz;
		uint32_t numOdmPipes = 1;
		uint32_t id_src[4] = {0};

		dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
			dc->res_pool->dp_clock_source,
			tg_inst, &pix_clk_100hz);

		if (tg->funcs->get_optc_source)
			tg->funcs->get_optc_source(tg,
						&numOdmPipes, &id_src[0], &id_src[1]);

		if (numOdmPipes == 2)
			pix_clk_100hz *= 2;
		if (numOdmPipes == 4)
			pix_clk_100hz *= 4;

		// Note: In rare cases, HW pixclk may differ from crtc's pixclk
		// slightly due to rounding issues in 10 kHz units.
		if (crtc_timing->pix_clk_100hz != pix_clk_100hz)
			return false;

		if (!se->funcs->dp_get_pixel_format)
			return false;

		if (!se->funcs->dp_get_pixel_format(
			se,
			&hw_crtc_timing.pixel_encoding,
			&hw_crtc_timing.display_color_depth))
			return false;

		if (hw_crtc_timing.display_color_depth != crtc_timing->display_color_depth)
			return false;

		if (hw_crtc_timing.pixel_encoding != crtc_timing->pixel_encoding)
			return false;
	}

	if (link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
		return false;

	if (is_edp_ilr_optimization_required(link, crtc_timing)) {
		DC_LOG_EVENT_LINK_TRAINING("Seamless boot disabled to optimize eDP link rate\n");
		return false;
	}

	return true;
}
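
/*
 * Illustrative sketch (not driver code): how a DM might use the validation
 * above to decide whether the firmware-programmed boot mode can be adopted
 * without a full mode set.  The stream and sink variables are assumed to
 * describe the firmware-lit eDP panel.
 *
 *	if (dc_validate_boot_timing(dc, sink, &stream->timing))
 *		stream->apply_seamless_boot_optimization = true;
 *	else
 *		DC_LOG_DC("boot timing mismatch, full mode set required");
 */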

static inline bool should_update_pipe_for_stream(
		struct dc_state *context,
		struct pipe_ctx *pipe_ctx,
		struct dc_stream_state *stream)
{
	return (pipe_ctx->stream && pipe_ctx->stream == stream);
}

static inline bool should_update_pipe_for_plane(
		struct dc_state *context,
		struct pipe_ctx *pipe_ctx,
		struct dc_plane_state *plane_state)
{
	return (pipe_ctx->plane_state == plane_state);
}

void dc_enable_stereo(
	struct dc *dc,
	struct dc_state *context,
	struct dc_stream_state *streams[],
	uint8_t stream_count)
{
	int i, j;
	struct pipe_ctx *pipe;

	for (i = 0; i < MAX_PIPES; i++) {
		if (context != NULL) {
			pipe = &context->res_ctx.pipe_ctx[i];
		} else {
			context = dc->current_state;
			pipe = &dc->current_state->res_ctx.pipe_ctx[i];
		}

		for (j = 0; pipe && j < stream_count; j++) {
			if (should_update_pipe_for_stream(context, pipe, streams[j]) &&
				dc->hwss.setup_stereo)
				dc->hwss.setup_stereo(pipe, dc);
		}
	}
}

void dc_trigger_sync(struct dc *dc, struct dc_state *context)
{
	if (context->stream_count > 1 && !dc->debug.disable_timing_sync) {
		enable_timing_multisync(dc, context);
		program_timing_sync(dc, context);
	}
}

static uint8_t get_stream_mask(struct dc *dc, struct dc_state *context)
{
	int i;
	unsigned int stream_mask = 0;

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		if (context->res_ctx.pipe_ctx[i].stream)
			stream_mask |= 1 << i;
	}

	return stream_mask;
}

void dc_z10_restore(const struct dc *dc)
{
	if (dc->hwss.z10_restore)
		dc->hwss.z10_restore(dc);
}

void dc_z10_save_init(struct dc *dc)
{
	if (dc->hwss.z10_save_init)
		dc->hwss.z10_save_init(dc);
}
1695 
/*
 * Applies the given context to HW and copies it into the current context.
 * It's up to the caller to release the src context afterwards.
 */
static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *context)
{
	struct dc_bios *dcb = dc->ctx->dc_bios;
	enum dc_status result = DC_ERROR_UNEXPECTED;
	struct pipe_ctx *pipe;
	int i, k, l;
	struct dc_stream_state *dc_streams[MAX_STREAMS] = {0};
	struct dc_state *old_state;

	dc_z10_restore(dc);
	dc_allow_idle_optimizations(dc, false);

	for (i = 0; i < context->stream_count; i++)
		dc_streams[i] = context->streams[i];

	if (!dcb->funcs->is_accelerated_mode(dcb)) {
		disable_vbios_mode_if_required(dc, context);
		dc->hwss.enable_accelerated_mode(dc, context);
	}

	if (context->stream_count > get_seamless_boot_stream_count(context) ||
		context->stream_count == 0)
		dc->hwss.prepare_bandwidth(dc, context);

	disable_dangling_plane(dc, context);
	/* re-program planes for existing stream, in case we need to
	 * free up plane resources for later use
	 */
	if (dc->hwss.apply_ctx_for_surface) {
		for (i = 0; i < context->stream_count; i++) {
			if (context->streams[i]->mode_changed)
				continue;
			apply_ctx_interdependent_lock(dc, context, context->streams[i], true);
			dc->hwss.apply_ctx_for_surface(
				dc, context->streams[i],
				context->stream_status[i].plane_count,
				context); /* use new pipe config in new context */
			apply_ctx_interdependent_lock(dc, context, context->streams[i], false);
			dc->hwss.post_unlock_program_front_end(dc, context);
		}
	}

	/* Program hardware */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		pipe = &context->res_ctx.pipe_ctx[i];
		dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe);
	}

	result = dc->hwss.apply_ctx_to_hw(dc, context);

	if (result != DC_OK)
		return result;

	dc_trigger_sync(dc, context);

	/* Program all planes within new context */
	if (dc->hwss.program_front_end_for_ctx) {
		dc->hwss.interdependent_update_lock(dc, context, true);
		dc->hwss.program_front_end_for_ctx(dc, context);
		dc->hwss.interdependent_update_lock(dc, context, false);
		dc->hwss.post_unlock_program_front_end(dc, context);
	}
	for (i = 0; i < context->stream_count; i++) {
		const struct dc_link *link = context->streams[i]->link;

		if (!context->streams[i]->mode_changed)
			continue;

		if (dc->hwss.apply_ctx_for_surface) {
			apply_ctx_interdependent_lock(dc, context, context->streams[i], true);
			dc->hwss.apply_ctx_for_surface(
					dc, context->streams[i],
					context->stream_status[i].plane_count,
					context);
			apply_ctx_interdependent_lock(dc, context, context->streams[i], false);
			dc->hwss.post_unlock_program_front_end(dc, context);
		}

		/*
		 * enable stereo
		 * TODO rework dc_enable_stereo call to work with validation sets?
		 */
		for (k = 0; k < MAX_PIPES; k++) {
			pipe = &context->res_ctx.pipe_ctx[k];

			for (l = 0; pipe && l < context->stream_count; l++) {
				if (context->streams[l] &&
					context->streams[l] == pipe->stream &&
					dc->hwss.setup_stereo)
					dc->hwss.setup_stereo(pipe, dc);
			}
		}

		CONN_MSG_MODE(link, "{%dx%d, %dx%d@%dKhz}",
				context->streams[i]->timing.h_addressable,
				context->streams[i]->timing.v_addressable,
				context->streams[i]->timing.h_total,
				context->streams[i]->timing.v_total,
				context->streams[i]->timing.pix_clk_100hz / 10);
	}

	dc_enable_stereo(dc, context, dc_streams, context->stream_count);

	if (context->stream_count > get_seamless_boot_stream_count(context) ||
		context->stream_count == 0) {
		/* Must wait for no flips to be pending before doing optimize bw */
		wait_for_no_pipes_pending(dc, context);
		/* pplib is notified if disp_num changed */
		dc->hwss.optimize_bandwidth(dc, context);
	}

	if (dc->ctx->dce_version >= DCE_VERSION_MAX)
		TRACE_DCN_CLOCK_STATE(&context->bw_ctx.bw.dcn.clk);
	else
		TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce);

	context->stream_mask = get_stream_mask(dc, context);

	if (context->stream_mask != dc->current_state->stream_mask)
		dc_dmub_srv_notify_stream_mask(dc->ctx->dmub_srv, context->stream_mask);

	for (i = 0; i < context->stream_count; i++)
		context->streams[i]->mode_changed = false;

	old_state = dc->current_state;
	dc->current_state = context;

	dc_release_state(old_state);

	dc_retain_state(dc->current_state);

	return result;
}

bool dc_commit_state(struct dc *dc, struct dc_state *context)
{
	enum dc_status result = DC_ERROR_UNEXPECTED;
	int i;

	if (!context_changed(dc, context))
		return true;

	DC_LOG_DC("%s: %d streams\n",
				__func__, context->stream_count);

	for (i = 0; i < context->stream_count; i++) {
		struct dc_stream_state *stream = context->streams[i];

		dc_stream_log(dc, stream);
	}

	/*
	 * Previous validation was performed with fast_validation = true and
	 * the full DML state required for hardware programming was skipped.
	 *
	 * Re-validate here to calculate these parameters / watermarks.
	 */
	result = dc_validate_global_state(dc, context, false);
	if (result != DC_OK) {
		DC_LOG_ERROR("DC commit global validation failure: %s (%d)",
			     dc_status_to_str(result), result);
		return false;
	}

	result = dc_commit_state_no_check(dc, context);

	return (result == DC_OK);
}

bool dc_acquire_release_mpc_3dlut(
		struct dc *dc, bool acquire,
		struct dc_stream_state *stream,
		struct dc_3dlut **lut,
		struct dc_transfer_func **shaper)
{
	int pipe_idx;
	bool ret = false;
	bool found_pipe_idx = false;
	const struct resource_pool *pool = dc->res_pool;
	struct resource_context *res_ctx = &dc->current_state->res_ctx;
	int mpcc_id = 0;

	if (pool && res_ctx) {
		if (acquire) {
			/* find pipe idx for the given stream */
			for (pipe_idx = 0; pipe_idx < pool->pipe_count; pipe_idx++) {
				if (res_ctx->pipe_ctx[pipe_idx].stream == stream) {
					found_pipe_idx = true;
					mpcc_id = res_ctx->pipe_ctx[pipe_idx].plane_res.hubp->inst;
					break;
				}
			}
		} else
			found_pipe_idx = true; /* for release, pipe_idx is not required */

		if (found_pipe_idx) {
			if (acquire && pool->funcs->acquire_post_bldn_3dlut)
				ret = pool->funcs->acquire_post_bldn_3dlut(res_ctx, pool, mpcc_id, lut, shaper);
			else if (!acquire && pool->funcs->release_post_bldn_3dlut)
				ret = pool->funcs->release_post_bldn_3dlut(res_ctx, pool, lut, shaper);
		}
	}
	return ret;
}

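/*
 * Returns true if any pipe in @context still has a page flip pending.
 * update_pending_status() ORs into is_flip_pending, so the flag must be
 * cleared before polling.
 */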
static bool is_flip_pending_in_pipes(struct dc *dc, struct dc_state *context)
{
	int i;
	struct pipe_ctx *pipe;

	for (i = 0; i < MAX_PIPES; i++) {
		pipe = &context->res_ctx.pipe_ctx[i];

		if (!pipe->plane_state)
			continue;

		/* Must set to false to start with, due to OR in update function */
		pipe->plane_state->status.is_flip_pending = false;
		dc->hwss.update_pending_status(pipe);
		if (pipe->plane_state->status.is_flip_pending)
			return true;
	}
	return false;
}

/* Perform updates here which need to be deferred until next vupdate
 *
 * i.e. blnd lut, 3dlut, and shaper lut bypass regs are double buffered
 * but forcing lut memory to shutdown state is immediate. This causes
 * single frame corruption as lut gets disabled mid-frame unless shutdown
 * is deferred until after entering bypass.
 */
static void process_deferred_updates(struct dc *dc)
{
	int i = 0;

	if (dc->debug.enable_mem_low_power.bits.cm) {
		ASSERT(dc->dcn_ip->max_num_dpp);
		for (i = 0; i < dc->dcn_ip->max_num_dpp; i++)
			if (dc->res_pool->dpps[i]->funcs->dpp_deferred_update)
				dc->res_pool->dpps[i]->funcs->dpp_deferred_update(dc->res_pool->dpps[i]);
	}
}

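/*
 * Lower clocks and release unused pipe resources once all pending updates
 * have taken effect.  Bails out early if no optimization is required, if a
 * seamless boot stream is still active, or while a flip is pending.
 */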
void dc_post_update_surfaces_to_stream(struct dc *dc)
{
	int i;
	struct dc_state *context = dc->current_state;

	if ((!dc->optimized_required) || get_seamless_boot_stream_count(context) > 0)
		return;

	post_surface_trace(dc);

	if (dc->ctx->dce_version >= DCE_VERSION_MAX)
		TRACE_DCN_CLOCK_STATE(&context->bw_ctx.bw.dcn.clk);
	else
		TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce);

	if (is_flip_pending_in_pipes(dc, context))
		return;

	for (i = 0; i < dc->res_pool->pipe_count; i++)
		if (context->res_ctx.pipe_ctx[i].stream == NULL ||
		    context->res_ctx.pipe_ctx[i].plane_state == NULL) {
			context->res_ctx.pipe_ctx[i].pipe_idx = i;
			dc->hwss.disable_plane(dc, &context->res_ctx.pipe_ctx[i]);
		}

	process_deferred_updates(dc);

	dc->hwss.optimize_bandwidth(dc, context);

	dc->optimized_required = false;
	dc->wm_optimized_required = false;
}

static void init_state(struct dc *dc, struct dc_state *context)
{
	/* Each context must have its own instance of VBA, and in order to
	 * initialize and obtain IP and SOC, the base DML instance from DC is
	 * initially copied into every context.
	 */
	memcpy(&context->bw_ctx.dml, &dc->dml, sizeof(struct display_mode_lib));
}

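/*
 * dc_state objects are reference counted: dc_create_state() and
 * dc_copy_state() return a context with a refcount of one,
 * dc_retain_state() takes an extra reference, and dc_release_state()
 * drops one, freeing the context when the count hits zero.  A minimal
 * sketch of the expected caller pattern (illustrative only; real callers
 * live in the DM layer):
 *
 *	struct dc_state *ctx = dc_create_state(dc);
 *
 *	if (!ctx)
 *		return -ENOMEM;
 *	dc_resource_state_copy_construct_current(dc, ctx);
 *	// ... build the new state, validate, commit ...
 *	dc_release_state(ctx);
 */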
struct dc_state *dc_create_state(struct dc *dc)
{
	struct dc_state *context = kvzalloc(sizeof(struct dc_state),
					    GFP_KERNEL);

	if (!context)
		return NULL;

	init_state(dc, context);

	kref_init(&context->refcount);

	return context;
}

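/*
 * Deep-copy @src_ctx into a freshly allocated context.  Pipe pointers
 * (top/bottom/ODM) are re-targeted into the new context, and every stream
 * and plane picks up an extra reference so the copy owns them independently.
 */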
struct dc_state *dc_copy_state(struct dc_state *src_ctx)
{
	int i, j;
	struct dc_state *new_ctx = kvmalloc(sizeof(struct dc_state), GFP_KERNEL);

	if (!new_ctx)
		return NULL;
	memcpy(new_ctx, src_ctx, sizeof(struct dc_state));

	for (i = 0; i < MAX_PIPES; i++) {
		struct pipe_ctx *cur_pipe = &new_ctx->res_ctx.pipe_ctx[i];

		if (cur_pipe->top_pipe)
			cur_pipe->top_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->top_pipe->pipe_idx];

		if (cur_pipe->bottom_pipe)
			cur_pipe->bottom_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->bottom_pipe->pipe_idx];

		if (cur_pipe->prev_odm_pipe)
			cur_pipe->prev_odm_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->prev_odm_pipe->pipe_idx];

		if (cur_pipe->next_odm_pipe)
			cur_pipe->next_odm_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->next_odm_pipe->pipe_idx];
	}

	for (i = 0; i < new_ctx->stream_count; i++) {
		dc_stream_retain(new_ctx->streams[i]);
		for (j = 0; j < new_ctx->stream_status[i].plane_count; j++)
			dc_plane_state_retain(
				new_ctx->stream_status[i].plane_states[j]);
	}

	kref_init(&new_ctx->refcount);

	return new_ctx;
}

void dc_retain_state(struct dc_state *context)
{
	kref_get(&context->refcount);
}

static void dc_state_free(struct kref *kref)
{
	struct dc_state *context = container_of(kref, struct dc_state, refcount);

	dc_resource_state_destruct(context);
	kvfree(context);
}

void dc_release_state(struct dc_state *context)
{
	kref_put(&context->refcount, dc_state_free);
}

bool dc_set_generic_gpio_for_stereo(bool enable,
		struct gpio_service *gpio_service)
{
	enum gpio_result gpio_result = GPIO_RESULT_NON_SPECIFIC_ERROR;
	struct gpio_pin_info pin_info;
	struct gpio *generic;
	struct gpio_generic_mux_config *config = kzalloc(sizeof(struct gpio_generic_mux_config),
			   GFP_KERNEL);

	if (!config)
		return false;
	pin_info = dal_gpio_get_generic_pin_info(gpio_service, GPIO_ID_GENERIC, 0);

	if (pin_info.mask == 0xFFFFFFFF || pin_info.offset == 0xFFFFFFFF) {
		kfree(config);
		return false;
	}

	generic = dal_gpio_service_create_generic_mux(
			gpio_service,
			pin_info.offset,
			pin_info.mask);
	if (!generic) {
		kfree(config);
		return false;
	}

	gpio_result = dal_gpio_open(generic, GPIO_MODE_OUTPUT);

	config->enable_output_from_mux = enable;
	config->mux_select = GPIO_SIGNAL_SOURCE_PASS_THROUGH_STEREO_SYNC;

	if (gpio_result == GPIO_RESULT_OK)
		gpio_result = dal_mux_setup_config(generic, config);

	dal_gpio_close(generic);
	dal_gpio_destroy_generic_mux(&generic);
	kfree(config);

	return gpio_result == GPIO_RESULT_OK;
}

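/* Returns true if @plane_state is bound to any pipe in @context. */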
static bool is_surface_in_context(
		const struct dc_state *context,
		const struct dc_plane_state *plane_state)
{
	int j;

	for (j = 0; j < MAX_PIPES; j++) {
		const struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];

		if (plane_state == pipe_ctx->plane_state)
			return true;
	}

	return false;
}

static enum surface_update_type get_plane_info_update_type(const struct dc_surface_update *u)
{
	union surface_update_flags *update_flags = &u->surface->update_flags;
	enum surface_update_type update_type = UPDATE_TYPE_FAST;

	if (!u->plane_info)
		return UPDATE_TYPE_FAST;

	if (u->plane_info->color_space != u->surface->color_space) {
		update_flags->bits.color_space_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_MED);
	}

	if (u->plane_info->horizontal_mirror != u->surface->horizontal_mirror) {
		update_flags->bits.horizontal_mirror_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_MED);
	}

	if (u->plane_info->rotation != u->surface->rotation) {
		update_flags->bits.rotation_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_FULL);
	}

	if (u->plane_info->format != u->surface->format) {
		update_flags->bits.pixel_format_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_FULL);
	}

	if (u->plane_info->stereo_format != u->surface->stereo_format) {
		update_flags->bits.stereo_format_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_FULL);
	}

	if (u->plane_info->per_pixel_alpha != u->surface->per_pixel_alpha) {
		update_flags->bits.per_pixel_alpha_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_MED);
	}

	if (u->plane_info->global_alpha_value != u->surface->global_alpha_value) {
		update_flags->bits.global_alpha_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_MED);
	}

	if (u->plane_info->dcc.enable != u->surface->dcc.enable
			|| u->plane_info->dcc.dcc_ind_blk != u->surface->dcc.dcc_ind_blk
			|| u->plane_info->dcc.meta_pitch != u->surface->dcc.meta_pitch) {
		/* During DCC on/off, stutter period is calculated before
		 * DCC has fully transitioned. This results in incorrect
		 * stutter period calculation. Triggering a full update will
		 * recalculate stutter period.
		 */
		update_flags->bits.dcc_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_FULL);
	}

	if (resource_pixel_format_to_bpp(u->plane_info->format) !=
			resource_pixel_format_to_bpp(u->surface->format)) {
		/* different bytes per element will require full bandwidth
		 * and DML calculation
		 */
		update_flags->bits.bpp_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_FULL);
	}

	if (u->plane_info->plane_size.surface_pitch != u->surface->plane_size.surface_pitch
			|| u->plane_info->plane_size.chroma_pitch != u->surface->plane_size.chroma_pitch) {
		update_flags->bits.plane_size_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_MED);
	}

	if (memcmp(&u->plane_info->tiling_info, &u->surface->tiling_info,
			sizeof(union dc_tiling_info)) != 0) {
		update_flags->bits.swizzle_change = 1;
		elevate_update_type(&update_type, UPDATE_TYPE_MED);

		/* todo: below is HW dependent; it should be moved to a hook in
		 * the DCE/N resource and validated there.
		 */
		if (u->plane_info->tiling_info.gfx9.swizzle != DC_SW_LINEAR) {
			/* swizzled mode requires RQ to be set up properly,
			 * thus we need to run DML to calculate RQ settings
			 */
			update_flags->bits.bandwidth_change = 1;
			elevate_update_type(&update_type, UPDATE_TYPE_FULL);
		}
	}

	/* This should be UPDATE_TYPE_FAST if nothing has changed. */
	return update_type;
}

static enum surface_update_type get_scaling_info_update_type(
		const struct dc_surface_update *u)
{
	union surface_update_flags *update_flags = &u->surface->update_flags;

	if (!u->scaling_info)
		return UPDATE_TYPE_FAST;

	if (u->scaling_info->clip_rect.width != u->surface->clip_rect.width
			|| u->scaling_info->clip_rect.height != u->surface->clip_rect.height
			|| u->scaling_info->dst_rect.width != u->surface->dst_rect.width
			|| u->scaling_info->dst_rect.height != u->surface->dst_rect.height
			|| u->scaling_info->scaling_quality.integer_scaling !=
				u->surface->scaling_quality.integer_scaling) {
		update_flags->bits.scaling_change = 1;

		if ((u->scaling_info->dst_rect.width < u->surface->dst_rect.width
			|| u->scaling_info->dst_rect.height < u->surface->dst_rect.height)
				&& (u->scaling_info->dst_rect.width < u->surface->src_rect.width
					|| u->scaling_info->dst_rect.height < u->surface->src_rect.height))
			/* Making dst rect smaller requires a bandwidth change */
			update_flags->bits.bandwidth_change = 1;
	}

	if (u->scaling_info->src_rect.width != u->surface->src_rect.width
		|| u->scaling_info->src_rect.height != u->surface->src_rect.height) {

		update_flags->bits.scaling_change = 1;
		if (u->scaling_info->src_rect.width > u->surface->src_rect.width
				|| u->scaling_info->src_rect.height > u->surface->src_rect.height)
			/* Making src rect bigger requires more fetch bandwidth,
			 * tracked via the clock_change flag.
			 */
			update_flags->bits.clock_change = 1;
	}

	if (u->scaling_info->src_rect.x != u->surface->src_rect.x
			|| u->scaling_info->src_rect.y != u->surface->src_rect.y
			|| u->scaling_info->clip_rect.x != u->surface->clip_rect.x
			|| u->scaling_info->clip_rect.y != u->surface->clip_rect.y
			|| u->scaling_info->dst_rect.x != u->surface->dst_rect.x
			|| u->scaling_info->dst_rect.y != u->surface->dst_rect.y)
		update_flags->bits.position_change = 1;

	if (update_flags->bits.clock_change
			|| update_flags->bits.bandwidth_change
			|| update_flags->bits.scaling_change)
		return UPDATE_TYPE_FULL;

	if (update_flags->bits.position_change)
		return UPDATE_TYPE_MED;

	return UPDATE_TYPE_FAST;
}

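/*
 * Classify a single surface update.  Starts from UPDATE_TYPE_FAST and
 * elevates to MED or FULL based on which update_flags get set; a surface
 * not yet in the current context always forces a FULL update.
 */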
static enum surface_update_type det_surface_update(const struct dc *dc,
		const struct dc_surface_update *u)
{
	const struct dc_state *context = dc->current_state;
	enum surface_update_type type;
	enum surface_update_type overall_type = UPDATE_TYPE_FAST;
	union surface_update_flags *update_flags = &u->surface->update_flags;

	if (u->flip_addr)
		update_flags->bits.addr_update = 1;

	if (!is_surface_in_context(context, u->surface) || u->surface->force_full_update) {
		update_flags->raw = 0xFFFFFFFF;
		return UPDATE_TYPE_FULL;
	}

	update_flags->raw = 0; // Reset all flags

	type = get_plane_info_update_type(u);
	elevate_update_type(&overall_type, type);

	type = get_scaling_info_update_type(u);
	elevate_update_type(&overall_type, type);

	if (u->flip_addr)
		update_flags->bits.addr_update = 1;

	if (u->in_transfer_func)
		update_flags->bits.in_transfer_func_change = 1;

	if (u->input_csc_color_matrix)
		update_flags->bits.input_csc_change = 1;

	if (u->coeff_reduction_factor)
		update_flags->bits.coeff_reduction_change = 1;

	if (u->gamut_remap_matrix)
		update_flags->bits.gamut_remap_change = 1;

	if (u->gamma) {
		enum surface_pixel_format format = SURFACE_PIXEL_FORMAT_GRPH_BEGIN;

		if (u->plane_info)
			format = u->plane_info->format;
		else if (u->surface)
			format = u->surface->format;

		if (dce_use_lut(format))
			update_flags->bits.gamma_change = 1;
	}

	if (u->lut3d_func || u->func_shaper)
		update_flags->bits.lut_3d = 1;

	if (u->hdr_mult.value)
		if (u->hdr_mult.value != u->surface->hdr_mult.value) {
			update_flags->bits.hdr_mult = 1;
			elevate_update_type(&overall_type, UPDATE_TYPE_MED);
		}

	if (update_flags->bits.in_transfer_func_change) {
		type = UPDATE_TYPE_MED;
		elevate_update_type(&overall_type, type);
	}

	if (update_flags->bits.input_csc_change
			|| update_flags->bits.coeff_reduction_change
			|| update_flags->bits.lut_3d
			|| update_flags->bits.gamma_change
			|| update_flags->bits.gamut_remap_change) {
		type = UPDATE_TYPE_FULL;
		elevate_update_type(&overall_type, type);
	}

	return overall_type;
}

static enum surface_update_type check_update_surfaces_for_stream(
		struct dc *dc,
		struct dc_surface_update *updates,
		int surface_count,
		struct dc_stream_update *stream_update,
		const struct dc_stream_status *stream_status)
{
	int i;
	enum surface_update_type overall_type = UPDATE_TYPE_FAST;

	if (dc->idle_optimizations_allowed)
		overall_type = UPDATE_TYPE_FULL;

	if (stream_status == NULL || stream_status->plane_count != surface_count)
		overall_type = UPDATE_TYPE_FULL;

	if (stream_update && stream_update->pending_test_pattern)
		overall_type = UPDATE_TYPE_FULL;

	/* some stream updates require passive update */
	if (stream_update) {
		union stream_update_flags *su_flags = &stream_update->stream->update_flags;

		if ((stream_update->src.height != 0 && stream_update->src.width != 0) ||
			(stream_update->dst.height != 0 && stream_update->dst.width != 0) ||
			stream_update->integer_scaling_update)
			su_flags->bits.scaling = 1;

		if (stream_update->out_transfer_func)
			su_flags->bits.out_tf = 1;

		if (stream_update->abm_level)
			su_flags->bits.abm_level = 1;

		if (stream_update->dpms_off)
			su_flags->bits.dpms_off = 1;

		if (stream_update->gamut_remap)
			su_flags->bits.gamut_remap = 1;

		if (stream_update->wb_update)
			su_flags->bits.wb_update = 1;

		if (stream_update->dsc_config)
			su_flags->bits.dsc_changed = 1;

		if (stream_update->mst_bw_update)
			su_flags->bits.mst_bw = 1;

		if (stream_update->crtc_timing_adjust && dc_extended_blank_supported(dc))
			su_flags->bits.crtc_timing_adjust = 1;

		if (su_flags->raw != 0)
			overall_type = UPDATE_TYPE_FULL;

		if (stream_update->output_csc_transform || stream_update->output_color_space)
			su_flags->bits.out_csc = 1;
	}

	for (i = 0; i < surface_count; i++) {
		enum surface_update_type type =
				det_surface_update(dc, &updates[i]);

		elevate_update_type(&overall_type, type);
	}

	return overall_type;
}

/*
 * dc_check_update_surfaces_for_stream() - Determine update type (fast, med, or full)
 *
 * See :c:type:`enum surface_update_type <surface_update_type>` for an explanation of the update types.
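 *
 * A minimal usage sketch (illustrative only; the variables below are
 * placeholders, not part of this API):
 *
 *	enum surface_update_type type =
 *		dc_check_update_surfaces_for_stream(dc, srf_updates,
 *				surface_count, stream_update,
 *				dc_stream_get_status(stream));
 *
 * UPDATE_TYPE_FULL means a new context must be built and revalidated before
 * commit; UPDATE_TYPE_FAST can be programmed against the current context.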
 */
enum surface_update_type dc_check_update_surfaces_for_stream(
		struct dc *dc,
		struct dc_surface_update *updates,
		int surface_count,
		struct dc_stream_update *stream_update,
		const struct dc_stream_status *stream_status)
{
	int i;
	enum surface_update_type type;

	if (stream_update)
		stream_update->stream->update_flags.raw = 0;
	for (i = 0; i < surface_count; i++)
		updates[i].surface->update_flags.raw = 0;

	type = check_update_surfaces_for_stream(dc, updates, surface_count, stream_update, stream_status);
	if (type == UPDATE_TYPE_FULL) {
		if (stream_update) {
			uint32_t dsc_changed = stream_update->stream->update_flags.bits.dsc_changed;

			stream_update->stream->update_flags.raw = 0xFFFFFFFF;
			stream_update->stream->update_flags.bits.dsc_changed = dsc_changed;
		}
		for (i = 0; i < surface_count; i++)
			updates[i].surface->update_flags.raw = 0xFFFFFFFF;
	}

	if (type == UPDATE_TYPE_FAST) {
		// If there's an available clock comparator, we use that.
		if (dc->clk_mgr->funcs->are_clock_states_equal) {
			if (!dc->clk_mgr->funcs->are_clock_states_equal(&dc->clk_mgr->clks, &dc->current_state->bw_ctx.bw.dcn.clk))
				dc->optimized_required = true;
		// Else we fall back to a mem compare.
		} else if (memcmp(&dc->current_state->bw_ctx.bw.dcn.clk, &dc->clk_mgr->clks, offsetof(struct dc_clocks, prev_p_state_change_support)) != 0) {
			dc->optimized_required = true;
		}

		dc->optimized_required |= dc->wm_optimized_required;
	}

	return type;
}

static struct dc_stream_status *stream_get_status(
	struct dc_state *ctx,
	struct dc_stream_state *stream)
{
	uint8_t i;

	for (i = 0; i < ctx->stream_count; i++) {
		if (stream == ctx->streams[i])
			return &ctx->stream_status[i];
	}

	return NULL;
}

static const enum surface_update_type update_surface_trace_level = UPDATE_TYPE_FULL;

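/*
 * Apply the fields of a dc_surface_update to the plane it targets.  Only
 * the sub-structures actually present in the update are copied.
 */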
static void copy_surface_update_to_plane(
		struct dc_plane_state *surface,
		struct dc_surface_update *srf_update)
{
	if (srf_update->flip_addr) {
		surface->address = srf_update->flip_addr->address;
		surface->flip_immediate =
			srf_update->flip_addr->flip_immediate;
		surface->time.time_elapsed_in_us[surface->time.index] =
			srf_update->flip_addr->flip_timestamp_in_us -
				surface->time.prev_update_time_in_us;
		surface->time.prev_update_time_in_us =
			srf_update->flip_addr->flip_timestamp_in_us;
		surface->time.index++;
		if (surface->time.index >= DC_PLANE_UPDATE_TIMES_MAX)
			surface->time.index = 0;

		surface->triplebuffer_flips = srf_update->flip_addr->triplebuffer_flips;
	}

	if (srf_update->scaling_info) {
		surface->scaling_quality =
				srf_update->scaling_info->scaling_quality;
		surface->dst_rect =
				srf_update->scaling_info->dst_rect;
		surface->src_rect =
				srf_update->scaling_info->src_rect;
		surface->clip_rect =
				srf_update->scaling_info->clip_rect;
	}

	if (srf_update->plane_info) {
		surface->color_space =
				srf_update->plane_info->color_space;
		surface->format =
				srf_update->plane_info->format;
		surface->plane_size =
				srf_update->plane_info->plane_size;
		surface->rotation =
				srf_update->plane_info->rotation;
		surface->horizontal_mirror =
				srf_update->plane_info->horizontal_mirror;
		surface->stereo_format =
				srf_update->plane_info->stereo_format;
		surface->tiling_info =
				srf_update->plane_info->tiling_info;
		surface->visible =
				srf_update->plane_info->visible;
		surface->per_pixel_alpha =
				srf_update->plane_info->per_pixel_alpha;
		surface->global_alpha =
				srf_update->plane_info->global_alpha;
		surface->global_alpha_value =
				srf_update->plane_info->global_alpha_value;
		surface->dcc =
				srf_update->plane_info->dcc;
		surface->layer_index =
				srf_update->plane_info->layer_index;
	}

	if (srf_update->gamma &&
			(surface->gamma_correction !=
					srf_update->gamma)) {
		memcpy(&surface->gamma_correction->entries,
			&srf_update->gamma->entries,
			sizeof(struct dc_gamma_entries));
		surface->gamma_correction->is_identity =
			srf_update->gamma->is_identity;
		surface->gamma_correction->num_entries =
			srf_update->gamma->num_entries;
		surface->gamma_correction->type =
			srf_update->gamma->type;
	}

	if (srf_update->in_transfer_func &&
			(surface->in_transfer_func !=
				srf_update->in_transfer_func)) {
		surface->in_transfer_func->sdr_ref_white_level =
			srf_update->in_transfer_func->sdr_ref_white_level;
		surface->in_transfer_func->tf =
			srf_update->in_transfer_func->tf;
		surface->in_transfer_func->type =
			srf_update->in_transfer_func->type;
		memcpy(&surface->in_transfer_func->tf_pts,
			&srf_update->in_transfer_func->tf_pts,
			sizeof(struct dc_transfer_func_distributed_points));
	}

	if (srf_update->func_shaper &&
			(surface->in_shaper_func !=
			srf_update->func_shaper))
		memcpy(surface->in_shaper_func, srf_update->func_shaper,
			sizeof(*surface->in_shaper_func));

	if (srf_update->lut3d_func &&
			(surface->lut3d_func !=
			srf_update->lut3d_func))
		memcpy(surface->lut3d_func, srf_update->lut3d_func,
			sizeof(*surface->lut3d_func));

	if (srf_update->hdr_mult.value)
		surface->hdr_mult =
				srf_update->hdr_mult;

	if (srf_update->blend_tf &&
			(surface->blend_tf !=
			srf_update->blend_tf))
		memcpy(surface->blend_tf, srf_update->blend_tf,
			sizeof(*surface->blend_tf));

	if (srf_update->input_csc_color_matrix)
		surface->input_csc_color_matrix =
			*srf_update->input_csc_color_matrix;

	if (srf_update->coeff_reduction_factor)
		surface->coeff_reduction_factor =
			*srf_update->coeff_reduction_factor;

	if (srf_update->gamut_remap_matrix)
		surface->gamut_remap_matrix =
			*srf_update->gamut_remap_matrix;
}

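/*
 * Apply a dc_stream_update to @stream in place.  A DSC config change is
 * first validated against a temporary copy of the current state; if
 * validation fails, the old DSC settings are kept and the update's
 * dsc_config is cleared so it is not programmed later.
 */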
static void copy_stream_update_to_stream(struct dc *dc,
					 struct dc_state *context,
					 struct dc_stream_state *stream,
					 struct dc_stream_update *update)
{
	struct dc_context *dc_ctx = dc->ctx;

	if (update == NULL || stream == NULL)
		return;

	if (update->src.height && update->src.width)
		stream->src = update->src;

	if (update->dst.height && update->dst.width)
		stream->dst = update->dst;

	if (update->out_transfer_func &&
	    stream->out_transfer_func != update->out_transfer_func) {
		stream->out_transfer_func->sdr_ref_white_level =
			update->out_transfer_func->sdr_ref_white_level;
		stream->out_transfer_func->tf = update->out_transfer_func->tf;
		stream->out_transfer_func->type =
			update->out_transfer_func->type;
		memcpy(&stream->out_transfer_func->tf_pts,
		       &update->out_transfer_func->tf_pts,
		       sizeof(struct dc_transfer_func_distributed_points));
	}

	if (update->hdr_static_metadata)
		stream->hdr_static_metadata = *update->hdr_static_metadata;

	if (update->abm_level)
		stream->abm_level = *update->abm_level;

	if (update->periodic_interrupt0)
		stream->periodic_interrupt0 = *update->periodic_interrupt0;

	if (update->periodic_interrupt1)
		stream->periodic_interrupt1 = *update->periodic_interrupt1;

	if (update->gamut_remap)
		stream->gamut_remap_matrix = *update->gamut_remap;

	/* Note: this being updated after mode set is currently not a use case
	 * however if it arises OCSC would need to be reprogrammed at the
	 * minimum
	 */
	if (update->output_color_space)
		stream->output_color_space = *update->output_color_space;

	if (update->output_csc_transform)
		stream->csc_color_matrix = *update->output_csc_transform;

	if (update->vrr_infopacket)
		stream->vrr_infopacket = *update->vrr_infopacket;

	if (update->crtc_timing_adjust)
		stream->adjust = *update->crtc_timing_adjust;

	if (update->dpms_off)
		stream->dpms_off = *update->dpms_off;

	if (update->vsc_infopacket)
		stream->vsc_infopacket = *update->vsc_infopacket;

	if (update->vsp_infopacket)
		stream->vsp_infopacket = *update->vsp_infopacket;

	if (update->dither_option)
		stream->dither_option = *update->dither_option;

	if (update->pending_test_pattern)
		stream->test_pattern = *update->pending_test_pattern;

	/* update current stream with writeback info */
	if (update->wb_update) {
		int i;

		stream->num_wb_info = update->wb_update->num_wb_info;
		ASSERT(stream->num_wb_info <= MAX_DWB_PIPES);
		for (i = 0; i < stream->num_wb_info; i++)
			stream->writeback_info[i] =
				update->wb_update->writeback_info[i];
	}
	if (update->dsc_config) {
		struct dc_dsc_config old_dsc_cfg = stream->timing.dsc_cfg;
		uint32_t old_dsc_enabled = stream->timing.flags.DSC;
		uint32_t enable_dsc = (update->dsc_config->num_slices_h != 0 &&
				       update->dsc_config->num_slices_v != 0);

		/* Use a temporary context for validating the new DSC config */
		struct dc_state *dsc_validate_context = dc_create_state(dc);

		if (dsc_validate_context) {
			dc_resource_state_copy_construct(dc->current_state, dsc_validate_context);

			stream->timing.dsc_cfg = *update->dsc_config;
			stream->timing.flags.DSC = enable_dsc;
			if (!dc->res_pool->funcs->validate_bandwidth(dc, dsc_validate_context, true)) {
				stream->timing.dsc_cfg = old_dsc_cfg;
				stream->timing.flags.DSC = old_dsc_enabled;
				update->dsc_config = NULL;
			}

			dc_release_state(dsc_validate_context);
		} else {
			DC_ERROR("Failed to allocate new validate context for DSC change\n");
			update->dsc_config = NULL;
		}
	}
}

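/*
 * Program the stream-level portion of an update (info frames, gamut remap,
 * output CSC, dither, DSC, MST bandwidth, DPMS, ABM) on the top pipe of
 * @stream.  Everything past the "Full fe update" marker is skipped for
 * FAST updates.
 */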
static void commit_planes_do_stream_update(struct dc *dc,
		struct dc_stream_state *stream,
		struct dc_stream_update *stream_update,
		enum surface_update_type update_type,
		struct dc_state *context)
{
	int j;

	// Stream updates
	for (j = 0; j < dc->res_pool->pipe_count; j++) {
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];

		if (!pipe_ctx->top_pipe && !pipe_ctx->prev_odm_pipe && pipe_ctx->stream == stream) {

			if (stream_update->periodic_interrupt0 &&
					dc->hwss.setup_periodic_interrupt)
				dc->hwss.setup_periodic_interrupt(dc, pipe_ctx, VLINE0);

			if (stream_update->periodic_interrupt1 &&
					dc->hwss.setup_periodic_interrupt)
				dc->hwss.setup_periodic_interrupt(dc, pipe_ctx, VLINE1);

			if ((stream_update->hdr_static_metadata && !stream->use_dynamic_meta) ||
					stream_update->vrr_infopacket ||
					stream_update->vsc_infopacket ||
					stream_update->vsp_infopacket) {
				resource_build_info_frame(pipe_ctx);
				dc->hwss.update_info_frame(pipe_ctx);

				if (dc_is_dp_signal(pipe_ctx->stream->signal))
					dp_source_sequence_trace(pipe_ctx->stream->link, DPCD_SOURCE_SEQ_AFTER_UPDATE_INFO_FRAME);
			}

			if (stream_update->hdr_static_metadata &&
					stream->use_dynamic_meta &&
					dc->hwss.set_dmdata_attributes &&
					pipe_ctx->stream->dmdata_address.quad_part != 0)
				dc->hwss.set_dmdata_attributes(pipe_ctx);

			if (stream_update->gamut_remap)
				dc_stream_set_gamut_remap(dc, stream);

			if (stream_update->output_csc_transform)
				dc_stream_program_csc_matrix(dc, stream);

			if (stream_update->dither_option) {
				struct pipe_ctx *odm_pipe = pipe_ctx->next_odm_pipe;

				resource_build_bit_depth_reduction_params(pipe_ctx->stream,
									&pipe_ctx->stream->bit_depth_params);
				pipe_ctx->stream_res.opp->funcs->opp_program_fmt(pipe_ctx->stream_res.opp,
						&stream->bit_depth_params,
						&stream->clamping);
				while (odm_pipe) {
					odm_pipe->stream_res.opp->funcs->opp_program_fmt(odm_pipe->stream_res.opp,
							&stream->bit_depth_params,
							&stream->clamping);
					odm_pipe = odm_pipe->next_odm_pipe;
				}
			}

			/* Full fe update */
			if (update_type == UPDATE_TYPE_FAST)
				continue;

			if (stream_update->dsc_config)
				dp_update_dsc_config(pipe_ctx);

			if (stream_update->mst_bw_update) {
				if (stream_update->mst_bw_update->is_increase)
					dc_link_increase_mst_payload(pipe_ctx, stream_update->mst_bw_update->mst_stream_bw);
				else
					dc_link_reduce_mst_payload(pipe_ctx, stream_update->mst_bw_update->mst_stream_bw);
			}

			if (stream_update->pending_test_pattern) {
				dc_link_dp_set_test_pattern(stream->link,
					stream->test_pattern.type,
					stream->test_pattern.color_space,
					stream->test_pattern.p_link_settings,
					stream->test_pattern.p_custom_pattern,
					stream->test_pattern.cust_pattern_size);
			}

			if (stream_update->dpms_off) {
				if (*stream_update->dpms_off) {
					core_link_disable_stream(pipe_ctx);
					/* for dpms, keep acquired resources */
					if (pipe_ctx->stream_res.audio && !dc->debug.az_endpoint_mute_only)
						pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);

					dc->optimized_required = true;

				} else {
					if (get_seamless_boot_stream_count(context) == 0)
						dc->hwss.prepare_bandwidth(dc, dc->current_state);

					core_link_enable_stream(dc->current_state, pipe_ctx);
				}
			}

			if (stream_update->abm_level && pipe_ctx->stream_res.abm) {
				bool should_program_abm = true;

				// if otg funcs are defined, check if blanked before programming
				if (pipe_ctx->stream_res.tg->funcs->is_blanked)
					if (pipe_ctx->stream_res.tg->funcs->is_blanked(pipe_ctx->stream_res.tg))
						should_program_abm = false;

				if (should_program_abm) {
					if (*stream_update->abm_level == ABM_LEVEL_IMMEDIATE_DISABLE) {
						dc->hwss.set_abm_immediate_disable(pipe_ctx);
					} else {
						pipe_ctx->stream_res.abm->funcs->set_abm_level(
							pipe_ctx->stream_res.abm, stream->abm_level);
					}
				}
			}
		}
	}
}

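/*
 * Core plane-commit sequence: take the pipe/interdependent locks, apply
 * stream-level updates, program front ends (FULL) or flip addresses and
 * triple-buffer state (FAST), then unlock and run the post-unlock
 * programming.  FAST updates bypass bandwidth reprogramming entirely.
 */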
static void commit_planes_for_stream(struct dc *dc,
		struct dc_surface_update *srf_updates,
		int surface_count,
		struct dc_stream_state *stream,
		struct dc_stream_update *stream_update,
		enum surface_update_type update_type,
		struct dc_state *context)
{
	int i, j;
	struct pipe_ctx *top_pipe_to_program = NULL;
	bool should_lock_all_pipes = (update_type != UPDATE_TYPE_FAST);

	dc_z10_restore(dc);

	if (get_seamless_boot_stream_count(context) > 0 && surface_count > 0) {
		/* Optimize seamless boot flag keeps clocks and watermarks high until
		 * first flip. After first flip, optimization is required to lower
		 * bandwidth. Important to note that it is expected UEFI will
		 * only light up a single display on POST, therefore we only expect
		 * one stream with seamless boot flag set.
		 */
		if (stream->apply_seamless_boot_optimization) {
			stream->apply_seamless_boot_optimization = false;

			if (get_seamless_boot_stream_count(context) == 0)
				dc->optimized_required = true;
		}
	}

	if (update_type == UPDATE_TYPE_FULL) {
		dc_allow_idle_optimizations(dc, false);

		if (get_seamless_boot_stream_count(context) == 0)
			dc->hwss.prepare_bandwidth(dc, context);

		context_clock_trace(dc, context);
	}

	for (j = 0; j < dc->res_pool->pipe_count; j++) {
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];

		if (!pipe_ctx->top_pipe &&
			!pipe_ctx->prev_odm_pipe &&
			pipe_ctx->stream &&
			pipe_ctx->stream == stream) {
			top_pipe_to_program = pipe_ctx;
		}
	}

	if (stream->test_pattern.type != DP_TEST_PATTERN_VIDEO_MODE) {
		struct pipe_ctx *mpcc_pipe;
		struct pipe_ctx *odm_pipe;

		for (mpcc_pipe = top_pipe_to_program; mpcc_pipe; mpcc_pipe = mpcc_pipe->bottom_pipe)
			for (odm_pipe = mpcc_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
				odm_pipe->ttu_regs.min_ttu_vblank = MAX_TTU;
	}

	if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed)
		if (top_pipe_to_program &&
			top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) {
			if (should_use_dmub_lock(stream->link)) {
				union dmub_hw_lock_flags hw_locks = { 0 };
				struct dmub_hw_lock_inst_flags inst_flags = { 0 };

				hw_locks.bits.lock_dig = 1;
				inst_flags.dig_inst = top_pipe_to_program->stream_res.tg->inst;

				dmub_hw_lock_mgr_cmd(dc->ctx->dmub_srv,
							true,
							&hw_locks,
							&inst_flags);
			} else
				top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable(
						top_pipe_to_program->stream_res.tg);
		}

	if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
		dc->hwss.interdependent_update_lock(dc, context, true);
	} else {
		/* Lock the top pipe while updating plane addrs, since freesync requires
		 * plane addr update event triggers to be synchronized.
		 * top_pipe_to_program is expected to never be NULL.
		 */
		dc->hwss.pipe_control_lock(dc, top_pipe_to_program, true);
	}

	// Stream updates
	if (stream_update)
		commit_planes_do_stream_update(dc, stream, stream_update, update_type, context);

	if (surface_count == 0) {
		/*
		 * In case of turning off screen, no need to program front end a second time.
		 * Just return after programming blank.
		 */
		if (dc->hwss.apply_ctx_for_surface)
			dc->hwss.apply_ctx_for_surface(dc, stream, 0, context);
		if (dc->hwss.program_front_end_for_ctx)
			dc->hwss.program_front_end_for_ctx(dc, context);

		if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
			dc->hwss.interdependent_update_lock(dc, context, false);
		} else {
			dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false);
		}
		dc->hwss.post_unlock_program_front_end(dc, context);
		return;
	}

	if (!IS_DIAG_DC(dc->ctx->dce_environment)) {
		for (i = 0; i < surface_count; i++) {
			struct dc_plane_state *plane_state = srf_updates[i].surface;

			/* set logical flag for lock/unlock use */
			for (j = 0; j < dc->res_pool->pipe_count; j++) {
				struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];

				if (!pipe_ctx->plane_state)
					continue;
				if (!should_update_pipe_for_plane(context, pipe_ctx, plane_state))
					continue;
				pipe_ctx->plane_state->triplebuffer_flips = false;
				if (update_type == UPDATE_TYPE_FAST &&
					dc->hwss.program_triplebuffer != NULL &&
					!pipe_ctx->plane_state->flip_immediate && dc->debug.enable_tri_buf) {
						/* triple buffer for VUpdate only */
						pipe_ctx->plane_state->triplebuffer_flips = true;
				}
			}
			if (update_type == UPDATE_TYPE_FULL) {
				/* force vsync flip when reconfiguring pipes to prevent underflow */
				plane_state->flip_immediate = false;
			}
		}
	}

	// Update Type FULL, Surface updates
	for (j = 0; j < dc->res_pool->pipe_count; j++) {
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];

		if (!pipe_ctx->top_pipe &&
			!pipe_ctx->prev_odm_pipe &&
			should_update_pipe_for_stream(context, pipe_ctx, stream)) {
			struct dc_stream_status *stream_status = NULL;

			if (!pipe_ctx->plane_state)
				continue;

			/* Full front end update */
			if (update_type == UPDATE_TYPE_FAST)
				continue;

			ASSERT(!pipe_ctx->plane_state->triplebuffer_flips);

			if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) {
				/* turn off triple buffer for full update */
				dc->hwss.program_triplebuffer(
					dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips);
			}
			stream_status =
				stream_get_status(context, pipe_ctx->stream);

			if (dc->hwss.apply_ctx_for_surface)
				dc->hwss.apply_ctx_for_surface(
					dc, pipe_ctx->stream, stream_status->plane_count, context);
		}
	}
	if (dc->hwss.program_front_end_for_ctx && update_type != UPDATE_TYPE_FAST) {
		dc->hwss.program_front_end_for_ctx(dc, context);
		if (dc->debug.validate_dml_output) {
			for (i = 0; i < dc->res_pool->pipe_count; i++) {
				struct pipe_ctx *cur_pipe = &context->res_ctx.pipe_ctx[i];

				if (cur_pipe->stream == NULL)
					continue;

				cur_pipe->plane_res.hubp->funcs->validate_dml_output(
						cur_pipe->plane_res.hubp, dc->ctx,
						&context->res_ctx.pipe_ctx[i].rq_regs,
						&context->res_ctx.pipe_ctx[i].dlg_regs,
						&context->res_ctx.pipe_ctx[i].ttu_regs);
			}
		}
	}

	// Update Type FAST, Surface updates
	if (update_type == UPDATE_TYPE_FAST) {
		if (dc->hwss.set_flip_control_gsl)
			for (i = 0; i < surface_count; i++) {
				struct dc_plane_state *plane_state = srf_updates[i].surface;

				for (j = 0; j < dc->res_pool->pipe_count; j++) {
					struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];

					if (!should_update_pipe_for_stream(context, pipe_ctx, stream))
						continue;

					if (!should_update_pipe_for_plane(context, pipe_ctx, plane_state))
						continue;

					// GSL has to be used for flip immediate
					dc->hwss.set_flip_control_gsl(pipe_ctx,
							pipe_ctx->plane_state->flip_immediate);
				}
			}

		/* Perform requested Updates */
		for (i = 0; i < surface_count; i++) {
			struct dc_plane_state *plane_state = srf_updates[i].surface;

			for (j = 0; j < dc->res_pool->pipe_count; j++) {
				struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];

				if (!should_update_pipe_for_stream(context, pipe_ctx, stream))
					continue;

				if (!should_update_pipe_for_plane(context, pipe_ctx, plane_state))
					continue;

				/* program triple buffer after lock, based on flip type */
				if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) {
					/* only enable triple buffering for fast updates */
					dc->hwss.program_triplebuffer(
						dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips);
				}
				if (pipe_ctx->plane_state->update_flags.bits.addr_update)
					dc->hwss.update_plane_addr(dc, pipe_ctx);
			}
		}
	}

	if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
		dc->hwss.interdependent_update_lock(dc, context, false);
	} else {
		dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false);
	}

	if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed)
		if (top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) {
			top_pipe_to_program->stream_res.tg->funcs->wait_for_state(
					top_pipe_to_program->stream_res.tg,
					CRTC_STATE_VACTIVE);
			top_pipe_to_program->stream_res.tg->funcs->wait_for_state(
					top_pipe_to_program->stream_res.tg,
					CRTC_STATE_VBLANK);
			top_pipe_to_program->stream_res.tg->funcs->wait_for_state(
					top_pipe_to_program->stream_res.tg,
					CRTC_STATE_VACTIVE);

			if (stream && should_use_dmub_lock(stream->link)) {
				union dmub_hw_lock_flags hw_locks = { 0 };
				struct dmub_hw_lock_inst_flags inst_flags = { 0 };

				hw_locks.bits.lock_dig = 1;
				inst_flags.dig_inst = top_pipe_to_program->stream_res.tg->inst;

				dmub_hw_lock_mgr_cmd(dc->ctx->dmub_srv,
							false,
							&hw_locks,
							&inst_flags);
			} else
				top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_disable(
					top_pipe_to_program->stream_res.tg);
		}

	if (update_type != UPDATE_TYPE_FAST)
		dc->hwss.post_unlock_program_front_end(dc, context);

	// Fire manual trigger only when bottom plane is flipped
	for (j = 0; j < dc->res_pool->pipe_count; j++) {
		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];

		if (!pipe_ctx->plane_state)
			continue;

		if (pipe_ctx->bottom_pipe || pipe_ctx->next_odm_pipe ||
				!pipe_ctx->stream || !should_update_pipe_for_stream(context, pipe_ctx, stream) ||
				!pipe_ctx->plane_state->update_flags.bits.addr_update ||
				pipe_ctx->plane_state->skip_manual_trigger)
			continue;

		if (pipe_ctx->stream_res.tg->funcs->program_manual_trigger)
			pipe_ctx->stream_res.tg->funcs->program_manual_trigger(pipe_ctx->stream_res.tg);
	}
}

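/*
 * Top-level entry for committing surface and stream updates to a stream.
 * Determines the update type, builds a new context for FULL updates (or
 * reuses the current one otherwise), copies the updates into the stream
 * and planes, revalidates bandwidth for FULL updates, and hands off to
 * commit_planes_for_stream().
 */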
void dc_commit_updates_for_stream(struct dc *dc,
		struct dc_surface_update *srf_updates,
		int surface_count,
		struct dc_stream_state *stream,
		struct dc_stream_update *stream_update,
		struct dc_state *state)
{
	const struct dc_stream_status *stream_status;
	enum surface_update_type update_type;
	struct dc_state *context;
	struct dc_context *dc_ctx = dc->ctx;
	int i, j;

	stream_status = dc_stream_get_status(stream);
	context = dc->current_state;

	update_type = dc_check_update_surfaces_for_stream(
				dc, srf_updates, surface_count, stream_update, stream_status);

	if (update_type >= update_surface_trace_level)
		update_surface_trace(dc, srf_updates, surface_count);


	if (update_type >= UPDATE_TYPE_FULL) {

		/* initialize scratch memory for building context */
		context = dc_create_state(dc);
		if (context == NULL) {
			DC_ERROR("Failed to allocate new validate context!\n");
			return;
		}

		dc_resource_state_copy_construct(state, context);

		for (i = 0; i < dc->res_pool->pipe_count; i++) {
			struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i];
			struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];

			if (new_pipe->plane_state && new_pipe->plane_state != old_pipe->plane_state)
				new_pipe->plane_state->force_full_update = true;
		}
	} else if (update_type == UPDATE_TYPE_FAST && dc_ctx->dce_version >= DCE_VERSION_MAX) {
		/*
		 * Previous frame finished and HW is ready for optimization.
		 *
		 * Only relevant for DCN behavior, where we can guarantee the optimization
		 * is safe to apply - retain the legacy behavior for DCE.
		 */
		dc_post_update_surfaces_to_stream(dc);
	}


	for (i = 0; i < surface_count; i++) {
		struct dc_plane_state *surface = srf_updates[i].surface;

		copy_surface_update_to_plane(surface, &srf_updates[i]);

		if (update_type >= UPDATE_TYPE_MED) {
			for (j = 0; j < dc->res_pool->pipe_count; j++) {
				struct pipe_ctx *pipe_ctx =
					&context->res_ctx.pipe_ctx[j];

				if (pipe_ctx->plane_state != surface)
					continue;

				resource_build_scaling_params(pipe_ctx);
			}
		}
	}

	copy_stream_update_to_stream(dc, context, stream, stream_update);

	if (update_type >= UPDATE_TYPE_FULL) {
		if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) {
			DC_ERROR("Mode validation failed for stream update!\n");
			dc_release_state(context);
			return;
		}
	}

	TRACE_DC_PIPE_STATE(pipe_ctx, i, MAX_PIPES);

	commit_planes_for_stream(
				dc,
				srf_updates,
				surface_count,
				stream,
				stream_update,
				update_type,
				context);
	/* update current_state */
	if (dc->current_state != context) {

		struct dc_state *old = dc->current_state;

		dc->current_state = context;
		dc_release_state(old);

		for (i = 0; i < dc->res_pool->pipe_count; i++) {
			struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];

			if (pipe_ctx->plane_state && pipe_ctx->stream == stream)
				pipe_ctx->plane_state->force_full_update = false;
		}
	}

	/* Legacy optimization path for DCE. */
	if (update_type >= UPDATE_TYPE_FULL && dc_ctx->dce_version < DCE_VERSION_MAX) {
		dc_post_update_surfaces_to_stream(dc);
		TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce);
	}
}

dc_get_current_stream_count(struct dc * dc)3235 uint8_t dc_get_current_stream_count(struct dc *dc)
3236 {
3237 	return dc->current_state->stream_count;
3238 }
3239 
dc_get_stream_at_index(struct dc * dc,uint8_t i)3240 struct dc_stream_state *dc_get_stream_at_index(struct dc *dc, uint8_t i)
3241 {
3242 	if (i < dc->current_state->stream_count)
3243 		return dc->current_state->streams[i];
3244 	return NULL;
3245 }
3246 
dc_stream_find_from_link(const struct dc_link * link)3247 struct dc_stream_state *dc_stream_find_from_link(const struct dc_link *link)
3248 {
3249 	uint8_t i;
3250 	struct dc_context *ctx = link->ctx;
3251 
3252 	for (i = 0; i < ctx->dc->current_state->stream_count; i++) {
3253 		if (ctx->dc->current_state->streams[i]->link == link)
3254 			return ctx->dc->current_state->streams[i];
3255 	}
3256 
3257 	return NULL;
3258 }
3259 
3260 enum dc_irq_source dc_interrupt_to_irq_source(
3261 		struct dc *dc,
3262 		uint32_t src_id,
3263 		uint32_t ext_id)
3264 {
3265 	return dal_irq_service_to_irq_source(dc->res_pool->irqs, src_id, ext_id);
3266 }
3267 
3268 /*
3269  * dc_interrupt_set() - Enable/disable an AMD hw interrupt source
3270  */
3271 bool dc_interrupt_set(struct dc *dc, enum dc_irq_source src, bool enable)
3272 {
3274 	if (dc == NULL)
3275 		return false;
3276 
3277 	return dal_irq_service_set(dc->res_pool->irqs, src, enable);
3278 }
3279 
3280 void dc_interrupt_ack(struct dc *dc, enum dc_irq_source src)
3281 {
3282 	dal_irq_service_ack(dc->res_pool->irqs, src);
3283 }
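
/*
 * Usage sketch (illustrative only): the typical DM flow is to map a raw
 * src_id/ext_id pair to a dc_irq_source, enable it, and ack it from the
 * interrupt handler. The src_id/ext_id values are ASIC-specific and are
 * assumed here.
 *
 *	enum dc_irq_source src =
 *		dc_interrupt_to_irq_source(dc, src_id, ext_id);
 *
 *	dc_interrupt_set(dc, src, true);	// enable the source
 *	...
 *	dc_interrupt_ack(dc, src);		// from the interrupt handler
 */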
3284 
3285 void dc_power_down_on_boot(struct dc *dc)
3286 {
3287 	if (dc->ctx->dce_environment != DCE_ENV_VIRTUAL_HW &&
3288 			dc->hwss.power_down_on_boot)
3289 		dc->hwss.power_down_on_boot(dc);
3290 }
3291 
3292 void dc_set_power_state(
3293 	struct dc *dc,
3294 	enum dc_acpi_cm_power_state power_state)
3295 {
3296 	struct kref refcount;
3297 	struct display_mode_lib *dml;
3298 
3299 	if (!dc->current_state)
3300 		return;
3301 
3302 	switch (power_state) {
3303 	case DC_ACPI_CM_POWER_STATE_D0:
3304 		dc_resource_state_construct(dc, dc->current_state);
3305 
3306 		dc_z10_restore(dc);
3307 
3308 		if (dc->ctx->dmub_srv)
3309 			dc_dmub_srv_wait_phy_init(dc->ctx->dmub_srv);
3310 
3311 		dc->hwss.init_hw(dc);
3312 
3313 		if (dc->hwss.init_sys_ctx != NULL &&
3314 			dc->vm_pa_config.valid) {
3315 			dc->hwss.init_sys_ctx(dc->hwseq, dc, &dc->vm_pa_config);
3316 		}
3317 
3318 		break;
3319 	default:
3320 		ASSERT(dc->current_state->stream_count == 0);
3321 		/* Zero out the current context so that on resume we start with
3322 		 * clean state, and dc hw programming optimizations will not
3323 		 * cause any trouble.
3324 		 */
3325 		dml = kzalloc(sizeof(struct display_mode_lib),
3326 				GFP_KERNEL);
3327 
3328 		ASSERT(dml);
3329 		if (!dml)
3330 			return;
3331 
3332 		/* Preserve refcount */
3333 		refcount = dc->current_state->refcount;
3334 		/* Preserve display mode lib */
3335 		memcpy(dml, &dc->current_state->bw_ctx.dml, sizeof(struct display_mode_lib));
3336 
3337 		dc_resource_state_destruct(dc->current_state);
3338 		memset(dc->current_state, 0,
3339 				sizeof(*dc->current_state));
3340 
3341 		dc->current_state->refcount = refcount;
3342 		dc->current_state->bw_ctx.dml = *dml;
3343 
3344 		kfree(dml);
3345 
3346 		break;
3347 	}
3348 }
3349 
3350 void dc_resume(struct dc *dc)
3351 {
3352 	uint32_t i;
3353 
3354 	for (i = 0; i < dc->link_count; i++)
3355 		core_link_resume(dc->links[i]);
3356 }
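
/*
 * Usage sketch (illustrative only): a DM suspend/resume ordering, assuming
 * DC_ACPI_CM_POWER_STATE_D3 is the non-D0 enum value used on suspend (any
 * value other than D0 takes the default branch above).
 *
 *	// suspend path: streams are already torn down at this point
 *	dc_set_power_state(dc, DC_ACPI_CM_POWER_STATE_D3);
 *
 *	// resume path: re-init hw first, then resume the links
 *	dc_set_power_state(dc, DC_ACPI_CM_POWER_STATE_D0);
 *	dc_resume(dc);
 */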
3357 
3358 bool dc_is_dmcu_initialized(struct dc *dc)
3359 {
3360 	struct dmcu *dmcu = dc->res_pool->dmcu;
3361 
3362 	if (dmcu)
3363 		return dmcu->funcs->is_dmcu_initialized(dmcu);
3364 	return false;
3365 }
3366 
3367 bool dc_is_oem_i2c_device_present(
3368 	struct dc *dc,
3369 	size_t slave_address)
3370 {
3371 	if (dc->res_pool->oem_device)
3372 		return dce_i2c_oem_device_present(
3373 			dc->res_pool,
3374 			dc->res_pool->oem_device,
3375 			slave_address);
3376 
3377 	return false;
3378 }
3379 
3380 bool dc_submit_i2c(
3381 		struct dc *dc,
3382 		uint32_t link_index,
3383 		struct i2c_command *cmd)
3384 {
3386 	struct dc_link *link = dc->links[link_index];
3387 	struct ddc_service *ddc = link->ddc;

3388 	return dce_i2c_submit_command(
3389 		dc->res_pool,
3390 		ddc->ddc_pin,
3391 		cmd);
3392 }
3393 
3394 bool dc_submit_i2c_oem(
3395 		struct dc *dc,
3396 		struct i2c_command *cmd)
3397 {
3398 	struct ddc_service *ddc = dc->res_pool->oem_device;

3399 	return dce_i2c_submit_command(
3400 		dc->res_pool,
3401 		ddc->ddc_pin,
3402 		cmd);
3403 }
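
/*
 * Usage sketch (illustrative only): a single-payload I2C write through
 * dc_submit_i2c(). The i2c_payload/i2c_command field names are assumptions
 * based on how dc uses these structs elsewhere; the 7-bit address, data
 * bytes, and speed value are made up for illustration.
 *
 *	uint8_t data[2] = { 0x00, 0x01 };
 *	struct i2c_payload payload = {
 *		.write = true,
 *		.address = 0x37,
 *		.length = sizeof(data),
 *		.data = data,
 *	};
 *	struct i2c_command cmd = {
 *		.payloads = &payload,
 *		.number_of_payloads = 1,
 *		.speed = 100,	// assumed kHz
 *	};
 *
 *	bool ok = dc_submit_i2c(dc, link_index, &cmd);
 */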
3404 
3405 static bool link_add_remote_sink_helper(struct dc_link *dc_link, struct dc_sink *sink)
3406 {
3407 	if (dc_link->sink_count >= MAX_SINKS_PER_LINK) {
3408 		BREAK_TO_DEBUGGER();
3409 		return false;
3410 	}
3411 
3412 	dc_sink_retain(sink);
3413 
3414 	dc_link->remote_sinks[dc_link->sink_count] = sink;
3415 	dc_link->sink_count++;
3416 
3417 	return true;
3418 }
3419 
3420 /*
3421  * dc_link_add_remote_sink() - Create a sink and attach it to an existing link
3422  *
3423  * EDID length is in bytes
3424  */
3425 struct dc_sink *dc_link_add_remote_sink(
3426 		struct dc_link *link,
3427 		const uint8_t *edid,
3428 		int len,
3429 		struct dc_sink_init_data *init_data)
3430 {
3431 	struct dc_sink *dc_sink;
3432 	enum dc_edid_status edid_status;
3433 
3434 	if (len > DC_MAX_EDID_BUFFER_SIZE) {
3435 		dm_error("Max EDID buffer size breached!\n");
3436 		return NULL;
3437 	}
3438 
3439 	if (!init_data) {
3440 		BREAK_TO_DEBUGGER();
3441 		return NULL;
3442 	}
3443 
3444 	if (!init_data->link) {
3445 		BREAK_TO_DEBUGGER();
3446 		return NULL;
3447 	}
3448 
3449 	dc_sink = dc_sink_create(init_data);
3450 
3451 	if (!dc_sink)
3452 		return NULL;
3453 
3454 	memmove(dc_sink->dc_edid.raw_edid, edid, len);
3455 	dc_sink->dc_edid.length = len;
3456 
3457 	if (!link_add_remote_sink_helper(
3458 			link,
3459 			dc_sink))
3460 		goto fail_add_sink;
3461 
3462 	edid_status = dm_helpers_parse_edid_caps(
3463 			link,
3464 			&dc_sink->dc_edid,
3465 			&dc_sink->edid_caps);
3466 
3467 	/*
3468 	 * Treat the device as having no EDID if
3469 	 * EDID parsing fails.
3470 	 */
3471 	if (edid_status != EDID_OK) {
3472 		dc_sink->dc_edid.length = 0;
3473 		dm_error("Bad EDID, status %d!\n", edid_status);
3474 	}
3475 
3476 	return dc_sink;
3477 
3478 fail_add_sink:
3479 	dc_sink_release(dc_sink);
3480 	return NULL;
3481 }
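
/*
 * Usage sketch (illustrative only): adding an MST remote sink once the DM
 * has fetched the remote EDID. Only init_data->link is validated above;
 * the sink_signal field and its SIGNAL_TYPE_DISPLAY_PORT_MST value are
 * assumptions for illustration.
 *
 *	struct dc_sink_init_data init_data = {
 *		.link = link,
 *		.sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST,
 *	};
 *	struct dc_sink *sink =
 *		dc_link_add_remote_sink(link, edid_buf, edid_len, &init_data);
 *
 *	if (!sink)
 *		return -ENOMEM;	// or propagate a DM-specific error
 */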
3482 
3483 /*
3484  * dc_link_remove_remote_sink() - Remove a remote sink from a dc_link
3485  *
3486  * Note that this just removes the struct dc_sink - it doesn't
3487  * program hardware or alter other members of dc_link
3488  */
3489 void dc_link_remove_remote_sink(struct dc_link *link, struct dc_sink *sink)
3490 {
3491 	int i;
3492 
3493 	if (!link->sink_count) {
3494 		BREAK_TO_DEBUGGER();
3495 		return;
3496 	}
3497 
3498 	for (i = 0; i < link->sink_count; i++) {
3499 		if (link->remote_sinks[i] == sink) {
3500 			dc_sink_release(sink);
3501 			link->remote_sinks[i] = NULL;
3502 
3503 			/* shrink the array to remove the empty slot */
3504 			while (i < link->sink_count - 1) {
3505 				link->remote_sinks[i] = link->remote_sinks[i+1];
3506 				i++;
3507 			}
3508 			link->remote_sinks[i] = NULL;
3509 			link->sink_count--;
3510 			return;
3511 		}
3512 	}
3513 }
3514 
3515 void get_clock_requirements_for_state(struct dc_state *state, struct AsicStateEx *info)
3516 {
3517 	info->displayClock				= (unsigned int)state->bw_ctx.bw.dcn.clk.dispclk_khz;
3518 	info->engineClock				= (unsigned int)state->bw_ctx.bw.dcn.clk.dcfclk_khz;
3519 	info->memoryClock				= (unsigned int)state->bw_ctx.bw.dcn.clk.dramclk_khz;
3520 	info->maxSupportedDppClock		= (unsigned int)state->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz;
3521 	info->dppClock					= (unsigned int)state->bw_ctx.bw.dcn.clk.dppclk_khz;
3522 	info->socClock					= (unsigned int)state->bw_ctx.bw.dcn.clk.socclk_khz;
3523 	info->dcfClockDeepSleep			= (unsigned int)state->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz;
3524 	info->fClock					= (unsigned int)state->bw_ctx.bw.dcn.clk.fclk_khz;
3525 	info->phyClock					= (unsigned int)state->bw_ctx.bw.dcn.clk.phyclk_khz;
3526 }

3527 enum dc_status dc_set_clock(struct dc *dc, enum dc_clock_type clock_type, uint32_t clk_khz, uint32_t stepping)
3528 {
3529 	if (dc->hwss.set_clock)
3530 		return dc->hwss.set_clock(dc, clock_type, clk_khz, stepping);
3531 	return DC_ERROR_UNEXPECTED;
3532 }

3533 void dc_get_clock(struct dc *dc, enum dc_clock_type clock_type, struct dc_clock_config *clock_cfg)
3534 {
3535 	if (dc->hwss.get_clock)
3536 		dc->hwss.get_clock(dc, clock_type, clock_cfg);
3537 }
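
/*
 * Usage sketch (illustrative only): a query-then-set of a clock through the
 * optional hw sequencer hooks above. dc_clock_config contents are not shown
 * here and are left opaque; clock_type and new_khz are assumed inputs.
 *
 *	struct dc_clock_config cfg = {0};
 *
 *	dc_get_clock(dc, clock_type, &cfg);
 *	if (dc_set_clock(dc, clock_type, new_khz, 0) != DC_OK)
 *		;	// no set_clock hook, or the request was rejected
 */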
3538 
3539 /* Enable/disable eDP PSR without specifying a stream for eDP. */
3540 bool dc_set_psr_allow_active(struct dc *dc, bool enable)
3541 {
3542 	int i;
3543 	bool allow_active;
3544 
3545 	for (i = 0; i < dc->current_state->stream_count ; i++) {
3546 		struct dc_link *link;
3547 		struct dc_stream_state *stream = dc->current_state->streams[i];
3548 
3549 		link = stream->link;
3550 		if (!link)
3551 			continue;
3552 
3553 		if (link->psr_settings.psr_feature_enabled) {
3554 			if (enable && !link->psr_settings.psr_allow_active) {
3555 				allow_active = true;
3556 				if (!dc_link_set_psr_allow_active(link, &allow_active, false, false, NULL))
3557 					return false;
3558 			} else if (!enable && link->psr_settings.psr_allow_active) {
3559 				allow_active = false;
3560 				if (!dc_link_set_psr_allow_active(link, &allow_active, true, false, NULL))
3561 					return false;
3562 			}
3563 		}
3564 	}
3565 
3566 	return true;
3567 }
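
/*
 * Usage sketch (illustrative only): the DM can force PSR inactive across an
 * operation that must scan out live data, then re-allow it afterwards.
 *
 *	if (!dc_set_psr_allow_active(dc, false))
 *		return;	// a link rejected the PSR state change
 *	... perform the operation ...
 *	dc_set_psr_allow_active(dc, true);
 */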
3568 
3569 void dc_allow_idle_optimizations(struct dc *dc, bool allow)
3570 {
3571 	if (dc->debug.disable_idle_power_optimizations)
3572 		return;
3573 
3574 	if (dc->clk_mgr != NULL && dc->clk_mgr->funcs->is_smu_present)
3575 		if (!dc->clk_mgr->funcs->is_smu_present(dc->clk_mgr))
3576 			return;
3577 
3578 	if (allow == dc->idle_optimizations_allowed)
3579 		return;
3580 
3581 	if (dc->hwss.apply_idle_power_optimizations && dc->hwss.apply_idle_power_optimizations(dc, allow))
3582 		dc->idle_optimizations_allowed = allow;
3583 }
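
/*
 * Usage sketch (illustrative only): bracketing direct hardware access with
 * idle optimizations disallowed, so power features cannot change state
 * underneath the access.
 *
 *	dc_allow_idle_optimizations(dc, false);
 *	... touch display registers ...
 *	dc_allow_idle_optimizations(dc, true);
 */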
3584 
3585 /*
3586  * blank all streams, and set min and max memory clock to
3587  * lowest and highest DPM level, respectively
3588  */
3589 void dc_unlock_memory_clock_frequency(struct dc *dc)
3590 {
3591 	unsigned int i;
3592 
3593 	for (i = 0; i < MAX_PIPES; i++)
3594 		if (dc->current_state->res_ctx.pipe_ctx[i].plane_state)
3595 			core_link_disable_stream(&dc->current_state->res_ctx.pipe_ctx[i]);
3596 
3597 	dc->clk_mgr->funcs->set_hard_min_memclk(dc->clk_mgr, false);
3598 	dc->clk_mgr->funcs->set_hard_max_memclk(dc->clk_mgr);
3599 }
3600 
3601 /*
3602  * set min memory clock to the min required for current mode,
3603  * max to maxDPM, and unblank streams
3604  */
3605 void dc_lock_memory_clock_frequency(struct dc *dc)
3606 {
3607 	unsigned int i;
3608 
3609 	dc->clk_mgr->funcs->get_memclk_states_from_smu(dc->clk_mgr);
3610 	dc->clk_mgr->funcs->set_hard_min_memclk(dc->clk_mgr, true);
3611 	dc->clk_mgr->funcs->set_hard_max_memclk(dc->clk_mgr);
3612 
3613 	for (i = 0; i < MAX_PIPES; i++)
3614 		if (dc->current_state->res_ctx.pipe_ctx[i].plane_state)
3615 			core_link_enable_stream(dc->current_state, &dc->current_state->res_ctx.pipe_ctx[i]);
3616 }
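
/*
 * Usage sketch (illustrative only): the two calls above are intended to be
 * used as a pair around an operation that needs the full memclk DPM range.
 *
 *	dc_unlock_memory_clock_frequency(dc);	// blank, open min/max range
 *	... operation requiring unconstrained memclk ...
 *	dc_lock_memory_clock_frequency(dc);	// re-pin min for the mode, unblank
 */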
3617 
3618 static void blank_and_force_memclk(struct dc *dc, bool apply, unsigned int memclk_mhz)
3619 {
3620 	struct dc_state *context = dc->current_state;
3621 	struct hubp *hubp;
3622 	struct pipe_ctx *pipe;
3623 	int i;
3624 
3625 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
3626 		pipe = &context->res_ctx.pipe_ctx[i];
3627 
3628 		if (pipe->stream != NULL) {
3629 			dc->hwss.disable_pixel_data(dc, pipe, true);
3630 
3631 			// wait for double buffer
3632 			pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, CRTC_STATE_VACTIVE);
3633 			pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, CRTC_STATE_VBLANK);
3634 			pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, CRTC_STATE_VACTIVE);
3635 
3636 			hubp = pipe->plane_res.hubp;
3637 			hubp->funcs->set_blank_regs(hubp, true);
3638 		}
3639 	}
3640 
3641 	dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, memclk_mhz);
3642 	dc->clk_mgr->funcs->set_min_memclk(dc->clk_mgr, memclk_mhz);
3643 
3644 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
3645 		pipe = &context->res_ctx.pipe_ctx[i];
3646 
3647 		if (pipe->stream != NULL) {
3648 			dc->hwss.disable_pixel_data(dc, pipe, false);
3649 
3650 			hubp = pipe->plane_res.hubp;
3651 			hubp->funcs->set_blank_regs(hubp, false);
3652 		}
3653 	}
3654 }
3655 
3657 /**
3658  * dc_enable_dcmode_clk_limit() - lower clocks in dc (battery) mode
3659  * @dc: pointer to dc of the dm calling this
3660  * @enable: True = transition to DC mode, false = transition back to AC mode
3661  *
3662  * Some SoCs define additional clock limits when in DC mode; the DM should
3663  * invoke this function when the platform undergoes a power-source transition
3664  * so DC can apply/unapply the limit. This interface may be disruptive to
3665  * the onscreen content.
3666  *
3667  * Context: Triggered by OS through DM interface, or manually by escape calls.
3668  * Need to hold the dc lock when doing so.
3669  *
3670  * Return: none (void function)
3671  *
3672  */
3673 void dc_enable_dcmode_clk_limit(struct dc *dc, bool enable)
3674 {
3675 	uint32_t hw_internal_rev = dc->ctx->asic_id.hw_internal_rev;
3676 	unsigned int softMax, maxDPM, funcMin;
3677 	bool p_state_change_support;
3678 
3679 	if (!ASICREV_IS_BEIGE_GOBY_P(hw_internal_rev))
3680 		return;
3681 
3682 	softMax = dc->clk_mgr->bw_params->dc_mode_softmax_memclk;
3683 	maxDPM = dc->clk_mgr->bw_params->clk_table.entries[dc->clk_mgr->bw_params->clk_table.num_entries - 1].memclk_mhz;
3684 	funcMin = (dc->clk_mgr->clks.dramclk_khz + 999) / 1000;
3685 	p_state_change_support = dc->clk_mgr->clks.p_state_change_support;
3686 
3687 	if (enable && !dc->clk_mgr->dc_mode_softmax_enabled) {
3688 		if (p_state_change_support) {
3689 			if (funcMin <= softMax)
3690 				dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, softMax);
3691 			// else: No-Op
3692 		} else {
3693 			if (funcMin <= softMax)
3694 				blank_and_force_memclk(dc, true, softMax);
3695 			// else: No-Op
3696 		}
3697 	} else if (!enable && dc->clk_mgr->dc_mode_softmax_enabled) {
3698 		if (p_state_change_support) {
3699 			if (funcMin <= softMax)
3700 				dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, maxDPM);
3701 			// else: No-Op
3702 		} else {
3703 			if (funcMin <= softMax)
3704 				blank_and_force_memclk(dc, true, maxDPM);
3705 			// else: No-Op
3706 		}
3707 	}
3708 	dc->clk_mgr->dc_mode_softmax_enabled = enable;
3709 }
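
/*
 * Usage sketch (illustrative only): the DM would call this from its AC/DC
 * power-source event handler; dm_on_power_source_change() is a hypothetical
 * DM callback.
 *
 *	static void dm_on_power_source_change(struct dc *dc, bool on_battery)
 *	{
 *		// true -> apply the DC-mode softmax, false -> restore max DPM
 *		dc_enable_dcmode_clk_limit(dc, on_battery);
 *	}
 */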

3710 bool dc_is_plane_eligible_for_idle_optimizations(struct dc *dc, struct dc_plane_state *plane,
3711 		struct dc_cursor_attributes *cursor_attr)
3712 {
3713 	if (dc->hwss.does_plane_fit_in_mall && dc->hwss.does_plane_fit_in_mall(dc, plane, cursor_attr))
3714 		return true;
3715 	return false;
3716 }
3717 
3718 /* cleanup on driver unload */
3719 void dc_hardware_release(struct dc *dc)
3720 {
3721 	if (dc->hwss.hardware_release)
3722 		dc->hwss.hardware_release(dc);
3723 }
3724 
3725 /**
3726  * dc_is_dmub_outbox_supported - Check if DMUB FW supports outbox notifications
3727  * @dc: dc structure
3728  *
3729  * If outbox notifications are supported, the DM should register its outbox
3730  * interrupt prior to actually enabling them via dc_enable_dmub_outbox().
3731  *
3732  * Return: True if DMUB FW supports outbox notifications, false otherwise
3733  */
3741 bool dc_is_dmub_outbox_supported(struct dc *dc)
3742 {
3743 	/* DCN31 B0 USB4 DPIA needs dmub notifications for interrupts */
3744 	if (dc->ctx->asic_id.chip_family == FAMILY_YELLOW_CARP &&
3745 	    dc->ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0 &&
3746 	    !dc->debug.dpia_debug.bits.disable_dpia)
3747 		return true;
3748 
3749 	/* dmub aux needs dmub notifications to be enabled */
3750 	return dc->debug.enable_dmub_aux_for_legacy_ddc;
3751 }
3752 
3753 /**
3754  * dc_enable_dmub_notifications - Check support for DMUB outbox notifications
3755  * @dc: dc structure
3756  *
3757  * Calls dc_is_dmub_outbox_supported() to check whether DMUB FW supports
3758  * outbox notifications. All DMs shall switch to dc_is_dmub_outbox_supported();
3759  * this API will be removed after the switch.
3760  *
3761  * Return: True if DMUB FW supports outbox notifications, false otherwise
3762  */
3769 bool dc_enable_dmub_notifications(struct dc *dc)
3770 {
3771 	return dc_is_dmub_outbox_supported(dc);
3772 }
3773 
3774 /**
3775  * dc_enable_dmub_outbox - Enable DMUB unsolicited notifications
3776  * @dc: dc structure
3777  *
3778  * Enables DMUB unsolicited notifications to the x86 host via the outbox.
3779  */
3788 void dc_enable_dmub_outbox(struct dc *dc)
3789 {
3790 	struct dc_context *dc_ctx = dc->ctx;
3791 
3792 	dmub_enable_outbox_notification(dc_ctx->dmub_srv);
3793 	DC_LOG_DC("%s: dmub outbox notifications enabled\n", __func__);
3794 }
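
/*
 * Usage sketch (illustrative only): the expected DM init ordering per the
 * comments above; dm_register_outbox_irq() is a hypothetical DM helper.
 *
 *	if (dc_is_dmub_outbox_supported(dc)) {
 *		dm_register_outbox_irq(dc);	// hook the outbox interrupt first
 *		dc_enable_dmub_outbox(dc);	// then let DMUB raise notifications
 *	}
 */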
3795 
3796 /**
3797  * dc_process_dmub_aux_transfer_async - Submit an aux command to dmub via inbox
3798  * @dc: dc structure
3799  * @link_index: link index
3800  * @payload: aux payload
3801  *
3802  * Sets the port index appropriately for legacy DDC.
3803  * Return: True if successful, false if failure
3804  */
3805 bool dc_process_dmub_aux_transfer_async(struct dc *dc,
3806 				uint32_t link_index,
3807 				struct aux_payload *payload)
3808 {
3809 	uint8_t action;
3810 	union dmub_rb_cmd cmd = {0};
3811 	struct dc_dmub_srv *dmub_srv = dc->ctx->dmub_srv;
3812 
3813 	ASSERT(payload->length <= 16);
3814 
3815 	cmd.dp_aux_access.header.type = DMUB_CMD__DP_AUX_ACCESS;
3816 	cmd.dp_aux_access.header.payload_bytes = 0;
3817 	/* For dpia, ddc_pin is set to NULL */
3818 	if (!dc->links[link_index]->ddc->ddc_pin)
3819 		cmd.dp_aux_access.aux_control.type = AUX_CHANNEL_DPIA;
3820 	else
3821 		cmd.dp_aux_access.aux_control.type = AUX_CHANNEL_LEGACY_DDC;
3822 
3823 	cmd.dp_aux_access.aux_control.instance = dc->links[link_index]->ddc_hw_inst;
3824 	cmd.dp_aux_access.aux_control.sw_crc_enabled = 0;
3825 	cmd.dp_aux_access.aux_control.timeout = 0;
3826 	cmd.dp_aux_access.aux_control.dpaux.address = payload->address;
3827 	cmd.dp_aux_access.aux_control.dpaux.is_i2c_over_aux = payload->i2c_over_aux;
3828 	cmd.dp_aux_access.aux_control.dpaux.length = payload->length;
3829 
3830 	/* set aux action */
3831 	if (payload->i2c_over_aux) {
3832 		if (payload->write) {
3833 			if (payload->mot)
3834 				action = DP_AUX_REQ_ACTION_I2C_WRITE_MOT;
3835 			else
3836 				action = DP_AUX_REQ_ACTION_I2C_WRITE;
3837 		} else {
3838 			if (payload->mot)
3839 				action = DP_AUX_REQ_ACTION_I2C_READ_MOT;
3840 			else
3841 				action = DP_AUX_REQ_ACTION_I2C_READ;
3842 		}
3843 	} else {
3844 		if (payload->write)
3845 			action = DP_AUX_REQ_ACTION_DPCD_WRITE;
3846 		else
3847 			action = DP_AUX_REQ_ACTION_DPCD_READ;
3848 	}
3849 
3850 	cmd.dp_aux_access.aux_control.dpaux.action = action;
3851 
3852 	if (payload->length && payload->write) {
3853 		memcpy(cmd.dp_aux_access.aux_control.dpaux.data,
3854 			payload->data,
3855 			payload->length
3856 			);
3857 	}
3858 
3859 	dc_dmub_srv_cmd_queue(dmub_srv, &cmd);
3860 	dc_dmub_srv_cmd_execute(dmub_srv);
3861 	dc_dmub_srv_wait_idle(dmub_srv);
3862 
3863 	return true;
3864 }
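
/*
 * Usage sketch (illustrative only): a 16-byte native-aux DPCD read request.
 * Only the aux_payload fields consumed by dc_process_dmub_aux_transfer_async()
 * are set; the DPCD address 0x00000 is an assumed target, and the reply
 * arrives later via an outbox notification.
 *
 *	uint8_t buf[16];
 *	struct aux_payload payload = {
 *		.i2c_over_aux = false,	// native aux -> DPCD action
 *		.write = false,		// read
 *		.mot = false,
 *		.address = 0x00000,
 *		.length = sizeof(buf),
 *		.data = buf,
 *	};
 *
 *	dc_process_dmub_aux_transfer_async(dc, link_index, &payload);
 */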
3865 
3866 uint8_t get_link_index_from_dpia_port_index(const struct dc *dc,
3867 					    uint8_t dpia_port_index)
3868 {
3869 	uint8_t index, link_index = 0xFF;
3870 
3871 	for (index = 0; index < dc->link_count; index++) {
3872 		/* ddc_hw_inst has dpia port index for dpia links
3873 		 * and ddc instance for legacy links
3874 		 */
3875 		if (!dc->links[index]->ddc->ddc_pin) {
3876 			if (dc->links[index]->ddc_hw_inst == dpia_port_index) {
3877 				link_index = index;
3878 				break;
3879 			}
3880 		}
3881 	}
3882 	ASSERT(link_index != 0xFF);
3883 	return link_index;
3884 }
3885 
3886 /**
3887  * dc_process_dmub_set_config_async - Submit a SET_CONFIG command to dmub
3888  * @dc: dc structure
3889  * @link_index: link index
3890  * @payload: set_config command payload
3891  * @notify: set_config immediate reply
3892  *
3893  * Submits the set_config command to dmub via inbox message.
3894  *
3895  * Return: True if the reply is immediate (notify->sc_status is valid),
3896  * false if the command is pending and the reply will arrive via outbox
3897  */
3903 bool dc_process_dmub_set_config_async(struct dc *dc,
3904 				uint32_t link_index,
3905 				struct set_config_cmd_payload *payload,
3906 				struct dmub_notification *notify)
3907 {
3908 	union dmub_rb_cmd cmd = {0};
3909 	struct dc_dmub_srv *dmub_srv = dc->ctx->dmub_srv;
3910 	bool is_cmd_complete = true;
3911 
3912 	/* prepare SET_CONFIG command */
3913 	cmd.set_config_access.header.type = DMUB_CMD__DPIA;
3914 	cmd.set_config_access.header.sub_type = DMUB_CMD__DPIA_SET_CONFIG_ACCESS;
3915 
3916 	cmd.set_config_access.set_config_control.instance = dc->links[link_index]->ddc_hw_inst;
3917 	cmd.set_config_access.set_config_control.cmd_pkt.msg_type = payload->msg_type;
3918 	cmd.set_config_access.set_config_control.cmd_pkt.msg_data = payload->msg_data;
3919 
3920 	if (!dc_dmub_srv_cmd_with_reply_data(dmub_srv, &cmd)) {
3921 		/* command is not processed by dmub */
3922 		notify->sc_status = SET_CONFIG_UNKNOWN_ERROR;
3923 		return is_cmd_complete;
3924 	}
3925 
3926 	/* command processed by dmub, if ret_status is 1, it is completed instantly */
3927 	if (cmd.set_config_access.header.ret_status == 1)
3928 		notify->sc_status = cmd.set_config_access.set_config_control.immed_status;
3929 	else
3930 		/* cmd pending, will receive notification via outbox */
3931 		is_cmd_complete = false;
3932 
3933 	return is_cmd_complete;
3934 }
3935 
3936 /**
3937  * dc_process_dmub_set_mst_slots - Submit an MST slot allocation to dmub
3938  * @dc: dc structure
3939  * @link_index: link index
3940  * @mst_alloc_slots: mst slots to be allotted
3941  * @mst_slots_in_use: mst slots in use, returned in the failure case
3942  *
3943  * Submits the mst slot allocation command to dmub via inbox message.
3944  *
3945  * Return: DC_OK if successful, another dc_status value on failure
3946  */
3953 enum dc_status dc_process_dmub_set_mst_slots(const struct dc *dc,
3954 				uint32_t link_index,
3955 				uint8_t mst_alloc_slots,
3956 				uint8_t *mst_slots_in_use)
3957 {
3958 	union dmub_rb_cmd cmd = {0};
3959 	struct dc_dmub_srv *dmub_srv = dc->ctx->dmub_srv;
3960 
3961 	/* prepare MST_ALLOC_SLOTS command */
3962 	cmd.set_mst_alloc_slots.header.type = DMUB_CMD__DPIA;
3963 	cmd.set_mst_alloc_slots.header.sub_type = DMUB_CMD__DPIA_MST_ALLOC_SLOTS;
3964 
3965 	cmd.set_mst_alloc_slots.mst_slots_control.instance = dc->links[link_index]->ddc_hw_inst;
3966 	cmd.set_mst_alloc_slots.mst_slots_control.mst_alloc_slots = mst_alloc_slots;
3967 
3968 	if (!dc_dmub_srv_cmd_with_reply_data(dmub_srv, &cmd))
3969 		/* command is not processed by dmub */
3970 		return DC_ERROR_UNEXPECTED;
3971 
3972 	/* command processed by dmub; any ret_status other than 1 is an error */
3973 	if (cmd.set_mst_alloc_slots.header.ret_status != 1)
3974 		return DC_ERROR_UNEXPECTED;
3976 
3977 	/* command processed and we have a status of 2, mst not enabled in dpia */
3978 	if (cmd.set_mst_alloc_slots.mst_slots_control.immed_status == 2)
3979 		return DC_FAIL_UNSUPPORTED_1;
3980 
3981 	/* previously configured mst alloc and used slots did not match */
3982 	if (cmd.set_mst_alloc_slots.mst_slots_control.immed_status == 3) {
3983 		*mst_slots_in_use = cmd.set_mst_alloc_slots.mst_slots_control.mst_slots_in_use;
3984 		return DC_NOT_SUPPORTED;
3985 	}
3986 
3987 	return DC_OK;
3988 }
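
/*
 * Usage sketch (illustrative only): mapping the return codes above to DM
 * behavior; slots and link_index are assumed inputs.
 *
 *	uint8_t in_use = 0;
 *
 *	switch (dc_process_dmub_set_mst_slots(dc, link_index, slots, &in_use)) {
 *	case DC_OK:
 *		break;	// allocation accepted
 *	case DC_FAIL_UNSUPPORTED_1:
 *		break;	// mst not enabled on this dpia
 *	case DC_NOT_SUPPORTED:
 *		break;	// mismatch; 'in_use' holds the slots actually in use
 *	default:
 *		break;	// DC_ERROR_UNEXPECTED: dmub did not process the command
 *	}
 */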
3989 
3990 /**
3991  * dc_disable_accelerated_mode - disable accelerated mode
3992  * @dc: dc structure
3993  */
3994 void dc_disable_accelerated_mode(struct dc *dc)
3995 {
3996 	bios_set_scratch_acc_mode_change(dc->ctx->dc_bios, 0);
3997 }
3998 
4000 /**
4001  * dc_notify_vsync_int_state - Notify the vsync enable/disable state
4002  * @dc: dc structure
4003  * @stream: stream where the vsync int state changed
4004  * @enable: whether vsync is enabled or disabled
4005  *
4006  * Called when vsync is enabled/disabled. Notifies DMUB to start/stop ABM
4007  * interrupts after steady state is reached.
4008  */
4012 void dc_notify_vsync_int_state(struct dc *dc, struct dc_stream_state *stream, bool enable)
4013 {
4014 	int i;
4015 	int edp_num;
4016 	struct pipe_ctx *pipe = NULL;
4017 	struct dc_link *link = stream->sink->link;
4018 	struct dc_link *edp_links[MAX_NUM_EDP];
4019 
4021 	if (link->psr_settings.psr_feature_enabled)
4022 		return;
4023 
4024 	/* find the primary pipe associated with the stream */
4025 	for (i = 0; i < MAX_PIPES; i++) {
4026 		pipe = &dc->current_state->res_ctx.pipe_ctx[i];
4027 
4028 		if (pipe->stream == stream && pipe->stream_res.tg)
4029 			break;
4030 	}
4031 
4032 	if (i == MAX_PIPES) {
4033 		ASSERT(0);
4034 		return;
4035 	}
4036 
4037 	get_edp_links(dc, edp_links, &edp_num);
4038 
4039 	/* Determine panel inst */
4040 	for (i = 0; i < edp_num; i++) {
4041 		if (edp_links[i] == link)
4042 			break;
4043 	}
4044 
4045 	if (i == edp_num)
4046 		return;
4048 
4049 	if (pipe->stream_res.abm && pipe->stream_res.abm->funcs->set_abm_pause)
4050 		pipe->stream_res.abm->funcs->set_abm_pause(pipe->stream_res.abm, !enable, i, pipe->stream_res.tg->inst);
4051 }

4052 /**
4053  * dc_extended_blank_supported - Decide whether extended blank is supported
4054  * @dc: current DC state
4055  *
4056  * Extended blank is a freesync optimization feature to be enabled in the
4057  * future. During the extra vblank period gained from freesync, we have the
4058  * ability to enter z9/z10.
4059  * Return: True if extended blank is supported, false otherwise
4060  */
4061 bool dc_extended_blank_supported(struct dc *dc)
4062 {
4063 	return dc->debug.extended_blank_optimization && !dc->debug.disable_z10
4064 		&& dc->caps.zstate_support && dc->caps.is_apu;
4065 }
4066