1 /*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 */
24
25 #include "dm_services.h"
26
27 #include "dc.h"
28
29 #include "core_status.h"
30 #include "core_types.h"
31 #include "hw_sequencer.h"
32 #include "dce/dce_hwseq.h"
33
34 #include "resource.h"
35
36 #include "clk_mgr.h"
37 #include "clock_source.h"
38 #include "dc_bios_types.h"
39
40 #include "bios_parser_interface.h"
41 #include "bios/bios_parser_helper.h"
42 #include "include/irq_service_interface.h"
43 #include "transform.h"
44 #include "dmcu.h"
45 #include "dpp.h"
46 #include "timing_generator.h"
47 #include "abm.h"
48 #include "virtual/virtual_link_encoder.h"
49 #include "hubp.h"
50
51 #include "link_hwss.h"
52 #include "link_encoder.h"
53 #include "link_enc_cfg.h"
54
55 #include "dc_link.h"
56 #include "dc_link_ddc.h"
57 #include "dm_helpers.h"
58 #include "mem_input.h"
59
60 #include "dc_link_dp.h"
61 #include "dc_dmub_srv.h"
62
63 #include "dsc.h"
64
65 #include "vm_helper.h"
66
67 #include "dce/dce_i2c.h"
68
69 #include "dmub/dmub_srv.h"
70
71 #include "i2caux_interface.h"
72
73 #include "dce/dmub_psr.h"
74
75 #include "dce/dmub_hw_lock_mgr.h"
76
77 #include "dc_trace.h"
78
79 #include "dce/dmub_outbox.h"
80
81 #define CTX \
82 dc->ctx
83
84 #define DC_LOGGER \
85 dc->ctx->logger
86
87 static const char DC_BUILD_ID[] = "production-build";
88
89 /**
90 * DOC: Overview
91 *
92 * DC is the OS-agnostic component of the amdgpu DC driver.
93 *
94 * DC maintains and validates a set of structs representing the state of the
95 * driver and writes that state to AMD hardware.
96 *
97 * Main DC HW structs:
98 *
99 * struct dc - The central struct. One per driver. Created on driver load,
100 * destroyed on driver unload.
101 *
102 * struct dc_context - One per driver.
103 * Used as a backpointer by most other structs in dc.
104 *
105 * struct dc_link - One per connector (the physical DP, HDMI, miniDP, or eDP
106 * plugpoints). Created on driver load, destroyed on driver unload.
107 *
108 * struct dc_sink - One per display. Created on boot or hotplug.
109 * Destroyed on shutdown or hotunplug. A dc_link can have a local sink
110 * (the display directly attached). It may also have one or more remote
111 * sinks (in the Multi-Stream Transport case).
112 *
113 * struct resource_pool - One per driver. Represents the hw blocks not in the
114 * main pipeline. Not directly accessible by dm.
115 *
116 * Main dc state structs:
117 *
118 * These structs can be created and destroyed as needed. There is a full set of
119 * these structs in dc->current_state representing the currently programmed state.
120 *
121 * struct dc_state - The global DC state to track global state information,
122 * such as bandwidth values.
123 *
124 * struct dc_stream_state - Represents the hw configuration for the pipeline from
125 * a framebuffer to a display. Maps one-to-one with dc_sink.
126 *
127 * struct dc_plane_state - Represents a framebuffer. Each stream has at least one,
128 * and may have more in the Multi-Plane Overlay case.
129 *
130 * struct resource_context - Represents the programmable state of everything in
131 * the resource_pool. Not directly accessible by dm.
132 *
133 * struct pipe_ctx - A member of struct resource_context. Represents the
134 * internal hardware pipeline components. Each dc_plane_state has either
135 * one or two (in the pipe-split case).
136 */
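/*
 * Illustrative sketch (not part of the driver): a display manager (dm)
 * typically exercises the objects above in roughly this order. The
 * init_params and new_ctx values are placeholders and all error handling
 * is omitted.
 *
 *	struct dc *dc = dc_create(&init_params);   // dc, dc_links, resource_pool
 *
 *	dc_hardware_init(dc);                      // program initial HW state
 *	// ... build and validate a dc_state, then:
 *	dc_commit_state(dc, new_ctx);              // apply the new state to HW
 *	// ... on teardown:
 *	dc_destroy(&dc);
 */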
137
138 /*******************************************************************************
139 * Private functions
140 ******************************************************************************/
141
142 static inline void elevate_update_type(enum surface_update_type *original, enum surface_update_type new)
143 {
144 if (new > *original)
145 *original = new;
146 }
147
148 static void destroy_links(struct dc *dc)
149 {
150 uint32_t i;
151
152 for (i = 0; i < dc->link_count; i++) {
153 if (NULL != dc->links[i])
154 link_destroy(&dc->links[i]);
155 }
156 }
157
158 static uint32_t get_num_of_internal_disp(struct dc_link **links, uint32_t num_links)
159 {
160 int i;
161 uint32_t count = 0;
162
163 for (i = 0; i < num_links; i++) {
164 if (links[i]->connector_signal == SIGNAL_TYPE_EDP ||
165 links[i]->is_internal_display)
166 count++;
167 }
168
169 return count;
170 }
171
172 static int get_seamless_boot_stream_count(struct dc_state *ctx)
173 {
174 uint8_t i;
175 uint8_t seamless_boot_stream_count = 0;
176
177 for (i = 0; i < ctx->stream_count; i++)
178 if (ctx->streams[i]->apply_seamless_boot_optimization)
179 seamless_boot_stream_count++;
180
181 return seamless_boot_stream_count;
182 }
183
184 static bool create_links(
185 struct dc *dc,
186 uint32_t num_virtual_links)
187 {
188 int i;
189 int connectors_num;
190 struct dc_bios *bios = dc->ctx->dc_bios;
191
192 dc->link_count = 0;
193
194 connectors_num = bios->funcs->get_connectors_number(bios);
195
196 DC_LOG_DC("BIOS object table - number of connectors: %d", connectors_num);
197
198 if (connectors_num > ENUM_ID_COUNT) {
199 dm_error(
200 "DC: Number of connectors %d exceeds maximum of %d!\n",
201 connectors_num,
202 ENUM_ID_COUNT);
203 return false;
204 }
205
206 dm_output_to_console(
207 "DC: %s: connectors_num: physical:%d, virtual:%d\n",
208 __func__,
209 connectors_num,
210 num_virtual_links);
211
212 for (i = 0; i < connectors_num; i++) {
213 struct link_init_data link_init_params = {0};
214 struct dc_link *link;
215
216 DC_LOG_DC("BIOS object table - printing link object info for connector number: %d, link_index: %d", i, dc->link_count);
217
218 link_init_params.ctx = dc->ctx;
219 /* next BIOS object table connector */
220 link_init_params.connector_index = i;
221 link_init_params.link_index = dc->link_count;
222 link_init_params.dc = dc;
223 link = link_create(&link_init_params);
224
225 if (link) {
226 dc->links[dc->link_count] = link;
227 link->dc = dc;
228 ++dc->link_count;
229 }
230 }
231
232 DC_LOG_DC("BIOS object table - end");
233
234 /* Create a link for each usb4 dpia port */
235 for (i = 0; i < dc->res_pool->usb4_dpia_count; i++) {
236 struct link_init_data link_init_params = {0};
237 struct dc_link *link;
238
239 link_init_params.ctx = dc->ctx;
240 link_init_params.connector_index = i;
241 link_init_params.link_index = dc->link_count;
242 link_init_params.dc = dc;
243 link_init_params.is_dpia_link = true;
244
245 link = link_create(&link_init_params);
246 if (link) {
247 dc->links[dc->link_count] = link;
248 link->dc = dc;
249 ++dc->link_count;
250 }
251 }
252
253 for (i = 0; i < num_virtual_links; i++) {
254 struct dc_link *link = kzalloc(sizeof(*link), GFP_KERNEL);
255 struct encoder_init_data enc_init = {0};
256
257 if (link == NULL) {
258 BREAK_TO_DEBUGGER();
259 goto failed_alloc;
260 }
261
262 link->link_index = dc->link_count;
263 dc->links[dc->link_count] = link;
264 dc->link_count++;
265
266 link->ctx = dc->ctx;
267 link->dc = dc;
268 link->connector_signal = SIGNAL_TYPE_VIRTUAL;
269 link->link_id.type = OBJECT_TYPE_CONNECTOR;
270 link->link_id.id = CONNECTOR_ID_VIRTUAL;
271 link->link_id.enum_id = ENUM_ID_1;
272 link->link_enc = kzalloc(sizeof(*link->link_enc), GFP_KERNEL);
273
274 if (!link->link_enc) {
275 BREAK_TO_DEBUGGER();
276 goto failed_alloc;
277 }
278
279 link->link_status.dpcd_caps = &link->dpcd_caps;
280
281 enc_init.ctx = dc->ctx;
282 enc_init.channel = CHANNEL_ID_UNKNOWN;
283 enc_init.hpd_source = HPD_SOURCEID_UNKNOWN;
284 enc_init.transmitter = TRANSMITTER_UNKNOWN;
285 enc_init.connector = link->link_id;
286 enc_init.encoder.type = OBJECT_TYPE_ENCODER;
287 enc_init.encoder.id = ENCODER_ID_INTERNAL_VIRTUAL;
288 enc_init.encoder.enum_id = ENUM_ID_1;
289 virtual_link_encoder_construct(link->link_enc, &enc_init);
290 }
291
292 dc->caps.num_of_internal_disp = get_num_of_internal_disp(dc->links, dc->link_count);
293
294 return true;
295
296 failed_alloc:
297 return false;
298 }
299
300 /* Create additional DIG link encoder objects if fewer than the platform
301 * supports were created during link construction. This can happen if the
302 * number of physical connectors is less than the number of DIGs.
303 */
304 static bool create_link_encoders(struct dc *dc)
305 {
306 bool res = true;
307 unsigned int num_usb4_dpia = dc->res_pool->res_cap->num_usb4_dpia;
308 unsigned int num_dig_link_enc = dc->res_pool->res_cap->num_dig_link_enc;
309 int i;
310
311 /* A platform without USB4 DPIA endpoints has a fixed mapping between DIG
312 * link encoders and physical display endpoints and does not require
313 * additional link encoder objects.
314 */
315 if (num_usb4_dpia == 0)
316 return res;
317
318 /* Create as many link encoder objects as the platform supports. DPIA
319 * endpoints can be programmably mapped to any DIG.
320 */
321 if (num_dig_link_enc > dc->res_pool->dig_link_enc_count) {
322 for (i = 0; i < num_dig_link_enc; i++) {
323 struct link_encoder *link_enc = dc->res_pool->link_encoders[i];
324
325 if (!link_enc && dc->res_pool->funcs->link_enc_create_minimal) {
326 link_enc = dc->res_pool->funcs->link_enc_create_minimal(dc->ctx,
327 (enum engine_id)(ENGINE_ID_DIGA + i));
328 if (link_enc) {
329 dc->res_pool->link_encoders[i] = link_enc;
330 dc->res_pool->dig_link_enc_count++;
331 } else {
332 res = false;
333 }
334 }
335 }
336 }
337
338 return res;
339 }
340
341 /* Destroy any additional DIG link encoder objects created by
342 * create_link_encoders().
343 * NB: Must only be called after destroy_links().
344 */
345 static void destroy_link_encoders(struct dc *dc)
346 {
347 unsigned int num_usb4_dpia;
348 unsigned int num_dig_link_enc;
349 int i;
350
351 if (!dc->res_pool)
352 return;
353
354 num_usb4_dpia = dc->res_pool->res_cap->num_usb4_dpia;
355 num_dig_link_enc = dc->res_pool->res_cap->num_dig_link_enc;
356
357 /* A platform without USB4 DPIA endpoints has a fixed mapping between DIG
358 * link encoders and physical display endpoints and does not require
359 * additional link encoder objects.
360 */
361 if (num_usb4_dpia == 0)
362 return;
363
364 for (i = 0; i < num_dig_link_enc; i++) {
365 struct link_encoder *link_enc = dc->res_pool->link_encoders[i];
366
367 if (link_enc) {
368 link_enc->funcs->destroy(&link_enc);
369 dc->res_pool->link_encoders[i] = NULL;
370 dc->res_pool->dig_link_enc_count--;
371 }
372 }
373 }
374
375 static struct dc_perf_trace *dc_perf_trace_create(void)
376 {
377 return kzalloc(sizeof(struct dc_perf_trace), GFP_KERNEL);
378 }
379
380 static void dc_perf_trace_destroy(struct dc_perf_trace **perf_trace)
381 {
382 kfree(*perf_trace);
383 *perf_trace = NULL;
384 }
385
386 /**
387 * dc_stream_adjust_vmin_vmax - adjust DRR vertical total range for a stream
388 * @dc: dc reference
389 * @stream: Initial dc stream state
390 * @adjust: Updated parameters for vertical_total_min and vertical_total_max
391 *
392 * Looks up the pipe context of dc_stream_state and updates the
393 * vertical_total_min and vertical_total_max of DRR (Dynamic Refresh Rate),
394 * a power-saving feature that reduces the panel refresh rate while the
395 * screen content is static.
396 * Return: true if a matching pipe was found and programmed, false otherwise.
397 */
398 bool dc_stream_adjust_vmin_vmax(struct dc *dc,
399 struct dc_stream_state *stream,
400 struct dc_crtc_timing_adjust *adjust)
401 {
402 int i;
403
404 if (memcmp(adjust, &stream->adjust, sizeof(struct dc_crtc_timing_adjust)) == 0)
405 return true;
406
407 stream->adjust.v_total_max = adjust->v_total_max;
408 stream->adjust.v_total_mid = adjust->v_total_mid;
409 stream->adjust.v_total_mid_frame_num = adjust->v_total_mid_frame_num;
410 stream->adjust.v_total_min = adjust->v_total_min;
411
412 for (i = 0; i < MAX_PIPES; i++) {
413 struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
414
415 if (pipe->stream == stream && pipe->stream_res.tg) {
416 dc->hwss.set_drr(&pipe,
417 1,
418 *adjust);
419
420 return true;
421 }
422 }
423 return false;
424 }
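/*
 * Example (illustrative only): a dm-side caller could widen the DRR window
 * for an active stream as sketched below. The 200-line margin is a
 * placeholder, not a recommended value.
 *
 *	struct dc_crtc_timing_adjust adjust = stream->adjust;
 *
 *	adjust.v_total_min = stream->timing.v_total;
 *	adjust.v_total_max = stream->timing.v_total + 200;
 *	if (!dc_stream_adjust_vmin_vmax(dc, stream, &adjust))
 *		DC_LOG_WARNING("no active pipe found for stream\n");
 */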
425
426 /**
427 * dc_stream_get_last_used_drr_vtotal - get the last VTOTAL used by DRR
428 * @dc: dc reference
429 * @stream: Initial dc stream state
430 * @refresh_rate: Receives the last VTOTAL that DRR (Dynamic Refresh Rate)
431 *                programmed for this stream
432 *
433 * Looks up the pipe context of dc_stream_state and reads back the last
434 * VTOTAL used by DRR (Dynamic Refresh Rate) from the timing generator.
435 *
436 * Return: true if the timing generator supports the query, false otherwise.
437 */
440 bool dc_stream_get_last_used_drr_vtotal(struct dc *dc,
441 struct dc_stream_state *stream,
442 uint32_t *refresh_rate)
443 {
444 bool status = false;
445
446 int i = 0;
447
448 for (i = 0; i < MAX_PIPES; i++) {
449 struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
450
451 if (pipe->stream == stream && pipe->stream_res.tg) {
452 /* Only execute if a function pointer has been defined for
453 * the DC version in question
454 */
455 if (pipe->stream_res.tg->funcs->get_last_used_drr_vtotal) {
456 pipe->stream_res.tg->funcs->get_last_used_drr_vtotal(pipe->stream_res.tg, refresh_rate);
457
458 status = true;
459
460 break;
461 }
462 }
463 }
464
465 return status;
466 }
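/*
 * Example (illustrative only): reading back the last VTOTAL programmed by
 * DRR. Despite the parameter name in the prototype, the value returned is
 * the VTOTAL described in the comment above, not a refresh rate.
 *
 *	uint32_t last_vtotal = 0;
 *
 *	if (dc_stream_get_last_used_drr_vtotal(dc, stream, &last_vtotal))
 *		DC_LOG_DC("last DRR vtotal: %u", last_vtotal);
 */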
467
468 bool dc_stream_get_crtc_position(struct dc *dc,
469 struct dc_stream_state **streams, int num_streams,
470 unsigned int *v_pos, unsigned int *nom_v_pos)
471 {
472 /* TODO: Support multiple streams */
473 const struct dc_stream_state *stream = streams[0];
474 int i;
475 bool ret = false;
476 struct crtc_position position;
477
478 for (i = 0; i < MAX_PIPES; i++) {
479 struct pipe_ctx *pipe =
480 &dc->current_state->res_ctx.pipe_ctx[i];
481
482 if (pipe->stream == stream && pipe->stream_res.stream_enc) {
483 dc->hwss.get_position(&pipe, 1, &position);
484
485 *v_pos = position.vertical_count;
486 *nom_v_pos = position.nominal_vcount;
487 ret = true;
488 }
489 }
490 return ret;
491 }
492
493 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
494 bool dc_stream_forward_dmcu_crc_window(struct dc *dc, struct dc_stream_state *stream,
495 struct crc_params *crc_window)
496 {
497 int i;
498 struct dmcu *dmcu = dc->res_pool->dmcu;
499 struct pipe_ctx *pipe;
500 struct crc_region tmp_win, *crc_win;
501 struct otg_phy_mux mapping_tmp, *mux_mapping;
502
503 /*crc window can't be null*/
504 if (!crc_window)
505 return false;
506
507 if ((dmcu != NULL && dmcu->funcs->is_dmcu_initialized(dmcu))) {
508 crc_win = &tmp_win;
509 mux_mapping = &mapping_tmp;
510 /*set crc window*/
511 tmp_win.x_start = crc_window->windowa_x_start;
512 tmp_win.y_start = crc_window->windowa_y_start;
513 tmp_win.x_end = crc_window->windowa_x_end;
514 tmp_win.y_end = crc_window->windowa_y_end;
515
516 for (i = 0; i < MAX_PIPES; i++) {
517 pipe = &dc->current_state->res_ctx.pipe_ctx[i];
518 if (pipe->stream == stream && !pipe->top_pipe && !pipe->prev_odm_pipe)
519 break;
520 }
521
522 /* Stream not found */
523 if (i == MAX_PIPES)
524 return false;
525
526
527 /*set mux routing info*/
528 mapping_tmp.phy_output_num = stream->link->link_enc_hw_inst;
529 mapping_tmp.otg_output_num = pipe->stream_res.tg->inst;
530
531 dmcu->funcs->forward_crc_window(dmcu, crc_win, mux_mapping);
532 } else {
533 DC_LOG_DC("dmcu is not initialized");
534 return false;
535 }
536
537 return true;
538 }
539
540 bool dc_stream_stop_dmcu_crc_win_update(struct dc *dc, struct dc_stream_state *stream)
541 {
542 int i;
543 struct dmcu *dmcu = dc->res_pool->dmcu;
544 struct pipe_ctx *pipe;
545 struct otg_phy_mux mapping_tmp, *mux_mapping;
546
547 if ((dmcu != NULL && dmcu->funcs->is_dmcu_initialized(dmcu))) {
548 mux_mapping = &mapping_tmp;
549
550 for (i = 0; i < MAX_PIPES; i++) {
551 pipe = &dc->current_state->res_ctx.pipe_ctx[i];
552 if (pipe->stream == stream && !pipe->top_pipe && !pipe->prev_odm_pipe)
553 break;
554 }
555
556 /* Stream not found */
557 if (i == MAX_PIPES)
558 return false;
559
560
561 /*set mux routing info*/
562 mapping_tmp.phy_output_num = stream->link->link_enc_hw_inst;
563 mapping_tmp.otg_output_num = pipe->stream_res.tg->inst;
564
565 dmcu->funcs->stop_crc_win_update(dmcu, mux_mapping);
566 } else {
567 DC_LOG_DC("dmcu is not initialized");
568 return false;
569 }
570
571 return true;
572 }
573 #endif
574
575 /**
576 * dc_stream_configure_crc() - Configure CRC capture for the given stream.
577 * @dc: DC Object
578 * @stream: The stream to configure CRC on.
579 * @crc_window: CRC window (x/y start/end) information
580 * @enable: Enable CRC if true, disable otherwise.
581 * @continuous: Capture CRC on every frame if true. Otherwise, only capture
582 * once.
583 *
584 * By default, only CRC0 is configured, and the entire frame is used to
585 * calculate the crc.
586 */
587 bool dc_stream_configure_crc(struct dc *dc, struct dc_stream_state *stream,
588 struct crc_params *crc_window, bool enable, bool continuous)
589 {
590 int i;
591 struct pipe_ctx *pipe;
592 struct crc_params param;
593 struct timing_generator *tg;
594
595 for (i = 0; i < MAX_PIPES; i++) {
596 pipe = &dc->current_state->res_ctx.pipe_ctx[i];
597 if (pipe->stream == stream && !pipe->top_pipe && !pipe->prev_odm_pipe)
598 break;
599 }
600 /* Stream not found */
601 if (i == MAX_PIPES)
602 return false;
603
604 /* By default, capture the full frame */
605 param.windowa_x_start = 0;
606 param.windowa_y_start = 0;
607 param.windowa_x_end = pipe->stream->timing.h_addressable;
608 param.windowa_y_end = pipe->stream->timing.v_addressable;
609 param.windowb_x_start = 0;
610 param.windowb_y_start = 0;
611 param.windowb_x_end = pipe->stream->timing.h_addressable;
612 param.windowb_y_end = pipe->stream->timing.v_addressable;
613
614 if (crc_window) {
615 param.windowa_x_start = crc_window->windowa_x_start;
616 param.windowa_y_start = crc_window->windowa_y_start;
617 param.windowa_x_end = crc_window->windowa_x_end;
618 param.windowa_y_end = crc_window->windowa_y_end;
619 param.windowb_x_start = crc_window->windowb_x_start;
620 param.windowb_y_start = crc_window->windowb_y_start;
621 param.windowb_x_end = crc_window->windowb_x_end;
622 param.windowb_y_end = crc_window->windowb_y_end;
623 }
624
625 param.dsc_mode = pipe->stream->timing.flags.DSC ? 1:0;
626 param.odm_mode = pipe->next_odm_pipe ? 1:0;
627
628 /* Default to the union of both windows */
629 param.selection = UNION_WINDOW_A_B;
630 param.continuous_mode = continuous;
631 param.enable = enable;
632
633 tg = pipe->stream_res.tg;
634
635 /* Only call if supported */
636 if (tg->funcs->configure_crc)
637 return tg->funcs->configure_crc(tg, &param);
638 DC_LOG_WARNING("CRC capture not supported.");
639 return false;
640 }
641
642 /**
643 * dc_stream_get_crc() - Get CRC values for the given stream.
644 *
645 * @dc: DC object.
646 * @stream: The DC stream state of the stream to get CRCs from.
647 * @r_cr: CRC value for the red component.
648 * @g_y: CRC value for the green component.
649 * @b_cb: CRC value for the blue component.
650 *
651 * dc_stream_configure_crc needs to be called beforehand to enable CRCs.
652 *
653 * Return:
654 * false if stream is not found, or if CRCs are not enabled.
655 */
656 bool dc_stream_get_crc(struct dc *dc, struct dc_stream_state *stream,
657 uint32_t *r_cr, uint32_t *g_y, uint32_t *b_cb)
658 {
659 int i;
660 struct pipe_ctx *pipe;
661 struct timing_generator *tg;
662
663 for (i = 0; i < MAX_PIPES; i++) {
664 pipe = &dc->current_state->res_ctx.pipe_ctx[i];
665 if (pipe->stream == stream)
666 break;
667 }
668 /* Stream not found */
669 if (i == MAX_PIPES)
670 return false;
671
672 tg = pipe->stream_res.tg;
673
674 if (tg->funcs->get_crc)
675 return tg->funcs->get_crc(tg, r_cr, g_y, b_cb);
676 DC_LOG_WARNING("CRC capture not supported.");
677 return false;
678 }
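/*
 * Example (illustrative only): enable continuous full-frame CRC capture and
 * read one set of values back. Passing a NULL crc_window selects the default
 * full-frame windows; a real caller would wait at least one frame between
 * the configure and get calls.
 *
 *	uint32_t r_cr, g_y, b_cb;
 *
 *	if (dc_stream_configure_crc(dc, stream, NULL, true, true) &&
 *	    dc_stream_get_crc(dc, stream, &r_cr, &g_y, &b_cb))
 *		DC_LOG_DC("CRC: %08x %08x %08x", r_cr, g_y, b_cb);
 */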
679
680 void dc_stream_set_dyn_expansion(struct dc *dc, struct dc_stream_state *stream,
681 enum dc_dynamic_expansion option)
682 {
683 /* OPP FMT dyn expansion updates*/
684 int i;
685 struct pipe_ctx *pipe_ctx;
686
687 for (i = 0; i < MAX_PIPES; i++) {
688 if (dc->current_state->res_ctx.pipe_ctx[i].stream
689 == stream) {
690 pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
691 pipe_ctx->stream_res.opp->dyn_expansion = option;
692 pipe_ctx->stream_res.opp->funcs->opp_set_dyn_expansion(
693 pipe_ctx->stream_res.opp,
694 COLOR_SPACE_YCBCR601,
695 stream->timing.display_color_depth,
696 stream->signal);
697 }
698 }
699 }
700
701 void dc_stream_set_dither_option(struct dc_stream_state *stream,
702 enum dc_dither_option option)
703 {
704 struct bit_depth_reduction_params params;
705 struct dc_link *link = stream->link;
706 struct pipe_ctx *pipes = NULL;
707 int i;
708
709 for (i = 0; i < MAX_PIPES; i++) {
710 if (link->dc->current_state->res_ctx.pipe_ctx[i].stream ==
711 stream) {
712 pipes = &link->dc->current_state->res_ctx.pipe_ctx[i];
713 break;
714 }
715 }
716
717 if (!pipes)
718 return;
719 if (option > DITHER_OPTION_MAX)
720 return;
721
722 stream->dither_option = option;
723
724 memset(&params, 0, sizeof(params));
725 resource_build_bit_depth_reduction_params(stream, &params);
726 stream->bit_depth_params = params;
727
728 if (pipes->plane_res.xfm &&
729 pipes->plane_res.xfm->funcs->transform_set_pixel_storage_depth) {
730 pipes->plane_res.xfm->funcs->transform_set_pixel_storage_depth(
731 pipes->plane_res.xfm,
732 pipes->plane_res.scl_data.lb_params.depth,
733 &stream->bit_depth_params);
734 }
735
736 pipes->stream_res.opp->funcs->
737 opp_program_bit_depth_reduction(pipes->stream_res.opp, &params);
738 }
739
740 bool dc_stream_set_gamut_remap(struct dc *dc, const struct dc_stream_state *stream)
741 {
742 int i;
743 bool ret = false;
744 struct pipe_ctx *pipes;
745
746 for (i = 0; i < MAX_PIPES; i++) {
747 if (dc->current_state->res_ctx.pipe_ctx[i].stream == stream) {
748 pipes = &dc->current_state->res_ctx.pipe_ctx[i];
749 dc->hwss.program_gamut_remap(pipes);
750 ret = true;
751 }
752 }
753
754 return ret;
755 }
756
757 bool dc_stream_program_csc_matrix(struct dc *dc, struct dc_stream_state *stream)
758 {
759 int i;
760 bool ret = false;
761 struct pipe_ctx *pipes;
762
763 for (i = 0; i < MAX_PIPES; i++) {
764 if (dc->current_state->res_ctx.pipe_ctx[i].stream
765 == stream) {
766
767 pipes = &dc->current_state->res_ctx.pipe_ctx[i];
768 dc->hwss.program_output_csc(dc,
769 pipes,
770 stream->output_color_space,
771 stream->csc_color_matrix.matrix,
772 pipes->stream_res.opp->inst);
773 ret = true;
774 }
775 }
776
777 return ret;
778 }
779
780 void dc_stream_set_static_screen_params(struct dc *dc,
781 struct dc_stream_state **streams,
782 int num_streams,
783 const struct dc_static_screen_params *params)
784 {
785 int i, j;
786 struct pipe_ctx *pipes_affected[MAX_PIPES];
787 int num_pipes_affected = 0;
788
789 for (i = 0; i < num_streams; i++) {
790 struct dc_stream_state *stream = streams[i];
791
792 for (j = 0; j < MAX_PIPES; j++) {
793 if (dc->current_state->res_ctx.pipe_ctx[j].stream
794 == stream) {
795 pipes_affected[num_pipes_affected++] =
796 &dc->current_state->res_ctx.pipe_ctx[j];
797 }
798 }
799 }
800
801 dc->hwss.set_static_screen_control(pipes_affected, num_pipes_affected, params);
802 }
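/*
 * Example (illustrative only, with an assumed field): static-screen
 * parameters are passed straight through to the hw sequencer. The
 * num_frames member below is an assumption about dc_static_screen_params
 * and may differ between DCN generations; stream is a dc_stream_state
 * pointer owned by the caller.
 *
 *	struct dc_static_screen_params params = { 0 };
 *
 *	params.num_frames = 2;	// assumed field: frames before "static" is declared
 *	dc_stream_set_static_screen_params(dc, &stream, 1, &params);
 */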
803
804 static void dc_destruct(struct dc *dc)
805 {
806 // reset link encoder assignment table on destruct
807 if (dc->res_pool && dc->res_pool->funcs->link_encs_assign)
808 link_enc_cfg_init(dc, dc->current_state);
809
810 if (dc->current_state) {
811 dc_release_state(dc->current_state);
812 dc->current_state = NULL;
813 }
814
815 destroy_links(dc);
816
817 destroy_link_encoders(dc);
818
819 if (dc->clk_mgr) {
820 dc_destroy_clk_mgr(dc->clk_mgr);
821 dc->clk_mgr = NULL;
822 }
823
824 dc_destroy_resource_pool(dc);
825
826 if (dc->ctx->gpio_service)
827 dal_gpio_service_destroy(&dc->ctx->gpio_service);
828
829 if (dc->ctx->created_bios)
830 dal_bios_parser_destroy(&dc->ctx->dc_bios);
831
832 dc_perf_trace_destroy(&dc->ctx->perf_trace);
833
834 kfree(dc->ctx);
835 dc->ctx = NULL;
836
837 kfree(dc->bw_vbios);
838 dc->bw_vbios = NULL;
839
840 kfree(dc->bw_dceip);
841 dc->bw_dceip = NULL;
842
843 kfree(dc->dcn_soc);
844 dc->dcn_soc = NULL;
845
846 kfree(dc->dcn_ip);
847 dc->dcn_ip = NULL;
848
849 kfree(dc->vm_helper);
850 dc->vm_helper = NULL;
851
852 }
853
854 static bool dc_construct_ctx(struct dc *dc,
855 const struct dc_init_data *init_params)
856 {
857 struct dc_context *dc_ctx;
858 enum dce_version dc_version = DCE_VERSION_UNKNOWN;
859
860 dc_ctx = kzalloc(sizeof(*dc_ctx), GFP_KERNEL);
861 if (!dc_ctx)
862 return false;
863
864 dc_ctx->cgs_device = init_params->cgs_device;
865 dc_ctx->driver_context = init_params->driver;
866 dc_ctx->dc = dc;
867 dc_ctx->asic_id = init_params->asic_id;
868 dc_ctx->dc_sink_id_count = 0;
869 dc_ctx->dc_stream_id_count = 0;
870 dc_ctx->dce_environment = init_params->dce_environment;
871 dc_ctx->dcn_reg_offsets = init_params->dcn_reg_offsets;
872 dc_ctx->nbio_reg_offsets = init_params->nbio_reg_offsets;
873
874 /* Create logger */
875
876 dc_version = resource_parse_asic_id(init_params->asic_id);
877 dc_ctx->dce_version = dc_version;
878
879 dc_ctx->perf_trace = dc_perf_trace_create();
880 if (!dc_ctx->perf_trace) {
881 ASSERT_CRITICAL(false);
882 return false;
883 }
884
885 dc->ctx = dc_ctx;
886
887 return true;
888 }
889
890 static bool dc_construct(struct dc *dc,
891 const struct dc_init_data *init_params)
892 {
893 struct dc_context *dc_ctx;
894 struct bw_calcs_dceip *dc_dceip;
895 struct bw_calcs_vbios *dc_vbios;
896 struct dcn_soc_bounding_box *dcn_soc;
897 struct dcn_ip_params *dcn_ip;
898
899 dc->config = init_params->flags;
900
901 // Allocate memory for the vm_helper
902 dc->vm_helper = kzalloc(sizeof(struct vm_helper), GFP_KERNEL);
903 if (!dc->vm_helper) {
904 dm_error("%s: failed to create dc->vm_helper\n", __func__);
905 goto fail;
906 }
907
908 memcpy(&dc->bb_overrides, &init_params->bb_overrides, sizeof(dc->bb_overrides));
909
910 dc_dceip = kzalloc(sizeof(*dc_dceip), GFP_KERNEL);
911 if (!dc_dceip) {
912 dm_error("%s: failed to create dceip\n", __func__);
913 goto fail;
914 }
915
916 dc->bw_dceip = dc_dceip;
917
918 dc_vbios = kzalloc(sizeof(*dc_vbios), GFP_KERNEL);
919 if (!dc_vbios) {
920 dm_error("%s: failed to create vbios\n", __func__);
921 goto fail;
922 }
923
924 dc->bw_vbios = dc_vbios;
925 dcn_soc = kzalloc(sizeof(*dcn_soc), GFP_KERNEL);
926 if (!dcn_soc) {
927 dm_error("%s: failed to create dcn_soc\n", __func__);
928 goto fail;
929 }
930
931 dc->dcn_soc = dcn_soc;
932
933 dcn_ip = kzalloc(sizeof(*dcn_ip), GFP_KERNEL);
934 if (!dcn_ip) {
935 dm_error("%s: failed to create dcn_ip\n", __func__);
936 goto fail;
937 }
938
939 dc->dcn_ip = dcn_ip;
940
941 if (!dc_construct_ctx(dc, init_params)) {
942 dm_error("%s: failed to create ctx\n", __func__);
943 goto fail;
944 }
945
946 dc_ctx = dc->ctx;
947
948 /* Resource should construct all asic specific resources.
949 * This should be the only place where we need to parse the asic id
950 */
951 if (init_params->vbios_override)
952 dc_ctx->dc_bios = init_params->vbios_override;
953 else {
954 /* Create BIOS parser */
955 struct bp_init_data bp_init_data;
956
957 bp_init_data.ctx = dc_ctx;
958 bp_init_data.bios = init_params->asic_id.atombios_base_address;
959
960 dc_ctx->dc_bios = dal_bios_parser_create(
961 &bp_init_data, dc_ctx->dce_version);
962
963 if (!dc_ctx->dc_bios) {
964 ASSERT_CRITICAL(false);
965 goto fail;
966 }
967
968 dc_ctx->created_bios = true;
969 }
970
971 dc->vendor_signature = init_params->vendor_signature;
972
973 /* Create GPIO service */
974 dc_ctx->gpio_service = dal_gpio_service_create(
975 dc_ctx->dce_version,
976 dc_ctx->dce_environment,
977 dc_ctx);
978
979 if (!dc_ctx->gpio_service) {
980 ASSERT_CRITICAL(false);
981 goto fail;
982 }
983
984 dc->res_pool = dc_create_resource_pool(dc, init_params, dc_ctx->dce_version);
985 if (!dc->res_pool)
986 goto fail;
987
988 /* set i2c speed if not done by the respective dcnxxx__resource.c */
989 if (dc->caps.i2c_speed_in_khz_hdcp == 0)
990 dc->caps.i2c_speed_in_khz_hdcp = dc->caps.i2c_speed_in_khz;
991
992 dc->clk_mgr = dc_clk_mgr_create(dc->ctx, dc->res_pool->pp_smu, dc->res_pool->dccg);
993 if (!dc->clk_mgr)
994 goto fail;
995 #ifdef CONFIG_DRM_AMD_DC_DCN
996 dc->clk_mgr->force_smu_not_present = init_params->force_smu_not_present;
997
998 if (dc->res_pool->funcs->update_bw_bounding_box) {
999 DC_FP_START();
1000 dc->res_pool->funcs->update_bw_bounding_box(dc, dc->clk_mgr->bw_params);
1001 DC_FP_END();
1002 }
1003 #endif
1004
1005 /* Creation of current_state must occur after dc->dml
1006 * is initialized in dc_create_resource_pool because
1007 * on creation it copies the contents of dc->dml
1008 */
1009
1010 dc->current_state = dc_create_state(dc);
1011
1012 if (!dc->current_state) {
1013 dm_error("%s: failed to create validate ctx\n", __func__);
1014 goto fail;
1015 }
1016
1017 if (!create_links(dc, init_params->num_virtual_links))
1018 goto fail;
1019
1020 /* Create additional DIG link encoder objects if fewer than the platform
1021 * supports were created during link construction.
1022 */
1023 if (!create_link_encoders(dc))
1024 goto fail;
1025
1026 dc_resource_state_construct(dc, dc->current_state);
1027
1028 return true;
1029
1030 fail:
1031 return false;
1032 }
1033
1034 static void disable_all_writeback_pipes_for_stream(
1035 const struct dc *dc,
1036 struct dc_stream_state *stream,
1037 struct dc_state *context)
1038 {
1039 int i;
1040
1041 for (i = 0; i < stream->num_wb_info; i++)
1042 stream->writeback_info[i].wb_enabled = false;
1043 }
1044
1045 static void apply_ctx_interdependent_lock(struct dc *dc, struct dc_state *context,
1046 struct dc_stream_state *stream, bool lock)
1047 {
1048 int i;
1049
1050 /* Checks if interdependent update function pointer is NULL or not, takes care of DCE110 case */
1051 if (dc->hwss.interdependent_update_lock)
1052 dc->hwss.interdependent_update_lock(dc, context, lock);
1053 else {
1054 for (i = 0; i < dc->res_pool->pipe_count; i++) {
1055 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
1056 struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
1057
1058 // Copied conditions that were previously in dce110_apply_ctx_for_surface
1059 if (stream == pipe_ctx->stream) {
1060 if (!pipe_ctx->top_pipe &&
1061 (pipe_ctx->plane_state || old_pipe_ctx->plane_state))
1062 dc->hwss.pipe_control_lock(dc, pipe_ctx, lock);
1063 }
1064 }
1065 }
1066 }
1067
1068 static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
1069 {
1070 int i, j;
1071 struct dc_state *dangling_context = dc_create_state(dc);
1072 struct dc_state *current_ctx;
1073 struct pipe_ctx *pipe;
1074
1075 if (dangling_context == NULL)
1076 return;
1077
1078 dc_resource_state_copy_construct(dc->current_state, dangling_context);
1079
1080 for (i = 0; i < dc->res_pool->pipe_count; i++) {
1081 struct dc_stream_state *old_stream =
1082 dc->current_state->res_ctx.pipe_ctx[i].stream;
1083 bool should_disable = true;
1084 bool pipe_split_change = false;
1085
1086 if ((context->res_ctx.pipe_ctx[i].top_pipe) &&
1087 (dc->current_state->res_ctx.pipe_ctx[i].top_pipe))
1088 pipe_split_change = context->res_ctx.pipe_ctx[i].top_pipe->pipe_idx !=
1089 dc->current_state->res_ctx.pipe_ctx[i].top_pipe->pipe_idx;
1090 else
1091 pipe_split_change = context->res_ctx.pipe_ctx[i].top_pipe !=
1092 dc->current_state->res_ctx.pipe_ctx[i].top_pipe;
1093
1094 for (j = 0; j < context->stream_count; j++) {
1095 if (old_stream == context->streams[j]) {
1096 should_disable = false;
1097 break;
1098 }
1099 }
1100 if (!should_disable && pipe_split_change &&
1101 dc->current_state->stream_count != context->stream_count)
1102 should_disable = true;
1103
1104 if (old_stream && !dc->current_state->res_ctx.pipe_ctx[i].top_pipe &&
1105 !dc->current_state->res_ctx.pipe_ctx[i].prev_odm_pipe) {
1106 struct pipe_ctx *old_pipe, *new_pipe;
1107
1108 old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
1109 new_pipe = &context->res_ctx.pipe_ctx[i];
1110
1111 if (old_pipe->plane_state && !new_pipe->plane_state)
1112 should_disable = true;
1113 }
1114
1115 if (should_disable && old_stream) {
1116 pipe = &dc->current_state->res_ctx.pipe_ctx[i];
1117 /* When disabling plane for a phantom pipe, we must turn on the
1118 * phantom OTG so the disable programming gets the double buffer
1119 * update. Otherwise the pipe will be left in a partially disabled
1120 * state that can result in underflow or hang when enabling it
1121 * again for different use.
1122 */
1123 if (old_stream->mall_stream_config.type == SUBVP_PHANTOM) {
1124 pipe->stream_res.tg->funcs->enable_crtc(pipe->stream_res.tg);
1125 }
1126 dc_rem_all_planes_for_stream(dc, old_stream, dangling_context);
1127 disable_all_writeback_pipes_for_stream(dc, old_stream, dangling_context);
1128
1129 if (dc->hwss.apply_ctx_for_surface) {
1130 apply_ctx_interdependent_lock(dc, dc->current_state, old_stream, true);
1131 dc->hwss.apply_ctx_for_surface(dc, old_stream, 0, dangling_context);
1132 apply_ctx_interdependent_lock(dc, dc->current_state, old_stream, false);
1133 dc->hwss.post_unlock_program_front_end(dc, dangling_context);
1134 }
1135 if (dc->hwss.program_front_end_for_ctx) {
1136 dc->hwss.interdependent_update_lock(dc, dc->current_state, true);
1137 dc->hwss.program_front_end_for_ctx(dc, dangling_context);
1138 dc->hwss.interdependent_update_lock(dc, dc->current_state, false);
1139 dc->hwss.post_unlock_program_front_end(dc, dangling_context);
1140 }
1141 }
1142 }
1143
1144 current_ctx = dc->current_state;
1145 dc->current_state = dangling_context;
1146 dc_release_state(current_ctx);
1147 }
1148
1149 static void disable_vbios_mode_if_required(
1150 struct dc *dc,
1151 struct dc_state *context)
1152 {
1153 unsigned int i, j;
1154
1155 /* check if timing_changed, disable stream*/
1156 for (i = 0; i < dc->res_pool->pipe_count; i++) {
1157 struct dc_stream_state *stream = NULL;
1158 struct dc_link *link = NULL;
1159 struct pipe_ctx *pipe = NULL;
1160
1161 pipe = &context->res_ctx.pipe_ctx[i];
1162 stream = pipe->stream;
1163 if (stream == NULL)
1164 continue;
1165
1166 // only looking for first odm pipe
1167 if (pipe->prev_odm_pipe)
1168 continue;
1169
1170 if (stream->link->local_sink &&
1171 stream->link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
1172 link = stream->link;
1173 }
1174
1175 if (link != NULL && link->link_enc->funcs->is_dig_enabled(link->link_enc)) {
1176 unsigned int enc_inst, tg_inst = 0;
1177 unsigned int pix_clk_100hz;
1178
1179 enc_inst = link->link_enc->funcs->get_dig_frontend(link->link_enc);
1180 if (enc_inst != ENGINE_ID_UNKNOWN) {
1181 for (j = 0; j < dc->res_pool->stream_enc_count; j++) {
1182 if (dc->res_pool->stream_enc[j]->id == enc_inst) {
1183 tg_inst = dc->res_pool->stream_enc[j]->funcs->dig_source_otg(
1184 dc->res_pool->stream_enc[j]);
1185 break;
1186 }
1187 }
1188
1189 dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
1190 dc->res_pool->dp_clock_source,
1191 tg_inst, &pix_clk_100hz);
1192
1193 if (link->link_status.link_active) {
1194 uint32_t requested_pix_clk_100hz =
1195 pipe->stream_res.pix_clk_params.requested_pix_clk_100hz;
1196
1197 if (pix_clk_100hz != requested_pix_clk_100hz) {
1198 core_link_disable_stream(pipe);
1199 pipe->stream->dpms_off = false;
1200 }
1201 }
1202 }
1203 }
1204 }
1205 }
1206
1207 static void wait_for_no_pipes_pending(struct dc *dc, struct dc_state *context)
1208 {
1209 int i;
1210 PERF_TRACE();
1211 for (i = 0; i < MAX_PIPES; i++) {
1212 int count = 0;
1213 struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
1214
1215 if (!pipe->plane_state || pipe->stream->mall_stream_config.type == SUBVP_PHANTOM)
1216 continue;
1217
1218 /* Timeout 100 ms */
1219 while (count < 100000) {
1220 /* Must set to false to start with, due to OR in update function */
1221 pipe->plane_state->status.is_flip_pending = false;
1222 dc->hwss.update_pending_status(pipe);
1223 if (!pipe->plane_state->status.is_flip_pending)
1224 break;
1225 udelay(1);
1226 count++;
1227 }
1228 ASSERT(!pipe->plane_state->status.is_flip_pending);
1229 }
1230 PERF_TRACE();
1231 }
1232
1233 /*******************************************************************************
1234 * Public functions
1235 ******************************************************************************/
1236
1237 struct dc *dc_create(const struct dc_init_data *init_params)
1238 {
1239 struct dc *dc = kzalloc(sizeof(*dc), GFP_KERNEL);
1240 unsigned int full_pipe_count;
1241
1242 if (!dc)
1243 return NULL;
1244
1245 if (init_params->dce_environment == DCE_ENV_VIRTUAL_HW) {
1246 if (!dc_construct_ctx(dc, init_params))
1247 goto destruct_dc;
1248 } else {
1249 if (!dc_construct(dc, init_params))
1250 goto destruct_dc;
1251
1252 full_pipe_count = dc->res_pool->pipe_count;
1253 if (dc->res_pool->underlay_pipe_index != NO_UNDERLAY_PIPE)
1254 full_pipe_count--;
1255 dc->caps.max_streams = min(
1256 full_pipe_count,
1257 dc->res_pool->stream_enc_count);
1258
1259 dc->caps.max_links = dc->link_count;
1260 dc->caps.max_audios = dc->res_pool->audio_count;
1261 dc->caps.linear_pitch_alignment = 64;
1262
1263 dc->caps.max_dp_protocol_version = DP_VERSION_1_4;
1264
1265 dc->caps.max_otg_num = dc->res_pool->res_cap->num_timing_generator;
1266
1267 if (dc->res_pool->dmcu != NULL)
1268 dc->versions.dmcu_version = dc->res_pool->dmcu->dmcu_version;
1269 }
1270
1271 dc->dcn_reg_offsets = init_params->dcn_reg_offsets;
1272 dc->nbio_reg_offsets = init_params->nbio_reg_offsets;
1273
1274 /* Populate versioning information */
1275 dc->versions.dc_ver = DC_VER;
1276
1277 dc->build_id = DC_BUILD_ID;
1278
1279 DC_LOG_DC("Display Core initialized\n");
1280
1281
1282
1283 return dc;
1284
1285 destruct_dc:
1286 dc_destruct(dc);
1287 kfree(dc);
1288 return NULL;
1289 }
1290
1291 static void detect_edp_presence(struct dc *dc)
1292 {
1293 struct dc_link *edp_links[MAX_NUM_EDP];
1294 struct dc_link *edp_link = NULL;
1295 enum dc_connection_type type;
1296 int i;
1297 int edp_num;
1298
1299 get_edp_links(dc, edp_links, &edp_num);
1300 if (!edp_num)
1301 return;
1302
1303 for (i = 0; i < edp_num; i++) {
1304 edp_link = edp_links[i];
1305 if (dc->config.edp_not_connected) {
1306 edp_link->edp_sink_present = false;
1307 } else {
1308 dc_link_detect_sink(edp_link, &type);
1309 edp_link->edp_sink_present = (type != dc_connection_none);
1310 }
1311 }
1312 }
1313
1314 void dc_hardware_init(struct dc *dc)
1315 {
1316
1317 detect_edp_presence(dc);
1318 if (dc->ctx->dce_environment != DCE_ENV_VIRTUAL_HW)
1319 dc->hwss.init_hw(dc);
1320 }
1321
1322 void dc_init_callbacks(struct dc *dc,
1323 const struct dc_callback_init *init_params)
1324 {
1325 #ifdef CONFIG_DRM_AMD_DC_HDCP
1326 dc->ctx->cp_psp = init_params->cp_psp;
1327 #endif
1328 }
1329
1330 void dc_deinit_callbacks(struct dc *dc)
1331 {
1332 #ifdef CONFIG_DRM_AMD_DC_HDCP
1333 memset(&dc->ctx->cp_psp, 0, sizeof(dc->ctx->cp_psp));
1334 #endif
1335 }
1336
1337 void dc_destroy(struct dc **dc)
1338 {
1339 dc_destruct(*dc);
1340 kfree(*dc);
1341 *dc = NULL;
1342 }
1343
1344 static void enable_timing_multisync(
1345 struct dc *dc,
1346 struct dc_state *ctx)
1347 {
1348 int i, multisync_count = 0;
1349 int pipe_count = dc->res_pool->pipe_count;
1350 struct pipe_ctx *multisync_pipes[MAX_PIPES] = { NULL };
1351
1352 for (i = 0; i < pipe_count; i++) {
1353 if (!ctx->res_ctx.pipe_ctx[i].stream ||
1354 !ctx->res_ctx.pipe_ctx[i].stream->triggered_crtc_reset.enabled)
1355 continue;
1356 if (ctx->res_ctx.pipe_ctx[i].stream == ctx->res_ctx.pipe_ctx[i].stream->triggered_crtc_reset.event_source)
1357 continue;
1358 multisync_pipes[multisync_count] = &ctx->res_ctx.pipe_ctx[i];
1359 multisync_count++;
1360 }
1361
1362 if (multisync_count > 0) {
1363 dc->hwss.enable_per_frame_crtc_position_reset(
1364 dc, multisync_count, multisync_pipes);
1365 }
1366 }
1367
1368 static void program_timing_sync(
1369 struct dc *dc,
1370 struct dc_state *ctx)
1371 {
1372 int i, j, k;
1373 int group_index = 0;
1374 int num_group = 0;
1375 int pipe_count = dc->res_pool->pipe_count;
1376 struct pipe_ctx *unsynced_pipes[MAX_PIPES] = { NULL };
1377
1378 for (i = 0; i < pipe_count; i++) {
1379 if (!ctx->res_ctx.pipe_ctx[i].stream
1380 || ctx->res_ctx.pipe_ctx[i].top_pipe
1381 || ctx->res_ctx.pipe_ctx[i].prev_odm_pipe)
1382 continue;
1383
1384 unsynced_pipes[i] = &ctx->res_ctx.pipe_ctx[i];
1385 }
1386
1387 for (i = 0; i < pipe_count; i++) {
1388 int group_size = 1;
1389 enum timing_synchronization_type sync_type = NOT_SYNCHRONIZABLE;
1390 struct pipe_ctx *pipe_set[MAX_PIPES];
1391
1392 if (!unsynced_pipes[i])
1393 continue;
1394
1395 pipe_set[0] = unsynced_pipes[i];
1396 unsynced_pipes[i] = NULL;
1397
1398 /* Add tg to the set, search rest of the tg's for ones with
1399 * same timing, add all tgs with same timing to the group
1400 */
1401 for (j = i + 1; j < pipe_count; j++) {
1402 if (!unsynced_pipes[j])
1403 continue;
1404 if (sync_type != TIMING_SYNCHRONIZABLE &&
1405 dc->hwss.enable_vblanks_synchronization &&
1406 unsynced_pipes[j]->stream_res.tg->funcs->align_vblanks &&
1407 resource_are_vblanks_synchronizable(
1408 unsynced_pipes[j]->stream,
1409 pipe_set[0]->stream)) {
1410 sync_type = VBLANK_SYNCHRONIZABLE;
1411 pipe_set[group_size] = unsynced_pipes[j];
1412 unsynced_pipes[j] = NULL;
1413 group_size++;
1414 } else
1415 if (sync_type != VBLANK_SYNCHRONIZABLE &&
1416 resource_are_streams_timing_synchronizable(
1417 unsynced_pipes[j]->stream,
1418 pipe_set[0]->stream)) {
1419 sync_type = TIMING_SYNCHRONIZABLE;
1420 pipe_set[group_size] = unsynced_pipes[j];
1421 unsynced_pipes[j] = NULL;
1422 group_size++;
1423 }
1424 }
1425
1426 /* set first unblanked pipe as master */
1427 for (j = 0; j < group_size; j++) {
1428 bool is_blanked;
1429
1430 if (pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked)
1431 is_blanked =
1432 pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked(pipe_set[j]->stream_res.opp);
1433 else
1434 is_blanked =
1435 pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg);
1436 if (!is_blanked) {
1437 if (j == 0)
1438 break;
1439
1440 swap(pipe_set[0], pipe_set[j]);
1441 break;
1442 }
1443 }
1444
1445 for (k = 0; k < group_size; k++) {
1446 struct dc_stream_status *status = dc_stream_get_status_from_state(ctx, pipe_set[k]->stream);
1447
1448 status->timing_sync_info.group_id = num_group;
1449 status->timing_sync_info.group_size = group_size;
1450 if (k == 0)
1451 status->timing_sync_info.master = true;
1452 else
1453 status->timing_sync_info.master = false;
1454
1455 }
1456
1457 /* remove any other pipes that have already been synced */
1458 if (dc->config.use_pipe_ctx_sync_logic) {
1459 /* check pipe's syncd to decide which pipe to be removed */
1460 for (j = 1; j < group_size; j++) {
1461 if (pipe_set[j]->pipe_idx_syncd == pipe_set[0]->pipe_idx_syncd) {
1462 group_size--;
1463 pipe_set[j] = pipe_set[group_size];
1464 j--;
1465 } else
1466 /* link slave pipe's syncd with master pipe */
1467 pipe_set[j]->pipe_idx_syncd = pipe_set[0]->pipe_idx_syncd;
1468 }
1469 } else {
1470 for (j = j + 1; j < group_size; j++) {
1471 bool is_blanked;
1472
1473 if (pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked)
1474 is_blanked =
1475 pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked(pipe_set[j]->stream_res.opp);
1476 else
1477 is_blanked =
1478 pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg);
1479 if (!is_blanked) {
1480 group_size--;
1481 pipe_set[j] = pipe_set[group_size];
1482 j--;
1483 }
1484 }
1485 }
1486
1487 if (group_size > 1) {
1488 if (sync_type == TIMING_SYNCHRONIZABLE) {
1489 dc->hwss.enable_timing_synchronization(
1490 dc, group_index, group_size, pipe_set);
1491 } else
1492 if (sync_type == VBLANK_SYNCHRONIZABLE) {
1493 dc->hwss.enable_vblanks_synchronization(
1494 dc, group_index, group_size, pipe_set);
1495 }
1496 group_index++;
1497 }
1498 num_group++;
1499 }
1500 }
1501
1502 static bool context_changed(
1503 struct dc *dc,
1504 struct dc_state *context)
1505 {
1506 uint8_t i;
1507
1508 if (context->stream_count != dc->current_state->stream_count)
1509 return true;
1510
1511 for (i = 0; i < dc->current_state->stream_count; i++) {
1512 if (dc->current_state->streams[i] != context->streams[i])
1513 return true;
1514 }
1515
1516 return false;
1517 }
1518
1519 bool dc_validate_boot_timing(const struct dc *dc,
1520 const struct dc_sink *sink,
1521 struct dc_crtc_timing *crtc_timing)
1522 {
1523 struct timing_generator *tg;
1524 struct stream_encoder *se = NULL;
1525
1526 struct dc_crtc_timing hw_crtc_timing = {0};
1527
1528 struct dc_link *link = sink->link;
1529 unsigned int i, enc_inst, tg_inst = 0;
1530
1531 /* Support seamless boot on EDP displays only */
1532 if (sink->sink_signal != SIGNAL_TYPE_EDP) {
1533 return false;
1534 }
1535
1536 /* Check for enabled DIG to identify enabled display */
1537 if (!link->link_enc->funcs->is_dig_enabled(link->link_enc))
1538 return false;
1539
1540 enc_inst = link->link_enc->funcs->get_dig_frontend(link->link_enc);
1541
1542 if (enc_inst == ENGINE_ID_UNKNOWN)
1543 return false;
1544
1545 for (i = 0; i < dc->res_pool->stream_enc_count; i++) {
1546 if (dc->res_pool->stream_enc[i]->id == enc_inst) {
1547
1548 se = dc->res_pool->stream_enc[i];
1549
1550 tg_inst = dc->res_pool->stream_enc[i]->funcs->dig_source_otg(
1551 dc->res_pool->stream_enc[i]);
1552 break;
1553 }
1554 }
1555
1556 // tg_inst not found
1557 if (i == dc->res_pool->stream_enc_count)
1558 return false;
1559
1560 if (tg_inst >= dc->res_pool->timing_generator_count)
1561 return false;
1562
1563 tg = dc->res_pool->timing_generators[tg_inst];
1564
1565 if (!tg->funcs->get_hw_timing)
1566 return false;
1567
1568 if (!tg->funcs->get_hw_timing(tg, &hw_crtc_timing))
1569 return false;
1570
1571 if (crtc_timing->h_total != hw_crtc_timing.h_total)
1572 return false;
1573
1574 if (crtc_timing->h_border_left != hw_crtc_timing.h_border_left)
1575 return false;
1576
1577 if (crtc_timing->h_addressable != hw_crtc_timing.h_addressable)
1578 return false;
1579
1580 if (crtc_timing->h_border_right != hw_crtc_timing.h_border_right)
1581 return false;
1582
1583 if (crtc_timing->h_front_porch != hw_crtc_timing.h_front_porch)
1584 return false;
1585
1586 if (crtc_timing->h_sync_width != hw_crtc_timing.h_sync_width)
1587 return false;
1588
1589 if (crtc_timing->v_total != hw_crtc_timing.v_total)
1590 return false;
1591
1592 if (crtc_timing->v_border_top != hw_crtc_timing.v_border_top)
1593 return false;
1594
1595 if (crtc_timing->v_addressable != hw_crtc_timing.v_addressable)
1596 return false;
1597
1598 if (crtc_timing->v_border_bottom != hw_crtc_timing.v_border_bottom)
1599 return false;
1600
1601 if (crtc_timing->v_front_porch != hw_crtc_timing.v_front_porch)
1602 return false;
1603
1604 if (crtc_timing->v_sync_width != hw_crtc_timing.v_sync_width)
1605 return false;
1606
1607 /* block DSC for now, as VBIOS does not currently support DSC timings */
1608 if (crtc_timing->flags.DSC)
1609 return false;
1610
1611 if (dc_is_dp_signal(link->connector_signal)) {
1612 unsigned int pix_clk_100hz;
1613 uint32_t numOdmPipes = 1;
1614 uint32_t id_src[4] = {0};
1615
1616 dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
1617 dc->res_pool->dp_clock_source,
1618 tg_inst, &pix_clk_100hz);
1619
1620 if (tg->funcs->get_optc_source)
1621 tg->funcs->get_optc_source(tg,
1622 &numOdmPipes, &id_src[0], &id_src[1]);
1623
1624 if (numOdmPipes == 2)
1625 pix_clk_100hz *= 2;
1626 if (numOdmPipes == 4)
1627 pix_clk_100hz *= 4;
1628
1629 // Note: In rare cases, HW pixclk may differ from crtc's pixclk
1630 // slightly due to rounding issues in 10 kHz units.
1631 if (crtc_timing->pix_clk_100hz != pix_clk_100hz)
1632 return false;
1633
1634 if (!se->funcs->dp_get_pixel_format)
1635 return false;
1636
1637 if (!se->funcs->dp_get_pixel_format(
1638 se,
1639 &hw_crtc_timing.pixel_encoding,
1640 &hw_crtc_timing.display_color_depth))
1641 return false;
1642
1643 if (hw_crtc_timing.display_color_depth != crtc_timing->display_color_depth)
1644 return false;
1645
1646 if (hw_crtc_timing.pixel_encoding != crtc_timing->pixel_encoding)
1647 return false;
1648 }
1649
1650 if (link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED) {
1651 return false;
1652 }
1653
1654 if (is_edp_ilr_optimization_required(link, crtc_timing)) {
1655 DC_LOG_EVENT_LINK_TRAINING("Seamless boot disabled to optimize eDP link rate\n");
1656 return false;
1657 }
1658
1659 return true;
1660 }
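/*
 * Example (illustrative only): during boot a dm would compare the timing it
 * is about to set against what VBIOS/GOP already programmed, and only then
 * request the seamless boot optimization for the stream.
 *
 *	if (dc_validate_boot_timing(dc, sink, &stream->timing))
 *		stream->apply_seamless_boot_optimization = true;
 */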
1661
1662 static inline bool should_update_pipe_for_stream(
1663 struct dc_state *context,
1664 struct pipe_ctx *pipe_ctx,
1665 struct dc_stream_state *stream)
1666 {
1667 return (pipe_ctx->stream && pipe_ctx->stream == stream);
1668 }
1669
1670 static inline bool should_update_pipe_for_plane(
1671 struct dc_state *context,
1672 struct pipe_ctx *pipe_ctx,
1673 struct dc_plane_state *plane_state)
1674 {
1675 return (pipe_ctx->plane_state == plane_state);
1676 }
1677
1678 void dc_enable_stereo(
1679 struct dc *dc,
1680 struct dc_state *context,
1681 struct dc_stream_state *streams[],
1682 uint8_t stream_count)
1683 {
1684 int i, j;
1685 struct pipe_ctx *pipe;
1686
1687 for (i = 0; i < MAX_PIPES; i++) {
1688 if (context != NULL) {
1689 pipe = &context->res_ctx.pipe_ctx[i];
1690 } else {
1691 context = dc->current_state;
1692 pipe = &dc->current_state->res_ctx.pipe_ctx[i];
1693 }
1694
1695 for (j = 0; pipe && j < stream_count; j++) {
1696 if (should_update_pipe_for_stream(context, pipe, streams[j]) &&
1697 dc->hwss.setup_stereo)
1698 dc->hwss.setup_stereo(pipe, dc);
1699 }
1700 }
1701 }
1702
1703 void dc_trigger_sync(struct dc *dc, struct dc_state *context)
1704 {
1705 if (context->stream_count > 1 && !dc->debug.disable_timing_sync) {
1706 enable_timing_multisync(dc, context);
1707 program_timing_sync(dc, context);
1708 }
1709 }
1710
1711 static uint8_t get_stream_mask(struct dc *dc, struct dc_state *context)
1712 {
1713 int i;
1714 unsigned int stream_mask = 0;
1715
1716 for (i = 0; i < dc->res_pool->pipe_count; i++) {
1717 if (context->res_ctx.pipe_ctx[i].stream)
1718 stream_mask |= 1 << i;
1719 }
1720
1721 return stream_mask;
1722 }
1723
1724 void dc_z10_restore(const struct dc *dc)
1725 {
1726 if (dc->hwss.z10_restore)
1727 dc->hwss.z10_restore(dc);
1728 }
1729
1730 void dc_z10_save_init(struct dc *dc)
1731 {
1732 if (dc->hwss.z10_save_init)
1733 dc->hwss.z10_save_init(dc);
1734 }
1735
1736 /*
1737 * Applies the given context to HW and copies it into the current context.
1738 * It's up to the user to release the src context afterwards.
1739 */
1740 static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *context)
1741 {
1742 struct dc_bios *dcb = dc->ctx->dc_bios;
1743 enum dc_status result = DC_ERROR_UNEXPECTED;
1744 struct pipe_ctx *pipe;
1745 int i, k, l;
1746 struct dc_stream_state *dc_streams[MAX_STREAMS] = {0};
1747 struct dc_state *old_state;
1748 bool subvp_prev_use = false;
1749
1750 dc_z10_restore(dc);
1751 dc_allow_idle_optimizations(dc, false);
1752
1753 for (i = 0; i < dc->res_pool->pipe_count; i++) {
1754 struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
1755
1756 /* Check old context for SubVP */
1757 subvp_prev_use |= (old_pipe->stream && old_pipe->stream->mall_stream_config.type == SUBVP_PHANTOM);
1758 if (subvp_prev_use)
1759 break;
1760 }
1761
1762 for (i = 0; i < context->stream_count; i++)
1763 dc_streams[i] = context->streams[i];
1764
1765 if (!dcb->funcs->is_accelerated_mode(dcb)) {
1766 disable_vbios_mode_if_required(dc, context);
1767 dc->hwss.enable_accelerated_mode(dc, context);
1768 }
1769
1770 if (context->stream_count > get_seamless_boot_stream_count(context) ||
1771 context->stream_count == 0)
1772 dc->hwss.prepare_bandwidth(dc, context);
1773
1774 /* When SubVP is active, all HW programming must be done while
1775 * SubVP lock is acquired
1776 */
1777 if (dc->hwss.subvp_pipe_control_lock)
1778 dc->hwss.subvp_pipe_control_lock(dc, context, true, true, NULL, subvp_prev_use);
1779
1780 if (dc->debug.enable_double_buffered_dsc_pg_support)
1781 dc->hwss.update_dsc_pg(dc, context, false);
1782
1783 disable_dangling_plane(dc, context);
1784 /* re-program planes for existing stream, in case we need to
1785 * free up plane resource for later use
1786 */
1787 if (dc->hwss.apply_ctx_for_surface) {
1788 for (i = 0; i < context->stream_count; i++) {
1789 if (context->streams[i]->mode_changed)
1790 continue;
1791 apply_ctx_interdependent_lock(dc, context, context->streams[i], true);
1792 dc->hwss.apply_ctx_for_surface(
1793 dc, context->streams[i],
1794 context->stream_status[i].plane_count,
1795 context); /* use new pipe config in new context */
1796 apply_ctx_interdependent_lock(dc, context, context->streams[i], false);
1797 dc->hwss.post_unlock_program_front_end(dc, context);
1798 }
1799 }
1800
1801 /* Program hardware */
1802 for (i = 0; i < dc->res_pool->pipe_count; i++) {
1803 pipe = &context->res_ctx.pipe_ctx[i];
1804 dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe);
1805 }
1806
1807 result = dc->hwss.apply_ctx_to_hw(dc, context);
1808
1809 if (result != DC_OK) {
1810 /* Application of dc_state to hardware stopped. */
1811 dc->current_state->res_ctx.link_enc_cfg_ctx.mode = LINK_ENC_CFG_STEADY;
1812 return result;
1813 }
1814
1815 dc_trigger_sync(dc, context);
1816
1817 /* Program all planes within new context*/
1818 if (dc->hwss.program_front_end_for_ctx) {
1819 dc->hwss.interdependent_update_lock(dc, context, true);
1820 dc->hwss.program_front_end_for_ctx(dc, context);
1821 dc->hwss.interdependent_update_lock(dc, context, false);
1822 dc->hwss.post_unlock_program_front_end(dc, context);
1823 }
1824
1825 if (dc->hwss.commit_subvp_config)
1826 dc->hwss.commit_subvp_config(dc, context);
1827 if (dc->hwss.subvp_pipe_control_lock)
1828 dc->hwss.subvp_pipe_control_lock(dc, context, false, true, NULL, subvp_prev_use);
1829
1830 for (i = 0; i < context->stream_count; i++) {
1831 const struct dc_link *link = context->streams[i]->link;
1832
1833 if (!context->streams[i]->mode_changed)
1834 continue;
1835
1836 if (dc->hwss.apply_ctx_for_surface) {
1837 apply_ctx_interdependent_lock(dc, context, context->streams[i], true);
1838 dc->hwss.apply_ctx_for_surface(
1839 dc, context->streams[i],
1840 context->stream_status[i].plane_count,
1841 context);
1842 apply_ctx_interdependent_lock(dc, context, context->streams[i], false);
1843 dc->hwss.post_unlock_program_front_end(dc, context);
1844 }
1845
1846 /*
1847 * enable stereo
1848 * TODO rework dc_enable_stereo call to work with validation sets?
1849 */
1850 for (k = 0; k < MAX_PIPES; k++) {
1851 pipe = &context->res_ctx.pipe_ctx[k];
1852
1853 for (l = 0 ; pipe && l < context->stream_count; l++) {
1854 if (context->streams[l] &&
1855 context->streams[l] == pipe->stream &&
1856 dc->hwss.setup_stereo)
1857 dc->hwss.setup_stereo(pipe, dc);
1858 }
1859 }
1860
1861 CONN_MSG_MODE(link, "{%dx%d, %dx%d@%dKhz}",
1862 context->streams[i]->timing.h_addressable,
1863 context->streams[i]->timing.v_addressable,
1864 context->streams[i]->timing.h_total,
1865 context->streams[i]->timing.v_total,
1866 context->streams[i]->timing.pix_clk_100hz / 10);
1867 }
1868
1869 dc_enable_stereo(dc, context, dc_streams, context->stream_count);
1870
1871 if (context->stream_count > get_seamless_boot_stream_count(context) ||
1872 context->stream_count == 0) {
1873 /* Must wait for no flips to be pending before doing optimize bw */
1874 wait_for_no_pipes_pending(dc, context);
1875 /* pplib is notified if disp_num changed */
1876 dc->hwss.optimize_bandwidth(dc, context);
1877 }
1878
1879 if (dc->debug.enable_double_buffered_dsc_pg_support)
1880 dc->hwss.update_dsc_pg(dc, context, true);
1881
1882 if (dc->ctx->dce_version >= DCE_VERSION_MAX)
1883 TRACE_DCN_CLOCK_STATE(&context->bw_ctx.bw.dcn.clk);
1884 else
1885 TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce);
1886
1887 context->stream_mask = get_stream_mask(dc, context);
1888
1889 if (context->stream_mask != dc->current_state->stream_mask)
1890 dc_dmub_srv_notify_stream_mask(dc->ctx->dmub_srv, context->stream_mask);
1891
1892 for (i = 0; i < context->stream_count; i++)
1893 context->streams[i]->mode_changed = false;
1894
1895 old_state = dc->current_state;
1896 dc->current_state = context;
1897
1898 dc_release_state(old_state);
1899
1900 dc_retain_state(dc->current_state);
1901
1902 return result;
1903 }
1904
1905 bool dc_commit_state(struct dc *dc, struct dc_state *context)
1906 {
1907 enum dc_status result = DC_ERROR_UNEXPECTED;
1908 int i;
1909
1910 if (!context_changed(dc, context))
1911 return true;
1912
1913 DC_LOG_DC("%s: %d streams\n",
1914 __func__, context->stream_count);
1915
1916 for (i = 0; i < context->stream_count; i++) {
1917 struct dc_stream_state *stream = context->streams[i];
1918
1919 dc_stream_log(dc, stream);
1920 }
1921
1922 /*
1923 * Previous validation was performed with fast_validation = true and
1924 * the full DML state required for hardware programming was skipped.
1925 *
1926 * Re-validate here to calculate these parameters / watermarks.
1927 */
1928 result = dc_validate_global_state(dc, context, false);
1929 if (result != DC_OK) {
1930 DC_LOG_ERROR("DC commit global validation failure: %s (%d)",
1931 dc_status_to_str(result), result);
1932 return false;
1933 }
1934
1935 result = dc_commit_state_no_check(dc, context);
1936
1937 return (result == DC_OK);
1938 }
1939
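/*
 * Acquire (or release) the post-blend 3D LUT and shaper pair for a stream.
 * On acquire the MPCC instance is derived from the pipe that currently
 * drives the stream; release needs no pipe lookup.  Returns false if the
 * stream has no pipe (on acquire) or the pool does not implement the hooks.
 */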
1940 bool dc_acquire_release_mpc_3dlut(
1941 struct dc *dc, bool acquire,
1942 struct dc_stream_state *stream,
1943 struct dc_3dlut **lut,
1944 struct dc_transfer_func **shaper)
1945 {
1946 int pipe_idx;
1947 bool ret = false;
1948 bool found_pipe_idx = false;
1949 const struct resource_pool *pool = dc->res_pool;
1950 struct resource_context *res_ctx = &dc->current_state->res_ctx;
1951 int mpcc_id = 0;
1952
1953 if (pool && res_ctx) {
1954 if (acquire) {
1955 /*find pipe idx for the given stream*/
1956 for (pipe_idx = 0; pipe_idx < pool->pipe_count; pipe_idx++) {
1957 if (res_ctx->pipe_ctx[pipe_idx].stream == stream) {
1958 found_pipe_idx = true;
1959 mpcc_id = res_ctx->pipe_ctx[pipe_idx].plane_res.hubp->inst;
1960 break;
1961 }
1962 }
1963 } else
1964 found_pipe_idx = true;/*for release pipe_idx is not required*/
1965
1966 if (found_pipe_idx) {
1967 if (acquire && pool->funcs->acquire_post_bldn_3dlut)
1968 ret = pool->funcs->acquire_post_bldn_3dlut(res_ctx, pool, mpcc_id, lut, shaper);
1969 else if (!acquire && pool->funcs->release_post_bldn_3dlut)
1970 ret = pool->funcs->release_post_bldn_3dlut(res_ctx, pool, lut, shaper);
1971 }
1972 }
1973 return ret;
1974 }
1975
1976 static bool is_flip_pending_in_pipes(struct dc *dc, struct dc_state *context)
1977 {
1978 int i;
1979 struct pipe_ctx *pipe;
1980
1981 for (i = 0; i < MAX_PIPES; i++) {
1982 pipe = &context->res_ctx.pipe_ctx[i];
1983
1984 // Don't check flip pending on phantom pipes
1985 if (!pipe->plane_state || (pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM))
1986 continue;
1987
1988 /* Must set to false to start with, due to OR in update function */
1989 pipe->plane_state->status.is_flip_pending = false;
1990 dc->hwss.update_pending_status(pipe);
1991 if (pipe->plane_state->status.is_flip_pending)
1992 return true;
1993 }
1994 return false;
1995 }
1996
1997 /* Perform updates here which need to be deferred until next vupdate
1998 *
1999 * i.e. blnd lut, 3dlut, and shaper lut bypass regs are double buffered
2000 * but forcing lut memory to shutdown state is immediate. This causes
2001 * single frame corruption as lut gets disabled mid-frame unless shutdown
2002 * is deferred until after entering bypass.
2003 */
2004 static void process_deferred_updates(struct dc *dc)
2005 {
2006 int i = 0;
2007
2008 if (dc->debug.enable_mem_low_power.bits.cm) {
2009 ASSERT(dc->dcn_ip->max_num_dpp);
2010 for (i = 0; i < dc->dcn_ip->max_num_dpp; i++)
2011 if (dc->res_pool->dpps[i]->funcs->dpp_deferred_update)
2012 dc->res_pool->dpps[i]->funcs->dpp_deferred_update(dc->res_pool->dpps[i]);
2013 }
2014 }
2015
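/*
 * Post-update cleanup: once no flips are pending, disable pipes that no
 * longer have a stream or plane, flush deferred DPP updates and let the
 * clock manager lower bandwidth again.  Skipped while a seamless boot
 * stream is still active or when no optimization was requested.
 */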
2016 void dc_post_update_surfaces_to_stream(struct dc *dc)
2017 {
2018 int i;
2019 struct dc_state *context = dc->current_state;
2020
2021 if ((!dc->optimized_required) || get_seamless_boot_stream_count(context) > 0)
2022 return;
2023
2024 post_surface_trace(dc);
2025
2026 if (dc->ctx->dce_version >= DCE_VERSION_MAX)
2027 TRACE_DCN_CLOCK_STATE(&context->bw_ctx.bw.dcn.clk);
2028 else
2029 TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce);
2030
2031 if (is_flip_pending_in_pipes(dc, context))
2032 return;
2033
2034 for (i = 0; i < dc->res_pool->pipe_count; i++)
2035 if (context->res_ctx.pipe_ctx[i].stream == NULL ||
2036 context->res_ctx.pipe_ctx[i].plane_state == NULL) {
2037 context->res_ctx.pipe_ctx[i].pipe_idx = i;
2038 dc->hwss.disable_plane(dc, &context->res_ctx.pipe_ctx[i]);
2039 }
2040
2041 process_deferred_updates(dc);
2042
2043 dc->hwss.optimize_bandwidth(dc, context);
2044
2045 if (dc->debug.enable_double_buffered_dsc_pg_support)
2046 dc->hwss.update_dsc_pg(dc, context, true);
2047
2048 dc->optimized_required = false;
2049 dc->wm_optimized_required = false;
2050 }
2051
2052 static void init_state(struct dc *dc, struct dc_state *context)
2053 {
2054 /* Each context must have its own instance of VBA. In order to
2055 * initialize it and obtain the IP and SOC parameters, the base DML
2056 * instance from DC is copied into every new context.
2057 */
2058 memcpy(&context->bw_ctx.dml, &dc->dml, sizeof(struct display_mode_lib));
2059 }
2060
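/*
 * dc_state objects are reference counted: dc_create_state() returns a
 * context with refcount 1, dc_retain_state()/dc_release_state() adjust the
 * count and dc_state_free() runs when it drops to zero.
 *
 * Typical usage (illustrative sketch only):
 *
 *	new_ctx = dc_create_state(dc);
 *	dc_resource_state_copy_construct(dc->current_state, new_ctx);
 *	... modify new_ctx ...
 *	dc_commit_state(dc, new_ctx);
 *	dc_release_state(new_ctx);
 */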
2061 struct dc_state *dc_create_state(struct dc *dc)
2062 {
2063 struct dc_state *context = kvzalloc(sizeof(struct dc_state),
2064 GFP_KERNEL);
2065
2066 if (!context)
2067 return NULL;
2068
2069 init_state(dc, context);
2070
2071 kref_init(&context->refcount);
2072
2073 return context;
2074 }
2075
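/*
 * Deep-copy an existing state.  After the memcpy() the pipe_ctx neighbour
 * pointers (top/bottom/ODM) still reference the source context, so they are
 * re-pointed into the new context by pipe_idx, and every stream and plane
 * referenced by the copy gets an extra reference.
 */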
2076 struct dc_state *dc_copy_state(struct dc_state *src_ctx)
2077 {
2078 int i, j;
2079 struct dc_state *new_ctx = kvmalloc(sizeof(struct dc_state), GFP_KERNEL);
2080
2081 if (!new_ctx)
2082 return NULL;
2083 memcpy(new_ctx, src_ctx, sizeof(struct dc_state));
2084
2085 for (i = 0; i < MAX_PIPES; i++) {
2086 struct pipe_ctx *cur_pipe = &new_ctx->res_ctx.pipe_ctx[i];
2087
2088 if (cur_pipe->top_pipe)
2089 cur_pipe->top_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->top_pipe->pipe_idx];
2090
2091 if (cur_pipe->bottom_pipe)
2092 cur_pipe->bottom_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->bottom_pipe->pipe_idx];
2093
2094 if (cur_pipe->prev_odm_pipe)
2095 cur_pipe->prev_odm_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->prev_odm_pipe->pipe_idx];
2096
2097 if (cur_pipe->next_odm_pipe)
2098 cur_pipe->next_odm_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->next_odm_pipe->pipe_idx];
2099
2100 }
2101
2102 for (i = 0; i < new_ctx->stream_count; i++) {
2103 dc_stream_retain(new_ctx->streams[i]);
2104 for (j = 0; j < new_ctx->stream_status[i].plane_count; j++)
2105 dc_plane_state_retain(
2106 new_ctx->stream_status[i].plane_states[j]);
2107 }
2108
2109 kref_init(&new_ctx->refcount);
2110
2111 return new_ctx;
2112 }
2113
2114 void dc_retain_state(struct dc_state *context)
2115 {
2116 kref_get(&context->refcount);
2117 }
2118
2119 static void dc_state_free(struct kref *kref)
2120 {
2121 struct dc_state *context = container_of(kref, struct dc_state, refcount);
2122 dc_resource_state_destruct(context);
2123 kvfree(context);
2124 }
2125
2126 void dc_release_state(struct dc_state *context)
2127 {
2128 kref_put(&context->refcount, dc_state_free);
2129 }
2130
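/*
 * Route (or stop routing) the stereo sync signal through the generic GPIO
 * mux pin.  Returns true only if the pin lookup, open and mux configuration
 * all succeed; the temporary config allocation is freed on every path.
 */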
2131 bool dc_set_generic_gpio_for_stereo(bool enable,
2132 struct gpio_service *gpio_service)
2133 {
2134 enum gpio_result gpio_result = GPIO_RESULT_NON_SPECIFIC_ERROR;
2135 struct gpio_pin_info pin_info;
2136 struct gpio *generic;
2137 struct gpio_generic_mux_config *config = kzalloc(sizeof(struct gpio_generic_mux_config),
2138 GFP_KERNEL);
2139
2140 if (!config)
2141 return false;
2142 pin_info = dal_gpio_get_generic_pin_info(gpio_service, GPIO_ID_GENERIC, 0);
2143
2144 if (pin_info.mask == 0xFFFFFFFF || pin_info.offset == 0xFFFFFFFF) {
2145 kfree(config);
2146 return false;
2147 } else {
2148 generic = dal_gpio_service_create_generic_mux(
2149 gpio_service,
2150 pin_info.offset,
2151 pin_info.mask);
2152 }
2153
2154 if (!generic) {
2155 kfree(config);
2156 return false;
2157 }
2158
2159 gpio_result = dal_gpio_open(generic, GPIO_MODE_OUTPUT);
2160
2161 config->enable_output_from_mux = enable;
2162 config->mux_select = GPIO_SIGNAL_SOURCE_PASS_THROUGH_STEREO_SYNC;
2163
2164 if (gpio_result == GPIO_RESULT_OK)
2165 gpio_result = dal_mux_setup_config(generic, config);
2166
2167 if (gpio_result == GPIO_RESULT_OK) {
2168 dal_gpio_close(generic);
2169 dal_gpio_destroy_generic_mux(&generic);
2170 kfree(config);
2171 return true;
2172 } else {
2173 dal_gpio_close(generic);
2174 dal_gpio_destroy_generic_mux(&generic);
2175 kfree(config);
2176 return false;
2177 }
2178 }
2179
2180 static bool is_surface_in_context(
2181 const struct dc_state *context,
2182 const struct dc_plane_state *plane_state)
2183 {
2184 int j;
2185
2186 for (j = 0; j < MAX_PIPES; j++) {
2187 const struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
2188
2189 if (plane_state == pipe_ctx->plane_state) {
2190 return true;
2191 }
2192 }
2193
2194 return false;
2195 }
2196
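/*
 * The helpers below compare a requested dc_surface_update against the
 * current surface state, record what changed in surface->update_flags and
 * classify the update as FAST, MED or FULL.  elevate_update_type() only
 * ever raises the running classification, so the most expensive change wins.
 */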
2197 static enum surface_update_type get_plane_info_update_type(const struct dc_surface_update *u)
2198 {
2199 union surface_update_flags *update_flags = &u->surface->update_flags;
2200 enum surface_update_type update_type = UPDATE_TYPE_FAST;
2201
2202 if (!u->plane_info)
2203 return UPDATE_TYPE_FAST;
2204
2205 if (u->plane_info->color_space != u->surface->color_space) {
2206 update_flags->bits.color_space_change = 1;
2207 elevate_update_type(&update_type, UPDATE_TYPE_MED);
2208 }
2209
2210 if (u->plane_info->horizontal_mirror != u->surface->horizontal_mirror) {
2211 update_flags->bits.horizontal_mirror_change = 1;
2212 elevate_update_type(&update_type, UPDATE_TYPE_MED);
2213 }
2214
2215 if (u->plane_info->rotation != u->surface->rotation) {
2216 update_flags->bits.rotation_change = 1;
2217 elevate_update_type(&update_type, UPDATE_TYPE_FULL);
2218 }
2219
2220 if (u->plane_info->format != u->surface->format) {
2221 update_flags->bits.pixel_format_change = 1;
2222 elevate_update_type(&update_type, UPDATE_TYPE_FULL);
2223 }
2224
2225 if (u->plane_info->stereo_format != u->surface->stereo_format) {
2226 update_flags->bits.stereo_format_change = 1;
2227 elevate_update_type(&update_type, UPDATE_TYPE_FULL);
2228 }
2229
2230 if (u->plane_info->per_pixel_alpha != u->surface->per_pixel_alpha) {
2231 update_flags->bits.per_pixel_alpha_change = 1;
2232 elevate_update_type(&update_type, UPDATE_TYPE_MED);
2233 }
2234
2235 if (u->plane_info->global_alpha_value != u->surface->global_alpha_value) {
2236 update_flags->bits.global_alpha_change = 1;
2237 elevate_update_type(&update_type, UPDATE_TYPE_MED);
2238 }
2239
2240 if (u->plane_info->dcc.enable != u->surface->dcc.enable
2241 || u->plane_info->dcc.dcc_ind_blk != u->surface->dcc.dcc_ind_blk
2242 || u->plane_info->dcc.meta_pitch != u->surface->dcc.meta_pitch) {
2243 /* During DCC on/off, stutter period is calculated before
2244 * DCC has fully transitioned. This results in incorrect
2245 * stutter period calculation. Triggering a full update will
2246 * recalculate stutter period.
2247 */
2248 update_flags->bits.dcc_change = 1;
2249 elevate_update_type(&update_type, UPDATE_TYPE_FULL);
2250 }
2251
2252 if (resource_pixel_format_to_bpp(u->plane_info->format) !=
2253 resource_pixel_format_to_bpp(u->surface->format)) {
2254 /* different bytes per element will require full bandwidth
2255 * and DML calculation
2256 */
2257 update_flags->bits.bpp_change = 1;
2258 elevate_update_type(&update_type, UPDATE_TYPE_FULL);
2259 }
2260
2261 if (u->plane_info->plane_size.surface_pitch != u->surface->plane_size.surface_pitch
2262 || u->plane_info->plane_size.chroma_pitch != u->surface->plane_size.chroma_pitch) {
2263 update_flags->bits.plane_size_change = 1;
2264 elevate_update_type(&update_type, UPDATE_TYPE_MED);
2265 }
2266
2267
2268 if (memcmp(&u->plane_info->tiling_info, &u->surface->tiling_info,
2269 sizeof(union dc_tiling_info)) != 0) {
2270 update_flags->bits.swizzle_change = 1;
2271 elevate_update_type(&update_type, UPDATE_TYPE_MED);
2272
2273 /* todo: the below is HW dependent, we should add a hook to
2274 * the DCE/N resource and validate it there.
2275 */
2276 if (u->plane_info->tiling_info.gfx9.swizzle != DC_SW_LINEAR) {
2277 /* swizzled mode requires RQ to be set up properly,
2278 * thus we need to run DML to calculate the RQ settings
2279 */
2280 update_flags->bits.bandwidth_change = 1;
2281 elevate_update_type(&update_type, UPDATE_TYPE_FULL);
2282 }
2283 }
2284
2285 /* This should be UPDATE_TYPE_FAST if nothing has changed. */
2286 return update_type;
2287 }
2288
2289 static enum surface_update_type get_scaling_info_update_type(
2290 const struct dc_surface_update *u)
2291 {
2292 union surface_update_flags *update_flags = &u->surface->update_flags;
2293
2294 if (!u->scaling_info)
2295 return UPDATE_TYPE_FAST;
2296
2297 if (u->scaling_info->clip_rect.width != u->surface->clip_rect.width
2298 || u->scaling_info->clip_rect.height != u->surface->clip_rect.height
2299 || u->scaling_info->dst_rect.width != u->surface->dst_rect.width
2300 || u->scaling_info->dst_rect.height != u->surface->dst_rect.height
2301 || u->scaling_info->scaling_quality.integer_scaling !=
2302 u->surface->scaling_quality.integer_scaling
2303 ) {
2304 update_flags->bits.scaling_change = 1;
2305
2306 if ((u->scaling_info->dst_rect.width < u->surface->dst_rect.width
2307 || u->scaling_info->dst_rect.height < u->surface->dst_rect.height)
2308 && (u->scaling_info->dst_rect.width < u->surface->src_rect.width
2309 || u->scaling_info->dst_rect.height < u->surface->src_rect.height))
2310 /* Making dst rect smaller requires a bandwidth change */
2311 update_flags->bits.bandwidth_change = 1;
2312 }
2313
2314 if (u->scaling_info->src_rect.width != u->surface->src_rect.width
2315 || u->scaling_info->src_rect.height != u->surface->src_rect.height) {
2316
2317 update_flags->bits.scaling_change = 1;
2318 if (u->scaling_info->src_rect.width > u->surface->src_rect.width
2319 || u->scaling_info->src_rect.height > u->surface->src_rect.height)
2320 /* Making src rect bigger requires a bandwidth change */
2321 update_flags->bits.clock_change = 1;
2322 }
2323
2324 if (u->scaling_info->src_rect.x != u->surface->src_rect.x
2325 || u->scaling_info->src_rect.y != u->surface->src_rect.y
2326 || u->scaling_info->clip_rect.x != u->surface->clip_rect.x
2327 || u->scaling_info->clip_rect.y != u->surface->clip_rect.y
2328 || u->scaling_info->dst_rect.x != u->surface->dst_rect.x
2329 || u->scaling_info->dst_rect.y != u->surface->dst_rect.y)
2330 update_flags->bits.position_change = 1;
2331
2332 if (update_flags->bits.clock_change
2333 || update_flags->bits.bandwidth_change
2334 || update_flags->bits.scaling_change)
2335 return UPDATE_TYPE_FULL;
2336
2337 if (update_flags->bits.position_change)
2338 return UPDATE_TYPE_MED;
2339
2340 return UPDATE_TYPE_FAST;
2341 }
2342
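/*
 * det_surface_update() combines the plane-info and scaling checks with the
 * per-field flags below.  For example, a page flip that only provides
 * flip_addr stays UPDATE_TYPE_FAST, while a rotation or pixel format change
 * forces UPDATE_TYPE_FULL.
 */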
2343 static enum surface_update_type det_surface_update(const struct dc *dc,
2344 const struct dc_surface_update *u)
2345 {
2346 const struct dc_state *context = dc->current_state;
2347 enum surface_update_type type;
2348 enum surface_update_type overall_type = UPDATE_TYPE_FAST;
2349 union surface_update_flags *update_flags = &u->surface->update_flags;
2350
2351 if (u->flip_addr)
2352 update_flags->bits.addr_update = 1;
2353
2354 if (!is_surface_in_context(context, u->surface) || u->surface->force_full_update) {
2355 update_flags->raw = 0xFFFFFFFF;
2356 return UPDATE_TYPE_FULL;
2357 }
2358
2359 update_flags->raw = 0; // Reset all flags
2360
2361 type = get_plane_info_update_type(u);
2362 elevate_update_type(&overall_type, type);
2363
2364 type = get_scaling_info_update_type(u);
2365 elevate_update_type(&overall_type, type);
2366
2367 if (u->flip_addr) {
2368 update_flags->bits.addr_update = 1;
2369 if (u->flip_addr->address.tmz_surface != u->surface->address.tmz_surface) {
2370 update_flags->bits.tmz_changed = 1;
2371 elevate_update_type(&overall_type, UPDATE_TYPE_FULL);
2372 }
2373 }
2374 if (u->in_transfer_func)
2375 update_flags->bits.in_transfer_func_change = 1;
2376
2377 if (u->input_csc_color_matrix)
2378 update_flags->bits.input_csc_change = 1;
2379
2380 if (u->coeff_reduction_factor)
2381 update_flags->bits.coeff_reduction_change = 1;
2382
2383 if (u->gamut_remap_matrix)
2384 update_flags->bits.gamut_remap_change = 1;
2385
2386 if (u->gamma) {
2387 enum surface_pixel_format format = SURFACE_PIXEL_FORMAT_GRPH_BEGIN;
2388
2389 if (u->plane_info)
2390 format = u->plane_info->format;
2391 else if (u->surface)
2392 format = u->surface->format;
2393
2394 if (dce_use_lut(format))
2395 update_flags->bits.gamma_change = 1;
2396 }
2397
2398 if (u->lut3d_func || u->func_shaper)
2399 update_flags->bits.lut_3d = 1;
2400
2401 if (u->hdr_mult.value)
2402 if (u->hdr_mult.value != u->surface->hdr_mult.value) {
2403 update_flags->bits.hdr_mult = 1;
2404 elevate_update_type(&overall_type, UPDATE_TYPE_MED);
2405 }
2406
2407 if (update_flags->bits.in_transfer_func_change) {
2408 type = UPDATE_TYPE_MED;
2409 elevate_update_type(&overall_type, type);
2410 }
2411
2412 if (update_flags->bits.input_csc_change
2413 || update_flags->bits.coeff_reduction_change
2414 || update_flags->bits.lut_3d
2415 || update_flags->bits.gamma_change
2416 || update_flags->bits.gamut_remap_change) {
2417 type = UPDATE_TYPE_FULL;
2418 elevate_update_type(&overall_type, type);
2419 }
2420
2421 return overall_type;
2422 }
2423
2424 static enum surface_update_type check_update_surfaces_for_stream(
2425 struct dc *dc,
2426 struct dc_surface_update *updates,
2427 int surface_count,
2428 struct dc_stream_update *stream_update,
2429 const struct dc_stream_status *stream_status)
2430 {
2431 int i;
2432 enum surface_update_type overall_type = UPDATE_TYPE_FAST;
2433
2434 if (dc->idle_optimizations_allowed)
2435 overall_type = UPDATE_TYPE_FULL;
2436
2437 if (stream_status == NULL || stream_status->plane_count != surface_count)
2438 overall_type = UPDATE_TYPE_FULL;
2439
2440 if (stream_update && stream_update->pending_test_pattern) {
2441 overall_type = UPDATE_TYPE_FULL;
2442 }
2443
2444 /* some stream updates require passive update */
2445 if (stream_update) {
2446 union stream_update_flags *su_flags = &stream_update->stream->update_flags;
2447
2448 if ((stream_update->src.height != 0 && stream_update->src.width != 0) ||
2449 (stream_update->dst.height != 0 && stream_update->dst.width != 0) ||
2450 stream_update->integer_scaling_update)
2451 su_flags->bits.scaling = 1;
2452
2453 if (stream_update->out_transfer_func)
2454 su_flags->bits.out_tf = 1;
2455
2456 if (stream_update->abm_level)
2457 su_flags->bits.abm_level = 1;
2458
2459 if (stream_update->dpms_off)
2460 su_flags->bits.dpms_off = 1;
2461
2462 if (stream_update->gamut_remap)
2463 su_flags->bits.gamut_remap = 1;
2464
2465 if (stream_update->wb_update)
2466 su_flags->bits.wb_update = 1;
2467
2468 if (stream_update->dsc_config)
2469 su_flags->bits.dsc_changed = 1;
2470
2471 if (stream_update->mst_bw_update)
2472 su_flags->bits.mst_bw = 1;
2473 if (stream_update->crtc_timing_adjust && dc_extended_blank_supported(dc))
2474 su_flags->bits.crtc_timing_adjust = 1;
2475
2476 if (su_flags->raw != 0)
2477 overall_type = UPDATE_TYPE_FULL;
2478
2479 if (stream_update->output_csc_transform || stream_update->output_color_space)
2480 su_flags->bits.out_csc = 1;
2481 }
2482
2483 for (i = 0 ; i < surface_count; i++) {
2484 enum surface_update_type type =
2485 det_surface_update(dc, &updates[i]);
2486
2487 elevate_update_type(&overall_type, type);
2488 }
2489
2490 return overall_type;
2491 }
2492
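/*
 * A surface counts as "fullscreen video" when its clip rect is centered in
 * the stream source (top/bottom and left/right margins equal to within one
 * pixel) and the clip origin lies within a 4 pixel border allowance
 * (clip_x or clip_y <= 4).  Example: src 1920x1080 with clip
 * {x=0, y=2, 1920x1076} passes, since both vertical margins are 2 px and
 * clip_x/clip_y are within the allowance.
 */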
2493 static bool dc_check_is_fullscreen_video(struct rect src, struct rect clip_rect)
2494 {
2495 int view_height, view_width, clip_x, clip_y, clip_width, clip_height;
2496
2497 view_height = src.height;
2498 view_width = src.width;
2499
2500 clip_x = clip_rect.x;
2501 clip_y = clip_rect.y;
2502
2503 clip_width = clip_rect.width;
2504 clip_height = clip_rect.height;
2505
2506 /* check for centered video accounting for off by 1 scaling truncation */
2507 if ((view_height - clip_y - clip_height <= clip_y + 1) &&
2508 (view_width - clip_x - clip_width <= clip_x + 1) &&
2509 (view_height - clip_y - clip_height >= clip_y - 1) &&
2510 (view_width - clip_x - clip_width >= clip_x - 1)) {
2511
2512 /* when the OS scales up/down to letter box, it may end up
2513 * with a few blank pixels on the border due to truncation.
2514 * Add an offset margin to account for this
2515 */
2516 if (clip_x <= 4 || clip_y <= 4)
2517 return true;
2518 }
2519
2520 return false;
2521 }
2522
2523 static enum surface_update_type check_boundary_crossing_for_windowed_mpo_with_odm(struct dc *dc,
2524 struct dc_surface_update *srf_updates, int surface_count,
2525 enum surface_update_type update_type)
2526 {
2527 enum surface_update_type new_update_type = update_type;
2528 int i, j;
2529 struct pipe_ctx *pipe = NULL;
2530 struct dc_stream_state *stream;
2531
2532 /* Check that we are in windowed MPO with ODM
2533 * - look for MPO pipe by scanning pipes for first pipe matching
2534 * surface that has moved ( position change )
2535 * - MPO pipe will have top pipe
2536 * - check that top pipe has ODM pointer
2537 */
2538 if ((surface_count > 1) && dc->config.enable_windowed_mpo_odm) {
2539 for (i = 0; i < surface_count; i++) {
2540 if (srf_updates[i].surface && srf_updates[i].scaling_info
2541 && srf_updates[i].surface->update_flags.bits.position_change) {
2542
2543 for (j = 0; j < dc->res_pool->pipe_count; j++) {
2544 if (srf_updates[i].surface == dc->current_state->res_ctx.pipe_ctx[j].plane_state) {
2545 pipe = &dc->current_state->res_ctx.pipe_ctx[j];
2546 stream = pipe->stream;
2547 break;
2548 }
2549 }
2550
2551 if (pipe && pipe->top_pipe && (get_num_odm_splits(pipe->top_pipe) > 0) && stream
2552 && !dc_check_is_fullscreen_video(stream->src, srf_updates[i].scaling_info->clip_rect)) {
2553 struct rect old_clip_rect, new_clip_rect;
2554 bool old_clip_rect_left, old_clip_rect_right, old_clip_rect_middle;
2555 bool new_clip_rect_left, new_clip_rect_right, new_clip_rect_middle;
2556
2557 old_clip_rect = srf_updates[i].surface->clip_rect;
2558 new_clip_rect = srf_updates[i].scaling_info->clip_rect;
2559
2560 old_clip_rect_left = ((old_clip_rect.x + old_clip_rect.width) <= (stream->src.x + (stream->src.width/2)));
2561 old_clip_rect_right = (old_clip_rect.x >= (stream->src.x + (stream->src.width/2)));
2562 old_clip_rect_middle = !old_clip_rect_left && !old_clip_rect_right;
2563
2564 new_clip_rect_left = ((new_clip_rect.x + new_clip_rect.width) <= (stream->src.x + (stream->src.width/2)));
2565 new_clip_rect_right = (new_clip_rect.x >= (stream->src.x + (stream->src.width/2)));
2566 new_clip_rect_middle = !new_clip_rect_left && !new_clip_rect_right;
2567
2568 if (old_clip_rect_left && new_clip_rect_middle)
2569 new_update_type = UPDATE_TYPE_FULL;
2570 else if (old_clip_rect_middle && new_clip_rect_right)
2571 new_update_type = UPDATE_TYPE_FULL;
2572 else if (old_clip_rect_right && new_clip_rect_middle)
2573 new_update_type = UPDATE_TYPE_FULL;
2574 else if (old_clip_rect_middle && new_clip_rect_left)
2575 new_update_type = UPDATE_TYPE_FULL;
2576 }
2577 }
2578 }
2579 }
2580 return new_update_type;
2581 }
2582
2583 /*
2584 * dc_check_update_surfaces_for_stream() - Determine update type (fast, med, or full)
2585 *
2586 * See :c:type:`enum surface_update_type <surface_update_type>` for explanation of update types
2587 */
2588 enum surface_update_type dc_check_update_surfaces_for_stream(
2589 struct dc *dc,
2590 struct dc_surface_update *updates,
2591 int surface_count,
2592 struct dc_stream_update *stream_update,
2593 const struct dc_stream_status *stream_status)
2594 {
2595 int i;
2596 enum surface_update_type type;
2597
2598 if (stream_update)
2599 stream_update->stream->update_flags.raw = 0;
2600 for (i = 0; i < surface_count; i++)
2601 updates[i].surface->update_flags.raw = 0;
2602
2603 type = check_update_surfaces_for_stream(dc, updates, surface_count, stream_update, stream_status);
2604 if (type == UPDATE_TYPE_FULL) {
2605 if (stream_update) {
2606 uint32_t dsc_changed = stream_update->stream->update_flags.bits.dsc_changed;
2607 stream_update->stream->update_flags.raw = 0xFFFFFFFF;
2608 stream_update->stream->update_flags.bits.dsc_changed = dsc_changed;
2609 }
2610 for (i = 0; i < surface_count; i++)
2611 updates[i].surface->update_flags.raw = 0xFFFFFFFF;
2612 }
2613
2614 if (type == UPDATE_TYPE_MED)
2615 type = check_boundary_crossing_for_windowed_mpo_with_odm(dc,
2616 updates, surface_count, type);
2617
2618 if (type == UPDATE_TYPE_FAST) {
2619 // If there's an available clock comparator, we use that.
2620 if (dc->clk_mgr->funcs->are_clock_states_equal) {
2621 if (!dc->clk_mgr->funcs->are_clock_states_equal(&dc->clk_mgr->clks, &dc->current_state->bw_ctx.bw.dcn.clk))
2622 dc->optimized_required = true;
2623 // Else we fallback to mem compare.
2624 } else if (memcmp(&dc->current_state->bw_ctx.bw.dcn.clk, &dc->clk_mgr->clks, offsetof(struct dc_clocks, prev_p_state_change_support)) != 0) {
2625 dc->optimized_required = true;
2626 }
2627
2628 dc->optimized_required |= dc->wm_optimized_required;
2629 }
2630
2631 return type;
2632 }
2633
2634 static struct dc_stream_status *stream_get_status(
2635 struct dc_state *ctx,
2636 struct dc_stream_state *stream)
2637 {
2638 uint8_t i;
2639
2640 for (i = 0; i < ctx->stream_count; i++) {
2641 if (stream == ctx->streams[i]) {
2642 return &ctx->stream_status[i];
2643 }
2644 }
2645
2646 return NULL;
2647 }
2648
2649 static const enum surface_update_type update_surface_trace_level = UPDATE_TYPE_FULL;
2650
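/*
 * Copy only the fields actually present in the sparse dc_surface_update
 * into the plane state.  Gamma, transfer function and LUT payloads are
 * copied into the surface's own allocations rather than re-pointed, so the
 * caller keeps ownership of the update structures.
 */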
2651 static void copy_surface_update_to_plane(
2652 struct dc_plane_state *surface,
2653 struct dc_surface_update *srf_update)
2654 {
2655 if (srf_update->flip_addr) {
2656 surface->address = srf_update->flip_addr->address;
2657 surface->flip_immediate =
2658 srf_update->flip_addr->flip_immediate;
2659 surface->time.time_elapsed_in_us[surface->time.index] =
2660 srf_update->flip_addr->flip_timestamp_in_us -
2661 surface->time.prev_update_time_in_us;
2662 surface->time.prev_update_time_in_us =
2663 srf_update->flip_addr->flip_timestamp_in_us;
2664 surface->time.index++;
2665 if (surface->time.index >= DC_PLANE_UPDATE_TIMES_MAX)
2666 surface->time.index = 0;
2667
2668 surface->triplebuffer_flips = srf_update->flip_addr->triplebuffer_flips;
2669 }
2670
2671 if (srf_update->scaling_info) {
2672 surface->scaling_quality =
2673 srf_update->scaling_info->scaling_quality;
2674 surface->dst_rect =
2675 srf_update->scaling_info->dst_rect;
2676 surface->src_rect =
2677 srf_update->scaling_info->src_rect;
2678 surface->clip_rect =
2679 srf_update->scaling_info->clip_rect;
2680 }
2681
2682 if (srf_update->plane_info) {
2683 surface->color_space =
2684 srf_update->plane_info->color_space;
2685 surface->format =
2686 srf_update->plane_info->format;
2687 surface->plane_size =
2688 srf_update->plane_info->plane_size;
2689 surface->rotation =
2690 srf_update->plane_info->rotation;
2691 surface->horizontal_mirror =
2692 srf_update->plane_info->horizontal_mirror;
2693 surface->stereo_format =
2694 srf_update->plane_info->stereo_format;
2695 surface->tiling_info =
2696 srf_update->plane_info->tiling_info;
2697 surface->visible =
2698 srf_update->plane_info->visible;
2699 surface->per_pixel_alpha =
2700 srf_update->plane_info->per_pixel_alpha;
2701 surface->global_alpha =
2702 srf_update->plane_info->global_alpha;
2703 surface->global_alpha_value =
2704 srf_update->plane_info->global_alpha_value;
2705 surface->dcc =
2706 srf_update->plane_info->dcc;
2707 surface->layer_index =
2708 srf_update->plane_info->layer_index;
2709 }
2710
2711 if (srf_update->gamma &&
2712 (surface->gamma_correction !=
2713 srf_update->gamma)) {
2714 memcpy(&surface->gamma_correction->entries,
2715 &srf_update->gamma->entries,
2716 sizeof(struct dc_gamma_entries));
2717 surface->gamma_correction->is_identity =
2718 srf_update->gamma->is_identity;
2719 surface->gamma_correction->num_entries =
2720 srf_update->gamma->num_entries;
2721 surface->gamma_correction->type =
2722 srf_update->gamma->type;
2723 }
2724
2725 if (srf_update->in_transfer_func &&
2726 (surface->in_transfer_func !=
2727 srf_update->in_transfer_func)) {
2728 surface->in_transfer_func->sdr_ref_white_level =
2729 srf_update->in_transfer_func->sdr_ref_white_level;
2730 surface->in_transfer_func->tf =
2731 srf_update->in_transfer_func->tf;
2732 surface->in_transfer_func->type =
2733 srf_update->in_transfer_func->type;
2734 memcpy(&surface->in_transfer_func->tf_pts,
2735 &srf_update->in_transfer_func->tf_pts,
2736 sizeof(struct dc_transfer_func_distributed_points));
2737 }
2738
2739 if (srf_update->func_shaper &&
2740 (surface->in_shaper_func !=
2741 srf_update->func_shaper))
2742 memcpy(surface->in_shaper_func, srf_update->func_shaper,
2743 sizeof(*surface->in_shaper_func));
2744
2745 if (srf_update->lut3d_func &&
2746 (surface->lut3d_func !=
2747 srf_update->lut3d_func))
2748 memcpy(surface->lut3d_func, srf_update->lut3d_func,
2749 sizeof(*surface->lut3d_func));
2750
2751 if (srf_update->hdr_mult.value)
2752 surface->hdr_mult =
2753 srf_update->hdr_mult;
2754
2755 if (srf_update->blend_tf &&
2756 (surface->blend_tf !=
2757 srf_update->blend_tf))
2758 memcpy(surface->blend_tf, srf_update->blend_tf,
2759 sizeof(*surface->blend_tf));
2760
2761 if (srf_update->input_csc_color_matrix)
2762 surface->input_csc_color_matrix =
2763 *srf_update->input_csc_color_matrix;
2764
2765 if (srf_update->coeff_reduction_factor)
2766 surface->coeff_reduction_factor =
2767 *srf_update->coeff_reduction_factor;
2768
2769 if (srf_update->gamut_remap_matrix)
2770 surface->gamut_remap_matrix =
2771 *srf_update->gamut_remap_matrix;
2772 }
2773
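/*
 * Stream-level counterpart of copy_surface_update_to_plane().  A DSC config
 * change is first validated against a temporary copy of the current state;
 * if bandwidth validation fails the old DSC settings are restored and
 * update->dsc_config is cleared so it is not applied later.
 */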
2774 static void copy_stream_update_to_stream(struct dc *dc,
2775 struct dc_state *context,
2776 struct dc_stream_state *stream,
2777 struct dc_stream_update *update)
2778 {
2779 struct dc_context *dc_ctx = dc->ctx;
2780
2781 if (update == NULL || stream == NULL)
2782 return;
2783
2784 if (update->src.height && update->src.width)
2785 stream->src = update->src;
2786
2787 if (update->dst.height && update->dst.width)
2788 stream->dst = update->dst;
2789
2790 if (update->out_transfer_func &&
2791 stream->out_transfer_func != update->out_transfer_func) {
2792 stream->out_transfer_func->sdr_ref_white_level =
2793 update->out_transfer_func->sdr_ref_white_level;
2794 stream->out_transfer_func->tf = update->out_transfer_func->tf;
2795 stream->out_transfer_func->type =
2796 update->out_transfer_func->type;
2797 memcpy(&stream->out_transfer_func->tf_pts,
2798 &update->out_transfer_func->tf_pts,
2799 sizeof(struct dc_transfer_func_distributed_points));
2800 }
2801
2802 if (update->hdr_static_metadata)
2803 stream->hdr_static_metadata = *update->hdr_static_metadata;
2804
2805 if (update->abm_level)
2806 stream->abm_level = *update->abm_level;
2807
2808 if (update->periodic_interrupt)
2809 stream->periodic_interrupt = *update->periodic_interrupt;
2810
2811 if (update->gamut_remap)
2812 stream->gamut_remap_matrix = *update->gamut_remap;
2813
2814 /* Note: updating this after mode set is currently not a use case;
2815 * however, if it arises, OCSC would need to be reprogrammed at a
2816 * minimum
2817 */
2818 if (update->output_color_space)
2819 stream->output_color_space = *update->output_color_space;
2820
2821 if (update->output_csc_transform)
2822 stream->csc_color_matrix = *update->output_csc_transform;
2823
2824 if (update->vrr_infopacket)
2825 stream->vrr_infopacket = *update->vrr_infopacket;
2826
2827 if (update->allow_freesync)
2828 stream->allow_freesync = *update->allow_freesync;
2829
2830 if (update->vrr_active_variable)
2831 stream->vrr_active_variable = *update->vrr_active_variable;
2832
2833 if (update->crtc_timing_adjust)
2834 stream->adjust = *update->crtc_timing_adjust;
2835
2836 if (update->dpms_off)
2837 stream->dpms_off = *update->dpms_off;
2838
2839 if (update->hfvsif_infopacket)
2840 stream->hfvsif_infopacket = *update->hfvsif_infopacket;
2841
2842 if (update->vtem_infopacket)
2843 stream->vtem_infopacket = *update->vtem_infopacket;
2844
2845 if (update->vsc_infopacket)
2846 stream->vsc_infopacket = *update->vsc_infopacket;
2847
2848 if (update->vsp_infopacket)
2849 stream->vsp_infopacket = *update->vsp_infopacket;
2850
2851 if (update->dither_option)
2852 stream->dither_option = *update->dither_option;
2853
2854 if (update->pending_test_pattern)
2855 stream->test_pattern = *update->pending_test_pattern;
2856 /* update current stream with writeback info */
2857 if (update->wb_update) {
2858 int i;
2859
2860 stream->num_wb_info = update->wb_update->num_wb_info;
2861 ASSERT(stream->num_wb_info <= MAX_DWB_PIPES);
2862 for (i = 0; i < stream->num_wb_info; i++)
2863 stream->writeback_info[i] =
2864 update->wb_update->writeback_info[i];
2865 }
2866 if (update->dsc_config) {
2867 struct dc_dsc_config old_dsc_cfg = stream->timing.dsc_cfg;
2868 uint32_t old_dsc_enabled = stream->timing.flags.DSC;
2869 uint32_t enable_dsc = (update->dsc_config->num_slices_h != 0 &&
2870 update->dsc_config->num_slices_v != 0);
2871
2872 /* Use a temporary context for validating the new DSC config */
2873 struct dc_state *dsc_validate_context = dc_create_state(dc);
2874
2875 if (dsc_validate_context) {
2876 dc_resource_state_copy_construct(dc->current_state, dsc_validate_context);
2877
2878 stream->timing.dsc_cfg = *update->dsc_config;
2879 stream->timing.flags.DSC = enable_dsc;
2880 if (!dc->res_pool->funcs->validate_bandwidth(dc, dsc_validate_context, true)) {
2881 stream->timing.dsc_cfg = old_dsc_cfg;
2882 stream->timing.flags.DSC = old_dsc_enabled;
2883 update->dsc_config = NULL;
2884 }
2885
2886 dc_release_state(dsc_validate_context);
2887 } else {
2888 DC_ERROR("Failed to allocate new validate context for DSC change\n");
2889 update->dsc_config = NULL;
2890 }
2891 }
2892 }
2893
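/*
 * Determine the update type, fold the updates into the stream and plane
 * state and, for FULL updates, build a fresh dc_state: phantom pipes are
 * removed, the stream's planes are re-added and bandwidth is re-validated.
 * On success the new context and update type are returned through the out
 * parameters.
 */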
2894 static bool update_planes_and_stream_state(struct dc *dc,
2895 struct dc_surface_update *srf_updates, int surface_count,
2896 struct dc_stream_state *stream,
2897 struct dc_stream_update *stream_update,
2898 enum surface_update_type *new_update_type,
2899 struct dc_state **new_context)
2900 {
2901 struct dc_state *context;
2902 int i, j;
2903 enum surface_update_type update_type;
2904 const struct dc_stream_status *stream_status;
2905 struct dc_context *dc_ctx = dc->ctx;
2906
2907 stream_status = dc_stream_get_status(stream);
2908
2909 if (!stream_status) {
2910 if (surface_count) /* Only an error condition if surf_count non-zero*/
2911 ASSERT(false);
2912
2913 return false; /* Cannot commit surface to stream that is not committed */
2914 }
2915
2916 context = dc->current_state;
2917
2918 update_type = dc_check_update_surfaces_for_stream(
2919 dc, srf_updates, surface_count, stream_update, stream_status);
2920
2921 /* update current stream with the new updates */
2922 copy_stream_update_to_stream(dc, context, stream, stream_update);
2923
2924 /* do not perform surface update if surface has invalid dimensions
2925 * (all zero) and no scaling_info is provided
2926 */
2927 if (surface_count > 0) {
2928 for (i = 0; i < surface_count; i++) {
2929 if ((srf_updates[i].surface->src_rect.width == 0 ||
2930 srf_updates[i].surface->src_rect.height == 0 ||
2931 srf_updates[i].surface->dst_rect.width == 0 ||
2932 srf_updates[i].surface->dst_rect.height == 0) &&
2933 (!srf_updates[i].scaling_info ||
2934 srf_updates[i].scaling_info->src_rect.width == 0 ||
2935 srf_updates[i].scaling_info->src_rect.height == 0 ||
2936 srf_updates[i].scaling_info->dst_rect.width == 0 ||
2937 srf_updates[i].scaling_info->dst_rect.height == 0)) {
2938 DC_ERROR("Invalid src/dst rects in surface update!\n");
2939 return false;
2940 }
2941 }
2942 }
2943
2944 if (update_type >= update_surface_trace_level)
2945 update_surface_trace(dc, srf_updates, surface_count);
2946
2947 if (update_type >= UPDATE_TYPE_FULL) {
2948 struct dc_plane_state *new_planes[MAX_SURFACES] = {0};
2949
2950 for (i = 0; i < surface_count; i++)
2951 new_planes[i] = srf_updates[i].surface;
2952
2953 /* initialize scratch memory for building context */
2954 context = dc_create_state(dc);
2955 if (context == NULL) {
2956 DC_ERROR("Failed to allocate new validate context!\n");
2957 return false;
2958 }
2959
2960 dc_resource_state_copy_construct(
2961 dc->current_state, context);
2962
2963 /* For each full update, remove all existing phantom pipes first.
2964 * Ensures that we have enough pipes for newly added MPO planes
2965 */
2966 if (dc->res_pool->funcs->remove_phantom_pipes)
2967 dc->res_pool->funcs->remove_phantom_pipes(dc, context);
2968
2969 /*remove old surfaces from context */
2970 if (!dc_rem_all_planes_for_stream(dc, stream, context)) {
2971
2972 BREAK_TO_DEBUGGER();
2973 goto fail;
2974 }
2975
2976 /* add surface to context */
2977 if (!dc_add_all_planes_for_stream(dc, stream, new_planes, surface_count, context)) {
2978
2979 BREAK_TO_DEBUGGER();
2980 goto fail;
2981 }
2982 }
2983
2984 /* save update parameters into surface */
2985 for (i = 0; i < surface_count; i++) {
2986 struct dc_plane_state *surface = srf_updates[i].surface;
2987
2988 copy_surface_update_to_plane(surface, &srf_updates[i]);
2989
2990 if (update_type >= UPDATE_TYPE_MED) {
2991 for (j = 0; j < dc->res_pool->pipe_count; j++) {
2992 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
2993
2994 if (pipe_ctx->plane_state != surface)
2995 continue;
2996
2997 resource_build_scaling_params(pipe_ctx);
2998 }
2999 }
3000 }
3001
3002 if (update_type == UPDATE_TYPE_FULL) {
3003 if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) {
3004 BREAK_TO_DEBUGGER();
3005 goto fail;
3006 }
3007 }
3008
3009 *new_context = context;
3010 *new_update_type = update_type;
3011
3012 return true;
3013
3014 fail:
3015 dc_release_state(context);
3016
3017 return false;
3018
3019 }
3020
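/*
 * Apply stream-level updates on the stream's top pipe.  Items such as info
 * frames, gamut remap, output CSC and dithering are programmed even for
 * FAST updates; DSC, MST bandwidth, test pattern, DPMS and ABM changes are
 * only handled for MED/FULL updates (note the early "continue" for
 * UPDATE_TYPE_FAST below).
 */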
3021 static void commit_planes_do_stream_update(struct dc *dc,
3022 struct dc_stream_state *stream,
3023 struct dc_stream_update *stream_update,
3024 enum surface_update_type update_type,
3025 struct dc_state *context)
3026 {
3027 int j;
3028
3029 // Stream updates
3030 for (j = 0; j < dc->res_pool->pipe_count; j++) {
3031 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3032
3033 if (!pipe_ctx->top_pipe && !pipe_ctx->prev_odm_pipe && pipe_ctx->stream == stream) {
3034
3035 if (stream_update->periodic_interrupt && dc->hwss.setup_periodic_interrupt)
3036 dc->hwss.setup_periodic_interrupt(dc, pipe_ctx);
3037
3038 if ((stream_update->hdr_static_metadata && !stream->use_dynamic_meta) ||
3039 stream_update->vrr_infopacket ||
3040 stream_update->vsc_infopacket ||
3041 stream_update->vsp_infopacket ||
3042 stream_update->hfvsif_infopacket ||
3043 stream_update->vtem_infopacket) {
3044 resource_build_info_frame(pipe_ctx);
3045 dc->hwss.update_info_frame(pipe_ctx);
3046
3047 if (dc_is_dp_signal(pipe_ctx->stream->signal))
3048 dp_source_sequence_trace(pipe_ctx->stream->link, DPCD_SOURCE_SEQ_AFTER_UPDATE_INFO_FRAME);
3049 }
3050
3051 if (stream_update->hdr_static_metadata &&
3052 stream->use_dynamic_meta &&
3053 dc->hwss.set_dmdata_attributes &&
3054 pipe_ctx->stream->dmdata_address.quad_part != 0)
3055 dc->hwss.set_dmdata_attributes(pipe_ctx);
3056
3057 if (stream_update->gamut_remap)
3058 dc_stream_set_gamut_remap(dc, stream);
3059
3060 if (stream_update->output_csc_transform)
3061 dc_stream_program_csc_matrix(dc, stream);
3062
3063 if (stream_update->dither_option) {
3064 struct pipe_ctx *odm_pipe = pipe_ctx->next_odm_pipe;
3065 resource_build_bit_depth_reduction_params(pipe_ctx->stream,
3066 &pipe_ctx->stream->bit_depth_params);
3067 pipe_ctx->stream_res.opp->funcs->opp_program_fmt(pipe_ctx->stream_res.opp,
3068 &stream->bit_depth_params,
3069 &stream->clamping);
3070 while (odm_pipe) {
3071 odm_pipe->stream_res.opp->funcs->opp_program_fmt(odm_pipe->stream_res.opp,
3072 &stream->bit_depth_params,
3073 &stream->clamping);
3074 odm_pipe = odm_pipe->next_odm_pipe;
3075 }
3076 }
3077
3078
3079 /* Only full front-end updates beyond this point */
3080 if (update_type == UPDATE_TYPE_FAST)
3081 continue;
3082
3083 if (stream_update->dsc_config)
3084 dp_update_dsc_config(pipe_ctx);
3085
3086 if (stream_update->mst_bw_update) {
3087 if (stream_update->mst_bw_update->is_increase)
3088 dc_link_increase_mst_payload(pipe_ctx, stream_update->mst_bw_update->mst_stream_bw);
3089 else
3090 dc_link_reduce_mst_payload(pipe_ctx, stream_update->mst_bw_update->mst_stream_bw);
3091 }
3092
3093 if (stream_update->pending_test_pattern) {
3094 dc_link_dp_set_test_pattern(stream->link,
3095 stream->test_pattern.type,
3096 stream->test_pattern.color_space,
3097 stream->test_pattern.p_link_settings,
3098 stream->test_pattern.p_custom_pattern,
3099 stream->test_pattern.cust_pattern_size);
3100 }
3101
3102 if (stream_update->dpms_off) {
3103 if (*stream_update->dpms_off) {
3104 core_link_disable_stream(pipe_ctx);
3105 /* for dpms, keep acquired resources*/
3106 if (pipe_ctx->stream_res.audio && !dc->debug.az_endpoint_mute_only)
3107 pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);
3108
3109 dc->optimized_required = true;
3110
3111 } else {
3112 if (get_seamless_boot_stream_count(context) == 0)
3113 dc->hwss.prepare_bandwidth(dc, dc->current_state);
3114 core_link_enable_stream(dc->current_state, pipe_ctx);
3115 }
3116 }
3117
3118 if (stream_update->abm_level && pipe_ctx->stream_res.abm) {
3119 bool should_program_abm = true;
3120
3121 // if otg funcs defined check if blanked before programming
3122 if (pipe_ctx->stream_res.tg->funcs->is_blanked)
3123 if (pipe_ctx->stream_res.tg->funcs->is_blanked(pipe_ctx->stream_res.tg))
3124 should_program_abm = false;
3125
3126 if (should_program_abm) {
3127 if (*stream_update->abm_level == ABM_LEVEL_IMMEDIATE_DISABLE) {
3128 dc->hwss.set_abm_immediate_disable(pipe_ctx);
3129 } else {
3130 pipe_ctx->stream_res.abm->funcs->set_abm_level(
3131 pipe_ctx->stream_res.abm, stream->abm_level);
3132 }
3133 }
3134 }
3135 }
3136 }
3137 }
3138
3139 static bool dc_dmub_should_send_dirty_rect_cmd(struct dc *dc, struct dc_stream_state *stream)
3140 {
3141 if ((stream->link->psr_settings.psr_version == DC_PSR_VERSION_SU_1
3142 || stream->link->psr_settings.psr_version == DC_PSR_VERSION_1)
3143 && stream->ctx->dce_version >= DCN_VERSION_3_1)
3144 return true;
3145
3146 return false;
3147 }
3148
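/*
 * Forward per-flip dirty rectangles to the DMUB firmware so that PSR
 * selective update can be limited to the regions that changed.  Nothing is
 * sent for immediate flips, for links without a resolvable eDP panel
 * instance, or when the PSR version / DCN generation does not qualify.
 */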
3149 void dc_dmub_update_dirty_rect(struct dc *dc,
3150 int surface_count,
3151 struct dc_stream_state *stream,
3152 struct dc_surface_update *srf_updates,
3153 struct dc_state *context)
3154 {
3155 union dmub_rb_cmd cmd;
3156 struct dc_context *dc_ctx = dc->ctx;
3157 struct dmub_cmd_update_dirty_rect_data *update_dirty_rect;
3158 unsigned int i, j;
3159 unsigned int panel_inst = 0;
3160
3161 if (!dc_dmub_should_send_dirty_rect_cmd(dc, stream))
3162 return;
3163
3164 if (!dc_get_edp_link_panel_inst(dc, stream->link, &panel_inst))
3165 return;
3166
3167 memset(&cmd, 0x0, sizeof(cmd));
3168 cmd.update_dirty_rect.header.type = DMUB_CMD__UPDATE_DIRTY_RECT;
3169 cmd.update_dirty_rect.header.sub_type = 0;
3170 cmd.update_dirty_rect.header.payload_bytes =
3171 sizeof(cmd.update_dirty_rect) -
3172 sizeof(cmd.update_dirty_rect.header);
3173 update_dirty_rect = &cmd.update_dirty_rect.update_dirty_rect_data;
3174 for (i = 0; i < surface_count; i++) {
3175 struct dc_plane_state *plane_state = srf_updates[i].surface;
3176 const struct dc_flip_addrs *flip_addr = srf_updates[i].flip_addr;
3177
3178 if (!srf_updates[i].surface || !flip_addr)
3179 continue;
3180 /* Do not send in immediate flip mode */
3181 if (srf_updates[i].surface->flip_immediate)
3182 continue;
3183
3184 update_dirty_rect->dirty_rect_count = flip_addr->dirty_rect_count;
3185 memcpy(update_dirty_rect->src_dirty_rects, flip_addr->dirty_rects,
3186 sizeof(flip_addr->dirty_rects));
3187 for (j = 0; j < dc->res_pool->pipe_count; j++) {
3188 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3189
3190 if (pipe_ctx->stream != stream)
3191 continue;
3192 if (pipe_ctx->plane_state != plane_state)
3193 continue;
3194
3195 update_dirty_rect->panel_inst = panel_inst;
3196 update_dirty_rect->pipe_idx = j;
3197 dc_dmub_srv_cmd_queue(dc_ctx->dmub_srv, &cmd);
3198 dc_dmub_srv_cmd_execute(dc_ctx->dmub_srv);
3199 }
3200 }
3201 }
3202
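/*
 * Main programming path for a set of surface updates on one stream: raise
 * clocks for FULL updates, find the top pipe, take either the global
 * interdependent lock or the per-pipe lock (plus the SubVP/DMCUB lock when
 * needed), apply phantom pipe timings where SubVP is involved, send dirty
 * rects, run the stream update, then program the front ends (FULL) or just
 * the flip-related registers (FAST).
 */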
3203 static void commit_planes_for_stream(struct dc *dc,
3204 struct dc_surface_update *srf_updates,
3205 int surface_count,
3206 struct dc_stream_state *stream,
3207 struct dc_stream_update *stream_update,
3208 enum surface_update_type update_type,
3209 struct dc_state *context)
3210 {
3211 int i, j;
3212 struct pipe_ctx *top_pipe_to_program = NULL;
3213 bool should_lock_all_pipes = (update_type != UPDATE_TYPE_FAST);
3214 bool subvp_prev_use = false;
3215
3216 // Once we apply the new subvp context to hardware it won't be in the
3217 // dc->current_state anymore, so we have to cache it before we apply
3218 // the new SubVP context
3219 subvp_prev_use = false;
3220
3221
3222 dc_z10_restore(dc);
3223
3224 if (get_seamless_boot_stream_count(context) > 0 && surface_count > 0) {
3225 /* The seamless boot optimization flag keeps clocks and watermarks
3226 * high until the first flip. After the first flip, an optimization
3227 * pass is required to lower bandwidth. Note that UEFI is expected to
3228 * light up only a single display on POST, so we expect only one
3229 * stream to have the seamless boot flag set.
3230 */
3231 if (stream->apply_seamless_boot_optimization) {
3232 stream->apply_seamless_boot_optimization = false;
3233
3234 if (get_seamless_boot_stream_count(context) == 0)
3235 dc->optimized_required = true;
3236 }
3237 }
3238
3239 if (update_type == UPDATE_TYPE_FULL) {
3240 dc_allow_idle_optimizations(dc, false);
3241
3242 if (get_seamless_boot_stream_count(context) == 0)
3243 dc->hwss.prepare_bandwidth(dc, context);
3244
3245 if (dc->debug.enable_double_buffered_dsc_pg_support)
3246 dc->hwss.update_dsc_pg(dc, context, false);
3247
3248 context_clock_trace(dc, context);
3249 }
3250
3251 for (j = 0; j < dc->res_pool->pipe_count; j++) {
3252 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3253
3254 if (!pipe_ctx->top_pipe &&
3255 !pipe_ctx->prev_odm_pipe &&
3256 pipe_ctx->stream &&
3257 pipe_ctx->stream == stream) {
3258 top_pipe_to_program = pipe_ctx;
3259 }
3260 }
3261
3262 for (i = 0; i < dc->res_pool->pipe_count; i++) {
3263 struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
3264
3265 // Check old context for SubVP
3266 subvp_prev_use |= (old_pipe->stream && old_pipe->stream->mall_stream_config.type == SUBVP_PHANTOM);
3267 if (subvp_prev_use)
3268 break;
3269 }
3270
3271 if (stream->test_pattern.type != DP_TEST_PATTERN_VIDEO_MODE) {
3272 struct pipe_ctx *mpcc_pipe;
3273 struct pipe_ctx *odm_pipe;
3274
3275 for (mpcc_pipe = top_pipe_to_program; mpcc_pipe; mpcc_pipe = mpcc_pipe->bottom_pipe)
3276 for (odm_pipe = mpcc_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
3277 odm_pipe->ttu_regs.min_ttu_vblank = MAX_TTU;
3278 }
3279
3280 if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed)
3281 if (top_pipe_to_program &&
3282 top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) {
3283 if (should_use_dmub_lock(stream->link)) {
3284 union dmub_hw_lock_flags hw_locks = { 0 };
3285 struct dmub_hw_lock_inst_flags inst_flags = { 0 };
3286
3287 hw_locks.bits.lock_dig = 1;
3288 inst_flags.dig_inst = top_pipe_to_program->stream_res.tg->inst;
3289
3290 dmub_hw_lock_mgr_cmd(dc->ctx->dmub_srv,
3291 true,
3292 &hw_locks,
3293 &inst_flags);
3294 } else
3295 top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable(
3296 top_pipe_to_program->stream_res.tg);
3297 }
3298
3299 if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
3300 if (dc->hwss.subvp_pipe_control_lock)
3301 dc->hwss.subvp_pipe_control_lock(dc, context, true, should_lock_all_pipes, NULL, subvp_prev_use);
3302 dc->hwss.interdependent_update_lock(dc, context, true);
3303
3304 } else {
3305 if (dc->hwss.subvp_pipe_control_lock)
3306 dc->hwss.subvp_pipe_control_lock(dc, context, true, should_lock_all_pipes, top_pipe_to_program, subvp_prev_use);
3307 /* Lock the top pipe while updating plane addrs, since freesync requires
3308 * plane addr update event triggers to be synchronized.
3309 * top_pipe_to_program is expected to never be NULL
3310 */
3311 dc->hwss.pipe_control_lock(dc, top_pipe_to_program, true);
3312 }
3313
3314 if (update_type != UPDATE_TYPE_FAST) {
3315 for (i = 0; i < dc->res_pool->pipe_count; i++) {
3316 struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i];
3317
3318 if ((new_pipe->stream && new_pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) ||
3319 subvp_prev_use) {
3320 // If old context or new context has phantom pipes, apply
3321 // the phantom timings now. We can't change the phantom
3322 // pipe configuration safely without driver acquiring
3323 // the DMCUB lock first.
3324 dc->hwss.apply_ctx_to_hw(dc, context);
3325 break;
3326 }
3327 }
3328 }
3329
3330 dc_dmub_update_dirty_rect(dc, surface_count, stream, srf_updates, context);
3331
3348 // Stream updates
3349 if (stream_update)
3350 commit_planes_do_stream_update(dc, stream, stream_update, update_type, context);
3351
3352 if (surface_count == 0) {
3353 /*
3354 * In case of turning off the screen, there is no need to program the
3355 * front end a second time; just return after programming blank.
3356 */
3357 if (dc->hwss.apply_ctx_for_surface)
3358 dc->hwss.apply_ctx_for_surface(dc, stream, 0, context);
3359 if (dc->hwss.program_front_end_for_ctx)
3360 dc->hwss.program_front_end_for_ctx(dc, context);
3361
3362 if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
3363 dc->hwss.interdependent_update_lock(dc, context, false);
3364 } else {
3365 dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false);
3366 }
3367 dc->hwss.post_unlock_program_front_end(dc, context);
3368
3369 if (update_type != UPDATE_TYPE_FAST)
3370 if (dc->hwss.commit_subvp_config)
3371 dc->hwss.commit_subvp_config(dc, context);
3372
3373 /* Since phantom pipe programming is moved to post_unlock_program_front_end,
3374 * move the SubVP lock to after the phantom pipes have been setup
3375 */
3376 if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
3377 if (dc->hwss.subvp_pipe_control_lock)
3378 dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, NULL, subvp_prev_use);
3379 } else {
3380 if (dc->hwss.subvp_pipe_control_lock)
3381 dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, NULL, subvp_prev_use);
3382 }
3383
3384 return;
3385 }
3386
3387 if (!IS_DIAG_DC(dc->ctx->dce_environment)) {
3388 for (i = 0; i < surface_count; i++) {
3389 struct dc_plane_state *plane_state = srf_updates[i].surface;
3390 /*set logical flag for lock/unlock use*/
3391 for (j = 0; j < dc->res_pool->pipe_count; j++) {
3392 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3393 if (!pipe_ctx->plane_state)
3394 continue;
3395 if (should_update_pipe_for_plane(context, pipe_ctx, plane_state))
3396 continue;
3397 pipe_ctx->plane_state->triplebuffer_flips = false;
3398 if (update_type == UPDATE_TYPE_FAST &&
3399 dc->hwss.program_triplebuffer != NULL &&
3400 !pipe_ctx->plane_state->flip_immediate && dc->debug.enable_tri_buf) {
3401 /*triple buffer for VUpdate only*/
3402 pipe_ctx->plane_state->triplebuffer_flips = true;
3403 }
3404 }
3405 if (update_type == UPDATE_TYPE_FULL) {
3406 /* force vsync flip when reconfiguring pipes to prevent underflow */
3407 plane_state->flip_immediate = false;
3408 }
3409 }
3410 }
3411
3412 // Update Type FULL, Surface updates
3413 for (j = 0; j < dc->res_pool->pipe_count; j++) {
3414 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3415
3416 if (!pipe_ctx->top_pipe &&
3417 !pipe_ctx->prev_odm_pipe &&
3418 should_update_pipe_for_stream(context, pipe_ctx, stream)) {
3419 struct dc_stream_status *stream_status = NULL;
3420
3421 if (!pipe_ctx->plane_state)
3422 continue;
3423
3424 /* Full front end update */
3425 if (update_type == UPDATE_TYPE_FAST)
3426 continue;
3427
3428 ASSERT(!pipe_ctx->plane_state->triplebuffer_flips);
3429
3430 if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) {
3431 /*turn off triple buffer for full update*/
3432 dc->hwss.program_triplebuffer(
3433 dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips);
3434 }
3435 stream_status =
3436 stream_get_status(context, pipe_ctx->stream);
3437
3438 if (dc->hwss.apply_ctx_for_surface)
3439 dc->hwss.apply_ctx_for_surface(
3440 dc, pipe_ctx->stream, stream_status->plane_count, context);
3441 }
3442 }
3443 if (dc->hwss.program_front_end_for_ctx && update_type != UPDATE_TYPE_FAST) {
3444 dc->hwss.program_front_end_for_ctx(dc, context);
3445 if (dc->debug.validate_dml_output) {
3446 for (i = 0; i < dc->res_pool->pipe_count; i++) {
3447 struct pipe_ctx *cur_pipe = &context->res_ctx.pipe_ctx[i];
3448 if (cur_pipe->stream == NULL)
3449 continue;
3450
3451 cur_pipe->plane_res.hubp->funcs->validate_dml_output(
3452 cur_pipe->plane_res.hubp, dc->ctx,
3453 &context->res_ctx.pipe_ctx[i].rq_regs,
3454 &context->res_ctx.pipe_ctx[i].dlg_regs,
3455 &context->res_ctx.pipe_ctx[i].ttu_regs);
3456 }
3457 }
3458 }
3459
3460 // Update Type FAST, Surface updates
3461 if (update_type == UPDATE_TYPE_FAST) {
3462 if (dc->hwss.set_flip_control_gsl)
3463 for (i = 0; i < surface_count; i++) {
3464 struct dc_plane_state *plane_state = srf_updates[i].surface;
3465
3466 for (j = 0; j < dc->res_pool->pipe_count; j++) {
3467 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3468
3469 if (!should_update_pipe_for_stream(context, pipe_ctx, stream))
3470 continue;
3471
3472 if (!should_update_pipe_for_plane(context, pipe_ctx, plane_state))
3473 continue;
3474
3475 // GSL has to be used for flip immediate
3476 dc->hwss.set_flip_control_gsl(pipe_ctx,
3477 pipe_ctx->plane_state->flip_immediate);
3478 }
3479 }
3480
3481 /* Perform requested Updates */
3482 for (i = 0; i < surface_count; i++) {
3483 struct dc_plane_state *plane_state = srf_updates[i].surface;
3484
3485 for (j = 0; j < dc->res_pool->pipe_count; j++) {
3486 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3487
3488 if (!should_update_pipe_for_stream(context, pipe_ctx, stream))
3489 continue;
3490
3491 if (!should_update_pipe_for_plane(context, pipe_ctx, plane_state))
3492 continue;
3493
3494 /*program triple buffer after lock based on flip type*/
3495 if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) {
3496 /*only enable triplebuffer for fast_update*/
3497 dc->hwss.program_triplebuffer(
3498 dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips);
3499 }
3500 if (pipe_ctx->plane_state->update_flags.bits.addr_update)
3501 dc->hwss.update_plane_addr(dc, pipe_ctx);
3502 }
3503 }
3504
3505 }
3506
3507 if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
3508 dc->hwss.interdependent_update_lock(dc, context, false);
3509 } else {
3510 dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false);
3511 }
3512
3513 if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed)
3514 if (top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) {
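/* Wait through a full VACTIVE -> VBLANK -> VACTIVE sequence so the double-buffered
 * timing generator registers can latch on a frame boundary before the
 * doublebuffer lock is released below.
 */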
3515 top_pipe_to_program->stream_res.tg->funcs->wait_for_state(
3516 top_pipe_to_program->stream_res.tg,
3517 CRTC_STATE_VACTIVE);
3518 top_pipe_to_program->stream_res.tg->funcs->wait_for_state(
3519 top_pipe_to_program->stream_res.tg,
3520 CRTC_STATE_VBLANK);
3521 top_pipe_to_program->stream_res.tg->funcs->wait_for_state(
3522 top_pipe_to_program->stream_res.tg,
3523 CRTC_STATE_VACTIVE);
3524
3525 if (should_use_dmub_lock(stream->link)) {
3526 union dmub_hw_lock_flags hw_locks = { 0 };
3527 struct dmub_hw_lock_inst_flags inst_flags = { 0 };
3528
3529 hw_locks.bits.lock_dig = 1;
3530 inst_flags.dig_inst = top_pipe_to_program->stream_res.tg->inst;
3531
3532 dmub_hw_lock_mgr_cmd(dc->ctx->dmub_srv,
3533 false,
3534 &hw_locks,
3535 &inst_flags);
3536 } else
3537 top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_disable(
3538 top_pipe_to_program->stream_res.tg);
3539 }
3540
3541 if (update_type != UPDATE_TYPE_FAST)
3542 dc->hwss.post_unlock_program_front_end(dc, context);
3543 if (update_type != UPDATE_TYPE_FAST)
3544 if (dc->hwss.commit_subvp_config)
3545 dc->hwss.commit_subvp_config(dc, context);
3546
3551 /* Since phantom pipe programming is moved to post_unlock_program_front_end,
3552 * move the SubVP lock to after the phantom pipes have been setup
3553 */
3554 if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
3555 if (dc->hwss.subvp_pipe_control_lock)
3556 dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, NULL, subvp_prev_use);
3557 } else {
3558 if (dc->hwss.subvp_pipe_control_lock)
3559 dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, top_pipe_to_program, subvp_prev_use);
3560 }
3561
3562 // Fire manual trigger only when bottom plane is flipped
3563 for (j = 0; j < dc->res_pool->pipe_count; j++) {
3564 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
3565
3566 if (!pipe_ctx->plane_state)
3567 continue;
3568
3569 if (pipe_ctx->bottom_pipe || pipe_ctx->next_odm_pipe ||
3570 !pipe_ctx->stream || !should_update_pipe_for_stream(context, pipe_ctx, stream) ||
3571 !pipe_ctx->plane_state->update_flags.bits.addr_update ||
3572 pipe_ctx->plane_state->skip_manual_trigger)
3573 continue;
3574
3575 if (pipe_ctx->stream_res.tg->funcs->program_manual_trigger)
3576 pipe_ctx->stream_res.tg->funcs->program_manual_trigger(pipe_ctx->stream_res.tg);
3577 }
3578 }
3579
3580 /* Determines if the incoming context requires applying a transition state with unnecessary
3581 * pipe splitting and ODM disabled, due to hardware limitations. In a case where
3582 * the OPP associated with an MPCC might change due to plane additions, this function
3583 * returns true.
3584 */
3585 static bool could_mpcc_tree_change_for_active_pipes(struct dc *dc,
3586 struct dc_stream_state *stream,
3587 int surface_count,
3588 bool *is_plane_addition)
3589 {
3590
3591 struct dc_stream_status *cur_stream_status = stream_get_status(dc->current_state, stream);
3592 bool force_minimal_pipe_splitting = false;
3593
3594 *is_plane_addition = false;
3595
3596 if (cur_stream_status &&
3597 dc->current_state->stream_count > 0 &&
3598 dc->debug.pipe_split_policy != MPC_SPLIT_AVOID) {
3599 /* determine if minimal transition is required due to MPC*/
3600 if (surface_count > 0) {
3601 if (cur_stream_status->plane_count > surface_count) {
3602 force_minimal_pipe_splitting = true;
3603 } else if (cur_stream_status->plane_count < surface_count) {
3604 force_minimal_pipe_splitting = true;
3605 *is_plane_addition = true;
3606 }
3607 }
3608 }
3609
3610 if (cur_stream_status &&
3611 dc->current_state->stream_count == 1 &&
3612 dc->debug.enable_single_display_2to1_odm_policy) {
3613 /* determine if minimal transition is required due to dynamic ODM*/
3614 if (surface_count > 0) {
3615 if (cur_stream_status->plane_count > 2 && cur_stream_status->plane_count > surface_count) {
3616 force_minimal_pipe_splitting = true;
3617 } else if (surface_count > 2 && cur_stream_status->plane_count < surface_count) {
3618 force_minimal_pipe_splitting = true;
3619 *is_plane_addition = true;
3620 }
3621 }
3622 }
3623
3624 /* For SubVP when adding MPO video we need to add a minimal transition.
3625 */
3626 if (cur_stream_status && stream->mall_stream_config.type == SUBVP_MAIN) {
3627 /* determine if minimal transition is required due to SubVP*/
3628 if (surface_count > 0) {
3629 if (cur_stream_status->plane_count > surface_count) {
3630 force_minimal_pipe_splitting = true;
3631 } else if (cur_stream_status->plane_count < surface_count) {
3632 force_minimal_pipe_splitting = true;
3633 *is_plane_addition = true;
3634 }
3635 }
3636 }
3637
3638 return force_minimal_pipe_splitting;
3639 }
3640
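/* commit_minimal_transition_state() - commit an intermediate state built from
 * transition_base_context with pipe splitting, dynamic ODM and SubVP forced off.
 *
 * Only needed when every pipe is already in use; otherwise it returns true and
 * does nothing. On success all planes in the current state are flagged for a
 * full update so the following commit reprograms them from scratch.
 */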
3641 static bool commit_minimal_transition_state(struct dc *dc,
3642 struct dc_state *transition_base_context)
3643 {
3644 struct dc_state *transition_context = dc_create_state(dc);
3645 enum pipe_split_policy tmp_mpc_policy;
3646 bool temp_dynamic_odm_policy;
3647 bool temp_subvp_policy;
3648 enum dc_status ret = DC_ERROR_UNEXPECTED;
3649 unsigned int i, j;
3650 unsigned int pipe_in_use = 0;
3651
3652 if (!transition_context)
3653 return false;
3654
3655 /* check current pipes in use*/
3656 for (i = 0; i < dc->res_pool->pipe_count; i++) {
3657 struct pipe_ctx *pipe = &transition_base_context->res_ctx.pipe_ctx[i];
3658
3659 if (pipe->plane_state)
3660 pipe_in_use++;
3661 }
3662
3663 /* When the OS adds a new surface while all pipes are already in use (ODM combine
3664 * and MPC split enabled), commit_minimal_transition_state is needed to transition safely.
3665 * After the OS exits MPO and goes back to using ODM and MPC split across all pipes,
3666 * it needs to be called again. Otherwise return true to skip.
3667 *
3668 * Reduce the scenarios to use dc_commit_state_no_check in the stage of flip. Especially
3669 * enter/exit MPO when DCN still have enough resources.
3670 */
3671 if (pipe_in_use != dc->res_pool->pipe_count) {
3672 dc_release_state(transition_context);
3673 return true;
3674 }
3675
3676 if (!dc->config.is_vmin_only_asic) {
3677 tmp_mpc_policy = dc->debug.pipe_split_policy;
3678 dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
3679 }
3680
3681 temp_dynamic_odm_policy = dc->debug.enable_single_display_2to1_odm_policy;
3682 dc->debug.enable_single_display_2to1_odm_policy = false;
3683
3684 temp_subvp_policy = dc->debug.force_disable_subvp;
3685 dc->debug.force_disable_subvp = true;
3686
3687 dc_resource_state_copy_construct(transition_base_context, transition_context);
3688
3689 //commit minimal state
3690 if (dc->res_pool->funcs->validate_bandwidth(dc, transition_context, false)) {
3691 for (i = 0; i < transition_context->stream_count; i++) {
3692 struct dc_stream_status *stream_status = &transition_context->stream_status[i];
3693
3694 for (j = 0; j < stream_status->plane_count; j++) {
3695 struct dc_plane_state *plane_state = stream_status->plane_states[j];
3696
3697 /* force vsync flip when reconfiguring pipes to prevent underflow
3698 * and corruption
3699 */
3700 plane_state->flip_immediate = false;
3701 }
3702 }
3703
3704 ret = dc_commit_state_no_check(dc, transition_context);
3705 }
3706
3707 /* always release, since dc_commit_state_no_check retains a reference on success */
3708 dc_release_state(transition_context);
3709
3710 /*restore previous pipe split and odm policy*/
3711 if (!dc->config.is_vmin_only_asic)
3712 dc->debug.pipe_split_policy = tmp_mpc_policy;
3713
3714 dc->debug.enable_single_display_2to1_odm_policy = temp_dynamic_odm_policy;
3715 dc->debug.force_disable_subvp = temp_subvp_policy;
3716
3717 if (ret != DC_OK) {
3718 /*this should never happen*/
3719 BREAK_TO_DEBUGGER();
3720 return false;
3721 }
3722
3723 /*force full surface update*/
3724 for (i = 0; i < dc->current_state->stream_count; i++) {
3725 for (j = 0; j < dc->current_state->stream_status[i].plane_count; j++) {
3726 dc->current_state->stream_status[i].plane_states[j]->update_flags.raw = 0xFFFFFFFF;
3727 }
3728 }
3729
3730 return true;
3731 }
3732
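/* dc_update_planes_and_stream() - update plane and stream state, committing a
 * minimal transition state first whenever the MPCC tree of active pipes could
 * change (see could_mpcc_tree_change_for_active_pipes). On plane addition the
 * minimal state is the current one; on plane removal it is the newly built
 * context and the update is promoted to a full update.
 *
 * Returns true on success, false if state creation, validation or the minimal
 * transition commit fails.
 */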
3733 bool dc_update_planes_and_stream(struct dc *dc,
3734 struct dc_surface_update *srf_updates, int surface_count,
3735 struct dc_stream_state *stream,
3736 struct dc_stream_update *stream_update)
3737 {
3738 struct dc_state *context;
3739 enum surface_update_type update_type;
3740 int i;
3741
3742 /* In cases where MPO and split or ODM are used, transitions can
3743 * cause underflow. Apply stream configuration with minimal pipe
3744 * split first to avoid unsupported transitions for active pipes.
3745 */
3746 bool force_minimal_pipe_splitting;
3747 bool is_plane_addition;
3748
3749 force_minimal_pipe_splitting = could_mpcc_tree_change_for_active_pipes(
3750 dc,
3751 stream,
3752 surface_count,
3753 &is_plane_addition);
3754
3755 /* on plane addition, minimal state is the current one */
3756 if (force_minimal_pipe_splitting && is_plane_addition &&
3757 !commit_minimal_transition_state(dc, dc->current_state))
3758 return false;
3759
3760 if (!update_planes_and_stream_state(
3761 dc,
3762 srf_updates,
3763 surface_count,
3764 stream,
3765 stream_update,
3766 &update_type,
3767 &context))
3768 return false;
3769
3770 /* on plane removal, minimal state is the new one */
3771 if (force_minimal_pipe_splitting && !is_plane_addition) {
3772 if (!commit_minimal_transition_state(dc, context)) {
3773 dc_release_state(context);
3774 return false;
3775 }
3776
3777 update_type = UPDATE_TYPE_FULL;
3778 }
3779
3780 commit_planes_for_stream(
3781 dc,
3782 srf_updates,
3783 surface_count,
3784 stream,
3785 stream_update,
3786 update_type,
3787 context);
3788
3789 if (dc->current_state != context) {
3790
3791 /* Since memory free requires elevated IRQL, an interrupt
3792 * request is generated by mem free. If this happens
3793 * between freeing and reassigning the context, our vsync
3794 * interrupt will call into dc and cause a memory
3795 * corruption BSOD. Hence, we first reassign the context,
3796 * then free the old context.
3797 */
3798
3799 struct dc_state *old = dc->current_state;
3800
3801 dc->current_state = context;
3802 dc_release_state(old);
3803
3804 // clear any forced full updates
3805 for (i = 0; i < dc->res_pool->pipe_count; i++) {
3806 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
3807
3808 if (pipe_ctx->plane_state && pipe_ctx->stream == stream)
3809 pipe_ctx->plane_state->force_full_update = false;
3810 }
3811 }
3812 return true;
3813 }
3814
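/* dc_commit_updates_for_stream() - legacy per-stream update path used by DM.
 * Classifies the update (FAST/MED/FULL), builds a new context from @state for
 * full updates (otherwise applies the updates to the current state),
 * revalidates bandwidth when required and commits the result.
 */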
3815 void dc_commit_updates_for_stream(struct dc *dc,
3816 struct dc_surface_update *srf_updates,
3817 int surface_count,
3818 struct dc_stream_state *stream,
3819 struct dc_stream_update *stream_update,
3820 struct dc_state *state)
3821 {
3822 const struct dc_stream_status *stream_status;
3823 enum surface_update_type update_type;
3824 struct dc_state *context;
3825 struct dc_context *dc_ctx = dc->ctx;
3826 int i, j;
3827
3828 stream_status = dc_stream_get_status(stream);
3829 context = dc->current_state;
3830
3831 update_type = dc_check_update_surfaces_for_stream(
3832 dc, srf_updates, surface_count, stream_update, stream_status);
3833
3834 if (update_type >= update_surface_trace_level)
3835 update_surface_trace(dc, srf_updates, surface_count);
3836
3837
3838 if (update_type >= UPDATE_TYPE_FULL) {
3839
3840 /* initialize scratch memory for building context */
3841 context = dc_create_state(dc);
3842 if (context == NULL) {
3843 DC_ERROR("Failed to allocate new validate context!\n");
3844 return;
3845 }
3846
3847 dc_resource_state_copy_construct(state, context);
3848
3849 for (i = 0; i < dc->res_pool->pipe_count; i++) {
3850 struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i];
3851 struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
3852
3853 if (new_pipe->plane_state && new_pipe->plane_state != old_pipe->plane_state)
3854 new_pipe->plane_state->force_full_update = true;
3855 }
3856 } else if (update_type == UPDATE_TYPE_FAST && dc_ctx->dce_version >= DCE_VERSION_MAX) {
3857 /*
3858 * Previous frame finished and HW is ready for optimization.
3859 *
3860 * Only relevant for DCN behavior where we can guarantee the optimization
3861 * is safe to apply - retain the legacy behavior for DCE.
3862 */
3863 dc_post_update_surfaces_to_stream(dc);
3864 }
3865
3866
3867 for (i = 0; i < surface_count; i++) {
3868 struct dc_plane_state *surface = srf_updates[i].surface;
3869
3870 copy_surface_update_to_plane(surface, &srf_updates[i]);
3871
3872 if (update_type >= UPDATE_TYPE_MED) {
3873 for (j = 0; j < dc->res_pool->pipe_count; j++) {
3874 struct pipe_ctx *pipe_ctx =
3875 &context->res_ctx.pipe_ctx[j];
3876
3877 if (pipe_ctx->plane_state != surface)
3878 continue;
3879
3880 resource_build_scaling_params(pipe_ctx);
3881 }
3882 }
3883 }
3884
3885 copy_stream_update_to_stream(dc, context, stream, stream_update);
3886
3887 if (update_type >= UPDATE_TYPE_FULL) {
3888 if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) {
3889 DC_ERROR("Mode validation failed for stream update!\n");
3890 dc_release_state(context);
3891 return;
3892 }
3893 }
3894
3895 TRACE_DC_PIPE_STATE(pipe_ctx, i, MAX_PIPES);
3896
3897 commit_planes_for_stream(
3898 dc,
3899 srf_updates,
3900 surface_count,
3901 stream,
3902 stream_update,
3903 update_type,
3904 context);
3905 /* update current_state */
3906 if (dc->current_state != context) {
3907
3908 struct dc_state *old = dc->current_state;
3909
3910 dc->current_state = context;
3911 dc_release_state(old);
3912
3913 for (i = 0; i < dc->res_pool->pipe_count; i++) {
3914 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
3915
3916 if (pipe_ctx->plane_state && pipe_ctx->stream == stream)
3917 pipe_ctx->plane_state->force_full_update = false;
3918 }
3919 }
3920
3921 /* Legacy optimization path for DCE. */
3922 if (update_type >= UPDATE_TYPE_FULL && dc_ctx->dce_version < DCE_VERSION_MAX) {
3923 dc_post_update_surfaces_to_stream(dc);
3924 TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce);
3925 }
3926
3927 return;
3928
3929 }
3930
3931 uint8_t dc_get_current_stream_count(struct dc *dc)
3932 {
3933 return dc->current_state->stream_count;
3934 }
3935
3936 struct dc_stream_state *dc_get_stream_at_index(struct dc *dc, uint8_t i)
3937 {
3938 if (i < dc->current_state->stream_count)
3939 return dc->current_state->streams[i];
3940 return NULL;
3941 }
3942
3943 enum dc_irq_source dc_interrupt_to_irq_source(
3944 struct dc *dc,
3945 uint32_t src_id,
3946 uint32_t ext_id)
3947 {
3948 return dal_irq_service_to_irq_source(dc->res_pool->irqs, src_id, ext_id);
3949 }
3950
3951 /*
3952 * dc_interrupt_set() - Enable/disable an AMD hw interrupt source
3953 */
3954 bool dc_interrupt_set(struct dc *dc, enum dc_irq_source src, bool enable)
3955 {
3956
3957 if (dc == NULL)
3958 return false;
3959
3960 return dal_irq_service_set(dc->res_pool->irqs, src, enable);
3961 }
3962
3963 void dc_interrupt_ack(struct dc *dc, enum dc_irq_source src)
3964 {
3965 dal_irq_service_ack(dc->res_pool->irqs, src);
3966 }
3967
3968 void dc_power_down_on_boot(struct dc *dc)
3969 {
3970 if (dc->ctx->dce_environment != DCE_ENV_VIRTUAL_HW &&
3971 dc->hwss.power_down_on_boot)
3972 dc->hwss.power_down_on_boot(dc);
3973 }
3974
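/* dc_set_power_state() - handle an ACPI power state transition.
 * For D0 the resource state and hardware are reinitialized; for any other
 * state the current context is zeroed (preserving its refcount and
 * display_mode_lib) so that resume starts from a clean state.
 */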
3975 void dc_set_power_state(
3976 struct dc *dc,
3977 enum dc_acpi_cm_power_state power_state)
3978 {
3979 struct kref refcount;
3980 struct display_mode_lib *dml;
3981
3982 if (!dc->current_state)
3983 return;
3984
3985 switch (power_state) {
3986 case DC_ACPI_CM_POWER_STATE_D0:
3987 dc_resource_state_construct(dc, dc->current_state);
3988
3989 dc_z10_restore(dc);
3990
3991 if (dc->ctx->dmub_srv)
3992 dc_dmub_srv_wait_phy_init(dc->ctx->dmub_srv);
3993
3994 dc->hwss.init_hw(dc);
3995
3996 if (dc->hwss.init_sys_ctx != NULL &&
3997 dc->vm_pa_config.valid) {
3998 dc->hwss.init_sys_ctx(dc->hwseq, dc, &dc->vm_pa_config);
3999 }
4000
4001 break;
4002 default:
4003 ASSERT(dc->current_state->stream_count == 0);
4004 /* Zero out the current context so that on resume we start with
4005 * clean state, and dc hw programming optimizations will not
4006 * cause any trouble.
4007 */
4008 dml = kzalloc(sizeof(struct display_mode_lib),
4009 GFP_KERNEL);
4010
4011 ASSERT(dml);
4012 if (!dml)
4013 return;
4014
4015 /* Preserve refcount */
4016 refcount = dc->current_state->refcount;
4017 /* Preserve display mode lib */
4018 memcpy(dml, &dc->current_state->bw_ctx.dml, sizeof(struct display_mode_lib));
4019
4020 dc_resource_state_destruct(dc->current_state);
4021 memset(dc->current_state, 0,
4022 sizeof(*dc->current_state));
4023
4024 dc->current_state->refcount = refcount;
4025 dc->current_state->bw_ctx.dml = *dml;
4026
4027 kfree(dml);
4028
4029 break;
4030 }
4031 }
4032
4033 void dc_resume(struct dc *dc)
4034 {
4035 uint32_t i;
4036
4037 for (i = 0; i < dc->link_count; i++)
4038 core_link_resume(dc->links[i]);
4039 }
4040
4041 bool dc_is_dmcu_initialized(struct dc *dc)
4042 {
4043 struct dmcu *dmcu = dc->res_pool->dmcu;
4044
4045 if (dmcu)
4046 return dmcu->funcs->is_dmcu_initialized(dmcu);
4047 return false;
4048 }
4049
4050 bool dc_is_oem_i2c_device_present(
4051 struct dc *dc,
4052 size_t slave_address)
4053 {
4054 if (dc->res_pool->oem_device)
4055 return dce_i2c_oem_device_present(
4056 dc->res_pool,
4057 dc->res_pool->oem_device,
4058 slave_address);
4059
4060 return false;
4061 }
4062
4063 bool dc_submit_i2c(
4064 struct dc *dc,
4065 uint32_t link_index,
4066 struct i2c_command *cmd)
4067 {
4068
4069 struct dc_link *link = dc->links[link_index];
4070 struct ddc_service *ddc = link->ddc;
4071 return dce_i2c_submit_command(
4072 dc->res_pool,
4073 ddc->ddc_pin,
4074 cmd);
4075 }
4076
4077 bool dc_submit_i2c_oem(
4078 struct dc *dc,
4079 struct i2c_command *cmd)
4080 {
4081 struct ddc_service *ddc = dc->res_pool->oem_device;
4082 if (ddc)
4083 return dce_i2c_submit_command(
4084 dc->res_pool,
4085 ddc->ddc_pin,
4086 cmd);
4087
4088 return false;
4089 }
4090
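/* Append a remote sink to dc_link->remote_sinks, taking a reference on it.
 * Fails if the link already holds MAX_SINKS_PER_LINK sinks.
 */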
4091 static bool link_add_remote_sink_helper(struct dc_link *dc_link, struct dc_sink *sink)
4092 {
4093 if (dc_link->sink_count >= MAX_SINKS_PER_LINK) {
4094 BREAK_TO_DEBUGGER();
4095 return false;
4096 }
4097
4098 dc_sink_retain(sink);
4099
4100 dc_link->remote_sinks[dc_link->sink_count] = sink;
4101 dc_link->sink_count++;
4102
4103 return true;
4104 }
4105
4106 /*
4107 * dc_link_add_remote_sink() - Create a sink and attach it to an existing link
4108 *
4109 * EDID length is in bytes
4110 */
4111 struct dc_sink *dc_link_add_remote_sink(
4112 struct dc_link *link,
4113 const uint8_t *edid,
4114 int len,
4115 struct dc_sink_init_data *init_data)
4116 {
4117 struct dc_sink *dc_sink;
4118 enum dc_edid_status edid_status;
4119
4120 if (len > DC_MAX_EDID_BUFFER_SIZE) {
4121 dm_error("Max EDID buffer size breached!\n");
4122 return NULL;
4123 }
4124
4125 if (!init_data) {
4126 BREAK_TO_DEBUGGER();
4127 return NULL;
4128 }
4129
4130 if (!init_data->link) {
4131 BREAK_TO_DEBUGGER();
4132 return NULL;
4133 }
4134
4135 dc_sink = dc_sink_create(init_data);
4136
4137 if (!dc_sink)
4138 return NULL;
4139
4140 memmove(dc_sink->dc_edid.raw_edid, edid, len);
4141 dc_sink->dc_edid.length = len;
4142
4143 if (!link_add_remote_sink_helper(
4144 link,
4145 dc_sink))
4146 goto fail_add_sink;
4147
4148 edid_status = dm_helpers_parse_edid_caps(
4149 link,
4150 &dc_sink->dc_edid,
4151 &dc_sink->edid_caps);
4152
4153 /*
4154 * Treat device as no EDID device if EDID
4155 * parsing fails
4156 */
4157 if (edid_status != EDID_OK && edid_status != EDID_PARTIAL_VALID) {
4158 dc_sink->dc_edid.length = 0;
4159 dm_error("Bad EDID, status%d!\n", edid_status);
4160 }
4161
4162 return dc_sink;
4163
4164 fail_add_sink:
4165 dc_sink_release(dc_sink);
4166 return NULL;
4167 }
4168
4169 /*
4170 * dc_link_remove_remote_sink() - Remove a remote sink from a dc_link
4171 *
4172 * Note that this just removes the struct dc_sink - it doesn't
4173 * program hardware or alter other members of dc_link
4174 */
4175 void dc_link_remove_remote_sink(struct dc_link *link, struct dc_sink *sink)
4176 {
4177 int i;
4178
4179 if (!link->sink_count) {
4180 BREAK_TO_DEBUGGER();
4181 return;
4182 }
4183
4184 for (i = 0; i < link->sink_count; i++) {
4185 if (link->remote_sinks[i] == sink) {
4186 dc_sink_release(sink);
4187 link->remote_sinks[i] = NULL;
4188
4189 /* shrink array to remove empty place */
4190 while (i < link->sink_count - 1) {
4191 link->remote_sinks[i] = link->remote_sinks[i+1];
4192 i++;
4193 }
4194 link->remote_sinks[i] = NULL;
4195 link->sink_count--;
4196 return;
4197 }
4198 }
4199 }
4200
4201 void get_clock_requirements_for_state(struct dc_state *state, struct AsicStateEx *info)
4202 {
4203 info->displayClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dispclk_khz;
4204 info->engineClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dcfclk_khz;
4205 info->memoryClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dramclk_khz;
4206 info->maxSupportedDppClock = (unsigned int)state->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz;
4207 info->dppClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dppclk_khz;
4208 info->socClock = (unsigned int)state->bw_ctx.bw.dcn.clk.socclk_khz;
4209 info->dcfClockDeepSleep = (unsigned int)state->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz;
4210 info->fClock = (unsigned int)state->bw_ctx.bw.dcn.clk.fclk_khz;
4211 info->phyClock = (unsigned int)state->bw_ctx.bw.dcn.clk.phyclk_khz;
4212 }
4213 enum dc_status dc_set_clock(struct dc *dc, enum dc_clock_type clock_type, uint32_t clk_khz, uint32_t stepping)
4214 {
4215 if (dc->hwss.set_clock)
4216 return dc->hwss.set_clock(dc, clock_type, clk_khz, stepping);
4217 return DC_ERROR_UNEXPECTED;
4218 }
4219 void dc_get_clock(struct dc *dc, enum dc_clock_type clock_type, struct dc_clock_config *clock_cfg)
4220 {
4221 if (dc->hwss.get_clock)
4222 dc->hwss.get_clock(dc, clock_type, clock_cfg);
4223 }
4224
4225 /* enable/disable eDP PSR without specify stream for eDP */
4226 bool dc_set_psr_allow_active(struct dc *dc, bool enable)
4227 {
4228 int i;
4229 bool allow_active;
4230
4231 for (i = 0; i < dc->current_state->stream_count ; i++) {
4232 struct dc_link *link;
4233 struct dc_stream_state *stream = dc->current_state->streams[i];
4234
4235 link = stream->link;
4236 if (!link)
4237 continue;
4238
4239 if (link->psr_settings.psr_feature_enabled) {
4240 if (enable && !link->psr_settings.psr_allow_active) {
4241 allow_active = true;
4242 if (!dc_link_set_psr_allow_active(link, &allow_active, false, false, NULL))
4243 return false;
4244 } else if (!enable && link->psr_settings.psr_allow_active) {
4245 allow_active = false;
4246 if (!dc_link_set_psr_allow_active(link, &allow_active, true, false, NULL))
4247 return false;
4248 }
4249 }
4250 }
4251
4252 return true;
4253 }
4254
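/* Allow or disallow idle power optimizations. This is a no-op when the debug
 * option disables idle power optimizations, when the SMU reports as not
 * present, or when the requested state is already in effect.
 */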
4255 void dc_allow_idle_optimizations(struct dc *dc, bool allow)
4256 {
4257 if (dc->debug.disable_idle_power_optimizations)
4258 return;
4259
4260 if (dc->clk_mgr != NULL && dc->clk_mgr->funcs->is_smu_present)
4261 if (!dc->clk_mgr->funcs->is_smu_present(dc->clk_mgr))
4262 return;
4263
4264 if (allow == dc->idle_optimizations_allowed)
4265 return;
4266
4267 if (dc->hwss.apply_idle_power_optimizations && dc->hwss.apply_idle_power_optimizations(dc, allow))
4268 dc->idle_optimizations_allowed = allow;
4269 }
4270
4271 /* set min and max memory clock to lowest and highest DPM level, respectively */
4272 void dc_unlock_memory_clock_frequency(struct dc *dc)
4273 {
4274 if (dc->clk_mgr->funcs->set_hard_min_memclk)
4275 dc->clk_mgr->funcs->set_hard_min_memclk(dc->clk_mgr, false);
4276
4277 if (dc->clk_mgr->funcs->set_hard_max_memclk)
4278 dc->clk_mgr->funcs->set_hard_max_memclk(dc->clk_mgr);
4279 }
4280
4281 /* set min memory clock to the min required for current mode, max to maxDPM */
4282 void dc_lock_memory_clock_frequency(struct dc *dc)
4283 {
4284 if (dc->clk_mgr->funcs->get_memclk_states_from_smu)
4285 dc->clk_mgr->funcs->get_memclk_states_from_smu(dc->clk_mgr);
4286
4287 if (dc->clk_mgr->funcs->set_hard_min_memclk)
4288 dc->clk_mgr->funcs->set_hard_min_memclk(dc->clk_mgr, true);
4289
4290 if (dc->clk_mgr->funcs->set_hard_max_memclk)
4291 dc->clk_mgr->funcs->set_hard_max_memclk(dc->clk_mgr);
4292 }
4293
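/* Blank pixel data on every active pipe, wait for the double-buffered blank to
 * take effect (VACTIVE -> VBLANK -> VACTIVE), force both the minimum and
 * maximum memory clock to memclk_mhz, then unblank the pipes.
 */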
4294 static void blank_and_force_memclk(struct dc *dc, bool apply, unsigned int memclk_mhz)
4295 {
4296 struct dc_state *context = dc->current_state;
4297 struct hubp *hubp;
4298 struct pipe_ctx *pipe;
4299 int i;
4300
4301 for (i = 0; i < dc->res_pool->pipe_count; i++) {
4302 pipe = &context->res_ctx.pipe_ctx[i];
4303
4304 if (pipe->stream != NULL) {
4305 dc->hwss.disable_pixel_data(dc, pipe, true);
4306
4307 // wait for double buffer
4308 pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, CRTC_STATE_VACTIVE);
4309 pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, CRTC_STATE_VBLANK);
4310 pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, CRTC_STATE_VACTIVE);
4311
4312 hubp = pipe->plane_res.hubp;
4313 hubp->funcs->set_blank_regs(hubp, true);
4314 }
4315 }
4316
4317 dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, memclk_mhz);
4318 dc->clk_mgr->funcs->set_min_memclk(dc->clk_mgr, memclk_mhz);
4319
4320 for (i = 0; i < dc->res_pool->pipe_count; i++) {
4321 pipe = &context->res_ctx.pipe_ctx[i];
4322
4323 if (pipe->stream != NULL) {
4324 dc->hwss.disable_pixel_data(dc, pipe, false);
4325
4326 hubp = pipe->plane_res.hubp;
4327 hubp->funcs->set_blank_regs(hubp, false);
4328 }
4329 }
4330 }
4331
4332
4333 /**
4334 * dc_enable_dcmode_clk_limit() - lower clocks in dc (battery) mode
4335 * @dc: pointer to dc of the dm calling this
4336 * @enable: True = transition to DC mode, false = transition back to AC mode
4337 *
4338 * Some SoCs define additional clock limits when in DC mode, DM should
4339 * invoke this function when the platform undergoes a power source transition
4340 * so DC can apply/unapply the limit. This interface may be disruptive to
4341 * the onscreen content.
4342 *
4343 * Context: Triggered by OS through DM interface, or manually by escape calls.
4344 * Need to hold a dclock when doing so.
4345 *
4346 * Return: none (void function)
4347 *
4348 */
4349 void dc_enable_dcmode_clk_limit(struct dc *dc, bool enable)
4350 {
4351 uint32_t hw_internal_rev = dc->ctx->asic_id.hw_internal_rev;
4352 unsigned int softMax, maxDPM, funcMin;
4353 bool p_state_change_support;
4354
4355 if (!ASICREV_IS_BEIGE_GOBY_P(hw_internal_rev))
4356 return;
4357
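/* softMax: DC-mode memclk soft limit, maxDPM: highest memclk DPM level in the
 * clock table, funcMin: currently required memclk in MHz (rounded up from
 * dramclk_khz).
 */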
4358 softMax = dc->clk_mgr->bw_params->dc_mode_softmax_memclk;
4359 maxDPM = dc->clk_mgr->bw_params->clk_table.entries[dc->clk_mgr->bw_params->clk_table.num_entries - 1].memclk_mhz;
4360 funcMin = (dc->clk_mgr->clks.dramclk_khz + 999) / 1000;
4361 p_state_change_support = dc->clk_mgr->clks.p_state_change_support;
4362
4363 if (enable && !dc->clk_mgr->dc_mode_softmax_enabled) {
4364 if (p_state_change_support) {
4365 if (funcMin <= softMax)
4366 dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, softMax);
4367 // else: No-Op
4368 } else {
4369 if (funcMin <= softMax)
4370 blank_and_force_memclk(dc, true, softMax);
4371 // else: No-Op
4372 }
4373 } else if (!enable && dc->clk_mgr->dc_mode_softmax_enabled) {
4374 if (p_state_change_support) {
4375 if (funcMin <= softMax)
4376 dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, maxDPM);
4377 // else: No-Op
4378 } else {
4379 if (funcMin <= softMax)
4380 blank_and_force_memclk(dc, true, maxDPM);
4381 // else: No-Op
4382 }
4383 }
4384 dc->clk_mgr->dc_mode_softmax_enabled = enable;
4385 }
4386 bool dc_is_plane_eligible_for_idle_optimizations(struct dc *dc, struct dc_plane_state *plane,
4387 struct dc_cursor_attributes *cursor_attr)
4388 {
4389 if (dc->hwss.does_plane_fit_in_mall && dc->hwss.does_plane_fit_in_mall(dc, plane, cursor_attr))
4390 return true;
4391 return false;
4392 }
4393
4394 /* cleanup on driver unload */
4395 void dc_hardware_release(struct dc *dc)
4396 {
4397 dc_mclk_switch_using_fw_based_vblank_stretch_shut_down(dc);
4398
4399 if (dc->hwss.hardware_release)
4400 dc->hwss.hardware_release(dc);
4401 }
4402
4403 void dc_mclk_switch_using_fw_based_vblank_stretch_shut_down(struct dc *dc)
4404 {
4405 if (dc->current_state)
4406 dc->current_state->bw_ctx.bw.dcn.clk.fw_based_mclk_switching_shut_down = true;
4407 }
4408
4409 /*
4410 *****************************************************************************
4411 * Function: dc_is_dmub_outbox_supported -
4412 *
4413 * @brief
4414 * Checks whether DMUB FW supports outbox notifications, if supported
4415 * DM should register outbox interrupt prior to actually enabling interrupts
4416 * via dc_enable_dmub_outbox
4417 *
4418 * @param
4419 * [in] dc: dc structure
4420 *
4421 * @return
4422 * True if DMUB FW supports outbox notifications, False otherwise
4423 *****************************************************************************
4424 */
4425 bool dc_is_dmub_outbox_supported(struct dc *dc)
4426 {
4427 /* DCN31 B0 USB4 DPIA needs dmub notifications for interrupts */
4428 if (dc->ctx->asic_id.chip_family == FAMILY_YELLOW_CARP &&
4429 dc->ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0 &&
4430 !dc->debug.dpia_debug.bits.disable_dpia)
4431 return true;
4432
4433 if (dc->ctx->asic_id.chip_family == AMDGPU_FAMILY_GC_11_0_1 &&
4434 !dc->debug.dpia_debug.bits.disable_dpia)
4435 return true;
4436
4437 /* dmub aux needs dmub notifications to be enabled */
4438 return dc->debug.enable_dmub_aux_for_legacy_ddc;
4439 }
4440
4441 /*
4442 *****************************************************************************
4443 * Function: dc_enable_dmub_notifications
4444 *
4445 * @brief
4446 * Calls dc_is_dmub_outbox_supported to check if dmub fw supports outbox
4447 * notifications. All DMs shall switch to dc_is_dmub_outbox_supported.
4448 * This API shall be removed after switching.
4449 *
4450 * @param
4451 * [in] dc: dc structure
4452 *
4453 * @return
4454 * True if DMUB FW supports outbox notifications, False otherwise
4455 *****************************************************************************
4456 */
4457 bool dc_enable_dmub_notifications(struct dc *dc)
4458 {
4459 return dc_is_dmub_outbox_supported(dc);
4460 }
4461
4462 /**
4463 *****************************************************************************
4464 * Function: dc_enable_dmub_outbox
4465 *
4466 * @brief
4467 * Enables DMUB unsolicited notifications to x86 via outbox
4468 *
4469 * @param
4470 * [in] dc: dc structure
4471 *
4472 * @return
4473 * None
4474 *****************************************************************************
4475 */
4476 void dc_enable_dmub_outbox(struct dc *dc)
4477 {
4478 struct dc_context *dc_ctx = dc->ctx;
4479
4480 dmub_enable_outbox_notification(dc_ctx->dmub_srv);
4481 DC_LOG_DC("%s: dmub outbox notifications enabled\n", __func__);
4482 }
4483
4484 /**
4485 * dc_process_dmub_aux_transfer_async - Submits aux command to dmub via inbox message
4486 * Sets port index appropriately for legacy DDC
4487 * @dc: dc structure
4488 * @link_index: link index
4489 * @payload: aux payload
4490 *
4491 * Returns: True if successful, False if failure
4492 */
4493 bool dc_process_dmub_aux_transfer_async(struct dc *dc,
4494 uint32_t link_index,
4495 struct aux_payload *payload)
4496 {
4497 uint8_t action;
4498 union dmub_rb_cmd cmd = {0};
4499 struct dc_dmub_srv *dmub_srv = dc->ctx->dmub_srv;
4500
4501 ASSERT(payload->length <= 16);
4502
4503 cmd.dp_aux_access.header.type = DMUB_CMD__DP_AUX_ACCESS;
4504 cmd.dp_aux_access.header.payload_bytes = 0;
4505 /* For dpia, ddc_pin is set to NULL */
4506 if (!dc->links[link_index]->ddc->ddc_pin)
4507 cmd.dp_aux_access.aux_control.type = AUX_CHANNEL_DPIA;
4508 else
4509 cmd.dp_aux_access.aux_control.type = AUX_CHANNEL_LEGACY_DDC;
4510
4511 cmd.dp_aux_access.aux_control.instance = dc->links[link_index]->ddc_hw_inst;
4512 cmd.dp_aux_access.aux_control.sw_crc_enabled = 0;
4513 cmd.dp_aux_access.aux_control.timeout = 0;
4514 cmd.dp_aux_access.aux_control.dpaux.address = payload->address;
4515 cmd.dp_aux_access.aux_control.dpaux.is_i2c_over_aux = payload->i2c_over_aux;
4516 cmd.dp_aux_access.aux_control.dpaux.length = payload->length;
4517
4518 /* set aux action */
4519 if (payload->i2c_over_aux) {
4520 if (payload->write) {
4521 if (payload->mot)
4522 action = DP_AUX_REQ_ACTION_I2C_WRITE_MOT;
4523 else
4524 action = DP_AUX_REQ_ACTION_I2C_WRITE;
4525 } else {
4526 if (payload->mot)
4527 action = DP_AUX_REQ_ACTION_I2C_READ_MOT;
4528 else
4529 action = DP_AUX_REQ_ACTION_I2C_READ;
4530 }
4531 } else {
4532 if (payload->write)
4533 action = DP_AUX_REQ_ACTION_DPCD_WRITE;
4534 else
4535 action = DP_AUX_REQ_ACTION_DPCD_READ;
4536 }
4537
4538 cmd.dp_aux_access.aux_control.dpaux.action = action;
4539
4540 if (payload->length && payload->write) {
4541 memcpy(cmd.dp_aux_access.aux_control.dpaux.data,
4542 payload->data,
4543 payload->length
4544 );
4545 }
4546
4547 dc_dmub_srv_cmd_queue(dmub_srv, &cmd);
4548 dc_dmub_srv_cmd_execute(dmub_srv);
4549 dc_dmub_srv_wait_idle(dmub_srv);
4550
4551 return true;
4552 }
4553
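/* Map a DPIA port index reported by DMUB back to a dc link index by matching
 * ddc_hw_inst on links without a physical DDC pin. Returns 0xFF (and asserts)
 * if no DPIA link matches.
 */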
4554 uint8_t get_link_index_from_dpia_port_index(const struct dc *dc,
4555 uint8_t dpia_port_index)
4556 {
4557 uint8_t index, link_index = 0xFF;
4558
4559 for (index = 0; index < dc->link_count; index++) {
4560 /* ddc_hw_inst has dpia port index for dpia links
4561 * and ddc instance for legacy links
4562 */
4563 if (!dc->links[index]->ddc->ddc_pin) {
4564 if (dc->links[index]->ddc_hw_inst == dpia_port_index) {
4565 link_index = index;
4566 break;
4567 }
4568 }
4569 }
4570 ASSERT(link_index != 0xFF);
4571 return link_index;
4572 }
4573
4574 /**
4575 *****************************************************************************
4576 * Function: dc_process_dmub_set_config_async
4577 *
4578 * @brief
4579 * Submits set_config command to dmub via inbox message
4580 *
4581 * @param
4582 * [in] dc: dc structure
4583 * [in] link_index: link index
4584 * [in] payload: aux payload
4585 * [out] notify: set_config immediate reply
4586 *
4587 * @return
4588 * True if successful, False if failure
4589 *****************************************************************************
4590 */
4591 bool dc_process_dmub_set_config_async(struct dc *dc,
4592 uint32_t link_index,
4593 struct set_config_cmd_payload *payload,
4594 struct dmub_notification *notify)
4595 {
4596 union dmub_rb_cmd cmd = {0};
4597 struct dc_dmub_srv *dmub_srv = dc->ctx->dmub_srv;
4598 bool is_cmd_complete = true;
4599
4600 /* prepare SET_CONFIG command */
4601 cmd.set_config_access.header.type = DMUB_CMD__DPIA;
4602 cmd.set_config_access.header.sub_type = DMUB_CMD__DPIA_SET_CONFIG_ACCESS;
4603
4604 cmd.set_config_access.set_config_control.instance = dc->links[link_index]->ddc_hw_inst;
4605 cmd.set_config_access.set_config_control.cmd_pkt.msg_type = payload->msg_type;
4606 cmd.set_config_access.set_config_control.cmd_pkt.msg_data = payload->msg_data;
4607
4608 if (!dc_dmub_srv_cmd_with_reply_data(dmub_srv, &cmd)) {
4609 /* command is not processed by dmub */
4610 notify->sc_status = SET_CONFIG_UNKNOWN_ERROR;
4611 return is_cmd_complete;
4612 }
4613
4614 /* command processed by dmub, if ret_status is 1, it is completed instantly */
4615 if (cmd.set_config_access.header.ret_status == 1)
4616 notify->sc_status = cmd.set_config_access.set_config_control.immed_status;
4617 else
4618 /* cmd pending, will receive notification via outbox */
4619 is_cmd_complete = false;
4620
4621 return is_cmd_complete;
4622 }
4623
4624 /**
4625 *****************************************************************************
4626 * Function: dc_process_dmub_set_mst_slots
4627 *
4628 * @brief
4629 * Submits mst slot allocation command to dmub via inbox message
4630 *
4631 * @param
4632 * [in] dc: dc structure
4633 * [in] link_index: link index
4634 * [in] mst_alloc_slots: mst slots to be allotted
4635 * [out] mst_slots_in_use: mst slots in use returned in failure case
4636 *
4637 * @return
4638 * DC_OK if successful, DC_ERROR if failure
4639 *****************************************************************************
4640 */
4641 enum dc_status dc_process_dmub_set_mst_slots(const struct dc *dc,
4642 uint32_t link_index,
4643 uint8_t mst_alloc_slots,
4644 uint8_t *mst_slots_in_use)
4645 {
4646 union dmub_rb_cmd cmd = {0};
4647 struct dc_dmub_srv *dmub_srv = dc->ctx->dmub_srv;
4648
4649 /* prepare MST_ALLOC_SLOTS command */
4650 cmd.set_mst_alloc_slots.header.type = DMUB_CMD__DPIA;
4651 cmd.set_mst_alloc_slots.header.sub_type = DMUB_CMD__DPIA_MST_ALLOC_SLOTS;
4652
4653 cmd.set_mst_alloc_slots.mst_slots_control.instance = dc->links[link_index]->ddc_hw_inst;
4654 cmd.set_mst_alloc_slots.mst_slots_control.mst_alloc_slots = mst_alloc_slots;
4655
4656 if (!dc_dmub_srv_cmd_with_reply_data(dmub_srv, &cmd))
4657 /* command is not processed by dmub */
4658 return DC_ERROR_UNEXPECTED;
4659
4660 /* command processed by dmub, if ret_status is 1 */
4661 if (cmd.set_config_access.header.ret_status != 1)
4662 /* command processing error */
4663 return DC_ERROR_UNEXPECTED;
4664
4665 /* command processed and we have a status of 2, mst not enabled in dpia */
4666 if (cmd.set_mst_alloc_slots.mst_slots_control.immed_status == 2)
4667 return DC_FAIL_UNSUPPORTED_1;
4668
4669 /* previously configured mst alloc and used slots did not match */
4670 if (cmd.set_mst_alloc_slots.mst_slots_control.immed_status == 3) {
4671 *mst_slots_in_use = cmd.set_mst_alloc_slots.mst_slots_control.mst_slots_in_use;
4672 return DC_NOT_SUPPORTED;
4673 }
4674
4675 return DC_OK;
4676 }
4677
4678 /**
4679 *****************************************************************************
4680 * Function: dc_process_dmub_dpia_hpd_int_enable
4681 *
4682 * @brief
4683 * Submits dpia hpd int enable command to dmub via inbox message
4684 *
4685 * @param
4686 * [in] dc: dc structure
4687 * [in] hpd_int_enable: 1 for hpd int enable, 0 to disable
4688 *
4689 * @return
4690 * None
4691 *****************************************************************************
4692 */
4693 void dc_process_dmub_dpia_hpd_int_enable(const struct dc *dc,
4694 uint32_t hpd_int_enable)
4695 {
4696 union dmub_rb_cmd cmd = {0};
4697 struct dc_dmub_srv *dmub_srv = dc->ctx->dmub_srv;
4698
4699 cmd.dpia_hpd_int_enable.header.type = DMUB_CMD__DPIA_HPD_INT_ENABLE;
4700 cmd.dpia_hpd_int_enable.enable = hpd_int_enable;
4701
4702 dc_dmub_srv_cmd_queue(dmub_srv, &cmd);
4703 dc_dmub_srv_cmd_execute(dmub_srv);
4704 dc_dmub_srv_wait_idle(dmub_srv);
4705
4706 DC_LOG_DEBUG("%s: hpd_int_enable(%d)\n", __func__, hpd_int_enable);
4707 }
4708
4709 /**
4710 * dc_disable_accelerated_mode - disable accelerated mode
4711 * @dc: dc structure
4712 */
4713 void dc_disable_accelerated_mode(struct dc *dc)
4714 {
4715 bios_set_scratch_acc_mode_change(dc->ctx->dc_bios, 0);
4716 }
4717
4718
4719 /**
4720 *****************************************************************************
4721 * dc_notify_vsync_int_state() - notifies vsync enable/disable state
4722 * @dc: dc structure
4723 * @stream: stream where vsync int state changed
4724 * @enable: whether vsync is enabled or disabled
4725 *
4726 * Called when vsync is enabled/disabled
4727 * Will notify DMUB to start/stop ABM interrupts after steady state is reached
4728 *
4729 *****************************************************************************
4730 */
4731 void dc_notify_vsync_int_state(struct dc *dc, struct dc_stream_state *stream, bool enable)
4732 {
4733 int i;
4734 int edp_num;
4735 struct pipe_ctx *pipe = NULL;
4736 struct dc_link *link = stream->sink->link;
4737 struct dc_link *edp_links[MAX_NUM_EDP];
4738
4739
4740 if (link->psr_settings.psr_feature_enabled)
4741 return;
4742
4743 /*find primary pipe associated with stream*/
4744 for (i = 0; i < MAX_PIPES; i++) {
4745 pipe = &dc->current_state->res_ctx.pipe_ctx[i];
4746
4747 if (pipe->stream == stream && pipe->stream_res.tg)
4748 break;
4749 }
4750
4751 if (i == MAX_PIPES) {
4752 ASSERT(0);
4753 return;
4754 }
4755
4756 get_edp_links(dc, edp_links, &edp_num);
4757
4758 /* Determine panel inst */
4759 for (i = 0; i < edp_num; i++) {
4760 if (edp_links[i] == link)
4761 break;
4762 }
4763
4764 if (i == edp_num) {
4765 return;
4766 }
4767
4768 if (pipe->stream_res.abm && pipe->stream_res.abm->funcs->set_abm_pause)
4769 pipe->stream_res.abm->funcs->set_abm_pause(pipe->stream_res.abm, !enable, i, pipe->stream_res.tg->inst);
4770 }
4771 /*
4772 * dc_extended_blank_supported: Decide whether extended blank is supported
4773 *
4774 * Extended blank is a freesync optimization feature to be enabled in the future.
4775 * During the extra vblank period gained from freesync, we have the ability to enter z9/z10.
4776 *
4777 * @param [in] dc: Current DC state
4778 * @return: Indicate whether extended blank is supported (true or false)
4779 */
4780 bool dc_extended_blank_supported(struct dc *dc)
4781 {
4782 return dc->debug.extended_blank_optimization && !dc->debug.disable_z10
4783 && dc->caps.zstate_support && dc->caps.is_apu;
4784 }
4785