1 /*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: AMD
23 *
24 */
25
26 /* The caprices of the preprocessor require that this be declared right here */
27 #define CREATE_TRACE_POINTS
28
29 #include "dm_services_types.h"
30 #include "dc.h"
31 #include "link_enc_cfg.h"
32 #include "dc/inc/core_types.h"
33 #include "dal_asic_id.h"
34 #include "dmub/dmub_srv.h"
35 #include "dc/inc/hw/dmcu.h"
36 #include "dc/inc/hw/abm.h"
37 #include "dc/dc_dmub_srv.h"
38 #include "dc/dc_edid_parser.h"
39 #include "dc/dc_stat.h"
40 #include "amdgpu_dm_trace.h"
41 #include "dpcd_defs.h"
42 #include "link/protocols/link_dpcd.h"
43 #include "link_service_types.h"
44 #include "link/protocols/link_dp_capability.h"
45 #include "link/protocols/link_ddc.h"
46
47 #include "vid.h"
48 #include "amdgpu.h"
49 #include "amdgpu_display.h"
50 #include "amdgpu_ucode.h"
51 #include "atom.h"
52 #include "amdgpu_dm.h"
53 #include "amdgpu_dm_plane.h"
54 #include "amdgpu_dm_crtc.h"
55 #include "amdgpu_dm_hdcp.h"
56 #include <drm/display/drm_hdcp_helper.h>
57 #include "amdgpu_pm.h"
58 #include "amdgpu_atombios.h"
59
60 #include "amd_shared.h"
61 #include "amdgpu_dm_irq.h"
62 #include "dm_helpers.h"
63 #include "amdgpu_dm_mst_types.h"
64 #if defined(CONFIG_DEBUG_FS)
65 #include "amdgpu_dm_debugfs.h"
66 #endif
67 #include "amdgpu_dm_psr.h"
68
69 #include "ivsrcid/ivsrcid_vislands30.h"
70
71 #include <linux/backlight.h>
72 #include <linux/module.h>
73 #include <linux/moduleparam.h>
74 #include <linux/types.h>
75 #include <linux/pm_runtime.h>
76 #include <linux/pci.h>
77 #include <linux/firmware.h>
78 #include <linux/component.h>
79 #include <linux/dmi.h>
80
81 #include <drm/display/drm_dp_mst_helper.h>
82 #include <drm/display/drm_hdmi_helper.h>
83 #include <drm/drm_atomic.h>
84 #include <drm/drm_atomic_uapi.h>
85 #include <drm/drm_atomic_helper.h>
86 #include <drm/drm_blend.h>
87 #include <drm/drm_fourcc.h>
88 #include <drm/drm_edid.h>
89 #include <drm/drm_vblank.h>
90 #include <drm/drm_audio_component.h>
91 #include <drm/drm_gem_atomic_helper.h>
92 #include <drm/drm_plane_helper.h>
93
94 #include <acpi/video.h>
95
96 #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
97
98 #include "dcn/dcn_1_0_offset.h"
99 #include "dcn/dcn_1_0_sh_mask.h"
100 #include "soc15_hw_ip.h"
101 #include "soc15_common.h"
102 #include "vega10_ip_offset.h"
103
104 #include "gc/gc_11_0_0_offset.h"
105 #include "gc/gc_11_0_0_sh_mask.h"
106
107 #include "modules/inc/mod_freesync.h"
108 #include "modules/power/power_helpers.h"
109
110 #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
111 MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
112 #define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
113 MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
114 #define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
115 MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
116 #define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
117 MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
118 #define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
119 MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
120 #define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
121 MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
122 #define FIRMWARE_BEIGE_GOBY_DMUB "amdgpu/beige_goby_dmcub.bin"
123 MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY_DMUB);
124 #define FIRMWARE_YELLOW_CARP_DMUB "amdgpu/yellow_carp_dmcub.bin"
125 MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP_DMUB);
126 #define FIRMWARE_DCN_314_DMUB "amdgpu/dcn_3_1_4_dmcub.bin"
127 MODULE_FIRMWARE(FIRMWARE_DCN_314_DMUB);
128 #define FIRMWARE_DCN_315_DMUB "amdgpu/dcn_3_1_5_dmcub.bin"
129 MODULE_FIRMWARE(FIRMWARE_DCN_315_DMUB);
130 #define FIRMWARE_DCN316_DMUB "amdgpu/dcn_3_1_6_dmcub.bin"
131 MODULE_FIRMWARE(FIRMWARE_DCN316_DMUB);
132
133 #define FIRMWARE_DCN_V3_2_0_DMCUB "amdgpu/dcn_3_2_0_dmcub.bin"
134 MODULE_FIRMWARE(FIRMWARE_DCN_V3_2_0_DMCUB);
135 #define FIRMWARE_DCN_V3_2_1_DMCUB "amdgpu/dcn_3_2_1_dmcub.bin"
136 MODULE_FIRMWARE(FIRMWARE_DCN_V3_2_1_DMCUB);
137
138 #define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
139 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
140
141 #define FIRMWARE_NAVI12_DMCU "amdgpu/navi12_dmcu.bin"
142 MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
143
144 /* Number of bytes in PSP header for firmware. */
145 #define PSP_HEADER_BYTES 0x100
146
147 /* Number of bytes in PSP footer for firmware. */
148 #define PSP_FOOTER_BYTES 0x100
149
150 /**
151 * DOC: overview
152 *
153 * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
154 * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
155 * requests into DC requests, and DC responses into DRM responses.
156 *
157 * The root control structure is &struct amdgpu_display_manager.
158 */
159
160 /* basic init/fini API */
161 static int amdgpu_dm_init(struct amdgpu_device *adev);
162 static void amdgpu_dm_fini(struct amdgpu_device *adev);
163 static bool is_freesync_video_mode(const struct drm_display_mode *mode, struct amdgpu_dm_connector *aconnector);
164
165 static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
166 {
167 switch (link->dpcd_caps.dongle_type) {
168 case DISPLAY_DONGLE_NONE:
169 return DRM_MODE_SUBCONNECTOR_Native;
170 case DISPLAY_DONGLE_DP_VGA_CONVERTER:
171 return DRM_MODE_SUBCONNECTOR_VGA;
172 case DISPLAY_DONGLE_DP_DVI_CONVERTER:
173 case DISPLAY_DONGLE_DP_DVI_DONGLE:
174 return DRM_MODE_SUBCONNECTOR_DVID;
175 case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
176 case DISPLAY_DONGLE_DP_HDMI_DONGLE:
177 return DRM_MODE_SUBCONNECTOR_HDMIA;
178 case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
179 default:
180 return DRM_MODE_SUBCONNECTOR_Unknown;
181 }
182 }
183
184 static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
185 {
186 struct dc_link *link = aconnector->dc_link;
187 struct drm_connector *connector = &aconnector->base;
188 enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
189
190 if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
191 return;
192
193 if (aconnector->dc_sink)
194 subconnector = get_subconnector_type(link);
195
196 drm_object_property_set_value(&connector->base,
197 connector->dev->mode_config.dp_subconnector_property,
198 subconnector);
199 }
200
201 /*
202 * initializes drm_device display related structures, based on the information
203 * provided by DAL. The drm structures are: drm_crtc, drm_connector,
204 * drm_encoder, drm_mode_config
205 *
206 * Returns 0 on success
207 */
208 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
209 /* removes and deallocates the drm structures, created by the above function */
210 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
211
212 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
213 struct amdgpu_dm_connector *amdgpu_dm_connector,
214 u32 link_index,
215 struct amdgpu_encoder *amdgpu_encoder);
216 static int amdgpu_dm_encoder_init(struct drm_device *dev,
217 struct amdgpu_encoder *aencoder,
218 uint32_t link_index);
219
220 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
221
222 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
223
224 static int amdgpu_dm_atomic_check(struct drm_device *dev,
225 struct drm_atomic_state *state);
226
227 static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector);
228 static void handle_hpd_rx_irq(void *param);
229
230 static bool
231 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
232 struct drm_crtc_state *new_crtc_state);
233 /*
234 * dm_vblank_get_counter
235 *
236 * @brief
237 * Get counter for number of vertical blanks
238 *
239 * @param
240 * struct amdgpu_device *adev - [in] desired amdgpu device
241 * int crtc - [in] index of the CRTC to get the counter from
242 *
243 * @return
244 * Counter for vertical blanks
245 */
246 static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
247 {
248 struct amdgpu_crtc *acrtc = NULL;
249
250 if (crtc >= adev->mode_info.num_crtc)
251 return 0;
252
253 acrtc = adev->mode_info.crtcs[crtc];
254
255 if (!acrtc->dm_irq_params.stream) {
256 DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
257 crtc);
258 return 0;
259 }
260
261 return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
262 }
263
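/*
 * dm_crtc_get_scanoutpos() - Return the current scanout position and the
 * vblank start/end lines for the given CRTC, packed in the legacy
 * register-style format ((high << 16) | low) expected by the base driver.
 */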
264 static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
265 u32 *vbl, u32 *position)
266 {
267 u32 v_blank_start, v_blank_end, h_position, v_position;
268 struct amdgpu_crtc *acrtc = NULL;
269
270 if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
271 return -EINVAL;
272
273 acrtc = adev->mode_info.crtcs[crtc];
274
275 if (!acrtc->dm_irq_params.stream) {
276 DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
277 crtc);
278 return 0;
279 }
280
281 /*
282 * TODO rework base driver to use values directly.
283 * for now parse it back into reg-format
284 */
285 dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
286 &v_blank_start,
287 &v_blank_end,
288 &h_position,
289 &v_position);
290
291 *position = v_position | (h_position << 16);
292 *vbl = v_blank_start | (v_blank_end << 16);
293
294 return 0;
295 }
296
297 static bool dm_is_idle(void *handle)
298 {
299 /* XXX todo */
300 return true;
301 }
302
303 static int dm_wait_for_idle(void *handle)
304 {
305 /* XXX todo */
306 return 0;
307 }
308
309 static bool dm_check_soft_reset(void *handle)
310 {
311 return false;
312 }
313
314 static int dm_soft_reset(void *handle)
315 {
316 /* XXX todo */
317 return 0;
318 }
319
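/*
 * Look up the amdgpu_crtc whose OTG (output timing generator) instance
 * matches otg_inst. Falls back to the first CRTC (with a warning) when the
 * instance is -1, and returns NULL if no CRTC matches.
 */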
320 static struct amdgpu_crtc *
321 get_crtc_by_otg_inst(struct amdgpu_device *adev,
322 int otg_inst)
323 {
324 struct drm_device *dev = adev_to_drm(adev);
325 struct drm_crtc *crtc;
326 struct amdgpu_crtc *amdgpu_crtc;
327
328 if (WARN_ON(otg_inst == -1))
329 return adev->mode_info.crtcs[0];
330
331 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
332 amdgpu_crtc = to_amdgpu_crtc(crtc);
333
334 if (amdgpu_crtc->otg_inst == otg_inst)
335 return amdgpu_crtc;
336 }
337
338 return NULL;
339 }
340
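/*
 * A DC timing adjustment is needed when the new state runs VRR at a fixed
 * rate, or when the VRR active state differs between old and new state.
 */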
341 static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
342 struct dm_crtc_state *new_state)
343 {
344 if (new_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)
345 return true;
346 else if (amdgpu_dm_crtc_vrr_active(old_state) != amdgpu_dm_crtc_vrr_active(new_state))
347 return true;
348 else
349 return false;
350 }
351
352 static inline void reverse_planes_order(struct dc_surface_update *array_of_surface_update,
353 int planes_count)
354 {
355 int i, j;
356
357 for (i = 0, j = planes_count - 1; i < j; i++, j--)
358 swap(array_of_surface_update[i], array_of_surface_update[j]);
359 }
360
361 /**
362 * update_planes_and_stream_adapter() - Send planes to be updated in DC
363 *
364 * DC has a generic way to update planes and stream via
365 * dc_update_planes_and_stream function; however, DM might need some
366 * adjustments and preparation before calling it. This function is a wrapper
367 * for the dc_update_planes_and_stream that does any required configuration
368 * before passing control to DC.
369 *
370 * @dc: Display Core control structure
371 * @update_type: specify whether it is FULL/MEDIUM/FAST update
372 * @planes_count: planes count to update
373 * @stream: stream state
374 * @stream_update: stream update
375 * @array_of_surface_update: dc surface update pointer
376 *
377 */
378 static inline bool update_planes_and_stream_adapter(struct dc *dc,
379 int update_type,
380 int planes_count,
381 struct dc_stream_state *stream,
382 struct dc_stream_update *stream_update,
383 struct dc_surface_update *array_of_surface_update)
384 {
385 reverse_planes_order(array_of_surface_update, planes_count);
386
387 /*
388 * Previous frame finished and HW is ready for optimization.
389 */
390 if (update_type == UPDATE_TYPE_FAST)
391 dc_post_update_surfaces_to_stream(dc);
392
393 return dc_update_planes_and_stream(dc,
394 array_of_surface_update,
395 planes_count,
396 stream,
397 stream_update);
398 }
399
400 /**
401 * dm_pflip_high_irq() - Handle pageflip interrupt
402 * @interrupt_params: ignored
403 *
404 * Handles the pageflip interrupt by notifying all interested parties
405 * that the pageflip has been completed.
406 */
407 static void dm_pflip_high_irq(void *interrupt_params)
408 {
409 struct amdgpu_crtc *amdgpu_crtc;
410 struct common_irq_params *irq_params = interrupt_params;
411 struct amdgpu_device *adev = irq_params->adev;
412 unsigned long flags;
413 struct drm_pending_vblank_event *e;
414 u32 vpos, hpos, v_blank_start, v_blank_end;
415 bool vrr_active;
416
417 amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
418
419 /* IRQ could occur when in initial stage */
420 /* TODO work and BO cleanup */
421 if (amdgpu_crtc == NULL) {
422 DC_LOG_PFLIP("CRTC is null, returning.\n");
423 return;
424 }
425
426 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
427
428 if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
429 DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
430 amdgpu_crtc->pflip_status,
431 AMDGPU_FLIP_SUBMITTED,
432 amdgpu_crtc->crtc_id,
433 amdgpu_crtc);
434 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
435 return;
436 }
437
438 /* page flip completed. */
439 e = amdgpu_crtc->event;
440 amdgpu_crtc->event = NULL;
441
442 WARN_ON(!e);
443
444 vrr_active = amdgpu_dm_crtc_vrr_active_irq(amdgpu_crtc);
445
446 /* Fixed refresh rate, or VRR scanout position outside front-porch? */
447 if (!vrr_active ||
448 !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
449 &v_blank_end, &hpos, &vpos) ||
450 (vpos < v_blank_start)) {
451 /* Update to correct count and vblank timestamp if racing with
452 * vblank irq. This also updates to the correct vblank timestamp
453 * even in VRR mode, as scanout is past the front-porch atm.
454 */
455 drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
456
457 /* Wake up userspace by sending the pageflip event with proper
458 * count and timestamp of vblank of flip completion.
459 */
460 if (e) {
461 drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);
462
463 /* Event sent, so done with vblank for this flip */
464 drm_crtc_vblank_put(&amdgpu_crtc->base);
465 }
466 } else if (e) {
467 /* VRR active and inside front-porch: vblank count and
468 * timestamp for pageflip event will only be up to date after
469 * drm_crtc_handle_vblank() has been executed from late vblank
470 * irq handler after start of back-porch (vline 0). We queue the
471 * pageflip event for send-out by drm_crtc_handle_vblank() with
472 * updated timestamp and count, once it runs after us.
473 *
474 * We need to open-code this instead of using the helper
475 * drm_crtc_arm_vblank_event(), as that helper would
476 * call drm_crtc_accurate_vblank_count(), which we must
477 * not call in VRR mode while we are in front-porch!
478 */
479
480 /* sequence will be replaced by real count during send-out. */
481 e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
482 e->pipe = amdgpu_crtc->crtc_id;
483
484 list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
485 e = NULL;
486 }
487
488 /* Keep track of vblank of this flip for flip throttling. We use the
489 * cooked hw counter, as that one is incremented at start of this vblank
490 * of pageflip completion, so last_flip_vblank is the forbidden count
491 * for queueing new pageflips if vsync + VRR is enabled.
492 */
493 amdgpu_crtc->dm_irq_params.last_flip_vblank =
494 amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);
495
496 amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
497 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
498
499 DC_LOG_PFLIP("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
500 amdgpu_crtc->crtc_id, amdgpu_crtc,
501 vrr_active, (int) !e);
502 }
503
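/**
 * dm_vupdate_high_irq() - Handles VUPDATE interrupt
 * @interrupt_params: used for determining the CRTC instance
 *
 * Tracks the measured refresh rate for tracing and, in VRR mode, performs
 * core vblank handling after the end of front-porch along with
 * below-the-range (BTR) processing for pre-DCE12 ASICs.
 */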
504 static void dm_vupdate_high_irq(void *interrupt_params)
505 {
506 struct common_irq_params *irq_params = interrupt_params;
507 struct amdgpu_device *adev = irq_params->adev;
508 struct amdgpu_crtc *acrtc;
509 struct drm_device *drm_dev;
510 struct drm_vblank_crtc *vblank;
511 ktime_t frame_duration_ns, previous_timestamp;
512 unsigned long flags;
513 int vrr_active;
514
515 acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);
516
517 if (acrtc) {
518 vrr_active = amdgpu_dm_crtc_vrr_active_irq(acrtc);
519 drm_dev = acrtc->base.dev;
520 vblank = &drm_dev->vblank[acrtc->base.index];
521 previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
522 frame_duration_ns = vblank->time - previous_timestamp;
523
524 if (frame_duration_ns > 0) {
525 trace_amdgpu_refresh_rate_track(acrtc->base.index,
526 frame_duration_ns,
527 ktime_divns(NSEC_PER_SEC, frame_duration_ns));
528 atomic64_set(&irq_params->previous_timestamp, vblank->time);
529 }
530
531 DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d\n",
532 acrtc->crtc_id,
533 vrr_active);
534
535 /* Core vblank handling is done here after the end of front-porch in
536 * vrr mode, as vblank timestamping will only give valid results
537 * now that we are past the front-porch. This will also deliver
538 * page-flip completion events that have been queued to us
539 * if a pageflip happened inside front-porch.
540 */
541 if (vrr_active) {
542 amdgpu_dm_crtc_handle_vblank(acrtc);
543
544 /* BTR processing for pre-DCE12 ASICs */
545 if (acrtc->dm_irq_params.stream &&
546 adev->family < AMDGPU_FAMILY_AI) {
547 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
548 mod_freesync_handle_v_update(
549 adev->dm.freesync_module,
550 acrtc->dm_irq_params.stream,
551 &acrtc->dm_irq_params.vrr_params);
552
553 dc_stream_adjust_vmin_vmax(
554 adev->dm.dc,
555 acrtc->dm_irq_params.stream,
556 &acrtc->dm_irq_params.vrr_params.adjust);
557 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
558 }
559 }
560 }
561 }
562
563 /**
564 * dm_crtc_high_irq() - Handles CRTC interrupt
565 * @interrupt_params: used for determining the CRTC instance
566 *
567 * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
568 * event handler.
569 */
570 static void dm_crtc_high_irq(void *interrupt_params)
571 {
572 struct common_irq_params *irq_params = interrupt_params;
573 struct amdgpu_device *adev = irq_params->adev;
574 struct amdgpu_crtc *acrtc;
575 unsigned long flags;
576 int vrr_active;
577
578 acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
579 if (!acrtc)
580 return;
581
582 vrr_active = amdgpu_dm_crtc_vrr_active_irq(acrtc);
583
584 DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
585 vrr_active, acrtc->dm_irq_params.active_planes);
586
587 /**
588 * Core vblank handling at start of front-porch is only possible
589 * in non-vrr mode, as only there vblank timestamping will give
590 * valid results while done in front-porch. Otherwise defer it
591 * to dm_vupdate_high_irq after end of front-porch.
592 */
593 if (!vrr_active)
594 amdgpu_dm_crtc_handle_vblank(acrtc);
595
596 /**
597 * Following stuff must happen at start of vblank, for crc
598 * computation and below-the-range btr support in vrr mode.
599 */
600 amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
601
602 /* BTR updates need to happen before VUPDATE on Vega and above. */
603 if (adev->family < AMDGPU_FAMILY_AI)
604 return;
605
606 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
607
608 if (acrtc->dm_irq_params.stream &&
609 acrtc->dm_irq_params.vrr_params.supported &&
610 acrtc->dm_irq_params.freesync_config.state ==
611 VRR_STATE_ACTIVE_VARIABLE) {
612 mod_freesync_handle_v_update(adev->dm.freesync_module,
613 acrtc->dm_irq_params.stream,
614 &acrtc->dm_irq_params.vrr_params);
615
616 dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
617 &acrtc->dm_irq_params.vrr_params.adjust);
618 }
619
620 /*
621 * If there aren't any active_planes then DCH HUBP may be clock-gated.
622 * In that case, pageflip completion interrupts won't fire and pageflip
623 * completion events won't get delivered. Prevent this by sending
624 * pending pageflip events from here if a flip is still pending.
625 *
626 * If any planes are enabled, use dm_pflip_high_irq() instead, to
627 * avoid race conditions between flip programming and completion,
628 * which could cause too early flip completion events.
629 */
630 if (adev->family >= AMDGPU_FAMILY_RV &&
631 acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
632 acrtc->dm_irq_params.active_planes == 0) {
633 if (acrtc->event) {
634 drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
635 acrtc->event = NULL;
636 drm_crtc_vblank_put(&acrtc->base);
637 }
638 acrtc->pflip_status = AMDGPU_FLIP_NONE;
639 }
640
641 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
642 }
643
644 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
645 /**
646 * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
647 * DCN generation ASICs
648 * @interrupt_params: interrupt parameters
649 *
650 * Used to set crc window/read out crc value at vertical line 0 position
651 */
652 static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
653 {
654 struct common_irq_params *irq_params = interrupt_params;
655 struct amdgpu_device *adev = irq_params->adev;
656 struct amdgpu_crtc *acrtc;
657
658 acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);
659
660 if (!acrtc)
661 return;
662
663 amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
664 }
665 #endif /* CONFIG_DRM_AMD_SECURE_DISPLAY */
666
667 /**
668 * dmub_aux_setconfig_callback - Callback for AUX or SET_CONFIG command.
669 * @adev: amdgpu_device pointer
670 * @notify: dmub notification structure
671 *
672 * Dmub AUX or SET_CONFIG command completion processing callback.
673 * Copies the dmub notification to DM, where it is read by the AUX command
674 * issuing thread, and signals the event to wake up that thread.
675 */
676 static void dmub_aux_setconfig_callback(struct amdgpu_device *adev,
677 struct dmub_notification *notify)
678 {
679 if (adev->dm.dmub_notify)
680 memcpy(adev->dm.dmub_notify, notify, sizeof(struct dmub_notification));
681 if (notify->type == DMUB_NOTIFICATION_AUX_REPLY)
682 complete(&adev->dm.dmub_aux_transfer_done);
683 }
684
685 /**
686 * dmub_hpd_callback - DMUB HPD interrupt processing callback.
687 * @adev: amdgpu_device pointer
688 * @notify: dmub notification structure
689 *
690 * Dmub Hpd interrupt processing callback. Looks up the connector through
691 * the link index and calls a helper to do the processing.
692 */
693 static void dmub_hpd_callback(struct amdgpu_device *adev,
694 struct dmub_notification *notify)
695 {
696 struct amdgpu_dm_connector *aconnector;
697 struct amdgpu_dm_connector *hpd_aconnector = NULL;
698 struct drm_connector *connector;
699 struct drm_connector_list_iter iter;
700 struct dc_link *link;
701 u8 link_index = 0;
702 struct drm_device *dev;
703
704 if (adev == NULL)
705 return;
706
707 if (notify == NULL) {
708 DRM_ERROR("DMUB HPD callback notification was NULL");
709 return;
710 }
711
712 if (notify->link_index > adev->dm.dc->link_count) {
713 DRM_ERROR("DMUB HPD index (%u)is abnormal", notify->link_index);
714 return;
715 }
716
717 link_index = notify->link_index;
718 link = adev->dm.dc->links[link_index];
719 dev = adev->dm.ddev;
720
721 drm_connector_list_iter_begin(dev, &iter);
722 drm_for_each_connector_iter(connector, &iter) {
723 aconnector = to_amdgpu_dm_connector(connector);
724 if (link && aconnector->dc_link == link) {
725 if (notify->type == DMUB_NOTIFICATION_HPD)
726 DRM_INFO("DMUB HPD callback: link_index=%u\n", link_index);
727 else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ)
728 DRM_INFO("DMUB HPD IRQ callback: link_index=%u\n", link_index);
729 else
730 DRM_WARN("DMUB Unknown HPD callback type %d, link_index=%u\n",
731 notify->type, link_index);
732
733 hpd_aconnector = aconnector;
734 break;
735 }
736 }
737 drm_connector_list_iter_end(&iter);
738
739 if (hpd_aconnector) {
740 if (notify->type == DMUB_NOTIFICATION_HPD)
741 handle_hpd_irq_helper(hpd_aconnector);
742 else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ)
743 handle_hpd_rx_irq(hpd_aconnector);
744 }
745 }
746
747 /**
748 * register_dmub_notify_callback - Sets callback for DMUB notify
749 * @adev: amdgpu_device pointer
750 * @type: Type of dmub notification
751 * @callback: Dmub interrupt callback function
752 * @dmub_int_thread_offload: offload indicator
753 *
754 * API to register a dmub callback handler for a dmub notification.
755 * Also sets an indicator for whether callback processing is to be offloaded
756 * to the dmub interrupt handling thread.
757 * Return: true if successfully registered, false if the callback is NULL or the type is invalid
758 */
759 static bool register_dmub_notify_callback(struct amdgpu_device *adev,
760 enum dmub_notification_type type,
761 dmub_notify_interrupt_callback_t callback,
762 bool dmub_int_thread_offload)
763 {
764 if (callback != NULL && type < ARRAY_SIZE(adev->dm.dmub_thread_offload)) {
765 adev->dm.dmub_callback[type] = callback;
766 adev->dm.dmub_thread_offload[type] = dmub_int_thread_offload;
767 } else
768 return false;
769
770 return true;
771 }
772
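/*
 * Deferred work handler that dispatches a DMUB HPD notification to the
 * registered callback outside of interrupt context, then frees the
 * notification and the work item.
 */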
773 static void dm_handle_hpd_work(struct work_struct *work)
774 {
775 struct dmub_hpd_work *dmub_hpd_wrk;
776
777 dmub_hpd_wrk = container_of(work, struct dmub_hpd_work, handle_hpd_work);
778
779 if (!dmub_hpd_wrk->dmub_notify) {
780 DRM_ERROR("dmub_hpd_wrk dmub_notify is NULL");
781 return;
782 }
783
784 if (dmub_hpd_wrk->dmub_notify->type < ARRAY_SIZE(dmub_hpd_wrk->adev->dm.dmub_callback)) {
785 dmub_hpd_wrk->adev->dm.dmub_callback[dmub_hpd_wrk->dmub_notify->type](dmub_hpd_wrk->adev,
786 dmub_hpd_wrk->dmub_notify);
787 }
788
789 kfree(dmub_hpd_wrk->dmub_notify);
790 kfree(dmub_hpd_wrk);
791
792 }
793
794 #define DMUB_TRACE_MAX_READ 64
795 /**
796 * dm_dmub_outbox1_low_irq() - Handles Outbox interrupt
797 * @interrupt_params: used for determining the Outbox instance
798 *
799 * Handles the Outbox interrupt by dispatching DMUB notifications and
800 * reading entries from the DMCUB trace buffer.
801 */
802 static void dm_dmub_outbox1_low_irq(void *interrupt_params)
803 {
804 struct dmub_notification notify;
805 struct common_irq_params *irq_params = interrupt_params;
806 struct amdgpu_device *adev = irq_params->adev;
807 struct amdgpu_display_manager *dm = &adev->dm;
808 struct dmcub_trace_buf_entry entry = { 0 };
809 u32 count = 0;
810 struct dmub_hpd_work *dmub_hpd_wrk;
811 struct dc_link *plink = NULL;
812
813 if (dc_enable_dmub_notifications(adev->dm.dc) &&
814 irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {
815
816 do {
817 dc_stat_get_dmub_notification(adev->dm.dc, ¬ify);
818 if (notify.type >= ARRAY_SIZE(dm->dmub_thread_offload)) {
819 DRM_ERROR("DM: notify type %d invalid!", notify.type);
820 continue;
821 }
822 if (!dm->dmub_callback[notify.type]) {
823 DRM_DEBUG_DRIVER("DMUB notification skipped, no handler: type=%d\n", notify.type);
824 continue;
825 }
826 if (dm->dmub_thread_offload[notify.type] == true) {
827 dmub_hpd_wrk = kzalloc(sizeof(*dmub_hpd_wrk), GFP_ATOMIC);
828 if (!dmub_hpd_wrk) {
829 DRM_ERROR("Failed to allocate dmub_hpd_wrk");
830 return;
831 }
832 dmub_hpd_wrk->dmub_notify = kmemdup(¬ify, sizeof(struct dmub_notification),
833 GFP_ATOMIC);
834 if (!dmub_hpd_wrk->dmub_notify) {
835 kfree(dmub_hpd_wrk);
836 DRM_ERROR("Failed to allocate dmub_hpd_wrk->dmub_notify");
837 return;
838 }
839 INIT_WORK(&dmub_hpd_wrk->handle_hpd_work, dm_handle_hpd_work);
840 dmub_hpd_wrk->adev = adev;
841 if (notify.type == DMUB_NOTIFICATION_HPD) {
842 plink = adev->dm.dc->links[notify.link_index];
843 if (plink) {
844 plink->hpd_status =
845 notify.hpd_status == DP_HPD_PLUG;
846 }
847 }
848 queue_work(adev->dm.delayed_hpd_wq, &dmub_hpd_wrk->handle_hpd_work);
849 } else {
850 dm->dmub_callback[notify.type](adev, ¬ify);
851 }
852 } while (notify.pending_notification);
853 }
854
855
856 do {
857 if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
858 trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
859 entry.param0, entry.param1);
860
861 DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
862 entry.trace_code, entry.tick_count, entry.param0, entry.param1);
863 } else
864 break;
865
866 count++;
867
868 } while (count <= DMUB_TRACE_MAX_READ);
869
870 if (count > DMUB_TRACE_MAX_READ)
871 DRM_DEBUG_DRIVER("Warning : count > DMUB_TRACE_MAX_READ");
872 }
873
874 static int dm_set_clockgating_state(void *handle,
875 enum amd_clockgating_state state)
876 {
877 return 0;
878 }
879
880 static int dm_set_powergating_state(void *handle,
881 enum amd_powergating_state state)
882 {
883 return 0;
884 }
885
886 /* Prototypes of private functions */
887 static int dm_early_init(void *handle);
888
889 /* Allocate memory for FBC compressed data */
890 static void amdgpu_dm_fbc_init(struct drm_connector *connector)
891 {
892 struct drm_device *dev = connector->dev;
893 struct amdgpu_device *adev = drm_to_adev(dev);
894 struct dm_compressor_info *compressor = &adev->dm.compressor;
895 struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
896 struct drm_display_mode *mode;
897 unsigned long max_size = 0;
898
899 if (adev->dm.dc->fbc_compressor == NULL)
900 return;
901
902 if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
903 return;
904
905 if (compressor->bo_ptr)
906 return;
907
908
909 list_for_each_entry(mode, &connector->modes, head) {
910 if (max_size < mode->htotal * mode->vtotal)
911 max_size = mode->htotal * mode->vtotal;
912 }
913
914 if (max_size) {
915 int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
916 AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
917 &compressor->gpu_addr, &compressor->cpu_addr);
918
919 if (r)
920 DRM_ERROR("DM: Failed to initialize FBC\n");
921 else {
922 adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
923 DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
924 }
925
926 }
927
928 }
929
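/*
 * Audio component callback: find the connector bound to the given audio
 * pin/port and copy its ELD (EDID-Like Data) into the caller's buffer.
 */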
930 static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
931 int pipe, bool *enabled,
932 unsigned char *buf, int max_bytes)
933 {
934 struct drm_device *dev = dev_get_drvdata(kdev);
935 struct amdgpu_device *adev = drm_to_adev(dev);
936 struct drm_connector *connector;
937 struct drm_connector_list_iter conn_iter;
938 struct amdgpu_dm_connector *aconnector;
939 int ret = 0;
940
941 *enabled = false;
942
943 mutex_lock(&adev->dm.audio_lock);
944
945 drm_connector_list_iter_begin(dev, &conn_iter);
946 drm_for_each_connector_iter(connector, &conn_iter) {
947 aconnector = to_amdgpu_dm_connector(connector);
948 if (aconnector->audio_inst != port)
949 continue;
950
951 *enabled = true;
952 ret = drm_eld_size(connector->eld);
953 memcpy(buf, connector->eld, min(max_bytes, ret));
954
955 break;
956 }
957 drm_connector_list_iter_end(&conn_iter);
958
959 mutex_unlock(&adev->dm.audio_lock);
960
961 DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);
962
963 return ret;
964 }
965
966 static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
967 .get_eld = amdgpu_dm_audio_component_get_eld,
968 };
969
970 static int amdgpu_dm_audio_component_bind(struct device *kdev,
971 struct device *hda_kdev, void *data)
972 {
973 struct drm_device *dev = dev_get_drvdata(kdev);
974 struct amdgpu_device *adev = drm_to_adev(dev);
975 struct drm_audio_component *acomp = data;
976
977 acomp->ops = &amdgpu_dm_audio_component_ops;
978 acomp->dev = kdev;
979 adev->dm.audio_component = acomp;
980
981 return 0;
982 }
983
984 static void amdgpu_dm_audio_component_unbind(struct device *kdev,
985 struct device *hda_kdev, void *data)
986 {
987 struct drm_device *dev = dev_get_drvdata(kdev);
988 struct amdgpu_device *adev = drm_to_adev(dev);
989 struct drm_audio_component *acomp = data;
990
991 acomp->ops = NULL;
992 acomp->dev = NULL;
993 adev->dm.audio_component = NULL;
994 }
995
996 static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
997 .bind = amdgpu_dm_audio_component_bind,
998 .unbind = amdgpu_dm_audio_component_unbind,
999 };
1000
1001 static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
1002 {
1003 int i, ret;
1004
1005 if (!amdgpu_audio)
1006 return 0;
1007
1008 adev->mode_info.audio.enabled = true;
1009
1010 adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;
1011
1012 for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
1013 adev->mode_info.audio.pin[i].channels = -1;
1014 adev->mode_info.audio.pin[i].rate = -1;
1015 adev->mode_info.audio.pin[i].bits_per_sample = -1;
1016 adev->mode_info.audio.pin[i].status_bits = 0;
1017 adev->mode_info.audio.pin[i].category_code = 0;
1018 adev->mode_info.audio.pin[i].connected = false;
1019 adev->mode_info.audio.pin[i].id =
1020 adev->dm.dc->res_pool->audios[i]->inst;
1021 adev->mode_info.audio.pin[i].offset = 0;
1022 }
1023
1024 ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
1025 if (ret < 0)
1026 return ret;
1027
1028 adev->dm.audio_registered = true;
1029
1030 return 0;
1031 }
1032
1033 static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
1034 {
1035 if (!amdgpu_audio)
1036 return;
1037
1038 if (!adev->mode_info.audio.enabled)
1039 return;
1040
1041 if (adev->dm.audio_registered) {
1042 component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
1043 adev->dm.audio_registered = false;
1044 }
1045
1046 /* TODO: Disable audio? */
1047
1048 adev->mode_info.audio.enabled = false;
1049 }
1050
1051 static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
1052 {
1053 struct drm_audio_component *acomp = adev->dm.audio_component;
1054
1055 if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
1056 DRM_DEBUG_KMS("Notify ELD: %d\n", pin);
1057
1058 acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
1059 pin, -1);
1060 }
1061 }
1062
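/*
 * dm_dmub_hw_init() - Bring up the DMUB (Display Microcontroller Unit B).
 *
 * Copies the DMUB firmware and VBIOS into the reserved framebuffer windows,
 * clears the mailbox, trace buffer and firmware state regions, programs the
 * hardware parameters and waits for the firmware auto-load to complete.
 */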
1063 static int dm_dmub_hw_init(struct amdgpu_device *adev)
1064 {
1065 const struct dmcub_firmware_header_v1_0 *hdr;
1066 struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
1067 struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
1068 const struct firmware *dmub_fw = adev->dm.dmub_fw;
1069 struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
1070 struct abm *abm = adev->dm.dc->res_pool->abm;
1071 struct dmub_srv_hw_params hw_params;
1072 enum dmub_status status;
1073 const unsigned char *fw_inst_const, *fw_bss_data;
1074 u32 i, fw_inst_const_size, fw_bss_data_size;
1075 bool has_hw_support;
1076
1077 if (!dmub_srv)
1078 /* DMUB isn't supported on the ASIC. */
1079 return 0;
1080
1081 if (!fb_info) {
1082 DRM_ERROR("No framebuffer info for DMUB service.\n");
1083 return -EINVAL;
1084 }
1085
1086 if (!dmub_fw) {
1087 /* Firmware required for DMUB support. */
1088 DRM_ERROR("No firmware provided for DMUB.\n");
1089 return -EINVAL;
1090 }
1091
1092 status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
1093 if (status != DMUB_STATUS_OK) {
1094 DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
1095 return -EINVAL;
1096 }
1097
1098 if (!has_hw_support) {
1099 DRM_INFO("DMUB unsupported on ASIC\n");
1100 return 0;
1101 }
1102
1103 /* Reset DMCUB if it was previously running - before we overwrite its memory. */
1104 status = dmub_srv_hw_reset(dmub_srv);
1105 if (status != DMUB_STATUS_OK)
1106 DRM_WARN("Error resetting DMUB HW: %d\n", status);
1107
1108 hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
1109
1110 fw_inst_const = dmub_fw->data +
1111 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1112 PSP_HEADER_BYTES;
1113
1114 fw_bss_data = dmub_fw->data +
1115 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1116 le32_to_cpu(hdr->inst_const_bytes);
1117
1118 /* Copy firmware and bios info into FB memory. */
1119 fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1120 PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1121
1122 fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1123
1124 /* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
1125 * amdgpu_ucode_init_single_fw will load dmub firmware
1126 * fw_inst_const part to cw0; otherwise, the firmware back door load
1127 * will be done by dm_dmub_hw_init
1128 */
1129 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1130 memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
1131 fw_inst_const_size);
1132 }
1133
1134 if (fw_bss_data_size)
1135 memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
1136 fw_bss_data, fw_bss_data_size);
1137
1138 /* Copy firmware bios info into FB memory. */
1139 memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
1140 adev->bios_size);
1141
1142 /* Reset regions that need to be reset. */
1143 memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
1144 fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);
1145
1146 memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
1147 fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);
1148
1149 memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
1150 fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);
1151
1152 /* Initialize hardware. */
1153 memset(&hw_params, 0, sizeof(hw_params));
1154 hw_params.fb_base = adev->gmc.fb_start;
1155 hw_params.fb_offset = adev->vm_manager.vram_base_offset;
1156
1157 /* backdoor load firmware and trigger dmub running */
1158 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
1159 hw_params.load_inst_const = true;
1160
1161 if (dmcu)
1162 hw_params.psp_version = dmcu->psp_version;
1163
1164 for (i = 0; i < fb_info->num_fb; ++i)
1165 hw_params.fb[i] = &fb_info->fb[i];
1166
1167 switch (adev->ip_versions[DCE_HWIP][0]) {
1168 case IP_VERSION(3, 1, 3):
1169 case IP_VERSION(3, 1, 4):
1170 hw_params.dpia_supported = true;
1171 hw_params.disable_dpia = adev->dm.dc->debug.dpia_debug.bits.disable_dpia;
1172 break;
1173 default:
1174 break;
1175 }
1176
1177 status = dmub_srv_hw_init(dmub_srv, &hw_params);
1178 if (status != DMUB_STATUS_OK) {
1179 DRM_ERROR("Error initializing DMUB HW: %d\n", status);
1180 return -EINVAL;
1181 }
1182
1183 /* Wait for firmware load to finish. */
1184 status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
1185 if (status != DMUB_STATUS_OK)
1186 DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
1187
1188 /* Init DMCU and ABM if available. */
1189 if (dmcu && abm) {
1190 dmcu->funcs->dmcu_init(dmcu);
1191 abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
1192 }
1193
1194 if (!adev->dm.dc->ctx->dmub_srv)
1195 adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
1196 if (!adev->dm.dc->ctx->dmub_srv) {
1197 DRM_ERROR("Couldn't allocate DC DMUB server!\n");
1198 return -ENOMEM;
1199 }
1200
1201 DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
1202 adev->dm.dmcub_fw_version);
1203
1204 return 0;
1205 }
1206
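/*
 * On resume, check whether DMUB hardware is already initialized: if so, only
 * wait for the firmware auto-load to finish; otherwise perform the full
 * hardware initialization via dm_dmub_hw_init().
 */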
1207 static void dm_dmub_hw_resume(struct amdgpu_device *adev)
1208 {
1209 struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
1210 enum dmub_status status;
1211 bool init;
1212
1213 if (!dmub_srv) {
1214 /* DMUB isn't supported on the ASIC. */
1215 return;
1216 }
1217
1218 status = dmub_srv_is_hw_init(dmub_srv, &init);
1219 if (status != DMUB_STATUS_OK)
1220 DRM_WARN("DMUB hardware init check failed: %d\n", status);
1221
1222 if (status == DMUB_STATUS_OK && init) {
1223 /* Wait for firmware load to finish. */
1224 status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
1225 if (status != DMUB_STATUS_OK)
1226 DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
1227 } else {
1228 /* Perform the full hardware initialization. */
1229 dm_dmub_hw_init(adev);
1230 }
1231 }
1232
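/*
 * Fill a dc_phy_addr_space_config from the GMC/VM state: system and AGP
 * aperture ranges, framebuffer base/offset/top, and the GART page table
 * start/end/base addresses, so DC can program the display address space.
 */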
1233 static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
1234 {
1235 u64 pt_base;
1236 u32 logical_addr_low;
1237 u32 logical_addr_high;
1238 u32 agp_base, agp_bot, agp_top;
1239 PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;
1240
1241 memset(pa_config, 0, sizeof(*pa_config));
1242
1243 agp_base = 0;
1244 agp_bot = adev->gmc.agp_start >> 24;
1245 agp_top = adev->gmc.agp_end >> 24;
1246
1247 /* AGP aperture is disabled */
1248 if (agp_bot == agp_top) {
1249 logical_addr_low = adev->gmc.fb_start >> 18;
1250 if (adev->apu_flags & (AMD_APU_IS_RAVEN2 |
1251 AMD_APU_IS_RENOIR |
1252 AMD_APU_IS_GREEN_SARDINE))
1253 /*
1254 * Raven2 has a HW issue that prevents it from using vram beyond
1255 * MC_VM_SYSTEM_APERTURE_HIGH_ADDR. As a workaround, increase the
1256 * system aperture high address (add 1) to get rid of the VM fault
1257 * and hardware hang.
1258 */
1259 logical_addr_high = (adev->gmc.fb_end >> 18) + 0x1;
1260 else
1261 logical_addr_high = adev->gmc.fb_end >> 18;
1262 } else {
1263 logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
1264 if (adev->apu_flags & (AMD_APU_IS_RAVEN2 |
1265 AMD_APU_IS_RENOIR |
1266 AMD_APU_IS_GREEN_SARDINE))
1267 /*
1268 * Raven2 has a HW issue that prevents it from using vram beyond
1269 * MC_VM_SYSTEM_APERTURE_HIGH_ADDR. As a workaround, increase the
1270 * system aperture high address (add 1) to get rid of the VM fault
1271 * and hardware hang.
1272 */
1273 logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
1274 else
1275 logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;
1276 }
1277
1278 pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
1279
1280 page_table_start.high_part = upper_32_bits(adev->gmc.gart_start >>
1281 AMDGPU_GPU_PAGE_SHIFT);
1282 page_table_start.low_part = lower_32_bits(adev->gmc.gart_start >>
1283 AMDGPU_GPU_PAGE_SHIFT);
1284 page_table_end.high_part = upper_32_bits(adev->gmc.gart_end >>
1285 AMDGPU_GPU_PAGE_SHIFT);
1286 page_table_end.low_part = lower_32_bits(adev->gmc.gart_end >>
1287 AMDGPU_GPU_PAGE_SHIFT);
1288 page_table_base.high_part = upper_32_bits(pt_base);
1289 page_table_base.low_part = lower_32_bits(pt_base);
1290
1291 pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
1292 pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;
1293
1294 pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24;
1295 pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
1296 pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;
1297
1298 pa_config->system_aperture.fb_base = adev->gmc.fb_start;
1299 pa_config->system_aperture.fb_offset = adev->vm_manager.vram_base_offset;
1300 pa_config->system_aperture.fb_top = adev->gmc.fb_end;
1301
1302 pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
1303 pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
1304 pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;
1305
1306 pa_config->is_hvm_enabled = adev->mode_info.gpu_vm_support;
1307
1308 }
1309
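/*
 * Force the connector to the given state (e.g. DRM_FORCE_OFF or
 * DRM_FORCE_UNSPECIFIED) under the mode_config mutex and generate a hotplug
 * event so that userspace re-probes the connector.
 */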
1310 static void force_connector_state(
1311 struct amdgpu_dm_connector *aconnector,
1312 enum drm_connector_force force_state)
1313 {
1314 struct drm_connector *connector = &aconnector->base;
1315
1316 mutex_lock(&connector->dev->mode_config.mutex);
1317 aconnector->base.force = force_state;
1318 mutex_unlock(&connector->dev->mode_config.mutex);
1319
1320 mutex_lock(&aconnector->hpd_lock);
1321 drm_kms_helper_connector_hotplug_event(connector);
1322 mutex_unlock(&aconnector->hpd_lock);
1323 }
1324
1325 static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
1326 {
1327 struct hpd_rx_irq_offload_work *offload_work;
1328 struct amdgpu_dm_connector *aconnector;
1329 struct dc_link *dc_link;
1330 struct amdgpu_device *adev;
1331 enum dc_connection_type new_connection_type = dc_connection_none;
1332 unsigned long flags;
1333 union test_response test_response;
1334
1335 memset(&test_response, 0, sizeof(test_response));
1336
1337 offload_work = container_of(work, struct hpd_rx_irq_offload_work, work);
1338 aconnector = offload_work->offload_wq->aconnector;
1339
1340 if (!aconnector) {
1341 DRM_ERROR("Can't retrieve aconnector in hpd_rx_irq_offload_work");
1342 goto skip;
1343 }
1344
1345 adev = drm_to_adev(aconnector->base.dev);
1346 dc_link = aconnector->dc_link;
1347
1348 mutex_lock(&aconnector->hpd_lock);
1349 if (!dc_link_detect_connection_type(dc_link, &new_connection_type))
1350 DRM_ERROR("KMS: Failed to detect connector\n");
1351 mutex_unlock(&aconnector->hpd_lock);
1352
1353 if (new_connection_type == dc_connection_none)
1354 goto skip;
1355
1356 if (amdgpu_in_reset(adev))
1357 goto skip;
1358
1359 if (offload_work->data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||
1360 offload_work->data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
1361 dm_handle_mst_sideband_msg_ready_event(&aconnector->mst_mgr, DOWN_OR_UP_MSG_RDY_EVENT);
1362 spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
1363 offload_work->offload_wq->is_handling_mst_msg_rdy_event = false;
1364 spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);
1365 goto skip;
1366 }
1367
1368 mutex_lock(&adev->dm.dc_lock);
1369 if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
1370 dc_link_dp_handle_automated_test(dc_link);
1371
1372 if (aconnector->timing_changed) {
1373 /* force connector disconnect and reconnect */
1374 force_connector_state(aconnector, DRM_FORCE_OFF);
1375 msleep(100);
1376 force_connector_state(aconnector, DRM_FORCE_UNSPECIFIED);
1377 }
1378
1379 test_response.bits.ACK = 1;
1380
1381 core_link_write_dpcd(
1382 dc_link,
1383 DP_TEST_RESPONSE,
1384 &test_response.raw,
1385 sizeof(test_response));
1386 } else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) &&
1387 dc_link_check_link_loss_status(dc_link, &offload_work->data) &&
1388 dc_link_dp_allow_hpd_rx_irq(dc_link)) {
1389 /* offload_work->data comes from handle_hpd_rx_irq()->
1390 * schedule_hpd_rx_offload_work(); this is the deferred handler
1391 * for an hpd short pulse. By this point the link status may have
1392 * changed, so get the latest link status from the dpcd
1393 * registers. If the link status is good, skip re-running link
1394 * training.
1395 */
1396 union hpd_irq_data irq_data;
1397
1398 memset(&irq_data, 0, sizeof(irq_data));
1399
1400 /* Before dc_link_dp_handle_link_loss, allow a new link-loss handling
1401 * request to be added to the work queue in case the link is lost
1402 * again at the end of dc_link_dp_handle_link_loss
1403 */
1404 spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
1405 offload_work->offload_wq->is_handling_link_loss = false;
1406 spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);
1407
1408 if ((dc_link_dp_read_hpd_rx_irq_data(dc_link, &irq_data) == DC_OK) &&
1409 dc_link_check_link_loss_status(dc_link, &irq_data))
1410 dc_link_dp_handle_link_loss(dc_link);
1411 }
1412 mutex_unlock(&adev->dm.dc_lock);
1413
1414 skip:
1415 kfree(offload_work);
1416
1417 }
1418
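/*
 * Allocate one single-threaded offload workqueue per link, used to defer
 * HPD RX (short pulse) interrupt handling out of the IRQ path.
 */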
1419 static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct dc *dc)
1420 {
1421 int max_caps = dc->caps.max_links;
1422 int i = 0;
1423 struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq = NULL;
1424
1425 hpd_rx_offload_wq = kcalloc(max_caps, sizeof(*hpd_rx_offload_wq), GFP_KERNEL);
1426
1427 if (!hpd_rx_offload_wq)
1428 return NULL;
1429
1430
1431 for (i = 0; i < max_caps; i++) {
1432 hpd_rx_offload_wq[i].wq =
1433 create_singlethread_workqueue("amdgpu_dm_hpd_rx_offload_wq");
1434
1435 if (hpd_rx_offload_wq[i].wq == NULL) {
1436 DRM_ERROR("create amdgpu_dm_hpd_rx_offload_wq fail!");
1437 goto out_err;
1438 }
1439
1440 spin_lock_init(&hpd_rx_offload_wq[i].offload_lock);
1441 }
1442
1443 return hpd_rx_offload_wq;
1444
1445 out_err:
1446 for (i = 0; i < max_caps; i++) {
1447 if (hpd_rx_offload_wq[i].wq)
1448 destroy_workqueue(hpd_rx_offload_wq[i].wq);
1449 }
1450 kfree(hpd_rx_offload_wq);
1451 return NULL;
1452 }
1453
1454 struct amdgpu_stutter_quirk {
1455 u16 chip_vendor;
1456 u16 chip_device;
1457 u16 subsys_vendor;
1458 u16 subsys_device;
1459 u8 revision;
1460 };
1461
1462 static const struct amdgpu_stutter_quirk amdgpu_stutter_quirk_list[] = {
1463 /* https://bugzilla.kernel.org/show_bug.cgi?id=214417 */
1464 { 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 },
1465 { 0, 0, 0, 0, 0 },
1466 };
1467
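/*
 * Return true when the PCI IDs and revision of the device match an entry in
 * the stutter quirk list above, in which case the caller disables stutter.
 */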
1468 static bool dm_should_disable_stutter(struct pci_dev *pdev)
1469 {
1470 const struct amdgpu_stutter_quirk *p = amdgpu_stutter_quirk_list;
1471
1472 while (p && p->chip_device != 0) {
1473 if (pdev->vendor == p->chip_vendor &&
1474 pdev->device == p->chip_device &&
1475 pdev->subsystem_vendor == p->subsys_vendor &&
1476 pdev->subsystem_device == p->subsys_device &&
1477 pdev->revision == p->revision) {
1478 return true;
1479 }
1480 ++p;
1481 }
1482 return false;
1483 }
1484
1485 static const struct dmi_system_id hpd_disconnect_quirk_table[] = {
1486 {
1487 .matches = {
1488 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
1489 DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3660"),
1490 },
1491 },
1492 {
1493 .matches = {
1494 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
1495 DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3260"),
1496 },
1497 },
1498 {
1499 .matches = {
1500 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
1501 DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3460"),
1502 },
1503 },
1504 {
1505 .matches = {
1506 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
1507 DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Tower Plus 7010"),
1508 },
1509 },
1510 {
1511 .matches = {
1512 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
1513 DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Tower 7010"),
1514 },
1515 },
1516 {
1517 .matches = {
1518 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
1519 DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex SFF Plus 7010"),
1520 },
1521 },
1522 {
1523 .matches = {
1524 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
1525 DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex SFF 7010"),
1526 },
1527 },
1528 {
1529 .matches = {
1530 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
1531 DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Micro Plus 7010"),
1532 },
1533 },
1534 {
1535 .matches = {
1536 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
1537 DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Micro 7010"),
1538 },
1539 },
1540 {}
1541 /* TODO: refactor this from a fixed table to a dynamic option */
1542 };
1543
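/*
 * Check the system DMI identification against the HPD disconnect quirk table
 * and set dm->aux_hpd_discon_quirk accordingly.
 */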
1544 static void retrieve_dmi_info(struct amdgpu_display_manager *dm)
1545 {
1546 const struct dmi_system_id *dmi_id;
1547
1548 dm->aux_hpd_discon_quirk = false;
1549
1550 dmi_id = dmi_first_match(hpd_disconnect_quirk_table);
1551 if (dmi_id) {
1552 dm->aux_hpd_discon_quirk = true;
1553 DRM_INFO("aux_hpd_discon_quirk attached\n");
1554 }
1555 }
1556
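/*
 * amdgpu_dm_init() - Top-level DM initialization: set up locks and IRQ
 * support, fill dc_init_data from ASIC and GMC information, create the
 * Display Core (DC) instance and apply feature/debug mask overrides.
 */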
1557 static int amdgpu_dm_init(struct amdgpu_device *adev)
1558 {
1559 struct dc_init_data init_data;
1560 struct dc_callback_init init_params;
1561 int r;
1562
1563 adev->dm.ddev = adev_to_drm(adev);
1564 adev->dm.adev = adev;
1565
1566 /* Zero all the fields */
1567 memset(&init_data, 0, sizeof(init_data));
1568 memset(&init_params, 0, sizeof(init_params));
1569
1570 mutex_init(&adev->dm.dpia_aux_lock);
1571 mutex_init(&adev->dm.dc_lock);
1572 mutex_init(&adev->dm.audio_lock);
1573
1574 if (amdgpu_dm_irq_init(adev)) {
1575 DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
1576 goto error;
1577 }
1578
1579 init_data.asic_id.chip_family = adev->family;
1580
1581 init_data.asic_id.pci_revision_id = adev->pdev->revision;
1582 init_data.asic_id.hw_internal_rev = adev->external_rev_id;
1583 init_data.asic_id.chip_id = adev->pdev->device;
1584
1585 init_data.asic_id.vram_width = adev->gmc.vram_width;
1586 /* TODO: initialize init_data.asic_id.vram_type here!!!! */
1587 init_data.asic_id.atombios_base_address =
1588 adev->mode_info.atom_context->bios;
1589
1590 init_data.driver = adev;
1591
1592 adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
1593
1594 if (!adev->dm.cgs_device) {
1595 DRM_ERROR("amdgpu: failed to create cgs device.\n");
1596 goto error;
1597 }
1598
1599 init_data.cgs_device = adev->dm.cgs_device;
1600
1601 init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
1602
1603 switch (adev->ip_versions[DCE_HWIP][0]) {
1604 case IP_VERSION(2, 1, 0):
1605 switch (adev->dm.dmcub_fw_version) {
1606 case 0: /* development */
1607 case 0x1: /* linux-firmware.git hash 6d9f399 */
1608 case 0x01000000: /* linux-firmware.git hash 9a0b0f4 */
1609 init_data.flags.disable_dmcu = false;
1610 break;
1611 default:
1612 init_data.flags.disable_dmcu = true;
1613 }
1614 break;
1615 case IP_VERSION(2, 0, 3):
1616 init_data.flags.disable_dmcu = true;
1617 break;
1618 default:
1619 break;
1620 }
1621
1622 switch (adev->asic_type) {
1623 case CHIP_CARRIZO:
1624 case CHIP_STONEY:
1625 init_data.flags.gpu_vm_support = true;
1626 break;
1627 default:
1628 switch (adev->ip_versions[DCE_HWIP][0]) {
1629 case IP_VERSION(1, 0, 0):
1630 case IP_VERSION(1, 0, 1):
1631 /* enable S/G on PCO and RV2 */
1632 if ((adev->apu_flags & AMD_APU_IS_RAVEN2) ||
1633 (adev->apu_flags & AMD_APU_IS_PICASSO))
1634 init_data.flags.gpu_vm_support = true;
1635 break;
1636 case IP_VERSION(2, 1, 0):
1637 case IP_VERSION(3, 0, 1):
1638 case IP_VERSION(3, 1, 2):
1639 case IP_VERSION(3, 1, 3):
1640 case IP_VERSION(3, 1, 4):
1641 case IP_VERSION(3, 1, 5):
1642 case IP_VERSION(3, 1, 6):
1643 init_data.flags.gpu_vm_support = true;
1644 break;
1645 default:
1646 break;
1647 }
1648 break;
1649 }
1650 if (init_data.flags.gpu_vm_support &&
1651 (amdgpu_sg_display == 0))
1652 init_data.flags.gpu_vm_support = false;
1653
1654 if (init_data.flags.gpu_vm_support)
1655 adev->mode_info.gpu_vm_support = true;
1656
1657 if (amdgpu_dc_feature_mask & DC_FBC_MASK)
1658 init_data.flags.fbc_support = true;
1659
1660 if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
1661 init_data.flags.multi_mon_pp_mclk_switch = true;
1662
1663 if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
1664 init_data.flags.disable_fractional_pwm = true;
1665
1666 if (amdgpu_dc_feature_mask & DC_EDP_NO_POWER_SEQUENCING)
1667 init_data.flags.edp_no_power_sequencing = true;
1668
1669 if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP1_4A)
1670 init_data.flags.allow_lttpr_non_transparent_mode.bits.DP1_4A = true;
1671 if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP2_0)
1672 init_data.flags.allow_lttpr_non_transparent_mode.bits.DP2_0 = true;
1673
1674 init_data.flags.seamless_boot_edp_requested = false;
1675
1676 if (check_seamless_boot_capability(adev)) {
1677 init_data.flags.seamless_boot_edp_requested = true;
1678 init_data.flags.allow_seamless_boot_optimization = true;
1679 DRM_INFO("Seamless boot condition check passed\n");
1680 }
1681
1682 init_data.flags.enable_mipi_converter_optimization = true;
1683
1684 init_data.dcn_reg_offsets = adev->reg_offset[DCE_HWIP][0];
1685 init_data.nbio_reg_offsets = adev->reg_offset[NBIO_HWIP][0];
1686
1687 INIT_LIST_HEAD(&adev->dm.da_list);
1688
1689 retrieve_dmi_info(&adev->dm);
1690
1691 /* Display Core create. */
1692 adev->dm.dc = dc_create(&init_data);
1693
1694 if (adev->dm.dc) {
1695 DRM_INFO("Display Core v%s initialized on %s\n", DC_VER,
1696 dce_version_to_string(adev->dm.dc->ctx->dce_version));
1697 } else {
1698 DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
1699 goto error;
1700 }
1701
1702 if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
1703 adev->dm.dc->debug.force_single_disp_pipe_split = false;
1704 adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
1705 }
1706
1707 if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
1708 adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
1709 if (dm_should_disable_stutter(adev->pdev))
1710 adev->dm.dc->debug.disable_stutter = true;
1711
1712 if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
1713 adev->dm.dc->debug.disable_stutter = true;
1714
1715 if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
1716 adev->dm.dc->debug.disable_dsc = true;
1717
1718 if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
1719 adev->dm.dc->debug.disable_clock_gate = true;
1720
1721 if (amdgpu_dc_debug_mask & DC_FORCE_SUBVP_MCLK_SWITCH)
1722 adev->dm.dc->debug.force_subvp_mclk_switch = true;
1723
1724 adev->dm.dc->debug.visual_confirm = amdgpu_dc_visual_confirm;
1725
1726 /* TODO: Remove after DP2 receiver gets proper support of Cable ID feature */
1727 adev->dm.dc->debug.ignore_cable_id = true;
1728
1729 /* TODO: There is a new drm mst change where the responsibility for the
1730 * vc_next_start_slot update has been revoked/moved into drm, instead of
1731 * the driver. This forces us to make sure vc_next_start_slot is updated
1732 * in the drm function each time, without considering whether mst_state
1733 * is active or not. Otherwise, the next hotplug will give a wrong
1734 * start_slot number. We are implementing a temporary solution that also
1735 * notifies drm of mst deallocation when the link is no longer of MST
1736 * type when uncommitting the stream, so we have more time to work on a
1737 * proper solution. Ideally, when the dm_helpers_dp_mst_stop_top_mgr
1738 * message is triggered, we should notify drm to do a complete "reset" of
1739 * its states and stop calling further drm mst functions when the link is
1740 * no longer of MST type. This could happen when we unplug an MST
1741 * hub/display. When the uncommit stream comes later after the unplug, we
1742 * should just reset hardware states.
1743 */
1744 adev->dm.dc->debug.temp_mst_deallocation_sequence = true;
1745
1746 if (adev->dm.dc->caps.dp_hdmi21_pcon_support)
1747 DRM_INFO("DP-HDMI FRL PCON supported\n");
1748
1749 r = dm_dmub_hw_init(adev);
1750 if (r) {
1751 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1752 goto error;
1753 }
1754
1755 dc_hardware_init(adev->dm.dc);
1756
1757 adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev->dm.dc);
1758 if (!adev->dm.hpd_rx_offload_wq) {
1759 DRM_ERROR("amdgpu: failed to create hpd rx offload workqueue.\n");
1760 goto error;
1761 }
1762
1763 if ((adev->flags & AMD_IS_APU) && (adev->asic_type >= CHIP_CARRIZO)) {
1764 struct dc_phy_addr_space_config pa_config;
1765
1766 mmhub_read_system_context(adev, &pa_config);
1767
1768 // Call the DC init_memory func
1769 dc_setup_system_context(adev->dm.dc, &pa_config);
1770 }
1771
1772 adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
1773 if (!adev->dm.freesync_module) {
1774 DRM_ERROR(
1775 "amdgpu: failed to initialize freesync_module.\n");
1776 } else
1777 DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
1778 adev->dm.freesync_module);
1779
1780 amdgpu_dm_init_color_mod();
1781
1782 if (adev->dm.dc->caps.max_links > 0) {
1783 adev->dm.vblank_control_workqueue =
1784 create_singlethread_workqueue("dm_vblank_control_workqueue");
1785 if (!adev->dm.vblank_control_workqueue)
1786 DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
1787 }
1788
1789 if (adev->dm.dc->caps.max_links > 0 && adev->family >= AMDGPU_FAMILY_RV) {
1790 adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
1791
1792 if (!adev->dm.hdcp_workqueue)
1793 DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
1794 else
1795 DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
1796
1797 dc_init_callbacks(adev->dm.dc, &init_params);
1798 }
1799 if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
1800 init_completion(&adev->dm.dmub_aux_transfer_done);
1801 adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
1802 if (!adev->dm.dmub_notify) {
1803 DRM_INFO("amdgpu: fail to allocate adev->dm.dmub_notify");
1804 goto error;
1805 }
1806
1807 adev->dm.delayed_hpd_wq = create_singlethread_workqueue("amdgpu_dm_hpd_wq");
1808 if (!adev->dm.delayed_hpd_wq) {
1809 DRM_ERROR("amdgpu: failed to create hpd offload workqueue.\n");
1810 goto error;
1811 }
1812
1813 amdgpu_dm_outbox_init(adev);
1814 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY,
1815 dmub_aux_setconfig_callback, false)) {
1816 DRM_ERROR("amdgpu: fail to register dmub aux callback");
1817 goto error;
1818 }
1819 /* Enable outbox notification only after IRQ handlers are registered and DMUB is alive.
1820 * It is expected that DMUB will resend any pending notifications at this point. Note
1821 * that hpd and hpd_irq handler registration are deferred to register_hpd_handlers() to
1822 * align with the legacy interface initialization sequence. Connection status will be proactively
1823 * detected once in amdgpu_dm_initialize_drm_device().
1824 */
1825 dc_enable_dmub_outbox(adev->dm.dc);
1826
1827 /* DPIA trace goes to dmesg logs only if outbox is enabled */
1828 if (amdgpu_dc_debug_mask & DC_ENABLE_DPIA_TRACE)
1829 dc_dmub_srv_enable_dpia_trace(adev->dm.dc);
1830 }
1831
1832 if (amdgpu_dm_initialize_drm_device(adev)) {
1833 DRM_ERROR(
1834 "amdgpu: failed to initialize sw for display support.\n");
1835 goto error;
1836 }
1837
1838 /* create fake encoders for MST */
1839 dm_dp_create_fake_mst_encoders(adev);
1840
1841 /* TODO: Add_display_info? */
1842
1843 /* TODO use dynamic cursor width */
1844 adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
1845 adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
1846
1847 if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
1848 DRM_ERROR(
1849 "amdgpu: failed to initialize sw for display support.\n");
1850 goto error;
1851 }
1852
1853 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1854 adev->dm.secure_display_ctxs = amdgpu_dm_crtc_secure_display_create_contexts(adev);
1855 if (!adev->dm.secure_display_ctxs)
1856 DRM_ERROR("amdgpu: failed to initialize secure display contexts.\n");
1857 #endif
1858
1859 DRM_DEBUG_DRIVER("KMS initialized.\n");
1860
1861 return 0;
1862 error:
1863 amdgpu_dm_fini(adev);
1864
1865 return -EINVAL;
1866 }
1867
1868 static int amdgpu_dm_early_fini(void *handle)
1869 {
1870 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1871
1872 amdgpu_dm_audio_fini(adev);
1873
1874 return 0;
1875 }
1876
1877 static void amdgpu_dm_fini(struct amdgpu_device *adev)
1878 {
1879 int i;
1880
1881 if (adev->dm.vblank_control_workqueue) {
1882 destroy_workqueue(adev->dm.vblank_control_workqueue);
1883 adev->dm.vblank_control_workqueue = NULL;
1884 }
1885
1886 amdgpu_dm_destroy_drm_device(&adev->dm);
1887
1888 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1889 if (adev->dm.secure_display_ctxs) {
1890 for (i = 0; i < adev->mode_info.num_crtc; i++) {
1891 if (adev->dm.secure_display_ctxs[i].crtc) {
1892 flush_work(&adev->dm.secure_display_ctxs[i].notify_ta_work);
1893 flush_work(&adev->dm.secure_display_ctxs[i].forward_roi_work);
1894 }
1895 }
1896 kfree(adev->dm.secure_display_ctxs);
1897 adev->dm.secure_display_ctxs = NULL;
1898 }
1899 #endif
1900 if (adev->dm.hdcp_workqueue) {
1901 hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
1902 adev->dm.hdcp_workqueue = NULL;
1903 }
1904
1905 if (adev->dm.dc)
1906 dc_deinit_callbacks(adev->dm.dc);
1907
1908 if (adev->dm.dc)
1909 dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
1910
1911 if (dc_enable_dmub_notifications(adev->dm.dc)) {
1912 kfree(adev->dm.dmub_notify);
1913 adev->dm.dmub_notify = NULL;
1914 destroy_workqueue(adev->dm.delayed_hpd_wq);
1915 adev->dm.delayed_hpd_wq = NULL;
1916 }
1917
1918 if (adev->dm.dmub_bo)
1919 amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1920 &adev->dm.dmub_bo_gpu_addr,
1921 &adev->dm.dmub_bo_cpu_addr);
1922
1923 if (adev->dm.hpd_rx_offload_wq) {
1924 for (i = 0; i < adev->dm.dc->caps.max_links; i++) {
1925 if (adev->dm.hpd_rx_offload_wq[i].wq) {
1926 destroy_workqueue(adev->dm.hpd_rx_offload_wq[i].wq);
1927 adev->dm.hpd_rx_offload_wq[i].wq = NULL;
1928 }
1929 }
1930
1931 kfree(adev->dm.hpd_rx_offload_wq);
1932 adev->dm.hpd_rx_offload_wq = NULL;
1933 }
1934
1935 /* DC Destroy TODO: Replace destroy DAL */
1936 if (adev->dm.dc)
1937 dc_destroy(&adev->dm.dc);
1938 /*
1939 * TODO: pageflip, vlank interrupt
1940 *
1941 * amdgpu_dm_irq_fini(adev);
1942 */
1943
1944 if (adev->dm.cgs_device) {
1945 amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1946 adev->dm.cgs_device = NULL;
1947 }
1948 if (adev->dm.freesync_module) {
1949 mod_freesync_destroy(adev->dm.freesync_module);
1950 adev->dm.freesync_module = NULL;
1951 }
1952
1953 mutex_destroy(&adev->dm.audio_lock);
1954 mutex_destroy(&adev->dm.dc_lock);
1955 mutex_destroy(&adev->dm.dpia_aux_lock);
1956 }
1957
1958 static int load_dmcu_fw(struct amdgpu_device *adev)
1959 {
1960 const char *fw_name_dmcu = NULL;
1961 int r;
1962 const struct dmcu_firmware_header_v1_0 *hdr;
1963
1964 switch (adev->asic_type) {
1965 #if defined(CONFIG_DRM_AMD_DC_SI)
1966 case CHIP_TAHITI:
1967 case CHIP_PITCAIRN:
1968 case CHIP_VERDE:
1969 case CHIP_OLAND:
1970 #endif
1971 case CHIP_BONAIRE:
1972 case CHIP_HAWAII:
1973 case CHIP_KAVERI:
1974 case CHIP_KABINI:
1975 case CHIP_MULLINS:
1976 case CHIP_TONGA:
1977 case CHIP_FIJI:
1978 case CHIP_CARRIZO:
1979 case CHIP_STONEY:
1980 case CHIP_POLARIS11:
1981 case CHIP_POLARIS10:
1982 case CHIP_POLARIS12:
1983 case CHIP_VEGAM:
1984 case CHIP_VEGA10:
1985 case CHIP_VEGA12:
1986 case CHIP_VEGA20:
1987 return 0;
1988 case CHIP_NAVI12:
1989 fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1990 break;
1991 case CHIP_RAVEN:
1992 if (ASICREV_IS_PICASSO(adev->external_rev_id))
1993 fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1994 else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
1995 fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1996 else
1997 return 0;
1998 break;
1999 default:
2000 switch (adev->ip_versions[DCE_HWIP][0]) {
2001 case IP_VERSION(2, 0, 2):
2002 case IP_VERSION(2, 0, 3):
2003 case IP_VERSION(2, 0, 0):
2004 case IP_VERSION(2, 1, 0):
2005 case IP_VERSION(3, 0, 0):
2006 case IP_VERSION(3, 0, 2):
2007 case IP_VERSION(3, 0, 3):
2008 case IP_VERSION(3, 0, 1):
2009 case IP_VERSION(3, 1, 2):
2010 case IP_VERSION(3, 1, 3):
2011 case IP_VERSION(3, 1, 4):
2012 case IP_VERSION(3, 1, 5):
2013 case IP_VERSION(3, 1, 6):
2014 case IP_VERSION(3, 2, 0):
2015 case IP_VERSION(3, 2, 1):
2016 return 0;
2017 default:
2018 break;
2019 }
2020 DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
2021 return -EINVAL;
2022 }
2023
2024 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
2025 DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
2026 return 0;
2027 }
2028
2029 r = amdgpu_ucode_request(adev, &adev->dm.fw_dmcu, fw_name_dmcu);
2030 if (r == -ENODEV) {
2031 /* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
2032 DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
2033 adev->dm.fw_dmcu = NULL;
2034 return 0;
2035 }
2036 if (r) {
2037 dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
2038 fw_name_dmcu);
2039 amdgpu_ucode_release(&adev->dm.fw_dmcu);
2040 return r;
2041 }
2042
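	/* The DMCU firmware provides an ERAM image and a separate interrupt
	 * vector (intv) section; both are registered as separate ucode entries
	 * below so PSP can load them.
	 */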
2043 hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
2044 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
2045 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
2046 adev->firmware.fw_size +=
2047 ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
2048
2049 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
2050 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
2051 adev->firmware.fw_size +=
2052 ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
2053
2054 adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
2055
2056 DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
2057
2058 return 0;
2059 }
2060
2061 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
2062 {
2063 struct amdgpu_device *adev = ctx;
2064
2065 return dm_read_reg(adev->dm.dc->ctx, address);
2066 }
2067
2068 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
2069 uint32_t value)
2070 {
2071 struct amdgpu_device *adev = ctx;
2072
2073 return dm_write_reg(adev->dm.dc->ctx, address, value);
2074 }
2075
2076 static int dm_dmub_sw_init(struct amdgpu_device *adev)
2077 {
2078 struct dmub_srv_create_params create_params;
2079 struct dmub_srv_region_params region_params;
2080 struct dmub_srv_region_info region_info;
2081 struct dmub_srv_memory_params memory_params;
2082 struct dmub_srv_fb_info *fb_info;
2083 struct dmub_srv *dmub_srv;
2084 const struct dmcub_firmware_header_v1_0 *hdr;
2085 enum dmub_asic dmub_asic;
2086 enum dmub_status status;
2087 int r;
2088
2089 switch (adev->ip_versions[DCE_HWIP][0]) {
2090 case IP_VERSION(2, 1, 0):
2091 dmub_asic = DMUB_ASIC_DCN21;
2092 break;
2093 case IP_VERSION(3, 0, 0):
2094 dmub_asic = DMUB_ASIC_DCN30;
2095 break;
2096 case IP_VERSION(3, 0, 1):
2097 dmub_asic = DMUB_ASIC_DCN301;
2098 break;
2099 case IP_VERSION(3, 0, 2):
2100 dmub_asic = DMUB_ASIC_DCN302;
2101 break;
2102 case IP_VERSION(3, 0, 3):
2103 dmub_asic = DMUB_ASIC_DCN303;
2104 break;
2105 case IP_VERSION(3, 1, 2):
2106 case IP_VERSION(3, 1, 3):
2107 dmub_asic = (adev->external_rev_id == YELLOW_CARP_B0) ? DMUB_ASIC_DCN31B : DMUB_ASIC_DCN31;
2108 break;
2109 case IP_VERSION(3, 1, 4):
2110 dmub_asic = DMUB_ASIC_DCN314;
2111 break;
2112 case IP_VERSION(3, 1, 5):
2113 dmub_asic = DMUB_ASIC_DCN315;
2114 break;
2115 case IP_VERSION(3, 1, 6):
2116 dmub_asic = DMUB_ASIC_DCN316;
2117 break;
2118 case IP_VERSION(3, 2, 0):
2119 dmub_asic = DMUB_ASIC_DCN32;
2120 break;
2121 case IP_VERSION(3, 2, 1):
2122 dmub_asic = DMUB_ASIC_DCN321;
2123 break;
2124 default:
2125 /* ASIC doesn't support DMUB. */
2126 return 0;
2127 }
2128
2129 hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
2130 adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
2131
2132 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
2133 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
2134 AMDGPU_UCODE_ID_DMCUB;
2135 adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
2136 adev->dm.dmub_fw;
2137 adev->firmware.fw_size +=
2138 ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
2139
2140 DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
2141 adev->dm.dmcub_fw_version);
2142 }
2143
2144
2145 adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
2146 dmub_srv = adev->dm.dmub_srv;
2147
2148 if (!dmub_srv) {
2149 DRM_ERROR("Failed to allocate DMUB service!\n");
2150 return -ENOMEM;
2151 }
2152
2153 memset(&create_params, 0, sizeof(create_params));
2154 create_params.user_ctx = adev;
2155 create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
2156 create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
2157 create_params.asic = dmub_asic;
2158
2159 /* Create the DMUB service. */
2160 status = dmub_srv_create(dmub_srv, &create_params);
2161 if (status != DMUB_STATUS_OK) {
2162 DRM_ERROR("Error creating DMUB service: %d\n", status);
2163 return -EINVAL;
2164 }
2165
2166 /* Calculate the size of all the regions for the DMUB service. */
2167 memset(&region_params, 0, sizeof(region_params));
2168
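	/* The firmware blob wraps the instruction/constant section in PSP
	 * header and footer bytes that are not part of the DMUB region, so
	 * they are subtracted and skipped below.
	 */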
2169 region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
2170 PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
2171 region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
2172 region_params.vbios_size = adev->bios_size;
2173 region_params.fw_bss_data = region_params.bss_data_size ?
2174 adev->dm.dmub_fw->data +
2175 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
2176 le32_to_cpu(hdr->inst_const_bytes) : NULL;
2177 region_params.fw_inst_const =
2178 adev->dm.dmub_fw->data +
2179 le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
2180 PSP_HEADER_BYTES;
2181 region_params.is_mailbox_in_inbox = false;
2182
2183 status = dmub_srv_calc_region_info(dmub_srv, &region_params,
2184 &region_info);
2185
2186 if (status != DMUB_STATUS_OK) {
2187 DRM_ERROR("Error calculating DMUB region info: %d\n", status);
2188 return -EINVAL;
2189 }
2190
2191 /*
2192 * Allocate a framebuffer based on the total size of all the regions.
2193 * TODO: Move this into GART.
2194 */
2195 r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
2196 AMDGPU_GEM_DOMAIN_VRAM |
2197 AMDGPU_GEM_DOMAIN_GTT,
2198 &adev->dm.dmub_bo,
2199 &adev->dm.dmub_bo_gpu_addr,
2200 &adev->dm.dmub_bo_cpu_addr);
2201 if (r)
2202 return r;
2203
2204 /* Rebase the regions on the framebuffer address. */
2205 memset(&memory_params, 0, sizeof(memory_params));
2206 memory_params.cpu_fb_addr = adev->dm.dmub_bo_cpu_addr;
2207 memory_params.gpu_fb_addr = adev->dm.dmub_bo_gpu_addr;
2208 memory_params.region_info = &region_info;
2209
2210 adev->dm.dmub_fb_info =
2211 kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
2212 fb_info = adev->dm.dmub_fb_info;
2213
2214 if (!fb_info) {
2215 DRM_ERROR(
2216 "Failed to allocate framebuffer info for DMUB service!\n");
2217 return -ENOMEM;
2218 }
2219
2220 status = dmub_srv_calc_mem_info(dmub_srv, &memory_params, fb_info);
2221 if (status != DMUB_STATUS_OK) {
2222 DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
2223 return -EINVAL;
2224 }
2225
2226 return 0;
2227 }
2228
2229 static int dm_sw_init(void *handle)
2230 {
2231 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2232 int r;
2233
2234 r = dm_dmub_sw_init(adev);
2235 if (r)
2236 return r;
2237
2238 return load_dmcu_fw(adev);
2239 }
2240
2241 static int dm_sw_fini(void *handle)
2242 {
2243 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2244
2245 kfree(adev->dm.dmub_fb_info);
2246 adev->dm.dmub_fb_info = NULL;
2247
2248 if (adev->dm.dmub_srv) {
2249 dmub_srv_destroy(adev->dm.dmub_srv);
2250 kfree(adev->dm.dmub_srv);
2251 adev->dm.dmub_srv = NULL;
2252 }
2253
2254 amdgpu_ucode_release(&adev->dm.dmub_fw);
2255 amdgpu_ucode_release(&adev->dm.fw_dmcu);
2256
2257 return 0;
2258 }
2259
2260 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
2261 {
2262 struct amdgpu_dm_connector *aconnector;
2263 struct drm_connector *connector;
2264 struct drm_connector_list_iter iter;
2265 int ret = 0;
2266
2267 drm_connector_list_iter_begin(dev, &iter);
2268 drm_for_each_connector_iter(connector, &iter) {
2269 aconnector = to_amdgpu_dm_connector(connector);
2270 if (aconnector->dc_link->type == dc_connection_mst_branch &&
2271 aconnector->mst_mgr.aux) {
2272 DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
2273 aconnector,
2274 aconnector->base.base.id);
2275
2276 ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
2277 if (ret < 0) {
2278 DRM_ERROR("DM_MST: Failed to start MST\n");
2279 aconnector->dc_link->type =
2280 dc_connection_single;
2281 ret = dm_helpers_dp_mst_stop_top_mgr(aconnector->dc_link->ctx,
2282 aconnector->dc_link);
2283 break;
2284 }
2285 }
2286 }
2287 drm_connector_list_iter_end(&iter);
2288
2289 return ret;
2290 }
2291
2292 static int dm_late_init(void *handle)
2293 {
2294 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2295
2296 struct dmcu_iram_parameters params;
2297 unsigned int linear_lut[16];
2298 int i;
2299 struct dmcu *dmcu = NULL;
2300
2301 dmcu = adev->dm.dc->res_pool->dmcu;
2302
2303 for (i = 0; i < 16; i++)
2304 linear_lut[i] = 0xFFFF * i / 15;
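	/* e.g. linear_lut[0] = 0x0000 and linear_lut[15] = 0xFFFF: an evenly
	 * spaced 16-point ramp.
	 */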
2305
2306 params.set = 0;
2307 params.backlight_ramping_override = false;
2308 params.backlight_ramping_start = 0xCCCC;
2309 params.backlight_ramping_reduction = 0xCCCCCCCC;
2310 params.backlight_lut_array_size = 16;
2311 params.backlight_lut_array = linear_lut;
2312
2313 /* Min backlight level after ABM reduction; don't allow below 1%
2314 * 0xFFFF x 0.01 = 0x28F
2315 */
2316 params.min_abm_backlight = 0x28F;
2317 /* In the case where abm is implemented on dmcub,
2318 * dmcu object will be null.
2319 * ABM 2.4 and up are implemented on dmcub.
2320 */
2321 if (dmcu) {
2322 if (!dmcu_load_iram(dmcu, params))
2323 return -EINVAL;
2324 } else if (adev->dm.dc->ctx->dmub_srv) {
2325 struct dc_link *edp_links[MAX_NUM_EDP];
2326 int edp_num;
2327
2328 dc_get_edp_links(adev->dm.dc, edp_links, &edp_num);
2329 for (i = 0; i < edp_num; i++) {
2330 if (!dmub_init_abm_config(adev->dm.dc->res_pool, params, i))
2331 return -EINVAL;
2332 }
2333 }
2334
2335 return detect_mst_link_for_all_connectors(adev_to_drm(adev));
2336 }
2337
2338 static void resume_mst_branch_status(struct drm_dp_mst_topology_mgr *mgr)
2339 {
2340 int ret;
2341 u8 guid[16];
2342 u64 tmp64;
2343
2344 mutex_lock(&mgr->lock);
2345 if (!mgr->mst_primary)
2346 goto out_fail;
2347
2348 if (drm_dp_read_dpcd_caps(mgr->aux, mgr->dpcd) < 0) {
2349 drm_dbg_kms(mgr->dev, "dpcd read failed - undocked during suspend?\n");
2350 goto out_fail;
2351 }
2352
2353 ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
2354 DP_MST_EN |
2355 DP_UP_REQ_EN |
2356 DP_UPSTREAM_IS_SRC);
2357 if (ret < 0) {
2358 drm_dbg_kms(mgr->dev, "mst write failed - undocked during suspend?\n");
2359 goto out_fail;
2360 }
2361
2362 /* Some hubs forget their guids after they resume */
2363 ret = drm_dp_dpcd_read(mgr->aux, DP_GUID, guid, 16);
2364 if (ret != 16) {
2365 drm_dbg_kms(mgr->dev, "dpcd read failed - undocked during suspend?\n");
2366 goto out_fail;
2367 }
2368
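	/* An all-zero GUID means the hub lost it across suspend; synthesize
	 * one from jiffies and write it back below.
	 */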
2369 if (memchr_inv(guid, 0, 16) == NULL) {
2370 tmp64 = get_jiffies_64();
2371 memcpy(&guid[0], &tmp64, sizeof(u64));
2372 memcpy(&guid[8], &tmp64, sizeof(u64));
2373
2374 ret = drm_dp_dpcd_write(mgr->aux, DP_GUID, guid, 16);
2375
2376 if (ret != 16) {
2377 drm_dbg_kms(mgr->dev, "check mstb guid failed - undocked during suspend?\n");
2378 goto out_fail;
2379 }
2380 }
2381
2382 memcpy(mgr->mst_primary->guid, guid, 16);
2383
2384 out_fail:
2385 mutex_unlock(&mgr->lock);
2386 }
2387
2388 static void s3_handle_mst(struct drm_device *dev, bool suspend)
2389 {
2390 struct amdgpu_dm_connector *aconnector;
2391 struct drm_connector *connector;
2392 struct drm_connector_list_iter iter;
2393 struct drm_dp_mst_topology_mgr *mgr;
2394
2395 drm_connector_list_iter_begin(dev, &iter);
2396 drm_for_each_connector_iter(connector, &iter) {
2397 aconnector = to_amdgpu_dm_connector(connector);
2398 if (aconnector->dc_link->type != dc_connection_mst_branch ||
2399 aconnector->mst_root)
2400 continue;
2401
2402 mgr = &aconnector->mst_mgr;
2403
2404 if (suspend) {
2405 drm_dp_mst_topology_mgr_suspend(mgr);
2406 } else {
2407 /* if extended timeout is supported in hardware,
2408 * default to LTTPR timeout (3.2ms) first as a W/A for DP link layer
2409 * CTS 4.2.1.1 regression introduced by CTS specs requirement update.
2410 */
2411 try_to_configure_aux_timeout(aconnector->dc_link->ddc, LINK_AUX_DEFAULT_LTTPR_TIMEOUT_PERIOD);
2412 if (!dp_is_lttpr_present(aconnector->dc_link))
2413 try_to_configure_aux_timeout(aconnector->dc_link->ddc, LINK_AUX_DEFAULT_TIMEOUT_PERIOD);
2414
2415 /* TODO: move resume_mst_branch_status() into drm mst resume again
2416 * once topology probing work is pulled out from mst resume into mst
2417 * resume 2nd step. mst resume 2nd step should be called after old
2418 * state getting restored (i.e. drm_atomic_helper_resume()).
2419 */
2420 resume_mst_branch_status(mgr);
2421 }
2422 }
2423 drm_connector_list_iter_end(&iter);
2424 }
2425
2426 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
2427 {
2428 int ret = 0;
2429
2430 /* This interface is for dGPU Navi1x. The Linux dc-pplib interface depends
2431 * on the Windows driver dc implementation.
2432 * For Navi1x, clock settings of dcn watermarks are fixed. the settings
2433 * should be passed to smu during boot up and resume from s3.
2434 * boot up: dc calculate dcn watermark clock settings within dc_create,
2435 * dcn20_resource_construct
2436 * then call pplib functions below to pass the settings to smu:
2437 * smu_set_watermarks_for_clock_ranges
2438 * smu_set_watermarks_table
2439 * navi10_set_watermarks_table
2440 * smu_write_watermarks_table
2441 *
2442 * For Renoir, clock settings of dcn watermark are also fixed values.
2443 * dc has implemented a different flow for the Windows driver:
2444 * dc_hardware_init / dc_set_power_state
2445 * dcn10_init_hw
2446 * notify_wm_ranges
2447 * set_wm_ranges
2448 * -- Linux
2449 * smu_set_watermarks_for_clock_ranges
2450 * renoir_set_watermarks_table
2451 * smu_write_watermarks_table
2452 *
2453 * For Linux,
2454 * dc_hardware_init -> amdgpu_dm_init
2455 * dc_set_power_state --> dm_resume
2456 *
2457 * therefore, this function applies to navi10/12/14 but not Renoir.
2458 *
2459 */
2460 switch (adev->ip_versions[DCE_HWIP][0]) {
2461 case IP_VERSION(2, 0, 2):
2462 case IP_VERSION(2, 0, 0):
2463 break;
2464 default:
2465 return 0;
2466 }
2467
2468 ret = amdgpu_dpm_write_watermarks_table(adev);
2469 if (ret) {
2470 DRM_ERROR("Failed to update WMTABLE!\n");
2471 return ret;
2472 }
2473
2474 return 0;
2475 }
2476
2477 /**
2478 * dm_hw_init() - Initialize DC device
2479 * @handle: The base driver device containing the amdgpu_dm device.
2480 *
2481 * Initialize the &struct amdgpu_display_manager device. This involves calling
2482 * the initializers of each DM component, then populating the struct with them.
2483 *
2484 * Although the function implies hardware initialization, both hardware and
2485 * software are initialized here. Splitting them out to their relevant init
2486 * hooks is a future TODO item.
2487 *
2488 * Some notable things that are initialized here:
2489 *
2490 * - Display Core, both software and hardware
2491 * - DC modules that we need (freesync and color management)
2492 * - DRM software states
2493 * - Interrupt sources and handlers
2494 * - Vblank support
2495 * - Debug FS entries, if enabled
2496 */
2497 static int dm_hw_init(void *handle)
2498 {
2499 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2500 /* Create DAL display manager */
2501 amdgpu_dm_init(adev);
2502 amdgpu_dm_hpd_init(adev);
2503
2504 return 0;
2505 }
2506
2507 /**
2508 * dm_hw_fini() - Teardown DC device
2509 * @handle: The base driver device containing the amdgpu_dm device.
2510 *
2511 * Teardown components within &struct amdgpu_display_manager that require
2512 * cleanup. This involves cleaning up the DRM device, DC, and any modules that
2513 * were loaded. Also flush IRQ workqueues and disable them.
2514 */
2515 static int dm_hw_fini(void *handle)
2516 {
2517 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2518
2519 amdgpu_dm_hpd_fini(adev);
2520
2521 amdgpu_dm_irq_fini(adev);
2522 amdgpu_dm_fini(adev);
2523 return 0;
2524 }
2525
2526
2527 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
2528 struct dc_state *state, bool enable)
2529 {
2530 enum dc_irq_source irq_source;
2531 struct amdgpu_crtc *acrtc;
2532 int rc = -EBUSY;
2533 int i = 0;
2534
2535 for (i = 0; i < state->stream_count; i++) {
2536 acrtc = get_crtc_by_otg_inst(
2537 adev, state->stream_status[i].primary_otg_inst);
2538
2539 if (acrtc && state->stream_status[i].plane_count != 0) {
2540 irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
2541 rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
2542 if (rc)
2543 DRM_WARN("Failed to %s pflip interrupts\n",
2544 enable ? "enable" : "disable");
2545
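			/* VUPDATE interrupts are only re-enabled when the CRTC is in
			 * variable refresh (VRR) mode; on disable they are always
			 * turned off.
			 */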
2546 if (enable) {
2547 if (amdgpu_dm_crtc_vrr_active(to_dm_crtc_state(acrtc->base.state)))
2548 rc = amdgpu_dm_crtc_set_vupdate_irq(&acrtc->base, true);
2549 } else
2550 rc = amdgpu_dm_crtc_set_vupdate_irq(&acrtc->base, false);
2551
2552 if (rc)
2553 DRM_WARN("Failed to %sable vupdate interrupt\n", enable ? "en" : "dis");
2554
2555 irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
2556 /* During gpu-reset we disable and then enable vblank irq, so
2557 * don't use amdgpu_irq_get/put() to avoid refcount change.
2558 */
2559 if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
2560 DRM_WARN("Failed to %sable vblank interrupt\n", enable ? "en" : "dis");
2561 }
2562 }
2563
2564 }
2565
2566 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
2567 {
2568 struct dc_state *context = NULL;
2569 enum dc_status res = DC_ERROR_UNEXPECTED;
2570 int i;
2571 struct dc_stream_state *del_streams[MAX_PIPES];
2572 int del_streams_count = 0;
2573
2574 memset(del_streams, 0, sizeof(del_streams));
2575
2576 context = dc_create_state(dc);
2577 if (context == NULL)
2578 goto context_alloc_fail;
2579
2580 dc_resource_state_copy_construct_current(dc, context);
2581
2582 /* First remove from context all streams */
2583 for (i = 0; i < context->stream_count; i++) {
2584 struct dc_stream_state *stream = context->streams[i];
2585
2586 del_streams[del_streams_count++] = stream;
2587 }
2588
2589 /* Remove all planes for removed streams and then remove the streams */
2590 for (i = 0; i < del_streams_count; i++) {
2591 if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
2592 res = DC_FAIL_DETACH_SURFACES;
2593 goto fail;
2594 }
2595
2596 res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
2597 if (res != DC_OK)
2598 goto fail;
2599 }
2600
2601 res = dc_commit_streams(dc, context->streams, context->stream_count);
2602
2603 fail:
2604 dc_release_state(context);
2605
2606 context_alloc_fail:
2607 return res;
2608 }
2609
2610 static void hpd_rx_irq_work_suspend(struct amdgpu_display_manager *dm)
2611 {
2612 int i;
2613
2614 if (dm->hpd_rx_offload_wq) {
2615 for (i = 0; i < dm->dc->caps.max_links; i++)
2616 flush_workqueue(dm->hpd_rx_offload_wq[i].wq);
2617 }
2618 }
2619
2620 static int dm_suspend(void *handle)
2621 {
2622 struct amdgpu_device *adev = handle;
2623 struct amdgpu_display_manager *dm = &adev->dm;
2624 int ret = 0;
2625
2626 if (amdgpu_in_reset(adev)) {
2627 mutex_lock(&dm->dc_lock);
2628
2629 dc_allow_idle_optimizations(adev->dm.dc, false);
2630
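		/* Back up the current DC state; dm_resume() re-commits it after
		 * the GPU reset completes.
		 */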
2631 dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
2632
2633 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
2634
2635 amdgpu_dm_commit_zero_streams(dm->dc);
2636
2637 amdgpu_dm_irq_suspend(adev);
2638
2639 hpd_rx_irq_work_suspend(dm);
2640
2641 return ret;
2642 }
2643
2644 WARN_ON(adev->dm.cached_state);
2645 adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
2646
2647 s3_handle_mst(adev_to_drm(adev), true);
2648
2649 amdgpu_dm_irq_suspend(adev);
2650
2651 hpd_rx_irq_work_suspend(dm);
2652
2653 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
2654
2655 return 0;
2656 }
2657
2658 struct amdgpu_dm_connector *
2659 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
2660 struct drm_crtc *crtc)
2661 {
2662 u32 i;
2663 struct drm_connector_state *new_con_state;
2664 struct drm_connector *connector;
2665 struct drm_crtc *crtc_from_state;
2666
2667 for_each_new_connector_in_state(state, connector, new_con_state, i) {
2668 crtc_from_state = new_con_state->crtc;
2669
2670 if (crtc_from_state == crtc)
2671 return to_amdgpu_dm_connector(connector);
2672 }
2673
2674 return NULL;
2675 }
2676
2677 static void emulated_link_detect(struct dc_link *link)
2678 {
2679 struct dc_sink_init_data sink_init_data = { 0 };
2680 struct display_sink_capability sink_caps = { 0 };
2681 enum dc_edid_status edid_status;
2682 struct dc_context *dc_ctx = link->ctx;
2683 struct dc_sink *sink = NULL;
2684 struct dc_sink *prev_sink = NULL;
2685
2686 link->type = dc_connection_none;
2687 prev_sink = link->local_sink;
2688
2689 if (prev_sink)
2690 dc_sink_release(prev_sink);
2691
2692 switch (link->connector_signal) {
2693 case SIGNAL_TYPE_HDMI_TYPE_A: {
2694 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2695 sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
2696 break;
2697 }
2698
2699 case SIGNAL_TYPE_DVI_SINGLE_LINK: {
2700 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2701 sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
2702 break;
2703 }
2704
2705 case SIGNAL_TYPE_DVI_DUAL_LINK: {
2706 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2707 sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
2708 break;
2709 }
2710
2711 case SIGNAL_TYPE_LVDS: {
2712 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2713 sink_caps.signal = SIGNAL_TYPE_LVDS;
2714 break;
2715 }
2716
2717 case SIGNAL_TYPE_EDP: {
2718 sink_caps.transaction_type =
2719 DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2720 sink_caps.signal = SIGNAL_TYPE_EDP;
2721 break;
2722 }
2723
2724 case SIGNAL_TYPE_DISPLAY_PORT: {
2725 sink_caps.transaction_type =
2726 DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2727 sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
2728 break;
2729 }
2730
2731 default:
2732 DC_ERROR("Invalid connector type! signal:%d\n",
2733 link->connector_signal);
2734 return;
2735 }
2736
2737 sink_init_data.link = link;
2738 sink_init_data.sink_signal = sink_caps.signal;
2739
2740 sink = dc_sink_create(&sink_init_data);
2741 if (!sink) {
2742 DC_ERROR("Failed to create sink!\n");
2743 return;
2744 }
2745
2746 /* dc_sink_create returns a new reference */
2747 link->local_sink = sink;
2748
2749 edid_status = dm_helpers_read_local_edid(
2750 link->ctx,
2751 link,
2752 sink);
2753
2754 if (edid_status != EDID_OK)
2755 DC_ERROR("Failed to read EDID");
2756
2757 }
2758
2759 static void dm_gpureset_commit_state(struct dc_state *dc_state,
2760 struct amdgpu_display_manager *dm)
2761 {
2762 struct {
2763 struct dc_surface_update surface_updates[MAX_SURFACES];
2764 struct dc_plane_info plane_infos[MAX_SURFACES];
2765 struct dc_scaling_info scaling_infos[MAX_SURFACES];
2766 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
2767 struct dc_stream_update stream_update;
2768 } *bundle;
2769 int k, m;
2770
2771 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
2772
2773 if (!bundle) {
2774 dm_error("Failed to allocate update bundle\n");
2775 goto cleanup;
2776 }
2777
2778 for (k = 0; k < dc_state->stream_count; k++) {
2779 bundle->stream_update.stream = dc_state->streams[k];
2780
2781 for (m = 0; m < dc_state->stream_status->plane_count; m++) {
2782 bundle->surface_updates[m].surface =
2783 dc_state->stream_status->plane_states[m];
2784 bundle->surface_updates[m].surface->force_full_update =
2785 true;
2786 }
2787
2788 update_planes_and_stream_adapter(dm->dc,
2789 UPDATE_TYPE_FULL,
2790 dc_state->stream_status->plane_count,
2791 dc_state->streams[k],
2792 &bundle->stream_update,
2793 bundle->surface_updates);
2794 }
2795
2796 cleanup:
2797 kfree(bundle);
2798 }
2799
2800 static int dm_resume(void *handle)
2801 {
2802 struct amdgpu_device *adev = handle;
2803 struct drm_device *ddev = adev_to_drm(adev);
2804 struct amdgpu_display_manager *dm = &adev->dm;
2805 struct amdgpu_dm_connector *aconnector;
2806 struct drm_connector *connector;
2807 struct drm_connector_list_iter iter;
2808 struct drm_crtc *crtc;
2809 struct drm_crtc_state *new_crtc_state;
2810 struct dm_crtc_state *dm_new_crtc_state;
2811 struct drm_plane *plane;
2812 struct drm_plane_state *new_plane_state;
2813 struct dm_plane_state *dm_new_plane_state;
2814 struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
2815 enum dc_connection_type new_connection_type = dc_connection_none;
2816 struct dc_state *dc_state;
2817 int i, r, j, ret;
2818 bool need_hotplug = false;
2819
2820 if (amdgpu_in_reset(adev)) {
2821 dc_state = dm->cached_dc_state;
2822
2823 /*
2824 * The dc->current_state is backed up into dm->cached_dc_state
2825 * before we commit 0 streams.
2826 *
2827 * DC will clear link encoder assignments on the real state
2828 * but the changes won't propagate over to the copy we made
2829 * before the 0 streams commit.
2830 *
2831 * DC expects that link encoder assignments are *not* valid
2832 * when committing a state, so as a workaround we can copy
2833 * off of the current state.
2834 *
2835 * We lose the previous assignments, but we had already
2836 * committed 0 streams anyway.
2837 */
2838 link_enc_cfg_copy(adev->dm.dc->current_state, dc_state);
2839
2840 r = dm_dmub_hw_init(adev);
2841 if (r)
2842 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2843
2844 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2845 dc_resume(dm->dc);
2846
2847 amdgpu_dm_irq_resume_early(adev);
2848
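		/* Mark every cached stream and plane as fully changed so the
		 * commit below reprograms them from scratch.
		 */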
2849 for (i = 0; i < dc_state->stream_count; i++) {
2850 dc_state->streams[i]->mode_changed = true;
2851 for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
2852 dc_state->stream_status[i].plane_states[j]->update_flags.raw
2853 = 0xffffffff;
2854 }
2855 }
2856
2857 if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
2858 amdgpu_dm_outbox_init(adev);
2859 dc_enable_dmub_outbox(adev->dm.dc);
2860 }
2861
2862 WARN_ON(!dc_commit_streams(dm->dc, dc_state->streams, dc_state->stream_count));
2863
2864 dm_gpureset_commit_state(dm->cached_dc_state, dm);
2865
2866 dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2867
2868 dc_release_state(dm->cached_dc_state);
2869 dm->cached_dc_state = NULL;
2870
2871 amdgpu_dm_irq_resume_late(adev);
2872
2873 mutex_unlock(&dm->dc_lock);
2874
2875 return 0;
2876 }
2877 /* Recreate dc_state - DC invalidates it when setting power state to S3. */
2878 dc_release_state(dm_state->context);
2879 dm_state->context = dc_create_state(dm->dc);
2880 /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2881 dc_resource_state_construct(dm->dc, dm_state->context);
2882
2883 /* Before powering on DC we need to re-initialize DMUB. */
2884 dm_dmub_hw_resume(adev);
2885
2886 /* Re-enable outbox interrupts for DPIA. */
2887 if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
2888 amdgpu_dm_outbox_init(adev);
2889 dc_enable_dmub_outbox(adev->dm.dc);
2890 }
2891
2892 /* power on hardware */
2893 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2894
2895 /* program HPD filter */
2896 dc_resume(dm->dc);
2897
2898 /*
2899 * Early enable HPD Rx IRQ; this should be done before setting the mode, as short
2900 * pulse interrupts are used for MST
2901 */
2902 amdgpu_dm_irq_resume_early(adev);
2903
2904 /* On resume we need to rewrite the MSTM control bits to enable MST*/
2905 s3_handle_mst(ddev, false);
2906
2907 /* Do detection*/
2908 drm_connector_list_iter_begin(ddev, &iter);
2909 drm_for_each_connector_iter(connector, &iter) {
2910 aconnector = to_amdgpu_dm_connector(connector);
2911
2912 if (!aconnector->dc_link)
2913 continue;
2914
2915 /*
2916 * This is the case when traversing through already created end-sink
2917 * MST connectors; they should be skipped.
2918 */
2919 if (aconnector && aconnector->mst_root)
2920 continue;
2921
2922 mutex_lock(&aconnector->hpd_lock);
2923 if (!dc_link_detect_connection_type(aconnector->dc_link, &new_connection_type))
2924 DRM_ERROR("KMS: Failed to detect connector\n");
2925
2926 if (aconnector->base.force && new_connection_type == dc_connection_none) {
2927 emulated_link_detect(aconnector->dc_link);
2928 } else {
2929 mutex_lock(&dm->dc_lock);
2930 dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2931 mutex_unlock(&dm->dc_lock);
2932 }
2933
2934 if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2935 aconnector->fake_enable = false;
2936
2937 if (aconnector->dc_sink)
2938 dc_sink_release(aconnector->dc_sink);
2939 aconnector->dc_sink = NULL;
2940 amdgpu_dm_update_connector_after_detect(aconnector);
2941 mutex_unlock(&aconnector->hpd_lock);
2942 }
2943 drm_connector_list_iter_end(&iter);
2944
2945 /* Force mode set in atomic commit */
2946 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2947 new_crtc_state->active_changed = true;
2948
2949 /*
2950 * atomic_check is expected to create the dc states. We need to release
2951 * them here, since they were duplicated as part of the suspend
2952 * procedure.
2953 */
2954 for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2955 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2956 if (dm_new_crtc_state->stream) {
2957 WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2958 dc_stream_release(dm_new_crtc_state->stream);
2959 dm_new_crtc_state->stream = NULL;
2960 }
2961 }
2962
2963 for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2964 dm_new_plane_state = to_dm_plane_state(new_plane_state);
2965 if (dm_new_plane_state->dc_state) {
2966 WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2967 dc_plane_state_release(dm_new_plane_state->dc_state);
2968 dm_new_plane_state->dc_state = NULL;
2969 }
2970 }
2971
2972 drm_atomic_helper_resume(ddev, dm->cached_state);
2973
2974 dm->cached_state = NULL;
2975
2976 /* Do mst topology probing after resuming cached state*/
2977 drm_connector_list_iter_begin(ddev, &iter);
2978 drm_for_each_connector_iter(connector, &iter) {
2979 aconnector = to_amdgpu_dm_connector(connector);
2980 if (aconnector->dc_link->type != dc_connection_mst_branch ||
2981 aconnector->mst_root)
2982 continue;
2983
2984 ret = drm_dp_mst_topology_mgr_resume(&aconnector->mst_mgr, true);
2985
2986 if (ret < 0) {
2987 dm_helpers_dp_mst_stop_top_mgr(aconnector->dc_link->ctx,
2988 aconnector->dc_link);
2989 need_hotplug = true;
2990 }
2991 }
2992 drm_connector_list_iter_end(&iter);
2993
2994 if (need_hotplug)
2995 drm_kms_helper_hotplug_event(ddev);
2996
2997 amdgpu_dm_irq_resume_late(adev);
2998
2999 amdgpu_dm_smu_write_watermarks_table(adev);
3000
3001 return 0;
3002 }
3003
3004 /**
3005 * DOC: DM Lifecycle
3006 *
3007 * DM (and consequently DC) is registered in the amdgpu base driver as an IP
3008 * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
3009 * the base driver's device list to be initialized and torn down accordingly.
3010 *
3011 * The functions to do so are provided as hooks in &struct amd_ip_funcs.
3012 */
3013
3014 static const struct amd_ip_funcs amdgpu_dm_funcs = {
3015 .name = "dm",
3016 .early_init = dm_early_init,
3017 .late_init = dm_late_init,
3018 .sw_init = dm_sw_init,
3019 .sw_fini = dm_sw_fini,
3020 .early_fini = amdgpu_dm_early_fini,
3021 .hw_init = dm_hw_init,
3022 .hw_fini = dm_hw_fini,
3023 .suspend = dm_suspend,
3024 .resume = dm_resume,
3025 .is_idle = dm_is_idle,
3026 .wait_for_idle = dm_wait_for_idle,
3027 .check_soft_reset = dm_check_soft_reset,
3028 .soft_reset = dm_soft_reset,
3029 .set_clockgating_state = dm_set_clockgating_state,
3030 .set_powergating_state = dm_set_powergating_state,
3031 };
3032
3033 const struct amdgpu_ip_block_version dm_ip_block = {
3034 .type = AMD_IP_BLOCK_TYPE_DCE,
3035 .major = 1,
3036 .minor = 0,
3037 .rev = 0,
3038 .funcs = &amdgpu_dm_funcs,
3039 };
3040
3041
3042 /**
3043 * DOC: atomic
3044 *
3045 * *WIP*
3046 */
3047
3048 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
3049 .fb_create = amdgpu_display_user_framebuffer_create,
3050 .get_format_info = amdgpu_dm_plane_get_format_info,
3051 .atomic_check = amdgpu_dm_atomic_check,
3052 .atomic_commit = drm_atomic_helper_commit,
3053 };
3054
3055 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
3056 .atomic_commit_tail = amdgpu_dm_atomic_commit_tail,
3057 .atomic_commit_setup = drm_dp_mst_atomic_setup_commit,
3058 };
3059
3060 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
3061 {
3062 struct amdgpu_dm_backlight_caps *caps;
3063 struct drm_connector *conn_base;
3064 struct amdgpu_device *adev;
3065 struct drm_luminance_range_info *luminance_range;
3066
3067 if (aconnector->bl_idx == -1 ||
3068 aconnector->dc_link->connector_signal != SIGNAL_TYPE_EDP)
3069 return;
3070
3071 conn_base = &aconnector->base;
3072 adev = drm_to_adev(conn_base->dev);
3073
3074 caps = &adev->dm.backlight_caps[aconnector->bl_idx];
3075 caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
3076 caps->aux_support = false;
3077
3078 if (caps->ext_caps->bits.oled == 1
3079 /*
3080 * ||
3081 * caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
3082 * caps->ext_caps->bits.hdr_aux_backlight_control == 1
3083 */)
3084 caps->aux_support = true;
3085
3086 if (amdgpu_backlight == 0)
3087 caps->aux_support = false;
3088 else if (amdgpu_backlight == 1)
3089 caps->aux_support = true;
3090
3091 luminance_range = &conn_base->display_info.luminance_range;
3092
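	/* Assume a 0..512 aux input-signal range when the panel does not
	 * report its luminance range.
	 */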
3093 if (luminance_range->max_luminance) {
3094 caps->aux_min_input_signal = luminance_range->min_luminance;
3095 caps->aux_max_input_signal = luminance_range->max_luminance;
3096 } else {
3097 caps->aux_min_input_signal = 0;
3098 caps->aux_max_input_signal = 512;
3099 }
3100 }
3101
3102 void amdgpu_dm_update_connector_after_detect(
3103 struct amdgpu_dm_connector *aconnector)
3104 {
3105 struct drm_connector *connector = &aconnector->base;
3106 struct drm_device *dev = connector->dev;
3107 struct dc_sink *sink;
3108
3109 /* MST handled by drm_mst framework */
3110 if (aconnector->mst_mgr.mst_state == true)
3111 return;
3112
3113 sink = aconnector->dc_link->local_sink;
3114 if (sink)
3115 dc_sink_retain(sink);
3116
3117 /*
3118 * Edid mgmt connector gets first update only in mode_valid hook and then
3119 * the connector sink is set to either a fake or a physical sink, depending on link status.
3120 * Skip if already done during boot.
3121 */
3122 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
3123 && aconnector->dc_em_sink) {
3124
3125 /*
3126 * For S3 resume with headless use eml_sink to fake stream
3127 * because on resume connector->sink is set to NULL
3128 */
3129 mutex_lock(&dev->mode_config.mutex);
3130
3131 if (sink) {
3132 if (aconnector->dc_sink) {
3133 amdgpu_dm_update_freesync_caps(connector, NULL);
3134 /*
3135 * retain and release below are used to
3136 * bump up refcount for sink because the link doesn't point
3137 * to it anymore after disconnect, so on next crtc to connector
3138 * reshuffle by UMD we will get into unwanted dc_sink release
3139 */
3140 dc_sink_release(aconnector->dc_sink);
3141 }
3142 aconnector->dc_sink = sink;
3143 dc_sink_retain(aconnector->dc_sink);
3144 amdgpu_dm_update_freesync_caps(connector,
3145 aconnector->edid);
3146 } else {
3147 amdgpu_dm_update_freesync_caps(connector, NULL);
3148 if (!aconnector->dc_sink) {
3149 aconnector->dc_sink = aconnector->dc_em_sink;
3150 dc_sink_retain(aconnector->dc_sink);
3151 }
3152 }
3153
3154 mutex_unlock(&dev->mode_config.mutex);
3155
3156 if (sink)
3157 dc_sink_release(sink);
3158 return;
3159 }
3160
3161 /*
3162 * TODO: temporary guard to look for proper fix
3163 * if this sink is MST sink, we should not do anything
3164 */
3165 if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
3166 dc_sink_release(sink);
3167 return;
3168 }
3169
3170 if (aconnector->dc_sink == sink) {
3171 /*
3172 * We got a DP short pulse (Link Loss, DP CTS, etc...).
3173 * Do nothing!!
3174 */
3175 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
3176 aconnector->connector_id);
3177 if (sink)
3178 dc_sink_release(sink);
3179 return;
3180 }
3181
3182 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
3183 aconnector->connector_id, aconnector->dc_sink, sink);
3184
3185 mutex_lock(&dev->mode_config.mutex);
3186
3187 /*
3188 * 1. Update status of the drm connector
3189 * 2. Send an event and let userspace tell us what to do
3190 */
3191 if (sink) {
3192 /*
3193 * TODO: check if we still need the S3 mode update workaround.
3194 * If yes, put it here.
3195 */
3196 if (aconnector->dc_sink) {
3197 amdgpu_dm_update_freesync_caps(connector, NULL);
3198 dc_sink_release(aconnector->dc_sink);
3199 }
3200
3201 aconnector->dc_sink = sink;
3202 dc_sink_retain(aconnector->dc_sink);
3203 if (sink->dc_edid.length == 0) {
3204 aconnector->edid = NULL;
3205 if (aconnector->dc_link->aux_mode) {
3206 drm_dp_cec_unset_edid(
3207 &aconnector->dm_dp_aux.aux);
3208 }
3209 } else {
3210 aconnector->edid =
3211 (struct edid *)sink->dc_edid.raw_edid;
3212
3213 if (aconnector->dc_link->aux_mode)
3214 drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
3215 aconnector->edid);
3216 }
3217
3218 if (!aconnector->timing_requested) {
3219 aconnector->timing_requested =
3220 kzalloc(sizeof(struct dc_crtc_timing), GFP_KERNEL);
3221 if (!aconnector->timing_requested)
3222 dm_error("failed to create aconnector->requested_timing\n");
3223 }
3224
3225 drm_connector_update_edid_property(connector, aconnector->edid);
3226 amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
3227 update_connector_ext_caps(aconnector);
3228 } else {
3229 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
3230 amdgpu_dm_update_freesync_caps(connector, NULL);
3231 drm_connector_update_edid_property(connector, NULL);
3232 aconnector->num_modes = 0;
3233 dc_sink_release(aconnector->dc_sink);
3234 aconnector->dc_sink = NULL;
3235 aconnector->edid = NULL;
3236 kfree(aconnector->timing_requested);
3237 aconnector->timing_requested = NULL;
3238 /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
3239 if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
3240 connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
3241 }
3242
3243 mutex_unlock(&dev->mode_config.mutex);
3244
3245 update_subconnector_property(aconnector);
3246
3247 if (sink)
3248 dc_sink_release(sink);
3249 }
3250
3251 static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
3252 {
3253 struct drm_connector *connector = &aconnector->base;
3254 struct drm_device *dev = connector->dev;
3255 enum dc_connection_type new_connection_type = dc_connection_none;
3256 struct amdgpu_device *adev = drm_to_adev(dev);
3257 struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
3258 bool ret = false;
3259
3260 if (adev->dm.disable_hpd_irq)
3261 return;
3262
3263 /*
3264 * In case of failure, or for MST, there is no need to update the connector status
3265 * or notify the OS, since (in the MST case) MST does this in its own context.
3266 */
3267 mutex_lock(&aconnector->hpd_lock);
3268
3269 if (adev->dm.hdcp_workqueue) {
3270 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
3271 dm_con_state->update_hdcp = true;
3272 }
3273 if (aconnector->fake_enable)
3274 aconnector->fake_enable = false;
3275
3276 aconnector->timing_changed = false;
3277
3278 if (!dc_link_detect_connection_type(aconnector->dc_link, &new_connection_type))
3279 DRM_ERROR("KMS: Failed to detect connector\n");
3280
3281 if (aconnector->base.force && new_connection_type == dc_connection_none) {
3282 emulated_link_detect(aconnector->dc_link);
3283
3284 drm_modeset_lock_all(dev);
3285 dm_restore_drm_connector_state(dev, connector);
3286 drm_modeset_unlock_all(dev);
3287
3288 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
3289 drm_kms_helper_connector_hotplug_event(connector);
3290 } else {
3291 mutex_lock(&adev->dm.dc_lock);
3292 ret = dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
3293 mutex_unlock(&adev->dm.dc_lock);
3294 if (ret) {
3295 amdgpu_dm_update_connector_after_detect(aconnector);
3296
3297 drm_modeset_lock_all(dev);
3298 dm_restore_drm_connector_state(dev, connector);
3299 drm_modeset_unlock_all(dev);
3300
3301 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
3302 drm_kms_helper_connector_hotplug_event(connector);
3303 }
3304 }
3305 mutex_unlock(&aconnector->hpd_lock);
3306
3307 }
3308
3309 static void handle_hpd_irq(void *param)
3310 {
3311 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
3312
3313 handle_hpd_irq_helper(aconnector);
3314
3315 }
3316
3317 static void schedule_hpd_rx_offload_work(struct hpd_rx_irq_offload_work_queue *offload_wq,
3318 union hpd_irq_data hpd_irq_data)
3319 {
3320 struct hpd_rx_irq_offload_work *offload_work =
3321 kzalloc(sizeof(*offload_work), GFP_KERNEL);
3322
3323 if (!offload_work) {
3324 DRM_ERROR("Failed to allocate hpd_rx_irq_offload_work.\n");
3325 return;
3326 }
3327
3328 INIT_WORK(&offload_work->work, dm_handle_hpd_rx_offload_work);
3329 offload_work->data = hpd_irq_data;
3330 offload_work->offload_wq = offload_wq;
3331
3332 queue_work(offload_wq->wq, &offload_work->work);
3333 DRM_DEBUG_KMS("queue work to handle hpd_rx offload work");
3334 }
3335
3336 static void handle_hpd_rx_irq(void *param)
3337 {
3338 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
3339 struct drm_connector *connector = &aconnector->base;
3340 struct drm_device *dev = connector->dev;
3341 struct dc_link *dc_link = aconnector->dc_link;
3342 bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
3343 bool result = false;
3344 enum dc_connection_type new_connection_type = dc_connection_none;
3345 struct amdgpu_device *adev = drm_to_adev(dev);
3346 union hpd_irq_data hpd_irq_data;
3347 bool link_loss = false;
3348 bool has_left_work = false;
3349 int idx = dc_link->link_index;
3350 struct hpd_rx_irq_offload_work_queue *offload_wq = &adev->dm.hpd_rx_offload_wq[idx];
3351
3352 memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
3353
3354 if (adev->dm.disable_hpd_irq)
3355 return;
3356
3357 /*
3358 * TODO: Temporarily add a mutex to protect the hpd interrupt from a gpio
3359 * conflict; once the i2c helper is implemented, this mutex should be
3360 * retired.
3361 */
3362 mutex_lock(&aconnector->hpd_lock);
3363
3364 result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data,
3365 &link_loss, true, &has_left_work);
3366
3367 if (!has_left_work)
3368 goto out;
3369
3370 if (hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
3371 schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3372 goto out;
3373 }
3374
3375 if (dc_link_dp_allow_hpd_rx_irq(dc_link)) {
3376 if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||
3377 hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
3378 bool skip = false;
3379
3380 /*
3381 * DOWN_REP_MSG_RDY is also handled by polling method
3382 * mgr->cbs->poll_hpd_irq()
3383 */
3384 spin_lock(&offload_wq->offload_lock);
3385 skip = offload_wq->is_handling_mst_msg_rdy_event;
3386
3387 if (!skip)
3388 offload_wq->is_handling_mst_msg_rdy_event = true;
3389
3390 spin_unlock(&offload_wq->offload_lock);
3391
3392 if (!skip)
3393 schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3394
3395 goto out;
3396 }
3397
3398 if (link_loss) {
3399 bool skip = false;
3400
3401 spin_lock(&offload_wq->offload_lock);
3402 skip = offload_wq->is_handling_link_loss;
3403
3404 if (!skip)
3405 offload_wq->is_handling_link_loss = true;
3406
3407 spin_unlock(&offload_wq->offload_lock);
3408
3409 if (!skip)
3410 schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3411
3412 goto out;
3413 }
3414 }
3415
3416 out:
3417 if (result && !is_mst_root_connector) {
3418 /* Downstream Port status changed. */
3419 if (!dc_link_detect_connection_type(dc_link, &new_connection_type))
3420 DRM_ERROR("KMS: Failed to detect connector\n");
3421
3422 if (aconnector->base.force && new_connection_type == dc_connection_none) {
3423 emulated_link_detect(dc_link);
3424
3425 if (aconnector->fake_enable)
3426 aconnector->fake_enable = false;
3427
3428 amdgpu_dm_update_connector_after_detect(aconnector);
3429
3430
3431 drm_modeset_lock_all(dev);
3432 dm_restore_drm_connector_state(dev, connector);
3433 drm_modeset_unlock_all(dev);
3434
3435 drm_kms_helper_connector_hotplug_event(connector);
3436 } else {
3437 bool ret = false;
3438
3439 mutex_lock(&adev->dm.dc_lock);
3440 ret = dc_link_detect(dc_link, DETECT_REASON_HPDRX);
3441 mutex_unlock(&adev->dm.dc_lock);
3442
3443 if (ret) {
3444 if (aconnector->fake_enable)
3445 aconnector->fake_enable = false;
3446
3447 amdgpu_dm_update_connector_after_detect(aconnector);
3448
3449 drm_modeset_lock_all(dev);
3450 dm_restore_drm_connector_state(dev, connector);
3451 drm_modeset_unlock_all(dev);
3452
3453 drm_kms_helper_connector_hotplug_event(connector);
3454 }
3455 }
3456 }
3457 if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
3458 if (adev->dm.hdcp_workqueue)
3459 hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
3460 }
3461
3462 if (dc_link->type != dc_connection_mst_branch)
3463 drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
3464
3465 mutex_unlock(&aconnector->hpd_lock);
3466 }
3467
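/*
 * Walk the connector list and hook up handle_hpd_irq() / handle_hpd_rx_irq()
 * for every link that exposes an HPD or HPD RX interrupt source. When the
 * DMUB outbox is supported, also register dmub_hpd_callback() for the
 * DMUB_NOTIFICATION_HPD and DMUB_NOTIFICATION_HPD_IRQ notifications.
 */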
3468 static void register_hpd_handlers(struct amdgpu_device *adev)
3469 {
3470 struct drm_device *dev = adev_to_drm(adev);
3471 struct drm_connector *connector;
3472 struct amdgpu_dm_connector *aconnector;
3473 const struct dc_link *dc_link;
3474 struct dc_interrupt_params int_params = {0};
3475
3476 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3477 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3478
3479 if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
3480 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, dmub_hpd_callback, true))
3481 DRM_ERROR("amdgpu: fail to register dmub hpd callback");
3482
3483 if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_IRQ, dmub_hpd_callback, true))
3484 DRM_ERROR("amdgpu: fail to register dmub hpd callback");
3485 }
3486
3487 list_for_each_entry(connector,
3488 &dev->mode_config.connector_list, head) {
3489
3490 aconnector = to_amdgpu_dm_connector(connector);
3491 dc_link = aconnector->dc_link;
3492
3493 if (dc_link->irq_source_hpd != DC_IRQ_SOURCE_INVALID) {
3494 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3495 int_params.irq_source = dc_link->irq_source_hpd;
3496
3497 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3498 handle_hpd_irq,
3499 (void *) aconnector);
3500 }
3501
3502 if (dc_link->irq_source_hpd_rx != DC_IRQ_SOURCE_INVALID) {
3503
3504 /* Also register for DP short pulse (hpd_rx). */
3505 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3506 int_params.irq_source = dc_link->irq_source_hpd_rx;
3507
3508 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3509 handle_hpd_rx_irq,
3510 (void *) aconnector);
3511 }
3512 }
3513 }
3514
3515 #if defined(CONFIG_DRM_AMD_DC_SI)
3516 /* Register IRQ sources and initialize IRQ callbacks */
3517 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
3518 {
3519 struct dc *dc = adev->dm.dc;
3520 struct common_irq_params *c_irq_params;
3521 struct dc_interrupt_params int_params = {0};
3522 int r;
3523 int i;
3524 unsigned int client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3525
3526 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3527 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3528
3529 /*
3530 * Actions of amdgpu_irq_add_id():
3531 * 1. Register a set() function with base driver.
3532 * Base driver will call set() function to enable/disable an
3533 * interrupt in DC hardware.
3534 * 2. Register amdgpu_dm_irq_handler().
3535 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3536 * coming from DC hardware.
3537 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3538 * for acknowledging and handling.
3539 */
3540
3541 /* Use VBLANK interrupt */
3542 for (i = 0; i < adev->mode_info.num_crtc; i++) {
3543 r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq);
3544 if (r) {
3545 DRM_ERROR("Failed to add crtc irq id!\n");
3546 return r;
3547 }
3548
3549 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3550 int_params.irq_source =
3551 dc_interrupt_to_irq_source(dc, i + 1, 0);
3552
3553 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3554
3555 c_irq_params->adev = adev;
3556 c_irq_params->irq_src = int_params.irq_source;
3557
3558 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3559 dm_crtc_high_irq, c_irq_params);
3560 }
3561
3562 /* Use GRPH_PFLIP interrupt */
3563 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3564 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3565 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3566 if (r) {
3567 DRM_ERROR("Failed to add page flip irq id!\n");
3568 return r;
3569 }
3570
3571 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3572 int_params.irq_source =
3573 dc_interrupt_to_irq_source(dc, i, 0);
3574
3575 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3576
3577 c_irq_params->adev = adev;
3578 c_irq_params->irq_src = int_params.irq_source;
3579
3580 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3581 dm_pflip_high_irq, c_irq_params);
3582
3583 }
3584
3585 /* HPD */
3586 r = amdgpu_irq_add_id(adev, client_id,
3587 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3588 if (r) {
3589 DRM_ERROR("Failed to add hpd irq id!\n");
3590 return r;
3591 }
3592
3593 register_hpd_handlers(adev);
3594
3595 return 0;
3596 }
3597 #endif
3598
3599 /* Register IRQ sources and initialize IRQ callbacks */
3600 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
3601 {
3602 struct dc *dc = adev->dm.dc;
3603 struct common_irq_params *c_irq_params;
3604 struct dc_interrupt_params int_params = {0};
3605 int r;
3606 int i;
3607 unsigned int client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3608
3609 if (adev->family >= AMDGPU_FAMILY_AI)
3610 client_id = SOC15_IH_CLIENTID_DCE;
3611
3612 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3613 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3614
3615 /*
3616 * Actions of amdgpu_irq_add_id():
3617 * 1. Register a set() function with base driver.
3618 * Base driver will call set() function to enable/disable an
3619 * interrupt in DC hardware.
3620 * 2. Register amdgpu_dm_irq_handler().
3621 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3622 * coming from DC hardware.
3623 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3624 * for acknowledging and handling.
3625 */
3626
3627 /* Use VBLANK interrupt */
3628 for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
3629 r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
3630 if (r) {
3631 DRM_ERROR("Failed to add crtc irq id!\n");
3632 return r;
3633 }
3634
3635 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3636 int_params.irq_source =
3637 dc_interrupt_to_irq_source(dc, i, 0);
3638
3639 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3640
3641 c_irq_params->adev = adev;
3642 c_irq_params->irq_src = int_params.irq_source;
3643
3644 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3645 dm_crtc_high_irq, c_irq_params);
3646 }
3647
3648 /* Use VUPDATE interrupt */
3649 for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
3650 r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
3651 if (r) {
3652 DRM_ERROR("Failed to add vupdate irq id!\n");
3653 return r;
3654 }
3655
3656 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3657 int_params.irq_source =
3658 dc_interrupt_to_irq_source(dc, i, 0);
3659
3660 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3661
3662 c_irq_params->adev = adev;
3663 c_irq_params->irq_src = int_params.irq_source;
3664
3665 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3666 dm_vupdate_high_irq, c_irq_params);
3667 }
3668
3669 /* Use GRPH_PFLIP interrupt */
3670 for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3671 i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3672 r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3673 if (r) {
3674 DRM_ERROR("Failed to add page flip irq id!\n");
3675 return r;
3676 }
3677
3678 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3679 int_params.irq_source =
3680 dc_interrupt_to_irq_source(dc, i, 0);
3681
3682 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3683
3684 c_irq_params->adev = adev;
3685 c_irq_params->irq_src = int_params.irq_source;
3686
3687 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3688 dm_pflip_high_irq, c_irq_params);
3689
3690 }
3691
3692 /* HPD */
3693 r = amdgpu_irq_add_id(adev, client_id,
3694 VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3695 if (r) {
3696 DRM_ERROR("Failed to add hpd irq id!\n");
3697 return r;
3698 }
3699
3700 register_hpd_handlers(adev);
3701
3702 return 0;
3703 }
3704
3705 /* Register IRQ sources and initialize IRQ callbacks */
3706 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
3707 {
3708 struct dc *dc = adev->dm.dc;
3709 struct common_irq_params *c_irq_params;
3710 struct dc_interrupt_params int_params = {0};
3711 int r;
3712 int i;
3713 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3714 static const unsigned int vrtl_int_srcid[] = {
3715 DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL,
3716 DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL,
3717 DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL,
3718 DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL,
3719 DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL,
3720 DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL
3721 };
3722 #endif
3723
3724 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3725 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3726
3727 /*
3728 * Actions of amdgpu_irq_add_id():
3729 * 1. Register a set() function with base driver.
3730 * Base driver will call set() function to enable/disable an
3731 * interrupt in DC hardware.
3732 * 2. Register amdgpu_dm_irq_handler().
3733 * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3734 * coming from DC hardware.
3735 * amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3736 * for acknowledging and handling.
3737 */
3738
3739 /* Use VSTARTUP interrupt */
3740 for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
3741 i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
3742 i++) {
3743 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
3744
3745 if (r) {
3746 DRM_ERROR("Failed to add crtc irq id!\n");
3747 return r;
3748 }
3749
3750 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3751 int_params.irq_source =
3752 dc_interrupt_to_irq_source(dc, i, 0);
3753
3754 c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3755
3756 c_irq_params->adev = adev;
3757 c_irq_params->irq_src = int_params.irq_source;
3758
3759 amdgpu_dm_irq_register_interrupt(
3760 adev, &int_params, dm_crtc_high_irq, c_irq_params);
3761 }
3762
3763 /* Use otg vertical line interrupt */
3764 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3765 for (i = 0; i <= adev->mode_info.num_crtc - 1; i++) {
3766 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE,
3767 vrtl_int_srcid[i], &adev->vline0_irq);
3768
3769 if (r) {
3770 DRM_ERROR("Failed to add vline0 irq id!\n");
3771 return r;
3772 }
3773
3774 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3775 int_params.irq_source =
3776 dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0);
3777
3778 if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) {
3779 DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]);
3780 break;
3781 }
3782
3783 c_irq_params = &adev->dm.vline0_params[int_params.irq_source
3784 - DC_IRQ_SOURCE_DC1_VLINE0];
3785
3786 c_irq_params->adev = adev;
3787 c_irq_params->irq_src = int_params.irq_source;
3788
3789 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3790 dm_dcn_vertical_interrupt0_high_irq, c_irq_params);
3791 }
3792 #endif
3793
3794 /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
3795 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
3796 * to trigger at end of each vblank, regardless of state of the lock,
3797 * matching DCE behaviour.
3798 */
3799 for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
3800 i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
3801 i++) {
3802 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
3803
3804 if (r) {
3805 DRM_ERROR("Failed to add vupdate irq id!\n");
3806 return r;
3807 }
3808
3809 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3810 int_params.irq_source =
3811 dc_interrupt_to_irq_source(dc, i, 0);
3812
3813 c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3814
3815 c_irq_params->adev = adev;
3816 c_irq_params->irq_src = int_params.irq_source;
3817
3818 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3819 dm_vupdate_high_irq, c_irq_params);
3820 }
3821
3822 /* Use GRPH_PFLIP interrupt */
3823 for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
3824 i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + dc->caps.max_otg_num - 1;
3825 i++) {
3826 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
3827 if (r) {
3828 DRM_ERROR("Failed to add page flip irq id!\n");
3829 return r;
3830 }
3831
3832 int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3833 int_params.irq_source =
3834 dc_interrupt_to_irq_source(dc, i, 0);
3835
3836 c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3837
3838 c_irq_params->adev = adev;
3839 c_irq_params->irq_src = int_params.irq_source;
3840
3841 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3842 dm_pflip_high_irq, c_irq_params);
3843
3844 }
3845
3846 /* HPD */
3847 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
3848 &adev->hpd_irq);
3849 if (r) {
3850 DRM_ERROR("Failed to add hpd irq id!\n");
3851 return r;
3852 }
3853
3854 register_hpd_handlers(adev);
3855
3856 return 0;
3857 }
3858 /* Register Outbox IRQ sources and initialize IRQ callbacks */
3859 static int register_outbox_irq_handlers(struct amdgpu_device *adev)
3860 {
3861 struct dc *dc = adev->dm.dc;
3862 struct common_irq_params *c_irq_params;
3863 struct dc_interrupt_params int_params = {0};
3864 int r, i;
3865
3866 int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3867 int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3868
3869 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT,
3870 &adev->dmub_outbox_irq);
3871 if (r) {
3872 DRM_ERROR("Failed to add outbox irq id!\n");
3873 return r;
3874 }
3875
3876 if (dc->ctx->dmub_srv) {
3877 i = DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT;
3878 int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3879 int_params.irq_source =
3880 dc_interrupt_to_irq_source(dc, i, 0);
3881
3882 c_irq_params = &adev->dm.dmub_outbox_params[0];
3883
3884 c_irq_params->adev = adev;
3885 c_irq_params->irq_src = int_params.irq_source;
3886
3887 amdgpu_dm_irq_register_interrupt(adev, &int_params,
3888 dm_dmub_outbox1_low_irq, c_irq_params);
3889 }
3890
3891 return 0;
3892 }
3893
3894 /*
3895 * Acquires the lock for the atomic state object and returns
3896 * the new atomic state.
3897 *
3898 * This should only be called during atomic check.
3899 */
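/*
 * Typical caller pattern during atomic check (illustrative sketch only, not
 * lifted verbatim from a specific call site):
 *
 *	struct dm_atomic_state *dm_state = NULL;
 *	int ret;
 *
 *	ret = dm_atomic_get_state(state, &dm_state);
 *	if (ret)
 *		return ret;
 *	(dm_state->context then points at the dc_state tracked by this commit)
 */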
3900 int dm_atomic_get_state(struct drm_atomic_state *state,
3901 struct dm_atomic_state **dm_state)
3902 {
3903 struct drm_device *dev = state->dev;
3904 struct amdgpu_device *adev = drm_to_adev(dev);
3905 struct amdgpu_display_manager *dm = &adev->dm;
3906 struct drm_private_state *priv_state;
3907
3908 if (*dm_state)
3909 return 0;
3910
3911 priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
3912 if (IS_ERR(priv_state))
3913 return PTR_ERR(priv_state);
3914
3915 *dm_state = to_dm_atomic_state(priv_state);
3916
3917 return 0;
3918 }
3919
3920 static struct dm_atomic_state *
3921 dm_atomic_get_new_state(struct drm_atomic_state *state)
3922 {
3923 struct drm_device *dev = state->dev;
3924 struct amdgpu_device *adev = drm_to_adev(dev);
3925 struct amdgpu_display_manager *dm = &adev->dm;
3926 struct drm_private_obj *obj;
3927 struct drm_private_state *new_obj_state;
3928 int i;
3929
3930 for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
3931 if (obj->funcs == dm->atomic_obj.funcs)
3932 return to_dm_atomic_state(new_obj_state);
3933 }
3934
3935 return NULL;
3936 }
3937
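/*
 * Duplicate the DM private object state for a new atomic commit. The dc_state
 * context is deep-copied with dc_copy_state(); if the copy fails, the
 * duplicated state is discarded and NULL is returned.
 */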
3938 static struct drm_private_state *
3939 dm_atomic_duplicate_state(struct drm_private_obj *obj)
3940 {
3941 struct dm_atomic_state *old_state, *new_state;
3942
3943 new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3944 if (!new_state)
3945 return NULL;
3946
3947 __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3948
3949 old_state = to_dm_atomic_state(obj->state);
3950
3951 if (old_state && old_state->context)
3952 new_state->context = dc_copy_state(old_state->context);
3953
3954 if (!new_state->context) {
3955 kfree(new_state);
3956 return NULL;
3957 }
3958
3959 return &new_state->base;
3960 }
3961
3962 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3963 struct drm_private_state *state)
3964 {
3965 struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3966
3967 if (dm_state && dm_state->context)
3968 dc_release_state(dm_state->context);
3969
3970 kfree(dm_state);
3971 }
3972
3973 static struct drm_private_state_funcs dm_atomic_state_funcs = {
3974 .atomic_duplicate_state = dm_atomic_duplicate_state,
3975 .atomic_destroy_state = dm_atomic_destroy_state,
3976 };
3977
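/*
 * Initialize DRM mode_config for the device: size limits, preferred depth,
 * async page flip support, the DM private atomic object (seeded with a copy
 * of the current dc_state), the modeset properties and the audio component.
 */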
3978 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3979 {
3980 struct dm_atomic_state *state;
3981 int r;
3982
3983 adev->mode_info.mode_config_initialized = true;
3984
3985 adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3986 adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
3987
3988 adev_to_drm(adev)->mode_config.max_width = 16384;
3989 adev_to_drm(adev)->mode_config.max_height = 16384;
3990
3991 adev_to_drm(adev)->mode_config.preferred_depth = 24;
3992 if (adev->asic_type == CHIP_HAWAII)
3993 /* disable prefer shadow for now due to hibernation issues */
3994 adev_to_drm(adev)->mode_config.prefer_shadow = 0;
3995 else
3996 adev_to_drm(adev)->mode_config.prefer_shadow = 1;
3997 /* indicates support for immediate flip */
3998 adev_to_drm(adev)->mode_config.async_page_flip = true;
3999
4000 state = kzalloc(sizeof(*state), GFP_KERNEL);
4001 if (!state)
4002 return -ENOMEM;
4003
4004 state->context = dc_create_state(adev->dm.dc);
4005 if (!state->context) {
4006 kfree(state);
4007 return -ENOMEM;
4008 }
4009
4010 dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
4011
4012 drm_atomic_private_obj_init(adev_to_drm(adev),
4013 &adev->dm.atomic_obj,
4014 &state->base,
4015 &dm_atomic_state_funcs);
4016
4017 r = amdgpu_display_modeset_create_props(adev);
4018 if (r) {
4019 dc_release_state(state->context);
4020 kfree(state);
4021 return r;
4022 }
4023
4024 r = amdgpu_dm_audio_init(adev);
4025 if (r) {
4026 dc_release_state(state->context);
4027 kfree(state);
4028 return r;
4029 }
4030
4031 return 0;
4032 }
4033
4034 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
4035 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
4036 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
4037
4038 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm,
4039 int bl_idx)
4040 {
4041 #if defined(CONFIG_ACPI)
4042 struct amdgpu_dm_backlight_caps caps;
4043
4044 memset(&caps, 0, sizeof(caps));
4045
4046 if (dm->backlight_caps[bl_idx].caps_valid)
4047 return;
4048
4049 amdgpu_acpi_get_backlight_caps(&caps);
4050 if (caps.caps_valid) {
4051 dm->backlight_caps[bl_idx].caps_valid = true;
4052 if (caps.aux_support)
4053 return;
4054 dm->backlight_caps[bl_idx].min_input_signal = caps.min_input_signal;
4055 dm->backlight_caps[bl_idx].max_input_signal = caps.max_input_signal;
4056 } else {
4057 dm->backlight_caps[bl_idx].min_input_signal =
4058 AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
4059 dm->backlight_caps[bl_idx].max_input_signal =
4060 AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
4061 }
4062 #else
4063 if (dm->backlight_caps[bl_idx].aux_support)
4064 return;
4065
4066 dm->backlight_caps[bl_idx].min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
4067 dm->backlight_caps[bl_idx].max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
4068 #endif
4069 }
4070
4071 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
4072 unsigned int *min, unsigned int *max)
4073 {
4074 if (!caps)
4075 return 0;
4076
4077 if (caps->aux_support) {
4078 // Firmware limits are in nits, DC API wants millinits.
4079 *max = 1000 * caps->aux_max_input_signal;
4080 *min = 1000 * caps->aux_min_input_signal;
4081 } else {
4082 // Firmware limits are 8-bit, PWM control is 16-bit.
4083 *max = 0x101 * caps->max_input_signal;
4084 *min = 0x101 * caps->min_input_signal;
4085 }
4086 return 1;
4087 }
4088
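/*
 * Rescale a user brightness value (0..AMDGPU_MAX_BL_LEVEL) into the range
 * reported by get_brightness_range(). Rough worked example (values assumed,
 * not taken from any particular panel, and assuming AMDGPU_MAX_BL_LEVEL is
 * 255): min_input_signal = 12 and max_input_signal = 255 give a PWM range of
 * 0x101 * 12 = 3084 .. 0x101 * 255 = 65535, so a user value of 128 maps to
 * 3084 + DIV_ROUND_CLOSEST((65535 - 3084) * 128, 255) = 34432.
 */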
4089 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
4090 uint32_t brightness)
4091 {
4092 unsigned int min, max;
4093
4094 if (!get_brightness_range(caps, &min, &max))
4095 return brightness;
4096
4097 // Rescale 0..255 to min..max
4098 return min + DIV_ROUND_CLOSEST((max - min) * brightness,
4099 AMDGPU_MAX_BL_LEVEL);
4100 }
4101
4102 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
4103 uint32_t brightness)
4104 {
4105 unsigned int min, max;
4106
4107 if (!get_brightness_range(caps, &min, &max))
4108 return brightness;
4109
4110 if (brightness < min)
4111 return 0;
4112 // Rescale min..max to 0..255
4113 return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
4114 max - min);
4115 }
4116
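/*
 * Program a new backlight level for the given eDP link. The user value is
 * cached in dm->brightness[], mirrored into the ATOM scratch register for the
 * first panel, converted with convert_brightness_from_user() and then applied
 * either through the AUX path (dc_link_set_backlight_level_nits()) or the PWM
 * path (dc_link_set_backlight_level()), depending on the reported caps.
 */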
4117 static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
4118 int bl_idx,
4119 u32 user_brightness)
4120 {
4121 struct amdgpu_dm_backlight_caps caps;
4122 struct dc_link *link;
4123 u32 brightness;
4124 bool rc;
4125
4126 amdgpu_dm_update_backlight_caps(dm, bl_idx);
4127 caps = dm->backlight_caps[bl_idx];
4128
4129 dm->brightness[bl_idx] = user_brightness;
4130 /* update scratch register */
4131 if (bl_idx == 0)
4132 amdgpu_atombios_scratch_regs_set_backlight_level(dm->adev, dm->brightness[bl_idx]);
4133 brightness = convert_brightness_from_user(&caps, dm->brightness[bl_idx]);
4134 link = (struct dc_link *)dm->backlight_link[bl_idx];
4135
4136 /* Change brightness based on AUX property */
4137 if (caps.aux_support) {
4138 rc = dc_link_set_backlight_level_nits(link, true, brightness,
4139 AUX_BL_DEFAULT_TRANSITION_TIME_MS);
4140 if (!rc)
4141 DRM_DEBUG("DM: Failed to update backlight via AUX on eDP[%d]\n", bl_idx);
4142 } else {
4143 rc = dc_link_set_backlight_level(link, brightness, 0);
4144 if (!rc)
4145 DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", bl_idx);
4146 }
4147
4148 if (rc)
4149 dm->actual_brightness[bl_idx] = user_brightness;
4150 }
4151
4152 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
4153 {
4154 struct amdgpu_display_manager *dm = bl_get_data(bd);
4155 int i;
4156
4157 for (i = 0; i < dm->num_of_edps; i++) {
4158 if (bd == dm->backlight_dev[i])
4159 break;
4160 }
4161 if (i >= AMDGPU_DM_MAX_NUM_EDP)
4162 i = 0;
4163 amdgpu_dm_backlight_set_level(dm, i, bd->props.brightness);
4164
4165 return 0;
4166 }
4167
4168 static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm,
4169 int bl_idx)
4170 {
4171 int ret;
4172 struct amdgpu_dm_backlight_caps caps;
4173 struct dc_link *link = (struct dc_link *)dm->backlight_link[bl_idx];
4174
4175 amdgpu_dm_update_backlight_caps(dm, bl_idx);
4176 caps = dm->backlight_caps[bl_idx];
4177
4178 if (caps.aux_support) {
4179 u32 avg, peak;
4180 bool rc;
4181
4182 rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
4183 if (!rc)
4184 return dm->brightness[bl_idx];
4185 return convert_brightness_to_user(&caps, avg);
4186 }
4187
4188 ret = dc_link_get_backlight_level(link);
4189
4190 if (ret == DC_ERROR_UNEXPECTED)
4191 return dm->brightness[bl_idx];
4192
4193 return convert_brightness_to_user(&caps, ret);
4194 }
4195
4196 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
4197 {
4198 struct amdgpu_display_manager *dm = bl_get_data(bd);
4199 int i;
4200
4201 for (i = 0; i < dm->num_of_edps; i++) {
4202 if (bd == dm->backlight_dev[i])
4203 break;
4204 }
4205 if (i >= AMDGPU_DM_MAX_NUM_EDP)
4206 i = 0;
4207 return amdgpu_dm_backlight_get_level(dm, i);
4208 }
4209
4210 static const struct backlight_ops amdgpu_dm_backlight_ops = {
4211 .options = BL_CORE_SUSPENDRESUME,
4212 .get_brightness = amdgpu_dm_backlight_get_brightness,
4213 .update_status = amdgpu_dm_backlight_update_status,
4214 };
4215
4216 static void
4217 amdgpu_dm_register_backlight_device(struct amdgpu_dm_connector *aconnector)
4218 {
4219 struct drm_device *drm = aconnector->base.dev;
4220 struct amdgpu_display_manager *dm = &drm_to_adev(drm)->dm;
4221 struct backlight_properties props = { 0 };
4222 char bl_name[16];
4223
4224 if (aconnector->bl_idx == -1)
4225 return;
4226
4227 if (!acpi_video_backlight_use_native()) {
4228 drm_info(drm, "Skipping amdgpu DM backlight registration\n");
4229 /* Try registering an ACPI video backlight device instead. */
4230 acpi_video_register_backlight();
4231 return;
4232 }
4233
4234 props.max_brightness = AMDGPU_MAX_BL_LEVEL;
4235 props.brightness = AMDGPU_MAX_BL_LEVEL;
4236 props.type = BACKLIGHT_RAW;
4237
4238 snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
4239 drm->primary->index + aconnector->bl_idx);
4240
4241 dm->backlight_dev[aconnector->bl_idx] =
4242 backlight_device_register(bl_name, aconnector->base.kdev, dm,
4243 &amdgpu_dm_backlight_ops, &props);
4244
4245 if (IS_ERR(dm->backlight_dev[aconnector->bl_idx])) {
4246 DRM_ERROR("DM: Backlight registration failed!\n");
4247 dm->backlight_dev[aconnector->bl_idx] = NULL;
4248 } else
4249 DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
4250 }
4251
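/*
 * Allocate and initialize a single DRM plane of the requested type. For
 * primary planes the possible_crtcs mask is restricted to the matching CRTC
 * (see the HACK note below); overlay planes may be placed on any CRTC.
 */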
4252 static int initialize_plane(struct amdgpu_display_manager *dm,
4253 struct amdgpu_mode_info *mode_info, int plane_id,
4254 enum drm_plane_type plane_type,
4255 const struct dc_plane_cap *plane_cap)
4256 {
4257 struct drm_plane *plane;
4258 unsigned long possible_crtcs;
4259 int ret = 0;
4260
4261 plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
4262 if (!plane) {
4263 DRM_ERROR("KMS: Failed to allocate plane\n");
4264 return -ENOMEM;
4265 }
4266 plane->type = plane_type;
4267
4268 /*
4269 * HACK: IGT tests expect that the primary plane for a CRTC
4270 * can only have one possible CRTC. Only expose support for
4271 * any CRTC to planes that will not be used as a primary plane
4272 * for a CRTC - like overlay or underlay planes.
4273 */
4274 possible_crtcs = 1 << plane_id;
4275 if (plane_id >= dm->dc->caps.max_streams)
4276 possible_crtcs = 0xff;
4277
4278 ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
4279
4280 if (ret) {
4281 DRM_ERROR("KMS: Failed to initialize plane\n");
4282 kfree(plane);
4283 return ret;
4284 }
4285
4286 if (mode_info)
4287 mode_info->planes[plane_id] = plane;
4288
4289 return ret;
4290 }
4291
4292
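/*
 * Record an eDP/LVDS link as a backlight source: assign the next bl_idx,
 * refresh the backlight caps, remember the link in dm->backlight_link[] and
 * bump dm->num_of_edps. Links without a connected panel, or beyond
 * AMDGPU_DM_MAX_NUM_EDP, are skipped.
 */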
4293 static void setup_backlight_device(struct amdgpu_display_manager *dm,
4294 struct amdgpu_dm_connector *aconnector)
4295 {
4296 struct dc_link *link = aconnector->dc_link;
4297 int bl_idx = dm->num_of_edps;
4298
4299 if (!(link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) ||
4300 link->type == dc_connection_none)
4301 return;
4302
4303 if (dm->num_of_edps >= AMDGPU_DM_MAX_NUM_EDP) {
4304 drm_warn(adev_to_drm(dm->adev), "Too many eDP connections, skipping backlight setup for additional eDPs\n");
4305 return;
4306 }
4307
4308 aconnector->bl_idx = bl_idx;
4309
4310 amdgpu_dm_update_backlight_caps(dm, bl_idx);
4311 dm->brightness[bl_idx] = AMDGPU_MAX_BL_LEVEL;
4312 dm->backlight_link[bl_idx] = link;
4313 dm->num_of_edps++;
4314
4315 update_connector_ext_caps(aconnector);
4316 }
4317
4318 static void amdgpu_set_panel_orientation(struct drm_connector *connector);
4319
4320 /*
4321 * In this architecture, the association
4322 * connector -> encoder -> crtc
4323 * is not really required. The crtc and connector will hold the
4324 * display_index as an abstraction to use with the DAL component.
4325 *
4326 * Returns 0 on success
4327 */
4328 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
4329 {
4330 struct amdgpu_display_manager *dm = &adev->dm;
4331 s32 i;
4332 struct amdgpu_dm_connector *aconnector = NULL;
4333 struct amdgpu_encoder *aencoder = NULL;
4334 struct amdgpu_mode_info *mode_info = &adev->mode_info;
4335 u32 link_cnt;
4336 s32 primary_planes;
4337 enum dc_connection_type new_connection_type = dc_connection_none;
4338 const struct dc_plane_cap *plane;
4339 bool psr_feature_enabled = false;
4340 int max_overlay = dm->dc->caps.max_slave_planes;
4341
4342 dm->display_indexes_num = dm->dc->caps.max_streams;
4343 /* Update the actual used number of crtc */
4344 adev->mode_info.num_crtc = adev->dm.display_indexes_num;
4345
4346 amdgpu_dm_set_irq_funcs(adev);
4347
4348 link_cnt = dm->dc->caps.max_links;
4349 if (amdgpu_dm_mode_config_init(dm->adev)) {
4350 DRM_ERROR("DM: Failed to initialize mode config\n");
4351 return -EINVAL;
4352 }
4353
4354 /* There is one primary plane per CRTC */
4355 primary_planes = dm->dc->caps.max_streams;
4356 ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
4357
4358 /*
4359 * Initialize primary planes, implicit planes for legacy IOCTLS.
4360 * Order is reversed to match iteration order in atomic check.
4361 */
4362 for (i = (primary_planes - 1); i >= 0; i--) {
4363 plane = &dm->dc->caps.planes[i];
4364
4365 if (initialize_plane(dm, mode_info, i,
4366 DRM_PLANE_TYPE_PRIMARY, plane)) {
4367 DRM_ERROR("KMS: Failed to initialize primary plane\n");
4368 goto fail;
4369 }
4370 }
4371
4372 /*
4373 * Initialize overlay planes, index starting after primary planes.
4374 * These planes have a higher DRM index than the primary planes since
4375 * they should be considered as having a higher z-order.
4376 * Order is reversed to match iteration order in atomic check.
4377 *
4378 * Only support DCN for now, and only expose one so we don't encourage
4379 * userspace to use up all the pipes.
4380 */
4381 for (i = 0; i < dm->dc->caps.max_planes; ++i) {
4382 struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
4383
4384 /* Do not create overlay if MPO disabled */
4385 if (amdgpu_dc_debug_mask & DC_DISABLE_MPO)
4386 break;
4387
4388 if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
4389 continue;
4390
4391 if (!plane->pixel_format_support.argb8888)
4392 continue;
4393
4394 if (max_overlay-- == 0)
4395 break;
4396
4397 if (initialize_plane(dm, NULL, primary_planes + i,
4398 DRM_PLANE_TYPE_OVERLAY, plane)) {
4399 DRM_ERROR("KMS: Failed to initialize overlay plane\n");
4400 goto fail;
4401 }
4402 }
4403
4404 for (i = 0; i < dm->dc->caps.max_streams; i++)
4405 if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
4406 DRM_ERROR("KMS: Failed to initialize crtc\n");
4407 goto fail;
4408 }
4409
4410 /* Use Outbox interrupt */
4411 switch (adev->ip_versions[DCE_HWIP][0]) {
4412 case IP_VERSION(3, 0, 0):
4413 case IP_VERSION(3, 1, 2):
4414 case IP_VERSION(3, 1, 3):
4415 case IP_VERSION(3, 1, 4):
4416 case IP_VERSION(3, 1, 5):
4417 case IP_VERSION(3, 1, 6):
4418 case IP_VERSION(3, 2, 0):
4419 case IP_VERSION(3, 2, 1):
4420 case IP_VERSION(2, 1, 0):
4421 if (register_outbox_irq_handlers(dm->adev)) {
4422 DRM_ERROR("DM: Failed to initialize IRQ\n");
4423 goto fail;
4424 }
4425 break;
4426 default:
4427 DRM_DEBUG_KMS("Unsupported DCN IP version for outbox: 0x%X\n",
4428 adev->ip_versions[DCE_HWIP][0]);
4429 }
4430
4431 /* Determine whether to enable PSR support by default. */
4432 if (!(amdgpu_dc_debug_mask & DC_DISABLE_PSR)) {
4433 switch (adev->ip_versions[DCE_HWIP][0]) {
4434 case IP_VERSION(3, 1, 2):
4435 case IP_VERSION(3, 1, 3):
4436 case IP_VERSION(3, 1, 4):
4437 case IP_VERSION(3, 1, 5):
4438 case IP_VERSION(3, 1, 6):
4439 case IP_VERSION(3, 2, 0):
4440 case IP_VERSION(3, 2, 1):
4441 psr_feature_enabled = true;
4442 break;
4443 default:
4444 psr_feature_enabled = amdgpu_dc_feature_mask & DC_PSR_MASK;
4445 break;
4446 }
4447 }
4448
4449 /* loops over all connectors on the board */
4450 for (i = 0; i < link_cnt; i++) {
4451 struct dc_link *link = NULL;
4452
4453 if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
4454 DRM_ERROR(
4455 "KMS: Cannot support more than %d display indexes\n",
4456 AMDGPU_DM_MAX_DISPLAY_INDEX);
4457 continue;
4458 }
4459
4460 aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
4461 if (!aconnector)
4462 goto fail;
4463
4464 aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
4465 if (!aencoder)
4466 goto fail;
4467
4468 if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
4469 DRM_ERROR("KMS: Failed to initialize encoder\n");
4470 goto fail;
4471 }
4472
4473 if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
4474 DRM_ERROR("KMS: Failed to initialize connector\n");
4475 goto fail;
4476 }
4477
4478 link = dc_get_link_at_index(dm->dc, i);
4479
4480 if (dm->hpd_rx_offload_wq)
4481 dm->hpd_rx_offload_wq[aconnector->base.index].aconnector =
4482 aconnector;
4483
4484 if (!dc_link_detect_connection_type(link, &new_connection_type))
4485 DRM_ERROR("KMS: Failed to detect connector\n");
4486
4487 if (aconnector->base.force && new_connection_type == dc_connection_none) {
4488 emulated_link_detect(link);
4489 amdgpu_dm_update_connector_after_detect(aconnector);
4490 } else {
4491 bool ret = false;
4492
4493 mutex_lock(&dm->dc_lock);
4494 ret = dc_link_detect(link, DETECT_REASON_BOOT);
4495 mutex_unlock(&dm->dc_lock);
4496
4497 if (ret) {
4498 amdgpu_dm_update_connector_after_detect(aconnector);
4499 setup_backlight_device(dm, aconnector);
4500
4501 if (psr_feature_enabled)
4502 amdgpu_dm_set_psr_caps(link);
4503
4504 /* TODO: Fix vblank control helpers to delay PSR entry to allow this when
4505 * PSR is also supported.
4506 */
4507 if (link->psr_settings.psr_feature_enabled)
4508 adev_to_drm(adev)->vblank_disable_immediate = false;
4509 }
4510 }
4511 amdgpu_set_panel_orientation(&aconnector->base);
4512 }
4513
4514 /* Software is initialized. Now we can register interrupt handlers. */
4515 switch (adev->asic_type) {
4516 #if defined(CONFIG_DRM_AMD_DC_SI)
4517 case CHIP_TAHITI:
4518 case CHIP_PITCAIRN:
4519 case CHIP_VERDE:
4520 case CHIP_OLAND:
4521 if (dce60_register_irq_handlers(dm->adev)) {
4522 DRM_ERROR("DM: Failed to initialize IRQ\n");
4523 goto fail;
4524 }
4525 break;
4526 #endif
4527 case CHIP_BONAIRE:
4528 case CHIP_HAWAII:
4529 case CHIP_KAVERI:
4530 case CHIP_KABINI:
4531 case CHIP_MULLINS:
4532 case CHIP_TONGA:
4533 case CHIP_FIJI:
4534 case CHIP_CARRIZO:
4535 case CHIP_STONEY:
4536 case CHIP_POLARIS11:
4537 case CHIP_POLARIS10:
4538 case CHIP_POLARIS12:
4539 case CHIP_VEGAM:
4540 case CHIP_VEGA10:
4541 case CHIP_VEGA12:
4542 case CHIP_VEGA20:
4543 if (dce110_register_irq_handlers(dm->adev)) {
4544 DRM_ERROR("DM: Failed to initialize IRQ\n");
4545 goto fail;
4546 }
4547 break;
4548 default:
4549 switch (adev->ip_versions[DCE_HWIP][0]) {
4550 case IP_VERSION(1, 0, 0):
4551 case IP_VERSION(1, 0, 1):
4552 case IP_VERSION(2, 0, 2):
4553 case IP_VERSION(2, 0, 3):
4554 case IP_VERSION(2, 0, 0):
4555 case IP_VERSION(2, 1, 0):
4556 case IP_VERSION(3, 0, 0):
4557 case IP_VERSION(3, 0, 2):
4558 case IP_VERSION(3, 0, 3):
4559 case IP_VERSION(3, 0, 1):
4560 case IP_VERSION(3, 1, 2):
4561 case IP_VERSION(3, 1, 3):
4562 case IP_VERSION(3, 1, 4):
4563 case IP_VERSION(3, 1, 5):
4564 case IP_VERSION(3, 1, 6):
4565 case IP_VERSION(3, 2, 0):
4566 case IP_VERSION(3, 2, 1):
4567 if (dcn10_register_irq_handlers(dm->adev)) {
4568 DRM_ERROR("DM: Failed to initialize IRQ\n");
4569 goto fail;
4570 }
4571 break;
4572 default:
4573 DRM_ERROR("Unsupported DCE IP versions: 0x%X\n",
4574 adev->ip_versions[DCE_HWIP][0]);
4575 goto fail;
4576 }
4577 break;
4578 }
4579
4580 return 0;
4581 fail:
4582 kfree(aencoder);
4583 kfree(aconnector);
4584
4585 return -EINVAL;
4586 }
4587
4588 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
4589 {
4590 drm_atomic_private_obj_fini(&dm->atomic_obj);
4591 }
4592
4593 /******************************************************************************
4594 * amdgpu_display_funcs functions
4595 *****************************************************************************/
4596
4597 /*
4598 * dm_bandwidth_update - program display watermarks
4599 *
4600 * @adev: amdgpu_device pointer
4601 *
4602 * Calculate and program the display watermarks and line buffer allocation.
4603 */
4604 static void dm_bandwidth_update(struct amdgpu_device *adev)
4605 {
4606 /* TODO: implement later */
4607 }
4608
4609 static const struct amdgpu_display_funcs dm_display_funcs = {
4610 .bandwidth_update = dm_bandwidth_update, /* called unconditionally */
4611 .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
4612 .backlight_set_level = NULL, /* never called for DC */
4613 .backlight_get_level = NULL, /* never called for DC */
4614 .hpd_sense = NULL,/* called unconditionally */
4615 .hpd_set_polarity = NULL, /* called unconditionally */
4616 .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
4617 .page_flip_get_scanoutpos =
4618 dm_crtc_get_scanoutpos,/* called unconditionally */
4619 .add_encoder = NULL, /* VBIOS parsing. DAL does it. */
4620 .add_connector = NULL, /* VBIOS parsing. DAL does it. */
4621 };
4622
4623 #if defined(CONFIG_DEBUG_KERNEL_DC)
4624
4625 static ssize_t s3_debug_store(struct device *device,
4626 struct device_attribute *attr,
4627 const char *buf,
4628 size_t count)
4629 {
4630 int ret;
4631 int s3_state;
4632 struct drm_device *drm_dev = dev_get_drvdata(device);
4633 struct amdgpu_device *adev = drm_to_adev(drm_dev);
4634
4635 ret = kstrtoint(buf, 0, &s3_state);
4636
4637 if (ret == 0) {
4638 if (s3_state) {
4639 dm_resume(adev);
4640 drm_kms_helper_hotplug_event(adev_to_drm(adev));
4641 } else
4642 dm_suspend(adev);
4643 }
4644
4645 return ret == 0 ? count : 0;
4646 }
4647
4648 DEVICE_ATTR_WO(s3_debug);
4649
4650 #endif
4651
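/*
 * Pick the DMUB firmware image matching the DCN IP version (DCE parts have no
 * DMUB and simply return 0) and request it through amdgpu_ucode_request().
 */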
4652 static int dm_init_microcode(struct amdgpu_device *adev)
4653 {
4654 char *fw_name_dmub;
4655 int r;
4656
4657 switch (adev->ip_versions[DCE_HWIP][0]) {
4658 case IP_VERSION(2, 1, 0):
4659 fw_name_dmub = FIRMWARE_RENOIR_DMUB;
4660 if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
4661 fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
4662 break;
4663 case IP_VERSION(3, 0, 0):
4664 if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0))
4665 fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
4666 else
4667 fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
4668 break;
4669 case IP_VERSION(3, 0, 1):
4670 fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
4671 break;
4672 case IP_VERSION(3, 0, 2):
4673 fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
4674 break;
4675 case IP_VERSION(3, 0, 3):
4676 fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB;
4677 break;
4678 case IP_VERSION(3, 1, 2):
4679 case IP_VERSION(3, 1, 3):
4680 fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB;
4681 break;
4682 case IP_VERSION(3, 1, 4):
4683 fw_name_dmub = FIRMWARE_DCN_314_DMUB;
4684 break;
4685 case IP_VERSION(3, 1, 5):
4686 fw_name_dmub = FIRMWARE_DCN_315_DMUB;
4687 break;
4688 case IP_VERSION(3, 1, 6):
4689 fw_name_dmub = FIRMWARE_DCN316_DMUB;
4690 break;
4691 case IP_VERSION(3, 2, 0):
4692 fw_name_dmub = FIRMWARE_DCN_V3_2_0_DMCUB;
4693 break;
4694 case IP_VERSION(3, 2, 1):
4695 fw_name_dmub = FIRMWARE_DCN_V3_2_1_DMCUB;
4696 break;
4697 default:
4698 /* ASIC doesn't support DMUB. */
4699 return 0;
4700 }
4701 r = amdgpu_ucode_request(adev, &adev->dm.dmub_fw, fw_name_dmub);
4702 if (r)
4703 DRM_ERROR("DMUB firmware loading failed: %d\n", r);
4704 return r;
4705 }
4706
4707 static int dm_early_init(void *handle)
4708 {
4709 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4710 struct amdgpu_mode_info *mode_info = &adev->mode_info;
4711 struct atom_context *ctx = mode_info->atom_context;
4712 int index = GetIndexIntoMasterTable(DATA, Object_Header);
4713 u16 data_offset;
4714
4715 /* if there is no object header, skip DM */
4716 if (!amdgpu_atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset)) {
4717 adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK;
4718 dev_info(adev->dev, "No object header, skipping DM\n");
4719 return -ENOENT;
4720 }
4721
4722 switch (adev->asic_type) {
4723 #if defined(CONFIG_DRM_AMD_DC_SI)
4724 case CHIP_TAHITI:
4725 case CHIP_PITCAIRN:
4726 case CHIP_VERDE:
4727 adev->mode_info.num_crtc = 6;
4728 adev->mode_info.num_hpd = 6;
4729 adev->mode_info.num_dig = 6;
4730 break;
4731 case CHIP_OLAND:
4732 adev->mode_info.num_crtc = 2;
4733 adev->mode_info.num_hpd = 2;
4734 adev->mode_info.num_dig = 2;
4735 break;
4736 #endif
4737 case CHIP_BONAIRE:
4738 case CHIP_HAWAII:
4739 adev->mode_info.num_crtc = 6;
4740 adev->mode_info.num_hpd = 6;
4741 adev->mode_info.num_dig = 6;
4742 break;
4743 case CHIP_KAVERI:
4744 adev->mode_info.num_crtc = 4;
4745 adev->mode_info.num_hpd = 6;
4746 adev->mode_info.num_dig = 7;
4747 break;
4748 case CHIP_KABINI:
4749 case CHIP_MULLINS:
4750 adev->mode_info.num_crtc = 2;
4751 adev->mode_info.num_hpd = 6;
4752 adev->mode_info.num_dig = 6;
4753 break;
4754 case CHIP_FIJI:
4755 case CHIP_TONGA:
4756 adev->mode_info.num_crtc = 6;
4757 adev->mode_info.num_hpd = 6;
4758 adev->mode_info.num_dig = 7;
4759 break;
4760 case CHIP_CARRIZO:
4761 adev->mode_info.num_crtc = 3;
4762 adev->mode_info.num_hpd = 6;
4763 adev->mode_info.num_dig = 9;
4764 break;
4765 case CHIP_STONEY:
4766 adev->mode_info.num_crtc = 2;
4767 adev->mode_info.num_hpd = 6;
4768 adev->mode_info.num_dig = 9;
4769 break;
4770 case CHIP_POLARIS11:
4771 case CHIP_POLARIS12:
4772 adev->mode_info.num_crtc = 5;
4773 adev->mode_info.num_hpd = 5;
4774 adev->mode_info.num_dig = 5;
4775 break;
4776 case CHIP_POLARIS10:
4777 case CHIP_VEGAM:
4778 adev->mode_info.num_crtc = 6;
4779 adev->mode_info.num_hpd = 6;
4780 adev->mode_info.num_dig = 6;
4781 break;
4782 case CHIP_VEGA10:
4783 case CHIP_VEGA12:
4784 case CHIP_VEGA20:
4785 adev->mode_info.num_crtc = 6;
4786 adev->mode_info.num_hpd = 6;
4787 adev->mode_info.num_dig = 6;
4788 break;
4789 default:
4790
4791 switch (adev->ip_versions[DCE_HWIP][0]) {
4792 case IP_VERSION(2, 0, 2):
4793 case IP_VERSION(3, 0, 0):
4794 adev->mode_info.num_crtc = 6;
4795 adev->mode_info.num_hpd = 6;
4796 adev->mode_info.num_dig = 6;
4797 break;
4798 case IP_VERSION(2, 0, 0):
4799 case IP_VERSION(3, 0, 2):
4800 adev->mode_info.num_crtc = 5;
4801 adev->mode_info.num_hpd = 5;
4802 adev->mode_info.num_dig = 5;
4803 break;
4804 case IP_VERSION(2, 0, 3):
4805 case IP_VERSION(3, 0, 3):
4806 adev->mode_info.num_crtc = 2;
4807 adev->mode_info.num_hpd = 2;
4808 adev->mode_info.num_dig = 2;
4809 break;
4810 case IP_VERSION(1, 0, 0):
4811 case IP_VERSION(1, 0, 1):
4812 case IP_VERSION(3, 0, 1):
4813 case IP_VERSION(2, 1, 0):
4814 case IP_VERSION(3, 1, 2):
4815 case IP_VERSION(3, 1, 3):
4816 case IP_VERSION(3, 1, 4):
4817 case IP_VERSION(3, 1, 5):
4818 case IP_VERSION(3, 1, 6):
4819 case IP_VERSION(3, 2, 0):
4820 case IP_VERSION(3, 2, 1):
4821 adev->mode_info.num_crtc = 4;
4822 adev->mode_info.num_hpd = 4;
4823 adev->mode_info.num_dig = 4;
4824 break;
4825 default:
4826 DRM_ERROR("Unsupported DCE IP versions: 0x%x\n",
4827 adev->ip_versions[DCE_HWIP][0]);
4828 return -EINVAL;
4829 }
4830 break;
4831 }
4832
4833 if (adev->mode_info.funcs == NULL)
4834 adev->mode_info.funcs = &dm_display_funcs;
4835
4836 /*
4837 * Note: Do NOT change adev->audio_endpt_rreg and
4838 * adev->audio_endpt_wreg because they are initialised in
4839 * amdgpu_device_init()
4840 */
4841 #if defined(CONFIG_DEBUG_KERNEL_DC)
4842 device_create_file(
4843 adev_to_drm(adev)->dev,
4844 &dev_attr_s3_debug);
4845 #endif
4846 adev->dc_enabled = true;
4847
4848 return dm_init_microcode(adev);
4849 }
4850
4851 static bool modereset_required(struct drm_crtc_state *crtc_state)
4852 {
4853 return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4854 }
4855
4856 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
4857 {
4858 drm_encoder_cleanup(encoder);
4859 kfree(encoder);
4860 }
4861
4862 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
4863 .destroy = amdgpu_dm_encoder_destroy,
4864 };
4865
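/*
 * Translate the DRM COLOR_ENCODING/COLOR_RANGE plane properties into a DC
 * color space. RGB surfaces always use COLOR_SPACE_SRGB; YCbCr surfaces map
 * BT.601/BT.709 to their full- or limited-range variants, and BT.2020 is only
 * accepted in full range.
 */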
4866 static int
4867 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
4868 const enum surface_pixel_format format,
4869 enum dc_color_space *color_space)
4870 {
4871 bool full_range;
4872
4873 *color_space = COLOR_SPACE_SRGB;
4874
4875 /* DRM color properties only affect non-RGB formats. */
4876 if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
4877 return 0;
4878
4879 full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
4880
4881 switch (plane_state->color_encoding) {
4882 case DRM_COLOR_YCBCR_BT601:
4883 if (full_range)
4884 *color_space = COLOR_SPACE_YCBCR601;
4885 else
4886 *color_space = COLOR_SPACE_YCBCR601_LIMITED;
4887 break;
4888
4889 case DRM_COLOR_YCBCR_BT709:
4890 if (full_range)
4891 *color_space = COLOR_SPACE_YCBCR709;
4892 else
4893 *color_space = COLOR_SPACE_YCBCR709_LIMITED;
4894 break;
4895
4896 case DRM_COLOR_YCBCR_BT2020:
4897 if (full_range)
4898 *color_space = COLOR_SPACE_2020_YCBCR;
4899 else
4900 return -EINVAL;
4901 break;
4902
4903 default:
4904 return -EINVAL;
4905 }
4906
4907 return 0;
4908 }
4909
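/*
 * Build the dc_plane_info (pixel format, rotation, tiling, DCC, blending) and
 * the dc_plane_address for a DRM plane state, using the framebuffer format
 * and the provided tiling flags.
 */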
4910 static int
4911 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
4912 const struct drm_plane_state *plane_state,
4913 const u64 tiling_flags,
4914 struct dc_plane_info *plane_info,
4915 struct dc_plane_address *address,
4916 bool tmz_surface,
4917 bool force_disable_dcc)
4918 {
4919 const struct drm_framebuffer *fb = plane_state->fb;
4920 const struct amdgpu_framebuffer *afb =
4921 to_amdgpu_framebuffer(plane_state->fb);
4922 int ret;
4923
4924 memset(plane_info, 0, sizeof(*plane_info));
4925
4926 switch (fb->format->format) {
4927 case DRM_FORMAT_C8:
4928 plane_info->format =
4929 SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
4930 break;
4931 case DRM_FORMAT_RGB565:
4932 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
4933 break;
4934 case DRM_FORMAT_XRGB8888:
4935 case DRM_FORMAT_ARGB8888:
4936 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
4937 break;
4938 case DRM_FORMAT_XRGB2101010:
4939 case DRM_FORMAT_ARGB2101010:
4940 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
4941 break;
4942 case DRM_FORMAT_XBGR2101010:
4943 case DRM_FORMAT_ABGR2101010:
4944 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
4945 break;
4946 case DRM_FORMAT_XBGR8888:
4947 case DRM_FORMAT_ABGR8888:
4948 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
4949 break;
4950 case DRM_FORMAT_NV21:
4951 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
4952 break;
4953 case DRM_FORMAT_NV12:
4954 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
4955 break;
4956 case DRM_FORMAT_P010:
4957 plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
4958 break;
4959 case DRM_FORMAT_XRGB16161616F:
4960 case DRM_FORMAT_ARGB16161616F:
4961 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
4962 break;
4963 case DRM_FORMAT_XBGR16161616F:
4964 case DRM_FORMAT_ABGR16161616F:
4965 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
4966 break;
4967 case DRM_FORMAT_XRGB16161616:
4968 case DRM_FORMAT_ARGB16161616:
4969 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616;
4970 break;
4971 case DRM_FORMAT_XBGR16161616:
4972 case DRM_FORMAT_ABGR16161616:
4973 plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616;
4974 break;
4975 default:
4976 DRM_ERROR(
4977 "Unsupported screen format %p4cc\n",
4978 &fb->format->format);
4979 return -EINVAL;
4980 }
4981
4982 switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
4983 case DRM_MODE_ROTATE_0:
4984 plane_info->rotation = ROTATION_ANGLE_0;
4985 break;
4986 case DRM_MODE_ROTATE_90:
4987 plane_info->rotation = ROTATION_ANGLE_90;
4988 break;
4989 case DRM_MODE_ROTATE_180:
4990 plane_info->rotation = ROTATION_ANGLE_180;
4991 break;
4992 case DRM_MODE_ROTATE_270:
4993 plane_info->rotation = ROTATION_ANGLE_270;
4994 break;
4995 default:
4996 plane_info->rotation = ROTATION_ANGLE_0;
4997 break;
4998 }
4999
5000
5001 plane_info->visible = true;
5002 plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
5003
5004 plane_info->layer_index = plane_state->normalized_zpos;
5005
5006 ret = fill_plane_color_attributes(plane_state, plane_info->format,
5007 &plane_info->color_space);
5008 if (ret)
5009 return ret;
5010
5011 ret = amdgpu_dm_plane_fill_plane_buffer_attributes(adev, afb, plane_info->format,
5012 plane_info->rotation, tiling_flags,
5013 &plane_info->tiling_info,
5014 &plane_info->plane_size,
5015 &plane_info->dcc, address,
5016 tmz_surface, force_disable_dcc);
5017 if (ret)
5018 return ret;
5019
5020 amdgpu_dm_plane_fill_blending_from_plane_state(
5021 plane_state, &plane_info->per_pixel_alpha, &plane_info->pre_multiplied_alpha,
5022 &plane_info->global_alpha, &plane_info->global_alpha_value);
5023
5024 return 0;
5025 }
5026
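/*
 * Populate a dc_plane_state from the DRM plane/CRTC state: scaling rects,
 * surface info and address (via fill_dc_plane_info_and_addr()) and the input
 * transfer function for color management.
 */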
5027 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
5028 struct dc_plane_state *dc_plane_state,
5029 struct drm_plane_state *plane_state,
5030 struct drm_crtc_state *crtc_state)
5031 {
5032 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
5033 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
5034 struct dc_scaling_info scaling_info;
5035 struct dc_plane_info plane_info;
5036 int ret;
5037 bool force_disable_dcc = false;
5038
5039 ret = amdgpu_dm_plane_fill_dc_scaling_info(adev, plane_state, &scaling_info);
5040 if (ret)
5041 return ret;
5042
5043 dc_plane_state->src_rect = scaling_info.src_rect;
5044 dc_plane_state->dst_rect = scaling_info.dst_rect;
5045 dc_plane_state->clip_rect = scaling_info.clip_rect;
5046 dc_plane_state->scaling_quality = scaling_info.scaling_quality;
5047
5048 force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
5049 ret = fill_dc_plane_info_and_addr(adev, plane_state,
5050 afb->tiling_flags,
5051 &plane_info,
5052 &dc_plane_state->address,
5053 afb->tmz_surface,
5054 force_disable_dcc);
5055 if (ret)
5056 return ret;
5057
5058 dc_plane_state->format = plane_info.format;
5059 dc_plane_state->color_space = plane_info.color_space;
5061 dc_plane_state->plane_size = plane_info.plane_size;
5062 dc_plane_state->rotation = plane_info.rotation;
5063 dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
5064 dc_plane_state->stereo_format = plane_info.stereo_format;
5065 dc_plane_state->tiling_info = plane_info.tiling_info;
5066 dc_plane_state->visible = plane_info.visible;
5067 dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
5068 dc_plane_state->pre_multiplied_alpha = plane_info.pre_multiplied_alpha;
5069 dc_plane_state->global_alpha = plane_info.global_alpha;
5070 dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
5071 dc_plane_state->dcc = plane_info.dcc;
5072 dc_plane_state->layer_index = plane_info.layer_index;
5073 dc_plane_state->flip_int_enabled = true;
5074
5075 /*
5076 * Always set input transfer function, since plane state is refreshed
5077 * every time.
5078 */
5079 ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
5080 if (ret)
5081 return ret;
5082
5083 return 0;
5084 }
5085
5086 static inline void fill_dc_dirty_rect(struct drm_plane *plane,
5087 struct rect *dirty_rect, int32_t x,
5088 s32 y, s32 width, s32 height,
5089 int *i, bool ffu)
5090 {
5091 WARN_ON(*i >= DC_MAX_DIRTY_RECTS);
5092
5093 dirty_rect->x = x;
5094 dirty_rect->y = y;
5095 dirty_rect->width = width;
5096 dirty_rect->height = height;
5097
5098 if (ffu)
5099 drm_dbg(plane->dev,
5100 "[PLANE:%d] PSR FFU dirty rect size (%d, %d)\n",
5101 plane->base.id, width, height);
5102 else
5103 drm_dbg(plane->dev,
5104 "[PLANE:%d] PSR SU dirty rect at (%d, %d) size (%d, %d)",
5105 plane->base.id, x, y, width, height);
5106
5107 (*i)++;
5108 }
5109
5110 /**
5111 * fill_dc_dirty_rects() - Fill DC dirty regions for PSR selective updates
5112 *
5113 * @plane: DRM plane containing dirty regions that need to be flushed to the eDP
5114 * remote fb
5115 * @old_plane_state: Old state of @plane
5116 * @new_plane_state: New state of @plane
5117 * @crtc_state: New state of CRTC connected to the @plane
5118 * @flip_addrs: DC flip tracking struct, which also tracks dirty rects
5119 * @dirty_regions_changed: set to true when the set of dirty regions has changed
5120 *
5121 * For PSR SU, DC informs the DMUB uController of dirty rectangle regions
5122 * (referred to as "damage clips" in DRM nomenclature) that require updating on
5123 * the eDP remote buffer. The responsibility of specifying the dirty regions is
5124 * amdgpu_dm's.
5125 *
5126 * A damage-aware DRM client should fill the FB_DAMAGE_CLIPS property on the
5127 * plane with regions that require flushing to the eDP remote buffer. In
5128 * addition, certain use cases - such as cursor and multi-plane overlay (MPO) -
5129 * implicitly provide damage clips without any client support via the plane
5130 * bounds.
5131 */
5132 static void fill_dc_dirty_rects(struct drm_plane *plane,
5133 struct drm_plane_state *old_plane_state,
5134 struct drm_plane_state *new_plane_state,
5135 struct drm_crtc_state *crtc_state,
5136 struct dc_flip_addrs *flip_addrs,
5137 bool *dirty_regions_changed)
5138 {
5139 struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
5140 struct rect *dirty_rects = flip_addrs->dirty_rects;
5141 u32 num_clips;
5142 struct drm_mode_rect *clips;
5143 bool bb_changed;
5144 bool fb_changed;
5145 u32 i = 0;
5146 *dirty_regions_changed = false;
5147
5148 /*
5149 * Cursor plane has its own dirty rect update interface. See
5150 * dcn10_dmub_update_cursor_data and dmub_cmd_update_cursor_info_data
5151 */
5152 if (plane->type == DRM_PLANE_TYPE_CURSOR)
5153 return;
5154
5155 if (new_plane_state->rotation != DRM_MODE_ROTATE_0)
5156 goto ffu;
5157
5158 num_clips = drm_plane_get_damage_clips_count(new_plane_state);
5159 clips = drm_plane_get_damage_clips(new_plane_state);
5160
5161 if (!dm_crtc_state->mpo_requested) {
5162 if (!num_clips || num_clips > DC_MAX_DIRTY_RECTS)
5163 goto ffu;
5164
5165 for (; flip_addrs->dirty_rect_count < num_clips; clips++)
5166 fill_dc_dirty_rect(new_plane_state->plane,
5167 &dirty_rects[flip_addrs->dirty_rect_count],
5168 clips->x1, clips->y1,
5169 clips->x2 - clips->x1, clips->y2 - clips->y1,
5170 &flip_addrs->dirty_rect_count,
5171 false);
5172 return;
5173 }
5174
5175 /*
5176 * MPO is requested. Add entire plane bounding box to dirty rects if
5177 * flipped to or damaged.
5178 *
5179 * If plane is moved or resized, also add old bounding box to dirty
5180 * rects.
5181 */
5182 fb_changed = old_plane_state->fb->base.id !=
5183 new_plane_state->fb->base.id;
5184 bb_changed = (old_plane_state->crtc_x != new_plane_state->crtc_x ||
5185 old_plane_state->crtc_y != new_plane_state->crtc_y ||
5186 old_plane_state->crtc_w != new_plane_state->crtc_w ||
5187 old_plane_state->crtc_h != new_plane_state->crtc_h);
5188
5189 drm_dbg(plane->dev,
5190 "[PLANE:%d] PSR bb_changed:%d fb_changed:%d num_clips:%d\n",
5191 new_plane_state->plane->base.id,
5192 bb_changed, fb_changed, num_clips);
5193
5194 *dirty_regions_changed = bb_changed;
5195
5196 if ((num_clips + (bb_changed ? 2 : 0)) > DC_MAX_DIRTY_RECTS)
5197 goto ffu;
5198
5199 if (bb_changed) {
5200 fill_dc_dirty_rect(new_plane_state->plane, &dirty_rects[i],
5201 new_plane_state->crtc_x,
5202 new_plane_state->crtc_y,
5203 new_plane_state->crtc_w,
5204 new_plane_state->crtc_h, &i, false);
5205
5206 /* Add old plane bounding-box if plane is moved or resized */
5207 fill_dc_dirty_rect(new_plane_state->plane, &dirty_rects[i],
5208 old_plane_state->crtc_x,
5209 old_plane_state->crtc_y,
5210 old_plane_state->crtc_w,
5211 old_plane_state->crtc_h, &i, false);
5212 }
5213
5214 if (num_clips) {
5215 for (; i < num_clips; clips++)
5216 fill_dc_dirty_rect(new_plane_state->plane,
5217 &dirty_rects[i], clips->x1,
5218 clips->y1, clips->x2 - clips->x1,
5219 clips->y2 - clips->y1, &i, false);
5220 } else if (fb_changed && !bb_changed) {
5221 fill_dc_dirty_rect(new_plane_state->plane, &dirty_rects[i],
5222 new_plane_state->crtc_x,
5223 new_plane_state->crtc_y,
5224 new_plane_state->crtc_w,
5225 new_plane_state->crtc_h, &i, false);
5226 }
5227
5228 flip_addrs->dirty_rect_count = i;
5229 return;
5230
5231 ffu:
5232 fill_dc_dirty_rect(new_plane_state->plane, &dirty_rects[0], 0, 0,
5233 dm_crtc_state->base.mode.crtc_hdisplay,
5234 dm_crtc_state->base.mode.crtc_vdisplay,
5235 &flip_addrs->dirty_rect_count, true);
5236 }
5237
5238 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
5239 const struct dm_connector_state *dm_state,
5240 struct dc_stream_state *stream)
5241 {
5242 enum amdgpu_rmx_type rmx_type;
5243
5244 struct rect src = { 0 }; /* viewport in composition space */
5245 struct rect dst = { 0 }; /* stream addressable area */
5246
5247 /* no mode. nothing to be done */
5248 if (!mode)
5249 return;
5250
5251 /* Full screen scaling by default */
5252 src.width = mode->hdisplay;
5253 src.height = mode->vdisplay;
5254 dst.width = stream->timing.h_addressable;
5255 dst.height = stream->timing.v_addressable;
5256
5257 if (dm_state) {
5258 rmx_type = dm_state->scaling;
5259 if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
5260 if (src.width * dst.height <
5261 src.height * dst.width) {
5262 /* height needs less upscaling/more downscaling */
5263 dst.width = src.width *
5264 dst.height / src.height;
5265 } else {
5266 /* width needs less upscaling/more downscaling */
5267 dst.height = src.height *
5268 dst.width / src.width;
5269 }
5270 } else if (rmx_type == RMX_CENTER) {
5271 dst = src;
5272 }
5273
5274 dst.x = (stream->timing.h_addressable - dst.width) / 2;
5275 dst.y = (stream->timing.v_addressable - dst.height) / 2;
5276
5277 if (dm_state->underscan_enable) {
5278 dst.x += dm_state->underscan_hborder / 2;
5279 dst.y += dm_state->underscan_vborder / 2;
5280 dst.width -= dm_state->underscan_hborder;
5281 dst.height -= dm_state->underscan_vborder;
5282 }
5283 }
5284
5285 stream->src = src;
5286 stream->dst = dst;
5287
5288 DRM_DEBUG_KMS("Destination Rectangle x:%d y:%d width:%d height:%d\n",
5289 dst.x, dst.y, dst.width, dst.height);
5290
5291 }
5292
5293 static enum dc_color_depth
5294 convert_color_depth_from_display_info(const struct drm_connector *connector,
5295 bool is_y420, int requested_bpc)
5296 {
5297 u8 bpc;
5298
5299 if (is_y420) {
5300 bpc = 8;
5301
5302 /* Cap display bpc based on HDMI 2.0 HF-VSDB */
5303 if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
5304 bpc = 16;
5305 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
5306 bpc = 12;
5307 else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
5308 bpc = 10;
5309 } else {
5310 bpc = (uint8_t)connector->display_info.bpc;
5311 /* Assume 8 bpc by default if no bpc is specified. */
5312 bpc = bpc ? bpc : 8;
5313 }
5314
5315 if (requested_bpc > 0) {
5316 /*
5317 * Cap display bpc based on the user requested value.
5318 *
5319 * The value for state->max_bpc may not be correctly updated
5320 * depending on when the connector gets added to the state
5321 * or if this was called outside of atomic check, so it
5322 * can't be used directly.
5323 */
5324 bpc = min_t(u8, bpc, requested_bpc);
5325
5326 /* Round down to the nearest even number. */
5327 bpc = bpc - (bpc & 1);
5328 }
5329
5330 switch (bpc) {
5331 case 0:
5332 /*
5333 * Temporary workaround: DRM doesn't parse color depth for
5334 * EDID revisions before 1.4
5335 * TODO: Fix edid parsing
5336 */
5337 return COLOR_DEPTH_888;
5338 case 6:
5339 return COLOR_DEPTH_666;
5340 case 8:
5341 return COLOR_DEPTH_888;
5342 case 10:
5343 return COLOR_DEPTH_101010;
5344 case 12:
5345 return COLOR_DEPTH_121212;
5346 case 14:
5347 return COLOR_DEPTH_141414;
5348 case 16:
5349 return COLOR_DEPTH_161616;
5350 default:
5351 return COLOR_DEPTH_UNDEFINED;
5352 }
5353 }
5354
5355 static enum dc_aspect_ratio
5356 get_aspect_ratio(const struct drm_display_mode *mode_in)
5357 {
5358 /* 1-1 mapping, since both enums follow the HDMI spec. */
5359 return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
5360 }
5361
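/*
 * Map the connector's colorspace property onto a DC color space. For the
 * default colorimetry, fall back to a pixel-clock-based choice between
 * BT.709 (HDTV) and BT.601 (SDTV) for YCbCr outputs.
 */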
5362 static enum dc_color_space
5363 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing,
5364 const struct drm_connector_state *connector_state)
5365 {
5366 enum dc_color_space color_space = COLOR_SPACE_SRGB;
5367
5368 switch (connector_state->colorspace) {
5369 case DRM_MODE_COLORIMETRY_BT601_YCC:
5370 if (dc_crtc_timing->flags.Y_ONLY)
5371 color_space = COLOR_SPACE_YCBCR601_LIMITED;
5372 else
5373 color_space = COLOR_SPACE_YCBCR601;
5374 break;
5375 case DRM_MODE_COLORIMETRY_BT709_YCC:
5376 if (dc_crtc_timing->flags.Y_ONLY)
5377 color_space = COLOR_SPACE_YCBCR709_LIMITED;
5378 else
5379 color_space = COLOR_SPACE_YCBCR709;
5380 break;
5381 case DRM_MODE_COLORIMETRY_OPRGB:
5382 color_space = COLOR_SPACE_ADOBERGB;
5383 break;
5384 case DRM_MODE_COLORIMETRY_BT2020_RGB:
5385 case DRM_MODE_COLORIMETRY_BT2020_YCC:
5386 if (dc_crtc_timing->pixel_encoding == PIXEL_ENCODING_RGB)
5387 color_space = COLOR_SPACE_2020_RGB_FULLRANGE;
5388 else
5389 color_space = COLOR_SPACE_2020_YCBCR;
5390 break;
5391 case DRM_MODE_COLORIMETRY_DEFAULT: // ITU601
5392 default:
5393 if (dc_crtc_timing->pixel_encoding == PIXEL_ENCODING_RGB) {
5394 color_space = COLOR_SPACE_SRGB;
5395 /*
5396 * 27.03 MHz is the separation point between HDTV and SDTV
5397 * according to the HDMI spec; we use YCbCr709 and YCbCr601
5398 * respectively.
5399 */
5400 } else if (dc_crtc_timing->pix_clk_100hz > 270300) {
5401 if (dc_crtc_timing->flags.Y_ONLY)
5402 color_space =
5403 COLOR_SPACE_YCBCR709_LIMITED;
5404 else
5405 color_space = COLOR_SPACE_YCBCR709;
5406 } else {
5407 if (dc_crtc_timing->flags.Y_ONLY)
5408 color_space =
5409 COLOR_SPACE_YCBCR601_LIMITED;
5410 else
5411 color_space = COLOR_SPACE_YCBCR601;
5412 }
5413 break;
5414 }
5415
5416 return color_space;
5417 }
5418
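/*
 * Step the color depth down until the required TMDS clock (pixel clock
 * scaled per the HDMI spec, halved for YCbCr 4:2:0) fits within the
 * sink's max_tmds_clock. Returns false if no HDMI-valid depth fits.
 */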
5419 static bool adjust_colour_depth_from_display_info(
5420 struct dc_crtc_timing *timing_out,
5421 const struct drm_display_info *info)
5422 {
5423 enum dc_color_depth depth = timing_out->display_color_depth;
5424 int normalized_clk;
5425
5426 do {
5427 normalized_clk = timing_out->pix_clk_100hz / 10;
5428 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
5429 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
5430 normalized_clk /= 2;
5431 /* Adjust the pixel clock per the HDMI spec based on colour depth */
5432 switch (depth) {
5433 case COLOR_DEPTH_888:
5434 break;
5435 case COLOR_DEPTH_101010:
5436 normalized_clk = (normalized_clk * 30) / 24;
5437 break;
5438 case COLOR_DEPTH_121212:
5439 normalized_clk = (normalized_clk * 36) / 24;
5440 break;
5441 case COLOR_DEPTH_161616:
5442 normalized_clk = (normalized_clk * 48) / 24;
5443 break;
5444 default:
5445 /* The above depths are the only ones valid for HDMI. */
5446 return false;
5447 }
5448 if (normalized_clk <= info->max_tmds_clock) {
5449 timing_out->display_color_depth = depth;
5450 return true;
5451 }
5452 } while (--depth > COLOR_DEPTH_666);
5453 return false;
5454 }
5455
5456 static void fill_stream_properties_from_drm_display_mode(
5457 struct dc_stream_state *stream,
5458 const struct drm_display_mode *mode_in,
5459 const struct drm_connector *connector,
5460 const struct drm_connector_state *connector_state,
5461 const struct dc_stream_state *old_stream,
5462 int requested_bpc)
5463 {
5464 struct dc_crtc_timing *timing_out = &stream->timing;
5465 const struct drm_display_info *info = &connector->display_info;
5466 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5467 struct hdmi_vendor_infoframe hv_frame;
5468 struct hdmi_avi_infoframe avi_frame;
5469
5470 memset(&hv_frame, 0, sizeof(hv_frame));
5471 memset(&avi_frame, 0, sizeof(avi_frame));
5472
5473 timing_out->h_border_left = 0;
5474 timing_out->h_border_right = 0;
5475 timing_out->v_border_top = 0;
5476 timing_out->v_border_bottom = 0;
5477 /* TODO: un-hardcode */
5478 if (drm_mode_is_420_only(info, mode_in)
5479 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5480 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5481 else if (drm_mode_is_420_also(info, mode_in)
5482 && aconnector->force_yuv420_output)
5483 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5484 else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCBCR444)
5485 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5486 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
5487 else
5488 timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
5489
5490 timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
5491 timing_out->display_color_depth = convert_color_depth_from_display_info(
5492 connector,
5493 (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
5494 requested_bpc);
5495 timing_out->scan_type = SCANNING_TYPE_NODATA;
5496 timing_out->hdmi_vic = 0;
5497
5498 if (old_stream) {
5499 timing_out->vic = old_stream->timing.vic;
5500 timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
5501 timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
5502 } else {
5503 timing_out->vic = drm_match_cea_mode(mode_in);
5504 if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
5505 timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
5506 if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
5507 timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
5508 }
5509
5510 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5511 drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
5512 timing_out->vic = avi_frame.video_code;
5513 drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
5514 timing_out->hdmi_vic = hv_frame.vic;
5515 }
5516
5517 if (is_freesync_video_mode(mode_in, aconnector)) {
5518 timing_out->h_addressable = mode_in->hdisplay;
5519 timing_out->h_total = mode_in->htotal;
5520 timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
5521 timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
5522 timing_out->v_total = mode_in->vtotal;
5523 timing_out->v_addressable = mode_in->vdisplay;
5524 timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
5525 timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
5526 timing_out->pix_clk_100hz = mode_in->clock * 10;
5527 } else {
5528 timing_out->h_addressable = mode_in->crtc_hdisplay;
5529 timing_out->h_total = mode_in->crtc_htotal;
5530 timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
5531 timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
5532 timing_out->v_total = mode_in->crtc_vtotal;
5533 timing_out->v_addressable = mode_in->crtc_vdisplay;
5534 timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
5535 timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
5536 timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
5537 }
5538
5539 timing_out->aspect_ratio = get_aspect_ratio(mode_in);
5540
5541 stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
5542 stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
5543 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5544 if (!adjust_colour_depth_from_display_info(timing_out, info) &&
5545 drm_mode_is_420_also(info, mode_in) &&
5546 timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
5547 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5548 adjust_colour_depth_from_display_info(timing_out, info);
5549 }
5550 }
5551
5552 stream->output_color_space = get_output_color_space(timing_out, connector_state);
5553 }
5554
5555 static void fill_audio_info(struct audio_info *audio_info,
5556 const struct drm_connector *drm_connector,
5557 const struct dc_sink *dc_sink)
5558 {
5559 int i = 0;
5560 int cea_revision = 0;
5561 const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
5562
5563 audio_info->manufacture_id = edid_caps->manufacturer_id;
5564 audio_info->product_id = edid_caps->product_id;
5565
5566 cea_revision = drm_connector->display_info.cea_rev;
5567
5568 strscpy(audio_info->display_name,
5569 edid_caps->display_name,
5570 AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
5571
5572 if (cea_revision >= 3) {
5573 audio_info->mode_count = edid_caps->audio_mode_count;
5574
5575 for (i = 0; i < audio_info->mode_count; ++i) {
5576 audio_info->modes[i].format_code =
5577 (enum audio_format_code)
5578 (edid_caps->audio_modes[i].format_code);
5579 audio_info->modes[i].channel_count =
5580 edid_caps->audio_modes[i].channel_count;
5581 audio_info->modes[i].sample_rates.all =
5582 edid_caps->audio_modes[i].sample_rate;
5583 audio_info->modes[i].sample_size =
5584 edid_caps->audio_modes[i].sample_size;
5585 }
5586 }
5587
5588 audio_info->flags.all = edid_caps->speaker_flags;
5589
5590 /* TODO: We only check for the progressive mode, check for interlace mode too */
5591 if (drm_connector->latency_present[0]) {
5592 audio_info->video_latency = drm_connector->video_latency[0];
5593 audio_info->audio_latency = drm_connector->audio_latency[0];
5594 }
5595
5596 /* TODO: For DP, video and audio latency should be calculated from DPCD caps */
5597
5598 }
5599
5600 static void
5601 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
5602 struct drm_display_mode *dst_mode)
5603 {
5604 dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
5605 dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
5606 dst_mode->crtc_clock = src_mode->crtc_clock;
5607 dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
5608 dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
5609 dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start;
5610 dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
5611 dst_mode->crtc_htotal = src_mode->crtc_htotal;
5612 dst_mode->crtc_hskew = src_mode->crtc_hskew;
5613 dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
5614 dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
5615 dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
5616 dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
5617 dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
5618 }
5619
5620 static void
5621 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
5622 const struct drm_display_mode *native_mode,
5623 bool scale_enabled)
5624 {
5625 if (scale_enabled) {
5626 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5627 } else if (native_mode->clock == drm_mode->clock &&
5628 native_mode->htotal == drm_mode->htotal &&
5629 native_mode->vtotal == drm_mode->vtotal) {
5630 copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5631 } else {
5632 /* no scaling and no amdgpu-inserted mode, nothing to patch */
5633 }
5634 }
5635
5636 static struct dc_sink *
5637 create_fake_sink(struct amdgpu_dm_connector *aconnector)
5638 {
5639 struct dc_sink_init_data sink_init_data = { 0 };
5640 struct dc_sink *sink = NULL;
5641
5642 sink_init_data.link = aconnector->dc_link;
5643 sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
5644
5645 sink = dc_sink_create(&sink_init_data);
5646 if (!sink) {
5647 DRM_ERROR("Failed to create sink!\n");
5648 return NULL;
5649 }
5650 sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
5651
5652 return sink;
5653 }
5654
5655 static void set_multisync_trigger_params(
5656 struct dc_stream_state *stream)
5657 {
5658 struct dc_stream_state *master = NULL;
5659
5660 if (stream->triggered_crtc_reset.enabled) {
5661 master = stream->triggered_crtc_reset.event_source;
5662 stream->triggered_crtc_reset.event =
5663 master->timing.flags.VSYNC_POSITIVE_POLARITY ?
5664 CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
5665 stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
5666 }
5667 }
5668
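/*
 * Pick the enabled stream with the highest refresh rate as the multisync
 * master and point every synced stream's CRTC-reset event source at it.
 */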
5669 static void set_master_stream(struct dc_stream_state *stream_set[],
5670 int stream_count)
5671 {
5672 int j, highest_rfr = 0, master_stream = 0;
5673
5674 for (j = 0; j < stream_count; j++) {
5675 if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
5676 int refresh_rate = 0;
5677
5678 refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
5679 (stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
5680 if (refresh_rate > highest_rfr) {
5681 highest_rfr = refresh_rate;
5682 master_stream = j;
5683 }
5684 }
5685 }
5686 for (j = 0; j < stream_count; j++) {
5687 if (stream_set[j])
5688 stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
5689 }
5690 }
5691
5692 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
5693 {
5694 int i = 0;
5695 struct dc_stream_state *stream;
5696
5697 if (context->stream_count < 2)
5698 return;
5699 for (i = 0; i < context->stream_count ; i++) {
5700 if (!context->streams[i])
5701 continue;
5702 /*
5703 * TODO: add a function to read AMD VSDB bits and set
5704 * crtc_sync_master.multi_sync_enabled flag
5705 * For now it's set to false
5706 */
5707 }
5708
5709 set_master_stream(context->streams, context->stream_count);
5710
5711 for (i = 0; i < context->stream_count ; i++) {
5712 stream = context->streams[i];
5713
5714 if (!stream)
5715 continue;
5716
5717 set_multisync_trigger_params(stream);
5718 }
5719 }
5720
5721 /**
5722 * DOC: FreeSync Video
5723 *
5724 * When a userspace application wants to play a video, the content follows a
5725 * standard format definition that usually specifies the FPS for that format.
5726 * The list below illustrates some video formats and the expected FPS,
5727 * respectively:
5728 *
5729 * - TV/NTSC (23.976 FPS)
5730 * - Cinema (24 FPS)
5731 * - TV/PAL (25 FPS)
5732 * - TV/NTSC (29.97 FPS)
5733 * - TV/NTSC (30 FPS)
5734 * - Cinema HFR (48 FPS)
5735 * - TV/PAL (50 FPS)
5736 * - Commonly used (60 FPS)
5737 * - Multiples of 24 (48,72,96 FPS)
5738 *
5739 * The list of standard video formats is not huge and can be added to the
5740 * connector modeset list beforehand. With that, userspace can leverage
5741 * FreeSync to extend the front porch in order to attain the target refresh
5742 * rate. Such a switch will happen seamlessly, without screen blanking or
5743 * reprogramming of the output in any other way. If the userspace requests a
5744 * modesetting change compatible with FreeSync modes that only differ in the
5745 * refresh rate, DC will skip the full update and avoid blink during the
5746 * transition. For example, the video player can change the modesetting from
5747 * 60Hz to 30Hz for playing TV/NTSC content when it goes full screen without
5748 * causing any display blink. This same concept can be applied to a mode
5749 * setting change.
5750 */
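/*
 * Return the mode that matches the preferred mode's resolution but has the
 * highest refresh rate, caching the result in aconnector->freesync_vid_base.
 */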
5751 static struct drm_display_mode *
5752 get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
5753 bool use_probed_modes)
5754 {
5755 struct drm_display_mode *m, *m_pref = NULL;
5756 u16 current_refresh, highest_refresh;
5757 struct list_head *list_head = use_probed_modes ?
5758 &aconnector->base.probed_modes :
5759 &aconnector->base.modes;
5760
5761 if (aconnector->freesync_vid_base.clock != 0)
5762 return &aconnector->freesync_vid_base;
5763
5764 /* Find the preferred mode */
5765 list_for_each_entry(m, list_head, head) {
5766 if (m->type & DRM_MODE_TYPE_PREFERRED) {
5767 m_pref = m;
5768 break;
5769 }
5770 }
5771
5772 if (!m_pref) {
5773 /* Probably an EDID with no preferred mode. Fall back to the first entry */
5774 m_pref = list_first_entry_or_null(
5775 &aconnector->base.modes, struct drm_display_mode, head);
5776 if (!m_pref) {
5777 DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
5778 return NULL;
5779 }
5780 }
5781
5782 highest_refresh = drm_mode_vrefresh(m_pref);
5783
5784 /*
5785 * Find the mode with highest refresh rate with same resolution.
5786 * For some monitors, preferred mode is not the mode with highest
5787 * supported refresh rate.
5788 */
5789 list_for_each_entry(m, list_head, head) {
5790 current_refresh = drm_mode_vrefresh(m);
5791
5792 if (m->hdisplay == m_pref->hdisplay &&
5793 m->vdisplay == m_pref->vdisplay &&
5794 highest_refresh < current_refresh) {
5795 highest_refresh = current_refresh;
5796 m_pref = m;
5797 }
5798 }
5799
5800 drm_mode_copy(&aconnector->freesync_vid_base, m_pref);
5801 return m_pref;
5802 }
5803
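/*
 * A mode qualifies as a FreeSync video mode when it matches the highest
 * refresh rate base mode in everything except the vertical blanking
 * interval, i.e. only the vertical front porch is extended.
 */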
5804 static bool is_freesync_video_mode(const struct drm_display_mode *mode,
5805 struct amdgpu_dm_connector *aconnector)
5806 {
5807 struct drm_display_mode *high_mode;
5808 int timing_diff;
5809
5810 high_mode = get_highest_refresh_rate_mode(aconnector, false);
5811 if (!high_mode || !mode)
5812 return false;
5813
5814 timing_diff = high_mode->vtotal - mode->vtotal;
5815
5816 if (high_mode->clock == 0 || high_mode->clock != mode->clock ||
5817 high_mode->hdisplay != mode->hdisplay ||
5818 high_mode->vdisplay != mode->vdisplay ||
5819 high_mode->hsync_start != mode->hsync_start ||
5820 high_mode->hsync_end != mode->hsync_end ||
5821 high_mode->htotal != mode->htotal ||
5822 high_mode->hskew != mode->hskew ||
5823 high_mode->vscan != mode->vscan ||
5824 high_mode->vsync_start - mode->vsync_start != timing_diff ||
5825 high_mode->vsync_end - mode->vsync_end != timing_diff)
5826 return false;
5827 else
5828 return true;
5829 }
5830
5831 static void update_dsc_caps(struct amdgpu_dm_connector *aconnector,
5832 struct dc_sink *sink, struct dc_stream_state *stream,
5833 struct dsc_dec_dpcd_caps *dsc_caps)
5834 {
5835 stream->timing.flags.DSC = 0;
5836 dsc_caps->is_dsc_supported = false;
5837
5838 if (aconnector->dc_link && (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT ||
5839 sink->sink_signal == SIGNAL_TYPE_EDP)) {
5840 if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE ||
5841 sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER)
5842 dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
5843 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
5844 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
5845 dsc_caps);
5846 }
5847 }
5848
5849
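/*
 * eDP DSC policy: if the stream compressed at a fixed 8 bpp already fits
 * within the link bandwidth, use that configuration; otherwise compute a
 * DSC configuration constrained by the full link bandwidth.
 */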
5850 static void apply_dsc_policy_for_edp(struct amdgpu_dm_connector *aconnector,
5851 struct dc_sink *sink, struct dc_stream_state *stream,
5852 struct dsc_dec_dpcd_caps *dsc_caps,
5853 uint32_t max_dsc_target_bpp_limit_override)
5854 {
5855 const struct dc_link_settings *verified_link_cap = NULL;
5856 u32 link_bw_in_kbps;
5857 u32 edp_min_bpp_x16, edp_max_bpp_x16;
5858 struct dc *dc = sink->ctx->dc;
5859 struct dc_dsc_bw_range bw_range = {0};
5860 struct dc_dsc_config dsc_cfg = {0};
5861 struct dc_dsc_config_options dsc_options = {0};
5862
5863 dc_dsc_get_default_config_option(dc, &dsc_options);
5864 dsc_options.max_target_bpp_limit_override_x16 = max_dsc_target_bpp_limit_override * 16;
5865
5866 verified_link_cap = dc_link_get_link_cap(stream->link);
5867 link_bw_in_kbps = dc_link_bandwidth_kbps(stream->link, verified_link_cap);
5868 edp_min_bpp_x16 = 8 * 16;
5869 edp_max_bpp_x16 = 8 * 16;
5870
5871 if (edp_max_bpp_x16 > dsc_caps->edp_max_bits_per_pixel)
5872 edp_max_bpp_x16 = dsc_caps->edp_max_bits_per_pixel;
5873
5874 if (edp_max_bpp_x16 < edp_min_bpp_x16)
5875 edp_min_bpp_x16 = edp_max_bpp_x16;
5876
5877 if (dc_dsc_compute_bandwidth_range(dc->res_pool->dscs[0],
5878 dc->debug.dsc_min_slice_height_override,
5879 edp_min_bpp_x16, edp_max_bpp_x16,
5880 dsc_caps,
5881 &stream->timing,
5882 dc_link_get_highest_encoding_format(aconnector->dc_link),
5883 &bw_range)) {
5884
5885 if (bw_range.max_kbps < link_bw_in_kbps) {
5886 if (dc_dsc_compute_config(dc->res_pool->dscs[0],
5887 dsc_caps,
5888 &dsc_options,
5889 0,
5890 &stream->timing,
5891 dc_link_get_highest_encoding_format(aconnector->dc_link),
5892 &dsc_cfg)) {
5893 stream->timing.dsc_cfg = dsc_cfg;
5894 stream->timing.flags.DSC = 1;
5895 stream->timing.dsc_cfg.bits_per_pixel = edp_max_bpp_x16;
5896 }
5897 return;
5898 }
5899 }
5900
5901 if (dc_dsc_compute_config(dc->res_pool->dscs[0],
5902 dsc_caps,
5903 &dsc_options,
5904 link_bw_in_kbps,
5905 &stream->timing,
5906 dc_link_get_highest_encoding_format(aconnector->dc_link),
5907 &dsc_cfg)) {
5908 stream->timing.dsc_cfg = dsc_cfg;
5909 stream->timing.flags.DSC = 1;
5910 }
5911 }
5912
5913
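/*
 * SST DSC policy: decide whether to enable DSC for eDP, DP SST and
 * DP-to-HDMI PCON sinks, then apply any debugfs overrides for forced
 * enable, slice counts and bits per pixel.
 */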
5914 static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
5915 struct dc_sink *sink, struct dc_stream_state *stream,
5916 struct dsc_dec_dpcd_caps *dsc_caps)
5917 {
5918 struct drm_connector *drm_connector = &aconnector->base;
5919 u32 link_bandwidth_kbps;
5920 struct dc *dc = sink->ctx->dc;
5921 u32 max_supported_bw_in_kbps, timing_bw_in_kbps;
5922 u32 dsc_max_supported_bw_in_kbps;
5923 u32 max_dsc_target_bpp_limit_override =
5924 drm_connector->display_info.max_dsc_bpp;
5925 struct dc_dsc_config_options dsc_options = {0};
5926
5927 dc_dsc_get_default_config_option(dc, &dsc_options);
5928 dsc_options.max_target_bpp_limit_override_x16 = max_dsc_target_bpp_limit_override * 16;
5929
5930 link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
5931 dc_link_get_link_cap(aconnector->dc_link));
5932
5933 /* Set DSC policy according to dsc_clock_en */
5934 dc_dsc_policy_set_enable_dsc_when_not_needed(
5935 aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
5936
5937 if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_EDP &&
5938 !aconnector->dc_link->panel_config.dsc.disable_dsc_edp &&
5939 dc->caps.edp_dsc_support && aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE) {
5940
5941 apply_dsc_policy_for_edp(aconnector, sink, stream, dsc_caps, max_dsc_target_bpp_limit_override);
5942
5943 } else if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
5944 if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE) {
5945 if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
5946 dsc_caps,
5947 &dsc_options,
5948 link_bandwidth_kbps,
5949 &stream->timing,
5950 dc_link_get_highest_encoding_format(aconnector->dc_link),
5951 &stream->timing.dsc_cfg)) {
5952 stream->timing.flags.DSC = 1;
5953 DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from SST RX\n", __func__, drm_connector->name);
5954 }
5955 } else if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) {
5956 timing_bw_in_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing,
5957 dc_link_get_highest_encoding_format(aconnector->dc_link));
5958 max_supported_bw_in_kbps = link_bandwidth_kbps;
5959 dsc_max_supported_bw_in_kbps = link_bandwidth_kbps;
5960
5961 if (timing_bw_in_kbps > max_supported_bw_in_kbps &&
5962 max_supported_bw_in_kbps > 0 &&
5963 dsc_max_supported_bw_in_kbps > 0)
5964 if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
5965 dsc_caps,
5966 &dsc_options,
5967 dsc_max_supported_bw_in_kbps,
5968 &stream->timing,
5969 dc_link_get_highest_encoding_format(aconnector->dc_link),
5970 &stream->timing.dsc_cfg)) {
5971 stream->timing.flags.DSC = 1;
5972 DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from DP-HDMI PCON\n",
5973 __func__, drm_connector->name);
5974 }
5975 }
5976 }
5977
5978 /* Overwrite the stream flag if DSC is enabled through debugfs */
5979 if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
5980 stream->timing.flags.DSC = 1;
5981
5982 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
5983 stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
5984
5985 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
5986 stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
5987
5988 if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
5989 stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
5990 }
5991
5992 static struct dc_stream_state *
5993 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
5994 const struct drm_display_mode *drm_mode,
5995 const struct dm_connector_state *dm_state,
5996 const struct dc_stream_state *old_stream,
5997 int requested_bpc)
5998 {
5999 struct drm_display_mode *preferred_mode = NULL;
6000 struct drm_connector *drm_connector;
6001 const struct drm_connector_state *con_state = &dm_state->base;
6002 struct dc_stream_state *stream = NULL;
6003 struct drm_display_mode mode;
6004 struct drm_display_mode saved_mode;
6005 struct drm_display_mode *freesync_mode = NULL;
6006 bool native_mode_found = false;
6007 bool recalculate_timing = false;
6008 bool scale = dm_state->scaling != RMX_OFF;
6009 int mode_refresh;
6010 int preferred_refresh = 0;
6011 enum color_transfer_func tf = TRANSFER_FUNC_UNKNOWN;
6012 struct dsc_dec_dpcd_caps dsc_caps;
6013
6014 struct dc_sink *sink = NULL;
6015
6016 drm_mode_init(&mode, drm_mode);
6017 memset(&saved_mode, 0, sizeof(saved_mode));
6018
6019 if (aconnector == NULL) {
6020 DRM_ERROR("aconnector is NULL!\n");
6021 return stream;
6022 }
6023
6024 drm_connector = &aconnector->base;
6025
6026 if (!aconnector->dc_sink) {
6027 sink = create_fake_sink(aconnector);
6028 if (!sink)
6029 return stream;
6030 } else {
6031 sink = aconnector->dc_sink;
6032 dc_sink_retain(sink);
6033 }
6034
6035 stream = dc_create_stream_for_sink(sink);
6036
6037 if (stream == NULL) {
6038 DRM_ERROR("Failed to create stream for sink!\n");
6039 goto finish;
6040 }
6041
6042 stream->dm_stream_context = aconnector;
6043
6044 stream->timing.flags.LTE_340MCSC_SCRAMBLE =
6045 drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
6046
6047 list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
6048 /* Search for preferred mode */
6049 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
6050 native_mode_found = true;
6051 break;
6052 }
6053 }
6054 if (!native_mode_found)
6055 preferred_mode = list_first_entry_or_null(
6056 &aconnector->base.modes,
6057 struct drm_display_mode,
6058 head);
6059
6060 mode_refresh = drm_mode_vrefresh(&mode);
6061
6062 if (preferred_mode == NULL) {
6063 /*
6064 * This may not be an error, the use case is when we have no
6065 * usermode calls to reset and set mode upon hotplug. In this
6066 * case, we call set mode ourselves to restore the previous mode
6067 * and the modelist may not be filled in time.
6068 */
6069 DRM_DEBUG_DRIVER("No preferred mode found\n");
6070 } else {
6071 recalculate_timing = is_freesync_video_mode(&mode, aconnector);
6072 if (recalculate_timing) {
6073 freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
6074 drm_mode_copy(&saved_mode, &mode);
6075 saved_mode.picture_aspect_ratio = mode.picture_aspect_ratio;
6076 drm_mode_copy(&mode, freesync_mode);
6077 mode.picture_aspect_ratio = saved_mode.picture_aspect_ratio;
6078 } else {
6079 decide_crtc_timing_for_drm_display_mode(
6080 &mode, preferred_mode, scale);
6081
6082 preferred_refresh = drm_mode_vrefresh(preferred_mode);
6083 }
6084 }
6085
6086 if (recalculate_timing)
6087 drm_mode_set_crtcinfo(&saved_mode, 0);
6088
6089 /*
6090 * If scaling is enabled and the refresh rate didn't change,
6091 * we copy the vic and polarities of the old timings
6092 */
6093 if (!scale || mode_refresh != preferred_refresh)
6094 fill_stream_properties_from_drm_display_mode(
6095 stream, &mode, &aconnector->base, con_state, NULL,
6096 requested_bpc);
6097 else
6098 fill_stream_properties_from_drm_display_mode(
6099 stream, &mode, &aconnector->base, con_state, old_stream,
6100 requested_bpc);
6101
6102 if (aconnector->timing_changed) {
6103 DC_LOG_DEBUG("%s: overriding timing for automated test, bpc %d, changing to %d\n",
6104 __func__,
6105 stream->timing.display_color_depth,
6106 aconnector->timing_requested->display_color_depth);
6107 stream->timing = *aconnector->timing_requested;
6108 }
6109
6110 /* SST DSC determination policy */
6111 update_dsc_caps(aconnector, sink, stream, &dsc_caps);
6112 if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported)
6113 apply_dsc_policy_for_stream(aconnector, sink, stream, &dsc_caps);
6114
6115 update_stream_scaling_settings(&mode, dm_state, stream);
6116
6117 fill_audio_info(
6118 &stream->audio_info,
6119 drm_connector,
6120 sink);
6121
6122 update_stream_signal(stream, sink);
6123
6124 if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
6125 mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
6126 else if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT ||
6127 stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST ||
6128 stream->signal == SIGNAL_TYPE_EDP) {
6129 //
6130 // Decide whether the stream supports VSC SDP colorimetry
6131 // before building the VSC info packet
6132 //
6133 stream->use_vsc_sdp_for_colorimetry = false;
6134 if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
6135 stream->use_vsc_sdp_for_colorimetry =
6136 aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
6137 } else {
6138 if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
6139 stream->use_vsc_sdp_for_colorimetry = true;
6140 }
6141 if (stream->out_transfer_func->tf == TRANSFER_FUNCTION_GAMMA22)
6142 tf = TRANSFER_FUNC_GAMMA_22;
6143 mod_build_vsc_infopacket(stream, &stream->vsc_infopacket, stream->output_color_space, tf);
6144
6145 if (stream->link->psr_settings.psr_feature_enabled)
6146 aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
6147 }
6148 finish:
6149 dc_sink_release(sink);
6150
6151 return stream;
6152 }
6153
6154 static enum drm_connector_status
6155 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
6156 {
6157 bool connected;
6158 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6159
6160 /*
6161 * Notes:
6162 * 1. This interface is NOT called in context of HPD irq.
6163 * 2. This interface *is called* in context of user-mode ioctl. Which
6164 * makes it a bad place for *any* MST-related activity.
6165 */
6166
6167 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
6168 !aconnector->fake_enable)
6169 connected = (aconnector->dc_sink != NULL);
6170 else
6171 connected = (aconnector->base.force == DRM_FORCE_ON ||
6172 aconnector->base.force == DRM_FORCE_ON_DIGITAL);
6173
6174 update_subconnector_property(aconnector);
6175
6176 return (connected ? connector_status_connected :
6177 connector_status_disconnected);
6178 }
6179
6180 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
6181 struct drm_connector_state *connector_state,
6182 struct drm_property *property,
6183 uint64_t val)
6184 {
6185 struct drm_device *dev = connector->dev;
6186 struct amdgpu_device *adev = drm_to_adev(dev);
6187 struct dm_connector_state *dm_old_state =
6188 to_dm_connector_state(connector->state);
6189 struct dm_connector_state *dm_new_state =
6190 to_dm_connector_state(connector_state);
6191
6192 int ret = -EINVAL;
6193
6194 if (property == dev->mode_config.scaling_mode_property) {
6195 enum amdgpu_rmx_type rmx_type;
6196
6197 switch (val) {
6198 case DRM_MODE_SCALE_CENTER:
6199 rmx_type = RMX_CENTER;
6200 break;
6201 case DRM_MODE_SCALE_ASPECT:
6202 rmx_type = RMX_ASPECT;
6203 break;
6204 case DRM_MODE_SCALE_FULLSCREEN:
6205 rmx_type = RMX_FULL;
6206 break;
6207 case DRM_MODE_SCALE_NONE:
6208 default:
6209 rmx_type = RMX_OFF;
6210 break;
6211 }
6212
6213 if (dm_old_state->scaling == rmx_type)
6214 return 0;
6215
6216 dm_new_state->scaling = rmx_type;
6217 ret = 0;
6218 } else if (property == adev->mode_info.underscan_hborder_property) {
6219 dm_new_state->underscan_hborder = val;
6220 ret = 0;
6221 } else if (property == adev->mode_info.underscan_vborder_property) {
6222 dm_new_state->underscan_vborder = val;
6223 ret = 0;
6224 } else if (property == adev->mode_info.underscan_property) {
6225 dm_new_state->underscan_enable = val;
6226 ret = 0;
6227 } else if (property == adev->mode_info.abm_level_property) {
6228 dm_new_state->abm_level = val ?: ABM_LEVEL_IMMEDIATE_DISABLE;
6229 ret = 0;
6230 }
6231
6232 return ret;
6233 }
6234
6235 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
6236 const struct drm_connector_state *state,
6237 struct drm_property *property,
6238 uint64_t *val)
6239 {
6240 struct drm_device *dev = connector->dev;
6241 struct amdgpu_device *adev = drm_to_adev(dev);
6242 struct dm_connector_state *dm_state =
6243 to_dm_connector_state(state);
6244 int ret = -EINVAL;
6245
6246 if (property == dev->mode_config.scaling_mode_property) {
6247 switch (dm_state->scaling) {
6248 case RMX_CENTER:
6249 *val = DRM_MODE_SCALE_CENTER;
6250 break;
6251 case RMX_ASPECT:
6252 *val = DRM_MODE_SCALE_ASPECT;
6253 break;
6254 case RMX_FULL:
6255 *val = DRM_MODE_SCALE_FULLSCREEN;
6256 break;
6257 case RMX_OFF:
6258 default:
6259 *val = DRM_MODE_SCALE_NONE;
6260 break;
6261 }
6262 ret = 0;
6263 } else if (property == adev->mode_info.underscan_hborder_property) {
6264 *val = dm_state->underscan_hborder;
6265 ret = 0;
6266 } else if (property == adev->mode_info.underscan_vborder_property) {
6267 *val = dm_state->underscan_vborder;
6268 ret = 0;
6269 } else if (property == adev->mode_info.underscan_property) {
6270 *val = dm_state->underscan_enable;
6271 ret = 0;
6272 } else if (property == adev->mode_info.abm_level_property) {
6273 *val = (dm_state->abm_level != ABM_LEVEL_IMMEDIATE_DISABLE) ?
6274 dm_state->abm_level : 0;
6275 ret = 0;
6276 }
6277
6278 return ret;
6279 }
6280
6281 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
6282 {
6283 struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
6284
6285 drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
6286 }
6287
6288 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
6289 {
6290 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6291 struct amdgpu_device *adev = drm_to_adev(connector->dev);
6292 struct amdgpu_display_manager *dm = &adev->dm;
6293
6294 /*
6295 * Call only if mst_mgr was initialized before since it's not done
6296 * for all connector types.
6297 */
6298 if (aconnector->mst_mgr.dev)
6299 drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
6300
6301 if (aconnector->bl_idx != -1) {
6302 backlight_device_unregister(dm->backlight_dev[aconnector->bl_idx]);
6303 dm->backlight_dev[aconnector->bl_idx] = NULL;
6304 }
6305
6306 if (aconnector->dc_em_sink)
6307 dc_sink_release(aconnector->dc_em_sink);
6308 aconnector->dc_em_sink = NULL;
6309 if (aconnector->dc_sink)
6310 dc_sink_release(aconnector->dc_sink);
6311 aconnector->dc_sink = NULL;
6312
6313 drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
6314 drm_connector_unregister(connector);
6315 drm_connector_cleanup(connector);
6316 if (aconnector->i2c) {
6317 i2c_del_adapter(&aconnector->i2c->base);
6318 kfree(aconnector->i2c);
6319 }
6320 kfree(aconnector->dm_dp_aux.aux.name);
6321
6322 kfree(connector);
6323 }
6324
6325 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
6326 {
6327 struct dm_connector_state *state =
6328 to_dm_connector_state(connector->state);
6329
6330 if (connector->state)
6331 __drm_atomic_helper_connector_destroy_state(connector->state);
6332
6333 kfree(state);
6334
6335 state = kzalloc(sizeof(*state), GFP_KERNEL);
6336
6337 if (state) {
6338 state->scaling = RMX_OFF;
6339 state->underscan_enable = false;
6340 state->underscan_hborder = 0;
6341 state->underscan_vborder = 0;
6342 state->base.max_requested_bpc = 8;
6343 state->vcpi_slots = 0;
6344 state->pbn = 0;
6345
6346 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
6347 state->abm_level = amdgpu_dm_abm_level ?:
6348 ABM_LEVEL_IMMEDIATE_DISABLE;
6349
6350 __drm_atomic_helper_connector_reset(connector, &state->base);
6351 }
6352 }
6353
6354 struct drm_connector_state *
6355 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
6356 {
6357 struct dm_connector_state *state =
6358 to_dm_connector_state(connector->state);
6359
6360 struct dm_connector_state *new_state =
6361 kmemdup(state, sizeof(*state), GFP_KERNEL);
6362
6363 if (!new_state)
6364 return NULL;
6365
6366 __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
6367
6368 new_state->freesync_capable = state->freesync_capable;
6369 new_state->abm_level = state->abm_level;
6370 new_state->scaling = state->scaling;
6371 new_state->underscan_enable = state->underscan_enable;
6372 new_state->underscan_hborder = state->underscan_hborder;
6373 new_state->underscan_vborder = state->underscan_vborder;
6374 new_state->vcpi_slots = state->vcpi_slots;
6375 new_state->pbn = state->pbn;
6376 return &new_state->base;
6377 }
6378
6379 static int
6380 amdgpu_dm_connector_late_register(struct drm_connector *connector)
6381 {
6382 struct amdgpu_dm_connector *amdgpu_dm_connector =
6383 to_amdgpu_dm_connector(connector);
6384 int r;
6385
6386 amdgpu_dm_register_backlight_device(amdgpu_dm_connector);
6387
6388 if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
6389 (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
6390 amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
6391 r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
6392 if (r)
6393 return r;
6394 }
6395
6396 #if defined(CONFIG_DEBUG_FS)
6397 connector_debugfs_init(amdgpu_dm_connector);
6398 #endif
6399
6400 return 0;
6401 }
6402
6403 static void amdgpu_dm_connector_funcs_force(struct drm_connector *connector)
6404 {
6405 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6406 struct dc_link *dc_link = aconnector->dc_link;
6407 struct dc_sink *dc_em_sink = aconnector->dc_em_sink;
6408 struct edid *edid;
6409
6410 if (!connector->edid_override)
6411 return;
6412
6413 drm_edid_override_connector_update(&aconnector->base);
6414 edid = aconnector->base.edid_blob_ptr->data;
6415 aconnector->edid = edid;
6416
6417 /* Update emulated (virtual) sink's EDID */
6418 if (dc_em_sink && dc_link) {
6419 memset(&dc_em_sink->edid_caps, 0, sizeof(struct dc_edid_caps));
6420 memmove(dc_em_sink->dc_edid.raw_edid, edid, (edid->extensions + 1) * EDID_LENGTH);
6421 dm_helpers_parse_edid_caps(
6422 dc_link,
6423 &dc_em_sink->dc_edid,
6424 &dc_em_sink->edid_caps);
6425 }
6426 }
6427
6428 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
6429 .reset = amdgpu_dm_connector_funcs_reset,
6430 .detect = amdgpu_dm_connector_detect,
6431 .fill_modes = drm_helper_probe_single_connector_modes,
6432 .destroy = amdgpu_dm_connector_destroy,
6433 .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
6434 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
6435 .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
6436 .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
6437 .late_register = amdgpu_dm_connector_late_register,
6438 .early_unregister = amdgpu_dm_connector_unregister,
6439 .force = amdgpu_dm_connector_funcs_force
6440 };
6441
6442 static int get_modes(struct drm_connector *connector)
6443 {
6444 return amdgpu_dm_connector_get_modes(connector);
6445 }
6446
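/*
 * Create an emulated (virtual) sink from the connector's EDID blob or
 * EDID override so a forced-on connector can be driven without a
 * physically detected sink.
 */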
6447 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
6448 {
6449 struct dc_sink_init_data init_params = {
6450 .link = aconnector->dc_link,
6451 .sink_signal = SIGNAL_TYPE_VIRTUAL
6452 };
6453 struct edid *edid;
6454
6455 if (!aconnector->base.edid_blob_ptr) {
6456 /* if connector->edid_override is valid, copy
6457 * it into edid_blob_ptr
6458 */
6459
6460 drm_edid_override_connector_update(&aconnector->base);
6461
6462 if (!aconnector->base.edid_blob_ptr) {
6463 DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n",
6464 aconnector->base.name);
6465
6466 aconnector->base.force = DRM_FORCE_OFF;
6467 return;
6468 }
6469 }
6470
6471 edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
6472
6473 aconnector->edid = edid;
6474
6475 aconnector->dc_em_sink = dc_link_add_remote_sink(
6476 aconnector->dc_link,
6477 (uint8_t *)edid,
6478 (edid->extensions + 1) * EDID_LENGTH,
6479 &init_params);
6480
6481 if (aconnector->base.force == DRM_FORCE_ON) {
6482 aconnector->dc_sink = aconnector->dc_link->local_sink ?
6483 aconnector->dc_link->local_sink :
6484 aconnector->dc_em_sink;
6485 dc_sink_retain(aconnector->dc_sink);
6486 }
6487 }
6488
6489 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
6490 {
6491 struct dc_link *link = (struct dc_link *)aconnector->dc_link;
6492
6493 /*
6494 * In case of headless boot with force on for a DP managed connector,
6495 * those settings have to be != 0 to get an initial modeset.
6496 */
6497 if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6498 link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
6499 link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
6500 }
6501
6502 create_eml_sink(aconnector);
6503 }
6504
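/*
 * Validate the stream against a throwaway DC state with a single
 * full-stream ARGB8888 plane attached, so bandwidth and resource
 * limitations are caught during mode validation.
 */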
6505 static enum dc_status dm_validate_stream_and_context(struct dc *dc,
6506 struct dc_stream_state *stream)
6507 {
6508 enum dc_status dc_result = DC_ERROR_UNEXPECTED;
6509 struct dc_plane_state *dc_plane_state = NULL;
6510 struct dc_state *dc_state = NULL;
6511
6512 if (!stream)
6513 goto cleanup;
6514
6515 dc_plane_state = dc_create_plane_state(dc);
6516 if (!dc_plane_state)
6517 goto cleanup;
6518
6519 dc_state = dc_create_state(dc);
6520 if (!dc_state)
6521 goto cleanup;
6522
6523 /* populate stream to plane */
6524 dc_plane_state->src_rect.height = stream->src.height;
6525 dc_plane_state->src_rect.width = stream->src.width;
6526 dc_plane_state->dst_rect.height = stream->src.height;
6527 dc_plane_state->dst_rect.width = stream->src.width;
6528 dc_plane_state->clip_rect.height = stream->src.height;
6529 dc_plane_state->clip_rect.width = stream->src.width;
6530 dc_plane_state->plane_size.surface_pitch = ((stream->src.width + 255) / 256) * 256;
6531 dc_plane_state->plane_size.surface_size.height = stream->src.height;
6532 dc_plane_state->plane_size.surface_size.width = stream->src.width;
6533 dc_plane_state->plane_size.chroma_size.height = stream->src.height;
6534 dc_plane_state->plane_size.chroma_size.width = stream->src.width;
6535 dc_plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
6536 dc_plane_state->tiling_info.gfx9.swizzle = DC_SW_UNKNOWN;
6537 dc_plane_state->rotation = ROTATION_ANGLE_0;
6538 dc_plane_state->is_tiling_rotated = false;
6539 dc_plane_state->tiling_info.gfx8.array_mode = DC_ARRAY_LINEAR_GENERAL;
6540
6541 dc_result = dc_validate_stream(dc, stream);
6542 if (dc_result == DC_OK)
6543 dc_result = dc_validate_plane(dc, dc_plane_state);
6544
6545 if (dc_result == DC_OK)
6546 dc_result = dc_add_stream_to_ctx(dc, dc_state, stream);
6547
6548 if (dc_result == DC_OK && !dc_add_plane_to_context(
6549 dc,
6550 stream,
6551 dc_plane_state,
6552 dc_state))
6553 dc_result = DC_FAIL_ATTACH_SURFACES;
6554
6555 if (dc_result == DC_OK)
6556 dc_result = dc_validate_global_state(dc, dc_state, true);
6557
6558 cleanup:
6559 if (dc_state)
6560 dc_release_state(dc_state);
6561
6562 if (dc_plane_state)
6563 dc_plane_state_release(dc_plane_state);
6564
6565 return dc_result;
6566 }
6567
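/*
 * Create a stream for the sink and validate it with DC, retrying at
 * progressively lower bpc (down to 6) and, if encoder validation fails,
 * once more with YCbCr 4:2:0 output forced.
 */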
6568 struct dc_stream_state *
6569 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6570 const struct drm_display_mode *drm_mode,
6571 const struct dm_connector_state *dm_state,
6572 const struct dc_stream_state *old_stream)
6573 {
6574 struct drm_connector *connector = &aconnector->base;
6575 struct amdgpu_device *adev = drm_to_adev(connector->dev);
6576 struct dc_stream_state *stream;
6577 const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
6578 int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
6579 enum dc_status dc_result = DC_OK;
6580
6581 do {
6582 stream = create_stream_for_sink(aconnector, drm_mode,
6583 dm_state, old_stream,
6584 requested_bpc);
6585 if (stream == NULL) {
6586 DRM_ERROR("Failed to create stream for sink!\n");
6587 break;
6588 }
6589
6590 dc_result = dc_validate_stream(adev->dm.dc, stream);
6591 if (dc_result == DC_OK && stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
6592 dc_result = dm_dp_mst_is_port_support_mode(aconnector, stream);
6593
6594 if (dc_result == DC_OK)
6595 dc_result = dm_validate_stream_and_context(adev->dm.dc, stream);
6596
6597 if (dc_result != DC_OK) {
6598 DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
6599 drm_mode->hdisplay,
6600 drm_mode->vdisplay,
6601 drm_mode->clock,
6602 dc_result,
6603 dc_status_to_str(dc_result));
6604
6605 dc_stream_release(stream);
6606 stream = NULL;
6607 requested_bpc -= 2; /* lower bpc to retry validation */
6608 }
6609
6610 } while (stream == NULL && requested_bpc >= 6);
6611
6612 if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
6613 DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
6614
6615 aconnector->force_yuv420_output = true;
6616 stream = create_validate_stream_for_sink(aconnector, drm_mode,
6617 dm_state, old_stream);
6618 aconnector->force_yuv420_output = false;
6619 }
6620
6621 return stream;
6622 }
6623
6624 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
6625 struct drm_display_mode *mode)
6626 {
6627 int result = MODE_ERROR;
6628 struct dc_sink *dc_sink;
6629 /* TODO: Unhardcode stream count */
6630 struct dc_stream_state *stream;
6631 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6632
6633 if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
6634 (mode->flags & DRM_MODE_FLAG_DBLSCAN))
6635 return result;
6636
6637 /*
6638 * Only run this the first time mode_valid is called to initialize
6639 * EDID mgmt
6640 */
6641 if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
6642 !aconnector->dc_em_sink)
6643 handle_edid_mgmt(aconnector);
6644
6645 dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
6646
6647 if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
6648 aconnector->base.force != DRM_FORCE_ON) {
6649 DRM_ERROR("dc_sink is NULL!\n");
6650 goto fail;
6651 }
6652
6653 drm_mode_set_crtcinfo(mode, 0);
6654
6655 stream = create_validate_stream_for_sink(aconnector, mode,
6656 to_dm_connector_state(connector->state),
6657 NULL);
6658 if (stream) {
6659 dc_stream_release(stream);
6660 result = MODE_OK;
6661 }
6662
6663 fail:
6664 /* TODO: error handling */
6665 return result;
6666 }
6667
6668 static int fill_hdr_info_packet(const struct drm_connector_state *state,
6669 struct dc_info_packet *out)
6670 {
6671 struct hdmi_drm_infoframe frame;
6672 unsigned char buf[30]; /* 26 + 4 */
6673 ssize_t len;
6674 int ret, i;
6675
6676 memset(out, 0, sizeof(*out));
6677
6678 if (!state->hdr_output_metadata)
6679 return 0;
6680
6681 ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
6682 if (ret)
6683 return ret;
6684
6685 len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
6686 if (len < 0)
6687 return (int)len;
6688
6689 /* Static metadata is a fixed 26 bytes + 4 byte header. */
6690 if (len != 30)
6691 return -EINVAL;
6692
6693 /* Prepare the infopacket for DC. */
6694 switch (state->connector->connector_type) {
6695 case DRM_MODE_CONNECTOR_HDMIA:
6696 out->hb0 = 0x87; /* type */
6697 out->hb1 = 0x01; /* version */
6698 out->hb2 = 0x1A; /* length */
6699 out->sb[0] = buf[3]; /* checksum */
6700 i = 1;
6701 break;
6702
6703 case DRM_MODE_CONNECTOR_DisplayPort:
6704 case DRM_MODE_CONNECTOR_eDP:
6705 out->hb0 = 0x00; /* sdp id, zero */
6706 out->hb1 = 0x87; /* type */
6707 out->hb2 = 0x1D; /* payload len - 1 */
6708 out->hb3 = (0x13 << 2); /* sdp version */
6709 out->sb[0] = 0x01; /* version */
6710 out->sb[1] = 0x1A; /* length */
6711 i = 2;
6712 break;
6713
6714 default:
6715 return -EINVAL;
6716 }
6717
6718 memcpy(&out->sb[i], &buf[4], 26);
6719 out->valid = true;
6720
6721 print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
6722 sizeof(out->sb), false);
6723
6724 return 0;
6725 }
6726
6727 static int
6728 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
6729 struct drm_atomic_state *state)
6730 {
6731 struct drm_connector_state *new_con_state =
6732 drm_atomic_get_new_connector_state(state, conn);
6733 struct drm_connector_state *old_con_state =
6734 drm_atomic_get_old_connector_state(state, conn);
6735 struct drm_crtc *crtc = new_con_state->crtc;
6736 struct drm_crtc_state *new_crtc_state;
6737 struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(conn);
6738 int ret;
6739
6740 trace_amdgpu_dm_connector_atomic_check(new_con_state);
6741
6742 if (conn->connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
6743 ret = drm_dp_mst_root_conn_atomic_check(new_con_state, &aconn->mst_mgr);
6744 if (ret < 0)
6745 return ret;
6746 }
6747
6748 if (!crtc)
6749 return 0;
6750
6751 if (new_con_state->colorspace != old_con_state->colorspace) {
6752 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
6753 if (IS_ERR(new_crtc_state))
6754 return PTR_ERR(new_crtc_state);
6755
6756 new_crtc_state->mode_changed = true;
6757 }
6758
6759 if (!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state)) {
6760 struct dc_info_packet hdr_infopacket;
6761
6762 ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
6763 if (ret)
6764 return ret;
6765
6766 new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
6767 if (IS_ERR(new_crtc_state))
6768 return PTR_ERR(new_crtc_state);
6769
6770 /*
6771 * DC considers the stream backends changed if the
6772 * static metadata changes. Forcing the modeset also
6773 * gives a simple way for userspace to switch from
6774 * 8bpc to 10bpc when setting the metadata to enter
6775 * or exit HDR.
6776 *
6777 * Changing the static metadata after it's been
6778 * set is permissible, however. So only force a
6779 * modeset if we're entering or exiting HDR.
6780 */
6781 new_crtc_state->mode_changed = new_crtc_state->mode_changed ||
6782 !old_con_state->hdr_output_metadata ||
6783 !new_con_state->hdr_output_metadata;
6784 }
6785
6786 return 0;
6787 }
6788
6789 static const struct drm_connector_helper_funcs
6790 amdgpu_dm_connector_helper_funcs = {
6791 /*
6792 * If hotplugging a second, bigger display in FB console mode, the bigger
6793 * resolution modes will be filtered out by drm_mode_validate_size(), and
6794 * those modes are missing after the user starts lightdm. So we need to renew
6795 * the modes list in the get_modes callback, not just return the modes count.
6796 */
6797 .get_modes = get_modes,
6798 .mode_valid = amdgpu_dm_connector_mode_valid,
6799 .atomic_check = amdgpu_dm_connector_atomic_check,
6800 };
6801
6802 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
6803 {
6804
6805 }
6806
6807 int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth)
6808 {
6809 switch (display_color_depth) {
6810 case COLOR_DEPTH_666:
6811 return 6;
6812 case COLOR_DEPTH_888:
6813 return 8;
6814 case COLOR_DEPTH_101010:
6815 return 10;
6816 case COLOR_DEPTH_121212:
6817 return 12;
6818 case COLOR_DEPTH_141414:
6819 return 14;
6820 case COLOR_DEPTH_161616:
6821 return 16;
6822 default:
6823 break;
6824 }
6825 return 0;
6826 }
6827
6828 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
6829 struct drm_crtc_state *crtc_state,
6830 struct drm_connector_state *conn_state)
6831 {
6832 struct drm_atomic_state *state = crtc_state->state;
6833 struct drm_connector *connector = conn_state->connector;
6834 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6835 struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
6836 const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
6837 struct drm_dp_mst_topology_mgr *mst_mgr;
6838 struct drm_dp_mst_port *mst_port;
6839 struct drm_dp_mst_topology_state *mst_state;
6840 enum dc_color_depth color_depth;
6841 int clock, bpp = 0;
6842 bool is_y420 = false;
6843
6844 if (!aconnector->mst_output_port)
6845 return 0;
6846
6847 mst_port = aconnector->mst_output_port;
6848 mst_mgr = &aconnector->mst_root->mst_mgr;
6849
6850 if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
6851 return 0;
6852
6853 mst_state = drm_atomic_get_mst_topology_state(state, mst_mgr);
6854 if (IS_ERR(mst_state))
6855 return PTR_ERR(mst_state);
6856
6857 mst_state->pbn_div = dm_mst_get_pbn_divider(aconnector->mst_root->dc_link);
6858
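	/*
	 * For a freshly computed (non-duplicated) state, recompute the PBN from
	 * the adjusted mode clock and the bpp derived from the negotiated color
	 * depth; the bpp is passed as bpp << 4, i.e. in 1/16 bpp units as
	 * drm_dp_calc_pbn_mode() expects here.
	 */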
6859 if (!state->duplicated) {
6860 int max_bpc = conn_state->max_requested_bpc;
6861
6862 is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
6863 aconnector->force_yuv420_output;
6864 color_depth = convert_color_depth_from_display_info(connector,
6865 is_y420,
6866 max_bpc);
6867 bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
6868 clock = adjusted_mode->clock;
6869 dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp << 4);
6870 }
6871
6872 dm_new_connector_state->vcpi_slots =
6873 drm_dp_atomic_find_time_slots(state, mst_mgr, mst_port,
6874 dm_new_connector_state->pbn);
6875 if (dm_new_connector_state->vcpi_slots < 0) {
6876 DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
6877 return dm_new_connector_state->vcpi_slots;
6878 }
6879 return 0;
6880 }
6881
6882 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
6883 .disable = dm_encoder_helper_disable,
6884 .atomic_check = dm_encoder_helper_atomic_check
6885 };
6886
6887 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
6888 struct dc_state *dc_state,
6889 struct dsc_mst_fairness_vars *vars)
6890 {
6891 struct dc_stream_state *stream = NULL;
6892 struct drm_connector *connector;
6893 struct drm_connector_state *new_con_state;
6894 struct amdgpu_dm_connector *aconnector;
6895 struct dm_connector_state *dm_conn_state;
6896 int i, j, ret;
6897 int vcpi, pbn_div, pbn, slot_num = 0;
6898
6899 for_each_new_connector_in_state(state, connector, new_con_state, i) {
6900
6901 aconnector = to_amdgpu_dm_connector(connector);
6902
6903 if (!aconnector->mst_output_port)
6904 continue;
6905
6906 if (!new_con_state || !new_con_state->crtc)
6907 continue;
6908
6909 dm_conn_state = to_dm_connector_state(new_con_state);
6910
6911 for (j = 0; j < dc_state->stream_count; j++) {
6912 stream = dc_state->streams[j];
6913 if (!stream)
6914 continue;
6915
6916 if ((struct amdgpu_dm_connector *)stream->dm_stream_context == aconnector)
6917 break;
6918
6919 stream = NULL;
6920 }
6921
6922 if (!stream)
6923 continue;
6924
6925 pbn_div = dm_mst_get_pbn_divider(stream->link);
6926 /* pbn is calculated by compute_mst_dsc_configs_for_state */
6927 for (j = 0; j < dc_state->stream_count; j++) {
6928 if (vars[j].aconnector == aconnector) {
6929 pbn = vars[j].pbn;
6930 break;
6931 }
6932 }
6933
6934 if (j == dc_state->stream_count)
6935 continue;
6936
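		/*
		 * pbn_div is the PBN capacity of a single MST time slot, so this
		 * rounds up to the number of time slots the stream needs.
		 */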
6937 slot_num = DIV_ROUND_UP(pbn, pbn_div);
6938
6939 if (stream->timing.flags.DSC != 1) {
6940 dm_conn_state->pbn = pbn;
6941 dm_conn_state->vcpi_slots = slot_num;
6942
6943 ret = drm_dp_mst_atomic_enable_dsc(state, aconnector->mst_output_port,
6944 dm_conn_state->pbn, false);
6945 if (ret < 0)
6946 return ret;
6947
6948 continue;
6949 }
6950
6951 vcpi = drm_dp_mst_atomic_enable_dsc(state, aconnector->mst_output_port, pbn, true);
6952 if (vcpi < 0)
6953 return vcpi;
6954
6955 dm_conn_state->pbn = pbn;
6956 dm_conn_state->vcpi_slots = vcpi;
6957 }
6958 return 0;
6959 }
6960
6961 static int to_drm_connector_type(enum signal_type st)
6962 {
6963 switch (st) {
6964 case SIGNAL_TYPE_HDMI_TYPE_A:
6965 return DRM_MODE_CONNECTOR_HDMIA;
6966 case SIGNAL_TYPE_EDP:
6967 return DRM_MODE_CONNECTOR_eDP;
6968 case SIGNAL_TYPE_LVDS:
6969 return DRM_MODE_CONNECTOR_LVDS;
6970 case SIGNAL_TYPE_RGB:
6971 return DRM_MODE_CONNECTOR_VGA;
6972 case SIGNAL_TYPE_DISPLAY_PORT:
6973 case SIGNAL_TYPE_DISPLAY_PORT_MST:
6974 return DRM_MODE_CONNECTOR_DisplayPort;
6975 case SIGNAL_TYPE_DVI_DUAL_LINK:
6976 case SIGNAL_TYPE_DVI_SINGLE_LINK:
6977 return DRM_MODE_CONNECTOR_DVID;
6978 case SIGNAL_TYPE_VIRTUAL:
6979 return DRM_MODE_CONNECTOR_VIRTUAL;
6980
6981 default:
6982 return DRM_MODE_CONNECTOR_Unknown;
6983 }
6984 }
6985
6986 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
6987 {
6988 struct drm_encoder *encoder;
6989
6990 /* There is only one encoder per connector */
6991 drm_connector_for_each_possible_encoder(connector, encoder)
6992 return encoder;
6993
6994 return NULL;
6995 }
6996
6997 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
6998 {
6999 struct drm_encoder *encoder;
7000 struct amdgpu_encoder *amdgpu_encoder;
7001
7002 encoder = amdgpu_dm_connector_to_encoder(connector);
7003
7004 if (encoder == NULL)
7005 return;
7006
7007 amdgpu_encoder = to_amdgpu_encoder(encoder);
7008
7009 amdgpu_encoder->native_mode.clock = 0;
7010
7011 if (!list_empty(&connector->probed_modes)) {
7012 struct drm_display_mode *preferred_mode = NULL;
7013
7014 list_for_each_entry(preferred_mode,
7015 &connector->probed_modes,
7016 head) {
7017 if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
7018 amdgpu_encoder->native_mode = *preferred_mode;
7019
7020 break;
7021 }
7022
7023 }
7024 }
7025
7026 static struct drm_display_mode *
7027 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
7028 char *name,
7029 int hdisplay, int vdisplay)
7030 {
7031 struct drm_device *dev = encoder->dev;
7032 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7033 struct drm_display_mode *mode = NULL;
7034 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
7035
7036 mode = drm_mode_duplicate(dev, native_mode);
7037
7038 if (mode == NULL)
7039 return NULL;
7040
7041 mode->hdisplay = hdisplay;
7042 mode->vdisplay = vdisplay;
7043 mode->type &= ~DRM_MODE_TYPE_PREFERRED;
7044 strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
7045
7046 return mode;
7047
7048 }
7049
7050 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
7051 struct drm_connector *connector)
7052 {
7053 struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7054 struct drm_display_mode *mode = NULL;
7055 struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
7056 struct amdgpu_dm_connector *amdgpu_dm_connector =
7057 to_amdgpu_dm_connector(connector);
7058 int i;
7059 int n;
7060 struct mode_size {
7061 char name[DRM_DISPLAY_MODE_LEN];
7062 int w;
7063 int h;
7064 } common_modes[] = {
7065 { "640x480", 640, 480},
7066 { "800x600", 800, 600},
7067 { "1024x768", 1024, 768},
7068 { "1280x720", 1280, 720},
7069 { "1280x800", 1280, 800},
7070 {"1280x1024", 1280, 1024},
7071 { "1440x900", 1440, 900},
7072 {"1680x1050", 1680, 1050},
7073 {"1600x1200", 1600, 1200},
7074 {"1920x1080", 1920, 1080},
7075 {"1920x1200", 1920, 1200}
7076 };
7077
7078 n = ARRAY_SIZE(common_modes);
7079
7080 for (i = 0; i < n; i++) {
7081 struct drm_display_mode *curmode = NULL;
7082 bool mode_existed = false;
7083
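		/*
		 * Skip common modes that exceed the native mode in either
		 * dimension, and skip the native resolution itself.
		 */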
7084 if (common_modes[i].w > native_mode->hdisplay ||
7085 common_modes[i].h > native_mode->vdisplay ||
7086 (common_modes[i].w == native_mode->hdisplay &&
7087 common_modes[i].h == native_mode->vdisplay))
7088 continue;
7089
7090 list_for_each_entry(curmode, &connector->probed_modes, head) {
7091 if (common_modes[i].w == curmode->hdisplay &&
7092 common_modes[i].h == curmode->vdisplay) {
7093 mode_existed = true;
7094 break;
7095 }
7096 }
7097
7098 if (mode_existed)
7099 continue;
7100
7101 mode = amdgpu_dm_create_common_mode(encoder,
7102 common_modes[i].name, common_modes[i].w,
7103 common_modes[i].h);
7104 if (!mode)
7105 continue;
7106
7107 drm_mode_probed_add(connector, mode);
7108 amdgpu_dm_connector->num_modes++;
7109 }
7110 }
7111
7112 static void amdgpu_set_panel_orientation(struct drm_connector *connector)
7113 {
7114 struct drm_encoder *encoder;
7115 struct amdgpu_encoder *amdgpu_encoder;
7116 const struct drm_display_mode *native_mode;
7117
7118 if (connector->connector_type != DRM_MODE_CONNECTOR_eDP &&
7119 connector->connector_type != DRM_MODE_CONNECTOR_LVDS)
7120 return;
7121
7122 mutex_lock(&connector->dev->mode_config.mutex);
7123 amdgpu_dm_connector_get_modes(connector);
7124 mutex_unlock(&connector->dev->mode_config.mutex);
7125
7126 encoder = amdgpu_dm_connector_to_encoder(connector);
7127 if (!encoder)
7128 return;
7129
7130 amdgpu_encoder = to_amdgpu_encoder(encoder);
7131
7132 native_mode = &amdgpu_encoder->native_mode;
7133 if (native_mode->hdisplay == 0 || native_mode->vdisplay == 0)
7134 return;
7135
7136 drm_connector_set_panel_orientation_with_quirk(connector,
7137 DRM_MODE_PANEL_ORIENTATION_UNKNOWN,
7138 native_mode->hdisplay,
7139 native_mode->vdisplay);
7140 }
7141
7142 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
7143 struct edid *edid)
7144 {
7145 struct amdgpu_dm_connector *amdgpu_dm_connector =
7146 to_amdgpu_dm_connector(connector);
7147
7148 if (edid) {
7149 /* empty probed_modes */
7150 INIT_LIST_HEAD(&connector->probed_modes);
7151 amdgpu_dm_connector->num_modes =
7152 drm_add_edid_modes(connector, edid);
7153
7154 /* Sort the probed modes before calling
7155 * amdgpu_dm_get_native_mode(), since the EDID can have
7156 * more than one preferred mode. Modes that appear later
7157 * in the probed mode list can have a higher, preferred
7158 * resolution. For example, a 3840x2160 preferred timing
7159 * in the base EDID and a 4096x2160 preferred resolution
7160 * in a later DID extension block.
7161 */
7162 drm_mode_sort(&connector->probed_modes);
7163 amdgpu_dm_get_native_mode(connector);
7164
7165 /* Freesync capabilities are reset by calling
7166 * drm_add_edid_modes() and need to be
7167 * restored here.
7168 */
7169 amdgpu_dm_update_freesync_caps(connector, edid);
7170 } else {
7171 amdgpu_dm_connector->num_modes = 0;
7172 }
7173 }
7174
7175 static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
7176 struct drm_display_mode *mode)
7177 {
7178 struct drm_display_mode *m;
7179
7180 list_for_each_entry(m, &aconnector->base.probed_modes, head) {
7181 if (drm_mode_equal(m, mode))
7182 return true;
7183 }
7184
7185 return false;
7186 }
7187
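/*
 * Synthesize additional fixed-refresh modes within the panel's FreeSync range
 * by stretching the vertical blanking of the highest-refresh probed mode.
 */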
7188 static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
7189 {
7190 const struct drm_display_mode *m;
7191 struct drm_display_mode *new_mode;
7192 uint i;
7193 u32 new_modes_count = 0;
7194
7195 /* Standard FPS values
7196 *
7197 * 23.976 - TV/NTSC
7198 * 24 - Cinema
7199 * 25 - TV/PAL
7200 * 29.97 - TV/NTSC
7201 * 30 - TV/NTSC
7202 * 48 - Cinema HFR
7203 * 50 - TV/PAL
7204 * 60 - Commonly used
7205 * 48,72,96,120 - Multiples of 24
7206 */
7207 static const u32 common_rates[] = {
7208 23976, 24000, 25000, 29970, 30000,
7209 48000, 50000, 60000, 72000, 96000, 120000
7210 };
7211
7212 /*
7213 * Find the mode with the highest refresh rate at the same resolution
7214 * as the preferred mode. Some monitors report a preferred mode with a
7215 * lower refresh rate than the highest one they support.
7216 */
7217
7218 m = get_highest_refresh_rate_mode(aconnector, true);
7219 if (!m)
7220 return 0;
7221
7222 for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
7223 u64 target_vtotal, target_vtotal_diff;
7224 u64 num, den;
7225
7226 if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
7227 continue;
7228
7229 if (common_rates[i] < aconnector->min_vfreq * 1000 ||
7230 common_rates[i] > aconnector->max_vfreq * 1000)
7231 continue;
7232
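		/*
		 * Solve pixel_clock = htotal * vtotal * refresh for vtotal at
		 * the target rate, keeping the pixel clock and htotal unchanged.
		 */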
7233 num = (unsigned long long)m->clock * 1000 * 1000;
7234 den = common_rates[i] * (unsigned long long)m->htotal;
7235 target_vtotal = div_u64(num, den);
7236 target_vtotal_diff = target_vtotal - m->vtotal;
7237
7238 /* Check for illegal modes */
7239 if (m->vsync_start + target_vtotal_diff < m->vdisplay ||
7240 m->vsync_end + target_vtotal_diff < m->vsync_start ||
7241 m->vtotal + target_vtotal_diff < m->vsync_end)
7242 continue;
7243
7244 new_mode = drm_mode_duplicate(aconnector->base.dev, m);
7245 if (!new_mode)
7246 goto out;
7247
7248 new_mode->vtotal += (u16)target_vtotal_diff;
7249 new_mode->vsync_start += (u16)target_vtotal_diff;
7250 new_mode->vsync_end += (u16)target_vtotal_diff;
7251 new_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
7252 new_mode->type |= DRM_MODE_TYPE_DRIVER;
7253
7254 if (!is_duplicate_mode(aconnector, new_mode)) {
7255 drm_mode_probed_add(&aconnector->base, new_mode);
7256 new_modes_count += 1;
7257 } else
7258 drm_mode_destroy(aconnector->base.dev, new_mode);
7259 }
7260 out:
7261 return new_modes_count;
7262 }
7263
7264 static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
7265 struct edid *edid)
7266 {
7267 struct amdgpu_dm_connector *amdgpu_dm_connector =
7268 to_amdgpu_dm_connector(connector);
7269
7270 if (!edid)
7271 return;
7272
7273 if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
7274 amdgpu_dm_connector->num_modes +=
7275 add_fs_modes(amdgpu_dm_connector);
7276 }
7277
7278 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
7279 {
7280 struct amdgpu_dm_connector *amdgpu_dm_connector =
7281 to_amdgpu_dm_connector(connector);
7282 struct drm_encoder *encoder;
7283 struct edid *edid = amdgpu_dm_connector->edid;
7284 struct dc_link_settings *verified_link_cap =
7285 &amdgpu_dm_connector->dc_link->verified_link_cap;
7286 const struct dc *dc = amdgpu_dm_connector->dc_link->dc;
7287
7288 encoder = amdgpu_dm_connector_to_encoder(connector);
7289
7290 if (!drm_edid_is_valid(edid)) {
7291 amdgpu_dm_connector->num_modes =
7292 drm_add_modes_noedid(connector, 640, 480);
7293 if (dc->link_srv->dp_get_encoding_format(verified_link_cap) == DP_128b_132b_ENCODING)
7294 amdgpu_dm_connector->num_modes +=
7295 drm_add_modes_noedid(connector, 1920, 1080);
7296 } else {
7297 amdgpu_dm_connector_ddc_get_modes(connector, edid);
7298 amdgpu_dm_connector_add_common_modes(encoder, connector);
7299 amdgpu_dm_connector_add_freesync_modes(connector, edid);
7300 }
7301 amdgpu_dm_fbc_init(connector);
7302
7303 return amdgpu_dm_connector->num_modes;
7304 }
7305
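/*
 * Colorimetry modes exposed through the DRM colorspace property on HDMI,
 * DisplayPort and eDP connectors below.
 */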
7306 static const u32 supported_colorspaces =
7307 BIT(DRM_MODE_COLORIMETRY_BT709_YCC) |
7308 BIT(DRM_MODE_COLORIMETRY_OPRGB) |
7309 BIT(DRM_MODE_COLORIMETRY_BT2020_RGB) |
7310 BIT(DRM_MODE_COLORIMETRY_BT2020_YCC);
7311
7312 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
7313 struct amdgpu_dm_connector *aconnector,
7314 int connector_type,
7315 struct dc_link *link,
7316 int link_index)
7317 {
7318 struct amdgpu_device *adev = drm_to_adev(dm->ddev);
7319
7320 /*
7321 * Some of the properties below require access to state, like bpc.
7322 * Allocate some default initial connector state with our reset helper.
7323 */
7324 if (aconnector->base.funcs->reset)
7325 aconnector->base.funcs->reset(&aconnector->base);
7326
7327 aconnector->connector_id = link_index;
7328 aconnector->bl_idx = -1;
7329 aconnector->dc_link = link;
7330 aconnector->base.interlace_allowed = false;
7331 aconnector->base.doublescan_allowed = false;
7332 aconnector->base.stereo_allowed = false;
7333 aconnector->base.dpms = DRM_MODE_DPMS_OFF;
7334 aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
7335 aconnector->audio_inst = -1;
7336 aconnector->pack_sdp_v1_3 = false;
7337 aconnector->as_type = ADAPTIVE_SYNC_TYPE_NONE;
7338 memset(&aconnector->vsdb_info, 0, sizeof(aconnector->vsdb_info));
7339 mutex_init(&aconnector->hpd_lock);
7340 mutex_init(&aconnector->handle_mst_msg_ready);
7341
7342 /*
7343 * Configure HPD hot plug support. The default connector->polled value is 0,
7344 * which means HPD hot plug is not supported.
7345 */
7346 switch (connector_type) {
7347 case DRM_MODE_CONNECTOR_HDMIA:
7348 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7349 aconnector->base.ycbcr_420_allowed =
7350 link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
7351 break;
7352 case DRM_MODE_CONNECTOR_DisplayPort:
7353 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7354 link->link_enc = link_enc_cfg_get_link_enc(link);
7355 ASSERT(link->link_enc);
7356 if (link->link_enc)
7357 aconnector->base.ycbcr_420_allowed =
7358 link->link_enc->features.dp_ycbcr420_supported ? true : false;
7359 break;
7360 case DRM_MODE_CONNECTOR_DVID:
7361 aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
7362 break;
7363 default:
7364 break;
7365 }
7366
7367 drm_object_attach_property(&aconnector->base.base,
7368 dm->ddev->mode_config.scaling_mode_property,
7369 DRM_MODE_SCALE_NONE);
7370
7371 drm_object_attach_property(&aconnector->base.base,
7372 adev->mode_info.underscan_property,
7373 UNDERSCAN_OFF);
7374 drm_object_attach_property(&aconnector->base.base,
7375 adev->mode_info.underscan_hborder_property,
7376 0);
7377 drm_object_attach_property(&aconnector->base.base,
7378 adev->mode_info.underscan_vborder_property,
7379 0);
7380
7381 if (!aconnector->mst_root)
7382 drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
7383
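	/*
	 * Default the connector state to the maximum color depth; userspace can
	 * lower it through the max bpc property attached above.
	 */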
7384 aconnector->base.state->max_bpc = 16;
7385 aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
7386
7387 if (connector_type == DRM_MODE_CONNECTOR_eDP &&
7388 (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
7389 drm_object_attach_property(&aconnector->base.base,
7390 adev->mode_info.abm_level_property, 0);
7391 }
7392
7393 if (connector_type == DRM_MODE_CONNECTOR_HDMIA) {
7394 if (!drm_mode_create_hdmi_colorspace_property(&aconnector->base, supported_colorspaces))
7395 drm_connector_attach_colorspace_property(&aconnector->base);
7396 } else if ((connector_type == DRM_MODE_CONNECTOR_DisplayPort && !aconnector->mst_root) ||
7397 connector_type == DRM_MODE_CONNECTOR_eDP) {
7398 if (!drm_mode_create_dp_colorspace_property(&aconnector->base, supported_colorspaces))
7399 drm_connector_attach_colorspace_property(&aconnector->base);
7400 }
7401
7402 if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
7403 connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
7404 connector_type == DRM_MODE_CONNECTOR_eDP) {
7405 drm_connector_attach_hdr_output_metadata_property(&aconnector->base);
7406
7407 if (!aconnector->mst_root)
7408 drm_connector_attach_vrr_capable_property(&aconnector->base);
7409
7410 if (adev->dm.hdcp_workqueue)
7411 drm_connector_attach_content_protection_property(&aconnector->base, true);
7412 }
7413 }
7414
7415 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
7416 struct i2c_msg *msgs, int num)
7417 {
7418 struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
7419 struct ddc_service *ddc_service = i2c->ddc_service;
7420 struct i2c_command cmd;
7421 int i;
7422 int result = -EIO;
7423
7424 if (!ddc_service->ddc_pin || !ddc_service->ddc_pin->hw_info.hw_supported)
7425 return result;
7426
7427 cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
7428
7429 if (!cmd.payloads)
7430 return result;
7431
7432 cmd.number_of_payloads = num;
7433 cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
7434 cmd.speed = 100;
7435
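	/*
	 * Translate each Linux i2c_msg into a DC i2c_payload before handing the
	 * whole transaction to dc_submit_i2c().
	 */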
7436 for (i = 0; i < num; i++) {
7437 cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
7438 cmd.payloads[i].address = msgs[i].addr;
7439 cmd.payloads[i].length = msgs[i].len;
7440 cmd.payloads[i].data = msgs[i].buf;
7441 }
7442
7443 if (dc_submit_i2c(
7444 ddc_service->ctx->dc,
7445 ddc_service->link->link_index,
7446 &cmd))
7447 result = num;
7448
7449 kfree(cmd.payloads);
7450 return result;
7451 }
7452
7453 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
7454 {
7455 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
7456 }
7457
7458 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
7459 .master_xfer = amdgpu_dm_i2c_xfer,
7460 .functionality = amdgpu_dm_i2c_func,
7461 };
7462
7463 static struct amdgpu_i2c_adapter *
7464 create_i2c(struct ddc_service *ddc_service,
7465 int link_index,
7466 int *res)
7467 {
7468 struct amdgpu_device *adev = ddc_service->ctx->driver_context;
7469 struct amdgpu_i2c_adapter *i2c;
7470
7471 i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
7472 if (!i2c)
7473 return NULL;
7474 i2c->base.owner = THIS_MODULE;
7475 i2c->base.class = I2C_CLASS_DDC;
7476 i2c->base.dev.parent = &adev->pdev->dev;
7477 i2c->base.algo = &amdgpu_dm_i2c_algo;
7478 snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
7479 i2c_set_adapdata(&i2c->base, i2c);
7480 i2c->ddc_service = ddc_service;
7481
7482 return i2c;
7483 }
7484
7485
7486 /*
7487 * Note: this function assumes that dc_link_detect() was called for the
7488 * dc_link which will be represented by this aconnector.
7489 */
7490 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
7491 struct amdgpu_dm_connector *aconnector,
7492 u32 link_index,
7493 struct amdgpu_encoder *aencoder)
7494 {
7495 int res = 0;
7496 int connector_type;
7497 struct dc *dc = dm->dc;
7498 struct dc_link *link = dc_get_link_at_index(dc, link_index);
7499 struct amdgpu_i2c_adapter *i2c;
7500
7501 link->priv = aconnector;
7502
7503
7504 i2c = create_i2c(link->ddc, link->link_index, &res);
7505 if (!i2c) {
7506 DRM_ERROR("Failed to create i2c adapter data\n");
7507 return -ENOMEM;
7508 }
7509
7510 aconnector->i2c = i2c;
7511 res = i2c_add_adapter(&i2c->base);
7512
7513 if (res) {
7514 DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
7515 goto out_free;
7516 }
7517
7518 connector_type = to_drm_connector_type(link->connector_signal);
7519
7520 res = drm_connector_init_with_ddc(
7521 dm->ddev,
7522 &aconnector->base,
7523 &amdgpu_dm_connector_funcs,
7524 connector_type,
7525 &i2c->base);
7526
7527 if (res) {
7528 DRM_ERROR("connector_init failed\n");
7529 aconnector->connector_id = -1;
7530 goto out_free;
7531 }
7532
7533 drm_connector_helper_add(
7534 &aconnector->base,
7535 &amdgpu_dm_connector_helper_funcs);
7536
7537 amdgpu_dm_connector_init_helper(
7538 dm,
7539 aconnector,
7540 connector_type,
7541 link,
7542 link_index);
7543
7544 drm_connector_attach_encoder(
7545 &aconnector->base, &aencoder->base);
7546
7547 if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
7548 || connector_type == DRM_MODE_CONNECTOR_eDP)
7549 amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
7550
7551 out_free:
7552 if (res) {
7553 kfree(i2c);
7554 aconnector->i2c = NULL;
7555 }
7556 return res;
7557 }
7558
7559 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
7560 {
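	/* Build a bitmask with one bit set per available CRTC (up to six). */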
7561 switch (adev->mode_info.num_crtc) {
7562 case 1:
7563 return 0x1;
7564 case 2:
7565 return 0x3;
7566 case 3:
7567 return 0x7;
7568 case 4:
7569 return 0xf;
7570 case 5:
7571 return 0x1f;
7572 case 6:
7573 default:
7574 return 0x3f;
7575 }
7576 }
7577
7578 static int amdgpu_dm_encoder_init(struct drm_device *dev,
7579 struct amdgpu_encoder *aencoder,
7580 uint32_t link_index)
7581 {
7582 struct amdgpu_device *adev = drm_to_adev(dev);
7583
7584 int res = drm_encoder_init(dev,
7585 &aencoder->base,
7586 &amdgpu_dm_encoder_funcs,
7587 DRM_MODE_ENCODER_TMDS,
7588 NULL);
7589
7590 aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
7591
7592 if (!res)
7593 aencoder->encoder_id = link_index;
7594 else
7595 aencoder->encoder_id = -1;
7596
7597 drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
7598
7599 return res;
7600 }
7601
7602 static void manage_dm_interrupts(struct amdgpu_device *adev,
7603 struct amdgpu_crtc *acrtc,
7604 bool enable)
7605 {
7606 /*
7607 * We have no guarantee that the frontend index maps to the same
7608 * backend index - some even map to more than one.
7609 *
7610 * TODO: Use a different interrupt or check DC itself for the mapping.
7611 */
7612 int irq_type =
7613 amdgpu_display_crtc_idx_to_irq_type(
7614 adev,
7615 acrtc->crtc_id);
7616
7617 if (enable) {
7618 drm_crtc_vblank_on(&acrtc->base);
7619 amdgpu_irq_get(
7620 adev,
7621 &adev->pageflip_irq,
7622 irq_type);
7623 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
7624 amdgpu_irq_get(
7625 adev,
7626 &adev->vline0_irq,
7627 irq_type);
7628 #endif
7629 } else {
7630 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
7631 amdgpu_irq_put(
7632 adev,
7633 &adev->vline0_irq,
7634 irq_type);
7635 #endif
7636 amdgpu_irq_put(
7637 adev,
7638 &adev->pageflip_irq,
7639 irq_type);
7640 drm_crtc_vblank_off(&acrtc->base);
7641 }
7642 }
7643
7644 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
7645 struct amdgpu_crtc *acrtc)
7646 {
7647 int irq_type =
7648 amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
7649
7650 /**
7651 * This reads the current state for the IRQ and forcibly reapplies
7652 * the setting to hardware.
7653 */
7654 amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
7655 }
7656
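/*
 * Returns true when the scaling mode or the underscan borders changed between
 * the old and new connector state.
 */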
7657 static bool
7658 is_scaling_state_different(const struct dm_connector_state *dm_state,
7659 const struct dm_connector_state *old_dm_state)
7660 {
7661 if (dm_state->scaling != old_dm_state->scaling)
7662 return true;
7663 if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
7664 if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
7665 return true;
7666 } else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
7667 if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
7668 return true;
7669 } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
7670 dm_state->underscan_vborder != old_dm_state->underscan_vborder)
7671 return true;
7672 return false;
7673 }
7674
7675 static bool is_content_protection_different(struct drm_crtc_state *new_crtc_state,
7676 struct drm_crtc_state *old_crtc_state,
7677 struct drm_connector_state *new_conn_state,
7678 struct drm_connector_state *old_conn_state,
7679 const struct drm_connector *connector,
7680 struct hdcp_workqueue *hdcp_w)
7681 {
7682 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7683 struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
7684
7685 pr_debug("[HDCP_DM] connector->index: %x connect_status: %x dpms: %x\n",
7686 connector->index, connector->status, connector->dpms);
7687 pr_debug("[HDCP_DM] state protection old: %x new: %x\n",
7688 old_conn_state->content_protection, new_conn_state->content_protection);
7689
7690 if (old_crtc_state)
7691 pr_debug("[HDCP_DM] old crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n",
7692 old_crtc_state->enable,
7693 old_crtc_state->active,
7694 old_crtc_state->mode_changed,
7695 old_crtc_state->active_changed,
7696 old_crtc_state->connectors_changed);
7697
7698 if (new_crtc_state)
7699 pr_debug("[HDCP_DM] NEW crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n",
7700 new_crtc_state->enable,
7701 new_crtc_state->active,
7702 new_crtc_state->mode_changed,
7703 new_crtc_state->active_changed,
7704 new_crtc_state->connectors_changed);
7705
7706 /* hdcp content type change */
7707 if (old_conn_state->hdcp_content_type != new_conn_state->hdcp_content_type &&
7708 new_conn_state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
7709 new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7710 pr_debug("[HDCP_DM] Type0/1 change %s :true\n", __func__);
7711 return true;
7712 }
7713
7714 /* CP is being re enabled, ignore this */
7715 if (old_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
7716 new_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
7717 if (new_crtc_state && new_crtc_state->mode_changed) {
7718 new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7719 pr_debug("[HDCP_DM] ENABLED->DESIRED & mode_changed %s :true\n", __func__);
7720 return true;
7721 }
7722 new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
7723 pr_debug("[HDCP_DM] ENABLED -> DESIRED %s :false\n", __func__);
7724 return false;
7725 }
7726
7727 /* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED
7728 *
7729 * Handles: UNDESIRED -> ENABLED
7730 */
7731 if (old_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
7732 new_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
7733 new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
7734
7735 /* Stream removed and re-enabled
7736 *
7737 * Can sometimes overlap with the HPD case,
7738 * thus set update_hdcp to false to avoid
7739 * setting HDCP multiple times.
7740 *
7741 * Handles: DESIRED -> DESIRED (Special case)
7742 */
7743 if (!(old_conn_state->crtc && old_conn_state->crtc->enabled) &&
7744 new_conn_state->crtc && new_conn_state->crtc->enabled &&
7745 connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
7746 dm_con_state->update_hdcp = false;
7747 pr_debug("[HDCP_DM] DESIRED->DESIRED (Stream removed and re-enabled) %s :true\n",
7748 __func__);
7749 return true;
7750 }
7751
7752 /* Hot-plug, headless s3, dpms
7753 *
7754 * Only start HDCP if the display is connected/enabled.
7755 * update_hdcp flag will be set to false until the next
7756 * HPD comes in.
7757 *
7758 * Handles: DESIRED -> DESIRED (Special case)
7759 */
7760 if (dm_con_state->update_hdcp &&
7761 new_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
7762 connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
7763 dm_con_state->update_hdcp = false;
7764 pr_debug("[HDCP_DM] DESIRED->DESIRED (Hot-plug, headless s3, dpms) %s :true\n",
7765 __func__);
7766 return true;
7767 }
7768
7769 if (old_conn_state->content_protection == new_conn_state->content_protection) {
7770 if (new_conn_state->content_protection >= DRM_MODE_CONTENT_PROTECTION_DESIRED) {
7771 if (new_crtc_state && new_crtc_state->mode_changed) {
7772 pr_debug("[HDCP_DM] DESIRED->DESIRED or ENABLE->ENABLE mode_change %s :true\n",
7773 __func__);
7774 return true;
7775 }
7776 pr_debug("[HDCP_DM] DESIRED->DESIRED & ENABLE->ENABLE %s :false\n",
7777 __func__);
7778 return false;
7779 }
7780
7781 pr_debug("[HDCP_DM] UNDESIRED->UNDESIRED %s :false\n", __func__);
7782 return false;
7783 }
7784
7785 if (new_conn_state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED) {
7786 pr_debug("[HDCP_DM] UNDESIRED->DESIRED or DESIRED->UNDESIRED or ENABLED->UNDESIRED %s :true\n",
7787 __func__);
7788 return true;
7789 }
7790
7791 pr_debug("[HDCP_DM] DESIRED->ENABLED %s :false\n", __func__);
7792 return false;
7793 }
7794
7795 static void remove_stream(struct amdgpu_device *adev,
7796 struct amdgpu_crtc *acrtc,
7797 struct dc_stream_state *stream)
7798 {
7799 /* this is the update mode case */
7800
7801 acrtc->otg_inst = -1;
7802 acrtc->enabled = false;
7803 }
7804
7805 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
7806 {
7807
7808 assert_spin_locked(&acrtc->base.dev->event_lock);
7809 WARN_ON(acrtc->event);
7810
7811 acrtc->event = acrtc->base.state->event;
7812
7813 /* Set the flip status */
7814 acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
7815
7816 /* Mark this event as consumed */
7817 acrtc->base.state->event = NULL;
7818
7819 DC_LOG_PFLIP("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
7820 acrtc->crtc_id);
7821 }
7822
7823 static void update_freesync_state_on_stream(
7824 struct amdgpu_display_manager *dm,
7825 struct dm_crtc_state *new_crtc_state,
7826 struct dc_stream_state *new_stream,
7827 struct dc_plane_state *surface,
7828 u32 flip_timestamp_in_us)
7829 {
7830 struct mod_vrr_params vrr_params;
7831 struct dc_info_packet vrr_infopacket = {0};
7832 struct amdgpu_device *adev = dm->adev;
7833 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
7834 unsigned long flags;
7835 bool pack_sdp_v1_3 = false;
7836 struct amdgpu_dm_connector *aconn;
7837 enum vrr_packet_type packet_type = PACKET_TYPE_VRR;
7838
7839 if (!new_stream)
7840 return;
7841
7842 /*
7843 * TODO: Determine why min/max totals and vrefresh can be 0 here.
7844 * For now it's sufficient to just guard against these conditions.
7845 */
7846
7847 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
7848 return;
7849
7850 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
7851 vrr_params = acrtc->dm_irq_params.vrr_params;
7852
7853 if (surface) {
7854 mod_freesync_handle_preflip(
7855 dm->freesync_module,
7856 surface,
7857 new_stream,
7858 flip_timestamp_in_us,
7859 &vrr_params);
7860
7861 if (adev->family < AMDGPU_FAMILY_AI &&
7862 amdgpu_dm_crtc_vrr_active(new_crtc_state)) {
7863 mod_freesync_handle_v_update(dm->freesync_module,
7864 new_stream, &vrr_params);
7865
7866 /* Need to call this before the frame ends. */
7867 dc_stream_adjust_vmin_vmax(dm->dc,
7868 new_crtc_state->stream,
7869 &vrr_params.adjust);
7870 }
7871 }
7872
7873 aconn = (struct amdgpu_dm_connector *)new_stream->dm_stream_context;
7874
7875 if (aconn && (aconn->as_type == FREESYNC_TYPE_PCON_IN_WHITELIST || aconn->vsdb_info.replay_mode)) {
7876 pack_sdp_v1_3 = aconn->pack_sdp_v1_3;
7877
7878 if (aconn->vsdb_info.amd_vsdb_version == 1)
7879 packet_type = PACKET_TYPE_FS_V1;
7880 else if (aconn->vsdb_info.amd_vsdb_version == 2)
7881 packet_type = PACKET_TYPE_FS_V2;
7882 else if (aconn->vsdb_info.amd_vsdb_version == 3)
7883 packet_type = PACKET_TYPE_FS_V3;
7884
7885 mod_build_adaptive_sync_infopacket(new_stream, aconn->as_type, NULL,
7886 &new_stream->adaptive_sync_infopacket);
7887 }
7888
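	/*
	 * Build the VRR infopacket for the stream and record whether it changed,
	 * so the commit path only pushes an infopacket stream update when needed.
	 */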
7889 mod_freesync_build_vrr_infopacket(
7890 dm->freesync_module,
7891 new_stream,
7892 &vrr_params,
7893 packet_type,
7894 TRANSFER_FUNC_UNKNOWN,
7895 &vrr_infopacket,
7896 pack_sdp_v1_3);
7897
7898 new_crtc_state->freesync_vrr_info_changed |=
7899 (memcmp(&new_crtc_state->vrr_infopacket,
7900 &vrr_infopacket,
7901 sizeof(vrr_infopacket)) != 0);
7902
7903 acrtc->dm_irq_params.vrr_params = vrr_params;
7904 new_crtc_state->vrr_infopacket = vrr_infopacket;
7905
7906 new_stream->vrr_infopacket = vrr_infopacket;
7907 new_stream->allow_freesync = mod_freesync_get_freesync_enabled(&vrr_params);
7908
7909 if (new_crtc_state->freesync_vrr_info_changed)
7910 DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
7911 new_crtc_state->base.crtc->base.id,
7912 (int)new_crtc_state->base.vrr_enabled,
7913 (int)vrr_params.state);
7914
7915 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
7916 }
7917
7918 static void update_stream_irq_parameters(
7919 struct amdgpu_display_manager *dm,
7920 struct dm_crtc_state *new_crtc_state)
7921 {
7922 struct dc_stream_state *new_stream = new_crtc_state->stream;
7923 struct mod_vrr_params vrr_params;
7924 struct mod_freesync_config config = new_crtc_state->freesync_config;
7925 struct amdgpu_device *adev = dm->adev;
7926 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
7927 unsigned long flags;
7928
7929 if (!new_stream)
7930 return;
7931
7932 /*
7933 * TODO: Determine why min/max totals and vrefresh can be 0 here.
7934 * For now it's sufficient to just guard against these conditions.
7935 */
7936 if (!new_stream->timing.h_total || !new_stream->timing.v_total)
7937 return;
7938
7939 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
7940 vrr_params = acrtc->dm_irq_params.vrr_params;
7941
7942 if (new_crtc_state->vrr_supported &&
7943 config.min_refresh_in_uhz &&
7944 config.max_refresh_in_uhz) {
7945 /*
7946 * if freesync compatible mode was set, config.state will be set
7947 * in atomic check
7948 */
7949 if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz &&
7950 (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) ||
7951 new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) {
7952 vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz;
7953 vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz;
7954 vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz;
7955 vrr_params.state = VRR_STATE_ACTIVE_FIXED;
7956 } else {
7957 config.state = new_crtc_state->base.vrr_enabled ?
7958 VRR_STATE_ACTIVE_VARIABLE :
7959 VRR_STATE_INACTIVE;
7960 }
7961 } else {
7962 config.state = VRR_STATE_UNSUPPORTED;
7963 }
7964
7965 mod_freesync_build_vrr_params(dm->freesync_module,
7966 new_stream,
7967 &config, &vrr_params);
7968
7969 new_crtc_state->freesync_config = config;
7970 /* Copy state for access from DM IRQ handler */
7971 acrtc->dm_irq_params.freesync_config = config;
7972 acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
7973 acrtc->dm_irq_params.vrr_params = vrr_params;
7974 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
7975 }
7976
7977 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
7978 struct dm_crtc_state *new_state)
7979 {
7980 bool old_vrr_active = amdgpu_dm_crtc_vrr_active(old_state);
7981 bool new_vrr_active = amdgpu_dm_crtc_vrr_active(new_state);
7982
7983 if (!old_vrr_active && new_vrr_active) {
7984 /* Transition VRR inactive -> active:
7985 * While VRR is active, we must not disable vblank irq, as a
7986 * re-enable after a disable would compute bogus vblank/pflip
7987 * timestamps if it happened inside the display front-porch.
7988 *
7989 * We also need vupdate irq for the actual core vblank handling
7990 * at end of vblank.
7991 */
7992 WARN_ON(amdgpu_dm_crtc_set_vupdate_irq(new_state->base.crtc, true) != 0);
7993 WARN_ON(drm_crtc_vblank_get(new_state->base.crtc) != 0);
7994 DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
7995 __func__, new_state->base.crtc->base.id);
7996 } else if (old_vrr_active && !new_vrr_active) {
7997 /* Transition VRR active -> inactive:
7998 * Allow vblank irq disable again for fixed refresh rate.
7999 */
8000 WARN_ON(amdgpu_dm_crtc_set_vupdate_irq(new_state->base.crtc, false) != 0);
8001 drm_crtc_vblank_put(new_state->base.crtc);
8002 DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
8003 __func__, new_state->base.crtc->base.id);
8004 }
8005 }
8006
8007 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
8008 {
8009 struct drm_plane *plane;
8010 struct drm_plane_state *old_plane_state;
8011 int i;
8012
8013 /*
8014 * TODO: Make this per-stream so we don't issue redundant updates for
8015 * commits with multiple streams.
8016 */
8017 for_each_old_plane_in_state(state, plane, old_plane_state, i)
8018 if (plane->type == DRM_PLANE_TYPE_CURSOR)
8019 amdgpu_dm_plane_handle_cursor_update(plane, old_plane_state);
8020 }
8021
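/*
 * Returns the TTM memory type (e.g. VRAM or GTT) currently backing the
 * framebuffer's buffer object, or 0 if it has no resource.
 */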
8022 static inline uint32_t get_mem_type(struct drm_framebuffer *fb)
8023 {
8024 struct amdgpu_bo *abo = gem_to_amdgpu_bo(fb->obj[0]);
8025
8026 return abo->tbo.resource ? abo->tbo.resource->mem_type : 0;
8027 }
8028
8029 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
8030 struct drm_device *dev,
8031 struct amdgpu_display_manager *dm,
8032 struct drm_crtc *pcrtc,
8033 bool wait_for_vblank)
8034 {
8035 u32 i;
8036 u64 timestamp_ns = ktime_get_ns();
8037 struct drm_plane *plane;
8038 struct drm_plane_state *old_plane_state, *new_plane_state;
8039 struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
8040 struct drm_crtc_state *new_pcrtc_state =
8041 drm_atomic_get_new_crtc_state(state, pcrtc);
8042 struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
8043 struct dm_crtc_state *dm_old_crtc_state =
8044 to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
8045 int planes_count = 0, vpos, hpos;
8046 unsigned long flags;
8047 u32 target_vblank, last_flip_vblank;
8048 bool vrr_active = amdgpu_dm_crtc_vrr_active(acrtc_state);
8049 bool cursor_update = false;
8050 bool pflip_present = false;
8051 bool dirty_rects_changed = false;
8052 struct {
8053 struct dc_surface_update surface_updates[MAX_SURFACES];
8054 struct dc_plane_info plane_infos[MAX_SURFACES];
8055 struct dc_scaling_info scaling_infos[MAX_SURFACES];
8056 struct dc_flip_addrs flip_addrs[MAX_SURFACES];
8057 struct dc_stream_update stream_update;
8058 } *bundle;
8059
8060 bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
8061
8062 if (!bundle) {
8063 dm_error("Failed to allocate update bundle\n");
8064 goto cleanup;
8065 }
8066
8067 /*
8068 * Disable the cursor first if we're disabling all the planes.
8069 * It'll remain on the screen after the planes are re-enabled
8070 * if we don't.
8071 */
8072 if (acrtc_state->active_planes == 0)
8073 amdgpu_dm_commit_cursors(state);
8074
8075 /* update planes when needed */
8076 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
8077 struct drm_crtc *crtc = new_plane_state->crtc;
8078 struct drm_crtc_state *new_crtc_state;
8079 struct drm_framebuffer *fb = new_plane_state->fb;
8080 struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
8081 bool plane_needs_flip;
8082 struct dc_plane_state *dc_plane;
8083 struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
8084
8085 /* Cursor plane is handled after stream updates */
8086 if (plane->type == DRM_PLANE_TYPE_CURSOR) {
8087 if ((fb && crtc == pcrtc) ||
8088 (old_plane_state->fb && old_plane_state->crtc == pcrtc))
8089 cursor_update = true;
8090
8091 continue;
8092 }
8093
8094 if (!fb || !crtc || pcrtc != crtc)
8095 continue;
8096
8097 new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
8098 if (!new_crtc_state->active)
8099 continue;
8100
8101 dc_plane = dm_new_plane_state->dc_state;
8102 if (!dc_plane)
8103 continue;
8104
8105 bundle->surface_updates[planes_count].surface = dc_plane;
8106 if (new_pcrtc_state->color_mgmt_changed) {
8107 bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
8108 bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
8109 bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
8110 }
8111
8112 amdgpu_dm_plane_fill_dc_scaling_info(dm->adev, new_plane_state,
8113 &bundle->scaling_infos[planes_count]);
8114
8115 bundle->surface_updates[planes_count].scaling_info =
8116 &bundle->scaling_infos[planes_count];
8117
8118 plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
8119
8120 pflip_present = pflip_present || plane_needs_flip;
8121
8122 if (!plane_needs_flip) {
8123 planes_count += 1;
8124 continue;
8125 }
8126
8127 fill_dc_plane_info_and_addr(
8128 dm->adev, new_plane_state,
8129 afb->tiling_flags,
8130 &bundle->plane_infos[planes_count],
8131 &bundle->flip_addrs[planes_count].address,
8132 afb->tmz_surface, false);
8133
8134 drm_dbg_state(state->dev, "plane: id=%d dcc_en=%d\n",
8135 new_plane_state->plane->index,
8136 bundle->plane_infos[planes_count].dcc.enable);
8137
8138 bundle->surface_updates[planes_count].plane_info =
8139 &bundle->plane_infos[planes_count];
8140
8141 if (acrtc_state->stream->link->psr_settings.psr_feature_enabled ||
8142 acrtc_state->stream->link->replay_settings.replay_feature_enabled) {
8143 fill_dc_dirty_rects(plane, old_plane_state,
8144 new_plane_state, new_crtc_state,
8145 &bundle->flip_addrs[planes_count],
8146 &dirty_rects_changed);
8147
8148 /*
8149 * If the dirty regions changed, PSR-SU needs to be disabled temporarily
8150 * and enabled again once the dirty regions are stable, to avoid a video glitch.
8151 * PSR-SU will be re-enabled in vblank_control_worker() if the user pauses the
8152 * video while PSR-SU is disabled.
8153 */
8154 if (acrtc_state->stream->link->psr_settings.psr_version >= DC_PSR_VERSION_SU_1 &&
8155 acrtc_attach->dm_irq_params.allow_psr_entry &&
8156 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
8157 !amdgpu_dm_crc_window_is_activated(acrtc_state->base.crtc) &&
8158 #endif
8159 dirty_rects_changed) {
8160 mutex_lock(&dm->dc_lock);
8161 acrtc_state->stream->link->psr_settings.psr_dirty_rects_change_timestamp_ns =
8162 timestamp_ns;
8163 if (acrtc_state->stream->link->psr_settings.psr_allow_active)
8164 amdgpu_dm_psr_disable(acrtc_state->stream);
8165 mutex_unlock(&dm->dc_lock);
8166 }
8167 }
8168
8169 /*
8170 * Only allow immediate flips for fast updates that don't
8171 * change memory domain, FB pitch, DCC state, rotation or
8172 * mirroring.
8173 *
8174 * dm_crtc_helper_atomic_check() only accepts async flips with
8175 * fast updates.
8176 */
8177 if (crtc->state->async_flip &&
8178 (acrtc_state->update_type != UPDATE_TYPE_FAST ||
8179 get_mem_type(old_plane_state->fb) != get_mem_type(fb)))
8180 drm_warn_once(state->dev,
8181 "[PLANE:%d:%s] async flip with non-fast update\n",
8182 plane->base.id, plane->name);
8183
8184 bundle->flip_addrs[planes_count].flip_immediate =
8185 crtc->state->async_flip &&
8186 acrtc_state->update_type == UPDATE_TYPE_FAST &&
8187 get_mem_type(old_plane_state->fb) == get_mem_type(fb);
8188
8189 timestamp_ns = ktime_get_ns();
8190 bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
8191 bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
8192 bundle->surface_updates[planes_count].surface = dc_plane;
8193
8194 if (!bundle->surface_updates[planes_count].surface) {
8195 DRM_ERROR("No surface for CRTC: id=%d\n",
8196 acrtc_attach->crtc_id);
8197 continue;
8198 }
8199
8200 if (plane == pcrtc->primary)
8201 update_freesync_state_on_stream(
8202 dm,
8203 acrtc_state,
8204 acrtc_state->stream,
8205 dc_plane,
8206 bundle->flip_addrs[planes_count].flip_timestamp_in_us);
8207
8208 drm_dbg_state(state->dev, "%s Flipping to hi: 0x%x, low: 0x%x\n",
8209 __func__,
8210 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
8211 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
8212
8213 planes_count += 1;
8214
8215 }
8216
8217 if (pflip_present) {
8218 if (!vrr_active) {
8219 /* Use old throttling in non-vrr fixed refresh rate mode
8220 * to keep flip scheduling based on target vblank counts
8221 * working in a backwards compatible way, e.g., for
8222 * clients using the GLX_OML_sync_control extension or
8223 * DRI3/Present extension with defined target_msc.
8224 */
8225 last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
8226 } else {
8227 /* For variable refresh rate mode only:
8228 * Get vblank of last completed flip to avoid > 1 vrr
8229 * flips per video frame by use of throttling, but allow
8230 * flip programming anywhere in the possibly large
8231 * variable vrr vblank interval for fine-grained flip
8232 * timing control and more opportunity to avoid stutter
8233 * on late submission of flips.
8234 */
8235 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8236 last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
8237 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8238 }
8239
8240 target_vblank = last_flip_vblank + wait_for_vblank;
8241
8242 /*
8243 * Wait until we're out of the vertical blank period before the one
8244 * targeted by the flip
8245 */
8246 while ((acrtc_attach->enabled &&
8247 (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
8248 0, &vpos, &hpos, NULL,
8249 NULL, &pcrtc->hwmode)
8250 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
8251 (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
8252 (int)(target_vblank -
8253 amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
8254 usleep_range(1000, 1100);
8255 }
8256
8257 /**
8258 * Prepare the flip event for the pageflip interrupt to handle.
8259 *
8260 * This only works in the case where we've already turned on the
8261 * appropriate hardware blocks (eg. HUBP) so in the transition case
8262 * from 0 -> n planes we have to skip a hardware generated event
8263 * and rely on sending it from software.
8264 */
8265 if (acrtc_attach->base.state->event &&
8266 acrtc_state->active_planes > 0) {
8267 drm_crtc_vblank_get(pcrtc);
8268
8269 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8270
8271 WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
8272 prepare_flip_isr(acrtc_attach);
8273
8274 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8275 }
8276
8277 if (acrtc_state->stream) {
8278 if (acrtc_state->freesync_vrr_info_changed)
8279 bundle->stream_update.vrr_infopacket =
8280 &acrtc_state->stream->vrr_infopacket;
8281 }
8282 } else if (cursor_update && acrtc_state->active_planes > 0 &&
8283 acrtc_attach->base.state->event) {
8284 drm_crtc_vblank_get(pcrtc);
8285
8286 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8287
8288 acrtc_attach->event = acrtc_attach->base.state->event;
8289 acrtc_attach->base.state->event = NULL;
8290
8291 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8292 }
8293
8294 /* Update the planes if changed or disable if we don't have any. */
8295 if ((planes_count || acrtc_state->active_planes == 0) &&
8296 acrtc_state->stream) {
8297 /*
8298 * If PSR or idle optimizations are enabled then flush out
8299 * any pending work before hardware programming.
8300 */
8301 if (dm->vblank_control_workqueue)
8302 flush_workqueue(dm->vblank_control_workqueue);
8303
8304 bundle->stream_update.stream = acrtc_state->stream;
8305 if (new_pcrtc_state->mode_changed) {
8306 bundle->stream_update.src = acrtc_state->stream->src;
8307 bundle->stream_update.dst = acrtc_state->stream->dst;
8308 }
8309
8310 if (new_pcrtc_state->color_mgmt_changed) {
8311 /*
8312 * TODO: This isn't fully correct since we've actually
8313 * already modified the stream in place.
8314 */
8315 bundle->stream_update.gamut_remap =
8316 &acrtc_state->stream->gamut_remap_matrix;
8317 bundle->stream_update.output_csc_transform =
8318 &acrtc_state->stream->csc_color_matrix;
8319 bundle->stream_update.out_transfer_func =
8320 acrtc_state->stream->out_transfer_func;
8321 }
8322
8323 acrtc_state->stream->abm_level = acrtc_state->abm_level;
8324 if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
8325 bundle->stream_update.abm_level = &acrtc_state->abm_level;
8326
8327 mutex_lock(&dm->dc_lock);
8328 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
8329 acrtc_state->stream->link->psr_settings.psr_allow_active)
8330 amdgpu_dm_psr_disable(acrtc_state->stream);
8331 mutex_unlock(&dm->dc_lock);
8332
8333 /*
8334 * If FreeSync state on the stream has changed then we need to
8335 * re-adjust the min/max bounds now that DC doesn't handle this
8336 * as part of commit.
8337 */
8338 if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) {
8339 spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
8340 dc_stream_adjust_vmin_vmax(
8341 dm->dc, acrtc_state->stream,
8342 &acrtc_attach->dm_irq_params.vrr_params.adjust);
8343 spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
8344 }
8345 mutex_lock(&dm->dc_lock);
8346 update_planes_and_stream_adapter(dm->dc,
8347 acrtc_state->update_type,
8348 planes_count,
8349 acrtc_state->stream,
8350 &bundle->stream_update,
8351 bundle->surface_updates);
8352
8353 /**
8354 * Enable or disable the interrupts on the backend.
8355 *
8356 * Most pipes are put into power gating when unused.
8357 *
8358 * When power gating is enabled on a pipe we lose the
8359 * interrupt enablement state when power gating is disabled.
8360 *
8361 * So we need to update the IRQ control state in hardware
8362 * whenever the pipe turns on (since it could be previously
8363 * power gated) or off (since some pipes can't be power gated
8364 * on some ASICs).
8365 */
8366 if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
8367 dm_update_pflip_irq_state(drm_to_adev(dev),
8368 acrtc_attach);
8369
8370 if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
8371 acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
8372 !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
8373 amdgpu_dm_link_setup_psr(acrtc_state->stream);
8374
8375 /* Decrement skip count when PSR is enabled and we're doing fast updates. */
8376 if (acrtc_state->update_type == UPDATE_TYPE_FAST &&
8377 acrtc_state->stream->link->psr_settings.psr_feature_enabled) {
8378 struct amdgpu_dm_connector *aconn =
8379 (struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context;
8380
8381 if (aconn->psr_skip_count > 0)
8382 aconn->psr_skip_count--;
8383
8384 /* Allow PSR when skip count is 0. */
8385 acrtc_attach->dm_irq_params.allow_psr_entry = !aconn->psr_skip_count;
8386
8387 /*
8388 * If sink supports PSR SU, there is no need to rely on
8389 * a vblank event disable request to enable PSR. PSR SU
8390 * can be enabled immediately once OS demonstrates an
8391 * adequate number of fast atomic commits to notify KMD
8392 * of update events. See `vblank_control_worker()`.
8393 */
8394 if (acrtc_state->stream->link->psr_settings.psr_version >= DC_PSR_VERSION_SU_1 &&
8395 acrtc_attach->dm_irq_params.allow_psr_entry &&
8396 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
8397 !amdgpu_dm_crc_window_is_activated(acrtc_state->base.crtc) &&
8398 #endif
8399 !acrtc_state->stream->link->psr_settings.psr_allow_active &&
8400 (timestamp_ns -
8401 acrtc_state->stream->link->psr_settings.psr_dirty_rects_change_timestamp_ns) >
8402 500000000)
8403 amdgpu_dm_psr_enable(acrtc_state->stream);
8404 } else {
8405 acrtc_attach->dm_irq_params.allow_psr_entry = false;
8406 }
8407
8408 mutex_unlock(&dm->dc_lock);
8409 }
8410
8411 /*
8412 * Update cursor state *after* programming all the planes.
8413 * This avoids redundant programming in the case where we're going
8414 * to be disabling a single plane - those pipes are being disabled.
8415 */
8416 if (acrtc_state->active_planes)
8417 amdgpu_dm_commit_cursors(state);
8418
8419 cleanup:
8420 kfree(bundle);
8421 }
8422
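/*
 * Walk the connector states in @state and keep the audio side in sync:
 * connectors that lost or changed their CRTC have their audio instance
 * cleared (set to -1) and a removal notification sent, while connectors on
 * newly modeset CRTCs pick up the audio instance from the stream status and
 * notify the audio driver of the addition.
 */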
8423 static void amdgpu_dm_commit_audio(struct drm_device *dev,
8424 struct drm_atomic_state *state)
8425 {
8426 struct amdgpu_device *adev = drm_to_adev(dev);
8427 struct amdgpu_dm_connector *aconnector;
8428 struct drm_connector *connector;
8429 struct drm_connector_state *old_con_state, *new_con_state;
8430 struct drm_crtc_state *new_crtc_state;
8431 struct dm_crtc_state *new_dm_crtc_state;
8432 const struct dc_stream_status *status;
8433 int i, inst;
8434
8435 /* Notify device removals. */
8436 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8437 if (old_con_state->crtc != new_con_state->crtc) {
8438 /* CRTC changes require notification. */
8439 goto notify;
8440 }
8441
8442 if (!new_con_state->crtc)
8443 continue;
8444
8445 new_crtc_state = drm_atomic_get_new_crtc_state(
8446 state, new_con_state->crtc);
8447
8448 if (!new_crtc_state)
8449 continue;
8450
8451 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8452 continue;
8453
8454 notify:
8455 aconnector = to_amdgpu_dm_connector(connector);
8456
8457 mutex_lock(&adev->dm.audio_lock);
8458 inst = aconnector->audio_inst;
8459 aconnector->audio_inst = -1;
8460 mutex_unlock(&adev->dm.audio_lock);
8461
8462 amdgpu_dm_audio_eld_notify(adev, inst);
8463 }
8464
8465 /* Notify audio device additions. */
8466 for_each_new_connector_in_state(state, connector, new_con_state, i) {
8467 if (!new_con_state->crtc)
8468 continue;
8469
8470 new_crtc_state = drm_atomic_get_new_crtc_state(
8471 state, new_con_state->crtc);
8472
8473 if (!new_crtc_state)
8474 continue;
8475
8476 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
8477 continue;
8478
8479 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
8480 if (!new_dm_crtc_state->stream)
8481 continue;
8482
8483 status = dc_stream_get_status(new_dm_crtc_state->stream);
8484 if (!status)
8485 continue;
8486
8487 aconnector = to_amdgpu_dm_connector(connector);
8488
8489 mutex_lock(&adev->dm.audio_lock);
8490 inst = status->audio_inst;
8491 aconnector->audio_inst = inst;
8492 mutex_unlock(&adev->dm.audio_lock);
8493
8494 amdgpu_dm_audio_eld_notify(adev, inst);
8495 }
8496 }
8497
8498 /*
8499 * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
8500 * @crtc_state: the DRM CRTC state
8501 * @stream_state: the DC stream state.
8502 *
8503 * Copy the mirrored transient state flags from DRM to DC. It is used to bring
8504 * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
8505 */
8506 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
8507 struct dc_stream_state *stream_state)
8508 {
8509 stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
8510 }
8511
8512 static void amdgpu_dm_commit_streams(struct drm_atomic_state *state,
8513 struct dc_state *dc_state)
8514 {
8515 struct drm_device *dev = state->dev;
8516 struct amdgpu_device *adev = drm_to_adev(dev);
8517 struct amdgpu_display_manager *dm = &adev->dm;
8518 struct drm_crtc *crtc;
8519 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8520 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
8521 bool mode_set_reset_required = false;
8522 u32 i;
8523
8524 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
8525 new_crtc_state, i) {
8526 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8527
8528 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8529
8530 if (old_crtc_state->active &&
8531 (!new_crtc_state->active ||
8532 drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8533 manage_dm_interrupts(adev, acrtc, false);
8534 dc_stream_release(dm_old_crtc_state->stream);
8535 }
8536 }
8537
8538 drm_atomic_helper_calc_timestamping_constants(state);
8539
8540 /* update changed items */
8541 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8542 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8543
8544 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8545 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8546
8547 drm_dbg_state(state->dev,
8548 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, planes_changed:%d, mode_changed:%d,active_changed:%d,connectors_changed:%d\n",
8549 acrtc->crtc_id,
8550 new_crtc_state->enable,
8551 new_crtc_state->active,
8552 new_crtc_state->planes_changed,
8553 new_crtc_state->mode_changed,
8554 new_crtc_state->active_changed,
8555 new_crtc_state->connectors_changed);
8556
8557 /* Disable cursor if disabling crtc */
8558 if (old_crtc_state->active && !new_crtc_state->active) {
8559 struct dc_cursor_position position;
8560
8561 memset(&position, 0, sizeof(position));
8562 mutex_lock(&dm->dc_lock);
8563 dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
8564 mutex_unlock(&dm->dc_lock);
8565 }
8566
8567 /* Copy all transient state flags into dc state */
8568 if (dm_new_crtc_state->stream) {
8569 amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
8570 dm_new_crtc_state->stream);
8571 }
8572
8573 /* handles headless hotplug case, updating new_state and
8574 * aconnector as needed
8575 */
8576
8577 if (amdgpu_dm_crtc_modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
8578
8579 DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
8580
8581 if (!dm_new_crtc_state->stream) {
8582 /*
8583 * this could happen because of issues with
8584 * userspace notifications delivery.
8585 * In this case userspace tries to set a mode on
8586 * a display which is in fact disconnected.
8587 * dc_sink is NULL in this case on aconnector.
8588 * We expect reset mode will come soon.
8589 *
8590 * This can also happen when an unplug occurs
8591 * during the resume sequence
8592 *
8593 * In this case, we want to pretend we still
8594 * have a sink to keep the pipe running so that
8595 * hw state is consistent with the sw state
8596 */
8597 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
8598 __func__, acrtc->base.base.id);
8599 continue;
8600 }
8601
8602 if (dm_old_crtc_state->stream)
8603 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8604
8605 pm_runtime_get_noresume(dev->dev);
8606
8607 acrtc->enabled = true;
8608 acrtc->hw_mode = new_crtc_state->mode;
8609 crtc->hwmode = new_crtc_state->mode;
8610 mode_set_reset_required = true;
8611 } else if (modereset_required(new_crtc_state)) {
8612 DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
8613 /* i.e. reset mode */
8614 if (dm_old_crtc_state->stream)
8615 remove_stream(adev, acrtc, dm_old_crtc_state->stream);
8616
8617 mode_set_reset_required = true;
8618 }
8619 } /* for_each_crtc_in_state() */
8620
8621 /* if there is a mode set or reset, disable eDP PSR */
8622 if (mode_set_reset_required) {
8623 if (dm->vblank_control_workqueue)
8624 flush_workqueue(dm->vblank_control_workqueue);
8625
8626 amdgpu_dm_psr_disable_all(dm);
8627 }
8628
8629 dm_enable_per_frame_crtc_master_sync(dc_state);
8630 mutex_lock(&dm->dc_lock);
8631 WARN_ON(!dc_commit_streams(dm->dc, dc_state->streams, dc_state->stream_count));
8632
8633 /* Allow idle optimization when vblank count is 0 for display off */
8634 if (dm->active_vblank_irq_count == 0)
8635 dc_allow_idle_optimizations(dm->dc, true);
8636 mutex_unlock(&dm->dc_lock);
8637
8638 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
8639 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8640
8641 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8642
8643 if (dm_new_crtc_state->stream != NULL) {
8644 const struct dc_stream_status *status =
8645 dc_stream_get_status(dm_new_crtc_state->stream);
8646
8647 if (!status)
8648 status = dc_stream_get_status_from_state(dc_state,
8649 dm_new_crtc_state->stream);
8650 if (!status)
8651 DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
8652 else
8653 acrtc->otg_inst = status->primary_otg_inst;
8654 }
8655 }
8656 }
8657
8658 /**
8659 * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
8660 * @state: The atomic state to commit
8661 *
8662 * This will tell DC to commit the constructed DC state from atomic_check,
8663 * programming the hardware. Any failure here implies a hardware failure, since
8664 * atomic check should have filtered anything non-kosher.
8665 */
8666 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
8667 {
8668 struct drm_device *dev = state->dev;
8669 struct amdgpu_device *adev = drm_to_adev(dev);
8670 struct amdgpu_display_manager *dm = &adev->dm;
8671 struct dm_atomic_state *dm_state;
8672 struct dc_state *dc_state = NULL;
8673 u32 i, j;
8674 struct drm_crtc *crtc;
8675 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
8676 unsigned long flags;
8677 bool wait_for_vblank = true;
8678 struct drm_connector *connector;
8679 struct drm_connector_state *old_con_state, *new_con_state;
8680 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
8681 int crtc_disable_count = 0;
8682
8683 trace_amdgpu_dm_atomic_commit_tail_begin(state);
8684
8685 drm_atomic_helper_update_legacy_modeset_state(dev, state);
8686 drm_dp_mst_atomic_wait_for_dependencies(state);
8687
8688 dm_state = dm_atomic_get_new_state(state);
8689 if (dm_state && dm_state->context) {
8690 dc_state = dm_state->context;
8691 amdgpu_dm_commit_streams(state, dc_state);
8692 }
8693
8694 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8695 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8696 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8697 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8698
8699 if (!adev->dm.hdcp_workqueue)
8700 continue;
8701
8702 pr_debug("[HDCP_DM] -------------- i : %x ----------\n", i);
8703
8704 if (!connector)
8705 continue;
8706
8707 pr_debug("[HDCP_DM] connector->index: %x connect_status: %x dpms: %x\n",
8708 connector->index, connector->status, connector->dpms);
8709 pr_debug("[HDCP_DM] state protection old: %x new: %x\n",
8710 old_con_state->content_protection, new_con_state->content_protection);
8711
8712 if (aconnector->dc_sink) {
8713 if (aconnector->dc_sink->sink_signal != SIGNAL_TYPE_VIRTUAL &&
8714 aconnector->dc_sink->sink_signal != SIGNAL_TYPE_NONE) {
8715 pr_debug("[HDCP_DM] pipe_ctx dispname=%s\n",
8716 aconnector->dc_sink->edid_caps.display_name);
8717 }
8718 }
8719
8720 new_crtc_state = NULL;
8721 old_crtc_state = NULL;
8722
8723 if (acrtc) {
8724 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8725 old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
8726 }
8727
8728 if (old_crtc_state)
8729 pr_debug("old crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n",
8730 old_crtc_state->enable,
8731 old_crtc_state->active,
8732 old_crtc_state->mode_changed,
8733 old_crtc_state->active_changed,
8734 old_crtc_state->connectors_changed);
8735
8736 if (new_crtc_state)
8737 pr_debug("NEW crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n",
8738 new_crtc_state->enable,
8739 new_crtc_state->active,
8740 new_crtc_state->mode_changed,
8741 new_crtc_state->active_changed,
8742 new_crtc_state->connectors_changed);
8743 }
8744
8745 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8746 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8747 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8748 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8749
8750 if (!adev->dm.hdcp_workqueue)
8751 continue;
8752
8753 new_crtc_state = NULL;
8754 old_crtc_state = NULL;
8755
8756 if (acrtc) {
8757 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8758 old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
8759 }
8760
8761 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8762
8763 if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
8764 connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
8765 hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
8766 new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8767 dm_new_con_state->update_hdcp = true;
8768 continue;
8769 }
8770
8771 if (is_content_protection_different(new_crtc_state, old_crtc_state, new_con_state,
8772 old_con_state, connector, adev->dm.hdcp_workqueue)) {
8773 * when a display is unplugged from an mst hub, the connector will
8774 * be destroyed within dm_dp_mst_connector_destroy. connector
8775 * hdcp properties, like type, undesired, desired, enabled,
8776 * will be lost. So, save hdcp properties into hdcp_work within
8777 * amdgpu_dm_atomic_commit_tail. if the same display is
8778 * plugged back with same display index, its hdcp properties
8779 * will be retrieved from hdcp_work within dm_dp_mst_get_modes
8780 */
8781
8782 bool enable_encryption = false;
8783
8784 if (new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED)
8785 enable_encryption = true;
8786
8787 if (aconnector->dc_link && aconnector->dc_sink &&
8788 aconnector->dc_link->type == dc_connection_mst_branch) {
8789 struct hdcp_workqueue *hdcp_work = adev->dm.hdcp_workqueue;
8790 struct hdcp_workqueue *hdcp_w =
8791 &hdcp_work[aconnector->dc_link->link_index];
8792
8793 hdcp_w->hdcp_content_type[connector->index] =
8794 new_con_state->hdcp_content_type;
8795 hdcp_w->content_protection[connector->index] =
8796 new_con_state->content_protection;
8797 }
8798
8799 if (new_crtc_state && new_crtc_state->mode_changed &&
8800 new_con_state->content_protection >= DRM_MODE_CONTENT_PROTECTION_DESIRED)
8801 enable_encryption = true;
8802
8803 DRM_INFO("[HDCP_DM] hdcp_update_display enable_encryption = %x\n", enable_encryption);
8804
8805 hdcp_update_display(
8806 adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
8807 new_con_state->hdcp_content_type, enable_encryption);
8808 }
8809 }
8810
8811 /* Handle connector state changes */
8812 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
8813 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
8814 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
8815 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
8816 struct dc_surface_update *dummy_updates;
8817 struct dc_stream_update stream_update;
8818 struct dc_info_packet hdr_packet;
8819 struct dc_stream_status *status = NULL;
8820 bool abm_changed, hdr_changed, scaling_changed;
8821
8822 memset(&stream_update, 0, sizeof(stream_update));
8823
8824 if (acrtc) {
8825 new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
8826 old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
8827 }
8828
8829 /* Skip any modesets/resets */
8830 if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
8831 continue;
8832
8833 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8834 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8835
8836 scaling_changed = is_scaling_state_different(dm_new_con_state,
8837 dm_old_con_state);
8838
8839 abm_changed = dm_new_crtc_state->abm_level !=
8840 dm_old_crtc_state->abm_level;
8841
8842 hdr_changed =
8843 !drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state);
8844
8845 if (!scaling_changed && !abm_changed && !hdr_changed)
8846 continue;
8847
8848 stream_update.stream = dm_new_crtc_state->stream;
8849 if (scaling_changed) {
8850 update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
8851 dm_new_con_state, dm_new_crtc_state->stream);
8852
8853 stream_update.src = dm_new_crtc_state->stream->src;
8854 stream_update.dst = dm_new_crtc_state->stream->dst;
8855 }
8856
8857 if (abm_changed) {
8858 dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
8859
8860 stream_update.abm_level = &dm_new_crtc_state->abm_level;
8861 }
8862
8863 if (hdr_changed) {
8864 fill_hdr_info_packet(new_con_state, &hdr_packet);
8865 stream_update.hdr_static_metadata = &hdr_packet;
8866 }
8867
8868 status = dc_stream_get_status(dm_new_crtc_state->stream);
8869
8870 if (WARN_ON(!status))
8871 continue;
8872
8873 WARN_ON(!status->plane_count);
8874
8875 /*
8876 * TODO: DC refuses to perform stream updates without a dc_surface_update.
8877 * Here we create an empty update on each plane.
8878 * To fix this, DC should permit updating only stream properties.
8879 */
8880 dummy_updates = kzalloc(sizeof(struct dc_surface_update) * MAX_SURFACES, GFP_ATOMIC);
if (!dummy_updates) {
	DRM_ERROR("Failed to allocate memory for surface updates\n");
	continue;
}
8881 for (j = 0; j < status->plane_count; j++)
8882 dummy_updates[j].surface = status->plane_states[0];
8883
8884
8885 mutex_lock(&dm->dc_lock);
8886 dc_update_planes_and_stream(dm->dc,
8887 dummy_updates,
8888 status->plane_count,
8889 dm_new_crtc_state->stream,
8890 &stream_update);
8891 mutex_unlock(&dm->dc_lock);
8892 kfree(dummy_updates);
8893 }
8894
8895 /**
8896 * Enable interrupts for CRTCs that are newly enabled or went through
8897 * a modeset. It was intentionally deferred until after the front end
8898 * state was modified to wait until the OTG was on and so the IRQ
8899 * handlers didn't access stale or invalid state.
8900 */
8901 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
8902 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
8903 #ifdef CONFIG_DEBUG_FS
8904 enum amdgpu_dm_pipe_crc_source cur_crc_src;
8905 #endif
8906 /* Count number of newly disabled CRTCs for dropping PM refs later. */
8907 if (old_crtc_state->active && !new_crtc_state->active)
8908 crtc_disable_count++;
8909
8910 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8911 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
8912
8913 /* For freesync config update on crtc state and params for irq */
8914 update_stream_irq_parameters(dm, dm_new_crtc_state);
8915
8916 #ifdef CONFIG_DEBUG_FS
8917 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8918 cur_crc_src = acrtc->dm_irq_params.crc_src;
8919 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8920 #endif
8921
8922 if (new_crtc_state->active &&
8923 (!old_crtc_state->active ||
8924 drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8925 dc_stream_retain(dm_new_crtc_state->stream);
8926 acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
8927 manage_dm_interrupts(adev, acrtc, true);
8928 }
8929 /* Handle vrr on->off / off->on transitions */
8930 amdgpu_dm_handle_vrr_transition(dm_old_crtc_state, dm_new_crtc_state);
8931
8932 #ifdef CONFIG_DEBUG_FS
8933 if (new_crtc_state->active &&
8934 (!old_crtc_state->active ||
8935 drm_atomic_crtc_needs_modeset(new_crtc_state))) {
8936 /**
8937 * Frontend may have changed so reapply the CRC capture
8938 * settings for the stream.
8939 */
8940 if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
8941 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8942 if (amdgpu_dm_crc_window_is_activated(crtc)) {
8943 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8944 acrtc->dm_irq_params.window_param.update_win = true;
8945
8946 /**
8947 * It takes 2 frames for HW to stably generate CRC when
8948 * resuming from suspend, so we set skip_frame_cnt to 2.
8949 */
8950 acrtc->dm_irq_params.window_param.skip_frame_cnt = 2;
8951 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8952 }
8953 #endif
8954 if (amdgpu_dm_crtc_configure_crc_source(
8955 crtc, dm_new_crtc_state, cur_crc_src))
8956 DRM_DEBUG_DRIVER("Failed to configure crc source");
8957 }
8958 }
8959 #endif
8960 }
8961
8962 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
8963 if (new_crtc_state->async_flip)
8964 wait_for_vblank = false;
8965
8966 /* update planes when needed per crtc*/
8967 for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
8968 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
8969
8970 if (dm_new_crtc_state->stream)
8971 amdgpu_dm_commit_planes(state, dev, dm, crtc, wait_for_vblank);
8972 }
8973
8974 /* Update audio instances for each connector. */
8975 amdgpu_dm_commit_audio(dev, state);
8976
8977 /* restore the backlight level */
8978 for (i = 0; i < dm->num_of_edps; i++) {
8979 if (dm->backlight_dev[i] &&
8980 (dm->actual_brightness[i] != dm->brightness[i]))
8981 amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]);
8982 }
8983
8984 /*
8985 * send vblank event on all events not handled in flip and
8986 * mark consumed event for drm_atomic_helper_commit_hw_done
8987 */
8988 spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8989 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
8990
8991 if (new_crtc_state->event)
8992 drm_send_event_locked(dev, &new_crtc_state->event->base);
8993
8994 new_crtc_state->event = NULL;
8995 }
8996 spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8997
8998 /* Signal HW programming completion */
8999 drm_atomic_helper_commit_hw_done(state);
9000
9001 if (wait_for_vblank)
9002 drm_atomic_helper_wait_for_flip_done(dev, state);
9003
9004 drm_atomic_helper_cleanup_planes(dev, state);
9005
9006 /* Don't free the memory if we are hitting this as part of suspend.
9007 * This way we don't free any memory during suspend; see
9008 * amdgpu_bo_free_kernel(). The memory will be freed in the first
9009 * non-suspend modeset or when the driver is torn down.
9010 */
9011 if (!adev->in_suspend) {
9012 /* return the stolen vga memory back to VRAM */
9013 if (!adev->mman.keep_stolen_vga_memory)
9014 amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
9015 amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
9016 }
9017
9018 /*
9019 * Finally, drop a runtime PM reference for each newly disabled CRTC,
9020 * so we can put the GPU into runtime suspend if we're not driving any
9021 * displays anymore
9022 */
9023 for (i = 0; i < crtc_disable_count; i++)
9024 pm_runtime_put_autosuspend(dev->dev);
9025 pm_runtime_mark_last_busy(dev->dev);
9026 }
9027
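/*
 * Build a minimal atomic state that touches only @connector, its currently
 * attached CRTC and that CRTC's primary plane, force mode_changed on the
 * CRTC state, and commit it. Used below to restore the previous display
 * configuration when no usermode set-mode call can be relied upon.
 */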
9028 static int dm_force_atomic_commit(struct drm_connector *connector)
9029 {
9030 int ret = 0;
9031 struct drm_device *ddev = connector->dev;
9032 struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
9033 struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9034 struct drm_plane *plane = disconnected_acrtc->base.primary;
9035 struct drm_connector_state *conn_state;
9036 struct drm_crtc_state *crtc_state;
9037 struct drm_plane_state *plane_state;
9038
9039 if (!state)
9040 return -ENOMEM;
9041
9042 state->acquire_ctx = ddev->mode_config.acquire_ctx;
9043
9044 /* Construct an atomic state to restore previous display setting */
9045
9046 /*
9047 * Attach connectors to drm_atomic_state
9048 */
9049 conn_state = drm_atomic_get_connector_state(state, connector);
9050
9051 ret = PTR_ERR_OR_ZERO(conn_state);
9052 if (ret)
9053 goto out;
9054
9055 /* Attach crtc to drm_atomic_state*/
9056 crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
9057
9058 ret = PTR_ERR_OR_ZERO(crtc_state);
9059 if (ret)
9060 goto out;
9061
9062 /* force a restore */
9063 crtc_state->mode_changed = true;
9064
9065 /* Attach plane to drm_atomic_state */
9066 plane_state = drm_atomic_get_plane_state(state, plane);
9067
9068 ret = PTR_ERR_OR_ZERO(plane_state);
9069 if (ret)
9070 goto out;
9071
9072 /* Call commit internally with the state we just constructed */
9073 ret = drm_atomic_commit(state);
9074
9075 out:
9076 drm_atomic_state_put(state);
9077 if (ret)
9078 DRM_ERROR("Restoring old state failed with %i\n", ret);
9079
9080 return ret;
9081 }
9082
9083 /*
9084 * This function handles all cases when set mode does not come upon hotplug.
9085 * This includes when a display is unplugged then plugged back into the
9086 * same port, and when running without usermode desktop manager support.
9087 */
9088 void dm_restore_drm_connector_state(struct drm_device *dev,
9089 struct drm_connector *connector)
9090 {
9091 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
9092 struct amdgpu_crtc *disconnected_acrtc;
9093 struct dm_crtc_state *acrtc_state;
9094
9095 if (!aconnector->dc_sink || !connector->state || !connector->encoder)
9096 return;
9097
9098 disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9099 if (!disconnected_acrtc)
9100 return;
9101
9102 acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
9103 if (!acrtc_state->stream)
9104 return;
9105
9106 /*
9107 * If the previous sink is not released and different from the current,
9108 * we deduce we are in a state where we can not rely on usermode call
9109 * to turn on the display, so we do it here
9110 */
9111 if (acrtc_state->stream->sink != aconnector->dc_sink)
9112 dm_force_atomic_commit(&aconnector->base);
9113 }
9114
9115 /*
9116 * Grabs all modesetting locks to serialize against any blocking commits, and
9117 * waits for completion of all non-blocking commits.
9118 */
9119 static int do_aquire_global_lock(struct drm_device *dev,
9120 struct drm_atomic_state *state)
9121 {
9122 struct drm_crtc *crtc;
9123 struct drm_crtc_commit *commit;
9124 long ret;
9125
9126 /*
9127 * Adding all modeset locks to acquire_ctx will
9128 * ensure that when the framework releases it, the
9129 * extra locks we are taking here will get released too
9130 */
9131 ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
9132 if (ret)
9133 return ret;
9134
9135 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
9136 spin_lock(&crtc->commit_lock);
9137 commit = list_first_entry_or_null(&crtc->commit_list,
9138 struct drm_crtc_commit, commit_entry);
9139 if (commit)
9140 drm_crtc_commit_get(commit);
9141 spin_unlock(&crtc->commit_lock);
9142
9143 if (!commit)
9144 continue;
9145
9146 /*
9147 * Make sure all pending HW programming completed and
9148 * page flips done
9149 */
9150 ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
9151
9152 if (ret > 0)
9153 ret = wait_for_completion_interruptible_timeout(
9154 &commit->flip_done, 10*HZ);
9155
9156 if (ret == 0)
9157 DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done timed out\n",
9158 crtc->base.id, crtc->name);
9159
9160 drm_crtc_commit_put(commit);
9161 }
9162
9163 return ret < 0 ? ret : 0;
9164 }
9165
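/*
 * Derive the FreeSync configuration for a CRTC from the connector state:
 * VRR is supported when the connector reports freesync capability and the
 * mode's refresh rate falls within [min_vfreq, max_vfreq]. The refresh
 * bounds are converted from Hz to micro-Hz (x 1000000) for the freesync
 * module.
 */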
9166 static void get_freesync_config_for_crtc(
9167 struct dm_crtc_state *new_crtc_state,
9168 struct dm_connector_state *new_con_state)
9169 {
9170 struct mod_freesync_config config = {0};
9171 struct amdgpu_dm_connector *aconnector =
9172 to_amdgpu_dm_connector(new_con_state->base.connector);
9173 struct drm_display_mode *mode = &new_crtc_state->base.mode;
9174 int vrefresh = drm_mode_vrefresh(mode);
9175 bool fs_vid_mode = false;
9176
9177 new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
9178 vrefresh >= aconnector->min_vfreq &&
9179 vrefresh <= aconnector->max_vfreq;
9180
9181 if (new_crtc_state->vrr_supported) {
9182 new_crtc_state->stream->ignore_msa_timing_param = true;
9183 fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
9184
9185 config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
9186 config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
9187 config.vsif_supported = true;
9188 config.btr = true;
9189
9190 if (fs_vid_mode) {
9191 config.state = VRR_STATE_ACTIVE_FIXED;
9192 config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
9193 goto out;
9194 } else if (new_crtc_state->base.vrr_enabled) {
9195 config.state = VRR_STATE_ACTIVE_VARIABLE;
9196 } else {
9197 config.state = VRR_STATE_INACTIVE;
9198 }
9199 }
9200 out:
9201 new_crtc_state->freesync_config = config;
9202 }
9203
9204 static void reset_freesync_config_for_crtc(
9205 struct dm_crtc_state *new_crtc_state)
9206 {
9207 new_crtc_state->vrr_supported = false;
9208
9209 memset(&new_crtc_state->vrr_infopacket, 0,
9210 sizeof(new_crtc_state->vrr_infopacket));
9211 }
9212
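/*
 * Two modes are treated as "unchanged" for freesync purposes when every
 * horizontal parameter and the vsync pulse width match while vtotal and the
 * vsync position differ, i.e. only the vertical blanking (front porch)
 * changed. Such a change can be absorbed without a full modeset.
 */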
9213 static bool
9214 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
9215 struct drm_crtc_state *new_crtc_state)
9216 {
9217 const struct drm_display_mode *old_mode, *new_mode;
9218
9219 if (!old_crtc_state || !new_crtc_state)
9220 return false;
9221
9222 old_mode = &old_crtc_state->mode;
9223 new_mode = &new_crtc_state->mode;
9224
9225 if (old_mode->clock == new_mode->clock &&
9226 old_mode->hdisplay == new_mode->hdisplay &&
9227 old_mode->vdisplay == new_mode->vdisplay &&
9228 old_mode->htotal == new_mode->htotal &&
9229 old_mode->vtotal != new_mode->vtotal &&
9230 old_mode->hsync_start == new_mode->hsync_start &&
9231 old_mode->vsync_start != new_mode->vsync_start &&
9232 old_mode->hsync_end == new_mode->hsync_end &&
9233 old_mode->vsync_end != new_mode->vsync_end &&
9234 old_mode->hskew == new_mode->hskew &&
9235 old_mode->vscan == new_mode->vscan &&
9236 (old_mode->vsync_end - old_mode->vsync_start) ==
9237 (new_mode->vsync_end - new_mode->vsync_start))
9238 return true;
9239
9240 return false;
9241 }
9242
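/*
 * Pin freesync to a fixed rate: the fixed refresh in micro-Hz is
 * pixel_clock_khz * 1000 * 1000000 / (htotal * vtotal), i.e. the nominal
 * refresh rate of the current mode expressed in uHz.
 */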
9243 static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state)
9244 {
9245 u64 num, den, res;
9246 struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
9247
9248 dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
9249
9250 num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
9251 den = (unsigned long long)new_crtc_state->mode.htotal *
9252 (unsigned long long)new_crtc_state->mode.vtotal;
9253
9254 res = div_u64(num, den);
9255 dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
9256 }
9257
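/*
 * Validate the DC stream for a single CRTC in the atomic state. When
 * enabling, a new stream is created and validated for the attached connector
 * and added to the DC context; when disabling, the old stream is removed and
 * released. Sets *lock_and_validation_needed when the global DC validation
 * pass in atomic_check has to run.
 */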
9258 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
9259 struct drm_atomic_state *state,
9260 struct drm_crtc *crtc,
9261 struct drm_crtc_state *old_crtc_state,
9262 struct drm_crtc_state *new_crtc_state,
9263 bool enable,
9264 bool *lock_and_validation_needed)
9265 {
9266 struct dm_atomic_state *dm_state = NULL;
9267 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9268 struct dc_stream_state *new_stream;
9269 int ret = 0;
9270
9271 /*
9272 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
9273 * update changed items
9274 */
9275 struct amdgpu_crtc *acrtc = NULL;
9276 struct amdgpu_dm_connector *aconnector = NULL;
9277 struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
9278 struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
9279
9280 new_stream = NULL;
9281
9282 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9283 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9284 acrtc = to_amdgpu_crtc(crtc);
9285 aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
9286
9287 /* TODO This hack should go away */
9288 if (aconnector && enable) {
9289 /* Make sure fake sink is created in plug-in scenario */
9290 drm_new_conn_state = drm_atomic_get_new_connector_state(state,
9291 &aconnector->base);
9292 drm_old_conn_state = drm_atomic_get_old_connector_state(state,
9293 &aconnector->base);
9294
9295 if (IS_ERR(drm_new_conn_state)) {
9296 ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
9297 goto fail;
9298 }
9299
9300 dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
9301 dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
9302
9303 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9304 goto skip_modeset;
9305
9306 new_stream = create_validate_stream_for_sink(aconnector,
9307 &new_crtc_state->mode,
9308 dm_new_conn_state,
9309 dm_old_crtc_state->stream);
9310
9311 /*
9312 * we can have no stream on ACTION_SET if a display
9313 * was disconnected during S3; in this case it is not an
9314 * error, the OS will be updated after detection and
9315 * will do the right thing on the next atomic commit
9316 */
9317
9318 if (!new_stream) {
9319 DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
9320 __func__, acrtc->base.base.id);
9321 ret = -ENOMEM;
9322 goto fail;
9323 }
9324
9325 /*
9326 * TODO: Check VSDB bits to decide whether this should
9327 * be enabled or not.
9328 */
9329 new_stream->triggered_crtc_reset.enabled =
9330 dm->force_timing_sync;
9331
9332 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
9333
9334 ret = fill_hdr_info_packet(drm_new_conn_state,
9335 &new_stream->hdr_static_metadata);
9336 if (ret)
9337 goto fail;
9338
9339 /*
9340 * If we already removed the old stream from the context
9341 * (and set the new stream to NULL) then we can't reuse
9342 * the old stream even if the stream and scaling are unchanged.
9343 * We'll hit the BUG_ON and black screen.
9344 *
9345 * TODO: Refactor this function to allow this check to work
9346 * in all conditions.
9347 */
9348 if (dm_new_crtc_state->stream &&
9349 is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
9350 goto skip_modeset;
9351
9352 if (dm_new_crtc_state->stream &&
9353 dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
9354 dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
9355 new_crtc_state->mode_changed = false;
9356 DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
9357 new_crtc_state->mode_changed);
9358 }
9359 }
9360
9361 /* mode_changed flag may get updated above, need to check again */
9362 if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9363 goto skip_modeset;
9364
9365 drm_dbg_state(state->dev,
9366 "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, planes_changed:%d, mode_changed:%d,active_changed:%d,connectors_changed:%d\n",
9367 acrtc->crtc_id,
9368 new_crtc_state->enable,
9369 new_crtc_state->active,
9370 new_crtc_state->planes_changed,
9371 new_crtc_state->mode_changed,
9372 new_crtc_state->active_changed,
9373 new_crtc_state->connectors_changed);
9374
9375 /* Remove stream for any changed/disabled CRTC */
9376 if (!enable) {
9377
9378 if (!dm_old_crtc_state->stream)
9379 goto skip_modeset;
9380
9381 /* Unset freesync video if it was active before */
9382 if (dm_old_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED) {
9383 dm_new_crtc_state->freesync_config.state = VRR_STATE_INACTIVE;
9384 dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = 0;
9385 }
9386
9387 /* Now check if we should set freesync video mode */
9388 if (dm_new_crtc_state->stream &&
9389 dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
9390 dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream) &&
9391 is_timing_unchanged_for_freesync(new_crtc_state,
9392 old_crtc_state)) {
9393 new_crtc_state->mode_changed = false;
9394 DRM_DEBUG_DRIVER(
9395 "Mode change not required for front porch change, setting mode_changed to %d",
9396 new_crtc_state->mode_changed);
9397
9398 set_freesync_fixed_config(dm_new_crtc_state);
9399
9400 goto skip_modeset;
9401 } else if (aconnector &&
9402 is_freesync_video_mode(&new_crtc_state->mode,
9403 aconnector)) {
9404 struct drm_display_mode *high_mode;
9405
9406 high_mode = get_highest_refresh_rate_mode(aconnector, false);
9407 if (!drm_mode_equal(&new_crtc_state->mode, high_mode))
9408 set_freesync_fixed_config(dm_new_crtc_state);
9409 }
9410
9411 ret = dm_atomic_get_state(state, &dm_state);
9412 if (ret)
9413 goto fail;
9414
9415 DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
9416 crtc->base.id);
9417
9418 /* i.e. reset mode */
9419 if (dc_remove_stream_from_ctx(
9420 dm->dc,
9421 dm_state->context,
9422 dm_old_crtc_state->stream) != DC_OK) {
9423 ret = -EINVAL;
9424 goto fail;
9425 }
9426
9427 dc_stream_release(dm_old_crtc_state->stream);
9428 dm_new_crtc_state->stream = NULL;
9429
9430 reset_freesync_config_for_crtc(dm_new_crtc_state);
9431
9432 *lock_and_validation_needed = true;
9433
9434 } else {/* Add stream for any updated/enabled CRTC */
9435 /*
9436 * Quick fix to prevent a NULL pointer dereference on new_stream when
9437 * newly added MST connectors are not found in the existing crtc_state in chained mode
9438 * TODO: need to dig out the root cause of that
9439 */
9440 if (!aconnector)
9441 goto skip_modeset;
9442
9443 if (modereset_required(new_crtc_state))
9444 goto skip_modeset;
9445
9446 if (amdgpu_dm_crtc_modeset_required(new_crtc_state, new_stream,
9447 dm_old_crtc_state->stream)) {
9448
9449 WARN_ON(dm_new_crtc_state->stream);
9450
9451 ret = dm_atomic_get_state(state, &dm_state);
9452 if (ret)
9453 goto fail;
9454
9455 dm_new_crtc_state->stream = new_stream;
9456
9457 dc_stream_retain(new_stream);
9458
9459 DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
9460 crtc->base.id);
9461
9462 if (dc_add_stream_to_ctx(
9463 dm->dc,
9464 dm_state->context,
9465 dm_new_crtc_state->stream) != DC_OK) {
9466 ret = -EINVAL;
9467 goto fail;
9468 }
9469
9470 *lock_and_validation_needed = true;
9471 }
9472 }
9473
9474 skip_modeset:
9475 /* Release extra reference */
9476 if (new_stream)
9477 dc_stream_release(new_stream);
9478
9479 /*
9480 * We want to do dc stream updates that do not require a
9481 * full modeset below.
9482 */
9483 if (!(enable && aconnector && new_crtc_state->active))
9484 return 0;
9485 /*
9486 * Given above conditions, the dc state cannot be NULL because:
9487 * 1. We're in the process of enabling CRTCs (the stream has just been
9488 * added to the dc context, or is already on the context)
9489 * 2. Has a valid connector attached, and
9490 * 3. Is currently active and enabled.
9491 * => The dc stream state currently exists.
9492 */
9493 BUG_ON(dm_new_crtc_state->stream == NULL);
9494
9495 /* Scaling or underscan settings */
9496 if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state) ||
9497 drm_atomic_crtc_needs_modeset(new_crtc_state))
9498 update_stream_scaling_settings(
9499 &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
9500
9501 /* ABM settings */
9502 dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
9503
9504 /*
9505 * Color management settings. We also update color properties
9506 * when a modeset is needed, to ensure it gets reprogrammed.
9507 */
9508 if (dm_new_crtc_state->base.color_mgmt_changed ||
9509 drm_atomic_crtc_needs_modeset(new_crtc_state)) {
9510 ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
9511 if (ret)
9512 goto fail;
9513 }
9514
9515 /* Update Freesync settings. */
9516 get_freesync_config_for_crtc(dm_new_crtc_state,
9517 dm_new_conn_state);
9518
9519 return ret;
9520
9521 fail:
9522 if (new_stream)
9523 dc_stream_release(new_stream);
9524 return ret;
9525 }
9526
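/*
 * Decide whether DC has to remove and recreate the plane (a "reset") instead
 * of applying a fast update. Any change that can affect pipe acquisition or
 * z-order (CRTC reassignment, modesets, color management, scaling, rotation,
 * blending, alpha, colorspace, pixel format, tiling or DCC) forces a reset.
 */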
9527 static bool should_reset_plane(struct drm_atomic_state *state,
9528 struct drm_plane *plane,
9529 struct drm_plane_state *old_plane_state,
9530 struct drm_plane_state *new_plane_state)
9531 {
9532 struct drm_plane *other;
9533 struct drm_plane_state *old_other_state, *new_other_state;
9534 struct drm_crtc_state *new_crtc_state;
9535 struct amdgpu_device *adev = drm_to_adev(plane->dev);
9536 int i;
9537
9538 /*
9539 * TODO: Remove this hack for all ASICs once it is proven that
9540 * fast updates work fine on DCN3.2+.
9541 */
9542 if (adev->ip_versions[DCE_HWIP][0] < IP_VERSION(3, 2, 0) && state->allow_modeset)
9543 return true;
9544
9545 /* Exit early if we know that we're adding or removing the plane. */
9546 if (old_plane_state->crtc != new_plane_state->crtc)
9547 return true;
9548
9549 /* old crtc == new_crtc == NULL, plane not in context. */
9550 if (!new_plane_state->crtc)
9551 return false;
9552
9553 new_crtc_state =
9554 drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
9555
9556 if (!new_crtc_state)
9557 return true;
9558
9559 /* CRTC Degamma changes currently require us to recreate planes. */
9560 if (new_crtc_state->color_mgmt_changed)
9561 return true;
9562
9563 if (drm_atomic_crtc_needs_modeset(new_crtc_state))
9564 return true;
9565
9566 /*
9567 * If there are any new primary or overlay planes being added or
9568 * removed then the z-order can potentially change. To ensure
9569 * correct z-order and pipe acquisition the current DC architecture
9570 * requires us to remove and recreate all existing planes.
9571 *
9572 * TODO: Come up with a more elegant solution for this.
9573 */
9574 for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
9575 struct amdgpu_framebuffer *old_afb, *new_afb;
9576
9577 if (other->type == DRM_PLANE_TYPE_CURSOR)
9578 continue;
9579
9580 if (old_other_state->crtc != new_plane_state->crtc &&
9581 new_other_state->crtc != new_plane_state->crtc)
9582 continue;
9583
9584 if (old_other_state->crtc != new_other_state->crtc)
9585 return true;
9586
9587 /* Src/dst size and scaling updates. */
9588 if (old_other_state->src_w != new_other_state->src_w ||
9589 old_other_state->src_h != new_other_state->src_h ||
9590 old_other_state->crtc_w != new_other_state->crtc_w ||
9591 old_other_state->crtc_h != new_other_state->crtc_h)
9592 return true;
9593
9594 /* Rotation / mirroring updates. */
9595 if (old_other_state->rotation != new_other_state->rotation)
9596 return true;
9597
9598 /* Blending updates. */
9599 if (old_other_state->pixel_blend_mode !=
9600 new_other_state->pixel_blend_mode)
9601 return true;
9602
9603 /* Alpha updates. */
9604 if (old_other_state->alpha != new_other_state->alpha)
9605 return true;
9606
9607 /* Colorspace changes. */
9608 if (old_other_state->color_range != new_other_state->color_range ||
9609 old_other_state->color_encoding != new_other_state->color_encoding)
9610 return true;
9611
9612 /* Framebuffer checks fall at the end. */
9613 if (!old_other_state->fb || !new_other_state->fb)
9614 continue;
9615
9616 /* Pixel format changes can require bandwidth updates. */
9617 if (old_other_state->fb->format != new_other_state->fb->format)
9618 return true;
9619
9620 old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
9621 new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
9622
9623 /* Tiling and DCC changes also require bandwidth updates. */
9624 if (old_afb->tiling_flags != new_afb->tiling_flags ||
9625 old_afb->base.modifier != new_afb->base.modifier)
9626 return true;
9627 }
9628
9629 return false;
9630 }
9631
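/*
 * Validate a framebuffer bound to the cursor plane: it must fit within the
 * CRTC's maximum cursor dimensions, must not be cropped, its pitch must equal
 * its width and be 64, 128 or 256 pixels, and it must be linear when no
 * format modifier is supplied.
 */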
9632 static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
9633 struct drm_plane_state *new_plane_state,
9634 struct drm_framebuffer *fb)
9635 {
9636 struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
9637 struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
9638 unsigned int pitch;
9639 bool linear;
9640
9641 if (fb->width > new_acrtc->max_cursor_width ||
9642 fb->height > new_acrtc->max_cursor_height) {
9643 DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
9644 new_plane_state->fb->width,
9645 new_plane_state->fb->height);
9646 return -EINVAL;
9647 }
9648 if (new_plane_state->src_w != fb->width << 16 ||
9649 new_plane_state->src_h != fb->height << 16) {
9650 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9651 return -EINVAL;
9652 }
9653
9654 /* Pitch in pixels */
9655 pitch = fb->pitches[0] / fb->format->cpp[0];
9656
9657 if (fb->width != pitch) {
9658 DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d",
9659 fb->width, pitch);
9660 return -EINVAL;
9661 }
9662
9663 switch (pitch) {
9664 case 64:
9665 case 128:
9666 case 256:
9667 /* FB pitch is supported by cursor plane */
9668 break;
9669 default:
9670 DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
9671 return -EINVAL;
9672 }
9673
9674 /* Core DRM takes care of checking FB modifiers, so we only need to
9675 * check tiling flags when the FB doesn't have a modifier.
9676 */
9677 if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
9678 if (adev->family < AMDGPU_FAMILY_AI) {
9679 linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
9680 AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
9681 AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
9682 } else {
9683 linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
9684 }
9685 if (!linear) {
9686 DRM_DEBUG_ATOMIC("Cursor FB not linear");
9687 return -EINVAL;
9688 }
9689 }
9690
9691 return 0;
9692 }
9693
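/*
 * Per-plane counterpart of dm_update_crtc_state(): cursor planes only get
 * basic validation here, while primary and overlay planes are removed from
 * or added to the DC context depending on @enable and on whether
 * should_reset_plane() says a full reset is required.
 */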
9694 static int dm_update_plane_state(struct dc *dc,
9695 struct drm_atomic_state *state,
9696 struct drm_plane *plane,
9697 struct drm_plane_state *old_plane_state,
9698 struct drm_plane_state *new_plane_state,
9699 bool enable,
9700 bool *lock_and_validation_needed,
9701 bool *is_top_most_overlay)
9702 {
9703
9704 struct dm_atomic_state *dm_state = NULL;
9705 struct drm_crtc *new_plane_crtc, *old_plane_crtc;
9706 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9707 struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
9708 struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
9709 struct amdgpu_crtc *new_acrtc;
9710 bool needs_reset;
9711 int ret = 0;
9712
9713
9714 new_plane_crtc = new_plane_state->crtc;
9715 old_plane_crtc = old_plane_state->crtc;
9716 dm_new_plane_state = to_dm_plane_state(new_plane_state);
9717 dm_old_plane_state = to_dm_plane_state(old_plane_state);
9718
9719 if (plane->type == DRM_PLANE_TYPE_CURSOR) {
9720 if (!enable || !new_plane_crtc ||
9721 drm_atomic_plane_disabling(plane->state, new_plane_state))
9722 return 0;
9723
9724 new_acrtc = to_amdgpu_crtc(new_plane_crtc);
9725
9726 if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
9727 DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
9728 return -EINVAL;
9729 }
9730
9731 if (new_plane_state->fb) {
9732 ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
9733 new_plane_state->fb);
9734 if (ret)
9735 return ret;
9736 }
9737
9738 return 0;
9739 }
9740
9741 needs_reset = should_reset_plane(state, plane, old_plane_state,
9742 new_plane_state);
9743
9744 /* Remove any changed/removed planes */
9745 if (!enable) {
9746 if (!needs_reset)
9747 return 0;
9748
9749 if (!old_plane_crtc)
9750 return 0;
9751
9752 old_crtc_state = drm_atomic_get_old_crtc_state(
9753 state, old_plane_crtc);
9754 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9755
9756 if (!dm_old_crtc_state->stream)
9757 return 0;
9758
9759 DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
9760 plane->base.id, old_plane_crtc->base.id);
9761
9762 ret = dm_atomic_get_state(state, &dm_state);
9763 if (ret)
9764 return ret;
9765
9766 if (!dc_remove_plane_from_context(
9767 dc,
9768 dm_old_crtc_state->stream,
9769 dm_old_plane_state->dc_state,
9770 dm_state->context)) {
9771
9772 return -EINVAL;
9773 }
9774
9775 if (dm_old_plane_state->dc_state)
9776 dc_plane_state_release(dm_old_plane_state->dc_state);
9777
9778 dm_new_plane_state->dc_state = NULL;
9779
9780 *lock_and_validation_needed = true;
9781
9782 } else { /* Add new planes */
9783 struct dc_plane_state *dc_new_plane_state;
9784
9785 if (drm_atomic_plane_disabling(plane->state, new_plane_state))
9786 return 0;
9787
9788 if (!new_plane_crtc)
9789 return 0;
9790
9791 new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
9792 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9793
9794 if (!dm_new_crtc_state->stream)
9795 return 0;
9796
9797 if (!needs_reset)
9798 return 0;
9799
9800 ret = amdgpu_dm_plane_helper_check_state(new_plane_state, new_crtc_state);
9801 if (ret)
9802 return ret;
9803
9804 WARN_ON(dm_new_plane_state->dc_state);
9805
9806 dc_new_plane_state = dc_create_plane_state(dc);
9807 if (!dc_new_plane_state)
9808 return -ENOMEM;
9809
9810 /* Block top most plane from being a video plane */
9811 if (plane->type == DRM_PLANE_TYPE_OVERLAY) {
9812 if (is_video_format(new_plane_state->fb->format->format) && *is_top_most_overlay)
9813 return -EINVAL;
9814
9815 *is_top_most_overlay = false;
9816 }
9817
9818 DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
9819 plane->base.id, new_plane_crtc->base.id);
9820
9821 ret = fill_dc_plane_attributes(
9822 drm_to_adev(new_plane_crtc->dev),
9823 dc_new_plane_state,
9824 new_plane_state,
9825 new_crtc_state);
9826 if (ret) {
9827 dc_plane_state_release(dc_new_plane_state);
9828 return ret;
9829 }
9830
9831 ret = dm_atomic_get_state(state, &dm_state);
9832 if (ret) {
9833 dc_plane_state_release(dc_new_plane_state);
9834 return ret;
9835 }
9836
9837 /*
9838 * Any atomic check errors that occur after this will
9839 * not need a release. The plane state will be attached
9840 * to the stream, and therefore part of the atomic
9841 * state. It'll be released when the atomic state is
9842 * cleaned.
9843 */
9844 if (!dc_add_plane_to_context(
9845 dc,
9846 dm_new_crtc_state->stream,
9847 dc_new_plane_state,
9848 dm_state->context)) {
9849
9850 dc_plane_state_release(dc_new_plane_state);
9851 return -EINVAL;
9852 }
9853
9854 dm_new_plane_state->dc_state = dc_new_plane_state;
9855
9856 dm_new_crtc_state->mpo_requested |= (plane->type == DRM_PLANE_TYPE_OVERLAY);
9857
9858 /* Tell DC to do a full surface update every time there
9859 * is a plane change. Inefficient, but works for now.
9860 */
9861 dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
9862
9863 *lock_and_validation_needed = true;
9864 }
9865
9866
9867 return ret;
9868 }
9869
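/*
 * Return the plane's source size in whole pixels (src_w/src_h are 16.16
 * fixed point), swapping width and height for 90/270 degree rotations.
 */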
9870 static void dm_get_oriented_plane_size(struct drm_plane_state *plane_state,
9871 int *src_w, int *src_h)
9872 {
9873 switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
9874 case DRM_MODE_ROTATE_90:
9875 case DRM_MODE_ROTATE_270:
9876 *src_w = plane_state->src_h >> 16;
9877 *src_h = plane_state->src_w >> 16;
9878 break;
9879 case DRM_MODE_ROTATE_0:
9880 case DRM_MODE_ROTATE_180:
9881 default:
9882 *src_w = plane_state->src_w >> 16;
9883 *src_h = plane_state->src_h >> 16;
9884 break;
9885 }
9886 }
9887
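/*
 * Compute the plane's horizontal and vertical scale factors in thousandths
 * (destination size * 1000 / rotated source size).
 */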
9888 static void
9889 dm_get_plane_scale(struct drm_plane_state *plane_state,
9890 int *out_plane_scale_w, int *out_plane_scale_h)
9891 {
9892 int plane_src_w, plane_src_h;
9893
9894 dm_get_oriented_plane_size(plane_state, &plane_src_w, &plane_src_h);
9895 *out_plane_scale_w = plane_state->crtc_w * 1000 / plane_src_w;
9896 *out_plane_scale_h = plane_state->crtc_h * 1000 / plane_src_h;
9897 }
9898
9899 static int dm_check_crtc_cursor(struct drm_atomic_state *state,
9900 struct drm_crtc *crtc,
9901 struct drm_crtc_state *new_crtc_state)
9902 {
9903 struct drm_plane *cursor = crtc->cursor, *plane, *underlying;
9904 struct drm_plane_state *old_plane_state, *new_plane_state;
9905 struct drm_plane_state *new_cursor_state, *new_underlying_state;
9906 int i;
9907 int cursor_scale_w, cursor_scale_h, underlying_scale_w, underlying_scale_h;
9908 bool any_relevant_change = false;
9909
9910 /* On DCE and DCN there is no dedicated hardware cursor plane. We get a
9911 * cursor per pipe but it's going to inherit the scaling and
9912 	 * positioning from the underlying pipe. Check that the cursor plane's
9913 	 * blending properties match those of the underlying planes.
9914 */
9915
9916 /* If no plane was enabled or changed scaling, no need to check again */
9917 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
9918 int new_scale_w, new_scale_h, old_scale_w, old_scale_h;
9919
9920 if (!new_plane_state || !new_plane_state->fb || new_plane_state->crtc != crtc)
9921 continue;
9922
9923 if (!old_plane_state || !old_plane_state->fb || old_plane_state->crtc != crtc) {
9924 any_relevant_change = true;
9925 break;
9926 }
9927
9928 if (new_plane_state->fb == old_plane_state->fb &&
9929 new_plane_state->crtc_w == old_plane_state->crtc_w &&
9930 new_plane_state->crtc_h == old_plane_state->crtc_h)
9931 continue;
9932
9933 dm_get_plane_scale(new_plane_state, &new_scale_w, &new_scale_h);
9934 dm_get_plane_scale(old_plane_state, &old_scale_w, &old_scale_h);
9935
9936 if (new_scale_w != old_scale_w || new_scale_h != old_scale_h) {
9937 any_relevant_change = true;
9938 break;
9939 }
9940 }
9941
9942 if (!any_relevant_change)
9943 return 0;
9944
9945 new_cursor_state = drm_atomic_get_plane_state(state, cursor);
9946 if (IS_ERR(new_cursor_state))
9947 return PTR_ERR(new_cursor_state);
9948
9949 if (!new_cursor_state->fb)
9950 return 0;
9951
9952 dm_get_plane_scale(new_cursor_state, &cursor_scale_w, &cursor_scale_h);
9953
9954 /* Need to check all enabled planes, even if this commit doesn't change
9955 * their state
9956 */
9957 i = drm_atomic_add_affected_planes(state, crtc);
9958 if (i)
9959 return i;
9960
9961 for_each_new_plane_in_state_reverse(state, underlying, new_underlying_state, i) {
9962 /* Narrow down to non-cursor planes on the same CRTC as the cursor */
9963 if (new_underlying_state->crtc != crtc || underlying == crtc->cursor)
9964 continue;
9965
9966 /* Ignore disabled planes */
9967 if (!new_underlying_state->fb)
9968 continue;
9969
9970 dm_get_plane_scale(new_underlying_state,
9971 &underlying_scale_w, &underlying_scale_h);
9972
9973 if (cursor_scale_w != underlying_scale_w ||
9974 cursor_scale_h != underlying_scale_h) {
9975 drm_dbg_atomic(crtc->dev,
9976 "Cursor [PLANE:%d:%s] scaling doesn't match underlying [PLANE:%d:%s]\n",
9977 cursor->base.id, cursor->name, underlying->base.id, underlying->name);
9978 return -EINVAL;
9979 }
9980
9981 /* If this plane covers the whole CRTC, no need to check planes underneath */
9982 if (new_underlying_state->crtc_x <= 0 &&
9983 new_underlying_state->crtc_y <= 0 &&
9984 new_underlying_state->crtc_x + new_underlying_state->crtc_w >= new_crtc_state->mode.hdisplay &&
9985 new_underlying_state->crtc_y + new_underlying_state->crtc_h >= new_crtc_state->mode.vdisplay)
9986 break;
9987 }
9988
9989 return 0;
9990 }
9991
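/*
 * If the given CRTC is driven by an MST connector, add every CRTC that
 * shares the same MST topology to the atomic state so that DSC
 * configuration can be recomputed across all streams on that link.
 */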
9992 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
9993 {
9994 struct drm_connector *connector;
9995 struct drm_connector_state *conn_state, *old_conn_state;
9996 struct amdgpu_dm_connector *aconnector = NULL;
9997 int i;
9998
9999 for_each_oldnew_connector_in_state(state, connector, old_conn_state, conn_state, i) {
10000 if (!conn_state->crtc)
10001 conn_state = old_conn_state;
10002
10003 if (conn_state->crtc != crtc)
10004 continue;
10005
10006 aconnector = to_amdgpu_dm_connector(connector);
10007 if (!aconnector->mst_output_port || !aconnector->mst_root)
10008 aconnector = NULL;
10009 else
10010 break;
10011 }
10012
10013 if (!aconnector)
10014 return 0;
10015
10016 return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_root->mst_mgr);
10017 }
10018
10019 /**
10020 * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
10021 *
10022 * @dev: The DRM device
10023 * @state: The atomic state to commit
10024 *
10025 * Validate that the given atomic state is programmable by DC into hardware.
10026 * This involves constructing a &struct dc_state reflecting the new hardware
10027 * state we wish to commit, then querying DC to see if it is programmable. It's
10028 * important not to modify the existing DC state. Otherwise, atomic_check
10029 * may unexpectedly commit hardware changes.
10030 *
10031 * When validating the DC state, it's important that the right locks are
10032 * acquired. For full updates case which removes/adds/updates streams on one
10033 * CRTC while flipping on another CRTC, acquiring global lock will guarantee
10034 * that any such full update commit will wait for completion of any outstanding
10035 * flip using DRMs synchronization events.
10036 *
10037 * Note that DM adds the affected connectors for all CRTCs in state, when that
10038 * might not seem necessary. This is because DC stream creation requires the
10039 * DC sink, which is tied to the DRM connector state. Cleaning this up should
10040 * be possible but non-trivial - a possible TODO item.
10041 *
10042  * Return: 0 on success, or a negative error code if validation failed.
10043 */
10044 static int amdgpu_dm_atomic_check(struct drm_device *dev,
10045 struct drm_atomic_state *state)
10046 {
10047 struct amdgpu_device *adev = drm_to_adev(dev);
10048 struct dm_atomic_state *dm_state = NULL;
10049 struct dc *dc = adev->dm.dc;
10050 struct drm_connector *connector;
10051 struct drm_connector_state *old_con_state, *new_con_state;
10052 struct drm_crtc *crtc;
10053 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
10054 struct drm_plane *plane;
10055 struct drm_plane_state *old_plane_state, *new_plane_state;
10056 enum dc_status status;
10057 int ret, i;
10058 bool lock_and_validation_needed = false;
10059 bool is_top_most_overlay = true;
10060 struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
10061 struct drm_dp_mst_topology_mgr *mgr;
10062 struct drm_dp_mst_topology_state *mst_state;
10063 struct dsc_mst_fairness_vars vars[MAX_PIPES];
10064
10065 trace_amdgpu_dm_atomic_check_begin(state);
10066
10067 ret = drm_atomic_helper_check_modeset(dev, state);
10068 if (ret) {
10069 DRM_DEBUG_DRIVER("drm_atomic_helper_check_modeset() failed\n");
10070 goto fail;
10071 }
10072
10073 /* Check connector changes */
10074 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10075 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10076 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10077
10078 /* Skip connectors that are disabled or part of modeset already. */
10079 if (!new_con_state->crtc)
10080 continue;
10081
10082 new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
10083 if (IS_ERR(new_crtc_state)) {
10084 DRM_DEBUG_DRIVER("drm_atomic_get_crtc_state() failed\n");
10085 ret = PTR_ERR(new_crtc_state);
10086 goto fail;
10087 }
10088
10089 if (dm_old_con_state->abm_level != dm_new_con_state->abm_level ||
10090 dm_old_con_state->scaling != dm_new_con_state->scaling)
10091 new_crtc_state->connectors_changed = true;
10092 }
10093
10094 if (dc_resource_is_dsc_encoding_supported(dc)) {
10095 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10096 if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
10097 ret = add_affected_mst_dsc_crtcs(state, crtc);
10098 if (ret) {
10099 DRM_DEBUG_DRIVER("add_affected_mst_dsc_crtcs() failed\n");
10100 goto fail;
10101 }
10102 }
10103 }
10104 }
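	/*
	 * Pull connectors and planes into the state for every CRTC that
	 * needs revalidation: modesets, color management changes, VRR
	 * enable/disable toggles or forced DSC recalculation.
	 */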
10105 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10106 dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10107
10108 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
10109 !new_crtc_state->color_mgmt_changed &&
10110 old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
10111 dm_old_crtc_state->dsc_force_changed == false)
10112 continue;
10113
10114 ret = amdgpu_dm_verify_lut_sizes(new_crtc_state);
10115 if (ret) {
10116 DRM_DEBUG_DRIVER("amdgpu_dm_verify_lut_sizes() failed\n");
10117 goto fail;
10118 }
10119
10120 if (!new_crtc_state->enable)
10121 continue;
10122
10123 ret = drm_atomic_add_affected_connectors(state, crtc);
10124 if (ret) {
10125 DRM_DEBUG_DRIVER("drm_atomic_add_affected_connectors() failed\n");
10126 goto fail;
10127 }
10128
10129 ret = drm_atomic_add_affected_planes(state, crtc);
10130 if (ret) {
10131 DRM_DEBUG_DRIVER("drm_atomic_add_affected_planes() failed\n");
10132 goto fail;
10133 }
10134
10135 if (dm_old_crtc_state->dsc_force_changed)
10136 new_crtc_state->mode_changed = true;
10137 }
10138
10139 /*
10140 * Add all primary and overlay planes on the CRTC to the state
10141 * whenever a plane is enabled to maintain correct z-ordering
10142 * and to enable fast surface updates.
10143 */
10144 drm_for_each_crtc(crtc, dev) {
10145 bool modified = false;
10146
10147 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
10148 if (plane->type == DRM_PLANE_TYPE_CURSOR)
10149 continue;
10150
10151 if (new_plane_state->crtc == crtc ||
10152 old_plane_state->crtc == crtc) {
10153 modified = true;
10154 break;
10155 }
10156 }
10157
10158 if (!modified)
10159 continue;
10160
10161 drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
10162 if (plane->type == DRM_PLANE_TYPE_CURSOR)
10163 continue;
10164
10165 new_plane_state =
10166 drm_atomic_get_plane_state(state, plane);
10167
10168 if (IS_ERR(new_plane_state)) {
10169 ret = PTR_ERR(new_plane_state);
10170 DRM_DEBUG_DRIVER("new_plane_state is BAD\n");
10171 goto fail;
10172 }
10173 }
10174 }
10175
10176 /*
10177 * DC consults the zpos (layer_index in DC terminology) to determine the
10178 * hw plane on which to enable the hw cursor (see
10179 * `dcn10_can_pipe_disable_cursor`). By now, all modified planes are in
10180 * atomic state, so call drm helper to normalize zpos.
10181 */
10182 ret = drm_atomic_normalize_zpos(dev, state);
10183 if (ret) {
10184 drm_dbg(dev, "drm_atomic_normalize_zpos() failed\n");
10185 goto fail;
10186 }
10187
10188 	/* Remove existing planes if they are modified */
10189 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10190 if (old_plane_state->fb && new_plane_state->fb &&
10191 get_mem_type(old_plane_state->fb) !=
10192 get_mem_type(new_plane_state->fb))
10193 lock_and_validation_needed = true;
10194
10195 ret = dm_update_plane_state(dc, state, plane,
10196 old_plane_state,
10197 new_plane_state,
10198 false,
10199 &lock_and_validation_needed,
10200 &is_top_most_overlay);
10201 if (ret) {
10202 DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
10203 goto fail;
10204 }
10205 }
10206
10207 /* Disable all crtcs which require disable */
10208 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10209 ret = dm_update_crtc_state(&adev->dm, state, crtc,
10210 old_crtc_state,
10211 new_crtc_state,
10212 false,
10213 &lock_and_validation_needed);
10214 if (ret) {
10215 DRM_DEBUG_DRIVER("DISABLE: dm_update_crtc_state() failed\n");
10216 goto fail;
10217 }
10218 }
10219
10220 /* Enable all crtcs which require enable */
10221 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10222 ret = dm_update_crtc_state(&adev->dm, state, crtc,
10223 old_crtc_state,
10224 new_crtc_state,
10225 true,
10226 &lock_and_validation_needed);
10227 if (ret) {
10228 DRM_DEBUG_DRIVER("ENABLE: dm_update_crtc_state() failed\n");
10229 goto fail;
10230 }
10231 }
10232
10233 /* Add new/modified planes */
10234 for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10235 ret = dm_update_plane_state(dc, state, plane,
10236 old_plane_state,
10237 new_plane_state,
10238 true,
10239 &lock_and_validation_needed,
10240 &is_top_most_overlay);
10241 if (ret) {
10242 DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
10243 goto fail;
10244 }
10245 }
10246
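	/*
	 * For ASICs with DSC support, pre-validate the DSC configuration
	 * for the new topology before running the DRM plane checks and
	 * the global DC validation below.
	 */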
10247 if (dc_resource_is_dsc_encoding_supported(dc)) {
10248 ret = pre_validate_dsc(state, &dm_state, vars);
10249 if (ret != 0)
10250 goto fail;
10251 }
10252
10253 /* Run this here since we want to validate the streams we created */
10254 ret = drm_atomic_helper_check_planes(dev, state);
10255 if (ret) {
10256 DRM_DEBUG_DRIVER("drm_atomic_helper_check_planes() failed\n");
10257 goto fail;
10258 }
10259
10260 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
10261 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10262 if (dm_new_crtc_state->mpo_requested)
10263 DRM_DEBUG_DRIVER("MPO enablement requested on crtc:[%p]\n", crtc);
10264 }
10265
10266 /* Check cursor planes scaling */
10267 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
10268 ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
10269 if (ret) {
10270 DRM_DEBUG_DRIVER("dm_check_crtc_cursor() failed\n");
10271 goto fail;
10272 }
10273 }
10274
10275 if (state->legacy_cursor_update) {
10276 /*
10277 * This is a fast cursor update coming from the plane update
10278 * helper, check if it can be done asynchronously for better
10279 * performance.
10280 */
10281 state->async_update =
10282 !drm_atomic_helper_async_check(dev, state);
10283
10284 /*
10285 * Skip the remaining global validation if this is an async
10286 * update. Cursor updates can be done without affecting
10287 * state or bandwidth calcs and this avoids the performance
10288 * penalty of locking the private state object and
10289 * allocating a new dc_state.
10290 */
10291 if (state->async_update)
10292 return 0;
10293 }
10294
10295 	/* Check scaling and underscan changes */
10296 	/* TODO Removed scaling changes validation due to inability to commit
10297 	 * a new stream into context without causing a full reset. Need to
10298 	 * decide how to handle.
10299 	 */
10300 for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10301 struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10302 struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10303 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
10304
10305 /* Skip any modesets/resets */
10306 if (!acrtc || drm_atomic_crtc_needs_modeset(
10307 drm_atomic_get_new_crtc_state(state, &acrtc->base)))
10308 continue;
10309
10310 		/* Skip anything that is not a scaling or underscan change */
10311 if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
10312 continue;
10313
10314 lock_and_validation_needed = true;
10315 }
10316
10317 /* set the slot info for each mst_state based on the link encoding format */
10318 for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
10319 struct amdgpu_dm_connector *aconnector;
10320 struct drm_connector *connector;
10321 struct drm_connector_list_iter iter;
10322 u8 link_coding_cap;
10323
10324 drm_connector_list_iter_begin(dev, &iter);
10325 drm_for_each_connector_iter(connector, &iter) {
10326 if (connector->index == mst_state->mgr->conn_base_id) {
10327 aconnector = to_amdgpu_dm_connector(connector);
10328 link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(aconnector->dc_link);
10329 drm_dp_mst_update_slots(mst_state, link_coding_cap);
10330
10331 break;
10332 }
10333 }
10334 drm_connector_list_iter_end(&iter);
10335 }
10336
10337 	/*
10338 * Streams and planes are reset when there are changes that affect
10339 * bandwidth. Anything that affects bandwidth needs to go through
10340 * DC global validation to ensure that the configuration can be applied
10341 * to hardware.
10342 *
10343 * We have to currently stall out here in atomic_check for outstanding
10344 * commits to finish in this case because our IRQ handlers reference
10345 * DRM state directly - we can end up disabling interrupts too early
10346 * if we don't.
10347 *
10348 * TODO: Remove this stall and drop DM state private objects.
10349 */
10350 if (lock_and_validation_needed) {
10351 ret = dm_atomic_get_state(state, &dm_state);
10352 if (ret) {
10353 DRM_DEBUG_DRIVER("dm_atomic_get_state() failed\n");
10354 goto fail;
10355 }
10356
10357 ret = do_aquire_global_lock(dev, state);
10358 if (ret) {
10359 DRM_DEBUG_DRIVER("do_aquire_global_lock() failed\n");
10360 goto fail;
10361 }
10362
10363 if (dc_resource_is_dsc_encoding_supported(dc)) {
10364 ret = compute_mst_dsc_configs_for_state(state, dm_state->context, vars);
10365 if (ret) {
10366 DRM_DEBUG_DRIVER("compute_mst_dsc_configs_for_state() failed\n");
10367 ret = -EINVAL;
10368 goto fail;
10369 }
10370 }
10371
10372 ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars);
10373 if (ret) {
10374 DRM_DEBUG_DRIVER("dm_update_mst_vcpi_slots_for_dsc() failed\n");
10375 goto fail;
10376 }
10377
10378 /*
10379 * Perform validation of MST topology in the state:
10380 * We need to perform MST atomic check before calling
10381 * dc_validate_global_state(), or there is a chance
10382 * to get stuck in an infinite loop and hang eventually.
10383 */
10384 ret = drm_dp_mst_atomic_check(state);
10385 if (ret) {
10386 DRM_DEBUG_DRIVER("drm_dp_mst_atomic_check() failed\n");
10387 goto fail;
10388 }
10389 status = dc_validate_global_state(dc, dm_state->context, true);
10390 if (status != DC_OK) {
10391 DRM_DEBUG_DRIVER("DC global validation failure: %s (%d)",
10392 dc_status_to_str(status), status);
10393 ret = -EINVAL;
10394 goto fail;
10395 }
10396 } else {
10397 /*
10398 * The commit is a fast update. Fast updates shouldn't change
10399 * the DC context, affect global validation, and can have their
10400 * commit work done in parallel with other commits not touching
10401 * the same resource. If we have a new DC context as part of
10402 * the DM atomic state from validation we need to free it and
10403 * retain the existing one instead.
10404 *
10405 * Furthermore, since the DM atomic state only contains the DC
10406 * context and can safely be annulled, we can free the state
10407 * and clear the associated private object now to free
10408 * some memory and avoid a possible use-after-free later.
10409 */
10410
10411 for (i = 0; i < state->num_private_objs; i++) {
10412 struct drm_private_obj *obj = state->private_objs[i].ptr;
10413
10414 if (obj->funcs == adev->dm.atomic_obj.funcs) {
10415 int j = state->num_private_objs-1;
10416
10417 dm_atomic_destroy_state(obj,
10418 state->private_objs[i].state);
10419
10420 /* If i is not at the end of the array then the
10421 * last element needs to be moved to where i was
10422 * before the array can safely be truncated.
10423 */
10424 if (i != j)
10425 state->private_objs[i] =
10426 state->private_objs[j];
10427
10428 state->private_objs[j].ptr = NULL;
10429 state->private_objs[j].state = NULL;
10430 state->private_objs[j].old_state = NULL;
10431 state->private_objs[j].new_state = NULL;
10432
10433 state->num_private_objs = j;
10434 break;
10435 }
10436 }
10437 }
10438
10439 /* Store the overall update type for use later in atomic check. */
10440 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
10441 struct dm_crtc_state *dm_new_crtc_state =
10442 to_dm_crtc_state(new_crtc_state);
10443
10444 /*
10445 * Only allow async flips for fast updates that don't change
10446 * the FB pitch, the DCC state, rotation, etc.
10447 */
10448 if (new_crtc_state->async_flip && lock_and_validation_needed) {
10449 drm_dbg_atomic(crtc->dev,
10450 "[CRTC:%d:%s] async flips are only supported for fast updates\n",
10451 crtc->base.id, crtc->name);
10452 ret = -EINVAL;
10453 goto fail;
10454 }
10455
10456 dm_new_crtc_state->update_type = lock_and_validation_needed ?
10457 UPDATE_TYPE_FULL : UPDATE_TYPE_FAST;
10458 }
10459
10460 /* Must be success */
10461 WARN_ON(ret);
10462
10463 trace_amdgpu_dm_atomic_check_finish(state, ret);
10464
10465 return ret;
10466
10467 fail:
10468 if (ret == -EDEADLK)
10469 DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
10470 else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
10471 DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
10472 else
10473 DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret);
10474
10475 trace_amdgpu_dm_atomic_check_finish(state, ret);
10476
10477 return ret;
10478 }
10479
10480 static bool is_dp_capable_without_timing_msa(struct dc *dc,
10481 struct amdgpu_dm_connector *amdgpu_dm_connector)
10482 {
10483 u8 dpcd_data;
10484 bool capable = false;
10485
10486 if (amdgpu_dm_connector->dc_link &&
10487 dm_helpers_dp_read_dpcd(
10488 NULL,
10489 amdgpu_dm_connector->dc_link,
10490 DP_DOWN_STREAM_PORT_COUNT,
10491 &dpcd_data,
10492 sizeof(dpcd_data))) {
10493 		capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false;
10494 }
10495
10496 return capable;
10497 }
10498
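/*
 * Send one chunk (at most DMUB_EDID_CEA_DATA_CHUNK_BYTES) of a CEA EDID
 * extension block to DMUB for parsing and translate any AMD VSDB fields
 * found in the reply into @vsdb.
 */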
10499 static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
10500 unsigned int offset,
10501 unsigned int total_length,
10502 u8 *data,
10503 unsigned int length,
10504 struct amdgpu_hdmi_vsdb_info *vsdb)
10505 {
10506 bool res;
10507 union dmub_rb_cmd cmd;
10508 struct dmub_cmd_send_edid_cea *input;
10509 struct dmub_cmd_edid_cea_output *output;
10510
10511 if (length > DMUB_EDID_CEA_DATA_CHUNK_BYTES)
10512 return false;
10513
10514 memset(&cmd, 0, sizeof(cmd));
10515
10516 input = &cmd.edid_cea.data.input;
10517
10518 cmd.edid_cea.header.type = DMUB_CMD__EDID_CEA;
10519 cmd.edid_cea.header.sub_type = 0;
10520 cmd.edid_cea.header.payload_bytes =
10521 sizeof(cmd.edid_cea) - sizeof(cmd.edid_cea.header);
10522 input->offset = offset;
10523 input->length = length;
10524 input->cea_total_length = total_length;
10525 memcpy(input->payload, data, length);
10526
10527 res = dm_execute_dmub_cmd(dm->dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY);
10528 if (!res) {
10529 DRM_ERROR("EDID CEA parser failed\n");
10530 return false;
10531 }
10532
10533 output = &cmd.edid_cea.data.output;
10534
10535 if (output->type == DMUB_CMD__EDID_CEA_ACK) {
10536 if (!output->ack.success) {
10537 DRM_ERROR("EDID CEA ack failed at offset %d\n",
10538 output->ack.offset);
10539 }
10540 } else if (output->type == DMUB_CMD__EDID_CEA_AMD_VSDB) {
10541 if (!output->amd_vsdb.vsdb_found)
10542 return false;
10543
10544 vsdb->freesync_supported = output->amd_vsdb.freesync_supported;
10545 vsdb->amd_vsdb_version = output->amd_vsdb.amd_vsdb_version;
10546 vsdb->min_refresh_rate_hz = output->amd_vsdb.min_frame_rate;
10547 vsdb->max_refresh_rate_hz = output->amd_vsdb.max_frame_rate;
10548 } else {
10549 DRM_WARN("Unknown EDID CEA parser results\n");
10550 return false;
10551 }
10552
10553 return true;
10554 }
10555
10556 static bool parse_edid_cea_dmcu(struct amdgpu_display_manager *dm,
10557 u8 *edid_ext, int len,
10558 struct amdgpu_hdmi_vsdb_info *vsdb_info)
10559 {
10560 int i;
10561
10562 /* send extension block to DMCU for parsing */
10563 for (i = 0; i < len; i += 8) {
10564 bool res;
10565 int offset;
10566
10567 		/* send 8 bytes at a time */
10568 if (!dc_edid_parser_send_cea(dm->dc, i, len, &edid_ext[i], 8))
10569 return false;
10570
10571 if (i+8 == len) {
10572 			/* The whole EDID block has been sent; expect a result */
10573 int version, min_rate, max_rate;
10574
10575 res = dc_edid_parser_recv_amd_vsdb(dm->dc, &version, &min_rate, &max_rate);
10576 if (res) {
10577 /* amd vsdb found */
10578 vsdb_info->freesync_supported = 1;
10579 vsdb_info->amd_vsdb_version = version;
10580 vsdb_info->min_refresh_rate_hz = min_rate;
10581 vsdb_info->max_refresh_rate_hz = max_rate;
10582 return true;
10583 }
10584 /* not amd vsdb */
10585 return false;
10586 }
10587
10588 		/* check for ack */
10589 res = dc_edid_parser_recv_cea_ack(dm->dc, &offset);
10590 if (!res)
10591 return false;
10592 }
10593
10594 return false;
10595 }
10596
10597 static bool parse_edid_cea_dmub(struct amdgpu_display_manager *dm,
10598 u8 *edid_ext, int len,
10599 struct amdgpu_hdmi_vsdb_info *vsdb_info)
10600 {
10601 int i;
10602
10603 	/* send extension block to DMUB for parsing */
10604 for (i = 0; i < len; i += 8) {
10605 		/* send 8 bytes at a time */
10606 if (!dm_edid_parser_send_cea(dm, i, len, &edid_ext[i], 8, vsdb_info))
10607 return false;
10608 }
10609
10610 return vsdb_info->freesync_supported;
10611 }
10612
10613 static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
10614 u8 *edid_ext, int len,
10615 struct amdgpu_hdmi_vsdb_info *vsdb_info)
10616 {
10617 struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
10618 bool ret;
10619
10620 mutex_lock(&adev->dm.dc_lock);
10621 if (adev->dm.dmub_srv)
10622 ret = parse_edid_cea_dmub(&adev->dm, edid_ext, len, vsdb_info);
10623 else
10624 ret = parse_edid_cea_dmcu(&adev->dm, edid_ext, len, vsdb_info);
10625 mutex_unlock(&adev->dm.dc_lock);
10626 return ret;
10627 }
10628
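/*
 * Scan the EDID DisplayID extension for a version 3 AMD vendor-specific
 * data block and record whether the panel advertises Replay support.
 */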
10629 static int parse_amd_vsdb(struct amdgpu_dm_connector *aconnector,
10630 struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
10631 {
10632 u8 *edid_ext = NULL;
10633 int i;
10634 int j = 0;
10635
10636 if (edid == NULL || edid->extensions == 0)
10637 return -ENODEV;
10638
10639 /* Find DisplayID extension */
10640 for (i = 0; i < edid->extensions; i++) {
10641 edid_ext = (void *)(edid + (i + 1));
10642 if (edid_ext[0] == DISPLAYID_EXT)
10643 break;
10644 }
10645
10646 while (j < EDID_LENGTH) {
10647 struct amd_vsdb_block *amd_vsdb = (struct amd_vsdb_block *)&edid_ext[j];
10648 unsigned int ieeeId = (amd_vsdb->ieee_id[2] << 16) | (amd_vsdb->ieee_id[1] << 8) | (amd_vsdb->ieee_id[0]);
10649
10650 if (ieeeId == HDMI_AMD_VENDOR_SPECIFIC_DATA_BLOCK_IEEE_REGISTRATION_ID &&
10651 amd_vsdb->version == HDMI_AMD_VENDOR_SPECIFIC_DATA_BLOCK_VERSION_3) {
10652 vsdb_info->replay_mode = (amd_vsdb->feature_caps & AMD_VSDB_VERSION_3_FEATURECAP_REPLAYMODE) ? true : false;
10653 vsdb_info->amd_vsdb_version = HDMI_AMD_VENDOR_SPECIFIC_DATA_BLOCK_VERSION_3;
10654 DRM_DEBUG_KMS("Panel supports Replay Mode: %d\n", vsdb_info->replay_mode);
10655
10656 return true;
10657 }
10658 j++;
10659 }
10660
10661 return false;
10662 }
10663
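/*
 * Find the CEA extension block in the EDID and hand it to the CEA parser
 * (DMUB or DMCU) to extract the HDMI AMD VSDB FreeSync refresh-rate
 * limits. Returns the extension index on success, -ENODEV otherwise.
 */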
10664 static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
10665 struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
10666 {
10667 u8 *edid_ext = NULL;
10668 int i;
10669 bool valid_vsdb_found = false;
10670
10671 /*----- drm_find_cea_extension() -----*/
10672 /* No EDID or EDID extensions */
10673 if (edid == NULL || edid->extensions == 0)
10674 return -ENODEV;
10675
10676 /* Find CEA extension */
10677 for (i = 0; i < edid->extensions; i++) {
10678 edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
10679 if (edid_ext[0] == CEA_EXT)
10680 break;
10681 }
10682
10683 if (i == edid->extensions)
10684 return -ENODEV;
10685
10686 /*----- cea_db_offsets() -----*/
10687 if (edid_ext[0] != CEA_EXT)
10688 return -ENODEV;
10689
10690 valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);
10691
10692 return valid_vsdb_found ? i : -ENODEV;
10693 }
10694
10695 /**
10696 * amdgpu_dm_update_freesync_caps - Update Freesync capabilities
10697 *
10698 * @connector: Connector to query.
10699 * @edid: EDID from monitor
10700 *
10701  * Amdgpu supports FreeSync on DP and HDMI displays, and it is required to keep
10702  * track of some of the display information in the internal data struct used by
10703  * amdgpu_dm. This function checks the connector type and sets the FreeSync
10704  * parameters accordingly.
10705 */
10706 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
10707 struct edid *edid)
10708 {
10709 int i = 0;
10710 struct detailed_timing *timing;
10711 struct detailed_non_pixel *data;
10712 struct detailed_data_monitor_range *range;
10713 struct amdgpu_dm_connector *amdgpu_dm_connector =
10714 to_amdgpu_dm_connector(connector);
10715 struct dm_connector_state *dm_con_state = NULL;
10716 struct dc_sink *sink;
10717
10718 struct drm_device *dev = connector->dev;
10719 struct amdgpu_device *adev = drm_to_adev(dev);
10720 struct amdgpu_hdmi_vsdb_info vsdb_info = {0};
10721 bool freesync_capable = false;
10722 enum adaptive_sync_type as_type = ADAPTIVE_SYNC_TYPE_NONE;
10723
10724 if (!connector->state) {
10725 DRM_ERROR("%s - Connector has no state", __func__);
10726 goto update;
10727 }
10728
10729 sink = amdgpu_dm_connector->dc_sink ?
10730 amdgpu_dm_connector->dc_sink :
10731 amdgpu_dm_connector->dc_em_sink;
10732
10733 if (!edid || !sink) {
10734 dm_con_state = to_dm_connector_state(connector->state);
10735
10736 amdgpu_dm_connector->min_vfreq = 0;
10737 amdgpu_dm_connector->max_vfreq = 0;
10738 amdgpu_dm_connector->pixel_clock_mhz = 0;
10739 connector->display_info.monitor_range.min_vfreq = 0;
10740 connector->display_info.monitor_range.max_vfreq = 0;
10741 freesync_capable = false;
10742
10743 goto update;
10744 }
10745
10746 dm_con_state = to_dm_connector_state(connector->state);
10747
10748 if (!adev->dm.freesync_module)
10749 goto update;
10750
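	/*
	 * For DP/eDP sinks, only parse the EDID range-limits descriptor
	 * when the sink reports that it can ignore MSA timing parameters,
	 * which is a prerequisite for variable refresh operation.
	 */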
10751 if (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
10752 || sink->sink_signal == SIGNAL_TYPE_EDP) {
10753 bool edid_check_required = false;
10754
10755 if (edid) {
10756 edid_check_required = is_dp_capable_without_timing_msa(
10757 adev->dm.dc,
10758 amdgpu_dm_connector);
10759 }
10760
10761 		if (edid_check_required && (edid->version > 1 ||
10762 (edid->version == 1 && edid->revision > 1))) {
10763 for (i = 0; i < 4; i++) {
10764
10765 timing = &edid->detailed_timings[i];
10766 data = &timing->data.other_data;
10767 range = &data->data.range;
10768 /*
10769 * Check if monitor has continuous frequency mode
10770 */
10771 if (data->type != EDID_DETAIL_MONITOR_RANGE)
10772 continue;
10773 /*
10774 * Check for flag range limits only. If flag == 1 then
10775 * no additional timing information provided.
10776 * Default GTF, GTF Secondary curve and CVT are not
10777 * supported
10778 */
10779 if (range->flags != 1)
10780 continue;
10781
10782 amdgpu_dm_connector->min_vfreq = range->min_vfreq;
10783 amdgpu_dm_connector->max_vfreq = range->max_vfreq;
10784 amdgpu_dm_connector->pixel_clock_mhz =
10785 range->pixel_clock_mhz * 10;
10786
10787 connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
10788 connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
10789
10790 break;
10791 }
10792
10793 if (amdgpu_dm_connector->max_vfreq -
10794 amdgpu_dm_connector->min_vfreq > 10) {
10795
10796 freesync_capable = true;
10797 }
10798 }
10799 parse_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
10800
10801 if (vsdb_info.replay_mode) {
10802 amdgpu_dm_connector->vsdb_info.replay_mode = vsdb_info.replay_mode;
10803 amdgpu_dm_connector->vsdb_info.amd_vsdb_version = vsdb_info.amd_vsdb_version;
10804 amdgpu_dm_connector->as_type = ADAPTIVE_SYNC_TYPE_EDP;
10805 }
10806
10807 } else if (edid && sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
10808 i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
10809 if (i >= 0 && vsdb_info.freesync_supported) {
10810 timing = &edid->detailed_timings[i];
10811 data = &timing->data.other_data;
10812
10813 amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
10814 amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
10815 if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
10816 freesync_capable = true;
10817
10818 connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
10819 connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
10820 }
10821 }
10822
10823 as_type = dm_get_adaptive_sync_support_type(amdgpu_dm_connector->dc_link);
10824
10825 if (as_type == FREESYNC_TYPE_PCON_IN_WHITELIST) {
10826 i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
10827 if (i >= 0 && vsdb_info.freesync_supported && vsdb_info.amd_vsdb_version > 0) {
10828
10829 amdgpu_dm_connector->pack_sdp_v1_3 = true;
10830 amdgpu_dm_connector->as_type = as_type;
10831 amdgpu_dm_connector->vsdb_info = vsdb_info;
10832
10833 amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
10834 amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
10835 if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
10836 freesync_capable = true;
10837
10838 connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
10839 connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
10840 }
10841 }
10842
10843 update:
10844 if (dm_con_state)
10845 dm_con_state->freesync_capable = freesync_capable;
10846
10847 if (connector->vrr_capable_property)
10848 drm_connector_set_vrr_capable_property(connector,
10849 freesync_capable);
10850 }
10851
10852 void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
10853 {
10854 struct amdgpu_device *adev = drm_to_adev(dev);
10855 struct dc *dc = adev->dm.dc;
10856 int i;
10857
10858 mutex_lock(&adev->dm.dc_lock);
10859 if (dc->current_state) {
10860 for (i = 0; i < dc->current_state->stream_count; ++i)
10861 dc->current_state->streams[i]
10862 ->triggered_crtc_reset.enabled =
10863 adev->dm.force_timing_sync;
10864
10865 dm_enable_per_frame_crtc_master_sync(dc->current_state);
10866 dc_trigger_sync(dc, dc->current_state);
10867 }
10868 mutex_unlock(&adev->dm.dc_lock);
10869 }
10870
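/*
 * Register access callbacks used by DC. Both helpers go through CGS and
 * emit tracepoints so register traffic can be observed via ftrace.
 */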
10871 void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
10872 u32 value, const char *func_name)
10873 {
10874 #ifdef DM_CHECK_ADDR_0
10875 if (address == 0) {
10876 DC_ERR("invalid register write. address = 0");
10877 return;
10878 }
10879 #endif
10880 cgs_write_register(ctx->cgs_device, address, value);
10881 trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
10882 }
10883
10884 uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
10885 const char *func_name)
10886 {
10887 u32 value;
10888 #ifdef DM_CHECK_ADDR_0
10889 if (address == 0) {
10890 DC_ERR("invalid register read; address = 0\n");
10891 return 0;
10892 }
10893 #endif
10894
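	/*
	 * Register reads are not supported while DMUB register-write
	 * offload is gathering a command sequence (unless burst writes
	 * are in use); flag the misuse and return 0.
	 */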
10895 if (ctx->dmub_srv &&
10896 ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
10897 !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
10898 ASSERT(false);
10899 return 0;
10900 }
10901
10902 value = cgs_read_register(ctx->cgs_device, address);
10903
10904 trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
10905
10906 return value;
10907 }
10908
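/*
 * Perform a DPIA AUX transaction through DMUB: start the asynchronous
 * transfer, wait up to 10 seconds for the reply notification, then copy
 * back any read data. Returns the reply length, or -1 on failure.
 */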
10909 int amdgpu_dm_process_dmub_aux_transfer_sync(
10910 struct dc_context *ctx,
10911 unsigned int link_index,
10912 struct aux_payload *payload,
10913 enum aux_return_code_type *operation_result)
10914 {
10915 struct amdgpu_device *adev = ctx->driver_context;
10916 struct dmub_notification *p_notify = adev->dm.dmub_notify;
10917 int ret = -1;
10918
10919 mutex_lock(&adev->dm.dpia_aux_lock);
10920 if (!dc_process_dmub_aux_transfer_async(ctx->dc, link_index, payload)) {
10921 *operation_result = AUX_RET_ERROR_ENGINE_ACQUIRE;
10922 goto out;
10923 }
10924
10925 if (!wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ)) {
10926 DRM_ERROR("wait_for_completion_timeout timeout!");
10927 *operation_result = AUX_RET_ERROR_TIMEOUT;
10928 goto out;
10929 }
10930
10931 if (p_notify->result != AUX_RET_SUCCESS) {
10932 /*
10933 * Transient states before tunneling is enabled could
10934 * lead to this error. We can ignore this for now.
10935 */
10936 if (p_notify->result != AUX_RET_ERROR_PROTOCOL_ERROR) {
10937 DRM_WARN("DPIA AUX failed on 0x%x(%d), error %d\n",
10938 payload->address, payload->length,
10939 p_notify->result);
10940 }
10941 *operation_result = AUX_RET_ERROR_INVALID_REPLY;
10942 goto out;
10943 }
10944
10945
10946 payload->reply[0] = adev->dm.dmub_notify->aux_reply.command;
10947 if (!payload->write && p_notify->aux_reply.length &&
10948 (payload->reply[0] == AUX_TRANSACTION_REPLY_AUX_ACK)) {
10949
10950 if (payload->length != p_notify->aux_reply.length) {
10951 DRM_WARN("invalid read length %d from DPIA AUX 0x%x(%d)!\n",
10952 p_notify->aux_reply.length,
10953 payload->address, payload->length);
10954 *operation_result = AUX_RET_ERROR_INVALID_REPLY;
10955 goto out;
10956 }
10957
10958 memcpy(payload->data, p_notify->aux_reply.data,
10959 p_notify->aux_reply.length);
10960 }
10961
10962 /* success */
10963 ret = p_notify->aux_reply.length;
10964 *operation_result = p_notify->result;
10965 out:
10966 reinit_completion(&adev->dm.dmub_aux_transfer_done);
10967 mutex_unlock(&adev->dm.dpia_aux_lock);
10968 return ret;
10969 }
10970
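/*
 * Issue a DPIA SET_CONFIG request through DMUB and wait for the status
 * notification unless the command already completed synchronously.
 * Returns 0 on success and -1 on timeout.
 */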
10971 int amdgpu_dm_process_dmub_set_config_sync(
10972 struct dc_context *ctx,
10973 unsigned int link_index,
10974 struct set_config_cmd_payload *payload,
10975 enum set_config_status *operation_result)
10976 {
10977 struct amdgpu_device *adev = ctx->driver_context;
10978 bool is_cmd_complete;
10979 int ret;
10980
10981 mutex_lock(&adev->dm.dpia_aux_lock);
10982 is_cmd_complete = dc_process_dmub_set_config_async(ctx->dc,
10983 link_index, payload, adev->dm.dmub_notify);
10984
10985 if (is_cmd_complete || wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ)) {
10986 ret = 0;
10987 *operation_result = adev->dm.dmub_notify->sc_status;
10988 } else {
10989 DRM_ERROR("wait_for_completion_timeout timeout!");
10990 ret = -1;
10991 *operation_result = SET_CONFIG_UNKNOWN_ERROR;
10992 }
10993
10994 if (!is_cmd_complete)
10995 reinit_completion(&adev->dm.dmub_aux_transfer_done);
10996 mutex_unlock(&adev->dm.dpia_aux_lock);
10997 return ret;
10998 }
10999
11000 /*
11001 * Check whether seamless boot is supported.
11002 *
11003 * So far we only support seamless boot on CHIP_VANGOGH.
11004 * If everything goes well, we may consider expanding
11005 * seamless boot to other ASICs.
11006 */
11007 bool check_seamless_boot_capability(struct amdgpu_device *adev)
11008 {
11009 switch (adev->ip_versions[DCE_HWIP][0]) {
11010 case IP_VERSION(3, 0, 1):
11011 if (!adev->mman.keep_stolen_vga_memory)
11012 return true;
11013 break;
11014 default:
11015 break;
11016 }
11017
11018 return false;
11019 }
11020
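/*
 * Thin wrappers that forward DM-issued DMUB commands to the DC DMUB
 * service, either one command at a time or as a list.
 */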
11021 bool dm_execute_dmub_cmd(const struct dc_context *ctx, union dmub_rb_cmd *cmd, enum dm_dmub_wait_type wait_type)
11022 {
11023 return dc_dmub_srv_cmd_run(ctx->dmub_srv, cmd, wait_type);
11024 }
11025
11026 bool dm_execute_dmub_cmd_list(const struct dc_context *ctx, unsigned int count, union dmub_rb_cmd *cmd, enum dm_dmub_wait_type wait_type)
11027 {
11028 return dc_dmub_srv_cmd_run_list(ctx->dmub_srv, count, cmd, wait_type);
11029 }
11030