1 /*
2  * Copyright (C) 2015-2020 Advanced Micro Devices, Inc. All rights reserved.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  *
24  */
25 
26 #ifndef __AMDGPU_DM_H__
27 #define __AMDGPU_DM_H__
28 
29 #include <drm/display/drm_dp_mst_helper.h>
30 #include <drm/drm_atomic.h>
31 #include <drm/drm_connector.h>
32 #include <drm/drm_crtc.h>
33 #include <drm/drm_plane.h>
34 
35 /*
36  * This file contains the definition for amdgpu_display_manager
37  * and its API for amdgpu driver's use.
38  * This component provides all the display related functionality
39  * and this is the only component that calls DAL API.
 * The API contained here is intended for amdgpu driver use.
41  * The API that is called directly from KMS framework is located
42  * in amdgpu_dm_kms.h file
43  */
44 
45 #define AMDGPU_DM_MAX_DISPLAY_INDEX 31
46 
47 #define AMDGPU_DM_MAX_CRTC 6
48 
49 #define AMDGPU_DM_MAX_NUM_EDP 2
50 
51 #define AMDGPU_DMUB_NOTIFICATION_MAX 5
52 
53 /*
54  * DMUB Async to Sync Mechanism Status
55  */
56 #define DMUB_ASYNC_TO_SYNC_ACCESS_FAIL 1
57 #define DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT 2
58 #define DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS 3
59 /*
60 #include "include/amdgpu_dal_power_if.h"
61 #include "amdgpu_dm_irq.h"
62 */
63 
64 #include "irq_types.h"
65 #include "signal_types.h"
66 #include "amdgpu_dm_crc.h"
67 struct aux_payload;
68 enum aux_return_code_type;
69 
70 /* Forward declarations */
71 struct amdgpu_device;
72 struct amdgpu_crtc;
73 struct drm_device;
74 struct dc;
75 struct amdgpu_bo;
76 struct dmub_srv;
77 struct dc_plane_state;
78 struct dmub_notification;
79 
/**
 * struct common_irq_params - Parameters shared with a registered IRQ handler
 * @adev: amdgpu device the interrupt belongs to
 * @irq_src: DC interrupt source this parameter set is registered for
 * @previous_timestamp: timestamp recorded for the previous occurrence of this
 *	interrupt (atomic; presumably updated by the handler — confirm at the
 *	registration/handler sites)
 */
struct common_irq_params {
	struct amdgpu_device *adev;
	enum dc_irq_source irq_src;
	atomic64_t previous_timestamp;
};
85 
/**
 * struct dm_compressor_info - Buffer info used by frame buffer compression
 * @cpu_addr: MMIO cpu addr
 * @bo_ptr: Pointer to the buffer object
 * @gpu_addr: MMIO gpu addr
 *
 * Holds the allocation backing frame buffer compression (see the @compressor
 * member of &struct amdgpu_display_manager).
 */
struct dm_compressor_info {
	void *cpu_addr;
	struct amdgpu_bo *bo_ptr;
	uint64_t gpu_addr;
};
97 
/*
 * Callback type invoked to handle a notification received from the DMUB
 * firmware. One callback can be registered per notification type (see
 * @dmub_callback in &struct amdgpu_display_manager).
 */
typedef void (*dmub_notify_interrupt_callback_t)(struct amdgpu_device *adev, struct dmub_notification *notify);
99 
/**
 * struct dmub_hpd_work - Handle time consuming work in low priority outbox IRQ
 *
 * @handle_hpd_work: Work to be executed in a separate thread to handle hpd_low_irq
 * @dmub_notify:  notification for callback function
 * @adev: amdgpu_device pointer
 *
 * Wrapper used to defer HPD notification processing out of the outbox IRQ
 * path and into process context.
 */
struct dmub_hpd_work {
	struct work_struct handle_hpd_work;
	struct dmub_notification *dmub_notify;
	struct amdgpu_device *adev;
};
112 
/**
 * struct vblank_control_work - Work data for vblank control
 * @work: Kernel work data for the work event
 * @dm: amdgpu display manager device
 * @acrtc: amdgpu CRTC instance for which the event has occurred
 * @stream: DC stream for which the event has occurred
 * @enable: true if enabling vblank
 *
 * Presumably queued on @vblank_control_workqueue of
 * &struct amdgpu_display_manager — confirm at the queueing site.
 */
struct vblank_control_work {
	struct work_struct work;
	struct amdgpu_display_manager *dm;
	struct amdgpu_crtc *acrtc;
	struct dc_stream_state *stream;
	bool enable;
};
128 
/**
 * struct amdgpu_dm_backlight_caps - Information about backlight
 *
 * Describe the backlight support for ACPI or eDP AUX.
 */
struct amdgpu_dm_backlight_caps {
	/**
	 * @ext_caps: Keep the data struct with all the information about the
	 * display support for HDR.
	 */
	union dpcd_sink_ext_caps *ext_caps;
	/**
	 * @aux_min_input_signal: Min brightness value supported by the display
	 */
	u32 aux_min_input_signal;
	/**
	 * @aux_max_input_signal: Max brightness value supported by the display
	 * in nits.
	 */
	u32 aux_max_input_signal;
	/**
	 * @min_input_signal: minimum possible input in range 0-255.
	 */
	int min_input_signal;
	/**
	 * @max_input_signal: maximum possible input in range 0-255.
	 */
	int max_input_signal;
	/**
	 * @caps_valid: true if these values are from the ACPI interface.
	 */
	bool caps_valid;
	/**
	 * @aux_support: Describes if the display supports AUX backlight.
	 */
	bool aux_support;
};
166 
/**
 * struct dal_allocation - Tracks mapped FB memory for SMU communication
 * @list: list of dal allocations
 * @bo: GPU buffer object
 * @cpu_ptr: CPU virtual address of the GPU buffer object
 * @gpu_addr: GPU virtual address of the GPU buffer object
 *
 * Entries are linked on the @da_list of &struct amdgpu_display_manager.
 */
struct dal_allocation {
	struct list_head list;
	struct amdgpu_bo *bo;
	void *cpu_ptr;
	u64 gpu_addr;
};
180 
/**
 * struct hpd_rx_irq_offload_work_queue - Work queue to handle hpd_rx_irq
 * offload work
 */
struct hpd_rx_irq_offload_work_queue {
	/**
	 * @wq: workqueue structure to queue offload work.
	 */
	struct workqueue_struct *wq;
	/**
	 * @offload_lock: To protect fields of offload work queue (e.g.
	 * @is_handling_link_loss).
	 */
	spinlock_t offload_lock;
	/**
	 * @is_handling_link_loss: Used to prevent inserting link loss event when
	 * we're handling link loss
	 */
	bool is_handling_link_loss;
	/**
	 * @aconnector: The aconnector that this work queue is attached to
	 */
	struct amdgpu_dm_connector *aconnector;
};
204 
/**
 * struct hpd_rx_irq_offload_work - hpd_rx_irq offload work structure
 */
struct hpd_rx_irq_offload_work {
	/**
	 * @work: offload work
	 */
	struct work_struct work;
	/**
	 * @data: reference irq data which is used while handling offload work
	 */
	union hpd_irq_data data;
	/**
	 * @offload_wq: offload work queue that this work is queued to
	 */
	struct hpd_rx_irq_offload_work_queue *offload_wq;
};
222 
/**
 * struct amdgpu_display_manager - Central amdgpu display manager device
 *
 * @dc: Display Core control structure
 * @adev: AMDGPU base driver structure
 * @ddev: DRM base driver structure
 * @display_indexes_num: Max number of display streams supported
 * @irq_handler_list_table_lock: Synchronizes access to IRQ tables
 * @backlight_dev: Backlight control device
 * @backlight_link: Link on which to control backlight
 * @backlight_caps: Capabilities of the backlight device
 * @freesync_module: Module handling freesync calculations
 * @hdcp_workqueue: AMDGPU content protection queue
 * @fw_dmcu: Reference to DMCU firmware
 * @dmcu_fw_version: Version of the DMCU firmware
 * @soc_bounding_box: SOC bounding box values provided by gpu_info FW
 * @cached_state: Caches device atomic state for suspend/resume
 * @cached_dc_state: Cached state of content streams
 * @compressor: Frame buffer compression buffer. See &struct dm_compressor_info
 * @force_timing_sync: set via debugfs. When set, indicates that all connected
 *		       displays will be forced to synchronize.
 * @dmcub_trace_event_en: enable dmcub trace events
 */
struct amdgpu_display_manager {

	struct dc *dc;

	/**
	 * @dmub_srv:
	 *
	 * DMUB service, used for controlling the DMUB on hardware
	 * that supports it. The pointer to the dmub_srv will be
	 * NULL on hardware that does not support it.
	 */
	struct dmub_srv *dmub_srv;

	/**
	 * @dmub_notify:
	 *
	 * Notification from DMUB.
	 */

	struct dmub_notification *dmub_notify;

	/**
	 * @dmub_callback:
	 *
	 * Callback functions to handle notification from DMUB.
	 */

	dmub_notify_interrupt_callback_t dmub_callback[AMDGPU_DMUB_NOTIFICATION_MAX];

	/**
	 * @dmub_thread_offload:
	 *
	 * Flag to indicate if callback is offload.
	 */

	bool dmub_thread_offload[AMDGPU_DMUB_NOTIFICATION_MAX];

	/**
	 * @dmub_fb_info:
	 *
	 * Framebuffer regions for the DMUB.
	 */
	struct dmub_srv_fb_info *dmub_fb_info;

	/**
	 * @dmub_fw:
	 *
	 * DMUB firmware, required on hardware that has DMUB support.
	 */
	const struct firmware *dmub_fw;

	/**
	 * @dmub_bo:
	 *
	 * Buffer object for the DMUB.
	 */
	struct amdgpu_bo *dmub_bo;

	/**
	 * @dmub_bo_gpu_addr:
	 *
	 * GPU virtual address for the DMUB buffer object.
	 */
	u64 dmub_bo_gpu_addr;

	/**
	 * @dmub_bo_cpu_addr:
	 *
	 * CPU address for the DMUB buffer object.
	 */
	void *dmub_bo_cpu_addr;

	/**
	 * @dmcub_fw_version:
	 *
	 * DMCUB firmware version.
	 */
	uint32_t dmcub_fw_version;

	/**
	 * @cgs_device:
	 *
	 * The Common Graphics Services device. It provides an interface for
	 * accessing registers.
	 */
	struct cgs_device *cgs_device;

	struct amdgpu_device *adev;
	struct drm_device *ddev;
	u16 display_indexes_num;

	/**
	 * @atomic_obj:
	 *
	 * In combination with &dm_atomic_state it helps manage
	 * global atomic state that doesn't map cleanly into existing
	 * drm resources, like &dc_context.
	 */
	struct drm_private_obj atomic_obj;

	/**
	 * @dc_lock:
	 *
	 * Guards access to DC functions that can issue register write
	 * sequences.
	 */
	struct mutex dc_lock;

	/**
	 * @audio_lock:
	 *
	 * Guards access to audio instance changes.
	 */
	struct mutex audio_lock;

	/**
	 * @vblank_lock:
	 *
	 * Guards access to deferred vblank work state.
	 */
	spinlock_t vblank_lock;

	/**
	 * @audio_component:
	 *
	 * Used to notify ELD changes to sound driver.
	 */
	struct drm_audio_component *audio_component;

	/**
	 * @audio_registered:
	 *
	 * True if the audio component has been registered
	 * successfully, false otherwise.
	 */
	bool audio_registered;

	/**
	 * @irq_handler_list_low_tab:
	 *
	 * Low priority IRQ handler table.
	 *
	 * It is a n*m table consisting of n IRQ sources, and m handlers per IRQ
	 * source. Low priority IRQ handlers are deferred to a workqueue to be
	 * processed. Hence, they can sleep.
	 *
	 * Note that handlers are called in the same order as they were
	 * registered (FIFO).
	 */
	struct list_head irq_handler_list_low_tab[DAL_IRQ_SOURCES_NUMBER];

	/**
	 * @irq_handler_list_high_tab:
	 *
	 * High priority IRQ handler table.
	 *
	 * It is a n*m table, same as &irq_handler_list_low_tab. However,
	 * handlers in this table are not deferred and are called immediately.
	 */
	struct list_head irq_handler_list_high_tab[DAL_IRQ_SOURCES_NUMBER];

	/**
	 * @pflip_params:
	 *
	 * Page flip IRQ parameters, passed to registered handlers when
	 * triggered.
	 */
	struct common_irq_params
	pflip_params[DC_IRQ_SOURCE_PFLIP_LAST - DC_IRQ_SOURCE_PFLIP_FIRST + 1];

	/**
	 * @vblank_params:
	 *
	 * Vertical blanking IRQ parameters, passed to registered handlers when
	 * triggered.
	 */
	struct common_irq_params
	vblank_params[DC_IRQ_SOURCE_VBLANK6 - DC_IRQ_SOURCE_VBLANK1 + 1];

	/**
	 * @vline0_params:
	 *
	 * OTG vertical interrupt0 IRQ parameters, passed to registered
	 * handlers when triggered.
	 */
	struct common_irq_params
	vline0_params[DC_IRQ_SOURCE_DC6_VLINE0 - DC_IRQ_SOURCE_DC1_VLINE0 + 1];

	/**
	 * @vupdate_params:
	 *
	 * Vertical update IRQ parameters, passed to registered handlers when
	 * triggered.
	 */
	struct common_irq_params
	vupdate_params[DC_IRQ_SOURCE_VUPDATE6 - DC_IRQ_SOURCE_VUPDATE1 + 1];

	/**
	 * @dmub_trace_params:
	 *
	 * DMUB trace event IRQ parameters, passed to registered handlers when
	 * triggered.
	 */
	struct common_irq_params
	dmub_trace_params[1];

	/**
	 * @dmub_outbox_params:
	 *
	 * DMUB outbox IRQ parameters, passed to registered handlers when
	 * triggered.
	 */
	struct common_irq_params
	dmub_outbox_params[1];

	spinlock_t irq_handler_list_table_lock;

	struct backlight_device *backlight_dev[AMDGPU_DM_MAX_NUM_EDP];

	const struct dc_link *backlight_link[AMDGPU_DM_MAX_NUM_EDP];

	/**
	 * @num_of_edps:
	 *
	 * Number of eDP links in use; bounds the valid entries of the
	 * AMDGPU_DM_MAX_NUM_EDP-sized arrays above.
	 */
	uint8_t num_of_edps;

	struct amdgpu_dm_backlight_caps backlight_caps[AMDGPU_DM_MAX_NUM_EDP];

	struct mod_freesync *freesync_module;
#ifdef CONFIG_DRM_AMD_DC_HDCP
	struct hdcp_workqueue *hdcp_workqueue;
#endif

	/**
	 * @vblank_control_workqueue:
	 *
	 * Deferred work for vblank control events.
	 */
	struct workqueue_struct *vblank_control_workqueue;

	struct drm_atomic_state *cached_state;
	struct dc_state *cached_dc_state;

	struct dm_compressor_info compressor;

	const struct firmware *fw_dmcu;
	uint32_t dmcu_fw_version;
	/**
	 * @soc_bounding_box:
	 *
	 * gpu_info FW provided soc bounding box struct or 0 if not
	 * available in FW
	 */
	const struct gpu_info_soc_bounding_box_v1_0 *soc_bounding_box;

	/**
	 * @active_vblank_irq_count:
	 *
	 * number of currently active vblank irqs
	 */
	uint32_t active_vblank_irq_count;

#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
	/**
	 * @crc_rd_wrk:
	 *
	 * Work to be executed in a separate thread to communicate with PSP.
	 */
	struct crc_rd_work *crc_rd_wrk;
#endif
	/**
	 * @hpd_rx_offload_wq:
	 *
	 * Work queue to offload works of hpd_rx_irq
	 */
	struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq;
	/**
	 * @mst_encoders:
	 *
	 * fake encoders used for DP MST.
	 */
	struct amdgpu_encoder mst_encoders[AMDGPU_DM_MAX_CRTC];
	bool force_timing_sync;
	/* When true, HPD interrupt handling is suppressed — confirm the
	 * setter (likely debugfs) before relying on this. */
	bool disable_hpd_irq;
	bool dmcub_trace_event_en;
	/**
	 * @da_list:
	 *
	 * DAL fb memory allocation list, for communication with SMU.
	 */
	struct list_head da_list;
	/* Completion presumably signaled when a DMUB AUX transfer finishes;
	 * see amdgpu_dm_process_dmub_aux_transfer_sync() — confirm. */
	struct completion dmub_aux_transfer_done;
	/* Workqueue for deferred HPD work; likely used to queue
	 * &struct dmub_hpd_work — confirm at the queueing site. */
	struct workqueue_struct *delayed_hpd_wq;

	/**
	 * @brightness:
	 *
	 * cached backlight values.
	 */
	u32 brightness[AMDGPU_DM_MAX_NUM_EDP];
	/**
	 * @actual_brightness:
	 *
	 * last successfully applied backlight values.
	 */
	u32 actual_brightness[AMDGPU_DM_MAX_NUM_EDP];

	/**
	 * @aux_hpd_discon_quirk:
	 *
	 * quirk for hpd discon while aux is on-going.
	 * occurred on certain intel platform
	 */
	bool aux_hpd_discon_quirk;
};
552 
/**
 * enum dsc_clock_force_state - DSC force setting for a stream
 * @DSC_CLK_FORCE_DEFAULT: leave the DSC enable decision to the driver
 * @DSC_CLK_FORCE_ENABLE: force DSC on
 * @DSC_CLK_FORCE_DISABLE: force DSC off
 */
enum dsc_clock_force_state {
	DSC_CLK_FORCE_DEFAULT = 0,
	DSC_CLK_FORCE_ENABLE,
	DSC_CLK_FORCE_DISABLE,
};
558 
/**
 * struct dsc_preferred_settings - Preferred DSC overrides for a connector
 * @dsc_force_enable: force DSC on/off or leave to driver default
 * @dsc_num_slices_v: requested number of vertical slices (0 = no override —
 *	confirm against the consumer)
 * @dsc_num_slices_h: requested number of horizontal slices
 * @dsc_bits_per_pixel: requested DSC bits per pixel
 * @dsc_force_disable_passthrough: disable DSC passthrough (MST context —
 *	confirm)
 */
struct dsc_preferred_settings {
	enum dsc_clock_force_state dsc_force_enable;
	uint32_t dsc_num_slices_v;
	uint32_t dsc_num_slices_h;
	uint32_t dsc_bits_per_pixel;
	bool dsc_force_disable_passthrough;
};
566 
/* DM-specific connector, wrapping &struct drm_connector. */
struct amdgpu_dm_connector {

	struct drm_connector base;
	uint32_t connector_id;

	/*
	 * We need to mind the EDID between detect and get modes due to
	 * analog/digital/tvencoder.
	 */
	struct edid *edid;

	/* shared with amdgpu */
	struct amdgpu_hpd hpd;

	/* number of modes generated from EDID at 'dc_sink' */
	int num_modes;

	/*
	 * The 'old' sink - before an HPD.
	 * The 'current' sink is in dc_link->sink.
	 */
	struct dc_sink *dc_sink;
	struct dc_link *dc_link;
	/* emulated sink (see dc_sink above) */
	struct dc_sink *dc_em_sink;

	/* DM only: DP MST topology and AUX plumbing */
	struct drm_dp_mst_topology_mgr mst_mgr;
	struct amdgpu_dm_dp_aux dm_dp_aux;
	struct drm_dp_mst_port *port;
	/* MST parent connector, NULL presumably for non-MST — confirm */
	struct amdgpu_dm_connector *mst_port;
	struct drm_dp_aux *dsc_aux;

	/* TODO see if we can merge with ddc_bus or make a dm_connector */
	struct amdgpu_i2c_adapter *i2c;

	/* Monitor range limits */
	int min_vfreq ;
	int max_vfreq ;
	int pixel_clock_mhz;

	/* Audio instance - protected by audio_lock. */
	int audio_inst;

	/* serializes HPD handling for this connector */
	struct mutex hpd_lock;

	bool fake_enable;
#ifdef CONFIG_DEBUG_FS
	/* DPCD address/size exposed through debugfs */
	uint32_t debugfs_dpcd_address;
	uint32_t debugfs_dpcd_size;
#endif
	bool force_yuv420_output;
	struct dsc_preferred_settings dsc_settings;
	union dp_downstream_port_present mst_downstream_port_present;
	/* Cached display modes */
	struct drm_display_mode freesync_vid_base;

	/* counter used to delay/skip PSR entry — confirm exact semantics */
	int psr_skip_count;
};

/* Convert a &struct drm_connector pointer to the wrapping DM connector. */
#define to_amdgpu_dm_connector(x) container_of(x, struct amdgpu_dm_connector, base)
623 
624 extern const struct amdgpu_ip_block_version dm_ip_block;
625 
/**
 * struct dm_plane_state - DM subclass of &struct drm_plane_state
 * @base: DRM plane state
 * @dc_state: DC plane state paired with this DRM plane state
 */
struct dm_plane_state {
	struct drm_plane_state base;
	struct dc_plane_state *dc_state;
};
630 
/* DM subclass of &struct drm_crtc_state. */
struct dm_crtc_state {
	struct drm_crtc_state base;
	/* DC stream driven by this CRTC */
	struct dc_stream_state *stream;

	/* color-management (degamma) tracking flags */
	bool cm_has_degamma;
	bool cm_is_degamma_srgb;

	bool mpo_requested;

	int update_type;
	int active_planes;

	/* frames left to skip before CRC capture — confirm exact use */
	int crc_skip_count;

	bool freesync_timing_changed;
	bool freesync_vrr_info_changed;

	bool dsc_force_changed;
	bool vrr_supported;

	bool force_dpms_off;
	struct mod_freesync_config freesync_config;
	/* VRR infoframe sent to the sink */
	struct dc_info_packet vrr_infopacket;

	int abm_level;
};

/* Convert a &struct drm_crtc_state pointer to the wrapping DM state. */
#define to_dm_crtc_state(x) container_of(x, struct dm_crtc_state, base)
659 
/**
 * struct dm_atomic_state - DM private object state (see @atomic_obj in
 * &struct amdgpu_display_manager)
 * @base: DRM private state
 * @context: global DC state carried alongside the DRM atomic state
 */
struct dm_atomic_state {
	struct drm_private_state base;

	struct dc_state *context;
};

/* Convert a &struct drm_private_state pointer to the wrapping DM state. */
#define to_dm_atomic_state(x) container_of(x, struct dm_atomic_state, base)
667 
/* DM subclass of &struct drm_connector_state. */
struct dm_connector_state {
	struct drm_connector_state base;

	/* scaling mode (RMX) selected for this connector */
	enum amdgpu_rmx_type scaling;
	uint8_t underscan_vborder;
	uint8_t underscan_hborder;
	bool underscan_enable;
	bool freesync_capable;
#ifdef CONFIG_DRM_AMD_DC_HDCP
	bool update_hdcp;
#endif
	/* adaptive backlight modulation level */
	uint8_t abm_level;
	/* DP MST payload bookkeeping */
	int vcpi_slots;
	uint64_t pbn;
};
683 
/* Parsed AMD HDMI Vendor Specific Data Block (VSDB) information. */
struct amdgpu_hdmi_vsdb_info {
	unsigned int amd_vsdb_version;		/* VSDB version, should be used to determine which VSIF to send */
	bool freesync_supported;		/* FreeSync Supported */
	unsigned int min_refresh_rate_hz;	/* FreeSync Minimum Refresh Rate in Hz */
	unsigned int max_refresh_rate_hz;	/* FreeSync Maximum Refresh Rate in Hz */
};


/* Convert a &struct drm_connector_state pointer to the wrapping DM state. */
#define to_dm_connector_state(x)\
	container_of((x), struct dm_connector_state, base)
694 
/* drm_connector_funcs state hooks implemented by DM. */
void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector);
struct drm_connector_state *
amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector);
int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
					    struct drm_connector_state *state,
					    struct drm_property *property,
					    uint64_t val);

int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
					    const struct drm_connector_state *state,
					    struct drm_property *property,
					    uint64_t *val);

int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev);

/* Initialize a DM connector for the given link/connector type. */
void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
				     struct amdgpu_dm_connector *aconnector,
				     int connector_type,
				     struct dc_link *link,
				     int link_index);

/* Validate a display mode for the given connector. */
enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
				   struct drm_display_mode *mode);

void dm_restore_drm_connector_state(struct drm_device *dev,
				    struct drm_connector *connector);

/* Refresh FreeSync capability info from the (possibly NULL) EDID. */
void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
					struct edid *edid);

void amdgpu_dm_trigger_timing_sync(struct drm_device *dev);
726 
#define MAX_COLOR_LUT_ENTRIES 4096
/* Legacy gamma LUT users such as X don't like large LUT sizes */
#define MAX_COLOR_LEGACY_LUT_ENTRIES 256

/* Color management helpers (amdgpu_dm_color.c — confirm location). */
void amdgpu_dm_init_color_mod(void);
int amdgpu_dm_verify_lut_sizes(const struct drm_crtc_state *crtc_state);
int amdgpu_dm_update_crtc_color_mgmt(struct dm_crtc_state *crtc);
int amdgpu_dm_update_plane_color_mgmt(struct dm_crtc_state *crtc,
				      struct dc_plane_state *dc_plane_state);

void amdgpu_dm_update_connector_after_detect(
		struct amdgpu_dm_connector *aconnector);

extern const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs;

/* Synchronously perform a DMUB AUX (or command) transfer; see
 * @dmub_aux_transfer_done in &struct amdgpu_display_manager. */
int amdgpu_dm_process_dmub_aux_transfer_sync(bool is_cmd_aux,
					struct dc_context *ctx, unsigned int link_index,
					void *payload, void *operation_result);

bool check_seamless_boot_capability(struct amdgpu_device *adev);

/* Create and validate a DC stream for the given sink/mode/state. */
struct dc_stream_state *
	create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
					const struct drm_display_mode *drm_mode,
					const struct dm_connector_state *dm_state,
					const struct dc_stream_state *old_stream);

/* Fetch (locking) the DM private atomic state from a DRM atomic state. */
int dm_atomic_get_state(struct drm_atomic_state *state,
			struct dm_atomic_state **dm_state);

struct amdgpu_dm_connector *
amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
					     struct drm_crtc *crtc);
760 #endif /* __AMDGPU_DM_H__ */
761