1 /*
2  * Copyright (C) 2015-2020 Advanced Micro Devices, Inc. All rights reserved.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  *
24  */
25 
26 #ifndef __AMDGPU_DM_H__
27 #define __AMDGPU_DM_H__
28 
29 #include <drm/display/drm_dp_mst_helper.h>
30 #include <drm/drm_atomic.h>
31 #include <drm/drm_connector.h>
32 #include <drm/drm_crtc.h>
33 #include <drm/drm_plane.h>
34 
35 /*
36  * This file contains the definition for amdgpu_display_manager
37  * and its API for amdgpu driver's use.
38  * This component provides all the display related functionality
39  * and this is the only component that calls DAL API.
 * The API contained here is intended for amdgpu driver use.
41  * The API that is called directly from KMS framework is located
42  * in amdgpu_dm_kms.h file
43  */
44 
45 #define AMDGPU_DM_MAX_DISPLAY_INDEX 31
46 
47 #define AMDGPU_DM_MAX_CRTC 6
48 
49 #define AMDGPU_DM_MAX_NUM_EDP 2
50 
51 #define AMDGPU_DMUB_NOTIFICATION_MAX 5
52 
53 /*
54 #include "include/amdgpu_dal_power_if.h"
55 #include "amdgpu_dm_irq.h"
56 */
57 
58 #include "irq_types.h"
59 #include "signal_types.h"
60 #include "amdgpu_dm_crc.h"
61 struct aux_payload;
62 enum aux_return_code_type;
63 
64 /* Forward declarations */
65 struct amdgpu_device;
66 struct amdgpu_crtc;
67 struct drm_device;
68 struct dc;
69 struct amdgpu_bo;
70 struct dmub_srv;
71 struct dc_plane_state;
72 struct dmub_notification;
73 
/**
 * struct common_irq_params - Parameters shared by the registered IRQ handlers
 * @adev: amdgpu base driver device the interrupt belongs to
 * @irq_src: DC IRQ source this parameter block was registered for
 * @previous_timestamp: timestamp of the previous interrupt occurrence,
 *                      updated atomically (64-bit)
 *
 * Instances of this struct are stored in the per-source arrays of
 * &struct amdgpu_display_manager (e.g. @pflip_params, @vblank_params) and
 * passed to the registered handlers when the interrupt triggers.
 */
struct common_irq_params {
	struct amdgpu_device *adev;
	enum dc_irq_source irq_src;
	atomic64_t previous_timestamp;
};
79 
/**
 * struct dm_compressor_info - Buffer info used by frame buffer compression
 * @cpu_addr: MMIO cpu addr
 * @bo_ptr: Pointer to the buffer object
 * @gpu_addr: MMIO gpu addr
 *
 * See &amdgpu_display_manager.compressor for the instance used by DM.
 */
struct dm_compressor_info {
	void *cpu_addr;
	struct amdgpu_bo *bo_ptr;
	uint64_t gpu_addr;
};
91 
92 typedef void (*dmub_notify_interrupt_callback_t)(struct amdgpu_device *adev, struct dmub_notification *notify);
93 
/**
 * struct dmub_hpd_work - Handle time consuming work in low priority outbox IRQ
 *
 * @handle_hpd_work: Work to be executed in a separate thread to handle hpd_low_irq
 * @dmub_notify: Notification for the callback function
 * @adev: amdgpu_device pointer
 */
struct dmub_hpd_work {
	struct work_struct handle_hpd_work;
	struct dmub_notification *dmub_notify;
	struct amdgpu_device *adev;
};
106 
/**
 * struct vblank_control_work - Work data for vblank control
 * @work: Kernel work data for the work event
 * @dm: amdgpu display manager device
 * @acrtc: amdgpu CRTC instance for which the event has occurred
 * @stream: DC stream for which the event has occurred
 * @enable: true if enabling vblank
 *
 * NOTE(review): presumably queued on
 * &amdgpu_display_manager.vblank_control_workqueue — confirm against the
 * implementation in amdgpu_dm.c.
 */
struct vblank_control_work {
	struct work_struct work;
	struct amdgpu_display_manager *dm;
	struct amdgpu_crtc *acrtc;
	struct dc_stream_state *stream;
	bool enable;
};
122 
/**
 * struct amdgpu_dm_backlight_caps - Information about backlight
 *
 * Describe the backlight support for ACPI or eDP AUX.
 */
struct amdgpu_dm_backlight_caps {
	/**
	 * @ext_caps: Keep the data struct with all the information about the
	 * display support for HDR.
	 */
	union dpcd_sink_ext_caps *ext_caps;
	/**
	 * @aux_min_input_signal: Min brightness value supported by the display
	 * in nits.
	 */
	u32 aux_min_input_signal;
	/**
	 * @aux_max_input_signal: Max brightness value supported by the display
	 * in nits.
	 */
	u32 aux_max_input_signal;
	/**
	 * @min_input_signal: Minimum possible input in range 0-255.
	 */
	int min_input_signal;
	/**
	 * @max_input_signal: Maximum possible input in range 0-255.
	 */
	int max_input_signal;
	/**
	 * @caps_valid: True if these values are from the ACPI interface.
	 */
	bool caps_valid;
	/**
	 * @aux_support: Describes if the display supports AUX backlight.
	 */
	bool aux_support;
};
160 
/**
 * struct dal_allocation - Tracks mapped FB memory for SMU communication
 * @list: list of dal allocations, anchored at
 *        &amdgpu_display_manager.da_list
 * @bo: GPU buffer object
 * @cpu_ptr: CPU virtual address of the GPU buffer object
 * @gpu_addr: GPU virtual address of the GPU buffer object
 */
struct dal_allocation {
	struct list_head list;
	struct amdgpu_bo *bo;
	void *cpu_ptr;
	u64 gpu_addr;
};
174 
/**
 * struct hpd_rx_irq_offload_work_queue - Work queue to handle hpd_rx_irq
 * offload work
 */
struct hpd_rx_irq_offload_work_queue {
	/**
	 * @wq: Workqueue structure to queue offload work.
	 */
	struct workqueue_struct *wq;
	/**
	 * @offload_lock: To protect fields of offload work queue.
	 */
	spinlock_t offload_lock;
	/**
	 * @is_handling_link_loss: Used to prevent inserting link loss event
	 * when we're handling link loss.
	 */
	bool is_handling_link_loss;
	/**
	 * @aconnector: The aconnector that this work queue is attached to.
	 */
	struct amdgpu_dm_connector *aconnector;
};
198 
/**
 * struct hpd_rx_irq_offload_work - hpd_rx_irq offload work structure
 */
struct hpd_rx_irq_offload_work {
	/**
	 * @work: Offload work.
	 */
	struct work_struct work;
	/**
	 * @data: Reference irq data which is used while handling offload work.
	 */
	union hpd_irq_data data;
	/**
	 * @offload_wq: Offload work queue that this work is queued to.
	 */
	struct hpd_rx_irq_offload_work_queue *offload_wq;
};
216 
217 /**
218  * struct amdgpu_display_manager - Central amdgpu display manager device
219  *
220  * @dc: Display Core control structure
221  * @adev: AMDGPU base driver structure
222  * @ddev: DRM base driver structure
223  * @display_indexes_num: Max number of display streams supported
224  * @irq_handler_list_table_lock: Synchronizes access to IRQ tables
225  * @backlight_dev: Backlight control device
226  * @backlight_link: Link on which to control backlight
227  * @backlight_caps: Capabilities of the backlight device
228  * @freesync_module: Module handling freesync calculations
229  * @hdcp_workqueue: AMDGPU content protection queue
230  * @fw_dmcu: Reference to DMCU firmware
231  * @dmcu_fw_version: Version of the DMCU firmware
232  * @soc_bounding_box: SOC bounding box values provided by gpu_info FW
233  * @cached_state: Caches device atomic state for suspend/resume
234  * @cached_dc_state: Cached state of content streams
235  * @compressor: Frame buffer compression buffer. See &struct dm_compressor_info
236  * @force_timing_sync: set via debugfs. When set, indicates that all connected
237  *		       displays will be forced to synchronize.
238  * @dmcub_trace_event_en: enable dmcub trace events
239  * @dmub_outbox_params: DMUB Outbox parameters
240  * @num_of_edps: number of backlight eDPs
241  * @disable_hpd_irq: disables all HPD and HPD RX interrupt handling in the
242  *		     driver when true
243  * @dmub_aux_transfer_done: struct completion used to indicate when DMUB
244  * 			    transfers are done
245  * @delayed_hpd_wq: work queue used to delay DMUB HPD work
246  */
struct amdgpu_display_manager {

	struct dc *dc;

	/**
	 * @dmub_srv:
	 *
	 * DMUB service, used for controlling the DMUB on hardware
	 * that supports it. The pointer to the dmub_srv will be
	 * NULL on hardware that does not support it.
	 */
	struct dmub_srv *dmub_srv;

	/**
	 * @dmub_notify:
	 *
	 * Notification from DMUB.
	 */
	struct dmub_notification *dmub_notify;

	/**
	 * @dmub_callback:
	 *
	 * Callback functions to handle notification from DMUB, one entry per
	 * notification type (AMDGPU_DMUB_NOTIFICATION_MAX).
	 */
	dmub_notify_interrupt_callback_t dmub_callback[AMDGPU_DMUB_NOTIFICATION_MAX];

	/**
	 * @dmub_thread_offload:
	 *
	 * Per-notification-type flag indicating whether the matching
	 * @dmub_callback is offloaded (run from deferred work) instead of
	 * being called directly.
	 */
	bool dmub_thread_offload[AMDGPU_DMUB_NOTIFICATION_MAX];

	/**
	 * @dmub_fb_info:
	 *
	 * Framebuffer regions for the DMUB.
	 */
	struct dmub_srv_fb_info *dmub_fb_info;

	/**
	 * @dmub_fw:
	 *
	 * DMUB firmware, required on hardware that has DMUB support.
	 */
	const struct firmware *dmub_fw;

	/**
	 * @dmub_bo:
	 *
	 * Buffer object for the DMUB.
	 */
	struct amdgpu_bo *dmub_bo;

	/**
	 * @dmub_bo_gpu_addr:
	 *
	 * GPU virtual address for the DMUB buffer object.
	 */
	u64 dmub_bo_gpu_addr;

	/**
	 * @dmub_bo_cpu_addr:
	 *
	 * CPU address for the DMUB buffer object.
	 */
	void *dmub_bo_cpu_addr;

	/**
	 * @dmcub_fw_version:
	 *
	 * DMCUB firmware version.
	 */
	uint32_t dmcub_fw_version;

	/**
	 * @cgs_device:
	 *
	 * The Common Graphics Services device. It provides an interface for
	 * accessing registers.
	 */
	struct cgs_device *cgs_device;

	struct amdgpu_device *adev;
	struct drm_device *ddev;
	u16 display_indexes_num;

	/**
	 * @atomic_obj:
	 *
	 * In combination with &dm_atomic_state it helps manage
	 * global atomic state that doesn't map cleanly into existing
	 * drm resources, like &dc_context.
	 */
	struct drm_private_obj atomic_obj;

	/**
	 * @dc_lock:
	 *
	 * Guards access to DC functions that can issue register write
	 * sequences.
	 */
	struct mutex dc_lock;

	/**
	 * @audio_lock:
	 *
	 * Guards access to audio instance changes.
	 */
	struct mutex audio_lock;

	/**
	 * @vblank_lock:
	 *
	 * Guards access to deferred vblank work state.
	 */
	spinlock_t vblank_lock;

	/**
	 * @audio_component:
	 *
	 * Used to notify ELD changes to sound driver.
	 */
	struct drm_audio_component *audio_component;

	/**
	 * @audio_registered:
	 *
	 * True if the audio component has been registered
	 * successfully, false otherwise.
	 */
	bool audio_registered;

	/**
	 * @irq_handler_list_low_tab:
	 *
	 * Low priority IRQ handler table.
	 *
	 * It is a n*m table consisting of n IRQ sources, and m handlers per IRQ
	 * source. Low priority IRQ handlers are deferred to a workqueue to be
	 * processed. Hence, they can sleep.
	 *
	 * Note that handlers are called in the same order as they were
	 * registered (FIFO).
	 */
	struct list_head irq_handler_list_low_tab[DAL_IRQ_SOURCES_NUMBER];

	/**
	 * @irq_handler_list_high_tab:
	 *
	 * High priority IRQ handler table.
	 *
	 * It is a n*m table, same as &irq_handler_list_low_tab. However,
	 * handlers in this table are not deferred and are called immediately.
	 */
	struct list_head irq_handler_list_high_tab[DAL_IRQ_SOURCES_NUMBER];

	/**
	 * @pflip_params:
	 *
	 * Page flip IRQ parameters, passed to registered handlers when
	 * triggered.
	 */
	struct common_irq_params
	pflip_params[DC_IRQ_SOURCE_PFLIP_LAST - DC_IRQ_SOURCE_PFLIP_FIRST + 1];

	/**
	 * @vblank_params:
	 *
	 * Vertical blanking IRQ parameters, passed to registered handlers when
	 * triggered.
	 */
	struct common_irq_params
	vblank_params[DC_IRQ_SOURCE_VBLANK6 - DC_IRQ_SOURCE_VBLANK1 + 1];

	/**
	 * @vline0_params:
	 *
	 * OTG vertical interrupt0 IRQ parameters, passed to registered
	 * handlers when triggered.
	 */
	struct common_irq_params
	vline0_params[DC_IRQ_SOURCE_DC6_VLINE0 - DC_IRQ_SOURCE_DC1_VLINE0 + 1];

	/**
	 * @vupdate_params:
	 *
	 * Vertical update IRQ parameters, passed to registered handlers when
	 * triggered.
	 */
	struct common_irq_params
	vupdate_params[DC_IRQ_SOURCE_VUPDATE6 - DC_IRQ_SOURCE_VUPDATE1 + 1];

	/**
	 * @dmub_trace_params:
	 *
	 * DMUB trace event IRQ parameters, passed to registered handlers when
	 * triggered.
	 */
	struct common_irq_params
	dmub_trace_params[1];

	struct common_irq_params
	dmub_outbox_params[1];

	spinlock_t irq_handler_list_table_lock;

	struct backlight_device *backlight_dev[AMDGPU_DM_MAX_NUM_EDP];

	const struct dc_link *backlight_link[AMDGPU_DM_MAX_NUM_EDP];

	uint8_t num_of_edps;

	struct amdgpu_dm_backlight_caps backlight_caps[AMDGPU_DM_MAX_NUM_EDP];

	struct mod_freesync *freesync_module;
#ifdef CONFIG_DRM_AMD_DC_HDCP
	struct hdcp_workqueue *hdcp_workqueue;
#endif

	/**
	 * @vblank_control_workqueue:
	 *
	 * Deferred work for vblank control events.
	 */
	struct workqueue_struct *vblank_control_workqueue;

	struct drm_atomic_state *cached_state;
	struct dc_state *cached_dc_state;

	struct dm_compressor_info compressor;

	const struct firmware *fw_dmcu;
	uint32_t dmcu_fw_version;
	/**
	 * @soc_bounding_box:
	 *
	 * gpu_info FW provided soc bounding box struct or 0 if not
	 * available in FW.
	 */
	const struct gpu_info_soc_bounding_box_v1_0 *soc_bounding_box;

	/**
	 * @active_vblank_irq_count:
	 *
	 * Number of currently active vblank irqs.
	 */
	uint32_t active_vblank_irq_count;

#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
	/**
	 * @crc_rd_wrk:
	 *
	 * Work to be executed in a separate thread to communicate with PSP.
	 */
	struct crc_rd_work *crc_rd_wrk;
#endif
	/**
	 * @hpd_rx_offload_wq:
	 *
	 * Work queue to offload works of hpd_rx_irq.
	 */
	struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq;
	/**
	 * @mst_encoders:
	 *
	 * Fake encoders used for DP MST.
	 */
	struct amdgpu_encoder mst_encoders[AMDGPU_DM_MAX_CRTC];
	bool force_timing_sync;
	bool disable_hpd_irq;
	bool dmcub_trace_event_en;
	/**
	 * @da_list:
	 *
	 * DAL fb memory allocation list, for communication with SMU.
	 * Entries are &struct dal_allocation.
	 */
	struct list_head da_list;
	struct completion dmub_aux_transfer_done;
	struct workqueue_struct *delayed_hpd_wq;

	/**
	 * @brightness:
	 *
	 * Cached backlight values, one per eDP link.
	 */
	u32 brightness[AMDGPU_DM_MAX_NUM_EDP];
	/**
	 * @actual_brightness:
	 *
	 * Last successfully applied backlight values, one per eDP link.
	 */
	u32 actual_brightness[AMDGPU_DM_MAX_NUM_EDP];

	/**
	 * @aux_hpd_discon_quirk:
	 *
	 * Quirk for an HPD disconnect arriving while an AUX transfer is
	 * on-going; seen on certain Intel platforms.
	 */
	bool aux_hpd_discon_quirk;
};
553 
/**
 * enum dsc_clock_force_state - DSC force state for a connector
 * @DSC_CLK_FORCE_DEFAULT: don't force; leave the DSC decision to the driver
 * @DSC_CLK_FORCE_ENABLE: force DSC enabled
 * @DSC_CLK_FORCE_DISABLE: force DSC disabled
 */
enum dsc_clock_force_state {
	DSC_CLK_FORCE_DEFAULT = 0,
	DSC_CLK_FORCE_ENABLE,
	DSC_CLK_FORCE_DISABLE,
};
559 
/**
 * struct dsc_preferred_settings - Preferred DSC configuration for a connector
 * @dsc_force_enable: force DSC on/off/default, see &enum dsc_clock_force_state
 * @dsc_num_slices_v: preferred number of vertical slices
 * @dsc_num_slices_h: preferred number of horizontal slices
 * @dsc_bits_per_pixel: preferred target bits per pixel
 * @dsc_force_disable_passthrough: disable DSC passthrough when set
 *
 * NOTE(review): field semantics inferred from names — confirm against the
 * DSC configuration code before relying on them.
 */
struct dsc_preferred_settings {
	enum dsc_clock_force_state dsc_force_enable;
	uint32_t dsc_num_slices_v;
	uint32_t dsc_num_slices_h;
	uint32_t dsc_bits_per_pixel;
	bool dsc_force_disable_passthrough;
};
567 
/**
 * enum mst_progress_status - MST operation progress flags
 * @MST_STATUS_DEFAULT: no MST operation recorded
 * @MST_PROBE: MST topology probe
 * @MST_REMOTE_EDID: remote EDID read
 * @MST_ALLOCATE_NEW_PAYLOAD: allocation of a new payload
 * @MST_CLEAR_ALLOCATED_PAYLOAD: clearing of an allocated payload
 *
 * Bit flags ORed into &amdgpu_dm_connector.mst_status (see
 * amdgpu_dm_set_mst_status()).
 */
enum mst_progress_status {
	MST_STATUS_DEFAULT = 0,
	MST_PROBE = BIT(0),
	MST_REMOTE_EDID = BIT(1),
	MST_ALLOCATE_NEW_PAYLOAD = BIT(2),
	MST_CLEAR_ALLOCATED_PAYLOAD = BIT(3),
};
575 
struct amdgpu_dm_connector {

	struct drm_connector base;
	uint32_t connector_id;

	/*
	 * We need to mind the EDID between detect
	 * and get modes due to analog/digital/tvencoder.
	 */
	struct edid *edid;

	/* shared with amdgpu */
	struct amdgpu_hpd hpd;

	/* number of modes generated from EDID at 'dc_sink' */
	int num_modes;

	/*
	 * The 'old' sink - before an HPD.
	 * The 'current' sink is in dc_link->sink.
	 */
	struct dc_sink *dc_sink;
	struct dc_link *dc_link;

	/**
	 * @dc_em_sink: Reference to the emulated (virtual) sink.
	 */
	struct dc_sink *dc_em_sink;

	/* DM only */
	struct drm_dp_mst_topology_mgr mst_mgr;
	struct amdgpu_dm_dp_aux dm_dp_aux;
	struct drm_dp_mst_port *port;
	struct amdgpu_dm_connector *mst_port;
	struct drm_dp_aux *dsc_aux;
	/* TODO see if we can merge with ddc_bus or make a dm_connector */
	struct amdgpu_i2c_adapter *i2c;

	/* Monitor range limits */
	/**
	 * @min_vfreq: Minimal frequency supported by the display in Hz. This
	 * value is set to zero when there is no FreeSync support.
	 */
	int min_vfreq;

	/**
	 * @max_vfreq: Maximum frequency supported by the display in Hz. This
	 * value is set to zero when there is no FreeSync support.
	 */
	int max_vfreq ;
	/* pixel clock; presumably in MHz per the name — TODO confirm units */
	int pixel_clock_mhz;

	/* Audio instance - protected by audio_lock. */
	int audio_inst;

	struct mutex hpd_lock;

	bool fake_enable;
#ifdef CONFIG_DEBUG_FS
	uint32_t debugfs_dpcd_address;
	uint32_t debugfs_dpcd_size;
#endif
	bool force_yuv420_output;
	struct dsc_preferred_settings dsc_settings;
	union dp_downstream_port_present mst_downstream_port_present;
	/* Cached display modes */
	struct drm_display_mode freesync_vid_base;

	int psr_skip_count;

	/* Record progress status of MST; bits from &enum mst_progress_status */
	uint8_t mst_status;
};
645 
/**
 * amdgpu_dm_set_mst_status() - Set or clear flags in an MST status bitmask
 * @status: pointer to the status byte to update (typically
 *          &amdgpu_dm_connector.mst_status)
 * @flags: bits to change, built from &enum mst_progress_status values
 * @set: true to set @flags in @status, false to clear them
 */
static inline void amdgpu_dm_set_mst_status(uint8_t *status,
		uint8_t flags, bool set)
{
	if (set)
		*status |= flags;
	else
		*status &= ~flags;
}
654 
655 #define to_amdgpu_dm_connector(x) container_of(x, struct amdgpu_dm_connector, base)
656 
657 extern const struct amdgpu_ip_block_version dm_ip_block;
658 
/**
 * struct dm_plane_state - DM private plane state
 * @base: embedded DRM atomic plane state
 * @dc_state: DC plane state backing this DRM plane state
 */
struct dm_plane_state {
	struct drm_plane_state base;
	struct dc_plane_state *dc_state;
};
663 
/**
 * struct dm_crtc_state - DM private CRTC state
 * @base: embedded DRM atomic CRTC state
 * @stream: DC stream driven by this CRTC
 */
struct dm_crtc_state {
	struct drm_crtc_state base;
	struct dc_stream_state *stream;

	/* color management degamma state */
	bool cm_has_degamma;
	bool cm_is_degamma_srgb;

	bool mpo_requested;

	int update_type;
	int active_planes;

	int crc_skip_count;

	bool freesync_vrr_info_changed;

	bool dsc_force_changed;
	/* VRR/FreeSync state for this CRTC */
	bool vrr_supported;
	struct mod_freesync_config freesync_config;
	struct dc_info_packet vrr_infopacket;

	/* Adaptive Backlight Management level — TODO confirm semantics */
	int abm_level;
};
687 
688 #define to_dm_crtc_state(x) container_of(x, struct dm_crtc_state, base)
689 
/**
 * struct dm_atomic_state - DM private global atomic state
 * @base: embedded DRM private state (see &amdgpu_display_manager.atomic_obj)
 * @context: DC state associated with this atomic state
 */
struct dm_atomic_state {
	struct drm_private_state base;

	struct dc_state *context;
};
695 
696 #define to_dm_atomic_state(x) container_of(x, struct dm_atomic_state, base)
697 
/**
 * struct dm_connector_state - DM private connector state
 * @base: embedded DRM atomic connector state
 * @scaling: RMX scaling mode applied to the connector
 * @underscan_vborder: vertical underscan border size
 * @underscan_hborder: horizontal underscan border size
 * @underscan_enable: true when underscan is enabled
 * @freesync_capable: true when the sink is FreeSync capable
 * @abm_level: Adaptive Backlight Management level
 * @vcpi_slots: DP MST VCPI time slots — TODO confirm
 * @pbn: DP MST Payload Bandwidth Number — TODO confirm
 */
struct dm_connector_state {
	struct drm_connector_state base;

	enum amdgpu_rmx_type scaling;
	uint8_t underscan_vborder;
	uint8_t underscan_hborder;
	bool underscan_enable;
	bool freesync_capable;
#ifdef CONFIG_DRM_AMD_DC_HDCP
	bool update_hdcp;
#endif
	uint8_t abm_level;
	int vcpi_slots;
	uint64_t pbn;
};
713 
/**
 * struct amdgpu_hdmi_vsdb_info - Keep track of the VSDB info
 *
 * AMDGPU supports FreeSync over HDMI by using the VSDB section, and this
 * struct is useful to keep track of the display-specific information about
 * FreeSync.
 */
struct amdgpu_hdmi_vsdb_info {
	/**
	 * @amd_vsdb_version: Vendor Specific Data Block Version, should be
	 * used to determine which Vendor Specific InfoFrame (VSIF) to send.
	 */
	unsigned int amd_vsdb_version;

	/**
	 * @freesync_supported: FreeSync Supported.
	 */
	bool freesync_supported;

	/**
	 * @min_refresh_rate_hz: FreeSync Minimum Refresh Rate in Hz.
	 */
	unsigned int min_refresh_rate_hz;

	/**
	 * @max_refresh_rate_hz: FreeSync Maximum Refresh Rate in Hz.
	 */
	unsigned int max_refresh_rate_hz;
};
743 
744 
745 #define to_dm_connector_state(x)\
746 	container_of((x), struct dm_connector_state, base)
747 
748 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector);
749 struct drm_connector_state *
750 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector);
751 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
752 					    struct drm_connector_state *state,
753 					    struct drm_property *property,
754 					    uint64_t val);
755 
756 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
757 					    const struct drm_connector_state *state,
758 					    struct drm_property *property,
759 					    uint64_t *val);
760 
761 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev);
762 
763 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
764 				     struct amdgpu_dm_connector *aconnector,
765 				     int connector_type,
766 				     struct dc_link *link,
767 				     int link_index);
768 
769 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
770 				   struct drm_display_mode *mode);
771 
772 void dm_restore_drm_connector_state(struct drm_device *dev,
773 				    struct drm_connector *connector);
774 
775 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
776 					struct edid *edid);
777 
778 void amdgpu_dm_trigger_timing_sync(struct drm_device *dev);
779 
780 #define MAX_COLOR_LUT_ENTRIES 4096
/* Legacy gamma LUT users such as X don't like large LUT sizes */
782 #define MAX_COLOR_LEGACY_LUT_ENTRIES 256
783 
784 void amdgpu_dm_init_color_mod(void);
785 int amdgpu_dm_verify_lut_sizes(const struct drm_crtc_state *crtc_state);
786 int amdgpu_dm_update_crtc_color_mgmt(struct dm_crtc_state *crtc);
787 int amdgpu_dm_update_plane_color_mgmt(struct dm_crtc_state *crtc,
788 				      struct dc_plane_state *dc_plane_state);
789 
790 void amdgpu_dm_update_connector_after_detect(
791 		struct amdgpu_dm_connector *aconnector);
792 
793 extern const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs;
794 
795 int amdgpu_dm_process_dmub_aux_transfer_sync(bool is_cmd_aux,
796 					struct dc_context *ctx, unsigned int link_index,
797 					void *payload, void *operation_result);
798 
799 bool check_seamless_boot_capability(struct amdgpu_device *adev);
800 
801 struct dc_stream_state *
802 	create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
803 					const struct drm_display_mode *drm_mode,
804 					const struct dm_connector_state *dm_state,
805 					const struct dc_stream_state *old_stream);
806 
807 int dm_atomic_get_state(struct drm_atomic_state *state,
808 			struct dm_atomic_state **dm_state);
809 
810 struct amdgpu_dm_connector *
811 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
812 					     struct drm_crtc *crtc);
813 
814 int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth);
815 #endif /* __AMDGPU_DM_H__ */
816