1 /*
2 * Copyright © 2014 Red Hat.
3 *
4 * Permission to use, copy, modify, distribute, and sell this software and its
5 * documentation for any purpose is hereby granted without fee, provided that
6 * the above copyright notice appear in all copies and that both that copyright
7 * notice and this permission notice appear in supporting documentation, and
8 * that the name of the copyright holders not be used in advertising or
9 * publicity pertaining to distribution of the software without specific,
10 * written prior permission. The copyright holders make no representations
11 * about the suitability of this software for any purpose. It is provided "as
12 * is" without express or implied warranty.
13 *
14 * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
15 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
16 * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
17 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
18 * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
19 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
20 * OF THIS SOFTWARE.
21 */
22 #ifndef _DRM_DP_MST_HELPER_H_
23 #define _DRM_DP_MST_HELPER_H_
24
25 #include <linux/types.h>
26 #include <drm/display/drm_dp_helper.h>
27 #include <drm/drm_atomic.h>
28
29 #if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
30 #include <linux/stackdepot.h>
31 #include <linux/timekeeping.h>
32
33 enum drm_dp_mst_topology_ref_type {
34 DRM_DP_MST_TOPOLOGY_REF_GET,
35 DRM_DP_MST_TOPOLOGY_REF_PUT,
36 };
37
38 struct drm_dp_mst_topology_ref_history {
39 struct drm_dp_mst_topology_ref_entry {
40 enum drm_dp_mst_topology_ref_type type;
41 int count;
42 ktime_t ts_nsec;
43 depot_stack_handle_t backtrace;
44 } *entries;
45 int len;
46 };
47 #endif /* IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS) */
48
49 struct drm_dp_mst_branch;
50
51 /**
52 * struct drm_dp_mst_port - MST port
53 * @port_num: port number
54 * @input: if this port is an input port. Protected by
55 * &drm_dp_mst_topology_mgr.base.lock.
56 * @mcs: message capability status - DP 1.2 spec. Protected by
57 * &drm_dp_mst_topology_mgr.base.lock.
58 * @ddps: DisplayPort Device Plug Status - DP 1.2. Protected by
59 * &drm_dp_mst_topology_mgr.base.lock.
60 * @pdt: Peer Device Type. Protected by
61 * &drm_dp_mst_topology_mgr.base.lock.
62 * @ldps: Legacy Device Plug Status. Protected by
63 * &drm_dp_mst_topology_mgr.base.lock.
64 * @dpcd_rev: DPCD revision of device on this port. Protected by
65 * &drm_dp_mst_topology_mgr.base.lock.
66 * @num_sdp_streams: Number of simultaneous streams. Protected by
67 * &drm_dp_mst_topology_mgr.base.lock.
68 * @num_sdp_stream_sinks: Number of stream sinks. Protected by
69 * &drm_dp_mst_topology_mgr.base.lock.
70 * @full_pbn: Max possible bandwidth for this port. Protected by
71 * &drm_dp_mst_topology_mgr.base.lock.
72 * @next: link to next port on this branch device
73 * @aux: i2c aux transport to talk to device connected to this port, protected
74 * by &drm_dp_mst_topology_mgr.base.lock.
75 * @passthrough_aux: parent aux to which DSC pass-through requests should be
76 * sent, only set if DSC pass-through is possible.
77 * @parent: branch device parent of this port
78 * @vcpi: Virtual Channel Payload info for this port.
79 * @connector: DRM connector this port is connected to. Protected by
80 * &drm_dp_mst_topology_mgr.base.lock.
81 * @mgr: topology manager this port lives under.
82 *
83 * This structure represents an MST port endpoint on a device somewhere
84 * in the MST topology.
85 */
86 struct drm_dp_mst_port {
87 /**
88 * @topology_kref: refcount for this port's lifetime in the topology,
89 * only the DP MST helpers should need to touch this
90 */
91 struct kref topology_kref;
92
93 /**
94 * @malloc_kref: refcount for the memory allocation containing this
95 * structure. See drm_dp_mst_get_port_malloc() and
96 * drm_dp_mst_put_port_malloc().
97 */
98 struct kref malloc_kref;
99
100 #if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
101 /**
102 * @topology_ref_history: A history of each topology
103 * reference/dereference. See CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS.
104 */
105 struct drm_dp_mst_topology_ref_history topology_ref_history;
106 #endif
107
108 u8 port_num;
109 bool input;
110 bool mcs;
111 bool ddps;
112 u8 pdt;
113 bool ldps;
114 u8 dpcd_rev;
115 u8 num_sdp_streams;
116 u8 num_sdp_stream_sinks;
117 uint16_t full_pbn;
118 struct list_head next;
119 /**
120 * @mstb: the branch device connected to this port, if there is one.
121 * This should be considered protected for reading by
122 * &drm_dp_mst_topology_mgr.lock. There are two exceptions to this:
123 * &drm_dp_mst_topology_mgr.up_req_work and
124 * &drm_dp_mst_topology_mgr.work, which do not grab
125 * &drm_dp_mst_topology_mgr.lock during reads but are the only
126 * updaters of this list and are protected from writing concurrently
127 * by &drm_dp_mst_topology_mgr.probe_lock.
128 */
129 struct drm_dp_mst_branch *mstb;
130 struct drm_dp_aux aux; /* i2c bus for this port? */
131 struct drm_dp_aux *passthrough_aux;
132 struct drm_dp_mst_branch *parent;
133
134 struct drm_connector *connector;
135 struct drm_dp_mst_topology_mgr *mgr;
136
137 /**
138 * @cached_edid: for DP logical ports - make tiling work by ensuring
139 * that the EDID for all connectors is read immediately.
140 */
141 struct edid *cached_edid;
142 /**
143 * @has_audio: Tracks whether the sink connector to this port is
144 * audio-capable.
145 */
146 bool has_audio;
147
148 /**
149 * @fec_capable: bool indicating if FEC can be supported up to that
150 * point in the MST topology.
151 */
152 bool fec_capable;
153 };
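
/*
 * Example: a minimal sketch of how a driver can keep a struct
 * drm_dp_mst_port pointer alive in its own connector data with
 * drm_dp_mst_get_port_malloc()/drm_dp_mst_put_port_malloc(). The
 * my_connector type and its callbacks are hypothetical; only the two
 * malloc-reference helpers come from this header. Note that a malloc
 * reference keeps the memory valid, it does not keep the port in the
 * topology.
 *
 *	struct my_connector {
 *		struct drm_connector base;
 *		struct drm_dp_mst_port *port;
 *	};
 *
 *	static void my_connector_bind_port(struct my_connector *conn,
 *					   struct drm_dp_mst_port *port)
 *	{
 *		conn->port = port;
 *		drm_dp_mst_get_port_malloc(port);
 *	}
 *
 *	static void my_connector_destroy(struct drm_connector *connector)
 *	{
 *		struct my_connector *conn =
 *			container_of(connector, struct my_connector, base);
 *
 *		drm_dp_mst_put_port_malloc(conn->port);
 *		drm_connector_cleanup(connector);
 *		kfree(conn);
 *	}
 */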
154
155 /* sideband msg header - not bit struct */
156 struct drm_dp_sideband_msg_hdr {
157 u8 lct;
158 u8 lcr;
159 u8 rad[8];
160 bool broadcast;
161 bool path_msg;
162 u8 msg_len;
163 bool somt;
164 bool eomt;
165 bool seqno;
166 };
167
168 struct drm_dp_sideband_msg_rx {
169 u8 chunk[48];
170 u8 msg[256];
171 u8 curchunk_len;
172 u8 curchunk_idx; /* chunk we are parsing now */
173 u8 curchunk_hdrlen;
174 u8 curlen; /* total length of the msg */
175 bool have_somt;
176 bool have_eomt;
177 struct drm_dp_sideband_msg_hdr initial_hdr;
178 };
179
180 /**
181 * struct drm_dp_mst_branch - MST branch device.
182 * @rad: Relative Address to talk to this branch device.
183 * @lct: Link count total to talk to this branch device.
184 * @num_ports: number of ports on the branch.
185 * @port_parent: pointer to the port parent, NULL if toplevel.
186 * @mgr: topology manager for this branch device.
187  * @link_address_sent: whether a link address message has been sent to this device yet.
188  * @guid: GUID for DP 1.2 branch device. Ports under this branch can be
189  * identified by port number.
190 *
191  * This structure represents an MST branch device. There is one
192  * primary branch device at the root, along with any other branches connected
193  * to downstream ports of parent branches.
194 */
195 struct drm_dp_mst_branch {
196 /**
197 * @topology_kref: refcount for this branch device's lifetime in the
198 * topology, only the DP MST helpers should need to touch this
199 */
200 struct kref topology_kref;
201
202 /**
203 * @malloc_kref: refcount for the memory allocation containing this
204 * structure. See drm_dp_mst_get_mstb_malloc() and
205 * drm_dp_mst_put_mstb_malloc().
206 */
207 struct kref malloc_kref;
208
209 #if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
210 /**
211 * @topology_ref_history: A history of each topology
212 * reference/dereference. See CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS.
213 */
214 struct drm_dp_mst_topology_ref_history topology_ref_history;
215 #endif
216
217 /**
218 * @destroy_next: linked-list entry used by
219 * drm_dp_delayed_destroy_work()
220 */
221 struct list_head destroy_next;
222
223 u8 rad[8];
224 u8 lct;
225 int num_ports;
226
227 /**
228 * @ports: the list of ports on this branch device. This should be
229 * considered protected for reading by &drm_dp_mst_topology_mgr.lock.
230 * There are two exceptions to this:
231 * &drm_dp_mst_topology_mgr.up_req_work and
232 * &drm_dp_mst_topology_mgr.work, which do not grab
233 * &drm_dp_mst_topology_mgr.lock during reads but are the only
234 * updaters of this list and are protected from updating the list
235 	 * concurrently by &drm_dp_mst_topology_mgr.probe_lock.
236 */
237 struct list_head ports;
238
239 struct drm_dp_mst_port *port_parent;
240 struct drm_dp_mst_topology_mgr *mgr;
241
242 bool link_address_sent;
243
244 /* global unique identifier to identify branch devices */
245 u8 guid[16];
246 };
247
248
249 struct drm_dp_nak_reply {
250 u8 guid[16];
251 u8 reason;
252 u8 nak_data;
253 };
254
255 struct drm_dp_link_address_ack_reply {
256 u8 guid[16];
257 u8 nports;
258 struct drm_dp_link_addr_reply_port {
259 bool input_port;
260 u8 peer_device_type;
261 u8 port_number;
262 bool mcs;
263 bool ddps;
264 bool legacy_device_plug_status;
265 u8 dpcd_revision;
266 u8 peer_guid[16];
267 u8 num_sdp_streams;
268 u8 num_sdp_stream_sinks;
269 } ports[16];
270 };
271
272 struct drm_dp_remote_dpcd_read_ack_reply {
273 u8 port_number;
274 u8 num_bytes;
275 u8 bytes[255];
276 };
277
278 struct drm_dp_remote_dpcd_write_ack_reply {
279 u8 port_number;
280 };
281
282 struct drm_dp_remote_dpcd_write_nak_reply {
283 u8 port_number;
284 u8 reason;
285 u8 bytes_written_before_failure;
286 };
287
288 struct drm_dp_remote_i2c_read_ack_reply {
289 u8 port_number;
290 u8 num_bytes;
291 u8 bytes[255];
292 };
293
294 struct drm_dp_remote_i2c_read_nak_reply {
295 u8 port_number;
296 u8 nak_reason;
297 u8 i2c_nak_transaction;
298 };
299
300 struct drm_dp_remote_i2c_write_ack_reply {
301 u8 port_number;
302 };
303
304 struct drm_dp_query_stream_enc_status_ack_reply {
305 /* Bit[23:16]- Stream Id */
306 u8 stream_id;
307
308 /* Bit[15]- Signed */
309 bool reply_signed;
310
311 /* Bit[10:8]- Stream Output Sink Type */
312 bool unauthorizable_device_present;
313 bool legacy_device_present;
314 bool query_capable_device_present;
315
316 /* Bit[12:11]- Stream Output CP Type */
317 bool hdcp_1x_device_present;
318 bool hdcp_2x_device_present;
319
320 /* Bit[4]- Stream Authentication */
321 bool auth_completed;
322
323 /* Bit[3]- Stream Encryption */
324 bool encryption_enabled;
325
326 /* Bit[2]- Stream Repeater Function Present */
327 bool repeater_present;
328
329 /* Bit[1:0]- Stream State */
330 u8 state;
331 };
332
333 #define DRM_DP_MAX_SDP_STREAMS 16
334 struct drm_dp_allocate_payload {
335 u8 port_number;
336 u8 number_sdp_streams;
337 u8 vcpi;
338 u16 pbn;
339 u8 sdp_stream_sink[DRM_DP_MAX_SDP_STREAMS];
340 };
341
342 struct drm_dp_allocate_payload_ack_reply {
343 u8 port_number;
344 u8 vcpi;
345 u16 allocated_pbn;
346 };
347
348 struct drm_dp_connection_status_notify {
349 u8 guid[16];
350 u8 port_number;
351 bool legacy_device_plug_status;
352 bool displayport_device_plug_status;
353 bool message_capability_status;
354 bool input_port;
355 u8 peer_device_type;
356 };
357
358 struct drm_dp_remote_dpcd_read {
359 u8 port_number;
360 u32 dpcd_address;
361 u8 num_bytes;
362 };
363
364 struct drm_dp_remote_dpcd_write {
365 u8 port_number;
366 u32 dpcd_address;
367 u8 num_bytes;
368 u8 *bytes;
369 };
370
371 #define DP_REMOTE_I2C_READ_MAX_TRANSACTIONS 4
372 struct drm_dp_remote_i2c_read {
373 u8 num_transactions;
374 u8 port_number;
375 struct drm_dp_remote_i2c_read_tx {
376 u8 i2c_dev_id;
377 u8 num_bytes;
378 u8 *bytes;
379 u8 no_stop_bit;
380 u8 i2c_transaction_delay;
381 } transactions[DP_REMOTE_I2C_READ_MAX_TRANSACTIONS];
382 u8 read_i2c_device_id;
383 u8 num_bytes_read;
384 };
385
386 struct drm_dp_remote_i2c_write {
387 u8 port_number;
388 u8 write_i2c_device_id;
389 u8 num_bytes;
390 u8 *bytes;
391 };
392
393 struct drm_dp_query_stream_enc_status {
394 u8 stream_id;
395 u8 client_id[7]; /* 56-bit nonce */
396 u8 stream_event;
397 bool valid_stream_event;
398 u8 stream_behavior;
399 u8 valid_stream_behavior;
400 };
401
402 /* this covers ENUM_RESOURCES, POWER_DOWN_PHY, POWER_UP_PHY */
403 struct drm_dp_port_number_req {
404 u8 port_number;
405 };
406
407 struct drm_dp_enum_path_resources_ack_reply {
408 u8 port_number;
409 bool fec_capable;
410 u16 full_payload_bw_number;
411 u16 avail_payload_bw_number;
412 };
413
414 /* covers POWER_DOWN_PHY, POWER_UP_PHY */
415 struct drm_dp_port_number_rep {
416 u8 port_number;
417 };
418
419 struct drm_dp_query_payload {
420 u8 port_number;
421 u8 vcpi;
422 };
423
424 struct drm_dp_resource_status_notify {
425 u8 port_number;
426 u8 guid[16];
427 u16 available_pbn;
428 };
429
430 struct drm_dp_query_payload_ack_reply {
431 u8 port_number;
432 u16 allocated_pbn;
433 };
434
435 struct drm_dp_sideband_msg_req_body {
436 u8 req_type;
437 union ack_req {
438 struct drm_dp_connection_status_notify conn_stat;
439 struct drm_dp_port_number_req port_num;
440 struct drm_dp_resource_status_notify resource_stat;
441
442 struct drm_dp_query_payload query_payload;
443 struct drm_dp_allocate_payload allocate_payload;
444
445 struct drm_dp_remote_dpcd_read dpcd_read;
446 struct drm_dp_remote_dpcd_write dpcd_write;
447
448 struct drm_dp_remote_i2c_read i2c_read;
449 struct drm_dp_remote_i2c_write i2c_write;
450
451 struct drm_dp_query_stream_enc_status enc_status;
452 } u;
453 };
454
455 struct drm_dp_sideband_msg_reply_body {
456 u8 reply_type;
457 u8 req_type;
458 union ack_replies {
459 struct drm_dp_nak_reply nak;
460 struct drm_dp_link_address_ack_reply link_addr;
461 struct drm_dp_port_number_rep port_number;
462
463 struct drm_dp_enum_path_resources_ack_reply path_resources;
464 struct drm_dp_allocate_payload_ack_reply allocate_payload;
465 struct drm_dp_query_payload_ack_reply query_payload;
466
467 struct drm_dp_remote_dpcd_read_ack_reply remote_dpcd_read_ack;
468 struct drm_dp_remote_dpcd_write_ack_reply remote_dpcd_write_ack;
469 struct drm_dp_remote_dpcd_write_nak_reply remote_dpcd_write_nack;
470
471 struct drm_dp_remote_i2c_read_ack_reply remote_i2c_read_ack;
472 struct drm_dp_remote_i2c_read_nak_reply remote_i2c_read_nack;
473 struct drm_dp_remote_i2c_write_ack_reply remote_i2c_write_ack;
474
475 struct drm_dp_query_stream_enc_status_ack_reply enc_status;
476 } u;
477 };
478
479 /* msg is queued to be put into a slot */
480 #define DRM_DP_SIDEBAND_TX_QUEUED 0
481 /* msg has started transmitting on a slot - still on msgq */
482 #define DRM_DP_SIDEBAND_TX_START_SEND 1
483 /* msg has finished transmitting on a slot - removed from msgq only in slot */
484 #define DRM_DP_SIDEBAND_TX_SENT 2
485 /* msg has received a response - removed from slot */
486 #define DRM_DP_SIDEBAND_TX_RX 3
487 #define DRM_DP_SIDEBAND_TX_TIMEOUT 4
488
489 struct drm_dp_sideband_msg_tx {
490 u8 msg[256];
491 u8 chunk[48];
492 u8 cur_offset;
493 u8 cur_len;
494 struct drm_dp_mst_branch *dst;
495 struct list_head next;
496 int seqno;
497 int state;
498 bool path_msg;
499 struct drm_dp_sideband_msg_reply_body reply;
500 };
501
502 /* sideband msg handler */
503 struct drm_dp_mst_topology_mgr;
504 struct drm_dp_mst_topology_cbs {
505 /* create a connector for a port */
506 struct drm_connector *(*add_connector)(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, const char *path);
507 /*
508 * Checks for any pending MST interrupts, passing them to MST core for
509 	 * processing, the same way an HPD IRQ pulse handler would.
510 	 * If provided, MST core calls this callback from a poll-waiting loop
511 * when waiting for MST down message replies. The driver is expected
512 * to guard against a race between this callback and the driver's HPD
513 * IRQ pulse handler.
514 */
515 void (*poll_hpd_irq)(struct drm_dp_mst_topology_mgr *mgr);
516 };
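
/*
 * Example: a hedged sketch of wiring up these callbacks from a driver.
 * Everything prefixed with my_ (the connector funcs, helper funcs and the
 * body of the callback) is hypothetical driver code; only the callback
 * signature and structure come from this header. Real implementations
 * typically wrap the connector in a driver structure and take a malloc
 * reference on the port as well.
 *
 *	static struct drm_connector *
 *	my_mst_add_connector(struct drm_dp_mst_topology_mgr *mgr,
 *			     struct drm_dp_mst_port *port, const char *path)
 *	{
 *		struct drm_connector *connector;
 *
 *		connector = kzalloc(sizeof(*connector), GFP_KERNEL);
 *		if (!connector)
 *			return NULL;
 *
 *		if (drm_connector_init(mgr->dev, connector, &my_connector_funcs,
 *				       DRM_MODE_CONNECTOR_DisplayPort)) {
 *			kfree(connector);
 *			return NULL;
 *		}
 *		drm_connector_helper_add(connector, &my_connector_helper_funcs);
 *
 *		return connector;
 *	}
 *
 *	static const struct drm_dp_mst_topology_cbs my_mst_cbs = {
 *		.add_connector = my_mst_add_connector,
 *	};
 */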
517
518 #define to_dp_mst_topology_state(x) container_of(x, struct drm_dp_mst_topology_state, base)
519
520 /**
521 * struct drm_dp_mst_atomic_payload - Atomic state struct for an MST payload
522 *
523 * The primary atomic state structure for a given MST payload. Stores information like current
524 * bandwidth allocation, intended action for this payload, etc.
525 */
526 struct drm_dp_mst_atomic_payload {
527 /** @port: The MST port assigned to this payload */
528 struct drm_dp_mst_port *port;
529
530 /**
531 * @vc_start_slot: The time slot that this payload starts on. Because payload start slots
532 * can't be determined ahead of time, the contents of this value are UNDEFINED at atomic
533 * check time. This shouldn't usually matter, as the start slot should never be relevant for
534 * atomic state computations.
535 *
536 * Since this value is determined at commit time instead of check time, this value is
537 * protected by the MST helpers ensuring that async commits operating on the given topology
538 * never run in parallel. In the event that a driver does need to read this value (e.g. to
539 * inform hardware of the starting timeslot for a payload), the driver may either:
540 *
541 * * Read this field during the atomic commit after
542 * drm_dp_mst_atomic_wait_for_dependencies() has been called, which will ensure the
543 	 *   previous MST state's payload start slots have been copied over to the new state. Note
544 * that a new start slot won't be assigned/removed from this payload until
545 * drm_dp_add_payload_part1()/drm_dp_remove_payload() have been called.
546 * * Acquire the MST modesetting lock, and then wait for any pending MST-related commits to
547 * get committed to hardware by calling drm_crtc_commit_wait() on each of the
548 * &drm_crtc_commit structs in &drm_dp_mst_topology_state.commit_deps.
549 *
550 * If neither of the two above solutions suffice (e.g. the driver needs to read the start
551 * slot in the middle of an atomic commit without waiting for some reason), then drivers
552 * should cache this value themselves after changing payloads.
553 */
554 s8 vc_start_slot;
555
556 /** @vcpi: The Virtual Channel Payload Identifier */
557 u8 vcpi;
558 /**
559 * @time_slots:
560 * The number of timeslots allocated to this payload from the source DP Tx to
561 * the immediate downstream DP Rx
562 */
563 int time_slots;
564 /** @pbn: The payload bandwidth for this payload */
565 int pbn;
566
567 /** @delete: Whether or not we intend to delete this payload during this atomic commit */
568 bool delete : 1;
569 /** @dsc_enabled: Whether or not this payload has DSC enabled */
570 bool dsc_enabled : 1;
571
572 /** @next: The list node for this payload */
573 struct list_head next;
574 };
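
/*
 * Example: a hedged sketch of the first option described for
 * @vc_start_slot above - reading the start slot during the atomic commit,
 * after the MST dependencies have been waited for and
 * drm_dp_add_payload_part1() has assigned a slot. my_hw_set_mst_timeslots()
 * stands in for driver-specific hardware programming.
 *
 *	struct drm_dp_mst_topology_state *mst_state;
 *	struct drm_dp_mst_atomic_payload *payload;
 *
 *	drm_dp_mst_atomic_wait_for_dependencies(state);
 *
 *	mst_state = drm_atomic_get_new_mst_topology_state(state, mgr);
 *	payload = drm_atomic_get_mst_payload_state(mst_state, port);
 *
 *	if (!drm_dp_add_payload_part1(mgr, mst_state, payload) &&
 *	    payload->vc_start_slot >= 0)
 *		my_hw_set_mst_timeslots(payload->vc_start_slot,
 *					payload->time_slots);
 */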
575
576 /**
577 * struct drm_dp_mst_topology_state - DisplayPort MST topology atomic state
578 *
579  * This struct represents the atomic state of the top-level DisplayPort MST manager.
580 */
581 struct drm_dp_mst_topology_state {
582 /** @base: Base private state for atomic */
583 struct drm_private_state base;
584
585 /** @mgr: The topology manager */
586 struct drm_dp_mst_topology_mgr *mgr;
587
588 /**
589 * @pending_crtc_mask: A bitmask of all CRTCs this topology state touches, drivers may
590 * modify this to add additional dependencies if needed.
591 */
592 u32 pending_crtc_mask;
593 /**
594 * @commit_deps: A list of all CRTC commits affecting this topology, this field isn't
595 * populated until drm_dp_mst_atomic_wait_for_dependencies() is called.
596 */
597 struct drm_crtc_commit **commit_deps;
598 /** @num_commit_deps: The number of CRTC commits in @commit_deps */
599 size_t num_commit_deps;
600
601 /** @payload_mask: A bitmask of allocated VCPIs, used for VCPI assignments */
602 u32 payload_mask;
603 /** @payloads: The list of payloads being created/destroyed in this state */
604 struct list_head payloads;
605
606 /** @total_avail_slots: The total number of slots this topology can handle (63 or 64) */
607 u8 total_avail_slots;
608 /** @start_slot: The first usable time slot in this topology (1 or 0) */
609 u8 start_slot;
610
611 /**
612 * @pbn_div: The current PBN divisor for this topology. The driver is expected to fill this
613 * out itself.
614 */
615 int pbn_div;
616 };
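
/*
 * Example: a minimal sketch of a driver's atomic check filling out
 * @pbn_div and the slot bookkeeping. The my_*() accessors are
 * placeholders for however the driver tracks its link rate, lane count
 * and channel encoding; the helper calls are the ones declared in this
 * header.
 *
 *	mst_state = drm_atomic_get_mst_topology_state(state, &my_dp->mst_mgr);
 *	if (IS_ERR(mst_state))
 *		return PTR_ERR(mst_state);
 *
 *	mst_state->pbn_div = drm_dp_get_vc_payload_bw(&my_dp->mst_mgr,
 *						      my_link_rate(my_dp),
 *						      my_lane_count(my_dp));
 *
 * drm_dp_mst_update_slots() then picks 64 usable slots starting at 0 for
 * 128b/132b links, or 63 starting at 1 for 8b/10b links:
 *
 *	drm_dp_mst_update_slots(mst_state, my_link_encoding_cap(my_dp));
 */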
617
618 #define to_dp_mst_topology_mgr(x) container_of(x, struct drm_dp_mst_topology_mgr, base)
619
620 /**
621 * struct drm_dp_mst_topology_mgr - DisplayPort MST manager
622 *
623  * This struct represents the top-level DisplayPort MST topology manager.
624  * There should be one instance of this for every MST-capable DP connector
625  * on the GPU.
626 */
627 struct drm_dp_mst_topology_mgr {
628 /**
629 * @base: Base private object for atomic
630 */
631 struct drm_private_obj base;
632
633 /**
634 * @dev: device pointer for adding i2c devices etc.
635 */
636 struct drm_device *dev;
637 /**
638 * @cbs: callbacks for connector addition and destruction.
639 */
640 const struct drm_dp_mst_topology_cbs *cbs;
641 /**
642 * @max_dpcd_transaction_bytes: maximum number of bytes to read/write
643 * in one go.
644 */
645 int max_dpcd_transaction_bytes;
646 /**
647 	 * @aux: AUX channel for the DP MST connector this topology mgr is
648 * controlling.
649 */
650 struct drm_dp_aux *aux;
651 /**
652 * @max_payloads: maximum number of payloads the GPU can generate.
653 */
654 int max_payloads;
655 /**
656 * @conn_base_id: DRM connector ID this mgr is connected to. Only used
657 * to build the MST connector path value.
658 */
659 int conn_base_id;
660
661 /**
662 * @up_req_recv: Message receiver state for up requests.
663 */
664 struct drm_dp_sideband_msg_rx up_req_recv;
665
666 /**
667 * @down_rep_recv: Message receiver state for replies to down
668 * requests.
669 */
670 struct drm_dp_sideband_msg_rx down_rep_recv;
671
672 /**
673 * @lock: protects @mst_state, @mst_primary, @dpcd, and
674 * @payload_id_table_cleared.
675 */
676 struct mutex lock;
677
678 /**
679 * @probe_lock: Prevents @work and @up_req_work, the only writers of
680 * &drm_dp_mst_port.mstb and &drm_dp_mst_branch.ports, from racing
681 * while they update the topology.
682 */
683 struct mutex probe_lock;
684
685 /**
686 	 * @mst_state: Whether this manager is enabled for an MST-capable port. False
687 	 * if no MST sink/branch device is connected.
688 */
689 bool mst_state : 1;
690
691 /**
692 * @payload_id_table_cleared: Whether or not we've cleared the payload
693 * ID table for @mst_primary. Protected by @lock.
694 */
695 bool payload_id_table_cleared : 1;
696
697 /**
698 * @payload_count: The number of currently active payloads in hardware. This value is only
699 * intended to be used internally by MST helpers for payload tracking, and is only safe to
700 * read/write from the atomic commit (not check) context.
701 */
702 u8 payload_count;
703
704 /**
705 * @next_start_slot: The starting timeslot to use for new VC payloads. This value is used
706 * internally by MST helpers for payload tracking, and is only safe to read/write from the
707 * atomic commit (not check) context.
708 */
709 u8 next_start_slot;
710
711 /**
712 * @mst_primary: Pointer to the primary/first branch device.
713 */
714 struct drm_dp_mst_branch *mst_primary;
715
716 /**
717 * @dpcd: Cache of DPCD for primary port.
718 */
719 u8 dpcd[DP_RECEIVER_CAP_SIZE];
720 /**
721 * @sink_count: Sink count from DEVICE_SERVICE_IRQ_VECTOR_ESI0.
722 */
723 u8 sink_count;
724
725 /**
726 * @funcs: Atomic helper callbacks
727 */
728 const struct drm_private_state_funcs *funcs;
729
730 /**
731 * @qlock: protects @tx_msg_downq and &drm_dp_sideband_msg_tx.state
732 */
733 struct mutex qlock;
734
735 /**
736 * @tx_msg_downq: List of pending down requests
737 */
738 struct list_head tx_msg_downq;
739
740 /**
741 	 * @tx_waitq: Wait queue used by sideband message senders to wait for
	 * their transmitted message to get a reply or time out.
742 */
743 wait_queue_head_t tx_waitq;
744 /**
745 * @work: Probe work.
746 */
747 struct work_struct work;
748 /**
749 * @tx_work: Sideband transmit worker. This can nest within the main
750 * @work worker for each transaction @work launches.
751 */
752 struct work_struct tx_work;
753
754 /**
755 	 * @destroy_port_list: List of ports waiting to be destroyed.
756 */
757 struct list_head destroy_port_list;
758 /**
759 	 * @destroy_branch_device_list: List of branch devices waiting to be
760 	 * destroyed.
761 */
762 struct list_head destroy_branch_device_list;
763 /**
764 * @delayed_destroy_lock: Protects @destroy_port_list and
765 * @destroy_branch_device_list.
766 */
767 struct mutex delayed_destroy_lock;
768
769 /**
770 * @delayed_destroy_wq: Workqueue used for delayed_destroy_work items.
771 * A dedicated WQ makes it possible to drain any requeued work items
772 * on it.
773 */
774 struct workqueue_struct *delayed_destroy_wq;
775
776 /**
777 * @delayed_destroy_work: Work item to destroy MST port and branch
778 * devices, needed to avoid locking inversion.
779 */
780 struct work_struct delayed_destroy_work;
781
782 /**
783 * @up_req_list: List of pending up requests from the topology that
784 * need to be processed, in chronological order.
785 */
786 struct list_head up_req_list;
787 /**
788 * @up_req_lock: Protects @up_req_list
789 */
790 struct mutex up_req_lock;
791 /**
792 * @up_req_work: Work item to process up requests received from the
793 * topology. Needed to avoid blocking hotplug handling and sideband
794 * transmissions.
795 */
796 struct work_struct up_req_work;
797
798 #if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
799 /**
800 * @topology_ref_history_lock: protects
801 * &drm_dp_mst_port.topology_ref_history and
802 * &drm_dp_mst_branch.topology_ref_history.
803 */
804 struct mutex topology_ref_history_lock;
805 #endif
806 };
807
808 int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
809 struct drm_device *dev, struct drm_dp_aux *aux,
810 int max_dpcd_transaction_bytes,
811 int max_payloads, int conn_base_id);
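
/*
 * Example: a hedged sketch of setting the manager up from driver
 * initialization. The 16-byte DPCD transaction size and 16 payloads are
 * merely plausible values; my_dp is a placeholder for the driver's
 * encoder/connector data.
 *
 *	ret = drm_dp_mst_topology_mgr_init(&my_dp->mst_mgr, my_dp->drm_dev,
 *					   &my_dp->aux, 16, 16,
 *					   my_dp->connector.base.id);
 *	if (ret)
 *		return ret;
 */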
812
813 void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr);
814
815 bool drm_dp_read_mst_cap(struct drm_dp_aux *aux, const u8 dpcd[DP_RECEIVER_CAP_SIZE]);
816 int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool mst_state);
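
/*
 * Example: deciding whether to enter MST mode after a long HPD, roughly
 * as drivers do it (sketch only; my_dp is a placeholder, and my_dp->dpcd
 * is assumed to hold a freshly read copy of the receiver capabilities).
 *
 *	if (drm_dp_read_mst_cap(&my_dp->aux, my_dp->dpcd) && !my_dp->is_mst) {
 *		ret = drm_dp_mst_topology_mgr_set_mst(&my_dp->mst_mgr, true);
 *		if (!ret)
 *			my_dp->is_mst = true;
 *	}
 */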
817
818 int drm_dp_mst_hpd_irq(struct drm_dp_mst_topology_mgr *mgr, u8 *esi, bool *handled);
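
/*
 * Example: a rough sketch of the short-pulse (IRQ_HPD) handling a driver
 * runs while in MST mode. Reading the ESI block starting at
 * DP_SINK_COUNT_ESI and passing it to drm_dp_mst_hpd_irq() is the common
 * pattern; exactly how the serviced interrupt vectors are acked back to
 * the sink is driver-specific, so treat the write-back below as
 * illustrative only.
 *
 *	u8 esi[14] = {};
 *	bool handled = false;
 *
 *	if (drm_dp_dpcd_read(&my_dp->aux, DP_SINK_COUNT_ESI, esi,
 *			     sizeof(esi)) == sizeof(esi)) {
 *		drm_dp_mst_hpd_irq(&my_dp->mst_mgr, esi, &handled);
 *		if (handled)
 *			drm_dp_dpcd_writeb(&my_dp->aux,
 *					   DP_DEVICE_SERVICE_IRQ_VECTOR_ESI0,
 *					   esi[1]);
 *	}
 */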
819
820
821 int
822 drm_dp_mst_detect_port(struct drm_connector *connector,
823 struct drm_modeset_acquire_ctx *ctx,
824 struct drm_dp_mst_topology_mgr *mgr,
825 struct drm_dp_mst_port *port);
826
827 struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port);
828
829 int drm_dp_get_vc_payload_bw(const struct drm_dp_mst_topology_mgr *mgr,
830 int link_rate, int link_lane_count);
831
832 int drm_dp_calc_pbn_mode(int clock, int bpp, bool dsc);
833
834 void drm_dp_mst_update_slots(struct drm_dp_mst_topology_state *mst_state, uint8_t link_encoding_cap);
835
836 int drm_dp_add_payload_part1(struct drm_dp_mst_topology_mgr *mgr,
837 struct drm_dp_mst_topology_state *mst_state,
838 struct drm_dp_mst_atomic_payload *payload);
839 int drm_dp_add_payload_part2(struct drm_dp_mst_topology_mgr *mgr,
840 struct drm_atomic_state *state,
841 struct drm_dp_mst_atomic_payload *payload);
842 void drm_dp_remove_payload(struct drm_dp_mst_topology_mgr *mgr,
843 struct drm_dp_mst_topology_state *mst_state,
844 struct drm_dp_mst_atomic_payload *payload);
845
846 int drm_dp_check_act_status(struct drm_dp_mst_topology_mgr *mgr);
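
/*
 * Example: a hedged outline of how the payload helpers above are ordered
 * during an atomic commit when enabling a stream; the my_*() step stands
 * in for driver hardware programming.
 *
 *	drm_dp_add_payload_part1(mgr, mst_state, payload);
 *		(writes the allocation to the branch device's payload table
 *		 and assigns &drm_dp_mst_atomic_payload.vc_start_slot)
 *	my_enable_stream_and_trigger_act();
 *	drm_dp_check_act_status(mgr);
 *		(waits for the allocation change trigger to complete)
 *	drm_dp_add_payload_part2(mgr, state, payload);
 *		(sends the ALLOCATE_PAYLOAD sideband message)
 *
 * Tearing a stream down is roughly the reverse: disable the stream, then
 * call drm_dp_remove_payload(mgr, mst_state, payload).
 */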
847
848 void drm_dp_mst_dump_topology(struct seq_file *m,
849 struct drm_dp_mst_topology_mgr *mgr);
850
851 void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr);
852 int __must_check
853 drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr,
854 bool sync);
855
856 ssize_t drm_dp_mst_dpcd_read(struct drm_dp_aux *aux,
857 unsigned int offset, void *buffer, size_t size);
858 ssize_t drm_dp_mst_dpcd_write(struct drm_dp_aux *aux,
859 unsigned int offset, void *buffer, size_t size);
860
861 int drm_dp_mst_connector_late_register(struct drm_connector *connector,
862 struct drm_dp_mst_port *port);
863 void drm_dp_mst_connector_early_unregister(struct drm_connector *connector,
864 struct drm_dp_mst_port *port);
865
866 struct drm_dp_mst_topology_state *
867 drm_atomic_get_mst_topology_state(struct drm_atomic_state *state,
868 struct drm_dp_mst_topology_mgr *mgr);
869 struct drm_dp_mst_topology_state *
870 drm_atomic_get_new_mst_topology_state(struct drm_atomic_state *state,
871 struct drm_dp_mst_topology_mgr *mgr);
872 struct drm_dp_mst_atomic_payload *
873 drm_atomic_get_mst_payload_state(struct drm_dp_mst_topology_state *state,
874 struct drm_dp_mst_port *port);
875 int __must_check
876 drm_dp_atomic_find_time_slots(struct drm_atomic_state *state,
877 struct drm_dp_mst_topology_mgr *mgr,
878 struct drm_dp_mst_port *port, int pbn);
879 int drm_dp_mst_atomic_enable_dsc(struct drm_atomic_state *state,
880 struct drm_dp_mst_port *port,
881 int pbn, bool enable);
882 int __must_check
883 drm_dp_mst_add_affected_dsc_crtcs(struct drm_atomic_state *state,
884 struct drm_dp_mst_topology_mgr *mgr);
885 int __must_check
886 drm_dp_atomic_release_time_slots(struct drm_atomic_state *state,
887 struct drm_dp_mst_topology_mgr *mgr,
888 struct drm_dp_mst_port *port);
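
/*
 * Example: a condensed sketch of the atomic-check side of MST bandwidth
 * management. A driver's encoder/connector atomic check converts the
 * mode into a PBN value and claims time slots, and releases them again
 * when the stream goes away; bpp, adjusted_mode and the enable/disable
 * decision are driver-specific placeholders here.
 *
 *	if (enabling) {
 *		pbn = drm_dp_calc_pbn_mode(adjusted_mode->crtc_clock, bpp, false);
 *		slots = drm_dp_atomic_find_time_slots(state, mgr, port, pbn);
 *		if (slots < 0)
 *			return slots;
 *	} else {
 *		ret = drm_dp_atomic_release_time_slots(state, mgr, port);
 *		if (ret)
 *			return ret;
 *	}
 *
 * drm_dp_mst_atomic_check() should then run once from the driver's
 * top-level atomic check to validate the resulting allocations.
 */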
889 void drm_dp_mst_atomic_wait_for_dependencies(struct drm_atomic_state *state);
890 int __must_check drm_dp_mst_atomic_setup_commit(struct drm_atomic_state *state);
891 int drm_dp_send_power_updown_phy(struct drm_dp_mst_topology_mgr *mgr,
892 struct drm_dp_mst_port *port, bool power_up);
893 int drm_dp_send_query_stream_enc_status(struct drm_dp_mst_topology_mgr *mgr,
894 struct drm_dp_mst_port *port,
895 struct drm_dp_query_stream_enc_status_ack_reply *status);
896 int __must_check drm_dp_mst_atomic_check(struct drm_atomic_state *state);
897 int __must_check drm_dp_mst_root_conn_atomic_check(struct drm_connector_state *new_conn_state,
898 struct drm_dp_mst_topology_mgr *mgr);
899
900 void drm_dp_mst_get_port_malloc(struct drm_dp_mst_port *port);
901 void drm_dp_mst_put_port_malloc(struct drm_dp_mst_port *port);
902
903 struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port);
904
905 static inline struct drm_dp_mst_topology_state *
906 to_drm_dp_mst_topology_state(struct drm_private_state *state)
907 {
908 return container_of(state, struct drm_dp_mst_topology_state, base);
909 }
910
911 extern const struct drm_private_state_funcs drm_dp_mst_topology_state_funcs;
912
913 /**
914 * __drm_dp_mst_state_iter_get - private atomic state iterator function for
915 * macro-internal use
916 * @state: &struct drm_atomic_state pointer
917 * @mgr: pointer to the &struct drm_dp_mst_topology_mgr iteration cursor
918 * @old_state: optional pointer to the old &struct drm_dp_mst_topology_state
919 * iteration cursor
920 * @new_state: optional pointer to the new &struct drm_dp_mst_topology_state
921 * iteration cursor
922 * @i: int iteration cursor, for macro-internal use
923 *
924 * Used by for_each_oldnew_mst_mgr_in_state(),
925 * for_each_old_mst_mgr_in_state(), and for_each_new_mst_mgr_in_state(). Don't
926 * call this directly.
927 *
928 * Returns:
929 * True if the current &struct drm_private_obj is a &struct
930 * drm_dp_mst_topology_mgr, false otherwise.
931 */
932 static inline bool
933 __drm_dp_mst_state_iter_get(struct drm_atomic_state *state,
934 struct drm_dp_mst_topology_mgr **mgr,
935 struct drm_dp_mst_topology_state **old_state,
936 struct drm_dp_mst_topology_state **new_state,
937 int i)
938 {
939 struct __drm_private_objs_state *objs_state = &state->private_objs[i];
940
941 if (objs_state->ptr->funcs != &drm_dp_mst_topology_state_funcs)
942 return false;
943
944 *mgr = to_dp_mst_topology_mgr(objs_state->ptr);
945 if (old_state)
946 *old_state = to_dp_mst_topology_state(objs_state->old_state);
947 if (new_state)
948 *new_state = to_dp_mst_topology_state(objs_state->new_state);
949
950 return true;
951 }
952
953 /**
954 * for_each_oldnew_mst_mgr_in_state - iterate over all DP MST topology
955 * managers in an atomic update
956 * @__state: &struct drm_atomic_state pointer
957 * @mgr: &struct drm_dp_mst_topology_mgr iteration cursor
958 * @old_state: &struct drm_dp_mst_topology_state iteration cursor for the old
959 * state
960 * @new_state: &struct drm_dp_mst_topology_state iteration cursor for the new
961 * state
962 * @__i: int iteration cursor, for macro-internal use
963 *
964 * This iterates over all DRM DP MST topology managers in an atomic update,
965 * tracking both old and new state. This is useful in places where the state
966 * delta needs to be considered, for example in atomic check functions.
967 */
968 #define for_each_oldnew_mst_mgr_in_state(__state, mgr, old_state, new_state, __i) \
969 for ((__i) = 0; (__i) < (__state)->num_private_objs; (__i)++) \
970 for_each_if(__drm_dp_mst_state_iter_get((__state), &(mgr), &(old_state), &(new_state), (__i)))
971
972 /**
973 * for_each_old_mst_mgr_in_state - iterate over all DP MST topology managers
974 * in an atomic update
975 * @__state: &struct drm_atomic_state pointer
976 * @mgr: &struct drm_dp_mst_topology_mgr iteration cursor
977 * @old_state: &struct drm_dp_mst_topology_state iteration cursor for the old
978 * state
979 * @__i: int iteration cursor, for macro-internal use
980 *
981 * This iterates over all DRM DP MST topology managers in an atomic update,
982 * tracking only the old state. This is useful in disable functions, where we
983 * need the old state the hardware is still in.
984 */
985 #define for_each_old_mst_mgr_in_state(__state, mgr, old_state, __i) \
986 for ((__i) = 0; (__i) < (__state)->num_private_objs; (__i)++) \
987 for_each_if(__drm_dp_mst_state_iter_get((__state), &(mgr), &(old_state), NULL, (__i)))
988
989 /**
990 * for_each_new_mst_mgr_in_state - iterate over all DP MST topology managers
991 * in an atomic update
992 * @__state: &struct drm_atomic_state pointer
993 * @mgr: &struct drm_dp_mst_topology_mgr iteration cursor
994 * @new_state: &struct drm_dp_mst_topology_state iteration cursor for the new
995 * state
996 * @__i: int iteration cursor, for macro-internal use
997 *
998 * This iterates over all DRM DP MST topology managers in an atomic update,
999 * tracking only the new state. This is useful in enable functions, where we
1000 * need the new state the hardware should be in when the atomic commit
1001 * operation has completed.
1002 */
1003 #define for_each_new_mst_mgr_in_state(__state, mgr, new_state, __i) \
1004 for ((__i) = 0; (__i) < (__state)->num_private_objs; (__i)++) \
1005 for_each_if(__drm_dp_mst_state_iter_get((__state), &(mgr), NULL, &(new_state), (__i)))
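
/*
 * Example: typical use of the iterators above from a driver's atomic
 * check, e.g. to run extra validation on every MST topology touched by a
 * commit (sketch only; my_validate_mst_bw() is a placeholder):
 *
 *	struct drm_dp_mst_topology_mgr *mgr;
 *	struct drm_dp_mst_topology_state *mst_state;
 *	int i, ret;
 *
 *	for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
 *		ret = my_validate_mst_bw(mgr, mst_state);
 *		if (ret)
 *			return ret;
 *	}
 */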
1006
1007 #endif
1008