drm/dp_mst: Add probe_lock
[linux-2.6-block.git] / include / drm / drm_dp_mst_helper.h
CommitLineData
ad7f8a1f
DA
1/*
2 * Copyright © 2014 Red Hat.
3 *
4 * Permission to use, copy, modify, distribute, and sell this software and its
5 * documentation for any purpose is hereby granted without fee, provided that
6 * the above copyright notice appear in all copies and that both that copyright
7 * notice and this permission notice appear in supporting documentation, and
8 * that the name of the copyright holders not be used in advertising or
9 * publicity pertaining to distribution of the software without specific,
10 * written prior permission. The copyright holders make no representations
11 * about the suitability of this software for any purpose. It is provided "as
12 * is" without express or implied warranty.
13 *
14 * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
15 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
16 * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
17 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
18 * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
19 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
20 * OF THIS SOFTWARE.
21 */
22#ifndef _DRM_DP_MST_HELPER_H_
23#define _DRM_DP_MST_HELPER_H_
24
25#include <linux/types.h>
26#include <drm/drm_dp_helper.h>
3f3353b7 27#include <drm/drm_atomic.h>
ad7f8a1f
DA
28
struct drm_dp_mst_branch;

/**
 * struct drm_dp_vcpi - Virtual Channel Payload Identifier
 * @vcpi: Virtual channel ID.
 * @pbn: Payload Bandwidth Number for this channel
 * @aligned_pbn: PBN aligned with slot size
 * @num_slots: number of slots for this PBN
 */
struct drm_dp_vcpi {
	int vcpi;
	int pbn;
	int aligned_pbn;
	int num_slots;
};
44
/**
 * struct drm_dp_mst_port - MST port
 * @port_num: port number
 * @input: if this port is an input port.
 * @mcs: message capability status - DP 1.2 spec.
 * @ddps: DisplayPort Device Plug Status - DP 1.2
 * @pdt: Peer Device Type
 * @ldps: Legacy Device Plug Status
 * @dpcd_rev: DPCD revision of device on this port
 * @num_sdp_streams: Number of simultaneous streams
 * @num_sdp_stream_sinks: Number of stream sinks
 * @available_pbn: Available bandwidth for this port.
 * @next: link to next port on this branch device
 * @aux: i2c aux transport to talk to device connected to this port, protected
 * by &drm_dp_mst_topology_mgr.lock
 * @parent: branch device parent of this port
 * @vcpi: Virtual Channel Payload info for this port.
 * @connector: DRM connector this port is connected to.
 * @mgr: topology manager this port lives under.
 *
 * This structure represents an MST port endpoint on a device somewhere
 * in the MST topology.
 */
struct drm_dp_mst_port {
	/**
	 * @topology_kref: refcount for this port's lifetime in the topology,
	 * only the DP MST helpers should need to touch this
	 */
	struct kref topology_kref;

	/**
	 * @malloc_kref: refcount for the memory allocation containing this
	 * structure. See drm_dp_mst_get_port_malloc() and
	 * drm_dp_mst_put_port_malloc().
	 */
	struct kref malloc_kref;

	u8 port_num;
	bool input;
	bool mcs;
	bool ddps;
	u8 pdt;
	bool ldps;
	u8 dpcd_rev;
	u8 num_sdp_streams;
	u8 num_sdp_stream_sinks;
	uint16_t available_pbn;
	struct list_head next;
	/**
	 * @mstb: the branch device connected to this port, if there is one.
	 * This should be considered protected for reading by
	 * &drm_dp_mst_topology_mgr.lock. There are two exceptions to this:
	 * &drm_dp_mst_topology_mgr.up_req_work and
	 * &drm_dp_mst_topology_mgr.work, which do not grab
	 * &drm_dp_mst_topology_mgr.lock during reads but are the only
	 * updaters of this list and are protected from writing concurrently
	 * by &drm_dp_mst_topology_mgr.probe_lock.
	 */
	struct drm_dp_mst_branch *mstb;
	struct drm_dp_aux aux; /* i2c bus for this port? */
	struct drm_dp_mst_branch *parent;

	struct drm_dp_vcpi vcpi;
	struct drm_connector *connector;
	struct drm_dp_mst_topology_mgr *mgr;

	/**
	 * @cached_edid: for DP logical ports - make tiling work by ensuring
	 * that the EDID for all connectors is read immediately.
	 */
	struct edid *cached_edid;
	/**
	 * @has_audio: Tracks whether the sink connector to this port is
	 * audio-capable.
	 */
	bool has_audio;
};
122
/**
 * struct drm_dp_mst_branch - MST branch device.
 * @rad: Relative Address to talk to this branch device.
 * @lct: Link count total to talk to this branch device.
 * @num_ports: number of ports on the branch.
 * @msg_slots: one bit per transmitted msg slot.
 * @port_parent: pointer to the port parent, NULL if toplevel.
 * @mgr: topology manager for this branch device.
 * @tx_slots: transmission slots for this device.
 * @last_seqno: last sequence number used to talk to this.
 * @link_address_sent: if a link address message has been sent to this device yet.
 * @guid: guid for DP 1.2 branch device. port under this branch can be
 * identified by port #.
 *
 * This structure represents an MST branch device, there is one
 * primary branch device at the root, along with any other branches connected
 * to downstream port of parent branches.
 */
struct drm_dp_mst_branch {
	/**
	 * @topology_kref: refcount for this branch device's lifetime in the
	 * topology, only the DP MST helpers should need to touch this
	 */
	struct kref topology_kref;

	/**
	 * @malloc_kref: refcount for the memory allocation containing this
	 * structure. See drm_dp_mst_get_mstb_malloc() and
	 * drm_dp_mst_put_mstb_malloc().
	 */
	struct kref malloc_kref;

	/**
	 * @destroy_next: linked-list entry used by
	 * drm_dp_delayed_destroy_work()
	 */
	struct list_head destroy_next;

	u8 rad[8];
	u8 lct;
	int num_ports;

	int msg_slots;
	/**
	 * @ports: the list of ports on this branch device. This should be
	 * considered protected for reading by &drm_dp_mst_topology_mgr.lock.
	 * There are two exceptions to this:
	 * &drm_dp_mst_topology_mgr.up_req_work and
	 * &drm_dp_mst_topology_mgr.work, which do not grab
	 * &drm_dp_mst_topology_mgr.lock during reads but are the only
	 * updaters of this list and are protected from updating the list
	 * concurrently by &drm_dp_mst_topology_mgr.probe_lock
	 */
	struct list_head ports;

	/* list of tx ops queue for this port */
	struct drm_dp_mst_port *port_parent;
	struct drm_dp_mst_topology_mgr *mgr;

	/* slots are protected by mstb->mgr->qlock */
	struct drm_dp_sideband_msg_tx *tx_slots[2];
	int last_seqno;
	bool link_address_sent;

	/* global unique identifier to identify branch devices */
	u8 guid[16];
};
190
191
/* sideband msg header - not bit struct */
struct drm_dp_sideband_msg_hdr {
	u8 lct;
	u8 lcr;
	u8 rad[8];
	bool broadcast;
	bool path_msg;
	u8 msg_len;
	bool somt;
	bool eomt;
	bool seqno;
};
204
/* Parsed bodies of sideband message replies (DP 1.2 section 2.11) */

struct drm_dp_nak_reply {
	u8 guid[16];
	u8 reason;
	u8 nak_data;
};

struct drm_dp_link_address_ack_reply {
	u8 guid[16];
	u8 nports;
	struct drm_dp_link_addr_reply_port {
		bool input_port;
		u8 peer_device_type;
		u8 port_number;
		bool mcs;
		bool ddps;
		bool legacy_device_plug_status;
		u8 dpcd_revision;
		u8 peer_guid[16];
		u8 num_sdp_streams;
		u8 num_sdp_stream_sinks;
	} ports[16];
};

struct drm_dp_remote_dpcd_read_ack_reply {
	u8 port_number;
	u8 num_bytes;
	u8 bytes[255];
};

struct drm_dp_remote_dpcd_write_ack_reply {
	u8 port_number;
};

struct drm_dp_remote_dpcd_write_nak_reply {
	u8 port_number;
	u8 reason;
	u8 bytes_written_before_failure;
};

struct drm_dp_remote_i2c_read_ack_reply {
	u8 port_number;
	u8 num_bytes;
	u8 bytes[255];
};

struct drm_dp_remote_i2c_read_nak_reply {
	u8 port_number;
	u8 nak_reason;
	u8 i2c_nak_transaction;
};

struct drm_dp_remote_i2c_write_ack_reply {
	u8 port_number;
};
259
260
/* receive state for an in-progress sideband message, assembled chunk by chunk */
struct drm_dp_sideband_msg_rx {
	u8 chunk[48];
	u8 msg[256];
	u8 curchunk_len;
	u8 curchunk_idx; /* chunk we are parsing now */
	u8 curchunk_hdrlen;
	u8 curlen; /* total length of the msg */
	bool have_somt;
	bool have_eomt;
	struct drm_dp_sideband_msg_hdr initial_hdr;
};
272
ef8f9bea 273#define DRM_DP_MAX_SDP_STREAMS 16
ad7f8a1f
DA
274struct drm_dp_allocate_payload {
275 u8 port_number;
276 u8 number_sdp_streams;
277 u8 vcpi;
278 u16 pbn;
ef8f9bea 279 u8 sdp_stream_sink[DRM_DP_MAX_SDP_STREAMS];
ad7f8a1f
DA
280};
281
282struct drm_dp_allocate_payload_ack_reply {
283 u8 port_number;
284 u8 vcpi;
285 u16 allocated_pbn;
286};
287
struct drm_dp_connection_status_notify {
	u8 guid[16];
	u8 port_number;
	bool legacy_device_plug_status;
	bool displayport_device_plug_status;
	bool message_capability_status;
	bool input_port;
	u8 peer_device_type;
};

struct drm_dp_remote_dpcd_read {
	u8 port_number;
	u32 dpcd_address;
	u8 num_bytes;
};

struct drm_dp_remote_dpcd_write {
	u8 port_number;
	u32 dpcd_address;
	u8 num_bytes;
	u8 *bytes;
};
310
ae491542 311#define DP_REMOTE_I2C_READ_MAX_TRANSACTIONS 4
ad7f8a1f
DA
312struct drm_dp_remote_i2c_read {
313 u8 num_transactions;
314 u8 port_number;
2f015ec6 315 struct drm_dp_remote_i2c_read_tx {
ad7f8a1f
DA
316 u8 i2c_dev_id;
317 u8 num_bytes;
318 u8 *bytes;
319 u8 no_stop_bit;
320 u8 i2c_transaction_delay;
ae491542 321 } transactions[DP_REMOTE_I2C_READ_MAX_TRANSACTIONS];
ad7f8a1f
DA
322 u8 read_i2c_device_id;
323 u8 num_bytes_read;
324};
325
326struct drm_dp_remote_i2c_write {
327 u8 port_number;
328 u8 write_i2c_device_id;
329 u8 num_bytes;
330 u8 *bytes;
331};
332
/* this covers ENUM_RESOURCES, POWER_DOWN_PHY, POWER_UP_PHY */
struct drm_dp_port_number_req {
	u8 port_number;
};

struct drm_dp_enum_path_resources_ack_reply {
	u8 port_number;
	u16 full_payload_bw_number;
	u16 avail_payload_bw_number;
};

/* covers POWER_DOWN_PHY, POWER_UP_PHY */
struct drm_dp_port_number_rep {
	u8 port_number;
};

struct drm_dp_query_payload {
	u8 port_number;
	u8 vcpi;
};

struct drm_dp_resource_status_notify {
	u8 port_number;
	u8 guid[16];
	u16 available_pbn;
};

struct drm_dp_query_payload_ack_reply {
	u8 port_number;
	u16 allocated_pbn;
};
364
/* union of all possible sideband request bodies, keyed by @req_type */
struct drm_dp_sideband_msg_req_body {
	u8 req_type;
	union ack_req {
		struct drm_dp_connection_status_notify conn_stat;
		struct drm_dp_port_number_req port_num;
		struct drm_dp_resource_status_notify resource_stat;

		struct drm_dp_query_payload query_payload;
		struct drm_dp_allocate_payload allocate_payload;

		struct drm_dp_remote_dpcd_read dpcd_read;
		struct drm_dp_remote_dpcd_write dpcd_write;

		struct drm_dp_remote_i2c_read i2c_read;
		struct drm_dp_remote_i2c_write i2c_write;
	} u;
};

/* union of all possible sideband reply bodies, keyed by @reply_type/@req_type */
struct drm_dp_sideband_msg_reply_body {
	u8 reply_type;
	u8 req_type;
	union ack_replies {
		struct drm_dp_nak_reply nak;
		struct drm_dp_link_address_ack_reply link_addr;
		struct drm_dp_port_number_rep port_number;

		struct drm_dp_enum_path_resources_ack_reply path_resources;
		struct drm_dp_allocate_payload_ack_reply allocate_payload;
		struct drm_dp_query_payload_ack_reply query_payload;

		struct drm_dp_remote_dpcd_read_ack_reply remote_dpcd_read_ack;
		struct drm_dp_remote_dpcd_write_ack_reply remote_dpcd_write_ack;
		struct drm_dp_remote_dpcd_write_nak_reply remote_dpcd_write_nack;

		struct drm_dp_remote_i2c_read_ack_reply remote_i2c_read_ack;
		struct drm_dp_remote_i2c_read_nak_reply remote_i2c_read_nack;
		struct drm_dp_remote_i2c_write_ack_reply remote_i2c_write_ack;
	} u;
};
404
/* msg is queued to be put into a slot */
#define DRM_DP_SIDEBAND_TX_QUEUED 0
/* msg has started transmitting on a slot - still on msgq */
#define DRM_DP_SIDEBAND_TX_START_SEND 1
/* msg has finished transmitting on a slot - removed from msgq only in slot */
#define DRM_DP_SIDEBAND_TX_SENT 2
/* msg has received a response - removed from slot */
#define DRM_DP_SIDEBAND_TX_RX 3
#define DRM_DP_SIDEBAND_TX_TIMEOUT 4

/* an in-flight sideband transmit; @state is one of DRM_DP_SIDEBAND_TX_* */
struct drm_dp_sideband_msg_tx {
	u8 msg[256];
	u8 chunk[48];
	u8 cur_offset;
	u8 cur_len;
	struct drm_dp_mst_branch *dst;
	struct list_head next;
	int seqno;
	int state;
	bool path_msg;
	struct drm_dp_sideband_msg_reply_body reply;
};
427
/* sideband msg handler */
struct drm_dp_mst_topology_mgr;
struct drm_dp_mst_topology_cbs {
	/* create a connector for a port */
	struct drm_connector *(*add_connector)(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, const char *path);
	void (*register_connector)(struct drm_connector *connector);
	void (*destroy_connector)(struct drm_dp_mst_topology_mgr *mgr,
				  struct drm_connector *connector);
};
437
#define DP_MAX_PAYLOAD (sizeof(unsigned long) * 8)

#define DP_PAYLOAD_LOCAL 1
#define DP_PAYLOAD_REMOTE 2
#define DP_PAYLOAD_DELETE_LOCAL 3

/* one VC payload slot allocation; @payload_state is a DP_PAYLOAD_* value */
struct drm_dp_payload {
	int payload_state;
	int start_slot;
	int num_slots;
	int vcpi;
};
450
a4370c77
VS
451#define to_dp_mst_topology_state(x) container_of(x, struct drm_dp_mst_topology_state, base)
452
eceae147
LP
453struct drm_dp_vcpi_allocation {
454 struct drm_dp_mst_port *port;
455 int vcpi;
456 struct list_head next;
457};
458
3f3353b7 459struct drm_dp_mst_topology_state {
a4370c77 460 struct drm_private_state base;
eceae147 461 struct list_head vcpis;
3f3353b7
PD
462 struct drm_dp_mst_topology_mgr *mgr;
463};
464
a4370c77
VS
465#define to_dp_mst_topology_mgr(x) container_of(x, struct drm_dp_mst_topology_mgr, base)
466
ad7f8a1f
DA
/**
 * struct drm_dp_mst_topology_mgr - DisplayPort MST manager
 *
 * This struct represents the top-level DisplayPort MST topology manager.
 * There should be one instance of this for every MST capable DP connector
 * on the GPU.
 */
struct drm_dp_mst_topology_mgr {
	/**
	 * @base: Base private object for atomic
	 */
	struct drm_private_obj base;

	/**
	 * @dev: device pointer for adding i2c devices etc.
	 */
	struct drm_device *dev;
	/**
	 * @cbs: callbacks for connector addition and destruction.
	 */
	const struct drm_dp_mst_topology_cbs *cbs;
	/**
	 * @max_dpcd_transaction_bytes: maximum number of bytes to read/write
	 * in one go.
	 */
	int max_dpcd_transaction_bytes;
	/**
	 * @aux: AUX channel for the DP MST connector this topology mgr is
	 * controlling.
	 */
	struct drm_dp_aux *aux;
	/**
	 * @max_payloads: maximum number of payloads the GPU can generate.
	 */
	int max_payloads;
	/**
	 * @conn_base_id: DRM connector ID this mgr is connected to. Only used
	 * to build the MST connector path value.
	 */
	int conn_base_id;

	/**
	 * @down_rep_recv: Message receiver state for down replies.
	 */
	struct drm_dp_sideband_msg_rx down_rep_recv;
	/**
	 * @up_req_recv: Message receiver state for up requests.
	 */
	struct drm_dp_sideband_msg_rx up_req_recv;

	/**
	 * @lock: protects @mst_state, @mst_primary and @dpcd.
	 */
	struct mutex lock;

	/**
	 * @probe_lock: Prevents @work and @up_req_work, the only writers of
	 * &drm_dp_mst_port.mstb and &drm_dp_mst_branch.ports, from racing
	 * while they update the topology.
	 */
	struct mutex probe_lock;

	/**
	 * @mst_state: If this manager is enabled for an MST capable port.
	 * False if no MST sink/branch devices are connected.
	 */
	bool mst_state;
	/**
	 * @mst_primary: Pointer to the primary/first branch device.
	 */
	struct drm_dp_mst_branch *mst_primary;

	/**
	 * @dpcd: Cache of DPCD for primary port.
	 */
	u8 dpcd[DP_RECEIVER_CAP_SIZE];
	/**
	 * @sink_count: Sink count from DEVICE_SERVICE_IRQ_VECTOR_ESI0.
	 */
	u8 sink_count;
	/**
	 * @pbn_div: PBN to slots divisor.
	 */
	int pbn_div;

	/**
	 * @funcs: Atomic helper callbacks
	 */
	const struct drm_private_state_funcs *funcs;

	/**
	 * @qlock: protects @tx_msg_downq, the &drm_dp_mst_branch.tx_slots and
	 * &drm_dp_sideband_msg_tx.state once they are queued
	 */
	struct mutex qlock;
	/**
	 * @tx_msg_downq: List of pending down requests.
	 */
	struct list_head tx_msg_downq;

	/**
	 * @payload_lock: Protect payload information.
	 */
	struct mutex payload_lock;
	/**
	 * @proposed_vcpis: Array of pointers for the new VCPI allocation. The
	 * VCPI structure itself is &drm_dp_mst_port.vcpi.
	 */
	struct drm_dp_vcpi **proposed_vcpis;
	/**
	 * @payloads: Array of payloads.
	 */
	struct drm_dp_payload *payloads;
	/**
	 * @payload_mask: Elements of @payloads actually in use. Since
	 * reallocation of active outputs isn't possible gaps can be created by
	 * disabling outputs out of order compared to how they've been enabled.
	 */
	unsigned long payload_mask;
	/**
	 * @vcpi_mask: Similar to @payload_mask, but for @proposed_vcpis.
	 */
	unsigned long vcpi_mask;

	/**
	 * @tx_waitq: Wait to queue stall for the tx worker.
	 */
	wait_queue_head_t tx_waitq;
	/**
	 * @work: Probe work.
	 */
	struct work_struct work;
	/**
	 * @tx_work: Sideband transmit worker. This can nest within the main
	 * @work worker for each transaction @work launches.
	 */
	struct work_struct tx_work;

	/**
	 * @destroy_port_list: List of to be destroyed connectors.
	 */
	struct list_head destroy_port_list;
	/**
	 * @destroy_branch_device_list: List of to be destroyed branch
	 * devices.
	 */
	struct list_head destroy_branch_device_list;
	/**
	 * @delayed_destroy_lock: Protects @destroy_port_list and
	 * @destroy_branch_device_list.
	 */
	struct mutex delayed_destroy_lock;
	/**
	 * @delayed_destroy_work: Work item to destroy MST port and branch
	 * devices, needed to avoid locking inversion.
	 */
	struct work_struct delayed_destroy_work;

	/**
	 * @up_req_list: List of pending up requests from the topology that
	 * need to be processed, in chronological order.
	 */
	struct list_head up_req_list;
	/**
	 * @up_req_lock: Protects @up_req_list
	 */
	struct mutex up_req_lock;
	/**
	 * @up_req_work: Work item to process up requests received from the
	 * topology. Needed to avoid blocking hotplug handling and sideband
	 * transmissions.
	 */
	struct work_struct up_req_work;
};
641
7b0a89a6
DP
642int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
643 struct drm_device *dev, struct drm_dp_aux *aux,
644 int max_dpcd_transaction_bytes,
645 int max_payloads, int conn_base_id);
ad7f8a1f
DA
646
647void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr);
648
649
650int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool mst_state);
651
652
653int drm_dp_mst_hpd_irq(struct drm_dp_mst_topology_mgr *mgr, u8 *esi, bool *handled);
654
655
c6a0aed4 656enum drm_connector_status drm_dp_mst_detect_port(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port);
ad7f8a1f 657
ef8f9bea
LY
658bool drm_dp_mst_port_has_audio(struct drm_dp_mst_topology_mgr *mgr,
659 struct drm_dp_mst_port *port);
ad7f8a1f
DA
660struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port);
661
662
663int drm_dp_calc_pbn_mode(int clock, int bpp);
664
665
1e797f55
PD
666bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
667 struct drm_dp_mst_port *port, int pbn, int slots);
ad7f8a1f 668
87f5942d
DA
669int drm_dp_mst_get_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port);
670
ad7f8a1f
DA
671
672void drm_dp_mst_reset_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port);
673
674
675void drm_dp_mst_deallocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
676 struct drm_dp_mst_port *port);
677
678
679int drm_dp_find_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr,
680 int pbn);
681
682
683int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr);
684
685
686int drm_dp_update_payload_part2(struct drm_dp_mst_topology_mgr *mgr);
687
688int drm_dp_check_act_status(struct drm_dp_mst_topology_mgr *mgr);
689
690void drm_dp_mst_dump_topology(struct seq_file *m,
691 struct drm_dp_mst_topology_mgr *mgr);
692
693void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr);
c235316d
LP
694int __must_check
695drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr);
562836a2
VS
696
697ssize_t drm_dp_mst_dpcd_read(struct drm_dp_aux *aux,
698 unsigned int offset, void *buffer, size_t size);
699ssize_t drm_dp_mst_dpcd_write(struct drm_dp_aux *aux,
700 unsigned int offset, void *buffer, size_t size);
701
702int drm_dp_mst_connector_late_register(struct drm_connector *connector,
703 struct drm_dp_mst_port *port);
704void drm_dp_mst_connector_early_unregister(struct drm_connector *connector,
705 struct drm_dp_mst_port *port);
706
3f3353b7
PD
707struct drm_dp_mst_topology_state *drm_atomic_get_mst_topology_state(struct drm_atomic_state *state,
708 struct drm_dp_mst_topology_mgr *mgr);
eceae147
LP
709int __must_check
710drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state,
711 struct drm_dp_mst_topology_mgr *mgr,
712 struct drm_dp_mst_port *port, int pbn);
713int __must_check
714drm_dp_atomic_release_vcpi_slots(struct drm_atomic_state *state,
715 struct drm_dp_mst_topology_mgr *mgr,
716 struct drm_dp_mst_port *port);
0bb9c2b2
DP
717int drm_dp_send_power_updown_phy(struct drm_dp_mst_topology_mgr *mgr,
718 struct drm_dp_mst_port *port, bool power_up);
eceae147 719int __must_check drm_dp_mst_atomic_check(struct drm_atomic_state *state);
3f3353b7 720
ebcc0e6b
LP
721void drm_dp_mst_get_port_malloc(struct drm_dp_mst_port *port);
722void drm_dp_mst_put_port_malloc(struct drm_dp_mst_port *port);
723
bea5c38f
LP
724extern const struct drm_private_state_funcs drm_dp_mst_topology_state_funcs;
725
726/**
727 * __drm_dp_mst_state_iter_get - private atomic state iterator function for
728 * macro-internal use
729 * @state: &struct drm_atomic_state pointer
730 * @mgr: pointer to the &struct drm_dp_mst_topology_mgr iteration cursor
731 * @old_state: optional pointer to the old &struct drm_dp_mst_topology_state
732 * iteration cursor
733 * @new_state: optional pointer to the new &struct drm_dp_mst_topology_state
734 * iteration cursor
735 * @i: int iteration cursor, for macro-internal use
736 *
737 * Used by for_each_oldnew_mst_mgr_in_state(),
738 * for_each_old_mst_mgr_in_state(), and for_each_new_mst_mgr_in_state(). Don't
739 * call this directly.
740 *
741 * Returns:
742 * True if the current &struct drm_private_obj is a &struct
743 * drm_dp_mst_topology_mgr, false otherwise.
744 */
745static inline bool
746__drm_dp_mst_state_iter_get(struct drm_atomic_state *state,
747 struct drm_dp_mst_topology_mgr **mgr,
748 struct drm_dp_mst_topology_state **old_state,
749 struct drm_dp_mst_topology_state **new_state,
750 int i)
751{
752 struct __drm_private_objs_state *objs_state = &state->private_objs[i];
753
754 if (objs_state->ptr->funcs != &drm_dp_mst_topology_state_funcs)
755 return false;
756
757 *mgr = to_dp_mst_topology_mgr(objs_state->ptr);
758 if (old_state)
759 *old_state = to_dp_mst_topology_state(objs_state->old_state);
760 if (new_state)
761 *new_state = to_dp_mst_topology_state(objs_state->new_state);
762
763 return true;
764}
765
/**
 * for_each_oldnew_mst_mgr_in_state - iterate over all DP MST topology
 * managers in an atomic update
 * @__state: &struct drm_atomic_state pointer
 * @mgr: &struct drm_dp_mst_topology_mgr iteration cursor
 * @old_state: &struct drm_dp_mst_topology_state iteration cursor for the old
 * state
 * @new_state: &struct drm_dp_mst_topology_state iteration cursor for the new
 * state
 * @__i: int iteration cursor, for macro-internal use
 *
 * This iterates over all DRM DP MST topology managers in an atomic update,
 * tracking both old and new state. This is useful in places where the state
 * delta needs to be considered, for example in atomic check functions.
 */
#define for_each_oldnew_mst_mgr_in_state(__state, mgr, old_state, new_state, __i) \
	for ((__i) = 0; (__i) < (__state)->num_private_objs; (__i)++) \
		for_each_if(__drm_dp_mst_state_iter_get((__state), &(mgr), &(old_state), &(new_state), (__i)))

/**
 * for_each_old_mst_mgr_in_state - iterate over all DP MST topology managers
 * in an atomic update
 * @__state: &struct drm_atomic_state pointer
 * @mgr: &struct drm_dp_mst_topology_mgr iteration cursor
 * @old_state: &struct drm_dp_mst_topology_state iteration cursor for the old
 * state
 * @__i: int iteration cursor, for macro-internal use
 *
 * This iterates over all DRM DP MST topology managers in an atomic update,
 * tracking only the old state. This is useful in disable functions, where we
 * need the old state the hardware is still in.
 */
#define for_each_old_mst_mgr_in_state(__state, mgr, old_state, __i) \
	for ((__i) = 0; (__i) < (__state)->num_private_objs; (__i)++) \
		for_each_if(__drm_dp_mst_state_iter_get((__state), &(mgr), &(old_state), NULL, (__i)))

/**
 * for_each_new_mst_mgr_in_state - iterate over all DP MST topology managers
 * in an atomic update
 * @__state: &struct drm_atomic_state pointer
 * @mgr: &struct drm_dp_mst_topology_mgr iteration cursor
 * @new_state: &struct drm_dp_mst_topology_state iteration cursor for the new
 * state
 * @__i: int iteration cursor, for macro-internal use
 *
 * This iterates over all DRM DP MST topology managers in an atomic update,
 * tracking only the new state. This is useful in enable functions, where we
 * need the new state the hardware should be in when the atomic commit
 * operation has completed.
 */
#define for_each_new_mst_mgr_in_state(__state, mgr, new_state, __i) \
	for ((__i) = 0; (__i) < (__state)->num_private_objs; (__i)++) \
		for_each_if(__drm_dp_mst_state_iter_get((__state), &(mgr), NULL, &(new_state), (__i)))
819
ad7f8a1f 820#endif