Commit | Line | Data |
---|---|---|
ad7f8a1f DA |
1 | /* |
2 | * Copyright © 2014 Red Hat. | |
3 | * | |
4 | * Permission to use, copy, modify, distribute, and sell this software and its | |
5 | * documentation for any purpose is hereby granted without fee, provided that | |
6 | * the above copyright notice appear in all copies and that both that copyright | |
7 | * notice and this permission notice appear in supporting documentation, and | |
8 | * that the name of the copyright holders not be used in advertising or | |
9 | * publicity pertaining to distribution of the software without specific, | |
10 | * written prior permission. The copyright holders make no representations | |
11 | * about the suitability of this software for any purpose. It is provided "as | |
12 | * is" without express or implied warranty. | |
13 | * | |
14 | * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, | |
15 | * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO | |
16 | * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR | |
17 | * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, | |
18 | * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER | |
19 | * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE | |
20 | * OF THIS SOFTWARE. | |
21 | */ | |
22 | #ifndef _DRM_DP_MST_HELPER_H_ | |
23 | #define _DRM_DP_MST_HELPER_H_ | |
24 | ||
25 | #include <linux/types.h> | |
26 | #include <drm/drm_dp_helper.h> | |
3f3353b7 | 27 | #include <drm/drm_atomic.h> |
ad7f8a1f DA |
28 | |
29 | struct drm_dp_mst_branch; | |
30 | ||
31 | /** | |
32197aab | 32 | * struct drm_dp_vcpi - Virtual Channel Payload Identifier |
ad7f8a1f DA |
33 | * @vcpi: Virtual channel ID. |
34 | * @pbn: Payload Bandwidth Number for this channel | |
35 | * @aligned_pbn: PBN aligned with slot size | |
36 | * @num_slots: number of slots for this PBN | |
37 | */ | |
38 | struct drm_dp_vcpi { | |
39 | int vcpi; | |
40 | int pbn; | |
41 | int aligned_pbn; | |
42 | int num_slots; | |
43 | }; | |
44 | ||
45 | /** | |
46 | * struct drm_dp_mst_port - MST port | |
ad7f8a1f | 47 | * @port_num: port number |
3f9b3f02 LP |
48 | * @input: if this port is an input port. Protected by |
49 | * &drm_dp_mst_topology_mgr.base.lock. | |
50 | * @mcs: message capability status - DP 1.2 spec. Protected by | |
51 | * &drm_dp_mst_topology_mgr.base.lock. | |
52 | * @ddps: DisplayPort Device Plug Status - DP 1.2. Protected by | |
53 | * &drm_dp_mst_topology_mgr.base.lock. | |
54 | * @pdt: Peer Device Type. Protected by | |
55 | * &drm_dp_mst_topology_mgr.base.lock. | |
56 | * @ldps: Legacy Device Plug Status. Protected by | |
57 | * &drm_dp_mst_topology_mgr.base.lock. | |
58 | * @dpcd_rev: DPCD revision of device on this port. Protected by | |
59 | * &drm_dp_mst_topology_mgr.base.lock. | |
60 | * @num_sdp_streams: Number of simultaneous streams. Protected by | |
61 | * &drm_dp_mst_topology_mgr.base.lock. | |
62 | * @num_sdp_stream_sinks: Number of stream sinks. Protected by | |
63 | * &drm_dp_mst_topology_mgr.base.lock. | |
64 | * @available_pbn: Available bandwidth for this port. Protected by | |
65 | * &drm_dp_mst_topology_mgr.base.lock. | |
ad7f8a1f | 66 | * @next: link to next port on this branch device |
c485e2c9 | 67 | * @aux: i2c aux transport to talk to device connected to this port, protected |
3f9b3f02 | 68 | * by &drm_dp_mst_topology_mgr.base.lock. |
ad7f8a1f DA |
69 | * @parent: branch device parent of this port |
70 | * @vcpi: Virtual Channel Payload info for this port. | |
3f9b3f02 LP |
71 | * @connector: DRM connector this port is connected to. Protected by |
72 | * &drm_dp_mst_topology_mgr.base.lock. | |
ad7f8a1f DA |
73 | * @mgr: topology manager this port lives under. |
74 | * | |
75 | * This structure represents an MST port endpoint on a device somewhere | |
76 | * in the MST topology. | |
77 | */ | |
78 | struct drm_dp_mst_port { | |
ebcc0e6b LP |
79 | /** |
80 | * @topology_kref: refcount for this port's lifetime in the topology, | |
81 | * only the DP MST helpers should need to touch this | |
82 | */ | |
83 | struct kref topology_kref; | |
84 | ||
85 | /** | |
86 | * @malloc_kref: refcount for the memory allocation containing this | |
87 | * structure. See drm_dp_mst_get_port_malloc() and | |
88 | * drm_dp_mst_put_port_malloc(). | |
89 | */ | |
90 | struct kref malloc_kref; | |
ad7f8a1f | 91 | |
ad7f8a1f DA |
92 | u8 port_num; |
93 | bool input; | |
94 | bool mcs; | |
95 | bool ddps; | |
96 | u8 pdt; | |
97 | bool ldps; | |
98 | u8 dpcd_rev; | |
99 | u8 num_sdp_streams; | |
100 | u8 num_sdp_stream_sinks; | |
101 | uint16_t available_pbn; | |
102 | struct list_head next; | |
14692a36 LP |
103 | /** |
104 | * @mstb: the branch device connected to this port, if there is one. | |
105 | * This should be considered protected for reading by | |
106 | * &drm_dp_mst_topology_mgr.lock. There are two exceptions to this: | |
107 | * &drm_dp_mst_topology_mgr.up_req_work and | |
108 | * &drm_dp_mst_topology_mgr.work, which do not grab | |
109 | * &drm_dp_mst_topology_mgr.lock during reads but are the only | |
110 | * updaters of this list and are protected from writing concurrently | |
111 | * by &drm_dp_mst_topology_mgr.probe_lock. | |
112 | */ | |
113 | struct drm_dp_mst_branch *mstb; | |
ad7f8a1f DA |
114 | struct drm_dp_aux aux; /* i2c bus for this port? */ |
115 | struct drm_dp_mst_branch *parent; | |
116 | ||
117 | struct drm_dp_vcpi vcpi; | |
118 | struct drm_connector *connector; | |
119 | struct drm_dp_mst_topology_mgr *mgr; | |
c6a0aed4 | 120 | |
132d49d7 DV |
121 | /** |
122 | * @cached_edid: for DP logical ports - make tiling work by ensuring | |
123 | * that the EDID for all connectors is read immediately. | |
124 | */ | |
125 | struct edid *cached_edid; | |
126 | /** | |
127 | * @has_audio: Tracks whether the sink connector to this port is | |
128 | * audio-capable. | |
129 | */ | |
ef8f9bea | 130 | bool has_audio; |
ad7f8a1f DA |
131 | }; |
132 | ||
133 | /** | |
134 | * struct drm_dp_mst_branch - MST branch device. | |
ad7f8a1f DA |
135 | * @rad: Relative Address to talk to this branch device. |
136 | * @lct: Link count total to talk to this branch device. | |
137 | * @num_ports: number of ports on the branch. | |
138 | * @msg_slots: one bit per transmitted msg slot. | |
ad7f8a1f DA |
139 | * @port_parent: pointer to the port parent, NULL if toplevel. |
140 | * @mgr: topology manager for this branch device. | |
141 | * @tx_slots: transmission slots for this device. | |
142 | * @last_seqno: last sequence number used to talk to this. | |
143 | * @link_address_sent: if a link address message has been sent to this device yet. | |
5e93b820 HW |
144 | * @guid: guid for DP 1.2 branch device. port under this branch can be |
145 | * identified by port #. | |
ad7f8a1f DA |
146 | * |
147 | * This structure represents an MST branch device, there is one | |
5e93b820 HW |
148 | * primary branch device at the root, along with any other branches connected |
149 | * to the downstream ports of parent branches. | |
ad7f8a1f DA |
150 | */ |
151 | struct drm_dp_mst_branch { | |
ebcc0e6b LP |
152 | /** |
153 | * @topology_kref: refcount for this branch device's lifetime in the | |
154 | * topology, only the DP MST helpers should need to touch this | |
155 | */ | |
156 | struct kref topology_kref; | |
157 | ||
158 | /** | |
159 | * @malloc_kref: refcount for the memory allocation containing this | |
160 | * structure. See drm_dp_mst_get_mstb_malloc() and | |
161 | * drm_dp_mst_put_mstb_malloc(). | |
162 | */ | |
163 | struct kref malloc_kref; | |
164 | ||
7cb12d48 LP |
165 | /** |
166 | * @destroy_next: linked-list entry used by | |
167 | * drm_dp_delayed_destroy_work() | |
168 | */ | |
169 | struct list_head destroy_next; | |
170 | ||
ad7f8a1f DA |
171 | u8 rad[8]; |
172 | u8 lct; | |
173 | int num_ports; | |
174 | ||
175 | int msg_slots; | |
14692a36 LP |
176 | /** |
177 | * @ports: the list of ports on this branch device. This should be | |
178 | * considered protected for reading by &drm_dp_mst_topology_mgr.lock. | |
179 | * There are two exceptions to this: | |
180 | * &drm_dp_mst_topology_mgr.up_req_work and | |
181 | * &drm_dp_mst_topology_mgr.work, which do not grab | |
182 | * &drm_dp_mst_topology_mgr.lock during reads but are the only | |
183 | * updaters of this list and are protected from updating the list | |
184 | * concurrently by &drm_dp_mst_topology_mgr.probe_lock. | |
185 | */ | |
ad7f8a1f DA |
186 | struct list_head ports; |
187 | ||
188 | /* list of tx ops queue for this port */ | |
189 | struct drm_dp_mst_port *port_parent; | |
190 | struct drm_dp_mst_topology_mgr *mgr; | |
191 | ||
192 | /* slots are protected by mstb->mgr->qlock */ | |
193 | struct drm_dp_sideband_msg_tx *tx_slots[2]; | |
194 | int last_seqno; | |
195 | bool link_address_sent; | |
5e93b820 HW |
196 | |
197 | /* global unique identifier to identify branch devices */ | |
198 | u8 guid[16]; | |
ad7f8a1f DA |
199 | }; |
200 | ||
201 | ||
202 | /* sideband msg header - not bit struct */ | |
203 | struct drm_dp_sideband_msg_hdr { | |
204 | u8 lct; | |
205 | u8 lcr; | |
206 | u8 rad[8]; | |
207 | bool broadcast; | |
208 | bool path_msg; | |
209 | u8 msg_len; | |
210 | bool somt; | |
211 | bool eomt; | |
212 | bool seqno; | |
213 | }; | |
214 | ||
215 | struct drm_dp_nak_reply { | |
216 | u8 guid[16]; | |
217 | u8 reason; | |
218 | u8 nak_data; | |
219 | }; | |
220 | ||
221 | struct drm_dp_link_address_ack_reply { | |
222 | u8 guid[16]; | |
223 | u8 nports; | |
224 | struct drm_dp_link_addr_reply_port { | |
225 | bool input_port; | |
226 | u8 peer_device_type; | |
227 | u8 port_number; | |
228 | bool mcs; | |
229 | bool ddps; | |
230 | bool legacy_device_plug_status; | |
231 | u8 dpcd_revision; | |
232 | u8 peer_guid[16]; | |
233 | u8 num_sdp_streams; | |
234 | u8 num_sdp_stream_sinks; | |
235 | } ports[16]; | |
236 | }; | |
237 | ||
238 | struct drm_dp_remote_dpcd_read_ack_reply { | |
239 | u8 port_number; | |
240 | u8 num_bytes; | |
241 | u8 bytes[255]; | |
242 | }; | |
243 | ||
244 | struct drm_dp_remote_dpcd_write_ack_reply { | |
245 | u8 port_number; | |
246 | }; | |
247 | ||
248 | struct drm_dp_remote_dpcd_write_nak_reply { | |
249 | u8 port_number; | |
250 | u8 reason; | |
251 | u8 bytes_written_before_failure; | |
252 | }; | |
253 | ||
254 | struct drm_dp_remote_i2c_read_ack_reply { | |
255 | u8 port_number; | |
256 | u8 num_bytes; | |
257 | u8 bytes[255]; | |
258 | }; | |
259 | ||
260 | struct drm_dp_remote_i2c_read_nak_reply { | |
261 | u8 port_number; | |
262 | u8 nak_reason; | |
263 | u8 i2c_nak_transaction; | |
264 | }; | |
265 | ||
266 | struct drm_dp_remote_i2c_write_ack_reply { | |
267 | u8 port_number; | |
268 | }; | |
269 | ||
270 | ||
271 | struct drm_dp_sideband_msg_rx { | |
272 | u8 chunk[48]; | |
273 | u8 msg[256]; | |
274 | u8 curchunk_len; | |
275 | u8 curchunk_idx; /* chunk we are parsing now */ | |
276 | u8 curchunk_hdrlen; | |
277 | u8 curlen; /* total length of the msg */ | |
278 | bool have_somt; | |
279 | bool have_eomt; | |
280 | struct drm_dp_sideband_msg_hdr initial_hdr; | |
281 | }; | |
282 | ||
ef8f9bea | 283 | #define DRM_DP_MAX_SDP_STREAMS 16 |
ad7f8a1f DA |
284 | struct drm_dp_allocate_payload { |
285 | u8 port_number; | |
286 | u8 number_sdp_streams; | |
287 | u8 vcpi; | |
288 | u16 pbn; | |
ef8f9bea | 289 | u8 sdp_stream_sink[DRM_DP_MAX_SDP_STREAMS]; |
ad7f8a1f DA |
290 | }; |
291 | ||
292 | struct drm_dp_allocate_payload_ack_reply { | |
293 | u8 port_number; | |
294 | u8 vcpi; | |
295 | u16 allocated_pbn; | |
296 | }; | |
297 | ||
298 | struct drm_dp_connection_status_notify { | |
299 | u8 guid[16]; | |
300 | u8 port_number; | |
301 | bool legacy_device_plug_status; | |
302 | bool displayport_device_plug_status; | |
303 | bool message_capability_status; | |
304 | bool input_port; | |
305 | u8 peer_device_type; | |
306 | }; | |
307 | ||
308 | struct drm_dp_remote_dpcd_read { | |
309 | u8 port_number; | |
310 | u32 dpcd_address; | |
311 | u8 num_bytes; | |
312 | }; | |
313 | ||
314 | struct drm_dp_remote_dpcd_write { | |
315 | u8 port_number; | |
316 | u32 dpcd_address; | |
317 | u8 num_bytes; | |
318 | u8 *bytes; | |
319 | }; | |
320 | ||
ae491542 | 321 | #define DP_REMOTE_I2C_READ_MAX_TRANSACTIONS 4 |
ad7f8a1f DA |
322 | struct drm_dp_remote_i2c_read { |
323 | u8 num_transactions; | |
324 | u8 port_number; | |
2f015ec6 | 325 | struct drm_dp_remote_i2c_read_tx { |
ad7f8a1f DA |
326 | u8 i2c_dev_id; |
327 | u8 num_bytes; | |
328 | u8 *bytes; | |
329 | u8 no_stop_bit; | |
330 | u8 i2c_transaction_delay; | |
ae491542 | 331 | } transactions[DP_REMOTE_I2C_READ_MAX_TRANSACTIONS]; |
ad7f8a1f DA |
332 | u8 read_i2c_device_id; |
333 | u8 num_bytes_read; | |
334 | }; | |
335 | ||
336 | struct drm_dp_remote_i2c_write { | |
337 | u8 port_number; | |
338 | u8 write_i2c_device_id; | |
339 | u8 num_bytes; | |
340 | u8 *bytes; | |
341 | }; | |
342 | ||
343 | /* this covers ENUM_RESOURCES, POWER_DOWN_PHY, POWER_UP_PHY */ | |
344 | struct drm_dp_port_number_req { | |
345 | u8 port_number; | |
346 | }; | |
347 | ||
348 | struct drm_dp_enum_path_resources_ack_reply { | |
349 | u8 port_number; | |
350 | u16 full_payload_bw_number; | |
351 | u16 avail_payload_bw_number; | |
352 | }; | |
353 | ||
354 | /* covers POWER_DOWN_PHY, POWER_UP_PHY */ | |
355 | struct drm_dp_port_number_rep { | |
356 | u8 port_number; | |
357 | }; | |
358 | ||
359 | struct drm_dp_query_payload { | |
360 | u8 port_number; | |
361 | u8 vcpi; | |
362 | }; | |
363 | ||
364 | struct drm_dp_resource_status_notify { | |
365 | u8 port_number; | |
366 | u8 guid[16]; | |
367 | u16 available_pbn; | |
368 | }; | |
369 | ||
370 | struct drm_dp_query_payload_ack_reply { | |
371 | u8 port_number; | |
268de653 | 372 | u16 allocated_pbn; |
ad7f8a1f DA |
373 | }; |
374 | ||
375 | struct drm_dp_sideband_msg_req_body { | |
376 | u8 req_type; | |
377 | union ack_req { | |
378 | struct drm_dp_connection_status_notify conn_stat; | |
379 | struct drm_dp_port_number_req port_num; | |
380 | struct drm_dp_resource_status_notify resource_stat; | |
381 | ||
382 | struct drm_dp_query_payload query_payload; | |
383 | struct drm_dp_allocate_payload allocate_payload; | |
384 | ||
385 | struct drm_dp_remote_dpcd_read dpcd_read; | |
386 | struct drm_dp_remote_dpcd_write dpcd_write; | |
387 | ||
388 | struct drm_dp_remote_i2c_read i2c_read; | |
389 | struct drm_dp_remote_i2c_write i2c_write; | |
390 | } u; | |
391 | }; | |
392 | ||
393 | struct drm_dp_sideband_msg_reply_body { | |
394 | u8 reply_type; | |
395 | u8 req_type; | |
396 | union ack_replies { | |
397 | struct drm_dp_nak_reply nak; | |
398 | struct drm_dp_link_address_ack_reply link_addr; | |
399 | struct drm_dp_port_number_rep port_number; | |
400 | ||
401 | struct drm_dp_enum_path_resources_ack_reply path_resources; | |
402 | struct drm_dp_allocate_payload_ack_reply allocate_payload; | |
403 | struct drm_dp_query_payload_ack_reply query_payload; | |
404 | ||
405 | struct drm_dp_remote_dpcd_read_ack_reply remote_dpcd_read_ack; | |
406 | struct drm_dp_remote_dpcd_write_ack_reply remote_dpcd_write_ack; | |
407 | struct drm_dp_remote_dpcd_write_nak_reply remote_dpcd_write_nack; | |
408 | ||
409 | struct drm_dp_remote_i2c_read_ack_reply remote_i2c_read_ack; | |
410 | struct drm_dp_remote_i2c_read_nak_reply remote_i2c_read_nack; | |
411 | struct drm_dp_remote_i2c_write_ack_reply remote_i2c_write_ack; | |
412 | } u; | |
413 | }; | |
414 | ||
415 | /* msg is queued to be put into a slot */ | |
416 | #define DRM_DP_SIDEBAND_TX_QUEUED 0 | |
417 | /* msg has started transmitting on a slot - still on msgq */ | |
418 | #define DRM_DP_SIDEBAND_TX_START_SEND 1 | |
419 | /* msg has finished transmitting on a slot - removed from msgq only in slot */ | |
420 | #define DRM_DP_SIDEBAND_TX_SENT 2 | |
421 | /* msg has received a response - removed from slot */ | |
422 | #define DRM_DP_SIDEBAND_TX_RX 3 | |
423 | #define DRM_DP_SIDEBAND_TX_TIMEOUT 4 | |
424 | ||
425 | struct drm_dp_sideband_msg_tx { | |
426 | u8 msg[256]; | |
427 | u8 chunk[48]; | |
428 | u8 cur_offset; | |
429 | u8 cur_len; | |
430 | struct drm_dp_mst_branch *dst; | |
431 | struct list_head next; | |
432 | int seqno; | |
433 | int state; | |
434 | bool path_msg; | |
435 | struct drm_dp_sideband_msg_reply_body reply; | |
436 | }; | |
437 | ||
438 | /* sideband msg handler */ | |
439 | struct drm_dp_mst_topology_mgr; | |
440 | struct drm_dp_mst_topology_cbs { | |
441 | /* create a connector for a port */ | |
12e6cecd | 442 | struct drm_connector *(*add_connector)(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, const char *path); |
d9515c5e | 443 | void (*register_connector)(struct drm_connector *connector); |
ad7f8a1f DA |
444 | void (*destroy_connector)(struct drm_dp_mst_topology_mgr *mgr, |
445 | struct drm_connector *connector); | |
ad7f8a1f DA |
446 | }; |
447 | ||
448 | #define DP_MAX_PAYLOAD (sizeof(unsigned long) * 8) | |
449 | ||
450 | #define DP_PAYLOAD_LOCAL 1 | |
451 | #define DP_PAYLOAD_REMOTE 2 | |
452 | #define DP_PAYLOAD_DELETE_LOCAL 3 | |
453 | ||
454 | struct drm_dp_payload { | |
455 | int payload_state; | |
456 | int start_slot; | |
457 | int num_slots; | |
dfda0df3 | 458 | int vcpi; |
ad7f8a1f DA |
459 | }; |
460 | ||
a4370c77 VS |
461 | #define to_dp_mst_topology_state(x) container_of(x, struct drm_dp_mst_topology_state, base) |
462 | ||
eceae147 LP |
463 | struct drm_dp_vcpi_allocation { |
464 | struct drm_dp_mst_port *port; | |
465 | int vcpi; | |
466 | struct list_head next; | |
467 | }; | |
468 | ||
3f3353b7 | 469 | struct drm_dp_mst_topology_state { |
a4370c77 | 470 | struct drm_private_state base; |
eceae147 | 471 | struct list_head vcpis; |
3f3353b7 PD |
472 | struct drm_dp_mst_topology_mgr *mgr; |
473 | }; | |
474 | ||
a4370c77 VS |
475 | #define to_dp_mst_topology_mgr(x) container_of(x, struct drm_dp_mst_topology_mgr, base) |
476 | ||
ad7f8a1f DA |
477 | /** |
478 | * struct drm_dp_mst_topology_mgr - DisplayPort MST manager | |
ad7f8a1f DA |
479 | * |
480 | * This struct represents the toplevel displayport MST topology manager. | |
481 | * There should be one instance of this for every MST capable DP connector | |
482 | * on the GPU. | |
483 | */ | |
484 | struct drm_dp_mst_topology_mgr { | |
a4370c77 VS |
485 | /** |
486 | * @base: Base private object for atomic | |
487 | */ | |
488 | struct drm_private_obj base; | |
489 | ||
132d49d7 DV |
490 | /** |
491 | * @dev: device pointer for adding i2c devices etc. | |
492 | */ | |
7b0a89a6 | 493 | struct drm_device *dev; |
132d49d7 DV |
494 | /** |
495 | * @cbs: callbacks for connector addition and destruction. | |
496 | */ | |
69a0f89c | 497 | const struct drm_dp_mst_topology_cbs *cbs; |
132d49d7 DV |
498 | /** |
499 | * @max_dpcd_transaction_bytes: maximum number of bytes to read/write | |
500 | * in one go. | |
501 | */ | |
ad7f8a1f | 502 | int max_dpcd_transaction_bytes; |
132d49d7 DV |
503 | /** |
504 | * @aux: AUX channel for the DP MST connector this topology mgr is
505 | * controlling. | |
506 | */ | |
507 | struct drm_dp_aux *aux; | |
508 | /** | |
509 | * @max_payloads: maximum number of payloads the GPU can generate. | |
510 | */ | |
ad7f8a1f | 511 | int max_payloads; |
132d49d7 DV |
512 | /** |
513 | * @conn_base_id: DRM connector ID this mgr is connected to. Only used | |
514 | * to build the MST connector path value. | |
515 | */ | |
ad7f8a1f DA |
516 | int conn_base_id; |
517 | ||
132d49d7 | 518 | /** |
85783369 | 519 | * @down_rep_recv: Message receiver state for down replies. |
132d49d7 | 520 | */ |
ad7f8a1f | 521 | struct drm_dp_sideband_msg_rx down_rep_recv; |
132d49d7 | 522 | /** |
85783369 | 523 | * @up_req_recv: Message receiver state for up requests. |
132d49d7 | 524 | */ |
ad7f8a1f DA |
525 | struct drm_dp_sideband_msg_rx up_req_recv; |
526 | ||
132d49d7 DV |
527 | /** |
528 | * @lock: protects mst state, primary, dpcd. | |
529 | */ | |
530 | struct mutex lock; | |
ad7f8a1f | 531 | |
14692a36 LP |
532 | /** |
533 | * @probe_lock: Prevents @work and @up_req_work, the only writers of | |
534 | * &drm_dp_mst_port.mstb and &drm_dp_mst_branch.ports, from racing | |
535 | * while they update the topology. | |
536 | */ | |
537 | struct mutex probe_lock; | |
538 | ||
132d49d7 DV |
539 | /** |
540 | * @mst_state: If this manager is enabled for an MST capable port. False | |
541 | * if no MST sink/branch devices are connected.
542 | */ | |
ad7f8a1f | 543 | bool mst_state; |
132d49d7 DV |
544 | /** |
545 | * @mst_primary: Pointer to the primary/first branch device. | |
546 | */ | |
ad7f8a1f | 547 | struct drm_dp_mst_branch *mst_primary; |
5e93b820 | 548 | |
132d49d7 DV |
549 | /** |
550 | * @dpcd: Cache of DPCD for primary port. | |
551 | */ | |
ad7f8a1f | 552 | u8 dpcd[DP_RECEIVER_CAP_SIZE]; |
132d49d7 DV |
553 | /** |
554 | * @sink_count: Sink count from DEVICE_SERVICE_IRQ_VECTOR_ESI0. | |
555 | */ | |
ad7f8a1f | 556 | u8 sink_count; |
132d49d7 DV |
557 | /** |
558 | * @pbn_div: PBN to slots divisor. | |
559 | */ | |
ad7f8a1f | 560 | int pbn_div; |
a538d613 | 561 | |
3f3353b7 PD |
562 | /** |
563 | * @funcs: Atomic helper callbacks | |
564 | */ | |
565 | const struct drm_private_state_funcs *funcs; | |
566 | ||
132d49d7 | 567 | /** |
6806cdf9 DV |
568 | * @qlock: protects @tx_msg_downq, the &drm_dp_mst_branch.tx_slots and
569 | * &drm_dp_sideband_msg_tx.state once they are queued | |
132d49d7 | 570 | */ |
ad7f8a1f | 571 | struct mutex qlock; |
132d49d7 DV |
572 | /** |
573 | * @tx_msg_downq: List of pending down replies. | |
574 | */ | |
ad7f8a1f | 575 | struct list_head tx_msg_downq; |
ad7f8a1f | 576 | |
132d49d7 DV |
577 | /** |
578 | * @payload_lock: Protect payload information. | |
579 | */ | |
ad7f8a1f | 580 | struct mutex payload_lock; |
132d49d7 DV |
581 | /** |
582 | * @proposed_vcpis: Array of pointers for the new VCPI allocation. The | |
6806cdf9 | 583 | * VCPI structure itself is &drm_dp_mst_port.vcpi. |
132d49d7 | 584 | */ |
ad7f8a1f | 585 | struct drm_dp_vcpi **proposed_vcpis; |
132d49d7 DV |
586 | /** |
587 | * @payloads: Array of payloads. | |
588 | */ | |
ad7f8a1f | 589 | struct drm_dp_payload *payloads; |
132d49d7 DV |
590 | /** |
591 | * @payload_mask: Elements of @payloads actually in use. Since | |
592 | * reallocation of active outputs isn't possible gaps can be created by | |
593 | * disabling outputs out of order compared to how they've been enabled. | |
594 | */ | |
ad7f8a1f | 595 | unsigned long payload_mask; |
132d49d7 DV |
596 | /** |
597 | * @vcpi_mask: Similar to @payload_mask, but for @proposed_vcpis. | |
598 | */ | |
dfda0df3 | 599 | unsigned long vcpi_mask; |
ad7f8a1f | 600 | |
132d49d7 DV |
601 | /** |
602 | * @tx_waitq: Wait to queue stall for the tx worker. | |
603 | */ | |
ad7f8a1f | 604 | wait_queue_head_t tx_waitq; |
132d49d7 DV |
605 | /** |
606 | * @work: Probe work. | |
607 | */ | |
ad7f8a1f | 608 | struct work_struct work; |
132d49d7 DV |
609 | /** |
610 | * @tx_work: Sideband transmit worker. This can nest within the main | |
611 | * @work worker for each transaction @work launches. | |
612 | */ | |
ad7f8a1f | 613 | struct work_struct tx_work; |
6b8eeca6 | 614 | |
132d49d7 | 615 | /** |
7cb12d48 LP |
616 | * @destroy_port_list: List of to be destroyed connectors. |
617 | */ | |
618 | struct list_head destroy_port_list; | |
619 | /** | |
620 | * @destroy_branch_device_list: List of to be destroyed branch | |
621 | * devices. | |
132d49d7 | 622 | */ |
7cb12d48 | 623 | struct list_head destroy_branch_device_list; |
132d49d7 | 624 | /** |
7cb12d48 LP |
625 | * @delayed_destroy_lock: Protects @destroy_port_list and |
626 | * @destroy_branch_device_list. | |
132d49d7 | 627 | */ |
7cb12d48 | 628 | struct mutex delayed_destroy_lock; |
132d49d7 | 629 | /** |
7cb12d48 LP |
630 | * @delayed_destroy_work: Work item to destroy MST port and branch |
631 | * devices, needed to avoid locking inversion. | |
132d49d7 | 632 | */ |
7cb12d48 | 633 | struct work_struct delayed_destroy_work; |
9408cc94 LP |
634 | |
635 | /** | |
636 | * @up_req_list: List of pending up requests from the topology that | |
637 | * need to be processed, in chronological order. | |
638 | */ | |
639 | struct list_head up_req_list; | |
640 | /** | |
641 | * @up_req_lock: Protects @up_req_list | |
642 | */ | |
643 | struct mutex up_req_lock; | |
644 | /** | |
645 | * @up_req_work: Work item to process up requests received from the | |
646 | * topology. Needed to avoid blocking hotplug handling and sideband | |
647 | * transmissions. | |
648 | */ | |
649 | struct work_struct up_req_work; | |
ad7f8a1f DA |
650 | }; |
651 | ||
7b0a89a6 DP |
652 | int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr, |
653 | struct drm_device *dev, struct drm_dp_aux *aux, | |
654 | int max_dpcd_transaction_bytes, | |
655 | int max_payloads, int conn_base_id); | |
ad7f8a1f DA |
656 | |
657 | void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr); | |
658 | ||
659 | ||
660 | int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool mst_state); | |
661 | ||
662 | ||
663 | int drm_dp_mst_hpd_irq(struct drm_dp_mst_topology_mgr *mgr, u8 *esi, bool *handled); | |
664 | ||
665 | ||
3f9b3f02 LP |
666 | int |
667 | drm_dp_mst_detect_port(struct drm_connector *connector, | |
668 | struct drm_modeset_acquire_ctx *ctx, | |
669 | struct drm_dp_mst_topology_mgr *mgr, | |
670 | struct drm_dp_mst_port *port); | |
ad7f8a1f | 671 | |
ef8f9bea LY |
672 | bool drm_dp_mst_port_has_audio(struct drm_dp_mst_topology_mgr *mgr, |
673 | struct drm_dp_mst_port *port); | |
ad7f8a1f DA |
674 | struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port); |
675 | ||
676 | ||
677 | int drm_dp_calc_pbn_mode(int clock, int bpp); | |
678 | ||
679 | ||
1e797f55 PD |
680 | bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, |
681 | struct drm_dp_mst_port *port, int pbn, int slots); | |
ad7f8a1f | 682 | |
87f5942d DA |
683 | int drm_dp_mst_get_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port); |
684 | ||
ad7f8a1f DA |
685 | |
686 | void drm_dp_mst_reset_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port); | |
687 | ||
688 | ||
689 | void drm_dp_mst_deallocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, | |
690 | struct drm_dp_mst_port *port); | |
691 | ||
692 | ||
693 | int drm_dp_find_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, | |
694 | int pbn); | |
695 | ||
696 | ||
697 | int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr); | |
698 | ||
699 | ||
700 | int drm_dp_update_payload_part2(struct drm_dp_mst_topology_mgr *mgr); | |
701 | ||
702 | int drm_dp_check_act_status(struct drm_dp_mst_topology_mgr *mgr); | |
703 | ||
704 | void drm_dp_mst_dump_topology(struct seq_file *m, | |
705 | struct drm_dp_mst_topology_mgr *mgr); | |
706 | ||
707 | void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr); | |
c235316d | 708 | int __must_check |
6f85f738 LP |
709 | drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr, |
710 | bool sync); | |
562836a2 VS |
711 | |
712 | ssize_t drm_dp_mst_dpcd_read(struct drm_dp_aux *aux, | |
713 | unsigned int offset, void *buffer, size_t size); | |
714 | ssize_t drm_dp_mst_dpcd_write(struct drm_dp_aux *aux, | |
715 | unsigned int offset, void *buffer, size_t size); | |
716 | ||
717 | int drm_dp_mst_connector_late_register(struct drm_connector *connector, | |
718 | struct drm_dp_mst_port *port); | |
719 | void drm_dp_mst_connector_early_unregister(struct drm_connector *connector, | |
720 | struct drm_dp_mst_port *port); | |
721 | ||
3f3353b7 PD |
722 | struct drm_dp_mst_topology_state *drm_atomic_get_mst_topology_state(struct drm_atomic_state *state, |
723 | struct drm_dp_mst_topology_mgr *mgr); | |
eceae147 LP |
724 | int __must_check |
725 | drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state, | |
726 | struct drm_dp_mst_topology_mgr *mgr, | |
727 | struct drm_dp_mst_port *port, int pbn); | |
728 | int __must_check | |
729 | drm_dp_atomic_release_vcpi_slots(struct drm_atomic_state *state, | |
730 | struct drm_dp_mst_topology_mgr *mgr, | |
731 | struct drm_dp_mst_port *port); | |
0bb9c2b2 DP |
732 | int drm_dp_send_power_updown_phy(struct drm_dp_mst_topology_mgr *mgr, |
733 | struct drm_dp_mst_port *port, bool power_up); | |
eceae147 | 734 | int __must_check drm_dp_mst_atomic_check(struct drm_atomic_state *state); |
3f3353b7 | 735 | |
ebcc0e6b LP |
736 | void drm_dp_mst_get_port_malloc(struct drm_dp_mst_port *port); |
737 | void drm_dp_mst_put_port_malloc(struct drm_dp_mst_port *port); | |
738 | ||
bea5c38f LP |
739 | extern const struct drm_private_state_funcs drm_dp_mst_topology_state_funcs; |
740 | ||
741 | /** | |
742 | * __drm_dp_mst_state_iter_get - private atomic state iterator function for | |
743 | * macro-internal use | |
744 | * @state: &struct drm_atomic_state pointer | |
745 | * @mgr: pointer to the &struct drm_dp_mst_topology_mgr iteration cursor | |
746 | * @old_state: optional pointer to the old &struct drm_dp_mst_topology_state | |
747 | * iteration cursor | |
748 | * @new_state: optional pointer to the new &struct drm_dp_mst_topology_state | |
749 | * iteration cursor | |
750 | * @i: int iteration cursor, for macro-internal use | |
751 | * | |
752 | * Used by for_each_oldnew_mst_mgr_in_state(), | |
753 | * for_each_old_mst_mgr_in_state(), and for_each_new_mst_mgr_in_state(). Don't | |
754 | * call this directly. | |
755 | * | |
756 | * Returns: | |
757 | * True if the current &struct drm_private_obj is a &struct | |
758 | * drm_dp_mst_topology_mgr, false otherwise. | |
759 | */ | |
760 | static inline bool | |
761 | __drm_dp_mst_state_iter_get(struct drm_atomic_state *state, | |
762 | struct drm_dp_mst_topology_mgr **mgr, | |
763 | struct drm_dp_mst_topology_state **old_state, | |
764 | struct drm_dp_mst_topology_state **new_state, | |
765 | int i) | |
766 | { | |
767 | struct __drm_private_objs_state *objs_state = &state->private_objs[i]; | |
768 | ||
769 | if (objs_state->ptr->funcs != &drm_dp_mst_topology_state_funcs) | |
770 | return false; | |
771 | ||
772 | *mgr = to_dp_mst_topology_mgr(objs_state->ptr); | |
773 | if (old_state) | |
774 | *old_state = to_dp_mst_topology_state(objs_state->old_state); | |
775 | if (new_state) | |
776 | *new_state = to_dp_mst_topology_state(objs_state->new_state); | |
777 | ||
778 | return true; | |
779 | } | |
780 | ||
781 | /** | |
782 | * for_each_oldnew_mst_mgr_in_state - iterate over all DP MST topology | |
783 | * managers in an atomic update | |
784 | * @__state: &struct drm_atomic_state pointer | |
785 | * @mgr: &struct drm_dp_mst_topology_mgr iteration cursor | |
786 | * @old_state: &struct drm_dp_mst_topology_state iteration cursor for the old | |
787 | * state | |
788 | * @new_state: &struct drm_dp_mst_topology_state iteration cursor for the new | |
789 | * state | |
790 | * @__i: int iteration cursor, for macro-internal use | |
791 | * | |
792 | * This iterates over all DRM DP MST topology managers in an atomic update, | |
793 | * tracking both old and new state. This is useful in places where the state | |
794 | * delta needs to be considered, for example in atomic check functions. | |
795 | */ | |
796 | #define for_each_oldnew_mst_mgr_in_state(__state, mgr, old_state, new_state, __i) \ | |
797 | for ((__i) = 0; (__i) < (__state)->num_private_objs; (__i)++) \ | |
798 | for_each_if(__drm_dp_mst_state_iter_get((__state), &(mgr), &(old_state), &(new_state), (__i))) | |
799 | ||
800 | /** | |
801 | * for_each_old_mst_mgr_in_state - iterate over all DP MST topology managers | |
802 | * in an atomic update | |
803 | * @__state: &struct drm_atomic_state pointer | |
804 | * @mgr: &struct drm_dp_mst_topology_mgr iteration cursor | |
805 | * @old_state: &struct drm_dp_mst_topology_state iteration cursor for the old | |
806 | * state | |
807 | * @__i: int iteration cursor, for macro-internal use | |
808 | * | |
809 | * This iterates over all DRM DP MST topology managers in an atomic update, | |
810 | * tracking only the old state. This is useful in disable functions, where we | |
811 | * need the old state the hardware is still in. | |
812 | */ | |
813 | #define for_each_old_mst_mgr_in_state(__state, mgr, old_state, __i) \ | |
814 | for ((__i) = 0; (__i) < (__state)->num_private_objs; (__i)++) \ | |
815 | for_each_if(__drm_dp_mst_state_iter_get((__state), &(mgr), &(old_state), NULL, (__i))) | |
816 | ||
817 | /** | |
818 | * for_each_new_mst_mgr_in_state - iterate over all DP MST topology managers | |
819 | * in an atomic update | |
820 | * @__state: &struct drm_atomic_state pointer | |
821 | * @mgr: &struct drm_dp_mst_topology_mgr iteration cursor | |
822 | * @new_state: &struct drm_dp_mst_topology_state iteration cursor for the new | |
823 | * state | |
824 | * @__i: int iteration cursor, for macro-internal use | |
825 | * | |
826 | * This iterates over all DRM DP MST topology managers in an atomic update, | |
827 | * tracking only the new state. This is useful in enable functions, where we | |
828 | * need the new state the hardware should be in when the atomic commit | |
829 | * operation has completed. | |
830 | */ | |
831 | #define for_each_new_mst_mgr_in_state(__state, mgr, new_state, __i) \ | |
832 | for ((__i) = 0; (__i) < (__state)->num_private_objs; (__i)++) \ | |
833 | for_each_if(__drm_dp_mst_state_iter_get((__state), &(mgr), NULL, &(new_state), (__i))) | |
834 | ||
ad7f8a1f | 835 | #endif |