/*
 * Copyright © 2014 Red Hat
 *
 * Permission to use, copy, modify, distribute, and sell this software and its
 * documentation for any purpose is hereby granted without fee, provided that
 * the above copyright notice appear in all copies and that both that copyright
 * notice and this permission notice appear in supporting documentation, and
 * that the name of the copyright holders not be used in advertising or
 * publicity pertaining to distribution of the software without specific,
 * written prior permission. The copyright holders make no representations
 * about the suitability of this software for any purpose. It is provided "as
 * is" without express or implied warranty.
 *
 * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
 * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
 * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
 * OF THIS SOFTWARE.
 */
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/seq_file.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fixed.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
/**
 * DOC: dp mst helper
 *
 * These functions contain parts of the DisplayPort 1.2a MultiStream Transport
 * protocol. The helpers contain a topology manager and bandwidth manager.
 * The helpers encapsulate the sending and receiving of sideband msgs.
 */
static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
				  char *buf);
static int test_calc_pbn_mode(void);

static void drm_dp_mst_topology_put_port(struct drm_dp_mst_port *port);

static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
				     int id,
				     struct drm_dp_payload *payload);

static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
				  struct drm_dp_mst_port *port,
				  int offset, int size, u8 *bytes);

static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
				     struct drm_dp_mst_branch *mstb);
static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
					   struct drm_dp_mst_branch *mstb,
					   struct drm_dp_mst_port *port);
static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
				 u8 *guid);

static int drm_dp_mst_register_i2c_bus(struct drm_dp_aux *aux);
static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_aux *aux);
static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr);
#define DP_STR(x) [DP_ ## x] = #x

static const char *drm_dp_mst_req_type_str(u8 req_type)
{
	static const char * const req_type_str[] = {
		DP_STR(GET_MSG_TRANSACTION_VERSION),
		DP_STR(LINK_ADDRESS),
		DP_STR(CONNECTION_STATUS_NOTIFY),
		DP_STR(ENUM_PATH_RESOURCES),
		DP_STR(ALLOCATE_PAYLOAD),
		DP_STR(QUERY_PAYLOAD),
		DP_STR(RESOURCE_STATUS_NOTIFY),
		DP_STR(CLEAR_PAYLOAD_ID_TABLE),
		DP_STR(REMOTE_DPCD_READ),
		DP_STR(REMOTE_DPCD_WRITE),
		DP_STR(REMOTE_I2C_READ),
		DP_STR(REMOTE_I2C_WRITE),
		DP_STR(POWER_UP_PHY),
		DP_STR(POWER_DOWN_PHY),
		DP_STR(SINK_EVENT_NOTIFY),
		DP_STR(QUERY_STREAM_ENC_STATUS),
	};

	if (req_type >= ARRAY_SIZE(req_type_str) ||
	    !req_type_str[req_type])
		return "unknown";

	return req_type_str[req_type];
}

#undef DP_STR
#define DP_STR(x) [DP_NAK_ ## x] = #x

static const char *drm_dp_mst_nak_reason_str(u8 nak_reason)
{
	static const char * const nak_reason_str[] = {
		DP_STR(WRITE_FAILURE),
		DP_STR(INVALID_READ),
		DP_STR(CRC_FAILURE),
		DP_STR(BAD_PARAM),
		DP_STR(DEFER),
		DP_STR(LINK_FAILURE),
		DP_STR(NO_RESOURCES),
		DP_STR(DPCD_FAIL),
		DP_STR(I2C_NAK),
		DP_STR(ALLOCATE_FAIL),
	};

	if (nak_reason >= ARRAY_SIZE(nak_reason_str) ||
	    !nak_reason_str[nak_reason])
		return "unknown";

	return nak_reason_str[nak_reason];
}

#undef DP_STR
/* sideband msg handling */
static u8 drm_dp_msg_header_crc4(const uint8_t *data, size_t num_nibbles)
{
	u8 bitmask = 0x80;
	u8 bitshift = 7;
	u8 array_index = 0;
	int number_of_bits = num_nibbles * 4;
	u8 remainder = 0;

	while (number_of_bits != 0) {
		number_of_bits--;
		remainder <<= 1;
		remainder |= (data[array_index] & bitmask) >> bitshift;
		bitmask >>= 1;
		bitshift--;
		if (bitmask == 0) {
			bitmask = 0x80;
			bitshift = 7;
			array_index++;
		}
		if ((remainder & 0x10) == 0x10)
			remainder ^= 0x13;
	}

	number_of_bits = 4;
	while (number_of_bits != 0) {
		number_of_bits--;
		remainder <<= 1;
		if ((remainder & 0x10) != 0)
			remainder ^= 0x13;
	}

	return remainder;
}
static u8 drm_dp_msg_data_crc4(const uint8_t *data, u8 number_of_bytes)
{
	u8 bitmask = 0x80;
	u8 bitshift = 7;
	u8 array_index = 0;
	int number_of_bits = number_of_bytes * 8;
	u16 remainder = 0;

	while (number_of_bits != 0) {
		number_of_bits--;
		remainder <<= 1;
		remainder |= (data[array_index] & bitmask) >> bitshift;
		bitmask >>= 1;
		bitshift--;
		if (bitmask == 0) {
			bitmask = 0x80;
			bitshift = 7;
			array_index++;
		}
		if ((remainder & 0x100) == 0x100)
			remainder ^= 0xd5;
	}

	number_of_bits = 8;
	while (number_of_bits != 0) {
		number_of_bits--;
		remainder <<= 1;
		if ((remainder & 0x100) != 0)
			remainder ^= 0xd5;
	}

	return remainder & 0xff;
}
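/*
 * N.B.: despite sharing the _crc4 suffix with the header helper above, this
 * routine computes the 8-bit sideband *data* CRC (note the 0x100 tap and the
 * eight trailing zero bits); only the header CRC is 4 bits wide.
 */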
static inline u8 drm_dp_calc_sb_hdr_size(struct drm_dp_sideband_msg_hdr *hdr)
{
	u8 size = 3;

	size += (hdr->lct / 2);
	return size;
}
static void drm_dp_encode_sideband_msg_hdr(struct drm_dp_sideband_msg_hdr *hdr,
					   u8 *buf, int *len)
{
	int idx = 0;
	int i;
	u8 crc4;

	buf[idx++] = ((hdr->lct & 0xf) << 4) | (hdr->lcr & 0xf);
	for (i = 0; i < (hdr->lct / 2); i++)
		buf[idx++] = hdr->rad[i];
	buf[idx++] = (hdr->broadcast << 7) | (hdr->path_msg << 6) |
		(hdr->msg_len & 0x3f);
	buf[idx++] = (hdr->somt << 7) | (hdr->eomt << 6) | (hdr->seqno << 4);

	crc4 = drm_dp_msg_header_crc4(buf, (idx * 2) - 1);
	buf[idx - 1] |= (crc4 & 0xf);

	*len = idx;
}
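/*
 * Usage sketch (values assumed, not taken from the spec text): encoding a
 * header for a directly attached branch (lct == 1, so no RAD bytes) yields
 * three bytes.
 *
 *	struct drm_dp_sideband_msg_hdr hdr = {
 *		.lct = 1, .lcr = 0, .somt = 1, .eomt = 1, .msg_len = 12,
 *	};
 *	u8 buf[3];
 *	int len = 0;
 *
 *	drm_dp_encode_sideband_msg_hdr(&hdr, buf, &len);
 *	// len == 3: the lct/lcr byte, the broadcast/path_msg/msg_len byte,
 *	// and the somt/eomt/seqno byte with the 4-bit CRC in its low nibble.
 */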
static bool drm_dp_decode_sideband_msg_hdr(struct drm_dp_sideband_msg_hdr *hdr,
					   u8 *buf, int buflen, u8 *hdrlen)
{
	u8 crc4;
	u8 len;
	int i;
	u8 idx;
	if (buf[0] == 0)
		return false;
	len = 3;
	len += ((buf[0] & 0xf0) >> 4) / 2;
	if (len > buflen)
		return false;
	crc4 = drm_dp_msg_header_crc4(buf, (len * 2) - 1);

	if ((crc4 & 0xf) != (buf[len - 1] & 0xf)) {
		DRM_DEBUG_KMS("crc4 mismatch 0x%x 0x%x\n", crc4, buf[len - 1]);
		return false;
	}

	hdr->lct = (buf[0] & 0xf0) >> 4;
	hdr->lcr = (buf[0] & 0xf);
	idx = 1;
	for (i = 0; i < (hdr->lct / 2); i++)
		hdr->rad[i] = buf[idx++];
	hdr->broadcast = (buf[idx] >> 7) & 0x1;
	hdr->path_msg = (buf[idx] >> 6) & 0x1;
	hdr->msg_len = buf[idx] & 0x3f;
	idx++;
	hdr->somt = (buf[idx] >> 7) & 0x1;
	hdr->eomt = (buf[idx] >> 6) & 0x1;
	hdr->seqno = (buf[idx] >> 4) & 0x1;
	idx++;
	*hdrlen = idx;
	return true;
}
static void drm_dp_encode_sideband_req(struct drm_dp_sideband_msg_req_body *req,
				       struct drm_dp_sideband_msg_tx *raw)
{
	int idx = 0;
	int i;
	u8 *buf = raw->msg;
	buf[idx++] = req->req_type & 0x7f;

	switch (req->req_type) {
	case DP_ENUM_PATH_RESOURCES:
		buf[idx] = (req->u.port_num.port_number & 0xf) << 4;
		idx++;
		break;
	case DP_ALLOCATE_PAYLOAD:
		buf[idx] = (req->u.allocate_payload.port_number & 0xf) << 4 |
			(req->u.allocate_payload.number_sdp_streams & 0xf);
		idx++;
		buf[idx] = (req->u.allocate_payload.vcpi & 0x7f);
		idx++;
		buf[idx] = (req->u.allocate_payload.pbn >> 8);
		idx++;
		buf[idx] = (req->u.allocate_payload.pbn & 0xff);
		idx++;
		for (i = 0; i < req->u.allocate_payload.number_sdp_streams / 2; i++) {
			buf[idx] = ((req->u.allocate_payload.sdp_stream_sink[i * 2] & 0xf) << 4) |
				(req->u.allocate_payload.sdp_stream_sink[i * 2 + 1] & 0xf);
			idx++;
		}
		if (req->u.allocate_payload.number_sdp_streams & 1) {
			i = req->u.allocate_payload.number_sdp_streams - 1;
			buf[idx] = (req->u.allocate_payload.sdp_stream_sink[i] & 0xf) << 4;
			idx++;
		}
		break;
	case DP_QUERY_PAYLOAD:
		buf[idx] = (req->u.query_payload.port_number & 0xf) << 4;
		idx++;
		buf[idx] = (req->u.query_payload.vcpi & 0x7f);
		idx++;
		break;
	case DP_REMOTE_DPCD_READ:
		buf[idx] = (req->u.dpcd_read.port_number & 0xf) << 4;
		buf[idx] |= ((req->u.dpcd_read.dpcd_address & 0xf0000) >> 16) & 0xf;
		idx++;
		buf[idx] = (req->u.dpcd_read.dpcd_address & 0xff00) >> 8;
		idx++;
		buf[idx] = (req->u.dpcd_read.dpcd_address & 0xff);
		idx++;
		buf[idx] = (req->u.dpcd_read.num_bytes);
		idx++;
		break;

	case DP_REMOTE_DPCD_WRITE:
		buf[idx] = (req->u.dpcd_write.port_number & 0xf) << 4;
		buf[idx] |= ((req->u.dpcd_write.dpcd_address & 0xf0000) >> 16) & 0xf;
		idx++;
		buf[idx] = (req->u.dpcd_write.dpcd_address & 0xff00) >> 8;
		idx++;
		buf[idx] = (req->u.dpcd_write.dpcd_address & 0xff);
		idx++;
		buf[idx] = (req->u.dpcd_write.num_bytes);
		idx++;
		memcpy(&buf[idx], req->u.dpcd_write.bytes, req->u.dpcd_write.num_bytes);
		idx += req->u.dpcd_write.num_bytes;
		break;
	case DP_REMOTE_I2C_READ:
		buf[idx] = (req->u.i2c_read.port_number & 0xf) << 4;
		buf[idx] |= (req->u.i2c_read.num_transactions & 0x3);
		idx++;
		for (i = 0; i < (req->u.i2c_read.num_transactions & 0x3); i++) {
			buf[idx] = req->u.i2c_read.transactions[i].i2c_dev_id & 0x7f;
			idx++;
			buf[idx] = req->u.i2c_read.transactions[i].num_bytes;
			idx++;
			memcpy(&buf[idx], req->u.i2c_read.transactions[i].bytes, req->u.i2c_read.transactions[i].num_bytes);
			idx += req->u.i2c_read.transactions[i].num_bytes;
			buf[idx] = (req->u.i2c_read.transactions[i].no_stop_bit & 0x1) << 5;
			buf[idx] |= (req->u.i2c_read.transactions[i].i2c_transaction_delay & 0xf);
			idx++;
		}
		buf[idx] = (req->u.i2c_read.read_i2c_device_id) & 0x7f;
		idx++;
		buf[idx] = (req->u.i2c_read.num_bytes_read);
		idx++;
		break;

	case DP_REMOTE_I2C_WRITE:
		buf[idx] = (req->u.i2c_write.port_number & 0xf) << 4;
		idx++;
		buf[idx] = (req->u.i2c_write.write_i2c_device_id) & 0x7f;
		idx++;
		buf[idx] = (req->u.i2c_write.num_bytes);
		idx++;
		memcpy(&buf[idx], req->u.i2c_write.bytes, req->u.i2c_write.num_bytes);
		idx += req->u.i2c_write.num_bytes;
		break;

	case DP_POWER_DOWN_PHY:
	case DP_POWER_UP_PHY:
		buf[idx] = (req->u.port_num.port_number & 0xf) << 4;
		idx++;
		break;
	}
	raw->cur_len = idx;
}
static void drm_dp_crc_sideband_chunk_req(u8 *msg, u8 len)
{
	u8 crc4;
	crc4 = drm_dp_msg_data_crc4(msg, len);
	msg[len] = crc4;
}
static void drm_dp_encode_sideband_reply(struct drm_dp_sideband_msg_reply_body *rep,
					 struct drm_dp_sideband_msg_tx *raw)
{
	int idx = 0;
	u8 *buf = raw->msg;

	buf[idx++] = (rep->reply_type & 0x1) << 7 | (rep->req_type & 0x7f);

	raw->cur_len = idx;
}
/* this adds a chunk of msg to the builder to get the final msg */
static bool drm_dp_sideband_msg_build(struct drm_dp_sideband_msg_rx *msg,
				      u8 *replybuf, u8 replybuflen, bool hdr)
{
	int ret;
	u8 crc4;

	if (hdr) {
		u8 hdrlen;
		struct drm_dp_sideband_msg_hdr recv_hdr;
		ret = drm_dp_decode_sideband_msg_hdr(&recv_hdr, replybuf, replybuflen, &hdrlen);
		if (ret == false) {
			print_hex_dump(KERN_DEBUG, "failed hdr", DUMP_PREFIX_NONE, 16, 1, replybuf, replybuflen, false);
			return false;
		}

		/*
		 * ignore out-of-order messages or messages that are part of a
		 * failed transaction
		 */
		if (!recv_hdr.somt && !msg->have_somt)
			return false;

		/* get length contained in this portion */
		msg->curchunk_len = recv_hdr.msg_len;
		msg->curchunk_hdrlen = hdrlen;

		/* we have already gotten an somt - don't bother parsing */
		if (recv_hdr.somt && msg->have_somt)
			return false;

		if (recv_hdr.somt) {
			memcpy(&msg->initial_hdr, &recv_hdr, sizeof(struct drm_dp_sideband_msg_hdr));
			msg->have_somt = true;
		}
		if (recv_hdr.eomt)
			msg->have_eomt = true;

		/* copy the bytes for the remainder of this header chunk */
		msg->curchunk_idx = min(msg->curchunk_len, (u8)(replybuflen - hdrlen));
		memcpy(&msg->chunk[0], replybuf + hdrlen, msg->curchunk_idx);
	} else {
		memcpy(&msg->chunk[msg->curchunk_idx], replybuf, replybuflen);
		msg->curchunk_idx += replybuflen;
	}

	if (msg->curchunk_idx >= msg->curchunk_len) {
		/* do CRC */
		crc4 = drm_dp_msg_data_crc4(msg->chunk, msg->curchunk_len - 1);
		/* copy chunk into bigger msg */
		memcpy(&msg->msg[msg->curlen], msg->chunk, msg->curchunk_len - 1);
		msg->curlen += msg->curchunk_len - 1;
	}
	return true;
}
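/*
 * Usage sketch (receive path assumed): the first DPCD read of a sideband
 * chunk carries the header and is fed in with hdr == true; the remaining
 * reads of that chunk are appended with hdr == false. Once a chunk carrying
 * EOMT has been built, msg->have_eomt is set and the complete message in
 * msg->msg can be handed to the parse helpers below.
 */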
static bool drm_dp_sideband_parse_link_address(struct drm_dp_sideband_msg_rx *raw,
					       struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;
	int i;
	memcpy(repmsg->u.link_addr.guid, &raw->msg[idx], 16);
	idx += 16;
	repmsg->u.link_addr.nports = raw->msg[idx] & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	for (i = 0; i < repmsg->u.link_addr.nports; i++) {
		if (raw->msg[idx] & 0x80)
			repmsg->u.link_addr.ports[i].input_port = 1;

		repmsg->u.link_addr.ports[i].peer_device_type = (raw->msg[idx] >> 4) & 0x7;
		repmsg->u.link_addr.ports[i].port_number = (raw->msg[idx] & 0xf);

		idx++;
		if (idx > raw->curlen)
			goto fail_len;
		repmsg->u.link_addr.ports[i].mcs = (raw->msg[idx] >> 7) & 0x1;
		repmsg->u.link_addr.ports[i].ddps = (raw->msg[idx] >> 6) & 0x1;
		if (repmsg->u.link_addr.ports[i].input_port == 0)
			repmsg->u.link_addr.ports[i].legacy_device_plug_status = (raw->msg[idx] >> 5) & 0x1;
		idx++;
		if (idx > raw->curlen)
			goto fail_len;
		if (repmsg->u.link_addr.ports[i].input_port == 0) {
			repmsg->u.link_addr.ports[i].dpcd_revision = (raw->msg[idx]);
			idx++;
			if (idx > raw->curlen)
				goto fail_len;
			memcpy(repmsg->u.link_addr.ports[i].peer_guid, &raw->msg[idx], 16);
			idx += 16;
			if (idx > raw->curlen)
				goto fail_len;
			repmsg->u.link_addr.ports[i].num_sdp_streams = (raw->msg[idx] >> 4) & 0xf;
			repmsg->u.link_addr.ports[i].num_sdp_stream_sinks = (raw->msg[idx] & 0xf);
			idx++;
		}
		if (idx > raw->curlen)
			goto fail_len;
	}

	return true;
fail_len:
	DRM_DEBUG_KMS("link address reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}
static bool drm_dp_sideband_parse_remote_dpcd_read(struct drm_dp_sideband_msg_rx *raw,
						   struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;
	repmsg->u.remote_dpcd_read_ack.port_number = raw->msg[idx] & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.remote_dpcd_read_ack.num_bytes = raw->msg[idx];
	idx++;
	if (idx > raw->curlen)
		goto fail_len;

	memcpy(repmsg->u.remote_dpcd_read_ack.bytes, &raw->msg[idx], repmsg->u.remote_dpcd_read_ack.num_bytes);
	return true;
fail_len:
	DRM_DEBUG_KMS("remote dpcd read reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}
static bool drm_dp_sideband_parse_remote_dpcd_write(struct drm_dp_sideband_msg_rx *raw,
						    struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;
	repmsg->u.remote_dpcd_write_ack.port_number = raw->msg[idx] & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	return true;
fail_len:
	DRM_DEBUG_KMS("parse length fail %d %d\n", idx, raw->curlen);
	return false;
}
static bool drm_dp_sideband_parse_remote_i2c_read_ack(struct drm_dp_sideband_msg_rx *raw,
						      struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;

	repmsg->u.remote_i2c_read_ack.port_number = (raw->msg[idx] & 0xf);
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.remote_i2c_read_ack.num_bytes = raw->msg[idx];
	idx++;
	memcpy(repmsg->u.remote_i2c_read_ack.bytes, &raw->msg[idx], repmsg->u.remote_i2c_read_ack.num_bytes);
	return true;
fail_len:
	DRM_DEBUG_KMS("remote i2c reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}
static bool drm_dp_sideband_parse_enum_path_resources_ack(struct drm_dp_sideband_msg_rx *raw,
							  struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;
	repmsg->u.path_resources.port_number = (raw->msg[idx] >> 4) & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.path_resources.full_payload_bw_number = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
	idx += 2;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.path_resources.avail_payload_bw_number = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
	idx += 2;
	if (idx > raw->curlen)
		goto fail_len;
	return true;
fail_len:
	DRM_DEBUG_KMS("enum resource parse length fail %d %d\n", idx, raw->curlen);
	return false;
}
static bool drm_dp_sideband_parse_allocate_payload_ack(struct drm_dp_sideband_msg_rx *raw,
						       struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;
	repmsg->u.allocate_payload.port_number = (raw->msg[idx] >> 4) & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.allocate_payload.vcpi = raw->msg[idx];
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.allocate_payload.allocated_pbn = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
	idx += 2;
	if (idx > raw->curlen)
		goto fail_len;
	return true;
fail_len:
	DRM_DEBUG_KMS("allocate payload parse length fail %d %d\n", idx, raw->curlen);
	return false;
}
static bool drm_dp_sideband_parse_query_payload_ack(struct drm_dp_sideband_msg_rx *raw,
						    struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;
	repmsg->u.query_payload.port_number = (raw->msg[idx] >> 4) & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.query_payload.allocated_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]);
	idx += 2;
	if (idx > raw->curlen)
		goto fail_len;
	return true;
fail_len:
	DRM_DEBUG_KMS("query payload parse length fail %d %d\n", idx, raw->curlen);
	return false;
}
static bool drm_dp_sideband_parse_power_updown_phy_ack(struct drm_dp_sideband_msg_rx *raw,
						       struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;

	repmsg->u.port_number.port_number = (raw->msg[idx] >> 4) & 0xf;
	idx++;
	if (idx > raw->curlen) {
		DRM_DEBUG_KMS("power up/down phy parse length fail %d %d\n",
			      idx, raw->curlen);
		return false;
	}
	return true;
}
static bool drm_dp_sideband_parse_reply(struct drm_dp_sideband_msg_rx *raw,
					struct drm_dp_sideband_msg_reply_body *msg)
{
	memset(msg, 0, sizeof(*msg));
	msg->reply_type = (raw->msg[0] & 0x80) >> 7;
	msg->req_type = (raw->msg[0] & 0x7f);

	if (msg->reply_type == DP_SIDEBAND_REPLY_NAK) {
		memcpy(msg->u.nak.guid, &raw->msg[1], 16);
		msg->u.nak.reason = raw->msg[17];
		msg->u.nak.nak_data = raw->msg[18];
		return false;
	}

	switch (msg->req_type) {
	case DP_LINK_ADDRESS:
		return drm_dp_sideband_parse_link_address(raw, msg);
	case DP_QUERY_PAYLOAD:
		return drm_dp_sideband_parse_query_payload_ack(raw, msg);
	case DP_REMOTE_DPCD_READ:
		return drm_dp_sideband_parse_remote_dpcd_read(raw, msg);
	case DP_REMOTE_DPCD_WRITE:
		return drm_dp_sideband_parse_remote_dpcd_write(raw, msg);
	case DP_REMOTE_I2C_READ:
		return drm_dp_sideband_parse_remote_i2c_read_ack(raw, msg);
	case DP_ENUM_PATH_RESOURCES:
		return drm_dp_sideband_parse_enum_path_resources_ack(raw, msg);
	case DP_ALLOCATE_PAYLOAD:
		return drm_dp_sideband_parse_allocate_payload_ack(raw, msg);
	case DP_POWER_DOWN_PHY:
	case DP_POWER_UP_PHY:
		return drm_dp_sideband_parse_power_updown_phy_ack(raw, msg);
	default:
		DRM_ERROR("Got unknown reply 0x%02x (%s)\n", msg->req_type,
			  drm_dp_mst_req_type_str(msg->req_type));
		return false;
	}
}
static bool drm_dp_sideband_parse_connection_status_notify(struct drm_dp_sideband_msg_rx *raw,
							   struct drm_dp_sideband_msg_req_body *msg)
{
	int idx = 1;

	msg->u.conn_stat.port_number = (raw->msg[idx] & 0xf0) >> 4;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;

	memcpy(msg->u.conn_stat.guid, &raw->msg[idx], 16);
	idx += 16;
	if (idx > raw->curlen)
		goto fail_len;

	msg->u.conn_stat.legacy_device_plug_status = (raw->msg[idx] >> 6) & 0x1;
	msg->u.conn_stat.displayport_device_plug_status = (raw->msg[idx] >> 5) & 0x1;
	msg->u.conn_stat.message_capability_status = (raw->msg[idx] >> 4) & 0x1;
	msg->u.conn_stat.input_port = (raw->msg[idx] >> 3) & 0x1;
	msg->u.conn_stat.peer_device_type = (raw->msg[idx] & 0x7);
	idx++;
	return true;
fail_len:
	DRM_DEBUG_KMS("connection status reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}
static bool drm_dp_sideband_parse_resource_status_notify(struct drm_dp_sideband_msg_rx *raw,
							 struct drm_dp_sideband_msg_req_body *msg)
{
	int idx = 1;

	msg->u.resource_stat.port_number = (raw->msg[idx] & 0xf0) >> 4;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;

	memcpy(msg->u.resource_stat.guid, &raw->msg[idx], 16);
	idx += 16;
	if (idx > raw->curlen)
		goto fail_len;

	msg->u.resource_stat.available_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]);

	return true;
fail_len:
	DRM_DEBUG_KMS("resource status reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}
static bool drm_dp_sideband_parse_req(struct drm_dp_sideband_msg_rx *raw,
				      struct drm_dp_sideband_msg_req_body *msg)
{
	memset(msg, 0, sizeof(*msg));
	msg->req_type = (raw->msg[0] & 0x7f);

	switch (msg->req_type) {
	case DP_CONNECTION_STATUS_NOTIFY:
		return drm_dp_sideband_parse_connection_status_notify(raw, msg);
	case DP_RESOURCE_STATUS_NOTIFY:
		return drm_dp_sideband_parse_resource_status_notify(raw, msg);
	default:
		DRM_ERROR("Got unknown request 0x%02x (%s)\n", msg->req_type,
			  drm_dp_mst_req_type_str(msg->req_type));
		return false;
	}
}
static int build_dpcd_write(struct drm_dp_sideband_msg_tx *msg, u8 port_num, u32 offset, u8 num_bytes, u8 *bytes)
{
	struct drm_dp_sideband_msg_req_body req;

	req.req_type = DP_REMOTE_DPCD_WRITE;
	req.u.dpcd_write.port_number = port_num;
	req.u.dpcd_write.dpcd_address = offset;
	req.u.dpcd_write.num_bytes = num_bytes;
	req.u.dpcd_write.bytes = bytes;
	drm_dp_encode_sideband_req(&req, msg);

	return 0;
}
static int build_link_address(struct drm_dp_sideband_msg_tx *msg)
{
	struct drm_dp_sideband_msg_req_body req;

	req.req_type = DP_LINK_ADDRESS;
	drm_dp_encode_sideband_req(&req, msg);
	return 0;
}
static int build_enum_path_resources(struct drm_dp_sideband_msg_tx *msg, int port_num)
{
	struct drm_dp_sideband_msg_req_body req;

	req.req_type = DP_ENUM_PATH_RESOURCES;
	req.u.port_num.port_number = port_num;
	drm_dp_encode_sideband_req(&req, msg);
	msg->path_msg = true;
	return 0;
}
static int build_allocate_payload(struct drm_dp_sideband_msg_tx *msg, int port_num,
				  u8 vcpi, uint16_t pbn,
				  u8 number_sdp_streams,
				  u8 *sdp_stream_sink)
{
	struct drm_dp_sideband_msg_req_body req;
	memset(&req, 0, sizeof(req));
	req.req_type = DP_ALLOCATE_PAYLOAD;
	req.u.allocate_payload.port_number = port_num;
	req.u.allocate_payload.vcpi = vcpi;
	req.u.allocate_payload.pbn = pbn;
	req.u.allocate_payload.number_sdp_streams = number_sdp_streams;
	memcpy(req.u.allocate_payload.sdp_stream_sink, sdp_stream_sink,
	       number_sdp_streams);
	drm_dp_encode_sideband_req(&req, msg);
	msg->path_msg = true;
	return 0;
}
static int build_power_updown_phy(struct drm_dp_sideband_msg_tx *msg,
				  int port_num, bool power_up)
{
	struct drm_dp_sideband_msg_req_body req;

	if (power_up)
		req.req_type = DP_POWER_UP_PHY;
	else
		req.req_type = DP_POWER_DOWN_PHY;

	req.u.port_num.port_number = port_num;
	drm_dp_encode_sideband_req(&req, msg);
	msg->path_msg = true;
	return 0;
}
static int drm_dp_mst_assign_payload_id(struct drm_dp_mst_topology_mgr *mgr,
					struct drm_dp_vcpi *vcpi)
{
	int ret, vcpi_ret;

	mutex_lock(&mgr->payload_lock);
	ret = find_first_zero_bit(&mgr->payload_mask, mgr->max_payloads + 1);
	if (ret > mgr->max_payloads) {
		ret = -EINVAL;
		DRM_DEBUG_KMS("out of payload ids %d\n", ret);
		goto out_unlock;
	}

	vcpi_ret = find_first_zero_bit(&mgr->vcpi_mask, mgr->max_payloads + 1);
	if (vcpi_ret > mgr->max_payloads) {
		ret = -EINVAL;
		DRM_DEBUG_KMS("out of vcpi ids %d\n", ret);
		goto out_unlock;
	}

	set_bit(ret, &mgr->payload_mask);
	set_bit(vcpi_ret, &mgr->vcpi_mask);
	vcpi->vcpi = vcpi_ret + 1;
	mgr->proposed_vcpis[ret - 1] = vcpi;
out_unlock:
	mutex_unlock(&mgr->payload_lock);
	return ret;
}
static void drm_dp_mst_put_payload_id(struct drm_dp_mst_topology_mgr *mgr,
				      int vcpi)
{
	int i;
	if (vcpi == 0)
		return;

	mutex_lock(&mgr->payload_lock);
	DRM_DEBUG_KMS("putting payload %d\n", vcpi);
	clear_bit(vcpi - 1, &mgr->vcpi_mask);

	for (i = 0; i < mgr->max_payloads; i++) {
		if (mgr->proposed_vcpis[i])
			if (mgr->proposed_vcpis[i]->vcpi == vcpi) {
				mgr->proposed_vcpis[i] = NULL;
				clear_bit(i + 1, &mgr->payload_mask);
			}
	}
	mutex_unlock(&mgr->payload_lock);
}
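/*
 * Worked example (assuming payload id 0 is reserved at manager init, as the
 * +1/-1 offsets above suggest): with no payloads allocated yet,
 * drm_dp_mst_assign_payload_id() picks payload id 1 and vcpi bit 0, so
 * vcpi->vcpi == 1 and the vcpi is stored in proposed_vcpis[0];
 * drm_dp_mst_put_payload_id(mgr, 1) then clears both bits again.
 */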
static bool check_txmsg_state(struct drm_dp_mst_topology_mgr *mgr,
			      struct drm_dp_sideband_msg_tx *txmsg)
{
	unsigned int state;

	/*
	 * All updates to txmsg->state are protected by mgr->qlock, and the two
	 * cases we check here are terminal states. For those the barriers
	 * provided by the wake_up/wait_event pair are enough.
	 */
	state = READ_ONCE(txmsg->state);
	return (state == DRM_DP_SIDEBAND_TX_RX ||
		state == DRM_DP_SIDEBAND_TX_TIMEOUT);
}
static int drm_dp_mst_wait_tx_reply(struct drm_dp_mst_branch *mstb,
				    struct drm_dp_sideband_msg_tx *txmsg)
{
	struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
	int ret;

	ret = wait_event_timeout(mgr->tx_waitq,
				 check_txmsg_state(mgr, txmsg),
				 (4 * HZ));
	mutex_lock(&mstb->mgr->qlock);
	if (ret > 0) {
		if (txmsg->state == DRM_DP_SIDEBAND_TX_TIMEOUT) {
			ret = -EIO;
			goto out;
		}
	} else {
		DRM_DEBUG_KMS("timed out msg send %p %d %d\n", txmsg, txmsg->state, txmsg->seqno);

		/* dump some state */
		ret = -EIO;

		/* remove from q */
		if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED ||
		    txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND) {
			list_del(&txmsg->next);
		}

		if (txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND ||
		    txmsg->state == DRM_DP_SIDEBAND_TX_SENT) {
			mstb->tx_slots[txmsg->seqno] = NULL;
		}
	}
out:
	mutex_unlock(&mgr->qlock);

	return ret;
}
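/*
 * The senders later in this file all follow the same transaction pattern;
 * a trimmed sketch (error handling elided, fields as used in this file):
 *
 *	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
 *	txmsg->dst = mstb;
 *	build_link_address(txmsg);		// or another build_*() helper
 *	drm_dp_queue_down_tx(mgr, txmsg);
 *	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
 *	if (ret > 0 && txmsg->reply.reply_type != DP_SIDEBAND_REPLY_NAK)
 *		// use txmsg->reply
 *	kfree(txmsg);
 */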
static struct drm_dp_mst_branch *drm_dp_add_mst_branch_device(u8 lct, u8 *rad)
{
	struct drm_dp_mst_branch *mstb;

	mstb = kzalloc(sizeof(*mstb), GFP_KERNEL);
	if (!mstb)
		return NULL;

	mstb->lct = lct;
	if (lct > 1)
		memcpy(mstb->rad, rad, lct / 2);
	INIT_LIST_HEAD(&mstb->ports);
	kref_init(&mstb->topology_kref);
	kref_init(&mstb->malloc_kref);
	return mstb;
}
static void drm_dp_free_mst_branch_device(struct kref *kref)
{
	struct drm_dp_mst_branch *mstb =
		container_of(kref, struct drm_dp_mst_branch, malloc_kref);

	if (mstb->port_parent)
		drm_dp_mst_put_port_malloc(mstb->port_parent);

	kfree(mstb);
}
/**
 * DOC: Branch device and port refcounting
 *
 * Topology refcount overview
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * The refcounting schemes for &struct drm_dp_mst_branch and &struct
 * drm_dp_mst_port are somewhat unusual. Both ports and branch devices have
 * two different kinds of refcounts: topology refcounts, and malloc refcounts.
 *
 * Topology refcounts are not exposed to drivers, and are handled internally
 * by the DP MST helpers. The helpers use them in order to prevent the
 * in-memory topology state from being changed in the middle of critical
 * operations like changing the internal state of payload allocations. This
 * means each branch and port will be considered to be connected to the rest
 * of the topology until its topology refcount reaches zero. Additionally,
 * for ports this means that their associated &struct drm_connector will stay
 * registered with userspace until the port's refcount reaches 0.
 *
 * Malloc refcount overview
 * ~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * Malloc references are used to keep a &struct drm_dp_mst_port or &struct
 * drm_dp_mst_branch allocated even after all of its topology references have
 * been dropped, so that the driver or MST helpers can safely access each
 * branch's last known state before it was disconnected from the topology.
 * When the malloc refcount of a port or branch reaches 0, the memory
 * allocation containing the &struct drm_dp_mst_branch or &struct
 * drm_dp_mst_port respectively will be freed.
 *
 * For &struct drm_dp_mst_branch, malloc refcounts are not currently exposed
 * to drivers. As of writing this documentation, there are no drivers that
 * have a usecase for accessing &struct drm_dp_mst_branch outside of the MST
 * helpers. Exposing this API to drivers in a race-free manner would take more
 * tweaking of the refcounting scheme, however patches are welcome provided
 * there is a legitimate driver usecase for this.
 *
 * Refcount relationships in a topology
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * Let's take a look at why the relationship between topology and malloc
 * refcounts is designed the way it is.
 *
 * .. kernel-figure:: dp-mst/topology-figure-1.dot
 *
 *    An example of topology and malloc refs in a DP MST topology with two
 *    active payloads. Topology refcount increments are indicated by solid
 *    lines, and malloc refcount increments are indicated by dashed lines.
 *    Each starts from the branch which incremented the refcount, and ends at
 *    the branch to which the refcount belongs, i.e. the arrow points the
 *    same way as the C pointers used to reference a structure.
 *
 * As you can see in the above figure, every branch increments the topology
 * refcount of its children, and increments the malloc refcount of its
 * parent. Additionally, every payload increments the malloc refcount of its
 * assigned port by 1.
 *
 * So, what would happen if MSTB #3 from the above figure was unplugged from
 * the system, but the driver hadn't yet removed payload #2 from port #3? The
 * topology would start to look like the figure below.
 *
 * .. kernel-figure:: dp-mst/topology-figure-2.dot
 *
 *    Ports and branch devices which have been released from memory are
 *    colored grey, and references which have been removed are colored red.
 *
 * Whenever a port or branch device's topology refcount reaches zero, it will
 * decrement the topology refcounts of all its children, the malloc refcount
 * of its parent, and finally its own malloc refcount. For MSTB #4 and port
 * #4, this means they both have been disconnected from the topology and freed
 * from memory. But, because payload #2 is still holding a reference to port
 * #3, port #3 is removed from the topology but its &struct drm_dp_mst_port
 * is still accessible from memory. This also means port #3 has not yet
 * decremented the malloc refcount of MSTB #3, so its &struct
 * drm_dp_mst_branch will also stay allocated in memory until port #3's
 * malloc refcount reaches 0.
 *
 * This relationship is necessary because in order to release payload #2, we
 * need to be able to figure out the last relative of port #3 that's still
 * connected to the topology. In this case, we would travel up the topology as
 * shown below.
 *
 * .. kernel-figure:: dp-mst/topology-figure-3.dot
 *
 * And finally, remove payload #2 by communicating with port #2 through
 * sideband transactions.
 */
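/*
 * A minimal driver-side sketch of the malloc refcounting contract described
 * above (function and variable names are illustrative, not a real driver):
 *
 *	static struct drm_connector *
 *	example_add_connector(struct drm_dp_mst_topology_mgr *mgr,
 *			      struct drm_dp_mst_port *port, const char *path)
 *	{
 *		// ...create and initialize the connector...
 *		// Keep the port's memory alive for the connector's lifetime,
 *		// even after the port is removed from the topology.
 *		drm_dp_mst_get_port_malloc(port);
 *		return connector;
 *	}
 *
 *	static void example_connector_destroy(struct drm_connector *connector)
 *	{
 *		// ...tear down the connector...
 *		drm_dp_mst_put_port_malloc(port);
 *	}
 */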
/**
 * drm_dp_mst_get_mstb_malloc() - Increment the malloc refcount of a branch
 * device
 * @mstb: The &struct drm_dp_mst_branch to increment the malloc refcount of
 *
 * Increments &drm_dp_mst_branch.malloc_kref. When
 * &drm_dp_mst_branch.malloc_kref reaches 0, the memory allocation for @mstb
 * will be released and @mstb may no longer be used.
 *
 * See also: drm_dp_mst_put_mstb_malloc()
 */
static void
drm_dp_mst_get_mstb_malloc(struct drm_dp_mst_branch *mstb)
{
	kref_get(&mstb->malloc_kref);
	DRM_DEBUG("mstb %p (%d)\n", mstb, kref_read(&mstb->malloc_kref));
}
/**
 * drm_dp_mst_put_mstb_malloc() - Decrement the malloc refcount of a branch
 * device
 * @mstb: The &struct drm_dp_mst_branch to decrement the malloc refcount of
 *
 * Decrements &drm_dp_mst_branch.malloc_kref. When
 * &drm_dp_mst_branch.malloc_kref reaches 0, the memory allocation for @mstb
 * will be released and @mstb may no longer be used.
 *
 * See also: drm_dp_mst_get_mstb_malloc()
 */
static void
drm_dp_mst_put_mstb_malloc(struct drm_dp_mst_branch *mstb)
{
	DRM_DEBUG("mstb %p (%d)\n", mstb, kref_read(&mstb->malloc_kref) - 1);
	kref_put(&mstb->malloc_kref, drm_dp_free_mst_branch_device);
}
static void drm_dp_free_mst_port(struct kref *kref)
{
	struct drm_dp_mst_port *port =
		container_of(kref, struct drm_dp_mst_port, malloc_kref);

	drm_dp_mst_put_mstb_malloc(port->parent);
	kfree(port);
}
/**
 * drm_dp_mst_get_port_malloc() - Increment the malloc refcount of an MST port
 * @port: The &struct drm_dp_mst_port to increment the malloc refcount of
 *
 * Increments &drm_dp_mst_port.malloc_kref. When &drm_dp_mst_port.malloc_kref
 * reaches 0, the memory allocation for @port will be released and @port may
 * no longer be used.
 *
 * Because @port could potentially be freed at any time by the DP MST helpers
 * if &drm_dp_mst_port.malloc_kref reaches 0, including during a call to this
 * function, drivers that wish to make use of &struct drm_dp_mst_port should
 * ensure that they grab at least one main malloc reference to their MST ports
 * in &drm_dp_mst_topology_cbs.add_connector. This callback is called before
 * there is any chance for &drm_dp_mst_port.malloc_kref to reach 0.
 *
 * See also: drm_dp_mst_put_port_malloc()
 */
void
drm_dp_mst_get_port_malloc(struct drm_dp_mst_port *port)
{
	kref_get(&port->malloc_kref);
	DRM_DEBUG("port %p (%d)\n", port, kref_read(&port->malloc_kref));
}
EXPORT_SYMBOL(drm_dp_mst_get_port_malloc);
/**
 * drm_dp_mst_put_port_malloc() - Decrement the malloc refcount of an MST port
 * @port: The &struct drm_dp_mst_port to decrement the malloc refcount of
 *
 * Decrements &drm_dp_mst_port.malloc_kref. When &drm_dp_mst_port.malloc_kref
 * reaches 0, the memory allocation for @port will be released and @port may
 * no longer be used.
 *
 * See also: drm_dp_mst_get_port_malloc()
 */
void
drm_dp_mst_put_port_malloc(struct drm_dp_mst_port *port)
{
	DRM_DEBUG("port %p (%d)\n", port, kref_read(&port->malloc_kref) - 1);
	kref_put(&port->malloc_kref, drm_dp_free_mst_port);
}
EXPORT_SYMBOL(drm_dp_mst_put_port_malloc);
static void drm_dp_destroy_mst_branch_device(struct kref *kref)
{
	struct drm_dp_mst_branch *mstb =
		container_of(kref, struct drm_dp_mst_branch, topology_kref);
	struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
	struct drm_dp_mst_port *port, *tmp;
	bool wake_tx = false;

	mutex_lock(&mgr->lock);
	list_for_each_entry_safe(port, tmp, &mstb->ports, next) {
		list_del(&port->next);
		drm_dp_mst_topology_put_port(port);
	}
	mutex_unlock(&mgr->lock);

	/* drop any tx slots msg */
	mutex_lock(&mstb->mgr->qlock);
	if (mstb->tx_slots[0]) {
		mstb->tx_slots[0]->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
		mstb->tx_slots[0] = NULL;
		wake_tx = true;
	}
	if (mstb->tx_slots[1]) {
		mstb->tx_slots[1]->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
		mstb->tx_slots[1] = NULL;
		wake_tx = true;
	}
	mutex_unlock(&mstb->mgr->qlock);

	if (wake_tx)
		wake_up_all(&mstb->mgr->tx_waitq);

	drm_dp_mst_put_mstb_malloc(mstb);
}
/**
 * drm_dp_mst_topology_try_get_mstb() - Increment the topology refcount of a
 * branch device unless it's zero
 * @mstb: &struct drm_dp_mst_branch to increment the topology refcount of
 *
 * Attempts to grab a topology reference to @mstb, if it hasn't yet been
 * removed from the topology (e.g. &drm_dp_mst_branch.topology_kref has
 * reached 0). Holding a topology reference implies that a malloc reference
 * will be held to @mstb as long as the user holds the topology reference.
 *
 * Care should be taken to ensure that the user has at least one malloc
 * reference to @mstb. If you already have a topology reference to @mstb, you
 * should use drm_dp_mst_topology_get_mstb() instead.
 *
 * See also:
 * drm_dp_mst_topology_get_mstb()
 * drm_dp_mst_topology_put_mstb()
 *
 * Returns:
 * * 1: A topology reference was grabbed successfully
 * * 0: @mstb is no longer in the topology, no reference was grabbed
 */
static int __must_check
drm_dp_mst_topology_try_get_mstb(struct drm_dp_mst_branch *mstb)
{
	int ret = kref_get_unless_zero(&mstb->topology_kref);

	if (ret)
		DRM_DEBUG("mstb %p (%d)\n", mstb,
			  kref_read(&mstb->topology_kref));

	return ret;
}
/**
 * drm_dp_mst_topology_get_mstb() - Increment the topology refcount of a
 * branch device
 * @mstb: The &struct drm_dp_mst_branch to increment the topology refcount of
 *
 * Increments &drm_dp_mst_branch.topology_kref without checking whether or
 * not it's already reached 0. This is only valid to use in scenarios where
 * you are already guaranteed to have at least one active topology reference
 * to @mstb. Otherwise, drm_dp_mst_topology_try_get_mstb() must be used.
 *
 * See also:
 * drm_dp_mst_topology_try_get_mstb()
 * drm_dp_mst_topology_put_mstb()
 */
static void drm_dp_mst_topology_get_mstb(struct drm_dp_mst_branch *mstb)
{
	WARN_ON(kref_read(&mstb->topology_kref) == 0);
	kref_get(&mstb->topology_kref);
	DRM_DEBUG("mstb %p (%d)\n", mstb, kref_read(&mstb->topology_kref));
}
/**
 * drm_dp_mst_topology_put_mstb() - release a topology reference to a branch
 * device
 * @mstb: The &struct drm_dp_mst_branch to release the topology reference from
 *
 * Releases a topology reference from @mstb by decrementing
 * &drm_dp_mst_branch.topology_kref.
 *
 * See also:
 * drm_dp_mst_topology_try_get_mstb()
 * drm_dp_mst_topology_get_mstb()
 */
static void
drm_dp_mst_topology_put_mstb(struct drm_dp_mst_branch *mstb)
{
	DRM_DEBUG("mstb %p (%d)\n",
		  mstb, kref_read(&mstb->topology_kref) - 1);
	kref_put(&mstb->topology_kref, drm_dp_destroy_mst_branch_device);
}
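/*
 * Sketch of the intended try_get/put pattern (assumes the caller already
 * holds a malloc reference to mstb, per the rules documented above):
 *
 *	if (drm_dp_mst_topology_try_get_mstb(mstb)) {
 *		// mstb is still part of the topology and safe to use
 *		drm_dp_mst_topology_put_mstb(mstb);
 *	}
 */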
static void drm_dp_port_teardown_pdt(struct drm_dp_mst_port *port, int old_pdt)
{
	struct drm_dp_mst_branch *mstb;

	switch (old_pdt) {
	case DP_PEER_DEVICE_DP_LEGACY_CONV:
	case DP_PEER_DEVICE_SST_SINK:
		/* remove i2c over sideband */
		drm_dp_mst_unregister_i2c_bus(&port->aux);
		break;
	case DP_PEER_DEVICE_MST_BRANCHING:
		mstb = port->mstb;
		port->mstb = NULL;
		drm_dp_mst_topology_put_mstb(mstb);
		break;
	}
}
static void drm_dp_destroy_port(struct kref *kref)
{
	struct drm_dp_mst_port *port =
		container_of(kref, struct drm_dp_mst_port, topology_kref);
	struct drm_dp_mst_topology_mgr *mgr = port->mgr;

	if (!port->input) {
		kfree(port->cached_edid);

		/*
		 * The only time we don't have a connector
		 * on an output port is if the connector init
		 * fails.
		 */
		if (port->connector) {
			/* we can't destroy the connector here, as
			 * we might be holding the mode_config.mutex
			 * from an EDID retrieval */

			mutex_lock(&mgr->destroy_connector_lock);
			list_add(&port->next, &mgr->destroy_connector_list);
			mutex_unlock(&mgr->destroy_connector_lock);
			schedule_work(&mgr->destroy_connector_work);
			return;
		}
		/* no need to clean up vcpi
		 * as if we have no connector we never setup a vcpi */
		drm_dp_port_teardown_pdt(port, port->pdt);
		port->pdt = DP_PEER_DEVICE_NONE;
	}
	drm_dp_mst_put_port_malloc(port);
}
/**
 * drm_dp_mst_topology_try_get_port() - Increment the topology refcount of a
 * port unless it's zero
 * @port: &struct drm_dp_mst_port to increment the topology refcount of
 *
 * Attempts to grab a topology reference to @port, if it hasn't yet been
 * removed from the topology (e.g. &drm_dp_mst_port.topology_kref has reached
 * 0). Holding a topology reference implies that a malloc reference will be
 * held to @port as long as the user holds the topology reference.
 *
 * Care should be taken to ensure that the user has at least one malloc
 * reference to @port. If you already have a topology reference to @port, you
 * should use drm_dp_mst_topology_get_port() instead.
 *
 * See also:
 * drm_dp_mst_topology_get_port()
 * drm_dp_mst_topology_put_port()
 *
 * Returns:
 * * 1: A topology reference was grabbed successfully
 * * 0: @port is no longer in the topology, no reference was grabbed
 */
static int __must_check
drm_dp_mst_topology_try_get_port(struct drm_dp_mst_port *port)
{
	int ret = kref_get_unless_zero(&port->topology_kref);

	if (ret)
		DRM_DEBUG("port %p (%d)\n", port,
			  kref_read(&port->topology_kref));

	return ret;
}
/**
 * drm_dp_mst_topology_get_port() - Increment the topology refcount of a port
 * @port: The &struct drm_dp_mst_port to increment the topology refcount of
 *
 * Increments &drm_dp_mst_port.topology_kref without checking whether or
 * not it's already reached 0. This is only valid to use in scenarios where
 * you are already guaranteed to have at least one active topology reference
 * to @port. Otherwise, drm_dp_mst_topology_try_get_port() must be used.
 *
 * See also:
 * drm_dp_mst_topology_try_get_port()
 * drm_dp_mst_topology_put_port()
 */
static void drm_dp_mst_topology_get_port(struct drm_dp_mst_port *port)
{
	WARN_ON(kref_read(&port->topology_kref) == 0);
	kref_get(&port->topology_kref);
	DRM_DEBUG("port %p (%d)\n", port, kref_read(&port->topology_kref));
}
/**
 * drm_dp_mst_topology_put_port() - release a topology reference to a port
 * @port: The &struct drm_dp_mst_port to release the topology reference from
 *
 * Releases a topology reference from @port by decrementing
 * &drm_dp_mst_port.topology_kref.
 *
 * See also:
 * drm_dp_mst_topology_try_get_port()
 * drm_dp_mst_topology_get_port()
 */
static void drm_dp_mst_topology_put_port(struct drm_dp_mst_port *port)
{
	DRM_DEBUG("port %p (%d)\n",
		  port, kref_read(&port->topology_kref) - 1);
	kref_put(&port->topology_kref, drm_dp_destroy_port);
}
static struct drm_dp_mst_branch *
drm_dp_mst_topology_get_mstb_validated_locked(struct drm_dp_mst_branch *mstb,
					      struct drm_dp_mst_branch *to_find)
{
	struct drm_dp_mst_port *port;
	struct drm_dp_mst_branch *rmstb;

	if (to_find == mstb)
		return mstb;

	list_for_each_entry(port, &mstb->ports, next) {
		if (port->mstb) {
			rmstb = drm_dp_mst_topology_get_mstb_validated_locked(
			    port->mstb, to_find);
			if (rmstb)
				return rmstb;
		}
	}
	return NULL;
}
static struct drm_dp_mst_branch *
drm_dp_mst_topology_get_mstb_validated(struct drm_dp_mst_topology_mgr *mgr,
				       struct drm_dp_mst_branch *mstb)
{
	struct drm_dp_mst_branch *rmstb = NULL;

	mutex_lock(&mgr->lock);
	if (mgr->mst_primary) {
		rmstb = drm_dp_mst_topology_get_mstb_validated_locked(
		    mgr->mst_primary, mstb);

		if (rmstb && !drm_dp_mst_topology_try_get_mstb(rmstb))
			rmstb = NULL;
	}
	mutex_unlock(&mgr->lock);
	return rmstb;
}
static struct drm_dp_mst_port *
drm_dp_mst_topology_get_port_validated_locked(struct drm_dp_mst_branch *mstb,
					      struct drm_dp_mst_port *to_find)
{
	struct drm_dp_mst_port *port, *mport;

	list_for_each_entry(port, &mstb->ports, next) {
		if (port == to_find)
			return port;

		if (port->mstb) {
			mport = drm_dp_mst_topology_get_port_validated_locked(
			    port->mstb, to_find);
			if (mport)
				return mport;
		}
	}
	return NULL;
}
static struct drm_dp_mst_port *
drm_dp_mst_topology_get_port_validated(struct drm_dp_mst_topology_mgr *mgr,
				       struct drm_dp_mst_port *port)
{
	struct drm_dp_mst_port *rport = NULL;

	mutex_lock(&mgr->lock);
	if (mgr->mst_primary) {
		rport = drm_dp_mst_topology_get_port_validated_locked(
		    mgr->mst_primary, port);

		if (rport && !drm_dp_mst_topology_try_get_port(rport))
			rport = NULL;
	}
	mutex_unlock(&mgr->lock);
	return rport;
}
static struct drm_dp_mst_port *drm_dp_get_port(struct drm_dp_mst_branch *mstb, u8 port_num)
{
	struct drm_dp_mst_port *port;
	int ret;

	list_for_each_entry(port, &mstb->ports, next) {
		if (port->port_num == port_num) {
			ret = drm_dp_mst_topology_try_get_port(port);
			return ret ? port : NULL;
		}
	}

	return NULL;
}
/*
 * calculate a new RAD for this MST branch device
 * if parent has an LCT of 2 then it has 1 nibble of RAD,
 * if parent has an LCT of 3 then it has 2 nibbles of RAD,
 */
static u8 drm_dp_calculate_rad(struct drm_dp_mst_port *port,
			       u8 *rad)
{
	int parent_lct = port->parent->lct;
	int shift = 4;
	int idx = (parent_lct - 1) / 2;
	if (parent_lct > 1) {
		memcpy(rad, port->parent->rad, idx + 1);
		shift = (parent_lct % 2) ? 4 : 0;
	} else
		rad[0] = 0;

	rad[idx] |= port->port_num << shift;
	return parent_lct + 1;
}
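/*
 * Worked example (values assumed): for a port with port_num == 2 whose
 * parent has lct == 2 and rad[0] == 0x10, idx is 0 and shift is 0, so the
 * result is lct == 3 with rad[0] == 0x12; the new hop lands in the low
 * nibble because the parent's LCT is even.
 */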
/*
 * returns true if a link address probe needs to be sent to the new mstb
 */
static bool drm_dp_port_setup_pdt(struct drm_dp_mst_port *port)
{
	int ret;
	u8 rad[6], lct;
	bool send_link = false;
	switch (port->pdt) {
	case DP_PEER_DEVICE_DP_LEGACY_CONV:
	case DP_PEER_DEVICE_SST_SINK:
		/* add i2c over sideband */
		ret = drm_dp_mst_register_i2c_bus(&port->aux);
		break;
	case DP_PEER_DEVICE_MST_BRANCHING:
		lct = drm_dp_calculate_rad(port, rad);

		port->mstb = drm_dp_add_mst_branch_device(lct, rad);
		if (port->mstb) {
			port->mstb->mgr = port->mgr;
			port->mstb->port_parent = port;
			/*
			 * Make sure this port's memory allocation stays
			 * around until its child MSTB releases it
			 */
			drm_dp_mst_get_port_malloc(port);

			send_link = true;
		}
		break;
	}
	return send_link;
}
static void drm_dp_check_mstb_guid(struct drm_dp_mst_branch *mstb, u8 *guid)
{
	int ret;

	memcpy(mstb->guid, guid, 16);

	if (!drm_dp_validate_guid(mstb->mgr, mstb->guid)) {
		if (mstb->port_parent) {
			ret = drm_dp_send_dpcd_write(
					mstb->mgr,
					mstb->port_parent,
					DP_GUID,
					16,
					mstb->guid);
		} else {
			ret = drm_dp_dpcd_write(
					mstb->mgr->aux,
					DP_GUID,
					mstb->guid,
					16);
		}
	}
}
static void build_mst_prop_path(const struct drm_dp_mst_branch *mstb,
				int pnum,
				char *proppath,
				size_t proppath_size)
{
	int i;
	char temp[8];
	snprintf(proppath, proppath_size, "mst:%d", mstb->mgr->conn_base_id);
	for (i = 0; i < (mstb->lct - 1); i++) {
		int shift = (i % 2) ? 0 : 4;
		int port_num = (mstb->rad[i / 2] >> shift) & 0xf;
		snprintf(temp, sizeof(temp), "-%d", port_num);
		strlcat(proppath, temp, proppath_size);
	}
	snprintf(temp, sizeof(temp), "-%d", pnum);
	strlcat(proppath, temp, proppath_size);
}
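/*
 * Example (ids assumed): a sink on port 8 of a branch reached through port 1
 * of the primary branch (mstb->lct == 2, rad[0] == 0x10), with a
 * conn_base_id of 30, yields the connector property path "mst:30-1-8".
 */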
static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
			    struct drm_device *dev,
			    struct drm_dp_link_addr_reply_port *port_msg)
{
	struct drm_dp_mst_port *port;
	bool ret;
	bool created = false;
	int old_pdt = 0;
	int old_ddps = 0;

	port = drm_dp_get_port(mstb, port_msg->port_number);
	if (!port) {
		port = kzalloc(sizeof(*port), GFP_KERNEL);
		if (!port)
			return;
		kref_init(&port->topology_kref);
		kref_init(&port->malloc_kref);
		port->parent = mstb;
		port->port_num = port_msg->port_number;
		port->mgr = mstb->mgr;
		port->aux.name = "DPMST";
		port->aux.dev = dev->dev;

		/*
		 * Make sure the memory allocation for our parent branch stays
		 * around until our own memory allocation is released
		 */
		drm_dp_mst_get_mstb_malloc(mstb);

		created = true;
	} else {
		old_pdt = port->pdt;
		old_ddps = port->ddps;
	}

	port->pdt = port_msg->peer_device_type;
	port->input = port_msg->input_port;
	port->mcs = port_msg->mcs;
	port->ddps = port_msg->ddps;
	port->ldps = port_msg->legacy_device_plug_status;
	port->dpcd_rev = port_msg->dpcd_revision;
	port->num_sdp_streams = port_msg->num_sdp_streams;
	port->num_sdp_stream_sinks = port_msg->num_sdp_stream_sinks;

	/* manage mstb port lists with mgr lock - take a reference
	   for this list */
	if (created) {
		mutex_lock(&mstb->mgr->lock);
		drm_dp_mst_topology_get_port(port);
		list_add(&port->next, &mstb->ports);
		mutex_unlock(&mstb->mgr->lock);
	}

	if (old_ddps != port->ddps) {
		if (port->ddps) {
			if (!port->input) {
				drm_dp_send_enum_path_resources(mstb->mgr,
								mstb, port);
			}
		} else {
			port->available_pbn = 0;
		}
	}

	if (old_pdt != port->pdt && !port->input) {
		drm_dp_port_teardown_pdt(port, old_pdt);

		ret = drm_dp_port_setup_pdt(port);
		if (ret)
			drm_dp_send_link_address(mstb->mgr, port->mstb);
	}

	if (created && !port->input) {
		char proppath[255];

		build_mst_prop_path(mstb, port->port_num, proppath,
				    sizeof(proppath));
		port->connector = (*mstb->mgr->cbs->add_connector)(mstb->mgr,
								   port,
								   proppath);
		if (!port->connector) {
			/* remove it from the port list */
			mutex_lock(&mstb->mgr->lock);
			list_del(&port->next);
			mutex_unlock(&mstb->mgr->lock);
			/* drop port list reference */
			drm_dp_mst_topology_put_port(port);
			goto out;
		}
		if ((port->pdt == DP_PEER_DEVICE_DP_LEGACY_CONV ||
		     port->pdt == DP_PEER_DEVICE_SST_SINK) &&
		    port->port_num >= DP_MST_LOGICAL_PORT_0) {
			port->cached_edid = drm_get_edid(port->connector,
							 &port->aux.ddc);
			drm_connector_set_tile_property(port->connector);
		}

		(*mstb->mgr->cbs->register_connector)(port->connector);
	}

out:
	/* put reference to this port */
	drm_dp_mst_topology_put_port(port);
}
static void drm_dp_update_port(struct drm_dp_mst_branch *mstb,
			       struct drm_dp_connection_status_notify *conn_stat)
{
	struct drm_dp_mst_port *port;
	int old_pdt;
	int old_ddps;
	bool dowork = false;
	port = drm_dp_get_port(mstb, conn_stat->port_number);
	if (!port)
		return;

	old_ddps = port->ddps;
	old_pdt = port->pdt;
	port->pdt = conn_stat->peer_device_type;
	port->mcs = conn_stat->message_capability_status;
	port->ldps = conn_stat->legacy_device_plug_status;
	port->ddps = conn_stat->displayport_device_plug_status;

	if (old_ddps != port->ddps) {
		if (port->ddps) {
			dowork = true;
		} else {
			port->available_pbn = 0;
		}
	}
	if (old_pdt != port->pdt && !port->input) {
		drm_dp_port_teardown_pdt(port, old_pdt);

		if (drm_dp_port_setup_pdt(port))
			dowork = true;
	}

	drm_dp_mst_topology_put_port(port);
	if (dowork)
		queue_work(system_long_wq, &mstb->mgr->work);
}
static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_topology_mgr *mgr,
							       u8 lct, u8 *rad)
{
	struct drm_dp_mst_branch *mstb;
	struct drm_dp_mst_port *port;
	int i, ret;
	/* find the port by iterating down */

	mutex_lock(&mgr->lock);
	mstb = mgr->mst_primary;

	if (!mstb)
		goto out;

	for (i = 0; i < lct - 1; i++) {
		int shift = (i % 2) ? 0 : 4;
		int port_num = (rad[i / 2] >> shift) & 0xf;

		list_for_each_entry(port, &mstb->ports, next) {
			if (port->port_num == port_num) {
				mstb = port->mstb;
				if (!mstb) {
					DRM_ERROR("failed to lookup MSTB with lct %d, rad %02x\n", lct, rad[0]);
					goto out;
				}

				break;
			}
		}
	}
	ret = drm_dp_mst_topology_try_get_mstb(mstb);
	if (!ret)
		mstb = NULL;
out:
	mutex_unlock(&mgr->lock);
	return mstb;
}
static struct drm_dp_mst_branch *get_mst_branch_device_by_guid_helper(
	struct drm_dp_mst_branch *mstb,
	uint8_t *guid)
{
	struct drm_dp_mst_branch *found_mstb;
	struct drm_dp_mst_port *port;

	if (memcmp(mstb->guid, guid, 16) == 0)
		return mstb;

	list_for_each_entry(port, &mstb->ports, next) {
		if (!port->mstb)
			continue;

		found_mstb = get_mst_branch_device_by_guid_helper(port->mstb, guid);

		if (found_mstb)
			return found_mstb;
	}

	return NULL;
}
static struct drm_dp_mst_branch *
drm_dp_get_mst_branch_device_by_guid(struct drm_dp_mst_topology_mgr *mgr,
				     uint8_t *guid)
{
	struct drm_dp_mst_branch *mstb;
	int ret;

	/* find the port by iterating down */
	mutex_lock(&mgr->lock);

	mstb = get_mst_branch_device_by_guid_helper(mgr->mst_primary, guid);
	if (mstb) {
		ret = drm_dp_mst_topology_try_get_mstb(mstb);
		if (!ret)
			mstb = NULL;
	}

	mutex_unlock(&mgr->lock);
	return mstb;
}
static void drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
					       struct drm_dp_mst_branch *mstb)
{
	struct drm_dp_mst_port *port;
	struct drm_dp_mst_branch *mstb_child;
	if (!mstb->link_address_sent)
		drm_dp_send_link_address(mgr, mstb);

	list_for_each_entry(port, &mstb->ports, next) {
		if (port->input)
			continue;

		if (!port->ddps)
			continue;

		if (!port->available_pbn)
			drm_dp_send_enum_path_resources(mgr, mstb, port);

		if (port->mstb) {
			mstb_child = drm_dp_mst_topology_get_mstb_validated(
			    mgr, port->mstb);
			if (mstb_child) {
				drm_dp_check_and_send_link_address(mgr, mstb_child);
				drm_dp_mst_topology_put_mstb(mstb_child);
			}
		}
	}
}
static void drm_dp_mst_link_probe_work(struct work_struct *work)
{
	struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, work);
	struct drm_dp_mst_branch *mstb;
	int ret;

	mutex_lock(&mgr->lock);
	mstb = mgr->mst_primary;
	if (mstb) {
		ret = drm_dp_mst_topology_try_get_mstb(mstb);
		if (!ret)
			mstb = NULL;
	}
	mutex_unlock(&mgr->lock);
	if (mstb) {
		drm_dp_check_and_send_link_address(mgr, mstb);
		drm_dp_mst_topology_put_mstb(mstb);
	}
}
static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
				 u8 *guid)
{
	u64 salt;

	if (memchr_inv(guid, 0, 16))
		return true;

	salt = get_jiffies_64();

	memcpy(&guid[0], &salt, sizeof(u64));
	memcpy(&guid[8], &salt, sizeof(u64));

	return false;
}
static int build_dpcd_read(struct drm_dp_sideband_msg_tx *msg, u8 port_num, u32 offset, u8 num_bytes)
{
	struct drm_dp_sideband_msg_req_body req;

	req.req_type = DP_REMOTE_DPCD_READ;
	req.u.dpcd_read.port_number = port_num;
	req.u.dpcd_read.dpcd_address = offset;
	req.u.dpcd_read.num_bytes = num_bytes;
	drm_dp_encode_sideband_req(&req, msg);

	return 0;
}
static int drm_dp_send_sideband_msg(struct drm_dp_mst_topology_mgr *mgr,
				    bool up, u8 *msg, int len)
{
	int ret;
	int regbase = up ? DP_SIDEBAND_MSG_UP_REP_BASE : DP_SIDEBAND_MSG_DOWN_REQ_BASE;
	int tosend, total, offset;
	int retries = 0;

retry:
	total = len;
	offset = 0;
	do {
		tosend = min3(mgr->max_dpcd_transaction_bytes, 16, total);

		ret = drm_dp_dpcd_write(mgr->aux, regbase + offset,
					&msg[offset],
					tosend);
		if (ret != tosend) {
			if (ret == -EIO && retries < 5) {
				retries++;
				goto retry;
			}
			DRM_DEBUG_KMS("failed to dpcd write %d %d\n", tosend, ret);

			return -EIO;
		}
		offset += tosend;
		total -= tosend;
	} while (total > 0);
	return 0;
}
static int set_hdr_from_dst_qlock(struct drm_dp_sideband_msg_hdr *hdr,
				  struct drm_dp_sideband_msg_tx *txmsg)
{
	struct drm_dp_mst_branch *mstb = txmsg->dst;
	u8 req_type;

	/* both msg slots are full */
	if (txmsg->seqno == -1) {
		if (mstb->tx_slots[0] && mstb->tx_slots[1]) {
			DRM_DEBUG_KMS("%s: failed to find slot\n", __func__);
			return -EAGAIN;
		}
		if (mstb->tx_slots[0] == NULL && mstb->tx_slots[1] == NULL) {
			txmsg->seqno = mstb->last_seqno;
			mstb->last_seqno ^= 1;
		} else if (mstb->tx_slots[0] == NULL)
			txmsg->seqno = 0;
		else
			txmsg->seqno = 1;
		mstb->tx_slots[txmsg->seqno] = txmsg;
	}

	req_type = txmsg->msg[0] & 0x7f;
	if (req_type == DP_CONNECTION_STATUS_NOTIFY ||
	    req_type == DP_RESOURCE_STATUS_NOTIFY)
		hdr->broadcast = 1;
	else
		hdr->broadcast = 0;
	hdr->path_msg = txmsg->path_msg;
	hdr->lct = mstb->lct;
	hdr->lcr = mstb->lct - 1;
	if (mstb->lct > 1)
		memcpy(hdr->rad, mstb->rad, mstb->lct / 2);
	hdr->seqno = txmsg->seqno;
	return 0;
}
/*
 * process a single block of the next message in the sideband queue
 */
static int process_single_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
				   struct drm_dp_sideband_msg_tx *txmsg,
				   bool up)
{
	u8 chunk[48];
	struct drm_dp_sideband_msg_hdr hdr;
	int len, space, idx, tosend;
	int ret;

	memset(&hdr, 0, sizeof(struct drm_dp_sideband_msg_hdr));

	if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED) {
		txmsg->seqno = -1;
		txmsg->state = DRM_DP_SIDEBAND_TX_START_SEND;
	}

	/* make hdr from dst mst - for replies use seqno
	   otherwise assign one */
	ret = set_hdr_from_dst_qlock(&hdr, txmsg);
	if (ret < 0)
		return ret;

	/* amount left to send in this message */
	len = txmsg->cur_len - txmsg->cur_offset;

	/* 48 - sideband msg size - 1 byte for data CRC, x header bytes */
	space = 48 - 1 - drm_dp_calc_sb_hdr_size(&hdr);

	tosend = min(len, space);
	if (len == txmsg->cur_len)
		hdr.somt = 1;
	if (tosend == len)
		hdr.eomt = 1;

	hdr.msg_len = tosend + 1;
	drm_dp_encode_sideband_msg_hdr(&hdr, chunk, &idx);
	memcpy(&chunk[idx], &txmsg->msg[txmsg->cur_offset], tosend);
	/* add crc at end */
	drm_dp_crc_sideband_chunk_req(&chunk[idx], tosend);
	idx += tosend + 1;

	ret = drm_dp_send_sideband_msg(mgr, up, chunk, idx);
	if (ret) {
		DRM_DEBUG_KMS("sideband msg failed to send\n");
		return ret;
	}

	txmsg->cur_offset += tosend;
	if (txmsg->cur_offset == txmsg->cur_len) {
		txmsg->state = DRM_DP_SIDEBAND_TX_SENT;
		return 1;
	}
	return 0;
}
static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
{
	struct drm_dp_sideband_msg_tx *txmsg;
	int ret;

	WARN_ON(!mutex_is_locked(&mgr->qlock));

	/* construct a chunk from the first msg in the tx_msg queue */
	if (list_empty(&mgr->tx_msg_downq))
		return;

	txmsg = list_first_entry(&mgr->tx_msg_downq, struct drm_dp_sideband_msg_tx, next);
	ret = process_single_tx_qlock(mgr, txmsg, false);
	if (ret == 1) {
		/* txmsg is sent it should be in the slots now */
		list_del(&txmsg->next);
	} else if (ret) {
		DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);
		list_del(&txmsg->next);
		if (txmsg->seqno != -1)
			txmsg->dst->tx_slots[txmsg->seqno] = NULL;
		txmsg->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
		wake_up_all(&mgr->tx_waitq);
	}
}
/* called holding qlock */
static void process_single_up_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
				       struct drm_dp_sideband_msg_tx *txmsg)
{
	int ret;

	/* construct a chunk from the first msg in the tx_msg queue */
	ret = process_single_tx_qlock(mgr, txmsg, true);

	if (ret != 1)
		DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);

	if (txmsg->seqno != -1) {
		WARN_ON((unsigned int)txmsg->seqno >=
			ARRAY_SIZE(txmsg->dst->tx_slots));
		txmsg->dst->tx_slots[txmsg->seqno] = NULL;
	}
}
static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr,
				 struct drm_dp_sideband_msg_tx *txmsg)
{
	mutex_lock(&mgr->qlock);
	list_add_tail(&txmsg->next, &mgr->tx_msg_downq);
	if (list_is_singular(&mgr->tx_msg_downq))
		process_single_down_tx_qlock(mgr);
	mutex_unlock(&mgr->qlock);
}
static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
				     struct drm_dp_mst_branch *mstb)
{
	int len;
	struct drm_dp_sideband_msg_tx *txmsg;
	int ret;

	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
	if (!txmsg)
		return;

	txmsg->dst = mstb;
	len = build_link_address(txmsg);

	mstb->link_address_sent = true;
	drm_dp_queue_down_tx(mgr, txmsg);

	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
	if (ret > 0) {
		int i;

		if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
			DRM_DEBUG_KMS("link address nak received\n");
		} else {
			DRM_DEBUG_KMS("link address reply: %d\n", txmsg->reply.u.link_addr.nports);
			for (i = 0; i < txmsg->reply.u.link_addr.nports; i++) {
				DRM_DEBUG_KMS("port %d: input %d, pdt: %d, pn: %d, dpcd_rev: %02x, mcs: %d, ddps: %d, ldps %d, sdp %d/%d\n", i,
					      txmsg->reply.u.link_addr.ports[i].input_port,
					      txmsg->reply.u.link_addr.ports[i].peer_device_type,
					      txmsg->reply.u.link_addr.ports[i].port_number,
					      txmsg->reply.u.link_addr.ports[i].dpcd_revision,
					      txmsg->reply.u.link_addr.ports[i].mcs,
					      txmsg->reply.u.link_addr.ports[i].ddps,
					      txmsg->reply.u.link_addr.ports[i].legacy_device_plug_status,
					      txmsg->reply.u.link_addr.ports[i].num_sdp_streams,
					      txmsg->reply.u.link_addr.ports[i].num_sdp_stream_sinks);
			}

			drm_dp_check_mstb_guid(mstb, txmsg->reply.u.link_addr.guid);

			for (i = 0; i < txmsg->reply.u.link_addr.nports; i++) {
				drm_dp_add_port(mstb, mgr->dev, &txmsg->reply.u.link_addr.ports[i]);
			}
			drm_kms_helper_hotplug_event(mgr->dev);
		}
	} else {
		mstb->link_address_sent = false;
		DRM_DEBUG_KMS("link address failed %d\n", ret);
	}

	kfree(txmsg);
}
static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
					   struct drm_dp_mst_branch *mstb,
					   struct drm_dp_mst_port *port)
{
	int len;
	struct drm_dp_sideband_msg_tx *txmsg;
	int ret;

	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
	if (!txmsg)
		return -ENOMEM;

	txmsg->dst = mstb;
	len = build_enum_path_resources(txmsg, port->port_num);

	drm_dp_queue_down_tx(mgr, txmsg);

	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
	if (ret > 0) {
		if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
			DRM_DEBUG_KMS("enum path resources nak received\n");
		} else {
			if (port->port_num != txmsg->reply.u.path_resources.port_number)
				DRM_ERROR("got incorrect port in response\n");
			DRM_DEBUG_KMS("enum path resources %d: %d %d\n", txmsg->reply.u.path_resources.port_number, txmsg->reply.u.path_resources.full_payload_bw_number,
				      txmsg->reply.u.path_resources.avail_payload_bw_number);
			port->available_pbn = txmsg->reply.u.path_resources.avail_payload_bw_number;
		}
	}

	kfree(txmsg);
	return 0;
}
static struct drm_dp_mst_port *drm_dp_get_last_connected_port_to_mstb(struct drm_dp_mst_branch *mstb)
{
	if (!mstb->port_parent)
		return NULL;

	if (mstb->port_parent->mstb != mstb)
		return mstb->port_parent;

	return drm_dp_get_last_connected_port_to_mstb(mstb->port_parent->parent);
}
/*
 * Searches upwards in the topology starting from mstb to try to find the
 * closest available parent of mstb that's still connected to the rest of the
 * topology. This can be used in order to perform operations like releasing
 * payloads, where the branch device which owned the payload may no longer be
 * around and thus would require that the payload on the last living relative
 * be released instead.
 */
static struct drm_dp_mst_branch *
drm_dp_get_last_connected_port_and_mstb(struct drm_dp_mst_topology_mgr *mgr,
					struct drm_dp_mst_branch *mstb,
					int *port_num)
{
	struct drm_dp_mst_branch *rmstb = NULL;
	struct drm_dp_mst_port *found_port;

	mutex_lock(&mgr->lock);
	if (!mgr->mst_primary)
		goto out;

	do {
		found_port = drm_dp_get_last_connected_port_to_mstb(mstb);
		if (!found_port)
			break;

		if (drm_dp_mst_topology_try_get_mstb(found_port->parent)) {
			rmstb = found_port->parent;
			*port_num = found_port->port_num;
		} else {
			/* Search again, starting from this parent */
			mstb = found_port->parent;
		}
	} while (!rmstb);
out:
	mutex_unlock(&mgr->lock);
	return rmstb;
}
static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
				   struct drm_dp_mst_port *port,
				   int id,
				   int pbn)
{
	struct drm_dp_sideband_msg_tx *txmsg;
	struct drm_dp_mst_branch *mstb;
	int len, ret, port_num;
	u8 sinks[DRM_DP_MAX_SDP_STREAMS];
	int i;

	port_num = port->port_num;
	mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
	if (!mstb) {
		mstb = drm_dp_get_last_connected_port_and_mstb(mgr,
							       port->parent,
							       &port_num);

		if (!mstb)
			return -EINVAL;
	}

	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
	if (!txmsg) {
		ret = -ENOMEM;
		goto fail_put;
	}

	for (i = 0; i < port->num_sdp_streams; i++)
		sinks[i] = i;

	txmsg->dst = mstb;
	len = build_allocate_payload(txmsg, port_num,
				     id,
				     pbn, port->num_sdp_streams, sinks);

	drm_dp_queue_down_tx(mgr, txmsg);

	/*
	 * FIXME: there is a small chance that between getting the last
	 * connected mstb and sending the payload message, the last connected
	 * mstb could also be removed from the topology. In the future, this
	 * needs to be fixed by restarting the
	 * drm_dp_get_last_connected_port_and_mstb() search in the event of a
	 * timeout if the topology is still connected to the system.
	 */
	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
	if (ret > 0) {
		if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
			ret = -EINVAL;
		else
			ret = 0;
	}
	kfree(txmsg);
fail_put:
	drm_dp_mst_topology_put_mstb(mstb);
	return ret;
}
2211 int drm_dp_send_power_updown_phy(struct drm_dp_mst_topology_mgr *mgr,
2212 struct drm_dp_mst_port *port, bool power_up)
2214 struct drm_dp_sideband_msg_tx *txmsg;
2217 port = drm_dp_mst_topology_get_port_validated(mgr, port);
2221 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
2223 drm_dp_mst_topology_put_port(port);
2227 txmsg->dst = port->parent;
2228 len = build_power_updown_phy(txmsg, port->port_num, power_up);
2229 drm_dp_queue_down_tx(mgr, txmsg);
2231 ret = drm_dp_mst_wait_tx_reply(port->parent, txmsg);
2233 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
2239 drm_dp_mst_topology_put_port(port);
2243 EXPORT_SYMBOL(drm_dp_send_power_updown_phy);
2245 static int drm_dp_create_payload_step1(struct drm_dp_mst_topology_mgr *mgr,
2247 struct drm_dp_payload *payload)
2251 ret = drm_dp_dpcd_write_payload(mgr, id, payload);
2253 payload->payload_state = 0;
2256 payload->payload_state = DP_PAYLOAD_LOCAL;
2260 static int drm_dp_create_payload_step2(struct drm_dp_mst_topology_mgr *mgr,
2261 struct drm_dp_mst_port *port,
2263 struct drm_dp_payload *payload)
2266 ret = drm_dp_payload_send_msg(mgr, port, id, port->vcpi.pbn);
2269 payload->payload_state = DP_PAYLOAD_REMOTE;
2273 static int drm_dp_destroy_payload_step1(struct drm_dp_mst_topology_mgr *mgr,
2274 struct drm_dp_mst_port *port,
2276 struct drm_dp_payload *payload)
2278 DRM_DEBUG_KMS("\n");
2279 /* it's okay for these to fail */
2281 drm_dp_payload_send_msg(mgr, port, id, 0);
2284 drm_dp_dpcd_write_payload(mgr, id, payload);
2285 payload->payload_state = DP_PAYLOAD_DELETE_LOCAL;
2289 static int drm_dp_destroy_payload_step2(struct drm_dp_mst_topology_mgr *mgr,
2291 struct drm_dp_payload *payload)
2293 payload->payload_state = 0;
2298 * drm_dp_update_payload_part1() - Execute payload update part 1
2299 * @mgr: manager to use.
2301 * This iterates over all proposed virtual channels, and tries to
2302 * allocate space in the link for them. For 0->slots transitions,
2303 * this step just writes the VCPI to the MST device. For slots->0
2304 * transitions, this writes the updated VCPIs and removes the
2305 * remote VC payloads.
2307  * After calling this, the driver should generate ACT and payload
2308  * packets.
2310 int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
2312 struct drm_dp_payload req_payload;
2313 struct drm_dp_mst_port *port;
2317 mutex_lock(&mgr->payload_lock);
2318 for (i = 0; i < mgr->max_payloads; i++) {
2319 struct drm_dp_vcpi *vcpi = mgr->proposed_vcpis[i];
2320 struct drm_dp_payload *payload = &mgr->payloads[i];
2321 bool put_port = false;
2323 /* solve the current payloads - compare to the hw ones
2324 - update the hw view */
2325 req_payload.start_slot = cur_slots;
2327 port = container_of(vcpi, struct drm_dp_mst_port,
2330 		/* Validated ports don't matter if we're releasing VCPI */
2333 if (vcpi->num_slots) {
2334 port = drm_dp_mst_topology_get_port_validated(
2337 mutex_unlock(&mgr->payload_lock);
2343 req_payload.num_slots = vcpi->num_slots;
2344 req_payload.vcpi = vcpi->vcpi;
2347 req_payload.num_slots = 0;
2350 payload->start_slot = req_payload.start_slot;
2351 /* work out what is required to happen with this payload */
2352 if (payload->num_slots != req_payload.num_slots) {
2354 /* need to push an update for this payload */
2355 if (req_payload.num_slots) {
2356 drm_dp_create_payload_step1(mgr, vcpi->vcpi,
2358 payload->num_slots = req_payload.num_slots;
2359 payload->vcpi = req_payload.vcpi;
2361 } else if (payload->num_slots) {
2362 payload->num_slots = 0;
2363 drm_dp_destroy_payload_step1(mgr, port,
2366 req_payload.payload_state =
2367 payload->payload_state;
2368 payload->start_slot = 0;
2370 payload->payload_state = req_payload.payload_state;
2372 cur_slots += req_payload.num_slots;
2375 drm_dp_mst_topology_put_port(port);
2378 for (i = 0; i < mgr->max_payloads; i++) {
2379 if (mgr->payloads[i].payload_state != DP_PAYLOAD_DELETE_LOCAL)
2382 DRM_DEBUG_KMS("removing payload %d\n", i);
2383 for (j = i; j < mgr->max_payloads - 1; j++) {
2384 mgr->payloads[j] = mgr->payloads[j + 1];
2385 mgr->proposed_vcpis[j] = mgr->proposed_vcpis[j + 1];
2387 if (mgr->proposed_vcpis[j] &&
2388 mgr->proposed_vcpis[j]->num_slots) {
2389 set_bit(j + 1, &mgr->payload_mask);
2391 clear_bit(j + 1, &mgr->payload_mask);
2395 memset(&mgr->payloads[mgr->max_payloads - 1], 0,
2396 sizeof(struct drm_dp_payload));
2397 mgr->proposed_vcpis[mgr->max_payloads - 1] = NULL;
2398 clear_bit(mgr->max_payloads, &mgr->payload_mask);
2400 mutex_unlock(&mgr->payload_lock);
2404 EXPORT_SYMBOL(drm_dp_update_payload_part1);
2407 * drm_dp_update_payload_part2() - Execute payload update part 2
2408 * @mgr: manager to use.
2410 * This iterates over all proposed virtual channels, and tries to
2411 * allocate space in the link for them. For 0->slots transitions,
2412 * this step writes the remote VC payload commands. For slots->0
2413 * this just resets some internal state.
2415 int drm_dp_update_payload_part2(struct drm_dp_mst_topology_mgr *mgr)
2417 struct drm_dp_mst_port *port;
2420 mutex_lock(&mgr->payload_lock);
2421 for (i = 0; i < mgr->max_payloads; i++) {
2423 if (!mgr->proposed_vcpis[i])
2426 port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
2428 DRM_DEBUG_KMS("payload %d %d\n", i, mgr->payloads[i].payload_state);
2429 if (mgr->payloads[i].payload_state == DP_PAYLOAD_LOCAL) {
2430 ret = drm_dp_create_payload_step2(mgr, port, mgr->proposed_vcpis[i]->vcpi, &mgr->payloads[i]);
2431 } else if (mgr->payloads[i].payload_state == DP_PAYLOAD_DELETE_LOCAL) {
2432 ret = drm_dp_destroy_payload_step2(mgr, mgr->proposed_vcpis[i]->vcpi, &mgr->payloads[i]);
2435 mutex_unlock(&mgr->payload_lock);
2439 mutex_unlock(&mgr->payload_lock);
2442 EXPORT_SYMBOL(drm_dp_update_payload_part2);
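#if 0 /* illustrative sketch only - a hypothetical driver-side sequence */
/*
 * A minimal sketch of the commit sequence a driver is expected to follow
 * (the helpers themselves don't enforce it): write the local payload table
 * with part 1, trigger ACT generation in the source hardware (driver
 * specific, omitted here), wait for the sink to signal ACT handled, then
 * program the remote payloads with part 2.
 */
static void example_mst_commit_payloads(struct drm_dp_mst_topology_mgr *mgr)
{
	drm_dp_update_payload_part1(mgr);

	/* ...driver-specific: enable the stream and send the ACT... */

	drm_dp_check_act_status(mgr);
	drm_dp_update_payload_part2(mgr);
}
#endif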
2444 #if 0 /* unused as of yet */
2445 static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr,
2446 struct drm_dp_mst_port *port,
2447 int offset, int size)
2450 struct drm_dp_sideband_msg_tx *txmsg;
2452 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
2456 	len = build_dpcd_read(txmsg, port->port_num, offset, size);
2457 txmsg->dst = port->parent;
2459 drm_dp_queue_down_tx(mgr, txmsg);
2465 static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
2466 struct drm_dp_mst_port *port,
2467 int offset, int size, u8 *bytes)
2471 struct drm_dp_sideband_msg_tx *txmsg;
2472 struct drm_dp_mst_branch *mstb;
2474 mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
2478 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
2484 len = build_dpcd_write(txmsg, port->port_num, offset, size, bytes);
2487 drm_dp_queue_down_tx(mgr, txmsg);
2489 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
2491 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
2498 drm_dp_mst_topology_put_mstb(mstb);
2502 static int drm_dp_encode_up_ack_reply(struct drm_dp_sideband_msg_tx *msg, u8 req_type)
2504 struct drm_dp_sideband_msg_reply_body reply;
2506 reply.reply_type = DP_SIDEBAND_REPLY_ACK;
2507 reply.req_type = req_type;
2508 drm_dp_encode_sideband_reply(&reply, msg);
2512 static int drm_dp_send_up_ack_reply(struct drm_dp_mst_topology_mgr *mgr,
2513 struct drm_dp_mst_branch *mstb,
2514 int req_type, int seqno, bool broadcast)
2516 struct drm_dp_sideband_msg_tx *txmsg;
2518 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
2523 txmsg->seqno = seqno;
2524 drm_dp_encode_up_ack_reply(txmsg, req_type);
2526 mutex_lock(&mgr->qlock);
2528 process_single_up_tx_qlock(mgr, txmsg);
2530 mutex_unlock(&mgr->qlock);
2536 static bool drm_dp_get_vc_payload_bw(int dp_link_bw,
2540 switch (dp_link_bw) {
2542 DRM_DEBUG_KMS("invalid link bandwidth in DPCD: %x (link count: %d)\n",
2543 dp_link_bw, dp_link_count);
2546 case DP_LINK_BW_1_62:
2547 *out = 3 * dp_link_count;
2549 case DP_LINK_BW_2_7:
2550 *out = 5 * dp_link_count;
2552 case DP_LINK_BW_5_4:
2553 *out = 10 * dp_link_count;
2555 case DP_LINK_BW_8_1:
2556 *out = 15 * dp_link_count;
2563 * drm_dp_mst_topology_mgr_set_mst() - Set the MST state for a topology manager
2564 * @mgr: manager to set state for
2565 * @mst_state: true to enable MST on this connector - false to disable.
2567 * This is called by the driver when it detects an MST capable device plugged
2568 * into a DP MST capable port, or when a DP MST capable device is unplugged.
2570 int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool mst_state)
2573 struct drm_dp_mst_branch *mstb = NULL;
2575 mutex_lock(&mgr->lock);
2576 if (mst_state == mgr->mst_state)
2579 mgr->mst_state = mst_state;
2580 /* set the device into MST mode */
2582 WARN_ON(mgr->mst_primary);
2585 ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE);
2586 if (ret != DP_RECEIVER_CAP_SIZE) {
2587 DRM_DEBUG_KMS("failed to read DPCD\n");
2591 if (!drm_dp_get_vc_payload_bw(mgr->dpcd[1],
2592 mgr->dpcd[2] & DP_MAX_LANE_COUNT_MASK,
2598 /* add initial branch device at LCT 1 */
2599 mstb = drm_dp_add_mst_branch_device(1, NULL);
2606 /* give this the main reference */
2607 mgr->mst_primary = mstb;
2608 drm_dp_mst_topology_get_mstb(mgr->mst_primary);
2610 ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
2611 DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC);
2617 struct drm_dp_payload reset_pay;
2618 reset_pay.start_slot = 0;
2619 reset_pay.num_slots = 0x3f;
2620 drm_dp_dpcd_write_payload(mgr, 0, &reset_pay);
2623 queue_work(system_long_wq, &mgr->work);
2627 /* disable MST on the device */
2628 mstb = mgr->mst_primary;
2629 mgr->mst_primary = NULL;
2630 /* this can fail if the device is gone */
2631 drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 0);
2633 memset(mgr->payloads, 0, mgr->max_payloads * sizeof(struct drm_dp_payload));
2634 mgr->payload_mask = 0;
2635 set_bit(0, &mgr->payload_mask);
2640 mutex_unlock(&mgr->lock);
2642 drm_dp_mst_topology_put_mstb(mstb);
2646 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_set_mst);
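#if 0 /* illustrative sketch only - a hypothetical long-HPD handler */
/*
 * A minimal sketch of a long-HPD handler, assuming the driver probes
 * DP_MSTM_CAP to decide whether the newly plugged sink speaks MST:
 */
static void example_long_hpd(struct drm_dp_mst_topology_mgr *mgr,
			     struct drm_dp_aux *aux)
{
	u8 mstm_cap;
	bool mst_capable;

	mst_capable = drm_dp_dpcd_readb(aux, DP_MSTM_CAP, &mstm_cap) == 1 &&
		      (mstm_cap & DP_MST_CAP);

	if (mst_capable != mgr->mst_state)
		drm_dp_mst_topology_mgr_set_mst(mgr, mst_capable);
}
#endif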
2649 * drm_dp_mst_topology_mgr_suspend() - suspend the MST manager
2650 * @mgr: manager to suspend
2652 * This function tells the MST device that we can't handle UP messages
2653 * anymore. This should stop it from sending any since we are suspended.
2655 void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr)
2657 mutex_lock(&mgr->lock);
2658 drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
2659 DP_MST_EN | DP_UPSTREAM_IS_SRC);
2660 mutex_unlock(&mgr->lock);
2661 flush_work(&mgr->work);
2662 flush_work(&mgr->destroy_connector_work);
2664 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_suspend);
2667 * drm_dp_mst_topology_mgr_resume() - resume the MST manager
2668 * @mgr: manager to resume
2670  * This will fetch the DPCD and see if the device is still there;
2671  * if it is, it will rewrite the MSTM control bits and return.
2673  * If the device fails, this returns -1, and the driver should do
2674  * a full MST reprobe, in case we were undocked.
2676 int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr)
2680 mutex_lock(&mgr->lock);
2682 if (mgr->mst_primary) {
2686 sret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE);
2687 if (sret != DP_RECEIVER_CAP_SIZE) {
2688 DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
2693 ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
2694 DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC);
2696 DRM_DEBUG_KMS("mst write failed - undocked during suspend?\n");
2701 /* Some hubs forget their guids after they resume */
2702 sret = drm_dp_dpcd_read(mgr->aux, DP_GUID, guid, 16);
2704 DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
2708 drm_dp_check_mstb_guid(mgr->mst_primary, guid);
2715 mutex_unlock(&mgr->lock);
2718 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_resume);
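#if 0 /* illustrative sketch only - a hypothetical system-resume path */
/*
 * A minimal sketch of the resume handling described above: if the manager
 * can't re-attach to the hub, tear MST down so a full reprobe starts over.
 */
static void example_mst_resume(struct drm_dp_mst_topology_mgr *mgr)
{
	if (mgr->mst_state && drm_dp_mst_topology_mgr_resume(mgr) < 0)
		drm_dp_mst_topology_mgr_set_mst(mgr, false);
}
#endif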
2720 static bool drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up)
2724 int replylen, origlen, curreply;
2726 struct drm_dp_sideband_msg_rx *msg;
2727 int basereg = up ? DP_SIDEBAND_MSG_UP_REQ_BASE : DP_SIDEBAND_MSG_DOWN_REP_BASE;
2728 msg = up ? &mgr->up_req_recv : &mgr->down_rep_recv;
2730 len = min(mgr->max_dpcd_transaction_bytes, 16);
2731 ret = drm_dp_dpcd_read(mgr->aux, basereg,
2734 DRM_DEBUG_KMS("failed to read DPCD down rep %d %d\n", len, ret);
2737 ret = drm_dp_sideband_msg_build(msg, replyblock, len, true);
2739 DRM_DEBUG_KMS("sideband msg build failed %d\n", replyblock[0]);
2742 replylen = msg->curchunk_len + msg->curchunk_hdrlen;
2747 while (replylen > 0) {
2748 len = min3(replylen, mgr->max_dpcd_transaction_bytes, 16);
2749 ret = drm_dp_dpcd_read(mgr->aux, basereg + curreply,
2752 DRM_DEBUG_KMS("failed to read a chunk (len %d, ret %d)\n",
2757 ret = drm_dp_sideband_msg_build(msg, replyblock, len, false);
2759 DRM_DEBUG_KMS("failed to build sideband msg\n");
2769 static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
2773 if (!drm_dp_get_one_sb_msg(mgr, false)) {
2774 memset(&mgr->down_rep_recv, 0,
2775 sizeof(struct drm_dp_sideband_msg_rx));
2779 if (mgr->down_rep_recv.have_eomt) {
2780 struct drm_dp_sideband_msg_tx *txmsg;
2781 struct drm_dp_mst_branch *mstb;
2783 mstb = drm_dp_get_mst_branch_device(mgr,
2784 mgr->down_rep_recv.initial_hdr.lct,
2785 mgr->down_rep_recv.initial_hdr.rad);
2788 DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->down_rep_recv.initial_hdr.lct);
2789 memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
2793 /* find the message */
2794 slot = mgr->down_rep_recv.initial_hdr.seqno;
2795 mutex_lock(&mgr->qlock);
2796 txmsg = mstb->tx_slots[slot];
2797 /* remove from slots */
2798 mutex_unlock(&mgr->qlock);
2801 DRM_DEBUG_KMS("Got MST reply with no msg %p %d %d %02x %02x\n",
2803 mgr->down_rep_recv.initial_hdr.seqno,
2804 mgr->down_rep_recv.initial_hdr.lct,
2805 mgr->down_rep_recv.initial_hdr.rad[0],
2806 mgr->down_rep_recv.msg[0]);
2807 drm_dp_mst_topology_put_mstb(mstb);
2808 memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
2812 drm_dp_sideband_parse_reply(&mgr->down_rep_recv, &txmsg->reply);
2814 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
2815 DRM_DEBUG_KMS("Got NAK reply: req 0x%02x (%s), reason 0x%02x (%s), nak data 0x%02x\n",
2816 txmsg->reply.req_type,
2817 drm_dp_mst_req_type_str(txmsg->reply.req_type),
2818 txmsg->reply.u.nak.reason,
2819 drm_dp_mst_nak_reason_str(txmsg->reply.u.nak.reason),
2820 txmsg->reply.u.nak.nak_data);
2822 memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
2823 drm_dp_mst_topology_put_mstb(mstb);
2825 mutex_lock(&mgr->qlock);
2826 txmsg->state = DRM_DP_SIDEBAND_TX_RX;
2827 mstb->tx_slots[slot] = NULL;
2828 mutex_unlock(&mgr->qlock);
2830 wake_up_all(&mgr->tx_waitq);
2835 static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
2839 if (!drm_dp_get_one_sb_msg(mgr, true)) {
2840 memset(&mgr->up_req_recv, 0,
2841 sizeof(struct drm_dp_sideband_msg_rx));
2845 if (mgr->up_req_recv.have_eomt) {
2846 struct drm_dp_sideband_msg_req_body msg;
2847 struct drm_dp_mst_branch *mstb = NULL;
2850 if (!mgr->up_req_recv.initial_hdr.broadcast) {
2851 mstb = drm_dp_get_mst_branch_device(mgr,
2852 mgr->up_req_recv.initial_hdr.lct,
2853 mgr->up_req_recv.initial_hdr.rad);
2855 DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct);
2856 memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
2861 seqno = mgr->up_req_recv.initial_hdr.seqno;
2862 drm_dp_sideband_parse_req(&mgr->up_req_recv, &msg);
2864 if (msg.req_type == DP_CONNECTION_STATUS_NOTIFY) {
2865 drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, msg.req_type, seqno, false);
2868 mstb = drm_dp_get_mst_branch_device_by_guid(mgr, msg.u.conn_stat.guid);
2871 DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct);
2872 memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
2876 drm_dp_update_port(mstb, &msg.u.conn_stat);
2878 DRM_DEBUG_KMS("Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n", msg.u.conn_stat.port_number, msg.u.conn_stat.legacy_device_plug_status, msg.u.conn_stat.displayport_device_plug_status, msg.u.conn_stat.message_capability_status, msg.u.conn_stat.input_port, msg.u.conn_stat.peer_device_type);
2879 drm_kms_helper_hotplug_event(mgr->dev);
2881 } else if (msg.req_type == DP_RESOURCE_STATUS_NOTIFY) {
2882 drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, msg.req_type, seqno, false);
2884 mstb = drm_dp_get_mst_branch_device_by_guid(mgr, msg.u.resource_stat.guid);
2887 DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct);
2888 memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
2892 DRM_DEBUG_KMS("Got RSN: pn: %d avail_pbn %d\n", msg.u.resource_stat.port_number, msg.u.resource_stat.available_pbn);
2896 drm_dp_mst_topology_put_mstb(mstb);
2898 memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
2904 * drm_dp_mst_hpd_irq() - MST hotplug IRQ notify
2905 * @mgr: manager to notify irq for.
2906 * @esi: 4 bytes from SINK_COUNT_ESI
2907 * @handled: whether the hpd interrupt was consumed or not
2909 * This should be called from the driver when it detects a short IRQ,
2910 * along with the value of the DEVICE_SERVICE_IRQ_VECTOR_ESI0. The
2911  * topology manager will process the sideband messages received as a result
2912  * of this.
2914 int drm_dp_mst_hpd_irq(struct drm_dp_mst_topology_mgr *mgr, u8 *esi, bool *handled)
2921 if (sc != mgr->sink_count) {
2922 mgr->sink_count = sc;
2926 if (esi[1] & DP_DOWN_REP_MSG_RDY) {
2927 ret = drm_dp_mst_handle_down_rep(mgr);
2931 if (esi[1] & DP_UP_REQ_MSG_RDY) {
2932 ret |= drm_dp_mst_handle_up_req(mgr);
2936 drm_dp_mst_kick_tx(mgr);
2939 EXPORT_SYMBOL(drm_dp_mst_hpd_irq);
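#if 0 /* illustrative sketch only - a hypothetical short-HPD handler */
/*
 * A minimal sketch of the calling convention described above, assuming the
 * DP 1.2 ESI layout: read the ESI block starting at DP_SINK_COUNT_ESI, let
 * the topology manager process it, then ack the vector bytes it consumed.
 */
static void example_short_hpd(struct drm_dp_mst_topology_mgr *mgr,
			      struct drm_dp_aux *aux)
{
	u8 esi[14];
	bool handled = false;

	if (drm_dp_dpcd_read(aux, DP_SINK_COUNT_ESI, esi, 14) != 14)
		return;

	drm_dp_mst_hpd_irq(mgr, esi, &handled);
	if (handled)
		drm_dp_dpcd_write(aux, DP_SINK_COUNT_ESI + 1, &esi[1], 3);
}
#endif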
2942 * drm_dp_mst_detect_port() - get connection status for an MST port
2943 * @connector: DRM connector for this port
2944 * @mgr: manager for this port
2945 * @port: unverified pointer to a port
2947 * This returns the current connection state for a port. It validates the
2948  * port pointer still exists so the caller doesn't require a reference.
2950 enum drm_connector_status drm_dp_mst_detect_port(struct drm_connector *connector,
2951 struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
2953 enum drm_connector_status status = connector_status_disconnected;
2955 /* we need to search for the port in the mgr in case it's gone */
2956 port = drm_dp_mst_topology_get_port_validated(mgr, port);
2958 return connector_status_disconnected;
2963 switch (port->pdt) {
2964 case DP_PEER_DEVICE_NONE:
2965 case DP_PEER_DEVICE_MST_BRANCHING:
2968 case DP_PEER_DEVICE_SST_SINK:
2969 status = connector_status_connected;
2970 /* for logical ports - cache the EDID */
2971 if (port->port_num >= 8 && !port->cached_edid) {
2972 port->cached_edid = drm_get_edid(connector, &port->aux.ddc);
2975 case DP_PEER_DEVICE_DP_LEGACY_CONV:
2977 status = connector_status_connected;
2981 drm_dp_mst_topology_put_port(port);
2984 EXPORT_SYMBOL(drm_dp_mst_detect_port);
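#if 0 /* illustrative sketch only - the connector wrapper is hypothetical */
struct example_mst_connector {
	struct drm_connector base;
	struct drm_dp_mst_topology_mgr *mgr;
	struct drm_dp_mst_port *port;
};

/* A driver's MST connector .detect() callback boils down to: */
static enum drm_connector_status
example_mst_detect(struct drm_connector *connector, bool force)
{
	struct example_mst_connector *c =
		container_of(connector, struct example_mst_connector, base);

	return drm_dp_mst_detect_port(connector, c->mgr, c->port);
}
#endif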
2987 * drm_dp_mst_port_has_audio() - Check whether port has audio capability or not
2988 * @mgr: manager for this port
2989 * @port: unverified pointer to a port.
2991 * This returns whether the port supports audio or not.
2993 bool drm_dp_mst_port_has_audio(struct drm_dp_mst_topology_mgr *mgr,
2994 struct drm_dp_mst_port *port)
2998 port = drm_dp_mst_topology_get_port_validated(mgr, port);
3001 ret = port->has_audio;
3002 drm_dp_mst_topology_put_port(port);
3005 EXPORT_SYMBOL(drm_dp_mst_port_has_audio);
3008 * drm_dp_mst_get_edid() - get EDID for an MST port
3009 * @connector: toplevel connector to get EDID for
3010 * @mgr: manager for this port
3011 * @port: unverified pointer to a port.
3013  * This returns an EDID for the port connected to a connector. It
3014  * validates the pointer still exists so the caller doesn't require a
3015  * reference.
3017 struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
3019 struct edid *edid = NULL;
3021 /* we need to search for the port in the mgr in case it's gone */
3022 port = drm_dp_mst_topology_get_port_validated(mgr, port);
3026 if (port->cached_edid)
3027 edid = drm_edid_duplicate(port->cached_edid);
3029 edid = drm_get_edid(connector, &port->aux.ddc);
3031 port->has_audio = drm_detect_monitor_audio(edid);
3032 drm_dp_mst_topology_put_port(port);
3035 EXPORT_SYMBOL(drm_dp_mst_get_edid);
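#if 0 /* illustrative sketch only; mgr/port passed explicitly for brevity */
/*
 * A minimal sketch of an MST connector .get_modes() path built on
 * drm_dp_mst_get_edid():
 */
static int example_mst_get_modes(struct drm_connector *connector,
				 struct drm_dp_mst_topology_mgr *mgr,
				 struct drm_dp_mst_port *port)
{
	struct edid *edid;
	int ret = 0;

	edid = drm_dp_mst_get_edid(connector, mgr, port);
	if (edid) {
		drm_connector_update_edid_property(connector, edid);
		ret = drm_add_edid_modes(connector, edid);
		kfree(edid);
	}

	return ret;
}
#endif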
3038 * drm_dp_find_vcpi_slots() - Find VCPI slots for this PBN value
3039 * @mgr: manager to use
3040 * @pbn: payload bandwidth to convert into slots.
3042 * Calculate the number of VCPI slots that will be required for the given PBN
3043  * value. This function is deprecated and should not be used in atomic drivers.
3047 * The total slots required for this port, or error.
3049 int drm_dp_find_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr,
3054 num_slots = DIV_ROUND_UP(pbn, mgr->pbn_div);
3056 /* max. time slots - one slot for MTP header */
3061 EXPORT_SYMBOL(drm_dp_find_vcpi_slots);
3063 static int drm_dp_init_vcpi(struct drm_dp_mst_topology_mgr *mgr,
3064 struct drm_dp_vcpi *vcpi, int pbn, int slots)
3068 /* max. time slots - one slot for MTP header */
3073 vcpi->aligned_pbn = slots * mgr->pbn_div;
3074 vcpi->num_slots = slots;
3076 ret = drm_dp_mst_assign_payload_id(mgr, vcpi);
3083 * drm_dp_atomic_find_vcpi_slots() - Find and add VCPI slots to the state
3084 * @state: global atomic state
3085 * @mgr: MST topology manager for the port
3086 * @port: port to find vcpi slots for
3087 * @pbn: bandwidth required for the mode in PBN
3089 * Allocates VCPI slots to @port, replacing any previous VCPI allocations it
3090 * may have had. Any atomic drivers which support MST must call this function
3091 * in their &drm_encoder_helper_funcs.atomic_check() callback to change the
3092 * current VCPI allocation for the new state, but only when
3093 * &drm_crtc_state.mode_changed or &drm_crtc_state.connectors_changed is set
3094 * to ensure compatibility with userspace applications that still use the
3095 * legacy modesetting UAPI.
3097 * Allocations set by this function are not checked against the bandwidth
3098  * constraints of @mgr until the driver calls drm_dp_mst_atomic_check().
3100 * Additionally, it is OK to call this function multiple times on the same
3101  * @port as needed. It is not OK, however, to call this function and
3102 * drm_dp_atomic_release_vcpi_slots() in the same atomic check phase.
3105 * drm_dp_atomic_release_vcpi_slots()
3106 * drm_dp_mst_atomic_check()
3109 * Total slots in the atomic state assigned for this port, or a negative error
3110 * code if the port no longer exists
3112 int drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state,
3113 struct drm_dp_mst_topology_mgr *mgr,
3114 struct drm_dp_mst_port *port, int pbn)
3116 struct drm_dp_mst_topology_state *topology_state;
3117 struct drm_dp_vcpi_allocation *pos, *vcpi = NULL;
3118 int prev_slots, req_slots, ret;
3120 topology_state = drm_atomic_get_mst_topology_state(state, mgr);
3121 if (IS_ERR(topology_state))
3122 return PTR_ERR(topology_state);
3124 /* Find the current allocation for this port, if any */
3125 list_for_each_entry(pos, &topology_state->vcpis, next) {
3126 if (pos->port == port) {
3128 prev_slots = vcpi->vcpi;
3131 * This should never happen, unless the driver tries
3132 			 * releasing and allocating the same VCPI allocation, which is an error.
3135 if (WARN_ON(!prev_slots)) {
3136 DRM_ERROR("cannot allocate and release VCPI on [MST PORT:%p] in the same state\n",
3147 req_slots = DIV_ROUND_UP(pbn, mgr->pbn_div);
3149 DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] [MST PORT:%p] VCPI %d -> %d\n",
3150 port->connector->base.id, port->connector->name,
3151 port, prev_slots, req_slots);
3153 /* Add the new allocation to the state */
3155 vcpi = kzalloc(sizeof(*vcpi), GFP_KERNEL);
3159 drm_dp_mst_get_port_malloc(port);
3161 list_add(&vcpi->next, &topology_state->vcpis);
3163 vcpi->vcpi = req_slots;
3168 EXPORT_SYMBOL(drm_dp_atomic_find_vcpi_slots);
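#if 0 /* illustrative sketch only - a hypothetical encoder atomic_check */
/*
 * A minimal sketch of the atomic_check side of VCPI management, assuming a
 * 24 bpp stream: convert the adjusted mode to PBN, then reserve slots in
 * the atomic state. The actual bandwidth validation happens later, in
 * drm_dp_mst_atomic_check().
 */
static int example_mst_encoder_atomic_check(struct drm_crtc_state *crtc_state,
					    struct drm_dp_mst_topology_mgr *mgr,
					    struct drm_dp_mst_port *port)
{
	int pbn, slots;

	if (!crtc_state->mode_changed && !crtc_state->connectors_changed)
		return 0;

	pbn = drm_dp_calc_pbn_mode(crtc_state->adjusted_mode.crtc_clock, 24);
	slots = drm_dp_atomic_find_vcpi_slots(crtc_state->state, mgr,
					      port, pbn);

	return slots < 0 ? slots : 0;
}
#endif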
3171 * drm_dp_atomic_release_vcpi_slots() - Release allocated vcpi slots
3172 * @state: global atomic state
3173 * @mgr: MST topology manager for the port
3174 * @port: The port to release the VCPI slots from
3176 * Releases any VCPI slots that have been allocated to a port in the atomic
3177 * state. Any atomic drivers which support MST must call this function in
3178 * their &drm_connector_helper_funcs.atomic_check() callback when the
3179 * connector will no longer have VCPI allocated (e.g. because its CRTC was
3180  * removed) but had VCPI allocated in the previous atomic state.
3182 * It is OK to call this even if @port has been removed from the system.
3183 * Additionally, it is OK to call this function multiple times on the same
3184  * @port as needed. It is not OK, however, to call this function and
3185  * drm_dp_atomic_find_vcpi_slots() on the same @port in a single atomic check
3186  * phase.
3189 * drm_dp_atomic_find_vcpi_slots()
3190 * drm_dp_mst_atomic_check()
3193 * 0 if all slots for this port were added back to
3194 * &drm_dp_mst_topology_state.avail_slots or negative error code
3196 int drm_dp_atomic_release_vcpi_slots(struct drm_atomic_state *state,
3197 struct drm_dp_mst_topology_mgr *mgr,
3198 struct drm_dp_mst_port *port)
3200 struct drm_dp_mst_topology_state *topology_state;
3201 struct drm_dp_vcpi_allocation *pos;
3204 topology_state = drm_atomic_get_mst_topology_state(state, mgr);
3205 if (IS_ERR(topology_state))
3206 return PTR_ERR(topology_state);
3208 list_for_each_entry(pos, &topology_state->vcpis, next) {
3209 if (pos->port == port) {
3214 if (WARN_ON(!found)) {
3215 DRM_ERROR("no VCPI for [MST PORT:%p] found in mst state %p\n",
3216 port, &topology_state->base);
3220 DRM_DEBUG_ATOMIC("[MST PORT:%p] VCPI %d -> 0\n", port, pos->vcpi);
3222 drm_dp_mst_put_port_malloc(port);
3228 EXPORT_SYMBOL(drm_dp_atomic_release_vcpi_slots);
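#if 0 /* illustrative sketch only - a hypothetical connector atomic_check */
/*
 * A minimal sketch of the matching release path: give the slots back when
 * the connector loses its CRTC.
 */
static int example_mst_connector_atomic_check(struct drm_connector *connector,
					      struct drm_connector_state *new_conn_state,
					      struct drm_dp_mst_topology_mgr *mgr,
					      struct drm_dp_mst_port *port)
{
	struct drm_atomic_state *state = new_conn_state->state;
	struct drm_connector_state *old_conn_state =
		drm_atomic_get_old_connector_state(state, connector);

	if (old_conn_state->crtc && !new_conn_state->crtc)
		return drm_dp_atomic_release_vcpi_slots(state, mgr, port);

	return 0;
}
#endif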
3231 * drm_dp_mst_allocate_vcpi() - Allocate a virtual channel
3232 * @mgr: manager for this port
3233 * @port: port to allocate a virtual channel for.
3234 * @pbn: payload bandwidth number to request
3235 * @slots: returned number of slots for this PBN.
3237 bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
3238 struct drm_dp_mst_port *port, int pbn, int slots)
3242 port = drm_dp_mst_topology_get_port_validated(mgr, port);
3249 if (port->vcpi.vcpi > 0) {
3250 DRM_DEBUG_KMS("payload: vcpi %d already allocated for pbn %d - requested pbn %d\n",
3251 port->vcpi.vcpi, port->vcpi.pbn, pbn);
3252 if (pbn == port->vcpi.pbn) {
3253 drm_dp_mst_topology_put_port(port);
3258 ret = drm_dp_init_vcpi(mgr, &port->vcpi, pbn, slots);
3260 DRM_DEBUG_KMS("failed to init vcpi slots=%d max=63 ret=%d\n",
3261 DIV_ROUND_UP(pbn, mgr->pbn_div), ret);
3264 DRM_DEBUG_KMS("initing vcpi for pbn=%d slots=%d\n",
3265 pbn, port->vcpi.num_slots);
3267 /* Keep port allocated until its payload has been removed */
3268 drm_dp_mst_get_port_malloc(port);
3269 drm_dp_mst_topology_put_port(port);
3274 EXPORT_SYMBOL(drm_dp_mst_allocate_vcpi);
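#if 0 /* illustrative sketch only - a hypothetical commit-time enable */
/*
 * A minimal sketch of commit-time stream enablement: turn the slots
 * reserved during atomic check into a real VCPI, then push the local
 * payload table with drm_dp_update_payload_part1().
 */
static void example_mst_enable_stream(struct drm_dp_mst_topology_mgr *mgr,
				      struct drm_dp_mst_port *port,
				      int pbn, int slots)
{
	if (!drm_dp_mst_allocate_vcpi(mgr, port, pbn, slots))
		DRM_ERROR("failed to allocate VCPI for PBN %d\n", pbn);

	drm_dp_update_payload_part1(mgr);
}
#endif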
3276 int drm_dp_mst_get_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
3279 port = drm_dp_mst_topology_get_port_validated(mgr, port);
3283 slots = port->vcpi.num_slots;
3284 drm_dp_mst_topology_put_port(port);
3287 EXPORT_SYMBOL(drm_dp_mst_get_vcpi_slots);
3290 * drm_dp_mst_reset_vcpi_slots() - Reset number of slots to 0 for VCPI
3291 * @mgr: manager for this port
3292 * @port: unverified pointer to a port.
3294  * This just resets the number of slots for the port's VCPI for later programming.
3296 void drm_dp_mst_reset_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
3299 	 * A port with VCPI will remain allocated until its VCPI is
3300 	 * released; no verified ref needed
3303 port->vcpi.num_slots = 0;
3305 EXPORT_SYMBOL(drm_dp_mst_reset_vcpi_slots);
3308 * drm_dp_mst_deallocate_vcpi() - deallocate a VCPI
3309 * @mgr: manager for this port
3310 * @port: port to deallocate vcpi for
3312 * This can be called unconditionally, regardless of whether
3313 * drm_dp_mst_allocate_vcpi() succeeded or not.
3315 void drm_dp_mst_deallocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
3316 struct drm_dp_mst_port *port)
3318 if (!port->vcpi.vcpi)
3321 drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
3322 port->vcpi.num_slots = 0;
3324 port->vcpi.aligned_pbn = 0;
3325 port->vcpi.vcpi = 0;
3326 drm_dp_mst_put_port_malloc(port);
3328 EXPORT_SYMBOL(drm_dp_mst_deallocate_vcpi);
3330 static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
3331 int id, struct drm_dp_payload *payload)
3333 u8 payload_alloc[3], status;
3337 drm_dp_dpcd_writeb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS,
3338 DP_PAYLOAD_TABLE_UPDATED);
3340 payload_alloc[0] = id;
3341 payload_alloc[1] = payload->start_slot;
3342 payload_alloc[2] = payload->num_slots;
3344 ret = drm_dp_dpcd_write(mgr->aux, DP_PAYLOAD_ALLOCATE_SET, payload_alloc, 3);
3346 DRM_DEBUG_KMS("failed to write payload allocation %d\n", ret);
3351 ret = drm_dp_dpcd_readb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
3353 DRM_DEBUG_KMS("failed to read payload table status %d\n", ret);
3357 if (!(status & DP_PAYLOAD_TABLE_UPDATED)) {
3360 usleep_range(10000, 20000);
3363 DRM_DEBUG_KMS("status not set after read payload table status %d\n", status);
3374 * drm_dp_check_act_status() - Check ACT handled status.
3375 * @mgr: manager to use
3377 * Check the payload status bits in the DPCD for ACT handled completion.
3379 int drm_dp_check_act_status(struct drm_dp_mst_topology_mgr *mgr)
3386 ret = drm_dp_dpcd_readb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
3389 DRM_DEBUG_KMS("failed to read payload table status %d\n", ret);
3393 if (status & DP_PAYLOAD_ACT_HANDLED)
3398 } while (count < 30);
3400 if (!(status & DP_PAYLOAD_ACT_HANDLED)) {
3401 DRM_DEBUG_KMS("failed to get ACT bit %d after %d retries\n", status, count);
3409 EXPORT_SYMBOL(drm_dp_check_act_status);
3412 * drm_dp_calc_pbn_mode() - Calculate the PBN for a mode.
3413 * @clock: dot clock for the mode
3414 * @bpp: bpp for the mode.
3416 * This uses the formula in the spec to calculate the PBN value for a mode.
3418 int drm_dp_calc_pbn_mode(int clock, int bpp)
3428 	 * margin 5300ppm + 300ppm ~ 0.6% as per spec, factor is 1.006
3429 	 * The unit of 54/64MBytes/sec is an arbitrary unit chosen based on a
3430 	 * common multiplier to render an integer PBN for all link rate/lane
3431 	 * count combinations:
3433 	 * peak_kbps *= (1006/1000)   account for the 0.6% margin
3434 	 * peak_kbps *= (64/54)       convert to PBN units
3435 	 * peak_kbps /= (8 * 1000)    convert kbits/sec to MBytes/sec
3438 numerator = 64 * 1006;
3439 denominator = 54 * 8 * 1000 * 1000;
3442 peak_kbps = drm_fixp_from_fraction(kbps, denominator);
3444 return drm_fixp2int_ceil(peak_kbps);
3446 EXPORT_SYMBOL(drm_dp_calc_pbn_mode);
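/*
 * Worked example (matches the self-test below): a 154 MHz dotclock at
 * 30 bpp gives kbps = 154000 * 30 = 4620000, so
 * PBN = ceil(4620000 * 64 * 1006 / (54 * 8 * 1000 * 1000))
 *     = ceil(688.55...) = 689.
 */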
3448 static int test_calc_pbn_mode(void)
3451 ret = drm_dp_calc_pbn_mode(154000, 30);
3453 DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n",
3454 154000, 30, 689, ret);
3457 ret = drm_dp_calc_pbn_mode(234000, 30);
3459 DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n",
3460 234000, 30, 1047, ret);
3463 ret = drm_dp_calc_pbn_mode(297000, 24);
3465 DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n",
3466 297000, 24, 1063, ret);
3472 /* we want to kick the TX after we've acked the up/down IRQs. */
3473 static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr)
3475 queue_work(system_long_wq, &mgr->tx_work);
3478 static void drm_dp_mst_dump_mstb(struct seq_file *m,
3479 struct drm_dp_mst_branch *mstb)
3481 struct drm_dp_mst_port *port;
3482 int tabs = mstb->lct;
3486 for (i = 0; i < tabs; i++)
3490 seq_printf(m, "%smst: %p, %d\n", prefix, mstb, mstb->num_ports);
3491 list_for_each_entry(port, &mstb->ports, next) {
3492 seq_printf(m, "%sport: %d: input: %d: pdt: %d, ddps: %d ldps: %d, sdp: %d/%d, %p, conn: %p\n", prefix, port->port_num, port->input, port->pdt, port->ddps, port->ldps, port->num_sdp_streams, port->num_sdp_stream_sinks, port, port->connector);
3494 drm_dp_mst_dump_mstb(m, port->mstb);
3498 #define DP_PAYLOAD_TABLE_SIZE 64
3500 static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
3505 for (i = 0; i < DP_PAYLOAD_TABLE_SIZE; i += 16) {
3506 if (drm_dp_dpcd_read(mgr->aux,
3507 DP_PAYLOAD_TABLE_UPDATE_STATUS + i,
3514 static void fetch_monitor_name(struct drm_dp_mst_topology_mgr *mgr,
3515 struct drm_dp_mst_port *port, char *name,
3518 struct edid *mst_edid;
3520 mst_edid = drm_dp_mst_get_edid(port->connector, mgr, port);
3521 drm_edid_get_monitor_name(mst_edid, name, namelen);
3525  * drm_dp_mst_dump_topology() - dump topology to a seq file.
3526 * @m: seq_file to dump output to
3527 * @mgr: manager to dump current topology for.
3529 * helper to dump MST topology to a seq file for debugfs.
3531 void drm_dp_mst_dump_topology(struct seq_file *m,
3532 struct drm_dp_mst_topology_mgr *mgr)
3535 struct drm_dp_mst_port *port;
3537 mutex_lock(&mgr->lock);
3538 if (mgr->mst_primary)
3539 drm_dp_mst_dump_mstb(m, mgr->mst_primary);
3542 mutex_unlock(&mgr->lock);
3544 mutex_lock(&mgr->payload_lock);
3545 seq_printf(m, "vcpi: %lx %lx %d\n", mgr->payload_mask, mgr->vcpi_mask,
3548 for (i = 0; i < mgr->max_payloads; i++) {
3549 if (mgr->proposed_vcpis[i]) {
3552 port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
3553 fetch_monitor_name(mgr, port, name, sizeof(name));
3554 seq_printf(m, "vcpi %d: %d %d %d sink name: %s\n", i,
3555 port->port_num, port->vcpi.vcpi,
3556 port->vcpi.num_slots,
3557 (*name != 0) ? name : "Unknown");
3559 seq_printf(m, "vcpi %d:unused\n", i);
3561 for (i = 0; i < mgr->max_payloads; i++) {
3562 seq_printf(m, "payload %d: %d, %d, %d\n",
3564 mgr->payloads[i].payload_state,
3565 mgr->payloads[i].start_slot,
3566 mgr->payloads[i].num_slots);
3570 mutex_unlock(&mgr->payload_lock);
3572 mutex_lock(&mgr->lock);
3573 if (mgr->mst_primary) {
3574 u8 buf[DP_PAYLOAD_TABLE_SIZE];
3577 ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, buf, DP_RECEIVER_CAP_SIZE);
3578 seq_printf(m, "dpcd: %*ph\n", DP_RECEIVER_CAP_SIZE, buf);
3579 ret = drm_dp_dpcd_read(mgr->aux, DP_FAUX_CAP, buf, 2);
3580 seq_printf(m, "faux/mst: %*ph\n", 2, buf);
3581 ret = drm_dp_dpcd_read(mgr->aux, DP_MSTM_CTRL, buf, 1);
3582 seq_printf(m, "mst ctrl: %*ph\n", 1, buf);
3584 /* dump the standard OUI branch header */
3585 ret = drm_dp_dpcd_read(mgr->aux, DP_BRANCH_OUI, buf, DP_BRANCH_OUI_HEADER_SIZE);
3586 seq_printf(m, "branch oui: %*phN devid: ", 3, buf);
3587 for (i = 0x3; i < 0x8 && buf[i]; i++)
3588 seq_printf(m, "%c", buf[i]);
3589 seq_printf(m, " revision: hw: %x.%x sw: %x.%x\n",
3590 buf[0x9] >> 4, buf[0x9] & 0xf, buf[0xa], buf[0xb]);
3591 if (dump_dp_payload_table(mgr, buf))
3592 seq_printf(m, "payload table: %*ph\n", DP_PAYLOAD_TABLE_SIZE, buf);
3595 mutex_unlock(&mgr->lock);
3598 EXPORT_SYMBOL(drm_dp_mst_dump_topology);
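#if 0 /* illustrative sketch only - a hypothetical debugfs consumer */
/* A debugfs seq_file show callback is the typical caller: */
static int example_mst_topology_show(struct seq_file *m, void *data)
{
	struct drm_dp_mst_topology_mgr *mgr = m->private;

	drm_dp_mst_dump_topology(m, mgr);

	return 0;
}
#endif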
3600 static void drm_dp_tx_work(struct work_struct *work)
3602 struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, tx_work);
3604 mutex_lock(&mgr->qlock);
3605 if (!list_empty(&mgr->tx_msg_downq))
3606 process_single_down_tx_qlock(mgr);
3607 mutex_unlock(&mgr->qlock);
3610 static void drm_dp_destroy_connector_work(struct work_struct *work)
3612 struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, destroy_connector_work);
3613 struct drm_dp_mst_port *port;
3614 bool send_hotplug = false;
3616 * Not a regular list traverse as we have to drop the destroy
3617 * connector lock before destroying the connector, to avoid AB->BA
3618 * ordering between this lock and the config mutex.
3621 mutex_lock(&mgr->destroy_connector_lock);
3622 port = list_first_entry_or_null(&mgr->destroy_connector_list, struct drm_dp_mst_port, next);
3624 mutex_unlock(&mgr->destroy_connector_lock);
3627 list_del(&port->next);
3628 mutex_unlock(&mgr->destroy_connector_lock);
3630 INIT_LIST_HEAD(&port->next);
3632 mgr->cbs->destroy_connector(mgr, port->connector);
3634 drm_dp_port_teardown_pdt(port, port->pdt);
3635 port->pdt = DP_PEER_DEVICE_NONE;
3637 drm_dp_mst_put_port_malloc(port);
3638 send_hotplug = true;
3641 drm_kms_helper_hotplug_event(mgr->dev);
3644 static struct drm_private_state *
3645 drm_dp_mst_duplicate_state(struct drm_private_obj *obj)
3647 struct drm_dp_mst_topology_state *state, *old_state =
3648 to_dp_mst_topology_state(obj->state);
3649 struct drm_dp_vcpi_allocation *pos, *vcpi;
3651 state = kmemdup(old_state, sizeof(*state), GFP_KERNEL);
3655 __drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);
3657 INIT_LIST_HEAD(&state->vcpis);
3659 list_for_each_entry(pos, &old_state->vcpis, next) {
3660 /* Prune leftover freed VCPI allocations */
3664 vcpi = kmemdup(pos, sizeof(*vcpi), GFP_KERNEL);
3668 drm_dp_mst_get_port_malloc(vcpi->port);
3669 list_add(&vcpi->next, &state->vcpis);
3672 return &state->base;
3675 list_for_each_entry_safe(pos, vcpi, &state->vcpis, next) {
3676 drm_dp_mst_put_port_malloc(pos->port);
3684 static void drm_dp_mst_destroy_state(struct drm_private_obj *obj,
3685 struct drm_private_state *state)
3687 struct drm_dp_mst_topology_state *mst_state =
3688 to_dp_mst_topology_state(state);
3689 struct drm_dp_vcpi_allocation *pos, *tmp;
3691 list_for_each_entry_safe(pos, tmp, &mst_state->vcpis, next) {
3692 /* We only keep references to ports with non-zero VCPIs */
3694 drm_dp_mst_put_port_malloc(pos->port);
3702 drm_dp_mst_atomic_check_topology_state(struct drm_dp_mst_topology_mgr *mgr,
3703 struct drm_dp_mst_topology_state *mst_state)
3705 struct drm_dp_vcpi_allocation *vcpi;
3706 int avail_slots = 63, payload_count = 0;
3708 list_for_each_entry(vcpi, &mst_state->vcpis, next) {
3709 		/* Releasing VCPI is always OK, even if the port is gone */
3711 DRM_DEBUG_ATOMIC("[MST PORT:%p] releases all VCPI slots\n",
3716 DRM_DEBUG_ATOMIC("[MST PORT:%p] requires %d vcpi slots\n",
3717 vcpi->port, vcpi->vcpi);
3719 avail_slots -= vcpi->vcpi;
3720 if (avail_slots < 0) {
3721 DRM_DEBUG_ATOMIC("[MST PORT:%p] not enough VCPI slots in mst state %p (avail=%d)\n",
3722 vcpi->port, mst_state,
3723 avail_slots + vcpi->vcpi);
3727 if (++payload_count > mgr->max_payloads) {
3728 DRM_DEBUG_ATOMIC("[MST MGR:%p] state %p has too many payloads (max=%d)\n",
3729 mgr, mst_state, mgr->max_payloads);
3733 DRM_DEBUG_ATOMIC("[MST MGR:%p] mst state %p VCPI avail=%d used=%d\n",
3734 mgr, mst_state, avail_slots,
3741 * drm_dp_mst_atomic_check - Check that the new state of an MST topology in an
3742 * atomic update is valid
3743 * @state: Pointer to the new &struct drm_dp_mst_topology_state
3745 * Checks the given topology state for an atomic update to ensure that it's
3746 * valid. This includes checking whether there's enough bandwidth to support
3747 * the new VCPI allocations in the atomic update.
3749 * Any atomic drivers supporting DP MST must make sure to call this after
3750 * checking the rest of their state in their
3751 * &drm_mode_config_funcs.atomic_check() callback.
3754 * drm_dp_atomic_find_vcpi_slots()
3755 * drm_dp_atomic_release_vcpi_slots()
3759 * 0 if the new state is valid, negative error code otherwise.
3761 int drm_dp_mst_atomic_check(struct drm_atomic_state *state)
3763 struct drm_dp_mst_topology_mgr *mgr;
3764 struct drm_dp_mst_topology_state *mst_state;
3767 for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
3768 ret = drm_dp_mst_atomic_check_topology_state(mgr, mst_state);
3775 EXPORT_SYMBOL(drm_dp_mst_atomic_check);
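#if 0 /* illustrative sketch only - a hypothetical driver atomic_check */
/* A driver's &drm_mode_config_funcs.atomic_check would typically end with: */
static int example_atomic_check(struct drm_device *dev,
				struct drm_atomic_state *state)
{
	int ret;

	ret = drm_atomic_helper_check(dev, state);
	if (ret)
		return ret;

	return drm_dp_mst_atomic_check(state);
}
#endif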
3777 const struct drm_private_state_funcs drm_dp_mst_topology_state_funcs = {
3778 .atomic_duplicate_state = drm_dp_mst_duplicate_state,
3779 .atomic_destroy_state = drm_dp_mst_destroy_state,
3781 EXPORT_SYMBOL(drm_dp_mst_topology_state_funcs);
3784 * drm_atomic_get_mst_topology_state: get MST topology state
3786 * @state: global atomic state
3787 * @mgr: MST topology manager, also the private object in this case
3789  * This function wraps drm_atomic_get_private_obj_state(), passing in the MST
3790  * atomic state vtable so that the private object state returned is that of an
3791  * MST topology object. Also, drm_atomic_get_private_obj_state() expects the
3792  * caller to take care of the locking, so warn if we don't hold the connection_mutex.
3796 * The MST topology state or error pointer.
3798 struct drm_dp_mst_topology_state *drm_atomic_get_mst_topology_state(struct drm_atomic_state *state,
3799 struct drm_dp_mst_topology_mgr *mgr)
3801 struct drm_device *dev = mgr->dev;
3803 WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
3804 return to_dp_mst_topology_state(drm_atomic_get_private_obj_state(state, &mgr->base));
3806 EXPORT_SYMBOL(drm_atomic_get_mst_topology_state);
3809 * drm_dp_mst_topology_mgr_init - initialise a topology manager
3810 * @mgr: manager struct to initialise
3811 * @dev: device providing this structure - for i2c addition.
3812 * @aux: DP helper aux channel to talk to this device
3813 * @max_dpcd_transaction_bytes: hw specific DPCD transaction limit
3814 * @max_payloads: maximum number of payloads this GPU can source
3815 * @conn_base_id: the connector object ID the MST device is connected to.
3817 * Return 0 for success, or negative error code on failure
3819 int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
3820 struct drm_device *dev, struct drm_dp_aux *aux,
3821 int max_dpcd_transaction_bytes,
3822 int max_payloads, int conn_base_id)
3824 struct drm_dp_mst_topology_state *mst_state;
3826 mutex_init(&mgr->lock);
3827 mutex_init(&mgr->qlock);
3828 mutex_init(&mgr->payload_lock);
3829 mutex_init(&mgr->destroy_connector_lock);
3830 INIT_LIST_HEAD(&mgr->tx_msg_downq);
3831 INIT_LIST_HEAD(&mgr->destroy_connector_list);
3832 INIT_WORK(&mgr->work, drm_dp_mst_link_probe_work);
3833 INIT_WORK(&mgr->tx_work, drm_dp_tx_work);
3834 INIT_WORK(&mgr->destroy_connector_work, drm_dp_destroy_connector_work);
3835 init_waitqueue_head(&mgr->tx_waitq);
3838 mgr->max_dpcd_transaction_bytes = max_dpcd_transaction_bytes;
3839 mgr->max_payloads = max_payloads;
3840 mgr->conn_base_id = conn_base_id;
3841 if (max_payloads + 1 > sizeof(mgr->payload_mask) * 8 ||
3842 max_payloads + 1 > sizeof(mgr->vcpi_mask) * 8)
3844 mgr->payloads = kcalloc(max_payloads, sizeof(struct drm_dp_payload), GFP_KERNEL);
3847 mgr->proposed_vcpis = kcalloc(max_payloads, sizeof(struct drm_dp_vcpi *), GFP_KERNEL);
3848 if (!mgr->proposed_vcpis)
3850 set_bit(0, &mgr->payload_mask);
3851 if (test_calc_pbn_mode() < 0)
3852 DRM_ERROR("MST PBN self-test failed\n");
3854 mst_state = kzalloc(sizeof(*mst_state), GFP_KERNEL);
3855 if (mst_state == NULL)
3858 mst_state->mgr = mgr;
3859 INIT_LIST_HEAD(&mst_state->vcpis);
3861 drm_atomic_private_obj_init(dev, &mgr->base,
3863 &drm_dp_mst_topology_state_funcs);
3867 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_init);
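#if 0 /* illustrative sketch only - a hypothetical driver-probe fragment */
/*
 * A minimal sketch of manager setup at driver load. The 16-byte DPCD
 * transaction limit and 16 payloads are hardware-specific placeholders;
 * the driver must also fill mgr->cbs with its topology callbacks before
 * enabling MST.
 */
static int example_mst_init(struct drm_dp_mst_topology_mgr *mgr,
			    struct drm_device *dev, struct drm_dp_aux *aux,
			    int conn_base_id)
{
	return drm_dp_mst_topology_mgr_init(mgr, dev, aux, 16, 16,
					    conn_base_id);
}
#endif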
3870 * drm_dp_mst_topology_mgr_destroy() - destroy topology manager.
3871 * @mgr: manager to destroy
3873 void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr)
3875 drm_dp_mst_topology_mgr_set_mst(mgr, false);
3876 flush_work(&mgr->work);
3877 flush_work(&mgr->destroy_connector_work);
3878 mutex_lock(&mgr->payload_lock);
3879 kfree(mgr->payloads);
3880 mgr->payloads = NULL;
3881 kfree(mgr->proposed_vcpis);
3882 mgr->proposed_vcpis = NULL;
3883 mutex_unlock(&mgr->payload_lock);
3886 drm_atomic_private_obj_fini(&mgr->base);
3889 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_destroy);
3891 static bool remote_i2c_read_ok(const struct i2c_msg msgs[], int num)
3895 if (num - 1 > DP_REMOTE_I2C_READ_MAX_TRANSACTIONS)
3898 for (i = 0; i < num - 1; i++) {
3899 		if (msgs[i].flags & I2C_M_RD ||
3900 		    msgs[i].len > 0xff)
3901 			return false;
3902 	}
3904 return msgs[num - 1].flags & I2C_M_RD &&
3905 msgs[num - 1].len <= 0xff;
3909 static int drm_dp_mst_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
3912 struct drm_dp_aux *aux = adapter->algo_data;
3913 struct drm_dp_mst_port *port = container_of(aux, struct drm_dp_mst_port, aux);
3914 struct drm_dp_mst_branch *mstb;
3915 struct drm_dp_mst_topology_mgr *mgr = port->mgr;
3917 struct drm_dp_sideband_msg_req_body msg;
3918 struct drm_dp_sideband_msg_tx *txmsg = NULL;
3921 mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
3925 if (!remote_i2c_read_ok(msgs, num)) {
3926 DRM_DEBUG_KMS("Unsupported I2C transaction for MST device\n");
3931 memset(&msg, 0, sizeof(msg));
3932 msg.req_type = DP_REMOTE_I2C_READ;
3933 msg.u.i2c_read.num_transactions = num - 1;
3934 msg.u.i2c_read.port_number = port->port_num;
3935 for (i = 0; i < num - 1; i++) {
3936 msg.u.i2c_read.transactions[i].i2c_dev_id = msgs[i].addr;
3937 msg.u.i2c_read.transactions[i].num_bytes = msgs[i].len;
3938 msg.u.i2c_read.transactions[i].bytes = msgs[i].buf;
3939 msg.u.i2c_read.transactions[i].no_stop_bit = !(msgs[i].flags & I2C_M_STOP);
3941 msg.u.i2c_read.read_i2c_device_id = msgs[num - 1].addr;
3942 msg.u.i2c_read.num_bytes_read = msgs[num - 1].len;
3944 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
3951 drm_dp_encode_sideband_req(&msg, txmsg);
3953 drm_dp_queue_down_tx(mgr, txmsg);
3955 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
3958 if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
3962 if (txmsg->reply.u.remote_i2c_read_ack.num_bytes != msgs[num - 1].len) {
3966 memcpy(msgs[num - 1].buf, txmsg->reply.u.remote_i2c_read_ack.bytes, msgs[num - 1].len);
3971 drm_dp_mst_topology_put_mstb(mstb);
3975 static u32 drm_dp_mst_i2c_functionality(struct i2c_adapter *adapter)
3977 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
3978 I2C_FUNC_SMBUS_READ_BLOCK_DATA |
3979 I2C_FUNC_SMBUS_BLOCK_PROC_CALL |
3980 I2C_FUNC_10BIT_ADDR;
3983 static const struct i2c_algorithm drm_dp_mst_i2c_algo = {
3984 .functionality = drm_dp_mst_i2c_functionality,
3985 .master_xfer = drm_dp_mst_i2c_xfer,
3989 * drm_dp_mst_register_i2c_bus() - register an I2C adapter for I2C-over-AUX
3990 * @aux: DisplayPort AUX channel
3992 * Returns 0 on success or a negative error code on failure.
3994 static int drm_dp_mst_register_i2c_bus(struct drm_dp_aux *aux)
3996 aux->ddc.algo = &drm_dp_mst_i2c_algo;
3997 aux->ddc.algo_data = aux;
3998 aux->ddc.retries = 3;
4000 aux->ddc.class = I2C_CLASS_DDC;
4001 aux->ddc.owner = THIS_MODULE;
4002 aux->ddc.dev.parent = aux->dev;
4003 aux->ddc.dev.of_node = aux->dev->of_node;
4005 strlcpy(aux->ddc.name, aux->name ? aux->name : dev_name(aux->dev),
4006 sizeof(aux->ddc.name));
4008 return i2c_add_adapter(&aux->ddc);
4012 * drm_dp_mst_unregister_i2c_bus() - unregister an I2C-over-AUX adapter
4013 * @aux: DisplayPort AUX channel
4015 static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_aux *aux)
4017 i2c_del_adapter(&aux->ddc);