2 * Copyright © 2014 Red Hat
4 * Permission to use, copy, modify, distribute, and sell this software and its
5 * documentation for any purpose is hereby granted without fee, provided that
6 * the above copyright notice appear in all copies and that both that copyright
7 * notice and this permission notice appear in supporting documentation, and
8 * that the name of the copyright holders not be used in advertising or
9 * publicity pertaining to distribution of the software without specific,
10 * written prior permission. The copyright holders make no representations
11 * about the suitability of this software for any purpose. It is provided "as
12 * is" without express or implied warranty.
14 * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
15 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
16 * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
17 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
18 * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
19 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
23 #include <linux/kernel.h>
24 #include <linux/delay.h>
25 #include <linux/init.h>
26 #include <linux/errno.h>
27 #include <linux/sched.h>
28 #include <linux/i2c.h>
29 #include <drm/drm_dp_mst_helper.h>
32 #include <drm/drm_fixed.h>
37 * These functions contain parts of the DisplayPort 1.2a MultiStream Transport
38 * protocol. The helpers contain a topology manager and bandwidth manager.
39 * The helpers encapsulate the sending and receiving of sideband msgs.
41 static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
43 static int test_calc_pbn_mode(void);
45 static void drm_dp_put_port(struct drm_dp_mst_port *port);
47 static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
49 struct drm_dp_payload *payload);
51 static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
52 struct drm_dp_mst_port *port,
53 int offset, int size, u8 *bytes);
55 static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
56 struct drm_dp_mst_branch *mstb);
57 static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
58 struct drm_dp_mst_branch *mstb,
59 struct drm_dp_mst_port *port);
60 static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
63 static int drm_dp_mst_register_i2c_bus(struct drm_dp_aux *aux);
64 static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_aux *aux);
65 static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr);
66 /* sideband msg handling */
67 static u8 drm_dp_msg_header_crc4(const uint8_t *data, size_t num_nibbles)
72 int number_of_bits = num_nibbles * 4;
75 while (number_of_bits != 0) {
78 remainder |= (data[array_index] & bitmask) >> bitshift;
86 if ((remainder & 0x10) == 0x10)
91 while (number_of_bits != 0) {
94 if ((remainder & 0x10) != 0)
101 static u8 drm_dp_msg_data_crc4(const uint8_t *data, u8 number_of_bytes)
106 int number_of_bits = number_of_bytes * 8;
109 while (number_of_bits != 0) {
112 remainder |= (data[array_index] & bitmask) >> bitshift;
120 if ((remainder & 0x100) == 0x100)
125 while (number_of_bits != 0) {
128 if ((remainder & 0x100) != 0)
132 return remainder & 0xff;
134 static inline u8 drm_dp_calc_sb_hdr_size(struct drm_dp_sideband_msg_hdr *hdr)
137 size += (hdr->lct / 2);
141 static void drm_dp_encode_sideband_msg_hdr(struct drm_dp_sideband_msg_hdr *hdr,
147 buf[idx++] = ((hdr->lct & 0xf) << 4) | (hdr->lcr & 0xf);
148 for (i = 0; i < (hdr->lct / 2); i++)
149 buf[idx++] = hdr->rad[i];
150 buf[idx++] = (hdr->broadcast << 7) | (hdr->path_msg << 6) |
151 (hdr->msg_len & 0x3f);
152 buf[idx++] = (hdr->somt << 7) | (hdr->eomt << 6) | (hdr->seqno << 4);
154 crc4 = drm_dp_msg_header_crc4(buf, (idx * 2) - 1);
155 buf[idx - 1] |= (crc4 & 0xf);
160 static bool drm_dp_decode_sideband_msg_hdr(struct drm_dp_sideband_msg_hdr *hdr,
161 u8 *buf, int buflen, u8 *hdrlen)
170 len += ((buf[0] & 0xf0) >> 4) / 2;
173 crc4 = drm_dp_msg_header_crc4(buf, (len * 2) - 1);
175 if ((crc4 & 0xf) != (buf[len - 1] & 0xf)) {
176 DRM_DEBUG_KMS("crc4 mismatch 0x%x 0x%x\n", crc4, buf[len - 1]);
180 hdr->lct = (buf[0] & 0xf0) >> 4;
181 hdr->lcr = (buf[0] & 0xf);
183 for (i = 0; i < (hdr->lct / 2); i++)
184 hdr->rad[i] = buf[idx++];
185 hdr->broadcast = (buf[idx] >> 7) & 0x1;
186 hdr->path_msg = (buf[idx] >> 6) & 0x1;
187 hdr->msg_len = buf[idx] & 0x3f;
189 hdr->somt = (buf[idx] >> 7) & 0x1;
190 hdr->eomt = (buf[idx] >> 6) & 0x1;
191 hdr->seqno = (buf[idx] >> 4) & 0x1;
197 static void drm_dp_encode_sideband_req(struct drm_dp_sideband_msg_req_body *req,
198 struct drm_dp_sideband_msg_tx *raw)
203 buf[idx++] = req->req_type & 0x7f;
205 switch (req->req_type) {
206 case DP_ENUM_PATH_RESOURCES:
207 buf[idx] = (req->u.port_num.port_number & 0xf) << 4;
210 case DP_ALLOCATE_PAYLOAD:
211 buf[idx] = (req->u.allocate_payload.port_number & 0xf) << 4 |
212 (req->u.allocate_payload.number_sdp_streams & 0xf);
214 buf[idx] = (req->u.allocate_payload.vcpi & 0x7f);
216 buf[idx] = (req->u.allocate_payload.pbn >> 8);
218 buf[idx] = (req->u.allocate_payload.pbn & 0xff);
220 for (i = 0; i < req->u.allocate_payload.number_sdp_streams / 2; i++) {
221 buf[idx] = ((req->u.allocate_payload.sdp_stream_sink[i * 2] & 0xf) << 4) |
222 (req->u.allocate_payload.sdp_stream_sink[i * 2 + 1] & 0xf);
225 if (req->u.allocate_payload.number_sdp_streams & 1) {
226 i = req->u.allocate_payload.number_sdp_streams - 1;
227 buf[idx] = (req->u.allocate_payload.sdp_stream_sink[i] & 0xf) << 4;
231 case DP_QUERY_PAYLOAD:
232 buf[idx] = (req->u.query_payload.port_number & 0xf) << 4;
234 buf[idx] = (req->u.query_payload.vcpi & 0x7f);
237 case DP_REMOTE_DPCD_READ:
238 buf[idx] = (req->u.dpcd_read.port_number & 0xf) << 4;
239 buf[idx] |= ((req->u.dpcd_read.dpcd_address & 0xf0000) >> 16) & 0xf;
241 buf[idx] = (req->u.dpcd_read.dpcd_address & 0xff00) >> 8;
243 buf[idx] = (req->u.dpcd_read.dpcd_address & 0xff);
245 buf[idx] = (req->u.dpcd_read.num_bytes);
249 case DP_REMOTE_DPCD_WRITE:
250 buf[idx] = (req->u.dpcd_write.port_number & 0xf) << 4;
251 buf[idx] |= ((req->u.dpcd_write.dpcd_address & 0xf0000) >> 16) & 0xf;
253 buf[idx] = (req->u.dpcd_write.dpcd_address & 0xff00) >> 8;
255 buf[idx] = (req->u.dpcd_write.dpcd_address & 0xff);
257 buf[idx] = (req->u.dpcd_write.num_bytes);
259 memcpy(&buf[idx], req->u.dpcd_write.bytes, req->u.dpcd_write.num_bytes);
260 idx += req->u.dpcd_write.num_bytes;
262 case DP_REMOTE_I2C_READ:
263 buf[idx] = (req->u.i2c_read.port_number & 0xf) << 4;
264 buf[idx] |= (req->u.i2c_read.num_transactions & 0x3);
266 for (i = 0; i < (req->u.i2c_read.num_transactions & 0x3); i++) {
267 buf[idx] = req->u.i2c_read.transactions[i].i2c_dev_id & 0x7f;
269 buf[idx] = req->u.i2c_read.transactions[i].num_bytes;
271 memcpy(&buf[idx], req->u.i2c_read.transactions[i].bytes, req->u.i2c_read.transactions[i].num_bytes);
272 idx += req->u.i2c_read.transactions[i].num_bytes;
274 buf[idx] = (req->u.i2c_read.transactions[i].no_stop_bit & 0x1) << 5;
275 buf[idx] |= (req->u.i2c_read.transactions[i].i2c_transaction_delay & 0xf);
278 buf[idx] = (req->u.i2c_read.read_i2c_device_id) & 0x7f;
280 buf[idx] = (req->u.i2c_read.num_bytes_read);
284 case DP_REMOTE_I2C_WRITE:
285 buf[idx] = (req->u.i2c_write.port_number & 0xf) << 4;
287 buf[idx] = (req->u.i2c_write.write_i2c_device_id) & 0x7f;
289 buf[idx] = (req->u.i2c_write.num_bytes);
291 memcpy(&buf[idx], req->u.i2c_write.bytes, req->u.i2c_write.num_bytes);
292 idx += req->u.i2c_write.num_bytes;
298 static void drm_dp_crc_sideband_chunk_req(u8 *msg, u8 len)
301 crc4 = drm_dp_msg_data_crc4(msg, len);
305 static void drm_dp_encode_sideband_reply(struct drm_dp_sideband_msg_reply_body *rep,
306 struct drm_dp_sideband_msg_tx *raw)
311 buf[idx++] = (rep->reply_type & 0x1) << 7 | (rep->req_type & 0x7f);
316 /* this adds a chunk of msg to the builder to get the final msg */
317 static bool drm_dp_sideband_msg_build(struct drm_dp_sideband_msg_rx *msg,
318 u8 *replybuf, u8 replybuflen, bool hdr)
325 struct drm_dp_sideband_msg_hdr recv_hdr;
326 ret = drm_dp_decode_sideband_msg_hdr(&recv_hdr, replybuf, replybuflen, &hdrlen);
328 print_hex_dump(KERN_DEBUG, "failed hdr", DUMP_PREFIX_NONE, 16, 1, replybuf, replybuflen, false);
332 /* get length contained in this portion */
333 msg->curchunk_len = recv_hdr.msg_len;
334 msg->curchunk_hdrlen = hdrlen;
336 /* we have already gotten an somt - don't bother parsing */
337 if (recv_hdr.somt && msg->have_somt)
341 memcpy(&msg->initial_hdr, &recv_hdr, sizeof(struct drm_dp_sideband_msg_hdr));
342 msg->have_somt = true;
345 msg->have_eomt = true;
347 /* copy the bytes for the remainder of this header chunk */
348 msg->curchunk_idx = min(msg->curchunk_len, (u8)(replybuflen - hdrlen));
349 memcpy(&msg->chunk[0], replybuf + hdrlen, msg->curchunk_idx);
351 memcpy(&msg->chunk[msg->curchunk_idx], replybuf, replybuflen);
352 msg->curchunk_idx += replybuflen;
355 if (msg->curchunk_idx >= msg->curchunk_len) {
357 crc4 = drm_dp_msg_data_crc4(msg->chunk, msg->curchunk_len - 1);
358 /* copy chunk into bigger msg */
359 memcpy(&msg->msg[msg->curlen], msg->chunk, msg->curchunk_len - 1);
360 msg->curlen += msg->curchunk_len - 1;
365 static bool drm_dp_sideband_parse_link_address(struct drm_dp_sideband_msg_rx *raw,
366 struct drm_dp_sideband_msg_reply_body *repmsg)
370 memcpy(repmsg->u.link_addr.guid, &raw->msg[idx], 16);
372 repmsg->u.link_addr.nports = raw->msg[idx] & 0xf;
374 if (idx > raw->curlen)
376 for (i = 0; i < repmsg->u.link_addr.nports; i++) {
377 if (raw->msg[idx] & 0x80)
378 repmsg->u.link_addr.ports[i].input_port = 1;
380 repmsg->u.link_addr.ports[i].peer_device_type = (raw->msg[idx] >> 4) & 0x7;
381 repmsg->u.link_addr.ports[i].port_number = (raw->msg[idx] & 0xf);
384 if (idx > raw->curlen)
386 repmsg->u.link_addr.ports[i].mcs = (raw->msg[idx] >> 7) & 0x1;
387 repmsg->u.link_addr.ports[i].ddps = (raw->msg[idx] >> 6) & 0x1;
388 if (repmsg->u.link_addr.ports[i].input_port == 0)
389 repmsg->u.link_addr.ports[i].legacy_device_plug_status = (raw->msg[idx] >> 5) & 0x1;
391 if (idx > raw->curlen)
393 if (repmsg->u.link_addr.ports[i].input_port == 0) {
394 repmsg->u.link_addr.ports[i].dpcd_revision = (raw->msg[idx]);
396 if (idx > raw->curlen)
398 memcpy(repmsg->u.link_addr.ports[i].peer_guid, &raw->msg[idx], 16);
400 if (idx > raw->curlen)
402 repmsg->u.link_addr.ports[i].num_sdp_streams = (raw->msg[idx] >> 4) & 0xf;
403 repmsg->u.link_addr.ports[i].num_sdp_stream_sinks = (raw->msg[idx] & 0xf);
407 if (idx > raw->curlen)
413 DRM_DEBUG_KMS("link address reply parse length fail %d %d\n", idx, raw->curlen);
417 static bool drm_dp_sideband_parse_remote_dpcd_read(struct drm_dp_sideband_msg_rx *raw,
418 struct drm_dp_sideband_msg_reply_body *repmsg)
421 repmsg->u.remote_dpcd_read_ack.port_number = raw->msg[idx] & 0xf;
423 if (idx > raw->curlen)
425 repmsg->u.remote_dpcd_read_ack.num_bytes = raw->msg[idx];
426 if (idx > raw->curlen)
429 memcpy(repmsg->u.remote_dpcd_read_ack.bytes, &raw->msg[idx], repmsg->u.remote_dpcd_read_ack.num_bytes);
432 DRM_DEBUG_KMS("link address reply parse length fail %d %d\n", idx, raw->curlen);
436 static bool drm_dp_sideband_parse_remote_dpcd_write(struct drm_dp_sideband_msg_rx *raw,
437 struct drm_dp_sideband_msg_reply_body *repmsg)
440 repmsg->u.remote_dpcd_write_ack.port_number = raw->msg[idx] & 0xf;
442 if (idx > raw->curlen)
446 DRM_DEBUG_KMS("parse length fail %d %d\n", idx, raw->curlen);
450 static bool drm_dp_sideband_parse_remote_i2c_read_ack(struct drm_dp_sideband_msg_rx *raw,
451 struct drm_dp_sideband_msg_reply_body *repmsg)
455 repmsg->u.remote_i2c_read_ack.port_number = (raw->msg[idx] & 0xf);
457 if (idx > raw->curlen)
459 repmsg->u.remote_i2c_read_ack.num_bytes = raw->msg[idx];
462 memcpy(repmsg->u.remote_i2c_read_ack.bytes, &raw->msg[idx], repmsg->u.remote_i2c_read_ack.num_bytes);
465 DRM_DEBUG_KMS("remote i2c reply parse length fail %d %d\n", idx, raw->curlen);
469 static bool drm_dp_sideband_parse_enum_path_resources_ack(struct drm_dp_sideband_msg_rx *raw,
470 struct drm_dp_sideband_msg_reply_body *repmsg)
473 repmsg->u.path_resources.port_number = (raw->msg[idx] >> 4) & 0xf;
475 if (idx > raw->curlen)
477 repmsg->u.path_resources.full_payload_bw_number = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
479 if (idx > raw->curlen)
481 repmsg->u.path_resources.avail_payload_bw_number = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
483 if (idx > raw->curlen)
487 DRM_DEBUG_KMS("enum resource parse length fail %d %d\n", idx, raw->curlen);
491 static bool drm_dp_sideband_parse_allocate_payload_ack(struct drm_dp_sideband_msg_rx *raw,
492 struct drm_dp_sideband_msg_reply_body *repmsg)
495 repmsg->u.allocate_payload.port_number = (raw->msg[idx] >> 4) & 0xf;
497 if (idx > raw->curlen)
499 repmsg->u.allocate_payload.vcpi = raw->msg[idx];
501 if (idx > raw->curlen)
503 repmsg->u.allocate_payload.allocated_pbn = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
505 if (idx > raw->curlen)
509 DRM_DEBUG_KMS("allocate payload parse length fail %d %d\n", idx, raw->curlen);
513 static bool drm_dp_sideband_parse_query_payload_ack(struct drm_dp_sideband_msg_rx *raw,
514 struct drm_dp_sideband_msg_reply_body *repmsg)
517 repmsg->u.query_payload.port_number = (raw->msg[idx] >> 4) & 0xf;
519 if (idx > raw->curlen)
521 repmsg->u.query_payload.allocated_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]);
523 if (idx > raw->curlen)
527 DRM_DEBUG_KMS("query payload parse length fail %d %d\n", idx, raw->curlen);
531 static bool drm_dp_sideband_parse_reply(struct drm_dp_sideband_msg_rx *raw,
532 struct drm_dp_sideband_msg_reply_body *msg)
534 memset(msg, 0, sizeof(*msg));
535 msg->reply_type = (raw->msg[0] & 0x80) >> 7;
536 msg->req_type = (raw->msg[0] & 0x7f);
538 if (msg->reply_type) {
539 memcpy(msg->u.nak.guid, &raw->msg[1], 16);
540 msg->u.nak.reason = raw->msg[17];
541 msg->u.nak.nak_data = raw->msg[18];
545 switch (msg->req_type) {
546 case DP_LINK_ADDRESS:
547 return drm_dp_sideband_parse_link_address(raw, msg);
548 case DP_QUERY_PAYLOAD:
549 return drm_dp_sideband_parse_query_payload_ack(raw, msg);
550 case DP_REMOTE_DPCD_READ:
551 return drm_dp_sideband_parse_remote_dpcd_read(raw, msg);
552 case DP_REMOTE_DPCD_WRITE:
553 return drm_dp_sideband_parse_remote_dpcd_write(raw, msg);
554 case DP_REMOTE_I2C_READ:
555 return drm_dp_sideband_parse_remote_i2c_read_ack(raw, msg);
556 case DP_ENUM_PATH_RESOURCES:
557 return drm_dp_sideband_parse_enum_path_resources_ack(raw, msg);
558 case DP_ALLOCATE_PAYLOAD:
559 return drm_dp_sideband_parse_allocate_payload_ack(raw, msg);
561 DRM_ERROR("Got unknown reply 0x%02x\n", msg->req_type);
566 static bool drm_dp_sideband_parse_connection_status_notify(struct drm_dp_sideband_msg_rx *raw,
567 struct drm_dp_sideband_msg_req_body *msg)
571 msg->u.conn_stat.port_number = (raw->msg[idx] & 0xf0) >> 4;
573 if (idx > raw->curlen)
576 memcpy(msg->u.conn_stat.guid, &raw->msg[idx], 16);
578 if (idx > raw->curlen)
581 msg->u.conn_stat.legacy_device_plug_status = (raw->msg[idx] >> 6) & 0x1;
582 msg->u.conn_stat.displayport_device_plug_status = (raw->msg[idx] >> 5) & 0x1;
583 msg->u.conn_stat.message_capability_status = (raw->msg[idx] >> 4) & 0x1;
584 msg->u.conn_stat.input_port = (raw->msg[idx] >> 3) & 0x1;
585 msg->u.conn_stat.peer_device_type = (raw->msg[idx] & 0x7);
589 DRM_DEBUG_KMS("connection status reply parse length fail %d %d\n", idx, raw->curlen);
593 static bool drm_dp_sideband_parse_resource_status_notify(struct drm_dp_sideband_msg_rx *raw,
594 struct drm_dp_sideband_msg_req_body *msg)
598 msg->u.resource_stat.port_number = (raw->msg[idx] & 0xf0) >> 4;
600 if (idx > raw->curlen)
603 memcpy(msg->u.resource_stat.guid, &raw->msg[idx], 16);
605 if (idx > raw->curlen)
608 msg->u.resource_stat.available_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]);
612 DRM_DEBUG_KMS("resource status reply parse length fail %d %d\n", idx, raw->curlen);
616 static bool drm_dp_sideband_parse_req(struct drm_dp_sideband_msg_rx *raw,
617 struct drm_dp_sideband_msg_req_body *msg)
619 memset(msg, 0, sizeof(*msg));
620 msg->req_type = (raw->msg[0] & 0x7f);
622 switch (msg->req_type) {
623 case DP_CONNECTION_STATUS_NOTIFY:
624 return drm_dp_sideband_parse_connection_status_notify(raw, msg);
625 case DP_RESOURCE_STATUS_NOTIFY:
626 return drm_dp_sideband_parse_resource_status_notify(raw, msg);
628 DRM_ERROR("Got unknown request 0x%02x\n", msg->req_type);
633 static int build_dpcd_write(struct drm_dp_sideband_msg_tx *msg, u8 port_num, u32 offset, u8 num_bytes, u8 *bytes)
635 struct drm_dp_sideband_msg_req_body req;
637 req.req_type = DP_REMOTE_DPCD_WRITE;
638 req.u.dpcd_write.port_number = port_num;
639 req.u.dpcd_write.dpcd_address = offset;
640 req.u.dpcd_write.num_bytes = num_bytes;
641 req.u.dpcd_write.bytes = bytes;
642 drm_dp_encode_sideband_req(&req, msg);
647 static int build_link_address(struct drm_dp_sideband_msg_tx *msg)
649 struct drm_dp_sideband_msg_req_body req;
651 req.req_type = DP_LINK_ADDRESS;
652 drm_dp_encode_sideband_req(&req, msg);
656 static int build_enum_path_resources(struct drm_dp_sideband_msg_tx *msg, int port_num)
658 struct drm_dp_sideband_msg_req_body req;
660 req.req_type = DP_ENUM_PATH_RESOURCES;
661 req.u.port_num.port_number = port_num;
662 drm_dp_encode_sideband_req(&req, msg);
663 msg->path_msg = true;
667 static int build_allocate_payload(struct drm_dp_sideband_msg_tx *msg, int port_num,
668 u8 vcpi, uint16_t pbn)
670 struct drm_dp_sideband_msg_req_body req;
671 memset(&req, 0, sizeof(req));
672 req.req_type = DP_ALLOCATE_PAYLOAD;
673 req.u.allocate_payload.port_number = port_num;
674 req.u.allocate_payload.vcpi = vcpi;
675 req.u.allocate_payload.pbn = pbn;
676 drm_dp_encode_sideband_req(&req, msg);
677 msg->path_msg = true;
681 static int drm_dp_mst_assign_payload_id(struct drm_dp_mst_topology_mgr *mgr,
682 struct drm_dp_vcpi *vcpi)
686 mutex_lock(&mgr->payload_lock);
687 ret = find_first_zero_bit(&mgr->payload_mask, mgr->max_payloads + 1);
688 if (ret > mgr->max_payloads) {
690 DRM_DEBUG_KMS("out of payload ids %d\n", ret);
694 set_bit(ret, &mgr->payload_mask);
696 mgr->proposed_vcpis[ret - 1] = vcpi;
698 mutex_unlock(&mgr->payload_lock);
702 static void drm_dp_mst_put_payload_id(struct drm_dp_mst_topology_mgr *mgr,
708 mutex_lock(&mgr->payload_lock);
709 DRM_DEBUG_KMS("putting payload %d\n", id);
710 clear_bit(id, &mgr->payload_mask);
711 mgr->proposed_vcpis[id - 1] = NULL;
712 mutex_unlock(&mgr->payload_lock);
715 static bool check_txmsg_state(struct drm_dp_mst_topology_mgr *mgr,
716 struct drm_dp_sideband_msg_tx *txmsg)
719 mutex_lock(&mgr->qlock);
720 ret = (txmsg->state == DRM_DP_SIDEBAND_TX_RX ||
721 txmsg->state == DRM_DP_SIDEBAND_TX_TIMEOUT);
722 mutex_unlock(&mgr->qlock);
726 static int drm_dp_mst_wait_tx_reply(struct drm_dp_mst_branch *mstb,
727 struct drm_dp_sideband_msg_tx *txmsg)
729 struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
732 ret = wait_event_timeout(mgr->tx_waitq,
733 check_txmsg_state(mgr, txmsg),
735 mutex_lock(&mstb->mgr->qlock);
737 if (txmsg->state == DRM_DP_SIDEBAND_TX_TIMEOUT) {
742 DRM_DEBUG_KMS("timedout msg send %p %d %d\n", txmsg, txmsg->state, txmsg->seqno);
744 /* dump some state */
748 if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED ||
749 txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND) {
750 list_del(&txmsg->next);
753 if (txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND ||
754 txmsg->state == DRM_DP_SIDEBAND_TX_SENT) {
755 mstb->tx_slots[txmsg->seqno] = NULL;
759 mutex_unlock(&mgr->qlock);
764 static struct drm_dp_mst_branch *drm_dp_add_mst_branch_device(u8 lct, u8 *rad)
766 struct drm_dp_mst_branch *mstb;
768 mstb = kzalloc(sizeof(*mstb), GFP_KERNEL);
774 memcpy(mstb->rad, rad, lct / 2);
775 INIT_LIST_HEAD(&mstb->ports);
776 kref_init(&mstb->kref);
780 static void drm_dp_destroy_mst_branch_device(struct kref *kref)
782 struct drm_dp_mst_branch *mstb = container_of(kref, struct drm_dp_mst_branch, kref);
783 struct drm_dp_mst_port *port, *tmp;
784 bool wake_tx = false;
786 cancel_work_sync(&mstb->mgr->work);
789 * destroy all ports - don't need lock
790 * as there are no more references to the mst branch
791 * device at this point.
793 list_for_each_entry_safe(port, tmp, &mstb->ports, next) {
794 list_del(&port->next);
795 drm_dp_put_port(port);
798 /* drop any tx slots msg */
799 mutex_lock(&mstb->mgr->qlock);
800 if (mstb->tx_slots[0]) {
801 mstb->tx_slots[0]->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
802 mstb->tx_slots[0] = NULL;
805 if (mstb->tx_slots[1]) {
806 mstb->tx_slots[1]->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
807 mstb->tx_slots[1] = NULL;
810 mutex_unlock(&mstb->mgr->qlock);
813 wake_up(&mstb->mgr->tx_waitq);
817 static void drm_dp_put_mst_branch_device(struct drm_dp_mst_branch *mstb)
819 kref_put(&mstb->kref, drm_dp_destroy_mst_branch_device);
823 static void drm_dp_port_teardown_pdt(struct drm_dp_mst_port *port, int old_pdt)
826 case DP_PEER_DEVICE_DP_LEGACY_CONV:
827 case DP_PEER_DEVICE_SST_SINK:
828 /* remove i2c over sideband */
829 drm_dp_mst_unregister_i2c_bus(&port->aux);
831 case DP_PEER_DEVICE_MST_BRANCHING:
832 drm_dp_put_mst_branch_device(port->mstb);
838 static void drm_dp_destroy_port(struct kref *kref)
840 struct drm_dp_mst_port *port = container_of(kref, struct drm_dp_mst_port, kref);
841 struct drm_dp_mst_topology_mgr *mgr = port->mgr;
843 port->vcpi.num_slots = 0;
845 (*port->mgr->cbs->destroy_connector)(mgr, port->connector);
846 drm_dp_port_teardown_pdt(port, port->pdt);
848 if (!port->input && port->vcpi.vcpi > 0)
849 drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
853 (*mgr->cbs->hotplug)(mgr);
856 static void drm_dp_put_port(struct drm_dp_mst_port *port)
858 kref_put(&port->kref, drm_dp_destroy_port);
861 static struct drm_dp_mst_branch *drm_dp_mst_get_validated_mstb_ref_locked(struct drm_dp_mst_branch *mstb, struct drm_dp_mst_branch *to_find)
863 struct drm_dp_mst_port *port;
864 struct drm_dp_mst_branch *rmstb;
865 if (to_find == mstb) {
866 kref_get(&mstb->kref);
869 list_for_each_entry(port, &mstb->ports, next) {
871 rmstb = drm_dp_mst_get_validated_mstb_ref_locked(port->mstb, to_find);
879 static struct drm_dp_mst_branch *drm_dp_get_validated_mstb_ref(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_branch *mstb)
881 struct drm_dp_mst_branch *rmstb = NULL;
882 mutex_lock(&mgr->lock);
883 if (mgr->mst_primary)
884 rmstb = drm_dp_mst_get_validated_mstb_ref_locked(mgr->mst_primary, mstb);
885 mutex_unlock(&mgr->lock);
889 static struct drm_dp_mst_port *drm_dp_mst_get_port_ref_locked(struct drm_dp_mst_branch *mstb, struct drm_dp_mst_port *to_find)
891 struct drm_dp_mst_port *port, *mport;
893 list_for_each_entry(port, &mstb->ports, next) {
894 if (port == to_find) {
895 kref_get(&port->kref);
899 mport = drm_dp_mst_get_port_ref_locked(port->mstb, to_find);
907 static struct drm_dp_mst_port *drm_dp_get_validated_port_ref(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
909 struct drm_dp_mst_port *rport = NULL;
910 mutex_lock(&mgr->lock);
911 if (mgr->mst_primary)
912 rport = drm_dp_mst_get_port_ref_locked(mgr->mst_primary, port);
913 mutex_unlock(&mgr->lock);
917 static struct drm_dp_mst_port *drm_dp_get_port(struct drm_dp_mst_branch *mstb, u8 port_num)
919 struct drm_dp_mst_port *port;
921 list_for_each_entry(port, &mstb->ports, next) {
922 if (port->port_num == port_num) {
923 kref_get(&port->kref);
932 * calculate a new RAD for this MST branch device
933 * if parent has an LCT of 2 then it has 1 nibble of RAD,
934 * if parent has an LCT of 3 then it has 2 nibbles of RAD,
936 static u8 drm_dp_calculate_rad(struct drm_dp_mst_port *port,
939 int lct = port->parent->lct;
943 memcpy(rad, port->parent->rad, idx);
944 shift = (lct % 2) ? 4 : 0;
948 rad[idx] |= port->port_num << shift;
953 * return sends link address for new mstb
955 static bool drm_dp_port_setup_pdt(struct drm_dp_mst_port *port)
959 bool send_link = false;
961 case DP_PEER_DEVICE_DP_LEGACY_CONV:
962 case DP_PEER_DEVICE_SST_SINK:
963 /* add i2c over sideband */
964 ret = drm_dp_mst_register_i2c_bus(&port->aux);
966 case DP_PEER_DEVICE_MST_BRANCHING:
967 lct = drm_dp_calculate_rad(port, rad);
969 port->mstb = drm_dp_add_mst_branch_device(lct, rad);
970 port->mstb->mgr = port->mgr;
971 port->mstb->port_parent = port;
979 static void drm_dp_check_port_guid(struct drm_dp_mst_branch *mstb,
980 struct drm_dp_mst_port *port)
983 if (port->dpcd_rev >= 0x12) {
984 port->guid_valid = drm_dp_validate_guid(mstb->mgr, port->guid);
985 if (!port->guid_valid) {
986 ret = drm_dp_send_dpcd_write(mstb->mgr,
990 port->guid_valid = true;
995 static void build_mst_prop_path(struct drm_dp_mst_port *port,
996 struct drm_dp_mst_branch *mstb,
1001 snprintf(proppath, 255, "mst:%d", mstb->mgr->conn_base_id);
1002 for (i = 0; i < (mstb->lct - 1); i++) {
1003 int shift = (i % 2) ? 0 : 4;
1004 int port_num = mstb->rad[i / 2] >> shift;
1005 snprintf(temp, 8, "-%d", port_num);
1006 strncat(proppath, temp, 255);
1008 snprintf(temp, 8, "-%d", port->port_num);
1009 strncat(proppath, temp, 255);
1012 static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
1014 struct drm_dp_link_addr_reply_port *port_msg)
1016 struct drm_dp_mst_port *port;
1018 bool created = false;
1021 port = drm_dp_get_port(mstb, port_msg->port_number);
1023 port = kzalloc(sizeof(*port), GFP_KERNEL);
1026 kref_init(&port->kref);
1027 port->parent = mstb;
1028 port->port_num = port_msg->port_number;
1029 port->mgr = mstb->mgr;
1030 port->aux.name = "DPMST";
1031 port->aux.dev = dev;
1034 old_pdt = port->pdt;
1035 old_ddps = port->ddps;
1038 port->pdt = port_msg->peer_device_type;
1039 port->input = port_msg->input_port;
1040 port->mcs = port_msg->mcs;
1041 port->ddps = port_msg->ddps;
1042 port->ldps = port_msg->legacy_device_plug_status;
1043 port->dpcd_rev = port_msg->dpcd_revision;
1044 port->num_sdp_streams = port_msg->num_sdp_streams;
1045 port->num_sdp_stream_sinks = port_msg->num_sdp_stream_sinks;
1046 memcpy(port->guid, port_msg->peer_guid, 16);
1048 /* manage mstb port lists with mgr lock - take a reference
1051 mutex_lock(&mstb->mgr->lock);
1052 kref_get(&port->kref);
1053 list_add(&port->next, &mstb->ports);
1054 mutex_unlock(&mstb->mgr->lock);
1057 if (old_ddps != port->ddps) {
1059 drm_dp_check_port_guid(mstb, port);
1061 drm_dp_send_enum_path_resources(mstb->mgr, mstb, port);
1063 port->guid_valid = false;
1064 port->available_pbn = 0;
1068 if (old_pdt != port->pdt && !port->input) {
1069 drm_dp_port_teardown_pdt(port, old_pdt);
1071 ret = drm_dp_port_setup_pdt(port);
1073 drm_dp_send_link_address(mstb->mgr, port->mstb);
1074 port->mstb->link_address_sent = true;
1078 if (created && !port->input) {
1080 build_mst_prop_path(port, mstb, proppath);
1081 port->connector = (*mstb->mgr->cbs->add_connector)(mstb->mgr, port, proppath);
1084 /* put reference to this port */
1085 drm_dp_put_port(port);
1088 static void drm_dp_update_port(struct drm_dp_mst_branch *mstb,
1089 struct drm_dp_connection_status_notify *conn_stat)
1091 struct drm_dp_mst_port *port;
1094 bool dowork = false;
1095 port = drm_dp_get_port(mstb, conn_stat->port_number);
1099 old_ddps = port->ddps;
1100 old_pdt = port->pdt;
1101 port->pdt = conn_stat->peer_device_type;
1102 port->mcs = conn_stat->message_capability_status;
1103 port->ldps = conn_stat->legacy_device_plug_status;
1104 port->ddps = conn_stat->displayport_device_plug_status;
1106 if (old_ddps != port->ddps) {
1108 drm_dp_check_port_guid(mstb, port);
1111 port->guid_valid = false;
1112 port->available_pbn = 0;
1115 if (old_pdt != port->pdt && !port->input) {
1116 drm_dp_port_teardown_pdt(port, old_pdt);
1118 if (drm_dp_port_setup_pdt(port))
1122 drm_dp_put_port(port);
1124 queue_work(system_long_wq, &mstb->mgr->work);
1128 static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_topology_mgr *mgr,
1131 struct drm_dp_mst_branch *mstb;
1132 struct drm_dp_mst_port *port;
1134 /* find the port by iterating down */
1135 mstb = mgr->mst_primary;
1137 for (i = 0; i < lct - 1; i++) {
1138 int shift = (i % 2) ? 0 : 4;
1139 int port_num = rad[i / 2] >> shift;
1141 list_for_each_entry(port, &mstb->ports, next) {
1142 if (port->port_num == port_num) {
1144 DRM_ERROR("failed to lookup MSTB with lct %d, rad %02x\n", lct, rad[0]);
1153 kref_get(&mstb->kref);
1157 static void drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
1158 struct drm_dp_mst_branch *mstb)
1160 struct drm_dp_mst_port *port;
1162 if (!mstb->link_address_sent) {
1163 drm_dp_send_link_address(mgr, mstb);
1164 mstb->link_address_sent = true;
1166 list_for_each_entry(port, &mstb->ports, next) {
1173 if (!port->available_pbn)
1174 drm_dp_send_enum_path_resources(mgr, mstb, port);
1177 drm_dp_check_and_send_link_address(mgr, port->mstb);
1181 static void drm_dp_mst_link_probe_work(struct work_struct *work)
1183 struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, work);
1185 drm_dp_check_and_send_link_address(mgr, mgr->mst_primary);
1189 static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
1192 static u8 zero_guid[16];
1194 if (!memcmp(guid, zero_guid, 16)) {
1195 u64 salt = get_jiffies_64();
1196 memcpy(&guid[0], &salt, sizeof(u64));
1197 memcpy(&guid[8], &salt, sizeof(u64));
1204 static int build_dpcd_read(struct drm_dp_sideband_msg_tx *msg, u8 port_num, u32 offset, u8 num_bytes)
1206 struct drm_dp_sideband_msg_req_body req;
1208 req.req_type = DP_REMOTE_DPCD_READ;
1209 req.u.dpcd_read.port_number = port_num;
1210 req.u.dpcd_read.dpcd_address = offset;
1211 req.u.dpcd_read.num_bytes = num_bytes;
1212 drm_dp_encode_sideband_req(&req, msg);
1218 static int drm_dp_send_sideband_msg(struct drm_dp_mst_topology_mgr *mgr,
1219 bool up, u8 *msg, int len)
1222 int regbase = up ? DP_SIDEBAND_MSG_UP_REP_BASE : DP_SIDEBAND_MSG_DOWN_REQ_BASE;
1223 int tosend, total, offset;
1230 tosend = min3(mgr->max_dpcd_transaction_bytes, 16, total);
1232 ret = drm_dp_dpcd_write(mgr->aux, regbase + offset,
1235 if (ret != tosend) {
1236 if (ret == -EIO && retries < 5) {
1240 DRM_DEBUG_KMS("failed to dpcd write %d %d\n", tosend, ret);
1247 } while (total > 0);
1251 static int set_hdr_from_dst_qlock(struct drm_dp_sideband_msg_hdr *hdr,
1252 struct drm_dp_sideband_msg_tx *txmsg)
1254 struct drm_dp_mst_branch *mstb = txmsg->dst;
1256 /* both msg slots are full */
1257 if (txmsg->seqno == -1) {
1258 if (mstb->tx_slots[0] && mstb->tx_slots[1]) {
1259 DRM_DEBUG_KMS("%s: failed to find slot\n", __func__);
1262 if (mstb->tx_slots[0] == NULL && mstb->tx_slots[1] == NULL) {
1263 txmsg->seqno = mstb->last_seqno;
1264 mstb->last_seqno ^= 1;
1265 } else if (mstb->tx_slots[0] == NULL)
1269 mstb->tx_slots[txmsg->seqno] = txmsg;
1272 hdr->path_msg = txmsg->path_msg;
1273 hdr->lct = mstb->lct;
1274 hdr->lcr = mstb->lct - 1;
1276 memcpy(hdr->rad, mstb->rad, mstb->lct / 2);
1277 hdr->seqno = txmsg->seqno;
1281 * process a single block of the next message in the sideband queue
/*
 * Sends one <=48-byte chunk of @txmsg over the AUX channel: builds the
 * header, copies as much payload as fits after header + 1 CRC byte, and
 * advances cur_offset.  Marks the message SENT once fully transmitted.
 * NOTE(review): this extract is missing intermediate lines (returns, SOMT/EOMT
 * flag handling around line 1310) — comments cover only the visible code.
 */
1283 static int process_single_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
1284 struct drm_dp_sideband_msg_tx *txmsg,
1288 struct drm_dp_sideband_msg_hdr hdr;
1289 int len, space, idx, tosend;
/* first chunk of a queued message transitions it to START_SEND */
1292 if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED) {
1294 txmsg->state = DRM_DP_SIDEBAND_TX_START_SEND;
1297 /* make hdr from dst mst - for replies use seqno
1298 otherwise assign one */
1299 ret = set_hdr_from_dst_qlock(&hdr, txmsg);
1303 /* amount left to send in this message */
1304 len = txmsg->cur_len - txmsg->cur_offset;
1306 /* 48 - sideband msg size - 1 byte for data CRC, x header bytes */
1307 space = 48 - 1 - drm_dp_calc_sb_hdr_size(&hdr);
1309 tosend = min(len, space);
1310 if (len == txmsg->cur_len)
/* msg_len counts the payload plus the trailing CRC byte */
1316 hdr.msg_len = tosend + 1;
1317 drm_dp_encode_sideband_msg_hdr(&hdr, chunk, &idx);
1318 memcpy(&chunk[idx], &txmsg->msg[txmsg->cur_offset], tosend);
1319 /* add crc at end */
1320 drm_dp_crc_sideband_chunk_req(&chunk[idx], tosend);
1323 ret = drm_dp_send_sideband_msg(mgr, up, chunk, idx);
1325 DRM_DEBUG_KMS("sideband msg failed to send\n");
1329 txmsg->cur_offset += tosend;
1330 if (txmsg->cur_offset == txmsg->cur_len) {
1331 txmsg->state = DRM_DP_SIDEBAND_TX_SENT;
1337 /* must be called holding qlock */
/*
 * Pops the head of tx_msg_downq and pushes its next chunk out; on error the
 * message is dropped from the queue, its tx slot released, its state set to
 * TIMEOUT and waiters woken.  tx_down_in_progress tracks whether the queue
 * still has work pending.
 */
1338 static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
1340 struct drm_dp_sideband_msg_tx *txmsg;
1343 /* construct a chunk from the first msg in the tx_msg queue */
1344 if (list_empty(&mgr->tx_msg_downq)) {
1345 mgr->tx_down_in_progress = false;
1348 mgr->tx_down_in_progress = true;
1350 txmsg = list_first_entry(&mgr->tx_msg_downq, struct drm_dp_sideband_msg_tx, next);
1351 ret = process_single_tx_qlock(mgr, txmsg, false);
1353 /* txmsg is sent it should be in the slots now */
1354 list_del(&txmsg->next);
/* error path: unlink, free the branch tx slot, flag timeout, wake waiter */
1356 DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);
1357 list_del(&txmsg->next);
1358 if (txmsg->seqno != -1)
1359 txmsg->dst->tx_slots[txmsg->seqno] = NULL;
1360 txmsg->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
1361 wake_up(&mgr->tx_waitq);
1363 if (list_empty(&mgr->tx_msg_downq)) {
1364 mgr->tx_down_in_progress = false;
1369 /* called holding qlock */
/*
 * Up-request counterpart of process_single_down_tx_qlock(): sends the head
 * of tx_msg_upq.  Up messages are not tracked in branch tx slots, so the
 * message is unlinked (and, in the elided lines, freed) right after sending.
 */
1370 static void process_single_up_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
1372 struct drm_dp_sideband_msg_tx *txmsg;
1375 /* construct a chunk from the first msg in the tx_msg queue */
1376 if (list_empty(&mgr->tx_msg_upq)) {
1377 mgr->tx_up_in_progress = false;
1381 txmsg = list_first_entry(&mgr->tx_msg_upq, struct drm_dp_sideband_msg_tx, next);
1382 ret = process_single_tx_qlock(mgr, txmsg, true);
1384 /* up txmsgs aren't put in slots - so free after we send it */
1385 list_del(&txmsg->next);
1388 DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);
1389 mgr->tx_up_in_progress = true;
/*
 * drm_dp_queue_down_tx() - queue a down-request sideband message for transmit.
 * Appends @txmsg to the down queue under qlock and kicks transmission
 * immediately if nothing is currently in flight.
 */
1392 static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr,
1393 struct drm_dp_sideband_msg_tx *txmsg)
1395 mutex_lock(&mgr->qlock);
1396 list_add_tail(&txmsg->next, &mgr->tx_msg_downq);
1397 if (!mgr->tx_down_in_progress)
1398 process_single_down_tx_qlock(mgr);
1399 mutex_unlock(&mgr->qlock);
/*
 * drm_dp_send_link_address() - send a LINK_ADDRESS request to @mstb and
 * process the reply: logs each reported port, adds them to the branch via
 * drm_dp_add_port(), and fires the driver's hotplug callback.
 * Returns a status code; txmsg is heap-allocated (freed in elided lines —
 * TODO confirm the kfree path in the full file).
 */
1402 static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
1403 struct drm_dp_mst_branch *mstb)
1406 struct drm_dp_sideband_msg_tx *txmsg;
1409 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
1414 len = build_link_address(txmsg);
1416 drm_dp_queue_down_tx(mgr, txmsg);
/* blocks until a reply arrives or the transaction times out */
1418 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
1422 if (txmsg->reply.reply_type == 1)
1423 DRM_DEBUG_KMS("link address nak received\n");
1425 DRM_DEBUG_KMS("link address reply: %d\n", txmsg->reply.u.link_addr.nports);
1426 for (i = 0; i < txmsg->reply.u.link_addr.nports; i++) {
1427 DRM_DEBUG_KMS("port %d: input %d, pdt: %d, pn: %d, dpcd_rev: %02x, mcs: %d, ddps: %d, ldps %d, sdp %d/%d\n", i,
1428 txmsg->reply.u.link_addr.ports[i].input_port,
1429 txmsg->reply.u.link_addr.ports[i].peer_device_type,
1430 txmsg->reply.u.link_addr.ports[i].port_number,
1431 txmsg->reply.u.link_addr.ports[i].dpcd_revision,
1432 txmsg->reply.u.link_addr.ports[i].mcs,
1433 txmsg->reply.u.link_addr.ports[i].ddps,
1434 txmsg->reply.u.link_addr.ports[i].legacy_device_plug_status,
1435 txmsg->reply.u.link_addr.ports[i].num_sdp_streams,
1436 txmsg->reply.u.link_addr.ports[i].num_sdp_stream_sinks);
/* second pass: materialise the reported ports in the topology */
1438 for (i = 0; i < txmsg->reply.u.link_addr.nports; i++) {
1439 drm_dp_add_port(mstb, mgr->dev, &txmsg->reply.u.link_addr.ports[i]);
1441 (*mgr->cbs->hotplug)(mgr);
1444 DRM_DEBUG_KMS("link address failed %d\n", ret);
/*
 * drm_dp_send_enum_path_resources() - query available payload bandwidth (PBN)
 * along the path to @port via an ENUM_PATH_RESOURCES sideband request, and
 * cache the result in port->available_pbn.
 */
1450 static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
1451 struct drm_dp_mst_branch *mstb,
1452 struct drm_dp_mst_port *port)
1455 struct drm_dp_sideband_msg_tx *txmsg;
1458 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
1463 len = build_enum_path_resources(txmsg, port->port_num);
1465 drm_dp_queue_down_tx(mgr, txmsg);
1467 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
1469 if (txmsg->reply.reply_type == 1)
1470 DRM_DEBUG_KMS("enum path resources nak received\n");
/* sanity check: reply should echo the port number we asked about */
1472 if (port->port_num != txmsg->reply.u.path_resources.port_number)
1473 DRM_ERROR("got incorrect port in response\n");
1474 DRM_DEBUG_KMS("enum path resources %d: %d %d\n", txmsg->reply.u.path_resources.port_number, txmsg->reply.u.path_resources.full_payload_bw_number,
1475 txmsg->reply.u.path_resources.avail_payload_bw_number);
1476 port->available_pbn = txmsg->reply.u.path_resources.avail_payload_bw_number;
/*
 * drm_dp_payload_send_msg() - send an ALLOCATE_PAYLOAD sideband message for
 * @port (pbn == 0 deallocates, per the callers in the destroy path).
 * Takes a validated reference on the parent branch for the duration of the
 * transaction and drops it before returning.
 */
1484 int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
1485 struct drm_dp_mst_port *port,
1489 struct drm_dp_sideband_msg_tx *txmsg;
1490 struct drm_dp_mst_branch *mstb;
/* validate the branch still exists in the topology before using it */
1493 mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);
1497 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
1504 len = build_allocate_payload(txmsg, port->port_num,
1508 drm_dp_queue_down_tx(mgr, txmsg);
1510 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
1512 if (txmsg->reply.reply_type == 1) {
1519 drm_dp_put_mst_branch_device(mstb);
/*
 * drm_dp_create_payload_step1() - program the local DPCD VC payload table
 * for @id; on success the payload transitions to DP_PAYLOAD_LOCAL, on
 * failure its state is reset to 0.
 */
1523 static int drm_dp_create_payload_step1(struct drm_dp_mst_topology_mgr *mgr,
1525 struct drm_dp_payload *payload)
1529 ret = drm_dp_dpcd_write_payload(mgr, id, payload);
1531 payload->payload_state = 0;
1534 payload->payload_state = DP_PAYLOAD_LOCAL;
/*
 * drm_dp_create_payload_step2() - send the remote ALLOCATE_PAYLOAD for a
 * payload already programmed locally; on success it becomes DP_PAYLOAD_REMOTE.
 */
1538 int drm_dp_create_payload_step2(struct drm_dp_mst_topology_mgr *mgr,
1539 struct drm_dp_mst_port *port,
1541 struct drm_dp_payload *payload)
1544 ret = drm_dp_payload_send_msg(mgr, port, id, port->vcpi.pbn);
1547 payload->payload_state = DP_PAYLOAD_REMOTE;
/*
 * drm_dp_destroy_payload_step1() - tear down a payload: sends a zero-PBN
 * ALLOCATE_PAYLOAD to the remote device and rewrites the local DPCD table.
 * Failures are deliberately ignored — the device may already be gone.
 */
1551 int drm_dp_destroy_payload_step1(struct drm_dp_mst_topology_mgr *mgr,
1552 struct drm_dp_mst_port *port,
1554 struct drm_dp_payload *payload)
1556 DRM_DEBUG_KMS("\n");
1557 /* its okay for these to fail */
1559 drm_dp_payload_send_msg(mgr, port, id, 0);
1562 drm_dp_dpcd_write_payload(mgr, id, payload);
1563 payload->payload_state = 0;
/*
 * drm_dp_destroy_payload_step2() - final destroy step: just clears the
 * payload's internal state (no hardware access in the visible lines).
 */
1567 int drm_dp_destroy_payload_step2(struct drm_dp_mst_topology_mgr *mgr,
1569 struct drm_dp_payload *payload)
1571 payload->payload_state = 0;
1576 * drm_dp_update_payload_part1() - Execute payload update part 1
1577 * @mgr: manager to use.
1579 * This iterates over all proposed virtual channels, and tries to
1580 * allocate space in the link for them. For 0->slots transitions,
1581 * this step just writes the VCPI to the MST device. For slots->0
1582 * transitions, this writes the updated VCPIs and removes the
1583 * remote VC payloads.
1585 * after calling this the driver should generate ACT and payload
1588 int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
1592 struct drm_dp_payload req_payload;
1593 struct drm_dp_mst_port *port;
1595 mutex_lock(&mgr->payload_lock);
1596 for (i = 0; i < mgr->max_payloads; i++) {
1597 /* solve the current payloads - compare to the hw ones
1598 - update the hw view */
1599 req_payload.start_slot = cur_slots;
1600 if (mgr->proposed_vcpis[i]) {
1601 port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
1602 req_payload.num_slots = mgr->proposed_vcpis[i]->num_slots;
/* no proposed VCPI for this slot index: request zero slots */
1605 req_payload.num_slots = 0;
1607 /* work out what is required to happen with this payload */
1608 if (mgr->payloads[i].start_slot != req_payload.start_slot ||
1609 mgr->payloads[i].num_slots != req_payload.num_slots) {
1611 /* need to push an update for this payload */
1612 if (req_payload.num_slots) {
1613 drm_dp_create_payload_step1(mgr, i + 1, &req_payload);
1614 mgr->payloads[i].num_slots = req_payload.num_slots;
1615 } else if (mgr->payloads[i].num_slots) {
1616 mgr->payloads[i].num_slots = 0;
/* NOTE(review): on this branch 'port' is only set when
 * proposed_vcpis[i] is non-NULL; if it is NULL here, 'port'
 * carries a stale/uninitialized value into the destroy call —
 * verify against the full file / later upstream fixes. */
1617 drm_dp_destroy_payload_step1(mgr, port, i + 1, &mgr->payloads[i]);
1618 req_payload.payload_state = mgr->payloads[i].payload_state;
1620 req_payload.payload_state = 0;
1622 mgr->payloads[i].start_slot = req_payload.start_slot;
1623 mgr->payloads[i].payload_state = req_payload.payload_state;
/* payloads are packed back-to-back; next one starts where this ends */
1625 cur_slots += req_payload.num_slots;
1627 mutex_unlock(&mgr->payload_lock);
1631 EXPORT_SYMBOL(drm_dp_update_payload_part1);
1634 * drm_dp_update_payload_part2() - Execute payload update part 2
1635 * @mgr: manager to use.
1637 * This iterates over all proposed virtual channels, and tries to
1638 * allocate space in the link for them. For 0->slots transitions,
1639 * this step writes the remote VC payload commands. For slots->0
1640 * this just resets some internal state.
1642 int drm_dp_update_payload_part2(struct drm_dp_mst_topology_mgr *mgr)
1644 struct drm_dp_mst_port *port;
1647 mutex_lock(&mgr->payload_lock);
1648 for (i = 0; i < mgr->max_payloads; i++) {
/* only payloads with a proposed VCPI need step-2 processing */
1650 if (!mgr->proposed_vcpis[i])
1653 port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
1655 DRM_DEBUG_KMS("payload %d %d\n", i, mgr->payloads[i].payload_state);
1656 if (mgr->payloads[i].payload_state == DP_PAYLOAD_LOCAL) {
1657 ret = drm_dp_create_payload_step2(mgr, port, i + 1, &mgr->payloads[i]);
1658 } else if (mgr->payloads[i].payload_state == DP_PAYLOAD_DELETE_LOCAL) {
1659 ret = drm_dp_destroy_payload_step2(mgr, i + 1, &mgr->payloads[i]);
/* early-exit unlock (error path in the elided lines) */
1662 mutex_unlock(&mgr->payload_lock);
1666 mutex_unlock(&mgr->payload_lock);
1669 EXPORT_SYMBOL(drm_dp_update_payload_part2);
1671 #if 0 /* unused as of yet */
/*
 * drm_dp_send_dpcd_read() - compiled-out REMOTE_DPCD_READ sender.
 * NOTE(review): builds a read of 8 bytes at offset 0 regardless of the
 * @offset/@size parameters — would need fixing before being enabled.
 */
1672 static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr,
1673 struct drm_dp_mst_port *port,
1674 int offset, int size)
1677 struct drm_dp_sideband_msg_tx *txmsg;
1679 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
1683 len = build_dpcd_read(txmsg, port->port_num, 0, 8);
1684 txmsg->dst = port->parent;
1686 drm_dp_queue_down_tx(mgr, txmsg);
/*
 * drm_dp_send_dpcd_write() - send a REMOTE_DPCD_WRITE sideband request to
 * write @size bytes at @offset on the device behind @port.  Holds a
 * validated reference on the parent branch across the transaction.
 */
1692 static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
1693 struct drm_dp_mst_port *port,
1694 int offset, int size, u8 *bytes)
1698 struct drm_dp_sideband_msg_tx *txmsg;
1699 struct drm_dp_mst_branch *mstb;
1701 mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);
1705 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
1711 len = build_dpcd_write(txmsg, port->port_num, offset, size, bytes);
1714 drm_dp_queue_down_tx(mgr, txmsg);
1716 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
/* reply_type == 1 is a sideband NAK */
1718 if (txmsg->reply.reply_type == 1) {
1725 drm_dp_put_mst_branch_device(mstb);
/*
 * drm_dp_encode_up_ack_reply() - encode an ACK reply body for an up request
 * of @req_type into @msg.  reply_type is set to 1 here — presumably the
 * encoding for an up-reply ack in this helper's convention; confirm against
 * drm_dp_encode_sideband_reply() in the full file.
 */
1729 static int drm_dp_encode_up_ack_reply(struct drm_dp_sideband_msg_tx *msg, u8 req_type)
1731 struct drm_dp_sideband_msg_reply_body reply;
1733 reply.reply_type = 1;
1734 reply.req_type = req_type;
1735 drm_dp_encode_sideband_reply(&reply, msg);
/*
 * drm_dp_send_up_ack_reply() - queue an ACK for a received up request on the
 * up-reply queue, reusing the up request's @seqno, and start transmission if
 * the up queue is idle.
 */
1739 static int drm_dp_send_up_ack_reply(struct drm_dp_mst_topology_mgr *mgr,
1740 struct drm_dp_mst_branch *mstb,
1741 int req_type, int seqno, bool broadcast)
1743 struct drm_dp_sideband_msg_tx *txmsg;
1745 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
/* replies must carry the same seqno as the request they ack */
1750 txmsg->seqno = seqno;
1751 drm_dp_encode_up_ack_reply(txmsg, req_type);
1753 mutex_lock(&mgr->qlock);
1754 list_add_tail(&txmsg->next, &mgr->tx_msg_upq);
1755 if (!mgr->tx_up_in_progress) {
1756 process_single_up_tx_qlock(mgr);
1758 mutex_unlock(&mgr->qlock);
/*
 * drm_dp_get_vc_payload_bw() - PBN-per-timeslot divider for a given link
 * rate and lane count (3/5/10 per lane for RBR/HBR/HBR2, matching the
 * DP MST PBN arithmetic).
 */
1762 static int drm_dp_get_vc_payload_bw(int dp_link_bw, int dp_link_count)
1764 switch (dp_link_bw) {
1765 case DP_LINK_BW_1_62:
1766 return 3 * dp_link_count;
1767 case DP_LINK_BW_2_7:
1768 return 5 * dp_link_count;
1769 case DP_LINK_BW_5_4:
1770 return 10 * dp_link_count;
1776 * drm_dp_mst_topology_mgr_set_mst() - Set the MST state for a topology manager
1777 * @mgr: manager to set state for
1778 * @mst_state: true to enable MST on this connector - false to disable.
1780 * This is called by the driver when it detects an MST capable device plugged
1781 * into a DP MST capable port, or when a DP MST capable device is unplugged.
1783 int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool mst_state)
1786 struct drm_dp_mst_branch *mstb = NULL;
1788 mutex_lock(&mgr->lock);
/* no-op if the requested state matches the current one */
1789 if (mst_state == mgr->mst_state)
1792 mgr->mst_state = mst_state;
1793 /* set the device into MST mode */
1795 WARN_ON(mgr->mst_primary);
/* read full receiver caps so we can derive link bandwidth below */
1798 ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE);
1799 if (ret != DP_RECEIVER_CAP_SIZE) {
1800 DRM_DEBUG_KMS("failed to read DPCD\n");
/* dpcd[1] = max link rate, dpcd[2] masked = max lane count */
1804 mgr->pbn_div = drm_dp_get_vc_payload_bw(mgr->dpcd[1], mgr->dpcd[2] & DP_MAX_LANE_COUNT_MASK);
/* NOTE(review): total_pbn hardcoded to 2560 — presumably a placeholder
 * rather than a per-link computed value; confirm intent upstream. */
1805 mgr->total_pbn = 2560;
1806 mgr->total_slots = DIV_ROUND_UP(mgr->total_pbn, mgr->pbn_div);
1807 mgr->avail_slots = mgr->total_slots;
1809 /* add initial branch device at LCT 1 */
1810 mstb = drm_dp_add_mst_branch_device(1, NULL);
1817 /* give this the main reference */
1818 mgr->mst_primary = mstb;
1819 kref_get(&mgr->mst_primary->kref);
/* clear the whole payload table (VCPI 0, 0x3f slots) before enabling MST */
1822 struct drm_dp_payload reset_pay;
1823 reset_pay.start_slot = 0;
1824 reset_pay.num_slots = 0x3f;
1825 drm_dp_dpcd_write_payload(mgr, 0, &reset_pay);
1828 ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
1829 DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC);
1836 ret = drm_dp_dpcd_read(mgr->aux, DP_GUID, mgr->guid, 16);
1838 DRM_DEBUG_KMS("failed to read DP GUID %d\n", ret);
/* if the branch has no valid GUID, write one (generated in elided lines);
 * NOTE(review): the write's return value is not checked here */
1842 mgr->guid_valid = drm_dp_validate_guid(mgr, mgr->guid);
1843 if (!mgr->guid_valid) {
1844 ret = drm_dp_dpcd_write(mgr->aux, DP_GUID, mgr->guid, 16);
1845 mgr->guid_valid = true;
/* kick off the topology probe on the long-running workqueue */
1848 queue_work(system_long_wq, &mgr->work);
1852 /* disable MST on the device */
1853 mstb = mgr->mst_primary;
1854 mgr->mst_primary = NULL;
1855 /* this can fail if the device is gone */
1856 drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 0);
1858 memset(mgr->payloads, 0, mgr->max_payloads * sizeof(struct drm_dp_payload));
1859 mgr->payload_mask = 0;
1860 set_bit(0, &mgr->payload_mask);
1864 mutex_unlock(&mgr->lock);
/* drop the primary reference outside the lock */
1866 drm_dp_put_mst_branch_device(mstb);
1870 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_set_mst);
1873 * drm_dp_mst_topology_mgr_suspend() - suspend the MST manager
1874 * @mgr: manager to suspend
1876 * This function tells the MST device that we can't handle UP messages
1877 * anymore. This should stop it from sending any since we are suspended.
/* Implemented by rewriting MSTM_CTRL without DP_UP_REQ_EN. */
1879 void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr)
1881 mutex_lock(&mgr->lock);
1882 drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
1883 DP_MST_EN | DP_UPSTREAM_IS_SRC);
1884 mutex_unlock(&mgr->lock);
1886 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_suspend);
1889 * drm_dp_mst_topology_mgr_resume() - resume the MST manager
1890 * @mgr: manager to resume
1892 * This will fetch DPCD and see if the device is still there,
1893 * if it is, it will rewrite the MSTM control bits, and return.
1895 * if the device fails this returns -1, and the driver should do
1896 * a full MST reprobe, in case we were undocked.
1898 int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr)
1902 mutex_lock(&mgr->lock);
1904 if (mgr->mst_primary) {
/* re-read receiver caps to detect an undock during suspend */
1906 sret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE);
1907 if (sret != DP_RECEIVER_CAP_SIZE) {
1908 DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
/* re-enable MST and up-request delivery */
1913 ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
1914 DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC);
1916 DRM_DEBUG_KMS("mst write failed - undocked during suspend?\n");
1925 mutex_unlock(&mgr->lock);
1928 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_resume);
/*
 * drm_dp_get_one_sb_msg() - read one complete sideband message from the
 * device's DPCD message window.  @up selects the UP_REQ vs DOWN_REP window
 * and the matching drm_dp_sideband_msg_rx accumulator on @mgr.  Reads the
 * first (header-bearing) chunk, then loops reading/parsing the remainder
 * in transaction-size-limited chunks.
 */
1930 static void drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up)
1934 int replylen, origlen, curreply;
1936 struct drm_dp_sideband_msg_rx *msg;
1937 int basereg = up ? DP_SIDEBAND_MSG_UP_REQ_BASE : DP_SIDEBAND_MSG_DOWN_REP_BASE;
1938 msg = up ? &mgr->up_req_recv : &mgr->down_rep_recv;
/* first chunk: at most 16 bytes, bounded by the hw's DPCD transaction limit */
1940 len = min(mgr->max_dpcd_transaction_bytes, 16);
1941 ret = drm_dp_dpcd_read(mgr->aux, basereg,
1944 DRM_DEBUG_KMS("failed to read DPCD down rep %d %d\n", len, ret);
1947 ret = drm_dp_sideband_msg_build(msg, replyblock, len, true);
1949 DRM_DEBUG_KMS("sideband msg build failed %d\n", replyblock[0]);
/* total message length comes from the parsed chunk header */
1952 replylen = msg->curchunk_len + msg->curchunk_hdrlen;
1957 while (replylen > 0) {
1958 len = min3(replylen, mgr->max_dpcd_transaction_bytes, 16);
1959 ret = drm_dp_dpcd_read(mgr->aux, basereg + curreply,
1962 DRM_DEBUG_KMS("failed to read a chunk\n");
1964 ret = drm_dp_sideband_msg_build(msg, replyblock, len, false);
1966 DRM_DEBUG_KMS("failed to build sideband msg\n");
/*
 * drm_dp_mst_handle_down_rep() - process a received down reply: once a full
 * message (EOMT) has accumulated, find the originating branch + tx slot by
 * the reply header's lct/rad/seqno, parse the reply into the waiting txmsg,
 * mark it received, and wake the waiter.
 */
1972 static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
1976 drm_dp_get_one_sb_msg(mgr, false);
1978 if (mgr->down_rep_recv.have_eomt) {
1979 struct drm_dp_sideband_msg_tx *txmsg;
1980 struct drm_dp_mst_branch *mstb;
1982 mstb = drm_dp_get_mst_branch_device(mgr,
1983 mgr->down_rep_recv.initial_hdr.lct,
1984 mgr->down_rep_recv.initial_hdr.rad);
/* unknown sender: drop the message and reset the rx accumulator */
1987 DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->down_rep_recv.initial_hdr.lct);
1988 memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
1992 /* find the message */
1993 slot = mgr->down_rep_recv.initial_hdr.seqno;
1994 mutex_lock(&mgr->qlock);
1995 txmsg = mstb->tx_slots[slot];
1996 /* remove from slots */
1997 mutex_unlock(&mgr->qlock);
/* reply with no matching request pending in that slot */
2000 DRM_DEBUG_KMS("Got MST reply with no msg %p %d %d %02x %02x\n",
2002 mgr->down_rep_recv.initial_hdr.seqno,
2003 mgr->down_rep_recv.initial_hdr.lct,
2004 mgr->down_rep_recv.initial_hdr.rad[0],
2005 mgr->down_rep_recv.msg[0]);
2006 drm_dp_put_mst_branch_device(mstb);
2007 memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
2011 drm_dp_sideband_parse_reply(&mgr->down_rep_recv, &txmsg->reply);
2012 if (txmsg->reply.reply_type == 1) {
2013 DRM_DEBUG_KMS("Got NAK reply: req 0x%02x, reason 0x%02x, nak data 0x%02x\n", txmsg->reply.req_type, txmsg->reply.u.nak.reason, txmsg->reply.u.nak.nak_data);
2016 memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
2017 drm_dp_put_mst_branch_device(mstb);
/* hand the parsed reply to the waiter and free the tx slot */
2019 mutex_lock(&mgr->qlock);
2020 txmsg->state = DRM_DP_SIDEBAND_TX_RX;
2021 mstb->tx_slots[slot] = NULL;
2022 mutex_unlock(&mgr->qlock);
2024 wake_up(&mgr->tx_waitq);
/*
 * drm_dp_mst_handle_up_req() - process a device-initiated up request.
 * On a complete message: locate the sending branch, ACK the request with the
 * same seqno, and for CONNECTION_STATUS_NOTIFY update the affected port and
 * fire the hotplug callback; RESOURCE_STATUS_NOTIFY is only logged.
 */
2029 static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
2032 drm_dp_get_one_sb_msg(mgr, true);
2034 if (mgr->up_req_recv.have_eomt) {
2035 struct drm_dp_sideband_msg_req_body msg;
2036 struct drm_dp_mst_branch *mstb;
2038 mstb = drm_dp_get_mst_branch_device(mgr,
2039 mgr->up_req_recv.initial_hdr.lct,
2040 mgr->up_req_recv.initial_hdr.rad);
2042 DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct);
2043 memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
2047 seqno = mgr->up_req_recv.initial_hdr.seqno;
2048 drm_dp_sideband_parse_req(&mgr->up_req_recv, &msg);
2050 if (msg.req_type == DP_CONNECTION_STATUS_NOTIFY) {
2051 drm_dp_send_up_ack_reply(mgr, mstb, msg.req_type, seqno, false);
2052 drm_dp_update_port(mstb, &msg.u.conn_stat);
2053 DRM_DEBUG_KMS("Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n", msg.u.conn_stat.port_number, msg.u.conn_stat.legacy_device_plug_status, msg.u.conn_stat.displayport_device_plug_status, msg.u.conn_stat.message_capability_status, msg.u.conn_stat.input_port, msg.u.conn_stat.peer_device_type);
2054 (*mgr->cbs->hotplug)(mgr);
2056 } else if (msg.req_type == DP_RESOURCE_STATUS_NOTIFY) {
2057 drm_dp_send_up_ack_reply(mgr, mstb, msg.req_type, seqno, false);
2058 DRM_DEBUG_KMS("Got RSN: pn: %d avail_pbn %d\n", msg.u.resource_stat.port_number, msg.u.resource_stat.available_pbn);
2061 drm_dp_put_mst_branch_device(mstb);
2062 memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
2068 * drm_dp_mst_hpd_irq() - MST hotplug IRQ notify
2069 * @mgr: manager to notify irq for.
2070 * @esi: 4 bytes from SINK_COUNT_ESI
2072 * This should be called from the driver when it detects a short IRQ,
2073 * along with the value of the DEVICE_SERVICE_IRQ_VECTOR_ESI0. The
2074 * topology manager will process the sideband messages received as a result
2077 int drm_dp_mst_hpd_irq(struct drm_dp_mst_topology_mgr *mgr, u8 *esi, bool *handled)
/* sink-count change is tracked (sc derived in elided lines from esi) */
2084 if (sc != mgr->sink_count) {
2085 mgr->sink_count = sc;
2089 if (esi[1] & DP_DOWN_REP_MSG_RDY) {
2090 ret = drm_dp_mst_handle_down_rep(mgr);
2094 if (esi[1] & DP_UP_REQ_MSG_RDY) {
2095 ret |= drm_dp_mst_handle_up_req(mgr);
/* retransmit queued messages after acking the IRQ sources */
2099 drm_dp_mst_kick_tx(mgr);
2102 EXPORT_SYMBOL(drm_dp_mst_hpd_irq);
2105 * drm_dp_mst_detect_port() - get connection status for an MST port
2106 * @mgr: manager for this port
2107 * @port: unverified pointer to a port
2109 * This returns the current connection state for a port. It validates the
2110 * port pointer still exists so the caller doesn't require a reference
2112 enum drm_connector_status drm_dp_mst_detect_port(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
2114 enum drm_connector_status status = connector_status_disconnected;
2116 /* we need to search for the port in the mgr in case its gone */
2117 port = drm_dp_get_validated_port_ref(mgr, port);
2119 return connector_status_disconnected;
/* map the port's peer device type to a connector status */
2124 switch (port->pdt) {
2125 case DP_PEER_DEVICE_NONE:
2126 case DP_PEER_DEVICE_MST_BRANCHING:
2129 case DP_PEER_DEVICE_SST_SINK:
2130 status = connector_status_connected;
2132 case DP_PEER_DEVICE_DP_LEGACY_CONV:
2134 status = connector_status_connected;
/* drop the validation reference taken above */
2138 drm_dp_put_port(port);
2141 EXPORT_SYMBOL(drm_dp_mst_detect_port);
2144 * drm_dp_mst_get_edid() - get EDID for an MST port
2145 * @connector: toplevel connector to get EDID for
2146 * @mgr: manager for this port
2147 * @port: unverified pointer to a port.
2149 * This returns an EDID for the port connected to a connector,
2150 * It validates the pointer still exists so the caller doesn't require a
/* Returns NULL if the port has vanished from the topology. */
2153 struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
2155 struct edid *edid = NULL;
2157 /* we need to search for the port in the mgr in case its gone */
2158 port = drm_dp_get_validated_port_ref(mgr, port);
/* read over the port's remote-i2c DDC channel */
2162 edid = drm_get_edid(connector, &port->aux.ddc);
2163 drm_dp_put_port(port);
2166 EXPORT_SYMBOL(drm_dp_mst_get_edid);
2169 * drm_dp_find_vcpi_slots() - find slots for this PBN value
2170 * @mgr: manager to use
2171 * @pbn: payload bandwidth to convert into slots.
/* Converts PBN to timeslot count; fails if more than avail_slots needed. */
2173 int drm_dp_find_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr,
2178 num_slots = DIV_ROUND_UP(pbn, mgr->pbn_div);
2180 if (num_slots > mgr->avail_slots)
2184 EXPORT_SYMBOL(drm_dp_find_vcpi_slots);
/*
 * drm_dp_init_vcpi() - size a VCPI for @pbn (slot count, slot-aligned PBN)
 * and assign it a payload id via drm_dp_mst_assign_payload_id().
 * Fails when the required slots exceed mgr->avail_slots.
 */
2186 static int drm_dp_init_vcpi(struct drm_dp_mst_topology_mgr *mgr,
2187 struct drm_dp_vcpi *vcpi, int pbn)
2192 num_slots = DIV_ROUND_UP(pbn, mgr->pbn_div);
2194 if (num_slots > mgr->avail_slots)
2198 vcpi->aligned_pbn = num_slots * mgr->pbn_div;
2199 vcpi->num_slots = num_slots;
2201 ret = drm_dp_mst_assign_payload_id(mgr, vcpi);
2208 * drm_dp_mst_allocate_vcpi() - Allocate a virtual channel
2209 * @mgr: manager for this port
2210 * @port: port to allocate a virtual channel for.
2211 * @pbn: payload bandwidth number to request
2212 * @slots: returned number of slots for this PBN.
2214 bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, int pbn, int *slots)
2218 port = drm_dp_get_validated_port_ref(mgr, port)
/* port already has a VCPI: reuse it only if the PBN matches */
2222 if (port->vcpi.vcpi > 0) {
2223 DRM_DEBUG_KMS("payload: vcpi %d already allocated for pbn %d - requested pbn %d\n", port->vcpi.vcpi, port->vcpi.pbn, pbn);
2224 if (pbn == port->vcpi.pbn) {
2225 *slots = port->vcpi.num_slots;
2230 ret = drm_dp_init_vcpi(mgr, &port->vcpi, pbn);
2232 DRM_DEBUG_KMS("failed to init vcpi %d %d %d\n", DIV_ROUND_UP(pbn, mgr->pbn_div), mgr->avail_slots, ret);
2235 DRM_DEBUG_KMS("initing vcpi for %d %d\n", pbn, port->vcpi.num_slots);
2236 *slots = port->vcpi.num_slots;
2238 drm_dp_put_port(port);
2243 EXPORT_SYMBOL(drm_dp_mst_allocate_vcpi);
2246 * drm_dp_mst_reset_vcpi_slots() - Reset number of slots to 0 for VCPI
2247 * @mgr: manager for this port
2248 * @port: unverified pointer to a port.
2250 * This just resets the number of slots for the ports VCPI for later programming.
2252 void drm_dp_mst_reset_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
/* validate the unverified pointer before touching it */
2254 port = drm_dp_get_validated_port_ref(mgr, port);
2257 port->vcpi.num_slots = 0;
2258 drm_dp_put_port(port);
2260 EXPORT_SYMBOL(drm_dp_mst_reset_vcpi_slots);
2263 * drm_dp_mst_deallocate_vcpi() - deallocate a VCPI
2264 * @mgr: manager for this port
2265 * @port: unverified port to deallocate vcpi for
2267 void drm_dp_mst_deallocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
2269 port = drm_dp_get_validated_port_ref(mgr, port);
/* return the payload id to the manager and zero the port's vcpi state */
2273 drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
2274 port->vcpi.num_slots = 0;
2276 port->vcpi.aligned_pbn = 0;
2277 port->vcpi.vcpi = 0;
2278 drm_dp_put_port(port);
2280 EXPORT_SYMBOL(drm_dp_mst_deallocate_vcpi);
/*
 * drm_dp_dpcd_write_payload() - program one VC payload table entry via DPCD:
 * ack any pending table-update status, write {id, start, count} to
 * PAYLOAD_ALLOCATE_SET, then poll PAYLOAD_TABLE_UPDATE_STATUS until the
 * branch reports the table updated (10-20ms backoff between polls).
 */
2282 static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
2283 int id, struct drm_dp_payload *payload)
2285 u8 payload_alloc[3], status;
/* clear stale UPDATED status before issuing the new allocation */
2289 drm_dp_dpcd_writeb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS,
2290 DP_PAYLOAD_TABLE_UPDATED);
2292 payload_alloc[0] = id;
2293 payload_alloc[1] = payload->start_slot;
2294 payload_alloc[2] = payload->num_slots;
2296 ret = drm_dp_dpcd_write(mgr->aux, DP_PAYLOAD_ALLOCATE_SET, payload_alloc, 3);
2298 DRM_DEBUG_KMS("failed to write payload allocation %d\n", ret);
2303 ret = drm_dp_dpcd_readb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
2305 DRM_DEBUG_KMS("failed to read payload table status %d\n", ret);
2309 if (!(status & DP_PAYLOAD_TABLE_UPDATED)) {
2312 usleep_range(10000, 20000);
2315 DRM_DEBUG_KMS("status not set after read payload table status %d\n", status);
2326 * drm_dp_check_act_status() - Check ACT handled status.
2327 * @mgr: manager to use
2329 * Check the payload status bits in the DPCD for ACT handled completion.
/* Polls up to 30 times for DP_PAYLOAD_ACT_HANDLED before giving up. */
2331 int drm_dp_check_act_status(struct drm_dp_mst_topology_mgr *mgr)
2338 ret = drm_dp_dpcd_readb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
2341 DRM_DEBUG_KMS("failed to read payload table status %d\n", ret);
2345 if (status & DP_PAYLOAD_ACT_HANDLED)
2350 } while (count < 30);
2352 if (!(status & DP_PAYLOAD_ACT_HANDLED)) {
2353 DRM_DEBUG_KMS("failed to get ACT bit %d after %d retries\n", status, count);
2361 EXPORT_SYMBOL(drm_dp_check_act_status);
2364 * drm_dp_calc_pbn_mode() - Calculate the PBN for a mode.
2365 * @clock: dot clock for the mode
2366 * @bpp: bpp for the mode.
2368 * This uses the formula in the spec to calculate the PBN value for a mode.
/* Fixed-point (20.12) evaluation of: clock * bpp/8 * (64/54) * 1.006,
 * with a final divide and ceil (divisor set in elided lines). */
2370 int drm_dp_calc_pbn_mode(int clock, int bpp)
2375 fixed20_12 margin, tmp;
2378 pix_bw.full = dfixed_const(clock);
2379 fbpp.full = dfixed_const(bpp);
2380 tmp.full = dfixed_const(8);
/* bytes per pixel = bpp / 8 */
2381 fbpp.full = dfixed_div(fbpp, tmp);
2383 result.full = dfixed_mul(pix_bw, fbpp);
/* 54/64 MTP-efficiency factor, applied as a divide */
2384 margin.full = dfixed_const(54);
2385 tmp.full = dfixed_const(64);
2386 margin.full = dfixed_div(margin, tmp);
2387 result.full = dfixed_div(result, margin);
/* 1.006 margin per the DP spec */
2389 margin.full = dfixed_const(1006);
2390 tmp.full = dfixed_const(1000);
2391 margin.full = dfixed_div(margin, tmp);
2392 result.full = dfixed_mul(result, margin);
2394 result.full = dfixed_div(result, tmp);
2395 result.full = dfixed_ceil(result);
2396 res = dfixed_trunc(result);
2399 EXPORT_SYMBOL(drm_dp_calc_pbn_mode);
/*
 * test_calc_pbn_mode() - self-test for drm_dp_calc_pbn_mode() with two known
 * clock/bpp pairs (expected values checked in elided lines).
 */
2401 static int test_calc_pbn_mode(void)
2404 ret = drm_dp_calc_pbn_mode(154000, 30);
2407 ret = drm_dp_calc_pbn_mode(234000, 30);
2413 /* we want to kick the TX after we've ack the up/down IRQs. */
/* Defers transmission to drm_dp_tx_work() on the long-running workqueue. */
2414 static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr)
2416 queue_work(system_long_wq, &mgr->tx_work);
/*
 * drm_dp_mst_dump_mstb() - recursively dump a branch and its ports to a
 * seq_file, indented by the branch's link count (prefix built in elided
 * lines from the tab count).
 */
2419 static void drm_dp_mst_dump_mstb(struct seq_file *m,
2420 struct drm_dp_mst_branch *mstb)
2422 struct drm_dp_mst_port *port;
2423 int tabs = mstb->lct;
2427 for (i = 0; i < tabs; i++)
2431 seq_printf(m, "%smst: %p, %d\n", prefix, mstb, mstb->num_ports);
2432 list_for_each_entry(port, &mstb->ports, next) {
2433 seq_printf(m, "%sport: %d: ddps: %d ldps: %d, %p, conn: %p\n", prefix, port->port_num, port->ddps, port->ldps, port, port->connector);
/* recurse into downstream branches */
2435 drm_dp_mst_dump_mstb(m, port->mstb);
/*
 * dump_dp_payload_table() - read 64 bytes of the DPCD payload table status
 * region into @buf in four 16-byte chunks; returns success/failure.
 */
2439 static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
2444 for (i = 0; i < 4; i++) {
2445 ret = drm_dp_dpcd_read(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS + (i * 16), &buf[i * 16], 16);
2455 * drm_dp_mst_dump_topology(): dump topology to seq file.
2456 * @m: seq_file to dump output to
2457 * @mgr: manager to dump current topology for.
2459 * helper to dump MST topology to a seq file for debugfs.
2461 void drm_dp_mst_dump_topology(struct seq_file *m,
2462 struct drm_dp_mst_topology_mgr *mgr)
2465 struct drm_dp_mst_port *port;
/* section 1: branch/port tree, under the topology lock */
2466 mutex_lock(&mgr->lock);
2467 if (mgr->mst_primary)
2468 drm_dp_mst_dump_mstb(m, mgr->mst_primary);
2471 mutex_unlock(&mgr->lock);
/* section 2: VCPI and payload tables, under the payload lock */
2473 mutex_lock(&mgr->payload_lock);
2474 seq_printf(m, "vcpi: %lx\n", mgr->payload_mask);
2476 for (i = 0; i < mgr->max_payloads; i++) {
2477 if (mgr->proposed_vcpis[i]) {
2478 port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
2479 seq_printf(m, "vcpi %d: %d %d %d\n", i, port->port_num, port->vcpi.vcpi, port->vcpi.num_slots);
/* NOTE(review): "unsed" is a typo for "unused" in debugfs output —
 * runtime string, left untouched here */
2481 seq_printf(m, "vcpi %d:unsed\n", i);
2483 for (i = 0; i < mgr->max_payloads; i++) {
2484 seq_printf(m, "payload %d: %d, %d, %d\n",
2486 mgr->payloads[i].payload_state,
2487 mgr->payloads[i].start_slot,
2488 mgr->payloads[i].num_slots);
2492 mutex_unlock(&mgr->payload_lock);
/* section 3: raw DPCD snapshots, back under the topology lock */
2494 mutex_lock(&mgr->lock);
2495 if (mgr->mst_primary) {
2499 ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, buf, DP_RECEIVER_CAP_SIZE);
2500 seq_printf(m, "dpcd: ");
2501 for (i = 0; i < DP_RECEIVER_CAP_SIZE; i++)
2502 seq_printf(m, "%02x ", buf[i]);
2503 seq_printf(m, "\n");
2504 ret = drm_dp_dpcd_read(mgr->aux, DP_FAUX_CAP, buf, 2);
2505 seq_printf(m, "faux/mst: ");
2506 for (i = 0; i < 2; i++)
2507 seq_printf(m, "%02x ", buf[i]);
2508 seq_printf(m, "\n");
2509 ret = drm_dp_dpcd_read(mgr->aux, DP_MSTM_CTRL, buf, 1);
2510 seq_printf(m, "mst ctrl: ");
2511 for (i = 0; i < 1; i++)
2512 seq_printf(m, "%02x ", buf[i]);
2513 seq_printf(m, "\n");
2515 bret = dump_dp_payload_table(mgr, buf);
2517 seq_printf(m, "payload table: ");
2518 for (i = 0; i < 63; i++)
2519 seq_printf(m, "%02x ", buf[i]);
2520 seq_printf(m, "\n");
2525 mutex_unlock(&mgr->lock);
2528 EXPORT_SYMBOL(drm_dp_mst_dump_topology);
/*
 * drm_dp_tx_work() - workqueue handler (queued by drm_dp_mst_kick_tx())
 * that resumes down-queue transmission if one was in progress.
 */
2530 static void drm_dp_tx_work(struct work_struct *work)
2532 struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, tx_work);
2534 mutex_lock(&mgr->qlock);
2535 if (mgr->tx_down_in_progress)
2536 process_single_down_tx_qlock(mgr);
2537 mutex_unlock(&mgr->qlock);
2541 * drm_dp_mst_topology_mgr_init - initialise a topology manager
2542 * @mgr: manager struct to initialise
2543 * @dev: device providing this structure - for i2c addition.
2544 * @aux: DP helper aux channel to talk to this device
2545 * @max_dpcd_transaction_bytes: hw specific DPCD transaction limit
2546 * @max_payloads: maximum number of payloads this GPU can source
2547 * @conn_base_id: the connector object ID the MST device is connected to.
2549 * Return 0 for success, or negative error code on failure
2551 int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
2552 struct device *dev, struct drm_dp_aux *aux,
2553 int max_dpcd_transaction_bytes,
2554 int max_payloads, int conn_base_id)
2556 mutex_init(&mgr->lock);
2557 mutex_init(&mgr->qlock);
2558 mutex_init(&mgr->payload_lock);
2559 INIT_LIST_HEAD(&mgr->tx_msg_upq);
2560 INIT_LIST_HEAD(&mgr->tx_msg_downq);
2561 INIT_WORK(&mgr->work, drm_dp_mst_link_probe_work);
2562 INIT_WORK(&mgr->tx_work, drm_dp_tx_work);
2563 init_waitqueue_head(&mgr->tx_waitq);
2566 mgr->max_dpcd_transaction_bytes = max_dpcd_transaction_bytes;
2567 mgr->max_payloads = max_payloads;
2568 mgr->conn_base_id = conn_base_id;
2569 mgr->payloads = kcalloc(max_payloads, sizeof(struct drm_dp_payload), GFP_KERNEL);
2572 mgr->proposed_vcpis = kcalloc(max_payloads, sizeof(struct drm_dp_vcpi *), GFP_KERNEL);
2573 if (!mgr->proposed_vcpis)
/* payload id 0 is reserved */
2575 set_bit(0, &mgr->payload_mask);
/* run the PBN self-test once at init */
2576 test_calc_pbn_mode();
2579 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_init);
2582 * drm_dp_mst_topology_mgr_destroy() - destroy topology manager.
2583 * @mgr: manager to destroy
/* Frees the payload/VCPI arrays under the payload lock and NULLs the
 * pointers so stale use is detectable. */
2585 void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr)
2587 mutex_lock(&mgr->payload_lock);
2588 kfree(mgr->payloads);
2589 mgr->payloads = NULL;
2590 kfree(mgr->proposed_vcpis);
2591 mgr->proposed_vcpis = NULL;
2592 mutex_unlock(&mgr->payload_lock);
2596 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_destroy);
/*
 * drm_dp_mst_i2c_xfer() - i2c_algorithm master_xfer for remote I2C over MST.
 * Only supports the pattern "zero or more writes followed by one read",
 * mapped onto a single REMOTE_I2C_READ sideband transaction; anything else
 * is rejected as unsupported.
 */
2599 static int drm_dp_mst_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
2602 struct drm_dp_aux *aux = adapter->algo_data;
2603 struct drm_dp_mst_port *port = container_of(aux, struct drm_dp_mst_port, aux);
2604 struct drm_dp_mst_branch *mstb;
2605 struct drm_dp_mst_topology_mgr *mgr = port->mgr;
2607 bool reading = false;
2608 struct drm_dp_sideband_msg_req_body msg;
2609 struct drm_dp_sideband_msg_tx *txmsg = NULL;
2612 mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);
2616 /* construct i2c msg */
2617 /* see if last msg is a read */
2618 if (msgs[num - 1].flags & I2C_M_RD)
2622 DRM_DEBUG_KMS("Unsupported I2C transaction for MST device\n");
/* encode leading writes as sub-transactions, final read separately */
2627 msg.req_type = DP_REMOTE_I2C_READ;
2628 msg.u.i2c_read.num_transactions = num - 1;
2629 msg.u.i2c_read.port_number = port->port_num;
2630 for (i = 0; i < num - 1; i++) {
2631 msg.u.i2c_read.transactions[i].i2c_dev_id = msgs[i].addr;
2632 msg.u.i2c_read.transactions[i].num_bytes = msgs[i].len;
2633 msg.u.i2c_read.transactions[i].bytes = msgs[i].buf;
2635 msg.u.i2c_read.read_i2c_device_id = msgs[num - 1].addr;
2636 msg.u.i2c_read.num_bytes_read = msgs[num - 1].len;
2638 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
2645 drm_dp_encode_sideband_req(&msg, txmsg);
2647 drm_dp_queue_down_tx(mgr, txmsg);
2649 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
2652 if (txmsg->reply.reply_type == 1) { /* got a NAK back */
/* short read from the remote device is treated as an error */
2656 if (txmsg->reply.u.remote_i2c_read_ack.num_bytes != msgs[num - 1].len) {
2660 memcpy(msgs[num - 1].buf, txmsg->reply.u.remote_i2c_read_ack.bytes, msgs[num - 1].len);
2665 drm_dp_put_mst_branch_device(mstb);
2669 static u32 drm_dp_mst_i2c_functionality(struct i2c_adapter *adapter)
2671 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
2672 I2C_FUNC_SMBUS_READ_BLOCK_DATA |
2673 I2C_FUNC_SMBUS_BLOCK_PROC_CALL |
2674 I2C_FUNC_10BIT_ADDR;
/* I2C algorithm backing each MST port's DDC adapter (see
 * drm_dp_mst_register_i2c_bus()).
 */
static const struct i2c_algorithm drm_dp_mst_i2c_algo = {
	.functionality = drm_dp_mst_i2c_functionality,
	.master_xfer = drm_dp_mst_i2c_xfer,
};
2683 * drm_dp_mst_register_i2c_bus() - register an I2C adapter for I2C-over-AUX
2684 * @aux: DisplayPort AUX channel
2686 * Returns 0 on success or a negative error code on failure.
2688 static int drm_dp_mst_register_i2c_bus(struct drm_dp_aux *aux)
2690 aux->ddc.algo = &drm_dp_mst_i2c_algo;
2691 aux->ddc.algo_data = aux;
2692 aux->ddc.retries = 3;
2694 aux->ddc.class = I2C_CLASS_DDC;
2695 aux->ddc.owner = THIS_MODULE;
2696 aux->ddc.dev.parent = aux->dev;
2697 aux->ddc.dev.of_node = aux->dev->of_node;
2699 strlcpy(aux->ddc.name, aux->name ? aux->name : dev_name(aux->dev),
2700 sizeof(aux->ddc.name));
2702 return i2c_add_adapter(&aux->ddc);
 * drm_dp_mst_unregister_i2c_bus() - unregister an I2C-over-AUX adapter
 * @aux: DisplayPort AUX channel
 */
static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_aux *aux)
{
	/* tears down the DDC adapter set up by drm_dp_mst_register_i2c_bus() */
	i2c_del_adapter(&aux->ddc);
}