xref: /openbsd-src/sys/dev/pci/drm/display/drm_dp_mst_topology.c (revision d852286ee8ed0e977d0b824dd5b25ba139666334)
/*
 * Copyright © 2014 Red Hat
 *
 * Permission to use, copy, modify, distribute, and sell this software and its
 * documentation for any purpose is hereby granted without fee, provided that
 * the above copyright notice appear in all copies and that both that copyright
 * notice and this permission notice appear in supporting documentation, and
 * that the name of the copyright holders not be used in advertising or
 * publicity pertaining to distribution of the software without specific,
 * written prior permission.  The copyright holders make no representations
 * about the suitability of this software for any purpose.  It is provided "as
 * is" without express or implied warranty.
 *
 * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
 * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
 * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
 * OF THIS SOFTWARE.
 */

#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/iopoll.h>

#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
#include <linux/stacktrace.h>
#include <linux/sort.h>
#include <linux/timekeeping.h>
#include <linux/math64.h>
#endif

#include <drm/display/drm_dp_mst_helper.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_edid.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>

#include "drm_dp_helper_internal.h"
#include "drm_dp_mst_topology_internal.h"

/**
 * DOC: dp mst helper
 *
 * These functions contain parts of the DisplayPort 1.2a MultiStream Transport
 * protocol. The helpers contain a topology manager and bandwidth manager.
 * The helpers encapsulate the sending and receiving of sideband msgs.
 */
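
/*
 * A hedged usage sketch (not compiled; the 'dp' container name and hotplug
 * hooks are illustrative driver-side placeholders): a driver embeds one
 * topology manager per MST-capable DP port and drives it from its hotplug
 * handlers. The entry points referenced below are defined further down in
 * this file.
 *
 *	drm_dp_mst_topology_mgr_init(&dp->mst_mgr, ...);	// at connector init
 *	...
 *	drm_dp_mst_topology_mgr_set_mst(&dp->mst_mgr, true);	// MST hub plugged
 *	...	// link addressing, remote DPCD/i2c etc. now flow over sideband
 *	drm_dp_mst_topology_mgr_set_mst(&dp->mst_mgr, false);	// hub unplugged
 */
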
struct drm_dp_pending_up_req {
	struct drm_dp_sideband_msg_hdr hdr;
	struct drm_dp_sideband_msg_req_body msg;
	struct list_head next;
};

static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
				  char *buf);

static void drm_dp_mst_topology_put_port(struct drm_dp_mst_port *port);

static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
				     int id, u8 start_slot, u8 num_slots);

static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr,
				 struct drm_dp_mst_port *port,
				 int offset, int size, u8 *bytes);
static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
				  struct drm_dp_mst_port *port,
				  int offset, int size, u8 *bytes);

static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
				    struct drm_dp_mst_branch *mstb);

static void
drm_dp_send_clear_payload_id_table(struct drm_dp_mst_topology_mgr *mgr,
				   struct drm_dp_mst_branch *mstb);

static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
					   struct drm_dp_mst_branch *mstb,
					   struct drm_dp_mst_port *port);
static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
				 u8 *guid);

static int drm_dp_mst_register_i2c_bus(struct drm_dp_mst_port *port);
static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_mst_port *port);
static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr);

static bool drm_dp_mst_port_downstream_of_branch(struct drm_dp_mst_port *port,
						 struct drm_dp_mst_branch *branch);

#define DBG_PREFIX "[dp_mst]"

#define DP_STR(x) [DP_ ## x] = #x

static const char *drm_dp_mst_req_type_str(u8 req_type)
{
	static const char * const req_type_str[] = {
		DP_STR(GET_MSG_TRANSACTION_VERSION),
		DP_STR(LINK_ADDRESS),
		DP_STR(CONNECTION_STATUS_NOTIFY),
		DP_STR(ENUM_PATH_RESOURCES),
		DP_STR(ALLOCATE_PAYLOAD),
		DP_STR(QUERY_PAYLOAD),
		DP_STR(RESOURCE_STATUS_NOTIFY),
		DP_STR(CLEAR_PAYLOAD_ID_TABLE),
		DP_STR(REMOTE_DPCD_READ),
		DP_STR(REMOTE_DPCD_WRITE),
		DP_STR(REMOTE_I2C_READ),
		DP_STR(REMOTE_I2C_WRITE),
		DP_STR(POWER_UP_PHY),
		DP_STR(POWER_DOWN_PHY),
		DP_STR(SINK_EVENT_NOTIFY),
		DP_STR(QUERY_STREAM_ENC_STATUS),
	};

	if (req_type >= ARRAY_SIZE(req_type_str) ||
	    !req_type_str[req_type])
		return "unknown";

	return req_type_str[req_type];
}

#undef DP_STR
#define DP_STR(x) [DP_NAK_ ## x] = #x

static const char *drm_dp_mst_nak_reason_str(u8 nak_reason)
{
	static const char * const nak_reason_str[] = {
		DP_STR(WRITE_FAILURE),
		DP_STR(INVALID_READ),
		DP_STR(CRC_FAILURE),
		DP_STR(BAD_PARAM),
		DP_STR(DEFER),
		DP_STR(LINK_FAILURE),
		DP_STR(NO_RESOURCES),
		DP_STR(DPCD_FAIL),
		DP_STR(I2C_NAK),
		DP_STR(ALLOCATE_FAIL),
	};

	if (nak_reason >= ARRAY_SIZE(nak_reason_str) ||
	    !nak_reason_str[nak_reason])
		return "unknown";

	return nak_reason_str[nak_reason];
}

#undef DP_STR
#define DP_STR(x) [DRM_DP_SIDEBAND_TX_ ## x] = #x

static const char *drm_dp_mst_sideband_tx_state_str(int state)
{
	static const char * const sideband_reason_str[] = {
		DP_STR(QUEUED),
		DP_STR(START_SEND),
		DP_STR(SENT),
		DP_STR(RX),
		DP_STR(TIMEOUT),
	};

	if (state >= ARRAY_SIZE(sideband_reason_str) ||
	    !sideband_reason_str[state])
		return "unknown";

	return sideband_reason_str[state];
}

static int
drm_dp_mst_rad_to_str(const u8 rad[8], u8 lct, char *out, size_t len)
{
	int i;
	u8 unpacked_rad[16];

	for (i = 0; i < lct; i++) {
		if (i % 2)
			unpacked_rad[i] = rad[i / 2] >> 4;
		else
			unpacked_rad[i] = rad[i / 2] & BIT_MASK(4);
	}

	/* TODO: Eventually add something to printk so we can format the rad
	 * like this: 1.2.3
	 */
	return snprintf(out, len, "%*phC", lct, unpacked_rad);
}
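
/*
 * Note on the format: each hop in the relative address (RAD) is a 4-bit
 * port number, packed two per byte, and the loop above unpacks one nibble
 * per link counted by lct. Until the TODO above is addressed, the "%*phC"
 * format prints the unpacked nibbles as a flat hex string rather than the
 * intended dotted "1.2.3" style path.
 */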

/* sideband msg handling */
static u8 drm_dp_msg_header_crc4(const uint8_t *data, size_t num_nibbles)
{
	u8 bitmask = 0x80;
	u8 bitshift = 7;
	u8 array_index = 0;
	int number_of_bits = num_nibbles * 4;
	u8 remainder = 0;

	while (number_of_bits != 0) {
		number_of_bits--;
		remainder <<= 1;
		remainder |= (data[array_index] & bitmask) >> bitshift;
		bitmask >>= 1;
		bitshift--;
		if (bitmask == 0) {
			bitmask = 0x80;
			bitshift = 7;
			array_index++;
		}
		if ((remainder & 0x10) == 0x10)
			remainder ^= 0x13;
	}

	number_of_bits = 4;
	while (number_of_bits != 0) {
		number_of_bits--;
		remainder <<= 1;
		if ((remainder & 0x10) != 0)
			remainder ^= 0x13;
	}

	return remainder;
}

static u8 drm_dp_msg_data_crc4(const uint8_t *data, u8 number_of_bytes)
{
	u8 bitmask = 0x80;
	u8 bitshift = 7;
	u8 array_index = 0;
	int number_of_bits = number_of_bytes * 8;
	u16 remainder = 0;

	while (number_of_bits != 0) {
		number_of_bits--;
		remainder <<= 1;
		remainder |= (data[array_index] & bitmask) >> bitshift;
		bitmask >>= 1;
		bitshift--;
		if (bitmask == 0) {
			bitmask = 0x80;
			bitshift = 7;
			array_index++;
		}
		if ((remainder & 0x100) == 0x100)
			remainder ^= 0xd5;
	}

	number_of_bits = 8;
	while (number_of_bits != 0) {
		number_of_bits--;
		remainder <<= 1;
		if ((remainder & 0x100) != 0)
			remainder ^= 0xd5;
	}

	return remainder & 0xff;
}
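
/*
 * Both CRC helpers above are bit-serial long division. The header CRC is a
 * 4-bit CRC with polynomial x^4 + x + 1 (the 0x13 XOR constant); the body
 * CRC is an 8-bit CRC with polynomial x^8 + x^7 + x^6 + x^4 + x^2 + 1 (the
 * 0xd5 constant plus the implicit x^8 term). A hedged sanity-check sketch,
 * not compiled into the driver:
 *
 *	u8 hdr[3] = { 0x10, 0x01, 0x00 };	// lct=1 header, msg_len=1
 *	u8 crc = drm_dp_msg_header_crc4(hdr, (3 * 2) - 1);
 *	// drm_dp_encode_sideband_msg_hdr() below ORs this 4-bit value
 *	// into the low nibble of the final header byte.
 */
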
static inline u8 drm_dp_calc_sb_hdr_size(struct drm_dp_sideband_msg_hdr *hdr)
{
	u8 size = 3;

	size += (hdr->lct / 2);
	return size;
}

static void drm_dp_encode_sideband_msg_hdr(struct drm_dp_sideband_msg_hdr *hdr,
					   u8 *buf, int *len)
{
	int idx = 0;
	int i;
	u8 crc4;

	buf[idx++] = ((hdr->lct & 0xf) << 4) | (hdr->lcr & 0xf);
	for (i = 0; i < (hdr->lct / 2); i++)
		buf[idx++] = hdr->rad[i];
	buf[idx++] = (hdr->broadcast << 7) | (hdr->path_msg << 6) |
		(hdr->msg_len & 0x3f);
	buf[idx++] = (hdr->somt << 7) | (hdr->eomt << 6) | (hdr->seqno << 4);

	crc4 = drm_dp_msg_header_crc4(buf, (idx * 2) - 1);
	buf[idx - 1] |= (crc4 & 0xf);

	*len = idx;
}
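
/*
 * Layout produced by the encoder above for a directed request to the root
 * branch (lct = 1, lcr = 0, so no RAD bytes), shown as a hedged example:
 *
 *	buf[0] = 0x10;			// lct in the high nibble, lcr in the low
 *	buf[1] = flags | msg_len;	// broadcast<<7 | path_msg<<6 | len & 0x3f
 *	buf[2] = somt<<7 | eomt<<6 | seqno<<4 | crc4;	// CRC in the low nibble
 */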

static bool drm_dp_decode_sideband_msg_hdr(const struct drm_dp_mst_topology_mgr *mgr,
					   struct drm_dp_sideband_msg_hdr *hdr,
					   u8 *buf, int buflen, u8 *hdrlen)
{
	u8 crc4;
	u8 len;
	int i;
	u8 idx;

	if (buf[0] == 0)
		return false;
	len = 3;
	len += ((buf[0] & 0xf0) >> 4) / 2;
	if (len > buflen)
		return false;
	crc4 = drm_dp_msg_header_crc4(buf, (len * 2) - 1);

	if ((crc4 & 0xf) != (buf[len - 1] & 0xf)) {
		drm_dbg_kms(mgr->dev, "crc4 mismatch 0x%x 0x%x\n", crc4, buf[len - 1]);
		return false;
	}

	hdr->lct = (buf[0] & 0xf0) >> 4;
	hdr->lcr = (buf[0] & 0xf);
	idx = 1;
	for (i = 0; i < (hdr->lct / 2); i++)
		hdr->rad[i] = buf[idx++];
	hdr->broadcast = (buf[idx] >> 7) & 0x1;
	hdr->path_msg = (buf[idx] >> 6) & 0x1;
	hdr->msg_len = buf[idx] & 0x3f;
	if (hdr->msg_len < 1)		/* min space for body CRC */
		return false;

	idx++;
	hdr->somt = (buf[idx] >> 7) & 0x1;
	hdr->eomt = (buf[idx] >> 6) & 0x1;
	hdr->seqno = (buf[idx] >> 4) & 0x1;
	idx++;
	*hdrlen = idx;
	return true;
}

void
drm_dp_encode_sideband_req(const struct drm_dp_sideband_msg_req_body *req,
			   struct drm_dp_sideband_msg_tx *raw)
{
	int idx = 0;
	int i;
	u8 *buf = raw->msg;

	buf[idx++] = req->req_type & 0x7f;

	switch (req->req_type) {
	case DP_ENUM_PATH_RESOURCES:
	case DP_POWER_DOWN_PHY:
	case DP_POWER_UP_PHY:
		buf[idx] = (req->u.port_num.port_number & 0xf) << 4;
		idx++;
		break;
	case DP_ALLOCATE_PAYLOAD:
		buf[idx] = (req->u.allocate_payload.port_number & 0xf) << 4 |
			(req->u.allocate_payload.number_sdp_streams & 0xf);
		idx++;
		buf[idx] = (req->u.allocate_payload.vcpi & 0x7f);
		idx++;
		buf[idx] = (req->u.allocate_payload.pbn >> 8);
		idx++;
		buf[idx] = (req->u.allocate_payload.pbn & 0xff);
		idx++;
		for (i = 0; i < req->u.allocate_payload.number_sdp_streams / 2; i++) {
			buf[idx] = ((req->u.allocate_payload.sdp_stream_sink[i * 2] & 0xf) << 4) |
				(req->u.allocate_payload.sdp_stream_sink[i * 2 + 1] & 0xf);
			idx++;
		}
		if (req->u.allocate_payload.number_sdp_streams & 1) {
			i = req->u.allocate_payload.number_sdp_streams - 1;
			buf[idx] = (req->u.allocate_payload.sdp_stream_sink[i] & 0xf) << 4;
			idx++;
		}
		break;
	case DP_QUERY_PAYLOAD:
		buf[idx] = (req->u.query_payload.port_number & 0xf) << 4;
		idx++;
		buf[idx] = (req->u.query_payload.vcpi & 0x7f);
		idx++;
		break;
	case DP_REMOTE_DPCD_READ:
		buf[idx] = (req->u.dpcd_read.port_number & 0xf) << 4;
		buf[idx] |= ((req->u.dpcd_read.dpcd_address & 0xf0000) >> 16) & 0xf;
		idx++;
		buf[idx] = (req->u.dpcd_read.dpcd_address & 0xff00) >> 8;
		idx++;
		buf[idx] = (req->u.dpcd_read.dpcd_address & 0xff);
		idx++;
		buf[idx] = (req->u.dpcd_read.num_bytes);
		idx++;
		break;

	case DP_REMOTE_DPCD_WRITE:
		buf[idx] = (req->u.dpcd_write.port_number & 0xf) << 4;
		buf[idx] |= ((req->u.dpcd_write.dpcd_address & 0xf0000) >> 16) & 0xf;
		idx++;
		buf[idx] = (req->u.dpcd_write.dpcd_address & 0xff00) >> 8;
		idx++;
		buf[idx] = (req->u.dpcd_write.dpcd_address & 0xff);
		idx++;
		buf[idx] = (req->u.dpcd_write.num_bytes);
		idx++;
		memcpy(&buf[idx], req->u.dpcd_write.bytes, req->u.dpcd_write.num_bytes);
		idx += req->u.dpcd_write.num_bytes;
		break;
	case DP_REMOTE_I2C_READ:
		buf[idx] = (req->u.i2c_read.port_number & 0xf) << 4;
		buf[idx] |= (req->u.i2c_read.num_transactions & 0x3);
		idx++;
		for (i = 0; i < (req->u.i2c_read.num_transactions & 0x3); i++) {
			buf[idx] = req->u.i2c_read.transactions[i].i2c_dev_id & 0x7f;
			idx++;
			buf[idx] = req->u.i2c_read.transactions[i].num_bytes;
			idx++;
			memcpy(&buf[idx], req->u.i2c_read.transactions[i].bytes, req->u.i2c_read.transactions[i].num_bytes);
			idx += req->u.i2c_read.transactions[i].num_bytes;

			buf[idx] = (req->u.i2c_read.transactions[i].no_stop_bit & 0x1) << 4;
			buf[idx] |= (req->u.i2c_read.transactions[i].i2c_transaction_delay & 0xf);
			idx++;
		}
		buf[idx] = (req->u.i2c_read.read_i2c_device_id) & 0x7f;
		idx++;
		buf[idx] = (req->u.i2c_read.num_bytes_read);
		idx++;
		break;

	case DP_REMOTE_I2C_WRITE:
		buf[idx] = (req->u.i2c_write.port_number & 0xf) << 4;
		idx++;
		buf[idx] = (req->u.i2c_write.write_i2c_device_id) & 0x7f;
		idx++;
		buf[idx] = (req->u.i2c_write.num_bytes);
		idx++;
		memcpy(&buf[idx], req->u.i2c_write.bytes, req->u.i2c_write.num_bytes);
		idx += req->u.i2c_write.num_bytes;
		break;
	case DP_QUERY_STREAM_ENC_STATUS: {
		const struct drm_dp_query_stream_enc_status *msg;

		msg = &req->u.enc_status;
		buf[idx] = msg->stream_id;
		idx++;
		memcpy(&buf[idx], msg->client_id, sizeof(msg->client_id));
		idx += sizeof(msg->client_id);
		buf[idx] = 0;
		buf[idx] |= FIELD_PREP(GENMASK(1, 0), msg->stream_event);
		buf[idx] |= msg->valid_stream_event ? BIT(2) : 0;
		buf[idx] |= FIELD_PREP(GENMASK(4, 3), msg->stream_behavior);
		buf[idx] |= msg->valid_stream_behavior ? BIT(5) : 0;
		idx++;
		}
		break;
	}
	raw->cur_len = idx;
}
EXPORT_SYMBOL_FOR_TESTS_ONLY(drm_dp_encode_sideband_req);
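
/*
 * Hedged example of the encoder's output: an ENUM_PATH_RESOURCES request
 * for port 1 encodes to the two-byte body { 0x10, 0x10 }, i.e. req_type
 * DP_ENUM_PATH_RESOURCES in byte 0 and the port number in the high nibble
 * of byte 1. The sideband header and the body CRC are only added later,
 * when the tx path splits the message into DPCD-sized chunks.
 */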

/* Decode a sideband request we've encoded, mainly used for debugging */
int
drm_dp_decode_sideband_req(const struct drm_dp_sideband_msg_tx *raw,
			   struct drm_dp_sideband_msg_req_body *req)
{
	const u8 *buf = raw->msg;
	int i, idx = 0;

	req->req_type = buf[idx++] & 0x7f;
	switch (req->req_type) {
	case DP_ENUM_PATH_RESOURCES:
	case DP_POWER_DOWN_PHY:
	case DP_POWER_UP_PHY:
		req->u.port_num.port_number = (buf[idx] >> 4) & 0xf;
		break;
	case DP_ALLOCATE_PAYLOAD:
		{
			struct drm_dp_allocate_payload *a =
				&req->u.allocate_payload;

			a->number_sdp_streams = buf[idx] & 0xf;
			a->port_number = (buf[idx] >> 4) & 0xf;

			WARN_ON(buf[++idx] & 0x80);
			a->vcpi = buf[idx] & 0x7f;

			a->pbn = buf[++idx] << 8;
			a->pbn |= buf[++idx];

			idx++;
			for (i = 0; i < a->number_sdp_streams; i++) {
				a->sdp_stream_sink[i] =
					(buf[idx + (i / 2)] >> ((i % 2) ? 0 : 4)) & 0xf;
			}
		}
		break;
	case DP_QUERY_PAYLOAD:
		req->u.query_payload.port_number = (buf[idx] >> 4) & 0xf;
		WARN_ON(buf[++idx] & 0x80);
		req->u.query_payload.vcpi = buf[idx] & 0x7f;
		break;
	case DP_REMOTE_DPCD_READ:
		{
			struct drm_dp_remote_dpcd_read *r = &req->u.dpcd_read;

			r->port_number = (buf[idx] >> 4) & 0xf;

			r->dpcd_address = (buf[idx] << 16) & 0xf0000;
			r->dpcd_address |= (buf[++idx] << 8) & 0xff00;
			r->dpcd_address |= buf[++idx] & 0xff;

			r->num_bytes = buf[++idx];
		}
		break;
	case DP_REMOTE_DPCD_WRITE:
		{
			struct drm_dp_remote_dpcd_write *w =
				&req->u.dpcd_write;

			w->port_number = (buf[idx] >> 4) & 0xf;

			w->dpcd_address = (buf[idx] << 16) & 0xf0000;
			w->dpcd_address |= (buf[++idx] << 8) & 0xff00;
			w->dpcd_address |= buf[++idx] & 0xff;

			w->num_bytes = buf[++idx];

			w->bytes = kmemdup(&buf[++idx], w->num_bytes,
					   GFP_KERNEL);
			if (!w->bytes)
				return -ENOMEM;
		}
		break;
	case DP_REMOTE_I2C_READ:
		{
			struct drm_dp_remote_i2c_read *r = &req->u.i2c_read;
			struct drm_dp_remote_i2c_read_tx *tx;
			bool failed = false;

			r->num_transactions = buf[idx] & 0x3;
			r->port_number = (buf[idx] >> 4) & 0xf;
			for (i = 0; i < r->num_transactions; i++) {
				tx = &r->transactions[i];

				tx->i2c_dev_id = buf[++idx] & 0x7f;
				tx->num_bytes = buf[++idx];
				tx->bytes = kmemdup(&buf[++idx],
						    tx->num_bytes,
						    GFP_KERNEL);
				if (!tx->bytes) {
					failed = true;
					break;
				}
				idx += tx->num_bytes;
				tx->no_stop_bit = (buf[idx] >> 5) & 0x1;
				tx->i2c_transaction_delay = buf[idx] & 0xf;
			}

			if (failed) {
				for (i = 0; i < r->num_transactions; i++) {
					tx = &r->transactions[i];
					kfree(tx->bytes);
				}
				return -ENOMEM;
			}

			r->read_i2c_device_id = buf[++idx] & 0x7f;
			r->num_bytes_read = buf[++idx];
		}
		break;
	case DP_REMOTE_I2C_WRITE:
		{
			struct drm_dp_remote_i2c_write *w = &req->u.i2c_write;

			w->port_number = (buf[idx] >> 4) & 0xf;
			w->write_i2c_device_id = buf[++idx] & 0x7f;
			w->num_bytes = buf[++idx];
			w->bytes = kmemdup(&buf[++idx], w->num_bytes,
					   GFP_KERNEL);
			if (!w->bytes)
				return -ENOMEM;
		}
		break;
	case DP_QUERY_STREAM_ENC_STATUS:
		req->u.enc_status.stream_id = buf[idx++];
		for (i = 0; i < sizeof(req->u.enc_status.client_id); i++)
			req->u.enc_status.client_id[i] = buf[idx++];

		req->u.enc_status.stream_event = FIELD_GET(GENMASK(1, 0),
							   buf[idx]);
		req->u.enc_status.valid_stream_event = FIELD_GET(BIT(2),
								 buf[idx]);
		req->u.enc_status.stream_behavior = FIELD_GET(GENMASK(4, 3),
							      buf[idx]);
		req->u.enc_status.valid_stream_behavior = FIELD_GET(BIT(5),
								    buf[idx]);
		break;
	}

	return 0;
}
EXPORT_SYMBOL_FOR_TESTS_ONLY(drm_dp_decode_sideband_req);

void
drm_dp_dump_sideband_msg_req_body(const struct drm_dp_sideband_msg_req_body *req,
				  int indent, struct drm_printer *printer)
{
	int i;

#define P(f, ...) drm_printf_indent(printer, indent, f, ##__VA_ARGS__)
	if (req->req_type == DP_LINK_ADDRESS) {
		/* No contents to print */
		P("type=%s\n", drm_dp_mst_req_type_str(req->req_type));
		return;
	}

	P("type=%s contents:\n", drm_dp_mst_req_type_str(req->req_type));
	indent++;

	switch (req->req_type) {
	case DP_ENUM_PATH_RESOURCES:
	case DP_POWER_DOWN_PHY:
	case DP_POWER_UP_PHY:
		P("port=%d\n", req->u.port_num.port_number);
		break;
	case DP_ALLOCATE_PAYLOAD:
		P("port=%d vcpi=%d pbn=%d sdp_streams=%d %*ph\n",
		  req->u.allocate_payload.port_number,
		  req->u.allocate_payload.vcpi, req->u.allocate_payload.pbn,
		  req->u.allocate_payload.number_sdp_streams,
		  req->u.allocate_payload.number_sdp_streams,
		  req->u.allocate_payload.sdp_stream_sink);
		break;
	case DP_QUERY_PAYLOAD:
		P("port=%d vcpi=%d\n",
		  req->u.query_payload.port_number,
		  req->u.query_payload.vcpi);
		break;
	case DP_REMOTE_DPCD_READ:
		P("port=%d dpcd_addr=%05x len=%d\n",
		  req->u.dpcd_read.port_number, req->u.dpcd_read.dpcd_address,
		  req->u.dpcd_read.num_bytes);
		break;
	case DP_REMOTE_DPCD_WRITE:
		P("port=%d addr=%05x len=%d: %*ph\n",
		  req->u.dpcd_write.port_number,
		  req->u.dpcd_write.dpcd_address,
		  req->u.dpcd_write.num_bytes, req->u.dpcd_write.num_bytes,
		  req->u.dpcd_write.bytes);
		break;
	case DP_REMOTE_I2C_READ:
		P("port=%d num_tx=%d id=%d size=%d:\n",
		  req->u.i2c_read.port_number,
		  req->u.i2c_read.num_transactions,
		  req->u.i2c_read.read_i2c_device_id,
		  req->u.i2c_read.num_bytes_read);

		indent++;
		for (i = 0; i < req->u.i2c_read.num_transactions; i++) {
			const struct drm_dp_remote_i2c_read_tx *rtx =
				&req->u.i2c_read.transactions[i];

			P("%d: id=%03d size=%03d no_stop_bit=%d tx_delay=%03d: %*ph\n",
			  i, rtx->i2c_dev_id, rtx->num_bytes,
			  rtx->no_stop_bit, rtx->i2c_transaction_delay,
			  rtx->num_bytes, rtx->bytes);
		}
		break;
	case DP_REMOTE_I2C_WRITE:
		P("port=%d id=%d size=%d: %*ph\n",
		  req->u.i2c_write.port_number,
		  req->u.i2c_write.write_i2c_device_id,
		  req->u.i2c_write.num_bytes, req->u.i2c_write.num_bytes,
		  req->u.i2c_write.bytes);
		break;
	case DP_QUERY_STREAM_ENC_STATUS:
		P("stream_id=%u client_id=%*ph stream_event=%x "
		  "valid_event=%d stream_behavior=%x valid_behavior=%d",
		  req->u.enc_status.stream_id,
		  (int)ARRAY_SIZE(req->u.enc_status.client_id),
		  req->u.enc_status.client_id, req->u.enc_status.stream_event,
		  req->u.enc_status.valid_stream_event,
		  req->u.enc_status.stream_behavior,
		  req->u.enc_status.valid_stream_behavior);
		break;
	default:
		P("???\n");
		break;
	}
#undef P
}
EXPORT_SYMBOL_FOR_TESTS_ONLY(drm_dp_dump_sideband_msg_req_body);

static inline void
drm_dp_mst_dump_sideband_msg_tx(struct drm_printer *p,
				const struct drm_dp_sideband_msg_tx *txmsg)
{
	struct drm_dp_sideband_msg_req_body req;
	char buf[64];
	int ret;
	int i;

	drm_dp_mst_rad_to_str(txmsg->dst->rad, txmsg->dst->lct, buf,
			      sizeof(buf));
	drm_printf(p, "txmsg cur_offset=%x cur_len=%x seqno=%x state=%s path_msg=%d dst=%s\n",
		   txmsg->cur_offset, txmsg->cur_len, txmsg->seqno,
		   drm_dp_mst_sideband_tx_state_str(txmsg->state),
		   txmsg->path_msg, buf);

	ret = drm_dp_decode_sideband_req(txmsg, &req);
	if (ret) {
		drm_printf(p, "<failed to decode sideband req: %d>\n", ret);
		return;
	}
	drm_dp_dump_sideband_msg_req_body(&req, 1, p);

	switch (req.req_type) {
	case DP_REMOTE_DPCD_WRITE:
		kfree(req.u.dpcd_write.bytes);
		break;
	case DP_REMOTE_I2C_READ:
		for (i = 0; i < req.u.i2c_read.num_transactions; i++)
			kfree(req.u.i2c_read.transactions[i].bytes);
		break;
	case DP_REMOTE_I2C_WRITE:
		kfree(req.u.i2c_write.bytes);
		break;
	}
}

static void drm_dp_crc_sideband_chunk_req(u8 *msg, u8 len)
{
	u8 crc4;

	crc4 = drm_dp_msg_data_crc4(msg, len);
	msg[len] = crc4;
}

static void drm_dp_encode_sideband_reply(struct drm_dp_sideband_msg_reply_body *rep,
					 struct drm_dp_sideband_msg_tx *raw)
{
	int idx = 0;
	u8 *buf = raw->msg;

	buf[idx++] = (rep->reply_type & 0x1) << 7 | (rep->req_type & 0x7f);

	raw->cur_len = idx;
}

static int drm_dp_sideband_msg_set_header(struct drm_dp_sideband_msg_rx *msg,
					  struct drm_dp_sideband_msg_hdr *hdr,
					  u8 hdrlen)
{
	/*
	 * ignore out-of-order messages or messages that are part of a
	 * failed transaction
	 */
	if (!hdr->somt && !msg->have_somt)
		return false;

	/* get length contained in this portion */
	msg->curchunk_idx = 0;
	msg->curchunk_len = hdr->msg_len;
	msg->curchunk_hdrlen = hdrlen;

	/* we have already gotten an somt - don't bother parsing */
	if (hdr->somt && msg->have_somt)
		return false;

	if (hdr->somt) {
		memcpy(&msg->initial_hdr, hdr,
		       sizeof(struct drm_dp_sideband_msg_hdr));
		msg->have_somt = true;
	}
	if (hdr->eomt)
		msg->have_eomt = true;

	return true;
}

/* this adds a chunk of msg to the builder to get the final msg */
static bool drm_dp_sideband_append_payload(struct drm_dp_sideband_msg_rx *msg,
					   u8 *replybuf, u8 replybuflen)
{
	u8 crc4;

	memcpy(&msg->chunk[msg->curchunk_idx], replybuf, replybuflen);
	msg->curchunk_idx += replybuflen;

	if (msg->curchunk_idx >= msg->curchunk_len) {
		/* do CRC */
		crc4 = drm_dp_msg_data_crc4(msg->chunk, msg->curchunk_len - 1);
		if (crc4 != msg->chunk[msg->curchunk_len - 1])
			print_hex_dump(KERN_DEBUG, "wrong crc",
				       DUMP_PREFIX_NONE, 16, 1,
				       msg->chunk,  msg->curchunk_len, false);
		/* copy chunk into bigger msg */
		memcpy(&msg->msg[msg->curlen], msg->chunk, msg->curchunk_len - 1);
		msg->curlen += msg->curchunk_len - 1;
	}
	return true;
}
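
/*
 * Reassembly sketch (hedged): a long reply arrives as several DPCD chunks.
 * The chunk whose header carries SOMT arms the builder via
 * drm_dp_sideband_msg_set_header(); each chunk's body is then accumulated
 * here minus its trailing CRC byte, and the chunk carrying EOMT marks the
 * message complete, leaving the concatenated body in msg->msg.
 */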
7981bb76ff1Sjsg 
7991bb76ff1Sjsg static bool drm_dp_sideband_parse_link_address(const struct drm_dp_mst_topology_mgr *mgr,
8001bb76ff1Sjsg 					       struct drm_dp_sideband_msg_rx *raw,
8011bb76ff1Sjsg 					       struct drm_dp_sideband_msg_reply_body *repmsg)
8021bb76ff1Sjsg {
8031bb76ff1Sjsg 	int idx = 1;
8041bb76ff1Sjsg 	int i;
8051bb76ff1Sjsg 
8061bb76ff1Sjsg 	memcpy(repmsg->u.link_addr.guid, &raw->msg[idx], 16);
8071bb76ff1Sjsg 	idx += 16;
8081bb76ff1Sjsg 	repmsg->u.link_addr.nports = raw->msg[idx] & 0xf;
8091bb76ff1Sjsg 	idx++;
8101bb76ff1Sjsg 	if (idx > raw->curlen)
8111bb76ff1Sjsg 		goto fail_len;
8121bb76ff1Sjsg 	for (i = 0; i < repmsg->u.link_addr.nports; i++) {
8131bb76ff1Sjsg 		if (raw->msg[idx] & 0x80)
8141bb76ff1Sjsg 			repmsg->u.link_addr.ports[i].input_port = 1;
8151bb76ff1Sjsg 
8161bb76ff1Sjsg 		repmsg->u.link_addr.ports[i].peer_device_type = (raw->msg[idx] >> 4) & 0x7;
8171bb76ff1Sjsg 		repmsg->u.link_addr.ports[i].port_number = (raw->msg[idx] & 0xf);
8181bb76ff1Sjsg 
8191bb76ff1Sjsg 		idx++;
8201bb76ff1Sjsg 		if (idx > raw->curlen)
8211bb76ff1Sjsg 			goto fail_len;
8221bb76ff1Sjsg 		repmsg->u.link_addr.ports[i].mcs = (raw->msg[idx] >> 7) & 0x1;
8231bb76ff1Sjsg 		repmsg->u.link_addr.ports[i].ddps = (raw->msg[idx] >> 6) & 0x1;
8241bb76ff1Sjsg 		if (repmsg->u.link_addr.ports[i].input_port == 0)
8251bb76ff1Sjsg 			repmsg->u.link_addr.ports[i].legacy_device_plug_status = (raw->msg[idx] >> 5) & 0x1;
8261bb76ff1Sjsg 		idx++;
8271bb76ff1Sjsg 		if (idx > raw->curlen)
8281bb76ff1Sjsg 			goto fail_len;
8291bb76ff1Sjsg 		if (repmsg->u.link_addr.ports[i].input_port == 0) {
8301bb76ff1Sjsg 			repmsg->u.link_addr.ports[i].dpcd_revision = (raw->msg[idx]);
8311bb76ff1Sjsg 			idx++;
8321bb76ff1Sjsg 			if (idx > raw->curlen)
8331bb76ff1Sjsg 				goto fail_len;
8341bb76ff1Sjsg 			memcpy(repmsg->u.link_addr.ports[i].peer_guid, &raw->msg[idx], 16);
8351bb76ff1Sjsg 			idx += 16;
8361bb76ff1Sjsg 			if (idx > raw->curlen)
8371bb76ff1Sjsg 				goto fail_len;
8381bb76ff1Sjsg 			repmsg->u.link_addr.ports[i].num_sdp_streams = (raw->msg[idx] >> 4) & 0xf;
8391bb76ff1Sjsg 			repmsg->u.link_addr.ports[i].num_sdp_stream_sinks = (raw->msg[idx] & 0xf);
8401bb76ff1Sjsg 			idx++;
8411bb76ff1Sjsg 
8421bb76ff1Sjsg 		}
8431bb76ff1Sjsg 		if (idx > raw->curlen)
8441bb76ff1Sjsg 			goto fail_len;
8451bb76ff1Sjsg 	}
8461bb76ff1Sjsg 
8471bb76ff1Sjsg 	return true;
8481bb76ff1Sjsg fail_len:
8491bb76ff1Sjsg 	DRM_DEBUG_KMS("link address reply parse length fail %d %d\n", idx, raw->curlen);
8501bb76ff1Sjsg 	return false;
8511bb76ff1Sjsg }
8521bb76ff1Sjsg 
8531bb76ff1Sjsg static bool drm_dp_sideband_parse_remote_dpcd_read(struct drm_dp_sideband_msg_rx *raw,
8541bb76ff1Sjsg 						   struct drm_dp_sideband_msg_reply_body *repmsg)
8551bb76ff1Sjsg {
8561bb76ff1Sjsg 	int idx = 1;
8571bb76ff1Sjsg 
8581bb76ff1Sjsg 	repmsg->u.remote_dpcd_read_ack.port_number = raw->msg[idx] & 0xf;
8591bb76ff1Sjsg 	idx++;
8601bb76ff1Sjsg 	if (idx > raw->curlen)
8611bb76ff1Sjsg 		goto fail_len;
8621bb76ff1Sjsg 	repmsg->u.remote_dpcd_read_ack.num_bytes = raw->msg[idx];
8631bb76ff1Sjsg 	idx++;
8641bb76ff1Sjsg 	if (idx > raw->curlen)
8651bb76ff1Sjsg 		goto fail_len;
8661bb76ff1Sjsg 
8671bb76ff1Sjsg 	memcpy(repmsg->u.remote_dpcd_read_ack.bytes, &raw->msg[idx], repmsg->u.remote_dpcd_read_ack.num_bytes);
8681bb76ff1Sjsg 	return true;
8691bb76ff1Sjsg fail_len:
8701bb76ff1Sjsg 	DRM_DEBUG_KMS("link address reply parse length fail %d %d\n", idx, raw->curlen);
8711bb76ff1Sjsg 	return false;
8721bb76ff1Sjsg }
8731bb76ff1Sjsg 
8741bb76ff1Sjsg static bool drm_dp_sideband_parse_remote_dpcd_write(struct drm_dp_sideband_msg_rx *raw,
8751bb76ff1Sjsg 						      struct drm_dp_sideband_msg_reply_body *repmsg)
8761bb76ff1Sjsg {
8771bb76ff1Sjsg 	int idx = 1;
8781bb76ff1Sjsg 
8791bb76ff1Sjsg 	repmsg->u.remote_dpcd_write_ack.port_number = raw->msg[idx] & 0xf;
8801bb76ff1Sjsg 	idx++;
8811bb76ff1Sjsg 	if (idx > raw->curlen)
8821bb76ff1Sjsg 		goto fail_len;
8831bb76ff1Sjsg 	return true;
8841bb76ff1Sjsg fail_len:
8851bb76ff1Sjsg 	DRM_DEBUG_KMS("parse length fail %d %d\n", idx, raw->curlen);
8861bb76ff1Sjsg 	return false;
8871bb76ff1Sjsg }
8881bb76ff1Sjsg 
8891bb76ff1Sjsg static bool drm_dp_sideband_parse_remote_i2c_read_ack(struct drm_dp_sideband_msg_rx *raw,
8901bb76ff1Sjsg 						      struct drm_dp_sideband_msg_reply_body *repmsg)
8911bb76ff1Sjsg {
8921bb76ff1Sjsg 	int idx = 1;
8931bb76ff1Sjsg 
8941bb76ff1Sjsg 	repmsg->u.remote_i2c_read_ack.port_number = (raw->msg[idx] & 0xf);
8951bb76ff1Sjsg 	idx++;
8961bb76ff1Sjsg 	if (idx > raw->curlen)
8971bb76ff1Sjsg 		goto fail_len;
8981bb76ff1Sjsg 	repmsg->u.remote_i2c_read_ack.num_bytes = raw->msg[idx];
8991bb76ff1Sjsg 	idx++;
9001bb76ff1Sjsg 	/* TODO check */
9011bb76ff1Sjsg 	memcpy(repmsg->u.remote_i2c_read_ack.bytes, &raw->msg[idx], repmsg->u.remote_i2c_read_ack.num_bytes);
9021bb76ff1Sjsg 	return true;
9031bb76ff1Sjsg fail_len:
9041bb76ff1Sjsg 	DRM_DEBUG_KMS("remote i2c reply parse length fail %d %d\n", idx, raw->curlen);
9051bb76ff1Sjsg 	return false;
9061bb76ff1Sjsg }
9071bb76ff1Sjsg 
9081bb76ff1Sjsg static bool drm_dp_sideband_parse_enum_path_resources_ack(struct drm_dp_sideband_msg_rx *raw,
9091bb76ff1Sjsg 							  struct drm_dp_sideband_msg_reply_body *repmsg)
9101bb76ff1Sjsg {
9111bb76ff1Sjsg 	int idx = 1;
9121bb76ff1Sjsg 
9131bb76ff1Sjsg 	repmsg->u.path_resources.port_number = (raw->msg[idx] >> 4) & 0xf;
9141bb76ff1Sjsg 	repmsg->u.path_resources.fec_capable = raw->msg[idx] & 0x1;
9151bb76ff1Sjsg 	idx++;
9161bb76ff1Sjsg 	if (idx > raw->curlen)
9171bb76ff1Sjsg 		goto fail_len;
9181bb76ff1Sjsg 	repmsg->u.path_resources.full_payload_bw_number = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
9191bb76ff1Sjsg 	idx += 2;
9201bb76ff1Sjsg 	if (idx > raw->curlen)
9211bb76ff1Sjsg 		goto fail_len;
9221bb76ff1Sjsg 	repmsg->u.path_resources.avail_payload_bw_number = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
9231bb76ff1Sjsg 	idx += 2;
9241bb76ff1Sjsg 	if (idx > raw->curlen)
9251bb76ff1Sjsg 		goto fail_len;
9261bb76ff1Sjsg 	return true;
9271bb76ff1Sjsg fail_len:
9281bb76ff1Sjsg 	DRM_DEBUG_KMS("enum resource parse length fail %d %d\n", idx, raw->curlen);
9291bb76ff1Sjsg 	return false;
9301bb76ff1Sjsg }
9311bb76ff1Sjsg 
9321bb76ff1Sjsg static bool drm_dp_sideband_parse_allocate_payload_ack(struct drm_dp_sideband_msg_rx *raw,
9331bb76ff1Sjsg 							  struct drm_dp_sideband_msg_reply_body *repmsg)
9341bb76ff1Sjsg {
9351bb76ff1Sjsg 	int idx = 1;
9361bb76ff1Sjsg 
9371bb76ff1Sjsg 	repmsg->u.allocate_payload.port_number = (raw->msg[idx] >> 4) & 0xf;
9381bb76ff1Sjsg 	idx++;
9391bb76ff1Sjsg 	if (idx > raw->curlen)
9401bb76ff1Sjsg 		goto fail_len;
9411bb76ff1Sjsg 	repmsg->u.allocate_payload.vcpi = raw->msg[idx];
9421bb76ff1Sjsg 	idx++;
9431bb76ff1Sjsg 	if (idx > raw->curlen)
9441bb76ff1Sjsg 		goto fail_len;
9451bb76ff1Sjsg 	repmsg->u.allocate_payload.allocated_pbn = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
9461bb76ff1Sjsg 	idx += 2;
9471bb76ff1Sjsg 	if (idx > raw->curlen)
9481bb76ff1Sjsg 		goto fail_len;
9491bb76ff1Sjsg 	return true;
9501bb76ff1Sjsg fail_len:
9511bb76ff1Sjsg 	DRM_DEBUG_KMS("allocate payload parse length fail %d %d\n", idx, raw->curlen);
9521bb76ff1Sjsg 	return false;
9531bb76ff1Sjsg }
9541bb76ff1Sjsg 
9551bb76ff1Sjsg static bool drm_dp_sideband_parse_query_payload_ack(struct drm_dp_sideband_msg_rx *raw,
9561bb76ff1Sjsg 						    struct drm_dp_sideband_msg_reply_body *repmsg)
9571bb76ff1Sjsg {
9581bb76ff1Sjsg 	int idx = 1;
9591bb76ff1Sjsg 
9601bb76ff1Sjsg 	repmsg->u.query_payload.port_number = (raw->msg[idx] >> 4) & 0xf;
9611bb76ff1Sjsg 	idx++;
9621bb76ff1Sjsg 	if (idx > raw->curlen)
9631bb76ff1Sjsg 		goto fail_len;
9641bb76ff1Sjsg 	repmsg->u.query_payload.allocated_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]);
9651bb76ff1Sjsg 	idx += 2;
9661bb76ff1Sjsg 	if (idx > raw->curlen)
9671bb76ff1Sjsg 		goto fail_len;
9681bb76ff1Sjsg 	return true;
9691bb76ff1Sjsg fail_len:
9701bb76ff1Sjsg 	DRM_DEBUG_KMS("query payload parse length fail %d %d\n", idx, raw->curlen);
9711bb76ff1Sjsg 	return false;
9721bb76ff1Sjsg }
9731bb76ff1Sjsg 
9741bb76ff1Sjsg static bool drm_dp_sideband_parse_power_updown_phy_ack(struct drm_dp_sideband_msg_rx *raw,
9751bb76ff1Sjsg 						       struct drm_dp_sideband_msg_reply_body *repmsg)
9761bb76ff1Sjsg {
9771bb76ff1Sjsg 	int idx = 1;
9781bb76ff1Sjsg 
9791bb76ff1Sjsg 	repmsg->u.port_number.port_number = (raw->msg[idx] >> 4) & 0xf;
9801bb76ff1Sjsg 	idx++;
9811bb76ff1Sjsg 	if (idx > raw->curlen) {
9821bb76ff1Sjsg 		DRM_DEBUG_KMS("power up/down phy parse length fail %d %d\n",
9831bb76ff1Sjsg 			      idx, raw->curlen);
9841bb76ff1Sjsg 		return false;
9851bb76ff1Sjsg 	}
9861bb76ff1Sjsg 	return true;
9871bb76ff1Sjsg }
9881bb76ff1Sjsg 
9891bb76ff1Sjsg static bool
9901bb76ff1Sjsg drm_dp_sideband_parse_query_stream_enc_status(
9911bb76ff1Sjsg 				struct drm_dp_sideband_msg_rx *raw,
9921bb76ff1Sjsg 				struct drm_dp_sideband_msg_reply_body *repmsg)
9931bb76ff1Sjsg {
9941bb76ff1Sjsg 	struct drm_dp_query_stream_enc_status_ack_reply *reply;
9951bb76ff1Sjsg 
9961bb76ff1Sjsg 	reply = &repmsg->u.enc_status;
9971bb76ff1Sjsg 
9981bb76ff1Sjsg 	reply->stream_id = raw->msg[3];
9991bb76ff1Sjsg 
10001bb76ff1Sjsg 	reply->reply_signed = raw->msg[2] & BIT(0);
10011bb76ff1Sjsg 
10021bb76ff1Sjsg 	/*
10031bb76ff1Sjsg 	 * NOTE: It's my impression from reading the spec that the below parsing
10041bb76ff1Sjsg 	 * is correct. However I noticed while testing with an HDCP 1.4 display
10051bb76ff1Sjsg 	 * through an HDCP 2.2 hub that only bit 3 was set. In that case, I
10061bb76ff1Sjsg 	 * would expect both bits to be set. So keep the parsing following the
10071bb76ff1Sjsg 	 * spec, but beware reality might not match the spec (at least for some
10081bb76ff1Sjsg 	 * configurations).
10091bb76ff1Sjsg 	 */
10101bb76ff1Sjsg 	reply->hdcp_1x_device_present = raw->msg[2] & BIT(4);
10111bb76ff1Sjsg 	reply->hdcp_2x_device_present = raw->msg[2] & BIT(3);
10121bb76ff1Sjsg 
10131bb76ff1Sjsg 	reply->query_capable_device_present = raw->msg[2] & BIT(5);
10141bb76ff1Sjsg 	reply->legacy_device_present = raw->msg[2] & BIT(6);
10151bb76ff1Sjsg 	reply->unauthorizable_device_present = raw->msg[2] & BIT(7);
10161bb76ff1Sjsg 
10171bb76ff1Sjsg 	reply->auth_completed = !!(raw->msg[1] & BIT(3));
10181bb76ff1Sjsg 	reply->encryption_enabled = !!(raw->msg[1] & BIT(4));
10191bb76ff1Sjsg 	reply->repeater_present = !!(raw->msg[1] & BIT(5));
10201bb76ff1Sjsg 	reply->state = (raw->msg[1] & GENMASK(7, 6)) >> 6;
10211bb76ff1Sjsg 
10221bb76ff1Sjsg 	return true;
10231bb76ff1Sjsg }
10241bb76ff1Sjsg 
10251bb76ff1Sjsg static bool drm_dp_sideband_parse_reply(const struct drm_dp_mst_topology_mgr *mgr,
10261bb76ff1Sjsg 					struct drm_dp_sideband_msg_rx *raw,
10271bb76ff1Sjsg 					struct drm_dp_sideband_msg_reply_body *msg)
10281bb76ff1Sjsg {
10291bb76ff1Sjsg 	memset(msg, 0, sizeof(*msg));
10301bb76ff1Sjsg 	msg->reply_type = (raw->msg[0] & 0x80) >> 7;
10311bb76ff1Sjsg 	msg->req_type = (raw->msg[0] & 0x7f);
10321bb76ff1Sjsg 
10331bb76ff1Sjsg 	if (msg->reply_type == DP_SIDEBAND_REPLY_NAK) {
10341bb76ff1Sjsg 		memcpy(msg->u.nak.guid, &raw->msg[1], 16);
10351bb76ff1Sjsg 		msg->u.nak.reason = raw->msg[17];
10361bb76ff1Sjsg 		msg->u.nak.nak_data = raw->msg[18];
10371bb76ff1Sjsg 		return false;
10381bb76ff1Sjsg 	}
10391bb76ff1Sjsg 
10401bb76ff1Sjsg 	switch (msg->req_type) {
10411bb76ff1Sjsg 	case DP_LINK_ADDRESS:
10421bb76ff1Sjsg 		return drm_dp_sideband_parse_link_address(mgr, raw, msg);
10431bb76ff1Sjsg 	case DP_QUERY_PAYLOAD:
10441bb76ff1Sjsg 		return drm_dp_sideband_parse_query_payload_ack(raw, msg);
10451bb76ff1Sjsg 	case DP_REMOTE_DPCD_READ:
10461bb76ff1Sjsg 		return drm_dp_sideband_parse_remote_dpcd_read(raw, msg);
10471bb76ff1Sjsg 	case DP_REMOTE_DPCD_WRITE:
10481bb76ff1Sjsg 		return drm_dp_sideband_parse_remote_dpcd_write(raw, msg);
10491bb76ff1Sjsg 	case DP_REMOTE_I2C_READ:
10501bb76ff1Sjsg 		return drm_dp_sideband_parse_remote_i2c_read_ack(raw, msg);
10511bb76ff1Sjsg 	case DP_REMOTE_I2C_WRITE:
10521bb76ff1Sjsg 		return true; /* since there's nothing to parse */
10531bb76ff1Sjsg 	case DP_ENUM_PATH_RESOURCES:
10541bb76ff1Sjsg 		return drm_dp_sideband_parse_enum_path_resources_ack(raw, msg);
10551bb76ff1Sjsg 	case DP_ALLOCATE_PAYLOAD:
10561bb76ff1Sjsg 		return drm_dp_sideband_parse_allocate_payload_ack(raw, msg);
10571bb76ff1Sjsg 	case DP_POWER_DOWN_PHY:
10581bb76ff1Sjsg 	case DP_POWER_UP_PHY:
10591bb76ff1Sjsg 		return drm_dp_sideband_parse_power_updown_phy_ack(raw, msg);
10601bb76ff1Sjsg 	case DP_CLEAR_PAYLOAD_ID_TABLE:
10611bb76ff1Sjsg 		return true; /* since there's nothing to parse */
10621bb76ff1Sjsg 	case DP_QUERY_STREAM_ENC_STATUS:
10631bb76ff1Sjsg 		return drm_dp_sideband_parse_query_stream_enc_status(raw, msg);
10641bb76ff1Sjsg 	default:
10651bb76ff1Sjsg 		drm_err(mgr->dev, "Got unknown reply 0x%02x (%s)\n",
10661bb76ff1Sjsg 			msg->req_type, drm_dp_mst_req_type_str(msg->req_type));
10671bb76ff1Sjsg 		return false;
10681bb76ff1Sjsg 	}
10691bb76ff1Sjsg }
10701bb76ff1Sjsg 
10711bb76ff1Sjsg static bool
10721bb76ff1Sjsg drm_dp_sideband_parse_connection_status_notify(const struct drm_dp_mst_topology_mgr *mgr,
10731bb76ff1Sjsg 					       struct drm_dp_sideband_msg_rx *raw,
10741bb76ff1Sjsg 					       struct drm_dp_sideband_msg_req_body *msg)
10751bb76ff1Sjsg {
10761bb76ff1Sjsg 	int idx = 1;
10771bb76ff1Sjsg 
10781bb76ff1Sjsg 	msg->u.conn_stat.port_number = (raw->msg[idx] & 0xf0) >> 4;
10791bb76ff1Sjsg 	idx++;
10801bb76ff1Sjsg 	if (idx > raw->curlen)
10811bb76ff1Sjsg 		goto fail_len;
10821bb76ff1Sjsg 
10831bb76ff1Sjsg 	memcpy(msg->u.conn_stat.guid, &raw->msg[idx], 16);
10841bb76ff1Sjsg 	idx += 16;
10851bb76ff1Sjsg 	if (idx > raw->curlen)
10861bb76ff1Sjsg 		goto fail_len;
10871bb76ff1Sjsg 
10881bb76ff1Sjsg 	msg->u.conn_stat.legacy_device_plug_status = (raw->msg[idx] >> 6) & 0x1;
10891bb76ff1Sjsg 	msg->u.conn_stat.displayport_device_plug_status = (raw->msg[idx] >> 5) & 0x1;
10901bb76ff1Sjsg 	msg->u.conn_stat.message_capability_status = (raw->msg[idx] >> 4) & 0x1;
10911bb76ff1Sjsg 	msg->u.conn_stat.input_port = (raw->msg[idx] >> 3) & 0x1;
10921bb76ff1Sjsg 	msg->u.conn_stat.peer_device_type = (raw->msg[idx] & 0x7);
10931bb76ff1Sjsg 	idx++;
10941bb76ff1Sjsg 	return true;
10951bb76ff1Sjsg fail_len:
10961bb76ff1Sjsg 	drm_dbg_kms(mgr->dev, "connection status reply parse length fail %d %d\n",
10971bb76ff1Sjsg 		    idx, raw->curlen);
10981bb76ff1Sjsg 	return false;
10991bb76ff1Sjsg }
11001bb76ff1Sjsg 
11011bb76ff1Sjsg static bool drm_dp_sideband_parse_resource_status_notify(const struct drm_dp_mst_topology_mgr *mgr,
11021bb76ff1Sjsg 							 struct drm_dp_sideband_msg_rx *raw,
11031bb76ff1Sjsg 							 struct drm_dp_sideband_msg_req_body *msg)
11041bb76ff1Sjsg {
11051bb76ff1Sjsg 	int idx = 1;
11061bb76ff1Sjsg 
11071bb76ff1Sjsg 	msg->u.resource_stat.port_number = (raw->msg[idx] & 0xf0) >> 4;
11081bb76ff1Sjsg 	idx++;
11091bb76ff1Sjsg 	if (idx > raw->curlen)
11101bb76ff1Sjsg 		goto fail_len;
11111bb76ff1Sjsg 
11121bb76ff1Sjsg 	memcpy(msg->u.resource_stat.guid, &raw->msg[idx], 16);
11131bb76ff1Sjsg 	idx += 16;
11141bb76ff1Sjsg 	if (idx > raw->curlen)
11151bb76ff1Sjsg 		goto fail_len;
11161bb76ff1Sjsg 
11171bb76ff1Sjsg 	msg->u.resource_stat.available_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]);
11181bb76ff1Sjsg 	idx++;
11191bb76ff1Sjsg 	return true;
11201bb76ff1Sjsg fail_len:
11211bb76ff1Sjsg 	drm_dbg_kms(mgr->dev, "resource status reply parse length fail %d %d\n", idx, raw->curlen);
11221bb76ff1Sjsg 	return false;
11231bb76ff1Sjsg }
11241bb76ff1Sjsg 
11251bb76ff1Sjsg static bool drm_dp_sideband_parse_req(const struct drm_dp_mst_topology_mgr *mgr,
11261bb76ff1Sjsg 				      struct drm_dp_sideband_msg_rx *raw,
11271bb76ff1Sjsg 				      struct drm_dp_sideband_msg_req_body *msg)
11281bb76ff1Sjsg {
11291bb76ff1Sjsg 	memset(msg, 0, sizeof(*msg));
11301bb76ff1Sjsg 	msg->req_type = (raw->msg[0] & 0x7f);
11311bb76ff1Sjsg 
11321bb76ff1Sjsg 	switch (msg->req_type) {
11331bb76ff1Sjsg 	case DP_CONNECTION_STATUS_NOTIFY:
11341bb76ff1Sjsg 		return drm_dp_sideband_parse_connection_status_notify(mgr, raw, msg);
11351bb76ff1Sjsg 	case DP_RESOURCE_STATUS_NOTIFY:
11361bb76ff1Sjsg 		return drm_dp_sideband_parse_resource_status_notify(mgr, raw, msg);
11371bb76ff1Sjsg 	default:
11381bb76ff1Sjsg 		drm_err(mgr->dev, "Got unknown request 0x%02x (%s)\n",
11391bb76ff1Sjsg 			msg->req_type, drm_dp_mst_req_type_str(msg->req_type));
11401bb76ff1Sjsg 		return false;
11411bb76ff1Sjsg 	}
11421bb76ff1Sjsg }
11431bb76ff1Sjsg 
11441bb76ff1Sjsg static void build_dpcd_write(struct drm_dp_sideband_msg_tx *msg,
11451bb76ff1Sjsg 			     u8 port_num, u32 offset, u8 num_bytes, u8 *bytes)
11461bb76ff1Sjsg {
11471bb76ff1Sjsg 	struct drm_dp_sideband_msg_req_body req;
11481bb76ff1Sjsg 
11491bb76ff1Sjsg 	req.req_type = DP_REMOTE_DPCD_WRITE;
11501bb76ff1Sjsg 	req.u.dpcd_write.port_number = port_num;
11511bb76ff1Sjsg 	req.u.dpcd_write.dpcd_address = offset;
11521bb76ff1Sjsg 	req.u.dpcd_write.num_bytes = num_bytes;
11531bb76ff1Sjsg 	req.u.dpcd_write.bytes = bytes;
11541bb76ff1Sjsg 	drm_dp_encode_sideband_req(&req, msg);
11551bb76ff1Sjsg }
11561bb76ff1Sjsg 
11571bb76ff1Sjsg static void build_link_address(struct drm_dp_sideband_msg_tx *msg)
11581bb76ff1Sjsg {
11591bb76ff1Sjsg 	struct drm_dp_sideband_msg_req_body req;
11601bb76ff1Sjsg 
11611bb76ff1Sjsg 	req.req_type = DP_LINK_ADDRESS;
11621bb76ff1Sjsg 	drm_dp_encode_sideband_req(&req, msg);
11631bb76ff1Sjsg }
11641bb76ff1Sjsg 
11651bb76ff1Sjsg static void build_clear_payload_id_table(struct drm_dp_sideband_msg_tx *msg)
11661bb76ff1Sjsg {
11671bb76ff1Sjsg 	struct drm_dp_sideband_msg_req_body req;
11681bb76ff1Sjsg 
11691bb76ff1Sjsg 	req.req_type = DP_CLEAR_PAYLOAD_ID_TABLE;
11701bb76ff1Sjsg 	drm_dp_encode_sideband_req(&req, msg);
11711bb76ff1Sjsg 	msg->path_msg = true;
11721bb76ff1Sjsg }
11731bb76ff1Sjsg 
11741bb76ff1Sjsg static int build_enum_path_resources(struct drm_dp_sideband_msg_tx *msg,
11751bb76ff1Sjsg 				     int port_num)
11761bb76ff1Sjsg {
11771bb76ff1Sjsg 	struct drm_dp_sideband_msg_req_body req;
11781bb76ff1Sjsg 
11791bb76ff1Sjsg 	req.req_type = DP_ENUM_PATH_RESOURCES;
11801bb76ff1Sjsg 	req.u.port_num.port_number = port_num;
11811bb76ff1Sjsg 	drm_dp_encode_sideband_req(&req, msg);
11821bb76ff1Sjsg 	msg->path_msg = true;
11831bb76ff1Sjsg 	return 0;
11841bb76ff1Sjsg }
11851bb76ff1Sjsg 
11861bb76ff1Sjsg static void build_allocate_payload(struct drm_dp_sideband_msg_tx *msg,
11871bb76ff1Sjsg 				   int port_num,
11881bb76ff1Sjsg 				   u8 vcpi, uint16_t pbn,
11891bb76ff1Sjsg 				   u8 number_sdp_streams,
11901bb76ff1Sjsg 				   u8 *sdp_stream_sink)
11911bb76ff1Sjsg {
11921bb76ff1Sjsg 	struct drm_dp_sideband_msg_req_body req;
11931bb76ff1Sjsg 
11941bb76ff1Sjsg 	memset(&req, 0, sizeof(req));
11951bb76ff1Sjsg 	req.req_type = DP_ALLOCATE_PAYLOAD;
11961bb76ff1Sjsg 	req.u.allocate_payload.port_number = port_num;
11971bb76ff1Sjsg 	req.u.allocate_payload.vcpi = vcpi;
11981bb76ff1Sjsg 	req.u.allocate_payload.pbn = pbn;
11991bb76ff1Sjsg 	req.u.allocate_payload.number_sdp_streams = number_sdp_streams;
12001bb76ff1Sjsg 	memcpy(req.u.allocate_payload.sdp_stream_sink, sdp_stream_sink,
12011bb76ff1Sjsg 		   number_sdp_streams);
12021bb76ff1Sjsg 	drm_dp_encode_sideband_req(&req, msg);
12031bb76ff1Sjsg 	msg->path_msg = true;
12041bb76ff1Sjsg }
12051bb76ff1Sjsg 
12061bb76ff1Sjsg static void build_power_updown_phy(struct drm_dp_sideband_msg_tx *msg,
12071bb76ff1Sjsg 				   int port_num, bool power_up)
12081bb76ff1Sjsg {
12091bb76ff1Sjsg 	struct drm_dp_sideband_msg_req_body req;
12101bb76ff1Sjsg 
12111bb76ff1Sjsg 	if (power_up)
12121bb76ff1Sjsg 		req.req_type = DP_POWER_UP_PHY;
12131bb76ff1Sjsg 	else
12141bb76ff1Sjsg 		req.req_type = DP_POWER_DOWN_PHY;
12151bb76ff1Sjsg 
12161bb76ff1Sjsg 	req.u.port_num.port_number = port_num;
12171bb76ff1Sjsg 	drm_dp_encode_sideband_req(&req, msg);
12181bb76ff1Sjsg 	msg->path_msg = true;
12191bb76ff1Sjsg }
12201bb76ff1Sjsg 
12211bb76ff1Sjsg static int
12221bb76ff1Sjsg build_query_stream_enc_status(struct drm_dp_sideband_msg_tx *msg, u8 stream_id,
12231bb76ff1Sjsg 			      u8 *q_id)
12241bb76ff1Sjsg {
12251bb76ff1Sjsg 	struct drm_dp_sideband_msg_req_body req;
12261bb76ff1Sjsg 
12271bb76ff1Sjsg 	req.req_type = DP_QUERY_STREAM_ENC_STATUS;
12281bb76ff1Sjsg 	req.u.enc_status.stream_id = stream_id;
12291bb76ff1Sjsg 	memcpy(req.u.enc_status.client_id, q_id,
12301bb76ff1Sjsg 	       sizeof(req.u.enc_status.client_id));
12311bb76ff1Sjsg 	req.u.enc_status.stream_event = 0;
12321bb76ff1Sjsg 	req.u.enc_status.valid_stream_event = false;
12331bb76ff1Sjsg 	req.u.enc_status.stream_behavior = 0;
12341bb76ff1Sjsg 	req.u.enc_status.valid_stream_behavior = false;
12351bb76ff1Sjsg 
12361bb76ff1Sjsg 	drm_dp_encode_sideband_req(&req, msg);
12371bb76ff1Sjsg 	return 0;
12381bb76ff1Sjsg }
12391bb76ff1Sjsg 
12401bb76ff1Sjsg static bool check_txmsg_state(struct drm_dp_mst_topology_mgr *mgr,
12411bb76ff1Sjsg 			      struct drm_dp_sideband_msg_tx *txmsg)
12421bb76ff1Sjsg {
12431bb76ff1Sjsg 	unsigned int state;
12441bb76ff1Sjsg 
12451bb76ff1Sjsg 	/*
12461bb76ff1Sjsg 	 * All updates to txmsg->state are protected by mgr->qlock, and the two
12471bb76ff1Sjsg 	 * cases we check here are terminal states. For those the barriers
12481bb76ff1Sjsg 	 * provided by the wake_up/wait_event pair are enough.
12491bb76ff1Sjsg 	 */
12501bb76ff1Sjsg 	state = READ_ONCE(txmsg->state);
12511bb76ff1Sjsg 	return (state == DRM_DP_SIDEBAND_TX_RX ||
12521bb76ff1Sjsg 		state == DRM_DP_SIDEBAND_TX_TIMEOUT);
12531bb76ff1Sjsg }
12541bb76ff1Sjsg 
12551bb76ff1Sjsg static int drm_dp_mst_wait_tx_reply(struct drm_dp_mst_branch *mstb,
12561bb76ff1Sjsg 				    struct drm_dp_sideband_msg_tx *txmsg)
12571bb76ff1Sjsg {
12581bb76ff1Sjsg 	struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
12591bb76ff1Sjsg 	unsigned long wait_timeout = msecs_to_jiffies(4000);
12601bb76ff1Sjsg 	unsigned long wait_expires = jiffies + wait_timeout;
12611bb76ff1Sjsg 	int ret;
12621bb76ff1Sjsg 
12631bb76ff1Sjsg 	for (;;) {
12641bb76ff1Sjsg 		/*
12651bb76ff1Sjsg 		 * If the driver provides a way for this, change to
12661bb76ff1Sjsg 		 * poll-waiting for the MST reply interrupt if we didn't receive
12671bb76ff1Sjsg 		 * it for 50 msec. This would cater for cases where the HPD
12681bb76ff1Sjsg 		 * pulse signal got lost somewhere, even though the sink raised
12691bb76ff1Sjsg 		 * the corresponding MST interrupt correctly. One example is the
12701bb76ff1Sjsg 		 * Club 3D CAC-1557 TypeC -> DP adapter which for some reason
12711bb76ff1Sjsg 		 * filters out short pulses with a duration less than ~540 usec.
12721bb76ff1Sjsg 		 *
12731bb76ff1Sjsg 		 * The poll period is 50 msec to avoid missing an interrupt
12741bb76ff1Sjsg 		 * after the sink has cleared it (after a 110 msec timeout
12751bb76ff1Sjsg 		 * since it raised the interrupt).
12761bb76ff1Sjsg 		 */
12771bb76ff1Sjsg 		ret = wait_event_timeout(mgr->tx_waitq,
12781bb76ff1Sjsg 					 check_txmsg_state(mgr, txmsg),
12791bb76ff1Sjsg 					 mgr->cbs->poll_hpd_irq ?
12801bb76ff1Sjsg 						msecs_to_jiffies(50) :
12811bb76ff1Sjsg 						wait_timeout);
12821bb76ff1Sjsg 
12831bb76ff1Sjsg 		if (ret || !mgr->cbs->poll_hpd_irq ||
12841bb76ff1Sjsg 		    time_after(jiffies, wait_expires))
12851bb76ff1Sjsg 			break;
12861bb76ff1Sjsg 
12871bb76ff1Sjsg 		mgr->cbs->poll_hpd_irq(mgr);
12881bb76ff1Sjsg 	}
12891bb76ff1Sjsg 
12901bb76ff1Sjsg 	mutex_lock(&mgr->qlock);
12911bb76ff1Sjsg 	if (ret > 0) {
12921bb76ff1Sjsg 		if (txmsg->state == DRM_DP_SIDEBAND_TX_TIMEOUT) {
12931bb76ff1Sjsg 			ret = -EIO;
12941bb76ff1Sjsg 			goto out;
12951bb76ff1Sjsg 		}
12961bb76ff1Sjsg 	} else {
12971bb76ff1Sjsg 		drm_dbg_kms(mgr->dev, "timed out msg send %p %d %d\n",
12981bb76ff1Sjsg 			    txmsg, txmsg->state, txmsg->seqno);
12991bb76ff1Sjsg 
13001bb76ff1Sjsg 		/* dump some state */
13011bb76ff1Sjsg 		ret = -EIO;
13021bb76ff1Sjsg 
13031bb76ff1Sjsg 		/* remove from q */
13041bb76ff1Sjsg 		if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED ||
13051bb76ff1Sjsg 		    txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND ||
13061bb76ff1Sjsg 		    txmsg->state == DRM_DP_SIDEBAND_TX_SENT)
13071bb76ff1Sjsg 			list_del(&txmsg->next);
13081bb76ff1Sjsg 	}
13091bb76ff1Sjsg out:
13101bb76ff1Sjsg 	if (unlikely(ret == -EIO) && drm_debug_enabled(DRM_UT_DP)) {
13111bb76ff1Sjsg 		struct drm_printer p = drm_debug_printer(DBG_PREFIX);
13121bb76ff1Sjsg 
13131bb76ff1Sjsg 		drm_dp_mst_dump_sideband_msg_tx(&p, txmsg);
13141bb76ff1Sjsg 	}
13151bb76ff1Sjsg 	mutex_unlock(&mgr->qlock);
13161bb76ff1Sjsg 
13171bb76ff1Sjsg 	drm_dp_mst_kick_tx(mgr);
13181bb76ff1Sjsg 	return ret;
13191bb76ff1Sjsg }
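
/*
 * A minimal sketch of the driver side of the polling fallback above: a
 * hypothetical driver supplies a .poll_hpd_irq callback that re-services the
 * sink's MST interrupt sources (drv_*, mgr_to_drv_dp() and service_mst_irq()
 * are made-up names, not part of this file):
 *
 *	static void drv_poll_hpd_irq(struct drm_dp_mst_topology_mgr *mgr)
 *	{
 *		struct drv_dp *dp = mgr_to_drv_dp(mgr);	// hypothetical
 *
 *		service_mst_irq(dp);	// re-read ESI regs, ack pending IRQs
 *	}
 *
 *	static const struct drm_dp_mst_topology_cbs drv_mst_cbs = {
 *		.add_connector = drv_add_connector,	// hypothetical
 *		.poll_hpd_irq = drv_poll_hpd_irq,
 *	};
 */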
13201bb76ff1Sjsg 
13211bb76ff1Sjsg static struct drm_dp_mst_branch *drm_dp_add_mst_branch_device(u8 lct, u8 *rad)
13221bb76ff1Sjsg {
13231bb76ff1Sjsg 	struct drm_dp_mst_branch *mstb;
13241bb76ff1Sjsg 
13251bb76ff1Sjsg 	mstb = kzalloc(sizeof(*mstb), GFP_KERNEL);
13261bb76ff1Sjsg 	if (!mstb)
13271bb76ff1Sjsg 		return NULL;
13281bb76ff1Sjsg 
13291bb76ff1Sjsg 	mstb->lct = lct;
13301bb76ff1Sjsg 	if (lct > 1)
13311bb76ff1Sjsg 		memcpy(mstb->rad, rad, lct / 2);
13321bb76ff1Sjsg 	INIT_LIST_HEAD(&mstb->ports);
13331bb76ff1Sjsg 	kref_init(&mstb->topology_kref);
13341bb76ff1Sjsg 	kref_init(&mstb->malloc_kref);
13351bb76ff1Sjsg 	return mstb;
13361bb76ff1Sjsg }
13371bb76ff1Sjsg 
13381bb76ff1Sjsg static void drm_dp_free_mst_branch_device(struct kref *kref)
13391bb76ff1Sjsg {
13401bb76ff1Sjsg 	struct drm_dp_mst_branch *mstb =
13411bb76ff1Sjsg 		container_of(kref, struct drm_dp_mst_branch, malloc_kref);
13421bb76ff1Sjsg 
13431bb76ff1Sjsg 	if (mstb->port_parent)
13441bb76ff1Sjsg 		drm_dp_mst_put_port_malloc(mstb->port_parent);
13451bb76ff1Sjsg 
13461bb76ff1Sjsg 	kfree(mstb);
13471bb76ff1Sjsg }
13481bb76ff1Sjsg 
13491bb76ff1Sjsg /**
13501bb76ff1Sjsg  * DOC: Branch device and port refcounting
13511bb76ff1Sjsg  *
13521bb76ff1Sjsg  * Topology refcount overview
13531bb76ff1Sjsg  * ~~~~~~~~~~~~~~~~~~~~~~~~~~
13541bb76ff1Sjsg  *
13551bb76ff1Sjsg  * The refcounting schemes for &struct drm_dp_mst_branch and &struct
13561bb76ff1Sjsg  * drm_dp_mst_port are somewhat unusual. Both ports and branch devices have
13571bb76ff1Sjsg  * two different kinds of refcounts: topology refcounts, and malloc refcounts.
13581bb76ff1Sjsg  *
13591bb76ff1Sjsg  * Topology refcounts are not exposed to drivers, and are handled internally
13601bb76ff1Sjsg  * by the DP MST helpers. The helpers use them in order to prevent the
13611bb76ff1Sjsg  * in-memory topology state from being changed in the middle of critical
13621bb76ff1Sjsg  * operations like changing the internal state of payload allocations. This
13631bb76ff1Sjsg  * means each branch and port will be considered to be connected to the rest
13641bb76ff1Sjsg  * of the topology until its topology refcount reaches zero. Additionally,
13651bb76ff1Sjsg  * for ports this means that their associated &struct drm_connector will stay
13661bb76ff1Sjsg  * registered with userspace until the port's refcount reaches 0.
13671bb76ff1Sjsg  *
13681bb76ff1Sjsg  * Malloc refcount overview
13691bb76ff1Sjsg  * ~~~~~~~~~~~~~~~~~~~~~~~~
13701bb76ff1Sjsg  *
13711bb76ff1Sjsg  * Malloc references are used to keep a &struct drm_dp_mst_port or &struct
13721bb76ff1Sjsg  * drm_dp_mst_branch allocated even after all of its topology references have
13731bb76ff1Sjsg  * been dropped, so that the driver or MST helpers can safely access each
13741bb76ff1Sjsg  * branch's last known state before it was disconnected from the topology.
13751bb76ff1Sjsg  * When the malloc refcount of a port or branch reaches 0, the memory
13761bb76ff1Sjsg  * allocation containing the &struct drm_dp_mst_branch or &struct
13771bb76ff1Sjsg  * drm_dp_mst_port respectively will be freed.
13781bb76ff1Sjsg  *
13791bb76ff1Sjsg  * For &struct drm_dp_mst_branch, malloc refcounts are not currently exposed
13801bb76ff1Sjsg  * to drivers. As of writing this documentation, there are no drivers that
13811bb76ff1Sjsg  * have a usecase for accessing &struct drm_dp_mst_branch outside of the MST
13821bb76ff1Sjsg  * have a use case for accessing &struct drm_dp_mst_branch outside of the MST
13831bb76ff1Sjsg  * helpers. Exposing this API to drivers in a race-free manner would take more
13841bb76ff1Sjsg  * tweaking of the refcounting scheme; however, patches are welcome provided
13851bb76ff1Sjsg  * there is a legitimate driver use case for this.
13861bb76ff1Sjsg  * Refcount relationships in a topology
13871bb76ff1Sjsg  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
13881bb76ff1Sjsg  *
13891bb76ff1Sjsg  * Let's take a look at why the relationship between topology and malloc
13901bb76ff1Sjsg  * refcounts is designed the way it is.
13911bb76ff1Sjsg  *
13921bb76ff1Sjsg  * .. kernel-figure:: dp-mst/topology-figure-1.dot
13931bb76ff1Sjsg  *
13941bb76ff1Sjsg  *    An example of topology and malloc refs in a DP MST topology with two
13951bb76ff1Sjsg  *    active payloads. Topology refcount increments are indicated by solid
13961bb76ff1Sjsg  *    lines, and malloc refcount increments are indicated by dashed lines.
13971bb76ff1Sjsg  *    Each starts from the branch which incremented the refcount, and ends at
13981bb76ff1Sjsg  *    the branch to which the refcount belongs, i.e. the arrow points the
13991bb76ff1Sjsg  *    same way as the C pointers used to reference a structure.
14001bb76ff1Sjsg  *
14011bb76ff1Sjsg  * As you can see in the above figure, every branch increments the topology
14021bb76ff1Sjsg  * refcount of its children, and increments the malloc refcount of its
14031bb76ff1Sjsg  * parent. Additionally, every payload increments the malloc refcount of its
14041bb76ff1Sjsg  * assigned port by 1.
14051bb76ff1Sjsg  *
14061bb76ff1Sjsg  * So, what would happen if MSTB #3 from the above figure was unplugged from
14071bb76ff1Sjsg  * the system, but the driver hadn't yet removed payload #2 from port #3? The
14081bb76ff1Sjsg  * topology would start to look like the figure below.
14091bb76ff1Sjsg  *
14101bb76ff1Sjsg  * .. kernel-figure:: dp-mst/topology-figure-2.dot
14111bb76ff1Sjsg  *
14121bb76ff1Sjsg  *    Ports and branch devices which have been released from memory are
14131bb76ff1Sjsg  *    colored grey, and references which have been removed are colored red.
14141bb76ff1Sjsg  *
14151bb76ff1Sjsg  * Whenever a port or branch device's topology refcount reaches zero, it will
14161bb76ff1Sjsg  * decrement the topology refcounts of all its children, the malloc refcount
14171bb76ff1Sjsg  * of its parent, and finally its own malloc refcount. For MSTB #4 and port
14181bb76ff1Sjsg  * #4, this means they both have been disconnected from the topology and freed
14191bb76ff1Sjsg  * from memory. But, because payload #2 is still holding a reference to port
14201bb76ff1Sjsg  * #3, port #3 is removed from the topology but its &struct drm_dp_mst_port
14211bb76ff1Sjsg  * is still accessible from memory. This also means port #3 has not yet
14221bb76ff1Sjsg  * decremented the malloc refcount of MSTB #3, so its &struct
14231bb76ff1Sjsg  * drm_dp_mst_branch will also stay allocated in memory until port #3's
14241bb76ff1Sjsg  * malloc refcount reaches 0.
14251bb76ff1Sjsg  *
14261bb76ff1Sjsg  * This relationship is necessary because in order to release payload #2, we
14271bb76ff1Sjsg  * need to be able to figure out the last relative of port #3 that's still
14281bb76ff1Sjsg  * connected to the topology. In this case, we would travel up the topology as
14291bb76ff1Sjsg  * shown below.
14301bb76ff1Sjsg  *
14311bb76ff1Sjsg  * .. kernel-figure:: dp-mst/topology-figure-3.dot
14321bb76ff1Sjsg  *
14331bb76ff1Sjsg  * And finally, remove payload #2 by communicating with port #2 through
14341bb76ff1Sjsg  * sideband transactions.
14351bb76ff1Sjsg  */
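
/*
 * A minimal sketch of the driver-visible half of the scheme described above
 * (only malloc refcounts are exposed to drivers; drv_state is a hypothetical
 * driver structure):
 *
 *	// Pin the port's allocation so its last known state stays readable
 *	// even after the port is unplugged from the topology.
 *	drm_dp_mst_get_port_malloc(port);
 *	drv_state->port = port;
 *
 *	// ... later, once the driver has released its payload ...
 *
 *	// Drop the pin; this may free the port and, transitively, let its
 *	// parent branch device be freed as well.
 *	drm_dp_mst_put_port_malloc(drv_state->port);
 */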
14361bb76ff1Sjsg 
14371bb76ff1Sjsg /**
14381bb76ff1Sjsg  * drm_dp_mst_get_mstb_malloc() - Increment the malloc refcount of a branch
14391bb76ff1Sjsg  * device
14401bb76ff1Sjsg  * @mstb: The &struct drm_dp_mst_branch to increment the malloc refcount of
14411bb76ff1Sjsg  *
14421bb76ff1Sjsg  * Increments &drm_dp_mst_branch.malloc_kref. When
14431bb76ff1Sjsg  * &drm_dp_mst_branch.malloc_kref reaches 0, the memory allocation for @mstb
14441bb76ff1Sjsg  * will be released and @mstb may no longer be used.
14451bb76ff1Sjsg  *
14461bb76ff1Sjsg  * See also: drm_dp_mst_put_mstb_malloc()
14471bb76ff1Sjsg  */
14481bb76ff1Sjsg static void
14491bb76ff1Sjsg drm_dp_mst_get_mstb_malloc(struct drm_dp_mst_branch *mstb)
14501bb76ff1Sjsg {
14511bb76ff1Sjsg 	kref_get(&mstb->malloc_kref);
14521bb76ff1Sjsg 	drm_dbg(mstb->mgr->dev, "mstb %p (%d)\n", mstb, kref_read(&mstb->malloc_kref));
14531bb76ff1Sjsg }
14541bb76ff1Sjsg 
14551bb76ff1Sjsg /**
14561bb76ff1Sjsg  * drm_dp_mst_put_mstb_malloc() - Decrement the malloc refcount of a branch
14571bb76ff1Sjsg  * device
14581bb76ff1Sjsg  * @mstb: The &struct drm_dp_mst_branch to decrement the malloc refcount of
14591bb76ff1Sjsg  *
14601bb76ff1Sjsg  * Decrements &drm_dp_mst_branch.malloc_kref. When
14611bb76ff1Sjsg  * &drm_dp_mst_branch.malloc_kref reaches 0, the memory allocation for @mstb
14621bb76ff1Sjsg  * will be released and @mstb may no longer be used.
14631bb76ff1Sjsg  *
14641bb76ff1Sjsg  * See also: drm_dp_mst_get_mstb_malloc()
14651bb76ff1Sjsg  */
14661bb76ff1Sjsg static void
14671bb76ff1Sjsg drm_dp_mst_put_mstb_malloc(struct drm_dp_mst_branch *mstb)
14681bb76ff1Sjsg {
14691bb76ff1Sjsg 	drm_dbg(mstb->mgr->dev, "mstb %p (%d)\n", mstb, kref_read(&mstb->malloc_kref) - 1);
14701bb76ff1Sjsg 	kref_put(&mstb->malloc_kref, drm_dp_free_mst_branch_device);
14711bb76ff1Sjsg }
14721bb76ff1Sjsg 
14731bb76ff1Sjsg static void drm_dp_free_mst_port(struct kref *kref)
14741bb76ff1Sjsg {
14751bb76ff1Sjsg 	struct drm_dp_mst_port *port =
14761bb76ff1Sjsg 		container_of(kref, struct drm_dp_mst_port, malloc_kref);
14771bb76ff1Sjsg 
14781bb76ff1Sjsg 	drm_dp_mst_put_mstb_malloc(port->parent);
14791bb76ff1Sjsg 	kfree(port);
14801bb76ff1Sjsg }
14811bb76ff1Sjsg 
14821bb76ff1Sjsg /**
14831bb76ff1Sjsg  * drm_dp_mst_get_port_malloc() - Increment the malloc refcount of an MST port
14841bb76ff1Sjsg  * @port: The &struct drm_dp_mst_port to increment the malloc refcount of
14851bb76ff1Sjsg  *
14861bb76ff1Sjsg  * Increments &drm_dp_mst_port.malloc_kref. When &drm_dp_mst_port.malloc_kref
14871bb76ff1Sjsg  * reaches 0, the memory allocation for @port will be released and @port may
14881bb76ff1Sjsg  * no longer be used.
14891bb76ff1Sjsg  *
14901bb76ff1Sjsg  * Because @port could potentially be freed at any time by the DP MST helpers
14911bb76ff1Sjsg  * if &drm_dp_mst_port.malloc_kref reaches 0, including during a call to this
14921bb76ff1Sjsg  * function, drivers that wish to make use of &struct drm_dp_mst_port should
14931bb76ff1Sjsg  * ensure that they grab at least one main malloc reference to their MST ports
14941bb76ff1Sjsg  * in &drm_dp_mst_topology_cbs.add_connector. This callback is called before
14951bb76ff1Sjsg  * there is any chance for &drm_dp_mst_port.malloc_kref to reach 0.
14961bb76ff1Sjsg  *
14971bb76ff1Sjsg  * See also: drm_dp_mst_put_port_malloc()
14981bb76ff1Sjsg  */
14991bb76ff1Sjsg void
15001bb76ff1Sjsg drm_dp_mst_get_port_malloc(struct drm_dp_mst_port *port)
15011bb76ff1Sjsg {
15021bb76ff1Sjsg 	kref_get(&port->malloc_kref);
15031bb76ff1Sjsg 	drm_dbg(port->mgr->dev, "port %p (%d)\n", port, kref_read(&port->malloc_kref));
15041bb76ff1Sjsg }
15051bb76ff1Sjsg EXPORT_SYMBOL(drm_dp_mst_get_port_malloc);
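
/*
 * For example, a driver's &drm_dp_mst_topology_cbs.add_connector hook would
 * typically pin the port before stashing a pointer to it (a sketch;
 * drv_connector_init() is hypothetical):
 *
 *	static struct drm_connector *
 *	drv_add_connector(struct drm_dp_mst_topology_mgr *mgr,
 *			  struct drm_dp_mst_port *port, const char *path)
 *	{
 *		struct drm_connector *connector;
 *
 *		connector = drv_connector_init(mgr, path);	// hypothetical
 *		if (!connector)
 *			return NULL;
 *
 *		drm_dp_mst_get_port_malloc(port);
 *		return connector;
 *	}
 */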
15061bb76ff1Sjsg 
15071bb76ff1Sjsg /**
15081bb76ff1Sjsg  * drm_dp_mst_put_port_malloc() - Decrement the malloc refcount of an MST port
15091bb76ff1Sjsg  * @port: The &struct drm_dp_mst_port to decrement the malloc refcount of
15101bb76ff1Sjsg  *
15111bb76ff1Sjsg  * Decrements &drm_dp_mst_port.malloc_kref. When &drm_dp_mst_port.malloc_kref
15121bb76ff1Sjsg  * reaches 0, the memory allocation for @port will be released and @port may
15131bb76ff1Sjsg  * no longer be used.
15141bb76ff1Sjsg  *
15151bb76ff1Sjsg  * See also: drm_dp_mst_get_port_malloc()
15161bb76ff1Sjsg  */
15171bb76ff1Sjsg void
15181bb76ff1Sjsg drm_dp_mst_put_port_malloc(struct drm_dp_mst_port *port)
15191bb76ff1Sjsg {
15201bb76ff1Sjsg 	drm_dbg(port->mgr->dev, "port %p (%d)\n", port, kref_read(&port->malloc_kref) - 1);
15211bb76ff1Sjsg 	kref_put(&port->malloc_kref, drm_dp_free_mst_port);
15221bb76ff1Sjsg }
15231bb76ff1Sjsg EXPORT_SYMBOL(drm_dp_mst_put_port_malloc);
15241bb76ff1Sjsg 
15251bb76ff1Sjsg #if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
15261bb76ff1Sjsg 
15271bb76ff1Sjsg #define STACK_DEPTH 8
15281bb76ff1Sjsg 
15291bb76ff1Sjsg static noinline void
15301bb76ff1Sjsg __topology_ref_save(struct drm_dp_mst_topology_mgr *mgr,
15311bb76ff1Sjsg 		    struct drm_dp_mst_topology_ref_history *history,
15321bb76ff1Sjsg 		    enum drm_dp_mst_topology_ref_type type)
15331bb76ff1Sjsg {
15341bb76ff1Sjsg 	struct drm_dp_mst_topology_ref_entry *entry = NULL;
15351bb76ff1Sjsg 	depot_stack_handle_t backtrace;
15361bb76ff1Sjsg 	ulong stack_entries[STACK_DEPTH];
15371bb76ff1Sjsg 	uint n;
15381bb76ff1Sjsg 	int i;
15391bb76ff1Sjsg 
15401bb76ff1Sjsg 	n = stack_trace_save(stack_entries, ARRAY_SIZE(stack_entries), 1);
15411bb76ff1Sjsg 	backtrace = stack_depot_save(stack_entries, n, GFP_KERNEL);
15421bb76ff1Sjsg 	if (!backtrace)
15431bb76ff1Sjsg 		return;
15441bb76ff1Sjsg 
15451bb76ff1Sjsg 	/* Try to find an existing entry for this backtrace */
15461bb76ff1Sjsg 	for (i = 0; i < history->len; i++) {
15471bb76ff1Sjsg 		if (history->entries[i].backtrace == backtrace) {
15481bb76ff1Sjsg 			entry = &history->entries[i];
15491bb76ff1Sjsg 			break;
15501bb76ff1Sjsg 		}
15511bb76ff1Sjsg 	}
15521bb76ff1Sjsg 
15531bb76ff1Sjsg 	/* Otherwise add one */
15541bb76ff1Sjsg 	if (!entry) {
15551bb76ff1Sjsg 		struct drm_dp_mst_topology_ref_entry *new;
15561bb76ff1Sjsg 		int new_len = history->len + 1;
15571bb76ff1Sjsg 
15581bb76ff1Sjsg 		new = krealloc(history->entries, sizeof(*new) * new_len,
15591bb76ff1Sjsg 			       GFP_KERNEL);
15601bb76ff1Sjsg 		if (!new)
15611bb76ff1Sjsg 			return;
15621bb76ff1Sjsg 
15631bb76ff1Sjsg 		entry = &new[history->len];
15641bb76ff1Sjsg 		history->len = new_len;
15651bb76ff1Sjsg 		history->entries = new;
15661bb76ff1Sjsg 
15671bb76ff1Sjsg 		entry->backtrace = backtrace;
15681bb76ff1Sjsg 		entry->type = type;
15691bb76ff1Sjsg 		entry->count = 0;
15701bb76ff1Sjsg 	}
15711bb76ff1Sjsg 	entry->count++;
15721bb76ff1Sjsg 	entry->ts_nsec = ktime_get_ns();
15731bb76ff1Sjsg }
15741bb76ff1Sjsg 
15751bb76ff1Sjsg static int
15761bb76ff1Sjsg topology_ref_history_cmp(const void *a, const void *b)
15771bb76ff1Sjsg {
15781bb76ff1Sjsg 	const struct drm_dp_mst_topology_ref_entry *entry_a = a, *entry_b = b;
15791bb76ff1Sjsg 
15801bb76ff1Sjsg 	if (entry_a->ts_nsec > entry_b->ts_nsec)
15811bb76ff1Sjsg 		return 1;
15821bb76ff1Sjsg 	else if (entry_a->ts_nsec < entry_b->ts_nsec)
15831bb76ff1Sjsg 		return -1;
15841bb76ff1Sjsg 	else
15851bb76ff1Sjsg 		return 0;
15861bb76ff1Sjsg }
15871bb76ff1Sjsg 
15881bb76ff1Sjsg static inline const char *
15891bb76ff1Sjsg topology_ref_type_to_str(enum drm_dp_mst_topology_ref_type type)
15901bb76ff1Sjsg {
15911bb76ff1Sjsg 	if (type == DRM_DP_MST_TOPOLOGY_REF_GET)
15921bb76ff1Sjsg 		return "get";
15931bb76ff1Sjsg 	else
15941bb76ff1Sjsg 		return "put";
15951bb76ff1Sjsg }
15961bb76ff1Sjsg 
15971bb76ff1Sjsg static void
15981bb76ff1Sjsg __dump_topology_ref_history(struct drm_dp_mst_topology_ref_history *history,
15991bb76ff1Sjsg 			    void *ptr, const char *type_str)
16001bb76ff1Sjsg {
16011bb76ff1Sjsg 	struct drm_printer p = drm_debug_printer(DBG_PREFIX);
16021bb76ff1Sjsg 	char *buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
16031bb76ff1Sjsg 	int i;
16041bb76ff1Sjsg 
16051bb76ff1Sjsg 	if (!buf)
16061bb76ff1Sjsg 		return;
16071bb76ff1Sjsg 
16081bb76ff1Sjsg 	if (!history->len)
16091bb76ff1Sjsg 		goto out;
16101bb76ff1Sjsg 
16111bb76ff1Sjsg 	/* First, sort the list so that it goes from oldest to newest
16121bb76ff1Sjsg 	 * reference entry
16131bb76ff1Sjsg 	 */
16141bb76ff1Sjsg 	sort(history->entries, history->len, sizeof(*history->entries),
16151bb76ff1Sjsg 	     topology_ref_history_cmp, NULL);
16161bb76ff1Sjsg 
16171bb76ff1Sjsg 	drm_printf(&p, "%s (%p) topology count reached 0, dumping history:\n",
16181bb76ff1Sjsg 		   type_str, ptr);
16191bb76ff1Sjsg 
16201bb76ff1Sjsg 	for (i = 0; i < history->len; i++) {
16211bb76ff1Sjsg 		const struct drm_dp_mst_topology_ref_entry *entry =
16221bb76ff1Sjsg 			&history->entries[i];
16231bb76ff1Sjsg 		u64 ts_nsec = entry->ts_nsec;
16241bb76ff1Sjsg 		u32 rem_nsec = do_div(ts_nsec, 1000000000);
16251bb76ff1Sjsg 
16261bb76ff1Sjsg 		stack_depot_snprint(entry->backtrace, buf, PAGE_SIZE, 4);
16271bb76ff1Sjsg 
16281bb76ff1Sjsg 		drm_printf(&p, "  %d %ss (last at %5llu.%06u):\n%s",
16291bb76ff1Sjsg 			   entry->count,
16301bb76ff1Sjsg 			   topology_ref_type_to_str(entry->type),
16311bb76ff1Sjsg 			   ts_nsec, rem_nsec / 1000, buf);
16321bb76ff1Sjsg 	}
16331bb76ff1Sjsg 
16341bb76ff1Sjsg 	/* Now free the history, since this is the only time we expose it */
16351bb76ff1Sjsg 	kfree(history->entries);
16361bb76ff1Sjsg out:
16371bb76ff1Sjsg 	kfree(buf);
16381bb76ff1Sjsg }
16391bb76ff1Sjsg 
16401bb76ff1Sjsg static __always_inline void
16411bb76ff1Sjsg drm_dp_mst_dump_mstb_topology_history(struct drm_dp_mst_branch *mstb)
16421bb76ff1Sjsg {
16431bb76ff1Sjsg 	__dump_topology_ref_history(&mstb->topology_ref_history, mstb,
16441bb76ff1Sjsg 				    "MSTB");
16451bb76ff1Sjsg }
16461bb76ff1Sjsg 
16471bb76ff1Sjsg static __always_inline void
16481bb76ff1Sjsg drm_dp_mst_dump_port_topology_history(struct drm_dp_mst_port *port)
16491bb76ff1Sjsg {
16501bb76ff1Sjsg 	__dump_topology_ref_history(&port->topology_ref_history, port,
16511bb76ff1Sjsg 				    "Port");
16521bb76ff1Sjsg }
16531bb76ff1Sjsg 
16541bb76ff1Sjsg static __always_inline void
16551bb76ff1Sjsg save_mstb_topology_ref(struct drm_dp_mst_branch *mstb,
16561bb76ff1Sjsg 		       enum drm_dp_mst_topology_ref_type type)
16571bb76ff1Sjsg {
16581bb76ff1Sjsg 	__topology_ref_save(mstb->mgr, &mstb->topology_ref_history, type);
16591bb76ff1Sjsg }
16601bb76ff1Sjsg 
16611bb76ff1Sjsg static __always_inline void
16621bb76ff1Sjsg save_port_topology_ref(struct drm_dp_mst_port *port,
16631bb76ff1Sjsg 		       enum drm_dp_mst_topology_ref_type type)
16641bb76ff1Sjsg {
16651bb76ff1Sjsg 	__topology_ref_save(port->mgr, &port->topology_ref_history, type);
16661bb76ff1Sjsg }
16671bb76ff1Sjsg 
16681bb76ff1Sjsg static inline void
16691bb76ff1Sjsg topology_ref_history_lock(struct drm_dp_mst_topology_mgr *mgr)
16701bb76ff1Sjsg {
16711bb76ff1Sjsg 	mutex_lock(&mgr->topology_ref_history_lock);
16721bb76ff1Sjsg }
16731bb76ff1Sjsg 
16741bb76ff1Sjsg static inline void
16751bb76ff1Sjsg topology_ref_history_unlock(struct drm_dp_mst_topology_mgr *mgr)
16761bb76ff1Sjsg {
16771bb76ff1Sjsg 	mutex_unlock(&mgr->topology_ref_history_lock);
16781bb76ff1Sjsg }
16791bb76ff1Sjsg #else
16801bb76ff1Sjsg static inline void
16811bb76ff1Sjsg topology_ref_history_lock(struct drm_dp_mst_topology_mgr *mgr) {}
16821bb76ff1Sjsg static inline void
16831bb76ff1Sjsg topology_ref_history_unlock(struct drm_dp_mst_topology_mgr *mgr) {}
16841bb76ff1Sjsg static inline void
16851bb76ff1Sjsg drm_dp_mst_dump_mstb_topology_history(struct drm_dp_mst_branch *mstb) {}
16861bb76ff1Sjsg static inline void
16871bb76ff1Sjsg drm_dp_mst_dump_port_topology_history(struct drm_dp_mst_port *port) {}
16881bb76ff1Sjsg #define save_mstb_topology_ref(mstb, type)
16891bb76ff1Sjsg #define save_port_topology_ref(port, type)
16901bb76ff1Sjsg #endif
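
/*
 * Note: the reference-history tracking above is only built with
 * CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS=y. When enabled, every topology
 * get/put records a backtrace, and the accumulated history is dumped once a
 * port's or branch device's topology refcount reaches zero.
 */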
16911bb76ff1Sjsg 
16921bb76ff1Sjsg struct drm_dp_mst_atomic_payload *
16931bb76ff1Sjsg drm_atomic_get_mst_payload_state(struct drm_dp_mst_topology_state *state,
16941bb76ff1Sjsg 				 struct drm_dp_mst_port *port)
16951bb76ff1Sjsg {
16961bb76ff1Sjsg 	struct drm_dp_mst_atomic_payload *payload;
16971bb76ff1Sjsg 
16981bb76ff1Sjsg 	list_for_each_entry(payload, &state->payloads, next)
16991bb76ff1Sjsg 		if (payload->port == port)
17001bb76ff1Sjsg 			return payload;
17011bb76ff1Sjsg 
17021bb76ff1Sjsg 	return NULL;
17031bb76ff1Sjsg }
17041bb76ff1Sjsg EXPORT_SYMBOL(drm_atomic_get_mst_payload_state);
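
/*
 * A minimal sketch of a lookup during atomic programming, assuming the
 * caller already obtained the topology state (e.g. via
 * drm_atomic_get_mst_topology_state(); drv_program_payload() is
 * hypothetical):
 *
 *	struct drm_dp_mst_atomic_payload *payload;
 *
 *	payload = drm_atomic_get_mst_payload_state(mst_state, port);
 *	if (payload)
 *		drv_program_payload(payload->vc_start_slot,
 *				    payload->time_slots);
 *	// A NULL return means no payload exists for @port in this state.
 */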
17051bb76ff1Sjsg 
17061bb76ff1Sjsg static void drm_dp_destroy_mst_branch_device(struct kref *kref)
17071bb76ff1Sjsg {
17081bb76ff1Sjsg 	struct drm_dp_mst_branch *mstb =
17091bb76ff1Sjsg 		container_of(kref, struct drm_dp_mst_branch, topology_kref);
17101bb76ff1Sjsg 	struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
17111bb76ff1Sjsg 
17121bb76ff1Sjsg 	drm_dp_mst_dump_mstb_topology_history(mstb);
17131bb76ff1Sjsg 
17141bb76ff1Sjsg 	INIT_LIST_HEAD(&mstb->destroy_next);
17151bb76ff1Sjsg 
17161bb76ff1Sjsg 	/*
17171bb76ff1Sjsg 	 * This can get called under mgr->mutex, so we need to perform the
17181bb76ff1Sjsg 	 * actual destruction of the mstb in another worker
17191bb76ff1Sjsg 	 */
17201bb76ff1Sjsg 	mutex_lock(&mgr->delayed_destroy_lock);
17211bb76ff1Sjsg 	list_add(&mstb->destroy_next, &mgr->destroy_branch_device_list);
17221bb76ff1Sjsg 	mutex_unlock(&mgr->delayed_destroy_lock);
17231bb76ff1Sjsg 	queue_work(mgr->delayed_destroy_wq, &mgr->delayed_destroy_work);
17241bb76ff1Sjsg }
17251bb76ff1Sjsg 
17261bb76ff1Sjsg /**
17271bb76ff1Sjsg  * drm_dp_mst_topology_try_get_mstb() - Increment the topology refcount of a
17281bb76ff1Sjsg  * branch device unless it's zero
17291bb76ff1Sjsg  * @mstb: &struct drm_dp_mst_branch to increment the topology refcount of
17301bb76ff1Sjsg  *
17311bb76ff1Sjsg  * Attempts to grab a topology reference to @mstb, if it hasn't yet been
17321bb76ff1Sjsg  * removed from the topology (i.e. &drm_dp_mst_branch.topology_kref has
17331bb76ff1Sjsg  * reached 0). Holding a topology reference implies that a malloc reference
17341bb76ff1Sjsg  * will be held to @mstb as long as the user holds the topology reference.
17351bb76ff1Sjsg  *
17361bb76ff1Sjsg  * Care should be taken to ensure that the user has at least one malloc
17371bb76ff1Sjsg  * reference to @mstb. If you already have a topology reference to @mstb, you
17381bb76ff1Sjsg  * should use drm_dp_mst_topology_get_mstb() instead.
17391bb76ff1Sjsg  *
17401bb76ff1Sjsg  * See also:
17411bb76ff1Sjsg  * drm_dp_mst_topology_get_mstb()
17421bb76ff1Sjsg  * drm_dp_mst_topology_put_mstb()
17431bb76ff1Sjsg  *
17441bb76ff1Sjsg  * Returns:
17451bb76ff1Sjsg  * * 1: A topology reference was grabbed successfully
17461bb76ff1Sjsg  * * 0: @mstb is no longer in the topology, no reference was grabbed
17471bb76ff1Sjsg  */
17481bb76ff1Sjsg static int __must_check
17491bb76ff1Sjsg drm_dp_mst_topology_try_get_mstb(struct drm_dp_mst_branch *mstb)
17501bb76ff1Sjsg {
17511bb76ff1Sjsg 	int ret;
17521bb76ff1Sjsg 
17531bb76ff1Sjsg 	topology_ref_history_lock(mstb->mgr);
17541bb76ff1Sjsg 	ret = kref_get_unless_zero(&mstb->topology_kref);
17551bb76ff1Sjsg 	if (ret) {
17561bb76ff1Sjsg 		drm_dbg(mstb->mgr->dev, "mstb %p (%d)\n", mstb, kref_read(&mstb->topology_kref));
17571bb76ff1Sjsg 		save_mstb_topology_ref(mstb, DRM_DP_MST_TOPOLOGY_REF_GET);
17581bb76ff1Sjsg 	}
17591bb76ff1Sjsg 
17601bb76ff1Sjsg 	topology_ref_history_unlock(mstb->mgr);
17611bb76ff1Sjsg 
17621bb76ff1Sjsg 	return ret;
17631bb76ff1Sjsg }
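
/*
 * Internally, the helpers pair this with drm_dp_mst_topology_put_mstb() in a
 * validate-and-pin pattern (see drm_dp_mst_topology_get_mstb_validated()
 * below for the real thing):
 *
 *	if (mstb && !drm_dp_mst_topology_try_get_mstb(mstb))
 *		mstb = NULL;	// already removed from the topology
 *	...
 *	if (mstb)
 *		drm_dp_mst_topology_put_mstb(mstb);	// when done
 */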
17641bb76ff1Sjsg 
17651bb76ff1Sjsg /**
17661bb76ff1Sjsg  * drm_dp_mst_topology_get_mstb() - Increment the topology refcount of a
17671bb76ff1Sjsg  * branch device
17681bb76ff1Sjsg  * @mstb: The &struct drm_dp_mst_branch to increment the topology refcount of
17691bb76ff1Sjsg  *
17701bb76ff1Sjsg  * Increments &drm_dp_mst_branch.topology_kref without checking whether or
17711bb76ff1Sjsg  * not it's already reached 0. This is only valid to use in scenarios where
17721bb76ff1Sjsg  * you are already guaranteed to have at least one active topology reference
17731bb76ff1Sjsg  * to @mstb. Otherwise, drm_dp_mst_topology_try_get_mstb() must be used.
17741bb76ff1Sjsg  *
17751bb76ff1Sjsg  * See also:
17761bb76ff1Sjsg  * drm_dp_mst_topology_try_get_mstb()
17771bb76ff1Sjsg  * drm_dp_mst_topology_put_mstb()
17781bb76ff1Sjsg  */
17791bb76ff1Sjsg static void drm_dp_mst_topology_get_mstb(struct drm_dp_mst_branch *mstb)
17801bb76ff1Sjsg {
17811bb76ff1Sjsg 	topology_ref_history_lock(mstb->mgr);
17821bb76ff1Sjsg 
17831bb76ff1Sjsg 	save_mstb_topology_ref(mstb, DRM_DP_MST_TOPOLOGY_REF_GET);
17841bb76ff1Sjsg 	WARN_ON(kref_read(&mstb->topology_kref) == 0);
17851bb76ff1Sjsg 	kref_get(&mstb->topology_kref);
17861bb76ff1Sjsg 	drm_dbg(mstb->mgr->dev, "mstb %p (%d)\n", mstb, kref_read(&mstb->topology_kref));
17871bb76ff1Sjsg 
17881bb76ff1Sjsg 	topology_ref_history_unlock(mstb->mgr);
17891bb76ff1Sjsg }
17901bb76ff1Sjsg 
17911bb76ff1Sjsg /**
17921bb76ff1Sjsg  * drm_dp_mst_topology_put_mstb() - release a topology reference to a branch
17931bb76ff1Sjsg  * device
17941bb76ff1Sjsg  * @mstb: The &struct drm_dp_mst_branch to release the topology reference from
17951bb76ff1Sjsg  *
17961bb76ff1Sjsg  * Releases a topology reference from @mstb by decrementing
17971bb76ff1Sjsg  * &drm_dp_mst_branch.topology_kref.
17981bb76ff1Sjsg  *
17991bb76ff1Sjsg  * See also:
18001bb76ff1Sjsg  * drm_dp_mst_topology_try_get_mstb()
18011bb76ff1Sjsg  * drm_dp_mst_topology_get_mstb()
18021bb76ff1Sjsg  */
18031bb76ff1Sjsg static void
18041bb76ff1Sjsg drm_dp_mst_topology_put_mstb(struct drm_dp_mst_branch *mstb)
18051bb76ff1Sjsg {
18061bb76ff1Sjsg 	topology_ref_history_lock(mstb->mgr);
18071bb76ff1Sjsg 
18081bb76ff1Sjsg 	drm_dbg(mstb->mgr->dev, "mstb %p (%d)\n", mstb, kref_read(&mstb->topology_kref) - 1);
18091bb76ff1Sjsg 	save_mstb_topology_ref(mstb, DRM_DP_MST_TOPOLOGY_REF_PUT);
18101bb76ff1Sjsg 
18111bb76ff1Sjsg 	topology_ref_history_unlock(mstb->mgr);
18121bb76ff1Sjsg 	kref_put(&mstb->topology_kref, drm_dp_destroy_mst_branch_device);
18131bb76ff1Sjsg }
18141bb76ff1Sjsg 
18151bb76ff1Sjsg static void drm_dp_destroy_port(struct kref *kref)
18161bb76ff1Sjsg {
18171bb76ff1Sjsg 	struct drm_dp_mst_port *port =
18181bb76ff1Sjsg 		container_of(kref, struct drm_dp_mst_port, topology_kref);
18191bb76ff1Sjsg 	struct drm_dp_mst_topology_mgr *mgr = port->mgr;
18201bb76ff1Sjsg 
18211bb76ff1Sjsg 	drm_dp_mst_dump_port_topology_history(port);
18221bb76ff1Sjsg 
18231bb76ff1Sjsg 	/* There's nothing that needs locking to destroy an input port yet */
18241bb76ff1Sjsg 	if (port->input) {
18251bb76ff1Sjsg 		drm_dp_mst_put_port_malloc(port);
18261bb76ff1Sjsg 		return;
18271bb76ff1Sjsg 	}
18281bb76ff1Sjsg 
1829f005ef32Sjsg 	drm_edid_free(port->cached_edid);
18301bb76ff1Sjsg 
18311bb76ff1Sjsg 	/*
18321bb76ff1Sjsg 	 * we can't destroy the connector here, as we might be holding the
18331bb76ff1Sjsg 	 * mode_config.mutex from an EDID retrieval
18341bb76ff1Sjsg 	 */
18351bb76ff1Sjsg 	mutex_lock(&mgr->delayed_destroy_lock);
18361bb76ff1Sjsg 	list_add(&port->next, &mgr->destroy_port_list);
18371bb76ff1Sjsg 	mutex_unlock(&mgr->delayed_destroy_lock);
18381bb76ff1Sjsg 	queue_work(mgr->delayed_destroy_wq, &mgr->delayed_destroy_work);
18391bb76ff1Sjsg }
18401bb76ff1Sjsg 
18411bb76ff1Sjsg /**
18421bb76ff1Sjsg  * drm_dp_mst_topology_try_get_port() - Increment the topology refcount of a
18431bb76ff1Sjsg  * port unless it's zero
18441bb76ff1Sjsg  * @port: &struct drm_dp_mst_port to increment the topology refcount of
18451bb76ff1Sjsg  *
18461bb76ff1Sjsg  * Attempts to grab a topology reference to @port, if it hasn't yet been
18471bb76ff1Sjsg  * removed from the topology (i.e. &drm_dp_mst_port.topology_kref has reached
18481bb76ff1Sjsg  * 0). Holding a topology reference implies that a malloc reference will be
18491bb76ff1Sjsg  * held to @port as long as the user holds the topology reference.
18501bb76ff1Sjsg  *
18511bb76ff1Sjsg  * Care should be taken to ensure that the user has at least one malloc
18521bb76ff1Sjsg  * reference to @port. If you already have a topology reference to @port, you
18531bb76ff1Sjsg  * should use drm_dp_mst_topology_get_port() instead.
18541bb76ff1Sjsg  *
18551bb76ff1Sjsg  * See also:
18561bb76ff1Sjsg  * drm_dp_mst_topology_get_port()
18571bb76ff1Sjsg  * drm_dp_mst_topology_put_port()
18581bb76ff1Sjsg  *
18591bb76ff1Sjsg  * Returns:
18601bb76ff1Sjsg  * * 1: A topology reference was grabbed successfully
18611bb76ff1Sjsg  * * 0: @port is no longer in the topology, no reference was grabbed
18621bb76ff1Sjsg  */
18631bb76ff1Sjsg static int __must_check
18641bb76ff1Sjsg drm_dp_mst_topology_try_get_port(struct drm_dp_mst_port *port)
18651bb76ff1Sjsg {
18661bb76ff1Sjsg 	int ret;
18671bb76ff1Sjsg 
18681bb76ff1Sjsg 	topology_ref_history_lock(port->mgr);
18691bb76ff1Sjsg 	ret = kref_get_unless_zero(&port->topology_kref);
18701bb76ff1Sjsg 	if (ret) {
18711bb76ff1Sjsg 		drm_dbg(port->mgr->dev, "port %p (%d)\n", port, kref_read(&port->topology_kref));
18721bb76ff1Sjsg 		save_port_topology_ref(port, DRM_DP_MST_TOPOLOGY_REF_GET);
18731bb76ff1Sjsg 	}
18741bb76ff1Sjsg 
18751bb76ff1Sjsg 	topology_ref_history_unlock(port->mgr);
18761bb76ff1Sjsg 	return ret;
18771bb76ff1Sjsg }
18781bb76ff1Sjsg 
18791bb76ff1Sjsg /**
18801bb76ff1Sjsg  * drm_dp_mst_topology_get_port() - Increment the topology refcount of a port
18811bb76ff1Sjsg  * @port: The &struct drm_dp_mst_port to increment the topology refcount of
18821bb76ff1Sjsg  *
18831bb76ff1Sjsg  * Increments &drm_dp_mst_port.topology_kref without checking whether or
18841bb76ff1Sjsg  * not it's already reached 0. This is only valid to use in scenarios where
18851bb76ff1Sjsg  * you are already guaranteed to have at least one active topology reference
18861bb76ff1Sjsg  * to @port. Otherwise, drm_dp_mst_topology_try_get_port() must be used.
18871bb76ff1Sjsg  *
18881bb76ff1Sjsg  * See also:
18891bb76ff1Sjsg  * drm_dp_mst_topology_try_get_port()
18901bb76ff1Sjsg  * drm_dp_mst_topology_put_port()
18911bb76ff1Sjsg  */
18921bb76ff1Sjsg static void drm_dp_mst_topology_get_port(struct drm_dp_mst_port *port)
18931bb76ff1Sjsg {
18941bb76ff1Sjsg 	topology_ref_history_lock(port->mgr);
18951bb76ff1Sjsg 
18961bb76ff1Sjsg 	WARN_ON(kref_read(&port->topology_kref) == 0);
18971bb76ff1Sjsg 	kref_get(&port->topology_kref);
18981bb76ff1Sjsg 	drm_dbg(port->mgr->dev, "port %p (%d)\n", port, kref_read(&port->topology_kref));
18991bb76ff1Sjsg 	save_port_topology_ref(port, DRM_DP_MST_TOPOLOGY_REF_GET);
19001bb76ff1Sjsg 
19011bb76ff1Sjsg 	topology_ref_history_unlock(port->mgr);
19021bb76ff1Sjsg }
19031bb76ff1Sjsg 
19041bb76ff1Sjsg /**
19051bb76ff1Sjsg  * drm_dp_mst_topology_put_port() - release a topology reference to a port
19061bb76ff1Sjsg  * @port: The &struct drm_dp_mst_port to release the topology reference from
19071bb76ff1Sjsg  *
19081bb76ff1Sjsg  * Releases a topology reference from @port by decrementing
19091bb76ff1Sjsg  * &drm_dp_mst_port.topology_kref.
19101bb76ff1Sjsg  *
19111bb76ff1Sjsg  * See also:
19121bb76ff1Sjsg  * drm_dp_mst_topology_try_get_port()
19131bb76ff1Sjsg  * drm_dp_mst_topology_get_port()
19141bb76ff1Sjsg  */
19151bb76ff1Sjsg static void drm_dp_mst_topology_put_port(struct drm_dp_mst_port *port)
19161bb76ff1Sjsg {
19171bb76ff1Sjsg 	topology_ref_history_lock(port->mgr);
19181bb76ff1Sjsg 
19191bb76ff1Sjsg 	drm_dbg(port->mgr->dev, "port %p (%d)\n", port, kref_read(&port->topology_kref) - 1);
19201bb76ff1Sjsg 	save_port_topology_ref(port, DRM_DP_MST_TOPOLOGY_REF_PUT);
19211bb76ff1Sjsg 
19221bb76ff1Sjsg 	topology_ref_history_unlock(port->mgr);
19231bb76ff1Sjsg 	kref_put(&port->topology_kref, drm_dp_destroy_port);
19241bb76ff1Sjsg }
19251bb76ff1Sjsg 
19261bb76ff1Sjsg static struct drm_dp_mst_branch *
19271bb76ff1Sjsg drm_dp_mst_topology_get_mstb_validated_locked(struct drm_dp_mst_branch *mstb,
19281bb76ff1Sjsg 					      struct drm_dp_mst_branch *to_find)
19291bb76ff1Sjsg {
19301bb76ff1Sjsg 	struct drm_dp_mst_port *port;
19311bb76ff1Sjsg 	struct drm_dp_mst_branch *rmstb;
19321bb76ff1Sjsg 
19331bb76ff1Sjsg 	if (to_find == mstb)
19341bb76ff1Sjsg 		return mstb;
19351bb76ff1Sjsg 
19361bb76ff1Sjsg 	list_for_each_entry(port, &mstb->ports, next) {
19371bb76ff1Sjsg 		if (port->mstb) {
19381bb76ff1Sjsg 			rmstb = drm_dp_mst_topology_get_mstb_validated_locked(
19391bb76ff1Sjsg 			    port->mstb, to_find);
19401bb76ff1Sjsg 			if (rmstb)
19411bb76ff1Sjsg 				return rmstb;
19421bb76ff1Sjsg 		}
19431bb76ff1Sjsg 	}
19441bb76ff1Sjsg 	return NULL;
19451bb76ff1Sjsg }
19461bb76ff1Sjsg 
19471bb76ff1Sjsg static struct drm_dp_mst_branch *
19481bb76ff1Sjsg drm_dp_mst_topology_get_mstb_validated(struct drm_dp_mst_topology_mgr *mgr,
19491bb76ff1Sjsg 				       struct drm_dp_mst_branch *mstb)
19501bb76ff1Sjsg {
19511bb76ff1Sjsg 	struct drm_dp_mst_branch *rmstb = NULL;
19521bb76ff1Sjsg 
19531bb76ff1Sjsg 	mutex_lock(&mgr->lock);
19541bb76ff1Sjsg 	if (mgr->mst_primary) {
19551bb76ff1Sjsg 		rmstb = drm_dp_mst_topology_get_mstb_validated_locked(
19561bb76ff1Sjsg 		    mgr->mst_primary, mstb);
19571bb76ff1Sjsg 
19581bb76ff1Sjsg 		if (rmstb && !drm_dp_mst_topology_try_get_mstb(rmstb))
19591bb76ff1Sjsg 			rmstb = NULL;
19601bb76ff1Sjsg 	}
19611bb76ff1Sjsg 	mutex_unlock(&mgr->lock);
19621bb76ff1Sjsg 	return rmstb;
19631bb76ff1Sjsg }
19641bb76ff1Sjsg 
19651bb76ff1Sjsg static struct drm_dp_mst_port *
19661bb76ff1Sjsg drm_dp_mst_topology_get_port_validated_locked(struct drm_dp_mst_branch *mstb,
19671bb76ff1Sjsg 					      struct drm_dp_mst_port *to_find)
19681bb76ff1Sjsg {
19691bb76ff1Sjsg 	struct drm_dp_mst_port *port, *mport;
19701bb76ff1Sjsg 
19711bb76ff1Sjsg 	list_for_each_entry(port, &mstb->ports, next) {
19721bb76ff1Sjsg 		if (port == to_find)
19731bb76ff1Sjsg 			return port;
19741bb76ff1Sjsg 
19751bb76ff1Sjsg 		if (port->mstb) {
19761bb76ff1Sjsg 			mport = drm_dp_mst_topology_get_port_validated_locked(
19771bb76ff1Sjsg 			    port->mstb, to_find);
19781bb76ff1Sjsg 			if (mport)
19791bb76ff1Sjsg 				return mport;
19801bb76ff1Sjsg 		}
19811bb76ff1Sjsg 	}
19821bb76ff1Sjsg 	return NULL;
19831bb76ff1Sjsg }
19841bb76ff1Sjsg 
19851bb76ff1Sjsg static struct drm_dp_mst_port *
19861bb76ff1Sjsg drm_dp_mst_topology_get_port_validated(struct drm_dp_mst_topology_mgr *mgr,
19871bb76ff1Sjsg 				       struct drm_dp_mst_port *port)
19881bb76ff1Sjsg {
19891bb76ff1Sjsg 	struct drm_dp_mst_port *rport = NULL;
19901bb76ff1Sjsg 
19911bb76ff1Sjsg 	mutex_lock(&mgr->lock);
19921bb76ff1Sjsg 	if (mgr->mst_primary) {
19931bb76ff1Sjsg 		rport = drm_dp_mst_topology_get_port_validated_locked(
19941bb76ff1Sjsg 		    mgr->mst_primary, port);
19951bb76ff1Sjsg 
19961bb76ff1Sjsg 		if (rport && !drm_dp_mst_topology_try_get_port(rport))
19971bb76ff1Sjsg 			rport = NULL;
19981bb76ff1Sjsg 	}
19991bb76ff1Sjsg 	mutex_unlock(&mgr->lock);
20001bb76ff1Sjsg 	return rport;
20011bb76ff1Sjsg }
20021bb76ff1Sjsg 
20031bb76ff1Sjsg static struct drm_dp_mst_port *drm_dp_get_port(struct drm_dp_mst_branch *mstb, u8 port_num)
20041bb76ff1Sjsg {
20051bb76ff1Sjsg 	struct drm_dp_mst_port *port;
20061bb76ff1Sjsg 	int ret;
20071bb76ff1Sjsg 
20081bb76ff1Sjsg 	list_for_each_entry(port, &mstb->ports, next) {
20091bb76ff1Sjsg 		if (port->port_num == port_num) {
20101bb76ff1Sjsg 			ret = drm_dp_mst_topology_try_get_port(port);
20111bb76ff1Sjsg 			return ret ? port : NULL;
20121bb76ff1Sjsg 		}
20131bb76ff1Sjsg 	}
20141bb76ff1Sjsg 
20151bb76ff1Sjsg 	return NULL;
20161bb76ff1Sjsg }
20171bb76ff1Sjsg 
20181bb76ff1Sjsg /*
20191bb76ff1Sjsg  * Calculate a new RAD for this MST branch device:
20201bb76ff1Sjsg  * if the parent has an LCT of 2, it has 1 nibble of RAD;
20211bb76ff1Sjsg  * if the parent has an LCT of 3, it has 2 nibbles of RAD.
20221bb76ff1Sjsg  */
20231bb76ff1Sjsg static u8 drm_dp_calculate_rad(struct drm_dp_mst_port *port,
20241bb76ff1Sjsg 				 u8 *rad)
20251bb76ff1Sjsg {
20261bb76ff1Sjsg 	int parent_lct = port->parent->lct;
20271bb76ff1Sjsg 	int shift = 4;
20281bb76ff1Sjsg 	int idx = (parent_lct - 1) / 2;
20291bb76ff1Sjsg 
20301bb76ff1Sjsg 	if (parent_lct > 1) {
20311bb76ff1Sjsg 		memcpy(rad, port->parent->rad, idx + 1);
20321bb76ff1Sjsg 		shift = (parent_lct % 2) ? 4 : 0;
20331bb76ff1Sjsg 	} else
20341bb76ff1Sjsg 		rad[0] = 0;
20351bb76ff1Sjsg 
20361bb76ff1Sjsg 	rad[idx] |= port->port_num << shift;
20371bb76ff1Sjsg 	return parent_lct + 1;
20381bb76ff1Sjsg }
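
/*
 * Worked example: a port with port_num 3 whose parent has lct 2 and
 * rad[0] = 0x10 (the parent hangs off port 1 of the root) yields lct 3
 * and rad[0] = 0x13 - the upper nibble addresses the first hop, the
 * lower nibble the second.
 */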
20391bb76ff1Sjsg 
20401bb76ff1Sjsg static bool drm_dp_mst_is_end_device(u8 pdt, bool mcs)
20411bb76ff1Sjsg {
20421bb76ff1Sjsg 	switch (pdt) {
20431bb76ff1Sjsg 	case DP_PEER_DEVICE_DP_LEGACY_CONV:
20441bb76ff1Sjsg 	case DP_PEER_DEVICE_SST_SINK:
20451bb76ff1Sjsg 		return true;
20461bb76ff1Sjsg 	case DP_PEER_DEVICE_MST_BRANCHING:
20471bb76ff1Sjsg 		/* For sst branch device */
20481bb76ff1Sjsg 		if (!mcs)
20491bb76ff1Sjsg 			return true;
20501bb76ff1Sjsg 
20511bb76ff1Sjsg 		return false;
20521bb76ff1Sjsg 	}
20531bb76ff1Sjsg 	return true;
20541bb76ff1Sjsg }
20551bb76ff1Sjsg 
20561bb76ff1Sjsg static int
20571bb76ff1Sjsg drm_dp_port_set_pdt(struct drm_dp_mst_port *port, u8 new_pdt,
20581bb76ff1Sjsg 		    bool new_mcs)
20591bb76ff1Sjsg {
20601bb76ff1Sjsg 	struct drm_dp_mst_topology_mgr *mgr = port->mgr;
20611bb76ff1Sjsg 	struct drm_dp_mst_branch *mstb;
20621bb76ff1Sjsg 	u8 rad[8], lct;
20631bb76ff1Sjsg 	int ret = 0;
20641bb76ff1Sjsg 
20651bb76ff1Sjsg 	if (port->pdt == new_pdt && port->mcs == new_mcs)
20661bb76ff1Sjsg 		return 0;
20671bb76ff1Sjsg 
20681bb76ff1Sjsg 	/* Teardown the old pdt, if there is one */
20691bb76ff1Sjsg 	if (port->pdt != DP_PEER_DEVICE_NONE) {
20701bb76ff1Sjsg 		if (drm_dp_mst_is_end_device(port->pdt, port->mcs)) {
20711bb76ff1Sjsg 			/*
20721bb76ff1Sjsg 			 * If the new PDT would also have an i2c bus,
20731bb76ff1Sjsg 			 * don't bother with reregistering it
20741bb76ff1Sjsg 			 */
20751bb76ff1Sjsg 			if (new_pdt != DP_PEER_DEVICE_NONE &&
20761bb76ff1Sjsg 			    drm_dp_mst_is_end_device(new_pdt, new_mcs)) {
20771bb76ff1Sjsg 				port->pdt = new_pdt;
20781bb76ff1Sjsg 				port->mcs = new_mcs;
20791bb76ff1Sjsg 				return 0;
20801bb76ff1Sjsg 			}
20811bb76ff1Sjsg 
20821bb76ff1Sjsg 			/* remove i2c over sideband */
20831bb76ff1Sjsg 			drm_dp_mst_unregister_i2c_bus(port);
20841bb76ff1Sjsg 		} else {
20851bb76ff1Sjsg 			mutex_lock(&mgr->lock);
20861bb76ff1Sjsg 			drm_dp_mst_topology_put_mstb(port->mstb);
20871bb76ff1Sjsg 			port->mstb = NULL;
20881bb76ff1Sjsg 			mutex_unlock(&mgr->lock);
20891bb76ff1Sjsg 		}
20901bb76ff1Sjsg 	}
20911bb76ff1Sjsg 
20921bb76ff1Sjsg 	port->pdt = new_pdt;
20931bb76ff1Sjsg 	port->mcs = new_mcs;
20941bb76ff1Sjsg 
20951bb76ff1Sjsg 	if (port->pdt != DP_PEER_DEVICE_NONE) {
20961bb76ff1Sjsg 		if (drm_dp_mst_is_end_device(port->pdt, port->mcs)) {
20971bb76ff1Sjsg 			/* add i2c over sideband */
20981bb76ff1Sjsg 			ret = drm_dp_mst_register_i2c_bus(port);
20991bb76ff1Sjsg 		} else {
21001bb76ff1Sjsg 			lct = drm_dp_calculate_rad(port, rad);
21011bb76ff1Sjsg 			mstb = drm_dp_add_mst_branch_device(lct, rad);
21021bb76ff1Sjsg 			if (!mstb) {
21031bb76ff1Sjsg 				ret = -ENOMEM;
21041bb76ff1Sjsg 				drm_err(mgr->dev, "Failed to create MSTB for port %p\n", port);
21051bb76ff1Sjsg 				goto out;
21061bb76ff1Sjsg 			}
21071bb76ff1Sjsg 
21081bb76ff1Sjsg 			mutex_lock(&mgr->lock);
21091bb76ff1Sjsg 			port->mstb = mstb;
21101bb76ff1Sjsg 			mstb->mgr = port->mgr;
21111bb76ff1Sjsg 			mstb->port_parent = port;
21121bb76ff1Sjsg 
21131bb76ff1Sjsg 			/*
21141bb76ff1Sjsg 			 * Make sure this port's memory allocation stays
21151bb76ff1Sjsg 			 * around until its child MSTB releases it
21161bb76ff1Sjsg 			 */
21171bb76ff1Sjsg 			drm_dp_mst_get_port_malloc(port);
21181bb76ff1Sjsg 			mutex_unlock(&mgr->lock);
21191bb76ff1Sjsg 
21201bb76ff1Sjsg 			/* And make sure we send a link address for this */
21211bb76ff1Sjsg 			ret = 1;
21221bb76ff1Sjsg 		}
21231bb76ff1Sjsg 	}
21241bb76ff1Sjsg 
21251bb76ff1Sjsg out:
21261bb76ff1Sjsg 	if (ret < 0)
21271bb76ff1Sjsg 		port->pdt = DP_PEER_DEVICE_NONE;
21281bb76ff1Sjsg 	return ret;
21291bb76ff1Sjsg }
21301bb76ff1Sjsg 
21311bb76ff1Sjsg /**
21321bb76ff1Sjsg  * drm_dp_mst_dpcd_read() - read a series of bytes from the DPCD via sideband
21331bb76ff1Sjsg  * @aux: Fake sideband AUX CH
21341bb76ff1Sjsg  * @offset: address of the (first) register to read
21351bb76ff1Sjsg  * @buffer: buffer to store the register values
21361bb76ff1Sjsg  * @size: number of bytes in @buffer
21371bb76ff1Sjsg  *
21381bb76ff1Sjsg  * Performs the same functionality for remote devices via
21391bb76ff1Sjsg  * sideband messaging as drm_dp_dpcd_read() does for local
21401bb76ff1Sjsg  * devices via actual AUX CH.
21411bb76ff1Sjsg  *
21421bb76ff1Sjsg  * Return: number of bytes read on success, negative error code on failure.
21431bb76ff1Sjsg  */
21441bb76ff1Sjsg ssize_t drm_dp_mst_dpcd_read(struct drm_dp_aux *aux,
21451bb76ff1Sjsg 			     unsigned int offset, void *buffer, size_t size)
21461bb76ff1Sjsg {
21471bb76ff1Sjsg 	struct drm_dp_mst_port *port = container_of(aux, struct drm_dp_mst_port,
21481bb76ff1Sjsg 						    aux);
21491bb76ff1Sjsg 
21501bb76ff1Sjsg 	return drm_dp_send_dpcd_read(port->mgr, port,
21511bb76ff1Sjsg 				     offset, size, buffer);
21521bb76ff1Sjsg }
21531bb76ff1Sjsg 
21541bb76ff1Sjsg /**
21551bb76ff1Sjsg  * drm_dp_mst_dpcd_write() - write a series of bytes to the DPCD via sideband
21561bb76ff1Sjsg  * @aux: Fake sideband AUX CH
21571bb76ff1Sjsg  * @offset: address of the (first) register to write
21581bb76ff1Sjsg  * @buffer: buffer containing the values to write
21591bb76ff1Sjsg  * @size: number of bytes in @buffer
21601bb76ff1Sjsg  *
21611bb76ff1Sjsg  * Performs the same functionality for remote devices via
21621bb76ff1Sjsg  * sideband messaging as drm_dp_dpcd_write() does for local
21631bb76ff1Sjsg  * devices via actual AUX CH.
21641bb76ff1Sjsg  *
21651bb76ff1Sjsg  * Return: number of bytes written on success, negative error code on failure.
21661bb76ff1Sjsg  */
21671bb76ff1Sjsg ssize_t drm_dp_mst_dpcd_write(struct drm_dp_aux *aux,
21681bb76ff1Sjsg 			      unsigned int offset, void *buffer, size_t size)
21691bb76ff1Sjsg {
21701bb76ff1Sjsg 	struct drm_dp_mst_port *port = container_of(aux, struct drm_dp_mst_port,
21711bb76ff1Sjsg 						    aux);
21721bb76ff1Sjsg 
21731bb76ff1Sjsg 	return drm_dp_send_dpcd_write(port->mgr, port,
21741bb76ff1Sjsg 				      offset, size, buffer);
21751bb76ff1Sjsg }
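
/*
 * A minimal usage sketch of the remote DPCD helpers (reading the sink's
 * DPCD revision through a port's fake AUX channel; error handling trimmed,
 * and dev stands in for the driver's &struct drm_device):
 *
 *	u8 rev;
 *
 *	if (drm_dp_mst_dpcd_read(&port->aux, DP_DPCD_REV, &rev, 1) == 1)
 *		drm_dbg_kms(dev, "remote DPCD rev 0x%02x\n", rev);
 */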
21761bb76ff1Sjsg 
21771bb76ff1Sjsg static int drm_dp_check_mstb_guid(struct drm_dp_mst_branch *mstb, u8 *guid)
21781bb76ff1Sjsg {
21791bb76ff1Sjsg 	int ret = 0;
21801bb76ff1Sjsg 
21811bb76ff1Sjsg 	memcpy(mstb->guid, guid, 16);
21821bb76ff1Sjsg 
21831bb76ff1Sjsg 	if (!drm_dp_validate_guid(mstb->mgr, mstb->guid)) {
21841bb76ff1Sjsg 		if (mstb->port_parent) {
21851bb76ff1Sjsg 			ret = drm_dp_send_dpcd_write(mstb->mgr,
21861bb76ff1Sjsg 						     mstb->port_parent,
21871bb76ff1Sjsg 						     DP_GUID, 16, mstb->guid);
21881bb76ff1Sjsg 		} else {
21891bb76ff1Sjsg 			ret = drm_dp_dpcd_write(mstb->mgr->aux,
21901bb76ff1Sjsg 						DP_GUID, mstb->guid, 16);
21911bb76ff1Sjsg 		}
21921bb76ff1Sjsg 	}
21931bb76ff1Sjsg 
21941bb76ff1Sjsg 	if (ret < 16 && ret > 0)
21951bb76ff1Sjsg 		return -EPROTO;
21961bb76ff1Sjsg 
21971bb76ff1Sjsg 	return ret == 16 ? 0 : ret;
21981bb76ff1Sjsg }
21991bb76ff1Sjsg 
22001bb76ff1Sjsg static void build_mst_prop_path(const struct drm_dp_mst_branch *mstb,
22011bb76ff1Sjsg 				int pnum,
22021bb76ff1Sjsg 				char *proppath,
22031bb76ff1Sjsg 				size_t proppath_size)
22041bb76ff1Sjsg {
22051bb76ff1Sjsg 	int i;
22061bb76ff1Sjsg 	char temp[8];
22071bb76ff1Sjsg 
22081bb76ff1Sjsg 	snprintf(proppath, proppath_size, "mst:%d", mstb->mgr->conn_base_id);
22091bb76ff1Sjsg 	for (i = 0; i < (mstb->lct - 1); i++) {
22101bb76ff1Sjsg 		int shift = (i % 2) ? 0 : 4;
22111bb76ff1Sjsg 		int port_num = (mstb->rad[i / 2] >> shift) & 0xf;
22121bb76ff1Sjsg 
22131bb76ff1Sjsg 		snprintf(temp, sizeof(temp), "-%d", port_num);
22141bb76ff1Sjsg 		strlcat(proppath, temp, proppath_size);
22151bb76ff1Sjsg 	}
22161bb76ff1Sjsg 	snprintf(temp, sizeof(temp), "-%d", pnum);
22171bb76ff1Sjsg 	strlcat(proppath, temp, proppath_size);
22181bb76ff1Sjsg }
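
/*
 * Worked example: with conn_base_id 30, a branch at lct 2 reached through
 * port 1 of the root (rad[0] = 0x10), and pnum 8, this produces the
 * connector path property "mst:30-1-8".
 */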
22191bb76ff1Sjsg 
22201bb76ff1Sjsg /**
22211bb76ff1Sjsg  * drm_dp_mst_connector_late_register() - Late MST connector registration
22221bb76ff1Sjsg  * @connector: The MST connector
22231bb76ff1Sjsg  * @port: The MST port for this connector
22241bb76ff1Sjsg  *
22251bb76ff1Sjsg  * Helper to register the remote aux device for this MST port. Drivers should
22261bb76ff1Sjsg  * call this from their mst connector's late_register hook to enable MST aux
22271bb76ff1Sjsg  * devices.
22281bb76ff1Sjsg  *
22291bb76ff1Sjsg  * Return: 0 on success, negative error code on failure.
22301bb76ff1Sjsg  */
22311bb76ff1Sjsg int drm_dp_mst_connector_late_register(struct drm_connector *connector,
22321bb76ff1Sjsg 				       struct drm_dp_mst_port *port)
22331bb76ff1Sjsg {
22341bb76ff1Sjsg #ifdef __linux__
22351bb76ff1Sjsg 	drm_dbg_kms(port->mgr->dev, "registering %s remote bus for %s\n",
22361bb76ff1Sjsg 		    port->aux.name, connector->kdev->kobj.name);
22371bb76ff1Sjsg #else
22381bb76ff1Sjsg 	drm_dbg_kms(port->mgr->dev, "registering %s remote bus\n",
22391bb76ff1Sjsg 		    port->aux.name);
22401bb76ff1Sjsg #endif
22411bb76ff1Sjsg 
22421bb76ff1Sjsg 	port->aux.dev = connector->kdev;
22431bb76ff1Sjsg 	return drm_dp_aux_register_devnode(&port->aux);
22441bb76ff1Sjsg }
22451bb76ff1Sjsg EXPORT_SYMBOL(drm_dp_mst_connector_late_register);
22461bb76ff1Sjsg 
22471bb76ff1Sjsg /**
22481bb76ff1Sjsg  * drm_dp_mst_connector_early_unregister() - Early MST connector unregistration
22491bb76ff1Sjsg  * @connector: The MST connector
22501bb76ff1Sjsg  * @port: The MST port for this connector
22511bb76ff1Sjsg  *
22521bb76ff1Sjsg  * Helper to unregister the remote aux device for this MST port, registered by
22531bb76ff1Sjsg  * drm_dp_mst_connector_late_register(). Drivers should call this from their mst
22541bb76ff1Sjsg  * connector's early_unregister hook.
22551bb76ff1Sjsg  */
22561bb76ff1Sjsg void drm_dp_mst_connector_early_unregister(struct drm_connector *connector,
22571bb76ff1Sjsg 					   struct drm_dp_mst_port *port)
22581bb76ff1Sjsg {
22591bb76ff1Sjsg #ifdef __linux__
22601bb76ff1Sjsg 	drm_dbg_kms(port->mgr->dev, "unregistering %s remote bus for %s\n",
22611bb76ff1Sjsg 		    port->aux.name, connector->kdev->kobj.name);
22621bb76ff1Sjsg #else
22631bb76ff1Sjsg 	drm_dbg_kms(port->mgr->dev, "unregistering %s remote bus\n",
22641bb76ff1Sjsg 		    port->aux.name);
22651bb76ff1Sjsg #endif
22661bb76ff1Sjsg 	drm_dp_aux_unregister_devnode(&port->aux);
22671bb76ff1Sjsg }
22681bb76ff1Sjsg EXPORT_SYMBOL(drm_dp_mst_connector_early_unregister);
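
/*
 * A minimal sketch of wiring both helpers into a driver's MST connector
 * funcs (to_drv_mst_connector() and the drv_* names are hypothetical):
 *
 *	static int drv_late_register(struct drm_connector *connector)
 *	{
 *		struct drv_mst_connector *c = to_drv_mst_connector(connector);
 *
 *		return drm_dp_mst_connector_late_register(connector, c->port);
 *	}
 *
 *	static void drv_early_unregister(struct drm_connector *connector)
 *	{
 *		struct drv_mst_connector *c = to_drv_mst_connector(connector);
 *
 *		drm_dp_mst_connector_early_unregister(connector, c->port);
 *	}
 *
 *	static const struct drm_connector_funcs drv_mst_connector_funcs = {
 *		// other funcs elided ...
 *		.late_register = drv_late_register,
 *		.early_unregister = drv_early_unregister,
 *	};
 */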
22691bb76ff1Sjsg 
22701bb76ff1Sjsg static void
22711bb76ff1Sjsg drm_dp_mst_port_add_connector(struct drm_dp_mst_branch *mstb,
22721bb76ff1Sjsg 			      struct drm_dp_mst_port *port)
22731bb76ff1Sjsg {
22741bb76ff1Sjsg 	struct drm_dp_mst_topology_mgr *mgr = port->mgr;
22751bb76ff1Sjsg 	char proppath[255];
22761bb76ff1Sjsg 	int ret;
22771bb76ff1Sjsg 
22781bb76ff1Sjsg 	build_mst_prop_path(mstb, port->port_num, proppath, sizeof(proppath));
22791bb76ff1Sjsg 	port->connector = mgr->cbs->add_connector(mgr, port, proppath);
22801bb76ff1Sjsg 	if (!port->connector) {
22811bb76ff1Sjsg 		ret = -ENOMEM;
22821bb76ff1Sjsg 		goto error;
22831bb76ff1Sjsg 	}
22841bb76ff1Sjsg 
22851bb76ff1Sjsg 	if (port->pdt != DP_PEER_DEVICE_NONE &&
22861bb76ff1Sjsg 	    drm_dp_mst_is_end_device(port->pdt, port->mcs) &&
22871bb76ff1Sjsg 	    port->port_num >= DP_MST_LOGICAL_PORT_0)
2288f005ef32Sjsg 		port->cached_edid = drm_edid_read_ddc(port->connector,
22891bb76ff1Sjsg 						      &port->aux.ddc);
22901bb76ff1Sjsg 
22911bb76ff1Sjsg 	drm_connector_register(port->connector);
22921bb76ff1Sjsg 	return;
22931bb76ff1Sjsg 
22941bb76ff1Sjsg error:
22951bb76ff1Sjsg 	drm_err(mgr->dev, "Failed to create connector for port %p: %d\n", port, ret);
22961bb76ff1Sjsg }
22971bb76ff1Sjsg 
22981bb76ff1Sjsg /*
22991bb76ff1Sjsg  * Drop a topology reference, and unlink the port from the in-memory topology
23001bb76ff1Sjsg  * layout
23011bb76ff1Sjsg  */
23021bb76ff1Sjsg static void
23031bb76ff1Sjsg drm_dp_mst_topology_unlink_port(struct drm_dp_mst_topology_mgr *mgr,
23041bb76ff1Sjsg 				struct drm_dp_mst_port *port)
23051bb76ff1Sjsg {
23061bb76ff1Sjsg 	mutex_lock(&mgr->lock);
23071bb76ff1Sjsg 	port->parent->num_ports--;
23081bb76ff1Sjsg 	list_del(&port->next);
23091bb76ff1Sjsg 	mutex_unlock(&mgr->lock);
23101bb76ff1Sjsg 	drm_dp_mst_topology_put_port(port);
23111bb76ff1Sjsg }
23121bb76ff1Sjsg 
23131bb76ff1Sjsg static struct drm_dp_mst_port *
23141bb76ff1Sjsg drm_dp_mst_add_port(struct drm_device *dev,
23151bb76ff1Sjsg 		    struct drm_dp_mst_topology_mgr *mgr,
23161bb76ff1Sjsg 		    struct drm_dp_mst_branch *mstb, u8 port_number)
23171bb76ff1Sjsg {
23181bb76ff1Sjsg 	struct drm_dp_mst_port *port = kzalloc(sizeof(*port), GFP_KERNEL);
23191bb76ff1Sjsg 
23201bb76ff1Sjsg 	if (!port)
23211bb76ff1Sjsg 		return NULL;
23221bb76ff1Sjsg 
23231bb76ff1Sjsg 	kref_init(&port->topology_kref);
23241bb76ff1Sjsg 	kref_init(&port->malloc_kref);
23251bb76ff1Sjsg 	port->parent = mstb;
23261bb76ff1Sjsg 	port->port_num = port_number;
23271bb76ff1Sjsg 	port->mgr = mgr;
23281bb76ff1Sjsg 	port->aux.name = "DPMST";
23291bb76ff1Sjsg 	port->aux.dev = dev->dev;
23301bb76ff1Sjsg 	port->aux.is_remote = true;
23311bb76ff1Sjsg 
23321bb76ff1Sjsg 	/* initialize the MST downstream port's AUX crc work queue */
23331bb76ff1Sjsg 	port->aux.drm_dev = dev;
23341bb76ff1Sjsg 	drm_dp_remote_aux_init(&port->aux);
23351bb76ff1Sjsg 
23361bb76ff1Sjsg 	/*
23371bb76ff1Sjsg 	 * Make sure the memory allocation for our parent branch stays
23381bb76ff1Sjsg 	 * around until our own memory allocation is released
23391bb76ff1Sjsg 	 */
23401bb76ff1Sjsg 	drm_dp_mst_get_mstb_malloc(mstb);
23411bb76ff1Sjsg 
23421bb76ff1Sjsg 	return port;
23431bb76ff1Sjsg }
23441bb76ff1Sjsg 
23451bb76ff1Sjsg static int
23461bb76ff1Sjsg drm_dp_mst_handle_link_address_port(struct drm_dp_mst_branch *mstb,
23471bb76ff1Sjsg 				    struct drm_device *dev,
23481bb76ff1Sjsg 				    struct drm_dp_link_addr_reply_port *port_msg)
23491bb76ff1Sjsg {
23501bb76ff1Sjsg 	struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
23511bb76ff1Sjsg 	struct drm_dp_mst_port *port;
23521bb76ff1Sjsg 	int old_ddps = 0, ret;
23531bb76ff1Sjsg 	u8 new_pdt = DP_PEER_DEVICE_NONE;
23541bb76ff1Sjsg 	bool new_mcs = false;
23551bb76ff1Sjsg 	bool created = false, send_link_addr = false, changed = false;
23561bb76ff1Sjsg 
23571bb76ff1Sjsg 	port = drm_dp_get_port(mstb, port_msg->port_number);
23581bb76ff1Sjsg 	if (!port) {
23591bb76ff1Sjsg 		port = drm_dp_mst_add_port(dev, mgr, mstb,
23601bb76ff1Sjsg 					   port_msg->port_number);
23611bb76ff1Sjsg 		if (!port)
23621bb76ff1Sjsg 			return -ENOMEM;
23631bb76ff1Sjsg 		created = true;
23641bb76ff1Sjsg 		changed = true;
23651bb76ff1Sjsg 	} else if (!port->input && port_msg->input_port && port->connector) {
23661bb76ff1Sjsg 		/* Since port->connector can't be changed here, we create a
23671bb76ff1Sjsg 		 * new port if input_port changes from 0 to 1
23681bb76ff1Sjsg 		 */
23691bb76ff1Sjsg 		drm_dp_mst_topology_unlink_port(mgr, port);
23701bb76ff1Sjsg 		drm_dp_mst_topology_put_port(port);
23711bb76ff1Sjsg 		port = drm_dp_mst_add_port(dev, mgr, mstb,
23721bb76ff1Sjsg 					   port_msg->port_number);
23731bb76ff1Sjsg 		if (!port)
23741bb76ff1Sjsg 			return -ENOMEM;
23751bb76ff1Sjsg 		changed = true;
23761bb76ff1Sjsg 		created = true;
23771bb76ff1Sjsg 	} else if (port->input && !port_msg->input_port) {
23781bb76ff1Sjsg 		changed = true;
23791bb76ff1Sjsg 	} else if (port->connector) {
23801bb76ff1Sjsg 		/* We're updating a port that's exposed to userspace, so do it
23811bb76ff1Sjsg 		 * under lock
23821bb76ff1Sjsg 		 */
23831bb76ff1Sjsg 		drm_modeset_lock(&mgr->base.lock, NULL);
23841bb76ff1Sjsg 
23851bb76ff1Sjsg 		old_ddps = port->ddps;
23861bb76ff1Sjsg 		changed = port->ddps != port_msg->ddps ||
23871bb76ff1Sjsg 			(port->ddps &&
23881bb76ff1Sjsg 			 (port->ldps != port_msg->legacy_device_plug_status ||
23891bb76ff1Sjsg 			  port->dpcd_rev != port_msg->dpcd_revision ||
23901bb76ff1Sjsg 			  port->mcs != port_msg->mcs ||
23911bb76ff1Sjsg 			  port->pdt != port_msg->peer_device_type ||
23921bb76ff1Sjsg 			  port->num_sdp_stream_sinks !=
23931bb76ff1Sjsg 			  port_msg->num_sdp_stream_sinks));
23941bb76ff1Sjsg 	}
23951bb76ff1Sjsg 
23961bb76ff1Sjsg 	port->input = port_msg->input_port;
23971bb76ff1Sjsg 	if (!port->input)
23981bb76ff1Sjsg 		new_pdt = port_msg->peer_device_type;
23991bb76ff1Sjsg 	new_mcs = port_msg->mcs;
24001bb76ff1Sjsg 	port->ddps = port_msg->ddps;
24011bb76ff1Sjsg 	port->ldps = port_msg->legacy_device_plug_status;
24021bb76ff1Sjsg 	port->dpcd_rev = port_msg->dpcd_revision;
24031bb76ff1Sjsg 	port->num_sdp_streams = port_msg->num_sdp_streams;
24041bb76ff1Sjsg 	port->num_sdp_stream_sinks = port_msg->num_sdp_stream_sinks;
24051bb76ff1Sjsg 
24061bb76ff1Sjsg 	/* manage mstb port lists with the mgr lock - take a reference
24071bb76ff1Sjsg 	 * for this list */
24081bb76ff1Sjsg 	if (created) {
24091bb76ff1Sjsg 		mutex_lock(&mgr->lock);
24101bb76ff1Sjsg 		drm_dp_mst_topology_get_port(port);
24111bb76ff1Sjsg 		list_add(&port->next, &mstb->ports);
24121bb76ff1Sjsg 		mstb->num_ports++;
24131bb76ff1Sjsg 		mutex_unlock(&mgr->lock);
24141bb76ff1Sjsg 	}
24151bb76ff1Sjsg 
24161bb76ff1Sjsg 	/*
24171bb76ff1Sjsg 	 * Reprobe PBN caps on both hotplug, and when re-probing the link
24181bb76ff1Sjsg 	 * for our parent mstb
24191bb76ff1Sjsg 	 */
24201bb76ff1Sjsg 	if (old_ddps != port->ddps || !created) {
24211bb76ff1Sjsg 		if (port->ddps && !port->input) {
24221bb76ff1Sjsg 			ret = drm_dp_send_enum_path_resources(mgr, mstb,
24231bb76ff1Sjsg 							      port);
24241bb76ff1Sjsg 			if (ret == 1)
24251bb76ff1Sjsg 				changed = true;
24261bb76ff1Sjsg 		} else {
24271bb76ff1Sjsg 			port->full_pbn = 0;
24281bb76ff1Sjsg 		}
24291bb76ff1Sjsg 	}
24301bb76ff1Sjsg 
24311bb76ff1Sjsg 	ret = drm_dp_port_set_pdt(port, new_pdt, new_mcs);
24321bb76ff1Sjsg 	if (ret == 1) {
24331bb76ff1Sjsg 		send_link_addr = true;
24341bb76ff1Sjsg 	} else if (ret < 0) {
24351bb76ff1Sjsg 		drm_err(dev, "Failed to change PDT on port %p: %d\n", port, ret);
24361bb76ff1Sjsg 		goto fail;
24371bb76ff1Sjsg 	}
24381bb76ff1Sjsg 
24391bb76ff1Sjsg 	/*
24401bb76ff1Sjsg 	 * If this port wasn't just created, then we're reprobing because
24411bb76ff1Sjsg 	 * we're coming out of suspend. In this case, always resend the link
24421bb76ff1Sjsg 	 * address if there's an MSTB on this port
24431bb76ff1Sjsg 	 */
24441bb76ff1Sjsg 	if (!created && port->pdt == DP_PEER_DEVICE_MST_BRANCHING &&
24451bb76ff1Sjsg 	    port->mcs)
24461bb76ff1Sjsg 		send_link_addr = true;
24471bb76ff1Sjsg 
24481bb76ff1Sjsg 	if (port->connector)
24491bb76ff1Sjsg 		drm_modeset_unlock(&mgr->base.lock);
24501bb76ff1Sjsg 	else if (!port->input)
24511bb76ff1Sjsg 		drm_dp_mst_port_add_connector(mstb, port);
24521bb76ff1Sjsg 
24531bb76ff1Sjsg 	if (send_link_addr && port->mstb) {
24541bb76ff1Sjsg 		ret = drm_dp_send_link_address(mgr, port->mstb);
24551bb76ff1Sjsg 		if (ret == 1) /* MSTB below us changed */
24561bb76ff1Sjsg 			changed = true;
24571bb76ff1Sjsg 		else if (ret < 0)
24581bb76ff1Sjsg 			goto fail_put;
24591bb76ff1Sjsg 	}
24601bb76ff1Sjsg 
24611bb76ff1Sjsg 	/* put reference to this port */
24621bb76ff1Sjsg 	drm_dp_mst_topology_put_port(port);
24631bb76ff1Sjsg 	return changed;
24641bb76ff1Sjsg 
24651bb76ff1Sjsg fail:
24661bb76ff1Sjsg 	drm_dp_mst_topology_unlink_port(mgr, port);
24671bb76ff1Sjsg 	if (port->connector)
24681bb76ff1Sjsg 		drm_modeset_unlock(&mgr->base.lock);
24691bb76ff1Sjsg fail_put:
24701bb76ff1Sjsg 	drm_dp_mst_topology_put_port(port);
24711bb76ff1Sjsg 	return ret;
24721bb76ff1Sjsg }
24731bb76ff1Sjsg 
24741bb76ff1Sjsg static int
24751bb76ff1Sjsg drm_dp_mst_handle_conn_stat(struct drm_dp_mst_branch *mstb,
24761bb76ff1Sjsg 			    struct drm_dp_connection_status_notify *conn_stat)
24771bb76ff1Sjsg {
24781bb76ff1Sjsg 	struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
24791bb76ff1Sjsg 	struct drm_dp_mst_port *port;
24801bb76ff1Sjsg 	int old_ddps, ret;
24811bb76ff1Sjsg 	u8 new_pdt;
24821bb76ff1Sjsg 	bool new_mcs;
24831bb76ff1Sjsg 	bool dowork = false, create_connector = false;
24841bb76ff1Sjsg 
24851bb76ff1Sjsg 	port = drm_dp_get_port(mstb, conn_stat->port_number);
24861bb76ff1Sjsg 	if (!port)
24871bb76ff1Sjsg 		return 0;
24881bb76ff1Sjsg 
24891bb76ff1Sjsg 	if (port->connector) {
24901bb76ff1Sjsg 		if (!port->input && conn_stat->input_port) {
24911bb76ff1Sjsg 			/*
24921bb76ff1Sjsg 			 * We can't remove a connector from an already exposed
24931bb76ff1Sjsg 			 * port, so just throw the port out and make sure we
			 * reprobe the link address of its parent MSTB
24951bb76ff1Sjsg 			 */
24961bb76ff1Sjsg 			drm_dp_mst_topology_unlink_port(mgr, port);
24971bb76ff1Sjsg 			mstb->link_address_sent = false;
24981bb76ff1Sjsg 			dowork = true;
24991bb76ff1Sjsg 			goto out;
25001bb76ff1Sjsg 		}
25011bb76ff1Sjsg 
25021bb76ff1Sjsg 		/* Locking is only needed if the port's exposed to userspace */
25031bb76ff1Sjsg 		drm_modeset_lock(&mgr->base.lock, NULL);
25041bb76ff1Sjsg 	} else if (port->input && !conn_stat->input_port) {
25051bb76ff1Sjsg 		create_connector = true;
25061bb76ff1Sjsg 		/* Reprobe link address so we get num_sdp_streams */
25071bb76ff1Sjsg 		mstb->link_address_sent = false;
25081bb76ff1Sjsg 		dowork = true;
25091bb76ff1Sjsg 	}
25101bb76ff1Sjsg 
25111bb76ff1Sjsg 	old_ddps = port->ddps;
25121bb76ff1Sjsg 	port->input = conn_stat->input_port;
25131bb76ff1Sjsg 	port->ldps = conn_stat->legacy_device_plug_status;
25141bb76ff1Sjsg 	port->ddps = conn_stat->displayport_device_plug_status;
25151bb76ff1Sjsg 
25161bb76ff1Sjsg 	if (old_ddps != port->ddps) {
25171bb76ff1Sjsg 		if (port->ddps && !port->input)
25181bb76ff1Sjsg 			drm_dp_send_enum_path_resources(mgr, mstb, port);
25191bb76ff1Sjsg 		else
25201bb76ff1Sjsg 			port->full_pbn = 0;
25211bb76ff1Sjsg 	}
25221bb76ff1Sjsg 
25231bb76ff1Sjsg 	new_pdt = port->input ? DP_PEER_DEVICE_NONE : conn_stat->peer_device_type;
25241bb76ff1Sjsg 	new_mcs = conn_stat->message_capability_status;
25251bb76ff1Sjsg 	ret = drm_dp_port_set_pdt(port, new_pdt, new_mcs);
25261bb76ff1Sjsg 	if (ret == 1) {
25271bb76ff1Sjsg 		dowork = true;
25281bb76ff1Sjsg 	} else if (ret < 0) {
25291bb76ff1Sjsg 		drm_err(mgr->dev, "Failed to change PDT for port %p: %d\n", port, ret);
25301bb76ff1Sjsg 		dowork = false;
25311bb76ff1Sjsg 	}
25321bb76ff1Sjsg 
25331bb76ff1Sjsg 	if (port->connector)
25341bb76ff1Sjsg 		drm_modeset_unlock(&mgr->base.lock);
25351bb76ff1Sjsg 	else if (create_connector)
25361bb76ff1Sjsg 		drm_dp_mst_port_add_connector(mstb, port);
25371bb76ff1Sjsg 
25381bb76ff1Sjsg out:
25391bb76ff1Sjsg 	drm_dp_mst_topology_put_port(port);
25401bb76ff1Sjsg 	return dowork;
25411bb76ff1Sjsg }
25421bb76ff1Sjsg 
25431bb76ff1Sjsg static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_topology_mgr *mgr,
25441bb76ff1Sjsg 							       u8 lct, u8 *rad)
25451bb76ff1Sjsg {
25461bb76ff1Sjsg 	struct drm_dp_mst_branch *mstb;
25471bb76ff1Sjsg 	struct drm_dp_mst_port *port;
25481bb76ff1Sjsg 	int i, ret;
	/* find the branch device by iterating down the topology */
25501bb76ff1Sjsg 
25511bb76ff1Sjsg 	mutex_lock(&mgr->lock);
25521bb76ff1Sjsg 	mstb = mgr->mst_primary;
25531bb76ff1Sjsg 
25541bb76ff1Sjsg 	if (!mstb)
25551bb76ff1Sjsg 		goto out;
25561bb76ff1Sjsg 
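	/*
	 * The RAD packs one 4-bit port number per hop below the primary
	 * branch, two hops per byte with the upper nibble first: for
	 * lct == 3, rad[0] == 0x21 means port 2 on the first branch and
	 * port 1 on the branch below it.
	 */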
25571bb76ff1Sjsg 	for (i = 0; i < lct - 1; i++) {
25581bb76ff1Sjsg 		int shift = (i % 2) ? 0 : 4;
25591bb76ff1Sjsg 		int port_num = (rad[i / 2] >> shift) & 0xf;
25601bb76ff1Sjsg 
25611bb76ff1Sjsg 		list_for_each_entry(port, &mstb->ports, next) {
25621bb76ff1Sjsg 			if (port->port_num == port_num) {
25631bb76ff1Sjsg 				mstb = port->mstb;
25641bb76ff1Sjsg 				if (!mstb) {
25651bb76ff1Sjsg 					drm_err(mgr->dev,
25661bb76ff1Sjsg 						"failed to lookup MSTB with lct %d, rad %02x\n",
25671bb76ff1Sjsg 						lct, rad[0]);
25681bb76ff1Sjsg 					goto out;
25691bb76ff1Sjsg 				}
25701bb76ff1Sjsg 
25711bb76ff1Sjsg 				break;
25721bb76ff1Sjsg 			}
25731bb76ff1Sjsg 		}
25741bb76ff1Sjsg 	}
25751bb76ff1Sjsg 	ret = drm_dp_mst_topology_try_get_mstb(mstb);
25761bb76ff1Sjsg 	if (!ret)
25771bb76ff1Sjsg 		mstb = NULL;
25781bb76ff1Sjsg out:
25791bb76ff1Sjsg 	mutex_unlock(&mgr->lock);
25801bb76ff1Sjsg 	return mstb;
25811bb76ff1Sjsg }
25821bb76ff1Sjsg 
25831bb76ff1Sjsg static struct drm_dp_mst_branch *get_mst_branch_device_by_guid_helper(
25841bb76ff1Sjsg 	struct drm_dp_mst_branch *mstb,
25851bb76ff1Sjsg 	const uint8_t *guid)
25861bb76ff1Sjsg {
25871bb76ff1Sjsg 	struct drm_dp_mst_branch *found_mstb;
25881bb76ff1Sjsg 	struct drm_dp_mst_port *port;
25891bb76ff1Sjsg 
25902fb2b219Sjsg 	if (!mstb)
25912fb2b219Sjsg 		return NULL;
25922fb2b219Sjsg 
25931bb76ff1Sjsg 	if (memcmp(mstb->guid, guid, 16) == 0)
25941bb76ff1Sjsg 		return mstb;
25971bb76ff1Sjsg 	list_for_each_entry(port, &mstb->ports, next) {
25981bb76ff1Sjsg 		found_mstb = get_mst_branch_device_by_guid_helper(port->mstb, guid);
25991bb76ff1Sjsg 
26001bb76ff1Sjsg 		if (found_mstb)
26011bb76ff1Sjsg 			return found_mstb;
26021bb76ff1Sjsg 	}
26031bb76ff1Sjsg 
26041bb76ff1Sjsg 	return NULL;
26051bb76ff1Sjsg }
26061bb76ff1Sjsg 
26071bb76ff1Sjsg static struct drm_dp_mst_branch *
26081bb76ff1Sjsg drm_dp_get_mst_branch_device_by_guid(struct drm_dp_mst_topology_mgr *mgr,
26091bb76ff1Sjsg 				     const uint8_t *guid)
26101bb76ff1Sjsg {
26111bb76ff1Sjsg 	struct drm_dp_mst_branch *mstb;
26121bb76ff1Sjsg 	int ret;
26131bb76ff1Sjsg 
	/* find the branch device by searching down the topology for its GUID */
26151bb76ff1Sjsg 	mutex_lock(&mgr->lock);
26161bb76ff1Sjsg 
26171bb76ff1Sjsg 	mstb = get_mst_branch_device_by_guid_helper(mgr->mst_primary, guid);
26181bb76ff1Sjsg 	if (mstb) {
26191bb76ff1Sjsg 		ret = drm_dp_mst_topology_try_get_mstb(mstb);
26201bb76ff1Sjsg 		if (!ret)
26211bb76ff1Sjsg 			mstb = NULL;
26221bb76ff1Sjsg 	}
26231bb76ff1Sjsg 
26241bb76ff1Sjsg 	mutex_unlock(&mgr->lock);
26251bb76ff1Sjsg 	return mstb;
26261bb76ff1Sjsg }
26271bb76ff1Sjsg 
26281bb76ff1Sjsg static int drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
26291bb76ff1Sjsg 					       struct drm_dp_mst_branch *mstb)
26301bb76ff1Sjsg {
26311bb76ff1Sjsg 	struct drm_dp_mst_port *port;
26321bb76ff1Sjsg 	int ret;
26331bb76ff1Sjsg 	bool changed = false;
26341bb76ff1Sjsg 
26351bb76ff1Sjsg 	if (!mstb->link_address_sent) {
26361bb76ff1Sjsg 		ret = drm_dp_send_link_address(mgr, mstb);
26371bb76ff1Sjsg 		if (ret == 1)
26381bb76ff1Sjsg 			changed = true;
26391bb76ff1Sjsg 		else if (ret < 0)
26401bb76ff1Sjsg 			return ret;
26411bb76ff1Sjsg 	}
26421bb76ff1Sjsg 
26431bb76ff1Sjsg 	list_for_each_entry(port, &mstb->ports, next) {
26441bb76ff1Sjsg 		if (port->input || !port->ddps || !port->mstb)
26451bb76ff1Sjsg 			continue;
26461bb76ff1Sjsg 
26471bb76ff1Sjsg 		ret = drm_dp_check_and_send_link_address(mgr, port->mstb);
26481bb76ff1Sjsg 		if (ret == 1)
26491bb76ff1Sjsg 			changed = true;
26501bb76ff1Sjsg 		else if (ret < 0)
26511bb76ff1Sjsg 			return ret;
26521bb76ff1Sjsg 	}
26531bb76ff1Sjsg 
26541bb76ff1Sjsg 	return changed;
26551bb76ff1Sjsg }
26561bb76ff1Sjsg 
26571bb76ff1Sjsg static void drm_dp_mst_link_probe_work(struct work_struct *work)
26581bb76ff1Sjsg {
26591bb76ff1Sjsg 	struct drm_dp_mst_topology_mgr *mgr =
26601bb76ff1Sjsg 		container_of(work, struct drm_dp_mst_topology_mgr, work);
26611bb76ff1Sjsg 	struct drm_device *dev = mgr->dev;
26621bb76ff1Sjsg 	struct drm_dp_mst_branch *mstb;
26631bb76ff1Sjsg 	int ret;
26641bb76ff1Sjsg 	bool clear_payload_id_table;
26651bb76ff1Sjsg 
26661bb76ff1Sjsg 	mutex_lock(&mgr->probe_lock);
26671bb76ff1Sjsg 
26681bb76ff1Sjsg 	mutex_lock(&mgr->lock);
26691bb76ff1Sjsg 	clear_payload_id_table = !mgr->payload_id_table_cleared;
26701bb76ff1Sjsg 	mgr->payload_id_table_cleared = true;
26711bb76ff1Sjsg 
26721bb76ff1Sjsg 	mstb = mgr->mst_primary;
26731bb76ff1Sjsg 	if (mstb) {
26741bb76ff1Sjsg 		ret = drm_dp_mst_topology_try_get_mstb(mstb);
26751bb76ff1Sjsg 		if (!ret)
26761bb76ff1Sjsg 			mstb = NULL;
26771bb76ff1Sjsg 	}
26781bb76ff1Sjsg 	mutex_unlock(&mgr->lock);
26791bb76ff1Sjsg 	if (!mstb) {
26801bb76ff1Sjsg 		mutex_unlock(&mgr->probe_lock);
26811bb76ff1Sjsg 		return;
26821bb76ff1Sjsg 	}
26831bb76ff1Sjsg 
26841bb76ff1Sjsg 	/*
26851bb76ff1Sjsg 	 * Certain branch devices seem to incorrectly report an available_pbn
26861bb76ff1Sjsg 	 * of 0 on downstream sinks, even after clearing the
26871bb76ff1Sjsg 	 * DP_PAYLOAD_ALLOCATE_* registers in
26881bb76ff1Sjsg 	 * drm_dp_mst_topology_mgr_set_mst(). Namely, the CableMatters USB-C
26891bb76ff1Sjsg 	 * 2x DP hub. Sending a CLEAR_PAYLOAD_ID_TABLE message seems to make
26901bb76ff1Sjsg 	 * things work again.
26911bb76ff1Sjsg 	 */
26921bb76ff1Sjsg 	if (clear_payload_id_table) {
26931bb76ff1Sjsg 		drm_dbg_kms(dev, "Clearing payload ID table\n");
26941bb76ff1Sjsg 		drm_dp_send_clear_payload_id_table(mgr, mstb);
26951bb76ff1Sjsg 	}
26961bb76ff1Sjsg 
26971bb76ff1Sjsg 	ret = drm_dp_check_and_send_link_address(mgr, mstb);
26981bb76ff1Sjsg 	drm_dp_mst_topology_put_mstb(mstb);
26991bb76ff1Sjsg 
27001bb76ff1Sjsg 	mutex_unlock(&mgr->probe_lock);
27011bb76ff1Sjsg 	if (ret > 0)
27021bb76ff1Sjsg 		drm_kms_helper_hotplug_event(dev);
27031bb76ff1Sjsg }
27041bb76ff1Sjsg 
27051bb76ff1Sjsg static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
27061bb76ff1Sjsg 				 u8 *guid)
27071bb76ff1Sjsg {
27081bb76ff1Sjsg 	u64 salt;
27091bb76ff1Sjsg 
27101bb76ff1Sjsg 	if (memchr_inv(guid, 0, 16))
27111bb76ff1Sjsg 		return true;
27121bb76ff1Sjsg 
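	/*
	 * The GUID is all zeroes, i.e. the device didn't provide one:
	 * synthesize a best-effort unique GUID from the current jiffies
	 * counter so the rest of the topology code has something to key on.
	 */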
27131bb76ff1Sjsg 	salt = get_jiffies_64();
27141bb76ff1Sjsg 
27151bb76ff1Sjsg 	memcpy(&guid[0], &salt, sizeof(u64));
27161bb76ff1Sjsg 	memcpy(&guid[8], &salt, sizeof(u64));
27171bb76ff1Sjsg 
27181bb76ff1Sjsg 	return false;
27191bb76ff1Sjsg }
27201bb76ff1Sjsg 
27211bb76ff1Sjsg static void build_dpcd_read(struct drm_dp_sideband_msg_tx *msg,
27221bb76ff1Sjsg 			    u8 port_num, u32 offset, u8 num_bytes)
27231bb76ff1Sjsg {
27241bb76ff1Sjsg 	struct drm_dp_sideband_msg_req_body req;
27251bb76ff1Sjsg 
27261bb76ff1Sjsg 	req.req_type = DP_REMOTE_DPCD_READ;
27271bb76ff1Sjsg 	req.u.dpcd_read.port_number = port_num;
27281bb76ff1Sjsg 	req.u.dpcd_read.dpcd_address = offset;
27291bb76ff1Sjsg 	req.u.dpcd_read.num_bytes = num_bytes;
27301bb76ff1Sjsg 	drm_dp_encode_sideband_req(&req, msg);
27311bb76ff1Sjsg }
27321bb76ff1Sjsg 
27331bb76ff1Sjsg static int drm_dp_send_sideband_msg(struct drm_dp_mst_topology_mgr *mgr,
27341bb76ff1Sjsg 				    bool up, u8 *msg, int len)
27351bb76ff1Sjsg {
27361bb76ff1Sjsg 	int ret;
27371bb76ff1Sjsg 	int regbase = up ? DP_SIDEBAND_MSG_UP_REP_BASE : DP_SIDEBAND_MSG_DOWN_REQ_BASE;
27381bb76ff1Sjsg 	int tosend, total, offset;
27391bb76ff1Sjsg 	int retries = 0;
27401bb76ff1Sjsg 
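	/*
	 * Write the message into the DPCD sideband register window in
	 * chunks of at most 16 bytes (further capped by the maximum DPCD
	 * transaction size), restarting the whole message up to five times
	 * if a chunk write fails with -EIO.
	 */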
27411bb76ff1Sjsg retry:
27421bb76ff1Sjsg 	total = len;
27431bb76ff1Sjsg 	offset = 0;
27441bb76ff1Sjsg 	do {
27451bb76ff1Sjsg 		tosend = min3(mgr->max_dpcd_transaction_bytes, 16, total);
27461bb76ff1Sjsg 
27471bb76ff1Sjsg 		ret = drm_dp_dpcd_write(mgr->aux, regbase + offset,
27481bb76ff1Sjsg 					&msg[offset],
27491bb76ff1Sjsg 					tosend);
27501bb76ff1Sjsg 		if (ret != tosend) {
27511bb76ff1Sjsg 			if (ret == -EIO && retries < 5) {
27521bb76ff1Sjsg 				retries++;
27531bb76ff1Sjsg 				goto retry;
27541bb76ff1Sjsg 			}
27551bb76ff1Sjsg 			drm_dbg_kms(mgr->dev, "failed to dpcd write %d %d\n", tosend, ret);
27561bb76ff1Sjsg 
27571bb76ff1Sjsg 			return -EIO;
27581bb76ff1Sjsg 		}
27591bb76ff1Sjsg 		offset += tosend;
27601bb76ff1Sjsg 		total -= tosend;
27611bb76ff1Sjsg 	} while (total > 0);
27621bb76ff1Sjsg 	return 0;
27631bb76ff1Sjsg }
27641bb76ff1Sjsg 
27651bb76ff1Sjsg static int set_hdr_from_dst_qlock(struct drm_dp_sideband_msg_hdr *hdr,
27661bb76ff1Sjsg 				  struct drm_dp_sideband_msg_tx *txmsg)
27671bb76ff1Sjsg {
27681bb76ff1Sjsg 	struct drm_dp_mst_branch *mstb = txmsg->dst;
27691bb76ff1Sjsg 	u8 req_type;
27701bb76ff1Sjsg 
27711bb76ff1Sjsg 	req_type = txmsg->msg[0] & 0x7f;
27721bb76ff1Sjsg 	if (req_type == DP_CONNECTION_STATUS_NOTIFY ||
27731bb76ff1Sjsg 		req_type == DP_RESOURCE_STATUS_NOTIFY ||
27741bb76ff1Sjsg 		req_type == DP_CLEAR_PAYLOAD_ID_TABLE)
27751bb76ff1Sjsg 		hdr->broadcast = 1;
27761bb76ff1Sjsg 	else
27771bb76ff1Sjsg 		hdr->broadcast = 0;
27781bb76ff1Sjsg 	hdr->path_msg = txmsg->path_msg;
27791bb76ff1Sjsg 	if (hdr->broadcast) {
27801bb76ff1Sjsg 		hdr->lct = 1;
27811bb76ff1Sjsg 		hdr->lcr = 6;
27821bb76ff1Sjsg 	} else {
27831bb76ff1Sjsg 		hdr->lct = mstb->lct;
27841bb76ff1Sjsg 		hdr->lcr = mstb->lct - 1;
27851bb76ff1Sjsg 	}
27861bb76ff1Sjsg 
27871bb76ff1Sjsg 	memcpy(hdr->rad, mstb->rad, hdr->lct / 2);
27881bb76ff1Sjsg 
27891bb76ff1Sjsg 	return 0;
27901bb76ff1Sjsg }
27911bb76ff1Sjsg /*
27921bb76ff1Sjsg  * process a single block of the next message in the sideband queue
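 *
 * Each block fits in one 48-byte sideband transaction: the header (whose
 * size depends on the hop count), up to "space" bytes of message body and
 * a trailing data CRC byte. The somt and eomt header bits mark the first
 * and last block of a message respectively.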
27931bb76ff1Sjsg  */
27941bb76ff1Sjsg static int process_single_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
27951bb76ff1Sjsg 				   struct drm_dp_sideband_msg_tx *txmsg,
27961bb76ff1Sjsg 				   bool up)
27971bb76ff1Sjsg {
27981bb76ff1Sjsg 	u8 chunk[48];
27991bb76ff1Sjsg 	struct drm_dp_sideband_msg_hdr hdr;
28001bb76ff1Sjsg 	int len, space, idx, tosend;
28011bb76ff1Sjsg 	int ret;
28021bb76ff1Sjsg 
28031bb76ff1Sjsg 	if (txmsg->state == DRM_DP_SIDEBAND_TX_SENT)
28041bb76ff1Sjsg 		return 0;
28051bb76ff1Sjsg 
28061bb76ff1Sjsg 	memset(&hdr, 0, sizeof(struct drm_dp_sideband_msg_hdr));
28071bb76ff1Sjsg 
28081bb76ff1Sjsg 	if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED)
28091bb76ff1Sjsg 		txmsg->state = DRM_DP_SIDEBAND_TX_START_SEND;
28101bb76ff1Sjsg 
28111bb76ff1Sjsg 	/* make hdr from dst mst */
28121bb76ff1Sjsg 	ret = set_hdr_from_dst_qlock(&hdr, txmsg);
28131bb76ff1Sjsg 	if (ret < 0)
28141bb76ff1Sjsg 		return ret;
28151bb76ff1Sjsg 
28161bb76ff1Sjsg 	/* amount left to send in this message */
28171bb76ff1Sjsg 	len = txmsg->cur_len - txmsg->cur_offset;
28181bb76ff1Sjsg 
	/* 48 bytes per sideband transaction, minus 1 byte for the data CRC and the header size */
28201bb76ff1Sjsg 	space = 48 - 1 - drm_dp_calc_sb_hdr_size(&hdr);
28211bb76ff1Sjsg 
28221bb76ff1Sjsg 	tosend = min(len, space);
28231bb76ff1Sjsg 	if (len == txmsg->cur_len)
28241bb76ff1Sjsg 		hdr.somt = 1;
28251bb76ff1Sjsg 	if (space >= len)
28261bb76ff1Sjsg 		hdr.eomt = 1;
28291bb76ff1Sjsg 	hdr.msg_len = tosend + 1;
28301bb76ff1Sjsg 	drm_dp_encode_sideband_msg_hdr(&hdr, chunk, &idx);
28311bb76ff1Sjsg 	memcpy(&chunk[idx], &txmsg->msg[txmsg->cur_offset], tosend);
28321bb76ff1Sjsg 	/* add crc at end */
28331bb76ff1Sjsg 	drm_dp_crc_sideband_chunk_req(&chunk[idx], tosend);
28341bb76ff1Sjsg 	idx += tosend + 1;
28351bb76ff1Sjsg 
28361bb76ff1Sjsg 	ret = drm_dp_send_sideband_msg(mgr, up, chunk, idx);
28371bb76ff1Sjsg 	if (ret) {
28381bb76ff1Sjsg 		if (drm_debug_enabled(DRM_UT_DP)) {
28391bb76ff1Sjsg 			struct drm_printer p = drm_debug_printer(DBG_PREFIX);
28401bb76ff1Sjsg 
28411bb76ff1Sjsg 			drm_printf(&p, "sideband msg failed to send\n");
28421bb76ff1Sjsg 			drm_dp_mst_dump_sideband_msg_tx(&p, txmsg);
28431bb76ff1Sjsg 		}
28441bb76ff1Sjsg 		return ret;
28451bb76ff1Sjsg 	}
28461bb76ff1Sjsg 
28471bb76ff1Sjsg 	txmsg->cur_offset += tosend;
28481bb76ff1Sjsg 	if (txmsg->cur_offset == txmsg->cur_len) {
28491bb76ff1Sjsg 		txmsg->state = DRM_DP_SIDEBAND_TX_SENT;
28501bb76ff1Sjsg 		return 1;
28511bb76ff1Sjsg 	}
28521bb76ff1Sjsg 	return 0;
28531bb76ff1Sjsg }
28541bb76ff1Sjsg 
28551bb76ff1Sjsg static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
28561bb76ff1Sjsg {
28571bb76ff1Sjsg 	struct drm_dp_sideband_msg_tx *txmsg;
28581bb76ff1Sjsg 	int ret;
28591bb76ff1Sjsg 
28601bb76ff1Sjsg 	WARN_ON(!mutex_is_locked(&mgr->qlock));
28611bb76ff1Sjsg 
28621bb76ff1Sjsg 	/* construct a chunk from the first msg in the tx_msg queue */
28631bb76ff1Sjsg 	if (list_empty(&mgr->tx_msg_downq))
28641bb76ff1Sjsg 		return;
28651bb76ff1Sjsg 
28661bb76ff1Sjsg 	txmsg = list_first_entry(&mgr->tx_msg_downq,
28671bb76ff1Sjsg 				 struct drm_dp_sideband_msg_tx, next);
28681bb76ff1Sjsg 	ret = process_single_tx_qlock(mgr, txmsg, false);
28691bb76ff1Sjsg 	if (ret < 0) {
28701bb76ff1Sjsg 		drm_dbg_kms(mgr->dev, "failed to send msg in q %d\n", ret);
28711bb76ff1Sjsg 		list_del(&txmsg->next);
28721bb76ff1Sjsg 		txmsg->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
28731bb76ff1Sjsg 		wake_up_all(&mgr->tx_waitq);
28741bb76ff1Sjsg 	}
28751bb76ff1Sjsg }
28761bb76ff1Sjsg 
28771bb76ff1Sjsg static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr,
28781bb76ff1Sjsg 				 struct drm_dp_sideband_msg_tx *txmsg)
28791bb76ff1Sjsg {
28801bb76ff1Sjsg 	mutex_lock(&mgr->qlock);
28811bb76ff1Sjsg 	list_add_tail(&txmsg->next, &mgr->tx_msg_downq);
28821bb76ff1Sjsg 
28831bb76ff1Sjsg 	if (drm_debug_enabled(DRM_UT_DP)) {
28841bb76ff1Sjsg 		struct drm_printer p = drm_debug_printer(DBG_PREFIX);
28851bb76ff1Sjsg 
28861bb76ff1Sjsg 		drm_dp_mst_dump_sideband_msg_tx(&p, txmsg);
28871bb76ff1Sjsg 	}
28881bb76ff1Sjsg 
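	/*
	 * If this is the only message in the queue, start transmitting it
	 * now; otherwise it will be sent once the messages queued ahead of
	 * it have completed.
	 */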
28891bb76ff1Sjsg 	if (list_is_singular(&mgr->tx_msg_downq))
28901bb76ff1Sjsg 		process_single_down_tx_qlock(mgr);
28911bb76ff1Sjsg 	mutex_unlock(&mgr->qlock);
28921bb76ff1Sjsg }
28931bb76ff1Sjsg 
28941bb76ff1Sjsg static void
28951bb76ff1Sjsg drm_dp_dump_link_address(const struct drm_dp_mst_topology_mgr *mgr,
28961bb76ff1Sjsg 			 struct drm_dp_link_address_ack_reply *reply)
28971bb76ff1Sjsg {
28981bb76ff1Sjsg 	struct drm_dp_link_addr_reply_port *port_reply;
28991bb76ff1Sjsg 	int i;
29001bb76ff1Sjsg 
29011bb76ff1Sjsg 	for (i = 0; i < reply->nports; i++) {
29021bb76ff1Sjsg 		port_reply = &reply->ports[i];
29031bb76ff1Sjsg 		drm_dbg_kms(mgr->dev,
29041bb76ff1Sjsg 			    "port %d: input %d, pdt: %d, pn: %d, dpcd_rev: %02x, mcs: %d, ddps: %d, ldps %d, sdp %d/%d\n",
29051bb76ff1Sjsg 			    i,
29061bb76ff1Sjsg 			    port_reply->input_port,
29071bb76ff1Sjsg 			    port_reply->peer_device_type,
29081bb76ff1Sjsg 			    port_reply->port_number,
29091bb76ff1Sjsg 			    port_reply->dpcd_revision,
29101bb76ff1Sjsg 			    port_reply->mcs,
29111bb76ff1Sjsg 			    port_reply->ddps,
29121bb76ff1Sjsg 			    port_reply->legacy_device_plug_status,
29131bb76ff1Sjsg 			    port_reply->num_sdp_streams,
29141bb76ff1Sjsg 			    port_reply->num_sdp_stream_sinks);
29151bb76ff1Sjsg 	}
29161bb76ff1Sjsg }
29171bb76ff1Sjsg 
29181bb76ff1Sjsg static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
29191bb76ff1Sjsg 				     struct drm_dp_mst_branch *mstb)
29201bb76ff1Sjsg {
29211bb76ff1Sjsg 	struct drm_dp_sideband_msg_tx *txmsg;
29221bb76ff1Sjsg 	struct drm_dp_link_address_ack_reply *reply;
29231bb76ff1Sjsg 	struct drm_dp_mst_port *port, *tmp;
29241bb76ff1Sjsg 	int i, ret, port_mask = 0;
29251bb76ff1Sjsg 	bool changed = false;
29261bb76ff1Sjsg 
29271bb76ff1Sjsg 	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
29281bb76ff1Sjsg 	if (!txmsg)
29291bb76ff1Sjsg 		return -ENOMEM;
29301bb76ff1Sjsg 
29311bb76ff1Sjsg 	txmsg->dst = mstb;
29321bb76ff1Sjsg 	build_link_address(txmsg);
29331bb76ff1Sjsg 
29341bb76ff1Sjsg 	mstb->link_address_sent = true;
29351bb76ff1Sjsg 	drm_dp_queue_down_tx(mgr, txmsg);
29361bb76ff1Sjsg 
29371bb76ff1Sjsg 	/* FIXME: Actually do some real error handling here */
29381bb76ff1Sjsg 	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
2939cb2f7175Sjsg 	if (ret < 0) {
29401bb76ff1Sjsg 		drm_err(mgr->dev, "Sending link address failed with %d\n", ret);
29411bb76ff1Sjsg 		goto out;
29421bb76ff1Sjsg 	}
29431bb76ff1Sjsg 	if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
29441bb76ff1Sjsg 		drm_err(mgr->dev, "link address NAK received\n");
29451bb76ff1Sjsg 		ret = -EIO;
29461bb76ff1Sjsg 		goto out;
29471bb76ff1Sjsg 	}
29481bb76ff1Sjsg 
29491bb76ff1Sjsg 	reply = &txmsg->reply.u.link_addr;
29501bb76ff1Sjsg 	drm_dbg_kms(mgr->dev, "link address reply: %d\n", reply->nports);
29511bb76ff1Sjsg 	drm_dp_dump_link_address(mgr, reply);
29521bb76ff1Sjsg 
29531bb76ff1Sjsg 	ret = drm_dp_check_mstb_guid(mstb, reply->guid);
29541bb76ff1Sjsg 	if (ret) {
29551bb76ff1Sjsg 		char buf[64];
29561bb76ff1Sjsg 
29571bb76ff1Sjsg 		drm_dp_mst_rad_to_str(mstb->rad, mstb->lct, buf, sizeof(buf));
29581bb76ff1Sjsg 		drm_err(mgr->dev, "GUID check on %s failed: %d\n", buf, ret);
29591bb76ff1Sjsg 		goto out;
29601bb76ff1Sjsg 	}
29611bb76ff1Sjsg 
29621bb76ff1Sjsg 	for (i = 0; i < reply->nports; i++) {
29631bb76ff1Sjsg 		port_mask |= BIT(reply->ports[i].port_number);
29641bb76ff1Sjsg 		ret = drm_dp_mst_handle_link_address_port(mstb, mgr->dev,
29651bb76ff1Sjsg 							  &reply->ports[i]);
29661bb76ff1Sjsg 		if (ret == 1)
29671bb76ff1Sjsg 			changed = true;
29681bb76ff1Sjsg 		else if (ret < 0)
29691bb76ff1Sjsg 			goto out;
29701bb76ff1Sjsg 	}
29711bb76ff1Sjsg 
29721bb76ff1Sjsg 	/* Prune any ports that are currently a part of mstb in our in-memory
29731bb76ff1Sjsg 	 * topology, but were not seen in this link address. Usually this
29741bb76ff1Sjsg 	 * means that they were removed while the topology was out of sync,
29751bb76ff1Sjsg 	 * e.g. during suspend/resume
29761bb76ff1Sjsg 	 */
29771bb76ff1Sjsg 	mutex_lock(&mgr->lock);
29781bb76ff1Sjsg 	list_for_each_entry_safe(port, tmp, &mstb->ports, next) {
29791bb76ff1Sjsg 		if (port_mask & BIT(port->port_num))
29801bb76ff1Sjsg 			continue;
29811bb76ff1Sjsg 
29821bb76ff1Sjsg 		drm_dbg_kms(mgr->dev, "port %d was not in link address, removing\n",
29831bb76ff1Sjsg 			    port->port_num);
29841bb76ff1Sjsg 		list_del(&port->next);
29851bb76ff1Sjsg 		drm_dp_mst_topology_put_port(port);
29861bb76ff1Sjsg 		changed = true;
29871bb76ff1Sjsg 	}
29881bb76ff1Sjsg 	mutex_unlock(&mgr->lock);
29891bb76ff1Sjsg 
29901bb76ff1Sjsg out:
2991cb2f7175Sjsg 	if (ret < 0)
29921bb76ff1Sjsg 		mstb->link_address_sent = false;
29931bb76ff1Sjsg 	kfree(txmsg);
29941bb76ff1Sjsg 	return ret < 0 ? ret : changed;
29951bb76ff1Sjsg }
29961bb76ff1Sjsg 
29971bb76ff1Sjsg static void
29981bb76ff1Sjsg drm_dp_send_clear_payload_id_table(struct drm_dp_mst_topology_mgr *mgr,
29991bb76ff1Sjsg 				   struct drm_dp_mst_branch *mstb)
30001bb76ff1Sjsg {
30011bb76ff1Sjsg 	struct drm_dp_sideband_msg_tx *txmsg;
30021bb76ff1Sjsg 	int ret;
30031bb76ff1Sjsg 
30041bb76ff1Sjsg 	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
30051bb76ff1Sjsg 	if (!txmsg)
30061bb76ff1Sjsg 		return;
30071bb76ff1Sjsg 
30081bb76ff1Sjsg 	txmsg->dst = mstb;
30091bb76ff1Sjsg 	build_clear_payload_id_table(txmsg);
30101bb76ff1Sjsg 
30111bb76ff1Sjsg 	drm_dp_queue_down_tx(mgr, txmsg);
30121bb76ff1Sjsg 
30131bb76ff1Sjsg 	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
30141bb76ff1Sjsg 	if (ret > 0 && txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
30151bb76ff1Sjsg 		drm_dbg_kms(mgr->dev, "clear payload table id nak received\n");
30161bb76ff1Sjsg 
30171bb76ff1Sjsg 	kfree(txmsg);
30181bb76ff1Sjsg }
30191bb76ff1Sjsg 
30201bb76ff1Sjsg static int
30211bb76ff1Sjsg drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
30221bb76ff1Sjsg 				struct drm_dp_mst_branch *mstb,
30231bb76ff1Sjsg 				struct drm_dp_mst_port *port)
30241bb76ff1Sjsg {
30251bb76ff1Sjsg 	struct drm_dp_enum_path_resources_ack_reply *path_res;
30261bb76ff1Sjsg 	struct drm_dp_sideband_msg_tx *txmsg;
30271bb76ff1Sjsg 	int ret;
30281bb76ff1Sjsg 
30291bb76ff1Sjsg 	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
30301bb76ff1Sjsg 	if (!txmsg)
30311bb76ff1Sjsg 		return -ENOMEM;
30321bb76ff1Sjsg 
30331bb76ff1Sjsg 	txmsg->dst = mstb;
30341bb76ff1Sjsg 	build_enum_path_resources(txmsg, port->port_num);
30351bb76ff1Sjsg 
30361bb76ff1Sjsg 	drm_dp_queue_down_tx(mgr, txmsg);
30371bb76ff1Sjsg 
30381bb76ff1Sjsg 	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
30391bb76ff1Sjsg 	if (ret > 0) {
30401bb76ff1Sjsg 		ret = 0;
30411bb76ff1Sjsg 		path_res = &txmsg->reply.u.path_resources;
30421bb76ff1Sjsg 
30431bb76ff1Sjsg 		if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
30441bb76ff1Sjsg 			drm_dbg_kms(mgr->dev, "enum path resources nak received\n");
30451bb76ff1Sjsg 		} else {
30461bb76ff1Sjsg 			if (port->port_num != path_res->port_number)
30471bb76ff1Sjsg 				DRM_ERROR("got incorrect port in response\n");
30481bb76ff1Sjsg 
30491bb76ff1Sjsg 			drm_dbg_kms(mgr->dev, "enum path resources %d: %d %d\n",
30501bb76ff1Sjsg 				    path_res->port_number,
30511bb76ff1Sjsg 				    path_res->full_payload_bw_number,
30521bb76ff1Sjsg 				    path_res->avail_payload_bw_number);
30531bb76ff1Sjsg 
30541bb76ff1Sjsg 			/*
30551bb76ff1Sjsg 			 * If something changed, make sure we send a
30561bb76ff1Sjsg 			 * hotplug
30571bb76ff1Sjsg 			 */
30581bb76ff1Sjsg 			if (port->full_pbn != path_res->full_payload_bw_number ||
30591bb76ff1Sjsg 			    port->fec_capable != path_res->fec_capable)
30601bb76ff1Sjsg 				ret = 1;
30611bb76ff1Sjsg 
30621bb76ff1Sjsg 			port->full_pbn = path_res->full_payload_bw_number;
30631bb76ff1Sjsg 			port->fec_capable = path_res->fec_capable;
30641bb76ff1Sjsg 		}
30651bb76ff1Sjsg 	}
30661bb76ff1Sjsg 
30671bb76ff1Sjsg 	kfree(txmsg);
30681bb76ff1Sjsg 	return ret;
30691bb76ff1Sjsg }
30701bb76ff1Sjsg 
30711bb76ff1Sjsg static struct drm_dp_mst_port *drm_dp_get_last_connected_port_to_mstb(struct drm_dp_mst_branch *mstb)
30721bb76ff1Sjsg {
30731bb76ff1Sjsg 	if (!mstb->port_parent)
30741bb76ff1Sjsg 		return NULL;
30751bb76ff1Sjsg 
30761bb76ff1Sjsg 	if (mstb->port_parent->mstb != mstb)
30771bb76ff1Sjsg 		return mstb->port_parent;
30781bb76ff1Sjsg 
30791bb76ff1Sjsg 	return drm_dp_get_last_connected_port_to_mstb(mstb->port_parent->parent);
30801bb76ff1Sjsg }
30811bb76ff1Sjsg 
30821bb76ff1Sjsg /*
30831bb76ff1Sjsg  * Searches upwards in the topology starting from mstb to try to find the
30841bb76ff1Sjsg  * closest available parent of mstb that's still connected to the rest of the
30851bb76ff1Sjsg  * topology. This can be used in order to perform operations like releasing
30861bb76ff1Sjsg  * payloads, where the branch device which owned the payload may no longer be
30871bb76ff1Sjsg  * around and thus would require that the payload on the last living relative
30881bb76ff1Sjsg  * be freed instead.
30891bb76ff1Sjsg  */
30901bb76ff1Sjsg static struct drm_dp_mst_branch *
30911bb76ff1Sjsg drm_dp_get_last_connected_port_and_mstb(struct drm_dp_mst_topology_mgr *mgr,
30921bb76ff1Sjsg 					struct drm_dp_mst_branch *mstb,
30931bb76ff1Sjsg 					int *port_num)
30941bb76ff1Sjsg {
30951bb76ff1Sjsg 	struct drm_dp_mst_branch *rmstb = NULL;
30961bb76ff1Sjsg 	struct drm_dp_mst_port *found_port;
30971bb76ff1Sjsg 
30981bb76ff1Sjsg 	mutex_lock(&mgr->lock);
30991bb76ff1Sjsg 	if (!mgr->mst_primary)
31001bb76ff1Sjsg 		goto out;
31011bb76ff1Sjsg 
31021bb76ff1Sjsg 	do {
31031bb76ff1Sjsg 		found_port = drm_dp_get_last_connected_port_to_mstb(mstb);
31041bb76ff1Sjsg 		if (!found_port)
31051bb76ff1Sjsg 			break;
31061bb76ff1Sjsg 
31071bb76ff1Sjsg 		if (drm_dp_mst_topology_try_get_mstb(found_port->parent)) {
31081bb76ff1Sjsg 			rmstb = found_port->parent;
31091bb76ff1Sjsg 			*port_num = found_port->port_num;
31101bb76ff1Sjsg 		} else {
31111bb76ff1Sjsg 			/* Search again, starting from this parent */
31121bb76ff1Sjsg 			mstb = found_port->parent;
31131bb76ff1Sjsg 		}
31141bb76ff1Sjsg 	} while (!rmstb);
31151bb76ff1Sjsg out:
31161bb76ff1Sjsg 	mutex_unlock(&mgr->lock);
31171bb76ff1Sjsg 	return rmstb;
31181bb76ff1Sjsg }
31191bb76ff1Sjsg 
31201bb76ff1Sjsg static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
31211bb76ff1Sjsg 				   struct drm_dp_mst_port *port,
31221bb76ff1Sjsg 				   int id,
31231bb76ff1Sjsg 				   int pbn)
31241bb76ff1Sjsg {
31251bb76ff1Sjsg 	struct drm_dp_sideband_msg_tx *txmsg;
31261bb76ff1Sjsg 	struct drm_dp_mst_branch *mstb;
31271bb76ff1Sjsg 	int ret, port_num;
31281bb76ff1Sjsg 	u8 sinks[DRM_DP_MAX_SDP_STREAMS];
31291bb76ff1Sjsg 	int i;
31301bb76ff1Sjsg 
31311bb76ff1Sjsg 	port_num = port->port_num;
31321bb76ff1Sjsg 	mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
31331bb76ff1Sjsg 	if (!mstb) {
31341bb76ff1Sjsg 		mstb = drm_dp_get_last_connected_port_and_mstb(mgr,
31351bb76ff1Sjsg 							       port->parent,
31361bb76ff1Sjsg 							       &port_num);
31371bb76ff1Sjsg 
31381bb76ff1Sjsg 		if (!mstb)
31391bb76ff1Sjsg 			return -EINVAL;
31401bb76ff1Sjsg 	}
31411bb76ff1Sjsg 
31421bb76ff1Sjsg 	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
31431bb76ff1Sjsg 	if (!txmsg) {
31441bb76ff1Sjsg 		ret = -ENOMEM;
31451bb76ff1Sjsg 		goto fail_put;
31461bb76ff1Sjsg 	}
31471bb76ff1Sjsg 
31481bb76ff1Sjsg 	for (i = 0; i < port->num_sdp_streams; i++)
31491bb76ff1Sjsg 		sinks[i] = i;
31501bb76ff1Sjsg 
31511bb76ff1Sjsg 	txmsg->dst = mstb;
31521bb76ff1Sjsg 	build_allocate_payload(txmsg, port_num,
31531bb76ff1Sjsg 			       id,
31541bb76ff1Sjsg 			       pbn, port->num_sdp_streams, sinks);
31551bb76ff1Sjsg 
31561bb76ff1Sjsg 	drm_dp_queue_down_tx(mgr, txmsg);
31571bb76ff1Sjsg 
31581bb76ff1Sjsg 	/*
31591bb76ff1Sjsg 	 * FIXME: there is a small chance that between getting the last
31601bb76ff1Sjsg 	 * connected mstb and sending the payload message, the last connected
31611bb76ff1Sjsg 	 * mstb could also be removed from the topology. In the future, this
31621bb76ff1Sjsg 	 * needs to be fixed by restarting the
31631bb76ff1Sjsg 	 * drm_dp_get_last_connected_port_and_mstb() search in the event of a
31641bb76ff1Sjsg 	 * timeout if the topology is still connected to the system.
31651bb76ff1Sjsg 	 */
31661bb76ff1Sjsg 	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
31671bb76ff1Sjsg 	if (ret > 0) {
31681bb76ff1Sjsg 		if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
31691bb76ff1Sjsg 			ret = -EINVAL;
31701bb76ff1Sjsg 		else
31711bb76ff1Sjsg 			ret = 0;
31721bb76ff1Sjsg 	}
31731bb76ff1Sjsg 	kfree(txmsg);
31741bb76ff1Sjsg fail_put:
31751bb76ff1Sjsg 	drm_dp_mst_topology_put_mstb(mstb);
31761bb76ff1Sjsg 	return ret;
31771bb76ff1Sjsg }
31781bb76ff1Sjsg 
31791bb76ff1Sjsg int drm_dp_send_power_updown_phy(struct drm_dp_mst_topology_mgr *mgr,
31801bb76ff1Sjsg 				 struct drm_dp_mst_port *port, bool power_up)
31811bb76ff1Sjsg {
31821bb76ff1Sjsg 	struct drm_dp_sideband_msg_tx *txmsg;
31831bb76ff1Sjsg 	int ret;
31841bb76ff1Sjsg 
31851bb76ff1Sjsg 	port = drm_dp_mst_topology_get_port_validated(mgr, port);
31861bb76ff1Sjsg 	if (!port)
31871bb76ff1Sjsg 		return -EINVAL;
31881bb76ff1Sjsg 
31891bb76ff1Sjsg 	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
31901bb76ff1Sjsg 	if (!txmsg) {
31911bb76ff1Sjsg 		drm_dp_mst_topology_put_port(port);
31921bb76ff1Sjsg 		return -ENOMEM;
31931bb76ff1Sjsg 	}
31941bb76ff1Sjsg 
31951bb76ff1Sjsg 	txmsg->dst = port->parent;
31961bb76ff1Sjsg 	build_power_updown_phy(txmsg, port->port_num, power_up);
31971bb76ff1Sjsg 	drm_dp_queue_down_tx(mgr, txmsg);
31981bb76ff1Sjsg 
31991bb76ff1Sjsg 	ret = drm_dp_mst_wait_tx_reply(port->parent, txmsg);
32001bb76ff1Sjsg 	if (ret > 0) {
32011bb76ff1Sjsg 		if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
32021bb76ff1Sjsg 			ret = -EINVAL;
32031bb76ff1Sjsg 		else
32041bb76ff1Sjsg 			ret = 0;
32051bb76ff1Sjsg 	}
32061bb76ff1Sjsg 	kfree(txmsg);
32071bb76ff1Sjsg 	drm_dp_mst_topology_put_port(port);
32081bb76ff1Sjsg 
32091bb76ff1Sjsg 	return ret;
32101bb76ff1Sjsg }
32111bb76ff1Sjsg EXPORT_SYMBOL(drm_dp_send_power_updown_phy);
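/*
 * Illustrative usage (a sketch, not taken from any in-tree driver): before
 * suspending an MST sink, a driver might power its PHY down and later back
 * up around a modeset, e.g.
 *
 *	drm_dp_send_power_updown_phy(mgr, port, false);
 *	...suspend and resume the link...
 *	drm_dp_send_power_updown_phy(mgr, port, true);
 */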
32121bb76ff1Sjsg 
32131bb76ff1Sjsg int drm_dp_send_query_stream_enc_status(struct drm_dp_mst_topology_mgr *mgr,
32141bb76ff1Sjsg 		struct drm_dp_mst_port *port,
32151bb76ff1Sjsg 		struct drm_dp_query_stream_enc_status_ack_reply *status)
32161bb76ff1Sjsg {
32171bb76ff1Sjsg 	struct drm_dp_mst_topology_state *state;
32181bb76ff1Sjsg 	struct drm_dp_mst_atomic_payload *payload;
32191bb76ff1Sjsg 	struct drm_dp_sideband_msg_tx *txmsg;
32201bb76ff1Sjsg 	u8 nonce[7];
32211bb76ff1Sjsg 	int ret;
32221bb76ff1Sjsg 
32231bb76ff1Sjsg 	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
32241bb76ff1Sjsg 	if (!txmsg)
32251bb76ff1Sjsg 		return -ENOMEM;
32261bb76ff1Sjsg 
32271bb76ff1Sjsg 	port = drm_dp_mst_topology_get_port_validated(mgr, port);
32281bb76ff1Sjsg 	if (!port) {
32291bb76ff1Sjsg 		ret = -EINVAL;
32301bb76ff1Sjsg 		goto out_get_port;
32311bb76ff1Sjsg 	}
32321bb76ff1Sjsg 
32331bb76ff1Sjsg 	get_random_bytes(nonce, sizeof(nonce));
32341bb76ff1Sjsg 
32351bb76ff1Sjsg 	drm_modeset_lock(&mgr->base.lock, NULL);
32361bb76ff1Sjsg 	state = to_drm_dp_mst_topology_state(mgr->base.state);
32371bb76ff1Sjsg 	payload = drm_atomic_get_mst_payload_state(state, port);
32381bb76ff1Sjsg 
32391bb76ff1Sjsg 	/*
32401bb76ff1Sjsg 	 * "Source device targets the QUERY_STREAM_ENCRYPTION_STATUS message
32411bb76ff1Sjsg 	 *  transaction at the MST Branch device directly connected to the
32421bb76ff1Sjsg 	 *  Source"
32431bb76ff1Sjsg 	 */
32441bb76ff1Sjsg 	txmsg->dst = mgr->mst_primary;
32451bb76ff1Sjsg 
32461bb76ff1Sjsg 	build_query_stream_enc_status(txmsg, payload->vcpi, nonce);
32471bb76ff1Sjsg 
32481bb76ff1Sjsg 	drm_dp_queue_down_tx(mgr, txmsg);
32491bb76ff1Sjsg 
32501bb76ff1Sjsg 	ret = drm_dp_mst_wait_tx_reply(mgr->mst_primary, txmsg);
32511bb76ff1Sjsg 	if (ret < 0) {
32521bb76ff1Sjsg 		goto out;
32531bb76ff1Sjsg 	} else if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
32541bb76ff1Sjsg 		drm_dbg_kms(mgr->dev, "query encryption status nak received\n");
32551bb76ff1Sjsg 		ret = -ENXIO;
32561bb76ff1Sjsg 		goto out;
32571bb76ff1Sjsg 	}
32581bb76ff1Sjsg 
32591bb76ff1Sjsg 	ret = 0;
32601bb76ff1Sjsg 	memcpy(status, &txmsg->reply.u.enc_status, sizeof(*status));
32611bb76ff1Sjsg 
32621bb76ff1Sjsg out:
32631bb76ff1Sjsg 	drm_modeset_unlock(&mgr->base.lock);
32641bb76ff1Sjsg 	drm_dp_mst_topology_put_port(port);
32651bb76ff1Sjsg out_get_port:
32661bb76ff1Sjsg 	kfree(txmsg);
32671bb76ff1Sjsg 	return ret;
32681bb76ff1Sjsg }
32691bb76ff1Sjsg EXPORT_SYMBOL(drm_dp_send_query_stream_enc_status);
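/*
 * Illustrative usage (a sketch, assuming an HDCP-enabled driver): query the
 * encryption status of a stream and inspect the reply, e.g.
 *
 *	struct drm_dp_query_stream_enc_status_ack_reply status;
 *	int ret;
 *
 *	ret = drm_dp_send_query_stream_enc_status(mgr, port, &status);
 *	if (ret == 0)
 *		...check the fields of status...
 */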
32701bb76ff1Sjsg 
32711bb76ff1Sjsg static int drm_dp_create_payload_step1(struct drm_dp_mst_topology_mgr *mgr,
32721bb76ff1Sjsg 				       struct drm_dp_mst_atomic_payload *payload)
32731bb76ff1Sjsg {
32741bb76ff1Sjsg 	return drm_dp_dpcd_write_payload(mgr, payload->vcpi, payload->vc_start_slot,
32751bb76ff1Sjsg 					 payload->time_slots);
32761bb76ff1Sjsg }
32771bb76ff1Sjsg 
32781bb76ff1Sjsg static int drm_dp_create_payload_step2(struct drm_dp_mst_topology_mgr *mgr,
32791bb76ff1Sjsg 				       struct drm_dp_mst_atomic_payload *payload)
32801bb76ff1Sjsg {
32811bb76ff1Sjsg 	int ret;
32821bb76ff1Sjsg 	struct drm_dp_mst_port *port = drm_dp_mst_topology_get_port_validated(mgr, payload->port);
32831bb76ff1Sjsg 
32841bb76ff1Sjsg 	if (!port)
32851bb76ff1Sjsg 		return -EIO;
32861bb76ff1Sjsg 
32871bb76ff1Sjsg 	ret = drm_dp_payload_send_msg(mgr, port, payload->vcpi, payload->pbn);
32881bb76ff1Sjsg 	drm_dp_mst_topology_put_port(port);
32891bb76ff1Sjsg 	return ret;
32901bb76ff1Sjsg }
32911bb76ff1Sjsg 
32921bb76ff1Sjsg static int drm_dp_destroy_payload_step1(struct drm_dp_mst_topology_mgr *mgr,
32931bb76ff1Sjsg 					struct drm_dp_mst_topology_state *mst_state,
32941bb76ff1Sjsg 					struct drm_dp_mst_atomic_payload *payload)
32951bb76ff1Sjsg {
32961bb76ff1Sjsg 	drm_dbg_kms(mgr->dev, "\n");
32971bb76ff1Sjsg 
32981bb76ff1Sjsg 	/* it's okay for these to fail */
32991bb76ff1Sjsg 	drm_dp_payload_send_msg(mgr, payload->port, payload->vcpi, 0);
33001bb76ff1Sjsg 	drm_dp_dpcd_write_payload(mgr, payload->vcpi, payload->vc_start_slot, 0);
33011bb76ff1Sjsg 
33021bb76ff1Sjsg 	return 0;
33031bb76ff1Sjsg }
33041bb76ff1Sjsg 
33051bb76ff1Sjsg /**
33061bb76ff1Sjsg  * drm_dp_add_payload_part1() - Execute payload update part 1
33071bb76ff1Sjsg  * @mgr: Manager to use.
33081bb76ff1Sjsg  * @mst_state: The MST atomic state
33091bb76ff1Sjsg  * @payload: The payload to write
33101bb76ff1Sjsg  *
33111bb76ff1Sjsg  * Determines the starting time slot for the given payload, and programs the VCPI for this payload
33121bb76ff1Sjsg  * into hardware. After calling this, the driver should generate ACT and payload packets.
33131bb76ff1Sjsg  *
33141bb76ff1Sjsg  * Returns: 0 on success, error code on failure. In the event that this fails,
33151bb76ff1Sjsg  * @payload.vc_start_slot will also be set to -1.
33161bb76ff1Sjsg  */
33171bb76ff1Sjsg int drm_dp_add_payload_part1(struct drm_dp_mst_topology_mgr *mgr,
33181bb76ff1Sjsg 			     struct drm_dp_mst_topology_state *mst_state,
33191bb76ff1Sjsg 			     struct drm_dp_mst_atomic_payload *payload)
33201bb76ff1Sjsg {
33211bb76ff1Sjsg 	struct drm_dp_mst_port *port;
33221bb76ff1Sjsg 	int ret;
33231bb76ff1Sjsg 
33241bb76ff1Sjsg 	port = drm_dp_mst_topology_get_port_validated(mgr, payload->port);
332594994154Sjsg 	if (!port) {
332694994154Sjsg 		drm_dbg_kms(mgr->dev,
332794994154Sjsg 			    "VCPI %d for port %p not in topology, not creating a payload\n",
332894994154Sjsg 			    payload->vcpi, payload->port);
332994994154Sjsg 		payload->vc_start_slot = -1;
33301bb76ff1Sjsg 		return 0;
333194994154Sjsg 	}
33321bb76ff1Sjsg 
33331bb76ff1Sjsg 	if (mgr->payload_count == 0)
33341bb76ff1Sjsg 		mgr->next_start_slot = mst_state->start_slot;
33351bb76ff1Sjsg 
33361bb76ff1Sjsg 	payload->vc_start_slot = mgr->next_start_slot;
33371bb76ff1Sjsg 
33381bb76ff1Sjsg 	ret = drm_dp_create_payload_step1(mgr, payload);
33391bb76ff1Sjsg 	drm_dp_mst_topology_put_port(port);
33401bb76ff1Sjsg 	if (ret < 0) {
33411bb76ff1Sjsg 		drm_warn(mgr->dev, "Failed to create MST payload for port %p: %d\n",
33421bb76ff1Sjsg 			 payload->port, ret);
33431bb76ff1Sjsg 		payload->vc_start_slot = -1;
33441bb76ff1Sjsg 		return ret;
33451bb76ff1Sjsg 	}
33461bb76ff1Sjsg 
33471bb76ff1Sjsg 	mgr->payload_count++;
33481bb76ff1Sjsg 	mgr->next_start_slot += payload->time_slots;
33491bb76ff1Sjsg 
33501bb76ff1Sjsg 	return 0;
33511bb76ff1Sjsg }
33521bb76ff1Sjsg EXPORT_SYMBOL(drm_dp_add_payload_part1);
33531bb76ff1Sjsg 
33541bb76ff1Sjsg /**
33551bb76ff1Sjsg  * drm_dp_remove_payload() - Remove an MST payload
33561bb76ff1Sjsg  * @mgr: Manager to use.
33571bb76ff1Sjsg  * @mst_state: The MST atomic state
3358d0d157cdSjsg  * @old_payload: The payload with its old state
3359d0d157cdSjsg  * @new_payload: The payload to write
33601bb76ff1Sjsg  *
33611bb76ff1Sjsg  * Removes a payload from an MST topology if it was successfully assigned a start slot. Also updates
33621bb76ff1Sjsg  * the starting time slots of all other payloads which would have been shifted towards the start of
33631bb76ff1Sjsg  * the VC table as a result. After calling this, the driver should generate ACT and payload packets.
33641bb76ff1Sjsg  */
33651bb76ff1Sjsg void drm_dp_remove_payload(struct drm_dp_mst_topology_mgr *mgr,
33661bb76ff1Sjsg 			   struct drm_dp_mst_topology_state *mst_state,
3367d0d157cdSjsg 			   const struct drm_dp_mst_atomic_payload *old_payload,
3368d0d157cdSjsg 			   struct drm_dp_mst_atomic_payload *new_payload)
33691bb76ff1Sjsg {
33701bb76ff1Sjsg 	struct drm_dp_mst_atomic_payload *pos;
33711bb76ff1Sjsg 	bool send_remove = false;
33721bb76ff1Sjsg 
33731bb76ff1Sjsg 	/* We failed to make the payload, so nothing to do */
3374d0d157cdSjsg 	if (new_payload->vc_start_slot == -1)
33751bb76ff1Sjsg 		return;
33761bb76ff1Sjsg 
33771bb76ff1Sjsg 	mutex_lock(&mgr->lock);
3378d0d157cdSjsg 	send_remove = drm_dp_mst_port_downstream_of_branch(new_payload->port, mgr->mst_primary);
33791bb76ff1Sjsg 	mutex_unlock(&mgr->lock);
33801bb76ff1Sjsg 
33811bb76ff1Sjsg 	if (send_remove)
3382d0d157cdSjsg 		drm_dp_destroy_payload_step1(mgr, mst_state, new_payload);
33831bb76ff1Sjsg 	else
33841bb76ff1Sjsg 		drm_dbg_kms(mgr->dev, "Payload for VCPI %d not in topology, not sending remove\n",
3385d0d157cdSjsg 			    new_payload->vcpi);
33861bb76ff1Sjsg 
33871bb76ff1Sjsg 	list_for_each_entry(pos, &mst_state->payloads, next) {
3388d0d157cdSjsg 		if (pos != new_payload && pos->vc_start_slot > new_payload->vc_start_slot)
3389d0d157cdSjsg 			pos->vc_start_slot -= old_payload->time_slots;
33901bb76ff1Sjsg 	}
3391d0d157cdSjsg 	new_payload->vc_start_slot = -1;
33921bb76ff1Sjsg 
33931bb76ff1Sjsg 	mgr->payload_count--;
3394d0d157cdSjsg 	mgr->next_start_slot -= old_payload->time_slots;
3395f6f081b8Sjsg 
3396d0d157cdSjsg 	if (new_payload->delete)
3397d0d157cdSjsg 		drm_dp_mst_put_port_malloc(new_payload->port);
33981bb76ff1Sjsg }
33991bb76ff1Sjsg EXPORT_SYMBOL(drm_dp_remove_payload);
34001bb76ff1Sjsg 
34011bb76ff1Sjsg /**
34021bb76ff1Sjsg  * drm_dp_add_payload_part2() - Execute payload update part 2
34031bb76ff1Sjsg  * @mgr: Manager to use.
34041bb76ff1Sjsg  * @state: The global atomic state
34051bb76ff1Sjsg  * @payload: The payload to update
34061bb76ff1Sjsg  *
34071bb76ff1Sjsg  * If @payload was successfully assigned a starting time slot by drm_dp_add_payload_part1(), this
34081bb76ff1Sjsg  * function will send the sideband messages to finish allocating this payload.
34091bb76ff1Sjsg  *
34101bb76ff1Sjsg  * Returns: 0 on success, negative error code on failure.
34111bb76ff1Sjsg  */
34121bb76ff1Sjsg int drm_dp_add_payload_part2(struct drm_dp_mst_topology_mgr *mgr,
34131bb76ff1Sjsg 			     struct drm_atomic_state *state,
34141bb76ff1Sjsg 			     struct drm_dp_mst_atomic_payload *payload)
34151bb76ff1Sjsg {
34161bb76ff1Sjsg 	int ret = 0;
34171bb76ff1Sjsg 
34181bb76ff1Sjsg 	/* Skip failed payloads */
34191bb76ff1Sjsg 	if (payload->vc_start_slot == -1) {
34208c294423Sjsg 		drm_dbg_kms(mgr->dev, "Part 1 of payload creation for %s failed, skipping part 2\n",
34211bb76ff1Sjsg 			    payload->port->connector->name);
34221bb76ff1Sjsg 		return -EIO;
34231bb76ff1Sjsg 	}
34241bb76ff1Sjsg 
34251bb76ff1Sjsg 	ret = drm_dp_create_payload_step2(mgr, payload);
34261bb76ff1Sjsg 	if (ret < 0) {
34271bb76ff1Sjsg 		if (!payload->delete)
34281bb76ff1Sjsg 			drm_err(mgr->dev, "Step 2 of creating MST payload for %p failed: %d\n",
34291bb76ff1Sjsg 				payload->port, ret);
34301bb76ff1Sjsg 		else
34311bb76ff1Sjsg 			drm_dbg_kms(mgr->dev, "Step 2 of removing MST payload for %p failed: %d\n",
34321bb76ff1Sjsg 				    payload->port, ret);
34331bb76ff1Sjsg 	}
34341bb76ff1Sjsg 
34351bb76ff1Sjsg 	return ret;
34361bb76ff1Sjsg }
34371bb76ff1Sjsg EXPORT_SYMBOL(drm_dp_add_payload_part2);
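/*
 * Illustrative call sequence (a sketch; the exact steps vary per driver):
 *
 *	payload = drm_atomic_get_mst_payload_state(mst_state, port);
 *	ret = drm_dp_add_payload_part1(mgr, mst_state, payload);
 *	...driver triggers ACT and waits for its completion...
 *	ret = drm_dp_add_payload_part2(mgr, state, payload);
 *
 * with drm_dp_remove_payload() as the counterpart when tearing the
 * payload back down.
 */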
34381bb76ff1Sjsg 
34391bb76ff1Sjsg static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr,
34401bb76ff1Sjsg 				 struct drm_dp_mst_port *port,
34411bb76ff1Sjsg 				 int offset, int size, u8 *bytes)
34421bb76ff1Sjsg {
34431bb76ff1Sjsg 	int ret = 0;
34441bb76ff1Sjsg 	struct drm_dp_sideband_msg_tx *txmsg;
34451bb76ff1Sjsg 	struct drm_dp_mst_branch *mstb;
34461bb76ff1Sjsg 
34471bb76ff1Sjsg 	mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
34481bb76ff1Sjsg 	if (!mstb)
34491bb76ff1Sjsg 		return -EINVAL;
34501bb76ff1Sjsg 
34511bb76ff1Sjsg 	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
34521bb76ff1Sjsg 	if (!txmsg) {
34531bb76ff1Sjsg 		ret = -ENOMEM;
34541bb76ff1Sjsg 		goto fail_put;
34551bb76ff1Sjsg 	}
34561bb76ff1Sjsg 
34571bb76ff1Sjsg 	build_dpcd_read(txmsg, port->port_num, offset, size);
34581bb76ff1Sjsg 	txmsg->dst = port->parent;
34591bb76ff1Sjsg 
34601bb76ff1Sjsg 	drm_dp_queue_down_tx(mgr, txmsg);
34611bb76ff1Sjsg 
34621bb76ff1Sjsg 	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
34631bb76ff1Sjsg 	if (ret < 0)
34641bb76ff1Sjsg 		goto fail_free;
34651bb76ff1Sjsg 
	if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
34671bb76ff1Sjsg 		drm_dbg_kms(mgr->dev, "mstb %p port %d: DPCD read on addr 0x%x for %d bytes NAKed\n",
34681bb76ff1Sjsg 			    mstb, port->port_num, offset, size);
34691bb76ff1Sjsg 		ret = -EIO;
34701bb76ff1Sjsg 		goto fail_free;
34711bb76ff1Sjsg 	}
34721bb76ff1Sjsg 
34731bb76ff1Sjsg 	if (txmsg->reply.u.remote_dpcd_read_ack.num_bytes != size) {
34741bb76ff1Sjsg 		ret = -EPROTO;
34751bb76ff1Sjsg 		goto fail_free;
34761bb76ff1Sjsg 	}
34771bb76ff1Sjsg 
34781bb76ff1Sjsg 	ret = min_t(size_t, txmsg->reply.u.remote_dpcd_read_ack.num_bytes,
34791bb76ff1Sjsg 		    size);
34801bb76ff1Sjsg 	memcpy(bytes, txmsg->reply.u.remote_dpcd_read_ack.bytes, ret);
34811bb76ff1Sjsg 
34821bb76ff1Sjsg fail_free:
34831bb76ff1Sjsg 	kfree(txmsg);
34841bb76ff1Sjsg fail_put:
34851bb76ff1Sjsg 	drm_dp_mst_topology_put_mstb(mstb);
34861bb76ff1Sjsg 
34871bb76ff1Sjsg 	return ret;
34881bb76ff1Sjsg }
34891bb76ff1Sjsg 
34901bb76ff1Sjsg static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
34911bb76ff1Sjsg 				  struct drm_dp_mst_port *port,
34921bb76ff1Sjsg 				  int offset, int size, u8 *bytes)
34931bb76ff1Sjsg {
34941bb76ff1Sjsg 	int ret;
34951bb76ff1Sjsg 	struct drm_dp_sideband_msg_tx *txmsg;
34961bb76ff1Sjsg 	struct drm_dp_mst_branch *mstb;
34971bb76ff1Sjsg 
34981bb76ff1Sjsg 	mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
34991bb76ff1Sjsg 	if (!mstb)
35001bb76ff1Sjsg 		return -EINVAL;
35011bb76ff1Sjsg 
35021bb76ff1Sjsg 	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
35031bb76ff1Sjsg 	if (!txmsg) {
35041bb76ff1Sjsg 		ret = -ENOMEM;
35051bb76ff1Sjsg 		goto fail_put;
35061bb76ff1Sjsg 	}
35071bb76ff1Sjsg 
35081bb76ff1Sjsg 	build_dpcd_write(txmsg, port->port_num, offset, size, bytes);
35091bb76ff1Sjsg 	txmsg->dst = mstb;
35101bb76ff1Sjsg 
35111bb76ff1Sjsg 	drm_dp_queue_down_tx(mgr, txmsg);
35121bb76ff1Sjsg 
35131bb76ff1Sjsg 	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
35141bb76ff1Sjsg 	if (ret > 0) {
35151bb76ff1Sjsg 		if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
35161bb76ff1Sjsg 			ret = -EIO;
35171bb76ff1Sjsg 		else
35181bb76ff1Sjsg 			ret = size;
35191bb76ff1Sjsg 	}
35201bb76ff1Sjsg 
35211bb76ff1Sjsg 	kfree(txmsg);
35221bb76ff1Sjsg fail_put:
35231bb76ff1Sjsg 	drm_dp_mst_topology_put_mstb(mstb);
35241bb76ff1Sjsg 	return ret;
35251bb76ff1Sjsg }
35261bb76ff1Sjsg 
35271bb76ff1Sjsg static int drm_dp_encode_up_ack_reply(struct drm_dp_sideband_msg_tx *msg, u8 req_type)
35281bb76ff1Sjsg {
35291bb76ff1Sjsg 	struct drm_dp_sideband_msg_reply_body reply;
35301bb76ff1Sjsg 
35311bb76ff1Sjsg 	reply.reply_type = DP_SIDEBAND_REPLY_ACK;
35321bb76ff1Sjsg 	reply.req_type = req_type;
35331bb76ff1Sjsg 	drm_dp_encode_sideband_reply(&reply, msg);
35341bb76ff1Sjsg 	return 0;
35351bb76ff1Sjsg }
35361bb76ff1Sjsg 
35371bb76ff1Sjsg static int drm_dp_send_up_ack_reply(struct drm_dp_mst_topology_mgr *mgr,
35381bb76ff1Sjsg 				    struct drm_dp_mst_branch *mstb,
35391bb76ff1Sjsg 				    int req_type, bool broadcast)
35401bb76ff1Sjsg {
35411bb76ff1Sjsg 	struct drm_dp_sideband_msg_tx *txmsg;
35421bb76ff1Sjsg 
35431bb76ff1Sjsg 	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
35441bb76ff1Sjsg 	if (!txmsg)
35451bb76ff1Sjsg 		return -ENOMEM;
35461bb76ff1Sjsg 
35471bb76ff1Sjsg 	txmsg->dst = mstb;
35481bb76ff1Sjsg 	drm_dp_encode_up_ack_reply(txmsg, req_type);
35491bb76ff1Sjsg 
35501bb76ff1Sjsg 	mutex_lock(&mgr->qlock);
	/* send the ACK reply immediately; up replies bypass the down tx queue */
35521bb76ff1Sjsg 	process_single_tx_qlock(mgr, txmsg, true);
35531bb76ff1Sjsg 	mutex_unlock(&mgr->qlock);
35541bb76ff1Sjsg 
35551bb76ff1Sjsg 	kfree(txmsg);
35561bb76ff1Sjsg 	return 0;
35571bb76ff1Sjsg }
35581bb76ff1Sjsg 
35591bb76ff1Sjsg /**
35601bb76ff1Sjsg  * drm_dp_get_vc_payload_bw - get the VC payload BW for an MST link
35611bb76ff1Sjsg  * @mgr: The &drm_dp_mst_topology_mgr to use
35621bb76ff1Sjsg  * @link_rate: link rate in 10kbits/s units
35631bb76ff1Sjsg  * @link_lane_count: lane count
35641bb76ff1Sjsg  *
35651bb76ff1Sjsg  * Calculate the total bandwidth of a MultiStream Transport link. The returned
35661bb76ff1Sjsg  * value is in units of PBNs/(timeslots/1 MTP). This value can be used to
35671bb76ff1Sjsg  * convert the number of PBNs required for a given stream to the number of
35681bb76ff1Sjsg  * timeslots this stream requires in each MTP.
35691bb76ff1Sjsg  */
35701bb76ff1Sjsg int drm_dp_get_vc_payload_bw(const struct drm_dp_mst_topology_mgr *mgr,
35711bb76ff1Sjsg 			     int link_rate, int link_lane_count)
35721bb76ff1Sjsg {
35731bb76ff1Sjsg 	if (link_rate == 0 || link_lane_count == 0)
35741bb76ff1Sjsg 		drm_dbg_kms(mgr->dev, "invalid link rate/lane count: (%d / %d)\n",
35751bb76ff1Sjsg 			    link_rate, link_lane_count);
35761bb76ff1Sjsg 
35771bb76ff1Sjsg 	/* See DP v2.0 2.6.4.2, VCPayload_Bandwidth_for_OneTimeSlotPer_MTP_Allocation */
35781bb76ff1Sjsg 	return link_rate * link_lane_count / 54000;
35791bb76ff1Sjsg }
35801bb76ff1Sjsg EXPORT_SYMBOL(drm_dp_get_vc_payload_bw);
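/*
 * Worked example: an HBR2 link (5.4 Gbps per lane, so link_rate == 540000
 * in 10kbit/s units) with 4 lanes gives 540000 * 4 / 54000 == 40 PBN per
 * time slot, meaning a stream that needs 200 PBN occupies 5 time slots in
 * each MTP.
 */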
35811bb76ff1Sjsg 
35821bb76ff1Sjsg /**
35831bb76ff1Sjsg  * drm_dp_read_mst_cap() - check whether or not a sink supports MST
35841bb76ff1Sjsg  * @aux: The DP AUX channel to use
35851bb76ff1Sjsg  * @dpcd: A cached copy of the DPCD capabilities for this sink
35861bb76ff1Sjsg  *
 * Returns: %true if the sink supports MST, %false otherwise
35881bb76ff1Sjsg  */
35891bb76ff1Sjsg bool drm_dp_read_mst_cap(struct drm_dp_aux *aux,
35901bb76ff1Sjsg 			 const u8 dpcd[DP_RECEIVER_CAP_SIZE])
35911bb76ff1Sjsg {
35921bb76ff1Sjsg 	u8 mstm_cap;
35931bb76ff1Sjsg 
35941bb76ff1Sjsg 	if (dpcd[DP_DPCD_REV] < DP_DPCD_REV_12)
35951bb76ff1Sjsg 		return false;
35961bb76ff1Sjsg 
35971bb76ff1Sjsg 	if (drm_dp_dpcd_readb(aux, DP_MSTM_CAP, &mstm_cap) != 1)
35981bb76ff1Sjsg 		return false;
35991bb76ff1Sjsg 
36001bb76ff1Sjsg 	return mstm_cap & DP_MST_CAP;
36011bb76ff1Sjsg }
36021bb76ff1Sjsg EXPORT_SYMBOL(drm_dp_read_mst_cap);
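/*
 * Typical usage (an illustrative sketch): after caching the receiver
 * capabilities, a driver can gate MST setup on this check, e.g.
 *
 *	if (drm_dp_read_mst_cap(aux, dpcd))
 *		drm_dp_mst_topology_mgr_set_mst(mgr, true);
 */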
36031bb76ff1Sjsg 
36041bb76ff1Sjsg /**
36051bb76ff1Sjsg  * drm_dp_mst_topology_mgr_set_mst() - Set the MST state for a topology manager
36061bb76ff1Sjsg  * @mgr: manager to set state for
36071bb76ff1Sjsg  * @mst_state: true to enable MST on this connector - false to disable.
36081bb76ff1Sjsg  *
36091bb76ff1Sjsg  * This is called by the driver when it detects an MST capable device plugged
36101bb76ff1Sjsg  * into a DP MST capable port, or when a DP MST capable device is unplugged.
36111bb76ff1Sjsg  */
36121bb76ff1Sjsg int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool mst_state)
36131bb76ff1Sjsg {
36141bb76ff1Sjsg 	int ret = 0;
36151bb76ff1Sjsg 	struct drm_dp_mst_branch *mstb = NULL;
36161bb76ff1Sjsg 
36171bb76ff1Sjsg 	mutex_lock(&mgr->lock);
36181bb76ff1Sjsg 	if (mst_state == mgr->mst_state)
36191bb76ff1Sjsg 		goto out_unlock;
36201bb76ff1Sjsg 
36211bb76ff1Sjsg 	mgr->mst_state = mst_state;
36221bb76ff1Sjsg 	/* set the device into MST mode */
36231bb76ff1Sjsg 	if (mst_state) {
36241bb76ff1Sjsg 		WARN_ON(mgr->mst_primary);
36251bb76ff1Sjsg 
36261bb76ff1Sjsg 		/* get dpcd info */
36271bb76ff1Sjsg 		ret = drm_dp_read_dpcd_caps(mgr->aux, mgr->dpcd);
36281bb76ff1Sjsg 		if (ret < 0) {
36291bb76ff1Sjsg 			drm_dbg_kms(mgr->dev, "%s: failed to read DPCD, ret %d\n",
36301bb76ff1Sjsg 				    mgr->aux->name, ret);
36311bb76ff1Sjsg 			goto out_unlock;
36321bb76ff1Sjsg 		}
36331bb76ff1Sjsg 
36341bb76ff1Sjsg 		/* add initial branch device at LCT 1 */
36351bb76ff1Sjsg 		mstb = drm_dp_add_mst_branch_device(1, NULL);
36361bb76ff1Sjsg 		if (mstb == NULL) {
36371bb76ff1Sjsg 			ret = -ENOMEM;
36381bb76ff1Sjsg 			goto out_unlock;
36391bb76ff1Sjsg 		}
36401bb76ff1Sjsg 		mstb->mgr = mgr;
36411bb76ff1Sjsg 
36421bb76ff1Sjsg 		/* give this the main reference */
36431bb76ff1Sjsg 		mgr->mst_primary = mstb;
36441bb76ff1Sjsg 		drm_dp_mst_topology_get_mstb(mgr->mst_primary);
36451bb76ff1Sjsg 
36461bb76ff1Sjsg 		ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
36471bb76ff1Sjsg 					 DP_MST_EN |
36481bb76ff1Sjsg 					 DP_UP_REQ_EN |
36491bb76ff1Sjsg 					 DP_UPSTREAM_IS_SRC);
36501bb76ff1Sjsg 		if (ret < 0)
36511bb76ff1Sjsg 			goto out_unlock;
36521bb76ff1Sjsg 
36531bb76ff1Sjsg 		/* Write reset payload */
36541bb76ff1Sjsg 		drm_dp_dpcd_write_payload(mgr, 0, 0, 0x3f);
36551bb76ff1Sjsg 
36561bb76ff1Sjsg 		queue_work(system_long_wq, &mgr->work);
36571bb76ff1Sjsg 
36581bb76ff1Sjsg 		ret = 0;
36591bb76ff1Sjsg 	} else {
36601bb76ff1Sjsg 		/* disable MST on the device */
36611bb76ff1Sjsg 		mstb = mgr->mst_primary;
36621bb76ff1Sjsg 		mgr->mst_primary = NULL;
36631bb76ff1Sjsg 		/* this can fail if the device is gone */
36641bb76ff1Sjsg 		drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 0);
36651bb76ff1Sjsg 		ret = 0;
36661bb76ff1Sjsg 		mgr->payload_id_table_cleared = false;
36673b10c595Sjsg 
3668*d852286eSjsg 		mgr->reset_rx_state = true;
36691bb76ff1Sjsg 	}
36701bb76ff1Sjsg 
36711bb76ff1Sjsg out_unlock:
36721bb76ff1Sjsg 	mutex_unlock(&mgr->lock);
36731bb76ff1Sjsg 	if (mstb)
36741bb76ff1Sjsg 		drm_dp_mst_topology_put_mstb(mstb);
36751bb76ff1Sjsg 	return ret;
36761bb76ff1Sjsg 
36771bb76ff1Sjsg }
36781bb76ff1Sjsg EXPORT_SYMBOL(drm_dp_mst_topology_mgr_set_mst);
36791bb76ff1Sjsg 
36801bb76ff1Sjsg static void
36811bb76ff1Sjsg drm_dp_mst_topology_mgr_invalidate_mstb(struct drm_dp_mst_branch *mstb)
36821bb76ff1Sjsg {
36831bb76ff1Sjsg 	struct drm_dp_mst_port *port;
36841bb76ff1Sjsg 
36851bb76ff1Sjsg 	/* The link address will need to be re-sent on resume */
36861bb76ff1Sjsg 	mstb->link_address_sent = false;
36871bb76ff1Sjsg 
36881bb76ff1Sjsg 	list_for_each_entry(port, &mstb->ports, next)
36891bb76ff1Sjsg 		if (port->mstb)
36901bb76ff1Sjsg 			drm_dp_mst_topology_mgr_invalidate_mstb(port->mstb);
36911bb76ff1Sjsg }
36921bb76ff1Sjsg 
36931bb76ff1Sjsg /**
36941bb76ff1Sjsg  * drm_dp_mst_topology_mgr_suspend() - suspend the MST manager
36951bb76ff1Sjsg  * @mgr: manager to suspend
36961bb76ff1Sjsg  *
36971bb76ff1Sjsg  * This function tells the MST device that we can't handle UP messages
36981bb76ff1Sjsg  * anymore. This should stop it from sending any since we are suspended.
36991bb76ff1Sjsg  */
37001bb76ff1Sjsg void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr)
37011bb76ff1Sjsg {
37021bb76ff1Sjsg 	mutex_lock(&mgr->lock);
37031bb76ff1Sjsg 	drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
37041bb76ff1Sjsg 			   DP_MST_EN | DP_UPSTREAM_IS_SRC);
37051bb76ff1Sjsg 	mutex_unlock(&mgr->lock);
37061bb76ff1Sjsg 	flush_work(&mgr->up_req_work);
37071bb76ff1Sjsg 	flush_work(&mgr->work);
37081bb76ff1Sjsg 	flush_work(&mgr->delayed_destroy_work);
37091bb76ff1Sjsg 
37101bb76ff1Sjsg 	mutex_lock(&mgr->lock);
37111bb76ff1Sjsg 	if (mgr->mst_state && mgr->mst_primary)
37121bb76ff1Sjsg 		drm_dp_mst_topology_mgr_invalidate_mstb(mgr->mst_primary);
37131bb76ff1Sjsg 	mutex_unlock(&mgr->lock);
37141bb76ff1Sjsg }
37151bb76ff1Sjsg EXPORT_SYMBOL(drm_dp_mst_topology_mgr_suspend);
37161bb76ff1Sjsg 
37171bb76ff1Sjsg /**
37181bb76ff1Sjsg  * drm_dp_mst_topology_mgr_resume() - resume the MST manager
37191bb76ff1Sjsg  * @mgr: manager to resume
37201bb76ff1Sjsg  * @sync: whether or not to perform topology reprobing synchronously
37211bb76ff1Sjsg  *
 * This will fetch the DPCD and check whether the device is still there;
 * if it is, it will rewrite the MSTM control bits and return.
 *
 * If the device fails this check, this returns -1 and the driver should
 * do a full MST reprobe, in case we were undocked.
37271bb76ff1Sjsg  *
37281bb76ff1Sjsg  * During system resume (where it is assumed that the driver will be calling
37291bb76ff1Sjsg  * drm_atomic_helper_resume()) this function should be called beforehand with
37301bb76ff1Sjsg  * @sync set to true. In contexts like runtime resume where the driver is not
37311bb76ff1Sjsg  * expected to be calling drm_atomic_helper_resume(), this function should be
37321bb76ff1Sjsg  * called with @sync set to false in order to avoid deadlocking.
37331bb76ff1Sjsg  *
37341bb76ff1Sjsg  * Returns: -1 if the MST topology was removed while we were suspended, 0
37351bb76ff1Sjsg  * otherwise.
37361bb76ff1Sjsg  */
37371bb76ff1Sjsg int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr,
37381bb76ff1Sjsg 				   bool sync)
37391bb76ff1Sjsg {
37401bb76ff1Sjsg 	int ret;
37411bb76ff1Sjsg 	u8 guid[16];
37421bb76ff1Sjsg 
37431bb76ff1Sjsg 	mutex_lock(&mgr->lock);
37441bb76ff1Sjsg 	if (!mgr->mst_primary)
37451bb76ff1Sjsg 		goto out_fail;
37461bb76ff1Sjsg 
37471bb76ff1Sjsg 	if (drm_dp_read_dpcd_caps(mgr->aux, mgr->dpcd) < 0) {
37481bb76ff1Sjsg 		drm_dbg_kms(mgr->dev, "dpcd read failed - undocked during suspend?\n");
37491bb76ff1Sjsg 		goto out_fail;
37501bb76ff1Sjsg 	}
37511bb76ff1Sjsg 
37521bb76ff1Sjsg 	ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
37531bb76ff1Sjsg 				 DP_MST_EN |
37541bb76ff1Sjsg 				 DP_UP_REQ_EN |
37551bb76ff1Sjsg 				 DP_UPSTREAM_IS_SRC);
37561bb76ff1Sjsg 	if (ret < 0) {
37571bb76ff1Sjsg 		drm_dbg_kms(mgr->dev, "mst write failed - undocked during suspend?\n");
37581bb76ff1Sjsg 		goto out_fail;
37591bb76ff1Sjsg 	}
37601bb76ff1Sjsg 
37611bb76ff1Sjsg 	/* Some hubs forget their guids after they resume */
37621bb76ff1Sjsg 	ret = drm_dp_dpcd_read(mgr->aux, DP_GUID, guid, 16);
37631bb76ff1Sjsg 	if (ret != 16) {
37641bb76ff1Sjsg 		drm_dbg_kms(mgr->dev, "dpcd read failed - undocked during suspend?\n");
37651bb76ff1Sjsg 		goto out_fail;
37661bb76ff1Sjsg 	}
37671bb76ff1Sjsg 
37681bb76ff1Sjsg 	ret = drm_dp_check_mstb_guid(mgr->mst_primary, guid);
37691bb76ff1Sjsg 	if (ret) {
37701bb76ff1Sjsg 		drm_dbg_kms(mgr->dev, "check mstb failed - undocked during suspend?\n");
37711bb76ff1Sjsg 		goto out_fail;
37721bb76ff1Sjsg 	}
37731bb76ff1Sjsg 
37741bb76ff1Sjsg 	/*
37751bb76ff1Sjsg 	 * For the final step of resuming the topology, we need to bring the
37761bb76ff1Sjsg 	 * state of our in-memory topology back into sync with reality. So,
37771bb76ff1Sjsg 	 * restart the probing process as if we're probing a new hub
37781bb76ff1Sjsg 	 */
37791bb76ff1Sjsg 	queue_work(system_long_wq, &mgr->work);
37801bb76ff1Sjsg 	mutex_unlock(&mgr->lock);
37811bb76ff1Sjsg 
37821bb76ff1Sjsg 	if (sync) {
37831bb76ff1Sjsg 		drm_dbg_kms(mgr->dev,
37841bb76ff1Sjsg 			    "Waiting for link probe work to finish re-syncing topology...\n");
37851bb76ff1Sjsg 		flush_work(&mgr->work);
37861bb76ff1Sjsg 	}
37871bb76ff1Sjsg 
37881bb76ff1Sjsg 	return 0;
37891bb76ff1Sjsg 
37901bb76ff1Sjsg out_fail:
37911bb76ff1Sjsg 	mutex_unlock(&mgr->lock);
37921bb76ff1Sjsg 	return -1;
37931bb76ff1Sjsg }
37941bb76ff1Sjsg EXPORT_SYMBOL(drm_dp_mst_topology_mgr_resume);
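
/*
 * A minimal usage sketch (not part of the original file): how a hypothetical
 * driver might call the resume helper above from its system-resume path,
 * with @sync set to true because it goes on to call
 * drm_atomic_helper_resume(). The "example_" names and the example_dp
 * structure are assumptions for illustration only.
 */
#if 0	/* example only */
static int example_driver_system_resume(struct example_dp *dp)
{
	int ret;

	/* sync == true: we are about to call drm_atomic_helper_resume() */
	ret = drm_dp_mst_topology_mgr_resume(&dp->mst_mgr, true);
	if (ret < 0) {
		/*
		 * The topology went away while we were suspended (e.g. the
		 * user undocked); tear down MST and reprobe from scratch.
		 */
		drm_dp_mst_topology_mgr_set_mst(&dp->mst_mgr, false);
		return ret;
	}

	return drm_atomic_helper_resume(dp->drm_dev, dp->suspend_state);
}
#endif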
37951bb76ff1Sjsg 
3796*d852286eSjsg static void reset_msg_rx_state(struct drm_dp_sideband_msg_rx *msg)
3797*d852286eSjsg {
3798*d852286eSjsg 	memset(msg, 0, sizeof(*msg));
3799*d852286eSjsg }
3800*d852286eSjsg 
38011bb76ff1Sjsg static bool
38021bb76ff1Sjsg drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up,
38031bb76ff1Sjsg 		      struct drm_dp_mst_branch **mstb)
38041bb76ff1Sjsg {
38051bb76ff1Sjsg 	int len;
38061bb76ff1Sjsg 	u8 replyblock[32];
38071bb76ff1Sjsg 	int replylen, curreply;
38081bb76ff1Sjsg 	int ret;
38091bb76ff1Sjsg 	u8 hdrlen;
38101bb76ff1Sjsg 	struct drm_dp_sideband_msg_hdr hdr;
38111bb76ff1Sjsg 	struct drm_dp_sideband_msg_rx *msg =
38121bb76ff1Sjsg 		up ? &mgr->up_req_recv : &mgr->down_rep_recv;
38131bb76ff1Sjsg 	int basereg = up ? DP_SIDEBAND_MSG_UP_REQ_BASE :
38141bb76ff1Sjsg 			   DP_SIDEBAND_MSG_DOWN_REP_BASE;
38151bb76ff1Sjsg 
38161bb76ff1Sjsg 	if (!up)
38171bb76ff1Sjsg 		*mstb = NULL;
38181bb76ff1Sjsg 
38191bb76ff1Sjsg 	len = min(mgr->max_dpcd_transaction_bytes, 16);
38201bb76ff1Sjsg 	ret = drm_dp_dpcd_read(mgr->aux, basereg, replyblock, len);
38211bb76ff1Sjsg 	if (ret != len) {
38221bb76ff1Sjsg 		drm_dbg_kms(mgr->dev, "failed to read DPCD down rep %d %d\n", len, ret);
38231bb76ff1Sjsg 		return false;
38241bb76ff1Sjsg 	}
38251bb76ff1Sjsg 
38261bb76ff1Sjsg 	ret = drm_dp_decode_sideband_msg_hdr(mgr, &hdr, replyblock, len, &hdrlen);
38271bb76ff1Sjsg 	if (!ret) {
38281bb76ff1Sjsg 		print_hex_dump(KERN_DEBUG, "failed hdr", DUMP_PREFIX_NONE, 16,
38291bb76ff1Sjsg 			       1, replyblock, len, false);
38301bb76ff1Sjsg 		drm_dbg_kms(mgr->dev, "ERROR: failed header\n");
38311bb76ff1Sjsg 		return false;
38321bb76ff1Sjsg 	}
38331bb76ff1Sjsg 
38341bb76ff1Sjsg 	if (!up) {
38351bb76ff1Sjsg 		/* Caller is responsible for giving back this reference */
38361bb76ff1Sjsg 		*mstb = drm_dp_get_mst_branch_device(mgr, hdr.lct, hdr.rad);
38371bb76ff1Sjsg 		if (!*mstb) {
38381bb76ff1Sjsg 			drm_dbg_kms(mgr->dev, "Got MST reply from unknown device %d\n", hdr.lct);
38391bb76ff1Sjsg 			return false;
38401bb76ff1Sjsg 		}
38411bb76ff1Sjsg 	}
38421bb76ff1Sjsg 
38431bb76ff1Sjsg 	if (!drm_dp_sideband_msg_set_header(msg, &hdr, hdrlen)) {
38441bb76ff1Sjsg 		drm_dbg_kms(mgr->dev, "sideband msg set header failed %d\n", replyblock[0]);
38451bb76ff1Sjsg 		return false;
38461bb76ff1Sjsg 	}
38471bb76ff1Sjsg 
38481bb76ff1Sjsg 	replylen = min(msg->curchunk_len, (u8)(len - hdrlen));
38491bb76ff1Sjsg 	ret = drm_dp_sideband_append_payload(msg, replyblock + hdrlen, replylen);
38501bb76ff1Sjsg 	if (!ret) {
38511bb76ff1Sjsg 		drm_dbg_kms(mgr->dev, "sideband msg build failed %d\n", replyblock[0]);
38521bb76ff1Sjsg 		return false;
38531bb76ff1Sjsg 	}
38541bb76ff1Sjsg 
38551bb76ff1Sjsg 	replylen = msg->curchunk_len + msg->curchunk_hdrlen - len;
38561bb76ff1Sjsg 	curreply = len;
38571bb76ff1Sjsg 	while (replylen > 0) {
38581bb76ff1Sjsg 		len = min3(replylen, mgr->max_dpcd_transaction_bytes, 16);
38591bb76ff1Sjsg 		ret = drm_dp_dpcd_read(mgr->aux, basereg + curreply,
38601bb76ff1Sjsg 				    replyblock, len);
38611bb76ff1Sjsg 		if (ret != len) {
38621bb76ff1Sjsg 			drm_dbg_kms(mgr->dev, "failed to read a chunk (len %d, ret %d)\n",
38631bb76ff1Sjsg 				    len, ret);
38641bb76ff1Sjsg 			return false;
38651bb76ff1Sjsg 		}
38661bb76ff1Sjsg 
38671bb76ff1Sjsg 		ret = drm_dp_sideband_append_payload(msg, replyblock, len);
38681bb76ff1Sjsg 		if (!ret) {
38691bb76ff1Sjsg 			drm_dbg_kms(mgr->dev, "failed to build sideband msg\n");
38701bb76ff1Sjsg 			return false;
38711bb76ff1Sjsg 		}
38721bb76ff1Sjsg 
38731bb76ff1Sjsg 		curreply += len;
38741bb76ff1Sjsg 		replylen -= len;
38751bb76ff1Sjsg 	}
38761bb76ff1Sjsg 	return true;
38771bb76ff1Sjsg }
38781bb76ff1Sjsg 
38794d21b784Sjsg static int get_msg_request_type(u8 data)
38804d21b784Sjsg {
38814d21b784Sjsg 	return data & 0x7f;
38824d21b784Sjsg }
38834d21b784Sjsg 
38844d21b784Sjsg static bool verify_rx_request_type(struct drm_dp_mst_topology_mgr *mgr,
38854d21b784Sjsg 				   const struct drm_dp_sideband_msg_tx *txmsg,
38864d21b784Sjsg 				   const struct drm_dp_sideband_msg_rx *rxmsg)
38874d21b784Sjsg {
38884d21b784Sjsg 	const struct drm_dp_sideband_msg_hdr *hdr = &rxmsg->initial_hdr;
38894d21b784Sjsg 	const struct drm_dp_mst_branch *mstb = txmsg->dst;
38904d21b784Sjsg 	int tx_req_type = get_msg_request_type(txmsg->msg[0]);
38914d21b784Sjsg 	int rx_req_type = get_msg_request_type(rxmsg->msg[0]);
38924d21b784Sjsg 	char rad_str[64];
38934d21b784Sjsg 
38944d21b784Sjsg 	if (tx_req_type == rx_req_type)
38954d21b784Sjsg 		return true;
38964d21b784Sjsg 
38974d21b784Sjsg 	drm_dp_mst_rad_to_str(mstb->rad, mstb->lct, rad_str, sizeof(rad_str));
38984d21b784Sjsg 	drm_dbg_kms(mgr->dev,
38994d21b784Sjsg 		    "Got unexpected MST reply, mstb: %p seqno: %d lct: %d rad: %s rx_req_type: %s (%02x) != tx_req_type: %s (%02x)\n",
39004d21b784Sjsg 		    mstb, hdr->seqno, mstb->lct, rad_str,
39014d21b784Sjsg 		    drm_dp_mst_req_type_str(rx_req_type), rx_req_type,
39024d21b784Sjsg 		    drm_dp_mst_req_type_str(tx_req_type), tx_req_type);
39034d21b784Sjsg 
39044d21b784Sjsg 	return false;
39054d21b784Sjsg }
39064d21b784Sjsg 
39071bb76ff1Sjsg static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
39081bb76ff1Sjsg {
39091bb76ff1Sjsg 	struct drm_dp_sideband_msg_tx *txmsg;
39101bb76ff1Sjsg 	struct drm_dp_mst_branch *mstb = NULL;
39111bb76ff1Sjsg 	struct drm_dp_sideband_msg_rx *msg = &mgr->down_rep_recv;
39121bb76ff1Sjsg 
39131bb76ff1Sjsg 	if (!drm_dp_get_one_sb_msg(mgr, false, &mstb))
3914dcf8af2aSjsg 		goto out_clear_reply;
39151bb76ff1Sjsg 
39161bb76ff1Sjsg 	/* Multi-packet message transmission, don't clear the reply */
39171bb76ff1Sjsg 	if (!msg->have_eomt)
39181bb76ff1Sjsg 		goto out;
39191bb76ff1Sjsg 
39201bb76ff1Sjsg 	/* find the message */
39211bb76ff1Sjsg 	mutex_lock(&mgr->qlock);
39221bb76ff1Sjsg 	txmsg = list_first_entry_or_null(&mgr->tx_msg_downq,
39231bb76ff1Sjsg 					 struct drm_dp_sideband_msg_tx, next);
39241bb76ff1Sjsg 	mutex_unlock(&mgr->qlock);
39251bb76ff1Sjsg 
39261bb76ff1Sjsg 	/* Were we actually expecting a response, and from this mstb? */
39271bb76ff1Sjsg 	if (!txmsg || txmsg->dst != mstb) {
39281bb76ff1Sjsg 		struct drm_dp_sideband_msg_hdr *hdr;
39291bb76ff1Sjsg 
39301bb76ff1Sjsg 		hdr = &msg->initial_hdr;
39311bb76ff1Sjsg 		drm_dbg_kms(mgr->dev, "Got MST reply with no msg %p %d %d %02x %02x\n",
39321bb76ff1Sjsg 			    mstb, hdr->seqno, hdr->lct, hdr->rad[0], msg->msg[0]);
39331bb76ff1Sjsg 		goto out_clear_reply;
39341bb76ff1Sjsg 	}
39351bb76ff1Sjsg 
39364d21b784Sjsg 	if (!verify_rx_request_type(mgr, txmsg, msg))
39374d21b784Sjsg 		goto out_clear_reply;
39384d21b784Sjsg 
39391bb76ff1Sjsg 	drm_dp_sideband_parse_reply(mgr, msg, &txmsg->reply);
39401bb76ff1Sjsg 
39411bb76ff1Sjsg 	if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
39421bb76ff1Sjsg 		drm_dbg_kms(mgr->dev,
39431bb76ff1Sjsg 			    "Got NAK reply: req 0x%02x (%s), reason 0x%02x (%s), nak data 0x%02x\n",
39441bb76ff1Sjsg 			    txmsg->reply.req_type,
39451bb76ff1Sjsg 			    drm_dp_mst_req_type_str(txmsg->reply.req_type),
39461bb76ff1Sjsg 			    txmsg->reply.u.nak.reason,
39471bb76ff1Sjsg 			    drm_dp_mst_nak_reason_str(txmsg->reply.u.nak.reason),
39481bb76ff1Sjsg 			    txmsg->reply.u.nak.nak_data);
39491bb76ff1Sjsg 	}
39501bb76ff1Sjsg 
39511bb76ff1Sjsg 	memset(msg, 0, sizeof(struct drm_dp_sideband_msg_rx));
39521bb76ff1Sjsg 	drm_dp_mst_topology_put_mstb(mstb);
39531bb76ff1Sjsg 
39541bb76ff1Sjsg 	mutex_lock(&mgr->qlock);
39551bb76ff1Sjsg 	txmsg->state = DRM_DP_SIDEBAND_TX_RX;
39561bb76ff1Sjsg 	list_del(&txmsg->next);
39571bb76ff1Sjsg 	mutex_unlock(&mgr->qlock);
39581bb76ff1Sjsg 
39591bb76ff1Sjsg 	wake_up_all(&mgr->tx_waitq);
39601bb76ff1Sjsg 
39611bb76ff1Sjsg 	return 0;
39621bb76ff1Sjsg 
39631bb76ff1Sjsg out_clear_reply:
39641bb76ff1Sjsg 	memset(msg, 0, sizeof(struct drm_dp_sideband_msg_rx));
39651bb76ff1Sjsg out:
39661bb76ff1Sjsg 	if (mstb)
39671bb76ff1Sjsg 		drm_dp_mst_topology_put_mstb(mstb);
39681bb76ff1Sjsg 
39691bb76ff1Sjsg 	return 0;
39701bb76ff1Sjsg }
39711bb76ff1Sjsg 
39721bb76ff1Sjsg static inline bool
39731bb76ff1Sjsg drm_dp_mst_process_up_req(struct drm_dp_mst_topology_mgr *mgr,
39741bb76ff1Sjsg 			  struct drm_dp_pending_up_req *up_req)
39751bb76ff1Sjsg {
39761bb76ff1Sjsg 	struct drm_dp_mst_branch *mstb = NULL;
39771bb76ff1Sjsg 	struct drm_dp_sideband_msg_req_body *msg = &up_req->msg;
39781bb76ff1Sjsg 	struct drm_dp_sideband_msg_hdr *hdr = &up_req->hdr;
39791bb76ff1Sjsg 	bool hotplug = false, dowork = false;
39801bb76ff1Sjsg 
39811bb76ff1Sjsg 	if (hdr->broadcast) {
39821bb76ff1Sjsg 		const u8 *guid = NULL;
39831bb76ff1Sjsg 
39841bb76ff1Sjsg 		if (msg->req_type == DP_CONNECTION_STATUS_NOTIFY)
39851bb76ff1Sjsg 			guid = msg->u.conn_stat.guid;
39861bb76ff1Sjsg 		else if (msg->req_type == DP_RESOURCE_STATUS_NOTIFY)
39871bb76ff1Sjsg 			guid = msg->u.resource_stat.guid;
39881bb76ff1Sjsg 
39891bb76ff1Sjsg 		if (guid)
39901bb76ff1Sjsg 			mstb = drm_dp_get_mst_branch_device_by_guid(mgr, guid);
39911bb76ff1Sjsg 	} else {
39921bb76ff1Sjsg 		mstb = drm_dp_get_mst_branch_device(mgr, hdr->lct, hdr->rad);
39931bb76ff1Sjsg 	}
39941bb76ff1Sjsg 
39951bb76ff1Sjsg 	if (!mstb) {
39961bb76ff1Sjsg 		drm_dbg_kms(mgr->dev, "Got MST reply from unknown device %d\n", hdr->lct);
39971bb76ff1Sjsg 		return false;
39981bb76ff1Sjsg 	}
39991bb76ff1Sjsg 
40001bb76ff1Sjsg 	/* TODO: Add missing handler for DP_RESOURCE_STATUS_NOTIFY events */
40011bb76ff1Sjsg 	if (msg->req_type == DP_CONNECTION_STATUS_NOTIFY) {
40021bb76ff1Sjsg 		dowork = drm_dp_mst_handle_conn_stat(mstb, &msg->u.conn_stat);
40031bb76ff1Sjsg 		hotplug = true;
40041bb76ff1Sjsg 	}
40051bb76ff1Sjsg 
40061bb76ff1Sjsg 	drm_dp_mst_topology_put_mstb(mstb);
40071bb76ff1Sjsg 
40081bb76ff1Sjsg 	if (dowork)
40091bb76ff1Sjsg 		queue_work(system_long_wq, &mgr->work);
40101bb76ff1Sjsg 	return hotplug;
40111bb76ff1Sjsg }
40121bb76ff1Sjsg 
40131bb76ff1Sjsg static void drm_dp_mst_up_req_work(struct work_struct *work)
40141bb76ff1Sjsg {
40151bb76ff1Sjsg 	struct drm_dp_mst_topology_mgr *mgr =
40161bb76ff1Sjsg 		container_of(work, struct drm_dp_mst_topology_mgr,
40171bb76ff1Sjsg 			     up_req_work);
40181bb76ff1Sjsg 	struct drm_dp_pending_up_req *up_req;
40191bb76ff1Sjsg 	bool send_hotplug = false;
40201bb76ff1Sjsg 
40211bb76ff1Sjsg 	mutex_lock(&mgr->probe_lock);
40221bb76ff1Sjsg 	while (true) {
40231bb76ff1Sjsg 		mutex_lock(&mgr->up_req_lock);
40241bb76ff1Sjsg 		up_req = list_first_entry_or_null(&mgr->up_req_list,
40251bb76ff1Sjsg 						  struct drm_dp_pending_up_req,
40261bb76ff1Sjsg 						  next);
40271bb76ff1Sjsg 		if (up_req)
40281bb76ff1Sjsg 			list_del(&up_req->next);
40291bb76ff1Sjsg 		mutex_unlock(&mgr->up_req_lock);
40301bb76ff1Sjsg 
40311bb76ff1Sjsg 		if (!up_req)
40321bb76ff1Sjsg 			break;
40331bb76ff1Sjsg 
40341bb76ff1Sjsg 		send_hotplug |= drm_dp_mst_process_up_req(mgr, up_req);
40351bb76ff1Sjsg 		kfree(up_req);
40361bb76ff1Sjsg 	}
40371bb76ff1Sjsg 	mutex_unlock(&mgr->probe_lock);
40381bb76ff1Sjsg 
40391bb76ff1Sjsg 	if (send_hotplug)
40401bb76ff1Sjsg 		drm_kms_helper_hotplug_event(mgr->dev);
40411bb76ff1Sjsg }
40421bb76ff1Sjsg 
40431bb76ff1Sjsg static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
40441bb76ff1Sjsg {
40451bb76ff1Sjsg 	struct drm_dp_pending_up_req *up_req;
40461bb76ff1Sjsg 
40471bb76ff1Sjsg 	if (!drm_dp_get_one_sb_msg(mgr, true, NULL))
40481bb76ff1Sjsg 		goto out;
40491bb76ff1Sjsg 
40501bb76ff1Sjsg 	if (!mgr->up_req_recv.have_eomt)
40511bb76ff1Sjsg 		return 0;
40521bb76ff1Sjsg 
40531bb76ff1Sjsg 	up_req = kzalloc(sizeof(*up_req), GFP_KERNEL);
40541bb76ff1Sjsg 	if (!up_req)
40551bb76ff1Sjsg 		return -ENOMEM;
40561bb76ff1Sjsg 
40571bb76ff1Sjsg 	INIT_LIST_HEAD(&up_req->next);
40581bb76ff1Sjsg 
40591bb76ff1Sjsg 	drm_dp_sideband_parse_req(mgr, &mgr->up_req_recv, &up_req->msg);
40601bb76ff1Sjsg 
40611bb76ff1Sjsg 	if (up_req->msg.req_type != DP_CONNECTION_STATUS_NOTIFY &&
40621bb76ff1Sjsg 	    up_req->msg.req_type != DP_RESOURCE_STATUS_NOTIFY) {
40631bb76ff1Sjsg 		drm_dbg_kms(mgr->dev, "Received unknown up req type, ignoring: %x\n",
40641bb76ff1Sjsg 			    up_req->msg.req_type);
40651bb76ff1Sjsg 		kfree(up_req);
40661bb76ff1Sjsg 		goto out;
40671bb76ff1Sjsg 	}
40681bb76ff1Sjsg 
40691bb76ff1Sjsg 	drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, up_req->msg.req_type,
40701bb76ff1Sjsg 				 false);
40711bb76ff1Sjsg 
40721bb76ff1Sjsg 	if (up_req->msg.req_type == DP_CONNECTION_STATUS_NOTIFY) {
40731bb76ff1Sjsg 		const struct drm_dp_connection_status_notify *conn_stat =
40741bb76ff1Sjsg 			&up_req->msg.u.conn_stat;
40750c230191Sjsg 		bool handle_csn;
40761bb76ff1Sjsg 
40771bb76ff1Sjsg 		drm_dbg_kms(mgr->dev, "Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n",
40781bb76ff1Sjsg 			    conn_stat->port_number,
40791bb76ff1Sjsg 			    conn_stat->legacy_device_plug_status,
40801bb76ff1Sjsg 			    conn_stat->displayport_device_plug_status,
40811bb76ff1Sjsg 			    conn_stat->message_capability_status,
40821bb76ff1Sjsg 			    conn_stat->input_port,
40831bb76ff1Sjsg 			    conn_stat->peer_device_type);
40840c230191Sjsg 
40850c230191Sjsg 		mutex_lock(&mgr->probe_lock);
40860c230191Sjsg 		handle_csn = mgr->mst_primary->link_address_sent;
40870c230191Sjsg 		mutex_unlock(&mgr->probe_lock);
40880c230191Sjsg 
40890c230191Sjsg 		if (!handle_csn) {
40900c230191Sjsg 			drm_dbg_kms(mgr->dev, "Got CSN before finish topology probing. Skip it.");
40910c230191Sjsg 			kfree(up_req);
40920c230191Sjsg 			goto out;
40930c230191Sjsg 		}
40941bb76ff1Sjsg 	} else if (up_req->msg.req_type == DP_RESOURCE_STATUS_NOTIFY) {
40951bb76ff1Sjsg 		const struct drm_dp_resource_status_notify *res_stat =
40961bb76ff1Sjsg 			&up_req->msg.u.resource_stat;
40971bb76ff1Sjsg 
40981bb76ff1Sjsg 		drm_dbg_kms(mgr->dev, "Got RSN: pn: %d avail_pbn %d\n",
40991bb76ff1Sjsg 			    res_stat->port_number,
41001bb76ff1Sjsg 			    res_stat->available_pbn);
41011bb76ff1Sjsg 	}
41021bb76ff1Sjsg 
41031bb76ff1Sjsg 	up_req->hdr = mgr->up_req_recv.initial_hdr;
41041bb76ff1Sjsg 	mutex_lock(&mgr->up_req_lock);
41051bb76ff1Sjsg 	list_add_tail(&up_req->next, &mgr->up_req_list);
41061bb76ff1Sjsg 	mutex_unlock(&mgr->up_req_lock);
41071bb76ff1Sjsg 	queue_work(system_long_wq, &mgr->up_req_work);
41081bb76ff1Sjsg 
41091bb76ff1Sjsg out:
41101bb76ff1Sjsg 	memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
41111bb76ff1Sjsg 	return 0;
41121bb76ff1Sjsg }
41131bb76ff1Sjsg 
4114*d852286eSjsg static void update_msg_rx_state(struct drm_dp_mst_topology_mgr *mgr)
4115*d852286eSjsg {
4116*d852286eSjsg 	mutex_lock(&mgr->lock);
4117*d852286eSjsg 	if (mgr->reset_rx_state) {
4118*d852286eSjsg 		mgr->reset_rx_state = false;
4119*d852286eSjsg 		reset_msg_rx_state(&mgr->down_rep_recv);
4120*d852286eSjsg 		reset_msg_rx_state(&mgr->up_req_recv);
4121*d852286eSjsg 	}
4122*d852286eSjsg 	mutex_unlock(&mgr->lock);
4123*d852286eSjsg }
4124*d852286eSjsg 
41251bb76ff1Sjsg /**
412683f201fbSjsg  * drm_dp_mst_hpd_irq_handle_event() - MST hotplug IRQ handle MST event
41271bb76ff1Sjsg  * @mgr: manager to notify irq for.
41281bb76ff1Sjsg  * @esi: 4 bytes from SINK_COUNT_ESI
412983f201fbSjsg  * @ack: 4 bytes used to ack events starting from SINK_COUNT_ESI
41301bb76ff1Sjsg  * @handled: whether the hpd interrupt was consumed or not
41311bb76ff1Sjsg  *
413283f201fbSjsg  * This should be called from the driver when it detects a HPD IRQ,
41331bb76ff1Sjsg  * along with the value of the DEVICE_SERVICE_IRQ_VECTOR_ESI0. The
413483f201fbSjsg  * topology manager will process the sideband messages received as
413583f201fbSjsg  * indicated by DEVICE_SERVICE_IRQ_VECTOR_ESI0 and set the corresponding
413683f201fbSjsg  * bits in @ack that the driver must write back to the DP receiver later.
413783f201fbSjsg  *
413883f201fbSjsg  * Note that if 'handled' is set after calling this function, the driver
413983f201fbSjsg  * shall also call drm_dp_mst_hpd_irq_send_new_request() to try to kick
414083f201fbSjsg  * off a new request in the queue, provided the previous message
414183f201fbSjsg  * transaction has completed.
414283f201fbSjsg  *
414383f201fbSjsg  * See also:
414483f201fbSjsg  * drm_dp_mst_hpd_irq_send_new_request()
41451bb76ff1Sjsg  */
414683f201fbSjsg int drm_dp_mst_hpd_irq_handle_event(struct drm_dp_mst_topology_mgr *mgr, const u8 *esi,
414783f201fbSjsg 				    u8 *ack, bool *handled)
41481bb76ff1Sjsg {
41491bb76ff1Sjsg 	int ret = 0;
41501bb76ff1Sjsg 	int sc;
41511bb76ff1Sjsg 	*handled = false;
41521bb76ff1Sjsg 	sc = DP_GET_SINK_COUNT(esi[0]);
41531bb76ff1Sjsg 
41541bb76ff1Sjsg 	if (sc != mgr->sink_count) {
41551bb76ff1Sjsg 		mgr->sink_count = sc;
41561bb76ff1Sjsg 		*handled = true;
41571bb76ff1Sjsg 	}
41581bb76ff1Sjsg 
4159*d852286eSjsg 	update_msg_rx_state(mgr);
4160*d852286eSjsg 
41611bb76ff1Sjsg 	if (esi[1] & DP_DOWN_REP_MSG_RDY) {
41621bb76ff1Sjsg 		ret = drm_dp_mst_handle_down_rep(mgr);
41631bb76ff1Sjsg 		*handled = true;
416483f201fbSjsg 		ack[1] |= DP_DOWN_REP_MSG_RDY;
41651bb76ff1Sjsg 	}
41661bb76ff1Sjsg 
41671bb76ff1Sjsg 	if (esi[1] & DP_UP_REQ_MSG_RDY) {
41681bb76ff1Sjsg 		ret |= drm_dp_mst_handle_up_req(mgr);
41691bb76ff1Sjsg 		*handled = true;
417083f201fbSjsg 		ack[1] |= DP_UP_REQ_MSG_RDY;
41711bb76ff1Sjsg 	}
41721bb76ff1Sjsg 
41731bb76ff1Sjsg 	return ret;
41741bb76ff1Sjsg }
417583f201fbSjsg EXPORT_SYMBOL(drm_dp_mst_hpd_irq_handle_event);
41761bb76ff1Sjsg 
41771bb76ff1Sjsg /**
417883f201fbSjsg  * drm_dp_mst_hpd_irq_send_new_request() - MST hotplug IRQ kick off new request
417983f201fbSjsg  * @mgr: manager to notify irq for.
418083f201fbSjsg  *
418183f201fbSjsg  * This should be called from the driver once the MST IRQ event has been
418283f201fbSjsg  * handled and acked. Note that a new down request should only be sent
418383f201fbSjsg  * once the previous message transaction has completed, as the source is
418483f201fbSjsg  * not supposed to generate interleaved message transactions.
418583f201fbSjsg  */
418683f201fbSjsg void drm_dp_mst_hpd_irq_send_new_request(struct drm_dp_mst_topology_mgr *mgr)
418783f201fbSjsg {
418883f201fbSjsg 	struct drm_dp_sideband_msg_tx *txmsg;
418983f201fbSjsg 	bool kick = true;
419083f201fbSjsg 
419183f201fbSjsg 	mutex_lock(&mgr->qlock);
419283f201fbSjsg 	txmsg = list_first_entry_or_null(&mgr->tx_msg_downq,
419383f201fbSjsg 					 struct drm_dp_sideband_msg_tx, next);
419483f201fbSjsg 	/* If the last transaction is not completed yet */
419583f201fbSjsg 	if (!txmsg ||
419683f201fbSjsg 	    txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND ||
419783f201fbSjsg 	    txmsg->state == DRM_DP_SIDEBAND_TX_SENT)
419883f201fbSjsg 		kick = false;
419983f201fbSjsg 	mutex_unlock(&mgr->qlock);
420083f201fbSjsg 
420183f201fbSjsg 	if (kick)
420283f201fbSjsg 		drm_dp_mst_kick_tx(mgr);
420383f201fbSjsg }
420483f201fbSjsg EXPORT_SYMBOL(drm_dp_mst_hpd_irq_send_new_request);
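
/*
 * A minimal sketch (not from the original file) of the calling sequence the
 * two helpers above describe, as a hypothetical driver's MST HPD IRQ
 * handler. Reading four bytes starting at DP_SINK_COUNT_ESI and acking at
 * the following byte mirrors what the kernel-doc says; the surrounding
 * driver code and "example_" names are illustrative assumptions.
 */
#if 0	/* example only */
static void example_handle_mst_hpd_irq(struct example_dp *dp)
{
	struct drm_dp_mst_topology_mgr *mgr = &dp->mst_mgr;
	u8 esi[4] = {}, ack[4] = {};
	bool handled = false;

	if (drm_dp_dpcd_read(mgr->aux, DP_SINK_COUNT_ESI, esi, 4) != 4)
		return;

	drm_dp_mst_hpd_irq_handle_event(mgr, esi, ack, &handled);
	if (!handled)
		return;

	/* Ack the serviced events back to the DP receiver... */
	drm_dp_dpcd_writeb(mgr->aux, DP_SINK_COUNT_ESI + 1, ack[1]);

	/* ...and only then try to kick off the next queued down request. */
	drm_dp_mst_hpd_irq_send_new_request(mgr);
}
#endif
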
420583f201fbSjsg /**
42061bb76ff1Sjsg  * drm_dp_mst_detect_port() - get connection status for an MST port
42071bb76ff1Sjsg  * @connector: DRM connector for this port
42081bb76ff1Sjsg  * @ctx: The acquisition context to use for grabbing locks
42091bb76ff1Sjsg  * @mgr: manager for this port
42101bb76ff1Sjsg  * @port: pointer to a port
42111bb76ff1Sjsg  *
42121bb76ff1Sjsg  * This returns the current connection state for a port.
42131bb76ff1Sjsg  */
42141bb76ff1Sjsg int
42151bb76ff1Sjsg drm_dp_mst_detect_port(struct drm_connector *connector,
42161bb76ff1Sjsg 		       struct drm_modeset_acquire_ctx *ctx,
42171bb76ff1Sjsg 		       struct drm_dp_mst_topology_mgr *mgr,
42181bb76ff1Sjsg 		       struct drm_dp_mst_port *port)
42191bb76ff1Sjsg {
42201bb76ff1Sjsg 	int ret;
42211bb76ff1Sjsg 
42221bb76ff1Sjsg 	/* we need to search for the port in the mgr in case it's gone */
42231bb76ff1Sjsg 	port = drm_dp_mst_topology_get_port_validated(mgr, port);
42241bb76ff1Sjsg 	if (!port)
42251bb76ff1Sjsg 		return connector_status_disconnected;
42261bb76ff1Sjsg 
42271bb76ff1Sjsg 	ret = drm_modeset_lock(&mgr->base.lock, ctx);
42281bb76ff1Sjsg 	if (ret)
42291bb76ff1Sjsg 		goto out;
42301bb76ff1Sjsg 
42311bb76ff1Sjsg 	ret = connector_status_disconnected;
42321bb76ff1Sjsg 
42331bb76ff1Sjsg 	if (!port->ddps)
42341bb76ff1Sjsg 		goto out;
42351bb76ff1Sjsg 
42361bb76ff1Sjsg 	switch (port->pdt) {
42371bb76ff1Sjsg 	case DP_PEER_DEVICE_NONE:
42381bb76ff1Sjsg 		break;
42391bb76ff1Sjsg 	case DP_PEER_DEVICE_MST_BRANCHING:
42401bb76ff1Sjsg 		if (!port->mcs)
42411bb76ff1Sjsg 			ret = connector_status_connected;
42421bb76ff1Sjsg 		break;
42431bb76ff1Sjsg 
42441bb76ff1Sjsg 	case DP_PEER_DEVICE_SST_SINK:
42451bb76ff1Sjsg 		ret = connector_status_connected;
42461bb76ff1Sjsg 		/* for logical ports - cache the EDID */
42471bb76ff1Sjsg 		if (port->port_num >= DP_MST_LOGICAL_PORT_0 && !port->cached_edid)
4248f005ef32Sjsg 			port->cached_edid = drm_edid_read_ddc(connector, &port->aux.ddc);
42491bb76ff1Sjsg 		break;
42501bb76ff1Sjsg 	case DP_PEER_DEVICE_DP_LEGACY_CONV:
42511bb76ff1Sjsg 		if (port->ldps)
42521bb76ff1Sjsg 			ret = connector_status_connected;
42531bb76ff1Sjsg 		break;
42541bb76ff1Sjsg 	}
42551bb76ff1Sjsg out:
42561bb76ff1Sjsg 	drm_dp_mst_topology_put_port(port);
42571bb76ff1Sjsg 	return ret;
42581bb76ff1Sjsg }
42591bb76ff1Sjsg EXPORT_SYMBOL(drm_dp_mst_detect_port);
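
/*
 * A minimal sketch (not from the original file): drm_dp_mst_detect_port()
 * is meant to back a connector's &drm_connector_helper_funcs.detect_ctx
 * hook. The example_mst_connector wrapper is a hypothetical name for
 * whatever per-connector state the driver keeps.
 */
#if 0	/* example only */
static int example_mst_connector_detect_ctx(struct drm_connector *connector,
					    struct drm_modeset_acquire_ctx *ctx,
					    bool force)
{
	struct example_mst_connector *c = to_example_mst_connector(connector);

	return drm_dp_mst_detect_port(connector, ctx, c->mgr, c->port);
}
#endif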
42601bb76ff1Sjsg 
42611bb76ff1Sjsg /**
4262f005ef32Sjsg  * drm_dp_mst_edid_read() - get EDID for an MST port
42631bb76ff1Sjsg  * @connector: toplevel connector to get EDID for
42641bb76ff1Sjsg  * @mgr: manager for this port
42651bb76ff1Sjsg  * @port: unverified pointer to a port.
42661bb76ff1Sjsg  *
42671bb76ff1Sjsg  * This returns an EDID for the port connected to a connector. It validates
42681bb76ff1Sjsg  * that the pointer still exists, so the caller doesn't need to hold a
42691bb76ff1Sjsg  * reference.
42701bb76ff1Sjsg  */
4271f005ef32Sjsg const struct drm_edid *drm_dp_mst_edid_read(struct drm_connector *connector,
4272f005ef32Sjsg 					    struct drm_dp_mst_topology_mgr *mgr,
4273f005ef32Sjsg 					    struct drm_dp_mst_port *port)
42741bb76ff1Sjsg {
4275f005ef32Sjsg 	const struct drm_edid *drm_edid;
42761bb76ff1Sjsg 
42771bb76ff1Sjsg 	/* we need to search for the port in the mgr in case it's gone */
42781bb76ff1Sjsg 	port = drm_dp_mst_topology_get_port_validated(mgr, port);
42791bb76ff1Sjsg 	if (!port)
42801bb76ff1Sjsg 		return NULL;
42811bb76ff1Sjsg 
42821bb76ff1Sjsg 	if (port->cached_edid)
4283f005ef32Sjsg 		drm_edid = drm_edid_dup(port->cached_edid);
4284f005ef32Sjsg 	else
4285f005ef32Sjsg 		drm_edid = drm_edid_read_ddc(connector, &port->aux.ddc);
4286f005ef32Sjsg 
42871bb76ff1Sjsg 	drm_dp_mst_topology_put_port(port);
4288f005ef32Sjsg 
4289f005ef32Sjsg 	return drm_edid;
4290f005ef32Sjsg }
4291f005ef32Sjsg EXPORT_SYMBOL(drm_dp_mst_edid_read);
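
/*
 * A minimal sketch (not from the original file): how a hypothetical
 * connector ->get_modes() hook might consume drm_dp_mst_edid_read().
 * The "example_" names are assumptions for illustration.
 */
#if 0	/* example only */
static int example_mst_connector_get_modes(struct drm_connector *connector)
{
	struct example_mst_connector *c = to_example_mst_connector(connector);
	const struct drm_edid *drm_edid;
	int num_modes;

	drm_edid = drm_dp_mst_edid_read(connector, c->mgr, c->port);
	drm_edid_connector_update(connector, drm_edid);
	num_modes = drm_edid_connector_add_modes(connector);
	drm_edid_free(drm_edid);

	return num_modes;
}
#endif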
4292f005ef32Sjsg 
4293f005ef32Sjsg /**
4294f005ef32Sjsg  * drm_dp_mst_get_edid() - get EDID for an MST port
4295f005ef32Sjsg  * @connector: toplevel connector to get EDID for
4296f005ef32Sjsg  * @mgr: manager for this port
4297f005ef32Sjsg  * @port: unverified pointer to a port.
4298f005ef32Sjsg  *
4299f005ef32Sjsg  * This function is deprecated; please use drm_dp_mst_edid_read() instead.
4300f005ef32Sjsg  *
4301f005ef32Sjsg  * This returns an EDID for the port connected to a connector. It validates
4302f005ef32Sjsg  * that the pointer still exists, so the caller doesn't need to hold a
4303f005ef32Sjsg  * reference.
4304f005ef32Sjsg  */
4305f005ef32Sjsg struct edid *drm_dp_mst_get_edid(struct drm_connector *connector,
4306f005ef32Sjsg 				 struct drm_dp_mst_topology_mgr *mgr,
4307f005ef32Sjsg 				 struct drm_dp_mst_port *port)
4308f005ef32Sjsg {
4309f005ef32Sjsg 	const struct drm_edid *drm_edid;
4310f005ef32Sjsg 	struct edid *edid;
4311f005ef32Sjsg 
4312f005ef32Sjsg 	drm_edid = drm_dp_mst_edid_read(connector, mgr, port);
4313f005ef32Sjsg 
4314f005ef32Sjsg 	edid = drm_edid_duplicate(drm_edid_raw(drm_edid));
4315f005ef32Sjsg 
4316f005ef32Sjsg 	drm_edid_free(drm_edid);
4317f005ef32Sjsg 
43181bb76ff1Sjsg 	return edid;
43191bb76ff1Sjsg }
43201bb76ff1Sjsg EXPORT_SYMBOL(drm_dp_mst_get_edid);
43211bb76ff1Sjsg 
43221bb76ff1Sjsg /**
43231bb76ff1Sjsg  * drm_dp_atomic_find_time_slots() - Find and add time slots to the state
43241bb76ff1Sjsg  * @state: global atomic state
43251bb76ff1Sjsg  * @mgr: MST topology manager for the port
43261bb76ff1Sjsg  * @port: port to find time slots for
43271bb76ff1Sjsg  * @pbn: bandwidth required for the mode in PBN
43281bb76ff1Sjsg  *
43291bb76ff1Sjsg  * Allocates time slots to @port, replacing any previous time slot allocations it may
43301bb76ff1Sjsg  * have had. Any atomic drivers which support MST must call this function in
43311bb76ff1Sjsg  * their &drm_encoder_helper_funcs.atomic_check() callback unconditionally to
43321bb76ff1Sjsg  * change the current time slot allocation for the new state, and ensure the MST
43331bb76ff1Sjsg  * atomic state is added whenever the state of payloads in the topology changes.
43341bb76ff1Sjsg  *
43351bb76ff1Sjsg  * Allocations set by this function are not checked against the bandwidth
43361bb76ff1Sjsg  * restraints of @mgr until the driver calls drm_dp_mst_atomic_check().
43371bb76ff1Sjsg  *
43381bb76ff1Sjsg  * Additionally, it is OK to call this function multiple times on the same
43391bb76ff1Sjsg  * @port as needed. It is not OK, however, to call this function and
43401bb76ff1Sjsg  * drm_dp_atomic_release_time_slots() in the same atomic check phase.
43411bb76ff1Sjsg  *
43421bb76ff1Sjsg  * See also:
43431bb76ff1Sjsg  * drm_dp_atomic_release_time_slots()
43441bb76ff1Sjsg  * drm_dp_mst_atomic_check()
43451bb76ff1Sjsg  *
43461bb76ff1Sjsg  * Returns:
43471bb76ff1Sjsg  * Total slots in the atomic state assigned for this port, or a negative error
43481bb76ff1Sjsg  * code if the port no longer exists
43491bb76ff1Sjsg  */
43501bb76ff1Sjsg int drm_dp_atomic_find_time_slots(struct drm_atomic_state *state,
43511bb76ff1Sjsg 				  struct drm_dp_mst_topology_mgr *mgr,
43521bb76ff1Sjsg 				  struct drm_dp_mst_port *port, int pbn)
43531bb76ff1Sjsg {
43541bb76ff1Sjsg 	struct drm_dp_mst_topology_state *topology_state;
43551bb76ff1Sjsg 	struct drm_dp_mst_atomic_payload *payload = NULL;
43561bb76ff1Sjsg 	struct drm_connector_state *conn_state;
43571bb76ff1Sjsg 	int prev_slots = 0, prev_bw = 0, req_slots;
43581bb76ff1Sjsg 
43591bb76ff1Sjsg 	topology_state = drm_atomic_get_mst_topology_state(state, mgr);
43601bb76ff1Sjsg 	if (IS_ERR(topology_state))
43611bb76ff1Sjsg 		return PTR_ERR(topology_state);
43621bb76ff1Sjsg 
43631bb76ff1Sjsg 	conn_state = drm_atomic_get_new_connector_state(state, port->connector);
43641bb76ff1Sjsg 	topology_state->pending_crtc_mask |= drm_crtc_mask(conn_state->crtc);
43651bb76ff1Sjsg 
43661bb76ff1Sjsg 	/* Find the current allocation for this port, if any */
43671bb76ff1Sjsg 	payload = drm_atomic_get_mst_payload_state(topology_state, port);
43681bb76ff1Sjsg 	if (payload) {
43691bb76ff1Sjsg 		prev_slots = payload->time_slots;
43701bb76ff1Sjsg 		prev_bw = payload->pbn;
43711bb76ff1Sjsg 
43721bb76ff1Sjsg 		/*
43731bb76ff1Sjsg 		 * This should never happen, unless the driver tries
43741bb76ff1Sjsg 		 * releasing and allocating the same timeslot allocation,
43751bb76ff1Sjsg 		 * which is an error
43761bb76ff1Sjsg 		 */
43771bb76ff1Sjsg 		if (drm_WARN_ON(mgr->dev, payload->delete)) {
43781bb76ff1Sjsg 			drm_err(mgr->dev,
43791bb76ff1Sjsg 				"cannot allocate and release time slots on [MST PORT:%p] in the same state\n",
43801bb76ff1Sjsg 				port);
43811bb76ff1Sjsg 			return -EINVAL;
43821bb76ff1Sjsg 		}
43831bb76ff1Sjsg 	}
43841bb76ff1Sjsg 
43851bb76ff1Sjsg 	req_slots = DIV_ROUND_UP(pbn, topology_state->pbn_div);
43861bb76ff1Sjsg 
43871bb76ff1Sjsg 	drm_dbg_atomic(mgr->dev, "[CONNECTOR:%d:%s] [MST PORT:%p] TU %d -> %d\n",
43881bb76ff1Sjsg 		       port->connector->base.id, port->connector->name,
43891bb76ff1Sjsg 		       port, prev_slots, req_slots);
43901bb76ff1Sjsg 	drm_dbg_atomic(mgr->dev, "[CONNECTOR:%d:%s] [MST PORT:%p] PBN %d -> %d\n",
43911bb76ff1Sjsg 		       port->connector->base.id, port->connector->name,
43921bb76ff1Sjsg 		       port, prev_bw, pbn);
43931bb76ff1Sjsg 
43941bb76ff1Sjsg 	/* Add the new allocation to the state, note the VCPI isn't assigned until the end */
43951bb76ff1Sjsg 	if (!payload) {
43961bb76ff1Sjsg 		payload = kzalloc(sizeof(*payload), GFP_KERNEL);
43971bb76ff1Sjsg 		if (!payload)
43981bb76ff1Sjsg 			return -ENOMEM;
43991bb76ff1Sjsg 
44001bb76ff1Sjsg 		drm_dp_mst_get_port_malloc(port);
44011bb76ff1Sjsg 		payload->port = port;
44021bb76ff1Sjsg 		payload->vc_start_slot = -1;
44031bb76ff1Sjsg 		list_add(&payload->next, &topology_state->payloads);
44041bb76ff1Sjsg 	}
44051bb76ff1Sjsg 	payload->time_slots = req_slots;
44061bb76ff1Sjsg 	payload->pbn = pbn;
44071bb76ff1Sjsg 
44081bb76ff1Sjsg 	return req_slots;
44091bb76ff1Sjsg }
44101bb76ff1Sjsg EXPORT_SYMBOL(drm_dp_atomic_find_time_slots);
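
/*
 * A minimal sketch (not from the original file) of the allocation side of
 * the contract documented above: a hypothetical MST encoder's atomic_check
 * computes the PBN for the mode and (re)allocates time slots every time.
 * Names prefixed "example_" and the fixed 24 bpp are illustrative
 * assumptions.
 */
#if 0	/* example only */
static int example_mst_encoder_atomic_check(struct drm_encoder *encoder,
					    struct drm_crtc_state *crtc_state,
					    struct drm_connector_state *conn_state)
{
	struct example_mst_connector *c =
		to_example_mst_connector(conn_state->connector);
	int pbn, slots;

	/* bpp is passed in .4 binary fixed point, hence the << 4 */
	pbn = drm_dp_calc_pbn_mode(crtc_state->adjusted_mode.clock, 24 << 4);

	slots = drm_dp_atomic_find_time_slots(crtc_state->state, c->mgr,
					      c->port, pbn);
	if (slots < 0)
		return slots;

	return 0;
}
#endif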
44111bb76ff1Sjsg 
44121bb76ff1Sjsg /**
44131bb76ff1Sjsg  * drm_dp_atomic_release_time_slots() - Release allocated time slots
44141bb76ff1Sjsg  * @state: global atomic state
44151bb76ff1Sjsg  * @mgr: MST topology manager for the port
44161bb76ff1Sjsg  * @port: The port to release the time slots from
44171bb76ff1Sjsg  *
44181bb76ff1Sjsg  * Releases any time slots that have been allocated to a port in the atomic
44191bb76ff1Sjsg  * state. Any atomic drivers which support MST must call this function
44201bb76ff1Sjsg  * unconditionally in their &drm_connector_helper_funcs.atomic_check() callback.
44211bb76ff1Sjsg  * This helper will check whether time slots would be released by the new state and
44221bb76ff1Sjsg  * respond accordingly, along with ensuring the MST state is always added to the
44231bb76ff1Sjsg  * atomic state whenever a new state would modify the state of payloads on the
44241bb76ff1Sjsg  * topology.
44251bb76ff1Sjsg  *
44261bb76ff1Sjsg  * It is OK to call this even if @port has been removed from the system.
44271bb76ff1Sjsg  * Additionally, it is OK to call this function multiple times on the same
44281bb76ff1Sjsg  * @port as needed. It is not OK, however, to call this function and
44291bb76ff1Sjsg  * drm_dp_atomic_find_time_slots() on the same @port in a single atomic check
44301bb76ff1Sjsg  * phase.
44311bb76ff1Sjsg  *
44321bb76ff1Sjsg  * See also:
44331bb76ff1Sjsg  * drm_dp_atomic_find_time_slots()
44341bb76ff1Sjsg  * drm_dp_mst_atomic_check()
44351bb76ff1Sjsg  *
44361bb76ff1Sjsg  * Returns:
44371bb76ff1Sjsg  * 0 on success, negative error code otherwise
44381bb76ff1Sjsg  */
44391bb76ff1Sjsg int drm_dp_atomic_release_time_slots(struct drm_atomic_state *state,
44401bb76ff1Sjsg 				     struct drm_dp_mst_topology_mgr *mgr,
44411bb76ff1Sjsg 				     struct drm_dp_mst_port *port)
44421bb76ff1Sjsg {
44431bb76ff1Sjsg 	struct drm_dp_mst_topology_state *topology_state;
44441bb76ff1Sjsg 	struct drm_dp_mst_atomic_payload *payload;
44451bb76ff1Sjsg 	struct drm_connector_state *old_conn_state, *new_conn_state;
44461bb76ff1Sjsg 	bool update_payload = true;
44471bb76ff1Sjsg 
44481bb76ff1Sjsg 	old_conn_state = drm_atomic_get_old_connector_state(state, port->connector);
44491bb76ff1Sjsg 	if (!old_conn_state->crtc)
44501bb76ff1Sjsg 		return 0;
44511bb76ff1Sjsg 
44521bb76ff1Sjsg 	/* If the CRTC isn't disabled by this state, don't release its payload */
44531bb76ff1Sjsg 	new_conn_state = drm_atomic_get_new_connector_state(state, port->connector);
44541bb76ff1Sjsg 	if (new_conn_state->crtc) {
44551bb76ff1Sjsg 		struct drm_crtc_state *crtc_state =
44561bb76ff1Sjsg 			drm_atomic_get_new_crtc_state(state, new_conn_state->crtc);
44571bb76ff1Sjsg 
44581bb76ff1Sjsg 		/* No modeset means no payload changes, so it's safe to not pull in the MST state */
44591bb76ff1Sjsg 		if (!crtc_state || !drm_atomic_crtc_needs_modeset(crtc_state))
44601bb76ff1Sjsg 			return 0;
44611bb76ff1Sjsg 
44621bb76ff1Sjsg 		if (!crtc_state->mode_changed && !crtc_state->connectors_changed)
44631bb76ff1Sjsg 			update_payload = false;
44641bb76ff1Sjsg 	}
44651bb76ff1Sjsg 
44661bb76ff1Sjsg 	topology_state = drm_atomic_get_mst_topology_state(state, mgr);
44671bb76ff1Sjsg 	if (IS_ERR(topology_state))
44681bb76ff1Sjsg 		return PTR_ERR(topology_state);
44691bb76ff1Sjsg 
44701bb76ff1Sjsg 	topology_state->pending_crtc_mask |= drm_crtc_mask(old_conn_state->crtc);
44711bb76ff1Sjsg 	if (!update_payload)
44721bb76ff1Sjsg 		return 0;
44731bb76ff1Sjsg 
44741bb76ff1Sjsg 	payload = drm_atomic_get_mst_payload_state(topology_state, port);
44751bb76ff1Sjsg 	if (WARN_ON(!payload)) {
44761bb76ff1Sjsg 		drm_err(mgr->dev, "No payload for [MST PORT:%p] found in mst state %p\n",
44771bb76ff1Sjsg 			port, &topology_state->base);
44781bb76ff1Sjsg 		return -EINVAL;
44791bb76ff1Sjsg 	}
44801bb76ff1Sjsg 
44811bb76ff1Sjsg 	if (new_conn_state->crtc)
44821bb76ff1Sjsg 		return 0;
44831bb76ff1Sjsg 
44841bb76ff1Sjsg 	drm_dbg_atomic(mgr->dev, "[MST PORT:%p] TU %d -> 0\n", port, payload->time_slots);
44851bb76ff1Sjsg 	if (!payload->delete) {
44861bb76ff1Sjsg 		payload->pbn = 0;
44871bb76ff1Sjsg 		payload->delete = true;
44881bb76ff1Sjsg 		topology_state->payload_mask &= ~BIT(payload->vcpi - 1);
44891bb76ff1Sjsg 	}
44901bb76ff1Sjsg 
44911bb76ff1Sjsg 	return 0;
44921bb76ff1Sjsg }
44931bb76ff1Sjsg EXPORT_SYMBOL(drm_dp_atomic_release_time_slots);
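
/*
 * A minimal sketch (not from the original file) of the release side: per
 * the kernel-doc above, an MST connector's atomic_check calls this
 * unconditionally and lets the helper decide whether anything is freed.
 * The "example_" names are illustrative assumptions.
 */
#if 0	/* example only */
static int example_mst_connector_atomic_check(struct drm_connector *connector,
					      struct drm_atomic_state *state)
{
	struct example_mst_connector *c = to_example_mst_connector(connector);

	return drm_dp_atomic_release_time_slots(state, c->mgr, c->port);
}
#endif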
44941bb76ff1Sjsg 
44951bb76ff1Sjsg /**
44961bb76ff1Sjsg  * drm_dp_mst_atomic_setup_commit() - setup_commit hook for MST helpers
44971bb76ff1Sjsg  * @state: global atomic state
44981bb76ff1Sjsg  *
44991bb76ff1Sjsg  * This function saves all of the &drm_crtc_commit structs in an atomic state that touch any CRTCs
45001bb76ff1Sjsg  * currently assigned to an MST topology. Drivers must call this hook from their
45011bb76ff1Sjsg  * &drm_mode_config_helper_funcs.atomic_commit_setup hook.
45021bb76ff1Sjsg  *
45031bb76ff1Sjsg  * Returns:
45041bb76ff1Sjsg  * 0 if all CRTC commits were retrieved successfully, negative error code otherwise
45051bb76ff1Sjsg  */
45061bb76ff1Sjsg int drm_dp_mst_atomic_setup_commit(struct drm_atomic_state *state)
45071bb76ff1Sjsg {
45081bb76ff1Sjsg 	struct drm_dp_mst_topology_mgr *mgr;
45091bb76ff1Sjsg 	struct drm_dp_mst_topology_state *mst_state;
45101bb76ff1Sjsg 	struct drm_crtc *crtc;
45111bb76ff1Sjsg 	struct drm_crtc_state *crtc_state;
45121bb76ff1Sjsg 	int i, j, commit_idx, num_commit_deps;
45131bb76ff1Sjsg 
45141bb76ff1Sjsg 	for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
45151bb76ff1Sjsg 		if (!mst_state->pending_crtc_mask)
45161bb76ff1Sjsg 			continue;
45171bb76ff1Sjsg 
45181bb76ff1Sjsg 		num_commit_deps = hweight32(mst_state->pending_crtc_mask);
45191bb76ff1Sjsg 		mst_state->commit_deps = kmalloc_array(num_commit_deps,
45201bb76ff1Sjsg 						       sizeof(*mst_state->commit_deps), GFP_KERNEL);
45211bb76ff1Sjsg 		if (!mst_state->commit_deps)
45221bb76ff1Sjsg 			return -ENOMEM;
45231bb76ff1Sjsg 		mst_state->num_commit_deps = num_commit_deps;
45241bb76ff1Sjsg 
45251bb76ff1Sjsg 		commit_idx = 0;
45261bb76ff1Sjsg 		for_each_new_crtc_in_state(state, crtc, crtc_state, j) {
45271bb76ff1Sjsg 			if (mst_state->pending_crtc_mask & drm_crtc_mask(crtc)) {
45281bb76ff1Sjsg 				mst_state->commit_deps[commit_idx++] =
45291bb76ff1Sjsg 					drm_crtc_commit_get(crtc_state->commit);
45301bb76ff1Sjsg 			}
45311bb76ff1Sjsg 		}
45321bb76ff1Sjsg 	}
45331bb76ff1Sjsg 
45341bb76ff1Sjsg 	return 0;
45351bb76ff1Sjsg }
45361bb76ff1Sjsg EXPORT_SYMBOL(drm_dp_mst_atomic_setup_commit);
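
/*
 * Wiring this up is a one-liner: the function above is itself a valid
 * &drm_mode_config_helper_funcs.atomic_commit_setup implementation, so a
 * hypothetical driver's hookup can look like this sketch.
 */
#if 0	/* example only */
static const struct drm_mode_config_helper_funcs example_mode_config_helpers = {
	.atomic_commit_setup = drm_dp_mst_atomic_setup_commit,
};
#endif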
45371bb76ff1Sjsg 
45381bb76ff1Sjsg /**
45391bb76ff1Sjsg  * drm_dp_mst_atomic_wait_for_dependencies() - Wait for all pending commits on MST topologies,
45401bb76ff1Sjsg  * prepare new MST state for commit
45411bb76ff1Sjsg  * @state: global atomic state
45421bb76ff1Sjsg  *
45431bb76ff1Sjsg  * Goes through any MST topologies in this atomic state, and waits for any pending commits which
45441bb76ff1Sjsg  * touched CRTCs that were/are on an MST topology to be programmed to hardware and flipped to before
45451bb76ff1Sjsg  * returning. This is to prevent multiple non-blocking commits affecting an MST topology from racing
45461bb76ff1Sjsg  * with each other by forcing them to be executed sequentially in situations where the only resources
45471bb76ff1Sjsg  * the modeset objects in these commits share are an MST topology.
45481bb76ff1Sjsg  *
45491bb76ff1Sjsg  * This function also prepares the new MST state for commit by performing some state preparation
45501bb76ff1Sjsg  * which can't be done until this point, such as reading back the final VC start slots (which are
45511bb76ff1Sjsg  * determined at commit-time) from the previous state.
45521bb76ff1Sjsg  *
45531bb76ff1Sjsg  * All MST drivers must call this function after calling drm_atomic_helper_wait_for_dependencies(),
45541bb76ff1Sjsg  * or whatever their equivalent of that is.
45551bb76ff1Sjsg  */
45561bb76ff1Sjsg void drm_dp_mst_atomic_wait_for_dependencies(struct drm_atomic_state *state)
45571bb76ff1Sjsg {
45581bb76ff1Sjsg 	struct drm_dp_mst_topology_state *old_mst_state, *new_mst_state;
45591bb76ff1Sjsg 	struct drm_dp_mst_topology_mgr *mgr;
45601bb76ff1Sjsg 	struct drm_dp_mst_atomic_payload *old_payload, *new_payload;
45611bb76ff1Sjsg 	int i, j, ret;
45621bb76ff1Sjsg 
45631bb76ff1Sjsg 	for_each_oldnew_mst_mgr_in_state(state, mgr, old_mst_state, new_mst_state, i) {
45641bb76ff1Sjsg 		for (j = 0; j < old_mst_state->num_commit_deps; j++) {
45651bb76ff1Sjsg 			ret = drm_crtc_commit_wait(old_mst_state->commit_deps[j]);
45661bb76ff1Sjsg 			if (ret < 0)
45671bb76ff1Sjsg 				drm_err(state->dev, "Failed to wait for %s: %d\n",
45681bb76ff1Sjsg 					old_mst_state->commit_deps[j]->crtc->name, ret);
45691bb76ff1Sjsg 		}
45701bb76ff1Sjsg 
45711bb76ff1Sjsg 		/* Now that previous state is committed, it's safe to copy over the start slot
45721bb76ff1Sjsg 		 * assignments
45731bb76ff1Sjsg 		 */
45741bb76ff1Sjsg 		list_for_each_entry(old_payload, &old_mst_state->payloads, next) {
45751bb76ff1Sjsg 			if (old_payload->delete)
45761bb76ff1Sjsg 				continue;
45771bb76ff1Sjsg 
45781bb76ff1Sjsg 			new_payload = drm_atomic_get_mst_payload_state(new_mst_state,
45791bb76ff1Sjsg 								       old_payload->port);
45801bb76ff1Sjsg 			new_payload->vc_start_slot = old_payload->vc_start_slot;
45811bb76ff1Sjsg 		}
45821bb76ff1Sjsg 	}
45831bb76ff1Sjsg }
45841bb76ff1Sjsg EXPORT_SYMBOL(drm_dp_mst_atomic_wait_for_dependencies);
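
/*
 * A rough sketch (not from the original file) of where the wait belongs in
 * a driver's commit tail: immediately after the generic dependency wait,
 * before anything is programmed to hardware. The helper calls around it
 * are one plausible ordering, not a prescription.
 */
#if 0	/* example only */
static void example_commit_tail(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;

	drm_atomic_helper_wait_for_dependencies(state);
	drm_dp_mst_atomic_wait_for_dependencies(state);

	drm_atomic_helper_commit_modeset_disables(dev, state);
	drm_atomic_helper_commit_modeset_enables(dev, state);
	drm_atomic_helper_commit_planes(dev, state, 0);

	drm_atomic_helper_commit_hw_done(state);
	drm_atomic_helper_wait_for_flip_done(dev, state);
	drm_atomic_helper_cleanup_planes(dev, state);
}
#endif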
45851bb76ff1Sjsg 
45861bb76ff1Sjsg /**
45871bb76ff1Sjsg  * drm_dp_mst_root_conn_atomic_check() - Serialize CRTC commits on MST-capable connectors operating
45881bb76ff1Sjsg  * in SST mode
45891bb76ff1Sjsg  * @new_conn_state: The new connector state of the &drm_connector
45901bb76ff1Sjsg  * @mgr: The MST topology manager for the &drm_connector
45911bb76ff1Sjsg  *
45921bb76ff1Sjsg  * Since MST uses fake &drm_encoder structs, the generic atomic modesetting code isn't able to
45931bb76ff1Sjsg  * serialize non-blocking commits happening on the real DP connector of an MST topology switching
45941bb76ff1Sjsg  * into/away from MST mode - as the CRTC on the real DP connector and the CRTCs on the connector's
45951bb76ff1Sjsg  * MST topology will never share the same &drm_encoder.
45961bb76ff1Sjsg  *
45971bb76ff1Sjsg  * This function takes care of this serialization issue, by checking a root MST connector's atomic
45981bb76ff1Sjsg  * state to determine if it is about to have a modeset - and then pulling in the MST topology state
45991bb76ff1Sjsg  * if so, along with adding any relevant CRTCs to &drm_dp_mst_topology_state.pending_crtc_mask.
46001bb76ff1Sjsg  *
46011bb76ff1Sjsg  * Drivers implementing MST must call this function from the
46021bb76ff1Sjsg  * &drm_connector_helper_funcs.atomic_check hook of any physical DP &drm_connector capable of
46031bb76ff1Sjsg  * driving MST sinks.
46041bb76ff1Sjsg  *
46051bb76ff1Sjsg  * Returns:
46061bb76ff1Sjsg  * 0 on success, negative error code otherwise
46071bb76ff1Sjsg  */
46081bb76ff1Sjsg int drm_dp_mst_root_conn_atomic_check(struct drm_connector_state *new_conn_state,
46091bb76ff1Sjsg 				      struct drm_dp_mst_topology_mgr *mgr)
46101bb76ff1Sjsg {
46111bb76ff1Sjsg 	struct drm_atomic_state *state = new_conn_state->state;
46121bb76ff1Sjsg 	struct drm_connector_state *old_conn_state =
46131bb76ff1Sjsg 		drm_atomic_get_old_connector_state(state, new_conn_state->connector);
46141bb76ff1Sjsg 	struct drm_crtc_state *crtc_state;
46151bb76ff1Sjsg 	struct drm_dp_mst_topology_state *mst_state = NULL;
46161bb76ff1Sjsg 
46171bb76ff1Sjsg 	if (new_conn_state->crtc) {
46181bb76ff1Sjsg 		crtc_state = drm_atomic_get_new_crtc_state(state, new_conn_state->crtc);
46191bb76ff1Sjsg 		if (crtc_state && drm_atomic_crtc_needs_modeset(crtc_state)) {
46201bb76ff1Sjsg 			mst_state = drm_atomic_get_mst_topology_state(state, mgr);
46211bb76ff1Sjsg 			if (IS_ERR(mst_state))
46221bb76ff1Sjsg 				return PTR_ERR(mst_state);
46231bb76ff1Sjsg 
46241bb76ff1Sjsg 			mst_state->pending_crtc_mask |= drm_crtc_mask(new_conn_state->crtc);
46251bb76ff1Sjsg 		}
46261bb76ff1Sjsg 	}
46271bb76ff1Sjsg 
46281bb76ff1Sjsg 	if (old_conn_state->crtc) {
46291bb76ff1Sjsg 		crtc_state = drm_atomic_get_new_crtc_state(state, old_conn_state->crtc);
46301bb76ff1Sjsg 		if (crtc_state && drm_atomic_crtc_needs_modeset(crtc_state)) {
46311bb76ff1Sjsg 			if (!mst_state) {
46321bb76ff1Sjsg 				mst_state = drm_atomic_get_mst_topology_state(state, mgr);
46331bb76ff1Sjsg 				if (IS_ERR(mst_state))
46341bb76ff1Sjsg 					return PTR_ERR(mst_state);
46351bb76ff1Sjsg 			}
46361bb76ff1Sjsg 
46371bb76ff1Sjsg 			mst_state->pending_crtc_mask |= drm_crtc_mask(old_conn_state->crtc);
46381bb76ff1Sjsg 		}
46391bb76ff1Sjsg 	}
46401bb76ff1Sjsg 
46411bb76ff1Sjsg 	return 0;
46421bb76ff1Sjsg }
46431bb76ff1Sjsg EXPORT_SYMBOL(drm_dp_mst_root_conn_atomic_check);
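
/*
 * A minimal sketch (not from the original file): the physical DP
 * connector's atomic_check forwarding into the helper above. The
 * example_dp lookup is a hypothetical stand-in for driver state.
 */
#if 0	/* example only */
static int example_root_dp_connector_atomic_check(struct drm_connector *connector,
						  struct drm_atomic_state *state)
{
	struct example_dp *dp = to_example_dp(connector);
	struct drm_connector_state *new_conn_state =
		drm_atomic_get_new_connector_state(state, connector);

	return drm_dp_mst_root_conn_atomic_check(new_conn_state, &dp->mst_mgr);
}
#endif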
46441bb76ff1Sjsg 
46451bb76ff1Sjsg /**
46461bb76ff1Sjsg  * drm_dp_mst_update_slots() - updates the slot info depending on the DP encoding format
46471bb76ff1Sjsg  * @mst_state: mst_state to update
46481bb76ff1Sjsg  * @link_encoding_cap: the encoding format on the link
46491bb76ff1Sjsg  */
46501bb76ff1Sjsg void drm_dp_mst_update_slots(struct drm_dp_mst_topology_state *mst_state, uint8_t link_encoding_cap)
46511bb76ff1Sjsg {
46521bb76ff1Sjsg 	if (link_encoding_cap == DP_CAP_ANSI_128B132B) {
46531bb76ff1Sjsg 		mst_state->total_avail_slots = 64;
46541bb76ff1Sjsg 		mst_state->start_slot = 0;
46551bb76ff1Sjsg 	} else {
46561bb76ff1Sjsg 		mst_state->total_avail_slots = 63;
46571bb76ff1Sjsg 		mst_state->start_slot = 1;
46581bb76ff1Sjsg 	}
46591bb76ff1Sjsg 
46601bb76ff1Sjsg 	DRM_DEBUG_KMS("%s encoding format on mst_state 0x%p\n",
46611bb76ff1Sjsg 		      (link_encoding_cap == DP_CAP_ANSI_128B132B) ? "128b/132b":"8b/10b",
46621bb76ff1Sjsg 		      mst_state);
46631bb76ff1Sjsg }
46641bb76ff1Sjsg EXPORT_SYMBOL(drm_dp_mst_update_slots);
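
/*
 * A minimal sketch (not from the original file): the driver picks the
 * encoding cap from its own link state when it pulls the MST state into an
 * atomic check. Whether the link runs 128b/132b is driver knowledge; the
 * "uses_128b132b" field and example_dp are assumed names.
 */
#if 0	/* example only */
static int example_update_slot_info(struct drm_atomic_state *state,
				    struct example_dp *dp)
{
	struct drm_dp_mst_topology_state *mst_state;

	mst_state = drm_atomic_get_mst_topology_state(state, &dp->mst_mgr);
	if (IS_ERR(mst_state))
		return PTR_ERR(mst_state);

	drm_dp_mst_update_slots(mst_state, dp->uses_128b132b ?
				DP_CAP_ANSI_128B132B : DP_CAP_ANSI_8B10B);
	return 0;
}
#endif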
46651bb76ff1Sjsg 
46661bb76ff1Sjsg static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
46671bb76ff1Sjsg 				     int id, u8 start_slot, u8 num_slots)
46681bb76ff1Sjsg {
46691bb76ff1Sjsg 	u8 payload_alloc[3], status;
46701bb76ff1Sjsg 	int ret;
46711bb76ff1Sjsg 	int retries = 0;
46721bb76ff1Sjsg 
46731bb76ff1Sjsg 	drm_dp_dpcd_writeb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS,
46741bb76ff1Sjsg 			   DP_PAYLOAD_TABLE_UPDATED);
46751bb76ff1Sjsg 
46761bb76ff1Sjsg 	payload_alloc[0] = id;
46771bb76ff1Sjsg 	payload_alloc[1] = start_slot;
46781bb76ff1Sjsg 	payload_alloc[2] = num_slots;
46791bb76ff1Sjsg 
46801bb76ff1Sjsg 	ret = drm_dp_dpcd_write(mgr->aux, DP_PAYLOAD_ALLOCATE_SET, payload_alloc, 3);
46811bb76ff1Sjsg 	if (ret != 3) {
46821bb76ff1Sjsg 		drm_dbg_kms(mgr->dev, "failed to write payload allocation %d\n", ret);
46831bb76ff1Sjsg 		goto fail;
46841bb76ff1Sjsg 	}
46851bb76ff1Sjsg 
46861bb76ff1Sjsg retry:
46871bb76ff1Sjsg 	ret = drm_dp_dpcd_readb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
46881bb76ff1Sjsg 	if (ret < 0) {
46891bb76ff1Sjsg 		drm_dbg_kms(mgr->dev, "failed to read payload table status %d\n", ret);
46901bb76ff1Sjsg 		goto fail;
46911bb76ff1Sjsg 	}
46921bb76ff1Sjsg 
46931bb76ff1Sjsg 	if (!(status & DP_PAYLOAD_TABLE_UPDATED)) {
46941bb76ff1Sjsg 		retries++;
46951bb76ff1Sjsg 		if (retries < 20) {
46961bb76ff1Sjsg 			usleep_range(10000, 20000);
46971bb76ff1Sjsg 			goto retry;
46981bb76ff1Sjsg 		}
46991bb76ff1Sjsg 		drm_dbg_kms(mgr->dev, "status not set after read payload table status %d\n",
47001bb76ff1Sjsg 			    status);
47011bb76ff1Sjsg 		ret = -EINVAL;
47021bb76ff1Sjsg 		goto fail;
47031bb76ff1Sjsg 	}
47041bb76ff1Sjsg 	ret = 0;
47051bb76ff1Sjsg fail:
47061bb76ff1Sjsg 	return ret;
47071bb76ff1Sjsg }
47081bb76ff1Sjsg 
47091bb76ff1Sjsg static int do_get_act_status(struct drm_dp_aux *aux)
47101bb76ff1Sjsg {
47111bb76ff1Sjsg 	int ret;
47121bb76ff1Sjsg 	u8 status;
47131bb76ff1Sjsg 
47141bb76ff1Sjsg 	ret = drm_dp_dpcd_readb(aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
47151bb76ff1Sjsg 	if (ret < 0)
47161bb76ff1Sjsg 		return ret;
47171bb76ff1Sjsg 
47181bb76ff1Sjsg 	return status;
47191bb76ff1Sjsg }
47201bb76ff1Sjsg 
47211bb76ff1Sjsg /**
47221bb76ff1Sjsg  * drm_dp_check_act_status() - Polls for ACT handled status.
47231bb76ff1Sjsg  * @mgr: manager to use
47241bb76ff1Sjsg  *
47251bb76ff1Sjsg  * Tries waiting for the MST hub to finish updating its payload table by
47261bb76ff1Sjsg  * polling for the ACT handled bit for up to 3 seconds (yes, some hubs really
47271bb76ff1Sjsg  * take that long).
47281bb76ff1Sjsg  *
47291bb76ff1Sjsg  * Returns:
47301bb76ff1Sjsg  * 0 if the ACT was handled in time, negative error code on failure.
47311bb76ff1Sjsg  */
47321bb76ff1Sjsg int drm_dp_check_act_status(struct drm_dp_mst_topology_mgr *mgr)
47331bb76ff1Sjsg {
47341bb76ff1Sjsg 	/*
47351bb76ff1Sjsg 	 * There doesn't seem to be any recommended retry count or timeout in
47361bb76ff1Sjsg 	 * the MST specification. Since some hubs have been observed to take
47371bb76ff1Sjsg 	 * over 1 second to update their payload allocations under certain
47381bb76ff1Sjsg 	 * conditions, we use a rather large timeout value.
47391bb76ff1Sjsg 	 */
47401bb76ff1Sjsg 	const int timeout_ms = 3000;
47411bb76ff1Sjsg 	int ret, status;
47421bb76ff1Sjsg 
47431bb76ff1Sjsg 	ret = readx_poll_timeout(do_get_act_status, mgr->aux, status,
47441bb76ff1Sjsg 				 status & DP_PAYLOAD_ACT_HANDLED || status < 0,
47451bb76ff1Sjsg 				 200, timeout_ms * USEC_PER_MSEC);
47461bb76ff1Sjsg 	if (ret < 0 && status >= 0) {
47471bb76ff1Sjsg 		drm_err(mgr->dev, "Failed to get ACT after %dms, last status: %02x\n",
47481bb76ff1Sjsg 			timeout_ms, status);
47491bb76ff1Sjsg 		return -EINVAL;
47501bb76ff1Sjsg 	} else if (status < 0) {
47511bb76ff1Sjsg 		/*
47521bb76ff1Sjsg 		 * Failure here isn't unexpected - the hub may have
47531bb76ff1Sjsg 		 * just been unplugged
47541bb76ff1Sjsg 		 */
47551bb76ff1Sjsg 		drm_dbg_kms(mgr->dev, "Failed to read payload table status: %d\n", status);
47561bb76ff1Sjsg 		return status;
47571bb76ff1Sjsg 	}
47581bb76ff1Sjsg 
47591bb76ff1Sjsg 	return 0;
47601bb76ff1Sjsg }
47611bb76ff1Sjsg EXPORT_SYMBOL(drm_dp_check_act_status);
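
/*
 * A minimal sketch (not from the original file) of where the ACT poll sits
 * in a driver's payload-update path: after the allocation change has been
 * triggered and before the payload is finalized. The surrounding steps are
 * paraphrased, not a complete sequence.
 */
#if 0	/* example only */
static int example_wait_for_act(struct drm_dp_mst_topology_mgr *mgr)
{
	int ret;

	ret = drm_dp_check_act_status(mgr);
	if (ret)
		/* Timed out or AUX failed - the hub may just be unplugged. */
		return ret;

	/* safe to continue finalizing the payload from here */
	return 0;
}
#endif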
47621bb76ff1Sjsg 
47631bb76ff1Sjsg /**
47641bb76ff1Sjsg  * drm_dp_calc_pbn_mode() - Calculate the PBN for a mode.
4765065458a7Sjsg  * @clock: dot clock
4766065458a7Sjsg  * @bpp: bpp as .4 binary fixed point
47671bb76ff1Sjsg  *
47681bb76ff1Sjsg  * This uses the formula in the spec to calculate the PBN value for a mode.
47691bb76ff1Sjsg  */
4770065458a7Sjsg int drm_dp_calc_pbn_mode(int clock, int bpp)
47711bb76ff1Sjsg {
47721bb76ff1Sjsg 	/*
47731bb76ff1Sjsg 	 * margin 5300ppm + 300ppm ~ 0.6% as per spec, factor is 1.006
47741bb76ff1Sjsg 	 * The unit of 54/64Mbytes/sec is an arbitrary unit chosen based on
47751bb76ff1Sjsg 	 * common multiplier to render an integer PBN for all link rate/lane
47761bb76ff1Sjsg 	 * counts combinations
47771bb76ff1Sjsg 	 * calculate
47781bb76ff1Sjsg 	 * peak_kbps *= (1006/1000)
47791bb76ff1Sjsg 	 * peak_kbps *= (64/54)
47801bb76ff1Sjsg 	 * peak_kbps *= 8    convert to bytes
47811bb76ff1Sjsg 	 */
4782065458a7Sjsg 	return DIV_ROUND_UP_ULL(mul_u32_u32(clock * bpp, 64 * 1006 >> 4),
4783065458a7Sjsg 				1000 * 8 * 54 * 1000);
47841bb76ff1Sjsg }
47851bb76ff1Sjsg EXPORT_SYMBOL(drm_dp_calc_pbn_mode);
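
/*
 * Worked example (computed from the formula above, not a value taken from
 * the DP spec tables): a 1080p60 stream with a 148500 kHz dot clock at
 * 24 bpp is passed as drm_dp_calc_pbn_mode(148500, 24 << 4). With
 * 64 * 1006 >> 4 = 4024 and 1000 * 8 * 54 * 1000 = 432000000, this is
 * DIV_ROUND_UP(148500 * 384 * 4024, 432000000) = 532 PBN.
 */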
47861bb76ff1Sjsg 
47871bb76ff1Sjsg /* we want to kick the TX after we've ack the up/down IRQs. */
47881bb76ff1Sjsg static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr)
47891bb76ff1Sjsg {
47901bb76ff1Sjsg 	queue_work(system_long_wq, &mgr->tx_work);
47911bb76ff1Sjsg }
47921bb76ff1Sjsg 
47931bb76ff1Sjsg /*
47941bb76ff1Sjsg  * Helper function for parsing DP device types into convenient strings
47951bb76ff1Sjsg  * for use with dp_mst_topology
47961bb76ff1Sjsg  */
47971bb76ff1Sjsg static const char *pdt_to_string(u8 pdt)
47981bb76ff1Sjsg {
47991bb76ff1Sjsg 	switch (pdt) {
48001bb76ff1Sjsg 	case DP_PEER_DEVICE_NONE:
48011bb76ff1Sjsg 		return "NONE";
48021bb76ff1Sjsg 	case DP_PEER_DEVICE_SOURCE_OR_SST:
48031bb76ff1Sjsg 		return "SOURCE OR SST";
48041bb76ff1Sjsg 	case DP_PEER_DEVICE_MST_BRANCHING:
48051bb76ff1Sjsg 		return "MST BRANCHING";
48061bb76ff1Sjsg 	case DP_PEER_DEVICE_SST_SINK:
48071bb76ff1Sjsg 		return "SST SINK";
48081bb76ff1Sjsg 	case DP_PEER_DEVICE_DP_LEGACY_CONV:
48091bb76ff1Sjsg 		return "DP LEGACY CONV";
48101bb76ff1Sjsg 	default:
48111bb76ff1Sjsg 		return "ERR";
48121bb76ff1Sjsg 	}
48131bb76ff1Sjsg }
48141bb76ff1Sjsg 
48151bb76ff1Sjsg static void drm_dp_mst_dump_mstb(struct seq_file *m,
48161bb76ff1Sjsg 				 struct drm_dp_mst_branch *mstb)
48171bb76ff1Sjsg {
48181bb76ff1Sjsg 	struct drm_dp_mst_port *port;
48191bb76ff1Sjsg 	int tabs = mstb->lct;
48201bb76ff1Sjsg 	char prefix[10];
48211bb76ff1Sjsg 	int i;
48221bb76ff1Sjsg 
48231bb76ff1Sjsg 	for (i = 0; i < tabs; i++)
48241bb76ff1Sjsg 		prefix[i] = '\t';
48251bb76ff1Sjsg 	prefix[i] = '\0';
48261bb76ff1Sjsg 
48271bb76ff1Sjsg 	seq_printf(m, "%smstb - [%p]: num_ports: %d\n", prefix, mstb, mstb->num_ports);
48281bb76ff1Sjsg 	list_for_each_entry(port, &mstb->ports, next) {
48291bb76ff1Sjsg 		seq_printf(m, "%sport %d - [%p] (%s - %s): ddps: %d, ldps: %d, sdp: %d/%d, fec: %s, conn: %p\n",
48301bb76ff1Sjsg 			   prefix,
48311bb76ff1Sjsg 			   port->port_num,
48321bb76ff1Sjsg 			   port,
48331bb76ff1Sjsg 			   port->input ? "input" : "output",
48341bb76ff1Sjsg 			   pdt_to_string(port->pdt),
48351bb76ff1Sjsg 			   port->ddps,
48361bb76ff1Sjsg 			   port->ldps,
48371bb76ff1Sjsg 			   port->num_sdp_streams,
48381bb76ff1Sjsg 			   port->num_sdp_stream_sinks,
48391bb76ff1Sjsg 			   port->fec_capable ? "true" : "false",
48401bb76ff1Sjsg 			   port->connector);
48411bb76ff1Sjsg 		if (port->mstb)
48421bb76ff1Sjsg 			drm_dp_mst_dump_mstb(m, port->mstb);
48431bb76ff1Sjsg 	}
48441bb76ff1Sjsg }
48451bb76ff1Sjsg 
48461bb76ff1Sjsg #define DP_PAYLOAD_TABLE_SIZE		64
48471bb76ff1Sjsg 
48481bb76ff1Sjsg static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
48491bb76ff1Sjsg 				  char *buf)
48501bb76ff1Sjsg {
48511bb76ff1Sjsg 	int i;
48521bb76ff1Sjsg 
48531bb76ff1Sjsg 	for (i = 0; i < DP_PAYLOAD_TABLE_SIZE; i += 16) {
48541bb76ff1Sjsg 		if (drm_dp_dpcd_read(mgr->aux,
48551bb76ff1Sjsg 				     DP_PAYLOAD_TABLE_UPDATE_STATUS + i,
48561bb76ff1Sjsg 				     &buf[i], 16) != 16)
48571bb76ff1Sjsg 			return false;
48581bb76ff1Sjsg 	}
48591bb76ff1Sjsg 	return true;
48601bb76ff1Sjsg }
48611bb76ff1Sjsg 
48621bb76ff1Sjsg static void fetch_monitor_name(struct drm_dp_mst_topology_mgr *mgr,
48631bb76ff1Sjsg 			       struct drm_dp_mst_port *port, char *name,
48641bb76ff1Sjsg 			       int namelen)
48651bb76ff1Sjsg {
48661bb76ff1Sjsg 	struct edid *mst_edid;
48671bb76ff1Sjsg 
48681bb76ff1Sjsg 	mst_edid = drm_dp_mst_get_edid(port->connector, mgr, port);
48691bb76ff1Sjsg 	drm_edid_get_monitor_name(mst_edid, name, namelen);
48701bb76ff1Sjsg 	kfree(mst_edid);
48711bb76ff1Sjsg }
48721bb76ff1Sjsg 
48731bb76ff1Sjsg /**
48741bb76ff1Sjsg  * drm_dp_mst_dump_topology(): dump topology to seq file.
48751bb76ff1Sjsg  * @m: seq_file to dump output to
48761bb76ff1Sjsg  * @mgr: manager to dump current topology for.
48771bb76ff1Sjsg  *
48781bb76ff1Sjsg  * helper to dump MST topology to a seq file for debugfs.
48791bb76ff1Sjsg  */
48801bb76ff1Sjsg void drm_dp_mst_dump_topology(struct seq_file *m,
48811bb76ff1Sjsg 			      struct drm_dp_mst_topology_mgr *mgr)
48821bb76ff1Sjsg {
48831bb76ff1Sjsg 	struct drm_dp_mst_topology_state *state;
48841bb76ff1Sjsg 	struct drm_dp_mst_atomic_payload *payload;
48851bb76ff1Sjsg 	int i, ret;
48861bb76ff1Sjsg 
48871bb76ff1Sjsg 	mutex_lock(&mgr->lock);
48881bb76ff1Sjsg 	if (mgr->mst_primary)
48891bb76ff1Sjsg 		drm_dp_mst_dump_mstb(m, mgr->mst_primary);
48901bb76ff1Sjsg 
48911bb76ff1Sjsg 	/* dump VCPIs */
48921bb76ff1Sjsg 	mutex_unlock(&mgr->lock);
48931bb76ff1Sjsg 
48941bb76ff1Sjsg 	ret = drm_modeset_lock_single_interruptible(&mgr->base.lock);
48951bb76ff1Sjsg 	if (ret < 0)
48961bb76ff1Sjsg 		return;
48971bb76ff1Sjsg 
48981bb76ff1Sjsg 	state = to_drm_dp_mst_topology_state(mgr->base.state);
48991bb76ff1Sjsg 	seq_printf(m, "\n*** Atomic state info ***\n");
49001bb76ff1Sjsg 	seq_printf(m, "payload_mask: %x, max_payloads: %d, start_slot: %u, pbn_div: %d\n",
49011bb76ff1Sjsg 		   state->payload_mask, mgr->max_payloads, state->start_slot, state->pbn_div);
49021bb76ff1Sjsg 
49031bb76ff1Sjsg 	seq_printf(m, "\n| idx | port | vcpi | slots | pbn | dsc |     sink name     |\n");
49041bb76ff1Sjsg 	for (i = 0; i < mgr->max_payloads; i++) {
49051bb76ff1Sjsg 		list_for_each_entry(payload, &state->payloads, next) {
49061bb76ff1Sjsg 			char name[14];
49071bb76ff1Sjsg 
49081bb76ff1Sjsg 			if (payload->vcpi != i || payload->delete)
49091bb76ff1Sjsg 				continue;
49101bb76ff1Sjsg 
49111bb76ff1Sjsg 			fetch_monitor_name(mgr, payload->port, name, sizeof(name));
49121bb76ff1Sjsg 			seq_printf(m, " %5d %6d %6d %02d - %02d %5d %5s %19s\n",
49131bb76ff1Sjsg 				   i,
49141bb76ff1Sjsg 				   payload->port->port_num,
49151bb76ff1Sjsg 				   payload->vcpi,
49161bb76ff1Sjsg 				   payload->vc_start_slot,
49171bb76ff1Sjsg 				   payload->vc_start_slot + payload->time_slots - 1,
49181bb76ff1Sjsg 				   payload->pbn,
49191bb76ff1Sjsg 				   payload->dsc_enabled ? "Y" : "N",
49201bb76ff1Sjsg 				   (*name != 0) ? name : "Unknown");
49211bb76ff1Sjsg 		}
49221bb76ff1Sjsg 	}
49231bb76ff1Sjsg 
49241bb76ff1Sjsg 	seq_printf(m, "\n*** DPCD Info ***\n");
49251bb76ff1Sjsg 	mutex_lock(&mgr->lock);
49261bb76ff1Sjsg 	if (mgr->mst_primary) {
49271bb76ff1Sjsg 		u8 buf[DP_PAYLOAD_TABLE_SIZE];
49281bb76ff1Sjsg 		int ret;
49291bb76ff1Sjsg 
49301bb76ff1Sjsg 		if (drm_dp_read_dpcd_caps(mgr->aux, buf) < 0) {
49311bb76ff1Sjsg 			seq_printf(m, "dpcd read failed\n");
49321bb76ff1Sjsg 			goto out;
49331bb76ff1Sjsg 		}
49341bb76ff1Sjsg 		seq_printf(m, "dpcd: %*ph\n", DP_RECEIVER_CAP_SIZE, buf);
49351bb76ff1Sjsg 
49361bb76ff1Sjsg 		ret = drm_dp_dpcd_read(mgr->aux, DP_FAUX_CAP, buf, 2);
49371bb76ff1Sjsg 		if (ret != 2) {
49381bb76ff1Sjsg 			seq_printf(m, "faux/mst read failed\n");
49391bb76ff1Sjsg 			goto out;
49401bb76ff1Sjsg 		}
49411bb76ff1Sjsg 		seq_printf(m, "faux/mst: %*ph\n", 2, buf);
49421bb76ff1Sjsg 
49431bb76ff1Sjsg 		ret = drm_dp_dpcd_read(mgr->aux, DP_MSTM_CTRL, buf, 1);
49441bb76ff1Sjsg 		if (ret != 1) {
49451bb76ff1Sjsg 			seq_printf(m, "mst ctrl read failed\n");
49461bb76ff1Sjsg 			goto out;
49471bb76ff1Sjsg 		}
49481bb76ff1Sjsg 		seq_printf(m, "mst ctrl: %*ph\n", 1, buf);
49491bb76ff1Sjsg 
49501bb76ff1Sjsg 		/* dump the standard OUI branch header */
49511bb76ff1Sjsg 		ret = drm_dp_dpcd_read(mgr->aux, DP_BRANCH_OUI, buf, DP_BRANCH_OUI_HEADER_SIZE);
49521bb76ff1Sjsg 		if (ret != DP_BRANCH_OUI_HEADER_SIZE) {
49531bb76ff1Sjsg 			seq_printf(m, "branch oui read failed\n");
49541bb76ff1Sjsg 			goto out;
49551bb76ff1Sjsg 		}
49561bb76ff1Sjsg 		seq_printf(m, "branch oui: %*phN devid: ", 3, buf);
49571bb76ff1Sjsg 
49581bb76ff1Sjsg 		for (i = 0x3; i < 0x8 && buf[i]; i++)
49591bb76ff1Sjsg 			seq_printf(m, "%c", buf[i]);
49601bb76ff1Sjsg 		seq_printf(m, " revision: hw: %x.%x sw: %x.%x\n",
49611bb76ff1Sjsg 			   buf[0x9] >> 4, buf[0x9] & 0xf, buf[0xa], buf[0xb]);
49621bb76ff1Sjsg 		if (dump_dp_payload_table(mgr, buf))
49631bb76ff1Sjsg 			seq_printf(m, "payload table: %*ph\n", DP_PAYLOAD_TABLE_SIZE, buf);
49641bb76ff1Sjsg 	}
49651bb76ff1Sjsg 
49661bb76ff1Sjsg out:
49671bb76ff1Sjsg 	mutex_unlock(&mgr->lock);
49681bb76ff1Sjsg 	drm_modeset_unlock(&mgr->base.lock);
49691bb76ff1Sjsg }
49701bb76ff1Sjsg EXPORT_SYMBOL(drm_dp_mst_dump_topology);
49711bb76ff1Sjsg 
49721bb76ff1Sjsg static void drm_dp_tx_work(struct work_struct *work)
49731bb76ff1Sjsg {
49741bb76ff1Sjsg 	struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, tx_work);
49751bb76ff1Sjsg 
49761bb76ff1Sjsg 	mutex_lock(&mgr->qlock);
49771bb76ff1Sjsg 	if (!list_empty(&mgr->tx_msg_downq))
49781bb76ff1Sjsg 		process_single_down_tx_qlock(mgr);
49791bb76ff1Sjsg 	mutex_unlock(&mgr->qlock);
49801bb76ff1Sjsg }
49811bb76ff1Sjsg 
49821bb76ff1Sjsg static inline void
49831bb76ff1Sjsg drm_dp_delayed_destroy_port(struct drm_dp_mst_port *port)
49841bb76ff1Sjsg {
49851bb76ff1Sjsg 	drm_dp_port_set_pdt(port, DP_PEER_DEVICE_NONE, port->mcs);
49861bb76ff1Sjsg 
49871bb76ff1Sjsg 	if (port->connector) {
49881bb76ff1Sjsg 		drm_connector_unregister(port->connector);
49891bb76ff1Sjsg 		drm_connector_put(port->connector);
49901bb76ff1Sjsg 	}
49911bb76ff1Sjsg 
49921bb76ff1Sjsg 	drm_dp_mst_put_port_malloc(port);
49931bb76ff1Sjsg }
49941bb76ff1Sjsg 
49951bb76ff1Sjsg static inline void
49961bb76ff1Sjsg drm_dp_delayed_destroy_mstb(struct drm_dp_mst_branch *mstb)
49971bb76ff1Sjsg {
49981bb76ff1Sjsg 	struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
49991bb76ff1Sjsg 	struct drm_dp_mst_port *port, *port_tmp;
50001bb76ff1Sjsg 	struct drm_dp_sideband_msg_tx *txmsg, *txmsg_tmp;
50011bb76ff1Sjsg 	bool wake_tx = false;
50021bb76ff1Sjsg 
50031bb76ff1Sjsg 	mutex_lock(&mgr->lock);
50041bb76ff1Sjsg 	list_for_each_entry_safe(port, port_tmp, &mstb->ports, next) {
50051bb76ff1Sjsg 		list_del(&port->next);
50061bb76ff1Sjsg 		drm_dp_mst_topology_put_port(port);
50071bb76ff1Sjsg 	}
50081bb76ff1Sjsg 	mutex_unlock(&mgr->lock);
50091bb76ff1Sjsg 
50101bb76ff1Sjsg 	/* drop any tx slot msg */
50111bb76ff1Sjsg 	mutex_lock(&mstb->mgr->qlock);
50121bb76ff1Sjsg 	list_for_each_entry_safe(txmsg, txmsg_tmp, &mgr->tx_msg_downq, next) {
50131bb76ff1Sjsg 		if (txmsg->dst != mstb)
50141bb76ff1Sjsg 			continue;
50151bb76ff1Sjsg 
50161bb76ff1Sjsg 		txmsg->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
50171bb76ff1Sjsg 		list_del(&txmsg->next);
50181bb76ff1Sjsg 		wake_tx = true;
50191bb76ff1Sjsg 	}
50201bb76ff1Sjsg 	mutex_unlock(&mstb->mgr->qlock);
50211bb76ff1Sjsg 
50221bb76ff1Sjsg 	if (wake_tx)
50231bb76ff1Sjsg 		wake_up_all(&mstb->mgr->tx_waitq);
50241bb76ff1Sjsg 
50251bb76ff1Sjsg 	drm_dp_mst_put_mstb_malloc(mstb);
50261bb76ff1Sjsg }
50271bb76ff1Sjsg 
50281bb76ff1Sjsg static void drm_dp_delayed_destroy_work(struct work_struct *work)
50291bb76ff1Sjsg {
50301bb76ff1Sjsg 	struct drm_dp_mst_topology_mgr *mgr =
50311bb76ff1Sjsg 		container_of(work, struct drm_dp_mst_topology_mgr,
50321bb76ff1Sjsg 			     delayed_destroy_work);
50331bb76ff1Sjsg 	bool send_hotplug = false, go_again;
50341bb76ff1Sjsg 
50351bb76ff1Sjsg 	/*
50361bb76ff1Sjsg 	 * Not a regular list traversal, as we have to drop the destroy
50371bb76ff1Sjsg 	 * connector lock before destroying the mstb/port, to avoid AB->BA
50381bb76ff1Sjsg 	 * ordering between this lock and the config mutex.
50391bb76ff1Sjsg 	 */
50401bb76ff1Sjsg 	do {
50411bb76ff1Sjsg 		go_again = false;
50421bb76ff1Sjsg 
50431bb76ff1Sjsg 		for (;;) {
50441bb76ff1Sjsg 			struct drm_dp_mst_branch *mstb;
50451bb76ff1Sjsg 
50461bb76ff1Sjsg 			mutex_lock(&mgr->delayed_destroy_lock);
50471bb76ff1Sjsg 			mstb = list_first_entry_or_null(&mgr->destroy_branch_device_list,
50481bb76ff1Sjsg 							struct drm_dp_mst_branch,
50491bb76ff1Sjsg 							destroy_next);
50501bb76ff1Sjsg 			if (mstb)
50511bb76ff1Sjsg 				list_del(&mstb->destroy_next);
50521bb76ff1Sjsg 			mutex_unlock(&mgr->delayed_destroy_lock);
50531bb76ff1Sjsg 
50541bb76ff1Sjsg 			if (!mstb)
50551bb76ff1Sjsg 				break;
50561bb76ff1Sjsg 
50571bb76ff1Sjsg 			drm_dp_delayed_destroy_mstb(mstb);
50581bb76ff1Sjsg 			go_again = true;
50591bb76ff1Sjsg 		}
50601bb76ff1Sjsg 
50611bb76ff1Sjsg 		for (;;) {
50621bb76ff1Sjsg 			struct drm_dp_mst_port *port;
50631bb76ff1Sjsg 
50641bb76ff1Sjsg 			mutex_lock(&mgr->delayed_destroy_lock);
50651bb76ff1Sjsg 			port = list_first_entry_or_null(&mgr->destroy_port_list,
50661bb76ff1Sjsg 							struct drm_dp_mst_port,
50671bb76ff1Sjsg 							next);
50681bb76ff1Sjsg 			if (port)
50691bb76ff1Sjsg 				list_del(&port->next);
50701bb76ff1Sjsg 			mutex_unlock(&mgr->delayed_destroy_lock);
50711bb76ff1Sjsg 
50721bb76ff1Sjsg 			if (!port)
50731bb76ff1Sjsg 				break;
50741bb76ff1Sjsg 
50751bb76ff1Sjsg 			drm_dp_delayed_destroy_port(port);
50761bb76ff1Sjsg 			send_hotplug = true;
50771bb76ff1Sjsg 			go_again = true;
50781bb76ff1Sjsg 		}
50791bb76ff1Sjsg 	} while (go_again);
50801bb76ff1Sjsg 
50811bb76ff1Sjsg 	if (send_hotplug)
50821bb76ff1Sjsg 		drm_kms_helper_hotplug_event(mgr->dev);
50831bb76ff1Sjsg }
50841bb76ff1Sjsg 
50851bb76ff1Sjsg static struct drm_private_state *
50861bb76ff1Sjsg drm_dp_mst_duplicate_state(struct drm_private_obj *obj)
50871bb76ff1Sjsg {
50881bb76ff1Sjsg 	struct drm_dp_mst_topology_state *state, *old_state =
50891bb76ff1Sjsg 		to_dp_mst_topology_state(obj->state);
50901bb76ff1Sjsg 	struct drm_dp_mst_atomic_payload *pos, *payload;
50911bb76ff1Sjsg 
50921bb76ff1Sjsg 	state = kmemdup(old_state, sizeof(*state), GFP_KERNEL);
50931bb76ff1Sjsg 	if (!state)
50941bb76ff1Sjsg 		return NULL;
50951bb76ff1Sjsg 
50961bb76ff1Sjsg 	__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);
50971bb76ff1Sjsg 
50981bb76ff1Sjsg 	INIT_LIST_HEAD(&state->payloads);
50991bb76ff1Sjsg 	state->commit_deps = NULL;
51001bb76ff1Sjsg 	state->num_commit_deps = 0;
51011bb76ff1Sjsg 	state->pending_crtc_mask = 0;
51021bb76ff1Sjsg 
51031bb76ff1Sjsg 	list_for_each_entry(pos, &old_state->payloads, next) {
51041bb76ff1Sjsg 		/* Prune leftover freed timeslot allocations */
51051bb76ff1Sjsg 		if (pos->delete)
51061bb76ff1Sjsg 			continue;
51071bb76ff1Sjsg 
51081bb76ff1Sjsg 		payload = kmemdup(pos, sizeof(*payload), GFP_KERNEL);
51091bb76ff1Sjsg 		if (!payload)
51101bb76ff1Sjsg 			goto fail;
51111bb76ff1Sjsg 
51121bb76ff1Sjsg 		drm_dp_mst_get_port_malloc(payload->port);
51131bb76ff1Sjsg 		list_add(&payload->next, &state->payloads);
51141bb76ff1Sjsg 	}
51151bb76ff1Sjsg 
51161bb76ff1Sjsg 	return &state->base;
51171bb76ff1Sjsg 
51181bb76ff1Sjsg fail:
51191bb76ff1Sjsg 	list_for_each_entry_safe(pos, payload, &state->payloads, next) {
51201bb76ff1Sjsg 		drm_dp_mst_put_port_malloc(pos->port);
51211bb76ff1Sjsg 		kfree(pos);
51221bb76ff1Sjsg 	}
51231bb76ff1Sjsg 	kfree(state);
51241bb76ff1Sjsg 
51251bb76ff1Sjsg 	return NULL;
51261bb76ff1Sjsg }
51271bb76ff1Sjsg 
51281bb76ff1Sjsg static void drm_dp_mst_destroy_state(struct drm_private_obj *obj,
51291bb76ff1Sjsg 				     struct drm_private_state *state)
51301bb76ff1Sjsg {
51311bb76ff1Sjsg 	struct drm_dp_mst_topology_state *mst_state =
51321bb76ff1Sjsg 		to_dp_mst_topology_state(state);
51331bb76ff1Sjsg 	struct drm_dp_mst_atomic_payload *pos, *tmp;
51341bb76ff1Sjsg 	int i;
51351bb76ff1Sjsg 
51361bb76ff1Sjsg 	list_for_each_entry_safe(pos, tmp, &mst_state->payloads, next) {
51371bb76ff1Sjsg 		/* We only keep references to ports with active payloads */
51381bb76ff1Sjsg 		if (!pos->delete)
51391bb76ff1Sjsg 			drm_dp_mst_put_port_malloc(pos->port);
51401bb76ff1Sjsg 		kfree(pos);
51411bb76ff1Sjsg 	}
51421bb76ff1Sjsg 
51431bb76ff1Sjsg 	for (i = 0; i < mst_state->num_commit_deps; i++)
51441bb76ff1Sjsg 		drm_crtc_commit_put(mst_state->commit_deps[i]);
51451bb76ff1Sjsg 
51461bb76ff1Sjsg 	kfree(mst_state->commit_deps);
51471bb76ff1Sjsg 	kfree(mst_state);
51481bb76ff1Sjsg }
51491bb76ff1Sjsg 
51501bb76ff1Sjsg static bool drm_dp_mst_port_downstream_of_branch(struct drm_dp_mst_port *port,
51511bb76ff1Sjsg 						 struct drm_dp_mst_branch *branch)
51521bb76ff1Sjsg {
51531bb76ff1Sjsg 	while (port->parent) {
51541bb76ff1Sjsg 		if (port->parent == branch)
51551bb76ff1Sjsg 			return true;
51561bb76ff1Sjsg 
51571bb76ff1Sjsg 		if (port->parent->port_parent)
51581bb76ff1Sjsg 			port = port->parent->port_parent;
51591bb76ff1Sjsg 		else
51601bb76ff1Sjsg 			break;
51611bb76ff1Sjsg 	}
51621bb76ff1Sjsg 	return false;
51631bb76ff1Sjsg }
51641bb76ff1Sjsg 
51651bb76ff1Sjsg static int
51661bb76ff1Sjsg drm_dp_mst_atomic_check_port_bw_limit(struct drm_dp_mst_port *port,
51671bb76ff1Sjsg 				      struct drm_dp_mst_topology_state *state);
51681bb76ff1Sjsg 
51691bb76ff1Sjsg static int
51701bb76ff1Sjsg drm_dp_mst_atomic_check_mstb_bw_limit(struct drm_dp_mst_branch *mstb,
51711bb76ff1Sjsg 				      struct drm_dp_mst_topology_state *state)
51721bb76ff1Sjsg {
51731bb76ff1Sjsg 	struct drm_dp_mst_atomic_payload *payload;
51741bb76ff1Sjsg 	struct drm_dp_mst_port *port;
51751bb76ff1Sjsg 	int pbn_used = 0, ret;
51761bb76ff1Sjsg 	bool found = false;
51771bb76ff1Sjsg 
51781bb76ff1Sjsg 	/* Check that we have at least one port in our state that's downstream
51791bb76ff1Sjsg 	 * of this branch, otherwise we can skip this branch
51801bb76ff1Sjsg 	 */
51811bb76ff1Sjsg 	list_for_each_entry(payload, &state->payloads, next) {
51821bb76ff1Sjsg 		if (!payload->pbn ||
51831bb76ff1Sjsg 		    !drm_dp_mst_port_downstream_of_branch(payload->port, mstb))
51841bb76ff1Sjsg 			continue;
51851bb76ff1Sjsg 
51861bb76ff1Sjsg 		found = true;
51871bb76ff1Sjsg 		break;
51881bb76ff1Sjsg 	}
51891bb76ff1Sjsg 	if (!found)
51901bb76ff1Sjsg 		return 0;
51911bb76ff1Sjsg 
51921bb76ff1Sjsg 	if (mstb->port_parent)
51931bb76ff1Sjsg 		drm_dbg_atomic(mstb->mgr->dev,
51941bb76ff1Sjsg 			       "[MSTB:%p] [MST PORT:%p] Checking bandwidth limits on [MSTB:%p]\n",
51951bb76ff1Sjsg 			       mstb->port_parent->parent, mstb->port_parent, mstb);
51961bb76ff1Sjsg 	else
51971bb76ff1Sjsg 		drm_dbg_atomic(mstb->mgr->dev, "[MSTB:%p] Checking bandwidth limits\n", mstb);
51981bb76ff1Sjsg 
51991bb76ff1Sjsg 	list_for_each_entry(port, &mstb->ports, next) {
52001bb76ff1Sjsg 		ret = drm_dp_mst_atomic_check_port_bw_limit(port, state);
52011bb76ff1Sjsg 		if (ret < 0)
52021bb76ff1Sjsg 			return ret;
52031bb76ff1Sjsg 
52041bb76ff1Sjsg 		pbn_used += ret;
52051bb76ff1Sjsg 	}
52061bb76ff1Sjsg 
52071bb76ff1Sjsg 	return pbn_used;
52081bb76ff1Sjsg }
52091bb76ff1Sjsg 
52101bb76ff1Sjsg static int
52111bb76ff1Sjsg drm_dp_mst_atomic_check_port_bw_limit(struct drm_dp_mst_port *port,
52121bb76ff1Sjsg 				      struct drm_dp_mst_topology_state *state)
52131bb76ff1Sjsg {
52141bb76ff1Sjsg 	struct drm_dp_mst_atomic_payload *payload;
52151bb76ff1Sjsg 	int pbn_used = 0;
52161bb76ff1Sjsg 
52171bb76ff1Sjsg 	if (port->pdt == DP_PEER_DEVICE_NONE)
52181bb76ff1Sjsg 		return 0;
52191bb76ff1Sjsg 
52201bb76ff1Sjsg 	if (drm_dp_mst_is_end_device(port->pdt, port->mcs)) {
52211bb76ff1Sjsg 		payload = drm_atomic_get_mst_payload_state(state, port);
52221bb76ff1Sjsg 		if (!payload)
52231bb76ff1Sjsg 			return 0;
52241bb76ff1Sjsg 
52251bb76ff1Sjsg 		/*
52261bb76ff1Sjsg 		 * This could happen if the sink deasserted its HPD line, but
52271bb76ff1Sjsg 		 * the branch device still reports it as attached (PDT != NONE).
52281bb76ff1Sjsg 		 */
52291bb76ff1Sjsg 		if (!port->full_pbn) {
52301bb76ff1Sjsg 			drm_dbg_atomic(port->mgr->dev,
52311bb76ff1Sjsg 				       "[MSTB:%p] [MST PORT:%p] no BW available for the port\n",
52321bb76ff1Sjsg 				       port->parent, port);
52331bb76ff1Sjsg 			return -EINVAL;
52341bb76ff1Sjsg 		}
52351bb76ff1Sjsg 
52361bb76ff1Sjsg 		pbn_used = payload->pbn;
52371bb76ff1Sjsg 	} else {
52381bb76ff1Sjsg 		pbn_used = drm_dp_mst_atomic_check_mstb_bw_limit(port->mstb,
52391bb76ff1Sjsg 								 state);
52401bb76ff1Sjsg 		if (pbn_used <= 0)
52411bb76ff1Sjsg 			return pbn_used;
52421bb76ff1Sjsg 	}
52431bb76ff1Sjsg 
52441bb76ff1Sjsg 	if (pbn_used > port->full_pbn) {
52451bb76ff1Sjsg 		drm_dbg_atomic(port->mgr->dev,
52461bb76ff1Sjsg 			       "[MSTB:%p] [MST PORT:%p] required PBN of %d exceeds port limit of %d\n",
52471bb76ff1Sjsg 			       port->parent, port, pbn_used, port->full_pbn);
52481bb76ff1Sjsg 		return -ENOSPC;
52491bb76ff1Sjsg 	}
52501bb76ff1Sjsg 
52511bb76ff1Sjsg 	drm_dbg_atomic(port->mgr->dev, "[MSTB:%p] [MST PORT:%p] uses %d out of %d PBN\n",
52521bb76ff1Sjsg 		       port->parent, port, pbn_used, port->full_pbn);
52531bb76ff1Sjsg 
52541bb76ff1Sjsg 	return pbn_used;
52551bb76ff1Sjsg }
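
/*
 * Worked example (illustrative numbers, not from the spec): for a branch
 * device with two downstream sinks carrying payloads of 1000 and 1500 PBN,
 * drm_dp_mst_atomic_check_mstb_bw_limit() sums the per-port results to
 * 2500 PBN, and the port check above then compares that sum against the
 * full_pbn of the port connecting the branch upstream, returning -ENOSPC
 * if the link cannot carry it.
 */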
52561bb76ff1Sjsg 
52571bb76ff1Sjsg static inline int
52581bb76ff1Sjsg drm_dp_mst_atomic_check_payload_alloc_limits(struct drm_dp_mst_topology_mgr *mgr,
52591bb76ff1Sjsg 					     struct drm_dp_mst_topology_state *mst_state)
52601bb76ff1Sjsg {
52611bb76ff1Sjsg 	struct drm_dp_mst_atomic_payload *payload;
52621bb76ff1Sjsg 	int avail_slots = mst_state->total_avail_slots, payload_count = 0;
52631bb76ff1Sjsg 
52641bb76ff1Sjsg 	list_for_each_entry(payload, &mst_state->payloads, next) {
52651bb76ff1Sjsg 		/* Releasing payloads is always OK - even if the port is gone */
52661bb76ff1Sjsg 		if (payload->delete) {
52671bb76ff1Sjsg 			drm_dbg_atomic(mgr->dev, "[MST PORT:%p] releases all time slots\n",
52681bb76ff1Sjsg 				       payload->port);
52691bb76ff1Sjsg 			continue;
52701bb76ff1Sjsg 		}
52711bb76ff1Sjsg 
52721bb76ff1Sjsg 		drm_dbg_atomic(mgr->dev, "[MST PORT:%p] requires %d time slots\n",
52731bb76ff1Sjsg 			       payload->port, payload->time_slots);
52741bb76ff1Sjsg 
52751bb76ff1Sjsg 		avail_slots -= payload->time_slots;
52761bb76ff1Sjsg 		if (avail_slots < 0) {
52771bb76ff1Sjsg 			drm_dbg_atomic(mgr->dev,
52781bb76ff1Sjsg 				       "[MST PORT:%p] not enough time slots in mst state %p (avail=%d)\n",
52791bb76ff1Sjsg 				       payload->port, mst_state, avail_slots + payload->time_slots);
52801bb76ff1Sjsg 			return -ENOSPC;
52811bb76ff1Sjsg 		}
52821bb76ff1Sjsg 
52831bb76ff1Sjsg 		if (++payload_count > mgr->max_payloads) {
52841bb76ff1Sjsg 			drm_dbg_atomic(mgr->dev,
52851bb76ff1Sjsg 				       "[MST MGR:%p] state %p has too many payloads (max=%d)\n",
52861bb76ff1Sjsg 				       mgr, mst_state, mgr->max_payloads);
52871bb76ff1Sjsg 			return -EINVAL;
52881bb76ff1Sjsg 		}
52891bb76ff1Sjsg 
52901bb76ff1Sjsg 		/* Assign a VCPI */
52911bb76ff1Sjsg 		if (!payload->vcpi) {
52921bb76ff1Sjsg 			payload->vcpi = ffz(mst_state->payload_mask) + 1;
52931bb76ff1Sjsg 			drm_dbg_atomic(mgr->dev, "[MST PORT:%p] assigned VCPI #%d\n",
52941bb76ff1Sjsg 				       payload->port, payload->vcpi);
52951bb76ff1Sjsg 			mst_state->payload_mask |= BIT(payload->vcpi - 1);
52961bb76ff1Sjsg 		}
52971bb76ff1Sjsg 	}
52981bb76ff1Sjsg 
52991bb76ff1Sjsg 	if (!payload_count)
53001bb76ff1Sjsg 		mst_state->pbn_div = 0;
53011bb76ff1Sjsg 
53021bb76ff1Sjsg 	drm_dbg_atomic(mgr->dev, "[MST MGR:%p] mst state %p TU pbn_div=%d avail=%d used=%d\n",
53031bb76ff1Sjsg 		       mgr, mst_state, mst_state->pbn_div, avail_slots,
53041bb76ff1Sjsg 		       mst_state->total_avail_slots - avail_slots);
53051bb76ff1Sjsg 
53061bb76ff1Sjsg 	return 0;
53071bb76ff1Sjsg }
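
/*
 * Worked example (illustrative): with mst_state->total_avail_slots = 63 and
 * three payloads requiring 20, 20 and 30 time slots, avail_slots drops
 * 63 -> 43 -> 23 -> -7, so the third payload fails with -ENOSPC. VCPIs are
 * handed out from the lowest clear bit of payload_mask (ffz() + 1), so the
 * first payload gets VCPI 1, the second VCPI 2, and so on.
 */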
53081bb76ff1Sjsg 
53091bb76ff1Sjsg /**
53101bb76ff1Sjsg  * drm_dp_mst_add_affected_dsc_crtcs - flag DSC-affected CRTCs for a modeset
53111bb76ff1Sjsg  * @state: Pointer to the new struct drm_atomic_state
53121bb76ff1Sjsg  * @mgr: MST topology manager
53131bb76ff1Sjsg  *
53141bb76ff1Sjsg  * Whenever there is a change in the MST topology, the DSC configuration
53151bb76ff1Sjsg  * has to be recalculated, so a modeset needs to be triggered on all
53161bb76ff1Sjsg  * CRTCs in that topology that are affected by the change.
53181bb76ff1Sjsg  *
53191bb76ff1Sjsg  * See also:
53201bb76ff1Sjsg  * drm_dp_mst_atomic_enable_dsc()
53211bb76ff1Sjsg  */
53221bb76ff1Sjsg int drm_dp_mst_add_affected_dsc_crtcs(struct drm_atomic_state *state, struct drm_dp_mst_topology_mgr *mgr)
53231bb76ff1Sjsg {
53241bb76ff1Sjsg 	struct drm_dp_mst_topology_state *mst_state;
53251bb76ff1Sjsg 	struct drm_dp_mst_atomic_payload *pos;
53261bb76ff1Sjsg 	struct drm_connector *connector;
53271bb76ff1Sjsg 	struct drm_connector_state *conn_state;
53281bb76ff1Sjsg 	struct drm_crtc *crtc;
53291bb76ff1Sjsg 	struct drm_crtc_state *crtc_state;
53301bb76ff1Sjsg 
53311bb76ff1Sjsg 	mst_state = drm_atomic_get_mst_topology_state(state, mgr);
53321bb76ff1Sjsg 
53331bb76ff1Sjsg 	if (IS_ERR(mst_state))
53341bb76ff1Sjsg 		return PTR_ERR(mst_state);
53351bb76ff1Sjsg 
53361bb76ff1Sjsg 	list_for_each_entry(pos, &mst_state->payloads, next) {
53371bb76ff1Sjsg 
53381bb76ff1Sjsg 		connector = pos->port->connector;
53391bb76ff1Sjsg 
53401bb76ff1Sjsg 		if (!connector)
53411bb76ff1Sjsg 			return -EINVAL;
53421bb76ff1Sjsg 
53431bb76ff1Sjsg 		conn_state = drm_atomic_get_connector_state(state, connector);
53441bb76ff1Sjsg 
53451bb76ff1Sjsg 		if (IS_ERR(conn_state))
53461bb76ff1Sjsg 			return PTR_ERR(conn_state);
53471bb76ff1Sjsg 
53481bb76ff1Sjsg 		crtc = conn_state->crtc;
53491bb76ff1Sjsg 
53501bb76ff1Sjsg 		if (!crtc)
53511bb76ff1Sjsg 			continue;
53521bb76ff1Sjsg 
53531bb76ff1Sjsg 		if (!drm_dp_mst_dsc_aux_for_port(pos->port))
53541bb76ff1Sjsg 			continue;
53551bb76ff1Sjsg 
53561bb76ff1Sjsg 		crtc_state = drm_atomic_get_crtc_state(mst_state->base.state, crtc);
53571bb76ff1Sjsg 
53581bb76ff1Sjsg 		if (IS_ERR(crtc_state))
53591bb76ff1Sjsg 			return PTR_ERR(crtc_state);
53601bb76ff1Sjsg 
53611bb76ff1Sjsg 		drm_dbg_atomic(mgr->dev, "[MST MGR:%p] Setting mode_changed flag on CRTC %p\n",
53621bb76ff1Sjsg 			       mgr, crtc);
53631bb76ff1Sjsg 
53641bb76ff1Sjsg 		crtc_state->mode_changed = true;
53651bb76ff1Sjsg 	}
53661bb76ff1Sjsg 	return 0;
53671bb76ff1Sjsg }
53681bb76ff1Sjsg EXPORT_SYMBOL(drm_dp_mst_add_affected_dsc_crtcs);
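
/*
 * Example (illustrative, not from any specific driver): a DSC-capable driver
 * would typically call this from its atomic check path whenever the topology
 * may have changed, e.g.:
 *
 *	ret = drm_dp_mst_add_affected_dsc_crtcs(state, &my_connector->mst_mgr);
 *	if (ret)
 *		return ret;
 *
 * where my_connector is a hypothetical driver structure embedding the
 * topology manager.
 */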
53691bb76ff1Sjsg 
53701bb76ff1Sjsg /**
53711bb76ff1Sjsg  * drm_dp_mst_atomic_enable_dsc - Set DSC Enable Flag to On/Off
53721bb76ff1Sjsg  * @state: Pointer to the new drm_atomic_state
53731bb76ff1Sjsg  * @port: Pointer to the affected MST Port
53741bb76ff1Sjsg  * @pbn: Newly recalculated bandwidth required for the link with DSC enabled
53751bb76ff1Sjsg  * @enable: Boolean flag to enable or disable DSC on the port
53761bb76ff1Sjsg  *
53771bb76ff1Sjsg  * This function enables or disables DSC on the given port. When enabling,
53781bb76ff1Sjsg  * it recalculates the port's time slot allocation from the provided PBN
53791bb76ff1Sjsg  * and sets the dsc_enabled flag to keep track of which ports have DSC
53801bb76ff1Sjsg  * enabled.
53811bb76ff1Sjsg  *
53821bb76ff1Sjsg  */
53831bb76ff1Sjsg int drm_dp_mst_atomic_enable_dsc(struct drm_atomic_state *state,
53841bb76ff1Sjsg 				 struct drm_dp_mst_port *port,
53851bb76ff1Sjsg 				 int pbn, bool enable)
53861bb76ff1Sjsg {
53871bb76ff1Sjsg 	struct drm_dp_mst_topology_state *mst_state;
53881bb76ff1Sjsg 	struct drm_dp_mst_atomic_payload *payload;
53891bb76ff1Sjsg 	int time_slots = 0;
53901bb76ff1Sjsg 
53911bb76ff1Sjsg 	mst_state = drm_atomic_get_mst_topology_state(state, port->mgr);
53921bb76ff1Sjsg 	if (IS_ERR(mst_state))
53931bb76ff1Sjsg 		return PTR_ERR(mst_state);
53941bb76ff1Sjsg 
53951bb76ff1Sjsg 	payload = drm_atomic_get_mst_payload_state(mst_state, port);
53961bb76ff1Sjsg 	if (!payload) {
53971bb76ff1Sjsg 		drm_dbg_atomic(state->dev,
53981bb76ff1Sjsg 			       "[MST PORT:%p] Couldn't find payload in mst state %p\n",
53991bb76ff1Sjsg 			       port, mst_state);
54001bb76ff1Sjsg 		return -EINVAL;
54011bb76ff1Sjsg 	}
54021bb76ff1Sjsg 
54031bb76ff1Sjsg 	if (payload->dsc_enabled == enable) {
54041bb76ff1Sjsg 		drm_dbg_atomic(state->dev,
54051bb76ff1Sjsg 			       "[MST PORT:%p] DSC flag is already set to %d, returning %d time slots\n",
54061bb76ff1Sjsg 			       port, enable, payload->time_slots);
54071bb76ff1Sjsg 		time_slots = payload->time_slots;
54081bb76ff1Sjsg 	}
54091bb76ff1Sjsg 
54101bb76ff1Sjsg 	if (enable) {
54111bb76ff1Sjsg 		time_slots = drm_dp_atomic_find_time_slots(state, port->mgr, port, pbn);
54121bb76ff1Sjsg 		drm_dbg_atomic(state->dev,
54131bb76ff1Sjsg 			       "[MST PORT:%p] Enabling DSC flag, reallocating %d time slots on the port\n",
54141bb76ff1Sjsg 			       port, time_slots);
54151bb76ff1Sjsg 		if (time_slots < 0)
54161bb76ff1Sjsg 			return -EINVAL;
54171bb76ff1Sjsg 	}
54181bb76ff1Sjsg 
54191bb76ff1Sjsg 	payload->dsc_enabled = enable;
54201bb76ff1Sjsg 
54211bb76ff1Sjsg 	return time_slots;
54221bb76ff1Sjsg }
54231bb76ff1Sjsg EXPORT_SYMBOL(drm_dp_mst_atomic_enable_dsc);
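
/*
 * Example (illustrative): a driver enabling DSC after recomputing the
 * compressed stream's bandwidth might do:
 *
 *	dsc_pbn = drm_dp_calc_pbn_mode(mode->clock, compressed_bpp, true);
 *	slots = drm_dp_mst_atomic_enable_dsc(state, port, dsc_pbn, true);
 *	if (slots < 0)
 *		return slots;
 *
 * dsc_pbn and compressed_bpp are hypothetical driver-computed values,
 * assuming the three-argument form of drm_dp_calc_pbn_mode().
 */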
54241bb76ff1Sjsg 
54251bb76ff1Sjsg /**
54261bb76ff1Sjsg  * drm_dp_mst_atomic_check - Check that the new state of an MST topology in an
54271bb76ff1Sjsg  * atomic update is valid
54281bb76ff1Sjsg  * @state: Pointer to the new &struct drm_dp_mst_topology_state
54291bb76ff1Sjsg  *
54301bb76ff1Sjsg  * Checks the given topology state for an atomic update to ensure that it's
54311bb76ff1Sjsg  * valid. This includes checking whether there's enough bandwidth to support
54321bb76ff1Sjsg  * the new timeslot allocations in the atomic update.
54331bb76ff1Sjsg  *
54341bb76ff1Sjsg  * Any atomic drivers supporting DP MST must make sure to call this after
54351bb76ff1Sjsg  * checking the rest of their state in their
54361bb76ff1Sjsg  * &drm_mode_config_funcs.atomic_check() callback.
54371bb76ff1Sjsg  *
54381bb76ff1Sjsg  * See also:
54391bb76ff1Sjsg  * drm_dp_atomic_find_time_slots()
54401bb76ff1Sjsg  * drm_dp_atomic_release_time_slots()
54411bb76ff1Sjsg  *
54421bb76ff1Sjsg  * Returns:
54431bb76ff1Sjsg  *
54441bb76ff1Sjsg  * 0 if the new state is valid, negative error code otherwise.
54451bb76ff1Sjsg  */
54461bb76ff1Sjsg int drm_dp_mst_atomic_check(struct drm_atomic_state *state)
54471bb76ff1Sjsg {
54481bb76ff1Sjsg 	struct drm_dp_mst_topology_mgr *mgr;
54491bb76ff1Sjsg 	struct drm_dp_mst_topology_state *mst_state;
54501bb76ff1Sjsg 	int i, ret = 0;
54511bb76ff1Sjsg 
54521bb76ff1Sjsg 	for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
54531bb76ff1Sjsg 		if (!mgr->mst_state)
54541bb76ff1Sjsg 			continue;
54551bb76ff1Sjsg 
54561bb76ff1Sjsg 		ret = drm_dp_mst_atomic_check_payload_alloc_limits(mgr, mst_state);
54571bb76ff1Sjsg 		if (ret)
54581bb76ff1Sjsg 			break;
54591bb76ff1Sjsg 
54601bb76ff1Sjsg 		mutex_lock(&mgr->lock);
54611bb76ff1Sjsg 		ret = drm_dp_mst_atomic_check_mstb_bw_limit(mgr->mst_primary,
54621bb76ff1Sjsg 							    mst_state);
54631bb76ff1Sjsg 		mutex_unlock(&mgr->lock);
54641bb76ff1Sjsg 		if (ret < 0)
54651bb76ff1Sjsg 			break;
54661bb76ff1Sjsg 		else
54671bb76ff1Sjsg 			ret = 0;
54681bb76ff1Sjsg 	}
54691bb76ff1Sjsg 
54701bb76ff1Sjsg 	return ret;
54711bb76ff1Sjsg }
54721bb76ff1Sjsg EXPORT_SYMBOL(drm_dp_mst_atomic_check);
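
/*
 * Example (illustrative): the call is usually the final step of a driver's
 * &drm_mode_config_funcs.atomic_check implementation:
 *
 *	static int my_atomic_check(struct drm_device *dev,
 *				   struct drm_atomic_state *state)
 *	{
 *		int ret;
 *
 *		ret = drm_atomic_helper_check(dev, state);
 *		if (ret)
 *			return ret;
 *
 *		return drm_dp_mst_atomic_check(state);
 *	}
 */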
54731bb76ff1Sjsg 
54741bb76ff1Sjsg const struct drm_private_state_funcs drm_dp_mst_topology_state_funcs = {
54751bb76ff1Sjsg 	.atomic_duplicate_state = drm_dp_mst_duplicate_state,
54761bb76ff1Sjsg 	.atomic_destroy_state = drm_dp_mst_destroy_state,
54771bb76ff1Sjsg };
54781bb76ff1Sjsg EXPORT_SYMBOL(drm_dp_mst_topology_state_funcs);
54791bb76ff1Sjsg 
54801bb76ff1Sjsg /**
54811bb76ff1Sjsg  * drm_atomic_get_mst_topology_state: get MST topology state
54821bb76ff1Sjsg  * @state: global atomic state
54831bb76ff1Sjsg  * @mgr: MST topology manager, also the private object in this case
54841bb76ff1Sjsg  *
54851bb76ff1Sjsg  * This function wraps drm_atomic_get_priv_obj_state() passing in the MST atomic
54861bb76ff1Sjsg  * state vtable so that the private object state returned is that of an MST
54871bb76ff1Sjsg  * topology object.
54881bb76ff1Sjsg  *
54891bb76ff1Sjsg  * RETURNS:
54901bb76ff1Sjsg  * Returns:
54911bb76ff1Sjsg  * The MST topology state or error pointer.
54921bb76ff1Sjsg  */
54931bb76ff1Sjsg struct drm_dp_mst_topology_state *drm_atomic_get_mst_topology_state(struct drm_atomic_state *state,
54941bb76ff1Sjsg 								    struct drm_dp_mst_topology_mgr *mgr)
54951bb76ff1Sjsg {
54961bb76ff1Sjsg 	return to_dp_mst_topology_state(drm_atomic_get_private_obj_state(state, &mgr->base));
54971bb76ff1Sjsg }
54981bb76ff1Sjsg EXPORT_SYMBOL(drm_atomic_get_mst_topology_state);
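
/*
 * Example (illustrative): as with other atomic state accessors, the return
 * value must be checked with IS_ERR() before use:
 *
 *	mst_state = drm_atomic_get_mst_topology_state(state, mgr);
 *	if (IS_ERR(mst_state))
 *		return PTR_ERR(mst_state);
 */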
54991bb76ff1Sjsg 
55001bb76ff1Sjsg /**
550161b9d4c9Sjsg  * drm_atomic_get_old_mst_topology_state: get old MST topology state in atomic state, if any
55021bb76ff1Sjsg  * @state: global atomic state
55031bb76ff1Sjsg  * @mgr: MST topology manager, also the private object in this case
55041bb76ff1Sjsg  *
550561b9d4c9Sjsg  * This function wraps drm_atomic_get_old_private_obj_state() passing in the MST atomic
55061bb76ff1Sjsg  * state vtable so that the private object state returned is that of an MST
55071bb76ff1Sjsg  * topology object.
55081bb76ff1Sjsg  *
55091bb76ff1Sjsg  * Returns:
55101bb76ff1Sjsg  *
551161b9d4c9Sjsg  * The old MST topology state, or NULL if there's no topology state for this MST mgr
551261b9d4c9Sjsg  * in the global atomic state
551361b9d4c9Sjsg  */
551461b9d4c9Sjsg struct drm_dp_mst_topology_state *
551561b9d4c9Sjsg drm_atomic_get_old_mst_topology_state(struct drm_atomic_state *state,
551661b9d4c9Sjsg 				      struct drm_dp_mst_topology_mgr *mgr)
551761b9d4c9Sjsg {
551861b9d4c9Sjsg 	struct drm_private_state *old_priv_state =
551961b9d4c9Sjsg 		drm_atomic_get_old_private_obj_state(state, &mgr->base);
552061b9d4c9Sjsg 
552161b9d4c9Sjsg 	return old_priv_state ? to_dp_mst_topology_state(old_priv_state) : NULL;
552261b9d4c9Sjsg }
552361b9d4c9Sjsg EXPORT_SYMBOL(drm_atomic_get_old_mst_topology_state);
552461b9d4c9Sjsg 
552561b9d4c9Sjsg /**
552661b9d4c9Sjsg  * drm_atomic_get_new_mst_topology_state: get new MST topology state in atomic state, if any
552761b9d4c9Sjsg  * @state: global atomic state
552861b9d4c9Sjsg  * @mgr: MST topology manager, also the private object in this case
552961b9d4c9Sjsg  *
553061b9d4c9Sjsg  * This function wraps drm_atomic_get_new_private_obj_state() passing in the MST atomic
553161b9d4c9Sjsg  * state vtable so that the private object state returned is that of an MST
553261b9d4c9Sjsg  * topology object.
553361b9d4c9Sjsg  *
553461b9d4c9Sjsg  * Returns:
553561b9d4c9Sjsg  *
553661b9d4c9Sjsg  * The new MST topology state, or NULL if there's no topology state for this MST mgr
55371bb76ff1Sjsg  * in the global atomic state
55381bb76ff1Sjsg  */
55391bb76ff1Sjsg struct drm_dp_mst_topology_state *
55401bb76ff1Sjsg drm_atomic_get_new_mst_topology_state(struct drm_atomic_state *state,
55411bb76ff1Sjsg 				      struct drm_dp_mst_topology_mgr *mgr)
55421bb76ff1Sjsg {
554361b9d4c9Sjsg 	struct drm_private_state *new_priv_state =
55441bb76ff1Sjsg 		drm_atomic_get_new_private_obj_state(state, &mgr->base);
55451bb76ff1Sjsg 
554661b9d4c9Sjsg 	return new_priv_state ? to_dp_mst_topology_state(new_priv_state) : NULL;
55471bb76ff1Sjsg }
55481bb76ff1Sjsg EXPORT_SYMBOL(drm_atomic_get_new_mst_topology_state);
55491bb76ff1Sjsg 
55501bb76ff1Sjsg /**
55511bb76ff1Sjsg  * drm_dp_mst_topology_mgr_init - initialise a topology manager
55521bb76ff1Sjsg  * @mgr: manager struct to initialise
55531bb76ff1Sjsg  * @dev: device providing this structure - for i2c addition.
55541bb76ff1Sjsg  * @aux: DP helper aux channel to talk to this device
55551bb76ff1Sjsg  * @max_dpcd_transaction_bytes: hw specific DPCD transaction limit
55561bb76ff1Sjsg  * @max_payloads: maximum number of payloads this GPU can source
55571bb76ff1Sjsg  * @conn_base_id: the connector object ID the MST device is connected to.
55581bb76ff1Sjsg  *
55591bb76ff1Sjsg  * Returns: 0 on success, or a negative error code on failure
55601bb76ff1Sjsg  */
55611bb76ff1Sjsg int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
55621bb76ff1Sjsg 				 struct drm_device *dev, struct drm_dp_aux *aux,
55631bb76ff1Sjsg 				 int max_dpcd_transaction_bytes, int max_payloads,
55641bb76ff1Sjsg 				 int conn_base_id)
55651bb76ff1Sjsg {
55661bb76ff1Sjsg 	struct drm_dp_mst_topology_state *mst_state;
55671bb76ff1Sjsg 
55681bb76ff1Sjsg 	rw_init(&mgr->lock, "mst");
55691bb76ff1Sjsg 	rw_init(&mgr->qlock, "mstq");
55701bb76ff1Sjsg 	rw_init(&mgr->delayed_destroy_lock, "mstdc");
55711bb76ff1Sjsg 	rw_init(&mgr->up_req_lock, "mstup");
55721bb76ff1Sjsg 	rw_init(&mgr->probe_lock, "mstprb");
55731bb76ff1Sjsg #if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
55741bb76ff1Sjsg 	rw_init(&mgr->topology_ref_history_lock, "msttr");
55751bb76ff1Sjsg 	stack_depot_init();
55761bb76ff1Sjsg #endif
55771bb76ff1Sjsg 	INIT_LIST_HEAD(&mgr->tx_msg_downq);
55781bb76ff1Sjsg 	INIT_LIST_HEAD(&mgr->destroy_port_list);
55791bb76ff1Sjsg 	INIT_LIST_HEAD(&mgr->destroy_branch_device_list);
55801bb76ff1Sjsg 	INIT_LIST_HEAD(&mgr->up_req_list);
55811bb76ff1Sjsg 
55821bb76ff1Sjsg 	/*
55831bb76ff1Sjsg 	 * delayed_destroy_work will be queued on a dedicated WQ, so that any
55841bb76ff1Sjsg 	 * requeuing will also be flushed when deinitializing the topology manager.
55851bb76ff1Sjsg 	 */
55861bb76ff1Sjsg 	mgr->delayed_destroy_wq = alloc_ordered_workqueue("drm_dp_mst_wq", 0);
55871bb76ff1Sjsg 	if (mgr->delayed_destroy_wq == NULL)
55881bb76ff1Sjsg 		return -ENOMEM;
55891bb76ff1Sjsg 
55901bb76ff1Sjsg 	INIT_WORK(&mgr->work, drm_dp_mst_link_probe_work);
55911bb76ff1Sjsg 	INIT_WORK(&mgr->tx_work, drm_dp_tx_work);
55921bb76ff1Sjsg 	INIT_WORK(&mgr->delayed_destroy_work, drm_dp_delayed_destroy_work);
55931bb76ff1Sjsg 	INIT_WORK(&mgr->up_req_work, drm_dp_mst_up_req_work);
55941bb76ff1Sjsg 	init_waitqueue_head(&mgr->tx_waitq);
55951bb76ff1Sjsg 	mgr->dev = dev;
55961bb76ff1Sjsg 	mgr->aux = aux;
55971bb76ff1Sjsg 	mgr->max_dpcd_transaction_bytes = max_dpcd_transaction_bytes;
55981bb76ff1Sjsg 	mgr->max_payloads = max_payloads;
55991bb76ff1Sjsg 	mgr->conn_base_id = conn_base_id;
56001bb76ff1Sjsg 
56011bb76ff1Sjsg 	mst_state = kzalloc(sizeof(*mst_state), GFP_KERNEL);
56021bb76ff1Sjsg 	if (mst_state == NULL)
56031bb76ff1Sjsg 		return -ENOMEM;
56041bb76ff1Sjsg 
56051bb76ff1Sjsg 	mst_state->total_avail_slots = 63;
56061bb76ff1Sjsg 	mst_state->start_slot = 1;
56071bb76ff1Sjsg 
56081bb76ff1Sjsg 	mst_state->mgr = mgr;
56091bb76ff1Sjsg 	INIT_LIST_HEAD(&mst_state->payloads);
56101bb76ff1Sjsg 
56111bb76ff1Sjsg 	drm_atomic_private_obj_init(dev, &mgr->base,
56121bb76ff1Sjsg 				    &mst_state->base,
56131bb76ff1Sjsg 				    &drm_dp_mst_topology_state_funcs);
56141bb76ff1Sjsg 
56151bb76ff1Sjsg 	return 0;
56161bb76ff1Sjsg }
56171bb76ff1Sjsg EXPORT_SYMBOL(drm_dp_mst_topology_mgr_init);
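
/*
 * Example (illustrative): initialisation from a hypothetical driver probe
 * path; the DPCD transaction limit and payload count are hardware-specific
 * and the values below are placeholders only:
 *
 *	ret = drm_dp_mst_topology_mgr_init(&my_conn->mst_mgr, dev,
 *					   &my_conn->aux, 16, 4,
 *					   my_conn->base.base.id);
 *	if (ret)
 *		return ret;
 */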
56181bb76ff1Sjsg 
56191bb76ff1Sjsg /**
56201bb76ff1Sjsg  * drm_dp_mst_topology_mgr_destroy() - destroy topology manager.
56211bb76ff1Sjsg  * @mgr: manager to destroy
56221bb76ff1Sjsg  */
56231bb76ff1Sjsg void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr)
56241bb76ff1Sjsg {
56251bb76ff1Sjsg 	drm_dp_mst_topology_mgr_set_mst(mgr, false);
56261bb76ff1Sjsg 	flush_work(&mgr->work);
56271bb76ff1Sjsg 	/* The following will also drain any requeued work on the WQ. */
56281bb76ff1Sjsg 	if (mgr->delayed_destroy_wq) {
56291bb76ff1Sjsg 		destroy_workqueue(mgr->delayed_destroy_wq);
56301bb76ff1Sjsg 		mgr->delayed_destroy_wq = NULL;
56311bb76ff1Sjsg 	}
56321bb76ff1Sjsg 	mgr->dev = NULL;
56331bb76ff1Sjsg 	mgr->aux = NULL;
56341bb76ff1Sjsg 	drm_atomic_private_obj_fini(&mgr->base);
56351bb76ff1Sjsg 	mgr->funcs = NULL;
56361bb76ff1Sjsg 
56371bb76ff1Sjsg 	mutex_destroy(&mgr->delayed_destroy_lock);
56381bb76ff1Sjsg 	mutex_destroy(&mgr->qlock);
56391bb76ff1Sjsg 	mutex_destroy(&mgr->lock);
56401bb76ff1Sjsg 	mutex_destroy(&mgr->up_req_lock);
56411bb76ff1Sjsg 	mutex_destroy(&mgr->probe_lock);
56421bb76ff1Sjsg #if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
56431bb76ff1Sjsg 	mutex_destroy(&mgr->topology_ref_history_lock);
56441bb76ff1Sjsg #endif
56451bb76ff1Sjsg }
56461bb76ff1Sjsg EXPORT_SYMBOL(drm_dp_mst_topology_mgr_destroy);
56471bb76ff1Sjsg 
56481bb76ff1Sjsg static bool remote_i2c_read_ok(const struct i2c_msg msgs[], int num)
56491bb76ff1Sjsg {
56501bb76ff1Sjsg 	int i;
56511bb76ff1Sjsg 
56521bb76ff1Sjsg 	if (num - 1 > DP_REMOTE_I2C_READ_MAX_TRANSACTIONS)
56531bb76ff1Sjsg 		return false;
56541bb76ff1Sjsg 
56551bb76ff1Sjsg 	for (i = 0; i < num - 1; i++) {
56561bb76ff1Sjsg 		if (msgs[i].flags & I2C_M_RD ||
56571bb76ff1Sjsg 		    msgs[i].len > 0xff)
56581bb76ff1Sjsg 			return false;
56591bb76ff1Sjsg 	}
56601bb76ff1Sjsg 
56611bb76ff1Sjsg 	return msgs[num - 1].flags & I2C_M_RD &&
56621bb76ff1Sjsg 		msgs[num - 1].len <= 0xff;
56631bb76ff1Sjsg }
56641bb76ff1Sjsg 
56651bb76ff1Sjsg static bool remote_i2c_write_ok(const struct i2c_msg msgs[], int num)
56661bb76ff1Sjsg {
56671bb76ff1Sjsg 	int i;
56681bb76ff1Sjsg 
56691bb76ff1Sjsg 	for (i = 0; i < num - 1; i++) {
56701bb76ff1Sjsg 		if (msgs[i].flags & I2C_M_RD || !(msgs[i].flags & I2C_M_STOP) ||
56711bb76ff1Sjsg 		    msgs[i].len > 0xff)
56721bb76ff1Sjsg 			return false;
56731bb76ff1Sjsg 	}
56741bb76ff1Sjsg 
56751bb76ff1Sjsg 	return !(msgs[num - 1].flags & I2C_M_RD) && msgs[num - 1].len <= 0xff;
56761bb76ff1Sjsg }
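
/*
 * Illustrative examples of the transfer shapes accepted above: a typical
 * EDID block read is one address write followed by one read and maps to
 * DP_REMOTE_I2C_READ:
 *
 *	struct i2c_msg msgs[] = {
 *		{ .addr = 0x50, .flags = 0, .len = 1, .buf = &offset },
 *		{ .addr = 0x50, .flags = I2C_M_RD, .len = 128, .buf = edid },
 *	};
 *
 * whereas a sequence of writes, each but the last carrying I2C_M_STOP,
 * maps to DP_REMOTE_I2C_WRITE. Anything else is rejected with -EIO in
 * drm_dp_mst_i2c_xfer() below.
 */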
56771bb76ff1Sjsg 
56781bb76ff1Sjsg static int drm_dp_mst_i2c_read(struct drm_dp_mst_branch *mstb,
56791bb76ff1Sjsg 			       struct drm_dp_mst_port *port,
56801bb76ff1Sjsg 			       struct i2c_msg *msgs, int num)
56811bb76ff1Sjsg {
56821bb76ff1Sjsg 	struct drm_dp_mst_topology_mgr *mgr = port->mgr;
56831bb76ff1Sjsg 	unsigned int i;
56841bb76ff1Sjsg 	struct drm_dp_sideband_msg_req_body msg;
56851bb76ff1Sjsg 	struct drm_dp_sideband_msg_tx *txmsg = NULL;
56861bb76ff1Sjsg 	int ret;
56871bb76ff1Sjsg 
56881bb76ff1Sjsg 	memset(&msg, 0, sizeof(msg));
56891bb76ff1Sjsg 	msg.req_type = DP_REMOTE_I2C_READ;
56901bb76ff1Sjsg 	msg.u.i2c_read.num_transactions = num - 1;
56911bb76ff1Sjsg 	msg.u.i2c_read.port_number = port->port_num;
56921bb76ff1Sjsg 	for (i = 0; i < num - 1; i++) {
56931bb76ff1Sjsg 		msg.u.i2c_read.transactions[i].i2c_dev_id = msgs[i].addr;
56941bb76ff1Sjsg 		msg.u.i2c_read.transactions[i].num_bytes = msgs[i].len;
56951bb76ff1Sjsg 		msg.u.i2c_read.transactions[i].bytes = msgs[i].buf;
56961bb76ff1Sjsg 		msg.u.i2c_read.transactions[i].no_stop_bit = !(msgs[i].flags & I2C_M_STOP);
56971bb76ff1Sjsg 	}
56981bb76ff1Sjsg 	msg.u.i2c_read.read_i2c_device_id = msgs[num - 1].addr;
56991bb76ff1Sjsg 	msg.u.i2c_read.num_bytes_read = msgs[num - 1].len;
57001bb76ff1Sjsg 
57011bb76ff1Sjsg 	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
57021bb76ff1Sjsg 	if (!txmsg) {
57031bb76ff1Sjsg 		ret = -ENOMEM;
57041bb76ff1Sjsg 		goto out;
57051bb76ff1Sjsg 	}
57061bb76ff1Sjsg 
57071bb76ff1Sjsg 	txmsg->dst = mstb;
57081bb76ff1Sjsg 	drm_dp_encode_sideband_req(&msg, txmsg);
57091bb76ff1Sjsg 
57101bb76ff1Sjsg 	drm_dp_queue_down_tx(mgr, txmsg);
57111bb76ff1Sjsg 
57121bb76ff1Sjsg 	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
57131bb76ff1Sjsg 	if (ret > 0) {
57151bb76ff1Sjsg 		if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
57161bb76ff1Sjsg 			ret = -EREMOTEIO;
57171bb76ff1Sjsg 			goto out;
57181bb76ff1Sjsg 		}
57191bb76ff1Sjsg 		if (txmsg->reply.u.remote_i2c_read_ack.num_bytes != msgs[num - 1].len) {
57201bb76ff1Sjsg 			ret = -EIO;
57211bb76ff1Sjsg 			goto out;
57221bb76ff1Sjsg 		}
57231bb76ff1Sjsg 		memcpy(msgs[num - 1].buf, txmsg->reply.u.remote_i2c_read_ack.bytes, msgs[num - 1].len);
57241bb76ff1Sjsg 		ret = num;
57251bb76ff1Sjsg 	}
57261bb76ff1Sjsg out:
57271bb76ff1Sjsg 	kfree(txmsg);
57281bb76ff1Sjsg 	return ret;
57291bb76ff1Sjsg }
57301bb76ff1Sjsg 
57311bb76ff1Sjsg static int drm_dp_mst_i2c_write(struct drm_dp_mst_branch *mstb,
57321bb76ff1Sjsg 				struct drm_dp_mst_port *port,
57331bb76ff1Sjsg 				struct i2c_msg *msgs, int num)
57341bb76ff1Sjsg {
57351bb76ff1Sjsg 	struct drm_dp_mst_topology_mgr *mgr = port->mgr;
57361bb76ff1Sjsg 	unsigned int i;
57371bb76ff1Sjsg 	struct drm_dp_sideband_msg_req_body msg;
57381bb76ff1Sjsg 	struct drm_dp_sideband_msg_tx *txmsg = NULL;
57391bb76ff1Sjsg 	int ret;
57401bb76ff1Sjsg 
57411bb76ff1Sjsg 	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
57421bb76ff1Sjsg 	if (!txmsg) {
57431bb76ff1Sjsg 		ret = -ENOMEM;
57441bb76ff1Sjsg 		goto out;
57451bb76ff1Sjsg 	}
57461bb76ff1Sjsg 	for (i = 0; i < num; i++) {
57471bb76ff1Sjsg 		memset(&msg, 0, sizeof(msg));
57481bb76ff1Sjsg 		msg.req_type = DP_REMOTE_I2C_WRITE;
57491bb76ff1Sjsg 		msg.u.i2c_write.port_number = port->port_num;
57501bb76ff1Sjsg 		msg.u.i2c_write.write_i2c_device_id = msgs[i].addr;
57511bb76ff1Sjsg 		msg.u.i2c_write.num_bytes = msgs[i].len;
57521bb76ff1Sjsg 		msg.u.i2c_write.bytes = msgs[i].buf;
57531bb76ff1Sjsg 
57541bb76ff1Sjsg 		memset(txmsg, 0, sizeof(*txmsg));
57551bb76ff1Sjsg 		txmsg->dst = mstb;
57561bb76ff1Sjsg 
57571bb76ff1Sjsg 		drm_dp_encode_sideband_req(&msg, txmsg);
57581bb76ff1Sjsg 		drm_dp_queue_down_tx(mgr, txmsg);
57591bb76ff1Sjsg 
57601bb76ff1Sjsg 		ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
57611bb76ff1Sjsg 		if (ret > 0) {
57621bb76ff1Sjsg 			if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
57631bb76ff1Sjsg 				ret = -EREMOTEIO;
57641bb76ff1Sjsg 				goto out;
57651bb76ff1Sjsg 			}
57661bb76ff1Sjsg 		} else {
57671bb76ff1Sjsg 			goto out;
57681bb76ff1Sjsg 		}
57691bb76ff1Sjsg 	}
57701bb76ff1Sjsg 	ret = num;
57711bb76ff1Sjsg out:
57721bb76ff1Sjsg 	kfree(txmsg);
57731bb76ff1Sjsg 	return ret;
57741bb76ff1Sjsg }
57751bb76ff1Sjsg 
57761bb76ff1Sjsg /* I2C device */
57771bb76ff1Sjsg static int drm_dp_mst_i2c_xfer(struct i2c_adapter *adapter,
57781bb76ff1Sjsg 			       struct i2c_msg *msgs, int num)
57791bb76ff1Sjsg {
57801bb76ff1Sjsg 	struct drm_dp_aux *aux = adapter->algo_data;
57811bb76ff1Sjsg 	struct drm_dp_mst_port *port =
57821bb76ff1Sjsg 		container_of(aux, struct drm_dp_mst_port, aux);
57831bb76ff1Sjsg 	struct drm_dp_mst_branch *mstb;
57841bb76ff1Sjsg 	struct drm_dp_mst_topology_mgr *mgr = port->mgr;
57851bb76ff1Sjsg 	int ret;
57861bb76ff1Sjsg 
57871bb76ff1Sjsg 	mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
57881bb76ff1Sjsg 	if (!mstb)
57891bb76ff1Sjsg 		return -EREMOTEIO;
57901bb76ff1Sjsg 
57911bb76ff1Sjsg 	if (remote_i2c_read_ok(msgs, num)) {
57921bb76ff1Sjsg 		ret = drm_dp_mst_i2c_read(mstb, port, msgs, num);
57931bb76ff1Sjsg 	} else if (remote_i2c_write_ok(msgs, num)) {
57941bb76ff1Sjsg 		ret = drm_dp_mst_i2c_write(mstb, port, msgs, num);
57951bb76ff1Sjsg 	} else {
57961bb76ff1Sjsg 		drm_dbg_kms(mgr->dev, "Unsupported I2C transaction for MST device\n");
57971bb76ff1Sjsg 		ret = -EIO;
57981bb76ff1Sjsg 	}
57991bb76ff1Sjsg 
58001bb76ff1Sjsg 	drm_dp_mst_topology_put_mstb(mstb);
58011bb76ff1Sjsg 	return ret;
58021bb76ff1Sjsg }
58031bb76ff1Sjsg 
58041bb76ff1Sjsg static u32 drm_dp_mst_i2c_functionality(struct i2c_adapter *adapter)
58051bb76ff1Sjsg {
58061bb76ff1Sjsg 	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
58071bb76ff1Sjsg 	       I2C_FUNC_SMBUS_READ_BLOCK_DATA |
58081bb76ff1Sjsg 	       I2C_FUNC_SMBUS_BLOCK_PROC_CALL |
58091bb76ff1Sjsg 	       I2C_FUNC_10BIT_ADDR;
58101bb76ff1Sjsg }
58111bb76ff1Sjsg 
58121bb76ff1Sjsg static const struct i2c_algorithm drm_dp_mst_i2c_algo = {
58131bb76ff1Sjsg 	.functionality = drm_dp_mst_i2c_functionality,
58141bb76ff1Sjsg 	.master_xfer = drm_dp_mst_i2c_xfer,
58151bb76ff1Sjsg };
58161bb76ff1Sjsg 
58171bb76ff1Sjsg /**
58181bb76ff1Sjsg  * drm_dp_mst_register_i2c_bus() - register an I2C adapter for I2C-over-AUX
58191bb76ff1Sjsg  * @port: The port to add the I2C bus on
58201bb76ff1Sjsg  *
58211bb76ff1Sjsg  * Returns 0 on success or a negative error code on failure.
58221bb76ff1Sjsg  */
58231bb76ff1Sjsg static int drm_dp_mst_register_i2c_bus(struct drm_dp_mst_port *port)
58241bb76ff1Sjsg {
58251bb76ff1Sjsg 	struct drm_dp_aux *aux = &port->aux;
58261bb76ff1Sjsg #ifdef __linux__
58271bb76ff1Sjsg 	struct device *parent_dev = port->mgr->dev->dev;
58281bb76ff1Sjsg #endif
58291bb76ff1Sjsg 
58301bb76ff1Sjsg 	aux->ddc.algo = &drm_dp_mst_i2c_algo;
58311bb76ff1Sjsg 	aux->ddc.algo_data = aux;
58321bb76ff1Sjsg 	aux->ddc.retries = 3;
58331bb76ff1Sjsg 
58341bb76ff1Sjsg #ifdef __linux__
58351bb76ff1Sjsg 	aux->ddc.class = I2C_CLASS_DDC;
58361bb76ff1Sjsg 	aux->ddc.owner = THIS_MODULE;
58371bb76ff1Sjsg 	/* FIXME: set the kdev of the port's connector as parent */
58381bb76ff1Sjsg 	aux->ddc.dev.parent = parent_dev;
58391bb76ff1Sjsg 	aux->ddc.dev.of_node = parent_dev->of_node;
58401bb76ff1Sjsg #endif
58411bb76ff1Sjsg 
5842f005ef32Sjsg 	strscpy(aux->ddc.name, aux->name ? aux->name : dev_name(parent_dev),
58431bb76ff1Sjsg 		sizeof(aux->ddc.name));
58441bb76ff1Sjsg 
58451bb76ff1Sjsg 	return i2c_add_adapter(&aux->ddc);
58461bb76ff1Sjsg }
58471bb76ff1Sjsg 
58481bb76ff1Sjsg /**
58491bb76ff1Sjsg  * drm_dp_mst_unregister_i2c_bus() - unregister an I2C-over-AUX adapter
58501bb76ff1Sjsg  * @port: The port to remove the I2C bus from
58511bb76ff1Sjsg  */
58521bb76ff1Sjsg static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_mst_port *port)
58531bb76ff1Sjsg {
58541bb76ff1Sjsg 	i2c_del_adapter(&port->aux.ddc);
58551bb76ff1Sjsg }
58561bb76ff1Sjsg 
58571bb76ff1Sjsg /**
58581bb76ff1Sjsg  * drm_dp_mst_is_virtual_dpcd() - Is the given port a virtual DP Peer Device
58591bb76ff1Sjsg  * @port: The port to check
58601bb76ff1Sjsg  *
58611bb76ff1Sjsg  * A single physical MST hub object can be represented in the topology
58621bb76ff1Sjsg  * by multiple branches, with virtual ports between those branches.
58631bb76ff1Sjsg  *
58641bb76ff1Sjsg  * As of DP 1.4, an MST hub with internal (virtual) ports must expose
58651bb76ff1Sjsg  * certain DPCD registers over those ports. See sections 2.6.1.1.1
58661bb76ff1Sjsg  * and 2.6.1.1.2 of the DisplayPort specification v1.4 for details.
58671bb76ff1Sjsg  *
58681bb76ff1Sjsg  * May acquire mgr->lock
58691bb76ff1Sjsg  *
58701bb76ff1Sjsg  * Returns:
58711bb76ff1Sjsg  * true if the port is a virtual DP peer device, false otherwise
58721bb76ff1Sjsg  */
58731bb76ff1Sjsg static bool drm_dp_mst_is_virtual_dpcd(struct drm_dp_mst_port *port)
58741bb76ff1Sjsg {
58751bb76ff1Sjsg 	struct drm_dp_mst_port *downstream_port;
58761bb76ff1Sjsg 
58771bb76ff1Sjsg 	if (!port || port->dpcd_rev < DP_DPCD_REV_14)
58781bb76ff1Sjsg 		return false;
58791bb76ff1Sjsg 
58801bb76ff1Sjsg 	/* Virtual DP Sink (Internal Display Panel) */
58811bb76ff1Sjsg 	if (port->port_num >= 8)
58821bb76ff1Sjsg 		return true;
58831bb76ff1Sjsg 
58841bb76ff1Sjsg 	/* DP-to-HDMI Protocol Converter */
58851bb76ff1Sjsg 	if (port->pdt == DP_PEER_DEVICE_DP_LEGACY_CONV &&
58861bb76ff1Sjsg 	    !port->mcs &&
58871bb76ff1Sjsg 	    port->ldps)
58881bb76ff1Sjsg 		return true;
58891bb76ff1Sjsg 
58901bb76ff1Sjsg 	/* DP-to-DP */
58911bb76ff1Sjsg 	mutex_lock(&port->mgr->lock);
58921bb76ff1Sjsg 	if (port->pdt == DP_PEER_DEVICE_MST_BRANCHING &&
58931bb76ff1Sjsg 	    port->mstb &&
58941bb76ff1Sjsg 	    port->mstb->num_ports == 2) {
58951bb76ff1Sjsg 		list_for_each_entry(downstream_port, &port->mstb->ports, next) {
58961bb76ff1Sjsg 			if (downstream_port->pdt == DP_PEER_DEVICE_SST_SINK &&
58971bb76ff1Sjsg 			    !downstream_port->input) {
58981bb76ff1Sjsg 				mutex_unlock(&port->mgr->lock);
58991bb76ff1Sjsg 				return true;
59001bb76ff1Sjsg 			}
59011bb76ff1Sjsg 		}
59021bb76ff1Sjsg 	}
59031bb76ff1Sjsg 	mutex_unlock(&port->mgr->lock);
59041bb76ff1Sjsg 
59051bb76ff1Sjsg 	return false;
59061bb76ff1Sjsg }
59071bb76ff1Sjsg 
59081bb76ff1Sjsg /**
59091bb76ff1Sjsg  * drm_dp_mst_dsc_aux_for_port() - Find the correct aux for DSC
59101bb76ff1Sjsg  * @port: The port to check. A leaf of the MST tree with an attached display.
59111bb76ff1Sjsg  *
59121bb76ff1Sjsg  * Depending on the situation, DSC may be enabled via the endpoint aux,
59131bb76ff1Sjsg  * the immediately upstream aux, or the connector's physical aux.
59141bb76ff1Sjsg  *
59151bb76ff1Sjsg  * This is both the correct aux to read DSC_CAPABILITY and the
59161bb76ff1Sjsg  * correct aux to write DSC_ENABLED.
59171bb76ff1Sjsg  *
59181bb76ff1Sjsg  * This operation can be expensive (up to four aux reads), so
59191bb76ff1Sjsg  * the caller should cache the return value.
59201bb76ff1Sjsg  *
59211bb76ff1Sjsg  * Returns:
59221bb76ff1Sjsg  * NULL if DSC cannot be enabled on this port, otherwise the aux device
59231bb76ff1Sjsg  */
59241bb76ff1Sjsg struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port)
59251bb76ff1Sjsg {
59261bb76ff1Sjsg 	struct drm_dp_mst_port *immediate_upstream_port;
59271bb76ff1Sjsg 	struct drm_dp_mst_port *fec_port;
59281bb76ff1Sjsg 	struct drm_dp_desc desc = {};
59291bb76ff1Sjsg 	u8 endpoint_fec;
59301bb76ff1Sjsg 	u8 endpoint_dsc;
59311bb76ff1Sjsg 
59321bb76ff1Sjsg 	if (!port)
59331bb76ff1Sjsg 		return NULL;
59341bb76ff1Sjsg 
59351bb76ff1Sjsg 	if (port->parent->port_parent)
59361bb76ff1Sjsg 		immediate_upstream_port = port->parent->port_parent;
59371bb76ff1Sjsg 	else
59381bb76ff1Sjsg 		immediate_upstream_port = NULL;
59391bb76ff1Sjsg 
59401bb76ff1Sjsg 	fec_port = immediate_upstream_port;
59411bb76ff1Sjsg 	while (fec_port) {
59421bb76ff1Sjsg 		/*
59431bb76ff1Sjsg 		 * Each physical link (i.e. not a virtual port) between the
59441bb76ff1Sjsg 		 * output and the primary device must support FEC
59451bb76ff1Sjsg 		 */
59461bb76ff1Sjsg 		if (!drm_dp_mst_is_virtual_dpcd(fec_port) &&
59471bb76ff1Sjsg 		    !fec_port->fec_capable)
59481bb76ff1Sjsg 			return NULL;
59491bb76ff1Sjsg 
59501bb76ff1Sjsg 		fec_port = fec_port->parent->port_parent;
59511bb76ff1Sjsg 	}
59521bb76ff1Sjsg 
59531bb76ff1Sjsg 	/* DP-to-DP peer device */
59541bb76ff1Sjsg 	if (drm_dp_mst_is_virtual_dpcd(immediate_upstream_port)) {
59551bb76ff1Sjsg 		u8 upstream_dsc;
59561bb76ff1Sjsg 
59571bb76ff1Sjsg 		if (drm_dp_dpcd_read(&port->aux,
59581bb76ff1Sjsg 				     DP_DSC_SUPPORT, &endpoint_dsc, 1) != 1)
59591bb76ff1Sjsg 			return NULL;
59601bb76ff1Sjsg 		if (drm_dp_dpcd_read(&port->aux,
59611bb76ff1Sjsg 				     DP_FEC_CAPABILITY, &endpoint_fec, 1) != 1)
59621bb76ff1Sjsg 			return NULL;
59631bb76ff1Sjsg 		if (drm_dp_dpcd_read(&immediate_upstream_port->aux,
59641bb76ff1Sjsg 				     DP_DSC_SUPPORT, &upstream_dsc, 1) != 1)
59651bb76ff1Sjsg 			return NULL;
59661bb76ff1Sjsg 
59671bb76ff1Sjsg 		/* Endpoint decompression with DP-to-DP peer device */
59681bb76ff1Sjsg 		if ((endpoint_dsc & DP_DSC_DECOMPRESSION_IS_SUPPORTED) &&
59691bb76ff1Sjsg 		    (endpoint_fec & DP_FEC_CAPABLE) &&
59701bb76ff1Sjsg 		    (upstream_dsc & DP_DSC_PASSTHROUGH_IS_SUPPORTED)) {
59711bb76ff1Sjsg 			port->passthrough_aux = &immediate_upstream_port->aux;
59721bb76ff1Sjsg 			return &port->aux;
59731bb76ff1Sjsg 		}
59741bb76ff1Sjsg 
59751bb76ff1Sjsg 		/* Virtual DPCD decompression with DP-to-DP peer device */
59761bb76ff1Sjsg 		return &immediate_upstream_port->aux;
59771bb76ff1Sjsg 	}
59781bb76ff1Sjsg 
59791bb76ff1Sjsg 	/* Virtual DPCD decompression with DP-to-HDMI or Virtual DP Sink */
59801bb76ff1Sjsg 	if (drm_dp_mst_is_virtual_dpcd(port))
59811bb76ff1Sjsg 		return &port->aux;
59821bb76ff1Sjsg 
59831bb76ff1Sjsg 	/*
59841bb76ff1Sjsg 	 * Synaptics quirk
59851bb76ff1Sjsg 	 * Applies to ports for which:
59861bb76ff1Sjsg 	 * - Physical aux has Synaptics OUI
59871bb76ff1Sjsg 	 * - DPv1.4 or higher
59881bb76ff1Sjsg 	 * - Port is on primary branch device
59891bb76ff1Sjsg 	 * - Not a VGA adapter (DP_DWN_STRM_PORT_TYPE_ANALOG)
59901bb76ff1Sjsg 	 */
59911bb76ff1Sjsg 	if (drm_dp_read_desc(port->mgr->aux, &desc, true))
59921bb76ff1Sjsg 		return NULL;
59931bb76ff1Sjsg 
59941bb76ff1Sjsg 	if (drm_dp_has_quirk(&desc, DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD) &&
59951bb76ff1Sjsg 	    port->mgr->dpcd[DP_DPCD_REV] >= DP_DPCD_REV_14 &&
59961bb76ff1Sjsg 	    port->parent == port->mgr->mst_primary) {
59971bb76ff1Sjsg 		u8 dpcd_ext[DP_RECEIVER_CAP_SIZE];
59981bb76ff1Sjsg 
59991bb76ff1Sjsg 		if (drm_dp_read_dpcd_caps(port->mgr->aux, dpcd_ext) < 0)
60001bb76ff1Sjsg 			return NULL;
60011bb76ff1Sjsg 
60021bb76ff1Sjsg 		if ((dpcd_ext[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT) &&
60031bb76ff1Sjsg 		    ((dpcd_ext[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_TYPE_MASK)
60041bb76ff1Sjsg 		     != DP_DWN_STRM_PORT_TYPE_ANALOG))
60051bb76ff1Sjsg 			return port->mgr->aux;
60061bb76ff1Sjsg 	}
60071bb76ff1Sjsg 
60081bb76ff1Sjsg 	/*
60091bb76ff1Sjsg 	 * The check below verifies if the MST sink
60101bb76ff1Sjsg 	 * connected to the GPU is capable of DSC -
60111bb76ff1Sjsg 	 * therefore the endpoint needs to be
60121bb76ff1Sjsg 	 * both DSC and FEC capable.
60131bb76ff1Sjsg 	 */
60141bb76ff1Sjsg 	if (drm_dp_dpcd_read(&port->aux,
60151bb76ff1Sjsg 	   DP_DSC_SUPPORT, &endpoint_dsc, 1) != 1)
60161bb76ff1Sjsg 		return NULL;
60171bb76ff1Sjsg 	if (drm_dp_dpcd_read(&port->aux,
60181bb76ff1Sjsg 	   DP_FEC_CAPABILITY, &endpoint_fec, 1) != 1)
60191bb76ff1Sjsg 		return NULL;
60201bb76ff1Sjsg 	if ((endpoint_dsc & DP_DSC_DECOMPRESSION_IS_SUPPORTED) &&
60211bb76ff1Sjsg 	   (endpoint_fec & DP_FEC_CAPABLE))
60221bb76ff1Sjsg 		return &port->aux;
60231bb76ff1Sjsg 
60241bb76ff1Sjsg 	return NULL;
60251bb76ff1Sjsg }
60261bb76ff1Sjsg EXPORT_SYMBOL(drm_dp_mst_dsc_aux_for_port);
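
/*
 * Example (illustrative): callers normally resolve the DSC aux once per
 * detection cycle, cache it, and then read the sink's DSC capability block
 * through it:
 *
 *	struct drm_dp_aux *dsc_aux = drm_dp_mst_dsc_aux_for_port(port);
 *	u8 dsc_caps[DP_DSC_RECEIVER_CAP_SIZE];
 *
 *	if (dsc_aux &&
 *	    drm_dp_dpcd_read(dsc_aux, DP_DSC_SUPPORT, dsc_caps,
 *			     sizeof(dsc_caps)) == sizeof(dsc_caps))
 *		enable_dsc(dsc_caps);
 *
 * where enable_dsc() stands in for the driver's own DSC configuration code.
 * DP_DSC_RECEIVER_CAP_SIZE covers the full DSC capability range starting at
 * DP_DSC_SUPPORT.
 */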
6027