      1 /*	$NetBSD: drm_dp_mst_topology.c,v 1.1.1.2 2021/12/18 20:11:01 riastradh Exp $	*/
      2 
      3 /*
      4  * Copyright © 2014 Red Hat
      5  *
      6  * Permission to use, copy, modify, distribute, and sell this software and its
      7  * documentation for any purpose is hereby granted without fee, provided that
      8  * the above copyright notice appear in all copies and that both that copyright
      9  * notice and this permission notice appear in supporting documentation, and
     10  * that the name of the copyright holders not be used in advertising or
     11  * publicity pertaining to distribution of the software without specific,
     12  * written prior permission.  The copyright holders make no representations
     13  * about the suitability of this software for any purpose.  It is provided "as
     14  * is" without express or implied warranty.
     15  *
     16  * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
     17  * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
     18  * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
     19  * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
     20  * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
     21  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
     22  * OF THIS SOFTWARE.
     23  */
     24 
     25 #include <sys/cdefs.h>
     26 __KERNEL_RCSID(0, "$NetBSD: drm_dp_mst_topology.c,v 1.1.1.2 2021/12/18 20:11:01 riastradh Exp $");
     27 
     28 #include <linux/delay.h>
     29 #include <linux/errno.h>
     30 #include <linux/i2c.h>
     31 #include <linux/init.h>
     32 #include <linux/kernel.h>
     33 #include <linux/sched.h>
     34 #include <linux/seq_file.h>
     35 
     36 #if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
     37 #include <linux/stacktrace.h>
     38 #include <linux/sort.h>
     39 #include <linux/timekeeping.h>
     40 #include <linux/math64.h>
     41 #endif
     42 
     43 #include <drm/drm_atomic.h>
     44 #include <drm/drm_atomic_helper.h>
     45 #include <drm/drm_dp_mst_helper.h>
     46 #include <drm/drm_drv.h>
     47 #include <drm/drm_print.h>
     48 #include <drm/drm_probe_helper.h>
     49 
     50 #include "drm_crtc_helper_internal.h"
     51 #include "drm_dp_mst_topology_internal.h"
     52 
     53 /**
     54  * DOC: dp mst helper
     55  *
     56  * These functions contain parts of the DisplayPort 1.2a MultiStream Transport
     57  * protocol. The helpers contain a topology manager and bandwidth manager.
     58  * The helpers encapsulate the sending and receiving of sideband msgs.
     59  */
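/*
 * A minimal driver-side sketch of bringing these helpers up, assuming a
 * hypothetical encoder structure that already owns a working
 * &struct drm_dp_aux.  The structure/function names and the
 * max_dpcd_transaction_bytes / max_payloads values are illustrative only and
 * not part of this file; the calls are the init/enable entry points this
 * version of the helpers exports:
 *
 *	struct my_dp_encoder {
 *		struct drm_dp_aux aux;
 *		struct drm_dp_mst_topology_mgr mst_mgr;
 *	};
 *
 *	static int my_dp_mst_setup(struct my_dp_encoder *enc,
 *				   struct drm_device *dev, int conn_base_id)
 *	{
 *		int ret;
 *
 *		ret = drm_dp_mst_topology_mgr_init(&enc->mst_mgr, dev,
 *						   &enc->aux, 16, 6,
 *						   conn_base_id);
 *		if (ret)
 *			return ret;
 *
 *		Once the sink reports MST support, enable sideband messaging:
 *		return drm_dp_mst_topology_mgr_set_mst(&enc->mst_mgr, true);
 *	}
 */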
     60 struct drm_dp_pending_up_req {
     61 	struct drm_dp_sideband_msg_hdr hdr;
     62 	struct drm_dp_sideband_msg_req_body msg;
     63 	struct list_head next;
     64 };
     65 
     66 static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
     67 				  char *buf);
     68 
     69 static void drm_dp_mst_topology_put_port(struct drm_dp_mst_port *port);
     70 
     71 static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
     72 				     int id,
     73 				     struct drm_dp_payload *payload);
     74 
     75 static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr,
     76 				 struct drm_dp_mst_port *port,
     77 				 int offset, int size, u8 *bytes);
     78 static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
     79 				  struct drm_dp_mst_port *port,
     80 				  int offset, int size, u8 *bytes);
     81 
     82 static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
     83 				    struct drm_dp_mst_branch *mstb);
     84 
     85 static void
     86 drm_dp_send_clear_payload_id_table(struct drm_dp_mst_topology_mgr *mgr,
     87 				   struct drm_dp_mst_branch *mstb);
     88 
     89 static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
     90 					   struct drm_dp_mst_branch *mstb,
     91 					   struct drm_dp_mst_port *port);
     92 static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
     93 				 u8 *guid);
     94 
     95 static int drm_dp_mst_register_i2c_bus(struct drm_dp_aux *aux);
     96 static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_aux *aux);
     97 static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr);
     98 
     99 #define DBG_PREFIX "[dp_mst]"
    100 
    101 #define DP_STR(x) [DP_ ## x] = #x
    102 
    103 static const char *drm_dp_mst_req_type_str(u8 req_type)
    104 {
    105 	static const char * const req_type_str[] = {
    106 		DP_STR(GET_MSG_TRANSACTION_VERSION),
    107 		DP_STR(LINK_ADDRESS),
    108 		DP_STR(CONNECTION_STATUS_NOTIFY),
    109 		DP_STR(ENUM_PATH_RESOURCES),
    110 		DP_STR(ALLOCATE_PAYLOAD),
    111 		DP_STR(QUERY_PAYLOAD),
    112 		DP_STR(RESOURCE_STATUS_NOTIFY),
    113 		DP_STR(CLEAR_PAYLOAD_ID_TABLE),
    114 		DP_STR(REMOTE_DPCD_READ),
    115 		DP_STR(REMOTE_DPCD_WRITE),
    116 		DP_STR(REMOTE_I2C_READ),
    117 		DP_STR(REMOTE_I2C_WRITE),
    118 		DP_STR(POWER_UP_PHY),
    119 		DP_STR(POWER_DOWN_PHY),
    120 		DP_STR(SINK_EVENT_NOTIFY),
    121 		DP_STR(QUERY_STREAM_ENC_STATUS),
    122 	};
    123 
    124 	if (req_type >= ARRAY_SIZE(req_type_str) ||
    125 	    !req_type_str[req_type])
    126 		return "unknown";
    127 
    128 	return req_type_str[req_type];
    129 }
    130 
    131 #undef DP_STR
    132 #define DP_STR(x) [DP_NAK_ ## x] = #x
    133 
    134 static const char *drm_dp_mst_nak_reason_str(u8 nak_reason)
    135 {
    136 	static const char * const nak_reason_str[] = {
    137 		DP_STR(WRITE_FAILURE),
    138 		DP_STR(INVALID_READ),
    139 		DP_STR(CRC_FAILURE),
    140 		DP_STR(BAD_PARAM),
    141 		DP_STR(DEFER),
    142 		DP_STR(LINK_FAILURE),
    143 		DP_STR(NO_RESOURCES),
    144 		DP_STR(DPCD_FAIL),
    145 		DP_STR(I2C_NAK),
    146 		DP_STR(ALLOCATE_FAIL),
    147 	};
    148 
    149 	if (nak_reason >= ARRAY_SIZE(nak_reason_str) ||
    150 	    !nak_reason_str[nak_reason])
    151 		return "unknown";
    152 
    153 	return nak_reason_str[nak_reason];
    154 }
    155 
    156 #undef DP_STR
    157 #define DP_STR(x) [DRM_DP_SIDEBAND_TX_ ## x] = #x
    158 
    159 static const char *drm_dp_mst_sideband_tx_state_str(int state)
    160 {
    161 	static const char * const sideband_reason_str[] = {
    162 		DP_STR(QUEUED),
    163 		DP_STR(START_SEND),
    164 		DP_STR(SENT),
    165 		DP_STR(RX),
    166 		DP_STR(TIMEOUT),
    167 	};
    168 
    169 	if (state >= ARRAY_SIZE(sideband_reason_str) ||
    170 	    !sideband_reason_str[state])
    171 		return "unknown";
    172 
    173 	return sideband_reason_str[state];
    174 }
    175 
    176 static int
    177 drm_dp_mst_rad_to_str(const u8 rad[8], u8 lct, char *out, size_t len)
    178 {
    179 	int i;
    180 	u8 unpacked_rad[16];
    181 
    182 	for (i = 0; i < lct; i++) {
    183 		if (i % 2)
    184 			unpacked_rad[i] = rad[i / 2] >> 4;
    185 		else
    186 			unpacked_rad[i] = rad[i / 2] & 0xf;
    187 	}
    188 
    189 	/* TODO: Eventually add something to printk so we can format the rad
    190 	 * like this: 1.2.3
    191 	 */
    192 	return snprintf(out, len, "%*phC", lct, unpacked_rad);
    193 }
    194 
    195 /* sideband msg handling */
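/*
 * CRC-4 over the sideband message header nibbles.  The 0x13 reduction below
 * corresponds to the generator polynomial x^4 + x + 1; the resulting 4-bit
 * remainder is what drm_dp_encode_sideband_msg_hdr() stores in the low
 * nibble of the last header byte.
 */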
    196 static u8 drm_dp_msg_header_crc4(const uint8_t *data, size_t num_nibbles)
    197 {
    198 	u8 bitmask = 0x80;
    199 	u8 bitshift = 7;
    200 	u8 array_index = 0;
    201 	int number_of_bits = num_nibbles * 4;
    202 	u8 remainder = 0;
    203 
    204 	while (number_of_bits != 0) {
    205 		number_of_bits--;
    206 		remainder <<= 1;
    207 		remainder |= (data[array_index] & bitmask) >> bitshift;
    208 		bitmask >>= 1;
    209 		bitshift--;
    210 		if (bitmask == 0) {
    211 			bitmask = 0x80;
    212 			bitshift = 7;
    213 			array_index++;
    214 		}
    215 		if ((remainder & 0x10) == 0x10)
    216 			remainder ^= 0x13;
    217 	}
    218 
    219 	number_of_bits = 4;
    220 	while (number_of_bits != 0) {
    221 		number_of_bits--;
    222 		remainder <<= 1;
    223 		if ((remainder & 0x10) != 0)
    224 			remainder ^= 0x13;
    225 	}
    226 
    227 	return remainder;
    228 }
    229 
    230 static u8 drm_dp_msg_data_crc4(const uint8_t *data, u8 number_of_bytes)
    231 {
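/*
 * CRC-8 over sideband message body bytes.  The 0xd5 reduction below
 * corresponds to the generator polynomial x^8 + x^7 + x^6 + x^4 + x^2 + 1;
 * drm_dp_crc_sideband_chunk_req() appends the low eight bits of the
 * remainder after each message chunk.
 */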
    232 	u8 bitmask = 0x80;
    233 	u8 bitshift = 7;
    234 	u8 array_index = 0;
    235 	int number_of_bits = number_of_bytes * 8;
    236 	u16 remainder = 0;
    237 
    238 	while (number_of_bits != 0) {
    239 		number_of_bits--;
    240 		remainder <<= 1;
    241 		remainder |= (data[array_index] & bitmask) >> bitshift;
    242 		bitmask >>= 1;
    243 		bitshift--;
    244 		if (bitmask == 0) {
    245 			bitmask = 0x80;
    246 			bitshift = 7;
    247 			array_index++;
    248 		}
    249 		if ((remainder & 0x100) == 0x100)
    250 			remainder ^= 0xd5;
    251 	}
    252 
    253 	number_of_bits = 8;
    254 	while (number_of_bits != 0) {
    255 		number_of_bits--;
    256 		remainder <<= 1;
    257 		if ((remainder & 0x100) != 0)
    258 			remainder ^= 0xd5;
    259 	}
    260 
    261 	return remainder & 0xff;
    262 }
    263 static inline u8 drm_dp_calc_sb_hdr_size(struct drm_dp_sideband_msg_hdr *hdr)
    264 {
    265 	u8 size = 3;
    266 	size += (hdr->lct / 2);
    267 	return size;
    268 }
    269 
    270 static void drm_dp_encode_sideband_msg_hdr(struct drm_dp_sideband_msg_hdr *hdr,
    271 					   u8 *buf, int *len)
    272 {
    273 	int idx = 0;
    274 	int i;
    275 	u8 crc4;
    276 	buf[idx++] = ((hdr->lct & 0xf) << 4) | (hdr->lcr & 0xf);
    277 	for (i = 0; i < (hdr->lct / 2); i++)
    278 		buf[idx++] = hdr->rad[i];
    279 	buf[idx++] = (hdr->broadcast << 7) | (hdr->path_msg << 6) |
    280 		(hdr->msg_len & 0x3f);
    281 	buf[idx++] = (hdr->somt << 7) | (hdr->eomt << 6) | (hdr->seqno << 4);
    282 
    283 	crc4 = drm_dp_msg_header_crc4(buf, (idx * 2) - 1);
    284 	buf[idx - 1] |= (crc4 & 0xf);
    285 
    286 	*len = idx;
    287 }
    288 
    289 static bool drm_dp_decode_sideband_msg_hdr(struct drm_dp_sideband_msg_hdr *hdr,
    290 					   u8 *buf, int buflen, u8 *hdrlen)
    291 {
    292 	u8 crc4;
    293 	u8 len;
    294 	int i;
    295 	u8 idx;
    296 	if (buf[0] == 0)
    297 		return false;
    298 	len = 3;
    299 	len += ((buf[0] & 0xf0) >> 4) / 2;
    300 	if (len > buflen)
    301 		return false;
    302 	crc4 = drm_dp_msg_header_crc4(buf, (len * 2) - 1);
    303 
    304 	if ((crc4 & 0xf) != (buf[len - 1] & 0xf)) {
    305 		DRM_DEBUG_KMS("crc4 mismatch 0x%x 0x%x\n", crc4, buf[len - 1]);
    306 		return false;
    307 	}
    308 
    309 	hdr->lct = (buf[0] & 0xf0) >> 4;
    310 	hdr->lcr = (buf[0] & 0xf);
    311 	idx = 1;
    312 	for (i = 0; i < (hdr->lct / 2); i++)
    313 		hdr->rad[i] = buf[idx++];
    314 	hdr->broadcast = (buf[idx] >> 7) & 0x1;
    315 	hdr->path_msg = (buf[idx] >> 6) & 0x1;
    316 	hdr->msg_len = buf[idx] & 0x3f;
    317 	idx++;
    318 	hdr->somt = (buf[idx] >> 7) & 0x1;
    319 	hdr->eomt = (buf[idx] >> 6) & 0x1;
    320 	hdr->seqno = (buf[idx] >> 4) & 0x1;
    321 	idx++;
    322 	*hdrlen = idx;
    323 	return true;
    324 }
    325 
    326 void
    327 drm_dp_encode_sideband_req(const struct drm_dp_sideband_msg_req_body *req,
    328 			   struct drm_dp_sideband_msg_tx *raw)
    329 {
    330 	int idx = 0;
    331 	int i;
    332 	u8 *buf = raw->msg;
    333 	buf[idx++] = req->req_type & 0x7f;
    334 
    335 	switch (req->req_type) {
    336 	case DP_ENUM_PATH_RESOURCES:
    337 	case DP_POWER_DOWN_PHY:
    338 	case DP_POWER_UP_PHY:
    339 		buf[idx] = (req->u.port_num.port_number & 0xf) << 4;
    340 		idx++;
    341 		break;
    342 	case DP_ALLOCATE_PAYLOAD:
    343 		buf[idx] = (req->u.allocate_payload.port_number & 0xf) << 4 |
    344 			(req->u.allocate_payload.number_sdp_streams & 0xf);
    345 		idx++;
    346 		buf[idx] = (req->u.allocate_payload.vcpi & 0x7f);
    347 		idx++;
    348 		buf[idx] = (req->u.allocate_payload.pbn >> 8);
    349 		idx++;
    350 		buf[idx] = (req->u.allocate_payload.pbn & 0xff);
    351 		idx++;
    352 		for (i = 0; i < req->u.allocate_payload.number_sdp_streams / 2; i++) {
    353 			buf[idx] = ((req->u.allocate_payload.sdp_stream_sink[i * 2] & 0xf) << 4) |
    354 				(req->u.allocate_payload.sdp_stream_sink[i * 2 + 1] & 0xf);
    355 			idx++;
    356 		}
    357 		if (req->u.allocate_payload.number_sdp_streams & 1) {
    358 			i = req->u.allocate_payload.number_sdp_streams - 1;
    359 			buf[idx] = (req->u.allocate_payload.sdp_stream_sink[i] & 0xf) << 4;
    360 			idx++;
    361 		}
    362 		break;
    363 	case DP_QUERY_PAYLOAD:
    364 		buf[idx] = (req->u.query_payload.port_number & 0xf) << 4;
    365 		idx++;
    366 		buf[idx] = (req->u.query_payload.vcpi & 0x7f);
    367 		idx++;
    368 		break;
    369 	case DP_REMOTE_DPCD_READ:
    370 		buf[idx] = (req->u.dpcd_read.port_number & 0xf) << 4;
    371 		buf[idx] |= ((req->u.dpcd_read.dpcd_address & 0xf0000) >> 16) & 0xf;
    372 		idx++;
    373 		buf[idx] = (req->u.dpcd_read.dpcd_address & 0xff00) >> 8;
    374 		idx++;
    375 		buf[idx] = (req->u.dpcd_read.dpcd_address & 0xff);
    376 		idx++;
    377 		buf[idx] = (req->u.dpcd_read.num_bytes);
    378 		idx++;
    379 		break;
    380 
    381 	case DP_REMOTE_DPCD_WRITE:
    382 		buf[idx] = (req->u.dpcd_write.port_number & 0xf) << 4;
    383 		buf[idx] |= ((req->u.dpcd_write.dpcd_address & 0xf0000) >> 16) & 0xf;
    384 		idx++;
    385 		buf[idx] = (req->u.dpcd_write.dpcd_address & 0xff00) >> 8;
    386 		idx++;
    387 		buf[idx] = (req->u.dpcd_write.dpcd_address & 0xff);
    388 		idx++;
    389 		buf[idx] = (req->u.dpcd_write.num_bytes);
    390 		idx++;
    391 		memcpy(&buf[idx], req->u.dpcd_write.bytes, req->u.dpcd_write.num_bytes);
    392 		idx += req->u.dpcd_write.num_bytes;
    393 		break;
    394 	case DP_REMOTE_I2C_READ:
    395 		buf[idx] = (req->u.i2c_read.port_number & 0xf) << 4;
    396 		buf[idx] |= (req->u.i2c_read.num_transactions & 0x3);
    397 		idx++;
    398 		for (i = 0; i < (req->u.i2c_read.num_transactions & 0x3); i++) {
    399 			buf[idx] = req->u.i2c_read.transactions[i].i2c_dev_id & 0x7f;
    400 			idx++;
    401 			buf[idx] = req->u.i2c_read.transactions[i].num_bytes;
    402 			idx++;
    403 			memcpy(&buf[idx], req->u.i2c_read.transactions[i].bytes, req->u.i2c_read.transactions[i].num_bytes);
    404 			idx += req->u.i2c_read.transactions[i].num_bytes;
    405 
    406 			buf[idx] = (req->u.i2c_read.transactions[i].no_stop_bit & 0x1) << 4;
    407 			buf[idx] |= (req->u.i2c_read.transactions[i].i2c_transaction_delay & 0xf);
    408 			idx++;
    409 		}
    410 		buf[idx] = (req->u.i2c_read.read_i2c_device_id) & 0x7f;
    411 		idx++;
    412 		buf[idx] = (req->u.i2c_read.num_bytes_read);
    413 		idx++;
    414 		break;
    415 
    416 	case DP_REMOTE_I2C_WRITE:
    417 		buf[idx] = (req->u.i2c_write.port_number & 0xf) << 4;
    418 		idx++;
    419 		buf[idx] = (req->u.i2c_write.write_i2c_device_id) & 0x7f;
    420 		idx++;
    421 		buf[idx] = (req->u.i2c_write.num_bytes);
    422 		idx++;
    423 		memcpy(&buf[idx], req->u.i2c_write.bytes, req->u.i2c_write.num_bytes);
    424 		idx += req->u.i2c_write.num_bytes;
    425 		break;
    426 	}
    427 	raw->cur_len = idx;
    428 }
    429 EXPORT_SYMBOL_FOR_TESTS_ONLY(drm_dp_encode_sideband_req);
    430 
    431 /* Decode a sideband request we've encoded, mainly used for debugging */
    432 int
    433 drm_dp_decode_sideband_req(const struct drm_dp_sideband_msg_tx *raw,
    434 			   struct drm_dp_sideband_msg_req_body *req)
    435 {
    436 	const u8 *buf = raw->msg;
    437 	int i, idx = 0;
    438 
    439 	req->req_type = buf[idx++] & 0x7f;
    440 	switch (req->req_type) {
    441 	case DP_ENUM_PATH_RESOURCES:
    442 	case DP_POWER_DOWN_PHY:
    443 	case DP_POWER_UP_PHY:
    444 		req->u.port_num.port_number = (buf[idx] >> 4) & 0xf;
    445 		break;
    446 	case DP_ALLOCATE_PAYLOAD:
    447 		{
    448 			struct drm_dp_allocate_payload *a =
    449 				&req->u.allocate_payload;
    450 
    451 			a->number_sdp_streams = buf[idx] & 0xf;
    452 			a->port_number = (buf[idx] >> 4) & 0xf;
    453 
    454 			WARN_ON(buf[++idx] & 0x80);
    455 			a->vcpi = buf[idx] & 0x7f;
    456 
    457 			a->pbn = buf[++idx] << 8;
    458 			a->pbn |= buf[++idx];
    459 
    460 			idx++;
    461 			for (i = 0; i < a->number_sdp_streams; i++) {
    462 				a->sdp_stream_sink[i] =
    463 					(buf[idx + (i / 2)] >> ((i % 2) ? 0 : 4)) & 0xf;
    464 			}
    465 		}
    466 		break;
    467 	case DP_QUERY_PAYLOAD:
    468 		req->u.query_payload.port_number = (buf[idx] >> 4) & 0xf;
    469 		WARN_ON(buf[++idx] & 0x80);
    470 		req->u.query_payload.vcpi = buf[idx] & 0x7f;
    471 		break;
    472 	case DP_REMOTE_DPCD_READ:
    473 		{
    474 			struct drm_dp_remote_dpcd_read *r = &req->u.dpcd_read;
    475 
    476 			r->port_number = (buf[idx] >> 4) & 0xf;
    477 
    478 			r->dpcd_address = (buf[idx] << 16) & 0xf0000;
    479 			r->dpcd_address |= (buf[++idx] << 8) & 0xff00;
    480 			r->dpcd_address |= buf[++idx] & 0xff;
    481 
    482 			r->num_bytes = buf[++idx];
    483 		}
    484 		break;
    485 	case DP_REMOTE_DPCD_WRITE:
    486 		{
    487 			struct drm_dp_remote_dpcd_write *w =
    488 				&req->u.dpcd_write;
    489 
    490 			w->port_number = (buf[idx] >> 4) & 0xf;
    491 
    492 			w->dpcd_address = (buf[idx] << 16) & 0xf0000;
    493 			w->dpcd_address |= (buf[++idx] << 8) & 0xff00;
    494 			w->dpcd_address |= buf[++idx] & 0xff;
    495 
    496 			w->num_bytes = buf[++idx];
    497 
    498 			w->bytes = kmemdup(&buf[++idx], w->num_bytes,
    499 					   GFP_KERNEL);
    500 			if (!w->bytes)
    501 				return -ENOMEM;
    502 		}
    503 		break;
    504 	case DP_REMOTE_I2C_READ:
    505 		{
    506 			struct drm_dp_remote_i2c_read *r = &req->u.i2c_read;
    507 			struct drm_dp_remote_i2c_read_tx *tx;
    508 			bool failed = false;
    509 
    510 			r->num_transactions = buf[idx] & 0x3;
    511 			r->port_number = (buf[idx] >> 4) & 0xf;
    512 			for (i = 0; i < r->num_transactions; i++) {
    513 				tx = &r->transactions[i];
    514 
    515 				tx->i2c_dev_id = buf[++idx] & 0x7f;
    516 				tx->num_bytes = buf[++idx];
    517 				tx->bytes = kmemdup(&buf[++idx],
    518 						    tx->num_bytes,
    519 						    GFP_KERNEL);
    520 				if (!tx->bytes) {
    521 					failed = true;
    522 					break;
    523 				}
    524 				idx += tx->num_bytes;
    525 				tx->no_stop_bit = (buf[idx] >> 5) & 0x1;
    526 				tx->i2c_transaction_delay = buf[idx] & 0xf;
    527 			}
    528 
    529 			if (failed) {
    530 				for (i = 0; i < r->num_transactions; i++) {
    531 					tx = &r->transactions[i];
    532 					kfree(tx->bytes);
    533 				}
    534 				return -ENOMEM;
    535 			}
    536 
    537 			r->read_i2c_device_id = buf[++idx] & 0x7f;
    538 			r->num_bytes_read = buf[++idx];
    539 		}
    540 		break;
    541 	case DP_REMOTE_I2C_WRITE:
    542 		{
    543 			struct drm_dp_remote_i2c_write *w = &req->u.i2c_write;
    544 
    545 			w->port_number = (buf[idx] >> 4) & 0xf;
    546 			w->write_i2c_device_id = buf[++idx] & 0x7f;
    547 			w->num_bytes = buf[++idx];
    548 			w->bytes = kmemdup(&buf[++idx], w->num_bytes,
    549 					   GFP_KERNEL);
    550 			if (!w->bytes)
    551 				return -ENOMEM;
    552 		}
    553 		break;
    554 	}
    555 
    556 	return 0;
    557 }
    558 EXPORT_SYMBOL_FOR_TESTS_ONLY(drm_dp_decode_sideband_req);
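/*
 * A minimal round-trip sketch for the encode/decode pair above, handy when
 * debugging sideband traffic (the request type and port number are arbitrary
 * example values).  Note that request types carrying payload buffers
 * (DP_REMOTE_DPCD_WRITE, DP_REMOTE_I2C_READ/WRITE) kmemdup() their bytes on
 * decode, and the caller must kfree() them afterwards, as
 * drm_dp_mst_dump_sideband_msg_tx() does further down.
 *
 *	struct drm_dp_sideband_msg_req_body req = {
 *		.req_type = DP_ENUM_PATH_RESOURCES,
 *		.u.port_num.port_number = 1,
 *	};
 *	struct drm_dp_sideband_msg_tx txmsg = {};
 *	struct drm_dp_sideband_msg_req_body decoded;
 *	struct drm_printer p = drm_debug_printer(DBG_PREFIX);
 *
 *	drm_dp_encode_sideband_req(&req, &txmsg);
 *	if (drm_dp_decode_sideband_req(&txmsg, &decoded) == 0)
 *		drm_dp_dump_sideband_msg_req_body(&decoded, 1, &p);
 */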
    559 
    560 void
    561 drm_dp_dump_sideband_msg_req_body(const struct drm_dp_sideband_msg_req_body *req,
    562 				  int indent, struct drm_printer *printer)
    563 {
    564 	int i;
    565 
    566 #define P(f, ...) drm_printf_indent(printer, indent, f, ##__VA_ARGS__)
    567 	if (req->req_type == DP_LINK_ADDRESS) {
    568 		/* No contents to print */
    569 		P("type=%s\n", drm_dp_mst_req_type_str(req->req_type));
    570 		return;
    571 	}
    572 
    573 	P("type=%s contents:\n", drm_dp_mst_req_type_str(req->req_type));
    574 	indent++;
    575 
    576 	switch (req->req_type) {
    577 	case DP_ENUM_PATH_RESOURCES:
    578 	case DP_POWER_DOWN_PHY:
    579 	case DP_POWER_UP_PHY:
    580 		P("port=%d\n", req->u.port_num.port_number);
    581 		break;
    582 	case DP_ALLOCATE_PAYLOAD:
    583 		P("port=%d vcpi=%d pbn=%d sdp_streams=%d %*ph\n",
    584 		  req->u.allocate_payload.port_number,
    585 		  req->u.allocate_payload.vcpi, req->u.allocate_payload.pbn,
    586 		  req->u.allocate_payload.number_sdp_streams,
    587 		  req->u.allocate_payload.number_sdp_streams,
    588 		  req->u.allocate_payload.sdp_stream_sink);
    589 		break;
    590 	case DP_QUERY_PAYLOAD:
    591 		P("port=%d vcpi=%d\n",
    592 		  req->u.query_payload.port_number,
    593 		  req->u.query_payload.vcpi);
    594 		break;
    595 	case DP_REMOTE_DPCD_READ:
    596 		P("port=%d dpcd_addr=%05x len=%d\n",
    597 		  req->u.dpcd_read.port_number, req->u.dpcd_read.dpcd_address,
    598 		  req->u.dpcd_read.num_bytes);
    599 		break;
    600 	case DP_REMOTE_DPCD_WRITE:
    601 		P("port=%d addr=%05x len=%d: %*ph\n",
    602 		  req->u.dpcd_write.port_number,
    603 		  req->u.dpcd_write.dpcd_address,
    604 		  req->u.dpcd_write.num_bytes, req->u.dpcd_write.num_bytes,
    605 		  req->u.dpcd_write.bytes);
    606 		break;
    607 	case DP_REMOTE_I2C_READ:
    608 		P("port=%d num_tx=%d id=%d size=%d:\n",
    609 		  req->u.i2c_read.port_number,
    610 		  req->u.i2c_read.num_transactions,
    611 		  req->u.i2c_read.read_i2c_device_id,
    612 		  req->u.i2c_read.num_bytes_read);
    613 
    614 		indent++;
    615 		for (i = 0; i < req->u.i2c_read.num_transactions; i++) {
    616 			const struct drm_dp_remote_i2c_read_tx *rtx =
    617 				&req->u.i2c_read.transactions[i];
    618 
    619 			P("%d: id=%03d size=%03d no_stop_bit=%d tx_delay=%03d: %*ph\n",
    620 			  i, rtx->i2c_dev_id, rtx->num_bytes,
    621 			  rtx->no_stop_bit, rtx->i2c_transaction_delay,
    622 			  rtx->num_bytes, rtx->bytes);
    623 		}
    624 		break;
    625 	case DP_REMOTE_I2C_WRITE:
    626 		P("port=%d id=%d size=%d: %*ph\n",
    627 		  req->u.i2c_write.port_number,
    628 		  req->u.i2c_write.write_i2c_device_id,
    629 		  req->u.i2c_write.num_bytes, req->u.i2c_write.num_bytes,
    630 		  req->u.i2c_write.bytes);
    631 		break;
    632 	default:
    633 		P("???\n");
    634 		break;
    635 	}
    636 #undef P
    637 }
    638 EXPORT_SYMBOL_FOR_TESTS_ONLY(drm_dp_dump_sideband_msg_req_body);
    639 
    640 static inline void
    641 drm_dp_mst_dump_sideband_msg_tx(struct drm_printer *p,
    642 				const struct drm_dp_sideband_msg_tx *txmsg)
    643 {
    644 	struct drm_dp_sideband_msg_req_body req;
    645 	char buf[64];
    646 	int ret;
    647 	int i;
    648 
    649 	drm_dp_mst_rad_to_str(txmsg->dst->rad, txmsg->dst->lct, buf,
    650 			      sizeof(buf));
    651 	drm_printf(p, "txmsg cur_offset=%x cur_len=%x seqno=%x state=%s path_msg=%d dst=%s\n",
    652 		   txmsg->cur_offset, txmsg->cur_len, txmsg->seqno,
    653 		   drm_dp_mst_sideband_tx_state_str(txmsg->state),
    654 		   txmsg->path_msg, buf);
    655 
    656 	ret = drm_dp_decode_sideband_req(txmsg, &req);
    657 	if (ret) {
    658 		drm_printf(p, "<failed to decode sideband req: %d>\n", ret);
    659 		return;
    660 	}
    661 	drm_dp_dump_sideband_msg_req_body(&req, 1, p);
    662 
    663 	switch (req.req_type) {
    664 	case DP_REMOTE_DPCD_WRITE:
    665 		kfree(req.u.dpcd_write.bytes);
    666 		break;
    667 	case DP_REMOTE_I2C_READ:
    668 		for (i = 0; i < req.u.i2c_read.num_transactions; i++)
    669 			kfree(req.u.i2c_read.transactions[i].bytes);
    670 		break;
    671 	case DP_REMOTE_I2C_WRITE:
    672 		kfree(req.u.i2c_write.bytes);
    673 		break;
    674 	}
    675 }
    676 
    677 static void drm_dp_crc_sideband_chunk_req(u8 *msg, u8 len)
    678 {
    679 	u8 crc4;
    680 	crc4 = drm_dp_msg_data_crc4(msg, len);
    681 	msg[len] = crc4;
    682 }
    683 
    684 static void drm_dp_encode_sideband_reply(struct drm_dp_sideband_msg_reply_body *rep,
    685 					 struct drm_dp_sideband_msg_tx *raw)
    686 {
    687 	int idx = 0;
    688 	u8 *buf = raw->msg;
    689 
    690 	buf[idx++] = (rep->reply_type & 0x1) << 7 | (rep->req_type & 0x7f);
    691 
    692 	raw->cur_len = idx;
    693 }
    694 
    695 /* this adds a chunk of msg to the builder to get the final msg */
    696 static bool drm_dp_sideband_msg_build(struct drm_dp_sideband_msg_rx *msg,
    697 				      u8 *replybuf, u8 replybuflen, bool hdr)
    698 {
    699 	int ret;
    700 	u8 crc4;
    701 
    702 	if (hdr) {
    703 		u8 hdrlen;
    704 		struct drm_dp_sideband_msg_hdr recv_hdr;
    705 		ret = drm_dp_decode_sideband_msg_hdr(&recv_hdr, replybuf, replybuflen, &hdrlen);
    706 		if (ret == false) {
    707 			print_hex_dump(KERN_DEBUG, "failed hdr", DUMP_PREFIX_NONE, 16, 1, replybuf, replybuflen, false);
    708 			return false;
    709 		}
    710 
    711 		/*
    712 		 * ignore out-of-order messages or messages that are part of a
    713 		 * failed transaction
    714 		 */
    715 		if (!recv_hdr.somt && !msg->have_somt)
    716 			return false;
    717 
    718 		/* get length contained in this portion */
    719 		msg->curchunk_len = recv_hdr.msg_len;
    720 		msg->curchunk_hdrlen = hdrlen;
    721 
    722 		/* we have already gotten an somt - don't bother parsing */
    723 		if (recv_hdr.somt && msg->have_somt)
    724 			return false;
    725 
    726 		if (recv_hdr.somt) {
    727 			memcpy(&msg->initial_hdr, &recv_hdr, sizeof(struct drm_dp_sideband_msg_hdr));
    728 			msg->have_somt = true;
    729 		}
    730 		if (recv_hdr.eomt)
    731 			msg->have_eomt = true;
    732 
    733 		/* copy the bytes for the remainder of this header chunk */
    734 		msg->curchunk_idx = min(msg->curchunk_len, (u8)(replybuflen - hdrlen));
    735 		memcpy(&msg->chunk[0], replybuf + hdrlen, msg->curchunk_idx);
    736 	} else {
    737 		memcpy(&msg->chunk[msg->curchunk_idx], replybuf, replybuflen);
    738 		msg->curchunk_idx += replybuflen;
    739 	}
    740 
    741 	if (msg->curchunk_idx >= msg->curchunk_len) {
    742 		/* do CRC */
    743 		crc4 = drm_dp_msg_data_crc4(msg->chunk, msg->curchunk_len - 1);
    744 		/* copy chunk into bigger msg */
    745 		memcpy(&msg->msg[msg->curlen], msg->chunk, msg->curchunk_len - 1);
    746 		msg->curlen += msg->curchunk_len - 1;
    747 	}
    748 	return true;
    749 }
    750 
    751 static bool drm_dp_sideband_parse_link_address(struct drm_dp_sideband_msg_rx *raw,
    752 					       struct drm_dp_sideband_msg_reply_body *repmsg)
    753 {
    754 	int idx = 1;
    755 	int i;
    756 	memcpy(repmsg->u.link_addr.guid, &raw->msg[idx], 16);
    757 	idx += 16;
    758 	repmsg->u.link_addr.nports = raw->msg[idx] & 0xf;
    759 	idx++;
    760 	if (idx > raw->curlen)
    761 		goto fail_len;
    762 	for (i = 0; i < repmsg->u.link_addr.nports; i++) {
    763 		if (raw->msg[idx] & 0x80)
    764 			repmsg->u.link_addr.ports[i].input_port = 1;
    765 
    766 		repmsg->u.link_addr.ports[i].peer_device_type = (raw->msg[idx] >> 4) & 0x7;
    767 		repmsg->u.link_addr.ports[i].port_number = (raw->msg[idx] & 0xf);
    768 
    769 		idx++;
    770 		if (idx > raw->curlen)
    771 			goto fail_len;
    772 		repmsg->u.link_addr.ports[i].mcs = (raw->msg[idx] >> 7) & 0x1;
    773 		repmsg->u.link_addr.ports[i].ddps = (raw->msg[idx] >> 6) & 0x1;
    774 		if (repmsg->u.link_addr.ports[i].input_port == 0)
    775 			repmsg->u.link_addr.ports[i].legacy_device_plug_status = (raw->msg[idx] >> 5) & 0x1;
    776 		idx++;
    777 		if (idx > raw->curlen)
    778 			goto fail_len;
    779 		if (repmsg->u.link_addr.ports[i].input_port == 0) {
    780 			repmsg->u.link_addr.ports[i].dpcd_revision = (raw->msg[idx]);
    781 			idx++;
    782 			if (idx > raw->curlen)
    783 				goto fail_len;
    784 			memcpy(repmsg->u.link_addr.ports[i].peer_guid, &raw->msg[idx], 16);
    785 			idx += 16;
    786 			if (idx > raw->curlen)
    787 				goto fail_len;
    788 			repmsg->u.link_addr.ports[i].num_sdp_streams = (raw->msg[idx] >> 4) & 0xf;
    789 			repmsg->u.link_addr.ports[i].num_sdp_stream_sinks = (raw->msg[idx] & 0xf);
    790 			idx++;
    791 
    792 		}
    793 		if (idx > raw->curlen)
    794 			goto fail_len;
    795 	}
    796 
    797 	return true;
    798 fail_len:
    799 	DRM_DEBUG_KMS("link address reply parse length fail %d %d\n", idx, raw->curlen);
    800 	return false;
    801 }
    802 
    803 static bool drm_dp_sideband_parse_remote_dpcd_read(struct drm_dp_sideband_msg_rx *raw,
    804 						   struct drm_dp_sideband_msg_reply_body *repmsg)
    805 {
    806 	int idx = 1;
    807 	repmsg->u.remote_dpcd_read_ack.port_number = raw->msg[idx] & 0xf;
    808 	idx++;
    809 	if (idx > raw->curlen)
    810 		goto fail_len;
    811 	repmsg->u.remote_dpcd_read_ack.num_bytes = raw->msg[idx];
    812 	idx++;
    813 	if (idx > raw->curlen)
    814 		goto fail_len;
    815 
    816 	memcpy(repmsg->u.remote_dpcd_read_ack.bytes, &raw->msg[idx], repmsg->u.remote_dpcd_read_ack.num_bytes);
    817 	return true;
    818 fail_len:
    819 	DRM_DEBUG_KMS("remote dpcd read reply parse length fail %d %d\n", idx, raw->curlen);
    820 	return false;
    821 }
    822 
    823 static bool drm_dp_sideband_parse_remote_dpcd_write(struct drm_dp_sideband_msg_rx *raw,
    824 						      struct drm_dp_sideband_msg_reply_body *repmsg)
    825 {
    826 	int idx = 1;
    827 	repmsg->u.remote_dpcd_write_ack.port_number = raw->msg[idx] & 0xf;
    828 	idx++;
    829 	if (idx > raw->curlen)
    830 		goto fail_len;
    831 	return true;
    832 fail_len:
    833 	DRM_DEBUG_KMS("parse length fail %d %d\n", idx, raw->curlen);
    834 	return false;
    835 }
    836 
    837 static bool drm_dp_sideband_parse_remote_i2c_read_ack(struct drm_dp_sideband_msg_rx *raw,
    838 						      struct drm_dp_sideband_msg_reply_body *repmsg)
    839 {
    840 	int idx = 1;
    841 
    842 	repmsg->u.remote_i2c_read_ack.port_number = (raw->msg[idx] & 0xf);
    843 	idx++;
    844 	if (idx > raw->curlen)
    845 		goto fail_len;
    846 	repmsg->u.remote_i2c_read_ack.num_bytes = raw->msg[idx];
    847 	idx++;
    848 	/* TODO check */
    849 	memcpy(repmsg->u.remote_i2c_read_ack.bytes, &raw->msg[idx], repmsg->u.remote_i2c_read_ack.num_bytes);
    850 	return true;
    851 fail_len:
    852 	DRM_DEBUG_KMS("remote i2c reply parse length fail %d %d\n", idx, raw->curlen);
    853 	return false;
    854 }
    855 
    856 static bool drm_dp_sideband_parse_enum_path_resources_ack(struct drm_dp_sideband_msg_rx *raw,
    857 							  struct drm_dp_sideband_msg_reply_body *repmsg)
    858 {
    859 	int idx = 1;
    860 	repmsg->u.path_resources.port_number = (raw->msg[idx] >> 4) & 0xf;
    861 	repmsg->u.path_resources.fec_capable = raw->msg[idx] & 0x1;
    862 	idx++;
    863 	if (idx > raw->curlen)
    864 		goto fail_len;
    865 	repmsg->u.path_resources.full_payload_bw_number = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
    866 	idx += 2;
    867 	if (idx > raw->curlen)
    868 		goto fail_len;
    869 	repmsg->u.path_resources.avail_payload_bw_number = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
    870 	idx += 2;
    871 	if (idx > raw->curlen)
    872 		goto fail_len;
    873 	return true;
    874 fail_len:
    875 	DRM_DEBUG_KMS("enum resource parse length fail %d %d\n", idx, raw->curlen);
    876 	return false;
    877 }
    878 
    879 static bool drm_dp_sideband_parse_allocate_payload_ack(struct drm_dp_sideband_msg_rx *raw,
    880 							  struct drm_dp_sideband_msg_reply_body *repmsg)
    881 {
    882 	int idx = 1;
    883 	repmsg->u.allocate_payload.port_number = (raw->msg[idx] >> 4) & 0xf;
    884 	idx++;
    885 	if (idx > raw->curlen)
    886 		goto fail_len;
    887 	repmsg->u.allocate_payload.vcpi = raw->msg[idx];
    888 	idx++;
    889 	if (idx > raw->curlen)
    890 		goto fail_len;
    891 	repmsg->u.allocate_payload.allocated_pbn = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
    892 	idx += 2;
    893 	if (idx > raw->curlen)
    894 		goto fail_len;
    895 	return true;
    896 fail_len:
    897 	DRM_DEBUG_KMS("allocate payload parse length fail %d %d\n", idx, raw->curlen);
    898 	return false;
    899 }
    900 
    901 static bool drm_dp_sideband_parse_query_payload_ack(struct drm_dp_sideband_msg_rx *raw,
    902 						    struct drm_dp_sideband_msg_reply_body *repmsg)
    903 {
    904 	int idx = 1;
    905 	repmsg->u.query_payload.port_number = (raw->msg[idx] >> 4) & 0xf;
    906 	idx++;
    907 	if (idx > raw->curlen)
    908 		goto fail_len;
    909 	repmsg->u.query_payload.allocated_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]);
    910 	idx += 2;
    911 	if (idx > raw->curlen)
    912 		goto fail_len;
    913 	return true;
    914 fail_len:
    915 	DRM_DEBUG_KMS("query payload parse length fail %d %d\n", idx, raw->curlen);
    916 	return false;
    917 }
    918 
    919 static bool drm_dp_sideband_parse_power_updown_phy_ack(struct drm_dp_sideband_msg_rx *raw,
    920 						       struct drm_dp_sideband_msg_reply_body *repmsg)
    921 {
    922 	int idx = 1;
    923 
    924 	repmsg->u.port_number.port_number = (raw->msg[idx] >> 4) & 0xf;
    925 	idx++;
    926 	if (idx > raw->curlen) {
    927 		DRM_DEBUG_KMS("power up/down phy parse length fail %d %d\n",
    928 			      idx, raw->curlen);
    929 		return false;
    930 	}
    931 	return true;
    932 }
    933 
    934 static bool drm_dp_sideband_parse_reply(struct drm_dp_sideband_msg_rx *raw,
    935 					struct drm_dp_sideband_msg_reply_body *msg)
    936 {
    937 	memset(msg, 0, sizeof(*msg));
    938 	msg->reply_type = (raw->msg[0] & 0x80) >> 7;
    939 	msg->req_type = (raw->msg[0] & 0x7f);
    940 
    941 	if (msg->reply_type == DP_SIDEBAND_REPLY_NAK) {
    942 		memcpy(msg->u.nak.guid, &raw->msg[1], 16);
    943 		msg->u.nak.reason = raw->msg[17];
    944 		msg->u.nak.nak_data = raw->msg[18];
    945 		return false;
    946 	}
    947 
    948 	switch (msg->req_type) {
    949 	case DP_LINK_ADDRESS:
    950 		return drm_dp_sideband_parse_link_address(raw, msg);
    951 	case DP_QUERY_PAYLOAD:
    952 		return drm_dp_sideband_parse_query_payload_ack(raw, msg);
    953 	case DP_REMOTE_DPCD_READ:
    954 		return drm_dp_sideband_parse_remote_dpcd_read(raw, msg);
    955 	case DP_REMOTE_DPCD_WRITE:
    956 		return drm_dp_sideband_parse_remote_dpcd_write(raw, msg);
    957 	case DP_REMOTE_I2C_READ:
    958 		return drm_dp_sideband_parse_remote_i2c_read_ack(raw, msg);
    959 	case DP_ENUM_PATH_RESOURCES:
    960 		return drm_dp_sideband_parse_enum_path_resources_ack(raw, msg);
    961 	case DP_ALLOCATE_PAYLOAD:
    962 		return drm_dp_sideband_parse_allocate_payload_ack(raw, msg);
    963 	case DP_POWER_DOWN_PHY:
    964 	case DP_POWER_UP_PHY:
    965 		return drm_dp_sideband_parse_power_updown_phy_ack(raw, msg);
    966 	case DP_CLEAR_PAYLOAD_ID_TABLE:
    967 		return true; /* since there's nothing to parse */
    968 	default:
    969 		DRM_ERROR("Got unknown reply 0x%02x (%s)\n", msg->req_type,
    970 			  drm_dp_mst_req_type_str(msg->req_type));
    971 		return false;
    972 	}
    973 }
    974 
    975 static bool drm_dp_sideband_parse_connection_status_notify(struct drm_dp_sideband_msg_rx *raw,
    976 							   struct drm_dp_sideband_msg_req_body *msg)
    977 {
    978 	int idx = 1;
    979 
    980 	msg->u.conn_stat.port_number = (raw->msg[idx] & 0xf0) >> 4;
    981 	idx++;
    982 	if (idx > raw->curlen)
    983 		goto fail_len;
    984 
    985 	memcpy(msg->u.conn_stat.guid, &raw->msg[idx], 16);
    986 	idx += 16;
    987 	if (idx > raw->curlen)
    988 		goto fail_len;
    989 
    990 	msg->u.conn_stat.legacy_device_plug_status = (raw->msg[idx] >> 6) & 0x1;
    991 	msg->u.conn_stat.displayport_device_plug_status = (raw->msg[idx] >> 5) & 0x1;
    992 	msg->u.conn_stat.message_capability_status = (raw->msg[idx] >> 4) & 0x1;
    993 	msg->u.conn_stat.input_port = (raw->msg[idx] >> 3) & 0x1;
    994 	msg->u.conn_stat.peer_device_type = (raw->msg[idx] & 0x7);
    995 	idx++;
    996 	return true;
    997 fail_len:
    998 	DRM_DEBUG_KMS("connection status reply parse length fail %d %d\n", idx, raw->curlen);
    999 	return false;
   1000 }
   1001 
   1002 static bool drm_dp_sideband_parse_resource_status_notify(struct drm_dp_sideband_msg_rx *raw,
   1003 							   struct drm_dp_sideband_msg_req_body *msg)
   1004 {
   1005 	int idx = 1;
   1006 
   1007 	msg->u.resource_stat.port_number = (raw->msg[idx] & 0xf0) >> 4;
   1008 	idx++;
   1009 	if (idx > raw->curlen)
   1010 		goto fail_len;
   1011 
   1012 	memcpy(msg->u.resource_stat.guid, &raw->msg[idx], 16);
   1013 	idx += 16;
   1014 	if (idx > raw->curlen)
   1015 		goto fail_len;
   1016 
   1017 	msg->u.resource_stat.available_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]);
   1018 	idx++;
   1019 	return true;
   1020 fail_len:
   1021 	DRM_DEBUG_KMS("resource status reply parse length fail %d %d\n", idx, raw->curlen);
   1022 	return false;
   1023 }
   1024 
   1025 static bool drm_dp_sideband_parse_req(struct drm_dp_sideband_msg_rx *raw,
   1026 				      struct drm_dp_sideband_msg_req_body *msg)
   1027 {
   1028 	memset(msg, 0, sizeof(*msg));
   1029 	msg->req_type = (raw->msg[0] & 0x7f);
   1030 
   1031 	switch (msg->req_type) {
   1032 	case DP_CONNECTION_STATUS_NOTIFY:
   1033 		return drm_dp_sideband_parse_connection_status_notify(raw, msg);
   1034 	case DP_RESOURCE_STATUS_NOTIFY:
   1035 		return drm_dp_sideband_parse_resource_status_notify(raw, msg);
   1036 	default:
   1037 		DRM_ERROR("Got unknown request 0x%02x (%s)\n", msg->req_type,
   1038 			  drm_dp_mst_req_type_str(msg->req_type));
   1039 		return false;
   1040 	}
   1041 }
   1042 
   1043 static int build_dpcd_write(struct drm_dp_sideband_msg_tx *msg, u8 port_num, u32 offset, u8 num_bytes, u8 *bytes)
   1044 {
   1045 	struct drm_dp_sideband_msg_req_body req;
   1046 
   1047 	req.req_type = DP_REMOTE_DPCD_WRITE;
   1048 	req.u.dpcd_write.port_number = port_num;
   1049 	req.u.dpcd_write.dpcd_address = offset;
   1050 	req.u.dpcd_write.num_bytes = num_bytes;
   1051 	req.u.dpcd_write.bytes = bytes;
   1052 	drm_dp_encode_sideband_req(&req, msg);
   1053 
   1054 	return 0;
   1055 }
   1056 
   1057 static int build_link_address(struct drm_dp_sideband_msg_tx *msg)
   1058 {
   1059 	struct drm_dp_sideband_msg_req_body req;
   1060 
   1061 	req.req_type = DP_LINK_ADDRESS;
   1062 	drm_dp_encode_sideband_req(&req, msg);
   1063 	return 0;
   1064 }
   1065 
   1066 static int build_clear_payload_id_table(struct drm_dp_sideband_msg_tx *msg)
   1067 {
   1068 	struct drm_dp_sideband_msg_req_body req;
   1069 
   1070 	req.req_type = DP_CLEAR_PAYLOAD_ID_TABLE;
   1071 	drm_dp_encode_sideband_req(&req, msg);
   1072 	return 0;
   1073 }
   1074 
   1075 static int build_enum_path_resources(struct drm_dp_sideband_msg_tx *msg, int port_num)
   1076 {
   1077 	struct drm_dp_sideband_msg_req_body req;
   1078 
   1079 	req.req_type = DP_ENUM_PATH_RESOURCES;
   1080 	req.u.port_num.port_number = port_num;
   1081 	drm_dp_encode_sideband_req(&req, msg);
   1082 	msg->path_msg = true;
   1083 	return 0;
   1084 }
   1085 
   1086 static int build_allocate_payload(struct drm_dp_sideband_msg_tx *msg, int port_num,
   1087 				  u8 vcpi, uint16_t pbn,
   1088 				  u8 number_sdp_streams,
   1089 				  u8 *sdp_stream_sink)
   1090 {
   1091 	struct drm_dp_sideband_msg_req_body req;
   1092 	memset(&req, 0, sizeof(req));
   1093 	req.req_type = DP_ALLOCATE_PAYLOAD;
   1094 	req.u.allocate_payload.port_number = port_num;
   1095 	req.u.allocate_payload.vcpi = vcpi;
   1096 	req.u.allocate_payload.pbn = pbn;
   1097 	req.u.allocate_payload.number_sdp_streams = number_sdp_streams;
   1098 	memcpy(req.u.allocate_payload.sdp_stream_sink, sdp_stream_sink,
   1099 		   number_sdp_streams);
   1100 	drm_dp_encode_sideband_req(&req, msg);
   1101 	msg->path_msg = true;
   1102 	return 0;
   1103 }
   1104 
   1105 static int build_power_updown_phy(struct drm_dp_sideband_msg_tx *msg,
   1106 				  int port_num, bool power_up)
   1107 {
   1108 	struct drm_dp_sideband_msg_req_body req;
   1109 
   1110 	if (power_up)
   1111 		req.req_type = DP_POWER_UP_PHY;
   1112 	else
   1113 		req.req_type = DP_POWER_DOWN_PHY;
   1114 
   1115 	req.u.port_num.port_number = port_num;
   1116 	drm_dp_encode_sideband_req(&req, msg);
   1117 	msg->path_msg = true;
   1118 	return 0;
   1119 }
   1120 
   1121 static int drm_dp_mst_assign_payload_id(struct drm_dp_mst_topology_mgr *mgr,
   1122 					struct drm_dp_vcpi *vcpi)
   1123 {
   1124 	int ret, vcpi_ret;
   1125 
   1126 	mutex_lock(&mgr->payload_lock);
   1127 	ret = find_first_zero_bit(&mgr->payload_mask, mgr->max_payloads + 1);
   1128 	if (ret > mgr->max_payloads) {
   1129 		ret = -EINVAL;
   1130 		DRM_DEBUG_KMS("out of payload ids %d\n", ret);
   1131 		goto out_unlock;
   1132 	}
   1133 
   1134 	vcpi_ret = find_first_zero_bit(&mgr->vcpi_mask, mgr->max_payloads + 1);
   1135 	if (vcpi_ret > mgr->max_payloads) {
   1136 		ret = -EINVAL;
   1137 		DRM_DEBUG_KMS("out of vcpi ids %d\n", ret);
   1138 		goto out_unlock;
   1139 	}
   1140 
   1141 	set_bit(ret, &mgr->payload_mask);
   1142 	set_bit(vcpi_ret, &mgr->vcpi_mask);
   1143 	vcpi->vcpi = vcpi_ret + 1;
   1144 	mgr->proposed_vcpis[ret - 1] = vcpi;
   1145 out_unlock:
   1146 	mutex_unlock(&mgr->payload_lock);
   1147 	return ret;
   1148 }
   1149 
   1150 static void drm_dp_mst_put_payload_id(struct drm_dp_mst_topology_mgr *mgr,
   1151 				      int vcpi)
   1152 {
   1153 	int i;
   1154 	if (vcpi == 0)
   1155 		return;
   1156 
   1157 	mutex_lock(&mgr->payload_lock);
   1158 	DRM_DEBUG_KMS("putting payload %d\n", vcpi);
   1159 	clear_bit(vcpi - 1, &mgr->vcpi_mask);
   1160 
   1161 	for (i = 0; i < mgr->max_payloads; i++) {
   1162 		if (mgr->proposed_vcpis[i] &&
   1163 		    mgr->proposed_vcpis[i]->vcpi == vcpi) {
   1164 			mgr->proposed_vcpis[i] = NULL;
   1165 			clear_bit(i + 1, &mgr->payload_mask);
   1166 		}
   1167 	}
   1168 	mutex_unlock(&mgr->payload_lock);
   1169 }
   1170 
   1171 static bool check_txmsg_state(struct drm_dp_mst_topology_mgr *mgr,
   1172 			      struct drm_dp_sideband_msg_tx *txmsg)
   1173 {
   1174 	unsigned int state;
   1175 
   1176 	/*
   1177 	 * All updates to txmsg->state are protected by mgr->qlock, and the two
   1178 	 * cases we check here are terminal states. For those the barriers
   1179 	 * provided by the wake_up/wait_event pair are enough.
   1180 	 */
   1181 	state = READ_ONCE(txmsg->state);
   1182 	return (state == DRM_DP_SIDEBAND_TX_RX ||
   1183 		state == DRM_DP_SIDEBAND_TX_TIMEOUT);
   1184 }
   1185 
   1186 static int drm_dp_mst_wait_tx_reply(struct drm_dp_mst_branch *mstb,
   1187 				    struct drm_dp_sideband_msg_tx *txmsg)
   1188 {
   1189 	struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
   1190 	int ret;
   1191 
   1192 	ret = wait_event_timeout(mgr->tx_waitq,
   1193 				 check_txmsg_state(mgr, txmsg),
   1194 				 (4 * HZ));
   1195 	mutex_lock(&mstb->mgr->qlock);
   1196 	if (ret > 0) {
   1197 		if (txmsg->state == DRM_DP_SIDEBAND_TX_TIMEOUT) {
   1198 			ret = -EIO;
   1199 			goto out;
   1200 		}
   1201 	} else {
   1202 		DRM_DEBUG_KMS("timedout msg send %p %d %d\n", txmsg, txmsg->state, txmsg->seqno);
   1203 
   1204 		/* dump some state */
   1205 		ret = -EIO;
   1206 
   1207 		/* remove from q */
   1208 		if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED ||
   1209 		    txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND) {
   1210 			list_del(&txmsg->next);
   1211 		}
   1212 
   1213 		if (txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND ||
   1214 		    txmsg->state == DRM_DP_SIDEBAND_TX_SENT) {
   1215 			mstb->tx_slots[txmsg->seqno] = NULL;
   1216 		}
   1217 		mgr->is_waiting_for_dwn_reply = false;
   1218 
   1219 	}
   1220 out:
   1221 	if (unlikely(ret == -EIO) && drm_debug_enabled(DRM_UT_DP)) {
   1222 		struct drm_printer p = drm_debug_printer(DBG_PREFIX);
   1223 
   1224 		drm_dp_mst_dump_sideband_msg_tx(&p, txmsg);
   1225 	}
   1226 	mutex_unlock(&mgr->qlock);
   1227 
   1228 	drm_dp_mst_kick_tx(mgr);
   1229 	return ret;
   1230 }
   1231 
   1232 static struct drm_dp_mst_branch *drm_dp_add_mst_branch_device(u8 lct, u8 *rad)
   1233 {
   1234 	struct drm_dp_mst_branch *mstb;
   1235 
   1236 	mstb = kzalloc(sizeof(*mstb), GFP_KERNEL);
   1237 	if (!mstb)
   1238 		return NULL;
   1239 
   1240 	mstb->lct = lct;
   1241 	if (lct > 1)
   1242 		memcpy(mstb->rad, rad, lct / 2);
   1243 	INIT_LIST_HEAD(&mstb->ports);
   1244 	kref_init(&mstb->topology_kref);
   1245 	kref_init(&mstb->malloc_kref);
   1246 	return mstb;
   1247 }
   1248 
   1249 static void drm_dp_free_mst_branch_device(struct kref *kref)
   1250 {
   1251 	struct drm_dp_mst_branch *mstb =
   1252 		container_of(kref, struct drm_dp_mst_branch, malloc_kref);
   1253 
   1254 	if (mstb->port_parent)
   1255 		drm_dp_mst_put_port_malloc(mstb->port_parent);
   1256 
   1257 	kfree(mstb);
   1258 }
   1259 
   1260 /**
   1261  * DOC: Branch device and port refcounting
   1262  *
   1263  * Topology refcount overview
   1264  * ~~~~~~~~~~~~~~~~~~~~~~~~~~
   1265  *
   1266  * The refcounting schemes for &struct drm_dp_mst_branch and &struct
   1267  * drm_dp_mst_port are somewhat unusual. Both ports and branch devices have
   1268  * two different kinds of refcounts: topology refcounts, and malloc refcounts.
   1269  *
   1270  * Topology refcounts are not exposed to drivers, and are handled internally
   1271  * by the DP MST helpers. The helpers use them in order to prevent the
   1272  * in-memory topology state from being changed in the middle of critical
   1273  * operations like changing the internal state of payload allocations. This
   1274  * means each branch and port will be considered to be connected to the rest
   1275  * of the topology until its topology refcount reaches zero. Additionally,
   1276  * for ports this means that their associated &struct drm_connector will stay
   1277  * registered with userspace until the port's refcount reaches 0.
   1278  *
   1279  * Malloc refcount overview
   1280  * ~~~~~~~~~~~~~~~~~~~~~~~~
   1281  *
   1282  * Malloc references are used to keep a &struct drm_dp_mst_port or &struct
   1283  * drm_dp_mst_branch allocated even after all of its topology references have
   1284  * been dropped, so that the driver or MST helpers can safely access each
   1285  * branch's last known state before it was disconnected from the topology.
   1286  * When the malloc refcount of a port or branch reaches 0, the memory
   1287  * allocation containing the &struct drm_dp_mst_branch or &struct
   1288  * drm_dp_mst_port respectively will be freed.
   1289  *
   1290  * For &struct drm_dp_mst_branch, malloc refcounts are not currently exposed
   1291  * to drivers. As of writing this documentation, there are no drivers that
   1292  * have a usecase for accessing &struct drm_dp_mst_branch outside of the MST
   1293  * helpers. Exposing this API to drivers in a race-free manner would take more
   1294  * tweaking of the refcounting scheme; however, patches are welcome provided
   1295  * there is a legitimate driver usecase for this.
   1296  *
   1297  * Refcount relationships in a topology
   1298  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
   1299  *
   1300  * Let's take a look at why the relationship between topology and malloc
   1301  * refcounts is designed the way it is.
   1302  *
   1303  * .. kernel-figure:: dp-mst/topology-figure-1.dot
   1304  *
   1305  *    An example of topology and malloc refs in a DP MST topology with two
   1306  *    active payloads. Topology refcount increments are indicated by solid
   1307  *    lines, and malloc refcount increments are indicated by dashed lines.
   1308  *    Each starts from the branch which incremented the refcount, and ends at
   1309  *    the branch to which the refcount belongs, i.e. the arrow points the
   1310  *    same way as the C pointers used to reference a structure.
   1311  *
   1312  * As you can see in the above figure, every branch increments the topology
   1313  * refcount of its children, and increments the malloc refcount of its
   1314  * parent. Additionally, every payload increments the malloc refcount of its
   1315  * assigned port by 1.
   1316  *
   1317  * So, what would happen if MSTB #3 from the above figure was unplugged from
   1318  * the system, but the driver hadn't yet removed payload #2 from port #3? The
   1319  * topology would start to look like the figure below.
   1320  *
   1321  * .. kernel-figure:: dp-mst/topology-figure-2.dot
   1322  *
   1323  *    Ports and branch devices which have been released from memory are
   1324  *    colored grey, and references which have been removed are colored red.
   1325  *
   1326  * Whenever a port or branch device's topology refcount reaches zero, it will
   1327  * decrement the topology refcounts of all its children, the malloc refcount
   1328  * of its parent, and finally its own malloc refcount. For MSTB #4 and port
   1329  * #4, this means they both have been disconnected from the topology and freed
   1330  * from memory. But, because payload #2 is still holding a reference to port
   1331  * #3, port #3 is removed from the topology but its &struct drm_dp_mst_port
   1332  * is still accessible from memory. This also means port #3 has not yet
   1333  * decremented the malloc refcount of MSTB #3, so its &struct
   1334  * drm_dp_mst_branch will also stay allocated in memory until port #3's
   1335  * malloc refcount reaches 0.
   1336  *
   1337  * This relationship is necessary because in order to release payload #2, we
   1338  * need to be able to figure out the last relative of port #3 that's still
   1339  * connected to the topology. In this case, we would travel up the topology as
   1340  * shown below.
   1341  *
   1342  * .. kernel-figure:: dp-mst/topology-figure-3.dot
   1343  *
   1344  * And finally, remove payload #2 by communicating with port #2 through
   1345  * sideband transactions.
   1346  */
   1347 
   1348 /**
   1349  * drm_dp_mst_get_mstb_malloc() - Increment the malloc refcount of a branch
   1350  * device
   1351  * @mstb: The &struct drm_dp_mst_branch to increment the malloc refcount of
   1352  *
   1353  * Increments &drm_dp_mst_branch.malloc_kref. When
   1354  * &drm_dp_mst_branch.malloc_kref reaches 0, the memory allocation for @mstb
   1355  * will be released and @mstb may no longer be used.
   1356  *
   1357  * See also: drm_dp_mst_put_mstb_malloc()
   1358  */
   1359 static void
   1360 drm_dp_mst_get_mstb_malloc(struct drm_dp_mst_branch *mstb)
   1361 {
   1362 	kref_get(&mstb->malloc_kref);
   1363 	DRM_DEBUG("mstb %p (%d)\n", mstb, kref_read(&mstb->malloc_kref));
   1364 }
   1365 
   1366 /**
   1367  * drm_dp_mst_put_mstb_malloc() - Decrement the malloc refcount of a branch
   1368  * device
   1369  * @mstb: The &struct drm_dp_mst_branch to decrement the malloc refcount of
   1370  *
   1371  * Decrements &drm_dp_mst_branch.malloc_kref. When
   1372  * &drm_dp_mst_branch.malloc_kref reaches 0, the memory allocation for @mstb
   1373  * will be released and @mstb may no longer be used.
   1374  *
   1375  * See also: drm_dp_mst_get_mstb_malloc()
   1376  */
   1377 static void
   1378 drm_dp_mst_put_mstb_malloc(struct drm_dp_mst_branch *mstb)
   1379 {
   1380 	DRM_DEBUG("mstb %p (%d)\n", mstb, kref_read(&mstb->malloc_kref) - 1);
   1381 	kref_put(&mstb->malloc_kref, drm_dp_free_mst_branch_device);
   1382 }
   1383 
   1384 static void drm_dp_free_mst_port(struct kref *kref)
   1385 {
   1386 	struct drm_dp_mst_port *port =
   1387 		container_of(kref, struct drm_dp_mst_port, malloc_kref);
   1388 
   1389 	drm_dp_mst_put_mstb_malloc(port->parent);
   1390 	kfree(port);
   1391 }
   1392 
   1393 /**
   1394  * drm_dp_mst_get_port_malloc() - Increment the malloc refcount of an MST port
   1395  * @port: The &struct drm_dp_mst_port to increment the malloc refcount of
   1396  *
   1397  * Increments &drm_dp_mst_port.malloc_kref. When &drm_dp_mst_port.malloc_kref
   1398  * reaches 0, the memory allocation for @port will be released and @port may
   1399  * no longer be used.
   1400  *
   1401  * Because @port could potentially be freed at any time by the DP MST helpers
   1402  * if &drm_dp_mst_port.malloc_kref reaches 0, including during a call to this
   1403  * function, drivers that wish to make use of &struct drm_dp_mst_port should
   1404  * ensure that they grab at least one main malloc reference to their MST ports
   1405  * in &drm_dp_mst_topology_cbs.add_connector. This callback is called before
   1406  * there is any chance for &drm_dp_mst_port.malloc_kref to reach 0.
   1407  *
   1408  * See also: drm_dp_mst_put_port_malloc()
   1409  */
   1410 void
   1411 drm_dp_mst_get_port_malloc(struct drm_dp_mst_port *port)
   1412 {
   1413 	kref_get(&port->malloc_kref);
   1414 	DRM_DEBUG("port %p (%d)\n", port, kref_read(&port->malloc_kref));
   1415 }
   1416 EXPORT_SYMBOL(drm_dp_mst_get_port_malloc);
   1417 
   1418 /**
   1419  * drm_dp_mst_put_port_malloc() - Decrement the malloc refcount of an MST port
   1420  * @port: The &struct drm_dp_mst_port to decrement the malloc refcount of
   1421  *
   1422  * Decrements &drm_dp_mst_port.malloc_kref. When &drm_dp_mst_port.malloc_kref
   1423  * reaches 0, the memory allocation for @port will be released and @port may
   1424  * no longer be used.
   1425  *
   1426  * See also: drm_dp_mst_get_port_malloc()
   1427  */
   1428 void
   1429 drm_dp_mst_put_port_malloc(struct drm_dp_mst_port *port)
   1430 {
   1431 	DRM_DEBUG("port %p (%d)\n", port, kref_read(&port->malloc_kref) - 1);
   1432 	kref_put(&port->malloc_kref, drm_dp_free_mst_port);
   1433 }
   1434 EXPORT_SYMBOL(drm_dp_mst_put_port_malloc);
   1435 
   1436 #if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
   1437 
   1438 #define STACK_DEPTH 8
   1439 
   1440 static noinline void
   1441 __topology_ref_save(struct drm_dp_mst_topology_mgr *mgr,
   1442 		    struct drm_dp_mst_topology_ref_history *history,
   1443 		    enum drm_dp_mst_topology_ref_type type)
   1444 {
   1445 	struct drm_dp_mst_topology_ref_entry *entry = NULL;
   1446 	depot_stack_handle_t backtrace;
   1447 	ulong stack_entries[STACK_DEPTH];
   1448 	uint n;
   1449 	int i;
   1450 
   1451 	n = stack_trace_save(stack_entries, ARRAY_SIZE(stack_entries), 1);
   1452 	backtrace = stack_depot_save(stack_entries, n, GFP_KERNEL);
   1453 	if (!backtrace)
   1454 		return;
   1455 
   1456 	/* Try to find an existing entry for this backtrace */
   1457 	for (i = 0; i < history->len; i++) {
   1458 		if (history->entries[i].backtrace == backtrace) {
   1459 			entry = &history->entries[i];
   1460 			break;
   1461 		}
   1462 	}
   1463 
   1464 	/* Otherwise add one */
   1465 	if (!entry) {
   1466 		struct drm_dp_mst_topology_ref_entry *new;
   1467 		int new_len = history->len + 1;
   1468 
   1469 		new = krealloc(history->entries, sizeof(*new) * new_len,
   1470 			       GFP_KERNEL);
   1471 		if (!new)
   1472 			return;
   1473 
   1474 		entry = &new[history->len];
   1475 		history->len = new_len;
   1476 		history->entries = new;
   1477 
   1478 		entry->backtrace = backtrace;
   1479 		entry->type = type;
   1480 		entry->count = 0;
   1481 	}
   1482 	entry->count++;
   1483 	entry->ts_nsec = ktime_get_ns();
   1484 }
   1485 
   1486 static int
   1487 topology_ref_history_cmp(const void *a, const void *b)
   1488 {
   1489 	const struct drm_dp_mst_topology_ref_entry *entry_a = a, *entry_b = b;
   1490 
   1491 	if (entry_a->ts_nsec > entry_b->ts_nsec)
   1492 		return 1;
   1493 	else if (entry_a->ts_nsec < entry_b->ts_nsec)
   1494 		return -1;
   1495 	else
   1496 		return 0;
   1497 }
   1498 
   1499 static inline const char *
   1500 topology_ref_type_to_str(enum drm_dp_mst_topology_ref_type type)
   1501 {
   1502 	if (type == DRM_DP_MST_TOPOLOGY_REF_GET)
   1503 		return "get";
   1504 	else
   1505 		return "put";
   1506 }
   1507 
   1508 static void
   1509 __dump_topology_ref_history(struct drm_dp_mst_topology_ref_history *history,
   1510 			    void *ptr, const char *type_str)
   1511 {
   1512 	struct drm_printer p = drm_debug_printer(DBG_PREFIX);
   1513 	char *buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
   1514 	int i;
   1515 
   1516 	if (!buf)
   1517 		return;
   1518 
   1519 	if (!history->len)
   1520 		goto out;
   1521 
   1522 	/* First, sort the list so that it goes from oldest to newest
   1523 	 * reference entry
   1524 	 */
   1525 	sort(history->entries, history->len, sizeof(*history->entries),
   1526 	     topology_ref_history_cmp, NULL);
   1527 
   1528 	drm_printf(&p, "%s (%p) topology count reached 0, dumping history:\n",
   1529 		   type_str, ptr);
   1530 
   1531 	for (i = 0; i < history->len; i++) {
   1532 		const struct drm_dp_mst_topology_ref_entry *entry =
   1533 			&history->entries[i];
   1534 		ulong *entries;
   1535 		uint nr_entries;
   1536 		u64 ts_nsec = entry->ts_nsec;
   1537 		u32 rem_nsec = do_div(ts_nsec, 1000000000);
   1538 
   1539 		nr_entries = stack_depot_fetch(entry->backtrace, &entries);
   1540 		stack_trace_snprint(buf, PAGE_SIZE, entries, nr_entries, 4);
   1541 
   1542 		drm_printf(&p, "  %d %ss (last at %5llu.%06u):\n%s",
   1543 			   entry->count,
   1544 			   topology_ref_type_to_str(entry->type),
   1545 			   ts_nsec, rem_nsec / 1000, buf);
   1546 	}
   1547 
   1548 	/* Now free the history, since this is the only time we expose it */
   1549 	kfree(history->entries);
   1550 out:
   1551 	kfree(buf);
   1552 }
   1553 
   1554 static __always_inline void
   1555 drm_dp_mst_dump_mstb_topology_history(struct drm_dp_mst_branch *mstb)
   1556 {
   1557 	__dump_topology_ref_history(&mstb->topology_ref_history, mstb,
   1558 				    "MSTB");
   1559 }
   1560 
   1561 static __always_inline void
   1562 drm_dp_mst_dump_port_topology_history(struct drm_dp_mst_port *port)
   1563 {
   1564 	__dump_topology_ref_history(&port->topology_ref_history, port,
   1565 				    "Port");
   1566 }
   1567 
   1568 static __always_inline void
   1569 save_mstb_topology_ref(struct drm_dp_mst_branch *mstb,
   1570 		       enum drm_dp_mst_topology_ref_type type)
   1571 {
   1572 	__topology_ref_save(mstb->mgr, &mstb->topology_ref_history, type);
   1573 }
   1574 
   1575 static __always_inline void
   1576 save_port_topology_ref(struct drm_dp_mst_port *port,
   1577 		       enum drm_dp_mst_topology_ref_type type)
   1578 {
   1579 	__topology_ref_save(port->mgr, &port->topology_ref_history, type);
   1580 }
   1581 
   1582 static inline void
   1583 topology_ref_history_lock(struct drm_dp_mst_topology_mgr *mgr)
   1584 {
   1585 	mutex_lock(&mgr->topology_ref_history_lock);
   1586 }
   1587 
   1588 static inline void
   1589 topology_ref_history_unlock(struct drm_dp_mst_topology_mgr *mgr)
   1590 {
   1591 	mutex_unlock(&mgr->topology_ref_history_lock);
   1592 }
   1593 #else
   1594 static inline void
   1595 topology_ref_history_lock(struct drm_dp_mst_topology_mgr *mgr) {}
   1596 static inline void
   1597 topology_ref_history_unlock(struct drm_dp_mst_topology_mgr *mgr) {}
   1598 static inline void
   1599 drm_dp_mst_dump_mstb_topology_history(struct drm_dp_mst_branch *mstb) {}
   1600 static inline void
   1601 drm_dp_mst_dump_port_topology_history(struct drm_dp_mst_port *port) {}
   1602 #define save_mstb_topology_ref(mstb, type)
   1603 #define save_port_topology_ref(port, type)
   1604 #endif
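
/*
 * Everything in the block above is compiled in only when the kernel is built
 * with CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS enabled; in that configuration
 * every topology get/put records a deduplicated backtrace, and the history
 * is dumped (sorted oldest to newest) once a branch or port refcount reaches
 * zero.
 */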
   1605 
   1606 static void drm_dp_destroy_mst_branch_device(struct kref *kref)
   1607 {
   1608 	struct drm_dp_mst_branch *mstb =
   1609 		container_of(kref, struct drm_dp_mst_branch, topology_kref);
   1610 	struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
   1611 
   1612 	drm_dp_mst_dump_mstb_topology_history(mstb);
   1613 
   1614 	INIT_LIST_HEAD(&mstb->destroy_next);
   1615 
   1616 	/*
   1617 	 * This can get called under mgr->mutex, so we need to perform the
   1618 	 * actual destruction of the mstb in another worker
   1619 	 */
   1620 	mutex_lock(&mgr->delayed_destroy_lock);
   1621 	list_add(&mstb->destroy_next, &mgr->destroy_branch_device_list);
   1622 	mutex_unlock(&mgr->delayed_destroy_lock);
   1623 	schedule_work(&mgr->delayed_destroy_work);
   1624 }
   1625 
   1626 /**
   1627  * drm_dp_mst_topology_try_get_mstb() - Increment the topology refcount of a
   1628  * branch device unless it's zero
   1629  * @mstb: &struct drm_dp_mst_branch to increment the topology refcount of
   1630  *
   1631  * Attempts to grab a topology reference to @mstb, if it hasn't yet been
   1632  * removed from the topology (e.g. &drm_dp_mst_branch.topology_kref has
   1633  * reached 0). Holding a topology reference implies that a malloc reference
   1634  * will be held to @mstb as long as the user holds the topology reference.
   1635  *
   1636  * Care should be taken to ensure that the user has at least one malloc
   1637  * reference to @mstb. If you already have a topology reference to @mstb, you
   1638  * should use drm_dp_mst_topology_get_mstb() instead.
   1639  *
   1640  * See also:
   1641  * drm_dp_mst_topology_get_mstb()
   1642  * drm_dp_mst_topology_put_mstb()
   1643  *
   1644  * Returns:
   1645  * * 1: A topology reference was grabbed successfully
    1646  * * 0: @mstb is no longer in the topology, no reference was grabbed
   1647  */
   1648 static int __must_check
   1649 drm_dp_mst_topology_try_get_mstb(struct drm_dp_mst_branch *mstb)
   1650 {
   1651 	int ret;
   1652 
   1653 	topology_ref_history_lock(mstb->mgr);
   1654 	ret = kref_get_unless_zero(&mstb->topology_kref);
   1655 	if (ret) {
   1656 		DRM_DEBUG("mstb %p (%d)\n",
   1657 			  mstb, kref_read(&mstb->topology_kref));
   1658 		save_mstb_topology_ref(mstb, DRM_DP_MST_TOPOLOGY_REF_GET);
   1659 	}
   1660 
   1661 	topology_ref_history_unlock(mstb->mgr);
   1662 
   1663 	return ret;
   1664 }
   1665 
   1666 /**
   1667  * drm_dp_mst_topology_get_mstb() - Increment the topology refcount of a
   1668  * branch device
   1669  * @mstb: The &struct drm_dp_mst_branch to increment the topology refcount of
   1670  *
    1671  * Increments &drm_dp_mst_branch.topology_kref without checking whether or
   1672  * not it's already reached 0. This is only valid to use in scenarios where
   1673  * you are already guaranteed to have at least one active topology reference
   1674  * to @mstb. Otherwise, drm_dp_mst_topology_try_get_mstb() must be used.
   1675  *
   1676  * See also:
   1677  * drm_dp_mst_topology_try_get_mstb()
   1678  * drm_dp_mst_topology_put_mstb()
   1679  */
   1680 static void drm_dp_mst_topology_get_mstb(struct drm_dp_mst_branch *mstb)
   1681 {
   1682 	topology_ref_history_lock(mstb->mgr);
   1683 
   1684 	save_mstb_topology_ref(mstb, DRM_DP_MST_TOPOLOGY_REF_GET);
   1685 	WARN_ON(kref_read(&mstb->topology_kref) == 0);
   1686 	kref_get(&mstb->topology_kref);
   1687 	DRM_DEBUG("mstb %p (%d)\n", mstb, kref_read(&mstb->topology_kref));
   1688 
   1689 	topology_ref_history_unlock(mstb->mgr);
   1690 }
   1691 
   1692 /**
   1693  * drm_dp_mst_topology_put_mstb() - release a topology reference to a branch
   1694  * device
   1695  * @mstb: The &struct drm_dp_mst_branch to release the topology reference from
   1696  *
   1697  * Releases a topology reference from @mstb by decrementing
   1698  * &drm_dp_mst_branch.topology_kref.
   1699  *
   1700  * See also:
   1701  * drm_dp_mst_topology_try_get_mstb()
   1702  * drm_dp_mst_topology_get_mstb()
   1703  */
   1704 static void
   1705 drm_dp_mst_topology_put_mstb(struct drm_dp_mst_branch *mstb)
   1706 {
   1707 	topology_ref_history_lock(mstb->mgr);
   1708 
   1709 	DRM_DEBUG("mstb %p (%d)\n",
   1710 		  mstb, kref_read(&mstb->topology_kref) - 1);
   1711 	save_mstb_topology_ref(mstb, DRM_DP_MST_TOPOLOGY_REF_PUT);
   1712 
   1713 	topology_ref_history_unlock(mstb->mgr);
   1714 	kref_put(&mstb->topology_kref, drm_dp_destroy_mst_branch_device);
   1715 }
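
/*
 * The typical pattern for the branch helpers above, as used later in this
 * file (e.g. drm_dp_mst_link_probe_work()): look the branch up under
 * &drm_dp_mst_topology_mgr.lock with the try_get variant, then balance it
 * with a put once the caller is done:
 *
 *	mutex_lock(&mgr->lock);
 *	mstb = mgr->mst_primary;
 *	if (mstb && !drm_dp_mst_topology_try_get_mstb(mstb))
 *		mstb = NULL;
 *	mutex_unlock(&mgr->lock);
 *
 *	if (mstb) {
 *		... use mstb ...
 *		drm_dp_mst_topology_put_mstb(mstb);
 *	}
 */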
   1716 
   1717 static void drm_dp_destroy_port(struct kref *kref)
   1718 {
   1719 	struct drm_dp_mst_port *port =
   1720 		container_of(kref, struct drm_dp_mst_port, topology_kref);
   1721 	struct drm_dp_mst_topology_mgr *mgr = port->mgr;
   1722 
   1723 	drm_dp_mst_dump_port_topology_history(port);
   1724 
   1725 	/* There's nothing that needs locking to destroy an input port yet */
   1726 	if (port->input) {
   1727 		drm_dp_mst_put_port_malloc(port);
   1728 		return;
   1729 	}
   1730 
   1731 	kfree(port->cached_edid);
   1732 
   1733 	/*
   1734 	 * we can't destroy the connector here, as we might be holding the
   1735 	 * mode_config.mutex from an EDID retrieval
   1736 	 */
   1737 	mutex_lock(&mgr->delayed_destroy_lock);
   1738 	list_add(&port->next, &mgr->destroy_port_list);
   1739 	mutex_unlock(&mgr->delayed_destroy_lock);
   1740 	schedule_work(&mgr->delayed_destroy_work);
   1741 }
   1742 
   1743 /**
   1744  * drm_dp_mst_topology_try_get_port() - Increment the topology refcount of a
   1745  * port unless it's zero
   1746  * @port: &struct drm_dp_mst_port to increment the topology refcount of
   1747  *
   1748  * Attempts to grab a topology reference to @port, if it hasn't yet been
   1749  * removed from the topology (e.g. &drm_dp_mst_port.topology_kref has reached
   1750  * 0). Holding a topology reference implies that a malloc reference will be
   1751  * held to @port as long as the user holds the topology reference.
   1752  *
   1753  * Care should be taken to ensure that the user has at least one malloc
   1754  * reference to @port. If you already have a topology reference to @port, you
   1755  * should use drm_dp_mst_topology_get_port() instead.
   1756  *
   1757  * See also:
   1758  * drm_dp_mst_topology_get_port()
   1759  * drm_dp_mst_topology_put_port()
   1760  *
   1761  * Returns:
   1762  * * 1: A topology reference was grabbed successfully
   1763  * * 0: @port is no longer in the topology, no reference was grabbed
   1764  */
   1765 static int __must_check
   1766 drm_dp_mst_topology_try_get_port(struct drm_dp_mst_port *port)
   1767 {
   1768 	int ret;
   1769 
   1770 	topology_ref_history_lock(port->mgr);
   1771 	ret = kref_get_unless_zero(&port->topology_kref);
   1772 	if (ret) {
   1773 		DRM_DEBUG("port %p (%d)\n",
   1774 			  port, kref_read(&port->topology_kref));
   1775 		save_port_topology_ref(port, DRM_DP_MST_TOPOLOGY_REF_GET);
   1776 	}
   1777 
   1778 	topology_ref_history_unlock(port->mgr);
   1779 	return ret;
   1780 }
   1781 
   1782 /**
   1783  * drm_dp_mst_topology_get_port() - Increment the topology refcount of a port
   1784  * @port: The &struct drm_dp_mst_port to increment the topology refcount of
   1785  *
    1786  * Increments &drm_dp_mst_port.topology_kref without checking whether or
   1787  * not it's already reached 0. This is only valid to use in scenarios where
   1788  * you are already guaranteed to have at least one active topology reference
   1789  * to @port. Otherwise, drm_dp_mst_topology_try_get_port() must be used.
   1790  *
   1791  * See also:
   1792  * drm_dp_mst_topology_try_get_port()
   1793  * drm_dp_mst_topology_put_port()
   1794  */
   1795 static void drm_dp_mst_topology_get_port(struct drm_dp_mst_port *port)
   1796 {
   1797 	topology_ref_history_lock(port->mgr);
   1798 
   1799 	WARN_ON(kref_read(&port->topology_kref) == 0);
   1800 	kref_get(&port->topology_kref);
   1801 	DRM_DEBUG("port %p (%d)\n", port, kref_read(&port->topology_kref));
   1802 	save_port_topology_ref(port, DRM_DP_MST_TOPOLOGY_REF_GET);
   1803 
   1804 	topology_ref_history_unlock(port->mgr);
   1805 }
   1806 
   1807 /**
   1808  * drm_dp_mst_topology_put_port() - release a topology reference to a port
   1809  * @port: The &struct drm_dp_mst_port to release the topology reference from
   1810  *
   1811  * Releases a topology reference from @port by decrementing
   1812  * &drm_dp_mst_port.topology_kref.
   1813  *
   1814  * See also:
   1815  * drm_dp_mst_topology_try_get_port()
   1816  * drm_dp_mst_topology_get_port()
   1817  */
   1818 static void drm_dp_mst_topology_put_port(struct drm_dp_mst_port *port)
   1819 {
   1820 	topology_ref_history_lock(port->mgr);
   1821 
   1822 	DRM_DEBUG("port %p (%d)\n",
   1823 		  port, kref_read(&port->topology_kref) - 1);
   1824 	save_port_topology_ref(port, DRM_DP_MST_TOPOLOGY_REF_PUT);
   1825 
   1826 	topology_ref_history_unlock(port->mgr);
   1827 	kref_put(&port->topology_kref, drm_dp_destroy_port);
   1828 }
   1829 
   1830 static struct drm_dp_mst_branch *
   1831 drm_dp_mst_topology_get_mstb_validated_locked(struct drm_dp_mst_branch *mstb,
   1832 					      struct drm_dp_mst_branch *to_find)
   1833 {
   1834 	struct drm_dp_mst_port *port;
   1835 	struct drm_dp_mst_branch *rmstb;
   1836 
   1837 	if (to_find == mstb)
   1838 		return mstb;
   1839 
   1840 	list_for_each_entry(port, &mstb->ports, next) {
   1841 		if (port->mstb) {
   1842 			rmstb = drm_dp_mst_topology_get_mstb_validated_locked(
   1843 			    port->mstb, to_find);
   1844 			if (rmstb)
   1845 				return rmstb;
   1846 		}
   1847 	}
   1848 	return NULL;
   1849 }
   1850 
   1851 static struct drm_dp_mst_branch *
   1852 drm_dp_mst_topology_get_mstb_validated(struct drm_dp_mst_topology_mgr *mgr,
   1853 				       struct drm_dp_mst_branch *mstb)
   1854 {
   1855 	struct drm_dp_mst_branch *rmstb = NULL;
   1856 
   1857 	mutex_lock(&mgr->lock);
   1858 	if (mgr->mst_primary) {
   1859 		rmstb = drm_dp_mst_topology_get_mstb_validated_locked(
   1860 		    mgr->mst_primary, mstb);
   1861 
   1862 		if (rmstb && !drm_dp_mst_topology_try_get_mstb(rmstb))
   1863 			rmstb = NULL;
   1864 	}
   1865 	mutex_unlock(&mgr->lock);
   1866 	return rmstb;
   1867 }
   1868 
   1869 static struct drm_dp_mst_port *
   1870 drm_dp_mst_topology_get_port_validated_locked(struct drm_dp_mst_branch *mstb,
   1871 					      struct drm_dp_mst_port *to_find)
   1872 {
   1873 	struct drm_dp_mst_port *port, *mport;
   1874 
   1875 	list_for_each_entry(port, &mstb->ports, next) {
   1876 		if (port == to_find)
   1877 			return port;
   1878 
   1879 		if (port->mstb) {
   1880 			mport = drm_dp_mst_topology_get_port_validated_locked(
   1881 			    port->mstb, to_find);
   1882 			if (mport)
   1883 				return mport;
   1884 		}
   1885 	}
   1886 	return NULL;
   1887 }
   1888 
   1889 static struct drm_dp_mst_port *
   1890 drm_dp_mst_topology_get_port_validated(struct drm_dp_mst_topology_mgr *mgr,
   1891 				       struct drm_dp_mst_port *port)
   1892 {
   1893 	struct drm_dp_mst_port *rport = NULL;
   1894 
   1895 	mutex_lock(&mgr->lock);
   1896 	if (mgr->mst_primary) {
   1897 		rport = drm_dp_mst_topology_get_port_validated_locked(
   1898 		    mgr->mst_primary, port);
   1899 
   1900 		if (rport && !drm_dp_mst_topology_try_get_port(rport))
   1901 			rport = NULL;
   1902 	}
   1903 	mutex_unlock(&mgr->lock);
   1904 	return rport;
   1905 }
   1906 
   1907 static struct drm_dp_mst_port *drm_dp_get_port(struct drm_dp_mst_branch *mstb, u8 port_num)
   1908 {
   1909 	struct drm_dp_mst_port *port;
   1910 	int ret;
   1911 
   1912 	list_for_each_entry(port, &mstb->ports, next) {
   1913 		if (port->port_num == port_num) {
   1914 			ret = drm_dp_mst_topology_try_get_port(port);
   1915 			return ret ? port : NULL;
   1916 		}
   1917 	}
   1918 
   1919 	return NULL;
   1920 }
   1921 
   1922 /*
   1923  * calculate a new RAD for this MST branch device
   1924  * if parent has an LCT of 2 then it has 1 nibble of RAD,
   1925  * if parent has an LCT of 3 then it has 2 nibbles of RAD,
   1926  */
   1927 static u8 drm_dp_calculate_rad(struct drm_dp_mst_port *port,
   1928 				 u8 *rad)
   1929 {
   1930 	int parent_lct = port->parent->lct;
   1931 	int shift = 4;
   1932 	int idx = (parent_lct - 1) / 2;
   1933 	if (parent_lct > 1) {
   1934 		memcpy(rad, port->parent->rad, idx + 1);
   1935 		shift = (parent_lct % 2) ? 4 : 0;
   1936 	} else
   1937 		rad[0] = 0;
   1938 
   1939 	rad[idx] |= port->port_num << shift;
   1940 	return parent_lct + 1;
   1941 }
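
/*
 * Worked example: with the primary branch device (LCT 1) as the parent and
 * port_num 2, the child ends up with LCT 2 and rad[0] = 0x20 (the first hop
 * is stored in the upper nibble). A grandchild hanging off that branch's
 * port 1 then gets LCT 3 and rad[0] = 0x21, i.e. hop 1 = port 2, hop 2 =
 * port 1, which is the nibble order build_mst_prop_path() decodes below.
 */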
   1942 
   1943 static bool drm_dp_mst_is_dp_mst_end_device(u8 pdt, bool mcs)
   1944 {
   1945 	switch (pdt) {
   1946 	case DP_PEER_DEVICE_DP_LEGACY_CONV:
   1947 	case DP_PEER_DEVICE_SST_SINK:
   1948 		return true;
   1949 	case DP_PEER_DEVICE_MST_BRANCHING:
   1950 		/* For sst branch device */
   1951 		if (!mcs)
   1952 			return true;
   1953 
   1954 		return false;
   1955 	}
   1956 	return true;
   1957 }
   1958 
   1959 static int
   1960 drm_dp_port_set_pdt(struct drm_dp_mst_port *port, u8 new_pdt,
   1961 		    bool new_mcs)
   1962 {
   1963 	struct drm_dp_mst_topology_mgr *mgr = port->mgr;
   1964 	struct drm_dp_mst_branch *mstb;
   1965 	u8 rad[8], lct;
   1966 	int ret = 0;
   1967 
   1968 	if (port->pdt == new_pdt && port->mcs == new_mcs)
   1969 		return 0;
   1970 
   1971 	/* Teardown the old pdt, if there is one */
   1972 	if (port->pdt != DP_PEER_DEVICE_NONE) {
   1973 		if (drm_dp_mst_is_dp_mst_end_device(port->pdt, port->mcs)) {
   1974 			/*
   1975 			 * If the new PDT would also have an i2c bus,
   1976 			 * don't bother with reregistering it
   1977 			 */
   1978 			if (new_pdt != DP_PEER_DEVICE_NONE &&
   1979 			    drm_dp_mst_is_dp_mst_end_device(new_pdt, new_mcs)) {
   1980 				port->pdt = new_pdt;
   1981 				port->mcs = new_mcs;
   1982 				return 0;
   1983 			}
   1984 
   1985 			/* remove i2c over sideband */
   1986 			drm_dp_mst_unregister_i2c_bus(&port->aux);
   1987 		} else {
   1988 			mutex_lock(&mgr->lock);
   1989 			drm_dp_mst_topology_put_mstb(port->mstb);
   1990 			port->mstb = NULL;
   1991 			mutex_unlock(&mgr->lock);
   1992 		}
   1993 	}
   1994 
   1995 	port->pdt = new_pdt;
   1996 	port->mcs = new_mcs;
   1997 
   1998 	if (port->pdt != DP_PEER_DEVICE_NONE) {
   1999 		if (drm_dp_mst_is_dp_mst_end_device(port->pdt, port->mcs)) {
   2000 			/* add i2c over sideband */
   2001 			ret = drm_dp_mst_register_i2c_bus(&port->aux);
   2002 		} else {
   2003 			lct = drm_dp_calculate_rad(port, rad);
   2004 			mstb = drm_dp_add_mst_branch_device(lct, rad);
   2005 			if (!mstb) {
   2006 				ret = -ENOMEM;
   2007 				DRM_ERROR("Failed to create MSTB for port %p",
   2008 					  port);
   2009 				goto out;
   2010 			}
   2011 
   2012 			mutex_lock(&mgr->lock);
   2013 			port->mstb = mstb;
   2014 			mstb->mgr = port->mgr;
   2015 			mstb->port_parent = port;
   2016 
   2017 			/*
   2018 			 * Make sure this port's memory allocation stays
   2019 			 * around until its child MSTB releases it
   2020 			 */
   2021 			drm_dp_mst_get_port_malloc(port);
   2022 			mutex_unlock(&mgr->lock);
   2023 
   2024 			/* And make sure we send a link address for this */
   2025 			ret = 1;
   2026 		}
   2027 	}
   2028 
   2029 out:
   2030 	if (ret < 0)
   2031 		port->pdt = DP_PEER_DEVICE_NONE;
   2032 	return ret;
   2033 }
   2034 
   2035 /**
   2036  * drm_dp_mst_dpcd_read() - read a series of bytes from the DPCD via sideband
   2037  * @aux: Fake sideband AUX CH
   2038  * @offset: address of the (first) register to read
   2039  * @buffer: buffer to store the register values
   2040  * @size: number of bytes in @buffer
   2041  *
   2042  * Performs the same functionality for remote devices via
   2043  * sideband messaging as drm_dp_dpcd_read() does for local
   2044  * devices via actual AUX CH.
   2045  *
   2046  * Return: Number of bytes read, or negative error code on failure.
   2047  */
   2048 ssize_t drm_dp_mst_dpcd_read(struct drm_dp_aux *aux,
   2049 			     unsigned int offset, void *buffer, size_t size)
   2050 {
   2051 	struct drm_dp_mst_port *port = container_of(aux, struct drm_dp_mst_port,
   2052 						    aux);
   2053 
   2054 	return drm_dp_send_dpcd_read(port->mgr, port,
   2055 				     offset, size, buffer);
   2056 }
   2057 
   2058 /**
   2059  * drm_dp_mst_dpcd_write() - write a series of bytes to the DPCD via sideband
   2060  * @aux: Fake sideband AUX CH
   2061  * @offset: address of the (first) register to write
   2062  * @buffer: buffer containing the values to write
   2063  * @size: number of bytes in @buffer
   2064  *
   2065  * Performs the same functionality for remote devices via
   2066  * sideband messaging as drm_dp_dpcd_write() does for local
   2067  * devices via actual AUX CH.
   2068  *
   2069  * Return: 0 on success, negative error code on failure.
   2070  */
   2071 ssize_t drm_dp_mst_dpcd_write(struct drm_dp_aux *aux,
   2072 			      unsigned int offset, void *buffer, size_t size)
   2073 {
   2074 	struct drm_dp_mst_port *port = container_of(aux, struct drm_dp_mst_port,
   2075 						    aux);
   2076 
   2077 	return drm_dp_send_dpcd_write(port->mgr, port,
   2078 				      offset, size, buffer);
   2079 }
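
/*
 * Drivers normally do not call these two hooks directly: the remote AUX
 * channel is created with .is_remote set in drm_dp_mst_add_port(), so the
 * generic drm_dp_dpcd_read()/drm_dp_dpcd_write() helpers route to them
 * automatically. A sketch of a driver-side read of a remote sink's DPCD
 * revision (the port is assumed valid and held by the caller):
 *
 *	u8 rev;
 *	ssize_t err = drm_dp_dpcd_readb(&port->aux, DP_DPCD_REV, &rev);
 *
 *	if (err < 0)
 *		return err;
 */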
   2080 
   2081 static void drm_dp_check_mstb_guid(struct drm_dp_mst_branch *mstb, u8 *guid)
   2082 {
   2083 	int ret;
   2084 
   2085 	memcpy(mstb->guid, guid, 16);
   2086 
   2087 	if (!drm_dp_validate_guid(mstb->mgr, mstb->guid)) {
   2088 		if (mstb->port_parent) {
   2089 			ret = drm_dp_send_dpcd_write(
   2090 					mstb->mgr,
   2091 					mstb->port_parent,
   2092 					DP_GUID,
   2093 					16,
   2094 					mstb->guid);
   2095 		} else {
   2096 
   2097 			ret = drm_dp_dpcd_write(
   2098 					mstb->mgr->aux,
   2099 					DP_GUID,
   2100 					mstb->guid,
   2101 					16);
   2102 		}
   2103 	}
   2104 }
   2105 
   2106 static void build_mst_prop_path(const struct drm_dp_mst_branch *mstb,
   2107 				int pnum,
   2108 				char *proppath,
   2109 				size_t proppath_size)
   2110 {
   2111 	int i;
   2112 	char temp[8];
   2113 	snprintf(proppath, proppath_size, "mst:%d", mstb->mgr->conn_base_id);
   2114 	for (i = 0; i < (mstb->lct - 1); i++) {
   2115 		int shift = (i % 2) ? 0 : 4;
   2116 		int port_num = (mstb->rad[i / 2] >> shift) & 0xf;
   2117 		snprintf(temp, sizeof(temp), "-%d", port_num);
   2118 		strlcat(proppath, temp, proppath_size);
   2119 	}
   2120 	snprintf(temp, sizeof(temp), "-%d", pnum);
   2121 	strlcat(proppath, temp, proppath_size);
   2122 }
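
/*
 * Example of the resulting connector property path: with a conn_base_id of
 * 40 (an arbitrary value for illustration), an MSTB at LCT 2 whose rad[0] is
 * 0x10 (i.e. reached through port 1 of the primary branch), and pnum 8, the
 * buffer ends up holding "mst:40-1-8".
 */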
   2123 
   2124 /**
   2125  * drm_dp_mst_connector_late_register() - Late MST connector registration
   2126  * @connector: The MST connector
   2127  * @port: The MST port for this connector
   2128  *
   2129  * Helper to register the remote aux device for this MST port. Drivers should
   2130  * call this from their mst connector's late_register hook to enable MST aux
   2131  * devices.
   2132  *
   2133  * Return: 0 on success, negative error code on failure.
   2134  */
   2135 int drm_dp_mst_connector_late_register(struct drm_connector *connector,
   2136 				       struct drm_dp_mst_port *port)
   2137 {
   2138 	DRM_DEBUG_KMS("registering %s remote bus for %s\n",
   2139 		      port->aux.name, connector->kdev->kobj.name);
   2140 
   2141 	port->aux.dev = connector->kdev;
   2142 	return drm_dp_aux_register_devnode(&port->aux);
   2143 }
   2144 EXPORT_SYMBOL(drm_dp_mst_connector_late_register);
   2145 
   2146 /**
   2147  * drm_dp_mst_connector_early_unregister() - Early MST connector unregistration
   2148  * @connector: The MST connector
   2149  * @port: The MST port for this connector
   2150  *
   2151  * Helper to unregister the remote aux device for this MST port, registered by
   2152  * drm_dp_mst_connector_late_register(). Drivers should call this from their mst
   2153  * connector's early_unregister hook.
   2154  */
   2155 void drm_dp_mst_connector_early_unregister(struct drm_connector *connector,
   2156 					   struct drm_dp_mst_port *port)
   2157 {
   2158 	DRM_DEBUG_KMS("unregistering %s remote bus for %s\n",
   2159 		      port->aux.name, connector->kdev->kobj.name);
   2160 	drm_dp_aux_unregister_devnode(&port->aux);
   2161 }
   2162 EXPORT_SYMBOL(drm_dp_mst_connector_early_unregister);
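
/*
 * Sketch of how a driver can wire the two helpers above into its MST
 * connector hooks; struct my_mst_connector and to_my_mst_connector() are
 * hypothetical driver-private details:
 *
 *	static int my_mst_late_register(struct drm_connector *connector)
 *	{
 *		struct my_mst_connector *c = to_my_mst_connector(connector);
 *
 *		return drm_dp_mst_connector_late_register(connector, c->port);
 *	}
 *
 *	static void my_mst_early_unregister(struct drm_connector *connector)
 *	{
 *		struct my_mst_connector *c = to_my_mst_connector(connector);
 *
 *		drm_dp_mst_connector_early_unregister(connector, c->port);
 *	}
 *
 *	static const struct drm_connector_funcs my_mst_connector_funcs = {
 *		...
 *		.late_register = my_mst_late_register,
 *		.early_unregister = my_mst_early_unregister,
 *	};
 */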
   2163 
   2164 static void
   2165 drm_dp_mst_port_add_connector(struct drm_dp_mst_branch *mstb,
   2166 			      struct drm_dp_mst_port *port)
   2167 {
   2168 	struct drm_dp_mst_topology_mgr *mgr = port->mgr;
   2169 	char proppath[255];
   2170 	int ret;
   2171 
   2172 	build_mst_prop_path(mstb, port->port_num, proppath, sizeof(proppath));
   2173 	port->connector = mgr->cbs->add_connector(mgr, port, proppath);
   2174 	if (!port->connector) {
   2175 		ret = -ENOMEM;
   2176 		goto error;
   2177 	}
   2178 
   2179 	if (port->pdt != DP_PEER_DEVICE_NONE &&
   2180 	    drm_dp_mst_is_dp_mst_end_device(port->pdt, port->mcs)) {
   2181 		port->cached_edid = drm_get_edid(port->connector,
   2182 						 &port->aux.ddc);
   2183 		drm_connector_set_tile_property(port->connector);
   2184 	}
   2185 
   2186 	mgr->cbs->register_connector(port->connector);
   2187 	return;
   2188 
   2189 error:
   2190 	DRM_ERROR("Failed to create connector for port %p: %d\n", port, ret);
   2191 }
   2192 
   2193 /*
   2194  * Drop a topology reference, and unlink the port from the in-memory topology
   2195  * layout
   2196  */
   2197 static void
   2198 drm_dp_mst_topology_unlink_port(struct drm_dp_mst_topology_mgr *mgr,
   2199 				struct drm_dp_mst_port *port)
   2200 {
   2201 	mutex_lock(&mgr->lock);
   2202 	port->parent->num_ports--;
   2203 	list_del(&port->next);
   2204 	mutex_unlock(&mgr->lock);
   2205 	drm_dp_mst_topology_put_port(port);
   2206 }
   2207 
   2208 static struct drm_dp_mst_port *
   2209 drm_dp_mst_add_port(struct drm_device *dev,
   2210 		    struct drm_dp_mst_topology_mgr *mgr,
   2211 		    struct drm_dp_mst_branch *mstb, u8 port_number)
   2212 {
   2213 	struct drm_dp_mst_port *port = kzalloc(sizeof(*port), GFP_KERNEL);
   2214 
   2215 	if (!port)
   2216 		return NULL;
   2217 
   2218 	kref_init(&port->topology_kref);
   2219 	kref_init(&port->malloc_kref);
   2220 	port->parent = mstb;
   2221 	port->port_num = port_number;
   2222 	port->mgr = mgr;
   2223 	port->aux.name = "DPMST";
   2224 	port->aux.dev = dev->dev;
   2225 	port->aux.is_remote = true;
   2226 
   2227 	/* initialize the MST downstream port's AUX crc work queue */
   2228 	drm_dp_remote_aux_init(&port->aux);
   2229 
   2230 	/*
   2231 	 * Make sure the memory allocation for our parent branch stays
   2232 	 * around until our own memory allocation is released
   2233 	 */
   2234 	drm_dp_mst_get_mstb_malloc(mstb);
   2235 
   2236 	return port;
   2237 }
   2238 
   2239 static int
   2240 drm_dp_mst_handle_link_address_port(struct drm_dp_mst_branch *mstb,
   2241 				    struct drm_device *dev,
   2242 				    struct drm_dp_link_addr_reply_port *port_msg)
   2243 {
   2244 	struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
   2245 	struct drm_dp_mst_port *port;
   2246 	int old_ddps = 0, ret;
   2247 	u8 new_pdt = DP_PEER_DEVICE_NONE;
   2248 	bool new_mcs = 0;
   2249 	bool created = false, send_link_addr = false, changed = false;
   2250 
   2251 	port = drm_dp_get_port(mstb, port_msg->port_number);
   2252 	if (!port) {
   2253 		port = drm_dp_mst_add_port(dev, mgr, mstb,
   2254 					   port_msg->port_number);
   2255 		if (!port)
   2256 			return -ENOMEM;
   2257 		created = true;
   2258 		changed = true;
   2259 	} else if (!port->input && port_msg->input_port && port->connector) {
   2260 		/* Since port->connector can't be changed here, we create a
   2261 		 * new port if input_port changes from 0 to 1
   2262 		 */
   2263 		drm_dp_mst_topology_unlink_port(mgr, port);
   2264 		drm_dp_mst_topology_put_port(port);
   2265 		port = drm_dp_mst_add_port(dev, mgr, mstb,
   2266 					   port_msg->port_number);
   2267 		if (!port)
   2268 			return -ENOMEM;
   2269 		changed = true;
   2270 		created = true;
   2271 	} else if (port->input && !port_msg->input_port) {
   2272 		changed = true;
   2273 	} else if (port->connector) {
   2274 		/* We're updating a port that's exposed to userspace, so do it
   2275 		 * under lock
   2276 		 */
   2277 		drm_modeset_lock(&mgr->base.lock, NULL);
   2278 
   2279 		old_ddps = port->ddps;
   2280 		changed = port->ddps != port_msg->ddps ||
   2281 			(port->ddps &&
   2282 			 (port->ldps != port_msg->legacy_device_plug_status ||
   2283 			  port->dpcd_rev != port_msg->dpcd_revision ||
   2284 			  port->mcs != port_msg->mcs ||
   2285 			  port->pdt != port_msg->peer_device_type ||
   2286 			  port->num_sdp_stream_sinks !=
   2287 			  port_msg->num_sdp_stream_sinks));
   2288 	}
   2289 
   2290 	port->input = port_msg->input_port;
   2291 	if (!port->input)
   2292 		new_pdt = port_msg->peer_device_type;
   2293 	new_mcs = port_msg->mcs;
   2294 	port->ddps = port_msg->ddps;
   2295 	port->ldps = port_msg->legacy_device_plug_status;
   2296 	port->dpcd_rev = port_msg->dpcd_revision;
   2297 	port->num_sdp_streams = port_msg->num_sdp_streams;
   2298 	port->num_sdp_stream_sinks = port_msg->num_sdp_stream_sinks;
   2299 
   2300 	/* manage mstb port lists with mgr lock - take a reference
   2301 	   for this list */
   2302 	if (created) {
   2303 		mutex_lock(&mgr->lock);
   2304 		drm_dp_mst_topology_get_port(port);
   2305 		list_add(&port->next, &mstb->ports);
   2306 		mstb->num_ports++;
   2307 		mutex_unlock(&mgr->lock);
   2308 	}
   2309 
   2310 	if (old_ddps != port->ddps) {
   2311 		if (port->ddps) {
   2312 			if (!port->input) {
   2313 				drm_dp_send_enum_path_resources(mgr, mstb,
   2314 								port);
   2315 			}
   2316 		} else {
   2317 			port->available_pbn = 0;
   2318 		}
   2319 	}
   2320 
   2321 	ret = drm_dp_port_set_pdt(port, new_pdt, new_mcs);
   2322 	if (ret == 1) {
   2323 		send_link_addr = true;
   2324 	} else if (ret < 0) {
   2325 		DRM_ERROR("Failed to change PDT on port %p: %d\n",
   2326 			  port, ret);
   2327 		goto fail;
   2328 	}
   2329 
   2330 	/*
   2331 	 * If this port wasn't just created, then we're reprobing because
   2332 	 * we're coming out of suspend. In this case, always resend the link
   2333 	 * address if there's an MSTB on this port
   2334 	 */
   2335 	if (!created && port->pdt == DP_PEER_DEVICE_MST_BRANCHING &&
   2336 	    port->mcs)
   2337 		send_link_addr = true;
   2338 
   2339 	if (port->connector)
   2340 		drm_modeset_unlock(&mgr->base.lock);
   2341 	else if (!port->input)
   2342 		drm_dp_mst_port_add_connector(mstb, port);
   2343 
   2344 	if (send_link_addr && port->mstb) {
   2345 		ret = drm_dp_send_link_address(mgr, port->mstb);
   2346 		if (ret == 1) /* MSTB below us changed */
   2347 			changed = true;
   2348 		else if (ret < 0)
   2349 			goto fail_put;
   2350 	}
   2351 
   2352 	/* put reference to this port */
   2353 	drm_dp_mst_topology_put_port(port);
   2354 	return changed;
   2355 
   2356 fail:
   2357 	drm_dp_mst_topology_unlink_port(mgr, port);
   2358 	if (port->connector)
   2359 		drm_modeset_unlock(&mgr->base.lock);
   2360 fail_put:
   2361 	drm_dp_mst_topology_put_port(port);
   2362 	return ret;
   2363 }
   2364 
   2365 static void
   2366 drm_dp_mst_handle_conn_stat(struct drm_dp_mst_branch *mstb,
   2367 			    struct drm_dp_connection_status_notify *conn_stat)
   2368 {
   2369 	struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
   2370 	struct drm_dp_mst_port *port;
   2371 	int old_ddps, old_input, ret, i;
   2372 	u8 new_pdt;
   2373 	bool new_mcs;
   2374 	bool dowork = false, create_connector = false;
   2375 
   2376 	port = drm_dp_get_port(mstb, conn_stat->port_number);
   2377 	if (!port)
   2378 		return;
   2379 
   2380 	if (port->connector) {
   2381 		if (!port->input && conn_stat->input_port) {
   2382 			/*
   2383 			 * We can't remove a connector from an already exposed
   2384 			 * port, so just throw the port out and make sure we
    2385 			 * reprobe the link address of its parent MSTB
   2386 			 */
   2387 			drm_dp_mst_topology_unlink_port(mgr, port);
   2388 			mstb->link_address_sent = false;
   2389 			dowork = true;
   2390 			goto out;
   2391 		}
   2392 
   2393 		/* Locking is only needed if the port's exposed to userspace */
   2394 		drm_modeset_lock(&mgr->base.lock, NULL);
   2395 	} else if (port->input && !conn_stat->input_port) {
   2396 		create_connector = true;
   2397 		/* Reprobe link address so we get num_sdp_streams */
   2398 		mstb->link_address_sent = false;
   2399 		dowork = true;
   2400 	}
   2401 
   2402 	old_ddps = port->ddps;
   2403 	old_input = port->input;
   2404 	port->input = conn_stat->input_port;
   2405 	port->ldps = conn_stat->legacy_device_plug_status;
   2406 	port->ddps = conn_stat->displayport_device_plug_status;
   2407 
   2408 	if (old_ddps != port->ddps) {
   2409 		if (port->ddps) {
   2410 			dowork = true;
   2411 		} else {
   2412 			port->available_pbn = 0;
   2413 		}
   2414 	}
   2415 
   2416 	new_pdt = port->input ? DP_PEER_DEVICE_NONE : conn_stat->peer_device_type;
   2417 	new_mcs = conn_stat->message_capability_status;
   2418 	ret = drm_dp_port_set_pdt(port, new_pdt, new_mcs);
   2419 	if (ret == 1) {
   2420 		dowork = true;
   2421 	} else if (ret < 0) {
   2422 		DRM_ERROR("Failed to change PDT for port %p: %d\n",
   2423 			  port, ret);
   2424 		dowork = false;
   2425 	}
   2426 
   2427 	if (!old_input && old_ddps != port->ddps && !port->ddps) {
   2428 		for (i = 0; i < mgr->max_payloads; i++) {
   2429 			struct drm_dp_vcpi *vcpi = mgr->proposed_vcpis[i];
   2430 			struct drm_dp_mst_port *port_validated;
   2431 
   2432 			if (!vcpi)
   2433 				continue;
   2434 
   2435 			port_validated =
   2436 				container_of(vcpi, struct drm_dp_mst_port, vcpi);
   2437 			port_validated =
   2438 				drm_dp_mst_topology_get_port_validated(mgr, port_validated);
   2439 			if (!port_validated) {
   2440 				mutex_lock(&mgr->payload_lock);
   2441 				vcpi->num_slots = 0;
   2442 				mutex_unlock(&mgr->payload_lock);
   2443 			} else {
   2444 				drm_dp_mst_topology_put_port(port_validated);
   2445 			}
   2446 		}
   2447 	}
   2448 
   2449 	if (port->connector)
   2450 		drm_modeset_unlock(&mgr->base.lock);
   2451 	else if (create_connector)
   2452 		drm_dp_mst_port_add_connector(mstb, port);
   2453 
   2454 out:
   2455 	drm_dp_mst_topology_put_port(port);
   2456 	if (dowork)
   2457 		queue_work(system_long_wq, &mstb->mgr->work);
   2458 }
   2459 
   2460 static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_topology_mgr *mgr,
   2461 							       u8 lct, u8 *rad)
   2462 {
   2463 	struct drm_dp_mst_branch *mstb;
   2464 	struct drm_dp_mst_port *port;
   2465 	int i, ret;
    2466 	/* find the branch device by iterating down */
   2467 
   2468 	mutex_lock(&mgr->lock);
   2469 	mstb = mgr->mst_primary;
   2470 
   2471 	if (!mstb)
   2472 		goto out;
   2473 
   2474 	for (i = 0; i < lct - 1; i++) {
   2475 		int shift = (i % 2) ? 0 : 4;
   2476 		int port_num = (rad[i / 2] >> shift) & 0xf;
   2477 
   2478 		list_for_each_entry(port, &mstb->ports, next) {
   2479 			if (port->port_num == port_num) {
   2480 				mstb = port->mstb;
   2481 				if (!mstb) {
   2482 					DRM_ERROR("failed to lookup MSTB with lct %d, rad %02x\n", lct, rad[0]);
   2483 					goto out;
   2484 				}
   2485 
   2486 				break;
   2487 			}
   2488 		}
   2489 	}
   2490 	ret = drm_dp_mst_topology_try_get_mstb(mstb);
   2491 	if (!ret)
   2492 		mstb = NULL;
   2493 out:
   2494 	mutex_unlock(&mgr->lock);
   2495 	return mstb;
   2496 }
   2497 
   2498 static struct drm_dp_mst_branch *get_mst_branch_device_by_guid_helper(
   2499 	struct drm_dp_mst_branch *mstb,
   2500 	const uint8_t *guid)
   2501 {
   2502 	struct drm_dp_mst_branch *found_mstb;
   2503 	struct drm_dp_mst_port *port;
   2504 
   2505 	if (memcmp(mstb->guid, guid, 16) == 0)
   2506 		return mstb;
   2507 
   2508 
   2509 	list_for_each_entry(port, &mstb->ports, next) {
   2510 		if (!port->mstb)
   2511 			continue;
   2512 
   2513 		found_mstb = get_mst_branch_device_by_guid_helper(port->mstb, guid);
   2514 
   2515 		if (found_mstb)
   2516 			return found_mstb;
   2517 	}
   2518 
   2519 	return NULL;
   2520 }
   2521 
   2522 static struct drm_dp_mst_branch *
   2523 drm_dp_get_mst_branch_device_by_guid(struct drm_dp_mst_topology_mgr *mgr,
   2524 				     const uint8_t *guid)
   2525 {
   2526 	struct drm_dp_mst_branch *mstb;
   2527 	int ret;
   2528 
    2529 	/* find the branch device by iterating down */
   2530 	mutex_lock(&mgr->lock);
   2531 
   2532 	mstb = get_mst_branch_device_by_guid_helper(mgr->mst_primary, guid);
   2533 	if (mstb) {
   2534 		ret = drm_dp_mst_topology_try_get_mstb(mstb);
   2535 		if (!ret)
   2536 			mstb = NULL;
   2537 	}
   2538 
   2539 	mutex_unlock(&mgr->lock);
   2540 	return mstb;
   2541 }
   2542 
   2543 static int drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
   2544 					       struct drm_dp_mst_branch *mstb)
   2545 {
   2546 	struct drm_dp_mst_port *port;
   2547 	int ret;
   2548 	bool changed = false;
   2549 
   2550 	if (!mstb->link_address_sent) {
   2551 		ret = drm_dp_send_link_address(mgr, mstb);
   2552 		if (ret == 1)
   2553 			changed = true;
   2554 		else if (ret < 0)
   2555 			return ret;
   2556 	}
   2557 
   2558 	list_for_each_entry(port, &mstb->ports, next) {
   2559 		struct drm_dp_mst_branch *mstb_child = NULL;
   2560 
   2561 		if (port->input || !port->ddps)
   2562 			continue;
   2563 
   2564 		if (!port->available_pbn) {
   2565 			drm_modeset_lock(&mgr->base.lock, NULL);
   2566 			drm_dp_send_enum_path_resources(mgr, mstb, port);
   2567 			drm_modeset_unlock(&mgr->base.lock);
   2568 			changed = true;
   2569 		}
   2570 
   2571 		if (port->mstb)
   2572 			mstb_child = drm_dp_mst_topology_get_mstb_validated(
   2573 			    mgr, port->mstb);
   2574 
   2575 		if (mstb_child) {
   2576 			ret = drm_dp_check_and_send_link_address(mgr,
   2577 								 mstb_child);
   2578 			drm_dp_mst_topology_put_mstb(mstb_child);
   2579 			if (ret == 1)
   2580 				changed = true;
   2581 			else if (ret < 0)
   2582 				return ret;
   2583 		}
   2584 	}
   2585 
   2586 	return changed;
   2587 }
   2588 
   2589 static void drm_dp_mst_link_probe_work(struct work_struct *work)
   2590 {
   2591 	struct drm_dp_mst_topology_mgr *mgr =
   2592 		container_of(work, struct drm_dp_mst_topology_mgr, work);
   2593 	struct drm_device *dev = mgr->dev;
   2594 	struct drm_dp_mst_branch *mstb;
   2595 	int ret;
   2596 	bool clear_payload_id_table;
   2597 
   2598 	mutex_lock(&mgr->probe_lock);
   2599 
   2600 	mutex_lock(&mgr->lock);
   2601 	clear_payload_id_table = !mgr->payload_id_table_cleared;
   2602 	mgr->payload_id_table_cleared = true;
   2603 
   2604 	mstb = mgr->mst_primary;
   2605 	if (mstb) {
   2606 		ret = drm_dp_mst_topology_try_get_mstb(mstb);
   2607 		if (!ret)
   2608 			mstb = NULL;
   2609 	}
   2610 	mutex_unlock(&mgr->lock);
   2611 	if (!mstb) {
   2612 		mutex_unlock(&mgr->probe_lock);
   2613 		return;
   2614 	}
   2615 
   2616 	/*
   2617 	 * Certain branch devices seem to incorrectly report an available_pbn
   2618 	 * of 0 on downstream sinks, even after clearing the
   2619 	 * DP_PAYLOAD_ALLOCATE_* registers in
   2620 	 * drm_dp_mst_topology_mgr_set_mst(). Namely, the CableMatters USB-C
   2621 	 * 2x DP hub. Sending a CLEAR_PAYLOAD_ID_TABLE message seems to make
   2622 	 * things work again.
   2623 	 */
   2624 	if (clear_payload_id_table) {
   2625 		DRM_DEBUG_KMS("Clearing payload ID table\n");
   2626 		drm_dp_send_clear_payload_id_table(mgr, mstb);
   2627 	}
   2628 
   2629 	ret = drm_dp_check_and_send_link_address(mgr, mstb);
   2630 	drm_dp_mst_topology_put_mstb(mstb);
   2631 
   2632 	mutex_unlock(&mgr->probe_lock);
   2633 	if (ret)
   2634 		drm_kms_helper_hotplug_event(dev);
   2635 }
   2636 
   2637 static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
   2638 				 u8 *guid)
   2639 {
   2640 	u64 salt;
   2641 
   2642 	if (memchr_inv(guid, 0, 16))
   2643 		return true;
   2644 
   2645 	salt = get_jiffies_64();
   2646 
   2647 	memcpy(&guid[0], &salt, sizeof(u64));
   2648 	memcpy(&guid[8], &salt, sizeof(u64));
   2649 
   2650 	return false;
   2651 }
   2652 
   2653 static int build_dpcd_read(struct drm_dp_sideband_msg_tx *msg, u8 port_num, u32 offset, u8 num_bytes)
   2654 {
   2655 	struct drm_dp_sideband_msg_req_body req;
   2656 
   2657 	req.req_type = DP_REMOTE_DPCD_READ;
   2658 	req.u.dpcd_read.port_number = port_num;
   2659 	req.u.dpcd_read.dpcd_address = offset;
   2660 	req.u.dpcd_read.num_bytes = num_bytes;
   2661 	drm_dp_encode_sideband_req(&req, msg);
   2662 
   2663 	return 0;
   2664 }
   2665 
   2666 static int drm_dp_send_sideband_msg(struct drm_dp_mst_topology_mgr *mgr,
   2667 				    bool up, u8 *msg, int len)
   2668 {
   2669 	int ret;
   2670 	int regbase = up ? DP_SIDEBAND_MSG_UP_REP_BASE : DP_SIDEBAND_MSG_DOWN_REQ_BASE;
   2671 	int tosend, total, offset;
   2672 	int retries = 0;
   2673 
   2674 retry:
   2675 	total = len;
   2676 	offset = 0;
   2677 	do {
   2678 		tosend = min3(mgr->max_dpcd_transaction_bytes, 16, total);
   2679 
   2680 		ret = drm_dp_dpcd_write(mgr->aux, regbase + offset,
   2681 					&msg[offset],
   2682 					tosend);
   2683 		if (ret != tosend) {
   2684 			if (ret == -EIO && retries < 5) {
   2685 				retries++;
   2686 				goto retry;
   2687 			}
   2688 			DRM_DEBUG_KMS("failed to dpcd write %d %d\n", tosend, ret);
   2689 
   2690 			return -EIO;
   2691 		}
   2692 		offset += tosend;
   2693 		total -= tosend;
   2694 	} while (total > 0);
   2695 	return 0;
   2696 }
   2697 
   2698 static int set_hdr_from_dst_qlock(struct drm_dp_sideband_msg_hdr *hdr,
   2699 				  struct drm_dp_sideband_msg_tx *txmsg)
   2700 {
   2701 	struct drm_dp_mst_branch *mstb = txmsg->dst;
   2702 	u8 req_type;
   2703 
   2704 	/* both msg slots are full */
   2705 	if (txmsg->seqno == -1) {
   2706 		if (mstb->tx_slots[0] && mstb->tx_slots[1]) {
   2707 			DRM_DEBUG_KMS("%s: failed to find slot\n", __func__);
   2708 			return -EAGAIN;
   2709 		}
   2710 		if (mstb->tx_slots[0] == NULL && mstb->tx_slots[1] == NULL) {
   2711 			txmsg->seqno = mstb->last_seqno;
   2712 			mstb->last_seqno ^= 1;
   2713 		} else if (mstb->tx_slots[0] == NULL)
   2714 			txmsg->seqno = 0;
   2715 		else
   2716 			txmsg->seqno = 1;
   2717 		mstb->tx_slots[txmsg->seqno] = txmsg;
   2718 	}
   2719 
   2720 	req_type = txmsg->msg[0] & 0x7f;
   2721 	if (req_type == DP_CONNECTION_STATUS_NOTIFY ||
   2722 		req_type == DP_RESOURCE_STATUS_NOTIFY)
   2723 		hdr->broadcast = 1;
   2724 	else
   2725 		hdr->broadcast = 0;
   2726 	hdr->path_msg = txmsg->path_msg;
   2727 	hdr->lct = mstb->lct;
   2728 	hdr->lcr = mstb->lct - 1;
   2729 	if (mstb->lct > 1)
   2730 		memcpy(hdr->rad, mstb->rad, mstb->lct / 2);
   2731 	hdr->seqno = txmsg->seqno;
   2732 	return 0;
   2733 }
   2734 /*
   2735  * process a single block of the next message in the sideband queue
   2736  */
   2737 static int process_single_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
   2738 				   struct drm_dp_sideband_msg_tx *txmsg,
   2739 				   bool up)
   2740 {
   2741 	u8 chunk[48];
   2742 	struct drm_dp_sideband_msg_hdr hdr;
   2743 	int len, space, idx, tosend;
   2744 	int ret;
   2745 
   2746 	memset(&hdr, 0, sizeof(struct drm_dp_sideband_msg_hdr));
   2747 
   2748 	if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED) {
   2749 		txmsg->seqno = -1;
   2750 		txmsg->state = DRM_DP_SIDEBAND_TX_START_SEND;
   2751 	}
   2752 
   2753 	/* make hdr from dst mst - for replies use seqno
   2754 	   otherwise assign one */
   2755 	ret = set_hdr_from_dst_qlock(&hdr, txmsg);
   2756 	if (ret < 0)
   2757 		return ret;
   2758 
   2759 	/* amount left to send in this message */
   2760 	len = txmsg->cur_len - txmsg->cur_offset;
   2761 
   2762 	/* 48 - sideband msg size - 1 byte for data CRC, x header bytes */
   2763 	space = 48 - 1 - drm_dp_calc_sb_hdr_size(&hdr);
   2764 
   2765 	tosend = min(len, space);
   2766 	if (len == txmsg->cur_len)
   2767 		hdr.somt = 1;
   2768 	if (space >= len)
   2769 		hdr.eomt = 1;
   2770 
   2771 
   2772 	hdr.msg_len = tosend + 1;
   2773 	drm_dp_encode_sideband_msg_hdr(&hdr, chunk, &idx);
   2774 	memcpy(&chunk[idx], &txmsg->msg[txmsg->cur_offset], tosend);
   2775 	/* add crc at end */
   2776 	drm_dp_crc_sideband_chunk_req(&chunk[idx], tosend);
   2777 	idx += tosend + 1;
   2778 
   2779 	ret = drm_dp_send_sideband_msg(mgr, up, chunk, idx);
   2780 	if (unlikely(ret) && drm_debug_enabled(DRM_UT_DP)) {
   2781 		struct drm_printer p = drm_debug_printer(DBG_PREFIX);
   2782 
   2783 		drm_printf(&p, "sideband msg failed to send\n");
   2784 		drm_dp_mst_dump_sideband_msg_tx(&p, txmsg);
   2785 		return ret;
   2786 	}
   2787 
   2788 	txmsg->cur_offset += tosend;
   2789 	if (txmsg->cur_offset == txmsg->cur_len) {
   2790 		txmsg->state = DRM_DP_SIDEBAND_TX_SENT;
   2791 		return 1;
   2792 	}
   2793 	return 0;
   2794 }
   2795 
   2796 static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
   2797 {
   2798 	struct drm_dp_sideband_msg_tx *txmsg;
   2799 	int ret;
   2800 
   2801 	WARN_ON(!mutex_is_locked(&mgr->qlock));
   2802 
   2803 	/* construct a chunk from the first msg in the tx_msg queue */
   2804 	if (list_empty(&mgr->tx_msg_downq))
   2805 		return;
   2806 
   2807 	txmsg = list_first_entry(&mgr->tx_msg_downq, struct drm_dp_sideband_msg_tx, next);
   2808 	ret = process_single_tx_qlock(mgr, txmsg, false);
   2809 	if (ret == 1) {
    2810 		/* txmsg is sent; it should be in the slots now */
   2811 		mgr->is_waiting_for_dwn_reply = true;
   2812 		list_del(&txmsg->next);
   2813 	} else if (ret) {
   2814 		DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);
   2815 		mgr->is_waiting_for_dwn_reply = false;
   2816 		list_del(&txmsg->next);
   2817 		if (txmsg->seqno != -1)
   2818 			txmsg->dst->tx_slots[txmsg->seqno] = NULL;
   2819 		txmsg->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
   2820 		wake_up_all(&mgr->tx_waitq);
   2821 	}
   2822 }
   2823 
   2824 /* called holding qlock */
   2825 static void process_single_up_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
   2826 				       struct drm_dp_sideband_msg_tx *txmsg)
   2827 {
   2828 	int ret;
   2829 
   2830 	/* construct a chunk from the first msg in the tx_msg queue */
   2831 	ret = process_single_tx_qlock(mgr, txmsg, true);
   2832 
   2833 	if (ret != 1)
   2834 		DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);
   2835 
   2836 	if (txmsg->seqno != -1) {
   2837 		WARN_ON((unsigned int)txmsg->seqno >
   2838 			ARRAY_SIZE(txmsg->dst->tx_slots));
   2839 		txmsg->dst->tx_slots[txmsg->seqno] = NULL;
   2840 	}
   2841 }
   2842 
   2843 static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr,
   2844 				 struct drm_dp_sideband_msg_tx *txmsg)
   2845 {
   2846 	mutex_lock(&mgr->qlock);
   2847 	list_add_tail(&txmsg->next, &mgr->tx_msg_downq);
   2848 
   2849 	if (drm_debug_enabled(DRM_UT_DP)) {
   2850 		struct drm_printer p = drm_debug_printer(DBG_PREFIX);
   2851 
   2852 		drm_dp_mst_dump_sideband_msg_tx(&p, txmsg);
   2853 	}
   2854 
   2855 	if (list_is_singular(&mgr->tx_msg_downq) &&
   2856 	    !mgr->is_waiting_for_dwn_reply)
   2857 		process_single_down_tx_qlock(mgr);
   2858 	mutex_unlock(&mgr->qlock);
   2859 }
   2860 
   2861 static void
   2862 drm_dp_dump_link_address(struct drm_dp_link_address_ack_reply *reply)
   2863 {
   2864 	struct drm_dp_link_addr_reply_port *port_reply;
   2865 	int i;
   2866 
   2867 	for (i = 0; i < reply->nports; i++) {
   2868 		port_reply = &reply->ports[i];
   2869 		DRM_DEBUG_KMS("port %d: input %d, pdt: %d, pn: %d, dpcd_rev: %02x, mcs: %d, ddps: %d, ldps %d, sdp %d/%d\n",
   2870 			      i,
   2871 			      port_reply->input_port,
   2872 			      port_reply->peer_device_type,
   2873 			      port_reply->port_number,
   2874 			      port_reply->dpcd_revision,
   2875 			      port_reply->mcs,
   2876 			      port_reply->ddps,
   2877 			      port_reply->legacy_device_plug_status,
   2878 			      port_reply->num_sdp_streams,
   2879 			      port_reply->num_sdp_stream_sinks);
   2880 	}
   2881 }
   2882 
   2883 static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
   2884 				     struct drm_dp_mst_branch *mstb)
   2885 {
   2886 	struct drm_dp_sideband_msg_tx *txmsg;
   2887 	struct drm_dp_link_address_ack_reply *reply;
   2888 	struct drm_dp_mst_port *port, *tmp;
   2889 	int i, len, ret, port_mask = 0;
   2890 	bool changed = false;
   2891 
   2892 	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
   2893 	if (!txmsg)
   2894 		return -ENOMEM;
   2895 
   2896 	txmsg->dst = mstb;
   2897 	len = build_link_address(txmsg);
   2898 
   2899 	mstb->link_address_sent = true;
   2900 	drm_dp_queue_down_tx(mgr, txmsg);
   2901 
   2902 	/* FIXME: Actually do some real error handling here */
   2903 	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
   2904 	if (ret <= 0) {
   2905 		DRM_ERROR("Sending link address failed with %d\n", ret);
   2906 		goto out;
   2907 	}
   2908 	if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
   2909 		DRM_ERROR("link address NAK received\n");
   2910 		ret = -EIO;
   2911 		goto out;
   2912 	}
   2913 
   2914 	reply = &txmsg->reply.u.link_addr;
   2915 	DRM_DEBUG_KMS("link address reply: %d\n", reply->nports);
   2916 	drm_dp_dump_link_address(reply);
   2917 
   2918 	drm_dp_check_mstb_guid(mstb, reply->guid);
   2919 
   2920 	for (i = 0; i < reply->nports; i++) {
   2921 		port_mask |= BIT(reply->ports[i].port_number);
   2922 		ret = drm_dp_mst_handle_link_address_port(mstb, mgr->dev,
   2923 							  &reply->ports[i]);
   2924 		if (ret == 1)
   2925 			changed = true;
   2926 		else if (ret < 0)
   2927 			goto out;
   2928 	}
   2929 
   2930 	/* Prune any ports that are currently a part of mstb in our in-memory
   2931 	 * topology, but were not seen in this link address. Usually this
   2932 	 * means that they were removed while the topology was out of sync,
   2933 	 * e.g. during suspend/resume
   2934 	 */
   2935 	mutex_lock(&mgr->lock);
   2936 	list_for_each_entry_safe(port, tmp, &mstb->ports, next) {
   2937 		if (port_mask & BIT(port->port_num))
   2938 			continue;
   2939 
   2940 		DRM_DEBUG_KMS("port %d was not in link address, removing\n",
   2941 			      port->port_num);
   2942 		list_del(&port->next);
   2943 		drm_dp_mst_topology_put_port(port);
   2944 		changed = true;
   2945 	}
   2946 	mutex_unlock(&mgr->lock);
   2947 
   2948 out:
   2949 	if (ret <= 0)
   2950 		mstb->link_address_sent = false;
   2951 	kfree(txmsg);
   2952 	return ret < 0 ? ret : changed;
   2953 }
   2954 
   2955 void drm_dp_send_clear_payload_id_table(struct drm_dp_mst_topology_mgr *mgr,
   2956 					struct drm_dp_mst_branch *mstb)
   2957 {
   2958 	struct drm_dp_sideband_msg_tx *txmsg;
   2959 	int len, ret;
   2960 
   2961 	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
   2962 	if (!txmsg)
   2963 		return;
   2964 
   2965 	txmsg->dst = mstb;
   2966 	len = build_clear_payload_id_table(txmsg);
   2967 
   2968 	drm_dp_queue_down_tx(mgr, txmsg);
   2969 
   2970 	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
   2971 	if (ret > 0 && txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
   2972 		DRM_DEBUG_KMS("clear payload table id nak received\n");
   2973 
   2974 	kfree(txmsg);
   2975 }
   2976 
   2977 static int
   2978 drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
   2979 				struct drm_dp_mst_branch *mstb,
   2980 				struct drm_dp_mst_port *port)
   2981 {
   2982 	struct drm_dp_enum_path_resources_ack_reply *path_res;
   2983 	struct drm_dp_sideband_msg_tx *txmsg;
   2984 	int len;
   2985 	int ret;
   2986 
   2987 	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
   2988 	if (!txmsg)
   2989 		return -ENOMEM;
   2990 
   2991 	txmsg->dst = mstb;
   2992 	len = build_enum_path_resources(txmsg, port->port_num);
   2993 
   2994 	drm_dp_queue_down_tx(mgr, txmsg);
   2995 
   2996 	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
   2997 	if (ret > 0) {
   2998 		path_res = &txmsg->reply.u.path_resources;
   2999 
   3000 		if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
   3001 			DRM_DEBUG_KMS("enum path resources nak received\n");
   3002 		} else {
   3003 			if (port->port_num != path_res->port_number)
   3004 				DRM_ERROR("got incorrect port in response\n");
   3005 
   3006 			DRM_DEBUG_KMS("enum path resources %d: %d %d\n",
   3007 				      path_res->port_number,
   3008 				      path_res->full_payload_bw_number,
   3009 				      path_res->avail_payload_bw_number);
   3010 			port->available_pbn =
   3011 				path_res->avail_payload_bw_number;
   3012 			port->fec_capable = path_res->fec_capable;
   3013 		}
   3014 	}
   3015 
   3016 	kfree(txmsg);
   3017 	return 0;
   3018 }
   3019 
   3020 static struct drm_dp_mst_port *drm_dp_get_last_connected_port_to_mstb(struct drm_dp_mst_branch *mstb)
   3021 {
   3022 	if (!mstb->port_parent)
   3023 		return NULL;
   3024 
   3025 	if (mstb->port_parent->mstb != mstb)
   3026 		return mstb->port_parent;
   3027 
   3028 	return drm_dp_get_last_connected_port_to_mstb(mstb->port_parent->parent);
   3029 }
   3030 
   3031 /*
   3032  * Searches upwards in the topology starting from mstb to try to find the
   3033  * closest available parent of mstb that's still connected to the rest of the
   3034  * topology. This can be used in order to perform operations like releasing
   3035  * payloads, where the branch device which owned the payload may no longer be
   3036  * around and thus would require that the payload on the last living relative
   3037  * be freed instead.
   3038  */
   3039 static struct drm_dp_mst_branch *
   3040 drm_dp_get_last_connected_port_and_mstb(struct drm_dp_mst_topology_mgr *mgr,
   3041 					struct drm_dp_mst_branch *mstb,
   3042 					int *port_num)
   3043 {
   3044 	struct drm_dp_mst_branch *rmstb = NULL;
   3045 	struct drm_dp_mst_port *found_port;
   3046 
   3047 	mutex_lock(&mgr->lock);
   3048 	if (!mgr->mst_primary)
   3049 		goto out;
   3050 
   3051 	do {
   3052 		found_port = drm_dp_get_last_connected_port_to_mstb(mstb);
   3053 		if (!found_port)
   3054 			break;
   3055 
   3056 		if (drm_dp_mst_topology_try_get_mstb(found_port->parent)) {
   3057 			rmstb = found_port->parent;
   3058 			*port_num = found_port->port_num;
   3059 		} else {
   3060 			/* Search again, starting from this parent */
   3061 			mstb = found_port->parent;
   3062 		}
   3063 	} while (!rmstb);
   3064 out:
   3065 	mutex_unlock(&mgr->lock);
   3066 	return rmstb;
   3067 }
   3068 
   3069 static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
   3070 				   struct drm_dp_mst_port *port,
   3071 				   int id,
   3072 				   int pbn)
   3073 {
   3074 	struct drm_dp_sideband_msg_tx *txmsg;
   3075 	struct drm_dp_mst_branch *mstb;
   3076 	int len, ret, port_num;
   3077 	u8 sinks[DRM_DP_MAX_SDP_STREAMS];
   3078 	int i;
   3079 
   3080 	port_num = port->port_num;
   3081 	mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
   3082 	if (!mstb) {
   3083 		mstb = drm_dp_get_last_connected_port_and_mstb(mgr,
   3084 							       port->parent,
   3085 							       &port_num);
   3086 
   3087 		if (!mstb)
   3088 			return -EINVAL;
   3089 	}
   3090 
   3091 	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
   3092 	if (!txmsg) {
   3093 		ret = -ENOMEM;
   3094 		goto fail_put;
   3095 	}
   3096 
   3097 	for (i = 0; i < port->num_sdp_streams; i++)
   3098 		sinks[i] = i;
   3099 
   3100 	txmsg->dst = mstb;
   3101 	len = build_allocate_payload(txmsg, port_num,
   3102 				     id,
   3103 				     pbn, port->num_sdp_streams, sinks);
   3104 
   3105 	drm_dp_queue_down_tx(mgr, txmsg);
   3106 
   3107 	/*
   3108 	 * FIXME: there is a small chance that between getting the last
   3109 	 * connected mstb and sending the payload message, the last connected
   3110 	 * mstb could also be removed from the topology. In the future, this
   3111 	 * needs to be fixed by restarting the
   3112 	 * drm_dp_get_last_connected_port_and_mstb() search in the event of a
   3113 	 * timeout if the topology is still connected to the system.
   3114 	 */
   3115 	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
   3116 	if (ret > 0) {
   3117 		if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
   3118 			ret = -EINVAL;
   3119 		else
   3120 			ret = 0;
   3121 	}
   3122 	kfree(txmsg);
   3123 fail_put:
   3124 	drm_dp_mst_topology_put_mstb(mstb);
   3125 	return ret;
   3126 }
   3127 
   3128 int drm_dp_send_power_updown_phy(struct drm_dp_mst_topology_mgr *mgr,
   3129 				 struct drm_dp_mst_port *port, bool power_up)
   3130 {
   3131 	struct drm_dp_sideband_msg_tx *txmsg;
   3132 	int len, ret;
   3133 
   3134 	port = drm_dp_mst_topology_get_port_validated(mgr, port);
   3135 	if (!port)
   3136 		return -EINVAL;
   3137 
   3138 	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
   3139 	if (!txmsg) {
   3140 		drm_dp_mst_topology_put_port(port);
   3141 		return -ENOMEM;
   3142 	}
   3143 
   3144 	txmsg->dst = port->parent;
   3145 	len = build_power_updown_phy(txmsg, port->port_num, power_up);
   3146 	drm_dp_queue_down_tx(mgr, txmsg);
   3147 
   3148 	ret = drm_dp_mst_wait_tx_reply(port->parent, txmsg);
   3149 	if (ret > 0) {
   3150 		if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
   3151 			ret = -EINVAL;
   3152 		else
   3153 			ret = 0;
   3154 	}
   3155 	kfree(txmsg);
   3156 	drm_dp_mst_topology_put_port(port);
   3157 
   3158 	return ret;
   3159 }
   3160 EXPORT_SYMBOL(drm_dp_send_power_updown_phy);
   3161 
   3162 static int drm_dp_create_payload_step1(struct drm_dp_mst_topology_mgr *mgr,
   3163 				       int id,
   3164 				       struct drm_dp_payload *payload)
   3165 {
   3166 	int ret;
   3167 
   3168 	ret = drm_dp_dpcd_write_payload(mgr, id, payload);
   3169 	if (ret < 0) {
   3170 		payload->payload_state = 0;
   3171 		return ret;
   3172 	}
   3173 	payload->payload_state = DP_PAYLOAD_LOCAL;
   3174 	return 0;
   3175 }
   3176 
   3177 static int drm_dp_create_payload_step2(struct drm_dp_mst_topology_mgr *mgr,
   3178 				       struct drm_dp_mst_port *port,
   3179 				       int id,
   3180 				       struct drm_dp_payload *payload)
   3181 {
   3182 	int ret;
   3183 	ret = drm_dp_payload_send_msg(mgr, port, id, port->vcpi.pbn);
   3184 	if (ret < 0)
   3185 		return ret;
   3186 	payload->payload_state = DP_PAYLOAD_REMOTE;
   3187 	return ret;
   3188 }
   3189 
   3190 static int drm_dp_destroy_payload_step1(struct drm_dp_mst_topology_mgr *mgr,
   3191 					struct drm_dp_mst_port *port,
   3192 					int id,
   3193 					struct drm_dp_payload *payload)
   3194 {
   3195 	DRM_DEBUG_KMS("\n");
   3196 	/* it's okay for these to fail */
   3197 	if (port) {
   3198 		drm_dp_payload_send_msg(mgr, port, id, 0);
   3199 	}
   3200 
   3201 	drm_dp_dpcd_write_payload(mgr, id, payload);
   3202 	payload->payload_state = DP_PAYLOAD_DELETE_LOCAL;
   3203 	return 0;
   3204 }
   3205 
   3206 static int drm_dp_destroy_payload_step2(struct drm_dp_mst_topology_mgr *mgr,
   3207 					int id,
   3208 					struct drm_dp_payload *payload)
   3209 {
   3210 	payload->payload_state = 0;
   3211 	return 0;
   3212 }
   3213 
   3214 /**
   3215  * drm_dp_update_payload_part1() - Execute payload update part 1
   3216  * @mgr: manager to use.
   3217  *
   3218  * This iterates over all proposed virtual channels, and tries to
   3219  * allocate space in the link for them. For 0->slots transitions,
   3220  * this step just writes the VCPI to the MST device. For slots->0
   3221  * transitions, this writes the updated VCPIs and removes the
   3222  * remote VC payloads.
   3223  *
    3224  * After calling this, the driver should generate ACT and payload
    3225  * packets.
   3226  */
   3227 int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
   3228 {
   3229 	struct drm_dp_payload req_payload;
   3230 	struct drm_dp_mst_port *port;
   3231 	int i, j;
   3232 	int cur_slots = 1;
   3233 
   3234 	mutex_lock(&mgr->payload_lock);
   3235 	for (i = 0; i < mgr->max_payloads; i++) {
   3236 		struct drm_dp_vcpi *vcpi = mgr->proposed_vcpis[i];
   3237 		struct drm_dp_payload *payload = &mgr->payloads[i];
   3238 		bool put_port = false;
   3239 
   3240 		/* solve the current payloads - compare to the hw ones
   3241 		   - update the hw view */
   3242 		req_payload.start_slot = cur_slots;
   3243 		if (vcpi) {
   3244 			port = container_of(vcpi, struct drm_dp_mst_port,
   3245 					    vcpi);
   3246 
   3247 			/* Validated ports don't matter if we're releasing
   3248 			 * VCPI
   3249 			 */
   3250 			if (vcpi->num_slots) {
   3251 				port = drm_dp_mst_topology_get_port_validated(
   3252 				    mgr, port);
   3253 				if (!port) {
   3254 					mutex_unlock(&mgr->payload_lock);
   3255 					return -EINVAL;
   3256 				}
   3257 				put_port = true;
   3258 			}
   3259 
   3260 			req_payload.num_slots = vcpi->num_slots;
   3261 			req_payload.vcpi = vcpi->vcpi;
   3262 		} else {
   3263 			port = NULL;
   3264 			req_payload.num_slots = 0;
   3265 		}
   3266 
   3267 		payload->start_slot = req_payload.start_slot;
   3268 		/* work out what is required to happen with this payload */
   3269 		if (payload->num_slots != req_payload.num_slots) {
   3270 
   3271 			/* need to push an update for this payload */
   3272 			if (req_payload.num_slots) {
   3273 				drm_dp_create_payload_step1(mgr, vcpi->vcpi,
   3274 							    &req_payload);
   3275 				payload->num_slots = req_payload.num_slots;
   3276 				payload->vcpi = req_payload.vcpi;
   3277 
   3278 			} else if (payload->num_slots) {
   3279 				payload->num_slots = 0;
   3280 				drm_dp_destroy_payload_step1(mgr, port,
   3281 							     payload->vcpi,
   3282 							     payload);
   3283 				req_payload.payload_state =
   3284 					payload->payload_state;
   3285 				payload->start_slot = 0;
   3286 			}
   3287 			payload->payload_state = req_payload.payload_state;
   3288 		}
   3289 		cur_slots += req_payload.num_slots;
   3290 
   3291 		if (put_port)
   3292 			drm_dp_mst_topology_put_port(port);
   3293 	}
   3294 
   3295 	for (i = 0; i < mgr->max_payloads; /* do nothing */) {
   3296 		if (mgr->payloads[i].payload_state != DP_PAYLOAD_DELETE_LOCAL) {
   3297 			i++;
   3298 			continue;
   3299 		}
   3300 
   3301 		DRM_DEBUG_KMS("removing payload %d\n", i);
   3302 		for (j = i; j < mgr->max_payloads - 1; j++) {
   3303 			mgr->payloads[j] = mgr->payloads[j + 1];
   3304 			mgr->proposed_vcpis[j] = mgr->proposed_vcpis[j + 1];
   3305 
   3306 			if (mgr->proposed_vcpis[j] &&
   3307 			    mgr->proposed_vcpis[j]->num_slots) {
   3308 				set_bit(j + 1, &mgr->payload_mask);
   3309 			} else {
   3310 				clear_bit(j + 1, &mgr->payload_mask);
   3311 			}
   3312 		}
   3313 
   3314 		memset(&mgr->payloads[mgr->max_payloads - 1], 0,
   3315 		       sizeof(struct drm_dp_payload));
   3316 		mgr->proposed_vcpis[mgr->max_payloads - 1] = NULL;
   3317 		clear_bit(mgr->max_payloads, &mgr->payload_mask);
   3318 	}
   3319 	mutex_unlock(&mgr->payload_lock);
   3320 
   3321 	return 0;
   3322 }
   3323 EXPORT_SYMBOL(drm_dp_update_payload_part1);
   3324 
   3325 /**
   3326  * drm_dp_update_payload_part2() - Execute payload update part 2
   3327  * @mgr: manager to use.
   3328  *
   3329  * This iterates over all proposed virtual channels, and tries to
   3330  * allocate space in the link for them. For 0->slots transitions,
   3331  * this step writes the remote VC payload commands. For slots->0
   3332  * this just resets some internal state.
   3333  */
   3334 int drm_dp_update_payload_part2(struct drm_dp_mst_topology_mgr *mgr)
   3335 {
   3336 	struct drm_dp_mst_port *port;
   3337 	int i;
   3338 	int ret = 0;
   3339 	mutex_lock(&mgr->payload_lock);
   3340 	for (i = 0; i < mgr->max_payloads; i++) {
   3341 
   3342 		if (!mgr->proposed_vcpis[i])
   3343 			continue;
   3344 
   3345 		port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
   3346 
   3347 		DRM_DEBUG_KMS("payload %d %d\n", i, mgr->payloads[i].payload_state);
   3348 		if (mgr->payloads[i].payload_state == DP_PAYLOAD_LOCAL) {
   3349 			ret = drm_dp_create_payload_step2(mgr, port, mgr->proposed_vcpis[i]->vcpi, &mgr->payloads[i]);
   3350 		} else if (mgr->payloads[i].payload_state == DP_PAYLOAD_DELETE_LOCAL) {
   3351 			ret = drm_dp_destroy_payload_step2(mgr, mgr->proposed_vcpis[i]->vcpi, &mgr->payloads[i]);
   3352 		}
   3353 		if (ret) {
   3354 			mutex_unlock(&mgr->payload_lock);
   3355 			return ret;
   3356 		}
   3357 	}
   3358 	mutex_unlock(&mgr->payload_lock);
   3359 	return 0;
   3360 }
   3361 EXPORT_SYMBOL(drm_dp_update_payload_part2);
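
/*
 * Editor's annotation, not part of the original source: a minimal sketch of
 * the sequence drm_dp_update_payload_part1() and drm_dp_update_payload_part2()
 * expect from a driver's modeset path. The wrapper name
 * example_commit_mst_payloads() is hypothetical; real drivers interleave their
 * own hardware programming between the steps.
 */
#if 0
static int example_commit_mst_payloads(struct drm_dp_mst_topology_mgr *mgr)
{
	int ret;

	/* Step 1: write the VCPI allocations into the DPCD payload table */
	ret = drm_dp_update_payload_part1(mgr);
	if (ret < 0)
		return ret;

	/* The driver triggers ACT in its hardware, then waits for the sink */
	ret = drm_dp_check_act_status(mgr);
	if (ret < 0)
		return ret;

	/* Step 2: send ALLOCATE_PAYLOAD sideband messages to the branches */
	return drm_dp_update_payload_part2(mgr);
}
#endif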
   3362 
   3363 static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr,
   3364 				 struct drm_dp_mst_port *port,
   3365 				 int offset, int size, u8 *bytes)
   3366 {
   3367 	int len;
   3368 	int ret = 0;
   3369 	struct drm_dp_sideband_msg_tx *txmsg;
   3370 	struct drm_dp_mst_branch *mstb;
   3371 
   3372 	mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
   3373 	if (!mstb)
   3374 		return -EINVAL;
   3375 
   3376 	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
   3377 	if (!txmsg) {
   3378 		ret = -ENOMEM;
   3379 		goto fail_put;
   3380 	}
   3381 
   3382 	len = build_dpcd_read(txmsg, port->port_num, offset, size);
   3383 	txmsg->dst = port->parent;
   3384 
   3385 	drm_dp_queue_down_tx(mgr, txmsg);
   3386 
   3387 	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
   3388 	if (ret < 0)
   3389 		goto fail_free;
   3390 
   3391 	/* DPCD read should never be NACKed */
    3392 	if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
   3393 		DRM_ERROR("mstb %p port %d: DPCD read on addr 0x%x for %d bytes NAKed\n",
   3394 			  mstb, port->port_num, offset, size);
   3395 		ret = -EIO;
   3396 		goto fail_free;
   3397 	}
   3398 
   3399 	if (txmsg->reply.u.remote_dpcd_read_ack.num_bytes != size) {
   3400 		ret = -EPROTO;
   3401 		goto fail_free;
   3402 	}
   3403 
   3404 	ret = min_t(size_t, txmsg->reply.u.remote_dpcd_read_ack.num_bytes,
   3405 		    size);
   3406 	memcpy(bytes, txmsg->reply.u.remote_dpcd_read_ack.bytes, ret);
   3407 
   3408 fail_free:
   3409 	kfree(txmsg);
   3410 fail_put:
   3411 	drm_dp_mst_topology_put_mstb(mstb);
   3412 
   3413 	return ret;
   3414 }
   3415 
   3416 static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
   3417 				  struct drm_dp_mst_port *port,
   3418 				  int offset, int size, u8 *bytes)
   3419 {
   3420 	int len;
   3421 	int ret;
   3422 	struct drm_dp_sideband_msg_tx *txmsg;
   3423 	struct drm_dp_mst_branch *mstb;
   3424 
   3425 	mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
   3426 	if (!mstb)
   3427 		return -EINVAL;
   3428 
   3429 	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
   3430 	if (!txmsg) {
   3431 		ret = -ENOMEM;
   3432 		goto fail_put;
   3433 	}
   3434 
   3435 	len = build_dpcd_write(txmsg, port->port_num, offset, size, bytes);
   3436 	txmsg->dst = mstb;
   3437 
   3438 	drm_dp_queue_down_tx(mgr, txmsg);
   3439 
   3440 	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
   3441 	if (ret > 0) {
   3442 		if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
   3443 			ret = -EIO;
   3444 		else
   3445 			ret = 0;
   3446 	}
   3447 	kfree(txmsg);
   3448 fail_put:
   3449 	drm_dp_mst_topology_put_mstb(mstb);
   3450 	return ret;
   3451 }
   3452 
   3453 static int drm_dp_encode_up_ack_reply(struct drm_dp_sideband_msg_tx *msg, u8 req_type)
   3454 {
   3455 	struct drm_dp_sideband_msg_reply_body reply;
   3456 
   3457 	reply.reply_type = DP_SIDEBAND_REPLY_ACK;
   3458 	reply.req_type = req_type;
   3459 	drm_dp_encode_sideband_reply(&reply, msg);
   3460 	return 0;
   3461 }
   3462 
   3463 static int drm_dp_send_up_ack_reply(struct drm_dp_mst_topology_mgr *mgr,
   3464 				    struct drm_dp_mst_branch *mstb,
   3465 				    int req_type, int seqno, bool broadcast)
   3466 {
   3467 	struct drm_dp_sideband_msg_tx *txmsg;
   3468 
   3469 	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
   3470 	if (!txmsg)
   3471 		return -ENOMEM;
   3472 
   3473 	txmsg->dst = mstb;
   3474 	txmsg->seqno = seqno;
   3475 	drm_dp_encode_up_ack_reply(txmsg, req_type);
   3476 
   3477 	mutex_lock(&mgr->qlock);
   3478 
   3479 	process_single_up_tx_qlock(mgr, txmsg);
   3480 
   3481 	mutex_unlock(&mgr->qlock);
   3482 
   3483 	kfree(txmsg);
   3484 	return 0;
   3485 }
   3486 
    3487 static int drm_dp_get_vc_payload_bw(u8 dp_link_bw, u8 dp_link_count)
   3488 {
   3489 	if (dp_link_bw == 0 || dp_link_count == 0)
   3490 		DRM_DEBUG_KMS("invalid link bandwidth in DPCD: %x (link count: %d)\n",
   3491 			      dp_link_bw, dp_link_count);
   3492 
   3493 	return dp_link_bw * dp_link_count / 2;
   3494 }
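
/*
 * Editor's annotation, not part of the original source: worked example of the
 * helper above. dp_link_bw is the raw DPCD link-rate value in units of
 * 0.27 Gbps per lane, so a 5.4 Gbps link (0x14 = 20) trained at 4 lanes gives
 * a per-timeslot budget of 20 * 4 / 2 = 40 PBN, i.e. mgr->pbn_div = 40.
 */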
   3495 
   3496 /**
   3497  * drm_dp_mst_topology_mgr_set_mst() - Set the MST state for a topology manager
   3498  * @mgr: manager to set state for
   3499  * @mst_state: true to enable MST on this connector - false to disable.
   3500  *
   3501  * This is called by the driver when it detects an MST capable device plugged
   3502  * into a DP MST capable port, or when a DP MST capable device is unplugged.
   3503  */
   3504 int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool mst_state)
   3505 {
   3506 	int ret = 0;
   3507 	int i = 0;
   3508 	struct drm_dp_mst_branch *mstb = NULL;
   3509 
   3510 	mutex_lock(&mgr->lock);
   3511 	if (mst_state == mgr->mst_state)
   3512 		goto out_unlock;
   3513 
   3514 	mgr->mst_state = mst_state;
   3515 	/* set the device into MST mode */
   3516 	if (mst_state) {
   3517 		WARN_ON(mgr->mst_primary);
   3518 
   3519 		/* get dpcd info */
   3520 		ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE);
   3521 		if (ret != DP_RECEIVER_CAP_SIZE) {
   3522 			DRM_DEBUG_KMS("failed to read DPCD\n");
   3523 			goto out_unlock;
   3524 		}
   3525 
   3526 		mgr->pbn_div = drm_dp_get_vc_payload_bw(mgr->dpcd[1],
   3527 							mgr->dpcd[2] & DP_MAX_LANE_COUNT_MASK);
   3528 		if (mgr->pbn_div == 0) {
   3529 			ret = -EINVAL;
   3530 			goto out_unlock;
   3531 		}
   3532 
   3533 		/* add initial branch device at LCT 1 */
   3534 		mstb = drm_dp_add_mst_branch_device(1, NULL);
   3535 		if (mstb == NULL) {
   3536 			ret = -ENOMEM;
   3537 			goto out_unlock;
   3538 		}
   3539 		mstb->mgr = mgr;
   3540 
   3541 		/* give this the main reference */
   3542 		mgr->mst_primary = mstb;
   3543 		drm_dp_mst_topology_get_mstb(mgr->mst_primary);
   3544 
   3545 		ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
   3546 							 DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC);
   3547 		if (ret < 0) {
   3548 			goto out_unlock;
   3549 		}
   3550 
   3551 		{
   3552 			struct drm_dp_payload reset_pay;
   3553 			reset_pay.start_slot = 0;
   3554 			reset_pay.num_slots = 0x3f;
   3555 			drm_dp_dpcd_write_payload(mgr, 0, &reset_pay);
   3556 		}
   3557 
   3558 		queue_work(system_long_wq, &mgr->work);
   3559 
   3560 		ret = 0;
   3561 	} else {
   3562 		/* disable MST on the device */
   3563 		mstb = mgr->mst_primary;
   3564 		mgr->mst_primary = NULL;
   3565 		/* this can fail if the device is gone */
   3566 		drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 0);
   3567 		ret = 0;
   3568 		mutex_lock(&mgr->payload_lock);
   3569 		memset(mgr->payloads, 0, mgr->max_payloads * sizeof(struct drm_dp_payload));
   3570 		mgr->payload_mask = 0;
   3571 		set_bit(0, &mgr->payload_mask);
   3572 		for (i = 0; i < mgr->max_payloads; i++) {
   3573 			struct drm_dp_vcpi *vcpi = mgr->proposed_vcpis[i];
   3574 
   3575 			if (vcpi) {
   3576 				vcpi->vcpi = 0;
   3577 				vcpi->num_slots = 0;
   3578 			}
   3579 			mgr->proposed_vcpis[i] = NULL;
   3580 		}
   3581 		mgr->vcpi_mask = 0;
   3582 		mutex_unlock(&mgr->payload_lock);
   3583 
   3584 		mgr->payload_id_table_cleared = false;
   3585 	}
   3586 
   3587 out_unlock:
   3588 	mutex_unlock(&mgr->lock);
   3589 	if (mstb)
   3590 		drm_dp_mst_topology_put_mstb(mstb);
   3591 	return ret;
   3592 
   3593 }
   3594 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_set_mst);
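
/*
 * Editor's annotation, not part of the original source: a sketch of how a
 * driver's long-pulse HPD handling is expected to drive the helper above,
 * assuming DP_MSTM_CAP has already been probed. The function name and the
 * plugged/can_mst flags are hypothetical.
 */
#if 0
static void example_handle_long_hpd(struct drm_dp_mst_topology_mgr *mgr,
				    bool plugged, bool can_mst)
{
	if (plugged && can_mst)
		/* starts the link-address probe work for the topology */
		drm_dp_mst_topology_mgr_set_mst(mgr, true);
	else
		/* tears down the primary branch and clears payload state */
		drm_dp_mst_topology_mgr_set_mst(mgr, false);
}
#endif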
   3595 
   3596 static void
   3597 drm_dp_mst_topology_mgr_invalidate_mstb(struct drm_dp_mst_branch *mstb)
   3598 {
   3599 	struct drm_dp_mst_port *port;
   3600 
   3601 	/* The link address will need to be re-sent on resume */
   3602 	mstb->link_address_sent = false;
   3603 
   3604 	list_for_each_entry(port, &mstb->ports, next) {
   3605 		/* The PBN for each port will also need to be re-probed */
   3606 		port->available_pbn = 0;
   3607 
   3608 		if (port->mstb)
   3609 			drm_dp_mst_topology_mgr_invalidate_mstb(port->mstb);
   3610 	}
   3611 }
   3612 
   3613 /**
   3614  * drm_dp_mst_topology_mgr_suspend() - suspend the MST manager
   3615  * @mgr: manager to suspend
   3616  *
   3617  * This function tells the MST device that we can't handle UP messages
   3618  * anymore. This should stop it from sending any since we are suspended.
   3619  */
   3620 void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr)
   3621 {
   3622 	mutex_lock(&mgr->lock);
   3623 	drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
   3624 			   DP_MST_EN | DP_UPSTREAM_IS_SRC);
   3625 	mutex_unlock(&mgr->lock);
   3626 	flush_work(&mgr->up_req_work);
   3627 	flush_work(&mgr->work);
   3628 	flush_work(&mgr->delayed_destroy_work);
   3629 
   3630 	mutex_lock(&mgr->lock);
   3631 	if (mgr->mst_state && mgr->mst_primary)
   3632 		drm_dp_mst_topology_mgr_invalidate_mstb(mgr->mst_primary);
   3633 	mutex_unlock(&mgr->lock);
   3634 }
   3635 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_suspend);
   3636 
   3637 /**
   3638  * drm_dp_mst_topology_mgr_resume() - resume the MST manager
   3639  * @mgr: manager to resume
   3640  * @sync: whether or not to perform topology reprobing synchronously
   3641  *
    3642  * This fetches the DPCD to check whether the device is still there;
    3643  * if it is, it rewrites the MSTM control bits and returns 0.
    3644  *
    3645  * If the device has gone away, this returns -1 and the driver should
    3646  * do a full MST reprobe, in case we were undocked.
   3647  *
   3648  * During system resume (where it is assumed that the driver will be calling
   3649  * drm_atomic_helper_resume()) this function should be called beforehand with
   3650  * @sync set to true. In contexts like runtime resume where the driver is not
   3651  * expected to be calling drm_atomic_helper_resume(), this function should be
   3652  * called with @sync set to false in order to avoid deadlocking.
   3653  *
   3654  * Returns: -1 if the MST topology was removed while we were suspended, 0
   3655  * otherwise.
   3656  */
   3657 int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr,
   3658 				   bool sync)
   3659 {
   3660 	int ret;
   3661 	u8 guid[16];
   3662 
   3663 	mutex_lock(&mgr->lock);
   3664 	if (!mgr->mst_primary)
   3665 		goto out_fail;
   3666 
   3667 	ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd,
   3668 			       DP_RECEIVER_CAP_SIZE);
   3669 	if (ret != DP_RECEIVER_CAP_SIZE) {
   3670 		DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
   3671 		goto out_fail;
   3672 	}
   3673 
   3674 	ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
   3675 				 DP_MST_EN |
   3676 				 DP_UP_REQ_EN |
   3677 				 DP_UPSTREAM_IS_SRC);
   3678 	if (ret < 0) {
   3679 		DRM_DEBUG_KMS("mst write failed - undocked during suspend?\n");
   3680 		goto out_fail;
   3681 	}
   3682 
   3683 	/* Some hubs forget their guids after they resume */
   3684 	ret = drm_dp_dpcd_read(mgr->aux, DP_GUID, guid, 16);
   3685 	if (ret != 16) {
   3686 		DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
   3687 		goto out_fail;
   3688 	}
   3689 	drm_dp_check_mstb_guid(mgr->mst_primary, guid);
   3690 
   3691 	/*
   3692 	 * For the final step of resuming the topology, we need to bring the
   3693 	 * state of our in-memory topology back into sync with reality. So,
   3694 	 * restart the probing process as if we're probing a new hub
   3695 	 */
   3696 	queue_work(system_long_wq, &mgr->work);
   3697 	mutex_unlock(&mgr->lock);
   3698 
   3699 	if (sync) {
   3700 		DRM_DEBUG_KMS("Waiting for link probe work to finish re-syncing topology...\n");
   3701 		flush_work(&mgr->work);
   3702 	}
   3703 
   3704 	return 0;
   3705 
   3706 out_fail:
   3707 	mutex_unlock(&mgr->lock);
   3708 	return -1;
   3709 }
   3710 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_resume);
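
/*
 * Editor's annotation, not part of the original source: sketch of the expected
 * system-resume usage, with @sync set to true on the assumption that the
 * driver calls drm_atomic_helper_resume() afterwards. The function name is
 * hypothetical; the fallback on failure mirrors what existing drivers do.
 */
#if 0
static void example_driver_mst_resume(struct drm_dp_mst_topology_mgr *mgr)
{
	/* Returns -1 if the topology vanished while we were suspended */
	if (drm_dp_mst_topology_mgr_resume(mgr, true) < 0)
		drm_dp_mst_topology_mgr_set_mst(mgr, false);
}
#endif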
   3711 
   3712 static bool drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up)
   3713 {
   3714 	int len;
   3715 	u8 replyblock[32];
   3716 	int replylen, origlen, curreply;
   3717 	int ret;
   3718 	struct drm_dp_sideband_msg_rx *msg;
   3719 	int basereg = up ? DP_SIDEBAND_MSG_UP_REQ_BASE : DP_SIDEBAND_MSG_DOWN_REP_BASE;
   3720 	msg = up ? &mgr->up_req_recv : &mgr->down_rep_recv;
   3721 
   3722 	len = min(mgr->max_dpcd_transaction_bytes, 16);
   3723 	ret = drm_dp_dpcd_read(mgr->aux, basereg,
   3724 			       replyblock, len);
   3725 	if (ret != len) {
   3726 		DRM_DEBUG_KMS("failed to read DPCD down rep %d %d\n", len, ret);
   3727 		return false;
   3728 	}
   3729 	ret = drm_dp_sideband_msg_build(msg, replyblock, len, true);
   3730 	if (!ret) {
   3731 		DRM_DEBUG_KMS("sideband msg build failed %d\n", replyblock[0]);
   3732 		return false;
   3733 	}
   3734 	replylen = msg->curchunk_len + msg->curchunk_hdrlen;
   3735 
   3736 	origlen = replylen;
   3737 	replylen -= len;
   3738 	curreply = len;
   3739 	while (replylen > 0) {
   3740 		len = min3(replylen, mgr->max_dpcd_transaction_bytes, 16);
   3741 		ret = drm_dp_dpcd_read(mgr->aux, basereg + curreply,
   3742 				    replyblock, len);
   3743 		if (ret != len) {
   3744 			DRM_DEBUG_KMS("failed to read a chunk (len %d, ret %d)\n",
   3745 				      len, ret);
   3746 			return false;
   3747 		}
   3748 
   3749 		ret = drm_dp_sideband_msg_build(msg, replyblock, len, false);
   3750 		if (!ret) {
   3751 			DRM_DEBUG_KMS("failed to build sideband msg\n");
   3752 			return false;
   3753 		}
   3754 
   3755 		curreply += len;
   3756 		replylen -= len;
   3757 	}
   3758 	return true;
   3759 }
   3760 
   3761 static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
   3762 {
   3763 	struct drm_dp_sideband_msg_tx *txmsg;
   3764 	struct drm_dp_mst_branch *mstb;
   3765 	struct drm_dp_sideband_msg_hdr *hdr = &mgr->down_rep_recv.initial_hdr;
   3766 	int slot = -1;
   3767 
   3768 	if (!drm_dp_get_one_sb_msg(mgr, false))
   3769 		goto clear_down_rep_recv;
   3770 
   3771 	if (!mgr->down_rep_recv.have_eomt)
   3772 		return 0;
   3773 
   3774 	mstb = drm_dp_get_mst_branch_device(mgr, hdr->lct, hdr->rad);
   3775 	if (!mstb) {
   3776 		DRM_DEBUG_KMS("Got MST reply from unknown device %d\n",
   3777 			      hdr->lct);
   3778 		goto clear_down_rep_recv;
   3779 	}
   3780 
   3781 	/* find the message */
   3782 	slot = hdr->seqno;
   3783 	mutex_lock(&mgr->qlock);
   3784 	txmsg = mstb->tx_slots[slot];
    3785 	/* the reply slot itself is cleared further down, under qlock */
   3786 	mutex_unlock(&mgr->qlock);
   3787 
   3788 	if (!txmsg) {
   3789 		DRM_DEBUG_KMS("Got MST reply with no msg %p %d %d %02x %02x\n",
   3790 			      mstb, hdr->seqno, hdr->lct, hdr->rad[0],
   3791 			      mgr->down_rep_recv.msg[0]);
   3792 		goto no_msg;
   3793 	}
   3794 
   3795 	drm_dp_sideband_parse_reply(&mgr->down_rep_recv, &txmsg->reply);
   3796 
   3797 	if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
   3798 		DRM_DEBUG_KMS("Got NAK reply: req 0x%02x (%s), reason 0x%02x (%s), nak data 0x%02x\n",
   3799 			      txmsg->reply.req_type,
   3800 			      drm_dp_mst_req_type_str(txmsg->reply.req_type),
   3801 			      txmsg->reply.u.nak.reason,
   3802 			      drm_dp_mst_nak_reason_str(txmsg->reply.u.nak.reason),
   3803 			      txmsg->reply.u.nak.nak_data);
   3804 
   3805 	memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
   3806 	drm_dp_mst_topology_put_mstb(mstb);
   3807 
   3808 	mutex_lock(&mgr->qlock);
   3809 	txmsg->state = DRM_DP_SIDEBAND_TX_RX;
   3810 	mstb->tx_slots[slot] = NULL;
   3811 	mgr->is_waiting_for_dwn_reply = false;
   3812 	mutex_unlock(&mgr->qlock);
   3813 
   3814 	wake_up_all(&mgr->tx_waitq);
   3815 
   3816 	return 0;
   3817 
   3818 no_msg:
   3819 	drm_dp_mst_topology_put_mstb(mstb);
   3820 clear_down_rep_recv:
   3821 	mutex_lock(&mgr->qlock);
   3822 	mgr->is_waiting_for_dwn_reply = false;
   3823 	mutex_unlock(&mgr->qlock);
   3824 	memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
   3825 
   3826 	return 0;
   3827 }
   3828 
   3829 static inline bool
   3830 drm_dp_mst_process_up_req(struct drm_dp_mst_topology_mgr *mgr,
   3831 			  struct drm_dp_pending_up_req *up_req)
   3832 {
   3833 	struct drm_dp_mst_branch *mstb = NULL;
   3834 	struct drm_dp_sideband_msg_req_body *msg = &up_req->msg;
   3835 	struct drm_dp_sideband_msg_hdr *hdr = &up_req->hdr;
   3836 	bool hotplug = false;
   3837 
   3838 	if (hdr->broadcast) {
   3839 		const u8 *guid = NULL;
   3840 
   3841 		if (msg->req_type == DP_CONNECTION_STATUS_NOTIFY)
   3842 			guid = msg->u.conn_stat.guid;
   3843 		else if (msg->req_type == DP_RESOURCE_STATUS_NOTIFY)
   3844 			guid = msg->u.resource_stat.guid;
   3845 
   3846 		if (guid)
   3847 			mstb = drm_dp_get_mst_branch_device_by_guid(mgr, guid);
   3848 	} else {
   3849 		mstb = drm_dp_get_mst_branch_device(mgr, hdr->lct, hdr->rad);
   3850 	}
   3851 
   3852 	if (!mstb) {
   3853 		DRM_DEBUG_KMS("Got MST reply from unknown device %d\n",
   3854 			      hdr->lct);
   3855 		return false;
   3856 	}
   3857 
   3858 	/* TODO: Add missing handler for DP_RESOURCE_STATUS_NOTIFY events */
   3859 	if (msg->req_type == DP_CONNECTION_STATUS_NOTIFY) {
   3860 		drm_dp_mst_handle_conn_stat(mstb, &msg->u.conn_stat);
   3861 		hotplug = true;
   3862 	}
   3863 
   3864 	drm_dp_mst_topology_put_mstb(mstb);
   3865 	return hotplug;
   3866 }
   3867 
   3868 static void drm_dp_mst_up_req_work(struct work_struct *work)
   3869 {
   3870 	struct drm_dp_mst_topology_mgr *mgr =
   3871 		container_of(work, struct drm_dp_mst_topology_mgr,
   3872 			     up_req_work);
   3873 	struct drm_dp_pending_up_req *up_req;
   3874 	bool send_hotplug = false;
   3875 
   3876 	mutex_lock(&mgr->probe_lock);
   3877 	while (true) {
   3878 		mutex_lock(&mgr->up_req_lock);
   3879 		up_req = list_first_entry_or_null(&mgr->up_req_list,
   3880 						  struct drm_dp_pending_up_req,
   3881 						  next);
   3882 		if (up_req)
   3883 			list_del(&up_req->next);
   3884 		mutex_unlock(&mgr->up_req_lock);
   3885 
   3886 		if (!up_req)
   3887 			break;
   3888 
   3889 		send_hotplug |= drm_dp_mst_process_up_req(mgr, up_req);
   3890 		kfree(up_req);
   3891 	}
   3892 	mutex_unlock(&mgr->probe_lock);
   3893 
   3894 	if (send_hotplug)
   3895 		drm_kms_helper_hotplug_event(mgr->dev);
   3896 }
   3897 
   3898 static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
   3899 {
   3900 	struct drm_dp_sideband_msg_hdr *hdr = &mgr->up_req_recv.initial_hdr;
   3901 	struct drm_dp_pending_up_req *up_req;
   3902 	bool seqno;
   3903 
   3904 	if (!drm_dp_get_one_sb_msg(mgr, true))
   3905 		goto out;
   3906 
   3907 	if (!mgr->up_req_recv.have_eomt)
   3908 		return 0;
   3909 
   3910 	up_req = kzalloc(sizeof(*up_req), GFP_KERNEL);
   3911 	if (!up_req) {
   3912 		DRM_ERROR("Not enough memory to process MST up req\n");
   3913 		return -ENOMEM;
   3914 	}
   3915 	INIT_LIST_HEAD(&up_req->next);
   3916 
   3917 	seqno = hdr->seqno;
   3918 	drm_dp_sideband_parse_req(&mgr->up_req_recv, &up_req->msg);
   3919 
   3920 	if (up_req->msg.req_type != DP_CONNECTION_STATUS_NOTIFY &&
   3921 	    up_req->msg.req_type != DP_RESOURCE_STATUS_NOTIFY) {
   3922 		DRM_DEBUG_KMS("Received unknown up req type, ignoring: %x\n",
   3923 			      up_req->msg.req_type);
   3924 		kfree(up_req);
   3925 		goto out;
   3926 	}
   3927 
   3928 	drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, up_req->msg.req_type,
   3929 				 seqno, false);
   3930 
   3931 	if (up_req->msg.req_type == DP_CONNECTION_STATUS_NOTIFY) {
   3932 		const struct drm_dp_connection_status_notify *conn_stat =
   3933 			&up_req->msg.u.conn_stat;
   3934 
   3935 		DRM_DEBUG_KMS("Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n",
   3936 			      conn_stat->port_number,
   3937 			      conn_stat->legacy_device_plug_status,
   3938 			      conn_stat->displayport_device_plug_status,
   3939 			      conn_stat->message_capability_status,
   3940 			      conn_stat->input_port,
   3941 			      conn_stat->peer_device_type);
   3942 	} else if (up_req->msg.req_type == DP_RESOURCE_STATUS_NOTIFY) {
   3943 		const struct drm_dp_resource_status_notify *res_stat =
   3944 			&up_req->msg.u.resource_stat;
   3945 
   3946 		DRM_DEBUG_KMS("Got RSN: pn: %d avail_pbn %d\n",
   3947 			      res_stat->port_number,
   3948 			      res_stat->available_pbn);
   3949 	}
   3950 
   3951 	up_req->hdr = *hdr;
   3952 	mutex_lock(&mgr->up_req_lock);
   3953 	list_add_tail(&up_req->next, &mgr->up_req_list);
   3954 	mutex_unlock(&mgr->up_req_lock);
   3955 	queue_work(system_long_wq, &mgr->up_req_work);
   3956 
   3957 out:
   3958 	memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
   3959 	return 0;
   3960 }
   3961 
   3962 /**
   3963  * drm_dp_mst_hpd_irq() - MST hotplug IRQ notify
   3964  * @mgr: manager to notify irq for.
   3965  * @esi: 4 bytes from SINK_COUNT_ESI
   3966  * @handled: whether the hpd interrupt was consumed or not
   3967  *
   3968  * This should be called from the driver when it detects a short IRQ,
   3969  * along with the value of the DEVICE_SERVICE_IRQ_VECTOR_ESI0. The
   3970  * topology manager will process the sideband messages received as a result
   3971  * of this.
   3972  */
   3973 int drm_dp_mst_hpd_irq(struct drm_dp_mst_topology_mgr *mgr, u8 *esi, bool *handled)
   3974 {
   3975 	int ret = 0;
   3976 	int sc;
   3977 	*handled = false;
   3978 	sc = esi[0] & 0x3f;
   3979 
   3980 	if (sc != mgr->sink_count) {
   3981 		mgr->sink_count = sc;
   3982 		*handled = true;
   3983 	}
   3984 
   3985 	if (esi[1] & DP_DOWN_REP_MSG_RDY) {
   3986 		ret = drm_dp_mst_handle_down_rep(mgr);
   3987 		*handled = true;
   3988 	}
   3989 
   3990 	if (esi[1] & DP_UP_REQ_MSG_RDY) {
   3991 		ret |= drm_dp_mst_handle_up_req(mgr);
   3992 		*handled = true;
   3993 	}
   3994 
   3995 	drm_dp_mst_kick_tx(mgr);
   3996 	return ret;
   3997 }
   3998 EXPORT_SYMBOL(drm_dp_mst_hpd_irq);
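
/*
 * Editor's annotation, not part of the original source: a sketch of the
 * short-pulse HPD path that feeds this helper, assuming the driver reads the
 * ESI block itself and acks the serviced vector afterwards. The function name
 * is hypothetical and error handling is elided.
 */
#if 0
static void example_handle_short_hpd(struct drm_dp_aux *aux,
				     struct drm_dp_mst_topology_mgr *mgr)
{
	u8 esi[DP_DPRX_ESI_LEN] = {};
	bool handled = false;

	/* Fetch SINK_COUNT_ESI plus the IRQ/link service vectors */
	if (drm_dp_dpcd_read(aux, DP_SINK_COUNT_ESI, esi,
			     DP_DPRX_ESI_LEN) != DP_DPRX_ESI_LEN)
		return;

	drm_dp_mst_hpd_irq(mgr, esi, &handled);

	/* Ack the IRQ vector we just serviced */
	if (handled)
		drm_dp_dpcd_writeb(aux, DP_SINK_COUNT_ESI + 1, esi[1]);
}
#endif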
   3999 
   4000 /**
   4001  * drm_dp_mst_detect_port() - get connection status for an MST port
   4002  * @connector: DRM connector for this port
   4003  * @ctx: The acquisition context to use for grabbing locks
   4004  * @mgr: manager for this port
   4005  * @port: pointer to a port
   4006  *
   4007  * This returns the current connection state for a port.
   4008  */
   4009 int
   4010 drm_dp_mst_detect_port(struct drm_connector *connector,
   4011 		       struct drm_modeset_acquire_ctx *ctx,
   4012 		       struct drm_dp_mst_topology_mgr *mgr,
   4013 		       struct drm_dp_mst_port *port)
   4014 {
   4015 	int ret;
   4016 
   4017 	/* we need to search for the port in the mgr in case it's gone */
   4018 	port = drm_dp_mst_topology_get_port_validated(mgr, port);
   4019 	if (!port)
   4020 		return connector_status_disconnected;
   4021 
   4022 	ret = drm_modeset_lock(&mgr->base.lock, ctx);
   4023 	if (ret)
   4024 		goto out;
   4025 
   4026 	ret = connector_status_disconnected;
   4027 
   4028 	if (!port->ddps)
   4029 		goto out;
   4030 
   4031 	switch (port->pdt) {
   4032 	case DP_PEER_DEVICE_NONE:
   4033 	case DP_PEER_DEVICE_MST_BRANCHING:
   4034 		if (!port->mcs)
   4035 			ret = connector_status_connected;
   4036 		break;
   4037 
   4038 	case DP_PEER_DEVICE_SST_SINK:
   4039 		ret = connector_status_connected;
   4040 		/* for logical ports - cache the EDID */
   4041 		if (port->port_num >= 8 && !port->cached_edid) {
   4042 			port->cached_edid = drm_get_edid(connector, &port->aux.ddc);
   4043 		}
   4044 		break;
   4045 	case DP_PEER_DEVICE_DP_LEGACY_CONV:
   4046 		if (port->ldps)
   4047 			ret = connector_status_connected;
   4048 		break;
   4049 	}
   4050 out:
   4051 	drm_dp_mst_topology_put_port(port);
   4052 	return ret;
   4053 }
   4054 EXPORT_SYMBOL(drm_dp_mst_detect_port);
   4055 
   4056 /**
   4057  * drm_dp_mst_port_has_audio() - Check whether port has audio capability or not
   4058  * @mgr: manager for this port
   4059  * @port: unverified pointer to a port.
   4060  *
   4061  * This returns whether the port supports audio or not.
   4062  */
   4063 bool drm_dp_mst_port_has_audio(struct drm_dp_mst_topology_mgr *mgr,
   4064 					struct drm_dp_mst_port *port)
   4065 {
   4066 	bool ret = false;
   4067 
   4068 	port = drm_dp_mst_topology_get_port_validated(mgr, port);
   4069 	if (!port)
   4070 		return ret;
   4071 	ret = port->has_audio;
   4072 	drm_dp_mst_topology_put_port(port);
   4073 	return ret;
   4074 }
   4075 EXPORT_SYMBOL(drm_dp_mst_port_has_audio);
   4076 
   4077 /**
   4078  * drm_dp_mst_get_edid() - get EDID for an MST port
   4079  * @connector: toplevel connector to get EDID for
   4080  * @mgr: manager for this port
   4081  * @port: unverified pointer to a port.
   4082  *
    4083  * This returns an EDID for the port connected to a connector.
    4084  * It validates that the pointer still exists so the caller doesn't require
    4085  * a reference.
   4086  */
   4087 struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
   4088 {
   4089 	struct edid *edid = NULL;
   4090 
   4091 	/* we need to search for the port in the mgr in case it's gone */
   4092 	port = drm_dp_mst_topology_get_port_validated(mgr, port);
   4093 	if (!port)
   4094 		return NULL;
   4095 
   4096 	if (port->cached_edid)
   4097 		edid = drm_edid_duplicate(port->cached_edid);
   4098 	else {
   4099 		edid = drm_get_edid(connector, &port->aux.ddc);
   4100 	}
   4101 	port->has_audio = drm_detect_monitor_audio(edid);
   4102 	drm_dp_mst_topology_put_port(port);
   4103 	return edid;
   4104 }
   4105 EXPORT_SYMBOL(drm_dp_mst_get_edid);
   4106 
   4107 /**
   4108  * drm_dp_find_vcpi_slots() - Find VCPI slots for this PBN value
   4109  * @mgr: manager to use
   4110  * @pbn: payload bandwidth to convert into slots.
   4111  *
   4112  * Calculate the number of VCPI slots that will be required for the given PBN
   4113  * value. This function is deprecated, and should not be used in atomic
   4114  * drivers.
   4115  *
   4116  * RETURNS:
   4117  * The total slots required for this port, or error.
   4118  */
   4119 int drm_dp_find_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr,
   4120 			   int pbn)
   4121 {
   4122 	int num_slots;
   4123 
   4124 	num_slots = DIV_ROUND_UP(pbn, mgr->pbn_div);
   4125 
   4126 	/* max. time slots - one slot for MTP header */
   4127 	if (num_slots > 63)
   4128 		return -ENOSPC;
   4129 	return num_slots;
   4130 }
   4131 EXPORT_SYMBOL(drm_dp_find_vcpi_slots);
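
/*
 * Editor's annotation, not part of the original source: worked example for the
 * helper above. With mgr->pbn_div = 40 (5.4 Gbps, 4 lanes), a stream needing
 * 532 PBN occupies DIV_ROUND_UP(532, 40) = 14 time slots, comfortably under
 * the 63-slot limit.
 */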
   4132 
   4133 static int drm_dp_init_vcpi(struct drm_dp_mst_topology_mgr *mgr,
   4134 			    struct drm_dp_vcpi *vcpi, int pbn, int slots)
   4135 {
   4136 	int ret;
   4137 
   4138 	/* max. time slots - one slot for MTP header */
   4139 	if (slots > 63)
   4140 		return -ENOSPC;
   4141 
   4142 	vcpi->pbn = pbn;
   4143 	vcpi->aligned_pbn = slots * mgr->pbn_div;
   4144 	vcpi->num_slots = slots;
   4145 
   4146 	ret = drm_dp_mst_assign_payload_id(mgr, vcpi);
   4147 	if (ret < 0)
   4148 		return ret;
   4149 	return 0;
   4150 }
   4151 
   4152 /**
   4153  * drm_dp_atomic_find_vcpi_slots() - Find and add VCPI slots to the state
   4154  * @state: global atomic state
   4155  * @mgr: MST topology manager for the port
   4156  * @port: port to find vcpi slots for
   4157  * @pbn: bandwidth required for the mode in PBN
   4158  * @pbn_div: divider for DSC mode that takes FEC into account
   4159  *
   4160  * Allocates VCPI slots to @port, replacing any previous VCPI allocations it
   4161  * may have had. Any atomic drivers which support MST must call this function
   4162  * in their &drm_encoder_helper_funcs.atomic_check() callback to change the
   4163  * current VCPI allocation for the new state, but only when
   4164  * &drm_crtc_state.mode_changed or &drm_crtc_state.connectors_changed is set
   4165  * to ensure compatibility with userspace applications that still use the
   4166  * legacy modesetting UAPI.
   4167  *
   4168  * Allocations set by this function are not checked against the bandwidth
   4169  * restraints of @mgr until the driver calls drm_dp_mst_atomic_check().
   4170  *
   4171  * Additionally, it is OK to call this function multiple times on the same
   4172  * @port as needed. It is not OK however, to call this function and
   4173  * drm_dp_atomic_release_vcpi_slots() in the same atomic check phase.
   4174  *
   4175  * See also:
   4176  * drm_dp_atomic_release_vcpi_slots()
   4177  * drm_dp_mst_atomic_check()
   4178  *
   4179  * Returns:
   4180  * Total slots in the atomic state assigned for this port, or a negative error
   4181  * code if the port no longer exists
   4182  */
   4183 int drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state,
   4184 				  struct drm_dp_mst_topology_mgr *mgr,
   4185 				  struct drm_dp_mst_port *port, int pbn,
   4186 				  int pbn_div)
   4187 {
   4188 	struct drm_dp_mst_topology_state *topology_state;
   4189 	struct drm_dp_vcpi_allocation *pos, *vcpi = NULL;
   4190 	int prev_slots, prev_bw, req_slots;
   4191 
   4192 	topology_state = drm_atomic_get_mst_topology_state(state, mgr);
   4193 	if (IS_ERR(topology_state))
   4194 		return PTR_ERR(topology_state);
   4195 
   4196 	/* Find the current allocation for this port, if any */
   4197 	list_for_each_entry(pos, &topology_state->vcpis, next) {
   4198 		if (pos->port == port) {
   4199 			vcpi = pos;
   4200 			prev_slots = vcpi->vcpi;
   4201 			prev_bw = vcpi->pbn;
   4202 
   4203 			/*
   4204 			 * This should never happen, unless the driver tries
   4205 			 * releasing and allocating the same VCPI allocation,
   4206 			 * which is an error
   4207 			 */
   4208 			if (WARN_ON(!prev_slots)) {
   4209 				DRM_ERROR("cannot allocate and release VCPI on [MST PORT:%p] in the same state\n",
   4210 					  port);
   4211 				return -EINVAL;
   4212 			}
   4213 
   4214 			break;
   4215 		}
   4216 	}
   4217 	if (!vcpi) {
   4218 		prev_slots = 0;
   4219 		prev_bw = 0;
   4220 	}
   4221 
   4222 	if (pbn_div <= 0)
   4223 		pbn_div = mgr->pbn_div;
   4224 
   4225 	req_slots = DIV_ROUND_UP(pbn, pbn_div);
   4226 
   4227 	DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] [MST PORT:%p] VCPI %d -> %d\n",
   4228 			 port->connector->base.id, port->connector->name,
   4229 			 port, prev_slots, req_slots);
   4230 	DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] [MST PORT:%p] PBN %d -> %d\n",
   4231 			 port->connector->base.id, port->connector->name,
   4232 			 port, prev_bw, pbn);
   4233 
   4234 	/* Add the new allocation to the state */
   4235 	if (!vcpi) {
   4236 		vcpi = kzalloc(sizeof(*vcpi), GFP_KERNEL);
   4237 		if (!vcpi)
   4238 			return -ENOMEM;
   4239 
   4240 		drm_dp_mst_get_port_malloc(port);
   4241 		vcpi->port = port;
   4242 		list_add(&vcpi->next, &topology_state->vcpis);
   4243 	}
   4244 	vcpi->vcpi = req_slots;
   4245 	vcpi->pbn = pbn;
   4246 
   4247 	return req_slots;
   4248 }
   4249 EXPORT_SYMBOL(drm_dp_atomic_find_vcpi_slots);
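
/*
 * Editor's annotation, not part of the original source: a sketch of the
 * atomic_check usage described above. The helper name and the way @bpp and
 * @port reach the function are hypothetical; @slots would be stashed by the
 * driver and handed to drm_dp_mst_allocate_vcpi() at commit time.
 */
#if 0
static int example_mst_encoder_atomic_check(struct drm_atomic_state *state,
					    struct drm_crtc_state *crtc_state,
					    struct drm_dp_mst_topology_mgr *mgr,
					    struct drm_dp_mst_port *port,
					    int bpp)
{
	int pbn, slots;

	/* Only recompute when the legacy UAPI permits a full modeset */
	if (!crtc_state->mode_changed && !crtc_state->connectors_changed)
		return 0;

	pbn = drm_dp_calc_pbn_mode(crtc_state->adjusted_mode.crtc_clock,
				   bpp, false);
	slots = drm_dp_atomic_find_vcpi_slots(state, mgr, port, pbn, 0);
	if (slots < 0)
		return slots;

	return 0;
}
#endif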
   4250 
   4251 /**
   4252  * drm_dp_atomic_release_vcpi_slots() - Release allocated vcpi slots
   4253  * @state: global atomic state
   4254  * @mgr: MST topology manager for the port
   4255  * @port: The port to release the VCPI slots from
   4256  *
   4257  * Releases any VCPI slots that have been allocated to a port in the atomic
   4258  * state. Any atomic drivers which support MST must call this function in
   4259  * their &drm_connector_helper_funcs.atomic_check() callback when the
   4260  * connector will no longer have VCPI allocated (e.g. because its CRTC was
   4261  * removed) when it had VCPI allocated in the previous atomic state.
   4262  *
   4263  * It is OK to call this even if @port has been removed from the system.
   4264  * Additionally, it is OK to call this function multiple times on the same
   4265  * @port as needed. It is not OK however, to call this function and
   4266  * drm_dp_atomic_find_vcpi_slots() on the same @port in a single atomic check
   4267  * phase.
   4268  *
   4269  * See also:
   4270  * drm_dp_atomic_find_vcpi_slots()
   4271  * drm_dp_mst_atomic_check()
   4272  *
   4273  * Returns:
   4274  * 0 if all slots for this port were added back to
   4275  * &drm_dp_mst_topology_state.avail_slots or negative error code
   4276  */
   4277 int drm_dp_atomic_release_vcpi_slots(struct drm_atomic_state *state,
   4278 				     struct drm_dp_mst_topology_mgr *mgr,
   4279 				     struct drm_dp_mst_port *port)
   4280 {
   4281 	struct drm_dp_mst_topology_state *topology_state;
   4282 	struct drm_dp_vcpi_allocation *pos;
   4283 	bool found = false;
   4284 
   4285 	topology_state = drm_atomic_get_mst_topology_state(state, mgr);
   4286 	if (IS_ERR(topology_state))
   4287 		return PTR_ERR(topology_state);
   4288 
   4289 	list_for_each_entry(pos, &topology_state->vcpis, next) {
   4290 		if (pos->port == port) {
   4291 			found = true;
   4292 			break;
   4293 		}
   4294 	}
   4295 	if (WARN_ON(!found)) {
   4296 		DRM_ERROR("no VCPI for [MST PORT:%p] found in mst state %p\n",
   4297 			  port, &topology_state->base);
   4298 		return -EINVAL;
   4299 	}
   4300 
   4301 	DRM_DEBUG_ATOMIC("[MST PORT:%p] VCPI %d -> 0\n", port, pos->vcpi);
   4302 	if (pos->vcpi) {
   4303 		drm_dp_mst_put_port_malloc(port);
   4304 		pos->vcpi = 0;
   4305 	}
   4306 
   4307 	return 0;
   4308 }
   4309 EXPORT_SYMBOL(drm_dp_atomic_release_vcpi_slots);
   4310 
   4311 /**
   4312  * drm_dp_mst_allocate_vcpi() - Allocate a virtual channel
   4313  * @mgr: manager for this port
   4314  * @port: port to allocate a virtual channel for.
   4315  * @pbn: payload bandwidth number to request
   4316  * @slots: returned number of slots for this PBN.
   4317  */
   4318 bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
   4319 			      struct drm_dp_mst_port *port, int pbn, int slots)
   4320 {
   4321 	int ret;
   4322 
   4323 	port = drm_dp_mst_topology_get_port_validated(mgr, port);
   4324 	if (!port)
   4325 		return false;
   4326 
   4327 	if (slots < 0)
   4328 		return false;
   4329 
   4330 	if (port->vcpi.vcpi > 0) {
   4331 		DRM_DEBUG_KMS("payload: vcpi %d already allocated for pbn %d - requested pbn %d\n",
   4332 			      port->vcpi.vcpi, port->vcpi.pbn, pbn);
   4333 		if (pbn == port->vcpi.pbn) {
   4334 			drm_dp_mst_topology_put_port(port);
   4335 			return true;
   4336 		}
   4337 	}
   4338 
   4339 	ret = drm_dp_init_vcpi(mgr, &port->vcpi, pbn, slots);
   4340 	if (ret) {
   4341 		DRM_DEBUG_KMS("failed to init vcpi slots=%d max=63 ret=%d\n",
   4342 			      DIV_ROUND_UP(pbn, mgr->pbn_div), ret);
   4343 		goto out;
   4344 	}
   4345 	DRM_DEBUG_KMS("initing vcpi for pbn=%d slots=%d\n",
   4346 		      pbn, port->vcpi.num_slots);
   4347 
   4348 	/* Keep port allocated until its payload has been removed */
   4349 	drm_dp_mst_get_port_malloc(port);
   4350 	drm_dp_mst_topology_put_port(port);
   4351 	return true;
   4352 out:
   4353 	return false;
   4354 }
   4355 EXPORT_SYMBOL(drm_dp_mst_allocate_vcpi);
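
/*
 * Editor's annotation, not part of the original source: sketch of the
 * deprecated non-atomic flow that ends in the helper above. The wrapper name
 * is hypothetical; clock_khz and bpp come from the mode being set.
 */
#if 0
static bool example_legacy_enable_stream(struct drm_dp_mst_topology_mgr *mgr,
					 struct drm_dp_mst_port *port,
					 int clock_khz, int bpp)
{
	int pbn = drm_dp_calc_pbn_mode(clock_khz, bpp, false);
	int slots = drm_dp_find_vcpi_slots(mgr, pbn);

	if (slots < 0)
		return false;

	/* Reserves a payload ID and records pbn/slots in port->vcpi */
	return drm_dp_mst_allocate_vcpi(mgr, port, pbn, slots);
}
#endif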
   4356 
   4357 int drm_dp_mst_get_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
   4358 {
   4359 	int slots = 0;
   4360 	port = drm_dp_mst_topology_get_port_validated(mgr, port);
   4361 	if (!port)
   4362 		return slots;
   4363 
   4364 	slots = port->vcpi.num_slots;
   4365 	drm_dp_mst_topology_put_port(port);
   4366 	return slots;
   4367 }
   4368 EXPORT_SYMBOL(drm_dp_mst_get_vcpi_slots);
   4369 
   4370 /**
   4371  * drm_dp_mst_reset_vcpi_slots() - Reset number of slots to 0 for VCPI
   4372  * @mgr: manager for this port
   4373  * @port: unverified pointer to a port.
   4374  *
    4375  * This just resets the number of slots for the port's VCPI for later programming.
   4376  */
   4377 void drm_dp_mst_reset_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
   4378 {
   4379 	/*
   4380 	 * A port with VCPI will remain allocated until its VCPI is
   4381 	 * released, no verified ref needed
   4382 	 */
   4383 
   4384 	port->vcpi.num_slots = 0;
   4385 }
   4386 EXPORT_SYMBOL(drm_dp_mst_reset_vcpi_slots);
   4387 
   4388 /**
   4389  * drm_dp_mst_deallocate_vcpi() - deallocate a VCPI
   4390  * @mgr: manager for this port
   4391  * @port: port to deallocate vcpi for
   4392  *
   4393  * This can be called unconditionally, regardless of whether
   4394  * drm_dp_mst_allocate_vcpi() succeeded or not.
   4395  */
   4396 void drm_dp_mst_deallocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
   4397 				struct drm_dp_mst_port *port)
   4398 {
   4399 	if (!port->vcpi.vcpi)
   4400 		return;
   4401 
   4402 	drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
   4403 	port->vcpi.num_slots = 0;
   4404 	port->vcpi.pbn = 0;
   4405 	port->vcpi.aligned_pbn = 0;
   4406 	port->vcpi.vcpi = 0;
   4407 	drm_dp_mst_put_port_malloc(port);
   4408 }
   4409 EXPORT_SYMBOL(drm_dp_mst_deallocate_vcpi);
   4410 
   4411 static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
   4412 				     int id, struct drm_dp_payload *payload)
   4413 {
   4414 	u8 payload_alloc[3], status;
   4415 	int ret;
   4416 	int retries = 0;
   4417 
   4418 	drm_dp_dpcd_writeb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS,
   4419 			   DP_PAYLOAD_TABLE_UPDATED);
   4420 
   4421 	payload_alloc[0] = id;
   4422 	payload_alloc[1] = payload->start_slot;
   4423 	payload_alloc[2] = payload->num_slots;
   4424 
   4425 	ret = drm_dp_dpcd_write(mgr->aux, DP_PAYLOAD_ALLOCATE_SET, payload_alloc, 3);
   4426 	if (ret != 3) {
   4427 		DRM_DEBUG_KMS("failed to write payload allocation %d\n", ret);
   4428 		goto fail;
   4429 	}
   4430 
   4431 retry:
   4432 	ret = drm_dp_dpcd_readb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
   4433 	if (ret < 0) {
   4434 		DRM_DEBUG_KMS("failed to read payload table status %d\n", ret);
   4435 		goto fail;
   4436 	}
   4437 
   4438 	if (!(status & DP_PAYLOAD_TABLE_UPDATED)) {
   4439 		retries++;
   4440 		if (retries < 20) {
   4441 			usleep_range(10000, 20000);
   4442 			goto retry;
   4443 		}
   4444 		DRM_DEBUG_KMS("status not set after read payload table status %d\n", status);
   4445 		ret = -EINVAL;
   4446 		goto fail;
   4447 	}
   4448 	ret = 0;
   4449 fail:
   4450 	return ret;
   4451 }
   4452 
   4453 
   4454 /**
   4455  * drm_dp_check_act_status() - Check ACT handled status.
   4456  * @mgr: manager to use
   4457  *
   4458  * Check the payload status bits in the DPCD for ACT handled completion.
   4459  */
   4460 int drm_dp_check_act_status(struct drm_dp_mst_topology_mgr *mgr)
   4461 {
   4462 	u8 status;
   4463 	int ret;
   4464 	int count = 0;
   4465 
   4466 	do {
   4467 		ret = drm_dp_dpcd_readb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
   4468 
   4469 		if (ret < 0) {
   4470 			DRM_DEBUG_KMS("failed to read payload table status %d\n", ret);
   4471 			goto fail;
   4472 		}
   4473 
   4474 		if (status & DP_PAYLOAD_ACT_HANDLED)
   4475 			break;
   4476 		count++;
   4477 		udelay(100);
   4478 
   4479 	} while (count < 30);
   4480 
   4481 	if (!(status & DP_PAYLOAD_ACT_HANDLED)) {
   4482 		DRM_DEBUG_KMS("failed to get ACT bit %d after %d retries\n", status, count);
   4483 		ret = -EINVAL;
   4484 		goto fail;
   4485 	}
   4486 	return 0;
   4487 fail:
   4488 	return ret;
   4489 }
   4490 EXPORT_SYMBOL(drm_dp_check_act_status);
   4491 
   4492 /**
   4493  * drm_dp_calc_pbn_mode() - Calculate the PBN for a mode.
   4494  * @clock: dot clock for the mode
   4495  * @bpp: bpp for the mode.
   4496  * @dsc: DSC mode. If true, bpp has units of 1/16 of a bit per pixel
   4497  *
   4498  * This uses the formula in the spec to calculate the PBN value for a mode.
   4499  */
   4500 int drm_dp_calc_pbn_mode(int clock, int bpp, bool dsc)
   4501 {
   4502 	/*
   4503 	 * margin 5300ppm + 300ppm ~ 0.6% as per spec, factor is 1.006
    4504 	 * The unit of 54/64Mbytes/sec is an arbitrary unit chosen as a
    4505 	 * common multiplier to render an integer PBN for all link rate/lane
    4506 	 * count combinations.
    4507 	 * Calculate:
    4508 	 * peak_kbps *= (1006/1000)
    4509 	 * peak_kbps *= (64/54)
    4510 	 * peak_kbps /= 8    convert to bytes
   4511 	 *
   4512 	 * If the bpp is in units of 1/16, further divide by 16. Put this
   4513 	 * factor in the numerator rather than the denominator to avoid
   4514 	 * integer overflow
   4515 	 */
   4516 
   4517 	if (dsc)
   4518 		return DIV_ROUND_UP_ULL(mul_u32_u32(clock * (bpp / 16), 64 * 1006),
   4519 					8 * 54 * 1000 * 1000);
   4520 
   4521 	return DIV_ROUND_UP_ULL(mul_u32_u32(clock * bpp, 64 * 1006),
   4522 				8 * 54 * 1000 * 1000);
   4523 }
   4524 EXPORT_SYMBOL(drm_dp_calc_pbn_mode);
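
/*
 * Editor's annotation, not part of the original source: worked example of the
 * formula above. A 1920x1080@60 stream (148500 kHz dot clock) at 24 bpp gives
 * DIV_ROUND_UP(148500 * 24 * 64 * 1006, 8 * 54 * 1000 * 1000) = 532 PBN,
 * which at pbn_div = 40 (5.4 Gbps, 4 lanes) needs 14 time slots.
 */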
   4525 
   4526 /* we want to kick the TX after we've ack the up/down IRQs. */
   4527 static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr)
   4528 {
   4529 	queue_work(system_long_wq, &mgr->tx_work);
   4530 }
   4531 
   4532 static void drm_dp_mst_dump_mstb(struct seq_file *m,
   4533 				 struct drm_dp_mst_branch *mstb)
   4534 {
   4535 	struct drm_dp_mst_port *port;
   4536 	int tabs = mstb->lct;
   4537 	char prefix[10];
   4538 	int i;
   4539 
   4540 	for (i = 0; i < tabs; i++)
   4541 		prefix[i] = '\t';
   4542 	prefix[i] = '\0';
   4543 
   4544 	seq_printf(m, "%smst: %p, %d\n", prefix, mstb, mstb->num_ports);
   4545 	list_for_each_entry(port, &mstb->ports, next) {
   4546 		seq_printf(m, "%sport: %d: input: %d: pdt: %d, ddps: %d ldps: %d, sdp: %d/%d, %p, conn: %p\n", prefix, port->port_num, port->input, port->pdt, port->ddps, port->ldps, port->num_sdp_streams, port->num_sdp_stream_sinks, port, port->connector);
   4547 		if (port->mstb)
   4548 			drm_dp_mst_dump_mstb(m, port->mstb);
   4549 	}
   4550 }
   4551 
   4552 #define DP_PAYLOAD_TABLE_SIZE		64
   4553 
   4554 static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
   4555 				  char *buf)
   4556 {
   4557 	int i;
   4558 
   4559 	for (i = 0; i < DP_PAYLOAD_TABLE_SIZE; i += 16) {
   4560 		if (drm_dp_dpcd_read(mgr->aux,
   4561 				     DP_PAYLOAD_TABLE_UPDATE_STATUS + i,
   4562 				     &buf[i], 16) != 16)
   4563 			return false;
   4564 	}
   4565 	return true;
   4566 }
   4567 
   4568 static void fetch_monitor_name(struct drm_dp_mst_topology_mgr *mgr,
   4569 			       struct drm_dp_mst_port *port, char *name,
   4570 			       int namelen)
   4571 {
   4572 	struct edid *mst_edid;
   4573 
   4574 	mst_edid = drm_dp_mst_get_edid(port->connector, mgr, port);
   4575 	drm_edid_get_monitor_name(mst_edid, name, namelen);
   4576 }
   4577 
   4578 /**
   4579  * drm_dp_mst_dump_topology(): dump topology to seq file.
   4580  * @m: seq_file to dump output to
   4581  * @mgr: manager to dump current topology for.
   4582  *
   4583  * helper to dump MST topology to a seq file for debugfs.
   4584  */
   4585 void drm_dp_mst_dump_topology(struct seq_file *m,
   4586 			      struct drm_dp_mst_topology_mgr *mgr)
   4587 {
   4588 	int i;
   4589 	struct drm_dp_mst_port *port;
   4590 
   4591 	mutex_lock(&mgr->lock);
   4592 	if (mgr->mst_primary)
   4593 		drm_dp_mst_dump_mstb(m, mgr->mst_primary);
   4594 
   4595 	/* dump VCPIs */
   4596 	mutex_unlock(&mgr->lock);
   4597 
   4598 	mutex_lock(&mgr->payload_lock);
   4599 	seq_printf(m, "vcpi: %lx %lx %d\n", mgr->payload_mask, mgr->vcpi_mask,
   4600 		mgr->max_payloads);
   4601 
   4602 	for (i = 0; i < mgr->max_payloads; i++) {
   4603 		if (mgr->proposed_vcpis[i]) {
   4604 			char name[14];
   4605 
   4606 			port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
   4607 			fetch_monitor_name(mgr, port, name, sizeof(name));
   4608 			seq_printf(m, "vcpi %d: %d %d %d sink name: %s\n", i,
   4609 				   port->port_num, port->vcpi.vcpi,
   4610 				   port->vcpi.num_slots,
   4611 				   (*name != 0) ? name :  "Unknown");
   4612 		} else
   4613 			seq_printf(m, "vcpi %d:unused\n", i);
   4614 	}
   4615 	for (i = 0; i < mgr->max_payloads; i++) {
   4616 		seq_printf(m, "payload %d: %d, %d, %d\n",
   4617 			   i,
   4618 			   mgr->payloads[i].payload_state,
   4619 			   mgr->payloads[i].start_slot,
   4620 			   mgr->payloads[i].num_slots);
   4623 	}
   4624 	mutex_unlock(&mgr->payload_lock);
   4625 
   4626 	mutex_lock(&mgr->lock);
   4627 	if (mgr->mst_primary) {
   4628 		u8 buf[DP_PAYLOAD_TABLE_SIZE];
   4629 		int ret;
   4630 
   4631 		ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, buf, DP_RECEIVER_CAP_SIZE);
   4632 		seq_printf(m, "dpcd: %*ph\n", DP_RECEIVER_CAP_SIZE, buf);
   4633 		ret = drm_dp_dpcd_read(mgr->aux, DP_FAUX_CAP, buf, 2);
   4634 		seq_printf(m, "faux/mst: %*ph\n", 2, buf);
   4635 		ret = drm_dp_dpcd_read(mgr->aux, DP_MSTM_CTRL, buf, 1);
   4636 		seq_printf(m, "mst ctrl: %*ph\n", 1, buf);
   4637 
   4638 		/* dump the standard OUI branch header */
   4639 		ret = drm_dp_dpcd_read(mgr->aux, DP_BRANCH_OUI, buf, DP_BRANCH_OUI_HEADER_SIZE);
   4640 		seq_printf(m, "branch oui: %*phN devid: ", 3, buf);
   4641 		for (i = 0x3; i < 0x8 && buf[i]; i++)
   4642 			seq_printf(m, "%c", buf[i]);
   4643 		seq_printf(m, " revision: hw: %x.%x sw: %x.%x\n",
   4644 			   buf[0x9] >> 4, buf[0x9] & 0xf, buf[0xa], buf[0xb]);
   4645 		if (dump_dp_payload_table(mgr, buf))
   4646 			seq_printf(m, "payload table: %*ph\n", DP_PAYLOAD_TABLE_SIZE, buf);
   4647 	}
   4648 
   4649 	mutex_unlock(&mgr->lock);
   4650 
   4651 }
   4652 EXPORT_SYMBOL(drm_dp_mst_dump_topology);
   4653 
   4654 static void drm_dp_tx_work(struct work_struct *work)
   4655 {
   4656 	struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, tx_work);
   4657 
   4658 	mutex_lock(&mgr->qlock);
   4659 	if (!list_empty(&mgr->tx_msg_downq) && !mgr->is_waiting_for_dwn_reply)
   4660 		process_single_down_tx_qlock(mgr);
   4661 	mutex_unlock(&mgr->qlock);
   4662 }
   4663 
   4664 static inline void
   4665 drm_dp_delayed_destroy_port(struct drm_dp_mst_port *port)
   4666 {
   4667 	if (port->connector)
   4668 		port->mgr->cbs->destroy_connector(port->mgr, port->connector);
   4669 
   4670 	drm_dp_port_set_pdt(port, DP_PEER_DEVICE_NONE, port->mcs);
   4671 	drm_dp_mst_put_port_malloc(port);
   4672 }
   4673 
   4674 static inline void
   4675 drm_dp_delayed_destroy_mstb(struct drm_dp_mst_branch *mstb)
   4676 {
   4677 	struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
   4678 	struct drm_dp_mst_port *port, *tmp;
   4679 	bool wake_tx = false;
   4680 
   4681 	mutex_lock(&mgr->lock);
   4682 	list_for_each_entry_safe(port, tmp, &mstb->ports, next) {
   4683 		list_del(&port->next);
   4684 		drm_dp_mst_topology_put_port(port);
   4685 	}
   4686 	mutex_unlock(&mgr->lock);
   4687 
   4688 	/* drop any tx slots msg */
   4689 	mutex_lock(&mstb->mgr->qlock);
   4690 	if (mstb->tx_slots[0]) {
   4691 		mstb->tx_slots[0]->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
   4692 		mstb->tx_slots[0] = NULL;
   4693 		wake_tx = true;
   4694 	}
   4695 	if (mstb->tx_slots[1]) {
   4696 		mstb->tx_slots[1]->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
   4697 		mstb->tx_slots[1] = NULL;
   4698 		wake_tx = true;
   4699 	}
   4700 	mutex_unlock(&mstb->mgr->qlock);
   4701 
   4702 	if (wake_tx)
   4703 		wake_up_all(&mstb->mgr->tx_waitq);
   4704 
   4705 	drm_dp_mst_put_mstb_malloc(mstb);
   4706 }
   4707 
   4708 static void drm_dp_delayed_destroy_work(struct work_struct *work)
   4709 {
   4710 	struct drm_dp_mst_topology_mgr *mgr =
   4711 		container_of(work, struct drm_dp_mst_topology_mgr,
   4712 			     delayed_destroy_work);
   4713 	bool send_hotplug = false, go_again;
   4714 
   4715 	/*
   4716 	 * Not a regular list traverse as we have to drop the destroy
   4717 	 * connector lock before destroying the mstb/port, to avoid AB->BA
   4718 	 * ordering between this lock and the config mutex.
   4719 	 */
   4720 	do {
   4721 		go_again = false;
   4722 
   4723 		for (;;) {
   4724 			struct drm_dp_mst_branch *mstb;
   4725 
   4726 			mutex_lock(&mgr->delayed_destroy_lock);
   4727 			mstb = list_first_entry_or_null(&mgr->destroy_branch_device_list,
   4728 							struct drm_dp_mst_branch,
   4729 							destroy_next);
   4730 			if (mstb)
   4731 				list_del(&mstb->destroy_next);
   4732 			mutex_unlock(&mgr->delayed_destroy_lock);
   4733 
   4734 			if (!mstb)
   4735 				break;
   4736 
   4737 			drm_dp_delayed_destroy_mstb(mstb);
   4738 			go_again = true;
   4739 		}
   4740 
   4741 		for (;;) {
   4742 			struct drm_dp_mst_port *port;
   4743 
   4744 			mutex_lock(&mgr->delayed_destroy_lock);
   4745 			port = list_first_entry_or_null(&mgr->destroy_port_list,
   4746 							struct drm_dp_mst_port,
   4747 							next);
   4748 			if (port)
   4749 				list_del(&port->next);
   4750 			mutex_unlock(&mgr->delayed_destroy_lock);
   4751 
   4752 			if (!port)
   4753 				break;
   4754 
   4755 			drm_dp_delayed_destroy_port(port);
   4756 			send_hotplug = true;
   4757 			go_again = true;
   4758 		}
   4759 	} while (go_again);
   4760 
   4761 	if (send_hotplug)
   4762 		drm_kms_helper_hotplug_event(mgr->dev);
   4763 }
   4764 
   4765 static struct drm_private_state *
   4766 drm_dp_mst_duplicate_state(struct drm_private_obj *obj)
   4767 {
   4768 	struct drm_dp_mst_topology_state *state, *old_state =
   4769 		to_dp_mst_topology_state(obj->state);
   4770 	struct drm_dp_vcpi_allocation *pos, *vcpi;
   4771 
   4772 	state = kmemdup(old_state, sizeof(*state), GFP_KERNEL);
   4773 	if (!state)
   4774 		return NULL;
   4775 
   4776 	__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);
   4777 
   4778 	INIT_LIST_HEAD(&state->vcpis);
   4779 
   4780 	list_for_each_entry(pos, &old_state->vcpis, next) {
   4781 		/* Prune leftover freed VCPI allocations */
   4782 		if (!pos->vcpi)
   4783 			continue;
   4784 
   4785 		vcpi = kmemdup(pos, sizeof(*vcpi), GFP_KERNEL);
   4786 		if (!vcpi)
   4787 			goto fail;
   4788 
   4789 		drm_dp_mst_get_port_malloc(vcpi->port);
   4790 		list_add(&vcpi->next, &state->vcpis);
   4791 	}
   4792 
   4793 	return &state->base;
   4794 
   4795 fail:
   4796 	list_for_each_entry_safe(pos, vcpi, &state->vcpis, next) {
   4797 		drm_dp_mst_put_port_malloc(pos->port);
   4798 		kfree(pos);
   4799 	}
   4800 	kfree(state);
   4801 
   4802 	return NULL;
   4803 }
   4804 
   4805 static void drm_dp_mst_destroy_state(struct drm_private_obj *obj,
   4806 				     struct drm_private_state *state)
   4807 {
   4808 	struct drm_dp_mst_topology_state *mst_state =
   4809 		to_dp_mst_topology_state(state);
   4810 	struct drm_dp_vcpi_allocation *pos, *tmp;
   4811 
   4812 	list_for_each_entry_safe(pos, tmp, &mst_state->vcpis, next) {
   4813 		/* We only keep references to ports with non-zero VCPIs */
   4814 		if (pos->vcpi)
   4815 			drm_dp_mst_put_port_malloc(pos->port);
   4816 		kfree(pos);
   4817 	}
   4818 
   4819 	kfree(mst_state);
   4820 }
   4821 
   4822 static bool drm_dp_mst_port_downstream_of_branch(struct drm_dp_mst_port *port,
   4823 						 struct drm_dp_mst_branch *branch)
   4824 {
   4825 	while (port->parent) {
   4826 		if (port->parent == branch)
   4827 			return true;
   4828 
   4829 		if (port->parent->port_parent)
   4830 			port = port->parent->port_parent;
   4831 		else
   4832 			break;
   4833 	}
   4834 	return false;
   4835 }
   4836 
   4837 static inline
   4838 int drm_dp_mst_atomic_check_bw_limit(struct drm_dp_mst_branch *branch,
   4839 				     struct drm_dp_mst_topology_state *mst_state)
   4840 {
   4841 	struct drm_dp_mst_port *port;
   4842 	struct drm_dp_vcpi_allocation *vcpi;
   4843 	int pbn_limit = 0, pbn_used = 0;
   4844 
   4845 	list_for_each_entry(port, &branch->ports, next) {
   4846 		if (port->mstb)
   4847 			if (drm_dp_mst_atomic_check_bw_limit(port->mstb, mst_state))
   4848 				return -ENOSPC;
   4849 
   4850 		if (port->available_pbn > 0)
   4851 			pbn_limit = port->available_pbn;
   4852 	}
   4853 	DRM_DEBUG_ATOMIC("[MST BRANCH:%p] branch has %d PBN available\n",
   4854 			 branch, pbn_limit);
   4855 
   4856 	list_for_each_entry(vcpi, &mst_state->vcpis, next) {
   4857 		if (!vcpi->pbn)
   4858 			continue;
   4859 
   4860 		if (drm_dp_mst_port_downstream_of_branch(vcpi->port, branch))
   4861 			pbn_used += vcpi->pbn;
   4862 	}
   4863 	DRM_DEBUG_ATOMIC("[MST BRANCH:%p] branch used %d PBN\n",
   4864 			 branch, pbn_used);
   4865 
   4866 	if (pbn_used > pbn_limit) {
   4867 		DRM_DEBUG_ATOMIC("[MST BRANCH:%p] No available bandwidth\n",
   4868 				 branch);
   4869 		return -ENOSPC;
   4870 	}
   4871 	return 0;
   4872 }
   4873 
   4874 static inline int
   4875 drm_dp_mst_atomic_check_vcpi_alloc_limit(struct drm_dp_mst_topology_mgr *mgr,
   4876 					 struct drm_dp_mst_topology_state *mst_state)
   4877 {
   4878 	struct drm_dp_vcpi_allocation *vcpi;
   4879 	int avail_slots = 63, payload_count = 0;
   4880 
   4881 	list_for_each_entry(vcpi, &mst_state->vcpis, next) {
    4882 		/* Releasing VCPI is always OK, even if the port is gone */
   4883 		if (!vcpi->vcpi) {
   4884 			DRM_DEBUG_ATOMIC("[MST PORT:%p] releases all VCPI slots\n",
   4885 					 vcpi->port);
   4886 			continue;
   4887 		}
   4888 
   4889 		DRM_DEBUG_ATOMIC("[MST PORT:%p] requires %d vcpi slots\n",
   4890 				 vcpi->port, vcpi->vcpi);
   4891 
   4892 		avail_slots -= vcpi->vcpi;
   4893 		if (avail_slots < 0) {
   4894 			DRM_DEBUG_ATOMIC("[MST PORT:%p] not enough VCPI slots in mst state %p (avail=%d)\n",
   4895 					 vcpi->port, mst_state,
   4896 					 avail_slots + vcpi->vcpi);
   4897 			return -ENOSPC;
   4898 		}
   4899 
   4900 		if (++payload_count > mgr->max_payloads) {
   4901 			DRM_DEBUG_ATOMIC("[MST MGR:%p] state %p has too many payloads (max=%d)\n",
   4902 					 mgr, mst_state, mgr->max_payloads);
   4903 			return -EINVAL;
   4904 		}
   4905 	}
   4906 	DRM_DEBUG_ATOMIC("[MST MGR:%p] mst state %p VCPI avail=%d used=%d\n",
   4907 			 mgr, mst_state, avail_slots,
   4908 			 63 - avail_slots);
   4909 
   4910 	return 0;
   4911 }
   4912 
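/*
 * Worked example of the slot accounting above (illustrative numbers):
 * a single MST link offers 63 payload time slots.  Two streams that
 * need 24 and 30 slots fit (24 + 30 = 54 <= 63), but adding a third
 * stream that needs 12 slots would overflow (54 + 12 = 66 > 63) and
 * the check fails with -ENOSPC.
 */
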
   4913 /**
   4914  * drm_dp_mst_add_affected_dsc_crtcs
    4915  * @state: Pointer to the new struct drm_atomic_state
   4916  * @mgr: MST topology manager
   4917  *
    4918  * Whenever there is a change in the MST topology, the DSC
    4919  * configuration has to be recalculated, and therefore a modeset
    4920  * needs to be triggered on all the CRTCs driving streams in that
    4921  * topology.
   4922  *
   4923  * See also:
   4924  * drm_dp_mst_atomic_enable_dsc()
   4925  */
   4926 int drm_dp_mst_add_affected_dsc_crtcs(struct drm_atomic_state *state, struct drm_dp_mst_topology_mgr *mgr)
   4927 {
   4928 	struct drm_dp_mst_topology_state *mst_state;
   4929 	struct drm_dp_vcpi_allocation *pos;
   4930 	struct drm_connector *connector;
   4931 	struct drm_connector_state *conn_state;
   4932 	struct drm_crtc *crtc;
   4933 	struct drm_crtc_state *crtc_state;
   4934 
   4935 	mst_state = drm_atomic_get_mst_topology_state(state, mgr);
   4936 
   4937 	if (IS_ERR(mst_state))
   4938 		return -EINVAL;
   4939 
   4940 	list_for_each_entry(pos, &mst_state->vcpis, next) {
   4941 
   4942 		connector = pos->port->connector;
   4943 
   4944 		if (!connector)
   4945 			return -EINVAL;
   4946 
   4947 		conn_state = drm_atomic_get_connector_state(state, connector);
   4948 
   4949 		if (IS_ERR(conn_state))
   4950 			return PTR_ERR(conn_state);
   4951 
   4952 		crtc = conn_state->crtc;
   4953 
   4954 		if (WARN_ON(!crtc))
   4955 			return -EINVAL;
   4956 
   4957 		if (!drm_dp_mst_dsc_aux_for_port(pos->port))
   4958 			continue;
   4959 
   4960 		crtc_state = drm_atomic_get_crtc_state(mst_state->base.state, crtc);
   4961 
   4962 		if (IS_ERR(crtc_state))
   4963 			return PTR_ERR(crtc_state);
   4964 
   4965 		DRM_DEBUG_ATOMIC("[MST MGR:%p] Setting mode_changed flag on CRTC %p\n",
   4966 				 mgr, crtc);
   4967 
   4968 		crtc_state->mode_changed = true;
   4969 	}
   4970 	return 0;
   4971 }
   4972 EXPORT_SYMBOL(drm_dp_mst_add_affected_dsc_crtcs);
   4973 
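/*
 * Illustrative sketch of how a driver might use the helper above from its
 * atomic check path when a topology change can invalidate the current DSC
 * configuration.  example_mst_atomic_check() and the way "mgr" is obtained
 * are hypothetical driver details, not part of this helper library.
 */
#if 0
static int example_mst_atomic_check(struct drm_atomic_state *state,
				    struct drm_dp_mst_topology_mgr *mgr)
{
	int ret;

	/* Force a modeset on every CRTC driving a DSC-capable MST stream */
	ret = drm_dp_mst_add_affected_dsc_crtcs(state, mgr);
	if (ret)
		return ret;

	return drm_dp_mst_atomic_check(state);
}
#endif
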
   4974 /**
   4975  * drm_dp_mst_atomic_enable_dsc - Set DSC Enable Flag to On/Off
   4976  * @state: Pointer to the new drm_atomic_state
   4977  * @port: Pointer to the affected MST Port
   4978  * @pbn: Newly recalculated bw required for link with DSC enabled
   4979  * @pbn_div: Divider to calculate correct number of pbn per slot
   4980  * @enable: Boolean flag to enable or disable DSC on the port
   4981  *
    4982  * This function enables or disables DSC on the given port by
    4983  * recalculating its VCPI allocation from the PBN provided, and sets
    4984  * the dsc_enabled flag to keep track of which ports have DSC
    4985  * enabled.
   4986  *
   4987  */
   4988 int drm_dp_mst_atomic_enable_dsc(struct drm_atomic_state *state,
   4989 				 struct drm_dp_mst_port *port,
   4990 				 int pbn, int pbn_div,
   4991 				 bool enable)
   4992 {
   4993 	struct drm_dp_mst_topology_state *mst_state;
   4994 	struct drm_dp_vcpi_allocation *pos;
   4995 	bool found = false;
   4996 	int vcpi = 0;
   4997 
   4998 	mst_state = drm_atomic_get_mst_topology_state(state, port->mgr);
   4999 
   5000 	if (IS_ERR(mst_state))
   5001 		return PTR_ERR(mst_state);
   5002 
   5003 	list_for_each_entry(pos, &mst_state->vcpis, next) {
   5004 		if (pos->port == port) {
   5005 			found = true;
   5006 			break;
   5007 		}
   5008 	}
   5009 
   5010 	if (!found) {
   5011 		DRM_DEBUG_ATOMIC("[MST PORT:%p] Couldn't find VCPI allocation in mst state %p\n",
   5012 				 port, mst_state);
   5013 		return -EINVAL;
   5014 	}
   5015 
   5016 	if (pos->dsc_enabled == enable) {
   5017 		DRM_DEBUG_ATOMIC("[MST PORT:%p] DSC flag is already set to %d, returning %d VCPI slots\n",
   5018 				 port, enable, pos->vcpi);
   5019 		vcpi = pos->vcpi;
   5020 	}
   5021 
   5022 	if (enable) {
   5023 		vcpi = drm_dp_atomic_find_vcpi_slots(state, port->mgr, port, pbn, pbn_div);
   5024 		DRM_DEBUG_ATOMIC("[MST PORT:%p] Enabling DSC flag, reallocating %d VCPI slots on the port\n",
   5025 				 port, vcpi);
   5026 		if (vcpi < 0)
   5027 			return -EINVAL;
   5028 	}
   5029 
   5030 	pos->dsc_enabled = enable;
   5031 
   5032 	return vcpi;
   5033 }
   5034 EXPORT_SYMBOL(drm_dp_mst_atomic_enable_dsc);
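
/*
 * Illustrative sketch of enabling DSC on a port during atomic check.
 * example_set_dsc() is hypothetical, and the pbn/pbn_div values would be
 * derived by the driver from the adjusted mode and link configuration.
 */
#if 0
static int example_set_dsc(struct drm_atomic_state *state,
			   struct drm_dp_mst_port *port,
			   int pbn, int pbn_div, bool enable)
{
	int slots;

	slots = drm_dp_mst_atomic_enable_dsc(state, port, pbn, pbn_div,
					     enable);
	if (slots < 0)
		return slots;

	/* "slots" is the VCPI allocation recomputed for the DSC stream */
	return 0;
}
#endif
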
   5035 /**
   5036  * drm_dp_mst_atomic_check - Check that the new state of an MST topology in an
   5037  * atomic update is valid
   5038  * @state: Pointer to the new &struct drm_dp_mst_topology_state
   5039  *
   5040  * Checks the given topology state for an atomic update to ensure that it's
   5041  * valid. This includes checking whether there's enough bandwidth to support
   5042  * the new VCPI allocations in the atomic update.
   5043  *
   5044  * Any atomic drivers supporting DP MST must make sure to call this after
   5045  * checking the rest of their state in their
   5046  * &drm_mode_config_funcs.atomic_check() callback.
   5047  *
   5048  * See also:
   5049  * drm_dp_atomic_find_vcpi_slots()
   5050  * drm_dp_atomic_release_vcpi_slots()
   5051  *
   5052  * Returns:
   5053  *
   5054  * 0 if the new state is valid, negative error code otherwise.
   5055  */
   5056 int drm_dp_mst_atomic_check(struct drm_atomic_state *state)
   5057 {
   5058 	struct drm_dp_mst_topology_mgr *mgr;
   5059 	struct drm_dp_mst_topology_state *mst_state;
   5060 	int i, ret = 0;
   5061 
   5062 	for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
   5063 		if (!mgr->mst_state)
   5064 			continue;
   5065 
   5066 		ret = drm_dp_mst_atomic_check_vcpi_alloc_limit(mgr, mst_state);
   5067 		if (ret)
   5068 			break;
   5069 		ret = drm_dp_mst_atomic_check_bw_limit(mgr->mst_primary, mst_state);
   5070 		if (ret)
   5071 			break;
   5072 	}
   5073 
   5074 	return ret;
   5075 }
   5076 EXPORT_SYMBOL(drm_dp_mst_atomic_check);
   5077 
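/*
 * Illustrative sketch of the call order described above: a driver's
 * &drm_mode_config_funcs.atomic_check hook validates the rest of its state
 * first and calls drm_dp_mst_atomic_check() last.  example_atomic_check()
 * is a hypothetical driver callback.
 */
#if 0
static int example_atomic_check(struct drm_device *dev,
				struct drm_atomic_state *state)
{
	int ret;

	ret = drm_atomic_helper_check(dev, state);
	if (ret)
		return ret;

	/* Validates VCPI slot usage and per-branch bandwidth limits */
	return drm_dp_mst_atomic_check(state);
}
#endif
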
   5078 const struct drm_private_state_funcs drm_dp_mst_topology_state_funcs = {
   5079 	.atomic_duplicate_state = drm_dp_mst_duplicate_state,
   5080 	.atomic_destroy_state = drm_dp_mst_destroy_state,
   5081 };
   5082 EXPORT_SYMBOL(drm_dp_mst_topology_state_funcs);
   5083 
   5084 /**
   5085  * drm_atomic_get_mst_topology_state: get MST topology state
   5086  *
   5087  * @state: global atomic state
   5088  * @mgr: MST topology manager, also the private object in this case
   5089  *
    5090  * This function wraps drm_atomic_get_private_obj_state(), passing in the MST
    5091  * atomic state vtable so that the private object state returned is that of an
    5092  * MST topology object. Also, drm_atomic_get_private_obj_state() expects the
    5093  * caller to take care of the locking, so warn if the connection_mutex isn't held.
   5094  *
   5095  * RETURNS:
   5096  *
   5097  * The MST topology state or error pointer.
   5098  */
   5099 struct drm_dp_mst_topology_state *drm_atomic_get_mst_topology_state(struct drm_atomic_state *state,
   5100 								    struct drm_dp_mst_topology_mgr *mgr)
   5101 {
   5102 	return to_dp_mst_topology_state(drm_atomic_get_private_obj_state(state, &mgr->base));
   5103 }
   5104 EXPORT_SYMBOL(drm_atomic_get_mst_topology_state);
   5105 
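/*
 * Illustrative sketch: pull the manager's topology state into an atomic
 * check and walk its VCPI allocations.  The enclosing function and the
 * "state"/"mgr" variables are hypothetical driver code that already holds
 * the required modeset locks.
 */
#if 0
	struct drm_dp_mst_topology_state *mst_state;
	struct drm_dp_vcpi_allocation *pos;

	mst_state = drm_atomic_get_mst_topology_state(state, mgr);
	if (IS_ERR(mst_state))
		return PTR_ERR(mst_state);

	list_for_each_entry(pos, &mst_state->vcpis, next)
		DRM_DEBUG_ATOMIC("port %p uses %d slots\n",
				 pos->port, pos->vcpi);
#endif
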
   5106 /**
   5107  * drm_dp_mst_topology_mgr_init - initialise a topology manager
   5108  * @mgr: manager struct to initialise
   5109  * @dev: device providing this structure - for i2c addition.
   5110  * @aux: DP helper aux channel to talk to this device
   5111  * @max_dpcd_transaction_bytes: hw specific DPCD transaction limit
   5112  * @max_payloads: maximum number of payloads this GPU can source
   5113  * @conn_base_id: the connector object ID the MST device is connected to.
   5114  *
   5115  * Return 0 for success, or negative error code on failure
   5116  */
   5117 int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
   5118 				 struct drm_device *dev, struct drm_dp_aux *aux,
   5119 				 int max_dpcd_transaction_bytes,
   5120 				 int max_payloads, int conn_base_id)
   5121 {
   5122 	struct drm_dp_mst_topology_state *mst_state;
   5123 
   5124 	mutex_init(&mgr->lock);
   5125 	mutex_init(&mgr->qlock);
   5126 	mutex_init(&mgr->payload_lock);
   5127 	mutex_init(&mgr->delayed_destroy_lock);
   5128 	mutex_init(&mgr->up_req_lock);
   5129 	mutex_init(&mgr->probe_lock);
   5130 #if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
   5131 	mutex_init(&mgr->topology_ref_history_lock);
   5132 #endif
   5133 	INIT_LIST_HEAD(&mgr->tx_msg_downq);
   5134 	INIT_LIST_HEAD(&mgr->destroy_port_list);
   5135 	INIT_LIST_HEAD(&mgr->destroy_branch_device_list);
   5136 	INIT_LIST_HEAD(&mgr->up_req_list);
   5137 	INIT_WORK(&mgr->work, drm_dp_mst_link_probe_work);
   5138 	INIT_WORK(&mgr->tx_work, drm_dp_tx_work);
   5139 	INIT_WORK(&mgr->delayed_destroy_work, drm_dp_delayed_destroy_work);
   5140 	INIT_WORK(&mgr->up_req_work, drm_dp_mst_up_req_work);
   5141 	init_waitqueue_head(&mgr->tx_waitq);
   5142 	mgr->dev = dev;
   5143 	mgr->aux = aux;
   5144 	mgr->max_dpcd_transaction_bytes = max_dpcd_transaction_bytes;
   5145 	mgr->max_payloads = max_payloads;
   5146 	mgr->conn_base_id = conn_base_id;
   5147 	if (max_payloads + 1 > sizeof(mgr->payload_mask) * 8 ||
   5148 	    max_payloads + 1 > sizeof(mgr->vcpi_mask) * 8)
   5149 		return -EINVAL;
   5150 	mgr->payloads = kcalloc(max_payloads, sizeof(struct drm_dp_payload), GFP_KERNEL);
   5151 	if (!mgr->payloads)
   5152 		return -ENOMEM;
   5153 	mgr->proposed_vcpis = kcalloc(max_payloads, sizeof(struct drm_dp_vcpi *), GFP_KERNEL);
   5154 	if (!mgr->proposed_vcpis)
   5155 		return -ENOMEM;
   5156 	set_bit(0, &mgr->payload_mask);
   5157 
   5158 	mst_state = kzalloc(sizeof(*mst_state), GFP_KERNEL);
   5159 	if (mst_state == NULL)
   5160 		return -ENOMEM;
   5161 
   5162 	mst_state->mgr = mgr;
   5163 	INIT_LIST_HEAD(&mst_state->vcpis);
   5164 
   5165 	drm_atomic_private_obj_init(dev, &mgr->base,
   5166 				    &mst_state->base,
   5167 				    &drm_dp_mst_topology_state_funcs);
   5168 
   5169 	return 0;
   5170 }
   5171 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_init);
   5172 
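/*
 * Illustrative sketch of manager setup and teardown from a driver.  The
 * example_connector_init()/example_connector_fini() helpers, the embedding
 * "example_priv" structure and the literal limits are hypothetical; a real
 * driver sizes them to its hardware.
 */
#if 0
static int example_connector_init(struct example_priv *priv,
				  struct drm_device *dev,
				  int conn_base_id)
{
	/* 16-byte DPCD transactions, up to 4 simultaneous payloads */
	return drm_dp_mst_topology_mgr_init(&priv->mst_mgr, dev, &priv->aux,
					    16, 4, conn_base_id);
}

static void example_connector_fini(struct example_priv *priv)
{
	drm_dp_mst_topology_mgr_destroy(&priv->mst_mgr);
}
#endif
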
   5173 /**
   5174  * drm_dp_mst_topology_mgr_destroy() - destroy topology manager.
   5175  * @mgr: manager to destroy
   5176  */
   5177 void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr)
   5178 {
   5179 	drm_dp_mst_topology_mgr_set_mst(mgr, false);
   5180 	flush_work(&mgr->work);
   5181 	cancel_work_sync(&mgr->delayed_destroy_work);
   5182 	mutex_lock(&mgr->payload_lock);
   5183 	kfree(mgr->payloads);
   5184 	mgr->payloads = NULL;
   5185 	kfree(mgr->proposed_vcpis);
   5186 	mgr->proposed_vcpis = NULL;
   5187 	mutex_unlock(&mgr->payload_lock);
   5188 	mgr->dev = NULL;
   5189 	mgr->aux = NULL;
   5190 	drm_atomic_private_obj_fini(&mgr->base);
   5191 	mgr->funcs = NULL;
   5192 
   5193 	mutex_destroy(&mgr->delayed_destroy_lock);
   5194 	mutex_destroy(&mgr->payload_lock);
   5195 	mutex_destroy(&mgr->qlock);
   5196 	mutex_destroy(&mgr->lock);
   5197 	mutex_destroy(&mgr->up_req_lock);
   5198 	mutex_destroy(&mgr->probe_lock);
   5199 #if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
   5200 	mutex_destroy(&mgr->topology_ref_history_lock);
   5201 #endif
   5202 }
   5203 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_destroy);
   5204 
   5205 static bool remote_i2c_read_ok(const struct i2c_msg msgs[], int num)
   5206 {
   5207 	int i;
   5208 
   5209 	if (num - 1 > DP_REMOTE_I2C_READ_MAX_TRANSACTIONS)
   5210 		return false;
   5211 
   5212 	for (i = 0; i < num - 1; i++) {
   5213 		if (msgs[i].flags & I2C_M_RD ||
   5214 		    msgs[i].len > 0xff)
   5215 			return false;
   5216 	}
   5217 
   5218 	return msgs[num - 1].flags & I2C_M_RD &&
   5219 		msgs[num - 1].len <= 0xff;
   5220 }
   5221 
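/*
 * Illustrative transfer shape accepted by remote_i2c_read_ok(): up to
 * DP_REMOTE_I2C_READ_MAX_TRANSACTIONS writes followed by exactly one read,
 * each at most 255 bytes.  The classic case is an EDID fetch at I2C
 * address 0x50 (the values below are hypothetical).
 */
#if 0
	u8 offset = 0;
	u8 edid[128];
	struct i2c_msg msgs[] = {
		{ .addr = 0x50, .flags = 0, .len = 1, .buf = &offset },
		{ .addr = 0x50, .flags = I2C_M_RD, .len = sizeof(edid),
		  .buf = edid },
	};
#endif
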
   5222 /* I2C device */
   5223 static int drm_dp_mst_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
   5224 			       int num)
   5225 {
   5226 	struct drm_dp_aux *aux = adapter->algo_data;
   5227 	struct drm_dp_mst_port *port = container_of(aux, struct drm_dp_mst_port, aux);
   5228 	struct drm_dp_mst_branch *mstb;
   5229 	struct drm_dp_mst_topology_mgr *mgr = port->mgr;
   5230 	unsigned int i;
   5231 	struct drm_dp_sideband_msg_req_body msg;
   5232 	struct drm_dp_sideband_msg_tx *txmsg = NULL;
   5233 	int ret;
   5234 
   5235 	mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
   5236 	if (!mstb)
   5237 		return -EREMOTEIO;
   5238 
   5239 	if (!remote_i2c_read_ok(msgs, num)) {
   5240 		DRM_DEBUG_KMS("Unsupported I2C transaction for MST device\n");
   5241 		ret = -EIO;
   5242 		goto out;
   5243 	}
   5244 
   5245 	memset(&msg, 0, sizeof(msg));
   5246 	msg.req_type = DP_REMOTE_I2C_READ;
   5247 	msg.u.i2c_read.num_transactions = num - 1;
   5248 	msg.u.i2c_read.port_number = port->port_num;
   5249 	for (i = 0; i < num - 1; i++) {
   5250 		msg.u.i2c_read.transactions[i].i2c_dev_id = msgs[i].addr;
   5251 		msg.u.i2c_read.transactions[i].num_bytes = msgs[i].len;
   5252 		msg.u.i2c_read.transactions[i].bytes = msgs[i].buf;
   5253 		msg.u.i2c_read.transactions[i].no_stop_bit = !(msgs[i].flags & I2C_M_STOP);
   5254 	}
   5255 	msg.u.i2c_read.read_i2c_device_id = msgs[num - 1].addr;
   5256 	msg.u.i2c_read.num_bytes_read = msgs[num - 1].len;
   5257 
   5258 	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
   5259 	if (!txmsg) {
   5260 		ret = -ENOMEM;
   5261 		goto out;
   5262 	}
   5263 
   5264 	txmsg->dst = mstb;
   5265 	drm_dp_encode_sideband_req(&msg, txmsg);
   5266 
   5267 	drm_dp_queue_down_tx(mgr, txmsg);
   5268 
   5269 	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
   5270 	if (ret > 0) {
   5271 
   5272 		if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
   5273 			ret = -EREMOTEIO;
   5274 			goto out;
   5275 		}
   5276 		if (txmsg->reply.u.remote_i2c_read_ack.num_bytes != msgs[num - 1].len) {
   5277 			ret = -EIO;
   5278 			goto out;
   5279 		}
   5280 		memcpy(msgs[num - 1].buf, txmsg->reply.u.remote_i2c_read_ack.bytes, msgs[num - 1].len);
   5281 		ret = num;
   5282 	}
   5283 out:
   5284 	kfree(txmsg);
   5285 	drm_dp_mst_topology_put_mstb(mstb);
   5286 	return ret;
   5287 }
   5288 
   5289 static u32 drm_dp_mst_i2c_functionality(struct i2c_adapter *adapter)
   5290 {
   5291 	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
   5292 	       I2C_FUNC_SMBUS_READ_BLOCK_DATA |
   5293 	       I2C_FUNC_SMBUS_BLOCK_PROC_CALL |
   5294 	       I2C_FUNC_10BIT_ADDR;
   5295 }
   5296 
   5297 static const struct i2c_algorithm drm_dp_mst_i2c_algo = {
   5298 	.functionality = drm_dp_mst_i2c_functionality,
   5299 	.master_xfer = drm_dp_mst_i2c_xfer,
   5300 };
   5301 
   5302 /**
   5303  * drm_dp_mst_register_i2c_bus() - register an I2C adapter for I2C-over-AUX
   5304  * @aux: DisplayPort AUX channel
   5305  *
   5306  * Returns 0 on success or a negative error code on failure.
   5307  */
   5308 static int drm_dp_mst_register_i2c_bus(struct drm_dp_aux *aux)
   5309 {
   5310 	aux->ddc.algo = &drm_dp_mst_i2c_algo;
   5311 	aux->ddc.algo_data = aux;
   5312 	aux->ddc.retries = 3;
   5313 
   5314 	aux->ddc.class = I2C_CLASS_DDC;
   5315 	aux->ddc.owner = THIS_MODULE;
   5316 	aux->ddc.dev.parent = aux->dev;
   5317 	aux->ddc.dev.of_node = aux->dev->of_node;
   5318 
   5319 	strlcpy(aux->ddc.name, aux->name ? aux->name : dev_name(aux->dev),
   5320 		sizeof(aux->ddc.name));
   5321 
   5322 	return i2c_add_adapter(&aux->ddc);
   5323 }
   5324 
   5325 /**
   5326  * drm_dp_mst_unregister_i2c_bus() - unregister an I2C-over-AUX adapter
   5327  * @aux: DisplayPort AUX channel
   5328  */
   5329 static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_aux *aux)
   5330 {
   5331 	i2c_del_adapter(&aux->ddc);
   5332 }
   5333 
   5334 /**
   5335  * drm_dp_mst_is_virtual_dpcd() - Is the given port a virtual DP Peer Device
   5336  * @port: The port to check
   5337  *
   5338  * A single physical MST hub object can be represented in the topology
   5339  * by multiple branches, with virtual ports between those branches.
   5340  *
    5341  * As of DP1.4, an MST hub with internal (virtual) ports must expose
    5342  * certain DPCD registers over those ports. See sections 2.6.1.1.1
    5343  * and 2.6.1.1.2 of the DisplayPort specification v1.4 for details.
   5344  *
   5345  * May acquire mgr->lock
   5346  *
   5347  * Returns:
   5348  * true if the port is a virtual DP peer device, false otherwise
   5349  */
   5350 static bool drm_dp_mst_is_virtual_dpcd(struct drm_dp_mst_port *port)
   5351 {
   5352 	struct drm_dp_mst_port *downstream_port;
   5353 
   5354 	if (!port || port->dpcd_rev < DP_DPCD_REV_14)
   5355 		return false;
   5356 
   5357 	/* Virtual DP Sink (Internal Display Panel) */
   5358 	if (port->port_num >= 8)
   5359 		return true;
   5360 
   5361 	/* DP-to-HDMI Protocol Converter */
   5362 	if (port->pdt == DP_PEER_DEVICE_DP_LEGACY_CONV &&
   5363 	    !port->mcs &&
   5364 	    port->ldps)
   5365 		return true;
   5366 
   5367 	/* DP-to-DP */
   5368 	mutex_lock(&port->mgr->lock);
   5369 	if (port->pdt == DP_PEER_DEVICE_MST_BRANCHING &&
   5370 	    port->mstb &&
   5371 	    port->mstb->num_ports == 2) {
   5372 		list_for_each_entry(downstream_port, &port->mstb->ports, next) {
   5373 			if (downstream_port->pdt == DP_PEER_DEVICE_SST_SINK &&
   5374 			    !downstream_port->input) {
   5375 				mutex_unlock(&port->mgr->lock);
   5376 				return true;
   5377 			}
   5378 		}
   5379 	}
   5380 	mutex_unlock(&port->mgr->lock);
   5381 
   5382 	return false;
   5383 }
   5384 
   5385 /**
   5386  * drm_dp_mst_dsc_aux_for_port() - Find the correct aux for DSC
   5387  * @port: The port to check. A leaf of the MST tree with an attached display.
   5388  *
   5389  * Depending on the situation, DSC may be enabled via the endpoint aux,
   5390  * the immediately upstream aux, or the connector's physical aux.
   5391  *
   5392  * This is both the correct aux to read DSC_CAPABILITY and the
   5393  * correct aux to write DSC_ENABLED.
   5394  *
    5395  * This operation can be expensive (up to four aux reads), so
    5396  * the caller should cache the return value.
   5397  *
   5398  * Returns:
   5399  * NULL if DSC cannot be enabled on this port, otherwise the aux device
   5400  */
   5401 struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port)
   5402 {
   5403 	struct drm_dp_mst_port *immediate_upstream_port;
   5404 	struct drm_dp_mst_port *fec_port;
   5405 	struct drm_dp_desc desc = { 0 };
   5406 	u8 endpoint_fec;
   5407 	u8 endpoint_dsc;
   5408 
   5409 	if (!port)
   5410 		return NULL;
   5411 
   5412 	if (port->parent->port_parent)
   5413 		immediate_upstream_port = port->parent->port_parent;
   5414 	else
   5415 		immediate_upstream_port = NULL;
   5416 
   5417 	fec_port = immediate_upstream_port;
   5418 	while (fec_port) {
   5419 		/*
   5420 		 * Each physical link (i.e. not a virtual port) between the
   5421 		 * output and the primary device must support FEC
   5422 		 */
   5423 		if (!drm_dp_mst_is_virtual_dpcd(fec_port) &&
   5424 		    !fec_port->fec_capable)
   5425 			return NULL;
   5426 
   5427 		fec_port = fec_port->parent->port_parent;
   5428 	}
   5429 
   5430 	/* DP-to-DP peer device */
   5431 	if (drm_dp_mst_is_virtual_dpcd(immediate_upstream_port)) {
   5432 		u8 upstream_dsc;
   5433 
   5434 		if (drm_dp_dpcd_read(&port->aux,
   5435 				     DP_DSC_SUPPORT, &endpoint_dsc, 1) != 1)
   5436 			return NULL;
   5437 		if (drm_dp_dpcd_read(&port->aux,
   5438 				     DP_FEC_CAPABILITY, &endpoint_fec, 1) != 1)
   5439 			return NULL;
   5440 		if (drm_dp_dpcd_read(&immediate_upstream_port->aux,
   5441 				     DP_DSC_SUPPORT, &upstream_dsc, 1) != 1)
   5442 			return NULL;
   5443 
    5444 		/* Endpoint decompression with DP-to-DP peer device */
   5445 		if ((endpoint_dsc & DP_DSC_DECOMPRESSION_IS_SUPPORTED) &&
   5446 		    (endpoint_fec & DP_FEC_CAPABLE) &&
   5447 		    (upstream_dsc & 0x2) /* DSC passthrough */)
   5448 			return &port->aux;
   5449 
   5450 		/* Virtual DPCD decompression with DP-to-DP peer device */
   5451 		return &immediate_upstream_port->aux;
   5452 	}
   5453 
   5454 	/* Virtual DPCD decompression with DP-to-HDMI or Virtual DP Sink */
   5455 	if (drm_dp_mst_is_virtual_dpcd(port))
   5456 		return &port->aux;
   5457 
   5458 	/*
   5459 	 * Synaptics quirk
   5460 	 * Applies to ports for which:
   5461 	 * - Physical aux has Synaptics OUI
   5462 	 * - DPv1.4 or higher
   5463 	 * - Port is on primary branch device
   5464 	 * - Not a VGA adapter (DP_DWN_STRM_PORT_TYPE_ANALOG)
   5465 	 */
   5466 	if (drm_dp_read_desc(port->mgr->aux, &desc, true))
   5467 		return NULL;
   5468 
   5469 	if (drm_dp_has_quirk(&desc, DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD) &&
   5470 	    port->mgr->dpcd[DP_DPCD_REV] >= DP_DPCD_REV_14 &&
   5471 	    port->parent == port->mgr->mst_primary) {
   5472 		u8 downstreamport;
   5473 
   5474 		if (drm_dp_dpcd_read(&port->aux, DP_DOWNSTREAMPORT_PRESENT,
   5475 				     &downstreamport, 1) < 0)
   5476 			return NULL;
   5477 
   5478 		if ((downstreamport & DP_DWN_STRM_PORT_PRESENT) &&
   5479 		   ((downstreamport & DP_DWN_STRM_PORT_TYPE_MASK)
   5480 		     != DP_DWN_STRM_PORT_TYPE_ANALOG))
   5481 			return port->mgr->aux;
   5482 	}
   5483 
   5484 	/*
   5485 	 * The check below verifies if the MST sink
   5486 	 * connected to the GPU is capable of DSC -
   5487 	 * therefore the endpoint needs to be
   5488 	 * both DSC and FEC capable.
   5489 	 */
   5490 	if (drm_dp_dpcd_read(&port->aux,
   5491 	   DP_DSC_SUPPORT, &endpoint_dsc, 1) != 1)
   5492 		return NULL;
   5493 	if (drm_dp_dpcd_read(&port->aux,
   5494 	   DP_FEC_CAPABILITY, &endpoint_fec, 1) != 1)
   5495 		return NULL;
   5496 	if ((endpoint_dsc & DP_DSC_DECOMPRESSION_IS_SUPPORTED) &&
   5497 	   (endpoint_fec & DP_FEC_CAPABLE))
   5498 		return &port->aux;
   5499 
   5500 	return NULL;
   5501 }
   5502 EXPORT_SYMBOL(drm_dp_mst_dsc_aux_for_port);
   5503
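/*
 * Illustrative sketch of the caching pattern suggested above: resolve the
 * DSC aux once and read the sink's DSC capability through it.
 * example_port_supports_dsc() is a hypothetical driver helper.
 */
#if 0
static bool example_port_supports_dsc(struct drm_dp_mst_port *port)
{
	struct drm_dp_aux *dsc_aux = drm_dp_mst_dsc_aux_for_port(port);
	u8 dsc_cap;

	if (!dsc_aux)
		return false;

	if (drm_dp_dpcd_readb(dsc_aux, DP_DSC_SUPPORT, &dsc_cap) != 1)
		return false;

	return dsc_cap & DP_DSC_DECOMPRESSION_IS_SUPPORTED;
}
#endif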