Home | History | Annotate | Line # | Download | only in drm
drm_dp_mst_topology.c revision 1.5
      1 /*	$NetBSD: drm_dp_mst_topology.c,v 1.5 2020/02/14 14:34:57 maya Exp $	*/
      2 
      3 /*
      4  * Copyright  2014 Red Hat
      5  *
      6  * Permission to use, copy, modify, distribute, and sell this software and its
      7  * documentation for any purpose is hereby granted without fee, provided that
      8  * the above copyright notice appear in all copies and that both that copyright
      9  * notice and this permission notice appear in supporting documentation, and
     10  * that the name of the copyright holders not be used in advertising or
     11  * publicity pertaining to distribution of the software without specific,
     12  * written prior permission.  The copyright holders make no representations
     13  * about the suitability of this software for any purpose.  It is provided "as
     14  * is" without express or implied warranty.
     15  *
     16  * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
     17  * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
     18  * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
     19  * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
     20  * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
     21  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
     22  * OF THIS SOFTWARE.
     23  */
     24 
     25 #include <sys/cdefs.h>
     26 __KERNEL_RCSID(0, "$NetBSD: drm_dp_mst_topology.c,v 1.5 2020/02/14 14:34:57 maya Exp $");
     27 
     28 #include <linux/kernel.h>
     29 #include <linux/delay.h>
     30 #include <linux/init.h>
     31 #include <linux/errno.h>
     32 #include <linux/sched.h>
     33 #include <linux/seq_file.h>
     34 #include <linux/i2c.h>
     35 #include <drm/drm_dp_mst_helper.h>
     36 #include <drm/drmP.h>
     37 
     38 #include <drm/drm_fixed.h>
     39 
     40 #include <linux/nbsd-namespace.h>
     41 
     42 /**
     43  * DOC: dp mst helper
     44  *
     45  * These functions contain parts of the DisplayPort 1.2a MultiStream Transport
     46  * protocol. The helpers contain a topology manager and bandwidth manager.
     47  * The helpers encapsulate the sending and received of sideband msgs.
     48  */
     49 #if IS_ENABLED(CONFIG_DEBUG_FS)
     50 static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
     51 				  char *buf);
     52 #endif
     53 static int test_calc_pbn_mode(void);
     54 
     55 static void drm_dp_put_port(struct drm_dp_mst_port *port);
     56 
     57 static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
     58 				     int id,
     59 				     struct drm_dp_payload *payload);
     60 
     61 static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
     62 				  struct drm_dp_mst_port *port,
     63 				  int offset, int size, u8 *bytes);
     64 
     65 static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
     66 				     struct drm_dp_mst_branch *mstb);
     67 static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
     68 					   struct drm_dp_mst_branch *mstb,
     69 					   struct drm_dp_mst_port *port);
     70 static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
     71 				 u8 *guid);
     72 
     73 static int drm_dp_mst_register_i2c_bus(struct drm_dp_aux *aux);
     74 static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_aux *aux);
     75 static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr);
     76 /* sideband msg handling */
     77 static u8 drm_dp_msg_header_crc4(const uint8_t *data, size_t num_nibbles)
     78 {
     79 	u8 bitmask = 0x80;
     80 	u8 bitshift = 7;
     81 	u8 array_index = 0;
     82 	int number_of_bits = num_nibbles * 4;
     83 	u8 remainder = 0;
     84 
     85 	while (number_of_bits != 0) {
     86 		number_of_bits--;
     87 		remainder <<= 1;
     88 		remainder |= (data[array_index] & bitmask) >> bitshift;
     89 		bitmask >>= 1;
     90 		bitshift--;
     91 		if (bitmask == 0) {
     92 			bitmask = 0x80;
     93 			bitshift = 7;
     94 			array_index++;
     95 		}
     96 		if ((remainder & 0x10) == 0x10)
     97 			remainder ^= 0x13;
     98 	}
     99 
    100 	number_of_bits = 4;
    101 	while (number_of_bits != 0) {
    102 		number_of_bits--;
    103 		remainder <<= 1;
    104 		if ((remainder & 0x10) != 0)
    105 			remainder ^= 0x13;
    106 	}
    107 
    108 	return remainder;
    109 }
    110 
    111 static u8 drm_dp_msg_data_crc4(const uint8_t *data, u8 number_of_bytes)
    112 {
    113 	u8 bitmask = 0x80;
    114 	u8 bitshift = 7;
    115 	u8 array_index = 0;
    116 	int number_of_bits = number_of_bytes * 8;
    117 	u16 remainder = 0;
    118 
    119 	while (number_of_bits != 0) {
    120 		number_of_bits--;
    121 		remainder <<= 1;
    122 		remainder |= (data[array_index] & bitmask) >> bitshift;
    123 		bitmask >>= 1;
    124 		bitshift--;
    125 		if (bitmask == 0) {
    126 			bitmask = 0x80;
    127 			bitshift = 7;
    128 			array_index++;
    129 		}
    130 		if ((remainder & 0x100) == 0x100)
    131 			remainder ^= 0xd5;
    132 	}
    133 
    134 	number_of_bits = 8;
    135 	while (number_of_bits != 0) {
    136 		number_of_bits--;
    137 		remainder <<= 1;
    138 		if ((remainder & 0x100) != 0)
    139 			remainder ^= 0xd5;
    140 	}
    141 
    142 	return remainder & 0xff;
    143 }
    144 static inline u8 drm_dp_calc_sb_hdr_size(struct drm_dp_sideband_msg_hdr *hdr)
    145 {
    146 	u8 size = 3;
    147 	size += (hdr->lct / 2);
    148 	return size;
    149 }
    150 
    151 static void drm_dp_encode_sideband_msg_hdr(struct drm_dp_sideband_msg_hdr *hdr,
    152 					   u8 *buf, int *len)
    153 {
    154 	int idx = 0;
    155 	int i;
    156 	u8 crc4;
    157 	buf[idx++] = ((hdr->lct & 0xf) << 4) | (hdr->lcr & 0xf);
    158 	for (i = 0; i < (hdr->lct / 2); i++)
    159 		buf[idx++] = hdr->rad[i];
    160 	buf[idx++] = (hdr->broadcast << 7) | (hdr->path_msg << 6) |
    161 		(hdr->msg_len & 0x3f);
    162 	buf[idx++] = (hdr->somt << 7) | (hdr->eomt << 6) | (hdr->seqno << 4);
    163 
    164 	crc4 = drm_dp_msg_header_crc4(buf, (idx * 2) - 1);
    165 	buf[idx - 1] |= (crc4 & 0xf);
    166 
    167 	*len = idx;
    168 }
    169 
    170 static bool drm_dp_decode_sideband_msg_hdr(struct drm_dp_sideband_msg_hdr *hdr,
    171 					   u8 *buf, int buflen, u8 *hdrlen)
    172 {
    173 	u8 crc4;
    174 	u8 len;
    175 	int i;
    176 	u8 idx;
    177 	if (buf[0] == 0)
    178 		return false;
    179 	len = 3;
    180 	len += ((buf[0] & 0xf0) >> 4) / 2;
    181 	if (len > buflen)
    182 		return false;
    183 	crc4 = drm_dp_msg_header_crc4(buf, (len * 2) - 1);
    184 
    185 	if ((crc4 & 0xf) != (buf[len - 1] & 0xf)) {
    186 		DRM_DEBUG_KMS("crc4 mismatch 0x%x 0x%x\n", crc4, buf[len - 1]);
    187 		return false;
    188 	}
    189 
    190 	hdr->lct = (buf[0] & 0xf0) >> 4;
    191 	hdr->lcr = (buf[0] & 0xf);
    192 	idx = 1;
    193 	for (i = 0; i < (hdr->lct / 2); i++)
    194 		hdr->rad[i] = buf[idx++];
    195 	hdr->broadcast = (buf[idx] >> 7) & 0x1;
    196 	hdr->path_msg = (buf[idx] >> 6) & 0x1;
    197 	hdr->msg_len = buf[idx] & 0x3f;
    198 	idx++;
    199 	hdr->somt = (buf[idx] >> 7) & 0x1;
    200 	hdr->eomt = (buf[idx] >> 6) & 0x1;
    201 	hdr->seqno = (buf[idx] >> 4) & 0x1;
    202 	idx++;
    203 	*hdrlen = idx;
    204 	return true;
    205 }
    206 
    207 static void drm_dp_encode_sideband_req(struct drm_dp_sideband_msg_req_body *req,
    208 				       struct drm_dp_sideband_msg_tx *raw)
    209 {
    210 	int idx = 0;
    211 	int i;
    212 	u8 *buf = raw->msg;
    213 	buf[idx++] = req->req_type & 0x7f;
    214 
    215 	switch (req->req_type) {
    216 	case DP_ENUM_PATH_RESOURCES:
    217 		buf[idx] = (req->u.port_num.port_number & 0xf) << 4;
    218 		idx++;
    219 		break;
    220 	case DP_ALLOCATE_PAYLOAD:
    221 		buf[idx] = (req->u.allocate_payload.port_number & 0xf) << 4 |
    222 			(req->u.allocate_payload.number_sdp_streams & 0xf);
    223 		idx++;
    224 		buf[idx] = (req->u.allocate_payload.vcpi & 0x7f);
    225 		idx++;
    226 		buf[idx] = (req->u.allocate_payload.pbn >> 8);
    227 		idx++;
    228 		buf[idx] = (req->u.allocate_payload.pbn & 0xff);
    229 		idx++;
    230 		for (i = 0; i < req->u.allocate_payload.number_sdp_streams / 2; i++) {
    231 			buf[idx] = ((req->u.allocate_payload.sdp_stream_sink[i * 2] & 0xf) << 4) |
    232 				(req->u.allocate_payload.sdp_stream_sink[i * 2 + 1] & 0xf);
    233 			idx++;
    234 		}
    235 		if (req->u.allocate_payload.number_sdp_streams & 1) {
    236 			i = req->u.allocate_payload.number_sdp_streams - 1;
    237 			buf[idx] = (req->u.allocate_payload.sdp_stream_sink[i] & 0xf) << 4;
    238 			idx++;
    239 		}
    240 		break;
    241 	case DP_QUERY_PAYLOAD:
    242 		buf[idx] = (req->u.query_payload.port_number & 0xf) << 4;
    243 		idx++;
    244 		buf[idx] = (req->u.query_payload.vcpi & 0x7f);
    245 		idx++;
    246 		break;
    247 	case DP_REMOTE_DPCD_READ:
    248 		buf[idx] = (req->u.dpcd_read.port_number & 0xf) << 4;
    249 		buf[idx] |= ((req->u.dpcd_read.dpcd_address & 0xf0000) >> 16) & 0xf;
    250 		idx++;
    251 		buf[idx] = (req->u.dpcd_read.dpcd_address & 0xff00) >> 8;
    252 		idx++;
    253 		buf[idx] = (req->u.dpcd_read.dpcd_address & 0xff);
    254 		idx++;
    255 		buf[idx] = (req->u.dpcd_read.num_bytes);
    256 		idx++;
    257 		break;
    258 
    259 	case DP_REMOTE_DPCD_WRITE:
    260 		buf[idx] = (req->u.dpcd_write.port_number & 0xf) << 4;
    261 		buf[idx] |= ((req->u.dpcd_write.dpcd_address & 0xf0000) >> 16) & 0xf;
    262 		idx++;
    263 		buf[idx] = (req->u.dpcd_write.dpcd_address & 0xff00) >> 8;
    264 		idx++;
    265 		buf[idx] = (req->u.dpcd_write.dpcd_address & 0xff);
    266 		idx++;
    267 		buf[idx] = (req->u.dpcd_write.num_bytes);
    268 		idx++;
    269 		memcpy(&buf[idx], req->u.dpcd_write.bytes, req->u.dpcd_write.num_bytes);
    270 		idx += req->u.dpcd_write.num_bytes;
    271 		break;
    272 	case DP_REMOTE_I2C_READ:
    273 		buf[idx] = (req->u.i2c_read.port_number & 0xf) << 4;
    274 		buf[idx] |= (req->u.i2c_read.num_transactions & 0x3);
    275 		idx++;
    276 		for (i = 0; i < (req->u.i2c_read.num_transactions & 0x3); i++) {
    277 			buf[idx] = req->u.i2c_read.transactions[i].i2c_dev_id & 0x7f;
    278 			idx++;
    279 			buf[idx] = req->u.i2c_read.transactions[i].num_bytes;
    280 			idx++;
    281 			memcpy(&buf[idx], req->u.i2c_read.transactions[i].bytes, req->u.i2c_read.transactions[i].num_bytes);
    282 			idx += req->u.i2c_read.transactions[i].num_bytes;
    283 
    284 			buf[idx] = (req->u.i2c_read.transactions[i].no_stop_bit & 0x1) << 5;
    285 			buf[idx] |= (req->u.i2c_read.transactions[i].i2c_transaction_delay & 0xf);
    286 			idx++;
    287 		}
    288 		buf[idx] = (req->u.i2c_read.read_i2c_device_id) & 0x7f;
    289 		idx++;
    290 		buf[idx] = (req->u.i2c_read.num_bytes_read);
    291 		idx++;
    292 		break;
    293 
    294 	case DP_REMOTE_I2C_WRITE:
    295 		buf[idx] = (req->u.i2c_write.port_number & 0xf) << 4;
    296 		idx++;
    297 		buf[idx] = (req->u.i2c_write.write_i2c_device_id) & 0x7f;
    298 		idx++;
    299 		buf[idx] = (req->u.i2c_write.num_bytes);
    300 		idx++;
    301 		memcpy(&buf[idx], req->u.i2c_write.bytes, req->u.i2c_write.num_bytes);
    302 		idx += req->u.i2c_write.num_bytes;
    303 		break;
    304 	}
    305 	raw->cur_len = idx;
    306 }
    307 
    308 static void drm_dp_crc_sideband_chunk_req(u8 *msg, u8 len)
    309 {
    310 	u8 crc4;
    311 	crc4 = drm_dp_msg_data_crc4(msg, len);
    312 	msg[len] = crc4;
    313 }
    314 
    315 static void drm_dp_encode_sideband_reply(struct drm_dp_sideband_msg_reply_body *rep,
    316 					 struct drm_dp_sideband_msg_tx *raw)
    317 {
    318 	int idx = 0;
    319 	u8 *buf = raw->msg;
    320 
    321 	buf[idx++] = (rep->reply_type & 0x1) << 7 | (rep->req_type & 0x7f);
    322 
    323 	raw->cur_len = idx;
    324 }
    325 
    326 /* this adds a chunk of msg to the builder to get the final msg */
    327 static bool drm_dp_sideband_msg_build(struct drm_dp_sideband_msg_rx *msg,
    328 				      u8 *replybuf, u8 replybuflen, bool hdr)
    329 {
    330 	int ret;
    331 	u8 crc4 __unused;	/* XXX Mistake?  */
    332 
    333 	if (hdr) {
    334 		u8 hdrlen;
    335 		struct drm_dp_sideband_msg_hdr recv_hdr;
    336 		ret = drm_dp_decode_sideband_msg_hdr(&recv_hdr, replybuf, replybuflen, &hdrlen);
    337 		if (ret == false) {
    338 			print_hex_dump(KERN_DEBUG, "failed hdr", DUMP_PREFIX_NONE, 16, 1, replybuf, replybuflen, false);
    339 			return false;
    340 		}
    341 
    342 		/*
    343 		 * ignore out-of-order messages or messages that are part of a
    344 		 * failed transaction
    345 		 */
    346 		if (!recv_hdr.somt && !msg->have_somt)
    347 			return false;
    348 
    349 		/* get length contained in this portion */
    350 		msg->curchunk_len = recv_hdr.msg_len;
    351 		msg->curchunk_hdrlen = hdrlen;
    352 
    353 		/* we have already gotten an somt - don't bother parsing */
    354 		if (recv_hdr.somt && msg->have_somt)
    355 			return false;
    356 
    357 		if (recv_hdr.somt) {
    358 			memcpy(&msg->initial_hdr, &recv_hdr, sizeof(struct drm_dp_sideband_msg_hdr));
    359 			msg->have_somt = true;
    360 		}
    361 		if (recv_hdr.eomt)
    362 			msg->have_eomt = true;
    363 
    364 		/* copy the bytes for the remainder of this header chunk */
    365 		msg->curchunk_idx = min(msg->curchunk_len, (u8)(replybuflen - hdrlen));
    366 		memcpy(&msg->chunk[0], replybuf + hdrlen, msg->curchunk_idx);
    367 	} else {
    368 		memcpy(&msg->chunk[msg->curchunk_idx], replybuf, replybuflen);
    369 		msg->curchunk_idx += replybuflen;
    370 	}
    371 
    372 	if (msg->curchunk_idx >= msg->curchunk_len) {
    373 		/* do CRC */
    374 		crc4 = drm_dp_msg_data_crc4(msg->chunk, msg->curchunk_len - 1);
    375 		/* copy chunk into bigger msg */
    376 		memcpy(&msg->msg[msg->curlen], msg->chunk, msg->curchunk_len - 1);
    377 		msg->curlen += msg->curchunk_len - 1;
    378 	}
    379 	return true;
    380 }
    381 
    382 static bool drm_dp_sideband_parse_link_address(struct drm_dp_sideband_msg_rx *raw,
    383 					       struct drm_dp_sideband_msg_reply_body *repmsg)
    384 {
    385 	int idx = 1;
    386 	int i;
    387 	memcpy(repmsg->u.link_addr.guid, &raw->msg[idx], 16);
    388 	idx += 16;
    389 	repmsg->u.link_addr.nports = raw->msg[idx] & 0xf;
    390 	idx++;
    391 	if (idx > raw->curlen)
    392 		goto fail_len;
    393 	for (i = 0; i < repmsg->u.link_addr.nports; i++) {
    394 		if (raw->msg[idx] & 0x80)
    395 			repmsg->u.link_addr.ports[i].input_port = 1;
    396 
    397 		repmsg->u.link_addr.ports[i].peer_device_type = (raw->msg[idx] >> 4) & 0x7;
    398 		repmsg->u.link_addr.ports[i].port_number = (raw->msg[idx] & 0xf);
    399 
    400 		idx++;
    401 		if (idx > raw->curlen)
    402 			goto fail_len;
    403 		repmsg->u.link_addr.ports[i].mcs = (raw->msg[idx] >> 7) & 0x1;
    404 		repmsg->u.link_addr.ports[i].ddps = (raw->msg[idx] >> 6) & 0x1;
    405 		if (repmsg->u.link_addr.ports[i].input_port == 0)
    406 			repmsg->u.link_addr.ports[i].legacy_device_plug_status = (raw->msg[idx] >> 5) & 0x1;
    407 		idx++;
    408 		if (idx > raw->curlen)
    409 			goto fail_len;
    410 		if (repmsg->u.link_addr.ports[i].input_port == 0) {
    411 			repmsg->u.link_addr.ports[i].dpcd_revision = (raw->msg[idx]);
    412 			idx++;
    413 			if (idx > raw->curlen)
    414 				goto fail_len;
    415 			memcpy(repmsg->u.link_addr.ports[i].peer_guid, &raw->msg[idx], 16);
    416 			idx += 16;
    417 			if (idx > raw->curlen)
    418 				goto fail_len;
    419 			repmsg->u.link_addr.ports[i].num_sdp_streams = (raw->msg[idx] >> 4) & 0xf;
    420 			repmsg->u.link_addr.ports[i].num_sdp_stream_sinks = (raw->msg[idx] & 0xf);
    421 			idx++;
    422 
    423 		}
    424 		if (idx > raw->curlen)
    425 			goto fail_len;
    426 	}
    427 
    428 	return true;
    429 fail_len:
    430 	DRM_DEBUG_KMS("link address reply parse length fail %d %d\n", idx, raw->curlen);
    431 	return false;
    432 }
    433 
    434 static bool drm_dp_sideband_parse_remote_dpcd_read(struct drm_dp_sideband_msg_rx *raw,
    435 						   struct drm_dp_sideband_msg_reply_body *repmsg)
    436 {
    437 	int idx = 1;
    438 	repmsg->u.remote_dpcd_read_ack.port_number = raw->msg[idx] & 0xf;
    439 	idx++;
    440 	if (idx > raw->curlen)
    441 		goto fail_len;
    442 	repmsg->u.remote_dpcd_read_ack.num_bytes = raw->msg[idx];
    443 	if (idx > raw->curlen)
    444 		goto fail_len;
    445 
    446 	memcpy(repmsg->u.remote_dpcd_read_ack.bytes, &raw->msg[idx], repmsg->u.remote_dpcd_read_ack.num_bytes);
    447 	return true;
    448 fail_len:
    449 	DRM_DEBUG_KMS("link address reply parse length fail %d %d\n", idx, raw->curlen);
    450 	return false;
    451 }
    452 
    453 static bool drm_dp_sideband_parse_remote_dpcd_write(struct drm_dp_sideband_msg_rx *raw,
    454 						      struct drm_dp_sideband_msg_reply_body *repmsg)
    455 {
    456 	int idx = 1;
    457 	repmsg->u.remote_dpcd_write_ack.port_number = raw->msg[idx] & 0xf;
    458 	idx++;
    459 	if (idx > raw->curlen)
    460 		goto fail_len;
    461 	return true;
    462 fail_len:
    463 	DRM_DEBUG_KMS("parse length fail %d %d\n", idx, raw->curlen);
    464 	return false;
    465 }
    466 
    467 static bool drm_dp_sideband_parse_remote_i2c_read_ack(struct drm_dp_sideband_msg_rx *raw,
    468 						      struct drm_dp_sideband_msg_reply_body *repmsg)
    469 {
    470 	int idx = 1;
    471 
    472 	repmsg->u.remote_i2c_read_ack.port_number = (raw->msg[idx] & 0xf);
    473 	idx++;
    474 	if (idx > raw->curlen)
    475 		goto fail_len;
    476 	repmsg->u.remote_i2c_read_ack.num_bytes = raw->msg[idx];
    477 	idx++;
    478 	/* TODO check */
    479 	memcpy(repmsg->u.remote_i2c_read_ack.bytes, &raw->msg[idx], repmsg->u.remote_i2c_read_ack.num_bytes);
    480 	return true;
    481 fail_len:
    482 	DRM_DEBUG_KMS("remote i2c reply parse length fail %d %d\n", idx, raw->curlen);
    483 	return false;
    484 }
    485 
    486 static bool drm_dp_sideband_parse_enum_path_resources_ack(struct drm_dp_sideband_msg_rx *raw,
    487 							  struct drm_dp_sideband_msg_reply_body *repmsg)
    488 {
    489 	int idx = 1;
    490 	repmsg->u.path_resources.port_number = (raw->msg[idx] >> 4) & 0xf;
    491 	idx++;
    492 	if (idx > raw->curlen)
    493 		goto fail_len;
    494 	repmsg->u.path_resources.full_payload_bw_number = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
    495 	idx += 2;
    496 	if (idx > raw->curlen)
    497 		goto fail_len;
    498 	repmsg->u.path_resources.avail_payload_bw_number = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
    499 	idx += 2;
    500 	if (idx > raw->curlen)
    501 		goto fail_len;
    502 	return true;
    503 fail_len:
    504 	DRM_DEBUG_KMS("enum resource parse length fail %d %d\n", idx, raw->curlen);
    505 	return false;
    506 }
    507 
    508 static bool drm_dp_sideband_parse_allocate_payload_ack(struct drm_dp_sideband_msg_rx *raw,
    509 							  struct drm_dp_sideband_msg_reply_body *repmsg)
    510 {
    511 	int idx = 1;
    512 	repmsg->u.allocate_payload.port_number = (raw->msg[idx] >> 4) & 0xf;
    513 	idx++;
    514 	if (idx > raw->curlen)
    515 		goto fail_len;
    516 	repmsg->u.allocate_payload.vcpi = raw->msg[idx];
    517 	idx++;
    518 	if (idx > raw->curlen)
    519 		goto fail_len;
    520 	repmsg->u.allocate_payload.allocated_pbn = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
    521 	idx += 2;
    522 	if (idx > raw->curlen)
    523 		goto fail_len;
    524 	return true;
    525 fail_len:
    526 	DRM_DEBUG_KMS("allocate payload parse length fail %d %d\n", idx, raw->curlen);
    527 	return false;
    528 }
    529 
    530 static bool drm_dp_sideband_parse_query_payload_ack(struct drm_dp_sideband_msg_rx *raw,
    531 						    struct drm_dp_sideband_msg_reply_body *repmsg)
    532 {
    533 	int idx = 1;
    534 	repmsg->u.query_payload.port_number = (raw->msg[idx] >> 4) & 0xf;
    535 	idx++;
    536 	if (idx > raw->curlen)
    537 		goto fail_len;
    538 	repmsg->u.query_payload.allocated_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]);
    539 	idx += 2;
    540 	if (idx > raw->curlen)
    541 		goto fail_len;
    542 	return true;
    543 fail_len:
    544 	DRM_DEBUG_KMS("query payload parse length fail %d %d\n", idx, raw->curlen);
    545 	return false;
    546 }
    547 
    548 static bool drm_dp_sideband_parse_reply(struct drm_dp_sideband_msg_rx *raw,
    549 					struct drm_dp_sideband_msg_reply_body *msg)
    550 {
    551 	memset(msg, 0, sizeof(*msg));
    552 	msg->reply_type = (raw->msg[0] & 0x80) >> 7;
    553 	msg->req_type = (raw->msg[0] & 0x7f);
    554 
    555 	if (msg->reply_type) {
    556 		memcpy(msg->u.nak.guid, &raw->msg[1], 16);
    557 		msg->u.nak.reason = raw->msg[17];
    558 		msg->u.nak.nak_data = raw->msg[18];
    559 		return false;
    560 	}
    561 
    562 	switch (msg->req_type) {
    563 	case DP_LINK_ADDRESS:
    564 		return drm_dp_sideband_parse_link_address(raw, msg);
    565 	case DP_QUERY_PAYLOAD:
    566 		return drm_dp_sideband_parse_query_payload_ack(raw, msg);
    567 	case DP_REMOTE_DPCD_READ:
    568 		return drm_dp_sideband_parse_remote_dpcd_read(raw, msg);
    569 	case DP_REMOTE_DPCD_WRITE:
    570 		return drm_dp_sideband_parse_remote_dpcd_write(raw, msg);
    571 	case DP_REMOTE_I2C_READ:
    572 		return drm_dp_sideband_parse_remote_i2c_read_ack(raw, msg);
    573 	case DP_ENUM_PATH_RESOURCES:
    574 		return drm_dp_sideband_parse_enum_path_resources_ack(raw, msg);
    575 	case DP_ALLOCATE_PAYLOAD:
    576 		return drm_dp_sideband_parse_allocate_payload_ack(raw, msg);
    577 	default:
    578 		DRM_ERROR("Got unknown reply 0x%02x\n", msg->req_type);
    579 		return false;
    580 	}
    581 }
    582 
    583 static bool drm_dp_sideband_parse_connection_status_notify(struct drm_dp_sideband_msg_rx *raw,
    584 							   struct drm_dp_sideband_msg_req_body *msg)
    585 {
    586 	int idx = 1;
    587 
    588 	msg->u.conn_stat.port_number = (raw->msg[idx] & 0xf0) >> 4;
    589 	idx++;
    590 	if (idx > raw->curlen)
    591 		goto fail_len;
    592 
    593 	memcpy(msg->u.conn_stat.guid, &raw->msg[idx], 16);
    594 	idx += 16;
    595 	if (idx > raw->curlen)
    596 		goto fail_len;
    597 
    598 	msg->u.conn_stat.legacy_device_plug_status = (raw->msg[idx] >> 6) & 0x1;
    599 	msg->u.conn_stat.displayport_device_plug_status = (raw->msg[idx] >> 5) & 0x1;
    600 	msg->u.conn_stat.message_capability_status = (raw->msg[idx] >> 4) & 0x1;
    601 	msg->u.conn_stat.input_port = (raw->msg[idx] >> 3) & 0x1;
    602 	msg->u.conn_stat.peer_device_type = (raw->msg[idx] & 0x7);
    603 	idx++;
    604 	return true;
    605 fail_len:
    606 	DRM_DEBUG_KMS("connection status reply parse length fail %d %d\n", idx, raw->curlen);
    607 	return false;
    608 }
    609 
    610 static bool drm_dp_sideband_parse_resource_status_notify(struct drm_dp_sideband_msg_rx *raw,
    611 							   struct drm_dp_sideband_msg_req_body *msg)
    612 {
    613 	int idx = 1;
    614 
    615 	msg->u.resource_stat.port_number = (raw->msg[idx] & 0xf0) >> 4;
    616 	idx++;
    617 	if (idx > raw->curlen)
    618 		goto fail_len;
    619 
    620 	memcpy(msg->u.resource_stat.guid, &raw->msg[idx], 16);
    621 	idx += 16;
    622 	if (idx > raw->curlen)
    623 		goto fail_len;
    624 
    625 	msg->u.resource_stat.available_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]);
    626 	idx++;
    627 	return true;
    628 fail_len:
    629 	DRM_DEBUG_KMS("resource status reply parse length fail %d %d\n", idx, raw->curlen);
    630 	return false;
    631 }
    632 
    633 static bool drm_dp_sideband_parse_req(struct drm_dp_sideband_msg_rx *raw,
    634 				      struct drm_dp_sideband_msg_req_body *msg)
    635 {
    636 	memset(msg, 0, sizeof(*msg));
    637 	msg->req_type = (raw->msg[0] & 0x7f);
    638 
    639 	switch (msg->req_type) {
    640 	case DP_CONNECTION_STATUS_NOTIFY:
    641 		return drm_dp_sideband_parse_connection_status_notify(raw, msg);
    642 	case DP_RESOURCE_STATUS_NOTIFY:
    643 		return drm_dp_sideband_parse_resource_status_notify(raw, msg);
    644 	default:
    645 		DRM_ERROR("Got unknown request 0x%02x\n", msg->req_type);
    646 		return false;
    647 	}
    648 }
    649 
    650 static int build_dpcd_write(struct drm_dp_sideband_msg_tx *msg, u8 port_num, u32 offset, u8 num_bytes, u8 *bytes)
    651 {
    652 	struct drm_dp_sideband_msg_req_body req;
    653 
    654 	req.req_type = DP_REMOTE_DPCD_WRITE;
    655 	req.u.dpcd_write.port_number = port_num;
    656 	req.u.dpcd_write.dpcd_address = offset;
    657 	req.u.dpcd_write.num_bytes = num_bytes;
    658 	req.u.dpcd_write.bytes = bytes;
    659 	drm_dp_encode_sideband_req(&req, msg);
    660 
    661 	return 0;
    662 }
    663 
    664 static int build_link_address(struct drm_dp_sideband_msg_tx *msg)
    665 {
    666 	struct drm_dp_sideband_msg_req_body req;
    667 
    668 	req.req_type = DP_LINK_ADDRESS;
    669 	drm_dp_encode_sideband_req(&req, msg);
    670 	return 0;
    671 }
    672 
    673 static int build_enum_path_resources(struct drm_dp_sideband_msg_tx *msg, int port_num)
    674 {
    675 	struct drm_dp_sideband_msg_req_body req;
    676 
    677 	req.req_type = DP_ENUM_PATH_RESOURCES;
    678 	req.u.port_num.port_number = port_num;
    679 	drm_dp_encode_sideband_req(&req, msg);
    680 	msg->path_msg = true;
    681 	return 0;
    682 }
    683 
    684 static int build_allocate_payload(struct drm_dp_sideband_msg_tx *msg, int port_num,
    685 				  u8 vcpi, uint16_t pbn)
    686 {
    687 	struct drm_dp_sideband_msg_req_body req;
    688 	memset(&req, 0, sizeof(req));
    689 	req.req_type = DP_ALLOCATE_PAYLOAD;
    690 	req.u.allocate_payload.port_number = port_num;
    691 	req.u.allocate_payload.vcpi = vcpi;
    692 	req.u.allocate_payload.pbn = pbn;
    693 	drm_dp_encode_sideband_req(&req, msg);
    694 	msg->path_msg = true;
    695 	return 0;
    696 }
    697 
    698 static int drm_dp_mst_assign_payload_id(struct drm_dp_mst_topology_mgr *mgr,
    699 					struct drm_dp_vcpi *vcpi)
    700 {
    701 	int ret, vcpi_ret;
    702 
    703 	mutex_lock(&mgr->payload_lock);
    704 	ret = find_first_zero_bit(&mgr->payload_mask, mgr->max_payloads + 1);
    705 	if (ret > mgr->max_payloads) {
    706 		ret = -EINVAL;
    707 		DRM_DEBUG_KMS("out of payload ids %d\n", ret);
    708 		goto out_unlock;
    709 	}
    710 
    711 	vcpi_ret = find_first_zero_bit(&mgr->vcpi_mask, mgr->max_payloads + 1);
    712 	if (vcpi_ret > mgr->max_payloads) {
    713 		ret = -EINVAL;
    714 		DRM_DEBUG_KMS("out of vcpi ids %d\n", ret);
    715 		goto out_unlock;
    716 	}
    717 
    718 	set_bit(ret, &mgr->payload_mask);
    719 	set_bit(vcpi_ret, &mgr->vcpi_mask);
    720 	vcpi->vcpi = vcpi_ret + 1;
    721 	mgr->proposed_vcpis[ret - 1] = vcpi;
    722 out_unlock:
    723 	mutex_unlock(&mgr->payload_lock);
    724 	return ret;
    725 }
    726 
    727 static void drm_dp_mst_put_payload_id(struct drm_dp_mst_topology_mgr *mgr,
    728 				      int vcpi)
    729 {
    730 	int i;
    731 	if (vcpi == 0)
    732 		return;
    733 
    734 	mutex_lock(&mgr->payload_lock);
    735 	DRM_DEBUG_KMS("putting payload %d\n", vcpi);
    736 	clear_bit(vcpi - 1, &mgr->vcpi_mask);
    737 
    738 	for (i = 0; i < mgr->max_payloads; i++) {
    739 		if (mgr->proposed_vcpis[i])
    740 			if (mgr->proposed_vcpis[i]->vcpi == vcpi) {
    741 				mgr->proposed_vcpis[i] = NULL;
    742 				clear_bit(i + 1, &mgr->payload_mask);
    743 			}
    744 	}
    745 	mutex_unlock(&mgr->payload_lock);
    746 }
    747 
    748 static bool check_txmsg_state(struct drm_dp_mst_topology_mgr *mgr,
    749 			      struct drm_dp_sideband_msg_tx *txmsg)
    750 {
    751 	bool ret;
    752 
    753 	/*
    754 	 * All updates to txmsg->state are protected by mgr->qlock, and the two
    755 	 * cases we check here are terminal states. For those the barriers
    756 	 * provided by the wake_up/wait_event pair are enough.
    757 	 */
    758 	ret = (txmsg->state == DRM_DP_SIDEBAND_TX_RX ||
    759 	       txmsg->state == DRM_DP_SIDEBAND_TX_TIMEOUT);
    760 	return ret;
    761 }
    762 
    763 static int drm_dp_mst_wait_tx_reply(struct drm_dp_mst_branch *mstb,
    764 				    struct drm_dp_sideband_msg_tx *txmsg)
    765 {
    766 	struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
    767 	int ret;
    768 
    769 #ifdef __NetBSD__
    770 	mutex_lock(&mstb->mgr->qlock);
    771 	DRM_TIMED_WAIT_UNTIL(ret, &mgr->tx_waitq, &mstb->mgr->qlock, 4*HZ,
    772 	    check_txmsg_state(mgr, txmsg));
    773 #else
    774 	ret = wait_event_timeout(mgr->tx_waitq,
    775 				 check_txmsg_state(mgr, txmsg),
    776 				 (4 * HZ));
    777 	mutex_lock(&mstb->mgr->qlock);
    778 #endif
    779 	if (ret > 0) {
    780 		if (txmsg->state == DRM_DP_SIDEBAND_TX_TIMEOUT) {
    781 			ret = -EIO;
    782 			goto out;
    783 		}
    784 	} else {
    785 		DRM_DEBUG_KMS("timedout msg send %p %d %d\n", txmsg, txmsg->state, txmsg->seqno);
    786 
    787 		/* dump some state */
    788 		ret = -EIO;
    789 
    790 		/* remove from q */
    791 		if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED ||
    792 		    txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND) {
    793 			list_del(&txmsg->next);
    794 		}
    795 
    796 		if (txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND ||
    797 		    txmsg->state == DRM_DP_SIDEBAND_TX_SENT) {
    798 			mstb->tx_slots[txmsg->seqno] = NULL;
    799 		}
    800 	}
    801 out:
    802 	mutex_unlock(&mgr->qlock);
    803 
    804 	return ret;
    805 }
    806 
    807 static struct drm_dp_mst_branch *drm_dp_add_mst_branch_device(u8 lct, u8 *rad)
    808 {
    809 	struct drm_dp_mst_branch *mstb;
    810 
    811 	mstb = kzalloc(sizeof(*mstb), GFP_KERNEL);
    812 	if (!mstb)
    813 		return NULL;
    814 
    815 	mstb->lct = lct;
    816 	if (lct > 1)
    817 		memcpy(mstb->rad, rad, lct / 2);
    818 	INIT_LIST_HEAD(&mstb->ports);
    819 	kref_init(&mstb->kref);
    820 	return mstb;
    821 }
    822 
    823 static void drm_dp_free_mst_port(struct kref *kref);
    824 
    825 static void drm_dp_free_mst_branch_device(struct kref *kref)
    826 {
    827 	struct drm_dp_mst_branch *mstb = container_of(kref, struct drm_dp_mst_branch, kref);
    828 	if (mstb->port_parent) {
    829 		if (list_empty(&mstb->port_parent->next))
    830 			kref_put(&mstb->port_parent->kref, drm_dp_free_mst_port);
    831 	}
    832 	kfree(mstb);
    833 }
    834 
    835 static void drm_dp_destroy_mst_branch_device(struct kref *kref)
    836 {
    837 	struct drm_dp_mst_branch *mstb = container_of(kref, struct drm_dp_mst_branch, kref);
    838 	struct drm_dp_mst_port *port, *tmp;
    839 	bool wake_tx = false;
    840 
    841 	/*
    842 	 * init kref again to be used by ports to remove mst branch when it is
    843 	 * not needed anymore
    844 	 */
    845 	kref_init(kref);
    846 
    847 	if (mstb->port_parent && list_empty(&mstb->port_parent->next))
    848 		kref_get(&mstb->port_parent->kref);
    849 
    850 	/*
    851 	 * destroy all ports - don't need lock
    852 	 * as there are no more references to the mst branch
    853 	 * device at this point.
    854 	 */
    855 	list_for_each_entry_safe(port, tmp, &mstb->ports, next) {
    856 		list_del(&port->next);
    857 		drm_dp_put_port(port);
    858 	}
    859 
    860 	/* drop any tx slots msg */
    861 	mutex_lock(&mstb->mgr->qlock);
    862 	if (mstb->tx_slots[0]) {
    863 		mstb->tx_slots[0]->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
    864 		mstb->tx_slots[0] = NULL;
    865 		wake_tx = true;
    866 	}
    867 	if (mstb->tx_slots[1]) {
    868 		mstb->tx_slots[1]->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
    869 		mstb->tx_slots[1] = NULL;
    870 		wake_tx = true;
    871 	}
    872 #ifdef __NetBSD__
    873 	if (wake_tx)
    874 		DRM_WAKEUP_ONE(&mstb->mgr->tx_waitq, &mstb->mgr->qlock);
    875 	mutex_unlock(&mstb->mgr->qlock);
    876 #else
    877 	mutex_unlock(&mstb->mgr->qlock);
    878 
    879 	if (wake_tx)
    880 		wake_up(&mstb->mgr->tx_waitq);
    881 #endif
    882 
    883 	kref_put(kref, drm_dp_free_mst_branch_device);
    884 }
    885 
    886 static void drm_dp_put_mst_branch_device(struct drm_dp_mst_branch *mstb)
    887 {
    888 	kref_put(&mstb->kref, drm_dp_destroy_mst_branch_device);
    889 }
    890 
    891 
    892 static void drm_dp_port_teardown_pdt(struct drm_dp_mst_port *port, int old_pdt)
    893 {
    894 	struct drm_dp_mst_branch *mstb;
    895 
    896 	switch (old_pdt) {
    897 	case DP_PEER_DEVICE_DP_LEGACY_CONV:
    898 	case DP_PEER_DEVICE_SST_SINK:
    899 		/* remove i2c over sideband */
    900 		drm_dp_mst_unregister_i2c_bus(&port->aux);
    901 		break;
    902 	case DP_PEER_DEVICE_MST_BRANCHING:
    903 		mstb = port->mstb;
    904 		port->mstb = NULL;
    905 		drm_dp_put_mst_branch_device(mstb);
    906 		break;
    907 	}
    908 }
    909 
    910 static void drm_dp_destroy_port(struct kref *kref)
    911 {
    912 	struct drm_dp_mst_port *port = container_of(kref, struct drm_dp_mst_port, kref);
    913 	struct drm_dp_mst_topology_mgr *mgr = port->mgr;
    914 
    915 	if (!port->input) {
    916 		port->vcpi.num_slots = 0;
    917 
    918 		kfree(port->cached_edid);
    919 
    920 		/*
    921 		 * The only time we don't have a connector
    922 		 * on an output port is if the connector init
    923 		 * fails.
    924 		 */
    925 		if (port->connector) {
    926 			/* we can't destroy the connector here, as
    927 			 * we might be holding the mode_config.mutex
    928 			 * from an EDID retrieval */
    929 
    930 			mutex_lock(&mgr->destroy_connector_lock);
    931 			kref_get(&port->parent->kref);
    932 			list_add(&port->next, &mgr->destroy_connector_list);
    933 			mutex_unlock(&mgr->destroy_connector_lock);
    934 			schedule_work(&mgr->destroy_connector_work);
    935 			return;
    936 		}
    937 		/* no need to clean up vcpi
    938 		 * as if we have no connector we never setup a vcpi */
    939 		drm_dp_port_teardown_pdt(port, port->pdt);
    940 		port->pdt = DP_PEER_DEVICE_NONE;
    941 	}
    942 	kfree(port);
    943 }
    944 
    945 static void drm_dp_put_port(struct drm_dp_mst_port *port)
    946 {
    947 	kref_put(&port->kref, drm_dp_destroy_port);
    948 }
    949 
    950 static struct drm_dp_mst_branch *drm_dp_mst_get_validated_mstb_ref_locked(struct drm_dp_mst_branch *mstb, struct drm_dp_mst_branch *to_find)
    951 {
    952 	struct drm_dp_mst_port *port;
    953 	struct drm_dp_mst_branch *rmstb;
    954 	if (to_find == mstb) {
    955 		kref_get(&mstb->kref);
    956 		return mstb;
    957 	}
    958 	list_for_each_entry(port, &mstb->ports, next) {
    959 		if (port->mstb) {
    960 			rmstb = drm_dp_mst_get_validated_mstb_ref_locked(port->mstb, to_find);
    961 			if (rmstb)
    962 				return rmstb;
    963 		}
    964 	}
    965 	return NULL;
    966 }
    967 
    968 static struct drm_dp_mst_branch *drm_dp_get_validated_mstb_ref(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_branch *mstb)
    969 {
    970 	struct drm_dp_mst_branch *rmstb = NULL;
    971 	mutex_lock(&mgr->lock);
    972 	if (mgr->mst_primary)
    973 		rmstb = drm_dp_mst_get_validated_mstb_ref_locked(mgr->mst_primary, mstb);
    974 	mutex_unlock(&mgr->lock);
    975 	return rmstb;
    976 }
    977 
    978 static struct drm_dp_mst_port *drm_dp_mst_get_port_ref_locked(struct drm_dp_mst_branch *mstb, struct drm_dp_mst_port *to_find)
    979 {
    980 	struct drm_dp_mst_port *port, *mport;
    981 
    982 	list_for_each_entry(port, &mstb->ports, next) {
    983 		if (port == to_find) {
    984 			kref_get(&port->kref);
    985 			return port;
    986 		}
    987 		if (port->mstb) {
    988 			mport = drm_dp_mst_get_port_ref_locked(port->mstb, to_find);
    989 			if (mport)
    990 				return mport;
    991 		}
    992 	}
    993 	return NULL;
    994 }
    995 
    996 static struct drm_dp_mst_port *drm_dp_get_validated_port_ref(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
    997 {
    998 	struct drm_dp_mst_port *rport = NULL;
    999 	mutex_lock(&mgr->lock);
   1000 	if (mgr->mst_primary)
   1001 		rport = drm_dp_mst_get_port_ref_locked(mgr->mst_primary, port);
   1002 	mutex_unlock(&mgr->lock);
   1003 	return rport;
   1004 }
   1005 
   1006 static struct drm_dp_mst_port *drm_dp_get_port(struct drm_dp_mst_branch *mstb, u8 port_num)
   1007 {
   1008 	struct drm_dp_mst_port *port;
   1009 
   1010 	list_for_each_entry(port, &mstb->ports, next) {
   1011 		if (port->port_num == port_num) {
   1012 			kref_get(&port->kref);
   1013 			return port;
   1014 		}
   1015 	}
   1016 
   1017 	return NULL;
   1018 }
   1019 
   1020 /*
   1021  * calculate a new RAD for this MST branch device
   1022  * if parent has an LCT of 2 then it has 1 nibble of RAD,
   1023  * if parent has an LCT of 3 then it has 2 nibbles of RAD,
   1024  */
   1025 static u8 drm_dp_calculate_rad(struct drm_dp_mst_port *port,
   1026 				 u8 *rad)
   1027 {
   1028 	int parent_lct = port->parent->lct;
   1029 	int shift = 4;
   1030 	int idx = (parent_lct - 1) / 2;
   1031 	if (parent_lct > 1) {
   1032 		memcpy(rad, port->parent->rad, idx + 1);
   1033 		shift = (parent_lct % 2) ? 4 : 0;
   1034 	} else
   1035 		rad[0] = 0;
   1036 
   1037 	rad[idx] |= port->port_num << shift;
   1038 	return parent_lct + 1;
   1039 }
   1040 
   1041 /*
   1042  * return sends link address for new mstb
   1043  */
   1044 static bool drm_dp_port_setup_pdt(struct drm_dp_mst_port *port)
   1045 {
   1046 	int ret __unused;
   1047 	u8 rad[6], lct;
   1048 	bool send_link = false;
   1049 	switch (port->pdt) {
   1050 	case DP_PEER_DEVICE_DP_LEGACY_CONV:
   1051 	case DP_PEER_DEVICE_SST_SINK:
   1052 		/* add i2c over sideband */
   1053 		ret = drm_dp_mst_register_i2c_bus(&port->aux);
   1054 		break;
   1055 	case DP_PEER_DEVICE_MST_BRANCHING:
   1056 		lct = drm_dp_calculate_rad(port, rad);
   1057 
   1058 		port->mstb = drm_dp_add_mst_branch_device(lct, rad);
   1059 		port->mstb->mgr = port->mgr;
   1060 		port->mstb->port_parent = port;
   1061 
   1062 		send_link = true;
   1063 		break;
   1064 	}
   1065 	return send_link;
   1066 }
   1067 
   1068 static void drm_dp_check_mstb_guid(struct drm_dp_mst_branch *mstb, u8 *guid)
   1069 {
   1070 	int ret __unused;
   1071 
   1072 	memcpy(mstb->guid, guid, 16);
   1073 
   1074 	if (!drm_dp_validate_guid(mstb->mgr, mstb->guid)) {
   1075 		if (mstb->port_parent) {
   1076 			ret = drm_dp_send_dpcd_write(
   1077 					mstb->mgr,
   1078 					mstb->port_parent,
   1079 					DP_GUID,
   1080 					16,
   1081 					mstb->guid);
   1082 		} else {
   1083 
   1084 			ret = drm_dp_dpcd_write(
   1085 					mstb->mgr->aux,
   1086 					DP_GUID,
   1087 					mstb->guid,
   1088 					16);
   1089 		}
   1090 	}
   1091 }
   1092 
   1093 static void build_mst_prop_path(const struct drm_dp_mst_branch *mstb,
   1094 				int pnum,
   1095 				char *proppath,
   1096 				size_t proppath_size)
   1097 {
   1098 	int i;
   1099 	char temp[8];
   1100 	snprintf(proppath, proppath_size, "mst:%d", mstb->mgr->conn_base_id);
   1101 	for (i = 0; i < (mstb->lct - 1); i++) {
   1102 		int shift = (i % 2) ? 0 : 4;
   1103 		int port_num = (mstb->rad[i / 2] >> shift) & 0xf;
   1104 		snprintf(temp, sizeof(temp), "-%d", port_num);
   1105 		strlcat(proppath, temp, proppath_size);
   1106 	}
   1107 	snprintf(temp, sizeof(temp), "-%d", pnum);
   1108 	strlcat(proppath, temp, proppath_size);
   1109 }
   1110 
   1111 static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
   1112 			    struct device *dev,
   1113 			    struct drm_dp_link_addr_reply_port *port_msg)
   1114 {
   1115 	struct drm_dp_mst_port *port;
   1116 	bool ret;
   1117 	bool created = false;
   1118 	int old_pdt = 0;
   1119 	int old_ddps = 0;
   1120 	port = drm_dp_get_port(mstb, port_msg->port_number);
   1121 	if (!port) {
   1122 		port = kzalloc(sizeof(*port), GFP_KERNEL);
   1123 		if (!port)
   1124 			return;
   1125 		kref_init(&port->kref);
   1126 		port->parent = mstb;
   1127 		port->port_num = port_msg->port_number;
   1128 		port->mgr = mstb->mgr;
   1129 		port->aux.name = "DPMST";
   1130 		port->aux.dev = dev;
   1131 		created = true;
   1132 	} else {
   1133 		old_pdt = port->pdt;
   1134 		old_ddps = port->ddps;
   1135 	}
   1136 
   1137 	port->pdt = port_msg->peer_device_type;
   1138 	port->input = port_msg->input_port;
   1139 	port->mcs = port_msg->mcs;
   1140 	port->ddps = port_msg->ddps;
   1141 	port->ldps = port_msg->legacy_device_plug_status;
   1142 	port->dpcd_rev = port_msg->dpcd_revision;
   1143 	port->num_sdp_streams = port_msg->num_sdp_streams;
   1144 	port->num_sdp_stream_sinks = port_msg->num_sdp_stream_sinks;
   1145 
   1146 	/* manage mstb port lists with mgr lock - take a reference
   1147 	   for this list */
   1148 	if (created) {
   1149 		mutex_lock(&mstb->mgr->lock);
   1150 		kref_get(&port->kref);
   1151 		list_add(&port->next, &mstb->ports);
   1152 		mutex_unlock(&mstb->mgr->lock);
   1153 	}
   1154 
   1155 	if (old_ddps != port->ddps) {
   1156 		if (port->ddps) {
   1157 			if (!port->input)
   1158 				drm_dp_send_enum_path_resources(mstb->mgr, mstb, port);
   1159 		} else {
   1160 			port->available_pbn = 0;
   1161 			}
   1162 	}
   1163 
   1164 	if (old_pdt != port->pdt && !port->input) {
   1165 		drm_dp_port_teardown_pdt(port, old_pdt);
   1166 
   1167 		ret = drm_dp_port_setup_pdt(port);
   1168 		if (ret == true)
   1169 			drm_dp_send_link_address(mstb->mgr, port->mstb);
   1170 	}
   1171 
   1172 	if (created && !port->input) {
   1173 		char proppath[255];
   1174 
   1175 		build_mst_prop_path(mstb, port->port_num, proppath, sizeof(proppath));
   1176 		port->connector = (*mstb->mgr->cbs->add_connector)(mstb->mgr, port, proppath);
   1177 		if (!port->connector) {
   1178 			/* remove it from the port list */
   1179 			mutex_lock(&mstb->mgr->lock);
   1180 			list_del(&port->next);
   1181 			mutex_unlock(&mstb->mgr->lock);
   1182 			/* drop port list reference */
   1183 			drm_dp_put_port(port);
   1184 			goto out;
   1185 		}
   1186 		if ((port->pdt == DP_PEER_DEVICE_DP_LEGACY_CONV ||
   1187 		     port->pdt == DP_PEER_DEVICE_SST_SINK) &&
   1188 		    port->port_num >= DP_MST_LOGICAL_PORT_0) {
   1189 			port->cached_edid = drm_get_edid(port->connector, &port->aux.ddc);
   1190 			drm_mode_connector_set_tile_property(port->connector);
   1191 		}
   1192 		(*mstb->mgr->cbs->register_connector)(port->connector);
   1193 	}
   1194 
   1195 out:
   1196 	/* put reference to this port */
   1197 	drm_dp_put_port(port);
   1198 }
   1199 
   1200 static void drm_dp_update_port(struct drm_dp_mst_branch *mstb,
   1201 			       struct drm_dp_connection_status_notify *conn_stat)
   1202 {
   1203 	struct drm_dp_mst_port *port;
   1204 	int old_pdt;
   1205 	int old_ddps;
   1206 	bool dowork = false;
   1207 	port = drm_dp_get_port(mstb, conn_stat->port_number);
   1208 	if (!port)
   1209 		return;
   1210 
   1211 	old_ddps = port->ddps;
   1212 	old_pdt = port->pdt;
   1213 	port->pdt = conn_stat->peer_device_type;
   1214 	port->mcs = conn_stat->message_capability_status;
   1215 	port->ldps = conn_stat->legacy_device_plug_status;
   1216 	port->ddps = conn_stat->displayport_device_plug_status;
   1217 
   1218 	if (old_ddps != port->ddps) {
   1219 		if (port->ddps) {
   1220 			dowork = true;
   1221 		} else {
   1222 			port->available_pbn = 0;
   1223 		}
   1224 	}
   1225 	if (old_pdt != port->pdt && !port->input) {
   1226 		drm_dp_port_teardown_pdt(port, old_pdt);
   1227 
   1228 		if (drm_dp_port_setup_pdt(port))
   1229 			dowork = true;
   1230 	}
   1231 
   1232 	drm_dp_put_port(port);
   1233 	if (dowork)
   1234 		queue_work(system_long_wq, &mstb->mgr->work);
   1235 
   1236 }
   1237 
   1238 static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_topology_mgr *mgr,
   1239 							       u8 lct, u8 *rad)
   1240 {
   1241 	struct drm_dp_mst_branch *mstb;
   1242 	struct drm_dp_mst_port *port;
   1243 	int i;
   1244 	/* find the port by iterating down */
   1245 
   1246 	mutex_lock(&mgr->lock);
   1247 	mstb = mgr->mst_primary;
   1248 
   1249 	for (i = 0; i < lct - 1; i++) {
   1250 		int shift = (i % 2) ? 0 : 4;
   1251 		int port_num = (rad[i / 2] >> shift) & 0xf;
   1252 
   1253 		list_for_each_entry(port, &mstb->ports, next) {
   1254 			if (port->port_num == port_num) {
   1255 				mstb = port->mstb;
   1256 				if (!mstb) {
   1257 					DRM_ERROR("failed to lookup MSTB with lct %d, rad %02x\n", lct, rad[0]);
   1258 					goto out;
   1259 				}
   1260 
   1261 				break;
   1262 			}
   1263 		}
   1264 	}
   1265 	kref_get(&mstb->kref);
   1266 out:
   1267 	mutex_unlock(&mgr->lock);
   1268 	return mstb;
   1269 }
   1270 
   1271 static struct drm_dp_mst_branch *get_mst_branch_device_by_guid_helper(
   1272 	struct drm_dp_mst_branch *mstb,
   1273 	uint8_t *guid)
   1274 {
   1275 	struct drm_dp_mst_branch *found_mstb;
   1276 	struct drm_dp_mst_port *port;
   1277 
   1278 	if (memcmp(mstb->guid, guid, 16) == 0)
   1279 		return mstb;
   1280 
   1281 
   1282 	list_for_each_entry(port, &mstb->ports, next) {
   1283 		if (!port->mstb)
   1284 			continue;
   1285 
   1286 		found_mstb = get_mst_branch_device_by_guid_helper(port->mstb, guid);
   1287 
   1288 		if (found_mstb)
   1289 			return found_mstb;
   1290 	}
   1291 
   1292 	return NULL;
   1293 }
   1294 
   1295 static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device_by_guid(
   1296 	struct drm_dp_mst_topology_mgr *mgr,
   1297 	uint8_t *guid)
   1298 {
   1299 	struct drm_dp_mst_branch *mstb;
   1300 
   1301 	/* find the port by iterating down */
   1302 	mutex_lock(&mgr->lock);
   1303 
   1304 	mstb = get_mst_branch_device_by_guid_helper(mgr->mst_primary, guid);
   1305 
   1306 	if (mstb)
   1307 		kref_get(&mstb->kref);
   1308 
   1309 	mutex_unlock(&mgr->lock);
   1310 	return mstb;
   1311 }
   1312 
   1313 static void drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
   1314 					       struct drm_dp_mst_branch *mstb)
   1315 {
   1316 	struct drm_dp_mst_port *port;
   1317 	struct drm_dp_mst_branch *mstb_child;
   1318 	if (!mstb->link_address_sent)
   1319 		drm_dp_send_link_address(mgr, mstb);
   1320 
   1321 	list_for_each_entry(port, &mstb->ports, next) {
   1322 		if (port->input)
   1323 			continue;
   1324 
   1325 		if (!port->ddps)
   1326 			continue;
   1327 
   1328 		if (!port->available_pbn)
   1329 			drm_dp_send_enum_path_resources(mgr, mstb, port);
   1330 
   1331 		if (port->mstb) {
   1332 			mstb_child = drm_dp_get_validated_mstb_ref(mgr, port->mstb);
   1333 			if (mstb_child) {
   1334 				drm_dp_check_and_send_link_address(mgr, mstb_child);
   1335 				drm_dp_put_mst_branch_device(mstb_child);
   1336 			}
   1337 		}
   1338 	}
   1339 }
   1340 
   1341 static void drm_dp_mst_link_probe_work(struct work_struct *work)
   1342 {
   1343 	struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, work);
   1344 	struct drm_dp_mst_branch *mstb;
   1345 
   1346 	mutex_lock(&mgr->lock);
   1347 	mstb = mgr->mst_primary;
   1348 	if (mstb) {
   1349 		kref_get(&mstb->kref);
   1350 	}
   1351 	mutex_unlock(&mgr->lock);
   1352 	if (mstb) {
   1353 		drm_dp_check_and_send_link_address(mgr, mstb);
   1354 		drm_dp_put_mst_branch_device(mstb);
   1355 	}
   1356 }
   1357 
   1358 static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
   1359 				 u8 *guid)
   1360 {
   1361 	static u8 zero_guid[16];
   1362 
   1363 	if (!memcmp(guid, zero_guid, 16)) {
   1364 		u64 salt = get_jiffies_64();
   1365 		memcpy(&guid[0], &salt, sizeof(u64));
   1366 		memcpy(&guid[8], &salt, sizeof(u64));
   1367 		return false;
   1368 	}
   1369 	return true;
   1370 }
   1371 
   1372 #if 0
   1373 static int build_dpcd_read(struct drm_dp_sideband_msg_tx *msg, u8 port_num, u32 offset, u8 num_bytes)
   1374 {
   1375 	struct drm_dp_sideband_msg_req_body req;
   1376 
   1377 	req.req_type = DP_REMOTE_DPCD_READ;
   1378 	req.u.dpcd_read.port_number = port_num;
   1379 	req.u.dpcd_read.dpcd_address = offset;
   1380 	req.u.dpcd_read.num_bytes = num_bytes;
   1381 	drm_dp_encode_sideband_req(&req, msg);
   1382 
   1383 	return 0;
   1384 }
   1385 #endif
   1386 
   1387 static int drm_dp_send_sideband_msg(struct drm_dp_mst_topology_mgr *mgr,
   1388 				    bool up, u8 *msg, int len)
   1389 {
   1390 	int ret;
   1391 	int regbase = up ? DP_SIDEBAND_MSG_UP_REP_BASE : DP_SIDEBAND_MSG_DOWN_REQ_BASE;
   1392 	int tosend, total, offset;
   1393 	int retries = 0;
   1394 
   1395 retry:
   1396 	total = len;
   1397 	offset = 0;
   1398 	do {
   1399 		tosend = min3(mgr->max_dpcd_transaction_bytes, 16, total);
   1400 
   1401 		ret = drm_dp_dpcd_write(mgr->aux, regbase + offset,
   1402 					&msg[offset],
   1403 					tosend);
   1404 		if (ret != tosend) {
   1405 			if (ret == -EIO && retries < 5) {
   1406 				retries++;
   1407 				goto retry;
   1408 			}
   1409 			DRM_DEBUG_KMS("failed to dpcd write %d %d\n", tosend, ret);
   1410 
   1411 			return -EIO;
   1412 		}
   1413 		offset += tosend;
   1414 		total -= tosend;
   1415 	} while (total > 0);
   1416 	return 0;
   1417 }
   1418 
   1419 static int set_hdr_from_dst_qlock(struct drm_dp_sideband_msg_hdr *hdr,
   1420 				  struct drm_dp_sideband_msg_tx *txmsg)
   1421 {
   1422 	struct drm_dp_mst_branch *mstb = txmsg->dst;
   1423 	u8 req_type;
   1424 
   1425 	/* both msg slots are full */
   1426 	if (txmsg->seqno == -1) {
   1427 		if (mstb->tx_slots[0] && mstb->tx_slots[1]) {
   1428 			DRM_DEBUG_KMS("%s: failed to find slot\n", __func__);
   1429 			return -EAGAIN;
   1430 		}
   1431 		if (mstb->tx_slots[0] == NULL && mstb->tx_slots[1] == NULL) {
   1432 			txmsg->seqno = mstb->last_seqno;
   1433 			mstb->last_seqno ^= 1;
   1434 		} else if (mstb->tx_slots[0] == NULL)
   1435 			txmsg->seqno = 0;
   1436 		else
   1437 			txmsg->seqno = 1;
   1438 		mstb->tx_slots[txmsg->seqno] = txmsg;
   1439 	}
   1440 
   1441 	req_type = txmsg->msg[0] & 0x7f;
   1442 	if (req_type == DP_CONNECTION_STATUS_NOTIFY ||
   1443 		req_type == DP_RESOURCE_STATUS_NOTIFY)
   1444 		hdr->broadcast = 1;
   1445 	else
   1446 		hdr->broadcast = 0;
   1447 	hdr->path_msg = txmsg->path_msg;
   1448 	hdr->lct = mstb->lct;
   1449 	hdr->lcr = mstb->lct - 1;
   1450 	if (mstb->lct > 1)
   1451 		memcpy(hdr->rad, mstb->rad, mstb->lct / 2);
   1452 	hdr->seqno = txmsg->seqno;
   1453 	return 0;
   1454 }
   1455 /*
   1456  * process a single block of the next message in the sideband queue
   1457  */
   1458 static int process_single_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
   1459 				   struct drm_dp_sideband_msg_tx *txmsg,
   1460 				   bool up)
   1461 {
   1462 	u8 chunk[48];
   1463 	struct drm_dp_sideband_msg_hdr hdr;
   1464 	int len, space, idx, tosend;
   1465 	int ret;
   1466 
   1467 	memset(&hdr, 0, sizeof(struct drm_dp_sideband_msg_hdr));
   1468 
   1469 	if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED) {
   1470 		txmsg->seqno = -1;
   1471 		txmsg->state = DRM_DP_SIDEBAND_TX_START_SEND;
   1472 	}
   1473 
   1474 	/* make hdr from dst mst - for replies use seqno
   1475 	   otherwise assign one */
   1476 	ret = set_hdr_from_dst_qlock(&hdr, txmsg);
   1477 	if (ret < 0)
   1478 		return ret;
   1479 
   1480 	/* amount left to send in this message */
   1481 	len = txmsg->cur_len - txmsg->cur_offset;
   1482 
   1483 	/* 48 - sideband msg size - 1 byte for data CRC, x header bytes */
   1484 	space = 48 - 1 - drm_dp_calc_sb_hdr_size(&hdr);
   1485 
   1486 	tosend = min(len, space);
   1487 	if (len == txmsg->cur_len)
   1488 		hdr.somt = 1;
   1489 	if (space >= len)
   1490 		hdr.eomt = 1;
   1491 
   1492 
   1493 	hdr.msg_len = tosend + 1;
   1494 	drm_dp_encode_sideband_msg_hdr(&hdr, chunk, &idx);
   1495 	memcpy(&chunk[idx], &txmsg->msg[txmsg->cur_offset], tosend);
   1496 	/* add crc at end */
   1497 	drm_dp_crc_sideband_chunk_req(&chunk[idx], tosend);
   1498 	idx += tosend + 1;
   1499 
   1500 	ret = drm_dp_send_sideband_msg(mgr, up, chunk, idx);
   1501 	if (ret) {
   1502 		DRM_DEBUG_KMS("sideband msg failed to send\n");
   1503 		return ret;
   1504 	}
   1505 
   1506 	txmsg->cur_offset += tosend;
   1507 	if (txmsg->cur_offset == txmsg->cur_len) {
   1508 		txmsg->state = DRM_DP_SIDEBAND_TX_SENT;
   1509 		return 1;
   1510 	}
   1511 	return 0;
   1512 }
   1513 
   1514 static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
   1515 {
   1516 	struct drm_dp_sideband_msg_tx *txmsg;
   1517 	int ret;
   1518 
   1519 	WARN_ON(!mutex_is_locked(&mgr->qlock));
   1520 
   1521 	/* construct a chunk from the first msg in the tx_msg queue */
   1522 	if (list_empty(&mgr->tx_msg_downq)) {
   1523 		mgr->tx_down_in_progress = false;
   1524 		return;
   1525 	}
   1526 	mgr->tx_down_in_progress = true;
   1527 
   1528 	txmsg = list_first_entry(&mgr->tx_msg_downq, struct drm_dp_sideband_msg_tx, next);
   1529 	ret = process_single_tx_qlock(mgr, txmsg, false);
   1530 	if (ret == 1) {
   1531 		/* txmsg is sent it should be in the slots now */
   1532 		list_del(&txmsg->next);
   1533 	} else if (ret) {
   1534 		DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);
   1535 		list_del(&txmsg->next);
   1536 		if (txmsg->seqno != -1)
   1537 			txmsg->dst->tx_slots[txmsg->seqno] = NULL;
   1538 		txmsg->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
   1539 #ifdef __NetBSD__
   1540 		DRM_WAKEUP_ONE(&mgr->tx_waitq, &mgr->qlock);
   1541 #else
   1542 		wake_up(&mgr->tx_waitq);
   1543 #endif
   1544 	}
   1545 	if (list_empty(&mgr->tx_msg_downq)) {
   1546 		mgr->tx_down_in_progress = false;
   1547 		return;
   1548 	}
   1549 }
   1550 
   1551 /* called holding qlock */
   1552 static void process_single_up_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
   1553 				       struct drm_dp_sideband_msg_tx *txmsg)
   1554 {
   1555 	int ret;
   1556 
   1557 	/* construct a chunk from the first msg in the tx_msg queue */
   1558 	ret = process_single_tx_qlock(mgr, txmsg, true);
   1559 
   1560 	if (ret != 1)
   1561 		DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);
   1562 
   1563 	txmsg->dst->tx_slots[txmsg->seqno] = NULL;
   1564 }
   1565 
   1566 static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr,
   1567 				 struct drm_dp_sideband_msg_tx *txmsg)
   1568 {
   1569 	mutex_lock(&mgr->qlock);
   1570 	list_add_tail(&txmsg->next, &mgr->tx_msg_downq);
   1571 	if (!mgr->tx_down_in_progress)
   1572 		process_single_down_tx_qlock(mgr);
   1573 	mutex_unlock(&mgr->qlock);
   1574 }
   1575 
   1576 static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
   1577 				     struct drm_dp_mst_branch *mstb)
   1578 {
   1579 	int len __unused;
   1580 	struct drm_dp_sideband_msg_tx *txmsg;
   1581 	int ret;
   1582 
   1583 	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
   1584 	if (!txmsg)
   1585 		return;
   1586 
   1587 	txmsg->dst = mstb;
   1588 	len = build_link_address(txmsg);
   1589 
   1590 	mstb->link_address_sent = true;
   1591 	drm_dp_queue_down_tx(mgr, txmsg);
   1592 
   1593 	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
   1594 	if (ret > 0) {
   1595 		int i;
   1596 
   1597 		if (txmsg->reply.reply_type == 1)
   1598 			DRM_DEBUG_KMS("link address nak received\n");
   1599 		else {
   1600 			DRM_DEBUG_KMS("link address reply: %d\n", txmsg->reply.u.link_addr.nports);
   1601 			for (i = 0; i < txmsg->reply.u.link_addr.nports; i++) {
   1602 				DRM_DEBUG_KMS("port %d: input %d, pdt: %d, pn: %d, dpcd_rev: %02x, mcs: %d, ddps: %d, ldps %d, sdp %d/%d\n", i,
   1603 				       txmsg->reply.u.link_addr.ports[i].input_port,
   1604 				       txmsg->reply.u.link_addr.ports[i].peer_device_type,
   1605 				       txmsg->reply.u.link_addr.ports[i].port_number,
   1606 				       txmsg->reply.u.link_addr.ports[i].dpcd_revision,
   1607 				       txmsg->reply.u.link_addr.ports[i].mcs,
   1608 				       txmsg->reply.u.link_addr.ports[i].ddps,
   1609 				       txmsg->reply.u.link_addr.ports[i].legacy_device_plug_status,
   1610 				       txmsg->reply.u.link_addr.ports[i].num_sdp_streams,
   1611 				       txmsg->reply.u.link_addr.ports[i].num_sdp_stream_sinks);
   1612 			}
   1613 
   1614 			drm_dp_check_mstb_guid(mstb, txmsg->reply.u.link_addr.guid);
   1615 
   1616 			for (i = 0; i < txmsg->reply.u.link_addr.nports; i++) {
   1617 				drm_dp_add_port(mstb, mgr->dev, &txmsg->reply.u.link_addr.ports[i]);
   1618 			}
   1619 			(*mgr->cbs->hotplug)(mgr);
   1620 		}
   1621 	} else {
   1622 		mstb->link_address_sent = false;
   1623 		DRM_DEBUG_KMS("link address failed %d\n", ret);
   1624 	}
   1625 
   1626 	kfree(txmsg);
   1627 }
   1628 
   1629 static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
   1630 					   struct drm_dp_mst_branch *mstb,
   1631 					   struct drm_dp_mst_port *port)
   1632 {
   1633 	int len __unused;
   1634 	struct drm_dp_sideband_msg_tx *txmsg;
   1635 	int ret;
   1636 
   1637 	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
   1638 	if (!txmsg)
   1639 		return -ENOMEM;
   1640 
   1641 	txmsg->dst = mstb;
   1642 	len = build_enum_path_resources(txmsg, port->port_num);
   1643 
   1644 	drm_dp_queue_down_tx(mgr, txmsg);
   1645 
   1646 	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
   1647 	if (ret > 0) {
   1648 		if (txmsg->reply.reply_type == 1)
   1649 			DRM_DEBUG_KMS("enum path resources nak received\n");
   1650 		else {
   1651 			if (port->port_num != txmsg->reply.u.path_resources.port_number)
   1652 				DRM_ERROR("got incorrect port in response\n");
   1653 			DRM_DEBUG_KMS("enum path resources %d: %d %d\n", txmsg->reply.u.path_resources.port_number, txmsg->reply.u.path_resources.full_payload_bw_number,
   1654 			       txmsg->reply.u.path_resources.avail_payload_bw_number);
   1655 			port->available_pbn = txmsg->reply.u.path_resources.avail_payload_bw_number;
   1656 		}
   1657 	}
   1658 
   1659 	kfree(txmsg);
   1660 	return 0;
   1661 }
   1662 
   1663 static struct drm_dp_mst_port *drm_dp_get_last_connected_port_to_mstb(struct drm_dp_mst_branch *mstb)
   1664 {
   1665 	if (!mstb->port_parent)
   1666 		return NULL;
   1667 
   1668 	if (mstb->port_parent->mstb != mstb)
   1669 		return mstb->port_parent;
   1670 
   1671 	return drm_dp_get_last_connected_port_to_mstb(mstb->port_parent->parent);
   1672 }
   1673 
   1674 static struct drm_dp_mst_branch *drm_dp_get_last_connected_port_and_mstb(struct drm_dp_mst_topology_mgr *mgr,
   1675 									 struct drm_dp_mst_branch *mstb,
   1676 									 int *port_num)
   1677 {
   1678 	struct drm_dp_mst_branch *rmstb = NULL;
   1679 	struct drm_dp_mst_port *found_port;
   1680 	mutex_lock(&mgr->lock);
   1681 	if (mgr->mst_primary) {
   1682 		found_port = drm_dp_get_last_connected_port_to_mstb(mstb);
   1683 
   1684 		if (found_port) {
   1685 			rmstb = found_port->parent;
   1686 			kref_get(&rmstb->kref);
   1687 			*port_num = found_port->port_num;
   1688 		}
   1689 	}
   1690 	mutex_unlock(&mgr->lock);
   1691 	return rmstb;
   1692 }
   1693 
   1694 static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
   1695 				   struct drm_dp_mst_port *port,
   1696 				   int id,
   1697 				   int pbn)
   1698 {
   1699 	struct drm_dp_sideband_msg_tx *txmsg;
   1700 	struct drm_dp_mst_branch *mstb;
   1701 	int len __unused, ret, port_num;
   1702 
   1703 	port = drm_dp_get_validated_port_ref(mgr, port);
   1704 	if (!port)
   1705 		return -EINVAL;
   1706 
   1707 	port_num = port->port_num;
   1708 	mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);
   1709 	if (!mstb) {
   1710 		mstb = drm_dp_get_last_connected_port_and_mstb(mgr, port->parent, &port_num);
   1711 
   1712 		if (!mstb) {
   1713 			drm_dp_put_port(port);
   1714 			return -EINVAL;
   1715 		}
   1716 	}
   1717 
   1718 	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
   1719 	if (!txmsg) {
   1720 		ret = -ENOMEM;
   1721 		goto fail_put;
   1722 	}
   1723 
   1724 	txmsg->dst = mstb;
   1725 	len = build_allocate_payload(txmsg, port_num,
   1726 				     id,
   1727 				     pbn);
   1728 
   1729 	drm_dp_queue_down_tx(mgr, txmsg);
   1730 
   1731 	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
   1732 	if (ret > 0) {
   1733 		if (txmsg->reply.reply_type == 1) {
   1734 			ret = -EINVAL;
   1735 		} else
   1736 			ret = 0;
   1737 	}
   1738 	kfree(txmsg);
   1739 fail_put:
   1740 	drm_dp_put_mst_branch_device(mstb);
   1741 	drm_dp_put_port(port);
   1742 	return ret;
   1743 }
   1744 
   1745 static int drm_dp_create_payload_step1(struct drm_dp_mst_topology_mgr *mgr,
   1746 				       int id,
   1747 				       struct drm_dp_payload *payload)
   1748 {
   1749 	int ret;
   1750 
   1751 	ret = drm_dp_dpcd_write_payload(mgr, id, payload);
   1752 	if (ret < 0) {
   1753 		payload->payload_state = 0;
   1754 		return ret;
   1755 	}
   1756 	payload->payload_state = DP_PAYLOAD_LOCAL;
   1757 	return 0;
   1758 }
   1759 
   1760 static int drm_dp_create_payload_step2(struct drm_dp_mst_topology_mgr *mgr,
   1761 				       struct drm_dp_mst_port *port,
   1762 				       int id,
   1763 				       struct drm_dp_payload *payload)
   1764 {
   1765 	int ret;
   1766 	ret = drm_dp_payload_send_msg(mgr, port, id, port->vcpi.pbn);
   1767 	if (ret < 0)
   1768 		return ret;
   1769 	payload->payload_state = DP_PAYLOAD_REMOTE;
   1770 	return ret;
   1771 }
   1772 
   1773 static int drm_dp_destroy_payload_step1(struct drm_dp_mst_topology_mgr *mgr,
   1774 					struct drm_dp_mst_port *port,
   1775 					int id,
   1776 					struct drm_dp_payload *payload)
   1777 {
   1778 	DRM_DEBUG_KMS("\n");
   1779 	/* its okay for these to fail */
   1780 	if (port) {
   1781 		drm_dp_payload_send_msg(mgr, port, id, 0);
   1782 	}
   1783 
   1784 	drm_dp_dpcd_write_payload(mgr, id, payload);
   1785 	payload->payload_state = DP_PAYLOAD_DELETE_LOCAL;
   1786 	return 0;
   1787 }
   1788 
   1789 static int drm_dp_destroy_payload_step2(struct drm_dp_mst_topology_mgr *mgr,
   1790 					int id,
   1791 					struct drm_dp_payload *payload)
   1792 {
   1793 	payload->payload_state = 0;
   1794 	return 0;
   1795 }
   1796 
   1797 /**
   1798  * drm_dp_update_payload_part1() - Execute payload update part 1
   1799  * @mgr: manager to use.
   1800  *
   1801  * This iterates over all proposed virtual channels, and tries to
   1802  * allocate space in the link for them. For 0->slots transitions,
   1803  * this step just writes the VCPI to the MST device. For slots->0
   1804  * transitions, this writes the updated VCPIs and removes the
   1805  * remote VC payloads.
   1806  *
   1807  * after calling this the driver should generate ACT and payload
   1808  * packets.
   1809  */
   1810 int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
   1811 {
   1812 	int i, j;
   1813 	int cur_slots = 1;
   1814 	struct drm_dp_payload req_payload;
   1815 	struct drm_dp_mst_port *port;
   1816 
   1817 	mutex_lock(&mgr->payload_lock);
   1818 	for (i = 0; i < mgr->max_payloads; i++) {
   1819 		/* solve the current payloads - compare to the hw ones
   1820 		   - update the hw view */
   1821 		req_payload.start_slot = cur_slots;
   1822 		if (mgr->proposed_vcpis[i]) {
   1823 			port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
   1824 			port = drm_dp_get_validated_port_ref(mgr, port);
   1825 			if (!port) {
   1826 				mutex_unlock(&mgr->payload_lock);
   1827 				return -EINVAL;
   1828 			}
   1829 			req_payload.num_slots = mgr->proposed_vcpis[i]->num_slots;
   1830 			req_payload.vcpi = mgr->proposed_vcpis[i]->vcpi;
   1831 		} else {
   1832 			port = NULL;
   1833 			req_payload.num_slots = 0;
   1834 		}
   1835 
   1836 		if (mgr->payloads[i].start_slot != req_payload.start_slot) {
   1837 			mgr->payloads[i].start_slot = req_payload.start_slot;
   1838 		}
   1839 		/* work out what is required to happen with this payload */
   1840 		if (mgr->payloads[i].num_slots != req_payload.num_slots) {
   1841 
   1842 			/* need to push an update for this payload */
   1843 			if (req_payload.num_slots) {
   1844 				drm_dp_create_payload_step1(mgr, mgr->proposed_vcpis[i]->vcpi, &req_payload);
   1845 				mgr->payloads[i].num_slots = req_payload.num_slots;
   1846 				mgr->payloads[i].vcpi = req_payload.vcpi;
   1847 			} else if (mgr->payloads[i].num_slots) {
   1848 				mgr->payloads[i].num_slots = 0;
   1849 				drm_dp_destroy_payload_step1(mgr, port, mgr->payloads[i].vcpi, &mgr->payloads[i]);
   1850 				req_payload.payload_state = mgr->payloads[i].payload_state;
   1851 				mgr->payloads[i].start_slot = 0;
   1852 			}
   1853 			mgr->payloads[i].payload_state = req_payload.payload_state;
   1854 		}
   1855 		cur_slots += req_payload.num_slots;
   1856 
   1857 		if (port)
   1858 			drm_dp_put_port(port);
   1859 	}
   1860 
   1861 	for (i = 0; i < mgr->max_payloads; i++) {
   1862 		if (mgr->payloads[i].payload_state == DP_PAYLOAD_DELETE_LOCAL) {
   1863 			DRM_DEBUG_KMS("removing payload %d\n", i);
   1864 			for (j = i; j < mgr->max_payloads - 1; j++) {
   1865 				memcpy(&mgr->payloads[j], &mgr->payloads[j + 1], sizeof(struct drm_dp_payload));
   1866 				mgr->proposed_vcpis[j] = mgr->proposed_vcpis[j + 1];
   1867 				if (mgr->proposed_vcpis[j] && mgr->proposed_vcpis[j]->num_slots) {
   1868 					set_bit(j + 1, &mgr->payload_mask);
   1869 				} else {
   1870 					clear_bit(j + 1, &mgr->payload_mask);
   1871 				}
   1872 			}
   1873 			memset(&mgr->payloads[mgr->max_payloads - 1], 0, sizeof(struct drm_dp_payload));
   1874 			mgr->proposed_vcpis[mgr->max_payloads - 1] = NULL;
   1875 			clear_bit(mgr->max_payloads, &mgr->payload_mask);
   1876 
   1877 		}
   1878 	}
   1879 	mutex_unlock(&mgr->payload_lock);
   1880 
   1881 	return 0;
   1882 }
   1883 EXPORT_SYMBOL(drm_dp_update_payload_part1);
   1884 
   1885 /**
   1886  * drm_dp_update_payload_part2() - Execute payload update part 2
   1887  * @mgr: manager to use.
   1888  *
   1889  * This iterates over all proposed virtual channels, and tries to
   1890  * allocate space in the link for them. For 0->slots transitions,
   1891  * this step writes the remote VC payload commands. For slots->0
   1892  * this just resets some internal state.
   1893  */
   1894 int drm_dp_update_payload_part2(struct drm_dp_mst_topology_mgr *mgr)
   1895 {
   1896 	struct drm_dp_mst_port *port;
   1897 	int i;
   1898 	int ret = 0;
   1899 	mutex_lock(&mgr->payload_lock);
   1900 	for (i = 0; i < mgr->max_payloads; i++) {
   1901 
   1902 		if (!mgr->proposed_vcpis[i])
   1903 			continue;
   1904 
   1905 		port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
   1906 
   1907 		DRM_DEBUG_KMS("payload %d %d\n", i, mgr->payloads[i].payload_state);
   1908 		if (mgr->payloads[i].payload_state == DP_PAYLOAD_LOCAL) {
   1909 			ret = drm_dp_create_payload_step2(mgr, port, mgr->proposed_vcpis[i]->vcpi, &mgr->payloads[i]);
   1910 		} else if (mgr->payloads[i].payload_state == DP_PAYLOAD_DELETE_LOCAL) {
   1911 			ret = drm_dp_destroy_payload_step2(mgr, mgr->proposed_vcpis[i]->vcpi, &mgr->payloads[i]);
   1912 		}
   1913 		if (ret) {
   1914 			mutex_unlock(&mgr->payload_lock);
   1915 			return ret;
   1916 		}
   1917 	}
   1918 	mutex_unlock(&mgr->payload_lock);
   1919 	return 0;
   1920 }
   1921 EXPORT_SYMBOL(drm_dp_update_payload_part2);
   1922 
   1923 #if 0 /* unused as of yet */
   1924 static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr,
   1925 				 struct drm_dp_mst_port *port,
   1926 				 int offset, int size)
   1927 {
   1928 	int len;
   1929 	struct drm_dp_sideband_msg_tx *txmsg;
   1930 
   1931 	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
   1932 	if (!txmsg)
   1933 		return -ENOMEM;
   1934 
   1935 	len = build_dpcd_read(txmsg, port->port_num, 0, 8);
   1936 	txmsg->dst = port->parent;
   1937 
   1938 	drm_dp_queue_down_tx(mgr, txmsg);
   1939 
   1940 	return 0;
   1941 }
   1942 #endif
   1943 
   1944 static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
   1945 				  struct drm_dp_mst_port *port,
   1946 				  int offset, int size, u8 *bytes)
   1947 {
   1948 	int len __unused;
   1949 	int ret;
   1950 	struct drm_dp_sideband_msg_tx *txmsg;
   1951 	struct drm_dp_mst_branch *mstb;
   1952 
   1953 	mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);
   1954 	if (!mstb)
   1955 		return -EINVAL;
   1956 
   1957 	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
   1958 	if (!txmsg) {
   1959 		ret = -ENOMEM;
   1960 		goto fail_put;
   1961 	}
   1962 
   1963 	len = build_dpcd_write(txmsg, port->port_num, offset, size, bytes);
   1964 	txmsg->dst = mstb;
   1965 
   1966 	drm_dp_queue_down_tx(mgr, txmsg);
   1967 
   1968 	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
   1969 	if (ret > 0) {
   1970 		if (txmsg->reply.reply_type == 1) {
   1971 			ret = -EINVAL;
   1972 		} else
   1973 			ret = 0;
   1974 	}
   1975 	kfree(txmsg);
   1976 fail_put:
   1977 	drm_dp_put_mst_branch_device(mstb);
   1978 	return ret;
   1979 }
   1980 
   1981 static int drm_dp_encode_up_ack_reply(struct drm_dp_sideband_msg_tx *msg, u8 req_type)
   1982 {
   1983 	struct drm_dp_sideband_msg_reply_body reply;
   1984 
   1985 	reply.reply_type = 1;
   1986 	reply.req_type = req_type;
   1987 	drm_dp_encode_sideband_reply(&reply, msg);
   1988 	return 0;
   1989 }
   1990 
   1991 static int drm_dp_send_up_ack_reply(struct drm_dp_mst_topology_mgr *mgr,
   1992 				    struct drm_dp_mst_branch *mstb,
   1993 				    int req_type, int seqno, bool broadcast)
   1994 {
   1995 	struct drm_dp_sideband_msg_tx *txmsg;
   1996 
   1997 	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
   1998 	if (!txmsg)
   1999 		return -ENOMEM;
   2000 
   2001 	txmsg->dst = mstb;
   2002 	txmsg->seqno = seqno;
   2003 	drm_dp_encode_up_ack_reply(txmsg, req_type);
   2004 
   2005 	mutex_lock(&mgr->qlock);
   2006 
   2007 	process_single_up_tx_qlock(mgr, txmsg);
   2008 
   2009 	mutex_unlock(&mgr->qlock);
   2010 
   2011 	kfree(txmsg);
   2012 	return 0;
   2013 }
   2014 
   2015 static bool drm_dp_get_vc_payload_bw(int dp_link_bw,
   2016 				     int dp_link_count,
   2017 				     int *out)
   2018 {
   2019 	switch (dp_link_bw) {
   2020 	default:
   2021 		DRM_DEBUG_KMS("invalid link bandwidth in DPCD: %x (link count: %d)\n",
   2022 			      dp_link_bw, dp_link_count);
   2023 		return false;
   2024 
   2025 	case DP_LINK_BW_1_62:
   2026 		*out = 3 * dp_link_count;
   2027 		break;
   2028 	case DP_LINK_BW_2_7:
   2029 		*out = 5 * dp_link_count;
   2030 		break;
   2031 	case DP_LINK_BW_5_4:
   2032 		*out = 10 * dp_link_count;
   2033 		break;
   2034 	}
   2035 	return true;
   2036 }
   2037 
   2038 /**
   2039  * drm_dp_mst_topology_mgr_set_mst() - Set the MST state for a topology manager
   2040  * @mgr: manager to set state for
   2041  * @mst_state: true to enable MST on this connector - false to disable.
   2042  *
   2043  * This is called by the driver when it detects an MST capable device plugged
   2044  * into a DP MST capable port, or when a DP MST capable device is unplugged.
   2045  */
   2046 int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool mst_state)
   2047 {
   2048 	int ret = 0;
   2049 	struct drm_dp_mst_branch *mstb = NULL;
   2050 
   2051 	mutex_lock(&mgr->lock);
   2052 	if (mst_state == mgr->mst_state)
   2053 		goto out_unlock;
   2054 
   2055 	mgr->mst_state = mst_state;
   2056 	/* set the device into MST mode */
   2057 	if (mst_state) {
   2058 		WARN_ON(mgr->mst_primary);
   2059 
   2060 		/* get dpcd info */
   2061 		ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE);
   2062 		if (ret != DP_RECEIVER_CAP_SIZE) {
   2063 			DRM_DEBUG_KMS("failed to read DPCD\n");
   2064 			goto out_unlock;
   2065 		}
   2066 
   2067 		if (!drm_dp_get_vc_payload_bw(mgr->dpcd[1],
   2068 					      mgr->dpcd[2] & DP_MAX_LANE_COUNT_MASK,
   2069 					      &mgr->pbn_div)) {
   2070 			ret = -EINVAL;
   2071 			goto out_unlock;
   2072 		}
   2073 
   2074 		mgr->total_pbn = 2560;
   2075 		mgr->total_slots = DIV_ROUND_UP(mgr->total_pbn, mgr->pbn_div);
   2076 		mgr->avail_slots = mgr->total_slots;
   2077 
   2078 		/* add initial branch device at LCT 1 */
   2079 		mstb = drm_dp_add_mst_branch_device(1, NULL);
   2080 		if (mstb == NULL) {
   2081 			ret = -ENOMEM;
   2082 			goto out_unlock;
   2083 		}
   2084 		mstb->mgr = mgr;
   2085 
   2086 		/* give this the main reference */
   2087 		mgr->mst_primary = mstb;
   2088 		kref_get(&mgr->mst_primary->kref);
   2089 
   2090 		ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
   2091 							 DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC);
   2092 		if (ret < 0) {
   2093 			goto out_unlock;
   2094 		}
   2095 
   2096 		{
   2097 			struct drm_dp_payload reset_pay;
   2098 			reset_pay.start_slot = 0;
   2099 			reset_pay.num_slots = 0x3f;
   2100 			drm_dp_dpcd_write_payload(mgr, 0, &reset_pay);
   2101 		}
   2102 
   2103 		queue_work(system_long_wq, &mgr->work);
   2104 
   2105 		ret = 0;
   2106 	} else {
   2107 		/* disable MST on the device */
   2108 		mstb = mgr->mst_primary;
   2109 		mgr->mst_primary = NULL;
   2110 		/* this can fail if the device is gone */
   2111 		drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 0);
   2112 		ret = 0;
   2113 		memset(mgr->payloads, 0, mgr->max_payloads * sizeof(struct drm_dp_payload));
   2114 		mgr->payload_mask = 0;
   2115 		set_bit(0, &mgr->payload_mask);
   2116 		mgr->vcpi_mask = 0;
   2117 	}
   2118 
   2119 out_unlock:
   2120 	mutex_unlock(&mgr->lock);
   2121 	if (mstb)
   2122 		drm_dp_put_mst_branch_device(mstb);
   2123 	return ret;
   2124 
   2125 }
   2126 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_set_mst);
   2127 
   2128 /**
   2129  * drm_dp_mst_topology_mgr_suspend() - suspend the MST manager
   2130  * @mgr: manager to suspend
   2131  *
   2132  * This function tells the MST device that we can't handle UP messages
   2133  * anymore. This should stop it from sending any since we are suspended.
   2134  */
   2135 void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr)
   2136 {
   2137 	mutex_lock(&mgr->lock);
   2138 	drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
   2139 			   DP_MST_EN | DP_UPSTREAM_IS_SRC);
   2140 	mutex_unlock(&mgr->lock);
   2141 	flush_work(&mgr->work);
   2142 	flush_work(&mgr->destroy_connector_work);
   2143 }
   2144 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_suspend);
   2145 
   2146 /**
   2147  * drm_dp_mst_topology_mgr_resume() - resume the MST manager
   2148  * @mgr: manager to resume
   2149  *
   2150  * This will fetch DPCD and see if the device is still there,
   2151  * if it is, it will rewrite the MSTM control bits, and return.
   2152  *
   2153  * if the device fails this returns -1, and the driver should do
   2154  * a full MST reprobe, in case we were undocked.
   2155  */
   2156 int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr)
   2157 {
   2158 	int ret = 0;
   2159 
   2160 	mutex_lock(&mgr->lock);
   2161 
   2162 	if (mgr->mst_primary) {
   2163 		int sret;
   2164 		u8 guid[16];
   2165 
   2166 		sret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE);
   2167 		if (sret != DP_RECEIVER_CAP_SIZE) {
   2168 			DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
   2169 			ret = -1;
   2170 			goto out_unlock;
   2171 		}
   2172 
   2173 		ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
   2174 					 DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC);
   2175 		if (ret < 0) {
   2176 			DRM_DEBUG_KMS("mst write failed - undocked during suspend?\n");
   2177 			ret = -1;
   2178 			goto out_unlock;
   2179 		}
   2180 
   2181 		/* Some hubs forget their guids after they resume */
   2182 		sret = drm_dp_dpcd_read(mgr->aux, DP_GUID, guid, 16);
   2183 		if (sret != 16) {
   2184 			DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
   2185 			ret = -1;
   2186 			goto out_unlock;
   2187 		}
   2188 		drm_dp_check_mstb_guid(mgr->mst_primary, guid);
   2189 
   2190 		ret = 0;
   2191 	} else
   2192 		ret = -1;
   2193 
   2194 out_unlock:
   2195 	mutex_unlock(&mgr->lock);
   2196 	return ret;
   2197 }
   2198 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_resume);
   2199 
   2200 static bool drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up)
   2201 {
   2202 	int len;
   2203 	u8 replyblock[32];
   2204 	int replylen, origlen __unused, curreply;
   2205 	int ret;
   2206 	struct drm_dp_sideband_msg_rx *msg;
   2207 	int basereg = up ? DP_SIDEBAND_MSG_UP_REQ_BASE : DP_SIDEBAND_MSG_DOWN_REP_BASE;
   2208 	msg = up ? &mgr->up_req_recv : &mgr->down_rep_recv;
   2209 
   2210 	len = min(mgr->max_dpcd_transaction_bytes, 16);
   2211 	ret = drm_dp_dpcd_read(mgr->aux, basereg,
   2212 			       replyblock, len);
   2213 	if (ret != len) {
   2214 		DRM_DEBUG_KMS("failed to read DPCD down rep %d %d\n", len, ret);
   2215 		return false;
   2216 	}
   2217 	ret = drm_dp_sideband_msg_build(msg, replyblock, len, true);
   2218 	if (!ret) {
   2219 		DRM_DEBUG_KMS("sideband msg build failed %d\n", replyblock[0]);
   2220 		return false;
   2221 	}
   2222 	replylen = msg->curchunk_len + msg->curchunk_hdrlen;
   2223 
   2224 	origlen = replylen;
   2225 	replylen -= len;
   2226 	curreply = len;
   2227 	while (replylen > 0) {
   2228 		len = min3(replylen, mgr->max_dpcd_transaction_bytes, 16);
   2229 		ret = drm_dp_dpcd_read(mgr->aux, basereg + curreply,
   2230 				    replyblock, len);
   2231 		if (ret != len) {
   2232 			DRM_DEBUG_KMS("failed to read a chunk (len %d, ret %d)\n",
   2233 				      len, ret);
   2234 			return false;
   2235 		}
   2236 
   2237 		ret = drm_dp_sideband_msg_build(msg, replyblock, len, false);
   2238 		if (!ret) {
   2239 			DRM_DEBUG_KMS("failed to build sideband msg\n");
   2240 			return false;
   2241 		}
   2242 
   2243 		curreply += len;
   2244 		replylen -= len;
   2245 	}
   2246 	return true;
   2247 }
   2248 
   2249 static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
   2250 {
   2251 	int ret = 0;
   2252 
   2253 	if (!drm_dp_get_one_sb_msg(mgr, false)) {
   2254 		memset(&mgr->down_rep_recv, 0,
   2255 		       sizeof(struct drm_dp_sideband_msg_rx));
   2256 		return 0;
   2257 	}
   2258 
   2259 	if (mgr->down_rep_recv.have_eomt) {
   2260 		struct drm_dp_sideband_msg_tx *txmsg;
   2261 		struct drm_dp_mst_branch *mstb;
   2262 		int slot = -1;
   2263 		mstb = drm_dp_get_mst_branch_device(mgr,
   2264 						    mgr->down_rep_recv.initial_hdr.lct,
   2265 						    mgr->down_rep_recv.initial_hdr.rad);
   2266 
   2267 		if (!mstb) {
   2268 			DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->down_rep_recv.initial_hdr.lct);
   2269 			memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
   2270 			return 0;
   2271 		}
   2272 
   2273 		/* find the message */
   2274 		slot = mgr->down_rep_recv.initial_hdr.seqno;
   2275 		mutex_lock(&mgr->qlock);
   2276 		txmsg = mstb->tx_slots[slot];
   2277 		/* remove from slots */
   2278 		mutex_unlock(&mgr->qlock);
   2279 
   2280 		if (!txmsg) {
   2281 			DRM_DEBUG_KMS("Got MST reply with no msg %p %d %d %02x %02x\n",
   2282 			       mstb,
   2283 			       mgr->down_rep_recv.initial_hdr.seqno,
   2284 			       mgr->down_rep_recv.initial_hdr.lct,
   2285 				      mgr->down_rep_recv.initial_hdr.rad[0],
   2286 				      mgr->down_rep_recv.msg[0]);
   2287 			drm_dp_put_mst_branch_device(mstb);
   2288 			memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
   2289 			return 0;
   2290 		}
   2291 
   2292 		drm_dp_sideband_parse_reply(&mgr->down_rep_recv, &txmsg->reply);
   2293 		if (txmsg->reply.reply_type == 1) {
   2294 			DRM_DEBUG_KMS("Got NAK reply: req 0x%02x, reason 0x%02x, nak data 0x%02x\n", txmsg->reply.req_type, txmsg->reply.u.nak.reason, txmsg->reply.u.nak.nak_data);
   2295 		}
   2296 
   2297 		memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
   2298 		drm_dp_put_mst_branch_device(mstb);
   2299 
   2300 		mutex_lock(&mgr->qlock);
   2301 		txmsg->state = DRM_DP_SIDEBAND_TX_RX;
   2302 		mstb->tx_slots[slot] = NULL;
   2303 #ifdef __NetBSD__
   2304 		DRM_WAKEUP_ONE(&mstb->mgr->tx_waitq, &mstb->mgr->qlock);
   2305 		mutex_unlock(&mgr->qlock);
   2306 #else
   2307 		mutex_unlock(&mgr->qlock);
   2308 
   2309 		wake_up(&mgr->tx_waitq);
   2310 #endif
   2311 	}
   2312 	return ret;
   2313 }
   2314 
   2315 static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
   2316 {
   2317 	int ret = 0;
   2318 
   2319 	if (!drm_dp_get_one_sb_msg(mgr, true)) {
   2320 		memset(&mgr->up_req_recv, 0,
   2321 		       sizeof(struct drm_dp_sideband_msg_rx));
   2322 		return 0;
   2323 	}
   2324 
   2325 	if (mgr->up_req_recv.have_eomt) {
   2326 		struct drm_dp_sideband_msg_req_body msg;
   2327 		struct drm_dp_mst_branch *mstb = NULL;
   2328 		bool seqno;
   2329 
   2330 		if (!mgr->up_req_recv.initial_hdr.broadcast) {
   2331 			mstb = drm_dp_get_mst_branch_device(mgr,
   2332 							    mgr->up_req_recv.initial_hdr.lct,
   2333 							    mgr->up_req_recv.initial_hdr.rad);
   2334 			if (!mstb) {
   2335 				DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct);
   2336 				memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
   2337 				return 0;
   2338 			}
   2339 		}
   2340 
   2341 		seqno = mgr->up_req_recv.initial_hdr.seqno;
   2342 		drm_dp_sideband_parse_req(&mgr->up_req_recv, &msg);
   2343 
   2344 		if (msg.req_type == DP_CONNECTION_STATUS_NOTIFY) {
   2345 			drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, msg.req_type, seqno, false);
   2346 
   2347 			if (!mstb)
   2348 				mstb = drm_dp_get_mst_branch_device_by_guid(mgr, msg.u.conn_stat.guid);
   2349 
   2350 			if (!mstb) {
   2351 				DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct);
   2352 				memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
   2353 				return 0;
   2354 			}
   2355 
   2356 			drm_dp_update_port(mstb, &msg.u.conn_stat);
   2357 
   2358 			DRM_DEBUG_KMS("Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n", msg.u.conn_stat.port_number, msg.u.conn_stat.legacy_device_plug_status, msg.u.conn_stat.displayport_device_plug_status, msg.u.conn_stat.message_capability_status, msg.u.conn_stat.input_port, msg.u.conn_stat.peer_device_type);
   2359 			(*mgr->cbs->hotplug)(mgr);
   2360 
   2361 		} else if (msg.req_type == DP_RESOURCE_STATUS_NOTIFY) {
   2362 			drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, msg.req_type, seqno, false);
   2363 			if (!mstb)
   2364 				mstb = drm_dp_get_mst_branch_device_by_guid(mgr, msg.u.resource_stat.guid);
   2365 
   2366 			if (!mstb) {
   2367 				DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct);
   2368 				memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
   2369 				return 0;
   2370 			}
   2371 
   2372 			DRM_DEBUG_KMS("Got RSN: pn: %d avail_pbn %d\n", msg.u.resource_stat.port_number, msg.u.resource_stat.available_pbn);
   2373 		}
   2374 
   2375 		if (mstb)
   2376 			drm_dp_put_mst_branch_device(mstb);
   2377 
   2378 		memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
   2379 	}
   2380 	return ret;
   2381 }
   2382 
   2383 /**
   2384  * drm_dp_mst_hpd_irq() - MST hotplug IRQ notify
   2385  * @mgr: manager to notify irq for.
   2386  * @esi: 4 bytes from SINK_COUNT_ESI
   2387  * @handled: whether the hpd interrupt was consumed or not
   2388  *
   2389  * This should be called from the driver when it detects a short IRQ,
   2390  * along with the value of the DEVICE_SERVICE_IRQ_VECTOR_ESI0. The
   2391  * topology manager will process the sideband messages received as a result
   2392  * of this.
   2393  */
   2394 int drm_dp_mst_hpd_irq(struct drm_dp_mst_topology_mgr *mgr, u8 *esi, bool *handled)
   2395 {
   2396 	int ret = 0;
   2397 	int sc;
   2398 	*handled = false;
   2399 	sc = esi[0] & 0x3f;
   2400 
   2401 	if (sc != mgr->sink_count) {
   2402 		mgr->sink_count = sc;
   2403 		*handled = true;
   2404 	}
   2405 
   2406 	if (esi[1] & DP_DOWN_REP_MSG_RDY) {
   2407 		ret = drm_dp_mst_handle_down_rep(mgr);
   2408 		*handled = true;
   2409 	}
   2410 
   2411 	if (esi[1] & DP_UP_REQ_MSG_RDY) {
   2412 		ret |= drm_dp_mst_handle_up_req(mgr);
   2413 		*handled = true;
   2414 	}
   2415 
   2416 	drm_dp_mst_kick_tx(mgr);
   2417 	return ret;
   2418 }
   2419 EXPORT_SYMBOL(drm_dp_mst_hpd_irq);
   2420 
   2421 /**
   2422  * drm_dp_mst_detect_port() - get connection status for an MST port
   2423  * @mgr: manager for this port
   2424  * @port: unverified pointer to a port
   2425  *
   2426  * This returns the current connection state for a port. It validates the
   2427  * port pointer still exists so the caller doesn't require a reference
   2428  */
   2429 enum drm_connector_status drm_dp_mst_detect_port(struct drm_connector *connector,
   2430 						 struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
   2431 {
   2432 	enum drm_connector_status status = connector_status_disconnected;
   2433 
   2434 	/* we need to search for the port in the mgr in case its gone */
   2435 	port = drm_dp_get_validated_port_ref(mgr, port);
   2436 	if (!port)
   2437 		return connector_status_disconnected;
   2438 
   2439 	if (!port->ddps)
   2440 		goto out;
   2441 
   2442 	switch (port->pdt) {
   2443 	case DP_PEER_DEVICE_NONE:
   2444 	case DP_PEER_DEVICE_MST_BRANCHING:
   2445 		break;
   2446 
   2447 	case DP_PEER_DEVICE_SST_SINK:
   2448 		status = connector_status_connected;
   2449 		/* for logical ports - cache the EDID */
   2450 		if (port->port_num >= 8 && !port->cached_edid) {
   2451 			port->cached_edid = drm_get_edid(connector, &port->aux.ddc);
   2452 		}
   2453 		break;
   2454 	case DP_PEER_DEVICE_DP_LEGACY_CONV:
   2455 		if (port->ldps)
   2456 			status = connector_status_connected;
   2457 		break;
   2458 	}
   2459 out:
   2460 	drm_dp_put_port(port);
   2461 	return status;
   2462 }
   2463 EXPORT_SYMBOL(drm_dp_mst_detect_port);
   2464 
   2465 /**
   2466  * drm_dp_mst_get_edid() - get EDID for an MST port
   2467  * @connector: toplevel connector to get EDID for
   2468  * @mgr: manager for this port
   2469  * @port: unverified pointer to a port.
   2470  *
   2471  * This returns an EDID for the port connected to a connector,
   2472  * It validates the pointer still exists so the caller doesn't require a
   2473  * reference.
   2474  */
   2475 struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
   2476 {
   2477 	struct edid *edid = NULL;
   2478 
   2479 	/* we need to search for the port in the mgr in case its gone */
   2480 	port = drm_dp_get_validated_port_ref(mgr, port);
   2481 	if (!port)
   2482 		return NULL;
   2483 
   2484 	if (port->cached_edid)
   2485 		edid = drm_edid_duplicate(port->cached_edid);
   2486 	else {
   2487 		edid = drm_get_edid(connector, &port->aux.ddc);
   2488 		drm_mode_connector_set_tile_property(connector);
   2489 	}
   2490 	drm_dp_put_port(port);
   2491 	return edid;
   2492 }
   2493 EXPORT_SYMBOL(drm_dp_mst_get_edid);
   2494 
   2495 /**
   2496  * drm_dp_find_vcpi_slots() - find slots for this PBN value
   2497  * @mgr: manager to use
   2498  * @pbn: payload bandwidth to convert into slots.
   2499  */
   2500 int drm_dp_find_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr,
   2501 			   int pbn)
   2502 {
   2503 	int num_slots;
   2504 
   2505 	num_slots = DIV_ROUND_UP(pbn, mgr->pbn_div);
   2506 
   2507 	if (num_slots > mgr->avail_slots)
   2508 		return -ENOSPC;
   2509 	return num_slots;
   2510 }
   2511 EXPORT_SYMBOL(drm_dp_find_vcpi_slots);
   2512 
   2513 static int drm_dp_init_vcpi(struct drm_dp_mst_topology_mgr *mgr,
   2514 			    struct drm_dp_vcpi *vcpi, int pbn)
   2515 {
   2516 	int num_slots;
   2517 	int ret;
   2518 
   2519 	num_slots = DIV_ROUND_UP(pbn, mgr->pbn_div);
   2520 
   2521 	if (num_slots > mgr->avail_slots)
   2522 		return -ENOSPC;
   2523 
   2524 	vcpi->pbn = pbn;
   2525 	vcpi->aligned_pbn = num_slots * mgr->pbn_div;
   2526 	vcpi->num_slots = num_slots;
   2527 
   2528 	ret = drm_dp_mst_assign_payload_id(mgr, vcpi);
   2529 	if (ret < 0)
   2530 		return ret;
   2531 	return 0;
   2532 }
   2533 
   2534 /**
   2535  * drm_dp_mst_allocate_vcpi() - Allocate a virtual channel
   2536  * @mgr: manager for this port
   2537  * @port: port to allocate a virtual channel for.
   2538  * @pbn: payload bandwidth number to request
   2539  * @slots: returned number of slots for this PBN.
   2540  */
   2541 bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, int pbn, int *slots)
   2542 {
   2543 	int ret;
   2544 
   2545 	port = drm_dp_get_validated_port_ref(mgr, port);
   2546 	if (!port)
   2547 		return false;
   2548 
   2549 	if (port->vcpi.vcpi > 0) {
   2550 		DRM_DEBUG_KMS("payload: vcpi %d already allocated for pbn %d - requested pbn %d\n", port->vcpi.vcpi, port->vcpi.pbn, pbn);
   2551 		if (pbn == port->vcpi.pbn) {
   2552 			*slots = port->vcpi.num_slots;
   2553 			drm_dp_put_port(port);
   2554 			return true;
   2555 		}
   2556 	}
   2557 
   2558 	ret = drm_dp_init_vcpi(mgr, &port->vcpi, pbn);
   2559 	if (ret) {
   2560 		DRM_DEBUG_KMS("failed to init vcpi %d %d %d\n", DIV_ROUND_UP(pbn, mgr->pbn_div), mgr->avail_slots, ret);
   2561 		goto out;
   2562 	}
   2563 	DRM_DEBUG_KMS("initing vcpi for %d %d\n", pbn, port->vcpi.num_slots);
   2564 	*slots = port->vcpi.num_slots;
   2565 
   2566 	drm_dp_put_port(port);
   2567 	return true;
   2568 out:
   2569 	return false;
   2570 }
   2571 EXPORT_SYMBOL(drm_dp_mst_allocate_vcpi);
   2572 
   2573 int drm_dp_mst_get_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
   2574 {
   2575 	int slots = 0;
   2576 	port = drm_dp_get_validated_port_ref(mgr, port);
   2577 	if (!port)
   2578 		return slots;
   2579 
   2580 	slots = port->vcpi.num_slots;
   2581 	drm_dp_put_port(port);
   2582 	return slots;
   2583 }
   2584 EXPORT_SYMBOL(drm_dp_mst_get_vcpi_slots);
   2585 
   2586 /**
   2587  * drm_dp_mst_reset_vcpi_slots() - Reset number of slots to 0 for VCPI
   2588  * @mgr: manager for this port
   2589  * @port: unverified pointer to a port.
   2590  *
   2591  * This just resets the number of slots for the ports VCPI for later programming.
   2592  */
   2593 void drm_dp_mst_reset_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
   2594 {
   2595 	port = drm_dp_get_validated_port_ref(mgr, port);
   2596 	if (!port)
   2597 		return;
   2598 	port->vcpi.num_slots = 0;
   2599 	drm_dp_put_port(port);
   2600 }
   2601 EXPORT_SYMBOL(drm_dp_mst_reset_vcpi_slots);
   2602 
   2603 /**
   2604  * drm_dp_mst_deallocate_vcpi() - deallocate a VCPI
   2605  * @mgr: manager for this port
   2606  * @port: unverified port to deallocate vcpi for
   2607  */
   2608 void drm_dp_mst_deallocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
   2609 {
   2610 	port = drm_dp_get_validated_port_ref(mgr, port);
   2611 	if (!port)
   2612 		return;
   2613 
   2614 	drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
   2615 	port->vcpi.num_slots = 0;
   2616 	port->vcpi.pbn = 0;
   2617 	port->vcpi.aligned_pbn = 0;
   2618 	port->vcpi.vcpi = 0;
   2619 	drm_dp_put_port(port);
   2620 }
   2621 EXPORT_SYMBOL(drm_dp_mst_deallocate_vcpi);
   2622 
   2623 static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
   2624 				     int id, struct drm_dp_payload *payload)
   2625 {
   2626 	u8 payload_alloc[3], status;
   2627 	int ret;
   2628 	int retries = 0;
   2629 
   2630 	drm_dp_dpcd_writeb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS,
   2631 			   DP_PAYLOAD_TABLE_UPDATED);
   2632 
   2633 	payload_alloc[0] = id;
   2634 	payload_alloc[1] = payload->start_slot;
   2635 	payload_alloc[2] = payload->num_slots;
   2636 
   2637 	ret = drm_dp_dpcd_write(mgr->aux, DP_PAYLOAD_ALLOCATE_SET, payload_alloc, 3);
   2638 	if (ret != 3) {
   2639 		DRM_DEBUG_KMS("failed to write payload allocation %d\n", ret);
   2640 		goto fail;
   2641 	}
   2642 
   2643 retry:
   2644 	ret = drm_dp_dpcd_readb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
   2645 	if (ret < 0) {
   2646 		DRM_DEBUG_KMS("failed to read payload table status %d\n", ret);
   2647 		goto fail;
   2648 	}
   2649 
   2650 	if (!(status & DP_PAYLOAD_TABLE_UPDATED)) {
   2651 		retries++;
   2652 		if (retries < 20) {
   2653 			usleep_range(10000, 20000);
   2654 			goto retry;
   2655 		}
   2656 		DRM_DEBUG_KMS("status not set after read payload table status %d\n", status);
   2657 		ret = -EINVAL;
   2658 		goto fail;
   2659 	}
   2660 	ret = 0;
   2661 fail:
   2662 	return ret;
   2663 }
   2664 
   2665 
   2666 /**
   2667  * drm_dp_check_act_status() - Check ACT handled status.
   2668  * @mgr: manager to use
   2669  *
   2670  * Check the payload status bits in the DPCD for ACT handled completion.
   2671  */
   2672 int drm_dp_check_act_status(struct drm_dp_mst_topology_mgr *mgr)
   2673 {
   2674 	u8 status;
   2675 	int ret;
   2676 	int count = 0;
   2677 
   2678 	do {
   2679 		ret = drm_dp_dpcd_readb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
   2680 
   2681 		if (ret < 0) {
   2682 			DRM_DEBUG_KMS("failed to read payload table status %d\n", ret);
   2683 			goto fail;
   2684 		}
   2685 
   2686 		if (status & DP_PAYLOAD_ACT_HANDLED)
   2687 			break;
   2688 		count++;
   2689 		udelay(100);
   2690 
   2691 	} while (count < 30);
   2692 
   2693 	if (!(status & DP_PAYLOAD_ACT_HANDLED)) {
   2694 		DRM_DEBUG_KMS("failed to get ACT bit %d after %d retries\n", status, count);
   2695 		ret = -EINVAL;
   2696 		goto fail;
   2697 	}
   2698 	return 0;
   2699 fail:
   2700 	return ret;
   2701 }
   2702 EXPORT_SYMBOL(drm_dp_check_act_status);
   2703 
   2704 /**
   2705  * drm_dp_calc_pbn_mode() - Calculate the PBN for a mode.
   2706  * @clock: dot clock for the mode
   2707  * @bpp: bpp for the mode.
   2708  *
   2709  * This uses the formula in the spec to calculate the PBN value for a mode.
   2710  */
   2711 int drm_dp_calc_pbn_mode(int clock, int bpp)
   2712 {
   2713 	u64 kbps;
   2714 	s64 peak_kbps;
   2715 	u32 numerator;
   2716 	u32 denominator;
   2717 
   2718 	kbps = clock * bpp;
   2719 
   2720 	/*
   2721 	 * margin 5300ppm + 300ppm ~ 0.6% as per spec, factor is 1.006
   2722 	 * The unit of 54/64Mbytes/sec is an arbitrary unit chosen based on
   2723 	 * common multiplier to render an integer PBN for all link rate/lane
   2724 	 * counts combinations
   2725 	 * calculate
   2726 	 * peak_kbps *= (1006/1000)
   2727 	 * peak_kbps *= (64/54)
   2728 	 * peak_kbps *= 8    convert to bytes
   2729 	 */
   2730 
   2731 	numerator = 64 * 1006;
   2732 	denominator = 54 * 8 * 1000 * 1000;
   2733 
   2734 	kbps *= numerator;
   2735 	peak_kbps = drm_fixp_from_fraction(kbps, denominator);
   2736 
   2737 	return drm_fixp2int_ceil(peak_kbps);
   2738 }
   2739 EXPORT_SYMBOL(drm_dp_calc_pbn_mode);
   2740 
   2741 static int test_calc_pbn_mode(void)
   2742 {
   2743 	int ret;
   2744 	ret = drm_dp_calc_pbn_mode(154000, 30);
   2745 	if (ret != 689) {
   2746 		DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n",
   2747 				154000, 30, 689, ret);
   2748 		return -EINVAL;
   2749 	}
   2750 	ret = drm_dp_calc_pbn_mode(234000, 30);
   2751 	if (ret != 1047) {
   2752 		DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n",
   2753 				234000, 30, 1047, ret);
   2754 		return -EINVAL;
   2755 	}
   2756 	ret = drm_dp_calc_pbn_mode(297000, 24);
   2757 	if (ret != 1063) {
   2758 		DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n",
   2759 				297000, 24, 1063, ret);
   2760 		return -EINVAL;
   2761 	}
   2762 	return 0;
   2763 }
   2764 
   2765 /* we want to kick the TX after we've ack the up/down IRQs. */
   2766 static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr)
   2767 {
   2768 	queue_work(system_long_wq, &mgr->tx_work);
   2769 }
   2770 
   2771 #if IS_ENABLED(CONFIG_DEBUG_FS)
   2772 static void drm_dp_mst_dump_mstb(struct seq_file *m,
   2773 				 struct drm_dp_mst_branch *mstb)
   2774 {
   2775 	struct drm_dp_mst_port *port;
   2776 	int tabs = mstb->lct;
   2777 	char prefix[10];
   2778 	int i;
   2779 
   2780 	for (i = 0; i < tabs; i++)
   2781 		prefix[i] = '\t';
   2782 	prefix[i] = '\0';
   2783 
   2784 	seq_printf(m, "%smst: %p, %d\n", prefix, mstb, mstb->num_ports);
   2785 	list_for_each_entry(port, &mstb->ports, next) {
   2786 		seq_printf(m, "%sport: %d: ddps: %d ldps: %d, %p, conn: %p\n", prefix, port->port_num, port->ddps, port->ldps, port, port->connector);
   2787 		if (port->mstb)
   2788 			drm_dp_mst_dump_mstb(m, port->mstb);
   2789 	}
   2790 }
   2791 
   2792 static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
   2793 				  char *buf)
   2794 {
   2795 	int ret;
   2796 	int i;
   2797 	for (i = 0; i < 4; i++) {
   2798 		ret = drm_dp_dpcd_read(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS + (i * 16), &buf[i * 16], 16);
   2799 		if (ret != 16)
   2800 			break;
   2801 	}
   2802 	if (i == 4)
   2803 		return true;
   2804 	return false;
   2805 }
   2806 
   2807 /**
   2808  * drm_dp_mst_dump_topology(): dump topology to seq file.
   2809  * @m: seq_file to dump output to
   2810  * @mgr: manager to dump current topology for.
   2811  *
   2812  * helper to dump MST topology to a seq file for debugfs.
   2813  */
   2814 void drm_dp_mst_dump_topology(struct seq_file *m,
   2815 			      struct drm_dp_mst_topology_mgr *mgr)
   2816 {
   2817 	int i;
   2818 	struct drm_dp_mst_port *port;
   2819 	mutex_lock(&mgr->lock);
   2820 	if (mgr->mst_primary)
   2821 		drm_dp_mst_dump_mstb(m, mgr->mst_primary);
   2822 
   2823 	/* dump VCPIs */
   2824 	mutex_unlock(&mgr->lock);
   2825 
   2826 	mutex_lock(&mgr->payload_lock);
   2827 	seq_printf(m, "vcpi: %lx %lx\n", mgr->payload_mask, mgr->vcpi_mask);
   2828 
   2829 	for (i = 0; i < mgr->max_payloads; i++) {
   2830 		if (mgr->proposed_vcpis[i]) {
   2831 			port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
   2832 			seq_printf(m, "vcpi %d: %d %d %d\n", i, port->port_num, port->vcpi.vcpi, port->vcpi.num_slots);
   2833 		} else
   2834 			seq_printf(m, "vcpi %d:unsed\n", i);
   2835 	}
   2836 	for (i = 0; i < mgr->max_payloads; i++) {
   2837 		seq_printf(m, "payload %d: %d, %d, %d\n",
   2838 			   i,
   2839 			   mgr->payloads[i].payload_state,
   2840 			   mgr->payloads[i].start_slot,
   2841 			   mgr->payloads[i].num_slots);
   2842 
   2843 
   2844 	}
   2845 	mutex_unlock(&mgr->payload_lock);
   2846 
   2847 	mutex_lock(&mgr->lock);
   2848 	if (mgr->mst_primary) {
   2849 		u8 buf[64];
   2850 		bool bret;
   2851 		int ret;
   2852 		ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, buf, DP_RECEIVER_CAP_SIZE);
   2853 		seq_printf(m, "dpcd: ");
   2854 		for (i = 0; i < DP_RECEIVER_CAP_SIZE; i++)
   2855 			seq_printf(m, "%02x ", buf[i]);
   2856 		seq_printf(m, "\n");
   2857 		ret = drm_dp_dpcd_read(mgr->aux, DP_FAUX_CAP, buf, 2);
   2858 		seq_printf(m, "faux/mst: ");
   2859 		for (i = 0; i < 2; i++)
   2860 			seq_printf(m, "%02x ", buf[i]);
   2861 		seq_printf(m, "\n");
   2862 		ret = drm_dp_dpcd_read(mgr->aux, DP_MSTM_CTRL, buf, 1);
   2863 		seq_printf(m, "mst ctrl: ");
   2864 		for (i = 0; i < 1; i++)
   2865 			seq_printf(m, "%02x ", buf[i]);
   2866 		seq_printf(m, "\n");
   2867 
   2868 		/* dump the standard OUI branch header */
   2869 		ret = drm_dp_dpcd_read(mgr->aux, DP_BRANCH_OUI, buf, DP_BRANCH_OUI_HEADER_SIZE);
   2870 		seq_printf(m, "branch oui: ");
   2871 		for (i = 0; i < 0x3; i++)
   2872 			seq_printf(m, "%02x", buf[i]);
   2873 		seq_printf(m, " devid: ");
   2874 		for (i = 0x3; i < 0x8; i++)
   2875 			seq_printf(m, "%c", buf[i]);
   2876 		seq_printf(m, " revision: hw: %x.%x sw: %x.%x", buf[0x9] >> 4, buf[0x9] & 0xf, buf[0xa], buf[0xb]);
   2877 		seq_printf(m, "\n");
   2878 		bret = dump_dp_payload_table(mgr, buf);
   2879 		if (bret == true) {
   2880 			seq_printf(m, "payload table: ");
   2881 			for (i = 0; i < 63; i++)
   2882 				seq_printf(m, "%02x ", buf[i]);
   2883 			seq_printf(m, "\n");
   2884 		}
   2885 
   2886 	}
   2887 
   2888 	mutex_unlock(&mgr->lock);
   2889 
   2890 }
   2891 EXPORT_SYMBOL(drm_dp_mst_dump_topology);
   2892 #endif	/* IS_ENABLED(CONFIG_DEBUG_FS) */
   2893 
   2894 static void drm_dp_tx_work(struct work_struct *work)
   2895 {
   2896 	struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, tx_work);
   2897 
   2898 	mutex_lock(&mgr->qlock);
   2899 	if (mgr->tx_down_in_progress)
   2900 		process_single_down_tx_qlock(mgr);
   2901 	mutex_unlock(&mgr->qlock);
   2902 }
   2903 
   2904 static void drm_dp_free_mst_port(struct kref *kref)
   2905 {
   2906 	struct drm_dp_mst_port *port = container_of(kref, struct drm_dp_mst_port, kref);
   2907 	kref_put(&port->parent->kref, drm_dp_free_mst_branch_device);
   2908 	kfree(port);
   2909 }
   2910 
   2911 static void drm_dp_destroy_connector_work(struct work_struct *work)
   2912 {
   2913 	struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, destroy_connector_work);
   2914 	struct drm_dp_mst_port *port;
   2915 	bool send_hotplug = false;
   2916 	/*
   2917 	 * Not a regular list traverse as we have to drop the destroy
   2918 	 * connector lock before destroying the connector, to avoid AB->BA
   2919 	 * ordering between this lock and the config mutex.
   2920 	 */
   2921 	for (;;) {
   2922 		mutex_lock(&mgr->destroy_connector_lock);
   2923 		port = list_first_entry_or_null(&mgr->destroy_connector_list, struct drm_dp_mst_port, next);
   2924 		if (!port) {
   2925 			mutex_unlock(&mgr->destroy_connector_lock);
   2926 			break;
   2927 		}
   2928 		list_del(&port->next);
   2929 		mutex_unlock(&mgr->destroy_connector_lock);
   2930 
   2931 		kref_init(&port->kref);
   2932 		INIT_LIST_HEAD(&port->next);
   2933 
   2934 		mgr->cbs->destroy_connector(mgr, port->connector);
   2935 
   2936 		drm_dp_port_teardown_pdt(port, port->pdt);
   2937 		port->pdt = DP_PEER_DEVICE_NONE;
   2938 
   2939 		if (!port->input && port->vcpi.vcpi > 0) {
   2940 			drm_dp_mst_reset_vcpi_slots(mgr, port);
   2941 			drm_dp_update_payload_part1(mgr);
   2942 			drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
   2943 		}
   2944 
   2945 		kref_put(&port->kref, drm_dp_free_mst_port);
   2946 		send_hotplug = true;
   2947 	}
   2948 	if (send_hotplug)
   2949 		(*mgr->cbs->hotplug)(mgr);
   2950 }
   2951 
   2952 /**
   2953  * drm_dp_mst_topology_mgr_init - initialise a topology manager
   2954  * @mgr: manager struct to initialise
   2955  * @dev: device providing this structure - for i2c addition.
   2956  * @aux: DP helper aux channel to talk to this device
   2957  * @max_dpcd_transaction_bytes: hw specific DPCD transaction limit
   2958  * @max_payloads: maximum number of payloads this GPU can source
   2959  * @conn_base_id: the connector object ID the MST device is connected to.
   2960  *
   2961  * Return 0 for success, or negative error code on failure
   2962  */
   2963 int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
   2964 				 struct device *dev, struct drm_dp_aux *aux,
   2965 				 int max_dpcd_transaction_bytes,
   2966 				 int max_payloads, int conn_base_id)
   2967 {
   2968 	mutex_init(&mgr->lock);
   2969 	mutex_init(&mgr->qlock);
   2970 	mutex_init(&mgr->payload_lock);
   2971 	mutex_init(&mgr->destroy_connector_lock);
   2972 	INIT_LIST_HEAD(&mgr->tx_msg_downq);
   2973 	INIT_LIST_HEAD(&mgr->destroy_connector_list);
   2974 	INIT_WORK(&mgr->work, drm_dp_mst_link_probe_work);
   2975 	INIT_WORK(&mgr->tx_work, drm_dp_tx_work);
   2976 	INIT_WORK(&mgr->destroy_connector_work, drm_dp_destroy_connector_work);
   2977 #ifdef __NetBSD__
   2978 	DRM_INIT_WAITQUEUE(&mgr->tx_waitq, "dpmstwait");
   2979 #else
   2980 	init_waitqueue_head(&mgr->tx_waitq);
   2981 #endif
   2982 	mgr->dev = dev;
   2983 	mgr->aux = aux;
   2984 	mgr->max_dpcd_transaction_bytes = max_dpcd_transaction_bytes;
   2985 	mgr->max_payloads = max_payloads;
   2986 	mgr->conn_base_id = conn_base_id;
   2987 	mgr->payloads = kcalloc(max_payloads, sizeof(struct drm_dp_payload), GFP_KERNEL);
   2988 	if (!mgr->payloads)
   2989 		return -ENOMEM;
   2990 	mgr->proposed_vcpis = kcalloc(max_payloads, sizeof(struct drm_dp_vcpi *), GFP_KERNEL);
   2991 	if (!mgr->proposed_vcpis)
   2992 		return -ENOMEM;
   2993 	set_bit(0, &mgr->payload_mask);
   2994 	test_calc_pbn_mode();
   2995 	return 0;
   2996 }
   2997 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_init);
   2998 
   2999 /**
   3000  * drm_dp_mst_topology_mgr_destroy() - destroy topology manager.
   3001  * @mgr: manager to destroy
   3002  */
   3003 void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr)
   3004 {
   3005 	flush_work(&mgr->work);
   3006 	flush_work(&mgr->destroy_connector_work);
   3007 	mutex_lock(&mgr->payload_lock);
   3008 	kfree(mgr->payloads);
   3009 	mgr->payloads = NULL;
   3010 	kfree(mgr->proposed_vcpis);
   3011 	mgr->proposed_vcpis = NULL;
   3012 	mutex_unlock(&mgr->payload_lock);
   3013 	mgr->dev = NULL;
   3014 	mgr->aux = NULL;
   3015 #ifdef __NetBSD__
   3016 	DRM_DESTROY_WAITQUEUE(&mgr->tx_waitq);
   3017 #endif
   3018 	mutex_destroy(&mgr->destroy_connector_lock);
   3019 	mutex_destroy(&mgr->payload_lock);
   3020 	mutex_destroy(&mgr->qlock);
   3021 	mutex_destroy(&mgr->lock);
   3022 }
   3023 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_destroy);
   3024 
   3025 /* I2C device */
   3026 static int drm_dp_mst_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
   3027 			       int num)
   3028 {
   3029 	struct drm_dp_aux *aux = adapter->algo_data;
   3030 	struct drm_dp_mst_port *port = container_of(aux, struct drm_dp_mst_port, aux);
   3031 	struct drm_dp_mst_branch *mstb;
   3032 	struct drm_dp_mst_topology_mgr *mgr = port->mgr;
   3033 	unsigned int i;
   3034 	bool reading = false;
   3035 	struct drm_dp_sideband_msg_req_body msg;
   3036 	struct drm_dp_sideband_msg_tx *txmsg = NULL;
   3037 	int ret;
   3038 
   3039 	mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);
   3040 	if (!mstb)
   3041 		return -EREMOTEIO;
   3042 
   3043 	/* construct i2c msg */
   3044 	/* see if last msg is a read */
   3045 	if (msgs[num - 1].flags & I2C_M_RD)
   3046 		reading = true;
   3047 
   3048 	if (!reading || (num - 1 > DP_REMOTE_I2C_READ_MAX_TRANSACTIONS)) {
   3049 		DRM_DEBUG_KMS("Unsupported I2C transaction for MST device\n");
   3050 		ret = -EIO;
   3051 		goto out;
   3052 	}
   3053 
   3054 	memset(&msg, 0, sizeof(msg));
   3055 	msg.req_type = DP_REMOTE_I2C_READ;
   3056 	msg.u.i2c_read.num_transactions = num - 1;
   3057 	msg.u.i2c_read.port_number = port->port_num;
   3058 	for (i = 0; i < num - 1; i++) {
   3059 		msg.u.i2c_read.transactions[i].i2c_dev_id = msgs[i].addr;
   3060 		msg.u.i2c_read.transactions[i].num_bytes = msgs[i].len;
   3061 		msg.u.i2c_read.transactions[i].bytes = msgs[i].buf;
   3062 	}
   3063 	msg.u.i2c_read.read_i2c_device_id = msgs[num - 1].addr;
   3064 	msg.u.i2c_read.num_bytes_read = msgs[num - 1].len;
   3065 
   3066 	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
   3067 	if (!txmsg) {
   3068 		ret = -ENOMEM;
   3069 		goto out;
   3070 	}
   3071 
   3072 	txmsg->dst = mstb;
   3073 	drm_dp_encode_sideband_req(&msg, txmsg);
   3074 
   3075 	drm_dp_queue_down_tx(mgr, txmsg);
   3076 
   3077 	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
   3078 	if (ret > 0) {
   3079 
   3080 		if (txmsg->reply.reply_type == 1) { /* got a NAK back */
   3081 			ret = -EREMOTEIO;
   3082 			goto out;
   3083 		}
   3084 		if (txmsg->reply.u.remote_i2c_read_ack.num_bytes != msgs[num - 1].len) {
   3085 			ret = -EIO;
   3086 			goto out;
   3087 		}
   3088 		memcpy(msgs[num - 1].buf, txmsg->reply.u.remote_i2c_read_ack.bytes, msgs[num - 1].len);
   3089 		ret = num;
   3090 	}
   3091 out:
   3092 	kfree(txmsg);
   3093 	drm_dp_put_mst_branch_device(mstb);
   3094 	return ret;
   3095 }
   3096 
   3097 static u32 drm_dp_mst_i2c_functionality(struct i2c_adapter *adapter)
   3098 {
   3099 	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
   3100 	       I2C_FUNC_SMBUS_READ_BLOCK_DATA |
   3101 	       I2C_FUNC_SMBUS_BLOCK_PROC_CALL |
   3102 	       I2C_FUNC_10BIT_ADDR;
   3103 }
   3104 
   3105 static const struct i2c_algorithm drm_dp_mst_i2c_algo = {
   3106 	.functionality = drm_dp_mst_i2c_functionality,
   3107 	.master_xfer = drm_dp_mst_i2c_xfer,
   3108 };
   3109 
   3110 /**
   3111  * drm_dp_mst_register_i2c_bus() - register an I2C adapter for I2C-over-AUX
   3112  * @aux: DisplayPort AUX channel
   3113  *
   3114  * Returns 0 on success or a negative error code on failure.
   3115  */
   3116 static int drm_dp_mst_register_i2c_bus(struct drm_dp_aux *aux)
   3117 {
   3118 	aux->ddc.algo = &drm_dp_mst_i2c_algo;
   3119 	aux->ddc.algo_data = aux;
   3120 	aux->ddc.retries = 3;
   3121 
   3122 	aux->ddc.class = I2C_CLASS_DDC;
   3123 	aux->ddc.owner = THIS_MODULE;
   3124 	aux->ddc.dev.parent = aux->dev;
   3125 #ifndef __NetBSD__		/* XXX of? */
   3126 	aux->ddc.dev.of_node = aux->dev->of_node;
   3127 #endif
   3128 
   3129 	strlcpy(aux->ddc.name, aux->name ? aux->name : dev_name(aux->dev),
   3130 		sizeof(aux->ddc.name));
   3131 
   3132 	return i2c_add_adapter(&aux->ddc);
   3133 }
   3134 
   3135 /**
   3136  * drm_dp_mst_unregister_i2c_bus() - unregister an I2C-over-AUX adapter
   3137  * @aux: DisplayPort AUX channel
   3138  */
   3139 static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_aux *aux)
   3140 {
   3141 	i2c_del_adapter(&aux->ddc);
   3142 }
   3143