/*	$NetBSD: drm_dp_mst_topology.c,v 1.3.2.2 2018/09/06 06:56:09 pgoyette Exp $	*/

/*
 * Copyright © 2014 Red Hat
 *
 * Permission to use, copy, modify, distribute, and sell this software and its
 * documentation for any purpose is hereby granted without fee, provided that
 * the above copyright notice appear in all copies and that both that copyright
 * notice and this permission notice appear in supporting documentation, and
 * that the name of the copyright holders not be used in advertising or
 * publicity pertaining to distribution of the software without specific,
 * written prior permission.  The copyright holders make no representations
 * about the suitability of this software for any purpose.  It is provided "as
 * is" without express or implied warranty.
 *
 * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
 * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
 * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
 * OF THIS SOFTWARE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: drm_dp_mst_topology.c,v 1.3.2.2 2018/09/06 06:56:09 pgoyette Exp $");

#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/i2c.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/module.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drmP.h>

#include <drm/drm_fixed.h>

/**
 * DOC: dp mst helper
 *
 * These functions contain parts of the DisplayPort 1.2a MultiStream Transport
 * protocol. The helpers contain a topology manager and bandwidth manager.
 * The helpers encapsulate the sending and receiving of sideband messages.
 */
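
/*
 * Sideband message header layout, as produced by
 * drm_dp_encode_sideband_msg_hdr() and consumed by
 * drm_dp_decode_sideband_msg_hdr() below:
 *
 *	byte 0:           LCT (high nibble) | LCR (low nibble)
 *	bytes 1..(LCT/2): RAD, one nibble per hop
 *	next byte:        broadcast (bit 7) | path_msg (bit 6) | msg_len (bits 5:0)
 *	last byte:        SOMT (bit 7) | EOMT (bit 6) | seqno (bit 4) | CRC-4 (bits 3:0)
 */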
#if IS_ENABLED(CONFIG_DEBUG_FS)
static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
				  char *buf);
#endif
static int test_calc_pbn_mode(void);

static void drm_dp_put_port(struct drm_dp_mst_port *port);

static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
				     int id,
				     struct drm_dp_payload *payload);

static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
				  struct drm_dp_mst_port *port,
				  int offset, int size, u8 *bytes);

static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
				     struct drm_dp_mst_branch *mstb);
static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
					   struct drm_dp_mst_branch *mstb,
					   struct drm_dp_mst_port *port);
static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
				 u8 *guid);

static int drm_dp_mst_register_i2c_bus(struct drm_dp_aux *aux);
static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_aux *aux);
static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr);
/* sideband msg handling */
static u8 drm_dp_msg_header_crc4(const uint8_t *data, size_t num_nibbles)
{
	u8 bitmask = 0x80;
	u8 bitshift = 7;
	u8 array_index = 0;
	int number_of_bits = num_nibbles * 4;
	u8 remainder = 0;

	while (number_of_bits != 0) {
		number_of_bits--;
		remainder <<= 1;
		remainder |= (data[array_index] & bitmask) >> bitshift;
		bitmask >>= 1;
		bitshift--;
		if (bitmask == 0) {
			bitmask = 0x80;
			bitshift = 7;
			array_index++;
		}
		if ((remainder & 0x10) == 0x10)
			remainder ^= 0x13;
	}

	number_of_bits = 4;
	while (number_of_bits != 0) {
		number_of_bits--;
		remainder <<= 1;
		if ((remainder & 0x10) != 0)
			remainder ^= 0x13;
	}

	return remainder;
}

static u8 drm_dp_msg_data_crc4(const uint8_t *data, u8 number_of_bytes)
{
	u8 bitmask = 0x80;
	u8 bitshift = 7;
	u8 array_index = 0;
	int number_of_bits = number_of_bytes * 8;
	u16 remainder = 0;

	while (number_of_bits != 0) {
		number_of_bits--;
		remainder <<= 1;
		remainder |= (data[array_index] & bitmask) >> bitshift;
		bitmask >>= 1;
		bitshift--;
		if (bitmask == 0) {
			bitmask = 0x80;
			bitshift = 7;
			array_index++;
		}
		if ((remainder & 0x100) == 0x100)
			remainder ^= 0xd5;
	}

	number_of_bits = 8;
	while (number_of_bits != 0) {
		number_of_bits--;
		remainder <<= 1;
		if ((remainder & 0x100) != 0)
			remainder ^= 0xd5;
	}

	return remainder & 0xff;
}
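
/*
 * Both routines above are bit-serial CRC implementations:
 * drm_dp_msg_header_crc4() is a CRC-4 with polynomial 0x13
 * (x^4 + x + 1) over whole nibbles, and drm_dp_msg_data_crc4() is a
 * CRC-8 with polynomial 0x1d5 (x^8 + x^7 + x^6 + x^4 + x^2 + 1) over
 * whole bytes.  A minimal illustrative sketch (hypothetical, never
 * compiled) of how the encoder below uses the header CRC:
 */
#if 0
static void drm_dp_crc_example(void)
{
	/*
	 * 3-byte header for lct=1/lcr=0, msg_len=2, somt=1: the CRC
	 * covers the first (3 * 2) - 1 = 5 nibbles, i.e. everything
	 * except the CRC nibble itself in the low half of the last
	 * byte.
	 */
	u8 hdr[3] = { 0x10, 0x02, 0x80 };
	u8 crc = drm_dp_msg_header_crc4(hdr, (3 * 2) - 1);

	hdr[2] |= (crc & 0xf);	/* what drm_dp_encode_sideband_msg_hdr() does */
}
#endif
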
static inline u8 drm_dp_calc_sb_hdr_size(struct drm_dp_sideband_msg_hdr *hdr)
{
	u8 size = 3;
	size += (hdr->lct / 2);
	return size;
}
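
/*
 * For example: lct 1 gives a 3 byte header (no RAD bytes); lct 2 or 3
 * gives 4 bytes, since each RAD byte holds two hop nibbles.
 */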

static void drm_dp_encode_sideband_msg_hdr(struct drm_dp_sideband_msg_hdr *hdr,
					   u8 *buf, int *len)
{
	int idx = 0;
	int i;
	u8 crc4;
	buf[idx++] = ((hdr->lct & 0xf) << 4) | (hdr->lcr & 0xf);
	for (i = 0; i < (hdr->lct / 2); i++)
		buf[idx++] = hdr->rad[i];
	buf[idx++] = (hdr->broadcast << 7) | (hdr->path_msg << 6) |
		(hdr->msg_len & 0x3f);
	buf[idx++] = (hdr->somt << 7) | (hdr->eomt << 6) | (hdr->seqno << 4);

	crc4 = drm_dp_msg_header_crc4(buf, (idx * 2) - 1);
	buf[idx - 1] |= (crc4 & 0xf);

	*len = idx;
}

static bool drm_dp_decode_sideband_msg_hdr(struct drm_dp_sideband_msg_hdr *hdr,
					   u8 *buf, int buflen, u8 *hdrlen)
{
	u8 crc4;
	u8 len;
	int i;
	u8 idx;
	if (buf[0] == 0)
		return false;
	len = 3;
	len += ((buf[0] & 0xf0) >> 4) / 2;
	if (len > buflen)
		return false;
	crc4 = drm_dp_msg_header_crc4(buf, (len * 2) - 1);

	if ((crc4 & 0xf) != (buf[len - 1] & 0xf)) {
		DRM_DEBUG_KMS("crc4 mismatch 0x%x 0x%x\n", crc4, buf[len - 1]);
		return false;
	}

	hdr->lct = (buf[0] & 0xf0) >> 4;
	hdr->lcr = (buf[0] & 0xf);
	idx = 1;
	for (i = 0; i < (hdr->lct / 2); i++)
		hdr->rad[i] = buf[idx++];
	hdr->broadcast = (buf[idx] >> 7) & 0x1;
	hdr->path_msg = (buf[idx] >> 6) & 0x1;
	hdr->msg_len = buf[idx] & 0x3f;
	idx++;
	hdr->somt = (buf[idx] >> 7) & 0x1;
	hdr->eomt = (buf[idx] >> 6) & 0x1;
	hdr->seqno = (buf[idx] >> 4) & 0x1;
	idx++;
	*hdrlen = idx;
	return true;
}

static void drm_dp_encode_sideband_req(struct drm_dp_sideband_msg_req_body *req,
				       struct drm_dp_sideband_msg_tx *raw)
{
	int idx = 0;
	int i;
	u8 *buf = raw->msg;
	buf[idx++] = req->req_type & 0x7f;

	switch (req->req_type) {
	case DP_ENUM_PATH_RESOURCES:
		buf[idx] = (req->u.port_num.port_number & 0xf) << 4;
		idx++;
		break;
	case DP_ALLOCATE_PAYLOAD:
		buf[idx] = (req->u.allocate_payload.port_number & 0xf) << 4 |
			(req->u.allocate_payload.number_sdp_streams & 0xf);
		idx++;
		buf[idx] = (req->u.allocate_payload.vcpi & 0x7f);
		idx++;
		buf[idx] = (req->u.allocate_payload.pbn >> 8);
		idx++;
		buf[idx] = (req->u.allocate_payload.pbn & 0xff);
		idx++;
		for (i = 0; i < req->u.allocate_payload.number_sdp_streams / 2; i++) {
			buf[idx] = ((req->u.allocate_payload.sdp_stream_sink[i * 2] & 0xf) << 4) |
				(req->u.allocate_payload.sdp_stream_sink[i * 2 + 1] & 0xf);
			idx++;
		}
		if (req->u.allocate_payload.number_sdp_streams & 1) {
			i = req->u.allocate_payload.number_sdp_streams - 1;
			buf[idx] = (req->u.allocate_payload.sdp_stream_sink[i] & 0xf) << 4;
			idx++;
		}
		break;
	case DP_QUERY_PAYLOAD:
		buf[idx] = (req->u.query_payload.port_number & 0xf) << 4;
		idx++;
		buf[idx] = (req->u.query_payload.vcpi & 0x7f);
		idx++;
		break;
	case DP_REMOTE_DPCD_READ:
		buf[idx] = (req->u.dpcd_read.port_number & 0xf) << 4;
		buf[idx] |= ((req->u.dpcd_read.dpcd_address & 0xf0000) >> 16) & 0xf;
		idx++;
		buf[idx] = (req->u.dpcd_read.dpcd_address & 0xff00) >> 8;
		idx++;
		buf[idx] = (req->u.dpcd_read.dpcd_address & 0xff);
		idx++;
		buf[idx] = (req->u.dpcd_read.num_bytes);
		idx++;
		break;

	case DP_REMOTE_DPCD_WRITE:
		buf[idx] = (req->u.dpcd_write.port_number & 0xf) << 4;
		buf[idx] |= ((req->u.dpcd_write.dpcd_address & 0xf0000) >> 16) & 0xf;
		idx++;
		buf[idx] = (req->u.dpcd_write.dpcd_address & 0xff00) >> 8;
		idx++;
		buf[idx] = (req->u.dpcd_write.dpcd_address & 0xff);
		idx++;
		buf[idx] = (req->u.dpcd_write.num_bytes);
		idx++;
		memcpy(&buf[idx], req->u.dpcd_write.bytes, req->u.dpcd_write.num_bytes);
		idx += req->u.dpcd_write.num_bytes;
		break;
	case DP_REMOTE_I2C_READ:
		buf[idx] = (req->u.i2c_read.port_number & 0xf) << 4;
		buf[idx] |= (req->u.i2c_read.num_transactions & 0x3);
		idx++;
		for (i = 0; i < (req->u.i2c_read.num_transactions & 0x3); i++) {
			buf[idx] = req->u.i2c_read.transactions[i].i2c_dev_id & 0x7f;
			idx++;
			buf[idx] = req->u.i2c_read.transactions[i].num_bytes;
			idx++;
			memcpy(&buf[idx], req->u.i2c_read.transactions[i].bytes, req->u.i2c_read.transactions[i].num_bytes);
			idx += req->u.i2c_read.transactions[i].num_bytes;

			buf[idx] = (req->u.i2c_read.transactions[i].no_stop_bit & 0x1) << 5;
			buf[idx] |= (req->u.i2c_read.transactions[i].i2c_transaction_delay & 0xf);
			idx++;
		}
		buf[idx] = (req->u.i2c_read.read_i2c_device_id) & 0x7f;
		idx++;
		buf[idx] = (req->u.i2c_read.num_bytes_read);
		idx++;
		break;

	case DP_REMOTE_I2C_WRITE:
		buf[idx] = (req->u.i2c_write.port_number & 0xf) << 4;
		idx++;
		buf[idx] = (req->u.i2c_write.write_i2c_device_id) & 0x7f;
		idx++;
		buf[idx] = (req->u.i2c_write.num_bytes);
		idx++;
		memcpy(&buf[idx], req->u.i2c_write.bytes, req->u.i2c_write.num_bytes);
		idx += req->u.i2c_write.num_bytes;
		break;
	}
	raw->cur_len = idx;
}
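
/*
 * A minimal illustrative sketch (hypothetical, never compiled) of the
 * encoding above: an ENUM_PATH_RESOURCES request for port 2 serialises
 * to just two bytes, the request type (DP_ENUM_PATH_RESOURCES, 0x10)
 * followed by the port number in the high nibble (0x20).
 */
#if 0
static void drm_dp_encode_example(struct drm_dp_sideband_msg_tx *txmsg)
{
	struct drm_dp_sideband_msg_req_body req;

	memset(&req, 0, sizeof(req));
	req.req_type = DP_ENUM_PATH_RESOURCES;
	req.u.port_num.port_number = 2;
	drm_dp_encode_sideband_req(&req, txmsg);
	/* txmsg->msg[0] == 0x10, txmsg->msg[1] == 0x20, txmsg->cur_len == 2 */
}
#endif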

static void drm_dp_crc_sideband_chunk_req(u8 *msg, u8 len)
{
	u8 crc4;
	crc4 = drm_dp_msg_data_crc4(msg, len);
	msg[len] = crc4;
}

static void drm_dp_encode_sideband_reply(struct drm_dp_sideband_msg_reply_body *rep,
					 struct drm_dp_sideband_msg_tx *raw)
{
	int idx = 0;
	u8 *buf = raw->msg;

	buf[idx++] = (rep->reply_type & 0x1) << 7 | (rep->req_type & 0x7f);

	raw->cur_len = idx;
}

/* this adds a chunk of msg to the builder to get the final msg */
static bool drm_dp_sideband_msg_build(struct drm_dp_sideband_msg_rx *msg,
				      u8 *replybuf, u8 replybuflen, bool hdr)
{
	int ret;
	u8 crc4 __unused;	/* XXX Mistake?  */

	if (hdr) {
		u8 hdrlen;
		struct drm_dp_sideband_msg_hdr recv_hdr;
		ret = drm_dp_decode_sideband_msg_hdr(&recv_hdr, replybuf, replybuflen, &hdrlen);
		if (ret == false) {
			print_hex_dump(KERN_DEBUG, "failed hdr", DUMP_PREFIX_NONE, 16, 1, replybuf, replybuflen, false);
			return false;
		}

		/*
		 * ignore out-of-order messages or messages that are part of a
		 * failed transaction
		 */
		if (!recv_hdr.somt && !msg->have_somt)
			return false;

		/* get length contained in this portion */
		msg->curchunk_len = recv_hdr.msg_len;
		msg->curchunk_hdrlen = hdrlen;

		/* we have already gotten an somt - don't bother parsing */
		if (recv_hdr.somt && msg->have_somt)
			return false;

		if (recv_hdr.somt) {
			memcpy(&msg->initial_hdr, &recv_hdr, sizeof(struct drm_dp_sideband_msg_hdr));
			msg->have_somt = true;
		}
		if (recv_hdr.eomt)
			msg->have_eomt = true;

		/* copy the bytes for the remainder of this header chunk */
		msg->curchunk_idx = min(msg->curchunk_len, (u8)(replybuflen - hdrlen));
		memcpy(&msg->chunk[0], replybuf + hdrlen, msg->curchunk_idx);
	} else {
		memcpy(&msg->chunk[msg->curchunk_idx], replybuf, replybuflen);
		msg->curchunk_idx += replybuflen;
	}

	if (msg->curchunk_idx >= msg->curchunk_len) {
		/* do CRC */
		crc4 = drm_dp_msg_data_crc4(msg->chunk, msg->curchunk_len - 1);
		/* copy chunk into bigger msg */
		memcpy(&msg->msg[msg->curlen], msg->chunk, msg->curchunk_len - 1);
		msg->curlen += msg->curchunk_len - 1;
	}
	return true;
}
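
/*
 * Reassembly note: the first chunk of a message carries SOMT and the
 * last carries EOMT in its header; each chunk ends with a data CRC
 * byte, which is why only curchunk_len - 1 bytes are appended to the
 * final message above (the computed CRC is currently not compared
 * against the received one, hence the __unused annotation).
 */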

static bool drm_dp_sideband_parse_link_address(struct drm_dp_sideband_msg_rx *raw,
					       struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;
	int i;
	memcpy(repmsg->u.link_addr.guid, &raw->msg[idx], 16);
	idx += 16;
	repmsg->u.link_addr.nports = raw->msg[idx] & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	for (i = 0; i < repmsg->u.link_addr.nports; i++) {
		if (raw->msg[idx] & 0x80)
			repmsg->u.link_addr.ports[i].input_port = 1;

		repmsg->u.link_addr.ports[i].peer_device_type = (raw->msg[idx] >> 4) & 0x7;
		repmsg->u.link_addr.ports[i].port_number = (raw->msg[idx] & 0xf);

		idx++;
		if (idx > raw->curlen)
			goto fail_len;
		repmsg->u.link_addr.ports[i].mcs = (raw->msg[idx] >> 7) & 0x1;
		repmsg->u.link_addr.ports[i].ddps = (raw->msg[idx] >> 6) & 0x1;
		if (repmsg->u.link_addr.ports[i].input_port == 0)
			repmsg->u.link_addr.ports[i].legacy_device_plug_status = (raw->msg[idx] >> 5) & 0x1;
		idx++;
		if (idx > raw->curlen)
			goto fail_len;
		if (repmsg->u.link_addr.ports[i].input_port == 0) {
			repmsg->u.link_addr.ports[i].dpcd_revision = (raw->msg[idx]);
			idx++;
			if (idx > raw->curlen)
				goto fail_len;
			memcpy(repmsg->u.link_addr.ports[i].peer_guid, &raw->msg[idx], 16);
			idx += 16;
			if (idx > raw->curlen)
				goto fail_len;
			repmsg->u.link_addr.ports[i].num_sdp_streams = (raw->msg[idx] >> 4) & 0xf;
			repmsg->u.link_addr.ports[i].num_sdp_stream_sinks = (raw->msg[idx] & 0xf);
			idx++;

		}
		if (idx > raw->curlen)
			goto fail_len;
	}

	return true;
fail_len:
	DRM_DEBUG_KMS("link address reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_remote_dpcd_read(struct drm_dp_sideband_msg_rx *raw,
						   struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;
	repmsg->u.remote_dpcd_read_ack.port_number = raw->msg[idx] & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.remote_dpcd_read_ack.num_bytes = raw->msg[idx];
	idx++;		/* step past num_bytes so the copy below starts at the data */
	if (idx > raw->curlen)
		goto fail_len;

	memcpy(repmsg->u.remote_dpcd_read_ack.bytes, &raw->msg[idx], repmsg->u.remote_dpcd_read_ack.num_bytes);
	return true;
fail_len:
	DRM_DEBUG_KMS("remote dpcd read reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_remote_dpcd_write(struct drm_dp_sideband_msg_rx *raw,
						      struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;
	repmsg->u.remote_dpcd_write_ack.port_number = raw->msg[idx] & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	return true;
fail_len:
	DRM_DEBUG_KMS("parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_remote_i2c_read_ack(struct drm_dp_sideband_msg_rx *raw,
						      struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;

	repmsg->u.remote_i2c_read_ack.port_number = (raw->msg[idx] & 0xf);
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.remote_i2c_read_ack.num_bytes = raw->msg[idx];
	idx++;
	/* TODO check */
	memcpy(repmsg->u.remote_i2c_read_ack.bytes, &raw->msg[idx], repmsg->u.remote_i2c_read_ack.num_bytes);
	return true;
fail_len:
	DRM_DEBUG_KMS("remote i2c reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_enum_path_resources_ack(struct drm_dp_sideband_msg_rx *raw,
							  struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;
	repmsg->u.path_resources.port_number = (raw->msg[idx] >> 4) & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.path_resources.full_payload_bw_number = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
	idx += 2;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.path_resources.avail_payload_bw_number = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
	idx += 2;
	if (idx > raw->curlen)
		goto fail_len;
	return true;
fail_len:
	DRM_DEBUG_KMS("enum resource parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_allocate_payload_ack(struct drm_dp_sideband_msg_rx *raw,
							  struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;
	repmsg->u.allocate_payload.port_number = (raw->msg[idx] >> 4) & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.allocate_payload.vcpi = raw->msg[idx];
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.allocate_payload.allocated_pbn = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
	idx += 2;
	if (idx > raw->curlen)
		goto fail_len;
	return true;
fail_len:
	DRM_DEBUG_KMS("allocate payload parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_query_payload_ack(struct drm_dp_sideband_msg_rx *raw,
						    struct drm_dp_sideband_msg_reply_body *repmsg)
{
	int idx = 1;
	repmsg->u.query_payload.port_number = (raw->msg[idx] >> 4) & 0xf;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;
	repmsg->u.query_payload.allocated_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]);
	idx += 2;
	if (idx > raw->curlen)
		goto fail_len;
	return true;
fail_len:
	DRM_DEBUG_KMS("query payload parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_reply(struct drm_dp_sideband_msg_rx *raw,
					struct drm_dp_sideband_msg_reply_body *msg)
{
	memset(msg, 0, sizeof(*msg));
	msg->reply_type = (raw->msg[0] & 0x80) >> 7;
	msg->req_type = (raw->msg[0] & 0x7f);

	if (msg->reply_type) {
		memcpy(msg->u.nak.guid, &raw->msg[1], 16);
		msg->u.nak.reason = raw->msg[17];
		msg->u.nak.nak_data = raw->msg[18];
		return false;
	}

	switch (msg->req_type) {
	case DP_LINK_ADDRESS:
		return drm_dp_sideband_parse_link_address(raw, msg);
	case DP_QUERY_PAYLOAD:
		return drm_dp_sideband_parse_query_payload_ack(raw, msg);
	case DP_REMOTE_DPCD_READ:
		return drm_dp_sideband_parse_remote_dpcd_read(raw, msg);
	case DP_REMOTE_DPCD_WRITE:
		return drm_dp_sideband_parse_remote_dpcd_write(raw, msg);
	case DP_REMOTE_I2C_READ:
		return drm_dp_sideband_parse_remote_i2c_read_ack(raw, msg);
	case DP_ENUM_PATH_RESOURCES:
		return drm_dp_sideband_parse_enum_path_resources_ack(raw, msg);
	case DP_ALLOCATE_PAYLOAD:
		return drm_dp_sideband_parse_allocate_payload_ack(raw, msg);
	default:
		DRM_ERROR("Got unknown reply 0x%02x\n", msg->req_type);
		return false;
	}
}

static bool drm_dp_sideband_parse_connection_status_notify(struct drm_dp_sideband_msg_rx *raw,
							   struct drm_dp_sideband_msg_req_body *msg)
{
	int idx = 1;

	msg->u.conn_stat.port_number = (raw->msg[idx] & 0xf0) >> 4;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;

	memcpy(msg->u.conn_stat.guid, &raw->msg[idx], 16);
	idx += 16;
	if (idx > raw->curlen)
		goto fail_len;

	msg->u.conn_stat.legacy_device_plug_status = (raw->msg[idx] >> 6) & 0x1;
	msg->u.conn_stat.displayport_device_plug_status = (raw->msg[idx] >> 5) & 0x1;
	msg->u.conn_stat.message_capability_status = (raw->msg[idx] >> 4) & 0x1;
	msg->u.conn_stat.input_port = (raw->msg[idx] >> 3) & 0x1;
	msg->u.conn_stat.peer_device_type = (raw->msg[idx] & 0x7);
	idx++;
	return true;
fail_len:
	DRM_DEBUG_KMS("connection status reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_resource_status_notify(struct drm_dp_sideband_msg_rx *raw,
							   struct drm_dp_sideband_msg_req_body *msg)
{
	int idx = 1;

	msg->u.resource_stat.port_number = (raw->msg[idx] & 0xf0) >> 4;
	idx++;
	if (idx > raw->curlen)
		goto fail_len;

	memcpy(msg->u.resource_stat.guid, &raw->msg[idx], 16);
	idx += 16;
	if (idx > raw->curlen)
		goto fail_len;

	msg->u.resource_stat.available_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]);
	idx++;
	return true;
fail_len:
	DRM_DEBUG_KMS("resource status reply parse length fail %d %d\n", idx, raw->curlen);
	return false;
}

static bool drm_dp_sideband_parse_req(struct drm_dp_sideband_msg_rx *raw,
				      struct drm_dp_sideband_msg_req_body *msg)
{
	memset(msg, 0, sizeof(*msg));
	msg->req_type = (raw->msg[0] & 0x7f);

	switch (msg->req_type) {
	case DP_CONNECTION_STATUS_NOTIFY:
		return drm_dp_sideband_parse_connection_status_notify(raw, msg);
	case DP_RESOURCE_STATUS_NOTIFY:
		return drm_dp_sideband_parse_resource_status_notify(raw, msg);
	default:
		DRM_ERROR("Got unknown request 0x%02x\n", msg->req_type);
		return false;
	}
}

static int build_dpcd_write(struct drm_dp_sideband_msg_tx *msg, u8 port_num, u32 offset, u8 num_bytes, u8 *bytes)
{
	struct drm_dp_sideband_msg_req_body req;

	req.req_type = DP_REMOTE_DPCD_WRITE;
	req.u.dpcd_write.port_number = port_num;
	req.u.dpcd_write.dpcd_address = offset;
	req.u.dpcd_write.num_bytes = num_bytes;
	req.u.dpcd_write.bytes = bytes;
	drm_dp_encode_sideband_req(&req, msg);

	return 0;
}

static int build_link_address(struct drm_dp_sideband_msg_tx *msg)
{
	struct drm_dp_sideband_msg_req_body req;

	req.req_type = DP_LINK_ADDRESS;
	drm_dp_encode_sideband_req(&req, msg);
	return 0;
}

static int build_enum_path_resources(struct drm_dp_sideband_msg_tx *msg, int port_num)
{
	struct drm_dp_sideband_msg_req_body req;

	req.req_type = DP_ENUM_PATH_RESOURCES;
	req.u.port_num.port_number = port_num;
	drm_dp_encode_sideband_req(&req, msg);
	msg->path_msg = true;
	return 0;
}

static int build_allocate_payload(struct drm_dp_sideband_msg_tx *msg, int port_num,
				  u8 vcpi, uint16_t pbn)
{
	struct drm_dp_sideband_msg_req_body req;
	memset(&req, 0, sizeof(req));
	req.req_type = DP_ALLOCATE_PAYLOAD;
	req.u.allocate_payload.port_number = port_num;
	req.u.allocate_payload.vcpi = vcpi;
	req.u.allocate_payload.pbn = pbn;
	drm_dp_encode_sideband_req(&req, msg);
	msg->path_msg = true;
	return 0;
}

static int drm_dp_mst_assign_payload_id(struct drm_dp_mst_topology_mgr *mgr,
					struct drm_dp_vcpi *vcpi)
{
	int ret, vcpi_ret;

	mutex_lock(&mgr->payload_lock);
	ret = find_first_zero_bit(&mgr->payload_mask, mgr->max_payloads + 1);
	if (ret > mgr->max_payloads) {
		ret = -EINVAL;
		DRM_DEBUG_KMS("out of payload ids %d\n", ret);
		goto out_unlock;
	}

	vcpi_ret = find_first_zero_bit(&mgr->vcpi_mask, mgr->max_payloads + 1);
	if (vcpi_ret > mgr->max_payloads) {
		ret = -EINVAL;
		DRM_DEBUG_KMS("out of vcpi ids %d\n", ret);
		goto out_unlock;
	}

	set_bit(ret, &mgr->payload_mask);
	set_bit(vcpi_ret, &mgr->vcpi_mask);
	vcpi->vcpi = vcpi_ret + 1;
	mgr->proposed_vcpis[ret - 1] = vcpi;
out_unlock:
	mutex_unlock(&mgr->payload_lock);
	return ret;
}

static void drm_dp_mst_put_payload_id(struct drm_dp_mst_topology_mgr *mgr,
				      int vcpi)
{
	int i;
	if (vcpi == 0)
		return;

	mutex_lock(&mgr->payload_lock);
	DRM_DEBUG_KMS("putting payload %d\n", vcpi);
	clear_bit(vcpi - 1, &mgr->vcpi_mask);

	for (i = 0; i < mgr->max_payloads; i++) {
		if (mgr->proposed_vcpis[i])
			if (mgr->proposed_vcpis[i]->vcpi == vcpi) {
				mgr->proposed_vcpis[i] = NULL;
				clear_bit(i + 1, &mgr->payload_mask);
			}
	}
	mutex_unlock(&mgr->payload_lock);
}

static bool check_txmsg_state(struct drm_dp_mst_topology_mgr *mgr,
			      struct drm_dp_sideband_msg_tx *txmsg)
{
	bool ret;

	/*
	 * All updates to txmsg->state are protected by mgr->qlock, and the two
	 * cases we check here are terminal states. For those the barriers
	 * provided by the wake_up/wait_event pair are enough.
	 */
	ret = (txmsg->state == DRM_DP_SIDEBAND_TX_RX ||
	       txmsg->state == DRM_DP_SIDEBAND_TX_TIMEOUT);
	return ret;
}

static int drm_dp_mst_wait_tx_reply(struct drm_dp_mst_branch *mstb,
				    struct drm_dp_sideband_msg_tx *txmsg)
{
	struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
	int ret;

#ifdef __NetBSD__
	mutex_lock(&mstb->mgr->qlock);
	DRM_TIMED_WAIT_UNTIL(ret, &mgr->tx_waitq, &mstb->mgr->qlock, 4*HZ,
	    check_txmsg_state(mgr, txmsg));
#else
	ret = wait_event_timeout(mgr->tx_waitq,
				 check_txmsg_state(mgr, txmsg),
				 (4 * HZ));
	mutex_lock(&mstb->mgr->qlock);
#endif
	if (ret > 0) {
		if (txmsg->state == DRM_DP_SIDEBAND_TX_TIMEOUT) {
			ret = -EIO;
			goto out;
		}
	} else {
		DRM_DEBUG_KMS("timedout msg send %p %d %d\n", txmsg, txmsg->state, txmsg->seqno);

		/* dump some state */
		ret = -EIO;

		/* remove from q */
		if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED ||
		    txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND) {
			list_del(&txmsg->next);
		}

		if (txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND ||
		    txmsg->state == DRM_DP_SIDEBAND_TX_SENT) {
			mstb->tx_slots[txmsg->seqno] = NULL;
		}
	}
out:
	mutex_unlock(&mgr->qlock);

	return ret;
}

static struct drm_dp_mst_branch *drm_dp_add_mst_branch_device(u8 lct, u8 *rad)
{
	struct drm_dp_mst_branch *mstb;

	mstb = kzalloc(sizeof(*mstb), GFP_KERNEL);
	if (!mstb)
		return NULL;

	mstb->lct = lct;
	if (lct > 1)
		memcpy(mstb->rad, rad, lct / 2);
	INIT_LIST_HEAD(&mstb->ports);
	kref_init(&mstb->kref);
	return mstb;
}

static void drm_dp_free_mst_port(struct kref *kref);

static void drm_dp_free_mst_branch_device(struct kref *kref)
{
	struct drm_dp_mst_branch *mstb = container_of(kref, struct drm_dp_mst_branch, kref);
	if (mstb->port_parent) {
		if (list_empty(&mstb->port_parent->next))
			kref_put(&mstb->port_parent->kref, drm_dp_free_mst_port);
	}
	kfree(mstb);
}

static void drm_dp_destroy_mst_branch_device(struct kref *kref)
{
	struct drm_dp_mst_branch *mstb = container_of(kref, struct drm_dp_mst_branch, kref);
	struct drm_dp_mst_port *port, *tmp;
	bool wake_tx = false;

	/*
	 * init kref again to be used by ports to remove mst branch when it is
	 * not needed anymore
	 */
	kref_init(kref);

	if (mstb->port_parent && list_empty(&mstb->port_parent->next))
		kref_get(&mstb->port_parent->kref);

	/*
	 * destroy all ports - don't need lock
	 * as there are no more references to the mst branch
	 * device at this point.
	 */
	list_for_each_entry_safe(port, tmp, &mstb->ports, next) {
		list_del(&port->next);
		drm_dp_put_port(port);
	}

	/* drop any tx slots msg */
	mutex_lock(&mstb->mgr->qlock);
	if (mstb->tx_slots[0]) {
		mstb->tx_slots[0]->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
		mstb->tx_slots[0] = NULL;
		wake_tx = true;
	}
	if (mstb->tx_slots[1]) {
		mstb->tx_slots[1]->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
		mstb->tx_slots[1] = NULL;
		wake_tx = true;
	}
#ifdef __NetBSD__
	if (wake_tx)
		DRM_WAKEUP_ONE(&mstb->mgr->tx_waitq, &mstb->mgr->qlock);
	mutex_unlock(&mstb->mgr->qlock);
#else
	mutex_unlock(&mstb->mgr->qlock);

	if (wake_tx)
		wake_up(&mstb->mgr->tx_waitq);
#endif

	kref_put(kref, drm_dp_free_mst_branch_device);
}

static void drm_dp_put_mst_branch_device(struct drm_dp_mst_branch *mstb)
{
	kref_put(&mstb->kref, drm_dp_destroy_mst_branch_device);
}


static void drm_dp_port_teardown_pdt(struct drm_dp_mst_port *port, int old_pdt)
{
	struct drm_dp_mst_branch *mstb;

	switch (old_pdt) {
	case DP_PEER_DEVICE_DP_LEGACY_CONV:
	case DP_PEER_DEVICE_SST_SINK:
		/* remove i2c over sideband */
		drm_dp_mst_unregister_i2c_bus(&port->aux);
		break;
	case DP_PEER_DEVICE_MST_BRANCHING:
		mstb = port->mstb;
		port->mstb = NULL;
		drm_dp_put_mst_branch_device(mstb);
		break;
	}
}

static void drm_dp_destroy_port(struct kref *kref)
{
	struct drm_dp_mst_port *port = container_of(kref, struct drm_dp_mst_port, kref);
	struct drm_dp_mst_topology_mgr *mgr = port->mgr;

	if (!port->input) {
		port->vcpi.num_slots = 0;

		kfree(port->cached_edid);

		/*
		 * The only time we don't have a connector
		 * on an output port is if the connector init
		 * fails.
		 */
		if (port->connector) {
			/* we can't destroy the connector here, as
			 * we might be holding the mode_config.mutex
			 * from an EDID retrieval */

			mutex_lock(&mgr->destroy_connector_lock);
			kref_get(&port->parent->kref);
			list_add(&port->next, &mgr->destroy_connector_list);
			mutex_unlock(&mgr->destroy_connector_lock);
			schedule_work(&mgr->destroy_connector_work);
			return;
		}
		/* no need to clean up the vcpi - if we have no
		 * connector we never set one up */
		drm_dp_port_teardown_pdt(port, port->pdt);
		port->pdt = DP_PEER_DEVICE_NONE;
	}
	kfree(port);
}

static void drm_dp_put_port(struct drm_dp_mst_port *port)
{
	kref_put(&port->kref, drm_dp_destroy_port);
}

static struct drm_dp_mst_branch *drm_dp_mst_get_validated_mstb_ref_locked(struct drm_dp_mst_branch *mstb, struct drm_dp_mst_branch *to_find)
{
	struct drm_dp_mst_port *port;
	struct drm_dp_mst_branch *rmstb;
	if (to_find == mstb) {
		kref_get(&mstb->kref);
		return mstb;
	}
	list_for_each_entry(port, &mstb->ports, next) {
		if (port->mstb) {
			rmstb = drm_dp_mst_get_validated_mstb_ref_locked(port->mstb, to_find);
			if (rmstb)
				return rmstb;
		}
	}
	return NULL;
}

static struct drm_dp_mst_branch *drm_dp_get_validated_mstb_ref(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_branch *mstb)
{
	struct drm_dp_mst_branch *rmstb = NULL;
	mutex_lock(&mgr->lock);
	if (mgr->mst_primary)
		rmstb = drm_dp_mst_get_validated_mstb_ref_locked(mgr->mst_primary, mstb);
	mutex_unlock(&mgr->lock);
	return rmstb;
}

static struct drm_dp_mst_port *drm_dp_mst_get_port_ref_locked(struct drm_dp_mst_branch *mstb, struct drm_dp_mst_port *to_find)
{
	struct drm_dp_mst_port *port, *mport;

	list_for_each_entry(port, &mstb->ports, next) {
		if (port == to_find) {
			kref_get(&port->kref);
			return port;
		}
		if (port->mstb) {
			mport = drm_dp_mst_get_port_ref_locked(port->mstb, to_find);
			if (mport)
				return mport;
		}
	}
	return NULL;
}

static struct drm_dp_mst_port *drm_dp_get_validated_port_ref(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
{
	struct drm_dp_mst_port *rport = NULL;
	mutex_lock(&mgr->lock);
	if (mgr->mst_primary)
		rport = drm_dp_mst_get_port_ref_locked(mgr->mst_primary, port);
	mutex_unlock(&mgr->lock);
	return rport;
}

static struct drm_dp_mst_port *drm_dp_get_port(struct drm_dp_mst_branch *mstb, u8 port_num)
{
	struct drm_dp_mst_port *port;

	list_for_each_entry(port, &mstb->ports, next) {
		if (port->port_num == port_num) {
			kref_get(&port->kref);
			return port;
		}
	}

	return NULL;
}

/*
 * calculate a new RAD for this MST branch device
 * if parent has an LCT of 2 then it has 1 nibble of RAD,
 * if parent has an LCT of 3 then it has 2 nibbles of RAD,
 */
static u8 drm_dp_calculate_rad(struct drm_dp_mst_port *port,
				 u8 *rad)
{
	int parent_lct = port->parent->lct;
	int shift = 4;
	int idx = (parent_lct - 1) / 2;
	if (parent_lct > 1) {
		memcpy(rad, port->parent->rad, idx + 1);
		shift = (parent_lct % 2) ? 4 : 0;
	} else
		rad[0] = 0;

	rad[idx] |= port->port_num << shift;
	return parent_lct + 1;
}
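
/*
 * Worked example: a device on port 1 of the root (LCT 1) gets LCT 2
 * with rad[0] = 0x10; a device on port 2 behind that branch gets LCT 3
 * with rad[0] = 0x12; one more hop through port 3 gives LCT 4 with
 * rad[] = { 0x12, 0x30 } - high nibble first at every level.
 */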

/*
 * returns true if a new branch device was set up on this port, in
 * which case the caller needs to send it a link address request
 */
static bool drm_dp_port_setup_pdt(struct drm_dp_mst_port *port)
{
	int ret __unused;
	u8 rad[6], lct;
	bool send_link = false;
	switch (port->pdt) {
	case DP_PEER_DEVICE_DP_LEGACY_CONV:
	case DP_PEER_DEVICE_SST_SINK:
		/* add i2c over sideband */
		ret = drm_dp_mst_register_i2c_bus(&port->aux);
		break;
	case DP_PEER_DEVICE_MST_BRANCHING:
		lct = drm_dp_calculate_rad(port, rad);

		port->mstb = drm_dp_add_mst_branch_device(lct, rad);
		port->mstb->mgr = port->mgr;
		port->mstb->port_parent = port;

		send_link = true;
		break;
	}
	return send_link;
}

static void drm_dp_check_mstb_guid(struct drm_dp_mst_branch *mstb, u8 *guid)
{
	int ret __unused;

	memcpy(mstb->guid, guid, 16);

	if (!drm_dp_validate_guid(mstb->mgr, mstb->guid)) {
		if (mstb->port_parent) {
			ret = drm_dp_send_dpcd_write(
					mstb->mgr,
					mstb->port_parent,
					DP_GUID,
					16,
					mstb->guid);
		} else {
			ret = drm_dp_dpcd_write(
					mstb->mgr->aux,
					DP_GUID,
					mstb->guid,
					16);
		}
	}
}

static void build_mst_prop_path(const struct drm_dp_mst_branch *mstb,
				int pnum,
				char *proppath,
				size_t proppath_size)
{
	int i;
	char temp[8];
	snprintf(proppath, proppath_size, "mst:%d", mstb->mgr->conn_base_id);
	for (i = 0; i < (mstb->lct - 1); i++) {
		int shift = (i % 2) ? 0 : 4;
		int port_num = (mstb->rad[i / 2] >> shift) & 0xf;
		snprintf(temp, sizeof(temp), "-%d", port_num);
		strlcat(proppath, temp, proppath_size);
	}
	snprintf(temp, sizeof(temp), "-%d", pnum);
	strlcat(proppath, temp, proppath_size);
}
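
/*
 * e.g. for a manager whose first connector id is 4, a port 8 on a
 * branch reached through port 1 of the root (lct 2, rad[0] = 0x10)
 * yields the property path "mst:4-1-8".
 */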

static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
			    struct device *dev,
			    struct drm_dp_link_addr_reply_port *port_msg)
{
	struct drm_dp_mst_port *port;
	bool ret;
	bool created = false;
	int old_pdt = 0;
	int old_ddps = 0;
	port = drm_dp_get_port(mstb, port_msg->port_number);
	if (!port) {
		port = kzalloc(sizeof(*port), GFP_KERNEL);
		if (!port)
			return;
		kref_init(&port->kref);
		port->parent = mstb;
		port->port_num = port_msg->port_number;
		port->mgr = mstb->mgr;
		port->aux.name = "DPMST";
		port->aux.dev = dev;
		created = true;
	} else {
		old_pdt = port->pdt;
		old_ddps = port->ddps;
	}

	port->pdt = port_msg->peer_device_type;
	port->input = port_msg->input_port;
	port->mcs = port_msg->mcs;
	port->ddps = port_msg->ddps;
	port->ldps = port_msg->legacy_device_plug_status;
	port->dpcd_rev = port_msg->dpcd_revision;
	port->num_sdp_streams = port_msg->num_sdp_streams;
	port->num_sdp_stream_sinks = port_msg->num_sdp_stream_sinks;

	/* manage mstb port lists with mgr lock - take a reference
	   for this list */
	if (created) {
		mutex_lock(&mstb->mgr->lock);
		kref_get(&port->kref);
		list_add(&port->next, &mstb->ports);
		mutex_unlock(&mstb->mgr->lock);
	}

	if (old_ddps != port->ddps) {
		if (port->ddps) {
			if (!port->input)
				drm_dp_send_enum_path_resources(mstb->mgr, mstb, port);
		} else {
			port->available_pbn = 0;
		}
	}

	if (old_pdt != port->pdt && !port->input) {
		drm_dp_port_teardown_pdt(port, old_pdt);

		ret = drm_dp_port_setup_pdt(port);
		if (ret == true)
			drm_dp_send_link_address(mstb->mgr, port->mstb);
	}

	if (created && !port->input) {
		char proppath[255];

		build_mst_prop_path(mstb, port->port_num, proppath, sizeof(proppath));
		port->connector = (*mstb->mgr->cbs->add_connector)(mstb->mgr, port, proppath);
		if (!port->connector) {
			/* remove it from the port list */
			mutex_lock(&mstb->mgr->lock);
			list_del(&port->next);
			mutex_unlock(&mstb->mgr->lock);
			/* drop port list reference */
			drm_dp_put_port(port);
			goto out;
		}
		if ((port->pdt == DP_PEER_DEVICE_DP_LEGACY_CONV ||
		     port->pdt == DP_PEER_DEVICE_SST_SINK) &&
		    port->port_num >= DP_MST_LOGICAL_PORT_0) {
			port->cached_edid = drm_get_edid(port->connector, &port->aux.ddc);
			drm_mode_connector_set_tile_property(port->connector);
		}
		(*mstb->mgr->cbs->register_connector)(port->connector);
	}

out:
	/* put reference to this port */
	drm_dp_put_port(port);
}

static void drm_dp_update_port(struct drm_dp_mst_branch *mstb,
			       struct drm_dp_connection_status_notify *conn_stat)
{
	struct drm_dp_mst_port *port;
	int old_pdt;
	int old_ddps;
	bool dowork = false;
	port = drm_dp_get_port(mstb, conn_stat->port_number);
	if (!port)
		return;

	old_ddps = port->ddps;
	old_pdt = port->pdt;
	port->pdt = conn_stat->peer_device_type;
	port->mcs = conn_stat->message_capability_status;
	port->ldps = conn_stat->legacy_device_plug_status;
	port->ddps = conn_stat->displayport_device_plug_status;

	if (old_ddps != port->ddps) {
		if (port->ddps) {
			dowork = true;
		} else {
			port->available_pbn = 0;
		}
	}
	if (old_pdt != port->pdt && !port->input) {
		drm_dp_port_teardown_pdt(port, old_pdt);

		if (drm_dp_port_setup_pdt(port))
			dowork = true;
	}

	drm_dp_put_port(port);
	if (dowork)
		queue_work(system_long_wq, &mstb->mgr->work);

}

static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_topology_mgr *mgr,
							       u8 lct, u8 *rad)
{
	struct drm_dp_mst_branch *mstb;
	struct drm_dp_mst_port *port;
	int i;
	/* find the port by iterating down */

	mutex_lock(&mgr->lock);
	mstb = mgr->mst_primary;

	for (i = 0; i < lct - 1; i++) {
		int shift = (i % 2) ? 0 : 4;
		int port_num = (rad[i / 2] >> shift) & 0xf;

		list_for_each_entry(port, &mstb->ports, next) {
			if (port->port_num == port_num) {
				mstb = port->mstb;
				if (!mstb) {
					DRM_ERROR("failed to lookup MSTB with lct %d, rad %02x\n", lct, rad[0]);
					goto out;
				}

				break;
			}
		}
	}
	kref_get(&mstb->kref);
out:
	mutex_unlock(&mgr->lock);
	return mstb;
}

static struct drm_dp_mst_branch *get_mst_branch_device_by_guid_helper(
	struct drm_dp_mst_branch *mstb,
	uint8_t *guid)
{
	struct drm_dp_mst_branch *found_mstb;
	struct drm_dp_mst_port *port;

	if (memcmp(mstb->guid, guid, 16) == 0)
		return mstb;

	list_for_each_entry(port, &mstb->ports, next) {
		if (!port->mstb)
			continue;

		found_mstb = get_mst_branch_device_by_guid_helper(port->mstb, guid);

		if (found_mstb)
			return found_mstb;
	}

	return NULL;
}

static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device_by_guid(
	struct drm_dp_mst_topology_mgr *mgr,
	uint8_t *guid)
{
	struct drm_dp_mst_branch *mstb;

	/* find the port by iterating down */
	mutex_lock(&mgr->lock);

	mstb = get_mst_branch_device_by_guid_helper(mgr->mst_primary, guid);

	if (mstb)
		kref_get(&mstb->kref);

	mutex_unlock(&mgr->lock);
	return mstb;
}

static void drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
					       struct drm_dp_mst_branch *mstb)
{
	struct drm_dp_mst_port *port;
	struct drm_dp_mst_branch *mstb_child;
	if (!mstb->link_address_sent)
		drm_dp_send_link_address(mgr, mstb);

	list_for_each_entry(port, &mstb->ports, next) {
		if (port->input)
			continue;

		if (!port->ddps)
			continue;

		if (!port->available_pbn)
			drm_dp_send_enum_path_resources(mgr, mstb, port);

		if (port->mstb) {
			mstb_child = drm_dp_get_validated_mstb_ref(mgr, port->mstb);
			if (mstb_child) {
				drm_dp_check_and_send_link_address(mgr, mstb_child);
				drm_dp_put_mst_branch_device(mstb_child);
			}
		}
	}
}

static void drm_dp_mst_link_probe_work(struct work_struct *work)
{
	struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, work);
	struct drm_dp_mst_branch *mstb;

	mutex_lock(&mgr->lock);
	mstb = mgr->mst_primary;
	if (mstb) {
		kref_get(&mstb->kref);
	}
	mutex_unlock(&mgr->lock);
	if (mstb) {
		drm_dp_check_and_send_link_address(mgr, mstb);
		drm_dp_put_mst_branch_device(mstb);
	}
}

static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
				 u8 *guid)
{
	static u8 zero_guid[16];

	if (!memcmp(guid, zero_guid, 16)) {
		u64 salt = get_jiffies_64();
		memcpy(&guid[0], &salt, sizeof(u64));
		memcpy(&guid[8], &salt, sizeof(u64));
		return false;
	}
	return true;
}
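
/*
 * Note: returning false here means the GUID was all zeroes and has just
 * been replaced with a jiffies-based one; drm_dp_check_mstb_guid()
 * above relies on that to write the fabricated GUID back to the branch
 * device's DP_GUID register.
 */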

#if 0
static int build_dpcd_read(struct drm_dp_sideband_msg_tx *msg, u8 port_num, u32 offset, u8 num_bytes)
{
	struct drm_dp_sideband_msg_req_body req;

	req.req_type = DP_REMOTE_DPCD_READ;
	req.u.dpcd_read.port_number = port_num;
	req.u.dpcd_read.dpcd_address = offset;
	req.u.dpcd_read.num_bytes = num_bytes;
	drm_dp_encode_sideband_req(&req, msg);

	return 0;
}
#endif

static int drm_dp_send_sideband_msg(struct drm_dp_mst_topology_mgr *mgr,
				    bool up, u8 *msg, int len)
{
	int ret;
	int regbase = up ? DP_SIDEBAND_MSG_UP_REP_BASE : DP_SIDEBAND_MSG_DOWN_REQ_BASE;
	int tosend, total, offset;
	int retries = 0;

retry:
	total = len;
	offset = 0;
	do {
		tosend = min3(mgr->max_dpcd_transaction_bytes, 16, total);

		ret = drm_dp_dpcd_write(mgr->aux, regbase + offset,
					&msg[offset],
					tosend);
		if (ret != tosend) {
			if (ret == -EIO && retries < 5) {
				retries++;
				goto retry;
			}
			DRM_DEBUG_KMS("failed to dpcd write %d %d\n", tosend, ret);

			return -EIO;
		}
		offset += tosend;
		total -= tosend;
	} while (total > 0);
	return 0;
}

static int set_hdr_from_dst_qlock(struct drm_dp_sideband_msg_hdr *hdr,
				  struct drm_dp_sideband_msg_tx *txmsg)
{
	struct drm_dp_mst_branch *mstb = txmsg->dst;
	u8 req_type;

	/* both msg slots are full */
	if (txmsg->seqno == -1) {
		if (mstb->tx_slots[0] && mstb->tx_slots[1]) {
			DRM_DEBUG_KMS("%s: failed to find slot\n", __func__);
			return -EAGAIN;
		}
		if (mstb->tx_slots[0] == NULL && mstb->tx_slots[1] == NULL) {
			txmsg->seqno = mstb->last_seqno;
			mstb->last_seqno ^= 1;
		} else if (mstb->tx_slots[0] == NULL)
			txmsg->seqno = 0;
		else
			txmsg->seqno = 1;
		mstb->tx_slots[txmsg->seqno] = txmsg;
	}

	req_type = txmsg->msg[0] & 0x7f;
	if (req_type == DP_CONNECTION_STATUS_NOTIFY ||
		req_type == DP_RESOURCE_STATUS_NOTIFY)
		hdr->broadcast = 1;
	else
		hdr->broadcast = 0;
	hdr->path_msg = txmsg->path_msg;
	hdr->lct = mstb->lct;
	hdr->lcr = mstb->lct - 1;
	if (mstb->lct > 1)
		memcpy(hdr->rad, mstb->rad, mstb->lct / 2);
	hdr->seqno = txmsg->seqno;
	return 0;
}

/*
 * process a single block of the next message in the sideband queue
 */
static int process_single_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
				   struct drm_dp_sideband_msg_tx *txmsg,
				   bool up)
{
	u8 chunk[48];
	struct drm_dp_sideband_msg_hdr hdr;
	int len, space, idx, tosend;
	int ret;

	memset(&hdr, 0, sizeof(struct drm_dp_sideband_msg_hdr));

	if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED) {
		txmsg->seqno = -1;
		txmsg->state = DRM_DP_SIDEBAND_TX_START_SEND;
	}

	/* make hdr from dst mst - for replies use seqno
	   otherwise assign one */
	ret = set_hdr_from_dst_qlock(&hdr, txmsg);
	if (ret < 0)
		return ret;

	/* amount left to send in this message */
	len = txmsg->cur_len - txmsg->cur_offset;

	/* 48 - sideband msg size - 1 byte for data CRC, x header bytes */
	space = 48 - 1 - drm_dp_calc_sb_hdr_size(&hdr);

	tosend = min(len, space);
	if (len == txmsg->cur_len)
		hdr.somt = 1;
	if (space >= len)
		hdr.eomt = 1;

	hdr.msg_len = tosend + 1;
	drm_dp_encode_sideband_msg_hdr(&hdr, chunk, &idx);
	memcpy(&chunk[idx], &txmsg->msg[txmsg->cur_offset], tosend);
	/* add crc at end */
	drm_dp_crc_sideband_chunk_req(&chunk[idx], tosend);
	idx += tosend + 1;

	ret = drm_dp_send_sideband_msg(mgr, up, chunk, idx);
	if (ret) {
		DRM_DEBUG_KMS("sideband msg failed to send\n");
		return ret;
	}

	txmsg->cur_offset += tosend;
	if (txmsg->cur_offset == txmsg->cur_len) {
		txmsg->state = DRM_DP_SIDEBAND_TX_SENT;
		return 1;
	}
	return 0;
}
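
/*
 * Chunking arithmetic: each sideband transfer window is 48 bytes, one
 * of which is the trailing data CRC.  With an lct 1 header (3 bytes)
 * that leaves 44 bytes of message body per chunk; a longer message is
 * split across chunks, with SOMT set on the first and EOMT on the last.
 */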

static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
{
	struct drm_dp_sideband_msg_tx *txmsg;
	int ret;

	WARN_ON(!mutex_is_locked(&mgr->qlock));

	/* construct a chunk from the first msg in the tx_msg queue */
	if (list_empty(&mgr->tx_msg_downq)) {
		mgr->tx_down_in_progress = false;
		return;
	}
	mgr->tx_down_in_progress = true;

	txmsg = list_first_entry(&mgr->tx_msg_downq, struct drm_dp_sideband_msg_tx, next);
	ret = process_single_tx_qlock(mgr, txmsg, false);
	if (ret == 1) {
		/* txmsg is sent; it should be in the slots now */
		list_del(&txmsg->next);
	} else if (ret) {
		DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);
		list_del(&txmsg->next);
		if (txmsg->seqno != -1)
			txmsg->dst->tx_slots[txmsg->seqno] = NULL;
		txmsg->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
#ifdef __NetBSD__
		DRM_WAKEUP_ONE(&mgr->tx_waitq, &mgr->qlock);
#else
		wake_up(&mgr->tx_waitq);
#endif
	}
	if (list_empty(&mgr->tx_msg_downq)) {
		mgr->tx_down_in_progress = false;
		return;
	}
}

/* called holding qlock */
static void process_single_up_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
				       struct drm_dp_sideband_msg_tx *txmsg)
{
	int ret;

	/* construct a chunk from the msg we were handed */
	ret = process_single_tx_qlock(mgr, txmsg, true);

	if (ret != 1)
		DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);

	txmsg->dst->tx_slots[txmsg->seqno] = NULL;
}

static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr,
				 struct drm_dp_sideband_msg_tx *txmsg)
{
	mutex_lock(&mgr->qlock);
	list_add_tail(&txmsg->next, &mgr->tx_msg_downq);
	if (!mgr->tx_down_in_progress)
		process_single_down_tx_qlock(mgr);
	mutex_unlock(&mgr->qlock);
}

static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
				     struct drm_dp_mst_branch *mstb)
{
	int len __unused;
	struct drm_dp_sideband_msg_tx *txmsg;
	int ret;

	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
	if (!txmsg)
		return;

	txmsg->dst = mstb;
	len = build_link_address(txmsg);

	mstb->link_address_sent = true;
	drm_dp_queue_down_tx(mgr, txmsg);

	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
	if (ret > 0) {
		int i;

		if (txmsg->reply.reply_type == 1)
			DRM_DEBUG_KMS("link address nak received\n");
		else {
			DRM_DEBUG_KMS("link address reply: %d\n", txmsg->reply.u.link_addr.nports);
			for (i = 0; i < txmsg->reply.u.link_addr.nports; i++) {
				DRM_DEBUG_KMS("port %d: input %d, pdt: %d, pn: %d, dpcd_rev: %02x, mcs: %d, ddps: %d, ldps %d, sdp %d/%d\n", i,
				       txmsg->reply.u.link_addr.ports[i].input_port,
				       txmsg->reply.u.link_addr.ports[i].peer_device_type,
				       txmsg->reply.u.link_addr.ports[i].port_number,
				       txmsg->reply.u.link_addr.ports[i].dpcd_revision,
				       txmsg->reply.u.link_addr.ports[i].mcs,
				       txmsg->reply.u.link_addr.ports[i].ddps,
				       txmsg->reply.u.link_addr.ports[i].legacy_device_plug_status,
				       txmsg->reply.u.link_addr.ports[i].num_sdp_streams,
				       txmsg->reply.u.link_addr.ports[i].num_sdp_stream_sinks);
			}

			drm_dp_check_mstb_guid(mstb, txmsg->reply.u.link_addr.guid);

			for (i = 0; i < txmsg->reply.u.link_addr.nports; i++) {
				drm_dp_add_port(mstb, mgr->dev, &txmsg->reply.u.link_addr.ports[i]);
			}
			(*mgr->cbs->hotplug)(mgr);
		}
	} else {
		mstb->link_address_sent = false;
		DRM_DEBUG_KMS("link address failed %d\n", ret);
	}

	kfree(txmsg);
}

static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
					   struct drm_dp_mst_branch *mstb,
					   struct drm_dp_mst_port *port)
{
	int len __unused;
	struct drm_dp_sideband_msg_tx *txmsg;
	int ret;

	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
	if (!txmsg)
		return -ENOMEM;

	txmsg->dst = mstb;
	len = build_enum_path_resources(txmsg, port->port_num);

	drm_dp_queue_down_tx(mgr, txmsg);

	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
	if (ret > 0) {
		if (txmsg->reply.reply_type == 1)
			DRM_DEBUG_KMS("enum path resources nak received\n");
		else {
			if (port->port_num != txmsg->reply.u.path_resources.port_number)
				DRM_ERROR("got incorrect port in response\n");
			DRM_DEBUG_KMS("enum path resources %d: %d %d\n", txmsg->reply.u.path_resources.port_number, txmsg->reply.u.path_resources.full_payload_bw_number,
			       txmsg->reply.u.path_resources.avail_payload_bw_number);
			port->available_pbn = txmsg->reply.u.path_resources.avail_payload_bw_number;
		}
	}

	kfree(txmsg);
	return 0;
}

static struct drm_dp_mst_port *drm_dp_get_last_connected_port_to_mstb(struct drm_dp_mst_branch *mstb)
{
	if (!mstb->port_parent)
		return NULL;

	if (mstb->port_parent->mstb != mstb)
		return mstb->port_parent;

	return drm_dp_get_last_connected_port_to_mstb(mstb->port_parent->parent);
}

static struct drm_dp_mst_branch *drm_dp_get_last_connected_port_and_mstb(struct drm_dp_mst_topology_mgr *mgr,
									 struct drm_dp_mst_branch *mstb,
									 int *port_num)
{
	struct drm_dp_mst_branch *rmstb = NULL;
	struct drm_dp_mst_port *found_port;
	mutex_lock(&mgr->lock);
	if (mgr->mst_primary) {
		found_port = drm_dp_get_last_connected_port_to_mstb(mstb);

		if (found_port) {
			rmstb = found_port->parent;
			kref_get(&rmstb->kref);
			*port_num = found_port->port_num;
		}
	}
	mutex_unlock(&mgr->lock);
	return rmstb;
}

static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
				   struct drm_dp_mst_port *port,
				   int id,
				   int pbn)
{
	struct drm_dp_sideband_msg_tx *txmsg;
	struct drm_dp_mst_branch *mstb;
	int len __unused, ret, port_num;

	port = drm_dp_get_validated_port_ref(mgr, port);
	if (!port)
		return -EINVAL;

	port_num = port->port_num;
	mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);
	if (!mstb) {
		mstb = drm_dp_get_last_connected_port_and_mstb(mgr, port->parent, &port_num);
   1712 
   1713 		if (!mstb) {
   1714 			drm_dp_put_port(port);
   1715 			return -EINVAL;
   1716 		}
   1717 	}
   1718 
   1719 	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
   1720 	if (!txmsg) {
   1721 		ret = -ENOMEM;
   1722 		goto fail_put;
   1723 	}
   1724 
   1725 	txmsg->dst = mstb;
   1726 	len = build_allocate_payload(txmsg, port_num,
   1727 				     id,
   1728 				     pbn);
   1729 
   1730 	drm_dp_queue_down_tx(mgr, txmsg);
   1731 
   1732 	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
   1733 	if (ret > 0) {
   1734 		if (txmsg->reply.reply_type == 1) {
   1735 			ret = -EINVAL;
   1736 		} else
   1737 			ret = 0;
   1738 	}
   1739 	kfree(txmsg);
   1740 fail_put:
   1741 	drm_dp_put_mst_branch_device(mstb);
   1742 	drm_dp_put_port(port);
   1743 	return ret;
   1744 }
   1745 
   1746 static int drm_dp_create_payload_step1(struct drm_dp_mst_topology_mgr *mgr,
   1747 				       int id,
   1748 				       struct drm_dp_payload *payload)
   1749 {
   1750 	int ret;
   1751 
   1752 	ret = drm_dp_dpcd_write_payload(mgr, id, payload);
   1753 	if (ret < 0) {
   1754 		payload->payload_state = 0;
   1755 		return ret;
   1756 	}
   1757 	payload->payload_state = DP_PAYLOAD_LOCAL;
   1758 	return 0;
   1759 }
   1760 
   1761 static int drm_dp_create_payload_step2(struct drm_dp_mst_topology_mgr *mgr,
   1762 				       struct drm_dp_mst_port *port,
   1763 				       int id,
   1764 				       struct drm_dp_payload *payload)
   1765 {
   1766 	int ret;
   1767 	ret = drm_dp_payload_send_msg(mgr, port, id, port->vcpi.pbn);
   1768 	if (ret < 0)
   1769 		return ret;
   1770 	payload->payload_state = DP_PAYLOAD_REMOTE;
   1771 	return ret;
   1772 }
   1773 
   1774 static int drm_dp_destroy_payload_step1(struct drm_dp_mst_topology_mgr *mgr,
   1775 					struct drm_dp_mst_port *port,
   1776 					int id,
   1777 					struct drm_dp_payload *payload)
   1778 {
   1779 	DRM_DEBUG_KMS("\n");
    1780 	/* it's okay for these to fail */
   1781 	if (port) {
   1782 		drm_dp_payload_send_msg(mgr, port, id, 0);
   1783 	}
   1784 
   1785 	drm_dp_dpcd_write_payload(mgr, id, payload);
   1786 	payload->payload_state = DP_PAYLOAD_DELETE_LOCAL;
   1787 	return 0;
   1788 }
   1789 
   1790 static int drm_dp_destroy_payload_step2(struct drm_dp_mst_topology_mgr *mgr,
   1791 					int id,
   1792 					struct drm_dp_payload *payload)
   1793 {
   1794 	payload->payload_state = 0;
   1795 	return 0;
   1796 }
   1797 
   1798 /**
   1799  * drm_dp_update_payload_part1() - Execute payload update part 1
   1800  * @mgr: manager to use.
   1801  *
   1802  * This iterates over all proposed virtual channels, and tries to
   1803  * allocate space in the link for them. For 0->slots transitions,
   1804  * this step just writes the VCPI to the MST device. For slots->0
   1805  * transitions, this writes the updated VCPIs and removes the
   1806  * remote VC payloads.
   1807  *
    1808  * After calling this, the driver should generate the ACT and send
    1809  * payload packets.
   1810  */
   1811 int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
   1812 {
   1813 	int i, j;
   1814 	int cur_slots = 1;
   1815 	struct drm_dp_payload req_payload;
   1816 	struct drm_dp_mst_port *port;
   1817 
   1818 	mutex_lock(&mgr->payload_lock);
   1819 	for (i = 0; i < mgr->max_payloads; i++) {
    1820 		/* reconcile the proposed payloads with the hw state
    1821 		   and update the hw view */
   1822 		req_payload.start_slot = cur_slots;
   1823 		if (mgr->proposed_vcpis[i]) {
   1824 			port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
   1825 			port = drm_dp_get_validated_port_ref(mgr, port);
   1826 			if (!port) {
   1827 				mutex_unlock(&mgr->payload_lock);
   1828 				return -EINVAL;
   1829 			}
   1830 			req_payload.num_slots = mgr->proposed_vcpis[i]->num_slots;
   1831 			req_payload.vcpi = mgr->proposed_vcpis[i]->vcpi;
   1832 		} else {
   1833 			port = NULL;
   1834 			req_payload.num_slots = 0;
   1835 		}
   1836 
   1837 		if (mgr->payloads[i].start_slot != req_payload.start_slot) {
   1838 			mgr->payloads[i].start_slot = req_payload.start_slot;
   1839 		}
   1840 		/* work out what is required to happen with this payload */
   1841 		if (mgr->payloads[i].num_slots != req_payload.num_slots) {
   1842 
   1843 			/* need to push an update for this payload */
   1844 			if (req_payload.num_slots) {
   1845 				drm_dp_create_payload_step1(mgr, mgr->proposed_vcpis[i]->vcpi, &req_payload);
   1846 				mgr->payloads[i].num_slots = req_payload.num_slots;
   1847 				mgr->payloads[i].vcpi = req_payload.vcpi;
   1848 			} else if (mgr->payloads[i].num_slots) {
   1849 				mgr->payloads[i].num_slots = 0;
   1850 				drm_dp_destroy_payload_step1(mgr, port, mgr->payloads[i].vcpi, &mgr->payloads[i]);
   1851 				req_payload.payload_state = mgr->payloads[i].payload_state;
   1852 				mgr->payloads[i].start_slot = 0;
   1853 			}
   1854 			mgr->payloads[i].payload_state = req_payload.payload_state;
   1855 		}
   1856 		cur_slots += req_payload.num_slots;
   1857 
   1858 		if (port)
   1859 			drm_dp_put_port(port);
   1860 	}
   1861 
   1862 	for (i = 0; i < mgr->max_payloads; i++) {
   1863 		if (mgr->payloads[i].payload_state == DP_PAYLOAD_DELETE_LOCAL) {
   1864 			DRM_DEBUG_KMS("removing payload %d\n", i);
   1865 			for (j = i; j < mgr->max_payloads - 1; j++) {
   1866 				memcpy(&mgr->payloads[j], &mgr->payloads[j + 1], sizeof(struct drm_dp_payload));
   1867 				mgr->proposed_vcpis[j] = mgr->proposed_vcpis[j + 1];
   1868 				if (mgr->proposed_vcpis[j] && mgr->proposed_vcpis[j]->num_slots) {
   1869 					set_bit(j + 1, &mgr->payload_mask);
   1870 				} else {
   1871 					clear_bit(j + 1, &mgr->payload_mask);
   1872 				}
   1873 			}
   1874 			memset(&mgr->payloads[mgr->max_payloads - 1], 0, sizeof(struct drm_dp_payload));
   1875 			mgr->proposed_vcpis[mgr->max_payloads - 1] = NULL;
   1876 			clear_bit(mgr->max_payloads, &mgr->payload_mask);
   1877 
   1878 		}
   1879 	}
   1880 	mutex_unlock(&mgr->payload_lock);
   1881 
   1882 	return 0;
   1883 }
   1884 EXPORT_SYMBOL(drm_dp_update_payload_part1);
   1885 
   1886 /**
   1887  * drm_dp_update_payload_part2() - Execute payload update part 2
   1888  * @mgr: manager to use.
   1889  *
   1890  * This iterates over all proposed virtual channels, and tries to
   1891  * allocate space in the link for them. For 0->slots transitions,
   1892  * this step writes the remote VC payload commands. For slots->0
   1893  * this just resets some internal state.
   1894  */
   1895 int drm_dp_update_payload_part2(struct drm_dp_mst_topology_mgr *mgr)
   1896 {
   1897 	struct drm_dp_mst_port *port;
   1898 	int i;
   1899 	int ret = 0;
   1900 	mutex_lock(&mgr->payload_lock);
   1901 	for (i = 0; i < mgr->max_payloads; i++) {
   1902 
   1903 		if (!mgr->proposed_vcpis[i])
   1904 			continue;
   1905 
   1906 		port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
   1907 
   1908 		DRM_DEBUG_KMS("payload %d %d\n", i, mgr->payloads[i].payload_state);
   1909 		if (mgr->payloads[i].payload_state == DP_PAYLOAD_LOCAL) {
   1910 			ret = drm_dp_create_payload_step2(mgr, port, mgr->proposed_vcpis[i]->vcpi, &mgr->payloads[i]);
   1911 		} else if (mgr->payloads[i].payload_state == DP_PAYLOAD_DELETE_LOCAL) {
   1912 			ret = drm_dp_destroy_payload_step2(mgr, mgr->proposed_vcpis[i]->vcpi, &mgr->payloads[i]);
   1913 		}
   1914 		if (ret) {
   1915 			mutex_unlock(&mgr->payload_lock);
   1916 			return ret;
   1917 		}
   1918 	}
   1919 	mutex_unlock(&mgr->payload_lock);
   1920 	return 0;
   1921 }
   1922 EXPORT_SYMBOL(drm_dp_update_payload_part2);
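
/*
 * Illustrative sketch (not part of the driver, not compiled): the order in
 * which a driver would normally drive the two payload steps around a
 * modeset, using only helpers defined in this file.  The ACT itself is
 * generated by the source hardware between the two steps.
 */
#if 0
static int example_program_mst_payloads(struct drm_dp_mst_topology_mgr *mgr)
{
	int ret;

	/* step 1: update the local/DPCD payload table */
	ret = drm_dp_update_payload_part1(mgr);
	if (ret < 0)
		return ret;

	/* the source hardware must now send the ACT; wait for the sink */
	ret = drm_dp_check_act_status(mgr);
	if (ret < 0)
		return ret;

	/* step 2: send the remote ALLOCATE_PAYLOAD sideband messages */
	return drm_dp_update_payload_part2(mgr);
}
#endif	/* sketch */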
   1923 
   1924 #if 0 /* unused as of yet */
   1925 static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr,
   1926 				 struct drm_dp_mst_port *port,
   1927 				 int offset, int size)
   1928 {
   1929 	int len;
   1930 	struct drm_dp_sideband_msg_tx *txmsg;
   1931 
   1932 	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
   1933 	if (!txmsg)
   1934 		return -ENOMEM;
   1935 
   1936 	len = build_dpcd_read(txmsg, port->port_num, 0, 8);
   1937 	txmsg->dst = port->parent;
   1938 
   1939 	drm_dp_queue_down_tx(mgr, txmsg);
   1940 
   1941 	return 0;
   1942 }
   1943 #endif
   1944 
   1945 static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
   1946 				  struct drm_dp_mst_port *port,
   1947 				  int offset, int size, u8 *bytes)
   1948 {
   1949 	int len __unused;
   1950 	int ret;
   1951 	struct drm_dp_sideband_msg_tx *txmsg;
   1952 	struct drm_dp_mst_branch *mstb;
   1953 
   1954 	mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);
   1955 	if (!mstb)
   1956 		return -EINVAL;
   1957 
   1958 	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
   1959 	if (!txmsg) {
   1960 		ret = -ENOMEM;
   1961 		goto fail_put;
   1962 	}
   1963 
   1964 	len = build_dpcd_write(txmsg, port->port_num, offset, size, bytes);
   1965 	txmsg->dst = mstb;
   1966 
   1967 	drm_dp_queue_down_tx(mgr, txmsg);
   1968 
   1969 	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
   1970 	if (ret > 0) {
   1971 		if (txmsg->reply.reply_type == 1) {
   1972 			ret = -EINVAL;
   1973 		} else
   1974 			ret = 0;
   1975 	}
   1976 	kfree(txmsg);
   1977 fail_put:
   1978 	drm_dp_put_mst_branch_device(mstb);
   1979 	return ret;
   1980 }
   1981 
   1982 static int drm_dp_encode_up_ack_reply(struct drm_dp_sideband_msg_tx *msg, u8 req_type)
   1983 {
   1984 	struct drm_dp_sideband_msg_reply_body reply;
   1985 
   1986 	reply.reply_type = 1;
   1987 	reply.req_type = req_type;
   1988 	drm_dp_encode_sideband_reply(&reply, msg);
   1989 	return 0;
   1990 }
   1991 
   1992 static int drm_dp_send_up_ack_reply(struct drm_dp_mst_topology_mgr *mgr,
   1993 				    struct drm_dp_mst_branch *mstb,
   1994 				    int req_type, int seqno, bool broadcast)
   1995 {
   1996 	struct drm_dp_sideband_msg_tx *txmsg;
   1997 
   1998 	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
   1999 	if (!txmsg)
   2000 		return -ENOMEM;
   2001 
   2002 	txmsg->dst = mstb;
   2003 	txmsg->seqno = seqno;
   2004 	drm_dp_encode_up_ack_reply(txmsg, req_type);
   2005 
   2006 	mutex_lock(&mgr->qlock);
   2007 
   2008 	process_single_up_tx_qlock(mgr, txmsg);
   2009 
   2010 	mutex_unlock(&mgr->qlock);
   2011 
   2012 	kfree(txmsg);
   2013 	return 0;
   2014 }
   2015 
   2016 static bool drm_dp_get_vc_payload_bw(int dp_link_bw,
   2017 				     int dp_link_count,
   2018 				     int *out)
   2019 {
   2020 	switch (dp_link_bw) {
   2021 	default:
   2022 		DRM_DEBUG_KMS("invalid link bandwidth in DPCD: %x (link count: %d)\n",
   2023 			      dp_link_bw, dp_link_count);
   2024 		return false;
   2025 
   2026 	case DP_LINK_BW_1_62:
   2027 		*out = 3 * dp_link_count;
   2028 		break;
   2029 	case DP_LINK_BW_2_7:
   2030 		*out = 5 * dp_link_count;
   2031 		break;
   2032 	case DP_LINK_BW_5_4:
   2033 		*out = 10 * dp_link_count;
   2034 		break;
   2035 	}
   2036 	return true;
   2037 }
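
/*
 * Worked example for the table above: at DP_LINK_BW_2_7 each lane carries
 * 2.7 Gbps on the wire, i.e. 270 Mbytes/sec of payload after 8b/10b coding.
 * One of the 64 MTP time slots is then 270/64 = 4.21875 Mbytes/sec per
 * lane, which is exactly 5 PBN (5 * 54/64 Mbytes/sec), so a 4-lane HBR
 * link yields *out = 5 * 4 = 20 PBN per time slot.
 */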
   2038 
   2039 /**
   2040  * drm_dp_mst_topology_mgr_set_mst() - Set the MST state for a topology manager
   2041  * @mgr: manager to set state for
   2042  * @mst_state: true to enable MST on this connector - false to disable.
   2043  *
   2044  * This is called by the driver when it detects an MST capable device plugged
   2045  * into a DP MST capable port, or when a DP MST capable device is unplugged.
   2046  */
   2047 int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool mst_state)
   2048 {
   2049 	int ret = 0;
   2050 	struct drm_dp_mst_branch *mstb = NULL;
   2051 
   2052 	mutex_lock(&mgr->lock);
   2053 	if (mst_state == mgr->mst_state)
   2054 		goto out_unlock;
   2055 
   2056 	mgr->mst_state = mst_state;
   2057 	/* set the device into MST mode */
   2058 	if (mst_state) {
   2059 		WARN_ON(mgr->mst_primary);
   2060 
   2061 		/* get dpcd info */
   2062 		ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE);
   2063 		if (ret != DP_RECEIVER_CAP_SIZE) {
   2064 			DRM_DEBUG_KMS("failed to read DPCD\n");
   2065 			goto out_unlock;
   2066 		}
   2067 
   2068 		if (!drm_dp_get_vc_payload_bw(mgr->dpcd[1],
   2069 					      mgr->dpcd[2] & DP_MAX_LANE_COUNT_MASK,
   2070 					      &mgr->pbn_div)) {
   2071 			ret = -EINVAL;
   2072 			goto out_unlock;
   2073 		}
   2074 
   2075 		mgr->total_pbn = 2560;
   2076 		mgr->total_slots = DIV_ROUND_UP(mgr->total_pbn, mgr->pbn_div);
   2077 		mgr->avail_slots = mgr->total_slots;
   2078 
   2079 		/* add initial branch device at LCT 1 */
   2080 		mstb = drm_dp_add_mst_branch_device(1, NULL);
   2081 		if (mstb == NULL) {
   2082 			ret = -ENOMEM;
   2083 			goto out_unlock;
   2084 		}
   2085 		mstb->mgr = mgr;
   2086 
   2087 		/* give this the main reference */
   2088 		mgr->mst_primary = mstb;
   2089 		kref_get(&mgr->mst_primary->kref);
   2090 
   2091 		ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
   2092 							 DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC);
   2093 		if (ret < 0) {
   2094 			goto out_unlock;
   2095 		}
   2096 
   2097 		{
   2098 			struct drm_dp_payload reset_pay;
   2099 			reset_pay.start_slot = 0;
   2100 			reset_pay.num_slots = 0x3f;
   2101 			drm_dp_dpcd_write_payload(mgr, 0, &reset_pay);
   2102 		}
   2103 
   2104 		queue_work(system_long_wq, &mgr->work);
   2105 
   2106 		ret = 0;
   2107 	} else {
   2108 		/* disable MST on the device */
   2109 		mstb = mgr->mst_primary;
   2110 		mgr->mst_primary = NULL;
   2111 		/* this can fail if the device is gone */
   2112 		drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 0);
   2113 		ret = 0;
   2114 		memset(mgr->payloads, 0, mgr->max_payloads * sizeof(struct drm_dp_payload));
   2115 		mgr->payload_mask = 0;
   2116 		set_bit(0, &mgr->payload_mask);
   2117 		mgr->vcpi_mask = 0;
   2118 	}
   2119 
   2120 out_unlock:
   2121 	mutex_unlock(&mgr->lock);
   2122 	if (mstb)
   2123 		drm_dp_put_mst_branch_device(mstb);
   2124 	return ret;
   2125 
   2126 }
   2127 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_set_mst);
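
/*
 * Illustrative sketch (not compiled): how a driver's detect path might use
 * this.  "sink_is_mst" stands in for the driver's own DPCD MSTM_CAP probe
 * and is hypothetical.
 */
#if 0
	if (sink_is_mst != mgr->mst_state)
		drm_dp_mst_topology_mgr_set_mst(mgr, sink_is_mst);
#endif	/* sketch */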
   2128 
   2129 /**
   2130  * drm_dp_mst_topology_mgr_suspend() - suspend the MST manager
   2131  * @mgr: manager to suspend
   2132  *
   2133  * This function tells the MST device that we can't handle UP messages
   2134  * anymore. This should stop it from sending any since we are suspended.
   2135  */
   2136 void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr)
   2137 {
   2138 	mutex_lock(&mgr->lock);
   2139 	drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
   2140 			   DP_MST_EN | DP_UPSTREAM_IS_SRC);
   2141 	mutex_unlock(&mgr->lock);
   2142 	flush_work(&mgr->work);
   2143 	flush_work(&mgr->destroy_connector_work);
   2144 }
   2145 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_suspend);
   2146 
   2147 /**
   2148  * drm_dp_mst_topology_mgr_resume() - resume the MST manager
   2149  * @mgr: manager to resume
   2150  *
    2151  * This will fetch the DPCD and check whether the device is still there;
    2152  * if it is, it will rewrite the MSTM control bits and return.
    2153  *
    2154  * If the device has gone away this returns -1, and the driver should do
    2155  * a full MST reprobe, in case it was undocked.
   2156  */
   2157 int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr)
   2158 {
   2159 	int ret = 0;
   2160 
   2161 	mutex_lock(&mgr->lock);
   2162 
   2163 	if (mgr->mst_primary) {
   2164 		int sret;
   2165 		u8 guid[16];
   2166 
   2167 		sret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE);
   2168 		if (sret != DP_RECEIVER_CAP_SIZE) {
   2169 			DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
   2170 			ret = -1;
   2171 			goto out_unlock;
   2172 		}
   2173 
   2174 		ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
   2175 					 DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC);
   2176 		if (ret < 0) {
   2177 			DRM_DEBUG_KMS("mst write failed - undocked during suspend?\n");
   2178 			ret = -1;
   2179 			goto out_unlock;
   2180 		}
   2181 
   2182 		/* Some hubs forget their guids after they resume */
   2183 		sret = drm_dp_dpcd_read(mgr->aux, DP_GUID, guid, 16);
   2184 		if (sret != 16) {
   2185 			DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
   2186 			ret = -1;
   2187 			goto out_unlock;
   2188 		}
   2189 		drm_dp_check_mstb_guid(mgr->mst_primary, guid);
   2190 
   2191 		ret = 0;
   2192 	} else
   2193 		ret = -1;
   2194 
   2195 out_unlock:
   2196 	mutex_unlock(&mgr->lock);
   2197 	return ret;
   2198 }
   2199 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_resume);
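
/*
 * Illustrative sketch (not compiled): a driver resume path.  The
 * full-reprobe helper named here is hypothetical; the -1 return is the
 * documented "undocked while suspended" case.
 */
#if 0
	if (drm_dp_mst_topology_mgr_resume(mgr) < 0)
		example_full_mst_reprobe(dev);	/* hypothetical helper */
#endif	/* sketch */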
   2200 
   2201 static bool drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up)
   2202 {
   2203 	int len;
   2204 	u8 replyblock[32];
   2205 	int replylen, origlen __unused, curreply;
   2206 	int ret;
   2207 	struct drm_dp_sideband_msg_rx *msg;
   2208 	int basereg = up ? DP_SIDEBAND_MSG_UP_REQ_BASE : DP_SIDEBAND_MSG_DOWN_REP_BASE;
   2209 	msg = up ? &mgr->up_req_recv : &mgr->down_rep_recv;
   2210 
   2211 	len = min(mgr->max_dpcd_transaction_bytes, 16);
   2212 	ret = drm_dp_dpcd_read(mgr->aux, basereg,
   2213 			       replyblock, len);
   2214 	if (ret != len) {
   2215 		DRM_DEBUG_KMS("failed to read DPCD down rep %d %d\n", len, ret);
   2216 		return false;
   2217 	}
   2218 	ret = drm_dp_sideband_msg_build(msg, replyblock, len, true);
   2219 	if (!ret) {
   2220 		DRM_DEBUG_KMS("sideband msg build failed %d\n", replyblock[0]);
   2221 		return false;
   2222 	}
   2223 	replylen = msg->curchunk_len + msg->curchunk_hdrlen;
   2224 
   2225 	origlen = replylen;
   2226 	replylen -= len;
   2227 	curreply = len;
   2228 	while (replylen > 0) {
   2229 		len = min3(replylen, mgr->max_dpcd_transaction_bytes, 16);
   2230 		ret = drm_dp_dpcd_read(mgr->aux, basereg + curreply,
   2231 				    replyblock, len);
   2232 		if (ret != len) {
   2233 			DRM_DEBUG_KMS("failed to read a chunk (len %d, ret %d)\n",
   2234 				      len, ret);
   2235 			return false;
   2236 		}
   2237 
   2238 		ret = drm_dp_sideband_msg_build(msg, replyblock, len, false);
   2239 		if (!ret) {
   2240 			DRM_DEBUG_KMS("failed to build sideband msg\n");
   2241 			return false;
   2242 		}
   2243 
   2244 		curreply += len;
   2245 		replylen -= len;
   2246 	}
   2247 	return true;
   2248 }
   2249 
   2250 static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
   2251 {
   2252 	int ret = 0;
   2253 
   2254 	if (!drm_dp_get_one_sb_msg(mgr, false)) {
   2255 		memset(&mgr->down_rep_recv, 0,
   2256 		       sizeof(struct drm_dp_sideband_msg_rx));
   2257 		return 0;
   2258 	}
   2259 
   2260 	if (mgr->down_rep_recv.have_eomt) {
   2261 		struct drm_dp_sideband_msg_tx *txmsg;
   2262 		struct drm_dp_mst_branch *mstb;
   2263 		int slot = -1;
   2264 		mstb = drm_dp_get_mst_branch_device(mgr,
   2265 						    mgr->down_rep_recv.initial_hdr.lct,
   2266 						    mgr->down_rep_recv.initial_hdr.rad);
   2267 
   2268 		if (!mstb) {
   2269 			DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->down_rep_recv.initial_hdr.lct);
   2270 			memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
   2271 			return 0;
   2272 		}
   2273 
   2274 		/* find the message */
   2275 		slot = mgr->down_rep_recv.initial_hdr.seqno;
   2276 		mutex_lock(&mgr->qlock);
   2277 		txmsg = mstb->tx_slots[slot];
    2278 		/* the slot is cleared below, once the reply is parsed */
   2279 		mutex_unlock(&mgr->qlock);
   2280 
   2281 		if (!txmsg) {
   2282 			DRM_DEBUG_KMS("Got MST reply with no msg %p %d %d %02x %02x\n",
   2283 			       mstb,
   2284 			       mgr->down_rep_recv.initial_hdr.seqno,
   2285 			       mgr->down_rep_recv.initial_hdr.lct,
   2286 				      mgr->down_rep_recv.initial_hdr.rad[0],
   2287 				      mgr->down_rep_recv.msg[0]);
   2288 			drm_dp_put_mst_branch_device(mstb);
   2289 			memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
   2290 			return 0;
   2291 		}
   2292 
   2293 		drm_dp_sideband_parse_reply(&mgr->down_rep_recv, &txmsg->reply);
   2294 		if (txmsg->reply.reply_type == 1) {
   2295 			DRM_DEBUG_KMS("Got NAK reply: req 0x%02x, reason 0x%02x, nak data 0x%02x\n", txmsg->reply.req_type, txmsg->reply.u.nak.reason, txmsg->reply.u.nak.nak_data);
   2296 		}
   2297 
    2298 		memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
    2299 
    2300 		mutex_lock(&mgr->qlock);
    2301 		txmsg->state = DRM_DP_SIDEBAND_TX_RX;
    2302 		mstb->tx_slots[slot] = NULL;
    2303 #ifdef __NetBSD__
    2304 		DRM_WAKEUP_ONE(&mstb->mgr->tx_waitq, &mstb->mgr->qlock);
    2305 		mutex_unlock(&mgr->qlock);
    2306 #else
    2307 		mutex_unlock(&mgr->qlock);
    2308 
    2309 		wake_up(&mgr->tx_waitq);
    2310 #endif
    2311 		drm_dp_put_mst_branch_device(mstb); /* done touching mstb */
   2312 	}
   2313 	return ret;
   2314 }
   2315 
   2316 static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
   2317 {
   2318 	int ret = 0;
   2319 
   2320 	if (!drm_dp_get_one_sb_msg(mgr, true)) {
   2321 		memset(&mgr->up_req_recv, 0,
   2322 		       sizeof(struct drm_dp_sideband_msg_rx));
   2323 		return 0;
   2324 	}
   2325 
   2326 	if (mgr->up_req_recv.have_eomt) {
   2327 		struct drm_dp_sideband_msg_req_body msg;
   2328 		struct drm_dp_mst_branch *mstb = NULL;
   2329 		bool seqno;
   2330 
   2331 		if (!mgr->up_req_recv.initial_hdr.broadcast) {
   2332 			mstb = drm_dp_get_mst_branch_device(mgr,
   2333 							    mgr->up_req_recv.initial_hdr.lct,
   2334 							    mgr->up_req_recv.initial_hdr.rad);
   2335 			if (!mstb) {
   2336 				DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct);
   2337 				memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
   2338 				return 0;
   2339 			}
   2340 		}
   2341 
   2342 		seqno = mgr->up_req_recv.initial_hdr.seqno;
   2343 		drm_dp_sideband_parse_req(&mgr->up_req_recv, &msg);
   2344 
   2345 		if (msg.req_type == DP_CONNECTION_STATUS_NOTIFY) {
   2346 			drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, msg.req_type, seqno, false);
   2347 
   2348 			if (!mstb)
   2349 				mstb = drm_dp_get_mst_branch_device_by_guid(mgr, msg.u.conn_stat.guid);
   2350 
   2351 			if (!mstb) {
   2352 				DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct);
   2353 				memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
   2354 				return 0;
   2355 			}
   2356 
   2357 			drm_dp_update_port(mstb, &msg.u.conn_stat);
   2358 
   2359 			DRM_DEBUG_KMS("Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n", msg.u.conn_stat.port_number, msg.u.conn_stat.legacy_device_plug_status, msg.u.conn_stat.displayport_device_plug_status, msg.u.conn_stat.message_capability_status, msg.u.conn_stat.input_port, msg.u.conn_stat.peer_device_type);
   2360 			(*mgr->cbs->hotplug)(mgr);
   2361 
   2362 		} else if (msg.req_type == DP_RESOURCE_STATUS_NOTIFY) {
   2363 			drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, msg.req_type, seqno, false);
   2364 			if (!mstb)
   2365 				mstb = drm_dp_get_mst_branch_device_by_guid(mgr, msg.u.resource_stat.guid);
   2366 
   2367 			if (!mstb) {
   2368 				DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct);
   2369 				memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
   2370 				return 0;
   2371 			}
   2372 
   2373 			DRM_DEBUG_KMS("Got RSN: pn: %d avail_pbn %d\n", msg.u.resource_stat.port_number, msg.u.resource_stat.available_pbn);
   2374 		}
   2375 
   2376 		if (mstb)
   2377 			drm_dp_put_mst_branch_device(mstb);
   2378 
   2379 		memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
   2380 	}
   2381 	return ret;
   2382 }
   2383 
   2384 /**
   2385  * drm_dp_mst_hpd_irq() - MST hotplug IRQ notify
   2386  * @mgr: manager to notify irq for.
   2387  * @esi: 4 bytes from SINK_COUNT_ESI
   2388  * @handled: whether the hpd interrupt was consumed or not
   2389  *
   2390  * This should be called from the driver when it detects a short IRQ,
   2391  * along with the value of the DEVICE_SERVICE_IRQ_VECTOR_ESI0. The
   2392  * topology manager will process the sideband messages received as a result
   2393  * of this.
   2394  */
   2395 int drm_dp_mst_hpd_irq(struct drm_dp_mst_topology_mgr *mgr, u8 *esi, bool *handled)
   2396 {
   2397 	int ret = 0;
   2398 	int sc;
   2399 	*handled = false;
   2400 	sc = esi[0] & 0x3f;
   2401 
   2402 	if (sc != mgr->sink_count) {
   2403 		mgr->sink_count = sc;
   2404 		*handled = true;
   2405 	}
   2406 
   2407 	if (esi[1] & DP_DOWN_REP_MSG_RDY) {
   2408 		ret = drm_dp_mst_handle_down_rep(mgr);
   2409 		*handled = true;
   2410 	}
   2411 
   2412 	if (esi[1] & DP_UP_REQ_MSG_RDY) {
   2413 		ret |= drm_dp_mst_handle_up_req(mgr);
   2414 		*handled = true;
   2415 	}
   2416 
   2417 	drm_dp_mst_kick_tx(mgr);
   2418 	return ret;
   2419 }
   2420 EXPORT_SYMBOL(drm_dp_mst_hpd_irq);
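
/*
 * Illustrative sketch (not compiled): servicing a short HPD pulse.  The
 * ESI offsets are the DP 1.2 DPCD addresses; the surrounding handler is
 * a sketch, not any particular driver's code.
 */
#if 0
	u8 esi[4];
	bool handled;

	if (drm_dp_dpcd_read(mgr->aux, DP_SINK_COUNT_ESI, esi, 4) == 4) {
		drm_dp_mst_hpd_irq(mgr, esi, &handled);
		if (handled)	/* ack the IRQ bits we consumed */
			drm_dp_dpcd_write(mgr->aux, DP_SINK_COUNT_ESI + 1,
					  &esi[1], 3);
	}
#endif	/* sketch */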
   2421 
   2422 /**
   2423  * drm_dp_mst_detect_port() - get connection status for an MST port
          * @connector: connector for this port
    2424  * @mgr: manager for this port
    2425  * @port: unverified pointer to a port
    2426  *
    2427  * This returns the current connection state for a port. It validates that
    2428  * the port pointer still exists, so the caller doesn't require a reference.
   2429  */
   2430 enum drm_connector_status drm_dp_mst_detect_port(struct drm_connector *connector,
   2431 						 struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
   2432 {
   2433 	enum drm_connector_status status = connector_status_disconnected;
   2434 
    2435 	/* we need to search for the port in the mgr in case it's gone */
   2436 	port = drm_dp_get_validated_port_ref(mgr, port);
   2437 	if (!port)
   2438 		return connector_status_disconnected;
   2439 
   2440 	if (!port->ddps)
   2441 		goto out;
   2442 
   2443 	switch (port->pdt) {
   2444 	case DP_PEER_DEVICE_NONE:
   2445 	case DP_PEER_DEVICE_MST_BRANCHING:
   2446 		break;
   2447 
   2448 	case DP_PEER_DEVICE_SST_SINK:
   2449 		status = connector_status_connected;
   2450 		/* for logical ports - cache the EDID */
   2451 		if (port->port_num >= 8 && !port->cached_edid) {
   2452 			port->cached_edid = drm_get_edid(connector, &port->aux.ddc);
   2453 		}
   2454 		break;
   2455 	case DP_PEER_DEVICE_DP_LEGACY_CONV:
   2456 		if (port->ldps)
   2457 			status = connector_status_connected;
   2458 		break;
   2459 	}
   2460 out:
   2461 	drm_dp_put_port(port);
   2462 	return status;
   2463 }
   2464 EXPORT_SYMBOL(drm_dp_mst_detect_port);
   2465 
   2466 /**
   2467  * drm_dp_mst_get_edid() - get EDID for an MST port
   2468  * @connector: toplevel connector to get EDID for
   2469  * @mgr: manager for this port
   2470  * @port: unverified pointer to a port.
   2471  *
    2472  * This returns an EDID for the port connected to a connector.
    2473  * It validates that the pointer still exists, so the caller doesn't
    2474  * require a reference.
   2475  */
   2476 struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
   2477 {
   2478 	struct edid *edid = NULL;
   2479 
    2480 	/* we need to search for the port in the mgr in case it's gone */
   2481 	port = drm_dp_get_validated_port_ref(mgr, port);
   2482 	if (!port)
   2483 		return NULL;
   2484 
   2485 	if (port->cached_edid)
   2486 		edid = drm_edid_duplicate(port->cached_edid);
   2487 	else {
   2488 		edid = drm_get_edid(connector, &port->aux.ddc);
   2489 		drm_mode_connector_set_tile_property(connector);
   2490 	}
   2491 	drm_dp_put_port(port);
   2492 	return edid;
   2493 }
   2494 EXPORT_SYMBOL(drm_dp_mst_get_edid);
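
/*
 * Illustrative sketch (not compiled): typical use from a connector's
 * ->get_modes() hook, pairing the EDID fetch with the standard DRM EDID
 * helpers.  "mgr" and "port" would come from the driver's connector state.
 */
#if 0
	struct edid *edid = drm_dp_mst_get_edid(connector, mgr, port);

	drm_mode_connector_update_edid_property(connector, edid);
	ret = drm_add_edid_modes(connector, edid);	/* mode count */
	kfree(edid);
#endif	/* sketch */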
   2495 
   2496 /**
   2497  * drm_dp_find_vcpi_slots() - find slots for this PBN value
   2498  * @mgr: manager to use
   2499  * @pbn: payload bandwidth to convert into slots.
   2500  */
   2501 int drm_dp_find_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr,
   2502 			   int pbn)
   2503 {
   2504 	int num_slots;
   2505 
   2506 	num_slots = DIV_ROUND_UP(pbn, mgr->pbn_div);
   2507 
   2508 	if (num_slots > mgr->avail_slots)
   2509 		return -ENOSPC;
   2510 	return num_slots;
   2511 }
   2512 EXPORT_SYMBOL(drm_dp_find_vcpi_slots);
   2513 
   2514 static int drm_dp_init_vcpi(struct drm_dp_mst_topology_mgr *mgr,
   2515 			    struct drm_dp_vcpi *vcpi, int pbn)
   2516 {
   2517 	int num_slots;
   2518 	int ret;
   2519 
   2520 	num_slots = DIV_ROUND_UP(pbn, mgr->pbn_div);
   2521 
   2522 	if (num_slots > mgr->avail_slots)
   2523 		return -ENOSPC;
   2524 
   2525 	vcpi->pbn = pbn;
   2526 	vcpi->aligned_pbn = num_slots * mgr->pbn_div;
   2527 	vcpi->num_slots = num_slots;
   2528 
   2529 	ret = drm_dp_mst_assign_payload_id(mgr, vcpi);
   2530 	if (ret < 0)
   2531 		return ret;
   2532 	return 0;
   2533 }
   2534 
   2535 /**
   2536  * drm_dp_mst_allocate_vcpi() - Allocate a virtual channel
   2537  * @mgr: manager for this port
   2538  * @port: port to allocate a virtual channel for.
   2539  * @pbn: payload bandwidth number to request
   2540  * @slots: returned number of slots for this PBN.
   2541  */
   2542 bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, int pbn, int *slots)
   2543 {
   2544 	int ret;
   2545 
   2546 	port = drm_dp_get_validated_port_ref(mgr, port);
   2547 	if (!port)
   2548 		return false;
   2549 
   2550 	if (port->vcpi.vcpi > 0) {
   2551 		DRM_DEBUG_KMS("payload: vcpi %d already allocated for pbn %d - requested pbn %d\n", port->vcpi.vcpi, port->vcpi.pbn, pbn);
   2552 		if (pbn == port->vcpi.pbn) {
   2553 			*slots = port->vcpi.num_slots;
   2554 			drm_dp_put_port(port);
   2555 			return true;
   2556 		}
   2557 	}
   2558 
   2559 	ret = drm_dp_init_vcpi(mgr, &port->vcpi, pbn);
   2560 	if (ret) {
   2561 		DRM_DEBUG_KMS("failed to init vcpi %d %d %d\n", DIV_ROUND_UP(pbn, mgr->pbn_div), mgr->avail_slots, ret);
   2562 		goto out;
   2563 	}
   2564 	DRM_DEBUG_KMS("initing vcpi for %d %d\n", pbn, port->vcpi.num_slots);
   2565 	*slots = port->vcpi.num_slots;
   2566 
   2567 	drm_dp_put_port(port);
   2568 	return true;
   2569 out:
   2570 	return false;
   2571 }
   2572 EXPORT_SYMBOL(drm_dp_mst_allocate_vcpi);
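
/*
 * Illustrative sketch (not compiled): tying the PBN helpers together when
 * checking and committing a mode on an MST port.  "adjusted_mode" and
 * "bpp" come from the driver's own state and are placeholders here.
 */
#if 0
	int pbn, slots;

	pbn = drm_dp_calc_pbn_mode(adjusted_mode->clock, bpp);
	slots = drm_dp_find_vcpi_slots(mgr, pbn);
	if (slots < 0)
		return false;	/* -ENOSPC: not enough link bandwidth */

	if (!drm_dp_mst_allocate_vcpi(mgr, port, pbn, &slots))
		return false;
#endif	/* sketch */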
   2573 
   2574 int drm_dp_mst_get_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
   2575 {
   2576 	int slots = 0;
   2577 	port = drm_dp_get_validated_port_ref(mgr, port);
   2578 	if (!port)
   2579 		return slots;
   2580 
   2581 	slots = port->vcpi.num_slots;
   2582 	drm_dp_put_port(port);
   2583 	return slots;
   2584 }
   2585 EXPORT_SYMBOL(drm_dp_mst_get_vcpi_slots);
   2586 
   2587 /**
   2588  * drm_dp_mst_reset_vcpi_slots() - Reset number of slots to 0 for VCPI
   2589  * @mgr: manager for this port
   2590  * @port: unverified pointer to a port.
   2591  *
    2592  * This just resets the number of slots for the port's VCPI for later programming.
   2593  */
   2594 void drm_dp_mst_reset_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
   2595 {
   2596 	port = drm_dp_get_validated_port_ref(mgr, port);
   2597 	if (!port)
   2598 		return;
   2599 	port->vcpi.num_slots = 0;
   2600 	drm_dp_put_port(port);
   2601 }
   2602 EXPORT_SYMBOL(drm_dp_mst_reset_vcpi_slots);
   2603 
   2604 /**
   2605  * drm_dp_mst_deallocate_vcpi() - deallocate a VCPI
   2606  * @mgr: manager for this port
   2607  * @port: unverified port to deallocate vcpi for
   2608  */
   2609 void drm_dp_mst_deallocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
   2610 {
   2611 	port = drm_dp_get_validated_port_ref(mgr, port);
   2612 	if (!port)
   2613 		return;
   2614 
   2615 	drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
   2616 	port->vcpi.num_slots = 0;
   2617 	port->vcpi.pbn = 0;
   2618 	port->vcpi.aligned_pbn = 0;
   2619 	port->vcpi.vcpi = 0;
   2620 	drm_dp_put_port(port);
   2621 }
   2622 EXPORT_SYMBOL(drm_dp_mst_deallocate_vcpi);
   2623 
   2624 static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
   2625 				     int id, struct drm_dp_payload *payload)
   2626 {
   2627 	u8 payload_alloc[3], status;
   2628 	int ret;
   2629 	int retries = 0;
   2630 
   2631 	drm_dp_dpcd_writeb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS,
   2632 			   DP_PAYLOAD_TABLE_UPDATED);
   2633 
   2634 	payload_alloc[0] = id;
   2635 	payload_alloc[1] = payload->start_slot;
   2636 	payload_alloc[2] = payload->num_slots;
   2637 
   2638 	ret = drm_dp_dpcd_write(mgr->aux, DP_PAYLOAD_ALLOCATE_SET, payload_alloc, 3);
   2639 	if (ret != 3) {
   2640 		DRM_DEBUG_KMS("failed to write payload allocation %d\n", ret);
   2641 		goto fail;
   2642 	}
   2643 
   2644 retry:
   2645 	ret = drm_dp_dpcd_readb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
   2646 	if (ret < 0) {
   2647 		DRM_DEBUG_KMS("failed to read payload table status %d\n", ret);
   2648 		goto fail;
   2649 	}
   2650 
   2651 	if (!(status & DP_PAYLOAD_TABLE_UPDATED)) {
   2652 		retries++;
   2653 		if (retries < 20) {
   2654 			usleep_range(10000, 20000);
   2655 			goto retry;
   2656 		}
   2657 		DRM_DEBUG_KMS("status not set after read payload table status %d\n", status);
   2658 		ret = -EINVAL;
   2659 		goto fail;
   2660 	}
   2661 	ret = 0;
   2662 fail:
   2663 	return ret;
   2664 }
   2665 
   2666 
   2667 /**
   2668  * drm_dp_check_act_status() - Check ACT handled status.
   2669  * @mgr: manager to use
   2670  *
   2671  * Check the payload status bits in the DPCD for ACT handled completion.
   2672  */
   2673 int drm_dp_check_act_status(struct drm_dp_mst_topology_mgr *mgr)
   2674 {
   2675 	u8 status;
   2676 	int ret;
   2677 	int count = 0;
   2678 
   2679 	do {
   2680 		ret = drm_dp_dpcd_readb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
   2681 
   2682 		if (ret < 0) {
   2683 			DRM_DEBUG_KMS("failed to read payload table status %d\n", ret);
   2684 			goto fail;
   2685 		}
   2686 
   2687 		if (status & DP_PAYLOAD_ACT_HANDLED)
   2688 			break;
   2689 		count++;
   2690 		udelay(100);
   2691 
   2692 	} while (count < 30);
   2693 
   2694 	if (!(status & DP_PAYLOAD_ACT_HANDLED)) {
   2695 		DRM_DEBUG_KMS("failed to get ACT bit %d after %d retries\n", status, count);
   2696 		ret = -EINVAL;
   2697 		goto fail;
   2698 	}
   2699 	return 0;
   2700 fail:
   2701 	return ret;
   2702 }
   2703 EXPORT_SYMBOL(drm_dp_check_act_status);
   2704 
   2705 /**
   2706  * drm_dp_calc_pbn_mode() - Calculate the PBN for a mode.
   2707  * @clock: dot clock for the mode
   2708  * @bpp: bpp for the mode.
   2709  *
   2710  * This uses the formula in the spec to calculate the PBN value for a mode.
   2711  */
   2712 int drm_dp_calc_pbn_mode(int clock, int bpp)
   2713 {
   2714 	u64 kbps;
   2715 	s64 peak_kbps;
   2716 	u32 numerator;
   2717 	u32 denominator;
   2718 
   2719 	kbps = clock * bpp;
   2720 
   2721 	/*
    2722 	 * margin 5300ppm + 300ppm ~ 0.6% as per spec; the factor is 1.006.
    2723 	 * The unit of 54/64Mbytes/sec is an arbitrary unit chosen as a
    2724 	 * common multiplier to render an integer PBN for all link rate/lane
    2725 	 * count combinations.
    2726 	 * Calculate:
    2727 	 * peak_kbps *= (1006/1000)   apply the 0.6% margin
    2728 	 * peak_kbps *= (64/54)       convert to PBN units
    2729 	 * peak_kbps /= (8 * 1000)    convert kilobits/sec to Mbytes/sec
   2730 	 */
   2731 
   2732 	numerator = 64 * 1006;
   2733 	denominator = 54 * 8 * 1000 * 1000;
   2734 
   2735 	kbps *= numerator;
   2736 	peak_kbps = drm_fixp_from_fraction(kbps, denominator);
   2737 
   2738 	return drm_fixp2int_ceil(peak_kbps);
   2739 }
   2740 EXPORT_SYMBOL(drm_dp_calc_pbn_mode);
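
/*
 * Worked example: for a 154 MHz dot clock at 30 bpp,
 * kbps = 154000 * 30 = 4620000, so
 * PBN  = ceil(4620000 * 64 * 1006 / (54 * 8 * 1000 * 1000))
 *      = ceil(688.55...) = 689,
 * which is the first case checked by test_calc_pbn_mode() below.
 */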
   2741 
   2742 static int test_calc_pbn_mode(void)
   2743 {
   2744 	int ret;
   2745 	ret = drm_dp_calc_pbn_mode(154000, 30);
   2746 	if (ret != 689) {
   2747 		DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n",
   2748 				154000, 30, 689, ret);
   2749 		return -EINVAL;
   2750 	}
   2751 	ret = drm_dp_calc_pbn_mode(234000, 30);
   2752 	if (ret != 1047) {
   2753 		DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n",
   2754 				234000, 30, 1047, ret);
   2755 		return -EINVAL;
   2756 	}
   2757 	ret = drm_dp_calc_pbn_mode(297000, 24);
   2758 	if (ret != 1063) {
   2759 		DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n",
   2760 				297000, 24, 1063, ret);
   2761 		return -EINVAL;
   2762 	}
   2763 	return 0;
   2764 }
   2765 
    2766 /* we want to kick the TX after we've acked the up/down IRQs. */
   2767 static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr)
   2768 {
   2769 	queue_work(system_long_wq, &mgr->tx_work);
   2770 }
   2771 
   2772 #if IS_ENABLED(CONFIG_DEBUG_FS)
   2773 static void drm_dp_mst_dump_mstb(struct seq_file *m,
   2774 				 struct drm_dp_mst_branch *mstb)
   2775 {
   2776 	struct drm_dp_mst_port *port;
   2777 	int tabs = mstb->lct;
   2778 	char prefix[10];
   2779 	int i;
   2780 
   2781 	for (i = 0; i < tabs; i++)
   2782 		prefix[i] = '\t';
   2783 	prefix[i] = '\0';
   2784 
   2785 	seq_printf(m, "%smst: %p, %d\n", prefix, mstb, mstb->num_ports);
   2786 	list_for_each_entry(port, &mstb->ports, next) {
   2787 		seq_printf(m, "%sport: %d: ddps: %d ldps: %d, %p, conn: %p\n", prefix, port->port_num, port->ddps, port->ldps, port, port->connector);
   2788 		if (port->mstb)
   2789 			drm_dp_mst_dump_mstb(m, port->mstb);
   2790 	}
   2791 }
   2792 
   2793 static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
   2794 				  char *buf)
   2795 {
   2796 	int ret;
   2797 	int i;
   2798 	for (i = 0; i < 4; i++) {
   2799 		ret = drm_dp_dpcd_read(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS + (i * 16), &buf[i * 16], 16);
   2800 		if (ret != 16)
   2801 			break;
   2802 	}
   2803 	if (i == 4)
   2804 		return true;
   2805 	return false;
   2806 }
   2807 
   2808 /**
    2809  * drm_dp_mst_dump_topology() - dump topology to seq file
    2810  * @m: seq_file to dump output to
    2811  * @mgr: manager to dump current topology for.
    2812  *
    2813  * Helper to dump the MST topology to a seq file for debugfs.
   2814  */
   2815 void drm_dp_mst_dump_topology(struct seq_file *m,
   2816 			      struct drm_dp_mst_topology_mgr *mgr)
   2817 {
   2818 	int i;
   2819 	struct drm_dp_mst_port *port;
   2820 	mutex_lock(&mgr->lock);
   2821 	if (mgr->mst_primary)
   2822 		drm_dp_mst_dump_mstb(m, mgr->mst_primary);
   2823 
   2824 	/* dump VCPIs */
   2825 	mutex_unlock(&mgr->lock);
   2826 
   2827 	mutex_lock(&mgr->payload_lock);
   2828 	seq_printf(m, "vcpi: %lx %lx\n", mgr->payload_mask, mgr->vcpi_mask);
   2829 
   2830 	for (i = 0; i < mgr->max_payloads; i++) {
   2831 		if (mgr->proposed_vcpis[i]) {
   2832 			port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
   2833 			seq_printf(m, "vcpi %d: %d %d %d\n", i, port->port_num, port->vcpi.vcpi, port->vcpi.num_slots);
   2834 		} else
   2835 			seq_printf(m, "vcpi %d:unsed\n", i);
   2836 	}
   2837 	for (i = 0; i < mgr->max_payloads; i++) {
   2838 		seq_printf(m, "payload %d: %d, %d, %d\n",
   2839 			   i,
   2840 			   mgr->payloads[i].payload_state,
   2841 			   mgr->payloads[i].start_slot,
   2842 			   mgr->payloads[i].num_slots);
   2843 
   2844 
   2845 	}
   2846 	mutex_unlock(&mgr->payload_lock);
   2847 
   2848 	mutex_lock(&mgr->lock);
   2849 	if (mgr->mst_primary) {
   2850 		u8 buf[64];
   2851 		bool bret;
   2852 		int ret;
   2853 		ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, buf, DP_RECEIVER_CAP_SIZE);
   2854 		seq_printf(m, "dpcd: ");
   2855 		for (i = 0; i < DP_RECEIVER_CAP_SIZE; i++)
   2856 			seq_printf(m, "%02x ", buf[i]);
   2857 		seq_printf(m, "\n");
   2858 		ret = drm_dp_dpcd_read(mgr->aux, DP_FAUX_CAP, buf, 2);
   2859 		seq_printf(m, "faux/mst: ");
   2860 		for (i = 0; i < 2; i++)
   2861 			seq_printf(m, "%02x ", buf[i]);
   2862 		seq_printf(m, "\n");
   2863 		ret = drm_dp_dpcd_read(mgr->aux, DP_MSTM_CTRL, buf, 1);
   2864 		seq_printf(m, "mst ctrl: ");
   2865 		for (i = 0; i < 1; i++)
   2866 			seq_printf(m, "%02x ", buf[i]);
   2867 		seq_printf(m, "\n");
   2868 
   2869 		/* dump the standard OUI branch header */
   2870 		ret = drm_dp_dpcd_read(mgr->aux, DP_BRANCH_OUI, buf, DP_BRANCH_OUI_HEADER_SIZE);
   2871 		seq_printf(m, "branch oui: ");
   2872 		for (i = 0; i < 0x3; i++)
   2873 			seq_printf(m, "%02x", buf[i]);
   2874 		seq_printf(m, " devid: ");
   2875 		for (i = 0x3; i < 0x8; i++)
   2876 			seq_printf(m, "%c", buf[i]);
   2877 		seq_printf(m, " revision: hw: %x.%x sw: %x.%x", buf[0x9] >> 4, buf[0x9] & 0xf, buf[0xa], buf[0xb]);
   2878 		seq_printf(m, "\n");
   2879 		bret = dump_dp_payload_table(mgr, buf);
   2880 		if (bret == true) {
   2881 			seq_printf(m, "payload table: ");
   2882 			for (i = 0; i < 63; i++)
   2883 				seq_printf(m, "%02x ", buf[i]);
   2884 			seq_printf(m, "\n");
   2885 		}
   2886 
   2887 	}
   2888 
   2889 	mutex_unlock(&mgr->lock);
   2890 
   2891 }
   2892 EXPORT_SYMBOL(drm_dp_mst_dump_topology);
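
/*
 * Illustrative sketch (not compiled): wiring the dump into debugfs with
 * the standard seq_file single-open pattern.  The names and registration
 * site are hypothetical.
 */
#if 0
static int example_mst_topology_show(struct seq_file *m, void *data)
{
	struct drm_dp_mst_topology_mgr *mgr = m->private;

	drm_dp_mst_dump_topology(m, mgr);
	return 0;
}

static int example_mst_topology_open(struct inode *inode, struct file *file)
{
	return single_open(file, example_mst_topology_show, inode->i_private);
}
#endif	/* sketch */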
   2893 #endif	/* IS_ENABLED(CONFIG_DEBUG_FS) */
   2894 
   2895 static void drm_dp_tx_work(struct work_struct *work)
   2896 {
   2897 	struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, tx_work);
   2898 
   2899 	mutex_lock(&mgr->qlock);
   2900 	if (mgr->tx_down_in_progress)
   2901 		process_single_down_tx_qlock(mgr);
   2902 	mutex_unlock(&mgr->qlock);
   2903 }
   2904 
   2905 static void drm_dp_free_mst_port(struct kref *kref)
   2906 {
   2907 	struct drm_dp_mst_port *port = container_of(kref, struct drm_dp_mst_port, kref);
   2908 	kref_put(&port->parent->kref, drm_dp_free_mst_branch_device);
   2909 	kfree(port);
   2910 }
   2911 
   2912 static void drm_dp_destroy_connector_work(struct work_struct *work)
   2913 {
   2914 	struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, destroy_connector_work);
   2915 	struct drm_dp_mst_port *port;
   2916 	bool send_hotplug = false;
   2917 	/*
   2918 	 * Not a regular list traverse as we have to drop the destroy
    2919 	 * connector lock before destroying the connector, to avoid an AB-BA
    2920 	 * lock-order inversion between this lock and the config mutex.
   2921 	 */
   2922 	for (;;) {
   2923 		mutex_lock(&mgr->destroy_connector_lock);
   2924 		port = list_first_entry_or_null(&mgr->destroy_connector_list, struct drm_dp_mst_port, next);
   2925 		if (!port) {
   2926 			mutex_unlock(&mgr->destroy_connector_lock);
   2927 			break;
   2928 		}
   2929 		list_del(&port->next);
   2930 		mutex_unlock(&mgr->destroy_connector_lock);
   2931 
   2932 		kref_init(&port->kref);
   2933 		INIT_LIST_HEAD(&port->next);
   2934 
   2935 		mgr->cbs->destroy_connector(mgr, port->connector);
   2936 
   2937 		drm_dp_port_teardown_pdt(port, port->pdt);
   2938 		port->pdt = DP_PEER_DEVICE_NONE;
   2939 
   2940 		if (!port->input && port->vcpi.vcpi > 0) {
   2941 			drm_dp_mst_reset_vcpi_slots(mgr, port);
   2942 			drm_dp_update_payload_part1(mgr);
   2943 			drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
   2944 		}
   2945 
   2946 		kref_put(&port->kref, drm_dp_free_mst_port);
   2947 		send_hotplug = true;
   2948 	}
   2949 	if (send_hotplug)
   2950 		(*mgr->cbs->hotplug)(mgr);
   2951 }
   2952 
   2953 /**
   2954  * drm_dp_mst_topology_mgr_init - initialise a topology manager
   2955  * @mgr: manager struct to initialise
   2956  * @dev: device providing this structure - for i2c addition.
   2957  * @aux: DP helper aux channel to talk to this device
   2958  * @max_dpcd_transaction_bytes: hw specific DPCD transaction limit
   2959  * @max_payloads: maximum number of payloads this GPU can source
   2960  * @conn_base_id: the connector object ID the MST device is connected to.
   2961  *
   2962  * Return 0 for success, or negative error code on failure
   2963  */
   2964 int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
   2965 				 struct device *dev, struct drm_dp_aux *aux,
   2966 				 int max_dpcd_transaction_bytes,
   2967 				 int max_payloads, int conn_base_id)
   2968 {
   2969 #ifdef __NetBSD__
   2970 	linux_mutex_init(&mgr->lock);
   2971 	linux_mutex_init(&mgr->qlock);
   2972 	linux_mutex_init(&mgr->payload_lock);
   2973 	linux_mutex_init(&mgr->destroy_connector_lock);
   2974 #else
   2975 	mutex_init(&mgr->lock);
   2976 	mutex_init(&mgr->qlock);
   2977 	mutex_init(&mgr->payload_lock);
   2978 	mutex_init(&mgr->destroy_connector_lock);
   2979 #endif
   2980 	INIT_LIST_HEAD(&mgr->tx_msg_downq);
   2981 	INIT_LIST_HEAD(&mgr->destroy_connector_list);
   2982 	INIT_WORK(&mgr->work, drm_dp_mst_link_probe_work);
   2983 	INIT_WORK(&mgr->tx_work, drm_dp_tx_work);
   2984 	INIT_WORK(&mgr->destroy_connector_work, drm_dp_destroy_connector_work);
   2985 #ifdef __NetBSD__
   2986 	DRM_INIT_WAITQUEUE(&mgr->tx_waitq, "dpmstwait");
   2987 #else
   2988 	init_waitqueue_head(&mgr->tx_waitq);
   2989 #endif
   2990 	mgr->dev = dev;
   2991 	mgr->aux = aux;
   2992 	mgr->max_dpcd_transaction_bytes = max_dpcd_transaction_bytes;
   2993 	mgr->max_payloads = max_payloads;
   2994 	mgr->conn_base_id = conn_base_id;
   2995 	mgr->payloads = kcalloc(max_payloads, sizeof(struct drm_dp_payload), GFP_KERNEL);
   2996 	if (!mgr->payloads)
   2997 		return -ENOMEM;
   2998 	mgr->proposed_vcpis = kcalloc(max_payloads, sizeof(struct drm_dp_vcpi *), GFP_KERNEL);
   2999 	if (!mgr->proposed_vcpis)
   3000 		return -ENOMEM;
   3001 	set_bit(0, &mgr->payload_mask);
   3002 	test_calc_pbn_mode();
   3003 	return 0;
   3004 }
   3005 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_init);
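
/*
 * Illustrative sketch (not compiled): one-time setup from a driver.  Only
 * callbacks actually invoked by this file are shown; the callback
 * implementations and the transaction/payload limits are example values.
 */
#if 0
	static const struct drm_dp_mst_topology_cbs example_cbs = {
		.hotplug = example_hotplug,
		.destroy_connector = example_destroy_connector,
		/* ... remaining callbacks ... */
	};

	mgr->cbs = &example_cbs;
	ret = drm_dp_mst_topology_mgr_init(mgr, dev->dev, &dp_aux,
					   16 /* max DPCD xfer */,
					   8 /* max payloads */,
					   connector->base.id);
#endif	/* sketch */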
   3006 
   3007 /**
   3008  * drm_dp_mst_topology_mgr_destroy() - destroy topology manager.
   3009  * @mgr: manager to destroy
   3010  */
   3011 void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr)
   3012 {
   3013 	flush_work(&mgr->work);
   3014 	flush_work(&mgr->destroy_connector_work);
   3015 	mutex_lock(&mgr->payload_lock);
   3016 	kfree(mgr->payloads);
   3017 	mgr->payloads = NULL;
   3018 	kfree(mgr->proposed_vcpis);
   3019 	mgr->proposed_vcpis = NULL;
   3020 	mutex_unlock(&mgr->payload_lock);
   3021 	mgr->dev = NULL;
   3022 	mgr->aux = NULL;
   3023 #ifdef __NetBSD__
   3024 	DRM_DESTROY_WAITQUEUE(&mgr->tx_waitq);
   3025 	linux_mutex_destroy(&mgr->destroy_connector_lock);
   3026 	linux_mutex_destroy(&mgr->payload_lock);
   3027 	linux_mutex_destroy(&mgr->qlock);
   3028 	linux_mutex_destroy(&mgr->lock);
   3029 #else
   3030 	mutex_destroy(&mgr->destroy_connector_lock);
   3031 	mutex_destroy(&mgr->payload_lock);
   3032 	mutex_destroy(&mgr->qlock);
   3033 	mutex_destroy(&mgr->lock);
   3034 #endif
   3035 }
   3036 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_destroy);
   3037 
   3038 /* I2C device */
   3039 static int drm_dp_mst_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
   3040 			       int num)
   3041 {
   3042 	struct drm_dp_aux *aux = adapter->algo_data;
   3043 	struct drm_dp_mst_port *port = container_of(aux, struct drm_dp_mst_port, aux);
   3044 	struct drm_dp_mst_branch *mstb;
   3045 	struct drm_dp_mst_topology_mgr *mgr = port->mgr;
   3046 	unsigned int i;
   3047 	bool reading = false;
   3048 	struct drm_dp_sideband_msg_req_body msg;
   3049 	struct drm_dp_sideband_msg_tx *txmsg = NULL;
   3050 	int ret;
   3051 
   3052 	mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);
   3053 	if (!mstb)
   3054 		return -EREMOTEIO;
   3055 
   3056 	/* construct i2c msg */
   3057 	/* see if last msg is a read */
   3058 	if (msgs[num - 1].flags & I2C_M_RD)
   3059 		reading = true;
   3060 
   3061 	if (!reading || (num - 1 > DP_REMOTE_I2C_READ_MAX_TRANSACTIONS)) {
   3062 		DRM_DEBUG_KMS("Unsupported I2C transaction for MST device\n");
   3063 		ret = -EIO;
   3064 		goto out;
   3065 	}
   3066 
   3067 	memset(&msg, 0, sizeof(msg));
   3068 	msg.req_type = DP_REMOTE_I2C_READ;
   3069 	msg.u.i2c_read.num_transactions = num - 1;
   3070 	msg.u.i2c_read.port_number = port->port_num;
   3071 	for (i = 0; i < num - 1; i++) {
   3072 		msg.u.i2c_read.transactions[i].i2c_dev_id = msgs[i].addr;
   3073 		msg.u.i2c_read.transactions[i].num_bytes = msgs[i].len;
   3074 		msg.u.i2c_read.transactions[i].bytes = msgs[i].buf;
   3075 	}
   3076 	msg.u.i2c_read.read_i2c_device_id = msgs[num - 1].addr;
   3077 	msg.u.i2c_read.num_bytes_read = msgs[num - 1].len;
   3078 
   3079 	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
   3080 	if (!txmsg) {
   3081 		ret = -ENOMEM;
   3082 		goto out;
   3083 	}
   3084 
   3085 	txmsg->dst = mstb;
   3086 	drm_dp_encode_sideband_req(&msg, txmsg);
   3087 
   3088 	drm_dp_queue_down_tx(mgr, txmsg);
   3089 
   3090 	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
   3091 	if (ret > 0) {
   3092 
   3093 		if (txmsg->reply.reply_type == 1) { /* got a NAK back */
   3094 			ret = -EREMOTEIO;
   3095 			goto out;
   3096 		}
   3097 		if (txmsg->reply.u.remote_i2c_read_ack.num_bytes != msgs[num - 1].len) {
   3098 			ret = -EIO;
   3099 			goto out;
   3100 		}
   3101 		memcpy(msgs[num - 1].buf, txmsg->reply.u.remote_i2c_read_ack.bytes, msgs[num - 1].len);
   3102 		ret = num;
   3103 	}
   3104 out:
   3105 	kfree(txmsg);
   3106 	drm_dp_put_mst_branch_device(mstb);
   3107 	return ret;
   3108 }
   3109 
   3110 static u32 drm_dp_mst_i2c_functionality(struct i2c_adapter *adapter)
   3111 {
   3112 	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
   3113 	       I2C_FUNC_SMBUS_READ_BLOCK_DATA |
   3114 	       I2C_FUNC_SMBUS_BLOCK_PROC_CALL |
   3115 	       I2C_FUNC_10BIT_ADDR;
   3116 }
   3117 
   3118 static const struct i2c_algorithm drm_dp_mst_i2c_algo = {
   3119 	.functionality = drm_dp_mst_i2c_functionality,
   3120 	.master_xfer = drm_dp_mst_i2c_xfer,
   3121 };
   3122 
   3123 /**
   3124  * drm_dp_mst_register_i2c_bus() - register an I2C adapter for I2C-over-AUX
   3125  * @aux: DisplayPort AUX channel
   3126  *
   3127  * Returns 0 on success or a negative error code on failure.
   3128  */
   3129 static int drm_dp_mst_register_i2c_bus(struct drm_dp_aux *aux)
   3130 {
   3131 	aux->ddc.algo = &drm_dp_mst_i2c_algo;
   3132 	aux->ddc.algo_data = aux;
   3133 	aux->ddc.retries = 3;
   3134 
   3135 	aux->ddc.class = I2C_CLASS_DDC;
   3136 	aux->ddc.owner = THIS_MODULE;
   3137 	aux->ddc.dev.parent = aux->dev;
   3138 #ifndef __NetBSD__		/* XXX of? */
   3139 	aux->ddc.dev.of_node = aux->dev->of_node;
   3140 #endif
   3141 
   3142 	strlcpy(aux->ddc.name, aux->name ? aux->name : dev_name(aux->dev),
   3143 		sizeof(aux->ddc.name));
   3144 
   3145 	return i2c_add_adapter(&aux->ddc);
   3146 }
   3147 
   3148 /**
   3149  * drm_dp_mst_unregister_i2c_bus() - unregister an I2C-over-AUX adapter
   3150  * @aux: DisplayPort AUX channel
   3151  */
   3152 static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_aux *aux)
   3153 {
   3154 	i2c_del_adapter(&aux->ddc);
   3155 }
   3156