      1 /*	$NetBSD: drm_dp_mst_topology.c,v 1.4 2020/02/14 04:35:19 riastradh Exp $	*/
      2 
      3 /*
      4  * Copyright © 2014 Red Hat
      5  *
      6  * Permission to use, copy, modify, distribute, and sell this software and its
      7  * documentation for any purpose is hereby granted without fee, provided that
      8  * the above copyright notice appear in all copies and that both that copyright
      9  * notice and this permission notice appear in supporting documentation, and
     10  * that the name of the copyright holders not be used in advertising or
     11  * publicity pertaining to distribution of the software without specific,
     12  * written prior permission.  The copyright holders make no representations
     13  * about the suitability of this software for any purpose.  It is provided "as
     14  * is" without express or implied warranty.
     15  *
     16  * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
     17  * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
     18  * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
     19  * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
     20  * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
     21  * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
     22  * OF THIS SOFTWARE.
     23  */
     24 
     25 #include <sys/cdefs.h>
     26 __KERNEL_RCSID(0, "$NetBSD: drm_dp_mst_topology.c,v 1.4 2020/02/14 04:35:19 riastradh Exp $");
     27 
     28 #include <linux/kernel.h>
     29 #include <linux/delay.h>
     30 #include <linux/init.h>
     31 #include <linux/errno.h>
     32 #include <linux/sched.h>
     33 #include <linux/seq_file.h>
     34 #include <linux/i2c.h>
     35 #include <linux/device.h>
     36 #include <linux/export.h>
     37 #include <linux/module.h>
     38 #include <drm/drm_dp_mst_helper.h>
     39 #include <drm/drmP.h>
     40 
     41 #include <drm/drm_fixed.h>
     42 
     43 #include <linux/nbsd-namespace.h>
     44 
     45 /**
     46  * DOC: dp mst helper
     47  *
     48  * These functions contain parts of the DisplayPort 1.2a MultiStream Transport
     49  * protocol. The helpers contain a topology manager and bandwidth manager.
      50  * The helpers encapsulate the sending and receiving of sideband msgs.
     51  */
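
         /*
          * A condensed view of the sideband wire format as the helpers below
          * implement it (derived from the encode/decode routines, not a
          * restatement of the full DP 1.2 spec):
          *
          *   byte 0:        (lct << 4) | lcr
          *   next lct/2:    RAD, one nibble per hop
          *   next byte:     (broadcast << 7) | (path_msg << 6) | msg_len
          *   next byte:     (somt << 7) | (eomt << 6) | (seqno << 4) | crc4
          *
          * Each 48-byte transaction carries one such header plus a body chunk
          * whose last byte is a CRC-8 of the chunk; messages longer than one
          * chunk are delimited by the somt/eomt flags.
          */
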
     52 #if IS_ENABLED(CONFIG_DEBUG_FS)
     53 static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
     54 				  char *buf);
     55 #endif
     56 static int test_calc_pbn_mode(void);
     57 
     58 static void drm_dp_put_port(struct drm_dp_mst_port *port);
     59 
     60 static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
     61 				     int id,
     62 				     struct drm_dp_payload *payload);
     63 
     64 static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
     65 				  struct drm_dp_mst_port *port,
     66 				  int offset, int size, u8 *bytes);
     67 
     68 static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
     69 				     struct drm_dp_mst_branch *mstb);
     70 static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
     71 					   struct drm_dp_mst_branch *mstb,
     72 					   struct drm_dp_mst_port *port);
     73 static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
     74 				 u8 *guid);
     75 
     76 static int drm_dp_mst_register_i2c_bus(struct drm_dp_aux *aux);
     77 static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_aux *aux);
     78 static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr);
     79 /* sideband msg handling */
     80 static u8 drm_dp_msg_header_crc4(const uint8_t *data, size_t num_nibbles)
     81 {
     82 	u8 bitmask = 0x80;
     83 	u8 bitshift = 7;
     84 	u8 array_index = 0;
     85 	int number_of_bits = num_nibbles * 4;
     86 	u8 remainder = 0;
     87 
     88 	while (number_of_bits != 0) {
     89 		number_of_bits--;
     90 		remainder <<= 1;
     91 		remainder |= (data[array_index] & bitmask) >> bitshift;
     92 		bitmask >>= 1;
     93 		bitshift--;
     94 		if (bitmask == 0) {
     95 			bitmask = 0x80;
     96 			bitshift = 7;
     97 			array_index++;
     98 		}
     99 		if ((remainder & 0x10) == 0x10)
    100 			remainder ^= 0x13;
    101 	}
    102 
    103 	number_of_bits = 4;
    104 	while (number_of_bits != 0) {
    105 		number_of_bits--;
    106 		remainder <<= 1;
    107 		if ((remainder & 0x10) != 0)
    108 			remainder ^= 0x13;
    109 	}
    110 
    111 	return remainder;
    112 }
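
         /*
          * The loop above is a bit-serial CRC-4: the 0x13 XOR constant is the
          * generator polynomial x^4 + x + 1 (including the implicit x^4
          * term), applied over num_nibbles * 4 input bits and then flushed
          * with four zero bits.
          */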
    113 
    114 static u8 drm_dp_msg_data_crc4(const uint8_t *data, u8 number_of_bytes)
    115 {
    116 	u8 bitmask = 0x80;
    117 	u8 bitshift = 7;
    118 	u8 array_index = 0;
    119 	int number_of_bits = number_of_bytes * 8;
    120 	u16 remainder = 0;
    121 
    122 	while (number_of_bits != 0) {
    123 		number_of_bits--;
    124 		remainder <<= 1;
    125 		remainder |= (data[array_index] & bitmask) >> bitshift;
    126 		bitmask >>= 1;
    127 		bitshift--;
    128 		if (bitmask == 0) {
    129 			bitmask = 0x80;
    130 			bitshift = 7;
    131 			array_index++;
    132 		}
    133 		if ((remainder & 0x100) == 0x100)
    134 			remainder ^= 0xd5;
    135 	}
    136 
    137 	number_of_bits = 8;
    138 	while (number_of_bits != 0) {
    139 		number_of_bits--;
    140 		remainder <<= 1;
    141 		if ((remainder & 0x100) != 0)
    142 			remainder ^= 0xd5;
    143 	}
    144 
    145 	return remainder & 0xff;
    146 }
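
         /*
          * drm_dp_msg_data_crc4() above is the same scheme widened to a
          * bit-serial CRC-8: the 0xd5 XOR constant plus the implicit x^8 term
          * gives the generator polynomial x^8 + x^7 + x^6 + x^4 + x^2 + 1,
          * flushed with eight zero bits.
          */
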
    147 static inline u8 drm_dp_calc_sb_hdr_size(struct drm_dp_sideband_msg_hdr *hdr)
    148 {
    149 	u8 size = 3;
    150 	size += (hdr->lct / 2);
    151 	return size;
    152 }
    153 
    154 static void drm_dp_encode_sideband_msg_hdr(struct drm_dp_sideband_msg_hdr *hdr,
    155 					   u8 *buf, int *len)
    156 {
    157 	int idx = 0;
    158 	int i;
    159 	u8 crc4;
    160 	buf[idx++] = ((hdr->lct & 0xf) << 4) | (hdr->lcr & 0xf);
    161 	for (i = 0; i < (hdr->lct / 2); i++)
    162 		buf[idx++] = hdr->rad[i];
    163 	buf[idx++] = (hdr->broadcast << 7) | (hdr->path_msg << 6) |
    164 		(hdr->msg_len & 0x3f);
    165 	buf[idx++] = (hdr->somt << 7) | (hdr->eomt << 6) | (hdr->seqno << 4);
    166 
    167 	crc4 = drm_dp_msg_header_crc4(buf, (idx * 2) - 1);
    168 	buf[idx - 1] |= (crc4 & 0xf);
    169 
    170 	*len = idx;
    171 }
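
         /*
          * Worked example of the encoding above: a header for a directly
          * attached branch (lct 1, lcr 0, no RAD bytes) with msg_len 6,
          * somt = eomt = 1 and seqno 0 comes out as the three bytes
          * { 0x10, 0x06, 0xc0 | crc4 }, with the CRC-4 of the first five
          * nibbles folded into the low nibble of the last byte.
          */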
    172 
    173 static bool drm_dp_decode_sideband_msg_hdr(struct drm_dp_sideband_msg_hdr *hdr,
    174 					   u8 *buf, int buflen, u8 *hdrlen)
    175 {
    176 	u8 crc4;
    177 	u8 len;
    178 	int i;
    179 	u8 idx;
    180 	if (buf[0] == 0)
    181 		return false;
    182 	len = 3;
    183 	len += ((buf[0] & 0xf0) >> 4) / 2;
    184 	if (len > buflen)
    185 		return false;
    186 	crc4 = drm_dp_msg_header_crc4(buf, (len * 2) - 1);
    187 
    188 	if ((crc4 & 0xf) != (buf[len - 1] & 0xf)) {
    189 		DRM_DEBUG_KMS("crc4 mismatch 0x%x 0x%x\n", crc4, buf[len - 1]);
    190 		return false;
    191 	}
    192 
    193 	hdr->lct = (buf[0] & 0xf0) >> 4;
    194 	hdr->lcr = (buf[0] & 0xf);
    195 	idx = 1;
    196 	for (i = 0; i < (hdr->lct / 2); i++)
    197 		hdr->rad[i] = buf[idx++];
    198 	hdr->broadcast = (buf[idx] >> 7) & 0x1;
    199 	hdr->path_msg = (buf[idx] >> 6) & 0x1;
    200 	hdr->msg_len = buf[idx] & 0x3f;
    201 	idx++;
    202 	hdr->somt = (buf[idx] >> 7) & 0x1;
    203 	hdr->eomt = (buf[idx] >> 6) & 0x1;
    204 	hdr->seqno = (buf[idx] >> 4) & 0x1;
    205 	idx++;
    206 	*hdrlen = idx;
    207 	return true;
    208 }
    209 
    210 static void drm_dp_encode_sideband_req(struct drm_dp_sideband_msg_req_body *req,
    211 				       struct drm_dp_sideband_msg_tx *raw)
    212 {
    213 	int idx = 0;
    214 	int i;
    215 	u8 *buf = raw->msg;
    216 	buf[idx++] = req->req_type & 0x7f;
    217 
    218 	switch (req->req_type) {
    219 	case DP_ENUM_PATH_RESOURCES:
    220 		buf[idx] = (req->u.port_num.port_number & 0xf) << 4;
    221 		idx++;
    222 		break;
    223 	case DP_ALLOCATE_PAYLOAD:
    224 		buf[idx] = (req->u.allocate_payload.port_number & 0xf) << 4 |
    225 			(req->u.allocate_payload.number_sdp_streams & 0xf);
    226 		idx++;
    227 		buf[idx] = (req->u.allocate_payload.vcpi & 0x7f);
    228 		idx++;
    229 		buf[idx] = (req->u.allocate_payload.pbn >> 8);
    230 		idx++;
    231 		buf[idx] = (req->u.allocate_payload.pbn & 0xff);
    232 		idx++;
    233 		for (i = 0; i < req->u.allocate_payload.number_sdp_streams / 2; i++) {
    234 			buf[idx] = ((req->u.allocate_payload.sdp_stream_sink[i * 2] & 0xf) << 4) |
    235 				(req->u.allocate_payload.sdp_stream_sink[i * 2 + 1] & 0xf);
    236 			idx++;
    237 		}
    238 		if (req->u.allocate_payload.number_sdp_streams & 1) {
    239 			i = req->u.allocate_payload.number_sdp_streams - 1;
    240 			buf[idx] = (req->u.allocate_payload.sdp_stream_sink[i] & 0xf) << 4;
    241 			idx++;
    242 		}
    243 		break;
    244 	case DP_QUERY_PAYLOAD:
    245 		buf[idx] = (req->u.query_payload.port_number & 0xf) << 4;
    246 		idx++;
    247 		buf[idx] = (req->u.query_payload.vcpi & 0x7f);
    248 		idx++;
    249 		break;
    250 	case DP_REMOTE_DPCD_READ:
    251 		buf[idx] = (req->u.dpcd_read.port_number & 0xf) << 4;
    252 		buf[idx] |= ((req->u.dpcd_read.dpcd_address & 0xf0000) >> 16) & 0xf;
    253 		idx++;
    254 		buf[idx] = (req->u.dpcd_read.dpcd_address & 0xff00) >> 8;
    255 		idx++;
    256 		buf[idx] = (req->u.dpcd_read.dpcd_address & 0xff);
    257 		idx++;
    258 		buf[idx] = (req->u.dpcd_read.num_bytes);
    259 		idx++;
    260 		break;
    261 
    262 	case DP_REMOTE_DPCD_WRITE:
    263 		buf[idx] = (req->u.dpcd_write.port_number & 0xf) << 4;
    264 		buf[idx] |= ((req->u.dpcd_write.dpcd_address & 0xf0000) >> 16) & 0xf;
    265 		idx++;
    266 		buf[idx] = (req->u.dpcd_write.dpcd_address & 0xff00) >> 8;
    267 		idx++;
    268 		buf[idx] = (req->u.dpcd_write.dpcd_address & 0xff);
    269 		idx++;
    270 		buf[idx] = (req->u.dpcd_write.num_bytes);
    271 		idx++;
    272 		memcpy(&buf[idx], req->u.dpcd_write.bytes, req->u.dpcd_write.num_bytes);
    273 		idx += req->u.dpcd_write.num_bytes;
    274 		break;
    275 	case DP_REMOTE_I2C_READ:
    276 		buf[idx] = (req->u.i2c_read.port_number & 0xf) << 4;
    277 		buf[idx] |= (req->u.i2c_read.num_transactions & 0x3);
    278 		idx++;
    279 		for (i = 0; i < (req->u.i2c_read.num_transactions & 0x3); i++) {
    280 			buf[idx] = req->u.i2c_read.transactions[i].i2c_dev_id & 0x7f;
    281 			idx++;
    282 			buf[idx] = req->u.i2c_read.transactions[i].num_bytes;
    283 			idx++;
    284 			memcpy(&buf[idx], req->u.i2c_read.transactions[i].bytes, req->u.i2c_read.transactions[i].num_bytes);
    285 			idx += req->u.i2c_read.transactions[i].num_bytes;
    286 
    287 			buf[idx] = (req->u.i2c_read.transactions[i].no_stop_bit & 0x1) << 5;
    288 			buf[idx] |= (req->u.i2c_read.transactions[i].i2c_transaction_delay & 0xf);
    289 			idx++;
    290 		}
    291 		buf[idx] = (req->u.i2c_read.read_i2c_device_id) & 0x7f;
    292 		idx++;
    293 		buf[idx] = (req->u.i2c_read.num_bytes_read);
    294 		idx++;
    295 		break;
    296 
    297 	case DP_REMOTE_I2C_WRITE:
    298 		buf[idx] = (req->u.i2c_write.port_number & 0xf) << 4;
    299 		idx++;
    300 		buf[idx] = (req->u.i2c_write.write_i2c_device_id) & 0x7f;
    301 		idx++;
    302 		buf[idx] = (req->u.i2c_write.num_bytes);
    303 		idx++;
    304 		memcpy(&buf[idx], req->u.i2c_write.bytes, req->u.i2c_write.num_bytes);
    305 		idx += req->u.i2c_write.num_bytes;
    306 		break;
    307 	}
    308 	raw->cur_len = idx;
    309 }
    310 
    311 static void drm_dp_crc_sideband_chunk_req(u8 *msg, u8 len)
    312 {
    313 	u8 crc4;
    314 	crc4 = drm_dp_msg_data_crc4(msg, len);
    315 	msg[len] = crc4;
    316 }
    317 
    318 static void drm_dp_encode_sideband_reply(struct drm_dp_sideband_msg_reply_body *rep,
    319 					 struct drm_dp_sideband_msg_tx *raw)
    320 {
    321 	int idx = 0;
    322 	u8 *buf = raw->msg;
    323 
    324 	buf[idx++] = (rep->reply_type & 0x1) << 7 | (rep->req_type & 0x7f);
    325 
    326 	raw->cur_len = idx;
    327 }
    328 
     329 /* this adds a chunk of the msg to the builder to assemble the final msg */
    330 static bool drm_dp_sideband_msg_build(struct drm_dp_sideband_msg_rx *msg,
    331 				      u8 *replybuf, u8 replybuflen, bool hdr)
    332 {
    333 	int ret;
     334 	u8 crc4 __unused;	/* XXX computed below but never checked against the received CRC */
    335 
    336 	if (hdr) {
    337 		u8 hdrlen;
    338 		struct drm_dp_sideband_msg_hdr recv_hdr;
    339 		ret = drm_dp_decode_sideband_msg_hdr(&recv_hdr, replybuf, replybuflen, &hdrlen);
    340 		if (ret == false) {
    341 			print_hex_dump(KERN_DEBUG, "failed hdr", DUMP_PREFIX_NONE, 16, 1, replybuf, replybuflen, false);
    342 			return false;
    343 		}
    344 
    345 		/*
    346 		 * ignore out-of-order messages or messages that are part of a
    347 		 * failed transaction
    348 		 */
    349 		if (!recv_hdr.somt && !msg->have_somt)
    350 			return false;
    351 
    352 		/* get length contained in this portion */
    353 		msg->curchunk_len = recv_hdr.msg_len;
    354 		msg->curchunk_hdrlen = hdrlen;
    355 
    356 		/* we have already gotten an somt - don't bother parsing */
    357 		if (recv_hdr.somt && msg->have_somt)
    358 			return false;
    359 
    360 		if (recv_hdr.somt) {
    361 			memcpy(&msg->initial_hdr, &recv_hdr, sizeof(struct drm_dp_sideband_msg_hdr));
    362 			msg->have_somt = true;
    363 		}
    364 		if (recv_hdr.eomt)
    365 			msg->have_eomt = true;
    366 
    367 		/* copy the bytes for the remainder of this header chunk */
    368 		msg->curchunk_idx = min(msg->curchunk_len, (u8)(replybuflen - hdrlen));
    369 		memcpy(&msg->chunk[0], replybuf + hdrlen, msg->curchunk_idx);
    370 	} else {
    371 		memcpy(&msg->chunk[msg->curchunk_idx], replybuf, replybuflen);
    372 		msg->curchunk_idx += replybuflen;
    373 	}
    374 
    375 	if (msg->curchunk_idx >= msg->curchunk_len) {
    376 		/* do CRC */
    377 		crc4 = drm_dp_msg_data_crc4(msg->chunk, msg->curchunk_len - 1);
    378 		/* copy chunk into bigger msg */
    379 		memcpy(&msg->msg[msg->curlen], msg->chunk, msg->curchunk_len - 1);
    380 		msg->curlen += msg->curchunk_len - 1;
    381 	}
    382 	return true;
    383 }
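
         /*
          * Reassembly summary: the hdr == true call for a transaction parses
          * the sideband header and seeds the chunk buffer, later calls append
          * raw bytes.  When a chunk is complete its trailing CRC byte is
          * dropped and only the payload is appended to msg->msg, so
          * msg->curlen counts message body bytes alone.
          */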
    384 
    385 static bool drm_dp_sideband_parse_link_address(struct drm_dp_sideband_msg_rx *raw,
    386 					       struct drm_dp_sideband_msg_reply_body *repmsg)
    387 {
    388 	int idx = 1;
    389 	int i;
    390 	memcpy(repmsg->u.link_addr.guid, &raw->msg[idx], 16);
    391 	idx += 16;
    392 	repmsg->u.link_addr.nports = raw->msg[idx] & 0xf;
    393 	idx++;
    394 	if (idx > raw->curlen)
    395 		goto fail_len;
    396 	for (i = 0; i < repmsg->u.link_addr.nports; i++) {
    397 		if (raw->msg[idx] & 0x80)
    398 			repmsg->u.link_addr.ports[i].input_port = 1;
    399 
    400 		repmsg->u.link_addr.ports[i].peer_device_type = (raw->msg[idx] >> 4) & 0x7;
    401 		repmsg->u.link_addr.ports[i].port_number = (raw->msg[idx] & 0xf);
    402 
    403 		idx++;
    404 		if (idx > raw->curlen)
    405 			goto fail_len;
    406 		repmsg->u.link_addr.ports[i].mcs = (raw->msg[idx] >> 7) & 0x1;
    407 		repmsg->u.link_addr.ports[i].ddps = (raw->msg[idx] >> 6) & 0x1;
    408 		if (repmsg->u.link_addr.ports[i].input_port == 0)
    409 			repmsg->u.link_addr.ports[i].legacy_device_plug_status = (raw->msg[idx] >> 5) & 0x1;
    410 		idx++;
    411 		if (idx > raw->curlen)
    412 			goto fail_len;
    413 		if (repmsg->u.link_addr.ports[i].input_port == 0) {
    414 			repmsg->u.link_addr.ports[i].dpcd_revision = (raw->msg[idx]);
    415 			idx++;
    416 			if (idx > raw->curlen)
    417 				goto fail_len;
    418 			memcpy(repmsg->u.link_addr.ports[i].peer_guid, &raw->msg[idx], 16);
    419 			idx += 16;
    420 			if (idx > raw->curlen)
    421 				goto fail_len;
    422 			repmsg->u.link_addr.ports[i].num_sdp_streams = (raw->msg[idx] >> 4) & 0xf;
    423 			repmsg->u.link_addr.ports[i].num_sdp_stream_sinks = (raw->msg[idx] & 0xf);
    424 			idx++;
    425 
    426 		}
    427 		if (idx > raw->curlen)
    428 			goto fail_len;
    429 	}
    430 
    431 	return true;
    432 fail_len:
    433 	DRM_DEBUG_KMS("link address reply parse length fail %d %d\n", idx, raw->curlen);
    434 	return false;
    435 }
    436 
    437 static bool drm_dp_sideband_parse_remote_dpcd_read(struct drm_dp_sideband_msg_rx *raw,
    438 						   struct drm_dp_sideband_msg_reply_body *repmsg)
    439 {
    440 	int idx = 1;
    441 	repmsg->u.remote_dpcd_read_ack.port_number = raw->msg[idx] & 0xf;
    442 	idx++;
    443 	if (idx > raw->curlen)
    444 		goto fail_len;
     445 	repmsg->u.remote_dpcd_read_ack.num_bytes = raw->msg[idx];
	idx++;	/* step past the byte count, as the other parsers do */
     446 	if (idx > raw->curlen)
     447 		goto fail_len;
    448 
    449 	memcpy(repmsg->u.remote_dpcd_read_ack.bytes, &raw->msg[idx], repmsg->u.remote_dpcd_read_ack.num_bytes);
    450 	return true;
    451 fail_len:
     452 	DRM_DEBUG_KMS("remote dpcd read reply parse length fail %d %d\n", idx, raw->curlen);
    453 	return false;
    454 }
    455 
    456 static bool drm_dp_sideband_parse_remote_dpcd_write(struct drm_dp_sideband_msg_rx *raw,
    457 						      struct drm_dp_sideband_msg_reply_body *repmsg)
    458 {
    459 	int idx = 1;
    460 	repmsg->u.remote_dpcd_write_ack.port_number = raw->msg[idx] & 0xf;
    461 	idx++;
    462 	if (idx > raw->curlen)
    463 		goto fail_len;
    464 	return true;
    465 fail_len:
    466 	DRM_DEBUG_KMS("parse length fail %d %d\n", idx, raw->curlen);
    467 	return false;
    468 }
    469 
    470 static bool drm_dp_sideband_parse_remote_i2c_read_ack(struct drm_dp_sideband_msg_rx *raw,
    471 						      struct drm_dp_sideband_msg_reply_body *repmsg)
    472 {
    473 	int idx = 1;
    474 
    475 	repmsg->u.remote_i2c_read_ack.port_number = (raw->msg[idx] & 0xf);
    476 	idx++;
    477 	if (idx > raw->curlen)
    478 		goto fail_len;
    479 	repmsg->u.remote_i2c_read_ack.num_bytes = raw->msg[idx];
    480 	idx++;
     481 	/* TODO: bounds-check num_bytes against raw->curlen before the copy */
    482 	memcpy(repmsg->u.remote_i2c_read_ack.bytes, &raw->msg[idx], repmsg->u.remote_i2c_read_ack.num_bytes);
    483 	return true;
    484 fail_len:
    485 	DRM_DEBUG_KMS("remote i2c reply parse length fail %d %d\n", idx, raw->curlen);
    486 	return false;
    487 }
    488 
    489 static bool drm_dp_sideband_parse_enum_path_resources_ack(struct drm_dp_sideband_msg_rx *raw,
    490 							  struct drm_dp_sideband_msg_reply_body *repmsg)
    491 {
    492 	int idx = 1;
    493 	repmsg->u.path_resources.port_number = (raw->msg[idx] >> 4) & 0xf;
    494 	idx++;
    495 	if (idx > raw->curlen)
    496 		goto fail_len;
    497 	repmsg->u.path_resources.full_payload_bw_number = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
    498 	idx += 2;
    499 	if (idx > raw->curlen)
    500 		goto fail_len;
    501 	repmsg->u.path_resources.avail_payload_bw_number = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
    502 	idx += 2;
    503 	if (idx > raw->curlen)
    504 		goto fail_len;
    505 	return true;
    506 fail_len:
    507 	DRM_DEBUG_KMS("enum resource parse length fail %d %d\n", idx, raw->curlen);
    508 	return false;
    509 }
    510 
    511 static bool drm_dp_sideband_parse_allocate_payload_ack(struct drm_dp_sideband_msg_rx *raw,
    512 							  struct drm_dp_sideband_msg_reply_body *repmsg)
    513 {
    514 	int idx = 1;
    515 	repmsg->u.allocate_payload.port_number = (raw->msg[idx] >> 4) & 0xf;
    516 	idx++;
    517 	if (idx > raw->curlen)
    518 		goto fail_len;
    519 	repmsg->u.allocate_payload.vcpi = raw->msg[idx];
    520 	idx++;
    521 	if (idx > raw->curlen)
    522 		goto fail_len;
    523 	repmsg->u.allocate_payload.allocated_pbn = (raw->msg[idx] << 8) | (raw->msg[idx+1]);
    524 	idx += 2;
    525 	if (idx > raw->curlen)
    526 		goto fail_len;
    527 	return true;
    528 fail_len:
    529 	DRM_DEBUG_KMS("allocate payload parse length fail %d %d\n", idx, raw->curlen);
    530 	return false;
    531 }
    532 
    533 static bool drm_dp_sideband_parse_query_payload_ack(struct drm_dp_sideband_msg_rx *raw,
    534 						    struct drm_dp_sideband_msg_reply_body *repmsg)
    535 {
    536 	int idx = 1;
    537 	repmsg->u.query_payload.port_number = (raw->msg[idx] >> 4) & 0xf;
    538 	idx++;
    539 	if (idx > raw->curlen)
    540 		goto fail_len;
    541 	repmsg->u.query_payload.allocated_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]);
    542 	idx += 2;
    543 	if (idx > raw->curlen)
    544 		goto fail_len;
    545 	return true;
    546 fail_len:
    547 	DRM_DEBUG_KMS("query payload parse length fail %d %d\n", idx, raw->curlen);
    548 	return false;
    549 }
    550 
    551 static bool drm_dp_sideband_parse_reply(struct drm_dp_sideband_msg_rx *raw,
    552 					struct drm_dp_sideband_msg_reply_body *msg)
    553 {
    554 	memset(msg, 0, sizeof(*msg));
    555 	msg->reply_type = (raw->msg[0] & 0x80) >> 7;
    556 	msg->req_type = (raw->msg[0] & 0x7f);
    557 
    558 	if (msg->reply_type) {
    559 		memcpy(msg->u.nak.guid, &raw->msg[1], 16);
    560 		msg->u.nak.reason = raw->msg[17];
    561 		msg->u.nak.nak_data = raw->msg[18];
    562 		return false;
    563 	}
    564 
    565 	switch (msg->req_type) {
    566 	case DP_LINK_ADDRESS:
    567 		return drm_dp_sideband_parse_link_address(raw, msg);
    568 	case DP_QUERY_PAYLOAD:
    569 		return drm_dp_sideband_parse_query_payload_ack(raw, msg);
    570 	case DP_REMOTE_DPCD_READ:
    571 		return drm_dp_sideband_parse_remote_dpcd_read(raw, msg);
    572 	case DP_REMOTE_DPCD_WRITE:
    573 		return drm_dp_sideband_parse_remote_dpcd_write(raw, msg);
    574 	case DP_REMOTE_I2C_READ:
    575 		return drm_dp_sideband_parse_remote_i2c_read_ack(raw, msg);
    576 	case DP_ENUM_PATH_RESOURCES:
    577 		return drm_dp_sideband_parse_enum_path_resources_ack(raw, msg);
    578 	case DP_ALLOCATE_PAYLOAD:
    579 		return drm_dp_sideband_parse_allocate_payload_ack(raw, msg);
    580 	default:
    581 		DRM_ERROR("Got unknown reply 0x%02x\n", msg->req_type);
    582 		return false;
    583 	}
    584 }
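
         /*
          * reply_type 1 is a NAK: the 16-byte guid, reason code and one byte
          * of NAK data are captured above and false is returned; only ACKs
          * (reply_type 0) are dispatched to the per-request parsers.
          */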
    585 
    586 static bool drm_dp_sideband_parse_connection_status_notify(struct drm_dp_sideband_msg_rx *raw,
    587 							   struct drm_dp_sideband_msg_req_body *msg)
    588 {
    589 	int idx = 1;
    590 
    591 	msg->u.conn_stat.port_number = (raw->msg[idx] & 0xf0) >> 4;
    592 	idx++;
    593 	if (idx > raw->curlen)
    594 		goto fail_len;
    595 
    596 	memcpy(msg->u.conn_stat.guid, &raw->msg[idx], 16);
    597 	idx += 16;
    598 	if (idx > raw->curlen)
    599 		goto fail_len;
    600 
    601 	msg->u.conn_stat.legacy_device_plug_status = (raw->msg[idx] >> 6) & 0x1;
    602 	msg->u.conn_stat.displayport_device_plug_status = (raw->msg[idx] >> 5) & 0x1;
    603 	msg->u.conn_stat.message_capability_status = (raw->msg[idx] >> 4) & 0x1;
    604 	msg->u.conn_stat.input_port = (raw->msg[idx] >> 3) & 0x1;
    605 	msg->u.conn_stat.peer_device_type = (raw->msg[idx] & 0x7);
    606 	idx++;
    607 	return true;
    608 fail_len:
    609 	DRM_DEBUG_KMS("connection status reply parse length fail %d %d\n", idx, raw->curlen);
    610 	return false;
    611 }
    612 
    613 static bool drm_dp_sideband_parse_resource_status_notify(struct drm_dp_sideband_msg_rx *raw,
    614 							   struct drm_dp_sideband_msg_req_body *msg)
    615 {
    616 	int idx = 1;
    617 
    618 	msg->u.resource_stat.port_number = (raw->msg[idx] & 0xf0) >> 4;
    619 	idx++;
    620 	if (idx > raw->curlen)
    621 		goto fail_len;
    622 
    623 	memcpy(msg->u.resource_stat.guid, &raw->msg[idx], 16);
    624 	idx += 16;
    625 	if (idx > raw->curlen)
    626 		goto fail_len;
    627 
    628 	msg->u.resource_stat.available_pbn = (raw->msg[idx] << 8) | (raw->msg[idx + 1]);
    629 	idx++;
    630 	return true;
    631 fail_len:
    632 	DRM_DEBUG_KMS("resource status reply parse length fail %d %d\n", idx, raw->curlen);
    633 	return false;
    634 }
    635 
    636 static bool drm_dp_sideband_parse_req(struct drm_dp_sideband_msg_rx *raw,
    637 				      struct drm_dp_sideband_msg_req_body *msg)
    638 {
    639 	memset(msg, 0, sizeof(*msg));
    640 	msg->req_type = (raw->msg[0] & 0x7f);
    641 
    642 	switch (msg->req_type) {
    643 	case DP_CONNECTION_STATUS_NOTIFY:
    644 		return drm_dp_sideband_parse_connection_status_notify(raw, msg);
    645 	case DP_RESOURCE_STATUS_NOTIFY:
    646 		return drm_dp_sideband_parse_resource_status_notify(raw, msg);
    647 	default:
    648 		DRM_ERROR("Got unknown request 0x%02x\n", msg->req_type);
    649 		return false;
    650 	}
    651 }
    652 
    653 static int build_dpcd_write(struct drm_dp_sideband_msg_tx *msg, u8 port_num, u32 offset, u8 num_bytes, u8 *bytes)
    654 {
    655 	struct drm_dp_sideband_msg_req_body req;
    656 
    657 	req.req_type = DP_REMOTE_DPCD_WRITE;
    658 	req.u.dpcd_write.port_number = port_num;
    659 	req.u.dpcd_write.dpcd_address = offset;
    660 	req.u.dpcd_write.num_bytes = num_bytes;
    661 	req.u.dpcd_write.bytes = bytes;
    662 	drm_dp_encode_sideband_req(&req, msg);
    663 
    664 	return 0;
    665 }
    666 
    667 static int build_link_address(struct drm_dp_sideband_msg_tx *msg)
    668 {
    669 	struct drm_dp_sideband_msg_req_body req;
    670 
    671 	req.req_type = DP_LINK_ADDRESS;
    672 	drm_dp_encode_sideband_req(&req, msg);
    673 	return 0;
    674 }
    675 
    676 static int build_enum_path_resources(struct drm_dp_sideband_msg_tx *msg, int port_num)
    677 {
    678 	struct drm_dp_sideband_msg_req_body req;
    679 
    680 	req.req_type = DP_ENUM_PATH_RESOURCES;
    681 	req.u.port_num.port_number = port_num;
    682 	drm_dp_encode_sideband_req(&req, msg);
    683 	msg->path_msg = true;
    684 	return 0;
    685 }
    686 
    687 static int build_allocate_payload(struct drm_dp_sideband_msg_tx *msg, int port_num,
    688 				  u8 vcpi, uint16_t pbn)
    689 {
    690 	struct drm_dp_sideband_msg_req_body req;
    691 	memset(&req, 0, sizeof(req));
    692 	req.req_type = DP_ALLOCATE_PAYLOAD;
    693 	req.u.allocate_payload.port_number = port_num;
    694 	req.u.allocate_payload.vcpi = vcpi;
    695 	req.u.allocate_payload.pbn = pbn;
    696 	drm_dp_encode_sideband_req(&req, msg);
    697 	msg->path_msg = true;
    698 	return 0;
    699 }
    700 
    701 static int drm_dp_mst_assign_payload_id(struct drm_dp_mst_topology_mgr *mgr,
    702 					struct drm_dp_vcpi *vcpi)
    703 {
    704 	int ret, vcpi_ret;
    705 
    706 	mutex_lock(&mgr->payload_lock);
    707 	ret = find_first_zero_bit(&mgr->payload_mask, mgr->max_payloads + 1);
    708 	if (ret > mgr->max_payloads) {
    709 		ret = -EINVAL;
    710 		DRM_DEBUG_KMS("out of payload ids %d\n", ret);
    711 		goto out_unlock;
    712 	}
    713 
    714 	vcpi_ret = find_first_zero_bit(&mgr->vcpi_mask, mgr->max_payloads + 1);
    715 	if (vcpi_ret > mgr->max_payloads) {
    716 		ret = -EINVAL;
    717 		DRM_DEBUG_KMS("out of vcpi ids %d\n", ret);
    718 		goto out_unlock;
    719 	}
    720 
    721 	set_bit(ret, &mgr->payload_mask);
    722 	set_bit(vcpi_ret, &mgr->vcpi_mask);
    723 	vcpi->vcpi = vcpi_ret + 1;
    724 	mgr->proposed_vcpis[ret - 1] = vcpi;
    725 out_unlock:
    726 	mutex_unlock(&mgr->payload_lock);
    727 	return ret;
    728 }
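
         /*
          * Both id spaces above are handed out 1-based: the returned payload
          * id indexes proposed_vcpis[ret - 1] and the VCPI is vcpi_ret + 1.
          * This relies on bit 0 of payload_mask and vcpi_mask being pre-set
          * when the manager is enabled, so the bit searches never return 0.
          */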
    729 
    730 static void drm_dp_mst_put_payload_id(struct drm_dp_mst_topology_mgr *mgr,
    731 				      int vcpi)
    732 {
    733 	int i;
    734 	if (vcpi == 0)
    735 		return;
    736 
    737 	mutex_lock(&mgr->payload_lock);
    738 	DRM_DEBUG_KMS("putting payload %d\n", vcpi);
    739 	clear_bit(vcpi - 1, &mgr->vcpi_mask);
    740 
    741 	for (i = 0; i < mgr->max_payloads; i++) {
    742 		if (mgr->proposed_vcpis[i])
    743 			if (mgr->proposed_vcpis[i]->vcpi == vcpi) {
    744 				mgr->proposed_vcpis[i] = NULL;
    745 				clear_bit(i + 1, &mgr->payload_mask);
    746 			}
    747 	}
    748 	mutex_unlock(&mgr->payload_lock);
    749 }
    750 
    751 static bool check_txmsg_state(struct drm_dp_mst_topology_mgr *mgr,
    752 			      struct drm_dp_sideband_msg_tx *txmsg)
    753 {
    754 	bool ret;
    755 
    756 	/*
    757 	 * All updates to txmsg->state are protected by mgr->qlock, and the two
    758 	 * cases we check here are terminal states. For those the barriers
    759 	 * provided by the wake_up/wait_event pair are enough.
    760 	 */
    761 	ret = (txmsg->state == DRM_DP_SIDEBAND_TX_RX ||
    762 	       txmsg->state == DRM_DP_SIDEBAND_TX_TIMEOUT);
    763 	return ret;
    764 }
    765 
    766 static int drm_dp_mst_wait_tx_reply(struct drm_dp_mst_branch *mstb,
    767 				    struct drm_dp_sideband_msg_tx *txmsg)
    768 {
    769 	struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
    770 	int ret;
    771 
    772 #ifdef __NetBSD__
    773 	mutex_lock(&mstb->mgr->qlock);
    774 	DRM_TIMED_WAIT_UNTIL(ret, &mgr->tx_waitq, &mstb->mgr->qlock, 4*HZ,
    775 	    check_txmsg_state(mgr, txmsg));
    776 #else
    777 	ret = wait_event_timeout(mgr->tx_waitq,
    778 				 check_txmsg_state(mgr, txmsg),
    779 				 (4 * HZ));
    780 	mutex_lock(&mstb->mgr->qlock);
    781 #endif
    782 	if (ret > 0) {
    783 		if (txmsg->state == DRM_DP_SIDEBAND_TX_TIMEOUT) {
    784 			ret = -EIO;
    785 			goto out;
    786 		}
    787 	} else {
    788 		DRM_DEBUG_KMS("timedout msg send %p %d %d\n", txmsg, txmsg->state, txmsg->seqno);
    789 
    790 		/* dump some state */
    791 		ret = -EIO;
    792 
    793 		/* remove from q */
    794 		if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED ||
    795 		    txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND) {
    796 			list_del(&txmsg->next);
    797 		}
    798 
    799 		if (txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND ||
    800 		    txmsg->state == DRM_DP_SIDEBAND_TX_SENT) {
    801 			mstb->tx_slots[txmsg->seqno] = NULL;
    802 		}
    803 	}
    804 out:
    805 	mutex_unlock(&mgr->qlock);
    806 
    807 	return ret;
    808 }
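
         /*
          * On timeout the txmsg is unlinked from whatever still references it
          * (the down queue and/or the per-branch tx slot, depending on how
          * far the send got) before returning -EIO, so the caller can free it
          * unconditionally.
          */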
    809 
    810 static struct drm_dp_mst_branch *drm_dp_add_mst_branch_device(u8 lct, u8 *rad)
    811 {
    812 	struct drm_dp_mst_branch *mstb;
    813 
    814 	mstb = kzalloc(sizeof(*mstb), GFP_KERNEL);
    815 	if (!mstb)
    816 		return NULL;
    817 
    818 	mstb->lct = lct;
    819 	if (lct > 1)
    820 		memcpy(mstb->rad, rad, lct / 2);
    821 	INIT_LIST_HEAD(&mstb->ports);
    822 	kref_init(&mstb->kref);
    823 	return mstb;
    824 }
    825 
    826 static void drm_dp_free_mst_port(struct kref *kref);
    827 
    828 static void drm_dp_free_mst_branch_device(struct kref *kref)
    829 {
    830 	struct drm_dp_mst_branch *mstb = container_of(kref, struct drm_dp_mst_branch, kref);
    831 	if (mstb->port_parent) {
    832 		if (list_empty(&mstb->port_parent->next))
    833 			kref_put(&mstb->port_parent->kref, drm_dp_free_mst_port);
    834 	}
    835 	kfree(mstb);
    836 }
    837 
    838 static void drm_dp_destroy_mst_branch_device(struct kref *kref)
    839 {
    840 	struct drm_dp_mst_branch *mstb = container_of(kref, struct drm_dp_mst_branch, kref);
    841 	struct drm_dp_mst_port *port, *tmp;
    842 	bool wake_tx = false;
    843 
    844 	/*
    845 	 * init kref again to be used by ports to remove mst branch when it is
    846 	 * not needed anymore
    847 	 */
    848 	kref_init(kref);
    849 
    850 	if (mstb->port_parent && list_empty(&mstb->port_parent->next))
    851 		kref_get(&mstb->port_parent->kref);
    852 
    853 	/*
    854 	 * destroy all ports - don't need lock
    855 	 * as there are no more references to the mst branch
    856 	 * device at this point.
    857 	 */
    858 	list_for_each_entry_safe(port, tmp, &mstb->ports, next) {
    859 		list_del(&port->next);
    860 		drm_dp_put_port(port);
    861 	}
    862 
    863 	/* drop any tx slots msg */
    864 	mutex_lock(&mstb->mgr->qlock);
    865 	if (mstb->tx_slots[0]) {
    866 		mstb->tx_slots[0]->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
    867 		mstb->tx_slots[0] = NULL;
    868 		wake_tx = true;
    869 	}
    870 	if (mstb->tx_slots[1]) {
    871 		mstb->tx_slots[1]->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
    872 		mstb->tx_slots[1] = NULL;
    873 		wake_tx = true;
    874 	}
    875 #ifdef __NetBSD__
    876 	if (wake_tx)
    877 		DRM_WAKEUP_ONE(&mstb->mgr->tx_waitq, &mstb->mgr->qlock);
    878 	mutex_unlock(&mstb->mgr->qlock);
    879 #else
    880 	mutex_unlock(&mstb->mgr->qlock);
    881 
    882 	if (wake_tx)
    883 		wake_up(&mstb->mgr->tx_waitq);
    884 #endif
    885 
    886 	kref_put(kref, drm_dp_free_mst_branch_device);
    887 }
    888 
    889 static void drm_dp_put_mst_branch_device(struct drm_dp_mst_branch *mstb)
    890 {
    891 	kref_put(&mstb->kref, drm_dp_destroy_mst_branch_device);
    892 }
    893 
    894 
    895 static void drm_dp_port_teardown_pdt(struct drm_dp_mst_port *port, int old_pdt)
    896 {
    897 	struct drm_dp_mst_branch *mstb;
    898 
    899 	switch (old_pdt) {
    900 	case DP_PEER_DEVICE_DP_LEGACY_CONV:
    901 	case DP_PEER_DEVICE_SST_SINK:
    902 		/* remove i2c over sideband */
    903 		drm_dp_mst_unregister_i2c_bus(&port->aux);
    904 		break;
    905 	case DP_PEER_DEVICE_MST_BRANCHING:
    906 		mstb = port->mstb;
    907 		port->mstb = NULL;
    908 		drm_dp_put_mst_branch_device(mstb);
    909 		break;
    910 	}
    911 }
    912 
    913 static void drm_dp_destroy_port(struct kref *kref)
    914 {
    915 	struct drm_dp_mst_port *port = container_of(kref, struct drm_dp_mst_port, kref);
    916 	struct drm_dp_mst_topology_mgr *mgr = port->mgr;
    917 
    918 	if (!port->input) {
    919 		port->vcpi.num_slots = 0;
    920 
    921 		kfree(port->cached_edid);
    922 
    923 		/*
    924 		 * The only time we don't have a connector
    925 		 * on an output port is if the connector init
    926 		 * fails.
    927 		 */
    928 		if (port->connector) {
    929 			/* we can't destroy the connector here, as
    930 			 * we might be holding the mode_config.mutex
    931 			 * from an EDID retrieval */
    932 
    933 			mutex_lock(&mgr->destroy_connector_lock);
    934 			kref_get(&port->parent->kref);
    935 			list_add(&port->next, &mgr->destroy_connector_list);
    936 			mutex_unlock(&mgr->destroy_connector_lock);
    937 			schedule_work(&mgr->destroy_connector_work);
    938 			return;
    939 		}
     940 		/* no need to clean up the vcpi:
     941 		 * without a connector we never set one up */
    942 		drm_dp_port_teardown_pdt(port, port->pdt);
    943 		port->pdt = DP_PEER_DEVICE_NONE;
    944 	}
    945 	kfree(port);
    946 }
    947 
    948 static void drm_dp_put_port(struct drm_dp_mst_port *port)
    949 {
    950 	kref_put(&port->kref, drm_dp_destroy_port);
    951 }
    952 
    953 static struct drm_dp_mst_branch *drm_dp_mst_get_validated_mstb_ref_locked(struct drm_dp_mst_branch *mstb, struct drm_dp_mst_branch *to_find)
    954 {
    955 	struct drm_dp_mst_port *port;
    956 	struct drm_dp_mst_branch *rmstb;
    957 	if (to_find == mstb) {
    958 		kref_get(&mstb->kref);
    959 		return mstb;
    960 	}
    961 	list_for_each_entry(port, &mstb->ports, next) {
    962 		if (port->mstb) {
    963 			rmstb = drm_dp_mst_get_validated_mstb_ref_locked(port->mstb, to_find);
    964 			if (rmstb)
    965 				return rmstb;
    966 		}
    967 	}
    968 	return NULL;
    969 }
    970 
    971 static struct drm_dp_mst_branch *drm_dp_get_validated_mstb_ref(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_branch *mstb)
    972 {
    973 	struct drm_dp_mst_branch *rmstb = NULL;
    974 	mutex_lock(&mgr->lock);
    975 	if (mgr->mst_primary)
    976 		rmstb = drm_dp_mst_get_validated_mstb_ref_locked(mgr->mst_primary, mstb);
    977 	mutex_unlock(&mgr->lock);
    978 	return rmstb;
    979 }
    980 
    981 static struct drm_dp_mst_port *drm_dp_mst_get_port_ref_locked(struct drm_dp_mst_branch *mstb, struct drm_dp_mst_port *to_find)
    982 {
    983 	struct drm_dp_mst_port *port, *mport;
    984 
    985 	list_for_each_entry(port, &mstb->ports, next) {
    986 		if (port == to_find) {
    987 			kref_get(&port->kref);
    988 			return port;
    989 		}
    990 		if (port->mstb) {
    991 			mport = drm_dp_mst_get_port_ref_locked(port->mstb, to_find);
    992 			if (mport)
    993 				return mport;
    994 		}
    995 	}
    996 	return NULL;
    997 }
    998 
    999 static struct drm_dp_mst_port *drm_dp_get_validated_port_ref(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
   1000 {
   1001 	struct drm_dp_mst_port *rport = NULL;
   1002 	mutex_lock(&mgr->lock);
   1003 	if (mgr->mst_primary)
   1004 		rport = drm_dp_mst_get_port_ref_locked(mgr->mst_primary, port);
   1005 	mutex_unlock(&mgr->lock);
   1006 	return rport;
   1007 }
   1008 
   1009 static struct drm_dp_mst_port *drm_dp_get_port(struct drm_dp_mst_branch *mstb, u8 port_num)
   1010 {
   1011 	struct drm_dp_mst_port *port;
   1012 
   1013 	list_for_each_entry(port, &mstb->ports, next) {
   1014 		if (port->port_num == port_num) {
   1015 			kref_get(&port->kref);
   1016 			return port;
   1017 		}
   1018 	}
   1019 
   1020 	return NULL;
   1021 }
   1022 
   1023 /*
   1024  * calculate a new RAD for this MST branch device
   1025  * if parent has an LCT of 2 then it has 1 nibble of RAD,
   1026  * if parent has an LCT of 3 then it has 2 nibbles of RAD,
   1027  */
   1028 static u8 drm_dp_calculate_rad(struct drm_dp_mst_port *port,
   1029 				 u8 *rad)
   1030 {
   1031 	int parent_lct = port->parent->lct;
   1032 	int shift = 4;
   1033 	int idx = (parent_lct - 1) / 2;
   1034 	if (parent_lct > 1) {
   1035 		memcpy(rad, port->parent->rad, idx + 1);
   1036 		shift = (parent_lct % 2) ? 4 : 0;
   1037 	} else
   1038 		rad[0] = 0;
   1039 
   1040 	rad[idx] |= port->port_num << shift;
   1041 	return parent_lct + 1;
   1042 }
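
         /*
          * Worked example of the packing above: a port 3 hanging off a branch
          * with lct 2, rad[0] 0x10 (i.e. port 1 behind the primary) yields
          * lct 3 and rad[0] 0x13.  Hop nibbles fill high nibble first, so hop
          * N lives in rad[(N - 1) / 2], in the high nibble when N is odd.
          */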
   1043 
   1044 /*
    1045  * returns true if a link address message should be sent for a new mstb
   1046  */
   1047 static bool drm_dp_port_setup_pdt(struct drm_dp_mst_port *port)
   1048 {
   1049 	int ret __unused;
   1050 	u8 rad[6], lct;
   1051 	bool send_link = false;
   1052 	switch (port->pdt) {
   1053 	case DP_PEER_DEVICE_DP_LEGACY_CONV:
   1054 	case DP_PEER_DEVICE_SST_SINK:
   1055 		/* add i2c over sideband */
   1056 		ret = drm_dp_mst_register_i2c_bus(&port->aux);
   1057 		break;
   1058 	case DP_PEER_DEVICE_MST_BRANCHING:
   1059 		lct = drm_dp_calculate_rad(port, rad);
   1060 
    1061 		port->mstb = drm_dp_add_mst_branch_device(lct, rad);
		if (port->mstb) {	/* allocation can fail; don't deref NULL */
    1062 			port->mstb->mgr = port->mgr;
    1063 			port->mstb->port_parent = port;
    1064 
    1065 			send_link = true;
		}
    1066 		break;
   1067 	}
   1068 	return send_link;
   1069 }
   1070 
   1071 static void drm_dp_check_mstb_guid(struct drm_dp_mst_branch *mstb, u8 *guid)
   1072 {
   1073 	int ret __unused;
   1074 
   1075 	memcpy(mstb->guid, guid, 16);
   1076 
   1077 	if (!drm_dp_validate_guid(mstb->mgr, mstb->guid)) {
   1078 		if (mstb->port_parent) {
   1079 			ret = drm_dp_send_dpcd_write(
   1080 					mstb->mgr,
   1081 					mstb->port_parent,
   1082 					DP_GUID,
   1083 					16,
   1084 					mstb->guid);
   1085 		} else {
   1086 
   1087 			ret = drm_dp_dpcd_write(
   1088 					mstb->mgr->aux,
   1089 					DP_GUID,
   1090 					mstb->guid,
   1091 					16);
   1092 		}
   1093 	}
   1094 }
   1095 
   1096 static void build_mst_prop_path(const struct drm_dp_mst_branch *mstb,
   1097 				int pnum,
   1098 				char *proppath,
   1099 				size_t proppath_size)
   1100 {
   1101 	int i;
   1102 	char temp[8];
   1103 	snprintf(proppath, proppath_size, "mst:%d", mstb->mgr->conn_base_id);
   1104 	for (i = 0; i < (mstb->lct - 1); i++) {
   1105 		int shift = (i % 2) ? 0 : 4;
   1106 		int port_num = (mstb->rad[i / 2] >> shift) & 0xf;
   1107 		snprintf(temp, sizeof(temp), "-%d", port_num);
   1108 		strlcat(proppath, temp, proppath_size);
   1109 	}
   1110 	snprintf(temp, sizeof(temp), "-%d", pnum);
   1111 	strlcat(proppath, temp, proppath_size);
   1112 }
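
         /*
          * Example (hypothetical numbers, format per the snprintf calls
          * above): conn_base_id 30 with a sink two hops down through ports 1
          * and 8 yields the property path "mst:30-1-8".
          */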
   1113 
   1114 static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
   1115 			    struct device *dev,
   1116 			    struct drm_dp_link_addr_reply_port *port_msg)
   1117 {
   1118 	struct drm_dp_mst_port *port;
   1119 	bool ret;
   1120 	bool created = false;
   1121 	int old_pdt = 0;
   1122 	int old_ddps = 0;
   1123 	port = drm_dp_get_port(mstb, port_msg->port_number);
   1124 	if (!port) {
   1125 		port = kzalloc(sizeof(*port), GFP_KERNEL);
   1126 		if (!port)
   1127 			return;
   1128 		kref_init(&port->kref);
   1129 		port->parent = mstb;
   1130 		port->port_num = port_msg->port_number;
   1131 		port->mgr = mstb->mgr;
   1132 		port->aux.name = "DPMST";
   1133 		port->aux.dev = dev;
   1134 		created = true;
   1135 	} else {
   1136 		old_pdt = port->pdt;
   1137 		old_ddps = port->ddps;
   1138 	}
   1139 
   1140 	port->pdt = port_msg->peer_device_type;
   1141 	port->input = port_msg->input_port;
   1142 	port->mcs = port_msg->mcs;
   1143 	port->ddps = port_msg->ddps;
   1144 	port->ldps = port_msg->legacy_device_plug_status;
   1145 	port->dpcd_rev = port_msg->dpcd_revision;
   1146 	port->num_sdp_streams = port_msg->num_sdp_streams;
   1147 	port->num_sdp_stream_sinks = port_msg->num_sdp_stream_sinks;
   1148 
   1149 	/* manage mstb port lists with mgr lock - take a reference
   1150 	   for this list */
   1151 	if (created) {
   1152 		mutex_lock(&mstb->mgr->lock);
   1153 		kref_get(&port->kref);
   1154 		list_add(&port->next, &mstb->ports);
   1155 		mutex_unlock(&mstb->mgr->lock);
   1156 	}
   1157 
   1158 	if (old_ddps != port->ddps) {
   1159 		if (port->ddps) {
   1160 			if (!port->input)
   1161 				drm_dp_send_enum_path_resources(mstb->mgr, mstb, port);
   1162 		} else {
   1163 			port->available_pbn = 0;
    1164 		}
   1165 	}
   1166 
   1167 	if (old_pdt != port->pdt && !port->input) {
   1168 		drm_dp_port_teardown_pdt(port, old_pdt);
   1169 
   1170 		ret = drm_dp_port_setup_pdt(port);
   1171 		if (ret == true)
   1172 			drm_dp_send_link_address(mstb->mgr, port->mstb);
   1173 	}
   1174 
   1175 	if (created && !port->input) {
   1176 		char proppath[255];
   1177 
   1178 		build_mst_prop_path(mstb, port->port_num, proppath, sizeof(proppath));
   1179 		port->connector = (*mstb->mgr->cbs->add_connector)(mstb->mgr, port, proppath);
   1180 		if (!port->connector) {
   1181 			/* remove it from the port list */
   1182 			mutex_lock(&mstb->mgr->lock);
   1183 			list_del(&port->next);
   1184 			mutex_unlock(&mstb->mgr->lock);
   1185 			/* drop port list reference */
   1186 			drm_dp_put_port(port);
   1187 			goto out;
   1188 		}
   1189 		if ((port->pdt == DP_PEER_DEVICE_DP_LEGACY_CONV ||
   1190 		     port->pdt == DP_PEER_DEVICE_SST_SINK) &&
   1191 		    port->port_num >= DP_MST_LOGICAL_PORT_0) {
   1192 			port->cached_edid = drm_get_edid(port->connector, &port->aux.ddc);
   1193 			drm_mode_connector_set_tile_property(port->connector);
   1194 		}
   1195 		(*mstb->mgr->cbs->register_connector)(port->connector);
   1196 	}
   1197 
   1198 out:
   1199 	/* put reference to this port */
   1200 	drm_dp_put_port(port);
   1201 }
   1202 
   1203 static void drm_dp_update_port(struct drm_dp_mst_branch *mstb,
   1204 			       struct drm_dp_connection_status_notify *conn_stat)
   1205 {
   1206 	struct drm_dp_mst_port *port;
   1207 	int old_pdt;
   1208 	int old_ddps;
   1209 	bool dowork = false;
   1210 	port = drm_dp_get_port(mstb, conn_stat->port_number);
   1211 	if (!port)
   1212 		return;
   1213 
   1214 	old_ddps = port->ddps;
   1215 	old_pdt = port->pdt;
   1216 	port->pdt = conn_stat->peer_device_type;
   1217 	port->mcs = conn_stat->message_capability_status;
   1218 	port->ldps = conn_stat->legacy_device_plug_status;
   1219 	port->ddps = conn_stat->displayport_device_plug_status;
   1220 
   1221 	if (old_ddps != port->ddps) {
   1222 		if (port->ddps) {
   1223 			dowork = true;
   1224 		} else {
   1225 			port->available_pbn = 0;
   1226 		}
   1227 	}
   1228 	if (old_pdt != port->pdt && !port->input) {
   1229 		drm_dp_port_teardown_pdt(port, old_pdt);
   1230 
   1231 		if (drm_dp_port_setup_pdt(port))
   1232 			dowork = true;
   1233 	}
   1234 
   1235 	drm_dp_put_port(port);
   1236 	if (dowork)
   1237 		queue_work(system_long_wq, &mstb->mgr->work);
   1238 
   1239 }
   1240 
   1241 static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_topology_mgr *mgr,
   1242 							       u8 lct, u8 *rad)
   1243 {
   1244 	struct drm_dp_mst_branch *mstb;
   1245 	struct drm_dp_mst_port *port;
   1246 	int i;
   1247 	/* find the port by iterating down */
   1248 
   1249 	mutex_lock(&mgr->lock);
   1250 	mstb = mgr->mst_primary;
   1251 
   1252 	for (i = 0; i < lct - 1; i++) {
   1253 		int shift = (i % 2) ? 0 : 4;
   1254 		int port_num = (rad[i / 2] >> shift) & 0xf;
   1255 
   1256 		list_for_each_entry(port, &mstb->ports, next) {
   1257 			if (port->port_num == port_num) {
   1258 				mstb = port->mstb;
   1259 				if (!mstb) {
   1260 					DRM_ERROR("failed to lookup MSTB with lct %d, rad %02x\n", lct, rad[0]);
   1261 					goto out;
   1262 				}
   1263 
   1264 				break;
   1265 			}
   1266 		}
   1267 	}
   1268 	kref_get(&mstb->kref);
   1269 out:
   1270 	mutex_unlock(&mgr->lock);
   1271 	return mstb;
   1272 }
   1273 
   1274 static struct drm_dp_mst_branch *get_mst_branch_device_by_guid_helper(
   1275 	struct drm_dp_mst_branch *mstb,
   1276 	uint8_t *guid)
   1277 {
   1278 	struct drm_dp_mst_branch *found_mstb;
   1279 	struct drm_dp_mst_port *port;
   1280 
   1281 	if (memcmp(mstb->guid, guid, 16) == 0)
   1282 		return mstb;
   1283 
   1284 
   1285 	list_for_each_entry(port, &mstb->ports, next) {
   1286 		if (!port->mstb)
   1287 			continue;
   1288 
   1289 		found_mstb = get_mst_branch_device_by_guid_helper(port->mstb, guid);
   1290 
   1291 		if (found_mstb)
   1292 			return found_mstb;
   1293 	}
   1294 
   1295 	return NULL;
   1296 }
   1297 
   1298 static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device_by_guid(
   1299 	struct drm_dp_mst_topology_mgr *mgr,
   1300 	uint8_t *guid)
   1301 {
   1302 	struct drm_dp_mst_branch *mstb;
   1303 
   1304 	/* find the port by iterating down */
   1305 	mutex_lock(&mgr->lock);
   1306 
   1307 	mstb = get_mst_branch_device_by_guid_helper(mgr->mst_primary, guid);
   1308 
   1309 	if (mstb)
   1310 		kref_get(&mstb->kref);
   1311 
   1312 	mutex_unlock(&mgr->lock);
   1313 	return mstb;
   1314 }
   1315 
   1316 static void drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
   1317 					       struct drm_dp_mst_branch *mstb)
   1318 {
   1319 	struct drm_dp_mst_port *port;
   1320 	struct drm_dp_mst_branch *mstb_child;
   1321 	if (!mstb->link_address_sent)
   1322 		drm_dp_send_link_address(mgr, mstb);
   1323 
   1324 	list_for_each_entry(port, &mstb->ports, next) {
   1325 		if (port->input)
   1326 			continue;
   1327 
   1328 		if (!port->ddps)
   1329 			continue;
   1330 
   1331 		if (!port->available_pbn)
   1332 			drm_dp_send_enum_path_resources(mgr, mstb, port);
   1333 
   1334 		if (port->mstb) {
   1335 			mstb_child = drm_dp_get_validated_mstb_ref(mgr, port->mstb);
   1336 			if (mstb_child) {
   1337 				drm_dp_check_and_send_link_address(mgr, mstb_child);
   1338 				drm_dp_put_mst_branch_device(mstb_child);
   1339 			}
   1340 		}
   1341 	}
   1342 }
   1343 
   1344 static void drm_dp_mst_link_probe_work(struct work_struct *work)
   1345 {
   1346 	struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, work);
   1347 	struct drm_dp_mst_branch *mstb;
   1348 
   1349 	mutex_lock(&mgr->lock);
   1350 	mstb = mgr->mst_primary;
   1351 	if (mstb) {
   1352 		kref_get(&mstb->kref);
   1353 	}
   1354 	mutex_unlock(&mgr->lock);
   1355 	if (mstb) {
   1356 		drm_dp_check_and_send_link_address(mgr, mstb);
   1357 		drm_dp_put_mst_branch_device(mstb);
   1358 	}
   1359 }
   1360 
   1361 static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
   1362 				 u8 *guid)
   1363 {
   1364 	static u8 zero_guid[16];
   1365 
   1366 	if (!memcmp(guid, zero_guid, 16)) {
   1367 		u64 salt = get_jiffies_64();
   1368 		memcpy(&guid[0], &salt, sizeof(u64));
   1369 		memcpy(&guid[8], &salt, sizeof(u64));
   1370 		return false;
   1371 	}
   1372 	return true;
   1373 }
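
         /*
          * An all-zero GUID means the branch has none yet: a value is
          * synthesized from the jiffies counter and false is returned so the
          * caller (drm_dp_check_mstb_guid) writes it back to the device.
          */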
   1374 
   1375 #if 0
   1376 static int build_dpcd_read(struct drm_dp_sideband_msg_tx *msg, u8 port_num, u32 offset, u8 num_bytes)
   1377 {
   1378 	struct drm_dp_sideband_msg_req_body req;
   1379 
   1380 	req.req_type = DP_REMOTE_DPCD_READ;
   1381 	req.u.dpcd_read.port_number = port_num;
   1382 	req.u.dpcd_read.dpcd_address = offset;
   1383 	req.u.dpcd_read.num_bytes = num_bytes;
   1384 	drm_dp_encode_sideband_req(&req, msg);
   1385 
   1386 	return 0;
   1387 }
   1388 #endif
   1389 
   1390 static int drm_dp_send_sideband_msg(struct drm_dp_mst_topology_mgr *mgr,
   1391 				    bool up, u8 *msg, int len)
   1392 {
   1393 	int ret;
   1394 	int regbase = up ? DP_SIDEBAND_MSG_UP_REP_BASE : DP_SIDEBAND_MSG_DOWN_REQ_BASE;
   1395 	int tosend, total, offset;
   1396 	int retries = 0;
   1397 
   1398 retry:
   1399 	total = len;
   1400 	offset = 0;
   1401 	do {
   1402 		tosend = min3(mgr->max_dpcd_transaction_bytes, 16, total);
   1403 
   1404 		ret = drm_dp_dpcd_write(mgr->aux, regbase + offset,
   1405 					&msg[offset],
   1406 					tosend);
   1407 		if (ret != tosend) {
   1408 			if (ret == -EIO && retries < 5) {
   1409 				retries++;
   1410 				goto retry;
   1411 			}
   1412 			DRM_DEBUG_KMS("failed to dpcd write %d %d\n", tosend, ret);
   1413 
   1414 			return -EIO;
   1415 		}
   1416 		offset += tosend;
   1417 		total -= tosend;
   1418 	} while (total > 0);
   1419 	return 0;
   1420 }
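
         /*
          * Writes go out in pieces of at most 16 bytes (less if the device
          * advertises a smaller max_dpcd_transaction_bytes).  Note the -EIO
          * retry path restarts the whole message from offset 0 rather than
          * resuming at the failed piece.
          */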
   1421 
   1422 static int set_hdr_from_dst_qlock(struct drm_dp_sideband_msg_hdr *hdr,
   1423 				  struct drm_dp_sideband_msg_tx *txmsg)
   1424 {
   1425 	struct drm_dp_mst_branch *mstb = txmsg->dst;
   1426 	u8 req_type;
   1427 
   1428 	/* both msg slots are full */
   1429 	if (txmsg->seqno == -1) {
   1430 		if (mstb->tx_slots[0] && mstb->tx_slots[1]) {
   1431 			DRM_DEBUG_KMS("%s: failed to find slot\n", __func__);
   1432 			return -EAGAIN;
   1433 		}
   1434 		if (mstb->tx_slots[0] == NULL && mstb->tx_slots[1] == NULL) {
   1435 			txmsg->seqno = mstb->last_seqno;
   1436 			mstb->last_seqno ^= 1;
   1437 		} else if (mstb->tx_slots[0] == NULL)
   1438 			txmsg->seqno = 0;
   1439 		else
   1440 			txmsg->seqno = 1;
   1441 		mstb->tx_slots[txmsg->seqno] = txmsg;
   1442 	}
   1443 
   1444 	req_type = txmsg->msg[0] & 0x7f;
   1445 	if (req_type == DP_CONNECTION_STATUS_NOTIFY ||
   1446 		req_type == DP_RESOURCE_STATUS_NOTIFY)
   1447 		hdr->broadcast = 1;
   1448 	else
   1449 		hdr->broadcast = 0;
   1450 	hdr->path_msg = txmsg->path_msg;
   1451 	hdr->lct = mstb->lct;
   1452 	hdr->lcr = mstb->lct - 1;
   1453 	if (mstb->lct > 1)
   1454 		memcpy(hdr->rad, mstb->rad, mstb->lct / 2);
   1455 	hdr->seqno = txmsg->seqno;
   1456 	return 0;
   1457 }
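
         /*
          * Each branch device has two outstanding-transaction slots (seqno 0
          * and 1).  A txmsg arriving here with seqno -1 claims a free slot,
          * with last_seqno alternating the pick when both are free, so the
          * eventual reply can be matched back to its message.
          */
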
   1458 /*
   1459  * process a single block of the next message in the sideband queue
   1460  */
   1461 static int process_single_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
   1462 				   struct drm_dp_sideband_msg_tx *txmsg,
   1463 				   bool up)
   1464 {
   1465 	u8 chunk[48];
   1466 	struct drm_dp_sideband_msg_hdr hdr;
   1467 	int len, space, idx, tosend;
   1468 	int ret;
   1469 
   1470 	memset(&hdr, 0, sizeof(struct drm_dp_sideband_msg_hdr));
   1471 
   1472 	if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED) {
   1473 		txmsg->seqno = -1;
   1474 		txmsg->state = DRM_DP_SIDEBAND_TX_START_SEND;
   1475 	}
   1476 
   1477 	/* make hdr from dst mst - for replies use seqno
   1478 	   otherwise assign one */
   1479 	ret = set_hdr_from_dst_qlock(&hdr, txmsg);
   1480 	if (ret < 0)
   1481 		return ret;
   1482 
   1483 	/* amount left to send in this message */
   1484 	len = txmsg->cur_len - txmsg->cur_offset;
   1485 
    1486 	/* 48 = max sideband msg transaction size, minus 1 byte for the data CRC and the header bytes */
   1487 	space = 48 - 1 - drm_dp_calc_sb_hdr_size(&hdr);
   1488 
   1489 	tosend = min(len, space);
   1490 	if (len == txmsg->cur_len)
   1491 		hdr.somt = 1;
   1492 	if (space >= len)
   1493 		hdr.eomt = 1;
   1494 
   1495 
   1496 	hdr.msg_len = tosend + 1;
   1497 	drm_dp_encode_sideband_msg_hdr(&hdr, chunk, &idx);
   1498 	memcpy(&chunk[idx], &txmsg->msg[txmsg->cur_offset], tosend);
   1499 	/* add crc at end */
   1500 	drm_dp_crc_sideband_chunk_req(&chunk[idx], tosend);
   1501 	idx += tosend + 1;
   1502 
   1503 	ret = drm_dp_send_sideband_msg(mgr, up, chunk, idx);
   1504 	if (ret) {
   1505 		DRM_DEBUG_KMS("sideband msg failed to send\n");
   1506 		return ret;
   1507 	}
   1508 
   1509 	txmsg->cur_offset += tosend;
   1510 	if (txmsg->cur_offset == txmsg->cur_len) {
   1511 		txmsg->state = DRM_DP_SIDEBAND_TX_SENT;
   1512 		return 1;
   1513 	}
   1514 	return 0;
   1515 }
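
         /*
          * Chunk arithmetic above: a transaction is at most 48 bytes, so with
          * an lct-1 header (3 bytes) a chunk carries up to 48 - 3 - 1 (data
          * CRC) = 44 body bytes.  somt marks the chunk that starts a message
          * and eomt the one that completes it.
          */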
   1516 
   1517 static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
   1518 {
   1519 	struct drm_dp_sideband_msg_tx *txmsg;
   1520 	int ret;
   1521 
   1522 	WARN_ON(!mutex_is_locked(&mgr->qlock));
   1523 
   1524 	/* construct a chunk from the first msg in the tx_msg queue */
   1525 	if (list_empty(&mgr->tx_msg_downq)) {
   1526 		mgr->tx_down_in_progress = false;
   1527 		return;
   1528 	}
   1529 	mgr->tx_down_in_progress = true;
   1530 
   1531 	txmsg = list_first_entry(&mgr->tx_msg_downq, struct drm_dp_sideband_msg_tx, next);
   1532 	ret = process_single_tx_qlock(mgr, txmsg, false);
   1533 	if (ret == 1) {
    1534 		/* txmsg is sent; it should be in the slots now */
   1535 		list_del(&txmsg->next);
   1536 	} else if (ret) {
   1537 		DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);
   1538 		list_del(&txmsg->next);
   1539 		if (txmsg->seqno != -1)
   1540 			txmsg->dst->tx_slots[txmsg->seqno] = NULL;
   1541 		txmsg->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
   1542 #ifdef __NetBSD__
   1543 		DRM_WAKEUP_ONE(&mgr->tx_waitq, &mgr->qlock);
   1544 #else
   1545 		wake_up(&mgr->tx_waitq);
   1546 #endif
   1547 	}
   1548 	if (list_empty(&mgr->tx_msg_downq)) {
   1549 		mgr->tx_down_in_progress = false;
   1550 		return;
   1551 	}
   1552 }
   1553 
   1554 /* called holding qlock */
   1555 static void process_single_up_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
   1556 				       struct drm_dp_sideband_msg_tx *txmsg)
   1557 {
   1558 	int ret;
   1559 
   1560 	/* construct a chunk from the first msg in the tx_msg queue */
   1561 	ret = process_single_tx_qlock(mgr, txmsg, true);
   1562 
   1563 	if (ret != 1)
   1564 		DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);
   1565 
   1566 	txmsg->dst->tx_slots[txmsg->seqno] = NULL;
   1567 }
   1568 
   1569 static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr,
   1570 				 struct drm_dp_sideband_msg_tx *txmsg)
   1571 {
   1572 	mutex_lock(&mgr->qlock);
   1573 	list_add_tail(&txmsg->next, &mgr->tx_msg_downq);
   1574 	if (!mgr->tx_down_in_progress)
   1575 		process_single_down_tx_qlock(mgr);
   1576 	mutex_unlock(&mgr->qlock);
   1577 }
   1578 
   1579 static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
   1580 				     struct drm_dp_mst_branch *mstb)
   1581 {
   1582 	int len __unused;
   1583 	struct drm_dp_sideband_msg_tx *txmsg;
   1584 	int ret;
   1585 
   1586 	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
   1587 	if (!txmsg)
   1588 		return;
   1589 
   1590 	txmsg->dst = mstb;
   1591 	len = build_link_address(txmsg);
   1592 
   1593 	mstb->link_address_sent = true;
   1594 	drm_dp_queue_down_tx(mgr, txmsg);
   1595 
   1596 	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
   1597 	if (ret > 0) {
   1598 		int i;
   1599 
   1600 		if (txmsg->reply.reply_type == 1)
   1601 			DRM_DEBUG_KMS("link address nak received\n");
   1602 		else {
   1603 			DRM_DEBUG_KMS("link address reply: %d\n", txmsg->reply.u.link_addr.nports);
   1604 			for (i = 0; i < txmsg->reply.u.link_addr.nports; i++) {
   1605 				DRM_DEBUG_KMS("port %d: input %d, pdt: %d, pn: %d, dpcd_rev: %02x, mcs: %d, ddps: %d, ldps %d, sdp %d/%d\n", i,
   1606 				       txmsg->reply.u.link_addr.ports[i].input_port,
   1607 				       txmsg->reply.u.link_addr.ports[i].peer_device_type,
   1608 				       txmsg->reply.u.link_addr.ports[i].port_number,
   1609 				       txmsg->reply.u.link_addr.ports[i].dpcd_revision,
   1610 				       txmsg->reply.u.link_addr.ports[i].mcs,
   1611 				       txmsg->reply.u.link_addr.ports[i].ddps,
   1612 				       txmsg->reply.u.link_addr.ports[i].legacy_device_plug_status,
   1613 				       txmsg->reply.u.link_addr.ports[i].num_sdp_streams,
   1614 				       txmsg->reply.u.link_addr.ports[i].num_sdp_stream_sinks);
   1615 			}
   1616 
   1617 			drm_dp_check_mstb_guid(mstb, txmsg->reply.u.link_addr.guid);
   1618 
   1619 			for (i = 0; i < txmsg->reply.u.link_addr.nports; i++) {
   1620 				drm_dp_add_port(mstb, mgr->dev, &txmsg->reply.u.link_addr.ports[i]);
   1621 			}
   1622 			(*mgr->cbs->hotplug)(mgr);
   1623 		}
   1624 	} else {
   1625 		mstb->link_address_sent = false;
   1626 		DRM_DEBUG_KMS("link address failed %d\n", ret);
   1627 	}
   1628 
   1629 	kfree(txmsg);
   1630 }
   1631 
   1632 static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
   1633 					   struct drm_dp_mst_branch *mstb,
   1634 					   struct drm_dp_mst_port *port)
   1635 {
   1636 	int len __unused;
   1637 	struct drm_dp_sideband_msg_tx *txmsg;
   1638 	int ret;
   1639 
   1640 	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
   1641 	if (!txmsg)
   1642 		return -ENOMEM;
   1643 
   1644 	txmsg->dst = mstb;
   1645 	len = build_enum_path_resources(txmsg, port->port_num);
   1646 
   1647 	drm_dp_queue_down_tx(mgr, txmsg);
   1648 
   1649 	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
   1650 	if (ret > 0) {
   1651 		if (txmsg->reply.reply_type == 1)
   1652 			DRM_DEBUG_KMS("enum path resources nak received\n");
   1653 		else {
   1654 			if (port->port_num != txmsg->reply.u.path_resources.port_number)
   1655 				DRM_ERROR("got incorrect port in response\n");
   1656 			DRM_DEBUG_KMS("enum path resources %d: %d %d\n", txmsg->reply.u.path_resources.port_number, txmsg->reply.u.path_resources.full_payload_bw_number,
   1657 			       txmsg->reply.u.path_resources.avail_payload_bw_number);
   1658 			port->available_pbn = txmsg->reply.u.path_resources.avail_payload_bw_number;
   1659 		}
   1660 	}
   1661 
   1662 	kfree(txmsg);
   1663 	return 0;
   1664 }
   1665 
   1666 static struct drm_dp_mst_port *drm_dp_get_last_connected_port_to_mstb(struct drm_dp_mst_branch *mstb)
   1667 {
   1668 	if (!mstb->port_parent)
   1669 		return NULL;
   1670 
   1671 	if (mstb->port_parent->mstb != mstb)
   1672 		return mstb->port_parent;
   1673 
   1674 	return drm_dp_get_last_connected_port_to_mstb(mstb->port_parent->parent);
   1675 }
   1676 
   1677 static struct drm_dp_mst_branch *drm_dp_get_last_connected_port_and_mstb(struct drm_dp_mst_topology_mgr *mgr,
   1678 									 struct drm_dp_mst_branch *mstb,
   1679 									 int *port_num)
   1680 {
   1681 	struct drm_dp_mst_branch *rmstb = NULL;
   1682 	struct drm_dp_mst_port *found_port;
   1683 	mutex_lock(&mgr->lock);
   1684 	if (mgr->mst_primary) {
   1685 		found_port = drm_dp_get_last_connected_port_to_mstb(mstb);
   1686 
   1687 		if (found_port) {
   1688 			rmstb = found_port->parent;
   1689 			kref_get(&rmstb->kref);
   1690 			*port_num = found_port->port_num;
   1691 		}
   1692 	}
   1693 	mutex_unlock(&mgr->lock);
   1694 	return rmstb;
   1695 }
   1696 
   1697 static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
   1698 				   struct drm_dp_mst_port *port,
   1699 				   int id,
   1700 				   int pbn)
   1701 {
   1702 	struct drm_dp_sideband_msg_tx *txmsg;
   1703 	struct drm_dp_mst_branch *mstb;
   1704 	int len __unused, ret, port_num;
   1705 
   1706 	port = drm_dp_get_validated_port_ref(mgr, port);
   1707 	if (!port)
   1708 		return -EINVAL;
   1709 
   1710 	port_num = port->port_num;
   1711 	mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);
   1712 	if (!mstb) {
   1713 		mstb = drm_dp_get_last_connected_port_and_mstb(mgr, port->parent, &port_num);
   1714 
   1715 		if (!mstb) {
   1716 			drm_dp_put_port(port);
   1717 			return -EINVAL;
   1718 		}
   1719 	}
   1720 
   1721 	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
   1722 	if (!txmsg) {
   1723 		ret = -ENOMEM;
   1724 		goto fail_put;
   1725 	}
   1726 
   1727 	txmsg->dst = mstb;
   1728 	len = build_allocate_payload(txmsg, port_num,
   1729 				     id,
   1730 				     pbn);
   1731 
   1732 	drm_dp_queue_down_tx(mgr, txmsg);
   1733 
   1734 	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
   1735 	if (ret > 0) {
   1736 		if (txmsg->reply.reply_type == 1) {
   1737 			ret = -EINVAL;
   1738 		} else
   1739 			ret = 0;
   1740 	}
   1741 	kfree(txmsg);
   1742 fail_put:
   1743 	drm_dp_put_mst_branch_device(mstb);
   1744 	drm_dp_put_port(port);
   1745 	return ret;
   1746 }
   1747 
   1748 static int drm_dp_create_payload_step1(struct drm_dp_mst_topology_mgr *mgr,
   1749 				       int id,
   1750 				       struct drm_dp_payload *payload)
   1751 {
   1752 	int ret;
   1753 
   1754 	ret = drm_dp_dpcd_write_payload(mgr, id, payload);
   1755 	if (ret < 0) {
   1756 		payload->payload_state = 0;
   1757 		return ret;
   1758 	}
   1759 	payload->payload_state = DP_PAYLOAD_LOCAL;
   1760 	return 0;
   1761 }
   1762 
   1763 static int drm_dp_create_payload_step2(struct drm_dp_mst_topology_mgr *mgr,
   1764 				       struct drm_dp_mst_port *port,
   1765 				       int id,
   1766 				       struct drm_dp_payload *payload)
   1767 {
   1768 	int ret;
   1769 	ret = drm_dp_payload_send_msg(mgr, port, id, port->vcpi.pbn);
   1770 	if (ret < 0)
   1771 		return ret;
   1772 	payload->payload_state = DP_PAYLOAD_REMOTE;
   1773 	return ret;
   1774 }
   1775 
   1776 static int drm_dp_destroy_payload_step1(struct drm_dp_mst_topology_mgr *mgr,
   1777 					struct drm_dp_mst_port *port,
   1778 					int id,
   1779 					struct drm_dp_payload *payload)
   1780 {
   1781 	DRM_DEBUG_KMS("\n");
    1782 	/* it's okay for these to fail */
   1783 	if (port) {
   1784 		drm_dp_payload_send_msg(mgr, port, id, 0);
   1785 	}
   1786 
   1787 	drm_dp_dpcd_write_payload(mgr, id, payload);
   1788 	payload->payload_state = DP_PAYLOAD_DELETE_LOCAL;
   1789 	return 0;
   1790 }
   1791 
   1792 static int drm_dp_destroy_payload_step2(struct drm_dp_mst_topology_mgr *mgr,
   1793 					int id,
   1794 					struct drm_dp_payload *payload)
   1795 {
   1796 	payload->payload_state = 0;
   1797 	return 0;
   1798 }
   1799 
   1800 /**
   1801  * drm_dp_update_payload_part1() - Execute payload update part 1
   1802  * @mgr: manager to use.
   1803  *
   1804  * This iterates over all proposed virtual channels, and tries to
   1805  * allocate space in the link for them. For 0->slots transitions,
   1806  * this step just writes the VCPI to the MST device. For slots->0
   1807  * transitions, this writes the updated VCPIs and removes the
   1808  * remote VC payloads.
   1809  *
    1810  * After calling this, the driver should generate ACT and payload
    1811  * packets.
   1812  */
   1813 int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
   1814 {
   1815 	int i, j;
   1816 	int cur_slots = 1;
   1817 	struct drm_dp_payload req_payload;
   1818 	struct drm_dp_mst_port *port;
   1819 
   1820 	mutex_lock(&mgr->payload_lock);
   1821 	for (i = 0; i < mgr->max_payloads; i++) {
   1822 		/* solve the current payloads - compare to the hw ones
   1823 		   - update the hw view */
   1824 		req_payload.start_slot = cur_slots;
   1825 		if (mgr->proposed_vcpis[i]) {
   1826 			port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
   1827 			port = drm_dp_get_validated_port_ref(mgr, port);
   1828 			if (!port) {
   1829 				mutex_unlock(&mgr->payload_lock);
   1830 				return -EINVAL;
   1831 			}
   1832 			req_payload.num_slots = mgr->proposed_vcpis[i]->num_slots;
   1833 			req_payload.vcpi = mgr->proposed_vcpis[i]->vcpi;
   1834 		} else {
   1835 			port = NULL;
   1836 			req_payload.num_slots = 0;
   1837 		}
   1838 
   1839 		if (mgr->payloads[i].start_slot != req_payload.start_slot) {
   1840 			mgr->payloads[i].start_slot = req_payload.start_slot;
   1841 		}
   1842 		/* work out what is required to happen with this payload */
   1843 		if (mgr->payloads[i].num_slots != req_payload.num_slots) {
   1844 
   1845 			/* need to push an update for this payload */
   1846 			if (req_payload.num_slots) {
   1847 				drm_dp_create_payload_step1(mgr, mgr->proposed_vcpis[i]->vcpi, &req_payload);
   1848 				mgr->payloads[i].num_slots = req_payload.num_slots;
   1849 				mgr->payloads[i].vcpi = req_payload.vcpi;
   1850 			} else if (mgr->payloads[i].num_slots) {
   1851 				mgr->payloads[i].num_slots = 0;
   1852 				drm_dp_destroy_payload_step1(mgr, port, mgr->payloads[i].vcpi, &mgr->payloads[i]);
   1853 				req_payload.payload_state = mgr->payloads[i].payload_state;
   1854 				mgr->payloads[i].start_slot = 0;
   1855 			}
   1856 			mgr->payloads[i].payload_state = req_payload.payload_state;
   1857 		}
   1858 		cur_slots += req_payload.num_slots;
   1859 
   1860 		if (port)
   1861 			drm_dp_put_port(port);
   1862 	}
   1863 
   1864 	for (i = 0; i < mgr->max_payloads; i++) {
   1865 		if (mgr->payloads[i].payload_state == DP_PAYLOAD_DELETE_LOCAL) {
   1866 			DRM_DEBUG_KMS("removing payload %d\n", i);
   1867 			for (j = i; j < mgr->max_payloads - 1; j++) {
   1868 				memcpy(&mgr->payloads[j], &mgr->payloads[j + 1], sizeof(struct drm_dp_payload));
   1869 				mgr->proposed_vcpis[j] = mgr->proposed_vcpis[j + 1];
   1870 				if (mgr->proposed_vcpis[j] && mgr->proposed_vcpis[j]->num_slots) {
   1871 					set_bit(j + 1, &mgr->payload_mask);
   1872 				} else {
   1873 					clear_bit(j + 1, &mgr->payload_mask);
   1874 				}
   1875 			}
   1876 			memset(&mgr->payloads[mgr->max_payloads - 1], 0, sizeof(struct drm_dp_payload));
   1877 			mgr->proposed_vcpis[mgr->max_payloads - 1] = NULL;
   1878 			clear_bit(mgr->max_payloads, &mgr->payload_mask);
   1879 
   1880 		}
   1881 	}
   1882 	mutex_unlock(&mgr->payload_lock);
   1883 
   1884 	return 0;
   1885 }
   1886 EXPORT_SYMBOL(drm_dp_update_payload_part1);
   1887 
   1888 /**
   1889  * drm_dp_update_payload_part2() - Execute payload update part 2
   1890  * @mgr: manager to use.
   1891  *
   1892  * This iterates over all proposed virtual channels, and tries to
   1893  * allocate space in the link for them. For 0->slots transitions,
   1894  * this step writes the remote VC payload commands. For slots->0
   1895  * this just resets some internal state.
   1896  */
   1897 int drm_dp_update_payload_part2(struct drm_dp_mst_topology_mgr *mgr)
   1898 {
   1899 	struct drm_dp_mst_port *port;
   1900 	int i;
   1901 	int ret = 0;
   1902 	mutex_lock(&mgr->payload_lock);
   1903 	for (i = 0; i < mgr->max_payloads; i++) {
   1904 
   1905 		if (!mgr->proposed_vcpis[i])
   1906 			continue;
   1907 
   1908 		port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
   1909 
   1910 		DRM_DEBUG_KMS("payload %d %d\n", i, mgr->payloads[i].payload_state);
   1911 		if (mgr->payloads[i].payload_state == DP_PAYLOAD_LOCAL) {
   1912 			ret = drm_dp_create_payload_step2(mgr, port, mgr->proposed_vcpis[i]->vcpi, &mgr->payloads[i]);
   1913 		} else if (mgr->payloads[i].payload_state == DP_PAYLOAD_DELETE_LOCAL) {
   1914 			ret = drm_dp_destroy_payload_step2(mgr, mgr->proposed_vcpis[i]->vcpi, &mgr->payloads[i]);
   1915 		}
   1916 		if (ret) {
   1917 			mutex_unlock(&mgr->payload_lock);
   1918 			return ret;
   1919 		}
   1920 	}
   1921 	mutex_unlock(&mgr->payload_lock);
   1922 	return 0;
   1923 }
   1924 EXPORT_SYMBOL(drm_dp_update_payload_part2);
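
/*
 * Illustrative sketch (not compiled): the order in which a driver is
 * expected to drive the two payload-update steps above around its own
 * ACT trigger.  Only the helper calls are real; the wrapper function is
 * hypothetical.
 */
#if 0	/* example only */
static void example_commit_mst_payloads(struct drm_dp_mst_topology_mgr *mgr)
{
	/* step 1: update the local/DPCD payload table */
	drm_dp_update_payload_part1(mgr);

	/* the driver triggers ACT in hardware, then polls for completion */
	drm_dp_check_act_status(mgr);

	/* step 2: send the remote payload allocation sideband messages */
	drm_dp_update_payload_part2(mgr);
}
#endif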
   1925 
   1926 #if 0 /* unused as of yet */
   1927 static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr,
   1928 				 struct drm_dp_mst_port *port,
   1929 				 int offset, int size)
   1930 {
   1931 	int len;
   1932 	struct drm_dp_sideband_msg_tx *txmsg;
   1933 
   1934 	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
   1935 	if (!txmsg)
   1936 		return -ENOMEM;
   1937 
   1938 	len = build_dpcd_read(txmsg, port->port_num, 0, 8);
   1939 	txmsg->dst = port->parent;
   1940 
   1941 	drm_dp_queue_down_tx(mgr, txmsg);
   1942 
   1943 	return 0;
   1944 }
   1945 #endif
   1946 
   1947 static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
   1948 				  struct drm_dp_mst_port *port,
   1949 				  int offset, int size, u8 *bytes)
   1950 {
   1951 	int len __unused;
   1952 	int ret;
   1953 	struct drm_dp_sideband_msg_tx *txmsg;
   1954 	struct drm_dp_mst_branch *mstb;
   1955 
   1956 	mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);
   1957 	if (!mstb)
   1958 		return -EINVAL;
   1959 
   1960 	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
   1961 	if (!txmsg) {
   1962 		ret = -ENOMEM;
   1963 		goto fail_put;
   1964 	}
   1965 
   1966 	len = build_dpcd_write(txmsg, port->port_num, offset, size, bytes);
   1967 	txmsg->dst = mstb;
   1968 
   1969 	drm_dp_queue_down_tx(mgr, txmsg);
   1970 
   1971 	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
   1972 	if (ret > 0) {
   1973 		if (txmsg->reply.reply_type == 1) {
   1974 			ret = -EINVAL;
   1975 		} else
   1976 			ret = 0;
   1977 	}
   1978 	kfree(txmsg);
   1979 fail_put:
   1980 	drm_dp_put_mst_branch_device(mstb);
   1981 	return ret;
   1982 }
   1983 
   1984 static int drm_dp_encode_up_ack_reply(struct drm_dp_sideband_msg_tx *msg, u8 req_type)
   1985 {
   1986 	struct drm_dp_sideband_msg_reply_body reply;
   1987 
   1988 	reply.reply_type = 1;
   1989 	reply.req_type = req_type;
   1990 	drm_dp_encode_sideband_reply(&reply, msg);
   1991 	return 0;
   1992 }
   1993 
   1994 static int drm_dp_send_up_ack_reply(struct drm_dp_mst_topology_mgr *mgr,
   1995 				    struct drm_dp_mst_branch *mstb,
   1996 				    int req_type, int seqno, bool broadcast)
   1997 {
   1998 	struct drm_dp_sideband_msg_tx *txmsg;
   1999 
   2000 	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
   2001 	if (!txmsg)
   2002 		return -ENOMEM;
   2003 
   2004 	txmsg->dst = mstb;
   2005 	txmsg->seqno = seqno;
   2006 	drm_dp_encode_up_ack_reply(txmsg, req_type);
   2007 
   2008 	mutex_lock(&mgr->qlock);
   2009 
   2010 	process_single_up_tx_qlock(mgr, txmsg);
   2011 
   2012 	mutex_unlock(&mgr->qlock);
   2013 
   2014 	kfree(txmsg);
   2015 	return 0;
   2016 }
   2017 
   2018 static bool drm_dp_get_vc_payload_bw(int dp_link_bw,
   2019 				     int dp_link_count,
   2020 				     int *out)
   2021 {
   2022 	switch (dp_link_bw) {
   2023 	default:
   2024 		DRM_DEBUG_KMS("invalid link bandwidth in DPCD: %x (link count: %d)\n",
   2025 			      dp_link_bw, dp_link_count);
   2026 		return false;
   2027 
   2028 	case DP_LINK_BW_1_62:
   2029 		*out = 3 * dp_link_count;
   2030 		break;
   2031 	case DP_LINK_BW_2_7:
   2032 		*out = 5 * dp_link_count;
   2033 		break;
   2034 	case DP_LINK_BW_5_4:
   2035 		*out = 10 * dp_link_count;
   2036 		break;
   2037 	}
   2038 	return true;
   2039 }
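
/*
 * Worked example for drm_dp_get_vc_payload_bw() above: an HBR2
 * (DP_LINK_BW_5_4) link trained at 4 lanes gives *out = 10 * 4 = 40,
 * i.e. one MTP time slot on that link carries 40 PBN of bandwidth.
 * This value becomes mgr->pbn_div in drm_dp_mst_topology_mgr_set_mst()
 * below.
 */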
   2040 
   2041 /**
   2042  * drm_dp_mst_topology_mgr_set_mst() - Set the MST state for a topology manager
   2043  * @mgr: manager to set state for
   2044  * @mst_state: true to enable MST on this connector - false to disable.
   2045  *
   2046  * This is called by the driver when it detects an MST capable device plugged
   2047  * into a DP MST capable port, or when a DP MST capable device is unplugged.
   2048  */
   2049 int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool mst_state)
   2050 {
   2051 	int ret = 0;
   2052 	struct drm_dp_mst_branch *mstb = NULL;
   2053 
   2054 	mutex_lock(&mgr->lock);
   2055 	if (mst_state == mgr->mst_state)
   2056 		goto out_unlock;
   2057 
   2058 	mgr->mst_state = mst_state;
   2059 	/* set the device into MST mode */
   2060 	if (mst_state) {
   2061 		WARN_ON(mgr->mst_primary);
   2062 
   2063 		/* get dpcd info */
   2064 		ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE);
   2065 		if (ret != DP_RECEIVER_CAP_SIZE) {
   2066 			DRM_DEBUG_KMS("failed to read DPCD\n");
   2067 			goto out_unlock;
   2068 		}
   2069 
   2070 		if (!drm_dp_get_vc_payload_bw(mgr->dpcd[1],
   2071 					      mgr->dpcd[2] & DP_MAX_LANE_COUNT_MASK,
   2072 					      &mgr->pbn_div)) {
   2073 			ret = -EINVAL;
   2074 			goto out_unlock;
   2075 		}
   2076 
   2077 		mgr->total_pbn = 2560;
   2078 		mgr->total_slots = DIV_ROUND_UP(mgr->total_pbn, mgr->pbn_div);
   2079 		mgr->avail_slots = mgr->total_slots;
   2080 
   2081 		/* add initial branch device at LCT 1 */
   2082 		mstb = drm_dp_add_mst_branch_device(1, NULL);
   2083 		if (mstb == NULL) {
   2084 			ret = -ENOMEM;
   2085 			goto out_unlock;
   2086 		}
   2087 		mstb->mgr = mgr;
   2088 
   2089 		/* give this the main reference */
   2090 		mgr->mst_primary = mstb;
   2091 		kref_get(&mgr->mst_primary->kref);
   2092 
   2093 		ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
   2094 							 DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC);
   2095 		if (ret < 0) {
   2096 			goto out_unlock;
   2097 		}
   2098 
   2099 		{
   2100 			struct drm_dp_payload reset_pay;
   2101 			reset_pay.start_slot = 0;
   2102 			reset_pay.num_slots = 0x3f;
   2103 			drm_dp_dpcd_write_payload(mgr, 0, &reset_pay);
   2104 		}
   2105 
   2106 		queue_work(system_long_wq, &mgr->work);
   2107 
   2108 		ret = 0;
   2109 	} else {
   2110 		/* disable MST on the device */
   2111 		mstb = mgr->mst_primary;
   2112 		mgr->mst_primary = NULL;
   2113 		/* this can fail if the device is gone */
   2114 		drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 0);
   2115 		ret = 0;
   2116 		memset(mgr->payloads, 0, mgr->max_payloads * sizeof(struct drm_dp_payload));
   2117 		mgr->payload_mask = 0;
   2118 		set_bit(0, &mgr->payload_mask);
   2119 		mgr->vcpi_mask = 0;
   2120 	}
   2121 
   2122 out_unlock:
   2123 	mutex_unlock(&mgr->lock);
   2124 	if (mstb)
   2125 		drm_dp_put_mst_branch_device(mstb);
   2126 	return ret;
   2127 
   2128 }
   2129 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_set_mst);
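
/*
 * Illustrative sketch (not compiled): a driver detect path toggling MST
 * mode with the helper above.  The wrapper function is hypothetical.
 */
#if 0	/* example only */
static void example_mst_detect(struct drm_dp_mst_topology_mgr *mgr,
			       bool sink_is_mst)
{
	int ret;

	/* enable on MST-capable plug, disable again on unplug */
	ret = drm_dp_mst_topology_mgr_set_mst(mgr, sink_is_mst);
	if (ret < 0)
		DRM_DEBUG_KMS("failed to set MST state: %d\n", ret);
}
#endif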
   2130 
   2131 /**
   2132  * drm_dp_mst_topology_mgr_suspend() - suspend the MST manager
   2133  * @mgr: manager to suspend
   2134  *
   2135  * This function tells the MST device that we can't handle UP messages
   2136  * anymore. This should stop it from sending any since we are suspended.
   2137  */
   2138 void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr)
   2139 {
   2140 	mutex_lock(&mgr->lock);
   2141 	drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
   2142 			   DP_MST_EN | DP_UPSTREAM_IS_SRC);
   2143 	mutex_unlock(&mgr->lock);
   2144 	flush_work(&mgr->work);
   2145 	flush_work(&mgr->destroy_connector_work);
   2146 }
   2147 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_suspend);
   2148 
   2149 /**
   2150  * drm_dp_mst_topology_mgr_resume() - resume the MST manager
   2151  * @mgr: manager to resume
   2152  *
    2153  * This will fetch the DPCD and check whether the device is still there;
    2154  * if it is, it will rewrite the MSTM control bits and return.
    2155  *
    2156  * If the device has gone away, this returns -1 and the driver should do
    2157  * a full MST reprobe, in case we were undocked.
   2158  */
   2159 int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr)
   2160 {
   2161 	int ret = 0;
   2162 
   2163 	mutex_lock(&mgr->lock);
   2164 
   2165 	if (mgr->mst_primary) {
   2166 		int sret;
   2167 		u8 guid[16];
   2168 
   2169 		sret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE);
   2170 		if (sret != DP_RECEIVER_CAP_SIZE) {
   2171 			DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
   2172 			ret = -1;
   2173 			goto out_unlock;
   2174 		}
   2175 
   2176 		ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
   2177 					 DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC);
   2178 		if (ret < 0) {
   2179 			DRM_DEBUG_KMS("mst write failed - undocked during suspend?\n");
   2180 			ret = -1;
   2181 			goto out_unlock;
   2182 		}
   2183 
   2184 		/* Some hubs forget their guids after they resume */
   2185 		sret = drm_dp_dpcd_read(mgr->aux, DP_GUID, guid, 16);
   2186 		if (sret != 16) {
   2187 			DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
   2188 			ret = -1;
   2189 			goto out_unlock;
   2190 		}
   2191 		drm_dp_check_mstb_guid(mgr->mst_primary, guid);
   2192 
   2193 		ret = 0;
   2194 	} else
   2195 		ret = -1;
   2196 
   2197 out_unlock:
   2198 	mutex_unlock(&mgr->lock);
   2199 	return ret;
   2200 }
   2201 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_resume);
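
/*
 * Illustrative sketch (not compiled): a minimal resume handler honouring
 * the contract documented above - on -1 the topology is assumed stale,
 * so MST is torn down and a later detect cycle can reprobe it.  The
 * wrapper function is hypothetical.
 */
#if 0	/* example only */
static void example_mst_resume(struct drm_dp_mst_topology_mgr *mgr)
{
	if (drm_dp_mst_topology_mgr_resume(mgr) != 0) {
		/* undocked during suspend - drop the MST state */
		drm_dp_mst_topology_mgr_set_mst(mgr, false);
	}
}
#endif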
   2202 
   2203 static bool drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up)
   2204 {
   2205 	int len;
   2206 	u8 replyblock[32];
   2207 	int replylen, origlen __unused, curreply;
   2208 	int ret;
   2209 	struct drm_dp_sideband_msg_rx *msg;
   2210 	int basereg = up ? DP_SIDEBAND_MSG_UP_REQ_BASE : DP_SIDEBAND_MSG_DOWN_REP_BASE;
   2211 	msg = up ? &mgr->up_req_recv : &mgr->down_rep_recv;
   2212 
   2213 	len = min(mgr->max_dpcd_transaction_bytes, 16);
   2214 	ret = drm_dp_dpcd_read(mgr->aux, basereg,
   2215 			       replyblock, len);
   2216 	if (ret != len) {
   2217 		DRM_DEBUG_KMS("failed to read DPCD down rep %d %d\n", len, ret);
   2218 		return false;
   2219 	}
   2220 	ret = drm_dp_sideband_msg_build(msg, replyblock, len, true);
   2221 	if (!ret) {
   2222 		DRM_DEBUG_KMS("sideband msg build failed %d\n", replyblock[0]);
   2223 		return false;
   2224 	}
   2225 	replylen = msg->curchunk_len + msg->curchunk_hdrlen;
   2226 
   2227 	origlen = replylen;
   2228 	replylen -= len;
   2229 	curreply = len;
   2230 	while (replylen > 0) {
   2231 		len = min3(replylen, mgr->max_dpcd_transaction_bytes, 16);
   2232 		ret = drm_dp_dpcd_read(mgr->aux, basereg + curreply,
   2233 				    replyblock, len);
   2234 		if (ret != len) {
   2235 			DRM_DEBUG_KMS("failed to read a chunk (len %d, ret %d)\n",
   2236 				      len, ret);
   2237 			return false;
   2238 		}
   2239 
   2240 		ret = drm_dp_sideband_msg_build(msg, replyblock, len, false);
   2241 		if (!ret) {
   2242 			DRM_DEBUG_KMS("failed to build sideband msg\n");
   2243 			return false;
   2244 		}
   2245 
   2246 		curreply += len;
   2247 		replylen -= len;
   2248 	}
   2249 	return true;
   2250 }
   2251 
   2252 static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
   2253 {
   2254 	int ret = 0;
   2255 
   2256 	if (!drm_dp_get_one_sb_msg(mgr, false)) {
   2257 		memset(&mgr->down_rep_recv, 0,
   2258 		       sizeof(struct drm_dp_sideband_msg_rx));
   2259 		return 0;
   2260 	}
   2261 
   2262 	if (mgr->down_rep_recv.have_eomt) {
   2263 		struct drm_dp_sideband_msg_tx *txmsg;
   2264 		struct drm_dp_mst_branch *mstb;
   2265 		int slot = -1;
   2266 		mstb = drm_dp_get_mst_branch_device(mgr,
   2267 						    mgr->down_rep_recv.initial_hdr.lct,
   2268 						    mgr->down_rep_recv.initial_hdr.rad);
   2269 
   2270 		if (!mstb) {
   2271 			DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->down_rep_recv.initial_hdr.lct);
   2272 			memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
   2273 			return 0;
   2274 		}
   2275 
   2276 		/* find the message */
   2277 		slot = mgr->down_rep_recv.initial_hdr.seqno;
   2278 		mutex_lock(&mgr->qlock);
   2279 		txmsg = mstb->tx_slots[slot];
   2280 		/* remove from slots */
   2281 		mutex_unlock(&mgr->qlock);
   2282 
   2283 		if (!txmsg) {
   2284 			DRM_DEBUG_KMS("Got MST reply with no msg %p %d %d %02x %02x\n",
   2285 			       mstb,
   2286 			       mgr->down_rep_recv.initial_hdr.seqno,
   2287 			       mgr->down_rep_recv.initial_hdr.lct,
   2288 				      mgr->down_rep_recv.initial_hdr.rad[0],
   2289 				      mgr->down_rep_recv.msg[0]);
   2290 			drm_dp_put_mst_branch_device(mstb);
   2291 			memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
   2292 			return 0;
   2293 		}
   2294 
   2295 		drm_dp_sideband_parse_reply(&mgr->down_rep_recv, &txmsg->reply);
   2296 		if (txmsg->reply.reply_type == 1) {
   2297 			DRM_DEBUG_KMS("Got NAK reply: req 0x%02x, reason 0x%02x, nak data 0x%02x\n", txmsg->reply.req_type, txmsg->reply.u.nak.reason, txmsg->reply.u.nak.nak_data);
   2298 		}
   2299 
    2300 		memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
    2301 
    2302 		mutex_lock(&mgr->qlock);
    2303 		txmsg->state = DRM_DP_SIDEBAND_TX_RX;
    2304 		mstb->tx_slots[slot] = NULL;
    2305 #ifdef __NetBSD__
    2306 		DRM_WAKEUP_ONE(&mstb->mgr->tx_waitq, &mstb->mgr->qlock);
    2307 		mutex_unlock(&mgr->qlock);
    2308 #else
    2309 		mutex_unlock(&mgr->qlock);
    2310 
    2311 		wake_up(&mgr->tx_waitq);
    2312 #endif
    2313 		/* drop the branch device reference only after clearing its tx slot */
    2314 		drm_dp_put_mst_branch_device(mstb);
   2314 	}
   2315 	return ret;
   2316 }
   2317 
   2318 static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
   2319 {
   2320 	int ret = 0;
   2321 
   2322 	if (!drm_dp_get_one_sb_msg(mgr, true)) {
   2323 		memset(&mgr->up_req_recv, 0,
   2324 		       sizeof(struct drm_dp_sideband_msg_rx));
   2325 		return 0;
   2326 	}
   2327 
   2328 	if (mgr->up_req_recv.have_eomt) {
   2329 		struct drm_dp_sideband_msg_req_body msg;
   2330 		struct drm_dp_mst_branch *mstb = NULL;
   2331 		bool seqno;
   2332 
   2333 		if (!mgr->up_req_recv.initial_hdr.broadcast) {
   2334 			mstb = drm_dp_get_mst_branch_device(mgr,
   2335 							    mgr->up_req_recv.initial_hdr.lct,
   2336 							    mgr->up_req_recv.initial_hdr.rad);
   2337 			if (!mstb) {
   2338 				DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct);
   2339 				memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
   2340 				return 0;
   2341 			}
   2342 		}
   2343 
   2344 		seqno = mgr->up_req_recv.initial_hdr.seqno;
   2345 		drm_dp_sideband_parse_req(&mgr->up_req_recv, &msg);
   2346 
   2347 		if (msg.req_type == DP_CONNECTION_STATUS_NOTIFY) {
   2348 			drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, msg.req_type, seqno, false);
   2349 
   2350 			if (!mstb)
   2351 				mstb = drm_dp_get_mst_branch_device_by_guid(mgr, msg.u.conn_stat.guid);
   2352 
   2353 			if (!mstb) {
   2354 				DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct);
   2355 				memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
   2356 				return 0;
   2357 			}
   2358 
   2359 			drm_dp_update_port(mstb, &msg.u.conn_stat);
   2360 
   2361 			DRM_DEBUG_KMS("Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n", msg.u.conn_stat.port_number, msg.u.conn_stat.legacy_device_plug_status, msg.u.conn_stat.displayport_device_plug_status, msg.u.conn_stat.message_capability_status, msg.u.conn_stat.input_port, msg.u.conn_stat.peer_device_type);
   2362 			(*mgr->cbs->hotplug)(mgr);
   2363 
   2364 		} else if (msg.req_type == DP_RESOURCE_STATUS_NOTIFY) {
   2365 			drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, msg.req_type, seqno, false);
   2366 			if (!mstb)
   2367 				mstb = drm_dp_get_mst_branch_device_by_guid(mgr, msg.u.resource_stat.guid);
   2368 
   2369 			if (!mstb) {
   2370 				DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct);
   2371 				memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
   2372 				return 0;
   2373 			}
   2374 
   2375 			DRM_DEBUG_KMS("Got RSN: pn: %d avail_pbn %d\n", msg.u.resource_stat.port_number, msg.u.resource_stat.available_pbn);
   2376 		}
   2377 
   2378 		if (mstb)
   2379 			drm_dp_put_mst_branch_device(mstb);
   2380 
   2381 		memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
   2382 	}
   2383 	return ret;
   2384 }
   2385 
   2386 /**
   2387  * drm_dp_mst_hpd_irq() - MST hotplug IRQ notify
   2388  * @mgr: manager to notify irq for.
   2389  * @esi: 4 bytes from SINK_COUNT_ESI
   2390  * @handled: whether the hpd interrupt was consumed or not
   2391  *
   2392  * This should be called from the driver when it detects a short IRQ,
   2393  * along with the value of the DEVICE_SERVICE_IRQ_VECTOR_ESI0. The
   2394  * topology manager will process the sideband messages received as a result
   2395  * of this.
   2396  */
   2397 int drm_dp_mst_hpd_irq(struct drm_dp_mst_topology_mgr *mgr, u8 *esi, bool *handled)
   2398 {
   2399 	int ret = 0;
   2400 	int sc;
   2401 	*handled = false;
   2402 	sc = esi[0] & 0x3f;
   2403 
   2404 	if (sc != mgr->sink_count) {
   2405 		mgr->sink_count = sc;
   2406 		*handled = true;
   2407 	}
   2408 
   2409 	if (esi[1] & DP_DOWN_REP_MSG_RDY) {
   2410 		ret = drm_dp_mst_handle_down_rep(mgr);
   2411 		*handled = true;
   2412 	}
   2413 
   2414 	if (esi[1] & DP_UP_REQ_MSG_RDY) {
   2415 		ret |= drm_dp_mst_handle_up_req(mgr);
   2416 		*handled = true;
   2417 	}
   2418 
   2419 	drm_dp_mst_kick_tx(mgr);
   2420 	return ret;
   2421 }
   2422 EXPORT_SYMBOL(drm_dp_mst_hpd_irq);
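
/*
 * Illustrative sketch (not compiled): the short-pulse handler shape the
 * kernel-doc above assumes.  DP_SINK_COUNT_ESI is the real DPCD address
 * of the ESI block; the 14-byte read and the 3-byte ack write-back mirror
 * what callers of this helper typically do, but the wrapper function is
 * hypothetical.
 */
#if 0	/* example only */
static void example_mst_short_pulse(struct drm_dp_aux *aux,
				    struct drm_dp_mst_topology_mgr *mgr)
{
	u8 esi[16];
	bool handled = false;

	if (drm_dp_dpcd_read(aux, DP_SINK_COUNT_ESI, esi, 14) != 14)
		return;

	drm_dp_mst_hpd_irq(mgr, esi, &handled);

	if (handled) {
		/* ack the serviced IRQ bits back to the sink */
		drm_dp_dpcd_write(aux, DP_SINK_COUNT_ESI + 1, &esi[1], 3);
	}
}
#endif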
   2423 
   2424 /**
   2425  * drm_dp_mst_detect_port() - get connection status for an MST port
    2426  * @connector: connector this port is associated with
    2427  * @mgr: manager for this port
    2428  * @port: unverified pointer to a port
    2429  *
    2430  * This returns the current connection state for a port. It validates the
    2431  * port pointer still exists so the caller doesn't require a reference.
   2431  */
   2432 enum drm_connector_status drm_dp_mst_detect_port(struct drm_connector *connector,
   2433 						 struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
   2434 {
   2435 	enum drm_connector_status status = connector_status_disconnected;
   2436 
    2437 	/* we need to search for the port in the mgr in case it's gone */
   2438 	port = drm_dp_get_validated_port_ref(mgr, port);
   2439 	if (!port)
   2440 		return connector_status_disconnected;
   2441 
   2442 	if (!port->ddps)
   2443 		goto out;
   2444 
   2445 	switch (port->pdt) {
   2446 	case DP_PEER_DEVICE_NONE:
   2447 	case DP_PEER_DEVICE_MST_BRANCHING:
   2448 		break;
   2449 
   2450 	case DP_PEER_DEVICE_SST_SINK:
   2451 		status = connector_status_connected;
   2452 		/* for logical ports - cache the EDID */
   2453 		if (port->port_num >= 8 && !port->cached_edid) {
   2454 			port->cached_edid = drm_get_edid(connector, &port->aux.ddc);
   2455 		}
   2456 		break;
   2457 	case DP_PEER_DEVICE_DP_LEGACY_CONV:
   2458 		if (port->ldps)
   2459 			status = connector_status_connected;
   2460 		break;
   2461 	}
   2462 out:
   2463 	drm_dp_put_port(port);
   2464 	return status;
   2465 }
   2466 EXPORT_SYMBOL(drm_dp_mst_detect_port);
   2467 
   2468 /**
   2469  * drm_dp_mst_get_edid() - get EDID for an MST port
   2470  * @connector: toplevel connector to get EDID for
   2471  * @mgr: manager for this port
   2472  * @port: unverified pointer to a port.
   2473  *
    2474  * This returns an EDID for the port connected to a connector.
    2475  * It validates the pointer still exists so the caller doesn't require a
   2476  * reference.
   2477  */
   2478 struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
   2479 {
   2480 	struct edid *edid = NULL;
   2481 
    2482 	/* we need to search for the port in the mgr in case it's gone */
   2483 	port = drm_dp_get_validated_port_ref(mgr, port);
   2484 	if (!port)
   2485 		return NULL;
   2486 
   2487 	if (port->cached_edid)
   2488 		edid = drm_edid_duplicate(port->cached_edid);
   2489 	else {
   2490 		edid = drm_get_edid(connector, &port->aux.ddc);
   2491 		drm_mode_connector_set_tile_property(connector);
   2492 	}
   2493 	drm_dp_put_port(port);
   2494 	return edid;
   2495 }
   2496 EXPORT_SYMBOL(drm_dp_mst_get_edid);
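
/*
 * Illustrative sketch (not compiled): a connector ->get_modes()
 * implementation built on the helper above.  drm_add_edid_modes() and
 * drm_mode_connector_update_edid_property() are the standard drm EDID
 * helpers; the wrapper function is hypothetical.
 */
#if 0	/* example only */
static int example_mst_get_modes(struct drm_connector *connector,
				 struct drm_dp_mst_topology_mgr *mgr,
				 struct drm_dp_mst_port *port)
{
	struct edid *edid;
	int ret;

	edid = drm_dp_mst_get_edid(connector, mgr, port);
	if (!edid)
		return 0;

	drm_mode_connector_update_edid_property(connector, edid);
	ret = drm_add_edid_modes(connector, edid);
	kfree(edid);
	return ret;
}
#endif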
   2497 
   2498 /**
   2499  * drm_dp_find_vcpi_slots() - find slots for this PBN value
   2500  * @mgr: manager to use
   2501  * @pbn: payload bandwidth to convert into slots.
   2502  */
   2503 int drm_dp_find_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr,
   2504 			   int pbn)
   2505 {
   2506 	int num_slots;
   2507 
   2508 	num_slots = DIV_ROUND_UP(pbn, mgr->pbn_div);
   2509 
   2510 	if (num_slots > mgr->avail_slots)
   2511 		return -ENOSPC;
   2512 	return num_slots;
   2513 }
   2514 EXPORT_SYMBOL(drm_dp_find_vcpi_slots);
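
/*
 * Worked example for drm_dp_find_vcpi_slots() above: with pbn_div = 40
 * (an HBR2 x4 link, see drm_dp_get_vc_payload_bw()), a stream needing
 * 689 PBN occupies DIV_ROUND_UP(689, 40) = 18 time slots.
 */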
   2515 
   2516 static int drm_dp_init_vcpi(struct drm_dp_mst_topology_mgr *mgr,
   2517 			    struct drm_dp_vcpi *vcpi, int pbn)
   2518 {
   2519 	int num_slots;
   2520 	int ret;
   2521 
   2522 	num_slots = DIV_ROUND_UP(pbn, mgr->pbn_div);
   2523 
   2524 	if (num_slots > mgr->avail_slots)
   2525 		return -ENOSPC;
   2526 
   2527 	vcpi->pbn = pbn;
   2528 	vcpi->aligned_pbn = num_slots * mgr->pbn_div;
   2529 	vcpi->num_slots = num_slots;
   2530 
   2531 	ret = drm_dp_mst_assign_payload_id(mgr, vcpi);
   2532 	if (ret < 0)
   2533 		return ret;
   2534 	return 0;
   2535 }
   2536 
   2537 /**
   2538  * drm_dp_mst_allocate_vcpi() - Allocate a virtual channel
   2539  * @mgr: manager for this port
   2540  * @port: port to allocate a virtual channel for.
   2541  * @pbn: payload bandwidth number to request
   2542  * @slots: returned number of slots for this PBN.
   2543  */
   2544 bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, int pbn, int *slots)
   2545 {
   2546 	int ret;
   2547 
   2548 	port = drm_dp_get_validated_port_ref(mgr, port);
   2549 	if (!port)
   2550 		return false;
   2551 
   2552 	if (port->vcpi.vcpi > 0) {
   2553 		DRM_DEBUG_KMS("payload: vcpi %d already allocated for pbn %d - requested pbn %d\n", port->vcpi.vcpi, port->vcpi.pbn, pbn);
   2554 		if (pbn == port->vcpi.pbn) {
   2555 			*slots = port->vcpi.num_slots;
   2556 			drm_dp_put_port(port);
   2557 			return true;
   2558 		}
   2559 	}
   2560 
   2561 	ret = drm_dp_init_vcpi(mgr, &port->vcpi, pbn);
   2562 	if (ret) {
   2563 		DRM_DEBUG_KMS("failed to init vcpi %d %d %d\n", DIV_ROUND_UP(pbn, mgr->pbn_div), mgr->avail_slots, ret);
   2564 		goto out;
   2565 	}
   2566 	DRM_DEBUG_KMS("initing vcpi for %d %d\n", pbn, port->vcpi.num_slots);
   2567 	*slots = port->vcpi.num_slots;
   2568 
   2569 	drm_dp_put_port(port);
   2570 	return true;
   2571 out:
   2572 	return false;
   2573 }
   2574 EXPORT_SYMBOL(drm_dp_mst_allocate_vcpi);
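
/*
 * Illustrative sketch (not compiled): the enable-time flow tying the PBN
 * calculation to VCPI allocation.  clock_khz/bpp come from the mode the
 * driver is about to program; the wrapper function is hypothetical.
 */
#if 0	/* example only */
static bool example_mst_compute_config(struct drm_dp_mst_topology_mgr *mgr,
				       struct drm_dp_mst_port *port,
				       int clock_khz, int bpp)
{
	int pbn, slots;

	pbn = drm_dp_calc_pbn_mode(clock_khz, bpp);
	return drm_dp_mst_allocate_vcpi(mgr, port, pbn, &slots);
}
#endif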
   2575 
   2576 int drm_dp_mst_get_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
   2577 {
   2578 	int slots = 0;
   2579 	port = drm_dp_get_validated_port_ref(mgr, port);
   2580 	if (!port)
   2581 		return slots;
   2582 
   2583 	slots = port->vcpi.num_slots;
   2584 	drm_dp_put_port(port);
   2585 	return slots;
   2586 }
   2587 EXPORT_SYMBOL(drm_dp_mst_get_vcpi_slots);
   2588 
   2589 /**
   2590  * drm_dp_mst_reset_vcpi_slots() - Reset number of slots to 0 for VCPI
   2591  * @mgr: manager for this port
   2592  * @port: unverified pointer to a port.
   2593  *
    2594  * This just resets the number of slots for the port's VCPI for later programming.
   2595  */
   2596 void drm_dp_mst_reset_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
   2597 {
   2598 	port = drm_dp_get_validated_port_ref(mgr, port);
   2599 	if (!port)
   2600 		return;
   2601 	port->vcpi.num_slots = 0;
   2602 	drm_dp_put_port(port);
   2603 }
   2604 EXPORT_SYMBOL(drm_dp_mst_reset_vcpi_slots);
   2605 
   2606 /**
   2607  * drm_dp_mst_deallocate_vcpi() - deallocate a VCPI
   2608  * @mgr: manager for this port
   2609  * @port: unverified port to deallocate vcpi for
   2610  */
   2611 void drm_dp_mst_deallocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
   2612 {
   2613 	port = drm_dp_get_validated_port_ref(mgr, port);
   2614 	if (!port)
   2615 		return;
   2616 
   2617 	drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
   2618 	port->vcpi.num_slots = 0;
   2619 	port->vcpi.pbn = 0;
   2620 	port->vcpi.aligned_pbn = 0;
   2621 	port->vcpi.vcpi = 0;
   2622 	drm_dp_put_port(port);
   2623 }
   2624 EXPORT_SYMBOL(drm_dp_mst_deallocate_vcpi);
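
/*
 * Illustrative sketch (not compiled): the disable-time counterpart of the
 * VCPI allocation flow, mirroring what drm_dp_destroy_connector_work()
 * below does for ports that vanish.  The wrapper function is hypothetical.
 */
#if 0	/* example only */
static void example_mst_disable(struct drm_dp_mst_topology_mgr *mgr,
				struct drm_dp_mst_port *port)
{
	/* drop the slot count so payload part 1 tears the payload down */
	drm_dp_mst_reset_vcpi_slots(mgr, port);
	drm_dp_update_payload_part1(mgr);

	/* ... the driver disables the stream and waits for ACT here ... */

	drm_dp_update_payload_part2(mgr);
	drm_dp_mst_deallocate_vcpi(mgr, port);
}
#endif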
   2625 
   2626 static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
   2627 				     int id, struct drm_dp_payload *payload)
   2628 {
   2629 	u8 payload_alloc[3], status;
   2630 	int ret;
   2631 	int retries = 0;
   2632 
   2633 	drm_dp_dpcd_writeb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS,
   2634 			   DP_PAYLOAD_TABLE_UPDATED);
   2635 
   2636 	payload_alloc[0] = id;
   2637 	payload_alloc[1] = payload->start_slot;
   2638 	payload_alloc[2] = payload->num_slots;
   2639 
   2640 	ret = drm_dp_dpcd_write(mgr->aux, DP_PAYLOAD_ALLOCATE_SET, payload_alloc, 3);
   2641 	if (ret != 3) {
   2642 		DRM_DEBUG_KMS("failed to write payload allocation %d\n", ret);
   2643 		goto fail;
   2644 	}
   2645 
   2646 retry:
   2647 	ret = drm_dp_dpcd_readb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
   2648 	if (ret < 0) {
   2649 		DRM_DEBUG_KMS("failed to read payload table status %d\n", ret);
   2650 		goto fail;
   2651 	}
   2652 
   2653 	if (!(status & DP_PAYLOAD_TABLE_UPDATED)) {
   2654 		retries++;
   2655 		if (retries < 20) {
   2656 			usleep_range(10000, 20000);
   2657 			goto retry;
   2658 		}
   2659 		DRM_DEBUG_KMS("status not set after read payload table status %d\n", status);
   2660 		ret = -EINVAL;
   2661 		goto fail;
   2662 	}
   2663 	ret = 0;
   2664 fail:
   2665 	return ret;
   2666 }
   2667 
   2668 
   2669 /**
   2670  * drm_dp_check_act_status() - Check ACT handled status.
   2671  * @mgr: manager to use
   2672  *
   2673  * Check the payload status bits in the DPCD for ACT handled completion.
   2674  */
   2675 int drm_dp_check_act_status(struct drm_dp_mst_topology_mgr *mgr)
   2676 {
   2677 	u8 status;
   2678 	int ret;
   2679 	int count = 0;
   2680 
   2681 	do {
   2682 		ret = drm_dp_dpcd_readb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
   2683 
   2684 		if (ret < 0) {
   2685 			DRM_DEBUG_KMS("failed to read payload table status %d\n", ret);
   2686 			goto fail;
   2687 		}
   2688 
   2689 		if (status & DP_PAYLOAD_ACT_HANDLED)
   2690 			break;
   2691 		count++;
   2692 		udelay(100);
   2693 
   2694 	} while (count < 30);
   2695 
   2696 	if (!(status & DP_PAYLOAD_ACT_HANDLED)) {
   2697 		DRM_DEBUG_KMS("failed to get ACT bit %d after %d retries\n", status, count);
   2698 		ret = -EINVAL;
   2699 		goto fail;
   2700 	}
   2701 	return 0;
   2702 fail:
   2703 	return ret;
   2704 }
   2705 EXPORT_SYMBOL(drm_dp_check_act_status);
   2706 
   2707 /**
   2708  * drm_dp_calc_pbn_mode() - Calculate the PBN for a mode.
   2709  * @clock: dot clock for the mode
   2710  * @bpp: bpp for the mode.
   2711  *
   2712  * This uses the formula in the spec to calculate the PBN value for a mode.
   2713  */
   2714 int drm_dp_calc_pbn_mode(int clock, int bpp)
   2715 {
   2716 	u64 kbps;
   2717 	s64 peak_kbps;
   2718 	u32 numerator;
   2719 	u32 denominator;
   2720 
   2721 	kbps = clock * bpp;
   2722 
   2723 	/*
   2724 	 * margin 5300ppm + 300ppm ~ 0.6% as per spec, factor is 1.006
    2725 	 * The unit of 54/64 Mbytes/sec is an arbitrary unit chosen based on
    2726 	 * a common multiplier to render an integer PBN for all link rate and
    2727 	 * lane count combinations.
    2728 	 * Calculate:
   2729 	 * peak_kbps *= (1006/1000)
   2730 	 * peak_kbps *= (64/54)
   2731 	 * peak_kbps *= 8    convert to bytes
   2732 	 */
   2733 
   2734 	numerator = 64 * 1006;
   2735 	denominator = 54 * 8 * 1000 * 1000;
   2736 
   2737 	kbps *= numerator;
   2738 	peak_kbps = drm_fixp_from_fraction(kbps, denominator);
   2739 
   2740 	return drm_fixp2int_ceil(peak_kbps);
   2741 }
   2742 EXPORT_SYMBOL(drm_dp_calc_pbn_mode);
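
/*
 * Worked example for the formula above: a 154000 kHz mode at 30 bpp gives
 * kbps = 4620000, so PBN = ceil(4620000 * 64 * 1006 / (54 * 8 * 1000 * 1000))
 * = ceil(688.55...) = 689, matching the first self-test case below.
 */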
   2743 
   2744 static int test_calc_pbn_mode(void)
   2745 {
   2746 	int ret;
   2747 	ret = drm_dp_calc_pbn_mode(154000, 30);
   2748 	if (ret != 689) {
   2749 		DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n",
   2750 				154000, 30, 689, ret);
   2751 		return -EINVAL;
   2752 	}
   2753 	ret = drm_dp_calc_pbn_mode(234000, 30);
   2754 	if (ret != 1047) {
   2755 		DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n",
   2756 				234000, 30, 1047, ret);
   2757 		return -EINVAL;
   2758 	}
   2759 	ret = drm_dp_calc_pbn_mode(297000, 24);
   2760 	if (ret != 1063) {
   2761 		DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n",
   2762 				297000, 24, 1063, ret);
   2763 		return -EINVAL;
   2764 	}
   2765 	return 0;
   2766 }
   2767 
    2768 /* we want to kick the TX after we've acked the up/down IRQs. */
   2769 static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr)
   2770 {
   2771 	queue_work(system_long_wq, &mgr->tx_work);
   2772 }
   2773 
   2774 #if IS_ENABLED(CONFIG_DEBUG_FS)
   2775 static void drm_dp_mst_dump_mstb(struct seq_file *m,
   2776 				 struct drm_dp_mst_branch *mstb)
   2777 {
   2778 	struct drm_dp_mst_port *port;
   2779 	int tabs = mstb->lct;
   2780 	char prefix[10];
   2781 	int i;
   2782 
   2783 	for (i = 0; i < tabs; i++)
   2784 		prefix[i] = '\t';
   2785 	prefix[i] = '\0';
   2786 
   2787 	seq_printf(m, "%smst: %p, %d\n", prefix, mstb, mstb->num_ports);
   2788 	list_for_each_entry(port, &mstb->ports, next) {
   2789 		seq_printf(m, "%sport: %d: ddps: %d ldps: %d, %p, conn: %p\n", prefix, port->port_num, port->ddps, port->ldps, port, port->connector);
   2790 		if (port->mstb)
   2791 			drm_dp_mst_dump_mstb(m, port->mstb);
   2792 	}
   2793 }
   2794 
   2795 static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
   2796 				  char *buf)
   2797 {
   2798 	int ret;
   2799 	int i;
   2800 	for (i = 0; i < 4; i++) {
   2801 		ret = drm_dp_dpcd_read(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS + (i * 16), &buf[i * 16], 16);
   2802 		if (ret != 16)
   2803 			break;
   2804 	}
   2805 	if (i == 4)
   2806 		return true;
   2807 	return false;
   2808 }
   2809 
   2810 /**
   2811  * drm_dp_mst_dump_topology(): dump topology to seq file.
   2812  * @m: seq_file to dump output to
   2813  * @mgr: manager to dump current topology for.
   2814  *
   2815  * helper to dump MST topology to a seq file for debugfs.
   2816  */
   2817 void drm_dp_mst_dump_topology(struct seq_file *m,
   2818 			      struct drm_dp_mst_topology_mgr *mgr)
   2819 {
   2820 	int i;
   2821 	struct drm_dp_mst_port *port;
   2822 	mutex_lock(&mgr->lock);
   2823 	if (mgr->mst_primary)
   2824 		drm_dp_mst_dump_mstb(m, mgr->mst_primary);
   2825 
   2826 	/* dump VCPIs */
   2827 	mutex_unlock(&mgr->lock);
   2828 
   2829 	mutex_lock(&mgr->payload_lock);
   2830 	seq_printf(m, "vcpi: %lx %lx\n", mgr->payload_mask, mgr->vcpi_mask);
   2831 
   2832 	for (i = 0; i < mgr->max_payloads; i++) {
   2833 		if (mgr->proposed_vcpis[i]) {
   2834 			port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
   2835 			seq_printf(m, "vcpi %d: %d %d %d\n", i, port->port_num, port->vcpi.vcpi, port->vcpi.num_slots);
   2836 		} else
    2837 			seq_printf(m, "vcpi %d: unused\n", i);
   2838 	}
   2839 	for (i = 0; i < mgr->max_payloads; i++) {
   2840 		seq_printf(m, "payload %d: %d, %d, %d\n",
   2841 			   i,
   2842 			   mgr->payloads[i].payload_state,
   2843 			   mgr->payloads[i].start_slot,
   2844 			   mgr->payloads[i].num_slots);
   2845 
   2846 
   2847 	}
   2848 	mutex_unlock(&mgr->payload_lock);
   2849 
   2850 	mutex_lock(&mgr->lock);
   2851 	if (mgr->mst_primary) {
   2852 		u8 buf[64];
   2853 		bool bret;
   2854 		int ret;
   2855 		ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, buf, DP_RECEIVER_CAP_SIZE);
   2856 		seq_printf(m, "dpcd: ");
   2857 		for (i = 0; i < DP_RECEIVER_CAP_SIZE; i++)
   2858 			seq_printf(m, "%02x ", buf[i]);
   2859 		seq_printf(m, "\n");
   2860 		ret = drm_dp_dpcd_read(mgr->aux, DP_FAUX_CAP, buf, 2);
   2861 		seq_printf(m, "faux/mst: ");
   2862 		for (i = 0; i < 2; i++)
   2863 			seq_printf(m, "%02x ", buf[i]);
   2864 		seq_printf(m, "\n");
   2865 		ret = drm_dp_dpcd_read(mgr->aux, DP_MSTM_CTRL, buf, 1);
   2866 		seq_printf(m, "mst ctrl: ");
   2867 		for (i = 0; i < 1; i++)
   2868 			seq_printf(m, "%02x ", buf[i]);
   2869 		seq_printf(m, "\n");
   2870 
   2871 		/* dump the standard OUI branch header */
   2872 		ret = drm_dp_dpcd_read(mgr->aux, DP_BRANCH_OUI, buf, DP_BRANCH_OUI_HEADER_SIZE);
   2873 		seq_printf(m, "branch oui: ");
   2874 		for (i = 0; i < 0x3; i++)
   2875 			seq_printf(m, "%02x", buf[i]);
   2876 		seq_printf(m, " devid: ");
   2877 		for (i = 0x3; i < 0x8; i++)
   2878 			seq_printf(m, "%c", buf[i]);
   2879 		seq_printf(m, " revision: hw: %x.%x sw: %x.%x", buf[0x9] >> 4, buf[0x9] & 0xf, buf[0xa], buf[0xb]);
   2880 		seq_printf(m, "\n");
   2881 		bret = dump_dp_payload_table(mgr, buf);
    2882 		if (bret) {
   2883 			seq_printf(m, "payload table: ");
   2884 			for (i = 0; i < 63; i++)
   2885 				seq_printf(m, "%02x ", buf[i]);
   2886 			seq_printf(m, "\n");
   2887 		}
   2888 
   2889 	}
   2890 
   2891 	mutex_unlock(&mgr->lock);
   2892 
   2893 }
   2894 EXPORT_SYMBOL(drm_dp_mst_dump_topology);
   2895 #endif	/* IS_ENABLED(CONFIG_DEBUG_FS) */
   2896 
   2897 static void drm_dp_tx_work(struct work_struct *work)
   2898 {
   2899 	struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, tx_work);
   2900 
   2901 	mutex_lock(&mgr->qlock);
   2902 	if (mgr->tx_down_in_progress)
   2903 		process_single_down_tx_qlock(mgr);
   2904 	mutex_unlock(&mgr->qlock);
   2905 }
   2906 
   2907 static void drm_dp_free_mst_port(struct kref *kref)
   2908 {
   2909 	struct drm_dp_mst_port *port = container_of(kref, struct drm_dp_mst_port, kref);
   2910 	kref_put(&port->parent->kref, drm_dp_free_mst_branch_device);
   2911 	kfree(port);
   2912 }
   2913 
   2914 static void drm_dp_destroy_connector_work(struct work_struct *work)
   2915 {
   2916 	struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, destroy_connector_work);
   2917 	struct drm_dp_mst_port *port;
   2918 	bool send_hotplug = false;
   2919 	/*
   2920 	 * Not a regular list traverse as we have to drop the destroy
   2921 	 * connector lock before destroying the connector, to avoid AB->BA
   2922 	 * ordering between this lock and the config mutex.
   2923 	 */
   2924 	for (;;) {
   2925 		mutex_lock(&mgr->destroy_connector_lock);
   2926 		port = list_first_entry_or_null(&mgr->destroy_connector_list, struct drm_dp_mst_port, next);
   2927 		if (!port) {
   2928 			mutex_unlock(&mgr->destroy_connector_lock);
   2929 			break;
   2930 		}
   2931 		list_del(&port->next);
   2932 		mutex_unlock(&mgr->destroy_connector_lock);
   2933 
   2934 		kref_init(&port->kref);
   2935 		INIT_LIST_HEAD(&port->next);
   2936 
   2937 		mgr->cbs->destroy_connector(mgr, port->connector);
   2938 
   2939 		drm_dp_port_teardown_pdt(port, port->pdt);
   2940 		port->pdt = DP_PEER_DEVICE_NONE;
   2941 
   2942 		if (!port->input && port->vcpi.vcpi > 0) {
   2943 			drm_dp_mst_reset_vcpi_slots(mgr, port);
   2944 			drm_dp_update_payload_part1(mgr);
   2945 			drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
   2946 		}
   2947 
   2948 		kref_put(&port->kref, drm_dp_free_mst_port);
   2949 		send_hotplug = true;
   2950 	}
   2951 	if (send_hotplug)
   2952 		(*mgr->cbs->hotplug)(mgr);
   2953 }
   2954 
   2955 /**
   2956  * drm_dp_mst_topology_mgr_init - initialise a topology manager
   2957  * @mgr: manager struct to initialise
   2958  * @dev: device providing this structure - for i2c addition.
   2959  * @aux: DP helper aux channel to talk to this device
   2960  * @max_dpcd_transaction_bytes: hw specific DPCD transaction limit
   2961  * @max_payloads: maximum number of payloads this GPU can source
   2962  * @conn_base_id: the connector object ID the MST device is connected to.
   2963  *
   2964  * Return 0 for success, or negative error code on failure
   2965  */
   2966 int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
   2967 				 struct device *dev, struct drm_dp_aux *aux,
   2968 				 int max_dpcd_transaction_bytes,
   2969 				 int max_payloads, int conn_base_id)
   2970 {
   2971 	mutex_init(&mgr->lock);
   2972 	mutex_init(&mgr->qlock);
   2973 	mutex_init(&mgr->payload_lock);
   2974 	mutex_init(&mgr->destroy_connector_lock);
   2975 	INIT_LIST_HEAD(&mgr->tx_msg_downq);
   2976 	INIT_LIST_HEAD(&mgr->destroy_connector_list);
   2977 	INIT_WORK(&mgr->work, drm_dp_mst_link_probe_work);
   2978 	INIT_WORK(&mgr->tx_work, drm_dp_tx_work);
   2979 	INIT_WORK(&mgr->destroy_connector_work, drm_dp_destroy_connector_work);
   2980 #ifdef __NetBSD__
   2981 	DRM_INIT_WAITQUEUE(&mgr->tx_waitq, "dpmstwait");
   2982 #else
   2983 	init_waitqueue_head(&mgr->tx_waitq);
   2984 #endif
   2985 	mgr->dev = dev;
   2986 	mgr->aux = aux;
   2987 	mgr->max_dpcd_transaction_bytes = max_dpcd_transaction_bytes;
   2988 	mgr->max_payloads = max_payloads;
   2989 	mgr->conn_base_id = conn_base_id;
   2990 	mgr->payloads = kcalloc(max_payloads, sizeof(struct drm_dp_payload), GFP_KERNEL);
   2991 	if (!mgr->payloads)
   2992 		return -ENOMEM;
   2993 	mgr->proposed_vcpis = kcalloc(max_payloads, sizeof(struct drm_dp_vcpi *), GFP_KERNEL);
   2994 	if (!mgr->proposed_vcpis)
   2995 		return -ENOMEM;
   2996 	set_bit(0, &mgr->payload_mask);
   2997 	test_calc_pbn_mode();
   2998 	return 0;
   2999 }
   3000 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_init);
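
/*
 * Illustrative sketch (not compiled): typical initialisation from a
 * driver's encoder/connector setup path.  The transaction-byte limit and
 * payload count are hardware-specific placeholders; mgr->cbs must point
 * at driver-provided topology callbacks before the manager is used.
 */
#if 0	/* example only */
static int example_mst_init(struct drm_dp_mst_topology_mgr *mgr,
			    struct device *dev, struct drm_dp_aux *aux,
			    const struct drm_dp_mst_topology_cbs *cbs,
			    int conn_base_id)
{
	mgr->cbs = cbs;
	return drm_dp_mst_topology_mgr_init(mgr, dev, aux,
					    16 /* max DPCD transaction */,
					    4 /* max payloads */,
					    conn_base_id);
}
#endif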
   3001 
   3002 /**
   3003  * drm_dp_mst_topology_mgr_destroy() - destroy topology manager.
   3004  * @mgr: manager to destroy
   3005  */
   3006 void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr)
   3007 {
   3008 	flush_work(&mgr->work);
   3009 	flush_work(&mgr->destroy_connector_work);
   3010 	mutex_lock(&mgr->payload_lock);
   3011 	kfree(mgr->payloads);
   3012 	mgr->payloads = NULL;
   3013 	kfree(mgr->proposed_vcpis);
   3014 	mgr->proposed_vcpis = NULL;
   3015 	mutex_unlock(&mgr->payload_lock);
   3016 	mgr->dev = NULL;
   3017 	mgr->aux = NULL;
   3018 #ifdef __NetBSD__
   3019 	DRM_DESTROY_WAITQUEUE(&mgr->tx_waitq);
   3020 #endif
   3021 	mutex_destroy(&mgr->destroy_connector_lock);
   3022 	mutex_destroy(&mgr->payload_lock);
   3023 	mutex_destroy(&mgr->qlock);
   3024 	mutex_destroy(&mgr->lock);
   3025 }
   3026 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_destroy);
   3027 
   3028 /* I2C device */
   3029 static int drm_dp_mst_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
   3030 			       int num)
   3031 {
   3032 	struct drm_dp_aux *aux = adapter->algo_data;
   3033 	struct drm_dp_mst_port *port = container_of(aux, struct drm_dp_mst_port, aux);
   3034 	struct drm_dp_mst_branch *mstb;
   3035 	struct drm_dp_mst_topology_mgr *mgr = port->mgr;
   3036 	unsigned int i;
   3037 	bool reading = false;
   3038 	struct drm_dp_sideband_msg_req_body msg;
   3039 	struct drm_dp_sideband_msg_tx *txmsg = NULL;
   3040 	int ret;
   3041 
   3042 	mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);
   3043 	if (!mstb)
   3044 		return -EREMOTEIO;
   3045 
   3046 	/* construct i2c msg */
   3047 	/* see if last msg is a read */
   3048 	if (msgs[num - 1].flags & I2C_M_RD)
   3049 		reading = true;
   3050 
   3051 	if (!reading || (num - 1 > DP_REMOTE_I2C_READ_MAX_TRANSACTIONS)) {
   3052 		DRM_DEBUG_KMS("Unsupported I2C transaction for MST device\n");
   3053 		ret = -EIO;
   3054 		goto out;
   3055 	}
   3056 
   3057 	memset(&msg, 0, sizeof(msg));
   3058 	msg.req_type = DP_REMOTE_I2C_READ;
   3059 	msg.u.i2c_read.num_transactions = num - 1;
   3060 	msg.u.i2c_read.port_number = port->port_num;
   3061 	for (i = 0; i < num - 1; i++) {
   3062 		msg.u.i2c_read.transactions[i].i2c_dev_id = msgs[i].addr;
   3063 		msg.u.i2c_read.transactions[i].num_bytes = msgs[i].len;
   3064 		msg.u.i2c_read.transactions[i].bytes = msgs[i].buf;
   3065 	}
   3066 	msg.u.i2c_read.read_i2c_device_id = msgs[num - 1].addr;
   3067 	msg.u.i2c_read.num_bytes_read = msgs[num - 1].len;
   3068 
   3069 	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
   3070 	if (!txmsg) {
   3071 		ret = -ENOMEM;
   3072 		goto out;
   3073 	}
   3074 
   3075 	txmsg->dst = mstb;
   3076 	drm_dp_encode_sideband_req(&msg, txmsg);
   3077 
   3078 	drm_dp_queue_down_tx(mgr, txmsg);
   3079 
   3080 	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
   3081 	if (ret > 0) {
   3082 
   3083 		if (txmsg->reply.reply_type == 1) { /* got a NAK back */
   3084 			ret = -EREMOTEIO;
   3085 			goto out;
   3086 		}
   3087 		if (txmsg->reply.u.remote_i2c_read_ack.num_bytes != msgs[num - 1].len) {
   3088 			ret = -EIO;
   3089 			goto out;
   3090 		}
   3091 		memcpy(msgs[num - 1].buf, txmsg->reply.u.remote_i2c_read_ack.bytes, msgs[num - 1].len);
   3092 		ret = num;
   3093 	}
   3094 out:
   3095 	kfree(txmsg);
   3096 	drm_dp_put_mst_branch_device(mstb);
   3097 	return ret;
   3098 }
   3099 
   3100 static u32 drm_dp_mst_i2c_functionality(struct i2c_adapter *adapter)
   3101 {
   3102 	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
   3103 	       I2C_FUNC_SMBUS_READ_BLOCK_DATA |
   3104 	       I2C_FUNC_SMBUS_BLOCK_PROC_CALL |
   3105 	       I2C_FUNC_10BIT_ADDR;
   3106 }
   3107 
   3108 static const struct i2c_algorithm drm_dp_mst_i2c_algo = {
   3109 	.functionality = drm_dp_mst_i2c_functionality,
   3110 	.master_xfer = drm_dp_mst_i2c_xfer,
   3111 };
   3112 
   3113 /**
   3114  * drm_dp_mst_register_i2c_bus() - register an I2C adapter for I2C-over-AUX
   3115  * @aux: DisplayPort AUX channel
   3116  *
   3117  * Returns 0 on success or a negative error code on failure.
   3118  */
   3119 static int drm_dp_mst_register_i2c_bus(struct drm_dp_aux *aux)
   3120 {
   3121 	aux->ddc.algo = &drm_dp_mst_i2c_algo;
   3122 	aux->ddc.algo_data = aux;
   3123 	aux->ddc.retries = 3;
   3124 
   3125 	aux->ddc.class = I2C_CLASS_DDC;
   3126 	aux->ddc.owner = THIS_MODULE;
   3127 	aux->ddc.dev.parent = aux->dev;
   3128 #ifndef __NetBSD__		/* XXX of? */
   3129 	aux->ddc.dev.of_node = aux->dev->of_node;
   3130 #endif
   3131 
   3132 	strlcpy(aux->ddc.name, aux->name ? aux->name : dev_name(aux->dev),
   3133 		sizeof(aux->ddc.name));
   3134 
   3135 	return i2c_add_adapter(&aux->ddc);
   3136 }
   3137 
   3138 /**
   3139  * drm_dp_mst_unregister_i2c_bus() - unregister an I2C-over-AUX adapter
   3140  * @aux: DisplayPort AUX channel
   3141  */
   3142 static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_aux *aux)
   3143 {
   3144 	i2c_del_adapter(&aux->ddc);
   3145 }
   3146